Compare commits

...

934 Commits

Author SHA1 Message Date
7aa05618a3 data_replicator -> ncp
Fixes #327
2018-06-07 17:11:17 -06:00
cdfbbe5e60 Fix diagram typos 2018-06-07 17:11:17 -06:00
fe7d1cb81c Race -> Or
Thanks for the suggestion @FishmanL!
2018-06-07 17:11:03 -06:00
c2a9395a4b perf counters 2018-06-07 14:59:21 -07:00
586279bcfc Add server diagrams 2018-06-07 15:24:44 -06:00
8bd10e7c4c Cleanup top-level lib doc 2018-06-07 15:24:44 -06:00
928e6165bc Add TPU & RPU diagrams 2018-06-07 15:24:44 -06:00
77c9e801aa fixed client demo (#325)
* fixed client demo
2018-06-07 13:51:15 -07:00
c78132417f fix deadlock 2018-06-07 13:52:33 -06:00
849928887e undo 2018-06-07 13:52:33 -06:00
ba1163d49f fix logs 2018-06-07 13:52:33 -06:00
6f9c89af39 fix deadlock 2018-06-07 13:52:33 -06:00
246b8b1242 No longer cat scripts
Because we keep changing those scripts and not updating the readme.

Also, this removes the "-b 9000" when starting validators. Is that right?
Or should we be passing that to the validator config?
2018-06-07 12:17:43 -06:00
f0db68cb75 Add note about validator.json and -d flag to config generating scripts 2018-06-07 11:15:41 -06:00
f0d1fdfb46 Add missing module descriptions 2018-06-07 09:25:36 -06:00
3b8b2e030a Better docs for transaction 2018-06-07 09:25:36 -06:00
b4fee677a5 Better docs for payment_plan 2018-06-07 09:25:36 -06:00
fe706583f9 Better docs for sigverify_stage 2018-06-07 09:25:36 -06:00
d0e0c17ece Better docs for rpu 2018-06-07 09:25:36 -06:00
5aaa38bcaf Better docs for write_stage 2018-06-07 09:25:36 -06:00
6ff9b27f8e Better docs for entry 2018-06-07 09:25:36 -06:00
3f4e035506 Better docs for budget 2018-06-07 09:25:36 -06:00
57d9fbb927 Better docs for banking_stage 2018-06-07 09:25:36 -06:00
ee44e51b30 Better docs for the bank 2018-06-07 09:25:36 -06:00
5011f24123 Move more interesting content into first header
The first header and its content are the only text displayed on
GitHub's mobile page. Reorder so that the disclaimer is the only
information people see.

Disclaimer: IANAL and assume reordering these doesn't matter. :)
2018-06-07 09:25:36 -06:00
d1eda334f3 gdb 2018-06-07 09:25:08 -06:00
2ae5ce9f2c Do not use cuda for multinode-demo validator component 2018-06-07 07:04:33 -06:00
4f5ac78b7e Add readme to crates.io 2018-06-06 15:00:25 -06:00
074c9af020 Shellcheck again 2018-06-05 15:32:25 -06:00
2da2d4e365 More shellcheck 2018-06-05 15:32:25 -06:00
8eb76ab2a5 Fix shellcheck 2018-06-05 15:32:25 -06:00
a710d95243 Fix non-erasure blob nulling 2018-06-05 15:32:25 -06:00
a06535d7ed cargo fmt 2018-06-05 15:32:25 -06:00
f511ac9be7 Fixes for receiving old blobs and nulling the window with coding 2018-06-05 15:32:25 -06:00
e28ad2177e Receive fixes 2018-06-05 15:32:25 -06:00
cb16fe84cd Rework to fix coding blob insertion 2018-06-05 15:32:25 -06:00
ec3569aa39 Move receive_index to correct place 2018-06-05 15:32:25 -06:00
246edecf53 Add receive_index for broadcast blobs and fix blobs_len position 2018-06-05 15:32:25 -06:00
34834c5af9 Store another size in the data block so it is coded as well 2018-06-05 15:32:25 -06:00
b845245614 Restore more of the blob window and add is_coding helper 2018-06-05 15:32:25 -06:00
5711fb9969 Generate coding for the current blob set not just the first coding set 2018-06-05 15:32:25 -06:00
d1eaecde9a Fix deadlock and only push to contq if it's not a coding blob 2018-06-05 15:32:25 -06:00
00c8505d1e Handle set_flags error 2018-06-05 15:32:25 -06:00
33f01efe69 Fixes for erasure coding 2018-06-05 15:32:25 -06:00
377d312c81 Revert log levels 2018-06-05 15:32:25 -06:00
badf5d5412 Add window recovery 2018-06-05 15:32:25 -06:00
0339f90b40 Fix gf-complete url and symlinks 2018-06-05 15:32:25 -06:00
5455e8e6a9 Review comments 2018-06-05 15:32:25 -06:00
6843b71a0d Debug erasure ci script 2018-06-05 15:32:25 -06:00
634408b5e8 Add erasure build to ci 2018-06-05 15:32:25 -06:00
d053f78b74 Erasure refinements, fix generating orders table 2018-06-05 15:32:25 -06:00
93b6fceb2f generate coding after indexing 2018-06-05 15:32:25 -06:00
ac7860c35d indexing blobs then coding 2018-06-05 15:32:25 -06:00
b0eab8729f Add erasure ci script 2018-06-05 15:32:25 -06:00
cb81f80b31 Enable logging for client demo 2018-06-05 15:32:25 -06:00
ea97529185 Fix erasure compilation 2018-06-05 15:32:25 -06:00
f1075191fe Clean up comments: Event -> Transaction 2018-06-04 21:43:46 -06:00
74c479fbc9 Delete bitrotted docs 2018-06-04 21:43:46 -06:00
7e788d3a17 No longer need explicit refs in rustc 1.26 2018-06-04 21:43:46 -06:00
69b3c75f0d Power of two chance (#314)
* fix validator script
* 1/2^30 that we fail due to random returning the same value
2018-06-04 13:32:34 -07:00
b2c2fa40a2 comments 2018-06-03 22:08:25 -06:00
50458d9524 more tests 2018-06-03 22:08:25 -06:00
9679e3e356 more tests 2018-06-03 22:08:25 -06:00
6db9f92b8a crdt gossip tests 2018-06-03 22:08:25 -06:00
4a44498d45 Fix args in validator script, readme version, client-demo perf print 2018-06-02 21:55:27 -06:00
216510c573 repair socket and receiver thread (#303)
repair socket and receiver thread
2018-06-02 08:32:51 -07:00
fd338c3097 Run release binary for leader node 2018-06-01 17:10:48 -06:00
b66ebf5dec Version bump 2018-06-01 17:10:37 -06:00
5da99de579 Review feedback 2018-06-01 13:43:38 -06:00
3aa2907bd6 Restore shellcheck 2018-06-01 13:43:38 -06:00
05d1618659 Add more detail to testnet setup 2018-06-01 13:43:38 -06:00
86113811f2 Readme/demo cleanup 2018-06-01 13:43:38 -06:00
53ecaa03f1 Need another beta 2018-05-31 19:08:09 -06:00
205c1aa505 Version bump 2018-05-31 18:49:41 -06:00
9b54c1542b Move defaults from bash to Rust 2018-05-31 17:18:11 -07:00
93d5d1b2ad Default to 1 node 2018-05-31 17:18:11 -07:00
4c0f3ed6f3 Attempt to revive the singlenode demo 2018-05-31 17:18:11 -07:00
2580155bf2 Enable last of the ignored tests 2018-05-31 16:45:21 -06:00
6ab0dd4df9 Remove config options from fullnode 2018-05-31 16:15:02 -06:00
4b8c36b6b9 Add solana-fullnode-config 2018-05-31 16:15:02 -06:00
359a8397c0 Make bootstrapping functions accessible to other binaries 2018-05-31 16:15:02 -06:00
c9fd5d74b5 Boot futures 0.1
We added them thinking it'd be a good stepping stone towards an
asynchronous thin client, but it's used inconsistently, and where
it is used, the function is still synchronous, which is just confusing.
2018-05-31 14:13:09 -06:00
391744af97 Speed up the creation of the million accounts
All threads were locked on the same set of signatures.
2018-05-31 12:13:18 -06:00
587ab29e09 Don't register entry ID until after processing its transactions 2018-05-31 12:13:18 -06:00
80f07dadc5 Generalize process_entries()
And use it in fullnode
2018-05-31 12:13:18 -06:00
60609a44ba Initialize recorder from bank's last_id 2018-05-31 12:13:18 -06:00
30c8fa46b4 rustc version bump 2018-05-30 20:49:55 -06:00
7aab7d2f82 Sleep between events if PoH is disabled 2018-05-30 15:55:10 -06:00
a8e1c44663 names 2018-05-30 14:50:53 -06:00
a2b92c35e1 thread names 2018-05-30 14:50:53 -06:00
9f2086c772 names 2018-05-30 14:50:53 -06:00
3eb005d492 names for threads 2018-05-30 14:50:53 -06:00
68955bfcf4 Change multinode script argument to leader path
Some may have cloned their code in a different place
2018-05-30 14:49:42 -06:00
9ac7070e08 fix ci 2018-05-30 14:04:48 -06:00
e44e81bd17 fmt 2018-05-30 14:04:48 -06:00
f5eedd2d19 fmt 2018-05-30 14:04:48 -06:00
46059a37eb skip shell check 2018-05-30 14:04:48 -06:00
adc655a3a2 scripts 2018-05-30 14:04:48 -06:00
3058f80489 log 2018-05-30 14:04:48 -06:00
df98cae4b6 cleanup 2018-05-30 14:04:48 -06:00
d327e0aabd warn on tx verify sig 2018-05-30 14:04:48 -06:00
17d3a6763c update 2018-05-30 14:04:48 -06:00
02c5b0343b fixed cloned 2018-05-30 14:04:48 -06:00
2888e45fea comments 2018-05-30 14:04:48 -06:00
f1311075d9 integration tests 2018-05-30 14:04:48 -06:00
6c380e04a3 fix 2018-05-30 14:04:48 -06:00
cef1c208a5 Crdt pipeline, coalesce window repair requests in the listener by examining all of them at once, and unblock those threads from doing I/O. 2018-05-30 14:04:48 -06:00
ef8eac92e3 Version bump 2018-05-29 20:33:45 -07:00
9c9c63572b cargo fmt
rustfmt was updated with 1.26.1
2018-05-29 20:33:45 -07:00
6c0c6de1d0 Better error names 2018-05-29 20:33:45 -07:00
b57aecc24c Better error if Bank doesn't recognize tx last_id 2018-05-29 20:33:45 -07:00
290dde60a0 Test invalid tokens and fees 2018-05-29 20:33:45 -07:00
38623785f9 Add fee to Transaction
Fixes #161
2018-05-29 20:33:45 -07:00
256ecc7208 Build status badge now excludes pull requests 2018-05-29 20:33:34 -07:00
76b06b47ba Delete dead code 2018-05-29 18:09:03 -06:00
cf15cf587f spending plan -> budget
Review feedback from @sakridge
2018-05-29 18:09:03 -06:00
134c7add57 Fix bench build 2018-05-29 18:09:03 -06:00
ac0791826a plan.rs -> payment_plan.rs 2018-05-29 18:09:03 -06:00
d2622b7798 Allow for additional smart contract languages
Fixes #159
2018-05-29 18:09:03 -06:00
f82cbf3a27 Move Budget EDSL into its own module 2018-05-29 18:09:03 -06:00
aa7e3df8d6 Plan -> Budget
Budget is now an EDSL. PaymentPlan is the interface to it.
2018-05-29 18:09:03 -06:00
ad00d7bd9c Move plan methods to a trait 2018-05-29 18:09:03 -06:00
8d1f82c34d breaks 2018-05-29 16:53:26 -07:00
0cb2036e3a comment on bad blob usage 2018-05-29 16:53:26 -07:00
2b1e90b0a5 More idiomatic Rust 2018-05-29 14:04:27 -06:00
f2ccc133a2 Finally made fetch happen 2018-05-29 14:04:27 -06:00
5e824b39dd Move multinode communication outside TPU 2018-05-29 14:04:27 -06:00
41efcae64b Remove dead code
History: we thought SigVerifyStage would use these, but it does
signature verification before deserializing transactions.
2018-05-29 10:38:58 -06:00
cf5671d058 tr -> tx
Missed a few.
2018-05-29 10:38:58 -06:00
2570bba6b1 Make apply_payment a method
History: the function was pulled out of Bank when each field wasn't
wrapped in a RwLock, and locking 'balances' meant locking
everything in the bank. Now that the RwLocks are here to stay,
we can make it a method again.
2018-05-29 10:38:58 -06:00
71cb7d5c97 Better names 2018-05-29 10:38:58 -06:00
0df6541d5e Fewer public functions 2018-05-29 10:38:58 -06:00
52145caf7e Cleanup: make 'verified' qualifier implicit
History: Qualifying the method names with 'verified' was done to
distinguish them from methods that first did signature verification.
After we moved all signature verification to SigVerifyStage, we removed
those methods from Bank, leaving only the 'verified' ones.

This patch removes the word 'verified' from all method names, since
it is now implied by any code running after SigVerifyStage.
2018-05-29 10:38:58 -06:00
86a50ae9e1 Add RUST_BACKTRACE 2018-05-28 22:23:25 -07:00
c64cfb74f3 Update code coverage command 2018-05-28 22:23:25 -07:00
26153d9919 Avoid docker buildkite plugin, which is not supported by bkrun 2018-05-28 22:23:25 -07:00
5af922722f Add local buildkite CI runner 2018-05-28 22:23:25 -07:00
b70d730b32 Support local .a, skip if unable to find .a 2018-05-28 22:23:25 -07:00
bf4b856e0c Don't fail if CODECOV_TOKEN is undefined 2018-05-28 22:23:25 -07:00
0cf0ae6755 s/label:/name:/g 2018-05-28 22:23:25 -07:00
29061cff39 Delint existing shell scripts 2018-05-28 05:18:46 -06:00
b7eec4c89f Lint shell scripts in CI 2018-05-28 05:18:46 -06:00
a3854c229e More rebase typos 2018-05-26 20:48:42 -06:00
dcde256433 Fix rebase typo 2018-05-26 20:28:22 -06:00
931bdbd5cd Fix typo 2018-05-26 20:25:44 -06:00
b7bd59c344 Cleanup whitespace
And delete rebasing artifact
2018-05-26 20:23:18 -06:00
2dbf9a6017 rename 2018-05-26 20:13:42 -06:00
fe93bba457 logs
poll both endpoints in client

logs

logs

logs

names

verify plan not sig

log

set udp buffer to max

drop output

more verbose about window requests

log the leader

load leader identity

readme for single node demo

update

asserts

update

replay all

rsync

dynamic file read in testnode

fix

cleanup

readme

sum

fix scripts

cleanup

cleanup

readme
2018-05-26 20:13:42 -06:00
6e35f54738 Simplify environment blocks 2018-05-26 14:38:26 -07:00
089294a85e 'ignored' step failures are no longer ignored 2018-05-26 11:00:20 -07:00
25c0b44641 Run ignored build step in docker 2018-05-26 11:00:20 -07:00
58c1589688 More typos 2018-05-26 00:36:50 -06:00
bb53f69016 Fix typos 2018-05-26 00:36:50 -06:00
75659ca042 Light up coverage build 2018-05-26 00:36:50 -06:00
fc00594ea4 Move multinode test to integration tests 2018-05-26 00:36:50 -06:00
8d26be8b89 Run benchmarks in nightly
And name functions the same way as test functions
2018-05-26 00:36:50 -06:00
af4e95ae0f Only check formatting in stable build 2018-05-26 00:36:50 -06:00
ffb4a7aa78 Boot TravisCI configuration 2018-05-26 00:36:50 -06:00
dcaeacc507 request_stage::serialize_packets -> packet::to_blobs
Good stuff - no need to hide them.
2018-05-25 17:31:07 -06:00
4f377e6710 Generalize serialize_responses 2018-05-25 17:31:07 -06:00
122db85727 Move channel-oriented code into request_stage 2018-05-25 17:31:07 -06:00
a598e4aa74 Fix comments 2018-05-25 17:31:07 -06:00
733b31ebbd testnode -> fullnode
It's the real deal.
2018-05-25 17:31:07 -06:00
dac9775de0 Replace client-demo with multinode-demo 2018-05-25 17:31:07 -06:00
46c19a5783 Rename sigverify modules 2018-05-25 17:31:07 -06:00
aaeb5ba52f tr -> tx 2018-05-25 16:47:21 -06:00
9f5a3d6064 events -> transactions 2018-05-25 16:47:21 -06:00
4cdf873f98 Delete event.rs 2018-05-25 16:47:21 -06:00
b43ae748c3 Update publish.sh 2018-05-25 16:08:14 -06:00
02ddd89653 Version bump
And solana.io -> solana.com
2018-05-25 15:37:07 -06:00
bbe6eccefe log 2018-05-25 07:02:39 -06:00
6677a7b66a verify plan not sig 2018-05-25 07:02:39 -06:00
75c37fcc73 names 2018-05-25 07:02:39 -06:00
5be71a8a9d logs 2018-05-25 07:02:39 -06:00
b9ae7d1ebb logs 2018-05-25 07:02:39 -06:00
8b02e0f57c logs 2018-05-25 07:02:39 -06:00
342cc7350a poll both endpoints in client 2018-05-25 07:02:39 -06:00
2335a51ced logs 2018-05-25 07:02:39 -06:00
868df1824c fmt 2018-05-24 17:40:33 -06:00
83c11f0f9d logs 2018-05-24 17:40:33 -06:00
1022f1b0c6 logs 2018-05-24 17:40:33 -06:00
c2c80232e3 logs 2018-05-24 17:40:33 -06:00
115f4e54b8 update 2018-05-24 17:40:33 -06:00
669b1694b8 exponential backoff for retransmit 2018-05-24 17:40:33 -06:00
2128c58fbe logs and tps counting 2018-05-24 10:35:23 -06:00
e12e154877 Boot Event timestamp/signature constructors 2018-05-24 10:10:41 -06:00
73d3c17507 Migrate from Event to Transaction Timestamp/Signature 2018-05-24 10:10:41 -06:00
7f647a93da Add last_id to Event timestamp/signature constructors 2018-05-24 10:10:41 -06:00
ecb3dbbb60 Add witness tx constructors 2018-05-24 10:10:41 -06:00
cc907ba69d Add Instruction type 2018-05-24 10:10:41 -06:00
5a45eef1dc Exit cleanup (#252)
* Ignore record_stage exit reason. We only really care about panic exit versus graceful exit.
* Ignore coverage build in CI
2018-05-24 10:03:17 -06:00
0d980e89bc cargo fmt
@aeyakovenko: https://github.com/rust-lang/rust.vim#formatting-with-rustfmt
2018-05-23 20:05:08 -06:00
ef87832bff fixed 2018-05-23 17:24:58 -06:00
94507d1aca cuda 2018-05-23 17:24:58 -06:00
89924a38ff cuda 2018-05-23 17:24:58 -06:00
7faa2b8698 fixed demo 2018-05-23 17:24:58 -06:00
65352ce8e7 fix 2018-05-23 17:24:58 -06:00
f1988ee1e3 help 2018-05-23 17:24:58 -06:00
82ac8eb731 use client ports 2018-05-23 17:24:58 -06:00
ae47e34fa5 fix 2018-05-23 17:24:58 -06:00
28e781efc3 break early 2018-05-23 17:24:58 -06:00
5c3ceb8355 aws demo2 2018-05-23 17:24:58 -06:00
c9113b381d Pull channel functionality into record_stage
This makes record_stage consistent with the other stages. The stage
manages the channels. Anything else is in a standalone object. In
the case of the record_stage, that leaves almost nothing!
2018-05-23 17:15:28 -06:00
75e69eecfa Fix nightly bench 2018-05-23 17:15:03 -06:00
f3c4acc723 cleanup multi node test 2018-05-23 16:59:17 -06:00
2a0095e322 Remove unused variable in multinode-demo; fix compiler warning 2018-05-23 16:55:45 -06:00
9ad5f3c65b fix option (#246) 2018-05-23 14:48:00 -07:00
579de64d49 Delete binary again 2018-05-23 14:15:59 -06:00
d4200a7b1e Fix build
GenKeys() fix and new multinode module crossed in flight.
2018-05-23 14:10:26 -06:00
84477835dc Fix nondeterministic key generation (#243)
Our one and only unsafe operation was ...unsafe.
2018-05-23 14:04:07 -06:00
504b318ef1 Hooks for binaries to run as leader or replicator and attach to network (#221) 2018-05-23 14:03:19 -06:00
f154c8c490 Add data to errors to ease debugging 2018-05-23 12:50:23 -06:00
d4959bc157 Test cleanup
GenKey unit tests were in the benchmark suite.
2018-05-23 12:50:23 -06:00
87e025fe22 fmt 2018-05-23 12:07:44 -06:00
8049323ca8 @garious review 2018-05-23 12:07:44 -06:00
b38c7ea2ff fmt 2018-05-23 12:07:44 -06:00
239b925fb3 woop 2018-05-23 12:07:44 -06:00
60da7f7aaf wip 2018-05-23 12:07:44 -06:00
8646ff4927 refactor wip 2018-05-23 12:07:44 -06:00
59be94a81f cleanup 2018-05-23 12:07:44 -06:00
437c485e5c cleanup 2018-05-23 12:07:44 -06:00
79a58da6a9 Merge pull request #240 from mvines/master
CI overhaul follow-ups
2018-05-22 23:27:19 -07:00
ae29641a18 Run most CI steps in docker 2018-05-22 23:16:25 -07:00
9c3f65bca9 Update build status badge 2018-05-22 22:59:19 -07:00
086365b4c4 Merge pull request #237 from garious/hoist-lastid
Hoist last_id
2018-05-22 17:48:25 -06:00
64044da49c Merge pull request #239 from sakridge/fix_bad_sig_mac
Fix test_bad_sig on mac
2018-05-22 17:48:01 -06:00
7b5b7feb63 Fix test_bad_sig on mac 2018-05-22 16:40:01 -07:00
2e059f8504 Rename TransactionData to Contract
No longer a single place to get all the data that was signed.
2018-05-22 17:00:40 -06:00
207b6686d1 Hoist last_id
First step in unifying Witness processing and Transaction processing
2018-05-22 17:00:40 -06:00
abfd7d6951 Merge pull request #234 from sakridge/fix_events_addr
Send events to the right address
2018-05-22 16:59:28 -06:00
7fc166b5ba Merge pull request #238 from aeyakovenko/tvu_cleanup
tvu cleanup
2018-05-22 15:41:33 -07:00
021953d59a cleanup 2018-05-22 15:30:46 -07:00
bbe89df2ff fmt 2018-05-22 15:18:07 -07:00
a638ec5911 builds 2018-05-22 15:17:59 -07:00
26272a3600 split out stages 2018-05-22 14:26:28 -07:00
8454eb79d0 Send events to the right address and set recv socket timeout 2018-05-22 13:52:50 -07:00
796f4b981b Merge pull request #233 from mvines/ci
Add in-tree buildkite pipeline
2018-05-22 13:06:24 -07:00
34514d65bc Add in-tree buildkite pipeline 2018-05-21 23:43:27 -07:00
2786357082 Merge pull request #230 from garious/generalize-topackets
Benchmark the banking stage
2018-05-18 19:47:26 -07:00
4badeacd1d Merge pull request #226 from aeyakovenko/converge_test
check convergence
2018-05-16 23:44:23 -07:00
63a0ba6ec8 fixed 2018-05-16 23:28:03 -07:00
9a4ce6d70e fmt 2018-05-16 23:27:26 -07:00
35ee2d0ce1 cleanup 2018-05-16 23:27:26 -07:00
b04716d40d fmt 2018-05-16 23:27:26 -07:00
051fa6f1f1 cleanup 2018-05-16 23:27:26 -07:00
8dc1b07e75 docs 2018-05-16 23:27:26 -07:00
bee1e7ebaf compute convergence maximum 2018-05-16 23:27:26 -07:00
f3f0b9f0c5 update 2018-05-16 23:27:26 -07:00
a5cf745e1c check convergence 2018-05-16 23:27:26 -07:00
273b800047 Benchmark the banking stage 2018-05-16 23:18:58 -07:00
6c1f1c2a7a Promote create_entry() to Entry::new() 2018-05-16 23:18:58 -07:00
9c62f8d81f Add Event::Transaction constructor 2018-05-16 23:18:58 -07:00
82aef7ebe2 Merge pull request #225 from mvines/deploy
Auto deploy tagged versions of solana to crate.io
2018-05-16 23:36:15 -06:00
57636d3d5f Auto deploy tagged versions of solana to crate.io 2018-05-16 21:38:14 -07:00
dc87effc0a Merge pull request #229 from garious/fix-bench
Fix the benchmark build
2018-05-16 16:37:56 -06:00
f0c9823e9f Merge pull request #228 from garious/generalize-topackets
request::to_request_packets -> packet::to_packets
2018-05-16 16:37:29 -06:00
0b91dd6163 Fix the benchmark build 2018-05-16 16:35:50 -06:00
4955c6f13a request::to_request_packets -> packet::to_packets 2018-05-16 16:11:53 -06:00
2e7beca9ba Generalize to_request_packets 2018-05-16 16:01:19 -06:00
59c1b9983d Merge pull request #220 from garious/add-tpu
Add tpu
2018-05-16 12:21:07 -06:00
f7083e0923 Remove transaction processing from RPU and request processing from TVU 2018-05-15 12:15:29 -06:00
6d4defdf96 Offload event processing to the TPU 2018-05-15 11:33:43 -06:00
b826f837f8 First attempt to pull TPU into the server 2018-05-15 11:25:55 -06:00
5855e18a4e Let server own the bank, not TPU/RPU 2018-05-15 11:21:48 -06:00
3f38c0a245 Feed events socket into the server 2018-05-15 11:19:58 -06:00
cfe8b3fc55 Wrap the RPU with new object Server 2018-05-15 11:00:01 -06:00
e9ee020b5f Rename constructors 2018-05-15 10:45:36 -06:00
1bcf3891b4 New TPU/RPU constructors 2018-05-15 10:44:47 -06:00
5456de63e9 Less state 2018-05-15 10:38:17 -06:00
9026c70952 Inline Rpu::new 2018-05-15 10:33:16 -06:00
99dc4ea4a9 Spin up threads from Rpu/Tpu constructors 2018-05-15 10:30:52 -06:00
0aaa500f7c Rpu/Tpu serve() functions now only spin up threads 2018-05-15 10:10:45 -06:00
5f5be83a17 Hoist socket creation/configuration
TODO: Add a library for socket configuration.
2018-05-15 10:05:23 -06:00
7e44005a0f Don't do error-prone things in functions that spawn threads 2018-05-15 09:53:51 -06:00
ee3fb985ea Hoist set_timeout 2018-05-15 09:42:28 -06:00
2a268aa528 Reorder to reflect dependencies 2018-05-15 09:17:48 -06:00
cd262cf860 Merge pull request #223 from rlkelly/202__rust_refactor
202  rust refactor
2018-05-15 08:44:47 -06:00
a1889c32d4 fixed CrdtToSmall typo 2018-05-15 10:29:56 -04:00
d42d024d9c minor changes 2018-05-15 10:23:11 -04:00
7b88b8d159 Merge pull request #222 from aeyakovenko/fixed_ignore_tests
fix ignore tests
2018-05-14 22:18:38 -07:00
4131071b9a fix ignore tests 2018-05-14 22:06:42 -07:00
ef6bd7e3b8 Add TPU 2018-05-14 17:36:19 -06:00
374bff6550 Extract event processing from request_stage 2018-05-14 17:31:27 -06:00
0a46bbe4f9 Merge pull request #219 from garious/add-write-stage
Move write_service and drain_service into new write_stage module
2018-05-14 17:18:04 -06:00
f4971be236 Merge pull request #218 from aeyakovenko/multitest-rebase
multinode test
2018-05-14 17:17:34 -06:00
421273f862 disable tests that fail with kcov 2018-05-14 16:07:21 -07:00
2c7f229883 wait longer 2018-05-14 15:48:43 -07:00
904eabad2f wait longer 2018-05-14 15:48:24 -07:00
8b233f6be4 update 2018-05-14 15:43:26 -07:00
08fc821ca9 rebase 2018-05-14 15:35:54 -07:00
81706f2d75 Move write_service and drain_service into new write_stage module 2018-05-14 16:31:31 -06:00
7b50c3910f fmt 2018-05-14 15:21:41 -07:00
2d635386af rebased 2018-05-14 15:20:41 -07:00
a604dcb4c4 Merge pull request #217 from garious/add-historian-stage
Add record_stage to pipeline
2018-05-14 16:01:45 -06:00
7736b9cac6 Boot Alice and Bob from the unit tests 2018-05-14 15:39:34 -06:00
d2dd005a59 accountant -> bank 2018-05-14 15:33:11 -06:00
6e8f99d9b2 Purge EventProcessor 2018-05-14 14:45:29 -06:00
685de30047 Purge EventProcessor from RPU 2018-05-14 14:35:25 -06:00
17cc9ab07f Rename Historian to RecordStage
Historian was a legacy name. The new name reflects the new pipelined
architecture.
2018-05-14 14:19:19 -06:00
3f10bf44db Config recorder with any kind of Duration, not just milliseconds 2018-05-14 14:12:36 -06:00
27984e469a Multiply duration, not milliseconds 2018-05-14 13:58:42 -06:00
a2c05b112e Add historian to pipeline
No longer intercept entries to register_entry_id(). Instead,
register the ID in the Write stage.

EventProcessor is now just being used as a place to store data.

Fixes #216
2018-05-14 12:43:40 -06:00
a578c1a5e3 Merge pull request #215 from garious/suppress_panic_message_in_tests
Don't output panic noise from panic test
2018-05-14 11:46:22 -06:00
500aaed48e Merge pull request #211 from garious/add-tx-count
Drop EntryInfo subscriptions
2018-05-14 10:41:09 -06:00
4a94da8a94 Don't output panic noise from panic test
P.S. rustfmt 0.4.1-stable (7a807262 2018-04-20)
2018-05-14 10:38:59 -06:00
cc447c0fda Drop support for EntryInfo subscriptions 2018-05-14 09:53:57 -06:00
0ae69bdcd9 Get transaction_count via GetTransactionCount instead of EntryInfo 2018-05-14 09:45:09 -06:00
5ba20a94e8 Panic on error to get same signature as transaction_count() 2018-05-14 09:43:40 -06:00
f168c377fd Get last_id via GetLastId instead of EntryInfo 2018-05-14 09:40:29 -06:00
dfb754dd13 Revive GetLastId messages 2018-05-14 09:35:10 -06:00
455050e19c Expose the server-side transaction count 2018-05-14 07:21:12 -06:00
317031f455 Add transaction count to accountant 2018-05-14 06:49:51 -06:00
b132ce1944 Merge pull request #210 from aeyakovenko/buildite_coverage
ignore unstable tests
2018-05-13 22:00:32 -06:00
8b226652aa unstable 2018-05-13 20:54:41 -07:00
2c7fe3ed8d unstable 2018-05-13 20:51:07 -07:00
3d5f2b3c28 unstable 2018-05-13 20:45:55 -07:00
7a79afe4a6 unstable 2018-05-13 20:41:54 -07:00
1f7387a39b increase sleep 2018-05-13 20:33:41 -07:00
0fc2bee144 Merge pull request #208 from rlkelly/203__remove_old_genkey
removed old keygen
2018-05-13 19:04:23 -06:00
791ae852a2 removed old keygen 2018-05-13 18:14:10 -04:00
c2fcd876d7 Merge pull request #206 from garious/add-accounting-stage
More modules
2018-05-12 18:05:10 -06:00
d239d4a495 Add missing files 2018-05-12 17:57:28 -06:00
aec05ef602 Move RequestProcessor into its own module 2018-05-12 17:50:55 -06:00
e5d46d998b Move thin client messages into their own module 2018-05-12 17:41:27 -06:00
b2e3299539 Only pass accountant write_service 2018-05-12 17:30:15 -06:00
c308a6459f cargo fmt 2018-05-12 17:27:15 -06:00
4eb1bc08a7 Merge pull request #205 from rlkelly/203__test_key_generation
203  test key generation
2018-05-12 17:26:46 -06:00
ff5e1c635f increased iterations 2018-05-12 18:18:18 -04:00
6149c2fcb5 added benchmarks for two GenKeys 2018-05-12 18:08:08 -04:00
d7cd80dce5 Merge pull request #204 from garious/add-accounting-stage
TPU cleanup
2018-05-12 15:47:37 -06:00
6264508f5e Consistent naming of senders and receivers 2018-05-12 15:24:20 -06:00
a3869dd4c1 Move entry_receiver to RequestStage
This can move to AccountingStage once RequestStage stops
calling process_events().
2018-05-12 15:14:37 -06:00
a3d2831f8c Free up the name 'accounting_stage' 2018-05-12 14:05:57 -06:00
4cd1fa8c38 refactored seed generation 2018-05-12 15:42:27 -04:00
1511dc43d7 Move RequestProcessor out of Rpu/Tvu state 2018-05-12 11:39:24 -06:00
3d82807965 Delete dead code 2018-05-12 11:24:40 -06:00
4180571660 Don't pass events_socket to RPU 2018-05-12 11:11:30 -06:00
421d9aa501 Free up the name 'tpu' 2018-05-12 10:53:25 -06:00
898f4971a2 Free up name 'thin_client_service' 2018-05-12 10:50:22 -06:00
7ab3331f01 Move validation processor to its own module 2018-05-12 00:31:32 -06:00
b4ca414492 More object-oriented 2018-05-12 00:19:12 -06:00
73abea088a No need for TPU dependency 2018-05-11 23:51:35 -06:00
2376dfc139 Let thin client own the receiver channel 2018-05-11 23:46:04 -06:00
d2f95d5319 Move thin client service thread into thin_client_service.rs 2018-05-11 23:37:44 -06:00
cd96843699 Free up name ThinClientService 2018-05-11 23:37:14 -06:00
ca80bc33c6 Move the writer stage's utilities to its own module 2018-05-11 22:36:16 -06:00
19607886f7 Move sig verification stage into its own module 2018-05-11 21:51:37 -06:00
3c11a91f77 Cleanup verifier error handling 2018-05-11 21:01:07 -06:00
b781fdbd04 Reorganize 2018-05-11 20:50:50 -06:00
765d901530 Better names 2018-05-11 20:18:04 -06:00
3cedbc493e Reorder to reflect the pipeline order 2018-05-11 20:11:25 -06:00
0488d0a82f Extract sig verify functions 2018-05-11 19:59:40 -06:00
f0be595e4c Create function for thin client thread 2018-05-11 17:58:27 -06:00
55100854d6 Better names 2018-05-11 16:41:35 -06:00
600a1f8866 Initialize thin client with events port 2018-05-11 16:35:53 -06:00
95bf68f3f5 Correct some strange naming 2018-05-11 16:24:18 -06:00
bcdb058492 cargo fmt 2018-05-11 13:06:05 -06:00
7f46aef624 Merge pull request #200 from jackson-sandland/153-panic-cleanup
issue #153 - panic cleanup
2018-05-11 13:05:04 -06:00
e779496dfb Update signature.rs 2018-05-11 11:49:22 -07:00
3d77fa5fbc Merge branch 'master' into 153-panic-cleanup 2018-05-11 11:40:20 -07:00
250830ade9 cargo fmt run 2018-05-11 11:38:52 -07:00
7b2eb7ccfc Merge pull request #189 from rlkelly/156__remove_user_keys_in_mintdemo
156  remove user keys in mintdemo
2018-05-11 12:19:32 -06:00
458c27c6e9 Merge branch 'master' into 153-panic-cleanup 2018-05-11 11:18:45 -07:00
a49e664e63 Merge branch '156__remove_user_keys_in_mintdemo' of github.com:rlkelly/solana into 156__remove_user_keys_in_mintdemo 2018-05-11 14:07:48 -04:00
f20380d6b4 changed RwLock to RefCell 2018-05-11 14:07:41 -04:00
05a5e551d6 Merge branch 'master' into 156__remove_user_keys_in_mintdemo 2018-05-11 13:00:44 -04:00
d278b71cb2 added tests and utility method for key generation 2018-05-11 12:55:05 -04:00
a485c141d5 Merge pull request #199 from garious/add-accounting-stage
Fix race condition in Accountant::apply_payment()
2018-05-11 10:54:32 -06:00
8a9f6b9ae3 Merge pull request #201 from CriesofCarrots/master
Generalize next tick functions to carry events
2018-05-11 10:54:14 -06:00
7144090528 Fix whitespace 2018-05-11 10:40:31 -06:00
ee0015ac38 Fix whitespace 2018-05-11 10:34:46 -06:00
8b7f7f1088 Generalize next tick functions to carry events 2018-05-11 09:45:42 -06:00
c95c6a75f8 tpu.rs - panic cleanup 2018-05-10 20:49:58 -07:00
44bf79e35f transaction.rs - panic cleanup 2018-05-10 18:24:33 -07:00
bb654f286c tpu.rs - panic cleanup 2018-05-10 18:21:10 -07:00
1acd2aa8cf Fix race condition in Accountant::apply_payment() 2018-05-10 19:07:15 -06:00
18d3659b91 timing.rs - panic cleanup 2018-05-10 17:47:27 -07:00
63a4bafa72 thin_client - panic cleanup 2018-05-10 17:46:10 -07:00
4eb2e84c9f streamer.rs - panic cleanup 2018-05-10 17:38:00 -07:00
73c7fb87e8 signature.rs - panic cleanup 2018-05-10 17:15:53 -07:00
c1496722aa packet.rs - panic cleanup 2018-05-10 17:11:31 -07:00
d9f81b0c8c mint.rs - panic cleanup 2018-05-10 17:06:43 -07:00
d69beaabe1 historian.rs - panic cleanup 2018-05-10 17:00:37 -07:00
b7a0bd6347 event.rs - panic cleanup 2018-05-10 16:59:13 -07:00
882ea6b672 erasure.rs - panic cleanup 2018-05-10 16:54:21 -07:00
736d3eabae Merge pull request #198 from garious/add-accounting-stage
Move more code out of TPU
2018-05-10 17:24:22 -06:00
af53197c04 cargo +nightly fmt 2018-05-10 16:58:37 -06:00
cf186c5762 Better names 2018-05-10 16:58:37 -06:00
f384a2ce85 Move streamer-specific utility into streamer module 2018-05-10 16:58:37 -06:00
803b76e997 More idiomatic Rust 2018-05-10 16:58:37 -06:00
230d7c3dd6 Move all Request processing into thin_client_service 2018-05-10 16:58:37 -06:00
4f629dd982 Add events socket instead of modifying the existing socket 2018-05-10 16:54:43 -06:00
4fdd891b54 More precise function names 2018-05-10 16:54:43 -06:00
64a892321a Merge pull request #197 from sakridge/fixes_for_entry_serialization
Fixes for serializing entries over blobs
2018-05-10 16:53:30 -06:00
a80991f2b3 Fixes for serializing entries over blobs and reorg into ledger 2018-05-10 15:30:30 -07:00
c9cd81319a Set theme jekyll-theme-slate 2018-05-10 13:28:29 -07:00
521ae21632 Merge pull request #193 from sakridge/serialize_entries_over_multiple_blobs
Serialize entries over multiple blobs
2018-05-10 13:53:48 -06:00
bcd6606a16 ecdsa.rs - panic cleanup 2018-05-09 18:19:23 -07:00
52ebb88205 accountant.rs - simplify error messages 2018-05-09 18:16:37 -07:00
1e91d09be7 crdt.rs - panic cleanup 2018-05-09 18:10:48 -07:00
02c573986b historian / transaction updates 2018-05-09 17:22:14 -07:00
f2de486658 accountant.rs - panic cleanup 2018-05-09 17:19:12 -07:00
900b4f2644 Serialize entries over multiple blobs 2018-05-09 16:03:47 -07:00
1cfaa9afb6 Merge pull request #194 from garious/add-accounting-stage
Fix nightly
2018-05-09 16:53:45 -06:00
801468d70d Fix nightly 2018-05-09 16:51:34 -06:00
0601e05978 Merge pull request #192 from garious/add-accounting-stage
Add accounting stage
2018-05-09 16:47:50 -06:00
7ce11b5d1c Cleanup: use full words for field names
and optionally for variable names
2018-05-09 16:19:42 -06:00
f2d4799491 Cleanup: field names should be nouns 2018-05-09 16:14:40 -06:00
ebc458cd32 Remove redundant Arcs 2018-05-09 15:45:10 -06:00
43cd631579 Add thin_client_service 2018-05-09 14:56:34 -06:00
bc824c1a6c Reference count the accountant
So that the thin client can reference the AccountingStage's accountant
from separate threads.
2018-05-09 14:33:20 -06:00
4223aff840 Remove useless ref counts 2018-05-09 14:25:52 -06:00
f107c6c2ca Don't wrap thread-safe objects with mutexes 2018-05-09 14:21:42 -06:00
7daf14caa7 Don't depend on client from server 2018-05-09 13:33:33 -06:00
ded28c705f Tuck away the Historian
The Historian is now just a utility of the accounting stage.
2018-05-09 12:25:19 -06:00
778bec0777 Intercept historian output from accounting stage
We were accessing the accountant from multiple stages just to
register the ID the historian adds to Events.

This change should cause a whole lot of Arcs and Mutexes to go away.
2018-05-09 12:00:37 -06:00
6967cf7f86 Boot sync_channel()
This is less useful now that we send Vec<Event> instead of Event.
2018-05-09 11:43:16 -06:00
0ee3ec86bd Fix nightly 2018-05-09 10:48:56 -06:00
e4c47e8417 Use AccountingStage in Tpu 2018-05-09 10:31:23 -06:00
98ae80f4ed Hoist historian 2018-05-09 09:26:58 -06:00
876c77d0bc Extract accounting stage code from tpu 2018-05-09 09:22:46 -06:00
d44a6f7541 Move Accounting stage functionality into its own object 2018-05-09 09:03:00 -06:00
9040c04d27 Remove redundant Tick 2018-05-09 08:18:52 -06:00
ebbdef0538 Ignore flakey test 2018-05-09 08:16:59 -06:00
bfbee988d0 No longer wait for a Tick signal to record events 2018-05-09 08:15:51 -06:00
1d4d0272ca Drop support for logging a single event 2018-05-09 08:12:33 -06:00
77a76f0783 Record a batch of events 2018-05-09 08:11:19 -06:00
d9079de262 Add a way of sending a batch of events 2018-05-09 08:05:40 -06:00
b3d732a1a1 No longer artificially limit the size of entries
Instead, serialize the entries and split them up over multiple
blobs.
2018-05-09 07:59:55 -06:00
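The commit above replaces a hard entry-size cap with chunking: the entries are serialized and the resulting byte stream is spread over as many blobs as needed. A minimal illustrative sketch of that chunking in Rust (BLOB_DATA_SIZE and the function name are placeholders, not the repository's actual code):

```rust
// Split a serialized byte stream into fixed-size blob payloads.
const BLOB_DATA_SIZE: usize = 1024; // assumed payload size, for illustration only

fn to_blobs(serialized_entries: &[u8]) -> Vec<Vec<u8>> {
    serialized_entries
        .chunks(BLOB_DATA_SIZE)
        .map(|chunk| chunk.to_vec())
        .collect()
}

fn main() {
    // An "entry" far bigger than one blob payload is simply spread over
    // several blobs; the receiver concatenates them before deserializing.
    let big_entry = vec![0u8; 5000];
    let blobs = to_blobs(&big_entry);
    assert_eq!(blobs.len(), 5); // ceil(5000 / 1024)
    let reassembled: Vec<u8> = blobs.concat();
    assert_eq!(reassembled.len(), big_entry.len());
}
```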
52f1a02938 Delete historical artifact
This was just to explain Proof of History. We have better explanations
elsewhere. Delete!
2018-05-09 07:53:24 -06:00
fe51669e85 signature.rs - panic cleanup 2018-05-08 23:21:45 -07:00
670a6c50c9 event.rs - panic cleanup 2018-05-08 22:58:48 -07:00
86c1aaf7d8 transaction.rs - panic cleanup 2018-05-08 22:46:22 -07:00
658e787b60 timing.rs panic cleanup 2018-05-08 22:40:07 -07:00
40c50aef50 deterministic random wallet generation 2018-05-09 00:07:19 -04:00
a24c2bbe73 merge bug 2018-05-09 00:07:03 -04:00
bdbe90b891 Merge branch 'master' of github.com:solana-labs/solana 2018-05-08 23:40:54 -04:00
3236be7877 Merge pull request #188 from garious/add-tpu
AccountantSkel -> Tpu
2018-05-08 19:50:58 -06:00
1dca17fdb4 cargo +nightly fmt 2018-05-08 18:59:01 -06:00
785e971698 AccountantSkel -> Tpu
The terms Stub and Skel come from OMG IDL and only made sense while
the Stub was acting as an RPC client for the Accountant object.
Nowadays, the Stub interface looks nothing like the Accountant and
meanwhile we've recognized the multithreaded implementation is more
reminiscent of a pipelined CPU. Thus, we finally bite the bullet and
rename our modules.

AccountantSkel -> Tpu
AccountantStub -> ThinClient

Up next will be moving much of the TPU code into separate modules,
each representing a stage of the pipeline. The interface of each
will follow the precedent set by the Historian object.
2018-05-08 17:40:02 -06:00
2bfa20ff85 Merge pull request #182 from garious/split-request
Control port prep
2018-05-08 17:11:34 -06:00
474a9af78d Merge pull request #187 from sakridge/fix_blob_size_check
Trust the recorder not to give us more than we can serialize
2018-05-08 17:11:18 -06:00
61425eacb8 Merge pull request #185 from sakridge/fix_default_client_port
Fix default client port, server uses 8000-8002 for gossip
2018-05-08 16:58:04 -06:00
4870def1fb Fix default client port, server uses 8000-8002 for gossip. 2018-05-08 15:40:55 -07:00
3e73fb9233 Trust the recorder not to give us more than we can serialize
Also run client for 10 seconds; 5 is a bit too short
2018-05-08 15:23:41 -07:00
5ad6061c3f Merge pull request #184 from sakridge/add_debug_msg_in_readme
Add message about trace debugging
2018-05-08 14:39:09 -06:00
fae019b974 Add message about trace debugging 2018-05-08 13:26:09 -07:00
3bb06d8364 Merge pull request #183 from sakridge/verify_thread_rework
Rework sig processing threads and add perf for process/verify
2018-05-08 13:15:41 -06:00
c9c9afa472 Remove the note about git-lfs 2018-05-08 12:52:24 -06:00
bd0671e123 Rework sig processing threads and add perf for process/verify 2018-05-08 11:49:29 -07:00
6f3ec8d21f Merge pull request #181 from aeyakovenko/link
update link
2018-05-08 08:20:43 -06:00
9a0bf13feb update link 2018-05-08 06:44:24 -07:00
9ff1a6f0cd Add a thread to support thin clients 2018-05-07 21:44:44 -06:00
a59f64cae1 Merge pull request #179 from garious/update-readme
Update README with proposed way to download the gpu lib
2018-05-07 16:43:20 -06:00
a4ecd09723 Delete .gitattributes
This was used by git-lfs.
2018-05-07 16:35:54 -06:00
f159dfd15a Update README with proposed way to download the gpu lib
If you checked here yesterday, this was a top-level file in git-lfs,
but that made the developer workflow more painful, so we booted that
file and are making it available via an HTTP endpoint.
2018-05-07 16:33:27 -06:00
9e8ec86fa3 Merge pull request #178 from garious/split-request
Refactoring for upcoming thin client port
2018-05-07 16:21:48 -06:00
62bb78f58d Prepwork to hoist processing requests 2018-05-07 15:09:08 -06:00
893011c3ba Process events instead of processing only transactions
Prep work to allow clients to send any type that can end up in
the ledger.
2018-05-07 14:51:13 -06:00
880cb8e7cc Merge pull request #176 from aeyakovenko/multinode
Multinode
2018-05-07 09:05:12 -06:00
85f83f2c74 fmt 2018-05-06 22:29:33 -07:00
4751e459cc fixed! 2018-05-06 22:25:05 -07:00
138efa6cec fixed constant 2018-05-06 22:06:19 -07:00
a68e50935e useless timeouts i think 2018-05-06 21:48:46 -07:00
e8f5fb35ac Multinode fixes and test
* Replace magic numbers for 64k event size
* Fix gossip, dont ping yourself
* Retransmit only to listening nodes
* Multinode test in stub marked unstable
2018-05-06 21:36:06 -07:00
6af27669b0 Merge pull request #175 from garious/64k-entries
Limit 256 events per entry
2018-05-04 12:19:25 -07:00
e162f24119 Limit 256 events per entry
Attempt to keep blob size under 64kb
2018-05-04 11:52:05 -06:00
dbcc462a48 Merge pull request #173 from sakridge/entry_process_cleanup
Factor out entry processing and fix replicate test to call global setup fn
2018-05-04 11:19:28 -06:00
2d5313639a Factor out entry processing and fix replicate test to call global setup fn 2018-05-03 22:24:30 -07:00
38af0f436d Merge pull request #174 from sakridge/fix_bind_for_external
Fix bind so we can talk on external interfaces and surface send error
2018-05-03 18:20:00 -06:00
888c2ffb20 Fix bind so we can talk on external interfaces and surface send error 2018-05-03 17:05:02 -07:00
588593f619 Merge pull request #172 from sakridge/fix_entry_serialize
Fix entry serialize
2018-05-03 16:12:42 -06:00
2cdd515b12 Compiles/fmt and add assert for forward progress 2018-05-03 14:58:08 -07:00
0aad71d46e fix entry serialize 2018-05-03 14:35:04 -07:00
6f9285322d Merge pull request #171 from garious/cleanup-lastid
Cleanup last_id access in stub and skel
2018-05-03 14:57:28 -06:00
68c7f992fa Soothe all versions of rustfmt 2018-05-03 13:56:10 -06:00
1feff408ff Implement get_last_id() with transaction_count()
This is more precise than the previous implementation because it'll
drain the EntryInfo queue and return the most recent last_id instead
of the first one.
2018-05-03 13:34:57 -06:00
f752e02487 Implement GetLastId with EntryInfo subscription 2018-05-03 13:31:43 -06:00
c9c7fb0a27 Update comment
The last PR added a thread that logs entries without needing to
be driven by the client.
2018-05-03 13:27:37 -06:00
de680c2a8e Remove duplicate state 2018-05-03 13:24:37 -06:00
03695ba4c5 Merge pull request #169 from sakridge/broadcast_rebase
Add broadcast impl
2018-05-03 12:22:34 -06:00
c2e2960bf7 Add broadcast impl 2018-05-03 10:34:01 -07:00
385d2a580c Merge pull request #168 from aeyakovenko/fix_multi_host_client_demo
multi host client demo
2018-05-03 10:21:41 -06:00
7e02652068 Merge pull request #170 from garious/refactor-historian
Fix nightly build
2018-05-03 10:16:05 -06:00
ae29c9b4a0 Fix nightly build 2018-05-03 09:38:59 -06:00
078f917e61 useless assert 2018-05-03 08:34:57 -07:00
b65f04d500 multi host client demo
Bind to the same interface as the user supplied client address.
2018-05-03 08:28:11 -07:00
6acaffe581 Merge pull request #166 from garious/refactor-historian
TPU-friendly Historian
2018-05-02 18:13:30 -06:00
e47ef42a33 Merge pull request #167 from djKooks/readme-version
Add comment about rustc version in README
2018-05-02 18:08:13 -06:00
b950e33d81 Remove useless comment 2018-05-03 09:06:41 +09:00
ec8cfc77ad Remove component adding part 2018-05-03 09:04:56 +09:00
00a16db9cd Add comment about rustc version in README 2018-05-03 08:38:09 +09:00
4b9f115586 Hoist Historian input 2018-05-02 16:35:37 -06:00
c5cc91443e Rename sender/receiver to input/output 2018-05-02 15:54:53 -06:00
48d94143e7 Fix CI 2018-05-02 11:05:11 -06:00
8174a05156 Merge pull request #165 from rlkelly/126__atomic_balances
126  atomic balances
2018-05-02 10:43:31 -06:00
63cf6363a2 more rustfmt 2018-05-02 12:24:25 -04:00
cc6de605ac rustfmt 2018-05-02 12:21:20 -04:00
d0151d2b79 restored original test logic 2018-05-02 12:07:42 -04:00
6b45d453b8 modified verification map 2018-05-02 10:44:41 -04:00
b992a84d67 modified verification to loop until success or failure 2018-05-02 10:15:08 -04:00
cb362e9052 rust format 2018-05-01 16:38:15 -04:00
ccb478c1f6 improved error handling and atomic transactions 2018-05-01 16:38:15 -04:00
6af3680f99 Version bump 2018-04-30 22:38:39 -06:00
e6c3c215ab Add note about installing git-lfs 2018-04-30 15:26:31 -06:00
5c66bbde01 Add a note about running with GPU optimizations 2018-04-30 15:20:39 -06:00
77dd1bdd4a move CI specific scripts to solana-labs/buildkite repo 2018-04-29 23:43:43 -07:00
6268d540a8 move CI specific scripts to solana-labs/buildkite repo
Former-commit-id: 77dd1bdd4a
2018-04-29 23:43:43 -07:00
5918e38747 Version bump 2018-04-27 15:49:48 -07:00
3cfb571356 Version bump
Former-commit-id: f7385e866207b3ec2269bac36d52ef1e7f09337c
2018-04-27 15:49:48 -07:00
5eb80f8027 Add GPU library for Linux systems
To get solana to use the GPU, invoke cargo with "--features=cuda".
2018-04-27 15:47:22 -07:00
f6e5f2439d Add GPU library for Linux systems
To get solana to use the GPU, invoke cargo with "--features=cuda".


Former-commit-id: ea904df6e53d98a32e3f6103ee82cdf7ba08bf21
2018-04-27 15:47:22 -07:00
edf6272374 Merge pull request #154 from sakridge/replicator
Replicator
2018-04-27 14:30:52 -06:00
7f6a4b0ce3 Deserialize the Entry structs and process them 2018-04-27 13:15:19 -07:00
3be5f25f2f Work on test_replicate to test replicate service
generate some messages to send to the replicator service
2018-04-27 08:21:34 -07:00
1b6cdd5637 Fix some compilation issues 2018-04-27 08:21:34 -07:00
f752e55929 update 2018-04-27 08:21:34 -07:00
ebb089b3f1 wip 2018-04-27 08:21:34 -07:00
ad6303f031 docs 2018-04-27 08:21:34 -07:00
828b9d6717 docs 2018-04-27 08:21:34 -07:00
444adcd1ca update 2018-04-27 08:21:34 -07:00
69ac305883 wip 2018-04-27 08:21:34 -07:00
2ff57df2a0 state replication 2018-04-27 08:21:34 -07:00
7077f4cbe2 Merge pull request #128 from garious/faster-demo
Utilize parallelized accountant in demo
2018-04-27 08:47:42 -06:00
266f85f607 Merge pull request #152 from aeyakovenko/star
recover full network from a star
2018-04-26 15:36:08 -06:00
d90ab90145 bind to all 2018-04-26 13:54:29 -07:00
48018b3f5b docs 2018-04-26 13:50:57 -07:00
15584e7062 recover full network from a star 2018-04-26 13:48:42 -07:00
d415b17146 sleepless demo to complement sleepless nights
18 ktps on macbook pro, no gpu
2018-04-26 13:17:38 -06:00
9ed953e8c3 Fix rebase fails 2018-04-26 09:35:10 -06:00
b60a98bd6e Startup log can reference IDs without itself 2018-04-26 08:42:34 -06:00
a15e30d4b3 Report transactions processed 2018-04-26 08:42:34 -06:00
d5d133353f Port blocking stub functions to new stateful ones 2018-04-26 08:42:34 -06:00
6badc98510 Add low-level response-handling functions to skel 2018-04-26 08:42:34 -06:00
ea8bfb46ce Add a way to subscribe for new entry metadata 2018-04-26 08:42:34 -06:00
58860ed19f WIP: New demo that makes better use of the parallelized accountant 2018-04-26 08:42:34 -06:00
583f652197 Generate genesis log for the demo
This log contains a bunch of transactions that generate new
accounts, so that transactions to and from them can be processed
in parallel.
2018-04-26 08:42:34 -06:00
3215dcff78 Update readme for new demo
Need to create a bunch of unrelated accounts to the genesis block
so that transactions can be processed in parallel without waiting
on write-locks. And then stuff the private keys of those accounts
into mint.json so that the client-demo can send the tokens from
those accounts.
2018-04-26 08:42:34 -06:00
38fdd17067 Add initializing log message to server
Handy when the genesis block is large.
2018-04-26 08:42:34 -06:00
807ccd15ba Add solana-mint-demo CLI
This extends solana-mint with additional data that will be used by
both solana-client-demo and creating the demo's genesis block.
2018-04-26 08:42:34 -06:00
1c923d2f9e Fix entry hash when no events and num_hashes is one 2018-04-26 08:42:34 -06:00
2676b21400 Merge pull request #151 from rlkelly/139__forget_signature
added forget_signature method
2018-04-26 08:28:11 -06:00
fd5ef94b5a added forget signature method 2018-04-26 07:22:11 -04:00
02c7eea236 Merge pull request #150 from garious/141__add_futures
Add FutureResult to return a Future that immediately resolves
2018-04-25 20:44:40 -06:00
34d1805b54 Add FutureResult to return a Future that immediately resolves 2018-04-25 19:23:24 -07:00
753eaa8266 buildkite script 2018-04-24 11:32:00 -07:00
0b39c6f98e Merge pull request #145 from aeyakovenko/crdt
initial crdt implementation
2018-04-24 10:49:23 -07:00
55b8d0db4d cleanup 2018-04-23 23:33:21 -07:00
3d7969d8a2 initial crdt implementation 2018-04-23 23:06:28 -07:00
041de8082a Merge pull request #144 from rleungx/improve-error-messages
improve the error messages
2018-04-21 08:17:25 -06:00
3da1fa4d88 improve the error messages 2018-04-21 21:52:55 +08:00
39df21de30 Merge pull request #142 from ansrivas/master
git clone instruction
2018-04-20 16:05:21 -06:00
8cbb7d7362 git clone instruction 2018-04-20 23:02:10 +02:00
10a0c47210 Merge pull request #137 from garious/linux-hang
Workaround linux hang
2018-04-19 11:46:48 -06:00
89bf3765f3 Merge pull request #138 from sakridge/help_options
Add -h/--help options for client-demo and testnode
2018-04-19 11:40:07 -06:00
8181bc591b Add -h/--help options for client-demo and testnode 2018-04-19 10:22:31 -07:00
ca877e689c Merge pull request #136 from rleungx/report-errors-to-stderr
report serde parse errors to stderr
2018-04-19 11:15:57 -06:00
c6048e2bab Workaround linux hang
Without this patch, Linux systems would hang when running the demo.

The root cause (why Linux is acting differently than macOS) was
not determined, but we know the problem is caused by a known
issue in the transaction pipeline - that entries are not pulled
off the historian channel until after the full transaction batch
is processed. This patch makes the sync_channel large enough that
it should never block on a gigabit network.
2018-04-19 10:04:32 -07:00
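The workaround described above amounts to giving the channel between the transaction pipeline and the historian enough capacity that the producer never blocks while a batch is being processed. A rough sketch of the idea using std::sync::mpsc::sync_channel (the capacity value and message type are assumptions, not the repository's actual code):

```rust
use std::sync::mpsc::sync_channel;
use std::thread;

fn main() {
    // A bounded channel with a large capacity: the sender only blocks if the
    // receiver falls more than 10_000 messages behind, which the commit above
    // argues should not happen on a gigabit network.
    let (tx, rx) = sync_channel::<Vec<u8>>(10_000);

    let producer = thread::spawn(move || {
        for i in 0..100u32 {
            // send() blocks only when the channel is full.
            tx.send(i.to_le_bytes().to_vec()).unwrap();
        }
    });

    // The receiver drains entries at its own pace; recv() returns Err once
    // the producer is done and the sender has been dropped.
    while let Ok(entry) = rx.recv() {
        let _ = entry.len();
    }
    producer.join().unwrap();
}
```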
60015aee04 report serde parse errors to stderr 2018-04-19 23:51:57 +08:00
43e6741071 Merge pull request #134 from rleungx/report-parse-errors-to-stderr
report parse errors to stderr
2018-04-19 08:38:38 -06:00
b91f6bcbff report parse errors to stderr 2018-04-19 22:24:46 +08:00
64e2f1b949 Merge pull request #133 from djKooks/rm-mut
Remove out for immutable variable
2018-04-19 08:24:32 -06:00
13a2f05776 Remove out for immutable variable 2018-04-19 23:00:16 +09:00
903374ae9b Merge pull request #132 from aeyakovenko/readme
readme and site update
2018-04-18 21:49:09 -06:00
d366a07403 add gregs abstract as an intro 2018-04-18 20:17:37 -07:00
e94921174a Merge pull request #131 from sakridge/erasure
Add erasure rust logic under feature flag
2018-04-18 21:10:06 -06:00
dea5ab2f79 Add erasure rust logic under feature flag 2018-04-18 19:42:09 -07:00
5e11078f34 Add Stephen 2018-04-18 17:22:58 -06:00
d7670cd4ff Merge pull request #129 from aeyakovenko/retransmit
Retransmit
2018-04-18 11:10:50 -04:00
29f3230089 docs 2018-04-17 19:53:18 -07:00
d003efb522 fix docs 2018-04-17 19:52:46 -07:00
97e772e87a docs 2018-04-17 19:46:50 -07:00
0b33615979 update 2018-04-17 12:48:06 -07:00
249cead13e docs 2018-04-17 11:07:43 -07:00
7c96dea359 fmt 2018-04-17 11:05:35 -07:00
374c9921fd comments 2018-04-17 11:05:15 -07:00
fb55ab8c33 format 2018-04-16 21:02:37 -07:00
13485074ac test cast 2018-04-16 20:57:15 -07:00
4944c965e4 update
heap

update

update

wip

use a vec and sort

builds

update

tests

update

fmt

update

progress

fmt

passes needs retransmit test

tests

cleanup

update

update

update

update

fmt
2018-04-16 20:33:09 -07:00
83c5b3bc38 Merge pull request #125 from garious/fix-parallelized-ledger
Tell verifiers when not to parallelize accounting
2018-04-13 08:43:36 -06:00
7fc42de758 Fix bench 2018-04-13 00:36:23 -04:00
0a30bd74c1 Tell verifiers when not to parallelize accounting
Without this patch, many batches of transactions could be tossed
into a single entry, but the parallelized accountant can only
guarantee the transactions in the batch can be processed in
parallel.

This patch signals the historian to generate a new Entry after
each batch. Validators must maintain sequential consistency
across Entries.
2018-04-12 21:08:53 -06:00
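The constraint stated above is: parallelism is only safe within one batch, and batches (entries) must stay sequential. A toy Rust sketch of that shape, with hypothetical stand-in types rather than the actual accountant/historian code:

```rust
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::thread;

// Hypothetical stand-ins for the real types.
type Tx = (String, i64); // (account, delta)
struct Entry {
    transactions: Vec<Tx>,
}

fn process_ledger(entries: Vec<Entry>, balances: Arc<Mutex<HashMap<String, i64>>>) {
    // Entries are applied one after another: validators must maintain
    // sequential consistency across entries...
    for entry in entries {
        // ...but the transactions inside a single entry were batched together
        // precisely because they may be applied in any order. Here a thread
        // per transaction stands in for the parallelized accountant.
        let handles: Vec<_> = entry
            .transactions
            .into_iter()
            .map(|(account, delta)| {
                let balances = Arc::clone(&balances);
                thread::spawn(move || {
                    *balances.lock().unwrap().entry(account).or_insert(0) += delta;
                })
            })
            .collect();
        for h in handles {
            h.join().unwrap();
        }
    }
}

fn main() {
    let balances = Arc::new(Mutex::new(HashMap::new()));
    let entries = vec![
        Entry { transactions: vec![("alice".into(), 5), ("bob".into(), 3)] },
        Entry { transactions: vec![("alice".into(), -2)] },
    ];
    process_ledger(entries, Arc::clone(&balances));
    println!("{:?}", balances.lock().unwrap());
}
```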
9b12a79c8d cargo +nightly fmt 2018-04-12 17:04:11 -06:00
0dcde23b05 Merge pull request #119 from sakridge/skel_verify_test
Add skel test which sends a bad transaction, verify it doesn't make it
2018-04-12 17:02:08 -06:00
8dc15b88eb Add skel test which sends a bad transaction, verify it doesn't make it 2018-04-12 15:01:59 -07:00
d20c952f92 Merge pull request #121 from aeyakovenko/helpers
requests to packets utility function
2018-04-12 12:58:30 -07:00
c2eeeb27fd bump timer 2018-04-12 11:12:10 -07:00
180d8b67e4 requests to packets function 2018-04-12 10:44:09 -07:00
9c989c46ee Merge pull request #123 from garious/cleanup-tests
Cleanup tests
2018-04-11 22:37:35 -06:00
51633f509d Fix test
The test was meant to ensure the signature covered the 'tokens'
field, but then when the 'plan' field was rolled in, Transaction::verify()
started failing because Plan::verify() failed. When Transaction::verify()
was split into two, the unexpected failure was exposed but went unnoticed.
This patch brings it back to its original intent, to ensure signature
verification fails if the network attempts to change the client's payment.
2018-04-11 22:17:21 -06:00
705228ecc2 Remove redundant signs 2018-04-11 22:17:21 -06:00
740f6d2258 Merge pull request #122 from garious/fix-ci
Fix the nightly build
2018-04-11 20:56:58 -06:00
3b9ef5ccab Fix the nightly build 2018-04-11 20:24:14 -06:00
ab74e7f24f Merge pull request #117 from garious/parallelize-accountant
Enable parallelized accountant
2018-04-11 19:39:45 -06:00
be9a670fb7 Add process_packets() benchmark 2018-04-11 18:02:45 -06:00
6e43e7a146 Enable parallelized accountant 2018-04-11 18:01:59 -06:00
ab2093926a Merge pull request #120 from aeyakovenko/fix_bench_compile
fix compile error
2018-04-11 18:01:13 -06:00
916b90f415 Merge pull request #118 from sakridge/ecdsa_tests
Add tests for ecdsa sig checking
2018-04-11 17:58:45 -06:00
2ef3db9fab fix compile error 2018-04-11 15:40:25 -07:00
6987b6fd58 Add tests for ecdsa sig checking 2018-04-11 12:29:44 -07:00
078179e9b8 Merge pull request #115 from garious/parallelize-accountant
More refactoring
2018-04-11 10:28:15 -06:00
50ccecdff5 Refactor 2018-04-11 09:02:33 -06:00
e838a8c28a Delete unused function 2018-04-10 21:56:13 -06:00
e5f7eeedbf Use iterators 2018-04-10 21:48:26 -06:00
d1948b5a00 Zip earlier
And remove redundant into_iter() calls.
2018-04-10 21:18:39 -06:00
c07f700c53 Merge pull request #113 from aeyakovenko/master_pclient
command-line options for testnode and client
2018-04-09 23:07:03 -06:00
c934a30f66 commandline options for client and testnode 2018-04-09 21:14:52 -07:00
310d01d8a2 Merge pull request #112 from aeyakovenko/recycler_test
Recycler test should verify that it's recycling
2018-04-07 09:29:50 -06:00
f330739bc7 Recycler test should verify that it's recycling 2018-04-07 07:08:42 -07:00
58626721ad Merge pull request #111 from garious/parallelize-accountant
Cleanup
2018-04-06 17:03:10 -06:00
584c8c07b8 Better symmetry
deserialize -> process -> serialize
2018-04-06 16:34:59 -06:00
a93ec03d2c Move creating blobs into its own function 2018-04-06 16:22:02 -06:00
7bd3a8e004 Reduce cyclomatic complexity 2018-04-06 16:12:13 -06:00
912a5f951e Why is msgs cloned here? 2018-04-06 15:58:11 -06:00
6869089111 Parallelize deserialize 2018-04-06 15:52:58 -06:00
6fd32fe850 Cleanup constants 2018-04-06 15:43:05 -06:00
81e2b36d38 Cleanup packet_verify 2018-04-06 15:24:15 -06:00
7d811afab1 Parallelize CPU sig verify 2018-04-06 15:21:49 -06:00
39f5aaab8b Merge pull request #110 from garious/parallelize-accountant
Parallel processing of arbitrary transactions
2018-04-06 09:02:36 -06:00
5fc81dd6c8 Fix the nightly build
Nightly uses a different (but backward compatible) version of rustfmt.
2018-04-05 22:39:29 -06:00
491a530d90 Support parallelization of arbitrary transactions
Still assumes witnesses are processed serially afterward.
2018-04-05 22:30:25 -06:00
c12da50f9b Fix race condition
Without this patch, it was possible for two transactions with the same
'from' address to drive its balance below zero. With the patch, we'll
hold a write lock from just before we verify sufficient funds until
after those funds are deducted from the account.
2018-04-05 22:30:25 -06:00
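The fix above is the classic check-then-act correction: the balance check and the deduction must happen under a single write lock, otherwise two transactions from the same account can both observe sufficient funds. A hedged sketch of the pattern with simplified types (not the actual Accountant code):

```rust
use std::collections::HashMap;
use std::sync::RwLock;

type Balances = RwLock<HashMap<String, i64>>;

// Racy version: the balance read and the later write use separate lock
// acquisitions, so two spenders can both pass the funds check.
fn apply_payment_racy(balances: &Balances, from: &str, amount: i64) -> bool {
    let ok = *balances.read().unwrap().get(from).unwrap_or(&0) >= amount;
    if ok {
        *balances.write().unwrap().entry(from.to_string()).or_insert(0) -= amount;
    }
    ok
}

// Fixed version: hold the write lock from the funds check until the deduction.
fn apply_payment(balances: &Balances, from: &str, amount: i64) -> bool {
    let mut map = balances.write().unwrap();
    let balance = map.entry(from.to_string()).or_insert(0);
    if *balance >= amount {
        *balance -= amount;
        true
    } else {
        false
    }
}

fn main() {
    let balances: Balances = RwLock::new(HashMap::from([("alice".to_string(), 10)]));
    assert!(apply_payment(&balances, "alice", 7));
    assert!(!apply_payment(&balances, "alice", 7)); // insufficient funds now
    let _ = apply_payment_racy(&balances, "alice", 1);
}
```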
41e8500fc5 Break up process_verified_transaction() 2018-04-05 22:29:13 -06:00
a7f59ef3c1 Merge pull request #109 from sakridge/wip_gpu
Change for cuda verify integration
2018-04-05 22:24:35 -06:00
f4466c8c0a Change for cuda verify integration 2018-04-05 20:00:44 -07:00
bc6d6b20fa Merge pull request #108 from garious/parallelize-accountant
Reject old transactions so that we can boot old signatures
2018-04-05 15:11:22 -06:00
01326936e6 Expire all transactions after some amount of time
Reject old transactions so that we can calculate an upper bound
for memory usage, and therefore ensure the server won't slow
down over time to crash due to memory exhaustion.
2018-04-05 10:26:45 -06:00
c960e8d351 Reject transactions with a last_id that isn't from this ledger
Before this patch, a client could put any value into `last_id`, which
was primarily there to ensure the transaction had a globally unique
signature. With this patch, the server can use `last_id` as an
indicator of how long it's been since the transaction was created.
The server may choose to reject sufficiently old transactions so
that it can forget about old signatures.
2018-04-05 09:54:03 -06:00
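The two commits above turn `last_id` into an age signal: the server keeps only the most recent entry IDs, and a transaction referencing an ID outside that window is rejected, which bounds how long signatures must be remembered. A rough sketch of that check (the window size, types, and names are assumptions for illustration):

```rust
use std::collections::VecDeque;

const MAX_ENTRY_IDS: usize = 1024; // assumed window size, not the repo's constant

struct LastIds {
    recent: VecDeque<u64>, // most recent entry IDs, newest at the back
}

impl LastIds {
    fn register_entry_id(&mut self, id: u64) {
        if self.recent.len() == MAX_ENTRY_IDS {
            // Forgetting the oldest ID also lets the server forget every
            // signature that was recorded against it.
            self.recent.pop_front();
        }
        self.recent.push_back(id);
    }

    /// A transaction is accepted only if its last_id is still in the window,
    /// i.e. it was created recently and against this ledger.
    fn is_valid(&self, last_id: u64) -> bool {
        self.recent.contains(&last_id)
    }
}

fn main() {
    let mut ids = LastIds { recent: VecDeque::new() };
    for id in 0..2000u64 {
        ids.register_entry_id(id);
    }
    assert!(ids.is_valid(1999)); // recent: accepted
    assert!(!ids.is_valid(3));   // too old: rejected
}
```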
fc69d31914 Merge pull request #106 from garious/parallelize-accountant
Parallelize accountant
2018-04-04 22:42:28 -06:00
8d425e127b Update benchmark to avoid write locks in sig duplicate detection 2018-04-04 17:29:22 -06:00
3cfb07ea38 Sort signatures by last_id
This will allow for additional concurrency as well as give the server
a means of garbage-collecting old signatures.
2018-04-04 17:06:31 -06:00
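Grouping the seen-signature set by last_id, as the commit above describes, makes garbage collection a matter of dropping the bucket for an entry ID that has aged out, and keeps lookups for different last_ids from contending. A small illustration with hypothetical names and simplified key types:

```rust
use std::collections::{HashMap, HashSet};

// Signatures grouped by the last_id that the transaction referenced.
type Signatures = HashMap<u64, HashSet<[u8; 64]>>;

fn reserve_signature(sigs: &mut Signatures, last_id: u64, sig: [u8; 64]) -> bool {
    // Returns false if the signature was already seen for this last_id.
    sigs.entry(last_id).or_default().insert(sig)
}

fn forget_last_id(sigs: &mut Signatures, last_id: u64) {
    // Garbage-collect every signature recorded against an expired entry ID.
    sigs.remove(&last_id);
}

fn main() {
    let mut sigs: Signatures = HashMap::new();
    let sig = [1u8; 64];
    assert!(reserve_signature(&mut sigs, 42, sig));
    assert!(!reserve_signature(&mut sigs, 42, sig)); // duplicate rejected
    forget_last_id(&mut sigs, 42);
    assert!(reserve_signature(&mut sigs, 42, sig)); // bucket reset after GC
}
```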
76679ffb92 Per-cell locking
This allows us to use read-locks for balances most of the time. We
only lock the full table if we need to add one.
2018-04-04 16:31:13 -06:00
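Per-cell locking as described above: the outer table takes a read lock on the hot path, each balance has its own lock, and the table-wide write lock is only needed when adding a new account. A sketch under assumed, simplified types:

```rust
use std::collections::HashMap;
use std::sync::RwLock;

// Outer RwLock guards the table shape; inner RwLock guards each balance cell.
type Balances = RwLock<HashMap<String, RwLock<i64>>>;

fn credit(balances: &Balances, account: &str, amount: i64) {
    // Fast path: the account already exists, so a table read lock is enough.
    if let Some(cell) = balances.read().unwrap().get(account) {
        *cell.write().unwrap() += amount;
        return;
    }
    // Slow path: take the table write lock only to add the new cell,
    // then retry on the fast path.
    balances
        .write()
        .unwrap()
        .entry(account.to_string())
        .or_insert_with(|| RwLock::new(0));
    credit(balances, account, amount);
}

fn main() {
    let balances: Balances = RwLock::new(HashMap::new());
    credit(&balances, "alice", 5);
    credit(&balances, "alice", 3);
    assert_eq!(*balances.read().unwrap()["alice"].read().unwrap(), 8);
}
```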
dc2ec925d7 Better test 2018-04-04 16:01:43 -06:00
81d6ba3ec5 Merge pull request #105 from garious/coverage-comments
Add the 'why' for code coverage to readme
2018-04-04 14:34:26 -07:00
014bdaa355 Add benchmark for parallel transaction processing 2018-04-04 12:43:27 -06:00
0c60fdd2ce Make accountant thread-safe
Before this change, parallel transaction processing required locking
the full accountant. Since we only call one method,
process_verified_transaction, the global lock equates to doing no
parallelization at all.  With this change, we only lock the data that's
being written to.
2018-04-04 12:33:03 -06:00
43d986d14e Add the 'why' for code coverage to readme 2018-04-04 09:26:38 -06:00
123d7c6a37 Merge pull request #99 from aeyakovenko/subscribers
Blobs and windows
2018-04-03 17:12:53 -06:00
5ac7df17f9 Implement window service
Batch out-of-order blobs until we have a contiguous window.
2018-04-03 13:53:19 -07:00
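The window service described above holds out-of-order blobs in an indexed window and only hands contiguous runs downstream. A toy sketch of that batching (the indexing scheme and types are illustrative, not the actual streamer code):

```rust
// Insert blobs by index into a fixed window and drain the contiguous prefix.
struct Window {
    slots: Vec<Option<Vec<u8>>>, // one slot per expected blob index
    consumed: usize,             // next index we are waiting for
}

impl Window {
    fn new(size: usize) -> Self {
        Window { slots: vec![None; size], consumed: 0 }
    }

    fn insert(&mut self, index: usize, blob: Vec<u8>) {
        if index >= self.consumed && index < self.slots.len() {
            self.slots[index] = Some(blob);
        }
    }

    /// Return every blob from `consumed` onward that forms an unbroken run.
    fn drain_contiguous(&mut self) -> Vec<Vec<u8>> {
        let mut out = Vec::new();
        while self.consumed < self.slots.len() {
            match self.slots[self.consumed].take() {
                Some(blob) => {
                    out.push(blob);
                    self.consumed += 1;
                }
                None => break, // gap: wait (or request a repair) before continuing
            }
        }
        out
    }
}

fn main() {
    let mut window = Window::new(8);
    window.insert(1, vec![1]); // arrives out of order
    assert!(window.drain_contiguous().is_empty()); // still waiting for index 0
    window.insert(0, vec![0]);
    assert_eq!(window.drain_contiguous().len(), 2); // 0 and 1 now contiguous
}
```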
bc0dde696a Merge pull request #102 from garious/rollback
Fix clippy warnings
2018-04-03 10:08:42 -06:00
c323bd3c87 Fix clippy warnings 2018-04-03 09:55:33 -06:00
5c672adc21 Merge pull request #101 from garious/rollback
Move tests
2018-04-02 21:58:10 -06:00
2f80747dc7 Move tests
After we restructured for parallel verification, the tests here
were unreferenced by the accountant, but still meaningful to
transaction verification.
2018-04-02 21:45:21 -06:00
95749ed0e3 Merge pull request #100 from garious/rollback
Cleanup use of event signatures and entry hashing
2018-04-02 21:17:37 -06:00
94eea3abec fmt 2018-04-02 21:15:21 -06:00
fe32159673 Add a test to ensure witness data continues to be hashed 2018-04-02 21:07:38 -06:00
07aa2e1260 Add witness data to entry hash
Otherwise, witnesses can be dropped or reordered by a malicious
generator.
2018-04-02 20:47:51 -06:00
6fec8fad57 Adding from to the signature is redundant 2018-04-02 20:34:18 -06:00
84df487f7d Merge pull request #97 from garious/rollback
Refactoring for rollback
2018-04-02 15:41:33 -06:00
49708e92d3 Use last_id instead of seed
It doesn't really matter, but was confusing since the seed points
to an entry before the mint's deposit.
2018-04-02 15:06:42 -06:00
daadae7987 Move replaying ledger out of accountant 2018-04-02 14:51:55 -06:00
2b788d06b7 Move the historian up to accountant_skel 2018-04-02 14:41:07 -06:00
90cd9bd533 Move balance check so that log_* methods are only used to add logging 2018-04-02 14:14:49 -06:00
d63506f98c No longer allow deposits outside the constructor 2018-04-02 14:00:42 -06:00
17de6876bb Add simpler accountant constructor 2018-04-02 13:51:44 -06:00
fc540395f9 Update docs 2018-04-02 11:51:56 -06:00
da2b4962a9 Move verify_slice() into a trait 2018-04-02 11:43:38 -06:00
3abe305a21 Move reserve_signatures into accountant
Reasons Transaction signatures need to be unique:

1. guard against duplicates
2. accountant uses them as IDs to link Witness signatures to transactions via the
`pending` hash map
2018-04-02 09:38:36 -06:00
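
A sketch of the two roles listed in this commit, with illustrative types: the same signature both guards against duplicates and serves as the key that links a later Witness back to its pending plan.

```rust
use std::collections::{HashMap, HashSet};

type Signature = [u8; 64];

/// Placeholder for a spending plan waiting on a witness.
struct Plan;

struct Accountant {
    reserved: HashSet<Signature>,      // duplicate guard
    pending: HashMap<Signature, Plan>, // Witness -> transaction linkage
}

impl Accountant {
    /// Returns false if the signature was seen before, rejecting the
    /// duplicate before any balances are touched.
    fn reserve_signature(&mut self, sig: Signature) -> bool {
        self.reserved.insert(sig)
    }

    /// A later Witness (timestamp or countersignature) finds the transaction
    /// it completes through the same signature.
    fn apply_witness(&mut self, sig: &Signature) -> Option<Plan> {
        self.pending.remove(sig)
    }
}
```
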
46e8c09bd8 Revoke API access to first_id 2018-04-02 09:30:10 -06:00
e683c34a89 Version bump 2018-03-31 14:44:43 -06:00
54e4f75081 Merge pull request #95 from jackson-sandland/source-documentation-review
94: source doc review
2018-03-30 14:50:51 -06:00
9f256f0929 94 - snakecase mod names 2018-03-30 13:10:27 -07:00
ef169a6652 94: source doc review 2018-03-30 10:43:38 -07:00
eaec25f940 Version bump 2018-03-29 15:05:38 -06:00
6a87d8975c Merge pull request #93 from garious/par-req-processing
Better benchmark, fix logging
2018-03-29 14:02:40 -06:00
b8cf5f9427 Fix transaction logging 2018-03-29 13:50:32 -06:00
2f1e585446 Better benchmark
Tolerates dropped UDP packets
2018-03-29 13:41:11 -06:00
f9309b46aa Merge pull request #92 from garious/par-req-processing
Parallel request verification
2018-03-29 13:28:21 -06:00
22f5985f1b Do request verification in parallel, and then process the verified requests 2018-03-29 13:18:08 -06:00
c59c38e50e Refactor for batch verification 2018-03-29 13:09:21 -06:00
232e1bb8a3 Colocate packet dependencies 2018-03-29 12:55:41 -06:00
1fbb34620c Fix compiler warning 2018-03-29 12:54:10 -06:00
89f5b803c9 Merge pull request #91 from garious/more-docs
Add more documentation
2018-03-29 12:39:03 -06:00
55179101cd Add more documentation 2018-03-29 12:20:54 -06:00
132495b1fc A simple consensus diagram to guide rollback/coalescing
Diagram for what's described in #84 for rollback support.
2018-03-29 10:52:02 -06:00
a03d7bf5cd Missed a couple 2018-03-28 22:20:31 -06:00
3bf225e85f Don't require install to run demo 2018-03-28 22:18:33 -06:00
cc2bb290c4 Merge pull request #89 from garious/sig-verify-bench
Add microbenchmark for signature verification
2018-03-28 22:15:10 -06:00
878ca8c5c5 Add microbenchmark for signature verification 2018-03-28 22:02:47 -06:00
4bc41d81ee Fix compiler warning 2018-03-28 21:05:21 -06:00
f6ca176fc8 Merge pull request #88 from garious/revert-tcp-client
Revert TCP sync of ledger
2018-03-28 20:28:05 -06:00
0bec360a31 Revert TCP sync of ledger
The feature was too rushed. We technically don't need it until we
implement consensus. It'll come back another day (with many more tests!)
2018-03-28 20:16:15 -06:00
04f30710c5 Merge pull request #87 from garious/tcp-client
tx confirmed/sec ---> tx processed/sec
2018-03-28 17:04:36 -06:00
98c0a2af87 tx confirmed/sec ---> tx processed/sec
Before this patch, we were waiting until the full log was
sent back across the wire, parsed, and interpreted. That was giving
us a metric of "transactions confirmed per second" instead of
"transactions processed per second". Instead, we'll just send one
tiny packet back with the balance. As soon as the balance is what
we expect it to be, we end the benchmark.
2018-03-28 16:51:21 -06:00
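
A sketch of the measurement loop this commit describes; the `Client` trait and timing values are illustrative stand-ins, not the demo's actual code. The benchmark polls a single balance until it reaches the expected total and divides transactions sent by the elapsed time.

```rust
use std::thread::sleep;
use std::time::{Duration, Instant};

/// Hypothetical client handle; only the polling pattern matters here.
trait Client {
    fn get_balance(&self) -> i64;
}

/// `start` is taken when the first transaction is sent. The server replies to
/// each balance query with one small packet, so polling is cheap compared to
/// pulling back and replaying the whole ledger.
fn measure_tps(client: &impl Client, txs_sent: u64, expected: i64, start: Instant) -> f64 {
    while client.get_balance() < expected {
        sleep(Duration::from_millis(100));
    }
    txs_sent as f64 / start.elapsed().as_secs_f64()
}
```
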
9db42c1769 Merge pull request #86 from garious/tcp-client
Fix up client demo
2018-03-28 14:57:09 -06:00
849bced602 Fix up client demo 2018-03-28 14:40:58 -06:00
27f29019ef Merge pull request #83 from garious/tcp-client
TCP subscription service
2018-03-28 13:19:38 -06:00
8642a41f2b See if Travis will tolerate executing some of the test 2018-03-28 10:25:16 -06:00
bf902ef5bc Ignore accountant_stub test
TODO: Figure out why this test fails on TravisCI
2018-03-28 10:05:00 -06:00
7656b55c22 nit 2018-03-27 17:22:31 -06:00
7d3d4b9443 nit 2018-03-27 17:20:23 -06:00
15c093c5e2 typo 2018-03-27 16:31:19 -06:00
116166f62d Rename project: silk -> solana 2018-03-27 16:25:12 -06:00
26b19dde75 Rename project: silk -> solana 2018-03-27 16:19:28 -06:00
c8ddc68f13 Rename project: silk -> solana 2018-03-27 16:16:27 -06:00
7c9681007c Drop support for random access to the ledger
No longer store the ledger locally.
2018-03-27 14:47:03 -06:00
13206e4976 Let clients subscribe to the ledger over TCP
TODO: Add more tests

Fixes #27
2018-03-27 14:46:24 -06:00
2f18302d32 Merge pull request #80 from garious/fix-ci
Fix CI
2018-03-26 22:13:11 -06:00
ddb21d151d Nightly rustfmt
Format code with the nightly version of rustfmt, which sorts imports.
2018-03-26 22:03:28 -06:00
c64a9fb456 Give Travis a little more time to start threads 2018-03-26 22:02:05 -06:00
ee19b4f86e See if CI hangs because of wait_on_signature() 2018-03-26 21:53:30 -06:00
14239e584f fix writer 2018-03-26 21:36:29 -06:00
112aecf6eb Merge pull request #77 from aeyakovenko/responder
Responder
2018-03-25 17:01:53 -07:00
c1783d77d7 fixed test 2018-03-25 16:18:27 -07:00
f089abb3c5 fix bench 2018-03-25 15:37:00 -07:00
8e551f5e32 debug trait tests 2018-03-25 08:22:04 -07:00
290960c3b5 wip 2018-03-25 08:06:33 -07:00
62af09adbe wip 2018-03-25 08:05:03 -07:00
e39c0b34e5 update 2018-03-25 00:06:48 -07:00
8ad90807ee responder with larger block size 2018-03-24 23:46:25 -07:00
533b3170a7 responder 2018-03-24 23:31:54 -07:00
7732f3f5fb services 2018-03-24 18:01:54 -07:00
f52f02a434 services 2018-03-24 18:01:40 -07:00
4d7d4d673e Merge pull request #75 from garious/fix-testnode
Revive silk-testnode
2018-03-23 22:16:59 -06:00
9a437f0d38 Revive silk-testnode 2018-03-23 21:49:28 -06:00
c385f8bb6e Merge pull request #73 from garious/yes-clippy
Automated mentoring by clippy
2018-03-22 15:22:12 -06:00
fa44be2a9d Ignore some clippy advice 2018-03-22 14:59:25 -06:00
117ab0c141 Clippy review 2018-03-22 14:50:24 -06:00
7488d19ae6 Clippy review 2018-03-22 14:40:28 -06:00
60524ad5f2 Clippy review 2018-03-22 14:38:06 -06:00
fad7ff8bf0 Clippy review 2018-03-22 14:31:58 -06:00
383d445ba1 Clippy review 2018-03-22 14:15:29 -06:00
803dcb0800 Mutex<bool> -> AtomicBool 2018-03-22 14:05:23 -06:00
fde320e2f2 Merge pull request #71 from garious/rework-recorder
Replicate the ledger
2018-03-21 17:23:55 -06:00
8ea97141ea Update the test to replicate the ledger 2018-03-21 17:15:32 -06:00
9f232bac58 Allow clients to sync the ledger
Fixes #4
2018-03-21 15:46:49 -06:00
8295cc11c0 Move JSON printing up the stack 2018-03-20 23:15:44 -06:00
70f80adb9a Merge pull request #70 from garious/planevent-to-witness
Cleanup
2018-03-20 19:13:02 -06:00
9a7cac1e07 Use the Entry API to remove the double lookup 2018-03-20 18:07:54 -06:00
c584a25ec9 Move complete_transaction from method to function
So that we can hold separate mutable references to the pending queue
and the map of balances.
2018-03-20 17:47:57 -06:00
bff32bf7bc Cleanup 2018-03-20 17:32:02 -06:00
d0e7450389 Add docs 2018-03-20 16:58:14 -06:00
4da89ac8a9 Cleanup naming 2018-03-20 16:53:41 -06:00
f7032f7d9a Cleanup: replace bool retval with is_complete() method 2018-03-20 16:52:47 -06:00
7c7e3931a0 Better docs 2018-03-20 15:52:46 -06:00
6be3d62d89 Remove Action from spending plans 2018-03-20 15:43:07 -06:00
6f509a8a1e Reorder 2018-03-20 15:31:28 -06:00
4379fabf16 PlanEvent -> Witness
The term used by the Simplicity smart contract language
2018-03-20 15:25:50 -06:00
6b66e1a077 Merge pull request #69 from garious/move-streamer-benchmark
Move streamer benchmark out of unit tests
2018-03-19 17:33:45 -06:00
c11a3e0fdc Move streamer benchmark out of unit tests 2018-03-19 17:10:01 -06:00
3418033c55 Merge pull request #68 from garious/fix-bench
Fix bench
2018-03-19 16:52:41 -06:00
caa9a846ed Boot sha2-asm
Stick with pure Rust until someone can write a benchmark that
demonstrates that sha2-asm adds value. If we go with a GPU
implementation first, we may never need to do that.
2018-03-19 16:42:30 -06:00
8ee76bcea0 Fix benchmark build 2018-03-19 16:41:01 -06:00
47325cbe01 Merge pull request #67 from garious/cleanup-naming
Cleanup naming
2018-03-19 16:29:08 -06:00
e0c8417297 Apply renames to docs 2018-03-19 10:23:43 -06:00
9238ee9572 No longer rename log crate 2018-03-19 10:18:51 -06:00
64af37e0cd logger -> recorder
Free up namespace for a traditional runtime logger.
2018-03-19 10:16:21 -06:00
9f9b79f30b log -> ledger
Free up namespace for traditional runtime logs.
2018-03-19 10:09:19 -06:00
265f41887f asset -> tokens 2018-03-19 10:03:41 -06:00
4f09e5d04c Merge pull request #66 from garious/conditional-plan
Simplify contract language
2018-03-18 21:12:26 -06:00
434f321336 Add spending plan tests 2018-03-18 21:02:28 -06:00
f4e0d1be58 Make conditions explicit in races
And boot recursive spending plans. That path required heap allocations.
Since we don't have a need for this generality right now, reduce the
language to the smallest one that can pass our test suite.
2018-03-17 20:43:05 -06:00
e5bae0604b Specialize transaction assets to i64
Proof-of-history is generic, but now that we're using it entirely
for tokens, we can specialize the type and start doing more interesting
things than just Eq and Serialize operations.
2018-03-17 19:56:15 -06:00
e7da083c31 Move spending plans to their own crate 2018-03-17 19:56:15 -06:00
367c32dabe Guard spending plans, not just payments 2018-03-17 19:56:15 -06:00
e054238af6 Merge pull request #65 from aeyakovenko/fixtest
fix test
2018-03-14 12:21:08 -07:00
e8faf6d59a trait test 2018-03-14 11:28:05 -07:00
baa4ea3cd8 wfmt 2018-03-14 11:14:40 -07:00
75ef0f0329 fix test 2018-03-14 11:02:38 -07:00
65185c0011 Merge pull request #63 from aeyakovenko/streamer-integrated
Streamer integrated
2018-03-12 08:38:59 -06:00
eb94613d7d Use streaming socket interface within accountant
Pull messages from the streamer, process them, and forward them to the sender.
2018-03-11 23:41:09 -05:00
67f4f4fb49 Merge pull request #64 from garious/dumb-contracts
Entry-level smart contracts
2018-03-11 13:23:11 -06:00
a7ecf4ac4c Merge pull request #57 from aeyakovenko/streamer
Streamer
2018-03-11 13:22:49 -06:00
45765b625a Don't let users accidentally burn their funds either 2018-03-11 12:04:49 -06:00
aa0a184ebe Ensure the server isn't passed a Plan that spends more than is bonded 2018-03-11 11:53:45 -06:00
069f9f0d5d add ipv6 flag to cargo.toml 2018-03-11 12:53:16 -05:00
c82b520ea8 remove unecessary returns 2018-03-11 11:45:17 -05:00
9d6e5bde4a ipv6 test with a separate flag 2018-03-11 11:22:21 -05:00
0eb3669fbf cleanup timestamp processing 2018-03-11 00:30:01 -07:00
30449b6054 cleanup sig processing 2018-03-11 00:11:08 -07:00
f5f71a19b8 First go at smart contracts
Needs lots of cleanup.
2018-03-10 22:00:48 -07:00
0135971769 Fast UdpSocket reader
* message needs to fit into 256 bytes
* allocator to keep track of blocks of messages
* udp socket receiver server that fills up the block as fast as possible
* udp socket sender server that sends out the block as fast as possible
2018-03-10 21:09:23 -06:00
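
A sketch of the receive side under the constraints listed in this commit; buffer sizes and the nonblocking strategy are illustrative, not the actual streamer code.

```rust
use std::io;
use std::net::UdpSocket;

const PACKET_SIZE: usize = 256;        // every message must fit in one packet
const PACKETS_PER_BLOCK: usize = 1024; // packets per preallocated block

/// One reusable block of packet buffers, filled as fast as recv_from allows.
struct PacketBlock {
    data: Vec<[u8; PACKET_SIZE]>,
    sizes: Vec<usize>,
}

fn read_block(socket: &UdpSocket, block: &mut PacketBlock) -> io::Result<usize> {
    block.data.resize(PACKETS_PER_BLOCK, [0u8; PACKET_SIZE]);
    block.sizes.clear();
    let mut filled = 0;
    while filled < PACKETS_PER_BLOCK {
        match socket.recv_from(&mut block.data[filled]) {
            Ok((len, _addr)) => {
                block.sizes.push(len);
                filled += 1;
            }
            // With a nonblocking socket, stop once the kernel queue is drained
            // and hand the partially filled block to the next stage.
            Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => break,
            Err(e) => return Err(e),
        }
    }
    Ok(filled)
}
```
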
8579795c40 Ensure transactions won't get canceled after next refactor 2018-03-10 19:44:45 -07:00
9d77fd7eec Store only spending plans, not full transactions 2018-03-10 18:35:10 -07:00
8c40d1bd72 Move spending endpoints into expressions 2018-03-10 17:41:18 -07:00
7a0bc7d888 Move smart contract fields into their own struct 2018-03-10 16:55:39 -07:00
1e07014f86 Merge pull request #62 from garious/batch-events
Batch events
2018-03-09 17:37:02 -07:00
49281b24e5 Move Tick out of Event
Every Entry is now a Tick and the entries contain events.
2018-03-09 17:22:17 -07:00
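
A sketch of the resulting shape, with illustrative field names: after this change every Entry is itself a tick in the hash chain, and the events it carries are the batch observed since the previous entry.

```rust
type Hash = [u8; 32];

/// Placeholder for the transactions and witnesses an entry can carry.
enum Event {
    Transaction, // details elided
}

/// Every entry doubles as a tick: it records how many hashes were performed
/// since the previous entry, the resulting id, and the batch of events whose
/// order is locked down by that id.
struct Entry {
    num_hashes: u64,
    id: Hash,
    events: Vec<Event>,
}
```
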
a8b1980de4 Restore reorder attack test 2018-03-09 17:02:17 -07:00
b8cd5f0482 Boot Cargo.lock from git
Only add Cargo.lock to downstream dependencies.
2018-03-09 16:26:26 -07:00
cc9f0788aa Batch events
It's now a Tick that locks down event order. Before this change, the
event order would be locked down in the order the server sees it.

Fixes #59
Fixes #61
2018-03-09 16:16:33 -07:00
209910299d Version bump
Next release probably won't have a compatible entry log with the
0.3.x line.
2018-03-09 14:33:37 -07:00
17926ff5d9 Merge pull request #58 from garious/deterministic-historian
Deterministic historian/accountant hashes
2018-03-09 07:06:40 -07:00
957fb0667c Deterministic historian/accountant hashes
When in tick-less mode, no longer continuously hash on the
background thread. That mode is just used for testing and
genesis log generation, and those extra hashes are just noise.

Note that without the extra hashes, we lose the duration between
events. Effectively, we distinguish proof-of-order from proof-of-time.
2018-03-09 06:58:40 -07:00
8d17aed785 Process timestamps as they are added 2018-03-08 15:39:03 -07:00
7ef8d5ddde Lock down dependencies 2018-03-08 13:25:40 -07:00
9930a2e167 With v0.3.1 published to crates.io, you can now run silk without git 2018-03-08 11:42:06 -07:00
a86be9ebf2 Merge pull request #56 from garious/add-conditions
Add conditions to transactions
2018-03-08 11:15:31 -07:00
ad6665c8b6 Complete timestamp and signature transactions 2018-03-08 11:06:52 -07:00
923162ae9d WIP: process timestamps 2018-03-08 10:19:54 -07:00
dd2bd67049 Add a barebones test for transaction conditions 2018-03-08 08:58:34 -07:00
d500bbff04 Add public key to mint
This turns the mint into a handy way to generate public keys
without throwing the private key away.
2018-03-08 08:33:00 -07:00
e759bd1a99 Add conditions to the signature to reject duplicates 2018-03-08 08:18:34 -07:00
94daf4cea4 Add Cancel and Timestamp events
Fixes #31, #34, #39
2018-03-08 08:17:34 -07:00
2379792e0a Add DateTime and Cancel conditions
Fixes #32, #33
2018-03-08 08:17:08 -07:00
dba6d7a8a6 Update README.md 2018-03-07 17:20:40 -07:00
086c206b76 Merge pull request #55 from garious/the-mint
More intuitive demo, introducing The Mint
2018-03-07 17:18:24 -07:00
5dd567deef Rename Genesis to Mint
Genesis is a story of creation. We should only use that term for
the event log that bootstraps the system.
2018-03-07 17:08:15 -07:00
b6d8f737ca Introducing, the mint
Use the mint to pair a new private key with new tokens.
2018-03-07 16:58:04 -07:00
491ba9da84 Add accessors to keypairs and signatures 2018-03-07 15:32:22 -07:00
a420a9293f Fix demo 2018-03-07 11:37:30 -07:00
c1bc5f6a07 Merge pull request #54 from garious/imperative-genesis
Boot genesis block helper
2018-03-07 11:19:16 -07:00
9834c251d0 Boot genesis block helper
Before this change, if you wanted to use a new Transaction
feature in the genesis block, you'd need to extend its
Creator object and associated methods.  With yesterday's
additions to Transaction, it's now so easy to work with
Transactions directly that we can get rid of the middleman.

Also added a KeyPair type alias, so that ring could be easily swapped
out with a competing library, if needed.
2018-03-07 11:10:15 -07:00
54340ed4c6 Delete debugging println
Thanks @jackson-sandland!
2018-03-06 21:17:41 -07:00
96a0a9202c Update README.md 2018-03-06 21:12:50 -07:00
a4c081d3a1 Merge pull request #53 from garious/monorphic-entry
Monomorphize Entry and Event
2018-03-06 20:39:11 -07:00
d1b6206858 Monomorphize Entry and Event
Transaction turned out to be the only struct worth making generic.
2018-03-06 20:29:18 -07:00
0eb6849fe3 Merge pull request #52 from garious/add-transaction-struct
Break dependency cycle
2018-03-06 17:53:48 -07:00
b725fdb093 Sha256Hash -> Hash
Because in Loom, there's just the one. Hopefully no worries that it
shares a name with std::Hash.
2018-03-06 17:40:01 -07:00
1436bb1ff2 Move entry into its own module
Hmm, Logger doesn't depend on log.
2018-03-06 17:40:01 -07:00
5a44c36b1f Move hash into its own module 2018-03-06 17:40:01 -07:00
5d990502cb Merge pull request #51 from jackson-sandland/50-proof-read-README
Issue #50 - proof read README
2018-03-06 17:39:33 -07:00
64735da716 Issue #50 - proof read README 2018-03-06 16:21:45 -08:00
95b82aa6dc Merge pull request #49 from garious/add-transaction-struct
DRY up transaction signing
2018-03-06 16:48:27 -07:00
f09952f3d7 DRY up transaction signing
Cleanup the big mess I copy-pasted myself into.
2018-03-06 16:34:25 -07:00
b98e04dc56 Update README.md 2018-03-06 15:03:06 -07:00
cb436250da Merge pull request #48 from garious/add-transaction-struct
data -> asset
2018-03-06 15:01:56 -07:00
4376032e3a data -> asset
'data' is too vague.
2018-03-06 14:50:32 -07:00
c231331e05 Merge pull request #47 from garious/add-transaction-struct
Reorg
2018-03-06 12:57:49 -07:00
624c151ca2 Add signature module
Because things other than transactions can be signed.
2018-03-06 12:48:28 -07:00
5d0356f74b Move verify_entry to a method as well 2018-03-06 12:35:12 -07:00
b019416518 Move verify into methods
A little overly-coupled to Serialize, but makes the code a lot tighter
2018-03-06 12:27:08 -07:00
4fcd9e3bd6 Give Transaction its own module 2018-03-06 12:18:17 -07:00
66bf889c39 Rename Transfer to Transaction
struct names should be nouns
2018-03-06 11:54:47 -07:00
a2811842c8 More cleanup
Far fewer branches when we process transfers outside the context
of events.
2018-03-06 11:43:55 -07:00
1929601425 Cleanup
Now that Transfer is out of the enum, we don't need to pattern
match to access its fields.
2018-03-06 11:19:59 -07:00
282afee47e Use Transfer struct on the client side too
Sharing is caring.
2018-03-06 11:03:43 -07:00
e701ccc949 Rename Request::Transfer to Request::Transaction 2018-03-06 10:59:47 -07:00
6543497c17 Move Transaction data into its own struct
This will allow us to add additional transfer types to the log.
2018-03-06 10:50:32 -07:00
7d9af5a937 Merge pull request #46 from garious/be-negative
Allow balances to be negative
2018-03-05 23:47:02 -07:00
720c54a5bb Allow balances to be negative
* Will allow owners to loan tokens to others.
* Will allow for parallel verification of balances without spilling
  over 64 bits.

Fixes #43
2018-03-05 17:30:53 -07:00
5dca3c41f2 Update README.md 2018-03-05 16:19:26 -07:00
929546f60b Update README.md 2018-03-05 16:18:46 -07:00
cb0ce9986c Merge pull request #45 from garious/init-from-log
Towards sending the log to clients
2018-03-05 16:17:41 -07:00
064eba00fd Update readme 2018-03-05 16:05:16 -07:00
a4336a39d6 Initialize the testnode from a log
$ cargo run --bin silk-genesis-file-demo > demo-genesis.json
$ cat demo-genesis.json | cargo run --bin silk-genesis-block > demo-genesis.log
$ cat demo-genesis.log | cargo run --bin silk-testnode
2018-03-05 15:34:44 -07:00
298989c4b9 Generate log from Genesis 2018-03-05 13:03:56 -07:00
48c28c2267 Transactions now require a hash of the last entry they've seen
This ensures the transaction cannot be processed on a chain
that forked before that ID. It will also provide a basis for
expiration constraints. A client may want their transaction
to expire, and the generators may want to reject transactions
that have been floating in the ether for years.
2018-03-05 12:48:14 -07:00
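
A sketch of the field this commit adds, with illustrative surrounding fields: the `last_id` pins a transaction to a point in this ledger's hash chain, which is what makes both the fork check and the later expiration policy possible.

```rust
type Hash = [u8; 32];
type PublicKey = [u8; 32];
type Signature = [u8; 64];

struct Transaction {
    from: PublicKey,
    to: PublicKey,
    tokens: i64,
    /// Hash of the most recent ledger entry the client has seen. A node on a
    /// chain that forked before this entry will not recognize it and must
    /// reject the transaction; a node may also reject it once the entry is
    /// old enough to have fallen out of its id window.
    last_id: Hash,
    sig: Signature,
}
```
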
d76ecbc9c9 Don't block the server 2018-03-05 11:39:59 -07:00
79fb9c00aa Boot wait_on_signature() from accountant
Instead, there should be a way to query for a page of log data,
and checking whether it has a signature should be done client-side.
2018-03-05 10:45:18 -07:00
c9e03f37ce Logger now only speaks when spoken to
Before this change, the logger's send channel could quickly be
flooded with Tick events. Those events should only be passed to
a writer.

Also, the log_event() function no longer sends entries. That
functionality moved to the new process_events() function. This
will allow us to initialize the with the genesis block without
flooding the send channel with events the historian won't read.
2018-03-05 10:33:12 -07:00
aa5f1699a7 Update the set of unique signatures when loading an existing log. 2018-03-04 22:31:12 -07:00
e1e9126d03 Merge pull request #44 from garious/genesis
Finally, genesis block generation without channels
2018-03-04 14:39:28 -07:00
672a4b3723 Update historian diagram 2018-03-04 14:36:55 -07:00
955f76baab Finally, genesis block generation without channels 2018-03-04 14:32:30 -07:00
7da8a5e2d1 Merge pull request #42 from garious/genesis
Make num_hashes more intuitive
2018-03-04 13:05:38 -07:00
ff82fbf112 Make num_hashes mean the num_hashes since the last ID
Before this change, num_hashes meant the number of hashes since
the last ID, minus any hashing done on the event data. It made
no difference for Tick events, but logged Transaction events with
one less hash than actually occurred.
2018-03-04 09:52:36 -07:00
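
A sketch of the counting convention after this change. `Id` and the `DefaultHasher`-based `hash_step` are non-cryptographic stand-ins for the real SHA-256 chain; only the bookkeeping matters: the hash that absorbs the event data is now counted along with the rest.

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

type Id = u64; // stand-in for a SHA-256 hash, to keep the sketch runnable

/// Non-cryptographic stand-in for one round of the hash chain.
fn hash_step(prev: Id, event_data: Option<&[u8]>) -> Id {
    let mut hasher = DefaultHasher::new();
    prev.hash(&mut hasher);
    if let Some(data) = event_data {
        data.hash(&mut hasher);
    }
    hasher.finish()
}

/// Produce the next id, counting every hash since `start_id`, including the
/// final one that absorbs the event data. Before this change that last hash
/// was not counted, so Transaction entries under-reported num_hashes by one.
fn next_id(start_id: Id, hashes_in_between: u64, event_data: Option<&[u8]>) -> (u64, Id) {
    let mut id = start_id;
    let mut num_hashes = 0;
    for _ in 0..hashes_in_between {
        id = hash_step(id, None);
        num_hashes += 1;
    }
    if event_data.is_some() {
        id = hash_step(id, event_data);
        num_hashes += 1;
    }
    (num_hashes, id)
}
```
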
8503a0a58f Refactor 2018-03-04 09:21:45 -07:00
b1e9512f44 Rename end_hash to id 2018-03-04 07:50:26 -07:00
608def9c78 Consolidate imports 2018-03-04 07:28:51 -07:00
bcb21bc1d8 Delete dead code 2018-03-04 07:20:17 -07:00
f63096620a Merge pull request #41 from garious/genesis
Add command-line tool for generating a genesis block
2018-03-04 01:27:59 -07:00
9b26892bae Add a demo app to generate the genesis file 2018-03-04 01:21:40 -07:00
572475ce14 Load the genesis block 2018-03-04 00:15:17 -07:00
876d7995e1 Refactor to support loading an existing ledger 2018-03-03 22:25:40 -07:00
b8655e30d4 Make client-demo standalone
And remove deposit() methods from the API. Those should only be
used on the server to bootstrap.
2018-03-03 21:15:51 -07:00
7cf0d55546 Remove optional 'from' field 2018-03-03 20:41:07 -07:00
ce60b960c0 Special case sending money to self
In the genesis block, let matching 'from' and 'to' keys be used
to mint new coin.
2018-03-03 20:27:12 -07:00
cebcb5b92d Start genesis with a Tick, so that its hash can be used to bootstrap verification 2018-03-03 19:57:22 -07:00
11a0f96f5e Add command-line tool for generating a genesis block 2018-03-03 17:35:05 -07:00
74ebaf1744 Merge pull request #40 from garious/add-logger
Add logger
2018-03-03 14:37:15 -07:00
f7496ea6d1 Make create_logger a static method
Allows us to share the super long type signature in impl.
2018-03-03 14:26:59 -07:00
bebba7dc1f Give logger its own crate 2018-03-03 14:24:32 -07:00
afb2bf442c Use Instant instead of SystemTime for more precise ticking
And convert log_event from function to method
2018-03-03 14:08:53 -07:00
c7de48c982 Convert log_events from function to method 2018-03-03 14:00:37 -07:00
f906112c03 Move logging thread's state into a struct 2018-03-03 13:52:57 -07:00
8ef864fb39 Merge pull request #37 from garious/split-benchmark
Split benchmark
2018-03-03 12:13:54 -07:00
1c9b5ab53c Report performance of signature verification too 2018-03-03 11:59:34 -07:00
c10faae3b5 More readable metrics 2018-03-03 11:52:50 -07:00
2104dd5a0a Fix benchmark
Was measuring the creation of the iterator, not running it.
2018-03-03 11:45:23 -07:00
fbe64037db Merge pull request #35 from garious/split-benchmark
Move key generation and signing from transaction benchmark
2018-03-03 11:25:58 -07:00
d8c50b150c Move key generation and signing from transaction benchmark
Key generation, signing and verification are not the performance
bottleneck. Something is probably wrong here.
2018-03-03 11:11:46 -07:00
8871bb2d8e Merge pull request #30 from garious/simplify
Unify Claim and Transaction handling
2018-03-02 12:24:44 -07:00
a148454376 Update readme 2018-03-02 12:07:05 -07:00
be518b569b Remove cyclic dependency between event and log 2018-03-02 12:03:59 -07:00
c998fbe2ae Sign the owner's public key
Without this, the accountant will reject transfers from different
entities if they are for the same amount and to the same entity.
2018-03-02 11:56:42 -07:00
9f12cd0c09 Purge the Claim event type
It's now represented as a Transaction from an unknown party.
2018-03-02 11:48:58 -07:00
0d0fee1ca1 Sign Claim's 'to' field
Otherwise, the accountant will treat deposits of the same amount as
duplicates.
2018-03-02 11:46:22 -07:00
a0410c4677 Pipe all Claim constructors through a function 2018-03-02 10:58:43 -07:00
8fe464cfa3 Rename Claim's key field to match same field in Transaction 2018-03-02 10:47:21 -07:00
3e2d6d9e8b Generalize Transaction to express a Claim
If a Transaction doesn't have an existing address, it's being used
to create new funds.
2018-03-02 10:41:15 -07:00
32d677787b Reduce transactions sent by demo
We don't do retries yet, so keep tx count to something that won't
trigger any packet loss.
2018-03-02 10:35:38 -07:00
dfd1c4eab3 Don't process transaction if channel.send() fails.
Do all input validation first, then log (which can fail). If all
goes swimmingly, process the transaction.
2018-03-02 10:17:52 -07:00
36bb1f989d More defense against a double-spend attack
Before this change, a client could spend funds before the accountant
processed a previous spend. With this change in place, the accountant
updates balances immediately, but that comes at an architectural cost.
The accountant now verifies signatures on behalf of the historian, so
that it can ensure logging will not fail.
2018-03-02 09:55:44 -07:00
684f4c59e0 Delete commented out code
accountant crate shouldn't verify the log. Instead, it should
only add valid entries and leave verification to network nodes.
2018-03-02 08:51:29 -07:00
1b77e8a69a Move Event into its own crate
The log crate was starting to be the catch-all for all things
related to entries, events, signatures, and hashes. This split
shows us that:

* Event depends only on signatures, not on hashes [directly]
* All event testing was done via log testing (shame on me)
* Accounting depends only on events
2018-03-02 08:43:57 -07:00
662e10c3e0 Merge pull request #29 from garious/simplify
Remove Discovery event
2018-03-01 18:53:25 -07:00
c935fdb12f Move signature duplicate detection into the historian 2018-03-01 17:44:10 -07:00
9e16937914 Delete the Discovery event
Not useful to the accountant.
2018-03-01 17:02:41 -07:00
f705202381 No need to hash data that's already hashed to create the signature 2018-03-01 16:39:09 -07:00
f5532ad9f7 Merge pull request #28 from garious/go-udp
Switch to UDP
2018-03-01 14:25:20 -07:00
570e71f050 Check for duplicate signatures
TODO: have client add recent hash to each message
2018-03-01 14:07:39 -07:00
c9cc4b4369 Switch to UDP from TCP
And remove all the sleep()'ing around.
2018-03-01 13:47:53 -07:00
7111aa3b18 Copy disclaimer from the loom repository
Per @aeyakovenko, added Loom's disclaimer.
2018-03-01 09:16:39 -07:00
12eba4bcc7 Merge pull request #26 from garious/add-accountant
Add testnode and client-demo
2018-02-28 19:48:05 -07:00
4610de8fdd Switch to sync_channel to preserve order 2018-02-28 19:33:28 -07:00
3fcc2dd944 Add testnode
Fixes #20
2018-02-28 18:05:20 -07:00
8299bae2d4 Add accountant stub 2018-02-28 16:01:12 -07:00
604ccf7552 Add network interface for accountant 2018-02-28 14:00:04 -07:00
f3dd47948a Merge pull request #25 from garious/verify-historian-input
Verify event signatures before adding log entries
2018-02-28 10:34:10 -07:00
c3bb207488 Verify event signatures before adding log entries 2018-02-28 10:23:01 -07:00
9009d1bfb3 Merge pull request #24 from garious/add-accountant
Add accountant
2018-02-27 11:41:40 -07:00
fa4d9e8bcb Add more tests 2018-02-27 11:28:10 -07:00
34b77efc87 Sleep longer for TravisCI 2018-02-27 11:08:28 -07:00
5ca0ccbcd2 Add accountant 2018-02-27 10:54:06 -07:00
6aa4e52480 Merge pull request #23 from garious/add-transaction
Generalize the event log
2018-02-26 17:40:55 -07:00
f98e9a2ad7 Fix overuse of search-and-replace 2018-02-26 17:03:50 -07:00
c6134cc25b Allow the historian to track ownership of any type of data 2018-02-26 17:01:22 -07:00
0443b39264 Allow event log to hold events of any serializable (hashable) type 2018-02-26 16:42:31 -07:00
8b0b8efbcb Allow Entry to hold events of any kind of data 2018-02-26 15:37:33 -07:00
97449cee43 Allow events to hold any kind of data 2018-02-26 15:31:01 -07:00
ab5252c750 Move entry verification out of Entry impl 2018-02-26 14:39:01 -07:00
05a27cb34d Merge pull request #22 from garious/add-transaction
Extend the event log with a Transaction event to transfer possession
2018-02-26 11:26:58 -07:00
b02eab57d2 Extend the event log with a Transaction event to transfer possession
This implementation assumes 'from' is the current owner of 'data'.
Once that's verified, the signature ensures that nobody modified
'data' (the asset being transferred) or 'to', the entity taking
ownership.

Fixes #14
2018-02-26 11:09:11 -07:00
b8d52cc3e4 Make the Discovery event into a struct instead of a tuple 2018-02-24 11:15:03 -07:00
7d9bab9508 Update rendered demo diagram 2018-02-24 11:09:00 -07:00
944181a30e Version bump 2018-02-24 11:06:08 -07:00
d8dd50505a Merge pull request #21 from garious/add-signatures
Add signatures
2018-02-24 10:47:25 -07:00
d78082f5e4 Test bad signature 2018-02-24 10:27:51 -07:00
08e501e57b Extend the event log with a Claim event to claim possession
Unlike a Discovery event, a Claim event associates a public key
with a hash. It's intended to be used to claim ownership of
some hashable data. For example, a graphic designer could claim
copyright by hashing some image they created, signing it with
their private key, and publishing the hash-signature pair via
the historian. If someone else tries to claim it as their own,
the designer can point to the historian's log as cryptographically
secure evidence that the designer's copy existed before anyone
else's.

Note there's nothing here that verifies the first claim is the actual
content owner, only that the first claim almost certainly happened
before a second.
2018-02-24 10:09:49 -07:00
29a607427d Rename UserDataKey to Discovery
From the perspective of the log, when some data's hash is added,
that data is "discovered" by the historian.  Another event
might be a "claim" that some signed data belongs to the owner of a
public key.
2018-02-24 05:25:19 -07:00
afb830c91f Merge pull request #18 from garious/add-historian
self-ticking logger
2018-02-21 12:30:10 -07:00
c1326ac3d5 Up the time to sleep so that ticks are generated 2018-02-21 12:22:23 -07:00
513a1adf57 Version bump 2018-02-21 12:01:17 -07:00
7871b38c80 Update demo to use self-ticking logger 2018-02-21 11:52:03 -07:00
b34d2d7dee Allow the logger to inject Tick events on its own 2018-02-21 11:33:42 -07:00
d7dfa8c22d Readme cleanup 2018-02-21 10:07:32 -07:00
8df274f0af Add hash seed to verify_slice() 2018-02-21 09:43:34 -07:00
07c4ebb7f2 Add message sequence chart for readme demo
Fixes #17
2018-02-21 09:33:50 -07:00
49605b257d Merge pull request #16 from garious/add-serde
Add serialization/deserialization support to event log
2018-02-20 16:55:46 -07:00
fa4e232d73 Add serialization/deserialization support to event log
See bincode and serde_json for usage:
https://github.com/TyOverby/bincode

Fixes #1
2018-02-20 16:26:13 -07:00
bd84cf6586 Merge pull request #15 from garious/add-historian
Demo proof-of-history and reordering attack
2018-02-20 15:05:20 -07:00
6e37f70d55 Test reorder attack 2018-02-20 14:46:36 -07:00
d97112d7f0 Explain proof-of-history in the readme
Also:
* Hash userdata so that verification works as the readme describes.
* Drop itertools package. Found a way to use std::iter instead.

Fixes #8
2018-02-20 14:04:49 -07:00
71 changed files with 9201 additions and 431 deletions

4
.gitignore vendored
View File

@ -1,4 +1,4 @@
Cargo.lock
/target/
**/*.rs.bk
Cargo.lock
.cargo

View File

@ -1,22 +0,0 @@
language: rust
required: sudo
services:
- docker
matrix:
allow_failures:
- rust: nightly
include:
- rust: stable
- rust: nightly
env:
- FEATURES='asm,unstable'
before_script: |
export PATH="$PATH:$HOME/.cargo/bin"
rustup component add rustfmt-preview
script:
- cargo fmt -- --write-mode=diff
- cargo build --verbose --features "$FEATURES"
- cargo test --verbose --features "$FEATURES"
after_success: |
docker run -it --rm --security-opt seccomp=unconfined --volume "$PWD:/volume" elmtai/docker-rust-kcov
bash <(curl -s https://codecov.io/bash) -s target/cov

View File

@ -1,30 +1,72 @@
[package]
name = "silk"
description = "A silky smooth implementation of the Loom architecture"
version = "0.2.1"
documentation = "https://docs.rs/silk"
homepage = "http://loomprotocol.com/"
repository = "https://github.com/loomprotocol/silk"
name = "solana"
description = "Blockchain, Rebuilt for Scale"
version = "0.6.1"
documentation = "https://docs.rs/solana"
homepage = "http://solana.com/"
readme = "README.md"
repository = "https://github.com/solana-labs/solana"
authors = [
"Anatoly Yakovenko <aeyakovenko@gmail.com>",
"Greg Fitzgerald <garious@gmail.com>",
"Anatoly Yakovenko <anatoly@solana.com>",
"Greg Fitzgerald <greg@solana.com>",
"Stephen Akridge <stephen@solana.com>",
]
license = "Apache-2.0"
[[bin]]
name = "silk-demo"
path = "src/bin/demo.rs"
name = "solana-client-demo"
path = "src/bin/client-demo.rs"
[[bin]]
name = "solana-fullnode"
path = "src/bin/fullnode.rs"
[[bin]]
name = "solana-fullnode-config"
path = "src/bin/fullnode-config.rs"
[[bin]]
name = "solana-genesis"
path = "src/bin/genesis.rs"
[[bin]]
name = "solana-genesis-demo"
path = "src/bin/genesis-demo.rs"
[[bin]]
name = "solana-mint"
path = "src/bin/mint.rs"
[[bin]]
name = "solana-mint-demo"
path = "src/bin/mint-demo.rs"
[badges]
codecov = { repository = "loomprotocol/silk", branch = "master", service = "github" }
codecov = { repository = "solana-labs/solana", branch = "master", service = "github" }
[features]
unstable = []
asm = ["sha2-asm"]
ipv6 = []
cuda = []
erasure = []
[dependencies]
rayon = "1.0.0"
itertools = "0.7.6"
sha2 = "0.7.0"
sha2-asm = {version="0.3", optional=true}
digest = "0.7.2"
generic-array = { version = "0.9.0", default-features = false, features = ["serde"] }
serde = "1.0.27"
serde_derive = "1.0.27"
serde_json = "1.0.10"
ring = "0.12.1"
untrusted = "0.5.1"
bincode = "1.0.0"
chrono = { version = "0.4.0", features = ["serde"] }
log = "^0.4.1"
env_logger = "^0.4.1"
matches = "^0.1.6"
byteorder = "^1.2.1"
libc = "^0.2.1"
getopts = "^0.2"
isatty = "0.1"
rand = "0.4.2"
pnet = "^0.21.0"

View File

@ -1,4 +1,4 @@
Copyright 2018 Anatoly Yakovenko <anatoly@loomprotocol.com> and Greg Fitzgerald <garious@gmail.com>
Copyright 2018 Anatoly Yakovenko, Greg Fitzgerald and Stephen Akridge
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

249
README.md
View File

@ -1,69 +1,151 @@
[![Silk crate](https://img.shields.io/crates/v/silk.svg)](https://crates.io/crates/silk)
[![Silk documentation](https://docs.rs/silk/badge.svg)](https://docs.rs/silk)
[![Build Status](https://travis-ci.org/loomprotocol/silk.svg?branch=master)](https://travis-ci.org/loomprotocol/silk)
[![codecov](https://codecov.io/gh/loomprotocol/silk/branch/master/graph/badge.svg)](https://codecov.io/gh/loomprotocol/silk)
[![Solana crate](https://img.shields.io/crates/v/solana.svg)](https://crates.io/crates/solana)
[![Solana documentation](https://docs.rs/solana/badge.svg)](https://docs.rs/solana)
[![Build status](https://badge.buildkite.com/d4c4d7da9154e3a8fb7199325f430ccdb05be5fc1e92777e51.svg?branch=master)](https://buildkite.com/solana-labs/solana)
[![codecov](https://codecov.io/gh/solana-labs/solana/branch/master/graph/badge.svg)](https://codecov.io/gh/solana-labs/solana)
# Silk, a silky smooth implementation of the Loom specification
Blockchain, Rebuilt for Scale
===
Loom is a new architecture for a high-performance blockchain. Its whitepaper boasts a theoretical
throughput of 710k transactions per second on a 1 gbps network. The specification is implemented
in two git repositories. Research is performed in the loom repository. That work drives the
Loom specification forward. This repository, on the other hand, aims to implement the specification
as-is. We care a great deal about quality, clarity and short learning curve. We avoid the use
of `unsafe` Rust and write tests for *everything*. Optimizations are only added when
corresponding benchmarks are also added that demonstrate real performance boosts. We expect the
feature set here will always be a ways behind the loom repo, but that this is an implementation
you can take to the bank, literally.
Solana&trade; is a new blockchain architecture built from the ground up for scale. The architecture supports
up to 710 thousand transactions per second on a gigabit network.
# Usage
Disclaimer
===
Add the latest [silk package](https://crates.io/crates/silk) to the `[dependencies]` section
of your Cargo.toml.
All claims, content, designs, algorithms, estimates, roadmaps, specifications, and performance measurements described in this project are done with the author's best effort. It is up to the reader to check and validate their accuracy and truthfulness. Furthermore nothing in this project constitutes a solicitation for investment.
Create a *Historian* and send it *events* to generate an *event log*, where each log *entry*
is tagged with the historian's latest *hash*. Then ensure the order of events was not tampered
with by verifying each entry's hash can be generated from the hash in the previous entry:
Introduction
===
```rust
extern crate silk;
It's possible for a centralized database to process 710,000 transactions per second on a standard gigabit network if the transactions are, on average, no more than 176 bytes. A centralized database can also replicate itself and maintain high availability without significantly compromising that transaction rate using the distributed system technique known as Optimistic Concurrency Control [H.T.Kung, J.T.Robinson (1981)]. At Solana, we're demonstrating that these same theoretical limits apply just as well to blockchain on an adversarial network. The key ingredient? Finding a way to share time when nodes can't trust one another. Once nodes can trust time, suddenly ~40 years of distributed systems research becomes applicable to blockchain! Furthermore, and much to our surprise, it can be implemented using a mechanism that has existed in Bitcoin since day one. The Bitcoin feature is called nLocktime and it can be used to postdate transactions using block height instead of a timestamp. As a Bitcoin client, you'd use block height instead of a timestamp if you don't trust the network. Block height turns out to be an instance of what's being called a Verifiable Delay Function in cryptography circles. It's a cryptographically secure way to say time has passed. In Solana, we use a far more granular verifiable delay function, a SHA 256 hash chain, to checkpoint the ledger and coordinate consensus. With it, we implement Optimistic Concurrency Control and are now well en route toward that theoretical limit of 710,000 transactions per second.
use silk::historian::Historian;
use silk::log::{verify_slice, Entry, Event, Sha256Hash};
use std::{thread, time};
use std::sync::mpsc::SendError;
fn create_log(hist: &Historian) -> Result<(), SendError<Event>> {
hist.sender.send(Event::Tick)?;
thread::sleep(time::Duration::new(0, 100_000));
hist.sender.send(Event::UserDataKey(0xdeadbeef))?;
thread::sleep(time::Duration::new(0, 100_000));
hist.sender.send(Event::Tick)?;
Ok(())
}
Testnet Demos
===
fn main() {
let seed = Sha256Hash::default();
let hist = Historian::new(&seed);
create_log(&hist).expect("send error");
drop(hist.sender);
let entries: Vec<Entry> = hist.receiver.iter().collect();
for entry in &entries {
println!("{:?}", entry);
}
assert!(verify_slice(&entries, &seed));
}
The Solana repo contains all the scripts you might need to spin up your own
local testnet. Depending on what you're looking to achieve, you may want to
run a different variation, as the full-fledged, performance-enhanced
multinode testnet is considerably more complex to set up than a Rust-only,
singlenode testnode. If you are looking to develop high-level features, such
as experimenting with smart contracts, save yourself some setup headaches and
stick to the Rust-only singlenode demo. If you're doing performance optimization
of the transaction pipeline, consider the enhanced singlenode demo. If you're
doing consensus work, you'll need at least a Rust-only multinode demo. If you want
to reproduce our TPS metrics, run the enhanced multinode demo.
For all four variations, you'd need the latest Rust toolchain and the Solana
source code:
First, install Rust's package manager Cargo.
```bash
$ curl https://sh.rustup.rs -sSf | sh
$ source $HOME/.cargo/env
```
Running the program should produce a log similar to:
Now checkout the code from github:
```rust
Entry { num_hashes: 0, end_hash: [0, ...], event: Tick }
Entry { num_hashes: 6, end_hash: [67, ...], event: UserDataKey(3735928559) }
Entry { num_hashes: 5, end_hash: [123, ...], event: Tick }
```bash
$ git clone https://github.com/solana-labs/solana.git
$ cd solana
```
The demo code is sometimes broken between releases as we add new low-level
features, so if this is your first time running the demo, you'll improve
your odds of success if you check out the
[latest release](https://github.com/solana-labs/solana/releases)
before proceeding:
```bash
$ git checkout v0.6.0
```
Singlenode Testnet
---
The fullnode server is initialized with a ledger from stdin and
generates new ledger entries on stdout. To create the input ledger, we'll need
to create *the mint* and use it to generate a *genesis ledger*. It's done in
two steps because the mint-demo.json file contains private keys that will be
used later in this demo.
```bash
$ echo 1000000000 | cargo run --release --bin solana-mint-demo > mint-demo.json
$ cat mint-demo.json | cargo run --release --bin solana-genesis-demo > genesis.log
```
Before you start a fullnode, make sure you know the IP address of the machine you
want to be the leader for the demo, and make sure that UDP ports 8000-10000 are
open on all the machines you want to test with.
Generate a leader configuration file with:
```bash
cargo run --release --bin solana-fullnode-config -- -d > leader.json
```
Now start the server:
```bash
$ ./multinode-demo/leader.sh > leader-txs.log
```
To run a performance-enhanced fullnode on Linux, download `libcuda_verify_ed25519.a`. Enable
it by adding `--features=cuda` to the line that runs `solana-fullnode` in `leader.sh`.
```bash
$ wget https://solana-build-artifacts.s3.amazonaws.com/v0.5.0/libcuda_verify_ed25519.a
cargo run --release --features=cuda --bin solana-fullnode -- -l leader.json < genesis.log
```
Wait a few seconds for the server to initialize. It will print "Ready." when it's ready to
receive transactions.
Multinode Testnet
---
To run a multinode testnet, after starting a leader node, spin up some validator nodes:
Generate the validator's configuration file:
```bash
cargo run --release --bin solana-fullnode-config -- -d > validator.json
```
```bash
$ ./multinode-demo/validator.sh ubuntu@10.0.1.51:~/solana > validator-txs.log #The leader machine
```
As with the leader node, you can run a performance-enhanced validator fullnode by adding
`--features=cuda` to the line that runs `solana-fullnode` in `validator.sh`.
```bash
cargo run --release --features=cuda --bin solana-fullnode -- -l validator.json -v leader.json < genesis.log
```
# Developing
Testnet Client Demo
---
Now that your singlenode or multinode testnet is up and running, in a separate shell, let's send it some transactions! Note we pass in
the JSON configuration file here, not the genesis ledger.
```bash
$ ./multinode-demo/client.sh ubuntu@10.0.1.51:~/solana 2 #The leader machine and the total number of nodes in the network
```
What just happened? The client demo spins up several threads to send 500,000 transactions
to the testnet as quickly as it can. The client then pings the testnet periodically to see
how many transactions it processed in that time. Take note that the demo intentionally
floods the network with UDP packets, such that the network will almost certainly drop a
bunch of them. This ensures the testnet has an opportunity to reach 710k TPS. The client
demo completes after it has convinced itself the testnet won't process any additional
transactions. You should see several TPS measurements printed to the screen. In the
multinode variation, you'll see TPS measurements for each validator node as well.
Developing
===
Building
---
@ -76,11 +158,17 @@ $ source $HOME/.cargo/env
$ rustup component add rustfmt-preview
```
If your rustc version is lower than 1.26.1, please update it:
```bash
$ rustup update
```
Download the source code:
```bash
$ git clone https://github.com/loomprotocol/silk.git
$ cd silk
$ git clone https://github.com/solana-labs/solana.git
$ cd solana
```
Testing
@ -89,9 +177,37 @@ Testing
Run the test suite:
```bash
cargo test
$ cargo test
```
To emulate all the tests that will run on a Pull Request, run:
```bash
$ ./ci/run-local.sh
```
Debugging
---
There are some useful debug messages in the code; you can enable them on a per-module and per-level
basis with the normal RUST\_LOG environment variable. Run the fullnode with this syntax:
```bash
$ RUST_LOG=solana::streamer=debug,solana::server=info cat genesis.log | ./target/release/solana-fullnode > transactions0.log
```
to see the debug and info sections for streamer and server respectively. Generally
we are using debug for infrequent debug messages, trace for potentially frequent messages and
info for performance-related logging.
Attaching to a running process with gdb
```
$ sudo gdb
attach <PID>
set logging on
thread apply all bt
```
This will dump all the threads' stack traces into gdb.txt
Benchmarking
---
@ -104,5 +220,28 @@ $ rustup install nightly
Run the benchmarks:
```bash
$ cargo +nightly bench --features="asm,unstable"
$ cargo +nightly bench --features="unstable"
```
Code coverage
---
To generate code coverage statistics, run kcov via Docker:
```bash
$ ./ci/coverage.sh
```
The coverage report will be written to `./target/cov/index.html`
Why coverage? While most see coverage as a code quality metric, we see it primarily as a developer
productivity metric. When a developer makes a change to the codebase, presumably it's a *solution* to
some problem. Our unit-test suite is how we encode the set of *problems* the codebase solves. Running
the test suite should indicate that your change didn't *infringe* on anyone else's solutions. Adding a
test *protects* your solution from future changes. If you don't understand why a line of code exists,
try deleting it and running the unit tests. The nearest test failure should tell you what problem
was solved by that code. If no test fails, go ahead and submit a Pull Request that asks, "what
problem is solved by this code?" On the other hand, if a test does fail and you can think of a
better way to solve the same problem, a Pull Request with your solution would most certainly be
welcome! Likewise, if rewriting a test can better communicate what code it's protecting, please
send us that patch!

1
_config.yml Normal file
View File

@ -0,0 +1 @@
theme: jekyll-theme-slate

15
build.rs Normal file
View File

@ -0,0 +1,15 @@
use std::env;
fn main() {
println!("cargo:rustc-link-search=native=.");
if !env::var("CARGO_FEATURE_CUDA").is_err() {
println!("cargo:rustc-link-lib=static=cuda_verify_ed25519");
println!("cargo:rustc-link-search=native=/usr/local/cuda/lib64");
println!("cargo:rustc-link-lib=dylib=cudart");
println!("cargo:rustc-link-lib=dylib=cuda");
println!("cargo:rustc-link-lib=dylib=cudadevrt");
}
if !env::var("CARGO_FEATURE_ERASURE").is_err() {
println!("cargo:rustc-link-lib=dylib=Jerasure");
}
}

2
ci/.gitignore vendored Normal file
View File

@ -0,0 +1,2 @@
/node_modules/
/package-lock.json

18
ci/buildkite.yml Normal file
View File

@ -0,0 +1,18 @@
steps:
- command: "ci/coverage.sh"
name: "coverage [public]"
- command: "ci/docker-run.sh rust ci/test-stable.sh"
name: "stable [public]"
- command: "ci/docker-run.sh rustlang/rust:nightly ci/test-nightly.sh || true"
name: "nightly - FAILURES IGNORED [public]"
- command: "ci/docker-run.sh rust ci/test-ignored.sh"
name: "ignored [public]"
- command: "ci/test-cuda.sh"
name: "cuda"
- command: "ci/shellcheck.sh"
name: "shellcheck [public]"
- command: "ci/test-erasure.sh"
name: "erasure"
- wait
- command: "ci/publish.sh"
name: "publish release artifacts"

21
ci/coverage.sh Executable file
View File

@ -0,0 +1,21 @@
#!/bin/bash -e
cd "$(dirname "$0")/.."
ci/docker-run.sh evilmachines/rust-cargo-kcov \
bash -exc "\
export RUST_BACKTRACE=1; \
cargo build --verbose; \
cargo kcov --lib --verbose; \
"
echo Coverage report:
ls -l target/cov/index.html
if [[ -z "$CODECOV_TOKEN" ]]; then
echo CODECOV_TOKEN undefined
else
bash <(curl -s https://codecov.io/bash)
fi
exit 0

41
ci/docker-run.sh Executable file
View File

@ -0,0 +1,41 @@
#!/bin/bash -e
usage() {
echo "Usage: $0 [docker image name] [command]"
echo
echo Runs command in the specified docker image with
echo a CI-appropriate environment
echo
}
cd "$(dirname "$0")/.."
IMAGE="$1"
if [[ -z "$IMAGE" ]]; then
echo Error: image not defined
exit 1
fi
docker pull "$IMAGE"
shift
ARGS=(--workdir /solana --volume "$PWD:/solana" --rm)
ARGS+=(--env "CARGO_HOME=/solana/.cargo")
# kcov tries to set the personality of the binary which docker
# doesn't allow by default.
ARGS+=(--security-opt "seccomp=unconfined")
# Ensure files are created with the current host uid/gid
ARGS+=(--user "$(id -u):$(id -g)")
# Environment variables to propagate into the container
ARGS+=(
--env BUILDKITE_TAG
--env CODECOV_TOKEN
--env CRATES_IO_TOKEN
)
set -x
docker run "${ARGS[@]}" "$IMAGE" "$@"

19
ci/publish.sh Executable file
View File

@ -0,0 +1,19 @@
#!/bin/bash -e
cd "$(dirname "$0")/.."
if [[ -z "$BUILDKITE_TAG" ]]; then
# Skip publish if this is not a tagged release
exit 0
fi
if [[ -z "$CRATES_IO_TOKEN" ]]; then
echo CRATES_IO_TOKEN undefined
exit 1
fi
# TODO: Ensure the published version matches the contents of BUILDKITE_TAG
ci/docker-run.sh rust \
bash -exc "cargo package; cargo publish --token $CRATES_IO_TOKEN"
exit 0

19
ci/run-local.sh Executable file
View File

@ -0,0 +1,19 @@
#!/bin/bash -e
#
# Run the entire buildkite CI pipeline locally for pre-testing before sending a
# Github pull request
#
cd "$(dirname "$0")/.."
BKRUN=ci/node_modules/.bin/bkrun
if [[ ! -x $BKRUN ]]; then
(
set -x
cd ci/
npm install bkrun
)
fi
set -x
./ci/node_modules/.bin/bkrun ci/buildkite.yml

11
ci/shellcheck.sh Executable file
View File

@ -0,0 +1,11 @@
#!/bin/bash -e
#
# Reference: https://github.com/koalaman/shellcheck/wiki/Directive
cd "$(dirname "$0")/.."
set -x
find . -name "*.sh" -not -regex ".*/.cargo/.*" -not -regex ".*/node_modules/.*" -print0 \
| xargs -0 \
ci/docker-run.sh koalaman/shellcheck --color=always --external-sources --shell=bash
exit 0

22
ci/test-cuda.sh Executable file
View File

@ -0,0 +1,22 @@
#!/bin/bash -e
cd "$(dirname "$0")/.."
LIB=libcuda_verify_ed25519.a
if [[ ! -r $LIB ]]; then
if [[ -z "${libcuda_verify_ed25519_URL:-}" ]]; then
echo "$0 skipped. Unable to locate $LIB"
exit 0
fi
export LD_LIBRARY_PATH=/usr/local/cuda/lib64
export PATH=$PATH:/usr/local/cuda/bin
curl -X GET -o $LIB "$libcuda_verify_ed25519_URL"
fi
# shellcheck disable=SC1090 # <-- shellcheck can't follow ~
source ~/.cargo/env
export RUST_BACKTRACE=1
cargo test --features=cuda
exit 0

29
ci/test-erasure.sh Executable file
View File

@ -0,0 +1,29 @@
#!/bin/bash -e
set -o xtrace
cd "$(dirname "$0")/.."
if [[ -z "${libgf_complete_URL:-}" ]]; then
echo libgf_complete_URL undefined
exit 1
fi
if [[ -z "${libJerasure_URL:-}" ]]; then
echo libJerasure_URL undefined
exit 1
fi
curl -X GET -o libJerasure.so "$libJerasure_URL"
curl -X GET -o libgf_complete.so "$libgf_complete_URL"
ln -s libJerasure.so libJerasure.so.2
ln -s libJerasure.so libJerasure.so.2.0.0
ln -s libgf_complete.so libgf_complete.so.1.0.0
export LD_LIBRARY_PATH=$PWD:$LD_LIBRARY_PATH
# shellcheck disable=SC1090 # <-- shellcheck can't follow ~
source ~/.cargo/env
cargo test --features="erasure"
exit 0

9
ci/test-ignored.sh Executable file
View File

@ -0,0 +1,9 @@
#!/bin/bash -e
cd "$(dirname "$0")/.."
rustc --version
cargo --version
export RUST_BACKTRACE=1
cargo test -- --ignored

14
ci/test-nightly.sh Executable file
View File

@ -0,0 +1,14 @@
#!/bin/bash -e
cd "$(dirname "$0")/.."
rustc --version
cargo --version
export RUST_BACKTRACE=1
rustup component add rustfmt-preview
cargo build --verbose --features unstable
cargo test --verbose --features unstable
cargo bench --verbose --features unstable
exit 0

14
ci/test-stable.sh Executable file
View File

@ -0,0 +1,14 @@
#!/bin/bash -e
cd "$(dirname "$0")/.."
rustc --version
cargo --version
export RUST_BACKTRACE=1
rustup component add rustfmt-preview
cargo fmt -- --write-mode=diff
cargo build --verbose
cargo test --verbose
exit 0

15
doc/consensus.msc Normal file
View File

@ -0,0 +1,15 @@
msc {
client,leader,verifier_a,verifier_b,verifier_c;
client=>leader [ label = "SUBMIT" ] ;
leader=>client [ label = "CONFIRMED" ] ;
leader=>verifier_a [ label = "CONFIRMED" ] ;
leader=>verifier_b [ label = "CONFIRMED" ] ;
leader=>verifier_c [ label = "CONFIRMED" ] ;
verifier_a=>leader [ label = "VERIFIED" ] ;
verifier_b=>leader [ label = "VERIFIED" ] ;
leader=>client [ label = "FINALIZED" ] ;
leader=>verifier_a [ label = "FINALIZED" ] ;
leader=>verifier_b [ label = "FINALIZED" ] ;
leader=>verifier_c [ label = "FINALIZED" ] ;
}

20
multinode-demo/client.sh Executable file
View File

@ -0,0 +1,20 @@
#!/bin/bash -e
if [[ -z "$1" ]]; then
echo "usage: $0 [network path to solana repo on leader machine] [number of nodes in the network if greater then 1]"
exit 1
fi
LEADER="$1"
COUNT="$2"
if [[ -z "$2" ]]; then
COUNT=1
fi
set -x
export RUST_LOG=solana=info
rsync -v -e ssh "$LEADER/leader.json" .
rsync -v -e ssh "$LEADER/mint-demo.json" .
cargo run --release --bin solana-client-demo -- \
-l leader.json -n $COUNT -d < mint-demo.json 2>&1 | tee client.log

4
multinode-demo/leader.sh Executable file
View File

@ -0,0 +1,4 @@
#!/bin/bash
export RUST_LOG=solana=info
sudo sysctl -w net.core.rmem_max=26214400
cargo run --release --bin solana-fullnode -- -l leader.json < genesis.log

21
multinode-demo/validator.sh Executable file
View File

@ -0,0 +1,21 @@
#!/bin/bash -e
if [[ -z "$1" ]]; then
echo "usage: $0 [network path to solana repo on leader machine]"
exit 1
fi
LEADER="$1"
set -x
rsync -v -e ssh "$LEADER/mint-demo.json" .
rsync -v -e ssh "$LEADER/leader.json" .
rsync -v -e ssh "$LEADER/genesis.log" .
export RUST_LOG=solana=info
sudo sysctl -w net.core.rmem_max=26214400
cargo run --release --bin solana-fullnode -- \
-l validator.json -v leader.json < genesis.log

703
src/bank.rs Normal file
View File

@ -0,0 +1,703 @@
//! The `bank` module tracks client balances and the progress of smart
//! contracts. It offers a high-level API that signs transactions
//! on behalf of the caller, and a low-level API for when they have
//! already been signed and verified.
extern crate libc;
use chrono::prelude::*;
use entry::Entry;
use hash::Hash;
use mint::Mint;
use payment_plan::{Payment, PaymentPlan, Witness};
use rayon::prelude::*;
use signature::{KeyPair, PublicKey, Signature};
use std::collections::hash_map::Entry::Occupied;
use std::collections::{HashMap, HashSet, VecDeque};
use std::result;
use std::sync::atomic::{AtomicIsize, AtomicUsize, Ordering};
use std::sync::RwLock;
use transaction::{Instruction, Plan, Transaction};
/// The number of most recent `last_id` values that the bank will track the signatures
/// of. Once the bank discards a `last_id`, it will reject any transactions that use
that `last_id`. Lowering this value reduces memory consumption,
but requires clients to update their `last_id` more frequently. Raising the value
/// lengthens the time a client must wait to be certain a missing transaction will
/// not be processed by the network.
pub const MAX_ENTRY_IDS: usize = 1024 * 4;
/// Reasons a transaction might be rejected.
#[derive(Debug, PartialEq, Eq)]
pub enum BankError {
/// Attempt to debit from `PublicKey`, but found no record of a prior credit.
AccountNotFound(PublicKey),
/// The requested debit from `PublicKey` has the potential to draw the balance
/// below zero. This can occur when a debit and credit are processed in parallel.
/// The bank may reject the debit or push it to a future entry.
InsufficientFunds(PublicKey),
/// The bank has seen `Signature` before. This can occur under normal operation
/// when a UDP packet is duplicated, as a user error from a client not updating
/// its `last_id`, or as a double-spend attack.
DuplicateSiganture(Signature),
/// The bank has not seen the given `last_id` or the transaction is too old and
/// the `last_id` has been discarded.
LastIdNotFound(Hash),
/// The transaction is invalid and has requested a debit or credit of negative
/// tokens.
NegativeTokens,
}
pub type Result<T> = result::Result<T, BankError>;
/// The state of all accounts and contracts after processing the ledger's entries.
pub struct Bank {
/// A map of account public keys to the balance in that account.
balances: RwLock<HashMap<PublicKey, AtomicIsize>>,
/// A map of smart contract transaction signatures to what remains of their payment
/// plans. Each transaction that targets a plan should cause it to be reduced.
/// Once a plan cannot be reduced further, final payments are made and it is discarded.
pending: RwLock<HashMap<Signature, Plan>>,
/// A FIFO queue of `last_id` items, where each item is a set of signatures
/// that have been processed using that `last_id`. The bank uses this data to
/// reject transactions with signatures it has seen before, as well as transactions
/// that reference a `last_id` so old that it has been pulled out of the queue.
last_ids: RwLock<VecDeque<(Hash, RwLock<HashSet<Signature>>)>>,
/// The set of trusted timekeepers. A Timestamp transaction from a `PublicKey`
/// outside this set will be discarded. Note that if validators do not have the
/// same set as leaders, they may interpret the ledger differently.
time_sources: RwLock<HashSet<PublicKey>>,
/// The most recent timestamp from a trusted timekeeper. This timestamp is applied
/// to every smart contract when it enters the system. If a contract is waiting on a
/// timestamp witness at or before this time, the bank will execute it immediately.
last_time: RwLock<DateTime<Utc>>,
/// The number of transactions the bank has processed without error since the
/// start of the ledger.
transaction_count: AtomicUsize,
}
impl Bank {
/// Create a Bank using a deposit.
pub fn new_from_deposit(deposit: &Payment) -> Self {
let bank = Bank {
balances: RwLock::new(HashMap::new()),
pending: RwLock::new(HashMap::new()),
last_ids: RwLock::new(VecDeque::new()),
time_sources: RwLock::new(HashSet::new()),
last_time: RwLock::new(Utc.timestamp(0, 0)),
transaction_count: AtomicUsize::new(0),
};
bank.apply_payment(deposit);
bank
}
/// Create a Bank with only a Mint. Typically used by unit tests.
pub fn new(mint: &Mint) -> Self {
let deposit = Payment {
to: mint.pubkey(),
tokens: mint.tokens,
};
let bank = Self::new_from_deposit(&deposit);
bank.register_entry_id(&mint.last_id());
bank
}
/// Commit funds to the `payment.to` party.
fn apply_payment(&self, payment: &Payment) {
// First we check balances with a read lock to maximize potential parallelization.
if self.balances
.read()
.expect("'balances' read lock in apply_payment")
.contains_key(&payment.to)
{
let bals = self.balances.read().expect("'balances' read lock");
bals[&payment.to].fetch_add(payment.tokens as isize, Ordering::Relaxed);
} else {
// Now we know the key wasn't present a nanosecond ago, but it might be there
// by the time we acquire a write lock, so we'll have to check again.
let mut bals = self.balances.write().expect("'balances' write lock");
if bals.contains_key(&payment.to) {
bals[&payment.to].fetch_add(payment.tokens as isize, Ordering::Relaxed);
} else {
bals.insert(payment.to, AtomicIsize::new(payment.tokens as isize));
}
}
}
/// Return the last entry ID registered.
pub fn last_id(&self) -> Hash {
let last_ids = self.last_ids.read().expect("'last_ids' read lock");
let last_item = last_ids.iter().last().expect("empty 'last_ids' list");
last_item.0
}
/// Store the given signature. The bank will reject any transaction with the same signature.
fn reserve_signature(signatures: &RwLock<HashSet<Signature>>, sig: &Signature) -> Result<()> {
if signatures
.read()
.expect("'signatures' read lock")
.contains(sig)
{
return Err(BankError::DuplicateSiganture(*sig));
}
signatures
.write()
.expect("'signatures' write lock")
.insert(*sig);
Ok(())
}
/// Forget the given `signature` because its transaction was rejected.
fn forget_signature(signatures: &RwLock<HashSet<Signature>>, signature: &Signature) {
signatures
.write()
.expect("'signatures' write lock in forget_signature")
.remove(signature);
}
/// Forget the given `signature` with `last_id` because the transaction was rejected.
fn forget_signature_with_last_id(&self, signature: &Signature, last_id: &Hash) {
if let Some(entry) = self.last_ids
.read()
.expect("'last_ids' read lock in forget_signature_with_last_id")
.iter()
.rev()
.find(|x| x.0 == *last_id)
{
Self::forget_signature(&entry.1, signature);
}
}
fn reserve_signature_with_last_id(&self, signature: &Signature, last_id: &Hash) -> Result<()> {
if let Some(entry) = self.last_ids
.read()
.expect("'last_ids' read lock in reserve_signature_with_last_id")
.iter()
.rev()
.find(|x| x.0 == *last_id)
{
return Self::reserve_signature(&entry.1, signature);
}
Err(BankError::LastIdNotFound(*last_id))
}
/// Tell the bank which Entry IDs exist on the ledger. This function
/// assumes subsequent calls correspond to later entries, and will boot
/// the oldest ones once its internal cache is full. Once booted, the
/// bank will reject transactions that use the discarded `last_id`.
pub fn register_entry_id(&self, last_id: &Hash) {
let mut last_ids = self.last_ids
.write()
.expect("'last_ids' write lock in register_entry_id");
if last_ids.len() >= MAX_ENTRY_IDS {
last_ids.pop_front();
}
last_ids.push_back((*last_id, RwLock::new(HashSet::new())));
}
/// Deduct tokens from the 'from' address if the account has sufficient
/// funds and the transaction isn't a duplicate.
fn apply_debits(&self, tx: &Transaction) -> Result<()> {
if let Instruction::NewContract(contract) = &tx.instruction {
trace!("Transaction {}", contract.tokens);
if contract.tokens < 0 {
return Err(BankError::NegativeTokens);
}
}
let bals = self.balances
.read()
.expect("'balances' read lock in apply_debits");
let option = bals.get(&tx.from);
if option.is_none() {
return Err(BankError::AccountNotFound(tx.from));
}
self.reserve_signature_with_last_id(&tx.sig, &tx.last_id)?;
loop {
let result = if let Instruction::NewContract(contract) = &tx.instruction {
let bal = option.expect("assignment of option to bal");
let current = bal.load(Ordering::Relaxed) as i64;
if current < contract.tokens {
self.forget_signature_with_last_id(&tx.sig, &tx.last_id);
return Err(BankError::InsufficientFunds(tx.from));
}
bal.compare_exchange(
current as isize,
(current - contract.tokens) as isize,
Ordering::Relaxed,
Ordering::Relaxed,
)
} else {
Ok(0)
};
match result {
Ok(_) => {
self.transaction_count.fetch_add(1, Ordering::Relaxed);
return Ok(());
}
Err(_) => continue,
};
}
}
/// Apply only a transaction's credits. Credits from multiple transactions
/// may safely be applied in parallel.
fn apply_credits(&self, tx: &Transaction) {
match &tx.instruction {
Instruction::NewContract(contract) => {
let mut plan = contract.plan.clone();
plan.apply_witness(&Witness::Timestamp(*self.last_time
.read()
.expect("timestamp creation in apply_credits")));
if let Some(payment) = plan.final_payment() {
self.apply_payment(&payment);
} else {
let mut pending = self.pending
.write()
.expect("'pending' write lock in apply_credits");
pending.insert(tx.sig, plan);
}
}
Instruction::ApplyTimestamp(dt) => {
let _ = self.apply_timestamp(tx.from, *dt);
}
Instruction::ApplySignature(tx_sig) => {
let _ = self.apply_signature(tx.from, *tx_sig);
}
}
}
/// Process a Transaction. If it contains a payment plan that requires a witness
/// to progress, the payment plan will be stored in the bank.
fn process_transaction(&self, tx: &Transaction) -> Result<()> {
self.apply_debits(tx)?;
self.apply_credits(tx);
Ok(())
}
/// Process a batch of transactions. It runs all debits first to filter out any
/// transactions that can't be processed in parallel deterministically.
pub fn process_transactions(&self, txs: Vec<Transaction>) -> Vec<Result<Transaction>> {
info!("processing Transactions {}", txs.len());
let results: Vec<_> = txs.into_par_iter()
.map(|tx| self.apply_debits(&tx).map(|_| tx))
.collect(); // Calling collect() here forces all debits to complete before moving on.
results
.into_par_iter()
.map(|result| {
result.map(|tx| {
self.apply_credits(&tx);
tx
})
})
.collect()
}
/// Process an ordered list of entries.
pub fn process_entries<I>(&self, entries: I) -> Result<()>
where
I: IntoIterator<Item = Entry>,
{
for entry in entries {
for result in self.process_transactions(entry.transactions) {
result?;
}
self.register_entry_id(&entry.id);
}
Ok(())
}
/// Process a Witness Signature. Any payment plans waiting on this signature
/// will progress one step.
fn apply_signature(&self, from: PublicKey, tx_sig: Signature) -> Result<()> {
if let Occupied(mut e) = self.pending
.write()
.expect("write() in apply_signature")
.entry(tx_sig)
{
e.get_mut().apply_witness(&Witness::Signature(from));
if let Some(payment) = e.get().final_payment() {
self.apply_payment(&payment);
e.remove_entry();
}
};
Ok(())
}
/// Process a Witness Timestamp. Any payment plans waiting on this timestamp
/// will progress one step.
fn apply_timestamp(&self, from: PublicKey, dt: DateTime<Utc>) -> Result<()> {
// If this is the first timestamp we've seen, it probably came from the genesis block,
// so we'll trust it.
if *self.last_time
.read()
.expect("'last_time' read lock on first timestamp check")
== Utc.timestamp(0, 0)
{
self.time_sources
.write()
.expect("'time_sources' write lock on first timestamp")
.insert(from);
}
if self.time_sources
.read()
.expect("'time_sources' read lock")
.contains(&from)
{
if dt > *self.last_time.read().expect("'last_time' read lock") {
*self.last_time.write().expect("'last_time' write lock") = dt;
}
} else {
return Ok(());
}
// Check to see if any timelocked transactions can be completed.
let mut completed = vec![];
// Hold 'pending' write lock until the end of this function. Otherwise another thread can
// double-spend if it enters before the modified plan is removed from 'pending'.
let mut pending = self.pending
.write()
.expect("'pending' write lock in apply_timestamp");
for (key, plan) in pending.iter_mut() {
plan.apply_witness(&Witness::Timestamp(*self.last_time
.read()
.expect("'last_time' read lock when creating timestamp")));
if let Some(payment) = plan.final_payment() {
self.apply_payment(&payment);
completed.push(key.clone());
}
}
for key in completed {
pending.remove(&key);
}
Ok(())
}
/// Create, sign, and process a Transaction from `keypair` to `to` of
/// `n` tokens where `last_id` is the last Entry ID observed by the client.
pub fn transfer(
&self,
n: i64,
keypair: &KeyPair,
to: PublicKey,
last_id: Hash,
) -> Result<Signature> {
let tx = Transaction::new(keypair, to, n, last_id);
let sig = tx.sig;
self.process_transaction(&tx).map(|_| sig)
}
/// Create, sign, and process a postdated Transaction from `keypair`
/// to `to` of `n` tokens on `dt` where `last_id` is the last Entry ID
/// observed by the client.
pub fn transfer_on_date(
&self,
n: i64,
keypair: &KeyPair,
to: PublicKey,
dt: DateTime<Utc>,
last_id: Hash,
) -> Result<Signature> {
let tx = Transaction::new_on_date(keypair, to, dt, n, last_id);
let sig = tx.sig;
self.process_transaction(&tx).map(|_| sig)
}
pub fn get_balance(&self, pubkey: &PublicKey) -> Option<i64> {
let bals = self.balances
.read()
.expect("'balances' read lock in get_balance");
bals.get(pubkey).map(|x| x.load(Ordering::Relaxed) as i64)
}
pub fn transaction_count(&self) -> usize {
self.transaction_count.load(Ordering::Relaxed)
}
}
#[cfg(test)]
mod tests {
use super::*;
use bincode::serialize;
use hash::hash;
use signature::KeyPairUtil;
#[test]
fn test_two_payments_to_one_party() {
let mint = Mint::new(10_000);
let pubkey = KeyPair::new().pubkey();
let bank = Bank::new(&mint);
assert_eq!(bank.last_id(), mint.last_id());
bank.transfer(1_000, &mint.keypair(), pubkey, mint.last_id())
.unwrap();
assert_eq!(bank.get_balance(&pubkey).unwrap(), 1_000);
bank.transfer(500, &mint.keypair(), pubkey, mint.last_id())
.unwrap();
assert_eq!(bank.get_balance(&pubkey).unwrap(), 1_500);
assert_eq!(bank.transaction_count(), 2);
}
#[test]
fn test_negative_tokens() {
let mint = Mint::new(1);
let pubkey = KeyPair::new().pubkey();
let bank = Bank::new(&mint);
assert_eq!(
bank.transfer(-1, &mint.keypair(), pubkey, mint.last_id()),
Err(BankError::NegativeTokens)
);
assert_eq!(bank.transaction_count(), 0);
}
#[test]
fn test_account_not_found() {
let mint = Mint::new(1);
let bank = Bank::new(&mint);
let keypair = KeyPair::new();
assert_eq!(
bank.transfer(1, &keypair, mint.pubkey(), mint.last_id()),
Err(BankError::AccountNotFound(keypair.pubkey()))
);
assert_eq!(bank.transaction_count(), 0);
}
#[test]
fn test_insufficient_funds() {
let mint = Mint::new(11_000);
let bank = Bank::new(&mint);
let pubkey = KeyPair::new().pubkey();
bank.transfer(1_000, &mint.keypair(), pubkey, mint.last_id())
.unwrap();
assert_eq!(bank.transaction_count(), 1);
assert_eq!(
bank.transfer(10_001, &mint.keypair(), pubkey, mint.last_id()),
Err(BankError::InsufficientFunds(mint.pubkey()))
);
assert_eq!(bank.transaction_count(), 1);
let mint_pubkey = mint.keypair().pubkey();
assert_eq!(bank.get_balance(&mint_pubkey).unwrap(), 10_000);
assert_eq!(bank.get_balance(&pubkey).unwrap(), 1_000);
}
#[test]
fn test_transfer_to_newb() {
let mint = Mint::new(10_000);
let bank = Bank::new(&mint);
let pubkey = KeyPair::new().pubkey();
bank.transfer(500, &mint.keypair(), pubkey, mint.last_id())
.unwrap();
assert_eq!(bank.get_balance(&pubkey).unwrap(), 500);
}
#[test]
fn test_transfer_on_date() {
let mint = Mint::new(1);
let bank = Bank::new(&mint);
let pubkey = KeyPair::new().pubkey();
let dt = Utc::now();
bank.transfer_on_date(1, &mint.keypair(), pubkey, dt, mint.last_id())
.unwrap();
// Mint's balance will be zero because all funds are locked up.
assert_eq!(bank.get_balance(&mint.pubkey()), Some(0));
// tx count is 1, because debits were applied.
assert_eq!(bank.transaction_count(), 1);
// pubkey's balance will be None because the funds have not been
// sent.
assert_eq!(bank.get_balance(&pubkey), None);
// Now, acknowledge the time in the condition occurred and
// that pubkey's funds are now available.
bank.apply_timestamp(mint.pubkey(), dt).unwrap();
assert_eq!(bank.get_balance(&pubkey), Some(1));
// tx count is still 1, because timestamp transactions are not counted.
assert_eq!(bank.transaction_count(), 1);
bank.apply_timestamp(mint.pubkey(), dt).unwrap(); // <-- Attack! Attempt to process completed transaction.
assert_ne!(bank.get_balance(&pubkey), Some(2));
}
#[test]
fn test_transfer_after_date() {
let mint = Mint::new(1);
let bank = Bank::new(&mint);
let pubkey = KeyPair::new().pubkey();
let dt = Utc::now();
bank.apply_timestamp(mint.pubkey(), dt).unwrap();
// It's now past now, so this transfer should be processed immediately.
bank.transfer_on_date(1, &mint.keypair(), pubkey, dt, mint.last_id())
.unwrap();
assert_eq!(bank.get_balance(&mint.pubkey()), Some(0));
assert_eq!(bank.get_balance(&pubkey), Some(1));
}
#[test]
fn test_cancel_transfer() {
let mint = Mint::new(1);
let bank = Bank::new(&mint);
let pubkey = KeyPair::new().pubkey();
let dt = Utc::now();
let sig = bank.transfer_on_date(1, &mint.keypair(), pubkey, dt, mint.last_id())
.unwrap();
// Assert the debit counts as a transaction.
assert_eq!(bank.transaction_count(), 1);
// Mint's balance will be zero because all funds are locked up.
assert_eq!(bank.get_balance(&mint.pubkey()), Some(0));
// pubkey's balance will be None because the funds have not been
// sent.
assert_eq!(bank.get_balance(&pubkey), None);
// Now, cancel the transaction. Mint gets her funds back, pubkey never sees them.
bank.apply_signature(mint.pubkey(), sig).unwrap();
assert_eq!(bank.get_balance(&mint.pubkey()), Some(1));
assert_eq!(bank.get_balance(&pubkey), None);
// Assert cancel doesn't cause count to go backward.
assert_eq!(bank.transaction_count(), 1);
bank.apply_signature(mint.pubkey(), sig).unwrap(); // <-- Attack! Attempt to cancel completed transaction.
assert_ne!(bank.get_balance(&mint.pubkey()), Some(2));
}
#[test]
fn test_duplicate_transaction_signature() {
let mint = Mint::new(1);
let bank = Bank::new(&mint);
let sig = Signature::default();
assert!(
bank.reserve_signature_with_last_id(&sig, &mint.last_id())
.is_ok()
);
assert_eq!(
bank.reserve_signature_with_last_id(&sig, &mint.last_id()),
Err(BankError::DuplicateSiganture(sig))
);
}
#[test]
fn test_forget_signature() {
let mint = Mint::new(1);
let bank = Bank::new(&mint);
let sig = Signature::default();
bank.reserve_signature_with_last_id(&sig, &mint.last_id())
.unwrap();
bank.forget_signature_with_last_id(&sig, &mint.last_id());
assert!(
bank.reserve_signature_with_last_id(&sig, &mint.last_id())
.is_ok()
);
}
#[test]
fn test_reject_old_last_id() {
let mint = Mint::new(1);
let bank = Bank::new(&mint);
let sig = Signature::default();
for i in 0..MAX_ENTRY_IDS {
let last_id = hash(&serialize(&i).unwrap()); // Unique hash
bank.register_entry_id(&last_id);
}
// Assert we're no longer able to use the oldest entry ID.
assert_eq!(
bank.reserve_signature_with_last_id(&sig, &mint.last_id()),
Err(BankError::LastIdNotFound(mint.last_id()))
);
}
#[test]
fn test_debits_before_credits() {
let mint = Mint::new(2);
let bank = Bank::new(&mint);
let keypair = KeyPair::new();
let tx0 = Transaction::new(&mint.keypair(), keypair.pubkey(), 2, mint.last_id());
let tx1 = Transaction::new(&keypair, mint.pubkey(), 1, mint.last_id());
let txs = vec![tx0, tx1];
let results = bank.process_transactions(txs);
assert!(results[1].is_err());
// Assert bad transactions aren't counted.
assert_eq!(bank.transaction_count(), 1);
}
}
#[cfg(all(feature = "unstable", test))]
mod bench {
extern crate test;
use self::test::Bencher;
use bank::*;
use bincode::serialize;
use hash::hash;
use signature::KeyPairUtil;
#[bench]
fn bench_process_transaction(bencher: &mut Bencher) {
let mint = Mint::new(100_000_000);
let bank = Bank::new(&mint);
// Create transactions between unrelated parties.
let transactions: Vec<_> = (0..4096)
.into_par_iter()
.map(|i| {
// Seed the 'from' account.
let rando0 = KeyPair::new();
let tx = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, mint.last_id());
bank.process_transaction(&tx).unwrap();
// Seed the 'to' account and a cell for its signature.
let last_id = hash(&serialize(&i).unwrap()); // Unique hash
bank.register_entry_id(&last_id);
let rando1 = KeyPair::new();
let tx = Transaction::new(&rando0, rando1.pubkey(), 1, last_id);
bank.process_transaction(&tx).unwrap();
// Finally, return a transaction that's unique
Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
})
.collect();
bencher.iter(|| {
// Since benchmarker runs this multiple times, we need to clear the signatures.
for sigs in bank.last_ids.read().unwrap().iter() {
sigs.1.write().unwrap().clear();
}
assert!(
bank.process_transactions(transactions.clone())
.iter()
.all(|x| x.is_ok())
);
});
}
}
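For orientation, the sketch below drives the Bank's high-level API the same way the unit tests above do; it assumes the crate is named `solana` with the `bank`, `mint`, and `signature` modules shown in this diff, and is illustrative rather than part of the change.

extern crate solana;

use solana::bank::Bank;
use solana::mint::Mint;
use solana::signature::{KeyPair, KeyPairUtil};

fn main() {
    // Seed the bank with a Mint, as the tests do.
    let mint = Mint::new(10_000);
    let bank = Bank::new(&mint);

    // High-level API: the bank signs and processes the transfer on our behalf.
    let alice = KeyPair::new().pubkey();
    bank.transfer(1_000, &mint.keypair(), alice, mint.last_id())
        .expect("transfer");

    assert_eq!(bank.get_balance(&alice), Some(1_000));
    assert_eq!(bank.transaction_count(), 1);
}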

321
src/banking_stage.rs Normal file

@ -0,0 +1,321 @@
//! The `banking_stage` processes Transaction messages. It is intended to be used
//! to construct a software pipeline. The stage uses all available CPU cores and
//! can do its processing in parallel with signature verification on the GPU.
use bank::Bank;
use bincode::deserialize;
use counter::Counter;
use packet;
use packet::SharedPackets;
use rayon::prelude::*;
use record_stage::Signal;
use result::Result;
use std::net::SocketAddr;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::Arc;
use std::thread::{Builder, JoinHandle};
use std::time::Duration;
use std::time::Instant;
use timing;
use transaction::Transaction;
/// Stores the stage's thread handle and output receiver.
pub struct BankingStage {
/// Handle to the stage's thread.
pub thread_hdl: JoinHandle<()>,
/// Output receiver for the following stage.
pub signal_receiver: Receiver<Signal>,
}
impl BankingStage {
/// Create the stage using `bank`. Exit when either `exit` is set or
/// when `verified_receiver` or the stage's output receiver is dropped.
/// Discard input packets using `packet_recycler` to minimize memory
/// allocations in a previous stage such as the `fetch_stage`.
pub fn new(
bank: Arc<Bank>,
exit: Arc<AtomicBool>,
verified_receiver: Receiver<Vec<(SharedPackets, Vec<u8>)>>,
packet_recycler: packet::PacketRecycler,
) -> Self {
let (signal_sender, signal_receiver) = channel();
let thread_hdl = Builder::new()
.name("solana-banking-stage".to_string())
.spawn(move || loop {
let e = Self::process_packets(
bank.clone(),
&verified_receiver,
&signal_sender,
&packet_recycler,
);
if e.is_err() {
if exit.load(Ordering::Relaxed) {
break;
}
}
})
.unwrap();
BankingStage {
thread_hdl,
signal_receiver,
}
}
/// Convert the transactions from a blob of binary data to a vector of transactions and
/// an unused `SocketAddr` that could be used to send a response.
fn deserialize_transactions(p: &packet::Packets) -> Vec<Option<(Transaction, SocketAddr)>> {
p.packets
.par_iter()
.map(|x| {
deserialize(&x.data[0..x.meta.size])
.map(|req| (req, x.meta.addr()))
.ok()
})
.collect()
}
/// Process the incoming packets and send output `Signal` messages to `signal_sender`.
/// Discard packets via `packet_recycler`.
fn process_packets(
bank: Arc<Bank>,
verified_receiver: &Receiver<Vec<(SharedPackets, Vec<u8>)>>,
signal_sender: &Sender<Signal>,
packet_recycler: &packet::PacketRecycler,
) -> Result<()> {
let timer = Duration::new(1, 0);
let recv_start = Instant::now();
let mms = verified_receiver.recv_timeout(timer)?;
let mut reqs_len = 0;
let mms_len = mms.len();
info!(
"@{:?} process start stalled for: {:?}ms batches: {}",
timing::timestamp(),
timing::duration_as_ms(&recv_start.elapsed()),
mms.len(),
);
let count = mms.iter().map(|x| x.1.len()).sum();
static mut COUNTER: Counter = create_counter!("banking_stage_process_packets", 1);
let proc_start = Instant::now();
for (msgs, vers) in mms {
let transactions = Self::deserialize_transactions(&msgs.read().unwrap());
reqs_len += transactions.len();
let transactions = transactions
.into_iter()
.zip(vers)
.filter_map(|(tx, ver)| match tx {
None => None,
Some((tx, _addr)) => if tx.verify_plan() && ver != 0 {
Some(tx)
} else {
None
},
})
.collect();
debug!("process_transactions");
let results = bank.process_transactions(transactions);
let transactions = results.into_iter().filter_map(|x| x.ok()).collect();
signal_sender.send(Signal::Transactions(transactions))?;
debug!("done process_transactions");
packet_recycler.recycle(msgs);
}
let total_time_s = timing::duration_as_s(&proc_start.elapsed());
let total_time_ms = timing::duration_as_ms(&proc_start.elapsed());
info!(
"@{:?} done processing transaction batches: {} time: {:?}ms reqs: {} reqs/s: {}",
timing::timestamp(),
mms_len,
total_time_ms,
reqs_len,
(reqs_len as f32) / (total_time_s)
);
inc_counter!(COUNTER, count, proc_start);
Ok(())
}
}
// TODO: When banking is pulled out of RequestStage, add this test back in.
//use bank::Bank;
//use entry::Entry;
//use hash::Hash;
//use record_stage::RecordStage;
//use record_stage::Signal;
//use result::Result;
//use std::sync::mpsc::{channel, Sender};
//use std::sync::{Arc, Mutex};
//use std::time::Duration;
//use transaction::Transaction;
//
//#[cfg(test)]
//mod tests {
// use bank::Bank;
// use mint::Mint;
// use signature::{KeyPair, KeyPairUtil};
// use transaction::Transaction;
//
// #[test]
// // TODO: Move this test banking_stage. Calling process_transactions() directly
// // defeats the purpose of this test.
// fn test_banking_sequential_consistency() {
// // In this attack we'll demonstrate that a verifier can interpret the ledger
// // differently if either the server doesn't signal the ledger to add an
// // Entry OR if the verifier tries to parallelize across multiple Entries.
// let mint = Mint::new(2);
// let bank = Bank::new(&mint);
// let banking_stage = EventProcessor::new(bank, &mint.last_id(), None);
//
// // Process a batch that includes a transaction that receives two tokens.
// let alice = KeyPair::new();
// let tx = Transaction::new(&mint.keypair(), alice.pubkey(), 2, mint.last_id());
// let transactions = vec![tx];
// let entry0 = banking_stage.process_transactions(transactions).unwrap();
//
// // Process a second batch that spends one of those tokens.
// let tx = Transaction::new(&alice, mint.pubkey(), 1, mint.last_id());
// let transactions = vec![tx];
// let entry1 = banking_stage.process_transactions(transactions).unwrap();
//
// // Collect the ledger and feed it to a new bank.
// let entries = vec![entry0, entry1];
//
// // Assert the user holds one token, not two. If the server only output one
// // entry, then the second transaction will be rejected, because it drives
// // the account balance below zero before the credit is added.
// let bank = Bank::new(&mint);
// for entry in entries {
// assert!(
// bank
// .process_transactions(entry.transactions)
// .into_iter()
// .all(|x| x.is_ok())
// );
// }
// assert_eq!(bank.get_balance(&alice.pubkey()), Some(1));
// }
//}
//
//#[cfg(all(feature = "unstable", test))]
//mod bench {
// extern crate test;
// use self::test::Bencher;
// use bank::{Bank, MAX_ENTRY_IDS};
// use bincode::serialize;
// use hash::hash;
// use mint::Mint;
// use rayon::prelude::*;
// use signature::{KeyPair, KeyPairUtil};
// use std::collections::HashSet;
// use std::time::Instant;
// use transaction::Transaction;
//
// #[bench]
// fn bench_process_transactions(_bencher: &mut Bencher) {
// let mint = Mint::new(100_000_000);
// let bank = Bank::new(&mint);
// // Create transactions between unrelated parties.
// let txs = 100_000;
// let last_ids: Mutex<HashSet<Hash>> = Mutex::new(HashSet::new());
// let transactions: Vec<_> = (0..txs)
// .into_par_iter()
// .map(|i| {
// // Seed the 'to' account and a cell for its signature.
// let dummy_id = i % (MAX_ENTRY_IDS as i32);
// let last_id = hash(&serialize(&dummy_id).unwrap()); // Semi-unique hash
// {
// let mut last_ids = last_ids.lock().unwrap();
// if !last_ids.contains(&last_id) {
// last_ids.insert(last_id);
// bank.register_entry_id(&last_id);
// }
// }
//
// // Seed the 'from' account.
// let rando0 = KeyPair::new();
// let tx = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, last_id);
// bank.process_transaction(&tx).unwrap();
//
// let rando1 = KeyPair::new();
// let tx = Transaction::new(&rando0, rando1.pubkey(), 2, last_id);
// bank.process_transaction(&tx).unwrap();
//
// // Finally, return a transaction that's unique
// Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
// })
// .collect();
//
// let banking_stage = EventProcessor::new(bank, &mint.last_id(), None);
//
// let now = Instant::now();
// assert!(banking_stage.process_transactions(transactions).is_ok());
// let duration = now.elapsed();
// let sec = duration.as_secs() as f64 + duration.subsec_nanos() as f64 / 1_000_000_000.0;
// let tps = txs as f64 / sec;
//
// // Ensure that all transactions were successfully logged.
// drop(banking_stage.historian_input);
// let entries: Vec<Entry> = banking_stage.output.lock().unwrap().iter().collect();
// assert_eq!(entries.len(), 1);
// assert_eq!(entries[0].transactions.len(), txs as usize);
//
// println!("{} tps", tps);
// }
//}
#[cfg(all(feature = "unstable", test))]
mod bench {
extern crate test;
use self::test::Bencher;
use bank::*;
use banking_stage::BankingStage;
use mint::Mint;
use packet::{to_packets, PacketRecycler};
use record_stage::Signal;
use signature::{KeyPair, KeyPairUtil};
use std::iter;
use std::sync::mpsc::channel;
use std::sync::Arc;
use transaction::Transaction;
#[bench]
fn bench_stage(bencher: &mut Bencher) {
let tx = 100_usize;
let mint = Mint::new(1_000_000_000);
let pubkey = KeyPair::new().pubkey();
let transactions: Vec<_> = (0..tx)
.map(|i| Transaction::new(&mint.keypair(), pubkey, i as i64, mint.last_id()))
.collect();
let (verified_sender, verified_receiver) = channel();
let (signal_sender, signal_receiver) = channel();
let packet_recycler = PacketRecycler::default();
let verified: Vec<_> = to_packets(&packet_recycler, transactions)
.into_iter()
.map(|x| {
let len = (*x).read().unwrap().packets.len();
(x, iter::repeat(1).take(len).collect())
})
.collect();
bencher.iter(move || {
let bank = Arc::new(Bank::new(&mint));
verified_sender.send(verified.clone()).unwrap();
BankingStage::process_packets(
bank.clone(),
&verified_receiver,
&signal_sender,
&packet_recycler,
).unwrap();
let signal = signal_receiver.recv().unwrap();
if let Signal::Transactions(transactions) = signal {
assert_eq!(transactions.len(), tx);
} else {
assert!(false);
}
});
}
}
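As a rough illustration of how this stage is wired, the sketch below mirrors the bench above: it builds a BankingStage, feeds it one pre-verified transaction, and reads the resulting Signal. Module paths and call signatures are taken from this diff; treat it as a sketch, not a canonical harness.

extern crate solana;

use solana::bank::Bank;
use solana::banking_stage::BankingStage;
use solana::mint::Mint;
use solana::packet::{to_packets, PacketRecycler};
use solana::record_stage::Signal;
use solana::signature::{KeyPair, KeyPairUtil};
use solana::transaction::Transaction;
use std::iter;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::channel;
use std::sync::Arc;

fn main() {
    let mint = Mint::new(1_000);
    let bank = Arc::new(Bank::new(&mint));
    let exit = Arc::new(AtomicBool::new(false));
    let (verified_sender, verified_receiver) = channel();

    // The stage consumes (packets, sigverify-flags) batches and emits Signals.
    let stage = BankingStage::new(bank, exit, verified_receiver, PacketRecycler::default());

    // Serialize one transfer into packets and mark it as verified (flag = 1).
    let recycler = PacketRecycler::default();
    let to = KeyPair::new().pubkey();
    let tx = Transaction::new(&mint.keypair(), to, 1, mint.last_id());
    let verified: Vec<_> = to_packets(&recycler, vec![tx])
        .into_iter()
        .map(|packets| {
            let len = packets.read().unwrap().packets.len();
            (packets, iter::repeat(1).take(len).collect::<Vec<u8>>())
        })
        .collect();
    verified_sender.send(verified).unwrap();

    // The banking thread deserializes, filters, and runs the bank, then signals.
    if let Signal::Transactions(txs) = stage.signal_receiver.recv().unwrap() {
        assert_eq!(txs.len(), 1);
    }
}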

326
src/bin/client-demo.rs Normal file

@ -0,0 +1,326 @@
extern crate env_logger;
extern crate getopts;
extern crate isatty;
extern crate pnet;
extern crate rayon;
extern crate serde_json;
extern crate solana;
use getopts::Options;
use isatty::stdin_isatty;
use pnet::datalink;
use rayon::prelude::*;
use solana::crdt::{Crdt, ReplicatedData};
use solana::mint::MintDemo;
use solana::ncp::Ncp;
use solana::signature::{GenKeys, KeyPair, KeyPairUtil};
use solana::streamer::default_window;
use solana::thin_client::ThinClient;
use solana::transaction::Transaction;
use std::env;
use std::fs::File;
use std::io::{stdin, Read};
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
use std::process::exit;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
use std::thread::sleep;
use std::thread::JoinHandle;
use std::time::Duration;
use std::time::Instant;
fn print_usage(program: &str, opts: Options) {
let mut brief = format!("Usage: cat <mint.json> | {} [options]\n\n", program);
brief += " Solana client demo creates a number of transactions and\n";
brief += " sends them to a target node.";
brief += " Takes json formatted mint file to stdin.";
print!("{}", opts.usage(&brief));
}
fn get_ip_addr() -> Option<IpAddr> {
for iface in datalink::interfaces() {
for p in iface.ips {
if !p.ip().is_loopback() && !p.ip().is_multicast() {
return Some(p.ip());
}
}
}
None
}
fn main() {
env_logger::init().unwrap();
let mut threads = 4usize;
let mut num_nodes = 1usize;
let mut opts = Options::new();
opts.optopt("l", "", "leader", "leader.json");
opts.optopt("c", "", "client port", "port");
opts.optopt("t", "", "number of threads", &format!("{}", threads));
opts.optflag("d", "dyn", "detect network address dynamically");
opts.optopt(
"n",
"",
"number of nodes to converge to",
&format!("{}", num_nodes),
);
opts.optflag("h", "help", "print help");
let args: Vec<String> = env::args().collect();
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(e) => {
eprintln!("{}", e);
exit(1);
}
};
if matches.opt_present("h") {
let program = args[0].clone();
print_usage(&program, opts);
return;
}
let mut addr: SocketAddr = "0.0.0.0:8100".parse().unwrap();
if matches.opt_present("c") {
let port = matches.opt_str("c").unwrap().parse().unwrap();
addr.set_port(port);
}
if matches.opt_present("d") {
addr.set_ip(get_ip_addr().unwrap());
}
let client_addr: Arc<RwLock<SocketAddr>> = Arc::new(RwLock::new(addr));
if matches.opt_present("t") {
threads = matches.opt_str("t").unwrap().parse().expect("integer");
}
if matches.opt_present("n") {
num_nodes = matches.opt_str("n").unwrap().parse().expect("integer");
}
let leader = if matches.opt_present("l") {
read_leader(matches.opt_str("l").unwrap())
} else {
let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
ReplicatedData::new_leader(&server_addr)
};
let signal = Arc::new(AtomicBool::new(false));
let mut c_threads = vec![];
let validators = converge(
&client_addr,
&leader,
signal.clone(),
num_nodes,
&mut c_threads,
);
assert_eq!(validators.len(), num_nodes);
if stdin_isatty() {
eprintln!("nothing found on stdin, expected a json file");
exit(1);
}
let mut buffer = String::new();
let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
if num_bytes == 0 {
eprintln!("empty file on stdin, expected a json file");
exit(1);
}
println!("Parsing stdin...");
let demo: MintDemo = serde_json::from_str(&buffer).unwrap_or_else(|e| {
eprintln!("failed to parse json: {}", e);
exit(1);
});
let mut client = mk_client(&client_addr, &leader);
println!("Get last ID...");
let last_id = client.get_last_id();
println!("Got last ID {:?}", last_id);
let rnd = GenKeys::new(demo.mint.keypair().public_key_bytes());
println!("Creating keypairs...");
let txs = demo.num_accounts / 2;
let keypairs = rnd.gen_n_keypairs(demo.num_accounts);
let keypair_pairs: Vec<_> = keypairs.chunks(2).collect();
println!("Signing transactions...");
let now = Instant::now();
let transactions: Vec<_> = keypair_pairs
.into_par_iter()
.map(|chunk| Transaction::new(&chunk[0], chunk[1].pubkey(), 1, last_id))
.collect();
let duration = now.elapsed();
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
let bsps = txs as f64 / ns as f64;
let nsps = ns as f64 / txs as f64;
println!(
"Done. {} thousand signatures per second, {}us per signature",
bsps * 1_000_000_f64,
nsps / 1_000_f64
);
let first_count = client.transaction_count();
println!("initial count {}", first_count);
println!("Transfering {} transactions in {} batches", txs, threads);
let sz = transactions.len() / threads;
let chunks: Vec<_> = transactions.chunks(sz).collect();
chunks.into_par_iter().for_each(|txs| {
println!(
"Transferring 1 unit {} times... to {:?}",
txs.len(),
leader.transactions_addr
);
let client = mk_client(&client_addr, &leader);
for tx in txs {
client.transfer_signed(tx.clone()).unwrap();
}
});
let sample_period = 1; // in seconds
println!("Sampling tps every second...",);
let maxes: Vec<_> = validators
.into_par_iter()
.map(|val| {
let mut client = mk_client(&client_addr, &val);
let mut now = Instant::now();
let mut initial_tx_count = client.transaction_count();
let mut max_tps = 0.0;
let mut total = 0;
for i in 0..100 {
let tx_count = client.transaction_count();
let duration = now.elapsed();
now = Instant::now();
let sample = tx_count - initial_tx_count;
initial_tx_count = tx_count;
println!(
"{}: Transactions processed {}",
val.transactions_addr, sample
);
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
let tps = (sample * 1_000_000_000) as f64 / ns as f64;
if tps > max_tps {
max_tps = tps;
}
println!("{}: {} tps", val.transactions_addr, tps);
total = tx_count - first_count;
println!(
"{}: Total Transactions processed {}",
val.transactions_addr, total
);
if total == transactions.len() as u64 {
break;
}
if i > 20 && sample == 0 {
break;
}
sleep(Duration::new(sample_period, 0));
}
(max_tps, total)
})
.collect();
let mut max_of_maxes = 0.0;
let mut total_txs = 0;
for (max, txs) in &maxes {
if *max > max_of_maxes {
max_of_maxes = *max;
}
total_txs += *txs;
}
println!(
"\nHighest TPS: {} sampling period {}s total transactions: {} clients: {}",
max_of_maxes,
sample_period,
total_txs,
maxes.len()
);
signal.store(true, Ordering::Relaxed);
for t in c_threads {
t.join().unwrap();
}
}
fn mk_client(locked_addr: &Arc<RwLock<SocketAddr>>, r: &ReplicatedData) -> ThinClient {
let mut addr = locked_addr.write().unwrap();
let port = addr.port();
let transactions_socket = UdpSocket::bind(addr.clone()).unwrap();
addr.set_port(port + 1);
let requests_socket = UdpSocket::bind(addr.clone()).unwrap();
addr.set_port(port + 2);
ThinClient::new(
r.requests_addr,
requests_socket,
r.transactions_addr,
transactions_socket,
)
}
fn spy_node(client_addr: &Arc<RwLock<SocketAddr>>) -> (ReplicatedData, UdpSocket) {
let mut addr = client_addr.write().unwrap();
let port = addr.port();
let gossip = UdpSocket::bind(addr.clone()).unwrap();
addr.set_port(port + 1);
let daddr = "0.0.0.0:0".parse().unwrap();
let pubkey = KeyPair::new().pubkey();
let node = ReplicatedData::new(
pubkey,
gossip.local_addr().unwrap(),
daddr,
daddr,
daddr,
daddr,
);
(node, gossip)
}
fn converge(
client_addr: &Arc<RwLock<SocketAddr>>,
leader: &ReplicatedData,
exit: Arc<AtomicBool>,
num_nodes: usize,
threads: &mut Vec<JoinHandle<()>>,
) -> Vec<ReplicatedData> {
//lets spy on the network
let daddr = "0.0.0.0:0".parse().unwrap();
let (spy, spy_gossip) = spy_node(client_addr);
let mut spy_crdt = Crdt::new(spy);
spy_crdt.insert(&leader);
spy_crdt.set_leader(leader.id);
let spy_ref = Arc::new(RwLock::new(spy_crdt));
let window = default_window();
let gossip_send_socket = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
let ncp = Ncp::new(
spy_ref.clone(),
window.clone(),
spy_gossip,
gossip_send_socket,
exit.clone(),
).expect("DataReplicator::new");
let mut rv = vec![];
//wait for the network to converge, 30 seconds should be plenty
for _ in 0..30 {
let v: Vec<ReplicatedData> = spy_ref
.read()
.unwrap()
.table
.values()
.into_iter()
.filter(|x| x.requests_addr != daddr)
.cloned()
.collect();
if v.len() >= num_nodes {
println!("CONVERGED!");
rv.extend(v.into_iter());
break;
}
sleep(Duration::new(1, 0));
}
threads.extend(ncp.thread_hdls.into_iter());
rv
}
fn read_leader(path: String) -> ReplicatedData {
let file = File::open(path).expect("file");
serde_json::from_reader(file).expect("parse")
}
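The sampling loop above reports TPS per validator; the standalone helper below restates that arithmetic (nanosecond-based so that sub-second samples stay meaningful). It is purely illustrative and not part of this change.

use std::time::Duration;

// Same arithmetic as the demo's sampling loop: transactions in the sample
// divided by elapsed wall-clock time, computed via nanoseconds.
fn tps(sample: u64, elapsed: Duration) -> f64 {
    let ns = elapsed.as_secs() * 1_000_000_000 + u64::from(elapsed.subsec_nanos());
    (sample * 1_000_000_000) as f64 / ns as f64
}

fn main() {
    // 2_500 transactions observed over half a second is 5_000 TPS.
    let rate = tps(2_500, Duration::from_millis(500));
    assert!((rate - 5_000.0).abs() < 1e-6);
    println!("{} tps", rate);
}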


@ -1,27 +0,0 @@
extern crate silk;
use silk::historian::Historian;
use silk::log::{verify_slice, Entry, Event, Sha256Hash};
use std::{thread, time};
use std::sync::mpsc::SendError;
fn create_log(hist: &Historian) -> Result<(), SendError<Event>> {
hist.sender.send(Event::Tick)?;
thread::sleep(time::Duration::new(0, 100_000));
hist.sender.send(Event::UserDataKey(0xdeadbeef))?;
thread::sleep(time::Duration::new(0, 100_000));
hist.sender.send(Event::Tick)?;
Ok(())
}
fn main() {
let seed = Sha256Hash::default();
let hist = Historian::new(&seed);
create_log(&hist).expect("send error");
drop(hist.sender);
let entries: Vec<Entry> = hist.receiver.iter().collect();
for entry in &entries {
println!("{:?}", entry);
}
assert!(verify_slice(&entries, &seed));
}


@ -0,0 +1,52 @@
extern crate getopts;
extern crate serde_json;
extern crate solana;
use getopts::Options;
use solana::crdt::{get_ip_addr, parse_port_or_addr, ReplicatedData};
use std::env;
use std::io;
use std::net::SocketAddr;
use std::process::exit;
fn print_usage(program: &str, opts: Options) {
let mut brief = format!("Usage: {} [options]\n\n", program);
brief += " Create a solana fullnode config file\n";
print!("{}", opts.usage(&brief));
}
fn main() {
let mut opts = Options::new();
opts.optopt("b", "", "bind", "bind to port or address");
opts.optflag("d", "dyn", "detect network address dynamically");
opts.optflag("h", "help", "print help");
let args: Vec<String> = env::args().collect();
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(e) => {
eprintln!("{}", e);
exit(1);
}
};
if matches.opt_present("h") {
let program = args[0].clone();
print_usage(&program, opts);
return;
}
let bind_addr: SocketAddr = {
let mut bind_addr = parse_port_or_addr(matches.opt_str("b"));
if matches.opt_present("d") {
let ip = get_ip_addr().unwrap();
bind_addr.set_ip(ip);
}
bind_addr
};
// we need all the receiving sockets to be bound within the expected
// port range that we open on aws
let repl_data = ReplicatedData::new_leader(&bind_addr);
let stdout = io::stdout();
serde_json::to_writer(stdout, &repl_data).expect("serialize");
}

161
src/bin/fullnode.rs Normal file

@ -0,0 +1,161 @@
extern crate env_logger;
extern crate getopts;
extern crate isatty;
extern crate serde_json;
extern crate solana;
#[macro_use]
extern crate log;
use getopts::Options;
use isatty::stdin_isatty;
use solana::bank::Bank;
use solana::crdt::ReplicatedData;
use solana::entry::Entry;
use solana::payment_plan::PaymentPlan;
use solana::server::Server;
use solana::transaction::Instruction;
use std::env;
use std::fs::File;
use std::io::{stdin, Read};
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
use std::process::exit;
use std::sync::atomic::AtomicBool;
use std::sync::Arc;
//use std::time::Duration;
fn print_usage(program: &str, opts: Options) {
let mut brief = format!("Usage: cat <transaction.log> | {} [options]\n\n", program);
brief += " Run a Solana node to handle transactions and\n";
brief += " write a new transaction log to stdout.\n";
brief += " Takes existing transaction log from stdin.";
print!("{}", opts.usage(&brief));
}
fn main() {
env_logger::init().unwrap();
let mut opts = Options::new();
opts.optopt("l", "", "load", "load my identity to path.json");
opts.optflag("h", "help", "print help");
opts.optopt(
"v",
"",
"validator",
"run as replicate with path to leader.json",
);
let args: Vec<String> = env::args().collect();
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(e) => {
eprintln!("{}", e);
exit(1);
}
};
if matches.opt_present("h") {
let program = args[0].clone();
print_usage(&program, opts);
return;
}
if stdin_isatty() {
eprintln!("nothing found on stdin, expected a log file");
exit(1);
}
let mut buffer = String::new();
let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
if num_bytes == 0 {
eprintln!("empty file on stdin, expected a log file");
exit(1);
}
eprintln!("Initializing...");
let mut entries = buffer.lines().map(|line| {
serde_json::from_str(&line).unwrap_or_else(|e| {
eprintln!("failed to parse json: {}", e);
exit(1);
})
});
eprintln!("done parsing...");
// The first item in the ledger is required to be an entry with zero num_hashes,
// which implies its id can be used as the ledger's seed.
let entry0 = entries.next().unwrap();
// The second item in the ledger is a special transaction where the to and from
// fields are the same. That entry should be treated as a deposit, not a
// transfer to oneself.
let entry1: Entry = entries.next().unwrap();
let tx = &entry1.transactions[0];
let deposit = if let Instruction::NewContract(contract) = &tx.instruction {
contract.plan.final_payment()
} else {
None
};
eprintln!("creating bank...");
let bank = Bank::new_from_deposit(&deposit.unwrap());
bank.register_entry_id(&entry0.id);
bank.register_entry_id(&entry1.id);
eprintln!("processing entries...");
bank.process_entries(entries).expect("process_entries");
eprintln!("creating networking stack...");
let exit = Arc::new(AtomicBool::new(false));
let bind_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
let mut repl_data = ReplicatedData::new_leader(&bind_addr);
if matches.opt_present("l") {
let path = matches.opt_str("l").unwrap();
if let Ok(file) = File::open(path.clone()) {
if let Ok(data) = serde_json::from_reader(file) {
repl_data = data;
} else {
warn!("failed to parse leader {}, generating new identity", path);
}
}
}
let threads = if matches.opt_present("v") {
eprintln!("starting validator... {}", repl_data.requests_addr);
let path = matches.opt_str("v").unwrap();
let file = File::open(path).expect("file");
let leader = serde_json::from_reader(file).expect("parse");
let s = Server::new_validator(
bank,
repl_data.clone(),
UdpSocket::bind(repl_data.requests_addr).unwrap(),
UdpSocket::bind("0.0.0.0:0").unwrap(),
UdpSocket::bind(repl_data.replicate_addr).unwrap(),
UdpSocket::bind(repl_data.gossip_addr).unwrap(),
UdpSocket::bind(repl_data.repair_addr).unwrap(),
leader,
exit.clone(),
);
s.thread_hdls
} else {
eprintln!("starting leader... {}", repl_data.requests_addr);
repl_data.current_leader_id = repl_data.id.clone();
let file = File::create("leader.log").expect("leader.log create");
let server = Server::new_leader(
bank,
//Some(Duration::from_millis(1000)),
None,
repl_data.clone(),
UdpSocket::bind(repl_data.requests_addr).unwrap(),
UdpSocket::bind(repl_data.transactions_addr).unwrap(),
UdpSocket::bind("0.0.0.0:0").unwrap(),
UdpSocket::bind("0.0.0.0:0").unwrap(),
UdpSocket::bind(repl_data.gossip_addr).unwrap(),
exit.clone(),
file,
);
server.thread_hdls
};
eprintln!("Ready. Listening on {}", repl_data.transactions_addr);
for t in threads {
t.join().expect("join");
}
}

77
src/bin/genesis-demo.rs Normal file

@ -0,0 +1,77 @@
extern crate isatty;
extern crate rayon;
extern crate serde_json;
extern crate solana;
use isatty::stdin_isatty;
use rayon::prelude::*;
use solana::bank::MAX_ENTRY_IDS;
use solana::entry::{next_entry, Entry};
use solana::mint::MintDemo;
use solana::signature::{GenKeys, KeyPairUtil};
use solana::transaction::Transaction;
use std::io::{stdin, Read};
use std::process::exit;
// Generate a ledger with lots and lots of accounts.
fn main() {
if stdin_isatty() {
eprintln!("nothing found on stdin, expected a json file");
exit(1);
}
let mut buffer = String::new();
let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
if num_bytes == 0 {
eprintln!("empty file on stdin, expected a json file");
exit(1);
}
let demo: MintDemo = serde_json::from_str(&buffer).unwrap_or_else(|e| {
eprintln!("failed to parse json: {}", e);
exit(1);
});
let rnd = GenKeys::new(demo.mint.keypair().public_key_bytes());
let num_accounts = demo.num_accounts;
let tokens_per_user = 1_000;
let keypairs = rnd.gen_n_keypairs(num_accounts);
let mint_keypair = demo.mint.keypair();
let last_id = demo.mint.last_id();
for entry in demo.mint.create_entries() {
println!("{}", serde_json::to_string(&entry).unwrap());
}
eprintln!("Creating {} empty entries...", MAX_ENTRY_IDS);
// Offer client lots of entry IDs to use for each transaction's last_id.
let mut last_id = last_id;
let mut last_ids = vec![];
for _ in 0..MAX_ENTRY_IDS {
let entry = next_entry(&last_id, 1, vec![]);
last_id = entry.id;
last_ids.push(last_id);
let serialized = serde_json::to_string(&entry).unwrap_or_else(|e| {
eprintln!("failed to serialize: {}", e);
exit(1);
});
println!("{}", serialized);
}
eprintln!("Creating {} transactions...", num_accounts);
let transactions: Vec<_> = keypairs
.into_par_iter()
.enumerate()
.map(|(i, rando)| {
let last_id = last_ids[i % MAX_ENTRY_IDS];
Transaction::new(&mint_keypair, rando.pubkey(), tokens_per_user, last_id)
})
.collect();
eprintln!("Logging the creation of {} accounts...", num_accounts);
let entry = Entry::new(&last_id, 0, transactions);
println!("{}", serde_json::to_string(&entry).unwrap());
}

36
src/bin/genesis.rs Normal file

@ -0,0 +1,36 @@
//! A command-line executable for generating the chain's genesis block.
extern crate isatty;
extern crate serde_json;
extern crate solana;
use isatty::stdin_isatty;
use solana::mint::Mint;
use std::io::{stdin, Read};
use std::process::exit;
fn main() {
if stdin_isatty() {
eprintln!("nothing found on stdin, expected a json file");
exit(1);
}
let mut buffer = String::new();
let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
if num_bytes == 0 {
eprintln!("empty file on stdin, expected a json file");
exit(1);
}
let mint: Mint = serde_json::from_str(&buffer).unwrap_or_else(|e| {
eprintln!("failed to parse json: {}", e);
exit(1);
});
for x in mint.create_entries() {
let serialized = serde_json::to_string(&x).unwrap_or_else(|e| {
eprintln!("failed to serialize: {}", e);
exit(1);
});
println!("{}", serialized);
}
}

21
src/bin/mint-demo.rs Normal file

@ -0,0 +1,21 @@
extern crate rayon;
extern crate ring;
extern crate serde_json;
extern crate solana;
use solana::mint::{Mint, MintDemo};
use std::io;
fn main() {
let mut input_text = String::new();
io::stdin().read_line(&mut input_text).unwrap();
let trimmed = input_text.trim();
let tokens = trimmed.parse::<i64>().unwrap();
let mint = Mint::new(tokens);
let tokens_per_user = 1_000;
let num_accounts = tokens / tokens_per_user;
let demo = MintDemo { mint, num_accounts };
println!("{}", serde_json::to_string(&demo).unwrap());
}

29
src/bin/mint.rs Normal file

@ -0,0 +1,29 @@
extern crate isatty;
extern crate serde_json;
extern crate solana;
use isatty::stdin_isatty;
use solana::mint::Mint;
use std::io;
use std::process::exit;
fn main() {
let mut input_text = String::new();
if stdin_isatty() {
eprintln!("nothing found on stdin, expected a token number");
exit(1);
}
io::stdin().read_line(&mut input_text).unwrap();
let trimmed = input_text.trim();
let tokens = trimmed.parse::<i64>().unwrap_or_else(|e| {
eprintln!("{}", e);
exit(1);
});
let mint = Mint::new(tokens);
let serialized = serde_json::to_string(&mint).unwrap_or_else(|e| {
eprintln!("failed to serialize: {}", e);
exit(1);
});
println!("{}", serialized);
}

175
src/budget.rs Normal file

@ -0,0 +1,175 @@
//! The `budget` module provides a domain-specific language for payment plans. Users create Budget objects that
//! are given to an interpreter. The interpreter listens for `Witness` transactions,
//! which it uses to reduce the payment plan. When the budget is reduced to a
//! `Payment`, the payment is executed.
use chrono::prelude::*;
use payment_plan::{Payment, PaymentPlan, Witness};
use signature::PublicKey;
use std::mem;
/// A data type representing a `Witness` that the payment plan is waiting on.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Condition {
/// Wait for a `Timestamp` `Witness` at or after the given `DateTime`.
Timestamp(DateTime<Utc>),
/// Wait for a `Signature` `Witness` from `PublicKey`.
Signature(PublicKey),
}
impl Condition {
/// Return true if the given Witness satisfies this Condition.
pub fn is_satisfied(&self, witness: &Witness) -> bool {
match (self, witness) {
(Condition::Signature(pubkey), Witness::Signature(from)) => pubkey == from,
(Condition::Timestamp(dt), Witness::Timestamp(last_time)) => dt <= last_time,
_ => false,
}
}
}
/// A data type representing a payment plan.
#[repr(C)]
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Budget {
/// Make a payment.
Pay(Payment),
/// Make a payment after some condition.
After(Condition, Payment),
/// Either make a payment after one condition or a different payment after another
/// condition, whichever condition is satisfied first.
Or((Condition, Payment), (Condition, Payment)),
}
impl Budget {
/// Create the simplest budget - one that pays `tokens` to PublicKey.
pub fn new_payment(tokens: i64, to: PublicKey) -> Self {
Budget::Pay(Payment { tokens, to })
}
/// Create a budget that pays `tokens` to `to` after being witnessed by `from`.
pub fn new_authorized_payment(from: PublicKey, tokens: i64, to: PublicKey) -> Self {
Budget::After(Condition::Signature(from), Payment { tokens, to })
}
/// Create a budget that pays `tokens` to `to` after the given DateTime.
pub fn new_future_payment(dt: DateTime<Utc>, tokens: i64, to: PublicKey) -> Self {
Budget::After(Condition::Timestamp(dt), Payment { tokens, to })
}
/// Create a budget that pays `tokens` to `to` after the given DateTime
/// unless cancelled by `from`.
pub fn new_cancelable_future_payment(
dt: DateTime<Utc>,
from: PublicKey,
tokens: i64,
to: PublicKey,
) -> Self {
Budget::Or(
(Condition::Timestamp(dt), Payment { tokens, to }),
(Condition::Signature(from), Payment { tokens, to: from }),
)
}
}
impl PaymentPlan for Budget {
/// Return Payment if the budget requires no additional Witnesses.
fn final_payment(&self) -> Option<Payment> {
match self {
Budget::Pay(payment) => Some(payment.clone()),
_ => None,
}
}
/// Return true if the budget spends exactly `spendable_tokens`.
fn verify(&self, spendable_tokens: i64) -> bool {
match self {
Budget::Pay(payment) | Budget::After(_, payment) => payment.tokens == spendable_tokens,
Budget::Or(a, b) => a.1.tokens == spendable_tokens && b.1.tokens == spendable_tokens,
}
}
/// Apply a witness to the budget to see if the budget can be reduced.
/// If so, modify the budget in-place.
fn apply_witness(&mut self, witness: &Witness) {
let new_payment = match self {
Budget::After(cond, payment) if cond.is_satisfied(witness) => Some(payment),
Budget::Or((cond, payment), _) if cond.is_satisfied(witness) => Some(payment),
Budget::Or(_, (cond, payment)) if cond.is_satisfied(witness) => Some(payment),
_ => None,
}.cloned();
if let Some(payment) = new_payment {
mem::replace(self, Budget::Pay(payment));
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_signature_satisfied() {
let sig = PublicKey::default();
assert!(Condition::Signature(sig).is_satisfied(&Witness::Signature(sig)));
}
#[test]
fn test_timestamp_satisfied() {
let dt1 = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
let dt2 = Utc.ymd(2014, 11, 14).and_hms(10, 9, 8);
assert!(Condition::Timestamp(dt1).is_satisfied(&Witness::Timestamp(dt1)));
assert!(Condition::Timestamp(dt1).is_satisfied(&Witness::Timestamp(dt2)));
assert!(!Condition::Timestamp(dt2).is_satisfied(&Witness::Timestamp(dt1)));
}
#[test]
fn test_verify() {
let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
let from = PublicKey::default();
let to = PublicKey::default();
assert!(Budget::new_payment(42, to).verify(42));
assert!(Budget::new_authorized_payment(from, 42, to).verify(42));
assert!(Budget::new_future_payment(dt, 42, to).verify(42));
assert!(Budget::new_cancelable_future_payment(dt, from, 42, to).verify(42));
}
#[test]
fn test_authorized_payment() {
let from = PublicKey::default();
let to = PublicKey::default();
let mut budget = Budget::new_authorized_payment(from, 42, to);
budget.apply_witness(&Witness::Signature(from));
assert_eq!(budget, Budget::new_payment(42, to));
}
#[test]
fn test_future_payment() {
let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
let to = PublicKey::default();
let mut budget = Budget::new_future_payment(dt, 42, to);
budget.apply_witness(&Witness::Timestamp(dt));
assert_eq!(budget, Budget::new_payment(42, to));
}
#[test]
fn test_cancelable_future_payment() {
let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
let from = PublicKey::default();
let to = PublicKey::default();
let mut budget = Budget::new_cancelable_future_payment(dt, from, 42, to);
budget.apply_witness(&Witness::Timestamp(dt));
assert_eq!(budget, Budget::new_payment(42, to));
let mut budget = Budget::new_cancelable_future_payment(dt, from, 42, to);
budget.apply_witness(&Witness::Signature(from));
assert_eq!(budget, Budget::new_payment(42, from));
}
}
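To show the reduction the module doc describes end to end, the sketch below builds a cancelable future payment and reduces it with a timestamp witness, exactly as the tests above do. It assumes the `budget` and `payment_plan` modules are exported from the crate root (as the binaries' imports elsewhere in this diff suggest) and is illustrative only.

extern crate chrono;
extern crate solana;

use chrono::prelude::*;
use solana::budget::Budget;
use solana::payment_plan::{PaymentPlan, Witness};
use solana::signature::{KeyPair, KeyPairUtil};

fn main() {
    let from = KeyPair::new().pubkey();
    let to = KeyPair::new().pubkey();
    let dt = Utc.ymd(2018, 6, 7).and_hms(0, 0, 0);

    // Pay 42 tokens to `to` once `dt` is witnessed, unless `from` cancels first.
    let mut budget = Budget::new_cancelable_future_payment(dt, from, 42, to);
    assert!(budget.final_payment().is_none()); // still waiting on a witness

    // A timestamp witness at (or after) `dt` reduces the budget to a plain payment.
    budget.apply_witness(&Witness::Timestamp(dt));
    assert_eq!(budget, Budget::new_payment(42, to));
}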

70
src/counter.rs Normal file

@ -0,0 +1,70 @@
use std::sync::atomic::{AtomicUsize, Ordering};
use std::time::{Duration, SystemTime, UNIX_EPOCH};
pub struct Counter {
pub name: &'static str,
pub counts: AtomicUsize,
pub nanos: AtomicUsize,
pub times: AtomicUsize,
pub lograte: usize,
}
macro_rules! create_counter {
($name:expr, $lograte:expr) => {
Counter {
name: $name,
counts: AtomicUsize::new(0),
nanos: AtomicUsize::new(0),
times: AtomicUsize::new(0),
lograte: $lograte,
}
};
}
macro_rules! inc_counter {
($name:expr, $count:expr, $start:expr) => {
unsafe { $name.inc($count, $start.elapsed()) };
};
}
impl Counter {
pub fn inc(&mut self, events: usize, dur: Duration) {
let total = dur.as_secs() * 1_000_000_000 + dur.subsec_nanos() as u64;
let counts = self.counts.fetch_add(events, Ordering::Relaxed);
let nanos = self.nanos.fetch_add(total as usize, Ordering::Relaxed);
let times = self.times.fetch_add(1, Ordering::Relaxed);
if times % self.lograte == 0 && times > 0 {
let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
let now_ms = now.as_secs() * 1_000 + now.subsec_nanos() as u64 / 1_000_000;
info!(
"COUNTER:{{\"name:\":\"{}\", \"counts\": {}, \"nanos\": {}, \"samples\": {} \"rate\": {}, \"now\": {}}}",
self.name,
counts,
nanos,
times,
counts as f64 * 1e9 / nanos as f64,
now_ms,
);
}
}
}
#[cfg(test)]
mod tests {
use counter::Counter;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::time::Instant;
#[test]
fn test_counter() {
static mut COUNTER: Counter = create_counter!("test", 100);
let start = Instant::now();
let count = 1;
inc_counter!(COUNTER, count, start);
unsafe {
assert_eq!(COUNTER.counts.load(Ordering::Relaxed), 1);
assert_ne!(COUNTER.nanos.load(Ordering::Relaxed), 0);
assert_eq!(COUNTER.times.load(Ordering::Relaxed), 1);
assert_eq!(COUNTER.lograte, 100);
assert_eq!(COUNTER.name, "test");
}
}
}

1002
src/crdt.rs Normal file

File diff suppressed because it is too large

178
src/entry.rs Normal file

@ -0,0 +1,178 @@
//! The `entry` module is a fundamental building block of Proof of History. It contains a
//! unique ID that is the hash of the Entry before it, plus the hash of the
//! transactions within it. Entries cannot be reordered, and the `num_hashes` field
//! represents an approximate amount of time since the last Entry was created.
use hash::{extend_and_hash, hash, Hash};
use rayon::prelude::*;
use transaction::Transaction;
/// Each Entry contains three pieces of data. The `num_hashes` field is the number
/// of hashes performed since the previous entry. The `id` field is the result
/// of hashing `id` from the previous entry `num_hashes` times. The `transactions`
/// field points to Transactions that took place shortly before `id` was generated.
///
/// If you divide `num_hashes` by the amount of time it takes to generate a new hash, you
/// get a duration estimate since the last Entry. Since processing power increases
/// over time, one should expect the duration `num_hashes` represents to decrease proportionally.
/// An upper bound on Duration can be estimated by assuming each hash was generated by the
/// world's fastest processor at the time the entry was recorded. Or said another way, it
/// is physically not possible for a shorter duration to have occurred if one assumes the
/// hash was computed by the world's fastest processor at that time. The hash chain is both
/// a Verifiable Delay Function (VDF) and a Proof of Work (not to be confused with Proof of
/// Work consensus!)
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Entry {
/// The number of hashes since the previous Entry ID.
pub num_hashes: u64,
/// The SHA-256 hash `num_hashes` after the previous Entry ID.
pub id: Hash,
/// An unordered list of transactions that were observed before the Entry ID was
/// generated. They may have been observed before a previous Entry ID but were
/// pushed back into this list to ensure deterministic interpretation of the ledger.
pub transactions: Vec<Transaction>,
}
impl Entry {
/// Creates the next Entry `num_hashes` after `start_hash`.
pub fn new(start_hash: &Hash, cur_hashes: u64, transactions: Vec<Transaction>) -> Self {
let num_hashes = cur_hashes + if transactions.is_empty() { 0 } else { 1 };
let id = next_hash(start_hash, 0, &transactions);
Entry {
num_hashes,
id,
transactions,
}
}
/// Creates the next Entry `num_hashes` after `start_hash`, updating `start_hash` and resetting `cur_hashes`.
pub fn new_mut(
start_hash: &mut Hash,
cur_hashes: &mut u64,
transactions: Vec<Transaction>,
) -> Self {
let entry = Self::new(start_hash, *cur_hashes, transactions);
*start_hash = entry.id;
*cur_hashes = 0;
entry
}
/// Creates a Tick Entry from the number of hashes `num_hashes` since the previous transaction
/// and that resulting `id`.
pub fn new_tick(num_hashes: u64, id: &Hash) -> Self {
Entry {
num_hashes,
id: *id,
transactions: vec![],
}
}
/// Verifies that self.id is the result of hashing `start_hash` `self.num_hashes` times,
/// mixing in any transaction signatures, and that each transaction's plan is valid.
pub fn verify(&self, start_hash: &Hash) -> bool {
self.transactions.par_iter().all(|tx| tx.verify_plan())
&& self.id == next_hash(start_hash, self.num_hashes, &self.transactions)
}
}
fn add_transaction_data(hash_data: &mut Vec<u8>, tx: &Transaction) {
hash_data.push(0u8);
hash_data.extend_from_slice(&tx.sig);
}
/// Creates the hash `num_hashes` after `start_hash`. If the transaction contains
/// a signature, the final hash will be a hash of both the previous ID and
/// the signature.
pub fn next_hash(start_hash: &Hash, num_hashes: u64, transactions: &[Transaction]) -> Hash {
let mut id = *start_hash;
for _ in 1..num_hashes {
id = hash(&id);
}
// Hash all the transaction data
let mut hash_data = vec![];
for tx in transactions {
add_transaction_data(&mut hash_data, tx);
}
if !hash_data.is_empty() {
extend_and_hash(&id, &hash_data)
} else if num_hashes != 0 {
hash(&id)
} else {
id
}
}
/// Creates the next Tick or Transaction Entry `num_hashes` after `start_hash`.
pub fn next_entry(start_hash: &Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Entry {
Entry {
num_hashes,
id: next_hash(start_hash, num_hashes, &transactions),
transactions,
}
}
#[cfg(test)]
mod tests {
use super::*;
use chrono::prelude::*;
use entry::Entry;
use hash::hash;
use signature::{KeyPair, KeyPairUtil};
use transaction::Transaction;
#[test]
fn test_entry_verify() {
let zero = Hash::default();
let one = hash(&zero);
assert!(Entry::new_tick(0, &zero).verify(&zero)); // base case
assert!(!Entry::new_tick(0, &zero).verify(&one)); // base case, bad
assert!(next_entry(&zero, 1, vec![]).verify(&zero)); // inductive step
assert!(!next_entry(&zero, 1, vec![]).verify(&one)); // inductive step, bad
}
#[test]
fn test_transaction_reorder_attack() {
let zero = Hash::default();
// First, verify entries
let keypair = KeyPair::new();
let tx0 = Transaction::new(&keypair, keypair.pubkey(), 0, zero);
let tx1 = Transaction::new(&keypair, keypair.pubkey(), 1, zero);
let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()]);
assert!(e0.verify(&zero));
// Next, swap two transactions and ensure verification fails.
e0.transactions[0] = tx1; // <-- attack
e0.transactions[1] = tx0;
assert!(!e0.verify(&zero));
}
#[test]
fn test_witness_reorder_attack() {
let zero = Hash::default();
// First, verify entries
let keypair = KeyPair::new();
let tx0 = Transaction::new_timestamp(&keypair, Utc::now(), zero);
let tx1 = Transaction::new_signature(&keypair, Default::default(), zero);
let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()]);
assert!(e0.verify(&zero));
// Next, swap two witness transactions and ensure verification fails.
e0.transactions[0] = tx1; // <-- attack
e0.transactions[1] = tx0;
assert!(!e0.verify(&zero));
}
#[test]
fn test_next_entry() {
let zero = Hash::default();
let tick = next_entry(&zero, 1, vec![]);
assert_eq!(tick.num_hashes, 1);
assert_ne!(tick.id, zero);
}
}
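A minimal sketch of the chaining behavior described in the module docs above, using only `next_entry`, `Entry::verify`, and `Hash::default` from this diff; the tick length of 1,000 hashes is an arbitrary choice.
use entry::next_entry;
use hash::Hash;

fn demo_poh_chain() {
    let genesis = Hash::default();
    let tick0 = next_entry(&genesis, 1_000, vec![]); // 1,000 hashes, no transactions
    let tick1 = next_entry(&tick0.id, 1_000, vec![]);
    assert!(tick0.verify(&genesis));  // each tick verifies against its parent id...
    assert!(tick1.verify(&tick0.id));
    assert!(!tick1.verify(&genesis)); // ...and against nothing else
}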

82
src/entry_writer.rs Normal file

@ -0,0 +1,82 @@
//! The `entry_writer` module helps implement the TPU's write stage. It
//! writes entries to the given writer, which is typically a file or
//! stdout, and then sends the Entry to its output channel.
use bank::Bank;
use entry::Entry;
use ledger::Block;
use packet;
use result::Result;
use serde_json;
use std::collections::VecDeque;
use std::io::sink;
use std::io::Write;
use std::sync::mpsc::Receiver;
use std::sync::{Arc, Mutex};
use std::time::Duration;
use streamer;
pub struct EntryWriter<'a> {
bank: &'a Bank,
}
impl<'a> EntryWriter<'a> {
/// Create a new EntryWriter that wraps the given Bank.
pub fn new(bank: &'a Bank) -> Self {
EntryWriter { bank }
}
fn write_entry<W: Write>(&self, writer: &Mutex<W>, entry: &Entry) {
trace!("write_entry entry");
self.bank.register_entry_id(&entry.id);
writeln!(
writer.lock().expect("'writer' lock in fn write_entry"),
"{}",
serde_json::to_string(&entry).expect("'entry' to_string in fn write_entry")
).expect("writeln! in fn write_entry");
}
fn write_entries<W: Write>(
&self,
writer: &Mutex<W>,
entry_receiver: &Receiver<Entry>,
) -> Result<Vec<Entry>> {
//TODO implement a serialize for channel that does this without allocations
let mut l = vec![];
let entry = entry_receiver.recv_timeout(Duration::new(1, 0))?;
self.write_entry(writer, &entry);
l.push(entry);
while let Ok(entry) = entry_receiver.try_recv() {
self.write_entry(writer, &entry);
l.push(entry);
}
Ok(l)
}
/// Process any Entry items that have been published by the Historian,
/// continuously broadcasting blobs of entries out.
pub fn write_and_send_entries<W: Write>(
&self,
broadcast: &streamer::BlobSender,
blob_recycler: &packet::BlobRecycler,
writer: &Mutex<W>,
entry_receiver: &Receiver<Entry>,
) -> Result<()> {
let mut q = VecDeque::new();
let list = self.write_entries(writer, entry_receiver)?;
trace!("New blobs? {}", list.len());
list.to_blobs(blob_recycler, &mut q);
if !q.is_empty() {
trace!("broadcasting {}", q.len());
broadcast.send(q)?;
}
Ok(())
}
/// Process any Entry items that have been published by the Historian,
/// registering them with the bank but discarding the writer output.
pub fn drain_entries(&self, entry_receiver: &Receiver<Entry>) -> Result<()> {
self.write_entries(&Arc::new(Mutex::new(sink())), entry_receiver)?;
Ok(())
}
}

624
src/erasure.rs Normal file

@ -0,0 +1,624 @@
// Support erasure coding
use packet::{BlobRecycler, SharedBlob, BLOB_HEADER_SIZE};
use std::result;
//TODO(sakridge) pick these values
pub const NUM_CODED: usize = 20;
pub const MAX_MISSING: usize = 4;
const NUM_DATA: usize = NUM_CODED - MAX_MISSING;
#[derive(Debug, PartialEq, Eq)]
pub enum ErasureError {
NotEnoughBlocksToDecode,
DecodeError,
EncodeError,
InvalidBlockSize,
}
pub type Result<T> = result::Result<T, ErasureError>;
// k = number of data devices
// m = number of coding devices
// w = word size
extern "C" {
fn jerasure_matrix_encode(
k: i32,
m: i32,
w: i32,
matrix: *const i32,
data_ptrs: *const *const u8,
coding_ptrs: *const *mut u8,
size: i32,
);
fn jerasure_matrix_decode(
k: i32,
m: i32,
w: i32,
matrix: *const i32,
row_k_ones: i32,
erasures: *const i32,
data_ptrs: *const *mut u8,
coding_ptrs: *const *const u8,
size: i32,
) -> i32;
fn galois_single_divide(a: i32, b: i32, w: i32) -> i32;
}
fn get_matrix(m: i32, k: i32, w: i32) -> Vec<i32> {
let mut matrix = vec![0; (m * k) as usize];
for i in 0..m {
for j in 0..k {
unsafe {
matrix[(i * k + j) as usize] = galois_single_divide(1, i ^ (m + j), w);
}
}
}
matrix
}
pub const ERASURE_W: i32 = 32;
// Generate coding blocks into coding
// There are some alignment restrictions: blocks should be aligned to 16 bytes,
// which means their size should be >= 16 bytes
pub fn generate_coding_blocks(coding: &mut [&mut [u8]], data: &[&[u8]]) -> Result<()> {
if data.len() == 0 {
return Ok(());
}
let m = coding.len() as i32;
let block_len = data[0].len();
let matrix: Vec<i32> = get_matrix(m, data.len() as i32, ERASURE_W);
let mut coding_arg = Vec::new();
let mut data_arg = Vec::new();
for block in data {
if block_len != block.len() {
trace!(
"data block size incorrect {} expected {}",
block.len(),
block_len
);
return Err(ErasureError::InvalidBlockSize);
}
data_arg.push(block.as_ptr());
}
for mut block in coding {
if block_len != block.len() {
trace!(
"coding block size incorrect {} expected {}",
block.len(),
block_len
);
return Err(ErasureError::InvalidBlockSize);
}
coding_arg.push(block.as_mut_ptr());
}
unsafe {
jerasure_matrix_encode(
data.len() as i32,
m,
ERASURE_W,
matrix.as_ptr(),
data_arg.as_ptr(),
coding_arg.as_ptr(),
data[0].len() as i32,
);
}
Ok(())
}
// Recover data + coding blocks into data blocks
// data: array of blocks to recover into
// coding: array of coding blocks
// erasures: list of indices in data where blocks should be recovered
pub fn decode_blocks(data: &mut [&mut [u8]], coding: &[&[u8]], erasures: &[i32]) -> Result<()> {
if data.len() == 0 {
return Ok(());
}
let block_len = data[0].len();
let matrix: Vec<i32> = get_matrix(coding.len() as i32, data.len() as i32, ERASURE_W);
// generate coding pointers, blocks should be the same size
let mut coding_arg: Vec<*const u8> = Vec::new();
for x in coding.iter() {
if x.len() != block_len {
return Err(ErasureError::InvalidBlockSize);
}
coding_arg.push(x.as_ptr());
}
// generate data pointers, blocks should be the same size
let mut data_arg: Vec<*mut u8> = Vec::new();
for x in data.iter_mut() {
if x.len() != block_len {
return Err(ErasureError::InvalidBlockSize);
}
data_arg.push(x.as_mut_ptr());
}
unsafe {
let ret = jerasure_matrix_decode(
data.len() as i32,
coding.len() as i32,
ERASURE_W,
matrix.as_ptr(),
0,
erasures.as_ptr(),
data_arg.as_ptr(),
coding_arg.as_ptr(),
data[0].len() as i32,
);
trace!("jerasure_matrix_decode ret: {}", ret);
for x in data[erasures[0] as usize][0..8].iter() {
trace!("{} ", x)
}
trace!("");
if ret < 0 {
return Err(ErasureError::DecodeError);
}
}
Ok(())
}
// Allocate some coding blobs and insert into the blobs array
pub fn add_coding_blobs(recycler: &BlobRecycler, blobs: &mut Vec<SharedBlob>, consumed: u64) {
let mut added = 0;
let blobs_len = blobs.len() as u64;
for i in consumed..consumed + blobs_len {
let is = i as usize;
if is != 0 && ((is + MAX_MISSING) % NUM_CODED) == 0 {
for _ in 0..MAX_MISSING {
trace!("putting coding at {}", (i - consumed));
let new_blob = recycler.allocate();
let new_blob_clone = new_blob.clone();
let mut new_blob_l = new_blob_clone.write().unwrap();
new_blob_l.set_size(0);
new_blob_l.set_coding().unwrap();
drop(new_blob_l);
blobs.insert((i - consumed) as usize, new_blob);
added += 1;
}
}
}
info!(
"add_coding consumed: {} blobs.len(): {} added: {}",
consumed,
blobs.len(),
added
);
}
// Generate coding blocks in window starting from consumed
pub fn generate_coding(
window: &mut Vec<Option<SharedBlob>>,
consumed: usize,
num_blobs: usize,
) -> Result<()> {
let mut block_start = consumed - (consumed % NUM_CODED);
for i in consumed..consumed + num_blobs {
if (i % NUM_CODED) == (NUM_CODED - 1) {
let mut data_blobs = Vec::new();
let mut coding_blobs = Vec::new();
let mut data_locks = Vec::new();
let mut data_ptrs: Vec<&[u8]> = Vec::new();
let mut coding_locks = Vec::new();
let mut coding_ptrs: Vec<&mut [u8]> = Vec::new();
info!(
"generate_coding start: {} end: {} consumed: {} num_blobs: {}",
block_start,
block_start + NUM_DATA,
consumed,
num_blobs
);
for i in block_start..block_start + NUM_DATA {
let n = i % window.len();
trace!("window[{}] = {:?}", n, window[n]);
if window[n].is_none() {
trace!("data block is null @ {}", n);
return Ok(());
}
data_blobs.push(
window[n]
.clone()
.expect("'data_blobs' arr in pub fn generate_coding"),
);
}
let mut max_data_size = 0;
for b in &data_blobs {
let lck = b.write().expect("'b' write lock in pub fn generate_coding");
if lck.meta.size > max_data_size {
max_data_size = lck.meta.size;
}
data_locks.push(lck);
}
trace!("max_data_size: {}", max_data_size);
for (i, l) in data_locks.iter_mut().enumerate() {
trace!("i: {} data: {}", i, l.data[0]);
data_ptrs.push(&l.data[..max_data_size]);
}
// generate coding ptr array
let coding_start = block_start + NUM_DATA;
let coding_end = block_start + NUM_CODED;
for i in coding_start..coding_end {
let n = i % window.len();
if window[n].is_none() {
trace!("coding block is null @ {}", n);
return Ok(());
}
let w_l = window[n].clone().unwrap();
w_l.write().unwrap().set_size(max_data_size);
if w_l.write().unwrap().set_coding().is_err() {
return Err(ErasureError::EncodeError);
}
coding_blobs.push(
window[n]
.clone()
.expect("'coding_blobs' arr in pub fn generate_coding"),
);
}
for b in &coding_blobs {
coding_locks.push(
b.write()
.expect("'coding_locks' arr in pub fn generate_coding"),
);
}
for (i, l) in coding_locks.iter_mut().enumerate() {
trace!("i: {} coding: {} size: {}", i, l.data[0], max_data_size);
coding_ptrs.push(&mut l.data_mut()[..max_data_size]);
}
generate_coding_blocks(coding_ptrs.as_mut_slice(), &data_ptrs)?;
debug!(
"consumed: {} data: {}:{} coding: {}:{}",
consumed,
block_start,
block_start + NUM_DATA,
coding_start,
coding_end
);
block_start += NUM_CODED;
}
}
Ok(())
}
// Recover missing blocks into window.
// Missing blocks should be None; `re` (the blob recycler) is used
// to allocate new ones. Returns an error if not enough
// coding blocks are present to restore.
pub fn recover(
re: &BlobRecycler,
window: &mut Vec<Option<SharedBlob>>,
consumed: usize,
received: usize,
) -> Result<()> {
//recover with erasure coding
if received <= consumed {
return Ok(());
}
let num_blocks = (received - consumed) / NUM_CODED;
let mut block_start = consumed - (consumed % NUM_CODED);
if num_blocks > 0 {
debug!(
"num_blocks: {} received: {} consumed: {}",
num_blocks, received, consumed
);
}
for i in 0..num_blocks {
if i > 100 {
break;
}
let mut data_missing = 0;
let mut coded_missing = 0;
let coding_start = block_start + NUM_DATA;
let coding_end = block_start + NUM_CODED;
trace!(
"recover: block_start: {} coding_start: {} coding_end: {}",
block_start,
coding_start,
coding_end
);
for i in block_start..coding_end {
let n = i % window.len();
if window[n].is_none() {
if i >= coding_start {
coded_missing += 1;
} else {
data_missing += 1;
}
}
}
if (data_missing + coded_missing) != NUM_CODED && (data_missing + coded_missing) != 0 {
debug!(
"1: start: {} recovering: data: {} coding: {}",
block_start, data_missing, coded_missing
);
}
if data_missing > 0 {
if (data_missing + coded_missing) <= MAX_MISSING {
debug!(
"2: recovering: data: {} coding: {}",
data_missing, coded_missing
);
let mut blobs: Vec<SharedBlob> = Vec::new();
let mut locks = Vec::new();
let mut erasures: Vec<i32> = Vec::new();
let mut meta = None;
let mut size = None;
for i in block_start..coding_end {
let j = i % window.len();
let mut b = &mut window[j];
if b.is_some() {
if i >= NUM_DATA && size.is_none() {
let bl = b.clone().unwrap();
size = Some(bl.read().unwrap().meta.size - BLOB_HEADER_SIZE);
}
if meta.is_none() {
let bl = b.clone().unwrap();
meta = Some(bl.read().unwrap().meta.clone());
}
blobs.push(b.clone().expect("'blobs' arr in pub fn recover"));
continue;
}
let n = re.allocate();
*b = Some(n.clone());
//mark the missing memory
blobs.push(n);
erasures.push((i - block_start) as i32);
}
erasures.push(-1);
trace!(
"erasures: {:?} data_size: {} header_size: {}",
erasures,
size.unwrap(),
BLOB_HEADER_SIZE
);
//lock everything
for b in &blobs {
locks.push(b.write().expect("'locks' arr in pub fn recover"));
}
{
let mut coding_ptrs: Vec<&[u8]> = Vec::new();
let mut data_ptrs: Vec<&mut [u8]> = Vec::new();
for (i, l) in locks.iter_mut().enumerate() {
if i >= NUM_DATA {
trace!("pushing coding: {}", i);
coding_ptrs.push(&l.data()[..size.unwrap()]);
} else {
trace!("pushing data: {}", i);
data_ptrs.push(&mut l.data[..size.unwrap()]);
}
}
trace!(
"coding_ptrs.len: {} data_ptrs.len {}",
coding_ptrs.len(),
data_ptrs.len()
);
decode_blocks(data_ptrs.as_mut_slice(), &coding_ptrs, &erasures)?;
}
for i in &erasures[..erasures.len() - 1] {
let idx = *i as usize;
let data_size = locks[idx].get_data_size().unwrap() - BLOB_HEADER_SIZE as u64;
locks[idx].meta = meta.clone().unwrap();
locks[idx].set_size(data_size as usize);
trace!(
"erasures[{}] size: {} data[0]: {}",
*i,
data_size,
locks[idx].data()[0]
);
}
}
}
block_start += NUM_CODED;
}
Ok(())
}
#[cfg(test)]
mod test {
use crdt;
use erasure;
use logger;
use packet::{BlobRecycler, SharedBlob, BLOB_HEADER_SIZE};
use signature::KeyPair;
use signature::KeyPairUtil;
use std::sync::{Arc, RwLock};
#[test]
pub fn test_coding() {
let zero_vec = vec![0; 16];
let mut vs: Vec<Vec<u8>> = (0..4).map(|i| (i..(16 + i)).collect()).collect();
let v_orig: Vec<u8> = vs[0].clone();
let m = 2;
let mut coding_blocks: Vec<_> = (0..m).map(|_| vec![0u8; 16]).collect();
{
let mut coding_blocks_slices: Vec<_> =
coding_blocks.iter_mut().map(|x| x.as_mut_slice()).collect();
let v_slices: Vec<_> = vs.iter().map(|x| x.as_slice()).collect();
assert!(
erasure::generate_coding_blocks(
coding_blocks_slices.as_mut_slice(),
v_slices.as_slice()
).is_ok()
);
}
trace!("coding blocks:");
for b in &coding_blocks {
trace!("{:?}", b);
}
let erasure: i32 = 1;
let erasures = vec![erasure, -1];
// clear an entry
vs[erasure as usize].copy_from_slice(zero_vec.as_slice());
{
let coding_blocks_slices: Vec<_> = coding_blocks.iter().map(|x| x.as_slice()).collect();
let mut v_slices: Vec<_> = vs.iter_mut().map(|x| x.as_mut_slice()).collect();
assert!(
erasure::decode_blocks(
v_slices.as_mut_slice(),
coding_blocks_slices.as_slice(),
erasures.as_slice(),
).is_ok()
);
}
trace!("vs:");
for v in &vs {
trace!("{:?}", v);
}
assert_eq!(v_orig, vs[0]);
}
fn print_window(window: &Vec<Option<SharedBlob>>) {
for (i, w) in window.iter().enumerate() {
print!("window({}): ", i);
if w.is_some() {
let window_l1 = w.clone().unwrap();
let window_l2 = window_l1.read().unwrap();
print!(
"index: {:?} meta.size: {} data: ",
window_l2.get_index(),
window_l2.meta.size
);
for i in 0..8 {
print!("{} ", window_l2.data()[i]);
}
} else {
print!("null");
}
println!("");
}
}
fn generate_window(
data_len: usize,
blob_recycler: &BlobRecycler,
offset: usize,
num_blobs: usize,
) -> (Vec<Option<SharedBlob>>, usize) {
let mut window = vec![None; 32];
let mut blobs = Vec::new();
for i in 0..num_blobs {
let b = blob_recycler.allocate();
let b_ = b.clone();
let mut w = b.write().unwrap();
w.set_size(data_len);
for k in 0..data_len {
w.data_mut()[k] = (k + i) as u8;
}
blobs.push(b_);
}
erasure::add_coding_blobs(blob_recycler, &mut blobs, offset as u64);
let blobs_len = blobs.len();
let d = crdt::ReplicatedData::new(
KeyPair::new().pubkey(),
"127.0.0.1:1234".parse().unwrap(),
"127.0.0.1:1235".parse().unwrap(),
"127.0.0.1:1236".parse().unwrap(),
"127.0.0.1:1237".parse().unwrap(),
"127.0.0.1:1238".parse().unwrap(),
);
let crdt = Arc::new(RwLock::new(crdt::Crdt::new(d.clone())));
assert!(crdt::Crdt::index_blobs(&crdt, &blobs, &mut (offset as u64)).is_ok());
for b in blobs {
let idx = b.read().unwrap().get_index().unwrap() as usize;
window[idx] = Some(b);
}
(window, blobs_len)
}
#[test]
pub fn test_window_recover_basic() {
logger::setup();
let data_len = 16;
let blob_recycler = BlobRecycler::default();
// Generate a window
let offset = 1;
let num_blobs = erasure::NUM_DATA + 2;
let (mut window, blobs_len) = generate_window(data_len, &blob_recycler, 0, num_blobs);
println!("** after-gen-window:");
print_window(&window);
// Generate the coding blocks
assert!(erasure::generate_coding(&mut window, offset, blobs_len).is_ok());
println!("** after-gen-coding:");
print_window(&window);
let erase_offset = offset;
// Create a hole in the window
let refwindow = window[erase_offset].clone();
window[erase_offset] = None;
// Recover it from coding
assert!(erasure::recover(&blob_recycler, &mut window, offset, offset + blobs_len).is_ok());
println!("** after-recover:");
print_window(&window);
// Check the result
let window_l = window[erase_offset].clone().unwrap();
let window_l2 = window_l.read().unwrap();
let ref_l = refwindow.clone().unwrap();
let ref_l2 = ref_l.read().unwrap();
assert_eq!(
window_l2.data[..(data_len + BLOB_HEADER_SIZE)],
ref_l2.data[..(data_len + BLOB_HEADER_SIZE)]
);
assert_eq!(window_l2.meta.size, ref_l2.meta.size);
assert_eq!(window_l2.meta.addr, ref_l2.meta.addr);
assert_eq!(window_l2.meta.port, ref_l2.meta.port);
assert_eq!(window_l2.meta.v6, ref_l2.meta.v6);
assert_eq!(window_l2.get_index().unwrap(), erase_offset as u64);
}
//TODO This needs to be reworked
#[test]
#[ignore]
pub fn test_window_recover() {
logger::setup();
let blob_recycler = BlobRecycler::default();
let offset = 4;
let data_len = 16;
let num_blobs = erasure::NUM_DATA + 2;
let (mut window, blobs_len) = generate_window(data_len, &blob_recycler, offset, num_blobs);
println!("** after-gen:");
print_window(&window);
assert!(erasure::generate_coding(&mut window, offset, blobs_len).is_ok());
println!("** after-coding:");
print_window(&window);
let refwindow = window[offset + 1].clone();
window[offset + 1] = None;
window[offset + 2] = None;
window[offset + erasure::NUM_CODED + 3] = None;
window[offset + (2 * erasure::NUM_CODED) + 0] = None;
window[offset + (2 * erasure::NUM_CODED) + 1] = None;
window[offset + (2 * erasure::NUM_CODED) + 2] = None;
let window_l0 = &(window[offset + (3 * erasure::NUM_CODED)]).clone().unwrap();
window_l0.write().unwrap().data[0] = 55;
println!("** after-nulling:");
print_window(&window);
assert!(erasure::recover(&blob_recycler, &mut window, offset, offset + blobs_len).is_ok());
println!("** after-restore:");
print_window(&window);
let window_l = window[offset + 1].clone().unwrap();
let ref_l = refwindow.clone().unwrap();
assert_eq!(
window_l.read().unwrap().data()[..data_len],
ref_l.read().unwrap().data()[..data_len]
);
}
}
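A sketch of the group layout implied by the constants above: NUM_DATA = NUM_CODED - MAX_MISSING = 16, and `add_coding_blobs` starts a 4-blob coding burst whenever `(i + MAX_MISSING) % NUM_CODED == 0`, so blobs end up in repeating groups of 16 data + 4 coding, with up to 4 missing blobs recoverable per group. The helper below only reproduces that trigger condition for illustration; it is not part of the module.
// Offsets (relative to `consumed`) at which add_coding_blobs inserts a coding burst.
fn coding_burst_offsets(consumed: usize, num_blobs: usize) -> Vec<usize> {
    const NUM_CODED: usize = 20;
    const MAX_MISSING: usize = 4;
    (consumed..consumed + num_blobs)
        .filter(|&i| i != 0 && (i + MAX_MISSING) % NUM_CODED == 0)
        .map(|i| i - consumed)
        .collect()
}

// coding_burst_offsets(0, 40) == vec![16, 36]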

31
src/fetch_stage.rs Normal file

@ -0,0 +1,31 @@
//! The `fetch_stage` batches input from a UDP socket and sends it to a channel.
use packet;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread::JoinHandle;
use streamer;
pub struct FetchStage {
pub packet_receiver: streamer::PacketReceiver,
pub thread_hdl: JoinHandle<()>,
}
impl FetchStage {
pub fn new(
socket: UdpSocket,
exit: Arc<AtomicBool>,
packet_recycler: packet::PacketRecycler,
) -> Self {
let (packet_sender, packet_receiver) = channel();
let thread_hdl =
streamer::receiver(socket, exit.clone(), packet_recycler.clone(), packet_sender);
FetchStage {
packet_receiver,
thread_hdl,
}
}
}

21
src/hash.rs Normal file

@ -0,0 +1,21 @@
//! The `hash` module provides functions for creating SHA-256 hashes.
use generic_array::typenum::U32;
use generic_array::GenericArray;
use sha2::{Digest, Sha256};
pub type Hash = GenericArray<u8, U32>;
/// Return a Sha256 hash for the given data.
pub fn hash(val: &[u8]) -> Hash {
let mut hasher = Sha256::default();
hasher.input(val);
hasher.result()
}
/// Return the hash of the given hash extended with the given value.
pub fn extend_and_hash(id: &Hash, val: &[u8]) -> Hash {
let mut hash_data = id.to_vec();
hash_data.extend_from_slice(val);
hash(&hash_data)
}


@ -1,139 +0,0 @@
//! The `historian` crate provides a microservice for generating a Proof-of-History.
//! It logs Event items on behalf of its users. It continuously generates
//! new hashes, only stopping to check if it has been sent an Event item. It
//! tags each Event with an Entry and sends it back. The Entry includes the
//! Event, the latest hash, and the number of hashes since the last event.
//! The resulting stream of entries represents ordered events in time.
use std::thread::JoinHandle;
use std::sync::mpsc::{Receiver, Sender};
use log::{hash, Entry, Event, Sha256Hash};
pub struct Historian {
pub sender: Sender<Event>,
pub receiver: Receiver<Entry>,
pub thread_hdl: JoinHandle<(Entry, ExitReason)>,
}
#[derive(Debug, PartialEq, Eq)]
pub enum ExitReason {
RecvDisconnected,
SendDisconnected,
}
fn log_events(
receiver: &Receiver<Event>,
sender: &Sender<Entry>,
num_hashes: u64,
end_hash: Sha256Hash,
) -> Result<u64, (Entry, ExitReason)> {
use std::sync::mpsc::TryRecvError;
let mut num_hashes = num_hashes;
loop {
match receiver.try_recv() {
Ok(event) => {
let entry = Entry {
end_hash,
num_hashes,
event,
};
if let Err(_) = sender.send(entry.clone()) {
return Err((entry, ExitReason::SendDisconnected));
}
num_hashes = 0;
}
Err(TryRecvError::Empty) => {
return Ok(num_hashes);
}
Err(TryRecvError::Disconnected) => {
let entry = Entry {
end_hash,
num_hashes,
event: Event::Tick,
};
return Err((entry, ExitReason::RecvDisconnected));
}
}
}
}
/// A background thread that will continue tagging received Event messages and
/// sending back Entry messages until either the receiver or sender channel is closed.
pub fn create_logger(
start_hash: Sha256Hash,
receiver: Receiver<Event>,
sender: Sender<Entry>,
) -> JoinHandle<(Entry, ExitReason)> {
use std::thread;
thread::spawn(move || {
let mut end_hash = start_hash;
let mut num_hashes = 0;
loop {
match log_events(&receiver, &sender, num_hashes, end_hash) {
Ok(n) => num_hashes = n,
Err(err) => return err,
}
end_hash = hash(&end_hash);
num_hashes += 1;
}
})
}
impl Historian {
pub fn new(start_hash: &Sha256Hash) -> Self {
use std::sync::mpsc::channel;
let (sender, event_receiver) = channel();
let (entry_sender, receiver) = channel();
let thread_hdl = create_logger(*start_hash, event_receiver, entry_sender);
Historian {
sender,
receiver,
thread_hdl,
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use log::*;
#[test]
fn test_historian() {
use std::thread::sleep;
use std::time::Duration;
let zero = Sha256Hash::default();
let hist = Historian::new(&zero);
hist.sender.send(Event::Tick).unwrap();
sleep(Duration::new(0, 1_000_000));
hist.sender.send(Event::UserDataKey(0xdeadbeef)).unwrap();
sleep(Duration::new(0, 1_000_000));
hist.sender.send(Event::Tick).unwrap();
let entry0 = hist.receiver.recv().unwrap();
let entry1 = hist.receiver.recv().unwrap();
let entry2 = hist.receiver.recv().unwrap();
drop(hist.sender);
assert_eq!(
hist.thread_hdl.join().unwrap().1,
ExitReason::RecvDisconnected
);
assert!(verify_slice(&[entry0, entry1, entry2], &zero));
}
#[test]
fn test_historian_closed_sender() {
let zero = Sha256Hash::default();
let hist = Historian::new(&zero);
drop(hist.receiver);
hist.sender.send(Event::Tick).unwrap();
assert_eq!(
hist.thread_hdl.join().unwrap().1,
ExitReason::SendDisconnected
);
}
}

203
src/ledger.rs Normal file

@ -0,0 +1,203 @@
//! The `ledger` module provides functions for parallel verification of the
//! Proof of History ledger.
use bincode::{deserialize, serialize_into};
use entry::{next_entry, Entry};
use hash::Hash;
use packet;
use packet::{SharedBlob, BLOB_DATA_SIZE, BLOB_SIZE};
use rayon::prelude::*;
use std::cmp::min;
use std::collections::VecDeque;
use std::io::Cursor;
use std::mem::size_of;
use transaction::Transaction;
pub trait Block {
/// Verifies the hashes and counts of a slice of transactions are all consistent.
fn verify(&self, start_hash: &Hash) -> bool;
fn to_blobs(&self, blob_recycler: &packet::BlobRecycler, q: &mut VecDeque<SharedBlob>);
}
impl Block for [Entry] {
fn verify(&self, start_hash: &Hash) -> bool {
let genesis = [Entry::new_tick(0, start_hash)];
let entry_pairs = genesis.par_iter().chain(self).zip(self);
entry_pairs.all(|(x0, x1)| x1.verify(&x0.id))
}
fn to_blobs(&self, blob_recycler: &packet::BlobRecycler, q: &mut VecDeque<SharedBlob>) {
let mut start = 0;
let mut end = 0;
while start < self.len() {
let mut entries: Vec<Vec<Entry>> = Vec::new();
let mut total = 0;
for i in &self[start..] {
total += size_of::<Transaction>() * i.transactions.len();
total += size_of::<Entry>();
if total >= BLOB_DATA_SIZE {
break;
}
end += 1;
}
// See if we need to split the transactions
if end <= start {
let mut transaction_start = 0;
let num_transactions_per_blob = BLOB_DATA_SIZE / size_of::<Transaction>();
let total_entry_chunks = (self[end].transactions.len() + num_transactions_per_blob
- 1) / num_transactions_per_blob;
trace!(
"splitting transactions end: {} total_chunks: {}",
end,
total_entry_chunks
);
for _ in 0..total_entry_chunks {
let transaction_end = min(
transaction_start + num_transactions_per_blob,
self[end].transactions.len(),
);
let mut entry = Entry {
num_hashes: self[end].num_hashes,
id: self[end].id,
transactions: self[end].transactions[transaction_start..transaction_end]
.to_vec(),
};
entries.push(vec![entry]);
transaction_start = transaction_end;
}
end += 1;
} else {
entries.push(self[start..end].to_vec());
}
for entry in entries {
let b = blob_recycler.allocate();
let pos = {
let mut bd = b.write().unwrap();
let mut out = Cursor::new(bd.data_mut());
serialize_into(&mut out, &entry).expect("failed to serialize output");
out.position() as usize
};
assert!(pos < BLOB_SIZE);
b.write().unwrap().set_size(pos);
q.push_back(b);
}
start = end;
}
}
}
/// Create a vector of Entries of length `transaction_batches.len()` from `start_hash` hash, `num_hashes`, and `transaction_batches`.
pub fn next_entries(
start_hash: &Hash,
num_hashes: u64,
transaction_batches: Vec<Vec<Transaction>>,
) -> Vec<Entry> {
let mut id = *start_hash;
let mut entries = vec![];
for transactions in &transaction_batches {
let transactions = transactions.clone();
let entry = next_entry(&id, num_hashes, transactions);
id = entry.id;
entries.push(entry);
}
entries
}
pub fn reconstruct_entries_from_blobs(blobs: &VecDeque<SharedBlob>) -> Vec<Entry> {
let mut entries_to_apply: Vec<Entry> = Vec::new();
let mut last_id = Hash::default();
for msgs in blobs {
let blob = msgs.read().unwrap();
let entries: Vec<Entry> = deserialize(&blob.data()[..blob.meta.size]).unwrap();
for entry in entries {
if entry.id == last_id {
if let Some(last_entry) = entries_to_apply.last_mut() {
last_entry.transactions.extend(entry.transactions);
}
} else {
last_id = entry.id;
entries_to_apply.push(entry);
}
}
//TODO respond back to leader with hash of the state
}
entries_to_apply
}
#[cfg(test)]
mod tests {
use super::*;
use hash::hash;
use packet::BlobRecycler;
use signature::{KeyPair, KeyPairUtil};
use transaction::Transaction;
#[test]
fn test_verify_slice() {
let zero = Hash::default();
let one = hash(&zero);
assert!(vec![][..].verify(&zero)); // base case
assert!(vec![Entry::new_tick(0, &zero)][..].verify(&zero)); // singleton case 1
assert!(!vec![Entry::new_tick(0, &zero)][..].verify(&one)); // singleton case 2, bad
assert!(next_entries(&zero, 0, vec![vec![]; 2])[..].verify(&zero)); // inductive step
let mut bad_ticks = next_entries(&zero, 0, vec![vec![]; 2]);
bad_ticks[1].id = one;
assert!(!bad_ticks.verify(&zero)); // inductive step, bad
}
#[test]
fn test_entry_to_blobs() {
let zero = Hash::default();
let one = hash(&zero);
let keypair = KeyPair::new();
let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, one);
let transactions = vec![tx0.clone(); 10000];
let e0 = Entry::new(&zero, 0, transactions);
let entries = vec![e0.clone(); 1];
let blob_recycler = BlobRecycler::default();
let mut blob_q = VecDeque::new();
entries.to_blobs(&blob_recycler, &mut blob_q);
assert_eq!(reconstruct_entries_from_blobs(&blob_q), entries);
}
#[test]
fn test_next_entries() {
let mut id = Hash::default();
let next_id = hash(&id);
let keypair = KeyPair::new();
let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, next_id);
let transactions = vec![tx0.clone(); 5];
let transaction_batches = vec![transactions.clone(); 5];
let entries0 = next_entries(&id, 0, transaction_batches);
assert_eq!(entries0.len(), 5);
let mut entries1 = vec![];
for _ in 0..5 {
let entry = next_entry(&id, 0, transactions.clone());
id = entry.id;
entries1.push(entry);
}
assert_eq!(entries0, entries1);
}
}
#[cfg(all(feature = "unstable", test))]
mod bench {
extern crate test;
use self::test::Bencher;
use ledger::*;
#[bench]
fn bench_next_entries(bencher: &mut Bencher) {
let start_hash = Hash::default();
let entries = next_entries(&start_hash, 10_000, vec![vec![]; 8]);
bencher.iter(|| {
assert!(entries.verify(&start_hash));
});
}
}
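A sketch of the ceiling division `to_blobs` falls back on when one entry's transactions do not fit in a single blob: the entry is split into chunk-entries that share its `num_hashes` and `id`. The numbers below are made-up round figures, not the real `size_of::<Transaction>()`.
fn num_entry_chunks(num_transactions: usize, num_transactions_per_blob: usize) -> usize {
    // Same expression as in Block::to_blobs above: ceiling division.
    (num_transactions + num_transactions_per_blob - 1) / num_transactions_per_blob
}

// num_entry_chunks(10_000, 3_000) == 4
// num_entry_chunks(3_000, 3_000) == 1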


@ -1,7 +1,68 @@
//! The `solana` library implements the Solana high-performance blockchain architecture.
//! It includes a full Rust implementation of the architecture (see
//! [Server](server/struct.Server.html)) as well as hooks to GPU implementations of its most
//! parallelizable components (e.g. [SigVerify](sigverify/index.html)). It also includes
//! command-line tools to spin up fullnodes and a Rust library
//! (see [ThinClient](thin_client/struct.ThinClient.html)) to interact with them.
//!
#![cfg_attr(feature = "unstable", feature(test))]
pub mod log;
pub mod historian;
extern crate digest;
extern crate itertools;
#[macro_use]
pub mod counter;
pub mod bank;
pub mod banking_stage;
pub mod budget;
pub mod crdt;
pub mod entry;
pub mod entry_writer;
#[cfg(feature = "erasure")]
pub mod erasure;
pub mod fetch_stage;
pub mod hash;
pub mod ledger;
pub mod logger;
pub mod mint;
pub mod ncp;
pub mod packet;
pub mod payment_plan;
pub mod record_stage;
pub mod recorder;
pub mod replicate_stage;
pub mod request;
pub mod request_processor;
pub mod request_stage;
pub mod result;
pub mod rpu;
pub mod server;
pub mod signature;
pub mod sigverify;
pub mod sigverify_stage;
pub mod streamer;
pub mod thin_client;
pub mod timing;
pub mod tpu;
pub mod transaction;
pub mod tvu;
pub mod write_stage;
extern crate bincode;
extern crate byteorder;
extern crate chrono;
extern crate generic_array;
extern crate libc;
#[macro_use]
extern crate log;
extern crate rayon;
extern crate ring;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate pnet;
extern crate serde_json;
extern crate sha2;
extern crate untrusted;
#[cfg(test)]
#[macro_use]
extern crate matches;
extern crate rand;


@ -1,166 +0,0 @@
//! The `log` crate provides the foundational data structures for Proof-of-History,
//! an ordered log of events in time.
/// Each log entry contains three pieces of data. The 'num_hashes' field is the number
/// of hashes performed since the previous entry. The 'end_hash' field is the result
/// of hashing 'end_hash' from the previous entry 'num_hashes' times. The 'event'
/// field points to an Event that took place shortly after 'end_hash' was generated.
///
/// If you divide 'num_hashes' by the amount of time it takes to generate a new hash, you
/// get a duration estimate since the last event. Since processing power increases
/// over time, one should expect the duration 'num_hashes' represents to decrease proportionally.
/// Though processing power varies across nodes, the network gives priority to the
/// fastest processor. Duration should therefore be estimated by assuming that the hash
/// was generated by the fastest processor at the time the entry was logged.
use digest::generic_array::GenericArray;
use digest::generic_array::typenum::U32;
pub type Sha256Hash = GenericArray<u8, U32>;
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct Entry {
pub num_hashes: u64,
pub end_hash: Sha256Hash,
pub event: Event,
}
/// When 'event' is Tick, the event represents a simple clock tick, and exists for the
/// sole purpose of improving the performance of event log verification. A tick can
/// be generated in 'num_hashes' hashes and verified in 'num_hashes' hashes. By logging
/// a hash alongside the tick, each tick and be verified in parallel using the 'end_hash'
/// of the preceding tick to seed its hashing.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum Event {
Tick,
UserDataKey(u64),
}
impl Entry {
/// Creates a Entry from the number of hashes 'num_hashes' since the previous event
/// and that resulting 'end_hash'.
pub fn new_tick(num_hashes: u64, end_hash: &Sha256Hash) -> Self {
let event = Event::Tick;
Entry {
num_hashes,
end_hash: *end_hash,
event,
}
}
/// Verifies self.end_hash is the result of hashing a 'start_hash' 'self.num_hashes' times.
pub fn verify(self: &Self, start_hash: &Sha256Hash) -> bool {
self.end_hash == next_tick(start_hash, self.num_hashes).end_hash
}
}
pub fn hash(val: &[u8]) -> Sha256Hash {
use sha2::{Digest, Sha256};
let mut hasher = Sha256::default();
hasher.input(val);
hasher.result()
}
/// Creates the next Tick Entry 'num_hashes' after 'start_hash'.
pub fn next_tick(start_hash: &Sha256Hash, num_hashes: u64) -> Entry {
let mut end_hash = *start_hash;
for _ in 0..num_hashes {
end_hash = hash(&end_hash);
}
Entry::new_tick(num_hashes, &end_hash)
}
/// Verifies the hashes and counts of a slice of events are all consistent.
pub fn verify_slice(events: &[Entry], start_hash: &Sha256Hash) -> bool {
use rayon::prelude::*;
let genesis = [Entry::new_tick(Default::default(), start_hash)];
let event_pairs = genesis.par_iter().chain(events).zip(events);
event_pairs.all(|(x0, x1)| x1.verify(&x0.end_hash))
}
/// Verifies the hashes and events serially. Exists only for reference.
pub fn verify_slice_seq(events: &[Entry], start_hash: &Sha256Hash) -> bool {
let genesis = [Entry::new_tick(0, start_hash)];
let mut event_pairs = genesis.iter().chain(events).zip(events);
event_pairs.all(|(x0, x1)| x1.verify(&x0.end_hash))
}
/// Create a vector of Ticks of length 'len' from 'start_hash' hash and 'num_hashes'.
pub fn create_ticks(start_hash: &Sha256Hash, num_hashes: u64, len: usize) -> Vec<Entry> {
use itertools::unfold;
let mut events = unfold(*start_hash, |state| {
let event = next_tick(state, num_hashes);
*state = event.end_hash;
return Some(event);
});
events.by_ref().take(len).collect()
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_event_verify() {
let zero = Sha256Hash::default();
let one = hash(&zero);
assert!(Entry::new_tick(0, &zero).verify(&zero)); // base case
assert!(!Entry::new_tick(0, &zero).verify(&one)); // base case, bad
assert!(next_tick(&zero, 1).verify(&zero)); // inductive step
assert!(!next_tick(&zero, 1).verify(&one)); // inductive step, bad
}
#[test]
fn test_next_tick() {
let zero = Sha256Hash::default();
assert_eq!(next_tick(&zero, 1).num_hashes, 1)
}
fn verify_slice_generic(verify_slice: fn(&[Entry], &Sha256Hash) -> bool) {
let zero = Sha256Hash::default();
let one = hash(&zero);
assert!(verify_slice(&vec![], &zero)); // base case
assert!(verify_slice(&vec![Entry::new_tick(0, &zero)], &zero)); // singleton case 1
assert!(!verify_slice(&vec![Entry::new_tick(0, &zero)], &one)); // singleton case 2, bad
assert!(verify_slice(&create_ticks(&zero, 0, 2), &zero)); // inductive step
let mut bad_ticks = create_ticks(&zero, 0, 2);
bad_ticks[1].end_hash = one;
assert!(!verify_slice(&bad_ticks, &zero)); // inductive step, bad
}
#[test]
fn test_verify_slice() {
verify_slice_generic(verify_slice);
}
#[test]
fn test_verify_slice_seq() {
verify_slice_generic(verify_slice_seq);
}
}
#[cfg(all(feature = "unstable", test))]
mod bench {
extern crate test;
use self::test::Bencher;
use log::*;
#[bench]
fn event_bench(bencher: &mut Bencher) {
let start_hash = Default::default();
let events = create_ticks(&start_hash, 10_000, 8);
bencher.iter(|| {
assert!(verify_slice(&events, &start_hash));
});
}
#[bench]
fn event_bench_seq(bencher: &mut Bencher) {
let start_hash = Default::default();
let events = create_ticks(&start_hash, 10_000, 8);
bencher.iter(|| {
assert!(verify_slice_seq(&events, &start_hash));
});
}
}

14
src/logger.rs Normal file

@ -0,0 +1,14 @@
//! The `logger` module provides a setup function for `env_logger`. Its only function,
//! `setup()`, may be called multiple times.
use std::sync::{Once, ONCE_INIT};
extern crate env_logger;
static INIT: Once = ONCE_INIT;
/// Setup function that is only run once, even if called multiple times.
pub fn setup() {
INIT.call_once(|| {
let _ = env_logger::init();
});
}

92
src/mint.rs Normal file

@ -0,0 +1,92 @@
//! The `mint` module is a library for generating the chain's genesis block.
use entry::Entry;
use hash::{hash, Hash};
use ring::rand::SystemRandom;
use signature::{KeyPair, KeyPairUtil, PublicKey};
use transaction::Transaction;
use untrusted::Input;
#[derive(Serialize, Deserialize, Debug)]
pub struct Mint {
pub pkcs8: Vec<u8>,
pubkey: PublicKey,
pub tokens: i64,
}
impl Mint {
pub fn new(tokens: i64) -> Self {
let rnd = SystemRandom::new();
let pkcs8 = KeyPair::generate_pkcs8(&rnd)
.expect("generate_pkcs8 in mint pub fn new")
.to_vec();
let keypair =
KeyPair::from_pkcs8(Input::from(&pkcs8)).expect("from_pkcs8 in mint pub fn new");
let pubkey = keypair.pubkey();
Mint {
pkcs8,
pubkey,
tokens,
}
}
pub fn seed(&self) -> Hash {
hash(&self.pkcs8)
}
pub fn last_id(&self) -> Hash {
self.create_entries()[1].id
}
pub fn keypair(&self) -> KeyPair {
KeyPair::from_pkcs8(Input::from(&self.pkcs8)).expect("from_pkcs8 in mint pub fn keypair")
}
pub fn pubkey(&self) -> PublicKey {
self.pubkey
}
pub fn create_transactions(&self) -> Vec<Transaction> {
let keypair = self.keypair();
let tx = Transaction::new(&keypair, self.pubkey(), self.tokens, self.seed());
vec![tx]
}
pub fn create_entries(&self) -> Vec<Entry> {
let e0 = Entry::new(&self.seed(), 0, vec![]);
let e1 = Entry::new(&e0.id, 0, self.create_transactions());
vec![e0, e1]
}
}
#[derive(Serialize, Deserialize, Debug)]
pub struct MintDemo {
pub mint: Mint,
pub num_accounts: i64,
}
#[cfg(test)]
mod tests {
use super::*;
use budget::Budget;
use ledger::Block;
use transaction::{Instruction, Plan};
#[test]
fn test_create_transactions() {
let mut transactions = Mint::new(100).create_transactions().into_iter();
let tx = transactions.next().unwrap();
if let Instruction::NewContract(contract) = tx.instruction {
if let Plan::Budget(Budget::Pay(payment)) = contract.plan {
assert_eq!(tx.from, payment.to);
}
}
assert_eq!(transactions.next(), None);
}
#[test]
fn test_verify_entries() {
let entries = Mint::new(100).create_entries();
assert!(entries[..].verify(&entries[0].id));
}
}

86
src/ncp.rs Normal file

@ -0,0 +1,86 @@
//! The `ncp` module implements the network control plane.
use crdt;
use packet;
use result::Result;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::channel;
use std::sync::{Arc, RwLock};
use std::thread::JoinHandle;
use streamer;
pub struct Ncp {
pub thread_hdls: Vec<JoinHandle<()>>,
}
impl Ncp {
pub fn new(
crdt: Arc<RwLock<crdt::Crdt>>,
window: Arc<RwLock<Vec<Option<packet::SharedBlob>>>>,
gossip_listen_socket: UdpSocket,
gossip_send_socket: UdpSocket,
exit: Arc<AtomicBool>,
) -> Result<Ncp> {
let blob_recycler = packet::BlobRecycler::default();
let (request_sender, request_receiver) = channel();
trace!(
"Ncp: id: {:?}, listening on: {:?}",
&crdt.read().unwrap().me[..4],
gossip_listen_socket.local_addr().unwrap()
);
let t_receiver = streamer::blob_receiver(
exit.clone(),
blob_recycler.clone(),
gossip_listen_socket,
request_sender,
)?;
let (response_sender, response_receiver) = channel();
let t_responder = streamer::responder(
gossip_send_socket,
exit.clone(),
blob_recycler.clone(),
response_receiver,
);
let t_listen = crdt::Crdt::listen(
crdt.clone(),
window,
blob_recycler.clone(),
request_receiver,
response_sender.clone(),
exit.clone(),
);
let t_gossip = crdt::Crdt::gossip(crdt.clone(), blob_recycler, response_sender, exit);
let thread_hdls = vec![t_receiver, t_responder, t_listen, t_gossip];
Ok(Ncp { thread_hdls })
}
}
#[cfg(test)]
mod tests {
use crdt::{Crdt, TestNode};
use ncp::Ncp;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
#[test]
// test that stage will exit when flag is set
fn test_exit() {
let exit = Arc::new(AtomicBool::new(false));
let tn = TestNode::new();
let crdt = Crdt::new(tn.data.clone());
let c = Arc::new(RwLock::new(crdt));
let w = Arc::new(RwLock::new(vec![]));
let d = Ncp::new(
c.clone(),
w,
tn.sockets.gossip,
tn.sockets.gossip_send,
exit.clone(),
).unwrap();
exit.store(true, Ordering::Relaxed);
for t in d.thread_hdls {
t.join().expect("thread join");
}
}
}

546
src/packet.rs Normal file

@ -0,0 +1,546 @@
//! The `packet` module defines data structures and methods to pull data from the network.
use bincode::{deserialize, serialize};
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use counter::Counter;
use result::{Error, Result};
use serde::Serialize;
use signature::PublicKey;
use std::collections::VecDeque;
use std::fmt;
use std::io;
use std::mem::size_of;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket};
use std::sync::atomic::AtomicUsize;
use std::sync::{Arc, Mutex, RwLock};
use std::time::Instant;
pub type SharedPackets = Arc<RwLock<Packets>>;
pub type SharedBlob = Arc<RwLock<Blob>>;
pub type PacketRecycler = Recycler<Packets>;
pub type BlobRecycler = Recycler<Blob>;
pub const NUM_PACKETS: usize = 1024 * 8;
pub const BLOB_SIZE: usize = 64 * 1024;
pub const BLOB_DATA_SIZE: usize = BLOB_SIZE - BLOB_ID_END;
pub const PACKET_DATA_SIZE: usize = 256;
pub const NUM_BLOBS: usize = (NUM_PACKETS * PACKET_DATA_SIZE) / BLOB_SIZE;
#[derive(Clone, Default)]
#[repr(C)]
pub struct Meta {
pub size: usize,
pub addr: [u16; 8],
pub port: u16,
pub v6: bool,
}
#[derive(Clone)]
#[repr(C)]
pub struct Packet {
pub data: [u8; PACKET_DATA_SIZE],
pub meta: Meta,
}
impl fmt::Debug for Packet {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"Packet {{ size: {:?}, addr: {:?} }}",
self.meta.size,
self.meta.addr()
)
}
}
impl Default for Packet {
fn default() -> Packet {
Packet {
data: [0u8; PACKET_DATA_SIZE],
meta: Meta::default(),
}
}
}
impl Meta {
pub fn addr(&self) -> SocketAddr {
if !self.v6 {
let addr = [
self.addr[0] as u8,
self.addr[1] as u8,
self.addr[2] as u8,
self.addr[3] as u8,
];
let ipv4: Ipv4Addr = From::<[u8; 4]>::from(addr);
SocketAddr::new(IpAddr::V4(ipv4), self.port)
} else {
let ipv6: Ipv6Addr = From::<[u16; 8]>::from(self.addr);
SocketAddr::new(IpAddr::V6(ipv6), self.port)
}
}
pub fn set_addr(&mut self, a: &SocketAddr) {
match *a {
SocketAddr::V4(v4) => {
let ip = v4.ip().octets();
self.addr[0] = u16::from(ip[0]);
self.addr[1] = u16::from(ip[1]);
self.addr[2] = u16::from(ip[2]);
self.addr[3] = u16::from(ip[3]);
self.port = a.port();
}
SocketAddr::V6(v6) => {
self.addr = v6.ip().segments();
self.port = a.port();
self.v6 = true;
}
}
}
}
#[derive(Debug)]
pub struct Packets {
pub packets: Vec<Packet>,
}
//auto derive doesn't support large arrays
impl Default for Packets {
fn default() -> Packets {
Packets {
packets: vec![Packet::default(); NUM_PACKETS],
}
}
}
#[derive(Clone)]
pub struct Blob {
pub data: [u8; BLOB_SIZE],
pub meta: Meta,
}
impl fmt::Debug for Blob {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"Blob {{ size: {:?}, addr: {:?} }}",
self.meta.size,
self.meta.addr()
)
}
}
//auto derive doesn't support large arrays
impl Default for Blob {
fn default() -> Blob {
Blob {
data: [0u8; BLOB_SIZE],
meta: Meta::default(),
}
}
}
pub struct Recycler<T> {
gc: Arc<Mutex<Vec<Arc<RwLock<T>>>>>,
}
impl<T: Default> Default for Recycler<T> {
fn default() -> Recycler<T> {
Recycler {
gc: Arc::new(Mutex::new(vec![])),
}
}
}
impl<T: Default> Clone for Recycler<T> {
fn clone(&self) -> Recycler<T> {
Recycler {
gc: self.gc.clone(),
}
}
}
impl<T: Default> Recycler<T> {
pub fn allocate(&self) -> Arc<RwLock<T>> {
let mut gc = self.gc.lock().expect("recycler lock in pb fn allocate");
gc.pop()
.unwrap_or_else(|| Arc::new(RwLock::new(Default::default())))
}
pub fn recycle(&self, msgs: Arc<RwLock<T>>) {
let mut gc = self.gc.lock().expect("recycler lock in pub fn recycle");
gc.push(msgs);
}
}
impl Packets {
fn run_read_from(&mut self, socket: &UdpSocket) -> Result<usize> {
static mut COUNTER: Counter = create_counter!("packets", 10);
self.packets.resize(NUM_PACKETS, Packet::default());
let mut i = 0;
//DOCUMENTED SIDE-EFFECT
//Performance out of the IO without poll
// * block on the socket until it's readable
// * set the socket to non blocking
// * read until it fails
// * set it back to blocking before returning
socket.set_nonblocking(false)?;
let mut start = Instant::now();
for p in &mut self.packets {
p.meta.size = 0;
trace!("receiving on {}", socket.local_addr().unwrap());
match socket.recv_from(&mut p.data) {
Err(_) if i > 0 => {
inc_counter!(COUNTER, i, start);
debug!("got {:?} messages on {}", i, socket.local_addr().unwrap());
break;
}
Err(e) => {
trace!("recv_from err {:?}", e);
return Err(Error::IO(e));
}
Ok((nrecv, from)) => {
p.meta.size = nrecv;
p.meta.set_addr(&from);
if i == 0 {
start = Instant::now();
socket.set_nonblocking(true)?;
}
}
}
i += 1;
}
Ok(i)
}
pub fn recv_from(&mut self, socket: &UdpSocket) -> Result<()> {
let sz = self.run_read_from(socket)?;
self.packets.resize(sz, Packet::default());
debug!("recv_from: {}", sz);
Ok(())
}
pub fn send_to(&self, socket: &UdpSocket) -> Result<()> {
for p in &self.packets {
let a = p.meta.addr();
socket.send_to(&p.data[..p.meta.size], &a)?;
}
Ok(())
}
}
pub fn to_packets<T: Serialize>(r: &PacketRecycler, xs: Vec<T>) -> Vec<SharedPackets> {
let mut out = vec![];
for x in xs.chunks(NUM_PACKETS) {
let p = r.allocate();
p.write()
.unwrap()
.packets
.resize(x.len(), Default::default());
for (i, o) in x.iter().zip(p.write().unwrap().packets.iter_mut()) {
let v = serialize(&i).expect("serialize request");
let len = v.len();
o.data[..len].copy_from_slice(&v);
o.meta.size = len;
}
out.push(p);
}
return out;
}
pub fn to_blob<T: Serialize>(
resp: T,
rsp_addr: SocketAddr,
blob_recycler: &BlobRecycler,
) -> Result<SharedBlob> {
let blob = blob_recycler.allocate();
{
let mut b = blob.write().unwrap();
let v = serialize(&resp)?;
let len = v.len();
// TODO: we are not using .data_mut() method here because
// the raw bytes are being serialized and sent, this isn't the
// right interface, and we should create a separate path for
// sending request responses in the RPU
assert!(len < BLOB_SIZE);
b.data[..len].copy_from_slice(&v);
b.meta.size = len;
b.meta.set_addr(&rsp_addr);
}
Ok(blob)
}
pub fn to_blobs<T: Serialize>(
rsps: Vec<(T, SocketAddr)>,
blob_recycler: &BlobRecycler,
) -> Result<VecDeque<SharedBlob>> {
let mut blobs = VecDeque::new();
for (resp, rsp_addr) in rsps {
blobs.push_back(to_blob(resp, rsp_addr, blob_recycler)?);
}
Ok(blobs)
}
const BLOB_INDEX_END: usize = size_of::<u64>();
const BLOB_ID_END: usize = BLOB_INDEX_END + size_of::<usize>() + size_of::<PublicKey>();
const BLOB_FLAGS_END: usize = BLOB_ID_END + size_of::<u32>();
const BLOB_SIZE_END: usize = BLOB_FLAGS_END + size_of::<u64>();
macro_rules! align {
($x:expr, $align:expr) => {
$x + ($align - 1) & !($align - 1)
};
}
pub const BLOB_FLAG_IS_CODING: u32 = 0x1;
pub const BLOB_HEADER_SIZE: usize = align!(BLOB_SIZE_END, 64);
impl Blob {
pub fn get_index(&self) -> Result<u64> {
let mut rdr = io::Cursor::new(&self.data[0..BLOB_INDEX_END]);
let r = rdr.read_u64::<LittleEndian>()?;
Ok(r)
}
pub fn set_index(&mut self, ix: u64) -> Result<()> {
let mut wtr = vec![];
wtr.write_u64::<LittleEndian>(ix)?;
self.data[..BLOB_INDEX_END].clone_from_slice(&wtr);
Ok(())
}
/// Sender id; we use this to identify whether a blob is from the leader and should be
/// retransmitted. Eventually blobs should have a signature that we can use for spam filtering.
pub fn get_id(&self) -> Result<PublicKey> {
let e = deserialize(&self.data[BLOB_INDEX_END..BLOB_ID_END])?;
Ok(e)
}
pub fn set_id(&mut self, id: PublicKey) -> Result<()> {
let wtr = serialize(&id)?;
self.data[BLOB_INDEX_END..BLOB_ID_END].clone_from_slice(&wtr);
Ok(())
}
pub fn get_flags(&self) -> Result<u32> {
let mut rdr = io::Cursor::new(&self.data[BLOB_ID_END..BLOB_FLAGS_END]);
let r = rdr.read_u32::<LittleEndian>()?;
Ok(r)
}
pub fn set_flags(&mut self, ix: u32) -> Result<()> {
let mut wtr = vec![];
wtr.write_u32::<LittleEndian>(ix)?;
self.data[BLOB_ID_END..BLOB_FLAGS_END].clone_from_slice(&wtr);
Ok(())
}
pub fn is_coding(&self) -> bool {
return (self.get_flags().unwrap() & BLOB_FLAG_IS_CODING) != 0;
}
pub fn set_coding(&mut self) -> Result<()> {
let flags = self.get_flags().unwrap();
self.set_flags(flags | BLOB_FLAG_IS_CODING)
}
pub fn get_data_size(&self) -> Result<u64> {
let mut rdr = io::Cursor::new(&self.data[BLOB_FLAGS_END..BLOB_SIZE_END]);
let r = rdr.read_u64::<LittleEndian>()?;
Ok(r)
}
pub fn set_data_size(&mut self, ix: u64) -> Result<()> {
let mut wtr = vec![];
wtr.write_u64::<LittleEndian>(ix)?;
self.data[BLOB_FLAGS_END..BLOB_SIZE_END].clone_from_slice(&wtr);
Ok(())
}
pub fn data(&self) -> &[u8] {
&self.data[BLOB_HEADER_SIZE..]
}
pub fn data_mut(&mut self) -> &mut [u8] {
&mut self.data[BLOB_HEADER_SIZE..]
}
pub fn set_size(&mut self, size: usize) {
let new_size = size + BLOB_HEADER_SIZE;
self.meta.size = new_size;
self.set_data_size(new_size as u64).unwrap();
}
pub fn recv_from(re: &BlobRecycler, socket: &UdpSocket) -> Result<VecDeque<SharedBlob>> {
let mut v = VecDeque::new();
//DOCUMENTED SIDE-EFFECT
//Performance out of the IO without poll
// * block on the socket until it's readable
// * set the socket to non blocking
// * read until it fails
// * set it back to blocking before returning
socket.set_nonblocking(false)?;
for i in 0..NUM_BLOBS {
let r = re.allocate();
{
let mut p = r.write().expect("'r' write lock in pub fn recv_from");
trace!("receiving on {}", socket.local_addr().unwrap());
match socket.recv_from(&mut p.data) {
Err(_) if i > 0 => {
trace!("got {:?} messages on {}", i, socket.local_addr().unwrap());
break;
}
Err(e) => {
if e.kind() != io::ErrorKind::WouldBlock {
info!("recv_from err {:?}", e);
}
return Err(Error::IO(e));
}
Ok((nrecv, from)) => {
p.meta.size = nrecv;
p.meta.set_addr(&from);
if i == 0 {
socket.set_nonblocking(true)?;
}
}
}
}
v.push_back(r);
}
Ok(v)
}
pub fn send_to(
re: &BlobRecycler,
socket: &UdpSocket,
v: &mut VecDeque<SharedBlob>,
) -> Result<()> {
while let Some(r) = v.pop_front() {
{
let p = r.read().expect("'r' read lock in pub fn send_to");
let a = p.meta.addr();
socket.send_to(&p.data[..p.meta.size], &a)?;
}
re.recycle(r);
}
Ok(())
}
}
#[cfg(test)]
mod test {
use packet::{to_packets, Blob, BlobRecycler, Packet, PacketRecycler, Packets, NUM_PACKETS};
use request::Request;
use std::collections::VecDeque;
use std::io;
use std::io::Write;
use std::net::UdpSocket;
#[test]
pub fn packet_recycler_test() {
let r = PacketRecycler::default();
let p = r.allocate();
r.recycle(p);
assert_eq!(r.gc.lock().unwrap().len(), 1);
let _ = r.allocate();
assert_eq!(r.gc.lock().unwrap().len(), 0);
}
#[test]
pub fn blob_recycler_test() {
let r = BlobRecycler::default();
let p = r.allocate();
r.recycle(p);
assert_eq!(r.gc.lock().unwrap().len(), 1);
let _ = r.allocate();
assert_eq!(r.gc.lock().unwrap().len(), 0);
}
#[test]
pub fn packet_send_recv() {
let reader = UdpSocket::bind("127.0.0.1:0").expect("bind");
let addr = reader.local_addr().unwrap();
let sender = UdpSocket::bind("127.0.0.1:0").expect("bind");
let saddr = sender.local_addr().unwrap();
let r = PacketRecycler::default();
let p = r.allocate();
p.write().unwrap().packets.resize(10, Packet::default());
for m in p.write().unwrap().packets.iter_mut() {
m.meta.set_addr(&addr);
m.meta.size = 256;
}
p.read().unwrap().send_to(&sender).unwrap();
p.write().unwrap().recv_from(&reader).unwrap();
for m in p.write().unwrap().packets.iter_mut() {
assert_eq!(m.meta.size, 256);
assert_eq!(m.meta.addr(), saddr);
}
r.recycle(p);
}
#[test]
fn test_to_packets() {
let tx = Request::GetTransactionCount;
let re = PacketRecycler::default();
let rv = to_packets(&re, vec![tx.clone(); 1]);
assert_eq!(rv.len(), 1);
assert_eq!(rv[0].read().unwrap().packets.len(), 1);
let rv = to_packets(&re, vec![tx.clone(); NUM_PACKETS]);
assert_eq!(rv.len(), 1);
assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);
let rv = to_packets(&re, vec![tx.clone(); NUM_PACKETS + 1]);
assert_eq!(rv.len(), 2);
assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);
assert_eq!(rv[1].read().unwrap().packets.len(), 1);
}
#[test]
pub fn blob_send_recv() {
trace!("start");
let reader = UdpSocket::bind("127.0.0.1:0").expect("bind");
let addr = reader.local_addr().unwrap();
let sender = UdpSocket::bind("127.0.0.1:0").expect("bind");
let r = BlobRecycler::default();
let p = r.allocate();
p.write().unwrap().meta.set_addr(&addr);
p.write().unwrap().meta.size = 1024;
let mut v = VecDeque::new();
v.push_back(p);
assert_eq!(v.len(), 1);
Blob::send_to(&r, &sender, &mut v).unwrap();
trace!("send_to");
assert_eq!(v.len(), 0);
let mut rv = Blob::recv_from(&r, &reader).unwrap();
trace!("recv_from");
assert_eq!(rv.len(), 1);
let rp = rv.pop_front().unwrap();
assert_eq!(rp.write().unwrap().meta.size, 1024);
r.recycle(rp);
}
#[cfg(all(feature = "ipv6", test))]
#[test]
pub fn blob_ipv6_send_recv() {
let reader = UdpSocket::bind("[::1]:0").expect("bind");
let addr = reader.local_addr().unwrap();
let sender = UdpSocket::bind("[::1]:0").expect("bind");
let r = BlobRecycler::default();
let p = r.allocate();
p.write().unwrap().meta.set_addr(&addr);
p.write().unwrap().meta.size = 1024;
let mut v = VecDeque::default();
v.push_back(p);
Blob::send_to(&r, &sender, &mut v).unwrap();
let mut rv = Blob::recv_from(&r, &reader).unwrap();
let rp = rv.pop_front().unwrap();
assert_eq!(rp.write().unwrap().meta.size, 1024);
r.recycle(rp);
}
#[test]
pub fn debug_trait() {
write!(io::sink(), "{:?}", Packet::default()).unwrap();
write!(io::sink(), "{:?}", Packets::default()).unwrap();
write!(io::sink(), "{:?}", Blob::default()).unwrap();
}
#[test]
pub fn blob_test() {
let mut b = Blob::default();
b.set_index(<u64>::max_value()).unwrap();
assert_eq!(b.get_index().unwrap(), <u64>::max_value());
b.data_mut()[0] = 1;
assert_eq!(b.data()[0], 1);
assert_eq!(b.get_index().unwrap(), <u64>::max_value());
}
}

src/payment_plan.rs (new file, +40 lines)
//! The `payment_plan` module provides a domain-specific language for payment plans. Users create Budget objects that
//! are given to an interpreter. The interpreter listens for `Witness` transactions,
//! which it uses to reduce the payment plan. When the plan is reduced to a
//! `Payment`, the payment is executed.
use chrono::prelude::*;
use signature::PublicKey;
/// The types of events a payment plan can process.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Witness {
/// The current time.
Timestamp(DateTime<Utc>),
/// A signature from `PublicKey`.
Signature(PublicKey),
}
/// Some amount of tokens that should be sent to the `to` `PublicKey`.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Payment {
/// Amount to be paid.
pub tokens: i64,
/// The `PublicKey` that `tokens` should be paid to.
pub to: PublicKey,
}
/// Interface to smart contracts.
pub trait PaymentPlan {
/// Return Payment if the payment plan requires no additional Witnesses.
fn final_payment(&self) -> Option<Payment>;
/// Return true if the plan spends exactly `spendable_tokens`.
fn verify(&self, spendable_tokens: i64) -> bool;
/// Apply a witness to the payment plan to see if the plan can be reduced.
/// If so, modify the plan in-place.
fn apply_witness(&mut self, witness: &Witness);
}
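As a quick illustration of how the trait is meant to be consumed (the type below is hypothetical and not part of this change set), a plan with no outstanding conditions could implement it as follows:

// Hypothetical example: an unconditional plan that pays out immediately.
struct ImmediatePayment(Payment);
impl PaymentPlan for ImmediatePayment {
    fn final_payment(&self) -> Option<Payment> {
        // No witnesses are required, so the payment is already final.
        Some(self.0.clone())
    }
    fn verify(&self, spendable_tokens: i64) -> bool {
        self.0.tokens == spendable_tokens
    }
    fn apply_witness(&mut self, _witness: &Witness) {
        // Nothing to reduce; the plan is already a single Payment.
    }
}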

src/record_stage.rs (new file, +206 lines)
//! The `record_stage` module provides an object for generating a Proof of History.
//! It records Transaction items on behalf of its users. It continuously generates
//! new hashes, only stopping to check if it has been sent a Transaction item. It
//! wraps each batch of Transactions in an Entry and sends it back. The Entry includes the
//! Transactions, the latest hash, and the number of hashes since the last transaction.
//! The resulting stream of entries represents ordered transactions in time.
use entry::Entry;
use hash::Hash;
use recorder::Recorder;
use std::sync::mpsc::{channel, Receiver, RecvError, Sender, TryRecvError};
use std::thread::{Builder, JoinHandle};
use std::time::{Duration, Instant};
use transaction::Transaction;
#[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))]
pub enum Signal {
Tick,
Transactions(Vec<Transaction>),
}
pub struct RecordStage {
pub entry_receiver: Receiver<Entry>,
pub thread_hdl: JoinHandle<()>,
}
impl RecordStage {
/// A background thread that will continue tagging received Transaction messages and
/// sending back Entry messages until either the receiver or sender channel is closed.
pub fn new(signal_receiver: Receiver<Signal>, start_hash: &Hash) -> Self {
let (entry_sender, entry_receiver) = channel();
let start_hash = start_hash.clone();
let thread_hdl = Builder::new()
.name("solana-record-stage".to_string())
.spawn(move || {
let mut recorder = Recorder::new(start_hash);
let _ = Self::process_signals(&mut recorder, &signal_receiver, &entry_sender);
})
.unwrap();
RecordStage {
entry_receiver,
thread_hdl,
}
}
/// Same as `RecordStage::new`, but will automatically produce entries every `tick_duration`.
pub fn new_with_clock(
signal_receiver: Receiver<Signal>,
start_hash: &Hash,
tick_duration: Duration,
) -> Self {
let (entry_sender, entry_receiver) = channel();
let start_hash = start_hash.clone();
let thread_hdl = Builder::new()
.name("solana-record-stage".to_string())
.spawn(move || {
let mut recorder = Recorder::new(start_hash);
let start_time = Instant::now();
loop {
if let Err(_) = Self::try_process_signals(
&mut recorder,
start_time,
tick_duration,
&signal_receiver,
&entry_sender,
) {
return;
}
recorder.hash();
}
})
.unwrap();
RecordStage {
entry_receiver,
thread_hdl,
}
}
fn process_signal(
signal: Signal,
recorder: &mut Recorder,
sender: &Sender<Entry>,
) -> Result<(), ()> {
let txs = if let Signal::Transactions(txs) = signal {
txs
} else {
vec![]
};
let entry = recorder.record(txs);
sender.send(entry).map_err(|_| ())
}
fn process_signals(
recorder: &mut Recorder,
receiver: &Receiver<Signal>,
sender: &Sender<Entry>,
) -> Result<(), ()> {
loop {
match receiver.recv() {
Ok(signal) => Self::process_signal(signal, recorder, sender)?,
Err(RecvError) => return Err(()),
}
}
}
fn try_process_signals(
recorder: &mut Recorder,
start_time: Instant,
tick_duration: Duration,
receiver: &Receiver<Signal>,
sender: &Sender<Entry>,
) -> Result<(), ()> {
loop {
if let Some(entry) = recorder.tick(start_time, tick_duration) {
sender.send(entry).or(Err(()))?;
}
match receiver.try_recv() {
Ok(signal) => Self::process_signal(signal, recorder, sender)?,
Err(TryRecvError::Empty) => return Ok(()),
Err(TryRecvError::Disconnected) => return Err(()),
};
}
}
}
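A minimal usage sketch (hypothetical, mirroring the tests below): feed the stage a Tick signal over a channel and read the resulting Entry back.

// Hypothetical usage sketch, assuming it runs where `channel`, `Hash`,
// `RecordStage` and `Signal` are in scope as in this module.
let (signal_sender, signal_receiver) = channel();
let record_stage = RecordStage::new(signal_receiver, &Hash::default());
signal_sender.send(Signal::Tick).unwrap();
let entry = record_stage.entry_receiver.recv().unwrap();
assert_eq!(entry.num_hashes, 0);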
#[cfg(test)]
mod tests {
use super::*;
use ledger::Block;
use signature::{KeyPair, KeyPairUtil};
use std::sync::mpsc::channel;
use std::thread::sleep;
#[test]
fn test_historian() {
let (tx_sender, tx_receiver) = channel();
let zero = Hash::default();
let record_stage = RecordStage::new(tx_receiver, &zero);
tx_sender.send(Signal::Tick).unwrap();
sleep(Duration::new(0, 1_000_000));
tx_sender.send(Signal::Tick).unwrap();
sleep(Duration::new(0, 1_000_000));
tx_sender.send(Signal::Tick).unwrap();
let entry0 = record_stage.entry_receiver.recv().unwrap();
let entry1 = record_stage.entry_receiver.recv().unwrap();
let entry2 = record_stage.entry_receiver.recv().unwrap();
assert_eq!(entry0.num_hashes, 0);
assert_eq!(entry1.num_hashes, 0);
assert_eq!(entry2.num_hashes, 0);
drop(tx_sender);
assert_eq!(record_stage.thread_hdl.join().unwrap(), ());
assert!([entry0, entry1, entry2].verify(&zero));
}
#[test]
fn test_historian_closed_sender() {
let (tx_sender, tx_receiver) = channel();
let zero = Hash::default();
let record_stage = RecordStage::new(tx_receiver, &zero);
drop(record_stage.entry_receiver);
tx_sender.send(Signal::Tick).unwrap();
assert_eq!(record_stage.thread_hdl.join().unwrap(), ());
}
#[test]
fn test_transactions() {
let (tx_sender, signal_receiver) = channel();
let zero = Hash::default();
let record_stage = RecordStage::new(signal_receiver, &zero);
let alice_keypair = KeyPair::new();
let bob_pubkey = KeyPair::new().pubkey();
let tx0 = Transaction::new(&alice_keypair, bob_pubkey, 1, zero);
let tx1 = Transaction::new(&alice_keypair, bob_pubkey, 2, zero);
tx_sender
.send(Signal::Transactions(vec![tx0, tx1]))
.unwrap();
drop(tx_sender);
let entries: Vec<_> = record_stage.entry_receiver.iter().collect();
assert_eq!(entries.len(), 1);
}
#[test]
fn test_clock() {
let (tx_sender, tx_receiver) = channel();
let zero = Hash::default();
let record_stage =
RecordStage::new_with_clock(tx_receiver, &zero, Duration::from_millis(20));
sleep(Duration::from_millis(900));
tx_sender.send(Signal::Tick).unwrap();
drop(tx_sender);
let entries: Vec<Entry> = record_stage.entry_receiver.iter().collect();
assert!(entries.len() > 1);
// Ensure the ID is not the seed.
assert_ne!(entries[0].id, zero);
}
}

src/recorder.rs (new file, +42 lines)
//! The `recorder` module provides an object for generating a Proof of History.
//! It records Transaction items on behalf of its users.
use entry::Entry;
use hash::{hash, Hash};
use std::time::{Duration, Instant};
use transaction::Transaction;
pub struct Recorder {
last_hash: Hash,
num_hashes: u64,
num_ticks: u32,
}
impl Recorder {
pub fn new(last_hash: Hash) -> Self {
Recorder {
last_hash,
num_hashes: 0,
num_ticks: 0,
}
}
pub fn hash(&mut self) {
self.last_hash = hash(&self.last_hash);
self.num_hashes += 1;
}
pub fn record(&mut self, transactions: Vec<Transaction>) -> Entry {
Entry::new_mut(&mut self.last_hash, &mut self.num_hashes, transactions)
}
pub fn tick(&mut self, start_time: Instant, tick_duration: Duration) -> Option<Entry> {
if start_time.elapsed() > tick_duration * (self.num_ticks + 1) {
// TODO: don't let this overflow u32
self.num_ticks += 1;
Some(self.record(vec![]))
} else {
None
}
}
}
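A minimal usage sketch (hypothetical, not part of the change set): hash continuously and let `tick` emit an empty Entry once the tick duration has elapsed.

// Hypothetical usage sketch; Hash, Instant and Duration are imported above.
let mut recorder = Recorder::new(Hash::default());
let start = Instant::now();
loop {
    recorder.hash();
    if let Some(entry) = recorder.tick(start, Duration::from_millis(10)) {
        // The entry carries however many hashes were generated since the last record.
        assert!(entry.num_hashes > 0);
        break;
    }
}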

src/replicate_stage.rs (new file, +55 lines)
//! The `replicate_stage` replicates transactions broadcast by the leader.
use bank::Bank;
use ledger;
use packet;
use result::Result;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread::{Builder, JoinHandle};
use std::time::Duration;
use streamer;
pub struct ReplicateStage {
pub thread_hdl: JoinHandle<()>,
}
impl ReplicateStage {
/// Process verified blobs, already in order
fn replicate_requests(
bank: &Arc<Bank>,
verified_receiver: &streamer::BlobReceiver,
blob_recycler: &packet::BlobRecycler,
) -> Result<()> {
let timer = Duration::new(1, 0);
let blobs = verified_receiver.recv_timeout(timer)?;
let entries = ledger::reconstruct_entries_from_blobs(&blobs);
let res = bank.process_entries(entries);
if res.is_err() {
error!("process_entries {} {:?}", blobs.len(), res);
}
res?;
for blob in blobs {
blob_recycler.recycle(blob);
}
Ok(())
}
pub fn new(
bank: Arc<Bank>,
exit: Arc<AtomicBool>,
window_receiver: streamer::BlobReceiver,
blob_recycler: packet::BlobRecycler,
) -> Self {
let thread_hdl = Builder::new()
.name("solana-replicate-stage".to_string())
.spawn(move || loop {
let e = Self::replicate_requests(&bank, &window_receiver, &blob_recycler);
if e.is_err() && exit.load(Ordering::Relaxed) {
break;
}
})
.unwrap();
ReplicateStage { thread_hdl }
}
}

src/request.rs (new file, +26 lines)
//! The `request` module defines the messages for the thin client.
use hash::Hash;
use signature::PublicKey;
#[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum Request {
GetBalance { key: PublicKey },
GetLastId,
GetTransactionCount,
}
impl Request {
/// Verify the request is valid.
pub fn verify(&self) -> bool {
true
}
}
#[derive(Serialize, Deserialize, Debug)]
pub enum Response {
Balance { key: PublicKey, val: Option<i64> },
LastId { id: Hash },
TransactionCount { transaction_count: u64 },
}
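Requests and responses travel over UDP as bincode, so a round trip looks roughly like this (hypothetical sketch, not part of the change set):

// Hypothetical sketch; bincode's serialize/deserialize are used the same way
// by thin_client and request_stage.
use bincode::{deserialize, serialize};
let req = Request::GetTransactionCount;
let bytes = serialize(&req).expect("serialize Request");
let decoded: Request = deserialize(&bytes).expect("deserialize Request");
assert!(decoded.verify());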

src/request_processor.rs (new file, +54 lines)
//! The `request_processor` processes thin client Request messages.
use bank::Bank;
use request::{Request, Response};
use std::net::SocketAddr;
use std::sync::Arc;
pub struct RequestProcessor {
bank: Arc<Bank>,
}
impl RequestProcessor {
/// Create a new RequestProcessor that wraps the given Bank.
pub fn new(bank: Arc<Bank>) -> Self {
RequestProcessor { bank }
}
/// Process Request items sent by clients.
fn process_request(
&self,
msg: Request,
rsp_addr: SocketAddr,
) -> Option<(Response, SocketAddr)> {
match msg {
Request::GetBalance { key } => {
let val = self.bank.get_balance(&key);
let rsp = (Response::Balance { key, val }, rsp_addr);
info!("Response::Balance {:?}", rsp);
Some(rsp)
}
Request::GetLastId => {
let id = self.bank.last_id();
let rsp = (Response::LastId { id }, rsp_addr);
info!("Response::LastId {:?}", rsp);
Some(rsp)
}
Request::GetTransactionCount => {
let transaction_count = self.bank.transaction_count() as u64;
let rsp = (Response::TransactionCount { transaction_count }, rsp_addr);
info!("Response::TransactionCount {:?}", rsp);
Some(rsp)
}
}
}
pub fn process_requests(
&self,
reqs: Vec<(Request, SocketAddr)>,
) -> Vec<(Response, SocketAddr)> {
reqs.into_iter()
.filter_map(|(req, rsp_addr)| self.process_request(req, rsp_addr))
.collect()
}
}
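A minimal usage sketch (hypothetical, not part of the change set), assuming `mint::Mint` is in scope as in the server tests:

// Hypothetical sketch: answer a batch of client requests against a fresh Bank.
let bank = Arc::new(Bank::new(&Mint::new(10_000)));
let processor = RequestProcessor::new(bank);
let rsp_addr: SocketAddr = "127.0.0.1:8000".parse().unwrap();
let responses = processor.process_requests(vec![(Request::GetLastId, rsp_addr)]);
assert_eq!(responses.len(), 1);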

src/request_stage.rs (new file, +116 lines)
//! The `request_stage` processes thin client Request messages.
use bincode::deserialize;
use packet;
use packet::SharedPackets;
use rayon::prelude::*;
use request::Request;
use request_processor::RequestProcessor;
use result::Result;
use std::net::SocketAddr;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver};
use std::sync::Arc;
use std::thread::{Builder, JoinHandle};
use std::time::Instant;
use streamer;
use timing;
pub struct RequestStage {
pub thread_hdl: JoinHandle<()>,
pub blob_receiver: streamer::BlobReceiver,
pub request_processor: Arc<RequestProcessor>,
}
impl RequestStage {
pub fn deserialize_requests(p: &packet::Packets) -> Vec<Option<(Request, SocketAddr)>> {
p.packets
.par_iter()
.map(|x| {
deserialize(&x.data[0..x.meta.size])
.map(|req| (req, x.meta.addr()))
.ok()
})
.collect()
}
pub fn process_request_packets(
request_processor: &RequestProcessor,
packet_receiver: &Receiver<SharedPackets>,
blob_sender: &streamer::BlobSender,
packet_recycler: &packet::PacketRecycler,
blob_recycler: &packet::BlobRecycler,
) -> Result<()> {
let (batch, batch_len) = streamer::recv_batch(packet_receiver)?;
debug!(
"@{:?} request_stage: processing: {}",
timing::timestamp(),
batch_len
);
let mut reqs_len = 0;
let proc_start = Instant::now();
for msgs in batch {
let reqs: Vec<_> = Self::deserialize_requests(&msgs.read().unwrap())
.into_iter()
.filter_map(|x| x)
.collect();
reqs_len += reqs.len();
let rsps = request_processor.process_requests(reqs);
let blobs = packet::to_blobs(rsps, blob_recycler)?;
if !blobs.is_empty() {
info!("process: sending blobs: {}", blobs.len());
//don't wake up the other side if there is nothing
blob_sender.send(blobs)?;
}
packet_recycler.recycle(msgs);
}
let total_time_s = timing::duration_as_s(&proc_start.elapsed());
let total_time_ms = timing::duration_as_ms(&proc_start.elapsed());
debug!(
"@{:?} done process batches: {} time: {:?}ms reqs: {} reqs/s: {}",
timing::timestamp(),
batch_len,
total_time_ms,
reqs_len,
(reqs_len as f32) / (total_time_s)
);
Ok(())
}
pub fn new(
request_processor: RequestProcessor,
exit: Arc<AtomicBool>,
packet_receiver: Receiver<SharedPackets>,
packet_recycler: packet::PacketRecycler,
blob_recycler: packet::BlobRecycler,
) -> Self {
let request_processor = Arc::new(request_processor);
let request_processor_ = request_processor.clone();
let (blob_sender, blob_receiver) = channel();
let thread_hdl = Builder::new()
.name("solana-request-stage".to_string())
.spawn(move || loop {
let e = Self::process_request_packets(
&request_processor_,
&packet_receiver,
&blob_sender,
&packet_recycler,
&blob_recycler,
);
if e.is_err() && exit.load(Ordering::Relaxed) {
    break;
}
})
.unwrap();
RequestStage {
thread_hdl,
blob_receiver,
request_processor,
}
}
}

src/result.rs (new file, +136 lines)
//! The `result` module exposes a Result type that propagates one of many different Error types.
use bank;
use bincode;
use serde_json;
use std;
use std::any::Any;
#[derive(Debug)]
pub enum Error {
IO(std::io::Error),
JSON(serde_json::Error),
AddrParse(std::net::AddrParseError),
JoinError(Box<Any + Send + 'static>),
RecvError(std::sync::mpsc::RecvError),
RecvTimeoutError(std::sync::mpsc::RecvTimeoutError),
Serialize(std::boxed::Box<bincode::ErrorKind>),
BankError(bank::BankError),
SendError,
Services,
CrdtTooSmall,
GenericError,
}
pub type Result<T> = std::result::Result<T, Error>;
impl std::convert::From<std::sync::mpsc::RecvError> for Error {
fn from(e: std::sync::mpsc::RecvError) -> Error {
Error::RecvError(e)
}
}
impl std::convert::From<std::sync::mpsc::RecvTimeoutError> for Error {
fn from(e: std::sync::mpsc::RecvTimeoutError) -> Error {
Error::RecvTimeoutError(e)
}
}
impl std::convert::From<bank::BankError> for Error {
fn from(e: bank::BankError) -> Error {
Error::BankError(e)
}
}
impl<T> std::convert::From<std::sync::mpsc::SendError<T>> for Error {
fn from(_e: std::sync::mpsc::SendError<T>) -> Error {
Error::SendError
}
}
impl std::convert::From<Box<Any + Send + 'static>> for Error {
fn from(e: Box<Any + Send + 'static>) -> Error {
Error::JoinError(e)
}
}
impl std::convert::From<std::io::Error> for Error {
fn from(e: std::io::Error) -> Error {
Error::IO(e)
}
}
impl std::convert::From<serde_json::Error> for Error {
fn from(e: serde_json::Error) -> Error {
Error::JSON(e)
}
}
impl std::convert::From<std::net::AddrParseError> for Error {
fn from(e: std::net::AddrParseError) -> Error {
Error::AddrParse(e)
}
}
impl std::convert::From<std::boxed::Box<bincode::ErrorKind>> for Error {
fn from(e: std::boxed::Box<bincode::ErrorKind>) -> Error {
Error::Serialize(e)
}
}
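Because `Error` implements `From` for each wrapped error type, the `?` operator converts them automatically inside the crate-level `Result`. A minimal sketch (hypothetical, mirroring the tests below):

// Hypothetical sketch: AddrParseError is converted into Error::AddrParse by `?`.
fn parse_addr(s: &str) -> Result<std::net::SocketAddr> {
    let addr = s.parse()?;
    Ok(addr)
}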
#[cfg(test)]
mod tests {
use result::Error;
use result::Result;
use serde_json;
use std::io;
use std::io::Write;
use std::net::SocketAddr;
use std::panic;
use std::sync::mpsc::channel;
use std::sync::mpsc::RecvError;
use std::sync::mpsc::RecvTimeoutError;
use std::thread;
fn addr_parse_error() -> Result<SocketAddr> {
let r = "12fdfasfsafsadfs".parse()?;
Ok(r)
}
fn join_error() -> Result<()> {
panic::set_hook(Box::new(|_info| {}));
let r = thread::spawn(|| panic!("hi")).join()?;
Ok(r)
}
fn json_error() -> Result<()> {
let r = serde_json::from_slice("=342{;;;;:}".as_bytes())?;
Ok(r)
}
fn send_error() -> Result<()> {
let (s, r) = channel();
drop(r);
s.send(())?;
Ok(())
}
#[test]
fn from_test() {
assert_matches!(addr_parse_error(), Err(Error::AddrParse(_)));
assert_matches!(Error::from(RecvError {}), Error::RecvError(_));
assert_matches!(
Error::from(RecvTimeoutError::Timeout),
Error::RecvTimeoutError(_)
);
assert_matches!(send_error(), Err(Error::SendError));
assert_matches!(join_error(), Err(Error::JoinError(_)));
let ioe = io::Error::new(io::ErrorKind::NotFound, "hi");
assert_matches!(Error::from(ioe), Error::IO(_));
}
#[test]
fn fmt_test() {
write!(io::sink(), "{:?}", addr_parse_error()).unwrap();
write!(io::sink(), "{:?}", Error::from(RecvError {})).unwrap();
write!(io::sink(), "{:?}", Error::from(RecvTimeoutError::Timeout)).unwrap();
write!(io::sink(), "{:?}", send_error()).unwrap();
write!(io::sink(), "{:?}", join_error()).unwrap();
write!(io::sink(), "{:?}", json_error()).unwrap();
write!(
io::sink(),
"{:?}",
Error::from(io::Error::new(io::ErrorKind::NotFound, "hi"))
).unwrap();
}
}

src/rpu.rs (new file, +77 lines)
//! The `rpu` module implements the Request Processing Unit, a
//! 3-stage transaction processing pipeline in software. It listens
//! for `Request` messages from clients and replies with `Response`
//! messages.
//!
//! ```text
//! .------.
//! | Bank |
//! `---+--`
//! |
//! .------------------|-------------------.
//! | RPU | |
//! | v |
//! .---------. | .-------. .---------. .---------. | .---------.
//! | Alice |--->| | | | | +---->| Alice |
//! `---------` | | Fetch | | Request | | Respond | | `---------`
//! | | Stage |->| Stage |->| Stage | |
//! .---------. | | | | | | | | .---------.
//! | Bob |--->| | | | | +---->| Bob |
//! `---------` | `-------` `---------` `---------` | `---------`
//! | |
//! | |
//! `--------------------------------------`
//! ```
use bank::Bank;
use packet;
use request_processor::RequestProcessor;
use request_stage::RequestStage;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread::JoinHandle;
use streamer;
pub struct Rpu {
pub thread_hdls: Vec<JoinHandle<()>>,
}
impl Rpu {
pub fn new(
bank: Arc<Bank>,
requests_socket: UdpSocket,
respond_socket: UdpSocket,
exit: Arc<AtomicBool>,
) -> Self {
let packet_recycler = packet::PacketRecycler::default();
let (packet_sender, packet_receiver) = channel();
let t_receiver = streamer::receiver(
requests_socket,
exit.clone(),
packet_recycler.clone(),
packet_sender,
);
let blob_recycler = packet::BlobRecycler::default();
let request_processor = RequestProcessor::new(bank.clone());
let request_stage = RequestStage::new(
request_processor,
exit.clone(),
packet_receiver,
packet_recycler.clone(),
blob_recycler.clone(),
);
let t_responder = streamer::responder(
respond_socket,
exit.clone(),
blob_recycler.clone(),
request_stage.blob_receiver,
);
let thread_hdls = vec![t_receiver, t_responder, request_stage.thread_hdl];
Rpu { thread_hdls }
}
}
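A minimal wiring sketch (hypothetical, not part of the change set), assuming `mint::Mint` and `Ordering` are in scope:

// Hypothetical sketch: stand up an RPU against a fresh Bank, then shut it down.
let bank = Arc::new(Bank::new(&Mint::new(10_000)));
let exit = Arc::new(AtomicBool::new(false));
let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let respond_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let rpu = Rpu::new(bank, requests_socket, respond_socket, exit.clone());
exit.store(true, Ordering::Relaxed);
for t in rpu.thread_hdls {
    t.join().unwrap();
}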

src/server.rs (new file, +182 lines)
//! The `server` module hosts all the server microservices.
use bank::Bank;
use crdt::{Crdt, ReplicatedData};
use ncp::Ncp;
use packet;
use rpu::Rpu;
use std::io::Write;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, RwLock};
use std::thread::JoinHandle;
use std::time::Duration;
use streamer;
use tpu::Tpu;
use tvu::Tvu;
pub struct Server {
pub thread_hdls: Vec<JoinHandle<()>>,
}
impl Server {
/// Create a server instance acting as a leader.
///
/// ```text
/// .---------------------.
/// | Leader |
/// | |
/// .--------. | .-----. |
/// | |---->| | |
/// | Client | | | RPU | |
/// | |<----| | |
/// `----+---` | `-----` |
/// | | ^ |
/// | | | |
/// | | .--+---. |
/// | | | Bank | |
/// | | `------` |
/// | | ^ |
/// | | | | .------------.
/// | | .--+--. .-----. | | |
/// `-------->| TPU +-->| NCP +------>| Validators |
/// | `-----` `-----` | | |
/// | | `------------`
/// `---------------------`
/// ```
pub fn new_leader<W: Write + Send + 'static>(
bank: Bank,
tick_duration: Option<Duration>,
me: ReplicatedData,
requests_socket: UdpSocket,
transactions_socket: UdpSocket,
broadcast_socket: UdpSocket,
respond_socket: UdpSocket,
gossip_socket: UdpSocket,
exit: Arc<AtomicBool>,
writer: W,
) -> Self {
let bank = Arc::new(bank);
let mut thread_hdls = vec![];
let rpu = Rpu::new(bank.clone(), requests_socket, respond_socket, exit.clone());
thread_hdls.extend(rpu.thread_hdls);
let blob_recycler = packet::BlobRecycler::default();
let tpu = Tpu::new(
bank.clone(),
tick_duration,
transactions_socket,
blob_recycler.clone(),
exit.clone(),
writer,
);
thread_hdls.extend(tpu.thread_hdls);
let crdt = Arc::new(RwLock::new(Crdt::new(me)));
let window = streamer::default_window();
let gossip_send_socket = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
let ncp = Ncp::new(
crdt.clone(),
window.clone(),
gossip_socket,
gossip_send_socket,
exit.clone(),
).expect("Ncp::new");
thread_hdls.extend(ncp.thread_hdls);
let t_broadcast = streamer::broadcaster(
broadcast_socket,
exit.clone(),
crdt,
window,
blob_recycler.clone(),
tpu.blob_receiver,
);
thread_hdls.extend(vec![t_broadcast]);
Server { thread_hdls }
}
/// Create a server instance acting as a validator.
///
/// ```text
/// .-------------------------------.
/// | Validator |
/// | |
/// .--------. | .-----. |
/// | |-------------->| | |
/// | Client | | | RPU | |
/// | |<--------------| | |
/// `--------` | `-----` |
/// | ^ |
/// | | |
/// | .--+---. |
/// | | Bank | |
/// | `------` |
/// | ^ |
/// .--------. | | | .------------.
/// | | | .-----. .--+--. .-----. | | |
/// | Leader |--->| NCP +-->| TVU +-->| NCP +------>| Validators |
/// | | | `-----` `-----` `-----` | | |
/// `--------` | | `------------`
/// `-------------------------------`
/// ```
pub fn new_validator(
bank: Bank,
me: ReplicatedData,
requests_socket: UdpSocket,
respond_socket: UdpSocket,
replicate_socket: UdpSocket,
gossip_socket: UdpSocket,
repair_socket: UdpSocket,
leader_repl_data: ReplicatedData,
exit: Arc<AtomicBool>,
) -> Self {
let bank = Arc::new(bank);
let mut thread_hdls = vec![];
let rpu = Rpu::new(bank.clone(), requests_socket, respond_socket, exit.clone());
thread_hdls.extend(rpu.thread_hdls);
let tvu = Tvu::new(
bank.clone(),
me,
gossip_socket,
replicate_socket,
repair_socket,
leader_repl_data,
exit.clone(),
);
thread_hdls.extend(tvu.thread_hdls);
Server { thread_hdls }
}
}
#[cfg(test)]
mod tests {
use bank::Bank;
use crdt::TestNode;
use mint::Mint;
use server::Server;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
#[test]
fn validator_exit() {
let tn = TestNode::new();
let alice = Mint::new(10_000);
let bank = Bank::new(&alice);
let exit = Arc::new(AtomicBool::new(false));
let v = Server::new_validator(
bank,
tn.data.clone(),
tn.sockets.requests,
tn.sockets.respond,
tn.sockets.replicate,
tn.sockets.gossip,
tn.sockets.repair,
tn.data,
exit.clone(),
);
exit.store(true, Ordering::Relaxed);
for t in v.thread_hdls {
t.join().unwrap();
}
}
}

src/signature.rs (new file, +139 lines)
//! The `signature` module provides functionality for public and private keys.
use generic_array::typenum::{U32, U64};
use generic_array::GenericArray;
use rand::{ChaChaRng, Rng, SeedableRng};
use rayon::prelude::*;
use ring::error::Unspecified;
use ring::rand::SecureRandom;
use ring::signature::Ed25519KeyPair;
use ring::{rand, signature};
use std::cell::RefCell;
use untrusted;
pub type KeyPair = Ed25519KeyPair;
pub type PublicKey = GenericArray<u8, U32>;
pub type Signature = GenericArray<u8, U64>;
pub trait KeyPairUtil {
fn new() -> Self;
fn pubkey(&self) -> PublicKey;
}
impl KeyPairUtil for Ed25519KeyPair {
/// Return a new ED25519 keypair
fn new() -> Self {
let rng = rand::SystemRandom::new();
let pkcs8_bytes = signature::Ed25519KeyPair::generate_pkcs8(&rng)
.expect("generate_pkcs8 in signature pb fn new");
signature::Ed25519KeyPair::from_pkcs8(untrusted::Input::from(&pkcs8_bytes))
.expect("from_pcks8 in signature pb fn new")
}
/// Return the public key for the given keypair
fn pubkey(&self) -> PublicKey {
GenericArray::clone_from_slice(self.public_key_bytes())
}
}
pub trait SignatureUtil {
fn verify(&self, peer_public_key_bytes: &[u8], msg_bytes: &[u8]) -> bool;
}
impl SignatureUtil for GenericArray<u8, U64> {
fn verify(&self, peer_public_key_bytes: &[u8], msg_bytes: &[u8]) -> bool {
let peer_public_key = untrusted::Input::from(peer_public_key_bytes);
let msg = untrusted::Input::from(msg_bytes);
let sig = untrusted::Input::from(self);
signature::verify(&signature::ED25519, peer_public_key, msg, sig).is_ok()
}
}
pub struct GenKeys {
// This is necessary because the rng needs to mutate its state to remain
// deterministic, and the `SecureRandom::fill` method takes an immutable reference to self
generator: RefCell<ChaChaRng>,
}
impl GenKeys {
pub fn new(seed: &[u8]) -> GenKeys {
let seed32: Vec<_> = seed.iter().map(|&x| x as u32).collect();
let rng = ChaChaRng::from_seed(&seed32);
GenKeys {
generator: RefCell::new(rng),
}
}
pub fn new_key(&self) -> Vec<u8> {
KeyPair::generate_pkcs8(self).unwrap().to_vec()
}
pub fn gen_n_seeds(&self, n: i64) -> Vec<[u8; 16]> {
let mut rng = self.generator.borrow_mut();
(0..n).map(|_| rng.gen()).collect()
}
pub fn gen_n_keypairs(&self, n: i64) -> Vec<KeyPair> {
self.gen_n_seeds(n)
.into_par_iter()
.map(|seed| {
let pkcs8 = GenKeys::new(&seed).new_key();
KeyPair::from_pkcs8(untrusted::Input::from(&pkcs8)).unwrap()
})
.collect()
}
}
impl SecureRandom for GenKeys {
fn fill(&self, dest: &mut [u8]) -> Result<(), Unspecified> {
let mut rng = self.generator.borrow_mut();
rng.fill_bytes(dest);
Ok(())
}
}
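Key generation from a seed is fully deterministic, which is what the demo and test tooling rely on. A minimal sketch (hypothetical, not part of the change set):

// Hypothetical sketch: the same seed always yields the same keypairs.
let keys = GenKeys::new(&[1, 2, 3, 4]);
let pairs = keys.gen_n_keypairs(2);
assert_eq!(pairs.len(), 2);
let pubkey: PublicKey = pairs[0].pubkey();
assert_eq!(pubkey, GenKeys::new(&[1, 2, 3, 4]).gen_n_keypairs(2)[0].pubkey());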
#[cfg(test)]
mod tests {
use super::*;
use std::collections::HashSet;
#[test]
fn test_new_key_is_deterministic() {
let seed = [1, 2, 3, 4];
let rng0 = GenKeys::new(&seed);
let rng1 = GenKeys::new(&seed);
for _ in 0..100 {
assert_eq!(rng0.new_key(), rng1.new_key());
}
}
fn gen_n_pubkeys(seed: &[u8], n: i64) -> HashSet<PublicKey> {
GenKeys::new(&seed)
.gen_n_keypairs(n)
.into_iter()
.map(|x| x.pubkey())
.collect()
}
#[test]
fn test_gen_n_pubkeys_deterministic() {
let seed = [1, 2, 3, 4];
assert_eq!(gen_n_pubkeys(&seed, 50), gen_n_pubkeys(&seed, 50));
}
}
#[cfg(all(feature = "unstable", test))]
mod bench {
extern crate test;
use self::test::Bencher;
use super::*;
#[bench]
fn bench_gen_keys(b: &mut Bencher) {
let seed: &[_] = &[1, 2, 3, 4];
let rnd = GenKeys::new(seed);
b.iter(|| rnd.gen_n_keypairs(1000));
}
}

src/sigverify.rs (new file, +231 lines)
//! The `sigverify` module provides digital signature verification functions.
//! By default, signatures are verified in parallel using all available CPU
//! cores. When `--features=cuda` is enabled, signature verification is
//! offloaded to the GPU.
//!
use counter::Counter;
use packet::{Packet, SharedPackets};
use std::mem::size_of;
use std::sync::atomic::AtomicUsize;
use std::time::Instant;
use transaction::{PUB_KEY_OFFSET, SIGNED_DATA_OFFSET, SIG_OFFSET};
pub const TX_OFFSET: usize = 0;
#[cfg(feature = "cuda")]
#[repr(C)]
struct Elems {
elems: *const Packet,
num: u32,
}
#[cfg(feature = "cuda")]
#[link(name = "cuda_verify_ed25519")]
extern "C" {
fn ed25519_verify_many(
vecs: *const Elems,
num: u32, //number of vecs
message_size: u32, //size of each element inside the elems field of the vec
public_key_offset: u32,
signature_offset: u32,
signed_message_offset: u32,
signed_message_len_offset: u32,
out: *mut u8, //combined length of all the items in vecs
) -> u32;
}
#[cfg(not(feature = "cuda"))]
fn verify_packet(packet: &Packet) -> u8 {
use ring::signature;
use signature::{PublicKey, Signature};
use untrusted;
let msg_start = TX_OFFSET + SIGNED_DATA_OFFSET;
let sig_start = TX_OFFSET + SIG_OFFSET;
let sig_end = sig_start + size_of::<Signature>();
let pub_key_start = TX_OFFSET + PUB_KEY_OFFSET;
let pub_key_end = pub_key_start + size_of::<PublicKey>();
if packet.meta.size <= msg_start {
return 0;
}
let msg_end = packet.meta.size;
signature::verify(
&signature::ED25519,
untrusted::Input::from(&packet.data[pub_key_start..pub_key_end]),
untrusted::Input::from(&packet.data[msg_start..msg_end]),
untrusted::Input::from(&packet.data[sig_start..sig_end]),
).is_ok() as u8
}
fn batch_size(batches: &Vec<SharedPackets>) -> usize {
batches
.iter()
.map(|p| p.read().unwrap().packets.len())
.sum()
}
#[cfg(not(feature = "cuda"))]
pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
use rayon::prelude::*;
static mut COUNTER: Counter = create_counter!("ed25519_verify", 1);
let start = Instant::now();
let count = batch_size(batches);
info!("CPU ECDSA for {}", batch_size(batches));
let rv = batches
.into_par_iter()
.map(|p| {
p.read()
.expect("'p' read lock in ed25519_verify")
.packets
.par_iter()
.map(verify_packet)
.collect()
})
.collect();
inc_counter!(COUNTER, count, start);
rv
}
#[cfg(feature = "cuda")]
pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
use packet::PACKET_DATA_SIZE;
static mut COUNTER: Counter = create_counter!("ed25519_verify_cuda", 1);
let start = Instant::now();
let count = batch_size(batches);
info!("CUDA ECDSA for {}", batch_size(batches));
let mut out = Vec::new();
let mut elems = Vec::new();
let mut locks = Vec::new();
let mut rvs = Vec::new();
for packets in batches {
locks.push(
packets
.read()
.expect("'packets' read lock in pub fn ed25519_verify"),
);
}
let mut num = 0;
for p in locks {
elems.push(Elems {
elems: p.packets.as_ptr(),
num: p.packets.len() as u32,
});
let mut v = Vec::new();
v.resize(p.packets.len(), 0);
rvs.push(v);
num += p.packets.len();
}
out.resize(num, 0);
trace!("Starting verify num packets: {}", num);
trace!("elem len: {}", elems.len() as u32);
trace!("packet sizeof: {}", size_of::<Packet>() as u32);
trace!("pub key: {}", (TX_OFFSET + PUB_KEY_OFFSET) as u32);
trace!("sig offset: {}", (TX_OFFSET + SIG_OFFSET) as u32);
trace!("sign data: {}", (TX_OFFSET + SIGNED_DATA_OFFSET) as u32);
trace!("len offset: {}", PACKET_DATA_SIZE as u32);
unsafe {
let res = ed25519_verify_many(
elems.as_ptr(),
elems.len() as u32,
size_of::<Packet>() as u32,
(TX_OFFSET + PUB_KEY_OFFSET) as u32,
(TX_OFFSET + SIG_OFFSET) as u32,
(TX_OFFSET + SIGNED_DATA_OFFSET) as u32,
PACKET_DATA_SIZE as u32,
out.as_mut_ptr(),
);
if res != 0 {
trace!("RETURN!!!: {}", res);
}
}
trace!("done verify");
let mut num = 0;
for vs in rvs.iter_mut() {
for mut v in vs.iter_mut() {
*v = out[num];
if *v != 0 {
trace!("VERIFIED PACKET!!!!!");
}
num += 1;
}
}
inc_counter!(COUNTER, count, start);
rvs
}
#[cfg(test)]
mod tests {
use bincode::serialize;
use packet::{Packet, Packets, SharedPackets};
use sigverify;
use std::sync::RwLock;
use transaction::Transaction;
use transaction::{memfind, test_tx};
#[test]
fn test_layout() {
let tx = test_tx();
let tx_bytes = serialize(&tx).unwrap();
let packet = serialize(&tx).unwrap();
assert_matches!(memfind(&packet, &tx_bytes), Some(sigverify::TX_OFFSET));
assert_matches!(memfind(&packet, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), None);
}
fn make_packet_from_transaction(tx: Transaction) -> Packet {
let tx_bytes = serialize(&tx).unwrap();
let mut packet = Packet::default();
packet.meta.size = tx_bytes.len();
packet.data[..packet.meta.size].copy_from_slice(&tx_bytes);
return packet;
}
fn test_verify_n(n: usize, modify_data: bool) {
let tx = test_tx();
let mut packet = make_packet_from_transaction(tx);
// jumble some data to test failure
if modify_data {
packet.data[20] = 10;
}
// generate packet vector
let mut packets = Packets::default();
packets.packets = Vec::new();
for _ in 0..n {
packets.packets.push(packet.clone());
}
let shared_packets = SharedPackets::new(RwLock::new(packets));
let batches = vec![shared_packets.clone(), shared_packets.clone()];
// verify packets
let ans = sigverify::ed25519_verify(&batches);
// check result
let ref_ans = if modify_data { 0u8 } else { 1u8 };
assert_eq!(ans, vec![vec![ref_ans; n], vec![ref_ans; n]]);
}
#[test]
fn test_verify_zero() {
test_verify_n(0, false);
}
#[test]
fn test_verify_one() {
test_verify_n(1, false);
}
#[test]
fn test_verify_seventy_one() {
test_verify_n(71, false);
}
#[test]
fn test_verify_fail() {
test_verify_n(5, true);
}
}

src/sigverify_stage.rs (new file, +101 lines)
//! The `sigverify_stage` implements the signature verification stage of the TPU. It
//! receives a list of lists of packets and outputs the same list, but tags each
//! top-level list with a list of per-packet flags (one byte per packet), telling the next stage whether the
//! signature in that packet is valid. It assumes each packet contains one
//! transaction. All processing is done on the CPU by default and on a GPU
//! if the `cuda` feature is enabled with `--features=cuda`.
use packet::SharedPackets;
use rand::{thread_rng, Rng};
use result::Result;
use sigverify;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::{Arc, Mutex};
use std::thread::{spawn, JoinHandle};
use std::time::Instant;
use streamer;
use timing;
pub struct SigVerifyStage {
pub verified_receiver: Receiver<Vec<(SharedPackets, Vec<u8>)>>,
pub thread_hdls: Vec<JoinHandle<()>>,
}
impl SigVerifyStage {
pub fn new(exit: Arc<AtomicBool>, packet_receiver: Receiver<SharedPackets>) -> Self {
let (verified_sender, verified_receiver) = channel();
let thread_hdls = Self::verifier_services(exit, packet_receiver, verified_sender);
SigVerifyStage {
thread_hdls,
verified_receiver,
}
}
fn verify_batch(batch: Vec<SharedPackets>) -> Vec<(SharedPackets, Vec<u8>)> {
let r = sigverify::ed25519_verify(&batch);
batch.into_iter().zip(r).collect()
}
fn verifier(
recvr: &Arc<Mutex<streamer::PacketReceiver>>,
sendr: &Arc<Mutex<Sender<Vec<(SharedPackets, Vec<u8>)>>>>,
) -> Result<()> {
let (batch, len) =
streamer::recv_batch(&recvr.lock().expect("'recvr' lock in fn verifier"))?;
let now = Instant::now();
let batch_len = batch.len();
let rand_id = thread_rng().gen_range(0, 100);
info!(
"@{:?} verifier: verifying: {} id: {}",
timing::timestamp(),
batch.len(),
rand_id
);
let verified_batch = Self::verify_batch(batch);
sendr
.lock()
.expect("lock in fn verify_batch in tpu")
.send(verified_batch)?;
let total_time_ms = timing::duration_as_ms(&now.elapsed());
let total_time_s = timing::duration_as_s(&now.elapsed());
info!(
"@{:?} verifier: done. batches: {} total verify time: {:?} id: {} verified: {} v/s {}",
timing::timestamp(),
batch_len,
total_time_ms,
rand_id,
len,
(len as f32 / total_time_s)
);
Ok(())
}
fn verifier_service(
exit: Arc<AtomicBool>,
packet_receiver: Arc<Mutex<streamer::PacketReceiver>>,
verified_sender: Arc<Mutex<Sender<Vec<(SharedPackets, Vec<u8>)>>>>,
) -> JoinHandle<()> {
spawn(move || loop {
let e = Self::verifier(&packet_receiver.clone(), &verified_sender.clone());
if e.is_err() && exit.load(Ordering::Relaxed) {
break;
}
})
}
fn verifier_services(
exit: Arc<AtomicBool>,
packet_receiver: streamer::PacketReceiver,
verified_sender: Sender<Vec<(SharedPackets, Vec<u8>)>>,
) -> Vec<JoinHandle<()>> {
let sender = Arc::new(Mutex::new(verified_sender));
let receiver = Arc::new(Mutex::new(packet_receiver));
(0..4)
.map(|_| Self::verifier_service(exit.clone(), receiver.clone(), sender.clone()))
.collect()
}
}

src/streamer.rs (new file, +853 lines)
//! The `streamer` module defines a set of services for efficiently pulling data from UDP sockets.
//!
use crdt::Crdt;
#[cfg(feature = "erasure")]
use erasure;
use packet::{Blob, BlobRecycler, PacketRecycler, SharedBlob, SharedPackets, BLOB_SIZE};
use result::{Error, Result};
use std::collections::VecDeque;
use std::net::{SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc;
use std::sync::{Arc, RwLock};
use std::thread::{Builder, JoinHandle};
use std::time::Duration;
pub const WINDOW_SIZE: usize = 2 * 1024;
pub type PacketReceiver = mpsc::Receiver<SharedPackets>;
pub type PacketSender = mpsc::Sender<SharedPackets>;
pub type BlobSender = mpsc::Sender<VecDeque<SharedBlob>>;
pub type BlobReceiver = mpsc::Receiver<VecDeque<SharedBlob>>;
fn recv_loop(
sock: &UdpSocket,
exit: &Arc<AtomicBool>,
re: &PacketRecycler,
channel: &PacketSender,
) -> Result<()> {
loop {
let msgs = re.allocate();
let msgs_ = msgs.clone();
loop {
match msgs.write()
.expect("write lock in fn recv_loop")
.recv_from(sock)
{
Ok(()) => {
channel.send(msgs_)?;
break;
}
Err(_) => {
if exit.load(Ordering::Relaxed) {
re.recycle(msgs_);
return Ok(());
}
}
}
}
}
}
pub fn receiver(
sock: UdpSocket,
exit: Arc<AtomicBool>,
recycler: PacketRecycler,
packet_sender: PacketSender,
) -> JoinHandle<()> {
sock.set_read_timeout(Some(Duration::new(1, 0)))
    .expect("streamer::receiver set_read_timeout error");
Builder::new()
.name("solana-receiver".to_string())
.spawn(move || {
let _ = recv_loop(&sock, &exit, &recycler, &packet_sender);
()
})
.unwrap()
}
fn recv_send(sock: &UdpSocket, recycler: &BlobRecycler, r: &BlobReceiver) -> Result<()> {
let timer = Duration::new(1, 0);
let mut msgs = r.recv_timeout(timer)?;
Blob::send_to(recycler, sock, &mut msgs)?;
Ok(())
}
pub fn recv_batch(recvr: &PacketReceiver) -> Result<(Vec<SharedPackets>, usize)> {
let timer = Duration::new(1, 0);
let msgs = recvr.recv_timeout(timer)?;
trace!("got msgs");
let mut len = msgs.read().unwrap().packets.len();
let mut batch = vec![msgs];
while let Ok(more) = recvr.try_recv() {
trace!("got more msgs");
len += more.read().unwrap().packets.len();
batch.push(more);
if len > 100_000 {
break;
}
}
debug!("batch len {}", batch.len());
Ok((batch, len))
}
pub fn responder(
sock: UdpSocket,
exit: Arc<AtomicBool>,
recycler: BlobRecycler,
r: BlobReceiver,
) -> JoinHandle<()> {
Builder::new()
.name("solana-responder".to_string())
.spawn(move || loop {
if recv_send(&sock, &recycler, &r).is_err() && exit.load(Ordering::Relaxed) {
break;
}
})
.unwrap()
}
//TODO: we would need to add block authentication before we create the
//window.
fn recv_blobs(recycler: &BlobRecycler, sock: &UdpSocket, s: &BlobSender) -> Result<()> {
trace!("receiving on {}", sock.local_addr().unwrap());
let dq = Blob::recv_from(recycler, sock)?;
if !dq.is_empty() {
s.send(dq)?;
}
Ok(())
}
pub fn blob_receiver(
exit: Arc<AtomicBool>,
recycler: BlobRecycler,
sock: UdpSocket,
s: BlobSender,
) -> Result<JoinHandle<()>> {
//DOCUMENTED SIDE-EFFECT
//1 second timeout on socket read
let timer = Duration::new(1, 0);
sock.set_read_timeout(Some(timer))?;
let t = Builder::new()
.name("solana-blob_receiver".to_string())
.spawn(move || loop {
if exit.load(Ordering::Relaxed) {
break;
}
let _ = recv_blobs(&recycler, &sock, &s);
})
.unwrap();
Ok(t)
}
fn find_next_missing(
locked_window: &Arc<RwLock<Vec<Option<SharedBlob>>>>,
crdt: &Arc<RwLock<Crdt>>,
consumed: &mut usize,
received: &mut usize,
) -> Result<Vec<(SocketAddr, Vec<u8>)>> {
if *received <= *consumed {
return Err(Error::GenericError);
}
let window = locked_window.read().unwrap();
let reqs: Vec<_> = (*consumed..*received)
.filter_map(|pix| {
let i = pix % WINDOW_SIZE;
if let &None = &window[i] {
let val = crdt.read().unwrap().window_index_request(pix as u64);
if let Ok((to, req)) = val {
return Some((to, req));
}
}
None
})
.collect();
Ok(reqs)
}
fn repair_window(
locked_window: &Arc<RwLock<Vec<Option<SharedBlob>>>>,
crdt: &Arc<RwLock<Crdt>>,
_recycler: &BlobRecycler,
last: &mut usize,
times: &mut usize,
consumed: &mut usize,
received: &mut usize,
) -> Result<()> {
#[cfg(feature = "erasure")]
{
if erasure::recover(
_recycler,
&mut locked_window.write().unwrap(),
*consumed,
*received,
).is_err()
{
trace!("erasure::recover failed");
}
}
//exponential backoff
if *last != *consumed {
*times = 0;
}
*last = *consumed;
*times += 1;
//only send repair requests when `times` is a power of two (1, 2, 4, 8, ...); otherwise return Ok, which gives exponential backoff
if *times & (*times - 1) != 0 {
trace!("repair_window counter {} {}", *times, *consumed);
return Ok(());
}
let reqs = find_next_missing(locked_window, crdt, consumed, received)?;
let sock = UdpSocket::bind("0.0.0.0:0")?;
for (to, req) in reqs {
//todo cache socket
info!("repair_window request {} {} {}", *consumed, *received, to);
assert!(req.len() < BLOB_SIZE);
sock.send_to(&req, to)?;
}
Ok(())
}
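The backoff check above boils down to a power-of-two test on `times`. A standalone sketch of the predicate (hypothetical, not part of the change set):

// Hypothetical sketch: repair requests are only sent when `times` is a power
// of two (1, 2, 4, 8, ...), so retries back off exponentially. `times` is
// always >= 1 when this is evaluated, because it is incremented first.
fn is_repair_attempt(times: usize) -> bool {
    times & (times - 1) == 0
}
// is_repair_attempt(8) == true, is_repair_attempt(9) == false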
fn recv_window(
locked_window: &Arc<RwLock<Vec<Option<SharedBlob>>>>,
crdt: &Arc<RwLock<Crdt>>,
recycler: &BlobRecycler,
consumed: &mut usize,
received: &mut usize,
r: &BlobReceiver,
s: &BlobSender,
retransmit: &BlobSender,
) -> Result<()> {
let timer = Duration::from_millis(200);
let mut dq = r.recv_timeout(timer)?;
let leader_id = crdt.read()
.expect("'crdt' read lock in fn recv_window")
.leader_data()
.id;
while let Ok(mut nq) = r.try_recv() {
dq.append(&mut nq)
}
{
//retransmit all leader blocks
let mut retransmitq = VecDeque::new();
for b in &dq {
let p = b.read().expect("'b' read lock in fn recv_window");
//TODO this check isn't safe against adversarial packets
//we need to maintain a sequence window
trace!(
"idx: {} addr: {:?} id: {:?} leader: {:?}",
p.get_index().expect("get_index in fn recv_window"),
p.get_id().expect("get_id in trace! fn recv_window"),
p.meta.addr(),
leader_id
);
if p.get_id().expect("get_id in fn recv_window") == leader_id {
//TODO
//need to copy the retransmitted blob
//otherwise we get into races with which thread
//should do the recycling
//
//a better abstraction would be to recycle when the blob
//is dropped via a weakref to the recycler
let nv = recycler.allocate();
{
let mut mnv = nv.write().expect("recycler write lock in fn recv_window");
let sz = p.meta.size;
mnv.meta.size = sz;
mnv.data[..sz].copy_from_slice(&p.data[..sz]);
}
retransmitq.push_back(nv);
}
}
if !retransmitq.is_empty() {
retransmit.send(retransmitq)?;
}
}
//send a contiguous set of blocks
let mut contq = VecDeque::new();
while let Some(b) = dq.pop_front() {
let b_ = b.clone();
let p = b.write().expect("'b' write lock in fn recv_window");
let pix = p.get_index()? as usize;
if pix > *received {
*received = pix;
}
// Got a blob which has already been consumed, skip it
// probably from a repair window request
if pix < *consumed {
debug!(
"received: {} but older than consumed: {} skipping..",
pix, *consumed
);
continue;
}
let w = pix % WINDOW_SIZE;
//TODO: after the blocks are authenticated
//if we get different blocks at the same index
//that is a network failure/attack
trace!("window w: {} size: {}", w, p.meta.size);
drop(p);
{
let mut window = locked_window.write().unwrap();
if window[w].is_none() {
window[w] = Some(b_);
} else if let Some(cblob) = &window[w] {
if cblob.read().unwrap().get_index().unwrap() != pix as u64 {
warn!("overrun blob at index {:}", w);
} else {
debug!("duplicate blob at index {:}", w);
}
}
loop {
let k = *consumed % WINDOW_SIZE;
trace!("k: {} consumed: {}", k, *consumed);
if window[k].is_none() {
break;
}
let mut is_coding = false;
if let &Some(ref cblob) = &window[k] {
if cblob
.read()
.expect("blob read lock for flags streamer::window")
.is_coding()
{
is_coding = true;
}
}
if !is_coding {
contq.push_back(window[k].clone().expect("clone in fn recv_window"));
*consumed += 1;
#[cfg(not(feature = "erasure"))]
{
window[k] = None;
}
} else {
#[cfg(feature = "erasure")]
{
let block_start = *consumed - (*consumed % erasure::NUM_CODED);
let coding_end = block_start + erasure::NUM_CODED;
// We've received all this block's data blobs, go and null out the window now
for j in block_start..coding_end {
window[j % WINDOW_SIZE] = None;
}
*consumed += erasure::MAX_MISSING;
debug!(
"skipping processing coding blob k: {} consumed: {}",
k, *consumed
);
}
}
}
}
}
print_window(locked_window, *consumed);
trace!("sending contq.len: {}", contq.len());
if !contq.is_empty() {
trace!("sending contq.len: {}", contq.len());
s.send(contq)?;
}
Ok(())
}
fn print_window(locked_window: &Arc<RwLock<Vec<Option<SharedBlob>>>>, consumed: usize) {
{
let buf: Vec<_> = locked_window
.read()
.unwrap()
.iter()
.enumerate()
.map(|(i, v)| {
if i == (consumed % WINDOW_SIZE) {
"_"
} else if v.is_none() {
"0"
} else if let &Some(ref cblob) = &v {
    if cblob.read().unwrap().is_coding() {
        "C"
    } else {
        "1"
    }
} else {
    "0"
}
})
.collect();
debug!("WINDOW ({}): {}", consumed, buf.join(""));
}
}
pub fn default_window() -> Arc<RwLock<Vec<Option<SharedBlob>>>> {
Arc::new(RwLock::new(vec![None; WINDOW_SIZE]))
}
pub fn window(
exit: Arc<AtomicBool>,
crdt: Arc<RwLock<Crdt>>,
window: Arc<RwLock<Vec<Option<SharedBlob>>>>,
recycler: BlobRecycler,
r: BlobReceiver,
s: BlobSender,
retransmit: BlobSender,
) -> JoinHandle<()> {
Builder::new()
.name("solana-window".to_string())
.spawn(move || {
let mut consumed = 0;
let mut received = 0;
let mut last = 0;
let mut times = 0;
loop {
if exit.load(Ordering::Relaxed) {
break;
}
let _ = recv_window(
&window,
&crdt,
&recycler,
&mut consumed,
&mut received,
&r,
&s,
&retransmit,
);
let _ = repair_window(
&window,
&crdt,
&recycler,
&mut last,
&mut times,
&mut consumed,
&mut received,
);
}
})
.unwrap()
}
fn broadcast(
crdt: &Arc<RwLock<Crdt>>,
window: &Arc<RwLock<Vec<Option<SharedBlob>>>>,
recycler: &BlobRecycler,
r: &BlobReceiver,
sock: &UdpSocket,
transmit_index: &mut u64,
receive_index: &mut u64,
) -> Result<()> {
let timer = Duration::new(1, 0);
let mut dq = r.recv_timeout(timer)?;
while let Ok(mut nq) = r.try_recv() {
dq.append(&mut nq);
}
let mut blobs: Vec<_> = dq.into_iter().collect();
print_window(window, *receive_index as usize);
// Insert the coding blobs into the blob stream
#[cfg(feature = "erasure")]
erasure::add_coding_blobs(recycler, &mut blobs, *receive_index);
let blobs_len = blobs.len();
info!("broadcast blobs.len: {}", blobs_len);
// Index the blobs
Crdt::index_blobs(crdt, &blobs, receive_index)?;
// keep the cache of blobs that are broadcast
{
let mut win = window.write().unwrap();
for b in &blobs {
let ix = b.read().unwrap().get_index().expect("blob index");
let pos = (ix as usize) % WINDOW_SIZE;
if let Some(x) = &win[pos] {
trace!(
"popped {} at {}",
x.read().unwrap().get_index().unwrap(),
pos
);
recycler.recycle(x.clone());
}
trace!("null {}", pos);
win[pos] = None;
assert!(win[pos].is_none());
}
while let Some(b) = blobs.pop() {
let ix = b.read().unwrap().get_index().expect("blob index");
let pos = (ix as usize) % WINDOW_SIZE;
trace!("caching {} at {}", ix, pos);
assert!(win[pos].is_none());
win[pos] = Some(b);
}
}
// Fill in the coding blob data from the window data blobs
#[cfg(feature = "erasure")]
{
if erasure::generate_coding(
&mut window.write().unwrap(),
*receive_index as usize,
blobs_len,
).is_err()
{
return Err(Error::GenericError);
}
}
*receive_index += blobs_len as u64;
// Send blobs out from the window
Crdt::broadcast(crdt, &window, &sock, transmit_index, *receive_index)?;
Ok(())
}
/// Service to broadcast messages from the leader to layer 1 nodes.
/// See `crdt` for network layer definitions.
/// # Arguments
/// * `sock` - Socket to send from.
/// * `exit` - Boolean to signal system exit.
/// * `crdt` - CRDT structure
/// * `window` - Cache of blobs that we have broadcast
/// * `recycler` - Blob recycler.
/// * `r` - Receive channel for blobs to be retransmitted to all the layer 1 nodes.
pub fn broadcaster(
sock: UdpSocket,
exit: Arc<AtomicBool>,
crdt: Arc<RwLock<Crdt>>,
window: Arc<RwLock<Vec<Option<SharedBlob>>>>,
recycler: BlobRecycler,
r: BlobReceiver,
) -> JoinHandle<()> {
Builder::new()
.name("solana-broadcaster".to_string())
.spawn(move || {
let mut transmit_index = 0;
let mut receive_index = 0;
loop {
if exit.load(Ordering::Relaxed) {
break;
}
let _ = broadcast(
&crdt,
&window,
&recycler,
&r,
&sock,
&mut transmit_index,
&mut receive_index,
);
}
})
.unwrap()
}
fn retransmit(
crdt: &Arc<RwLock<Crdt>>,
recycler: &BlobRecycler,
r: &BlobReceiver,
sock: &UdpSocket,
) -> Result<()> {
let timer = Duration::new(1, 0);
let mut dq = r.recv_timeout(timer)?;
while let Ok(mut nq) = r.try_recv() {
dq.append(&mut nq);
}
{
for b in &dq {
Crdt::retransmit(&crdt, b, sock)?;
}
}
while let Some(b) = dq.pop_front() {
recycler.recycle(b);
}
Ok(())
}
/// Service to retransmit messages from the leader to layer 1 nodes.
/// See `crdt` for network layer definitions.
/// # Arguments
/// * `sock` - Socket to send retransmitted blobs from.
/// * `exit` - Boolean to signal system exit.
/// * `crdt` - This structure needs to be updated and populated by the bank and via gossip.
/// * `recycler` - Blob recycler.
/// * `r` - Receive channel for blobs to be retransmitted to all the layer 1 nodes.
pub fn retransmitter(
sock: UdpSocket,
exit: Arc<AtomicBool>,
crdt: Arc<RwLock<Crdt>>,
recycler: BlobRecycler,
r: BlobReceiver,
) -> JoinHandle<()> {
Builder::new()
.name("solana-retransmitter".to_string())
.spawn(move || {
trace!("retransmitter started");
loop {
if exit.load(Ordering::Relaxed) {
break;
}
// TODO: handle this error
let _ = retransmit(&crdt, &recycler, &r, &sock);
}
trace!("exiting retransmitter");
})
.unwrap()
}
#[cfg(all(feature = "unstable", test))]
mod bench {
extern crate test;
use self::test::Bencher;
use packet::{Packet, PacketRecycler, BLOB_SIZE, PACKET_DATA_SIZE};
use result::Result;
use std::net::{SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::channel;
use std::sync::{Arc, Mutex};
use std::thread::sleep;
use std::thread::{spawn, JoinHandle};
use std::time::Duration;
use std::time::SystemTime;
use streamer::{receiver, PacketReceiver};
fn producer(
addr: &SocketAddr,
recycler: PacketRecycler,
exit: Arc<AtomicBool>,
) -> JoinHandle<()> {
let send = UdpSocket::bind("0.0.0.0:0").unwrap();
let msgs = recycler.allocate();
let msgs_ = msgs.clone();
msgs.write().unwrap().packets.resize(10, Packet::default());
for w in msgs.write().unwrap().packets.iter_mut() {
w.meta.size = PACKET_DATA_SIZE;
w.meta.set_addr(&addr);
}
spawn(move || loop {
if exit.load(Ordering::Relaxed) {
return;
}
let mut num = 0;
for p in msgs_.read().unwrap().packets.iter() {
let a = p.meta.addr();
assert!(p.meta.size < BLOB_SIZE);
send.send_to(&p.data[..p.meta.size], &a).unwrap();
num += 1;
}
assert_eq!(num, 10);
})
}
fn sink(
recycler: PacketRecycler,
exit: Arc<AtomicBool>,
rvs: Arc<Mutex<usize>>,
r: PacketReceiver,
) -> JoinHandle<()> {
spawn(move || loop {
if exit.load(Ordering::Relaxed) {
return;
}
let timer = Duration::new(1, 0);
match r.recv_timeout(timer) {
Ok(msgs) => {
let msgs_ = msgs.clone();
*rvs.lock().unwrap() += msgs.read().unwrap().packets.len();
recycler.recycle(msgs_);
}
_ => (),
}
})
}
fn bench_streamer_with_result() -> Result<()> {
let read = UdpSocket::bind("127.0.0.1:0")?;
read.set_read_timeout(Some(Duration::new(1, 0)))?;
let addr = read.local_addr()?;
let exit = Arc::new(AtomicBool::new(false));
let pack_recycler = PacketRecycler::default();
let (s_reader, r_reader) = channel();
let t_reader = receiver(read, exit.clone(), pack_recycler.clone(), s_reader);
let t_producer1 = producer(&addr, pack_recycler.clone(), exit.clone());
let t_producer2 = producer(&addr, pack_recycler.clone(), exit.clone());
let t_producer3 = producer(&addr, pack_recycler.clone(), exit.clone());
let rvs = Arc::new(Mutex::new(0));
let t_sink = sink(pack_recycler.clone(), exit.clone(), rvs.clone(), r_reader);
let start = SystemTime::now();
let start_val = *rvs.lock().unwrap();
sleep(Duration::new(5, 0));
let elapsed = start.elapsed().unwrap();
let end_val = *rvs.lock().unwrap();
let time = elapsed.as_secs() * 1_000_000_000 + elapsed.subsec_nanos() as u64;
let ftime = (time as f64) / 1e9;
let fcount = (end_val - start_val) as f64;
trace!("performance: {:?}", fcount / ftime);
exit.store(true, Ordering::Relaxed);
t_reader.join()?;
t_producer1.join()?;
t_producer2.join()?;
t_producer3.join()?;
t_sink.join()?;
Ok(())
}
#[bench]
pub fn bench_streamer(_bench: &mut Bencher) {
bench_streamer_with_result().unwrap();
}
}
#[cfg(test)]
mod test {
use crdt::{Crdt, TestNode};
use packet::{Blob, BlobRecycler, Packet, PacketRecycler, Packets, PACKET_DATA_SIZE};
use std::collections::VecDeque;
use std::io;
use std::io::Write;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::channel;
use std::sync::{Arc, RwLock};
use std::time::Duration;
use streamer::{blob_receiver, receiver, responder, window};
use streamer::{default_window, BlobReceiver, PacketReceiver};
fn get_msgs(r: PacketReceiver, num: &mut usize) {
for _t in 0..5 {
let timer = Duration::new(1, 0);
match r.recv_timeout(timer) {
Ok(m) => *num += m.read().unwrap().packets.len(),
e => info!("error {:?}", e),
}
if *num == 10 {
break;
}
}
}
#[test]
pub fn streamer_debug() {
write!(io::sink(), "{:?}", Packet::default()).unwrap();
write!(io::sink(), "{:?}", Packets::default()).unwrap();
write!(io::sink(), "{:?}", Blob::default()).unwrap();
}
#[test]
pub fn streamer_send_test() {
let read = UdpSocket::bind("127.0.0.1:0").expect("bind");
read.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
let addr = read.local_addr().unwrap();
let send = UdpSocket::bind("127.0.0.1:0").expect("bind");
let exit = Arc::new(AtomicBool::new(false));
let pack_recycler = PacketRecycler::default();
let resp_recycler = BlobRecycler::default();
let (s_reader, r_reader) = channel();
let t_receiver = receiver(read, exit.clone(), pack_recycler.clone(), s_reader);
let (s_responder, r_responder) = channel();
let t_responder = responder(send, exit.clone(), resp_recycler.clone(), r_responder);
let mut msgs = VecDeque::new();
for i in 0..10 {
let b = resp_recycler.allocate();
let b_ = b.clone();
let mut w = b.write().unwrap();
w.data[0] = i as u8;
w.meta.size = PACKET_DATA_SIZE;
w.meta.set_addr(&addr);
msgs.push_back(b_);
}
s_responder.send(msgs).expect("send");
let mut num = 0;
get_msgs(r_reader, &mut num);
assert_eq!(num, 10);
exit.store(true, Ordering::Relaxed);
t_receiver.join().expect("join");
t_responder.join().expect("join");
}
fn get_blobs(r: BlobReceiver, num: &mut usize) {
for _t in 0..5 {
let timer = Duration::new(1, 0);
match r.recv_timeout(timer) {
Ok(m) => {
for (i, v) in m.iter().enumerate() {
assert_eq!(v.read().unwrap().get_index().unwrap() as usize, *num + i);
}
*num += m.len();
}
e => info!("error {:?}", e),
}
if *num == 10 {
break;
}
}
}
#[test]
pub fn window_send_test() {
let tn = TestNode::new();
let exit = Arc::new(AtomicBool::new(false));
let mut crdt_me = Crdt::new(tn.data.clone());
let me_id = crdt_me.my_data().id;
crdt_me.set_leader(me_id);
let subs = Arc::new(RwLock::new(crdt_me));
let resp_recycler = BlobRecycler::default();
let (s_reader, r_reader) = channel();
let t_receiver = blob_receiver(
exit.clone(),
resp_recycler.clone(),
tn.sockets.gossip,
s_reader,
).unwrap();
let (s_window, r_window) = channel();
let (s_retransmit, r_retransmit) = channel();
let win = default_window();
let t_window = window(
exit.clone(),
subs,
win,
resp_recycler.clone(),
r_reader,
s_window,
s_retransmit,
);
let (s_responder, r_responder) = channel();
let t_responder = responder(
tn.sockets.replicate,
exit.clone(),
resp_recycler.clone(),
r_responder,
);
let mut msgs = VecDeque::new();
for v in 0..10 {
let i = 9 - v;
let b = resp_recycler.allocate();
let b_ = b.clone();
let mut w = b.write().unwrap();
w.set_index(i).unwrap();
w.set_id(me_id).unwrap();
assert_eq!(i, w.get_index().unwrap());
w.meta.size = PACKET_DATA_SIZE;
w.meta.set_addr(&tn.data.gossip_addr);
msgs.push_back(b_);
}
s_responder.send(msgs).expect("send");
let mut num = 0;
get_blobs(r_window, &mut num);
assert_eq!(num, 10);
let mut q = r_retransmit.recv().unwrap();
while let Ok(mut nq) = r_retransmit.try_recv() {
q.append(&mut nq);
}
assert_eq!(q.len(), 10);
exit.store(true, Ordering::Relaxed);
t_receiver.join().expect("join");
t_responder.join().expect("join");
t_window.join().expect("join");
}
}

293
src/thin_client.rs Normal file

@ -0,0 +1,293 @@
//! The `thin_client` module implements a client-side object that interfaces with
//! a server-side TPU. Client code should use this object instead of writing
//! messages to the network directly. The binary encoding of its messages is
//! unstable and may change in future releases.
use bincode::{deserialize, serialize};
use hash::Hash;
use request::{Request, Response};
use signature::{KeyPair, PublicKey, Signature};
use std::collections::HashMap;
use std::io;
use std::net::{SocketAddr, UdpSocket};
use transaction::Transaction;
/// An object for querying and sending transactions to the network.
pub struct ThinClient {
requests_addr: SocketAddr,
requests_socket: UdpSocket,
transactions_addr: SocketAddr,
transactions_socket: UdpSocket,
last_id: Option<Hash>,
transaction_count: u64,
balances: HashMap<PublicKey, Option<i64>>,
}
impl ThinClient {
/// Create a new ThinClient that will interface with the Rpu
/// over `requests_socket` and `transactions_socket`. To receive responses, the caller must bind
/// `requests_socket` to a public address before invoking ThinClient methods.
pub fn new(
requests_addr: SocketAddr,
requests_socket: UdpSocket,
transactions_addr: SocketAddr,
transactions_socket: UdpSocket,
) -> Self {
ThinClient {
requests_addr,
requests_socket,
transactions_addr,
transactions_socket,
last_id: None,
transaction_count: 0,
balances: HashMap::new(),
}
}
pub fn recv_response(&self) -> io::Result<Response> {
let mut buf = vec![0u8; 1024];
trace!("start recv_from");
self.requests_socket.recv_from(&mut buf)?;
trace!("end recv_from");
let resp = deserialize(&buf).expect("deserialize Response in thin_client");
Ok(resp)
}
pub fn process_response(&mut self, resp: Response) {
match resp {
Response::Balance { key, val } => {
trace!("Response balance {:?} {:?}", key, val);
self.balances.insert(key, val);
}
Response::LastId { id } => {
info!("Response last_id {:?}", id);
self.last_id = Some(id);
}
Response::TransactionCount { transaction_count } => {
info!("Response transaction count {:?}", transaction_count);
self.transaction_count = transaction_count;
}
}
}
/// Send a signed Transaction to the server for processing. This method
/// does not wait for a response.
pub fn transfer_signed(&self, tx: Transaction) -> io::Result<usize> {
let data = serialize(&tx).expect("serialize Transaction in pub fn transfer_signed");
self.transactions_socket
.send_to(&data, &self.transactions_addr)
}
/// Creates, signs, and processes a Transaction. Useful for writing unit-tests.
pub fn transfer(
&self,
n: i64,
keypair: &KeyPair,
to: PublicKey,
last_id: &Hash,
) -> io::Result<Signature> {
let tx = Transaction::new(keypair, to, n, *last_id);
let sig = tx.sig;
self.transfer_signed(tx).map(|_| sig)
}
/// Request the balance of the user holding `pubkey`. This method blocks
/// until the server sends a response. If the response packet is dropped
/// by the network, this method will hang indefinitely.
pub fn get_balance(&mut self, pubkey: &PublicKey) -> io::Result<i64> {
trace!("get_balance");
let req = Request::GetBalance { key: *pubkey };
let data = serialize(&req).expect("serialize GetBalance in pub fn get_balance");
self.requests_socket
.send_to(&data, &self.requests_addr)
.expect("buffer error in pub fn get_balance");
let mut done = false;
while !done {
let resp = self.recv_response()?;
trace!("recv_response {:?}", resp);
if let Response::Balance { key, .. } = &resp {
done = key == pubkey;
}
self.process_response(resp);
}
self.balances[pubkey].ok_or(io::Error::new(io::ErrorKind::Other, "nokey"))
}
/// Request the transaction count. If the response packet is dropped by the network,
/// this method will hang.
pub fn transaction_count(&mut self) -> u64 {
info!("transaction_count");
let req = Request::GetTransactionCount;
let data =
serialize(&req).expect("serialize GetTransactionCount in pub fn transaction_count");
self.requests_socket
.send_to(&data, &self.requests_addr)
.expect("buffer error in pub fn transaction_count");
let mut done = false;
while !done {
let resp = self.recv_response().expect("transaction count dropped");
info!("recv_response {:?}", resp);
if let &Response::TransactionCount { .. } = &resp {
done = true;
}
self.process_response(resp);
}
self.transaction_count
}
/// Request the last Entry ID from the server. This method blocks
/// until the server sends a response.
pub fn get_last_id(&mut self) -> Hash {
info!("get_last_id");
let req = Request::GetLastId;
let data = serialize(&req).expect("serialize GetLastId in pub fn get_last_id");
self.requests_socket
.send_to(&data, &self.requests_addr)
.expect("buffer error in pub fn get_last_id");
let mut done = false;
while !done {
let resp = self.recv_response().expect("get_last_id response");
if let &Response::LastId { .. } = &resp {
done = true;
}
self.process_response(resp);
}
self.last_id.expect("some last_id")
}
pub fn poll_get_balance(&mut self, pubkey: &PublicKey) -> io::Result<i64> {
use std::time::Instant;
let mut balance;
let now = Instant::now();
loop {
balance = self.get_balance(pubkey);
if balance.is_ok() || now.elapsed().as_secs() > 1 {
break;
}
}
balance
}
}
#[cfg(test)]
mod tests {
use super::*;
use bank::Bank;
use budget::Budget;
use crdt::TestNode;
use logger;
use mint::Mint;
use server::Server;
use signature::{KeyPair, KeyPairUtil};
use std::io::sink;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread::sleep;
use std::time::Duration;
use transaction::{Instruction, Plan};
#[test]
fn test_thin_client() {
logger::setup();
let leader = TestNode::new();
let alice = Mint::new(10_000);
let bank = Bank::new(&alice);
let bob_pubkey = KeyPair::new().pubkey();
let exit = Arc::new(AtomicBool::new(false));
let server = Server::new_leader(
bank,
Some(Duration::from_millis(30)),
leader.data.clone(),
leader.sockets.requests,
leader.sockets.transaction,
leader.sockets.broadcast,
leader.sockets.respond,
leader.sockets.gossip,
exit.clone(),
sink(),
);
sleep(Duration::from_millis(900));
let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut client = ThinClient::new(
leader.data.requests_addr,
requests_socket,
leader.data.transactions_addr,
transactions_socket,
);
let last_id = client.get_last_id();
let _sig = client
.transfer(500, &alice.keypair(), bob_pubkey, &last_id)
.unwrap();
let balance = client.poll_get_balance(&bob_pubkey);
assert_eq!(balance.unwrap(), 500);
exit.store(true, Ordering::Relaxed);
for t in server.thread_hdls {
t.join().unwrap();
}
}
#[test]
fn test_bad_sig() {
logger::setup();
let leader = TestNode::new();
let alice = Mint::new(10_000);
let bank = Bank::new(&alice);
let bob_pubkey = KeyPair::new().pubkey();
let exit = Arc::new(AtomicBool::new(false));
let server = Server::new_leader(
bank,
Some(Duration::from_millis(30)),
leader.data.clone(),
leader.sockets.requests,
leader.sockets.transaction,
leader.sockets.broadcast,
leader.sockets.respond,
leader.sockets.gossip,
exit.clone(),
sink(),
);
sleep(Duration::from_millis(300));
let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
requests_socket
.set_read_timeout(Some(Duration::new(5, 0)))
.unwrap();
let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut client = ThinClient::new(
leader.data.requests_addr,
requests_socket,
leader.data.transactions_addr,
transactions_socket,
);
let last_id = client.get_last_id();
let tx = Transaction::new(&alice.keypair(), bob_pubkey, 500, last_id);
let _sig = client.transfer_signed(tx).unwrap();
let last_id = client.get_last_id();
let mut tr2 = Transaction::new(&alice.keypair(), bob_pubkey, 501, last_id);
if let Instruction::NewContract(contract) = &mut tr2.instruction {
contract.tokens = 502;
contract.plan = Plan::Budget(Budget::new_payment(502, bob_pubkey));
}
let _sig = client.transfer_signed(tr2).unwrap();
let balance = client.poll_get_balance(&bob_pubkey);
assert_eq!(balance.unwrap(), 500);
exit.store(true, Ordering::Relaxed);
for t in server.thread_hdls {
t.join().unwrap();
}
}
}
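
For reference, a minimal client-side usage sketch of the ThinClient API above, outside the test harness. It assumes a leader node is already running; `leader_data`, `alice`, and `bob_pubkey` are hypothetical names for that leader's ReplicatedData, a funded KeyPair, and a recipient key.

extern crate solana;
use std::net::UdpSocket;
use std::time::Duration;
use solana::crdt::ReplicatedData;
use solana::signature::{KeyPair, PublicKey};
use solana::thin_client::ThinClient;

fn transfer_and_check(leader_data: &ReplicatedData, alice: &KeyPair, bob_pubkey: PublicKey) {
    let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
    requests_socket
        .set_read_timeout(Some(Duration::new(1, 0)))
        .unwrap();
    let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
    let mut client = ThinClient::new(
        leader_data.requests_addr,
        requests_socket,
        leader_data.transactions_addr,
        transactions_socket,
    );
    // Fetch a recent entry ID, send a signed transfer, then poll until the
    // balance shows up (or the one-second poll window expires).
    let last_id = client.get_last_id();
    let _sig = client.transfer(500, alice, bob_pubkey, &last_id).unwrap();
    assert_eq!(client.poll_get_balance(&bob_pubkey).unwrap(), 500);
}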

18
src/timing.rs Normal file

@ -0,0 +1,18 @@
//! The `timing` module provides std::time utility functions.
use std::time::Duration;
use std::time::{SystemTime, UNIX_EPOCH};
pub fn duration_as_ms(d: &Duration) -> u64 {
(d.as_secs() * 1000) + (d.subsec_nanos() as u64 / 1_000_000)
}
pub fn duration_as_s(d: &Duration) -> f32 {
d.as_secs() as f32 + (d.subsec_nanos() as f32 / 1_000_000_000.0)
}
pub fn timestamp() -> u64 {
let now = SystemTime::now()
.duration_since(UNIX_EPOCH)
.expect("create timestamp in timing");
duration_as_ms(&now)
}
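
A small usage sketch of these helpers, assuming the `timing` module is exported from the crate root:

extern crate solana;
use std::thread::sleep;
use std::time::{Duration, Instant};
use solana::timing::{duration_as_ms, duration_as_s, timestamp};

fn main() {
    let start = Instant::now();
    sleep(Duration::from_millis(20));
    let elapsed = start.elapsed();
    // Roughly 20 ms / 0.02 s; exact values depend on the scheduler.
    println!("{} ms, {} s", duration_as_ms(&elapsed), duration_as_s(&elapsed));
    // Milliseconds since the UNIX epoch.
    println!("now: {}", timestamp());
}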

99
src/tpu.rs Normal file

@ -0,0 +1,99 @@
//! The `tpu` module implements the Transaction Processing Unit, a
//! 5-stage transaction processing pipeline in software.
//!
//! ```text
//! .---------------------------------------------------------------.
//! | TPU .-----. |
//! | | PoH | |
//! | `--+--` |
//! | | |
//! | v |
//! | .-------. .-----------. .---------. .--------. .-------. |
//! .---------. | | Fetch | | SigVerify | | Banking | | Record | | Write | | .------------.
//! | Clients |--->| Stage |->| Stage |->| Stage |->| Stage |->| Stage +--->| Validators |
//! `---------` | | | | | | | | | | | | `------------`
//! | `-------` `-----------` `----+----` `--------` `---+---` |
//! | | | |
//! | | | |
//! | | | |
//! | | | |
//! `---------------------------------|-----------------------|-----`
//! | |
//! v v
//! .------. .--------.
//! | Bank | | Ledger |
//! `------` `--------`
//! ```
use bank::Bank;
use banking_stage::BankingStage;
use fetch_stage::FetchStage;
use packet::{BlobRecycler, PacketRecycler};
use record_stage::RecordStage;
use sigverify_stage::SigVerifyStage;
use std::io::Write;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, Mutex};
use std::thread::JoinHandle;
use std::time::Duration;
use streamer::BlobReceiver;
use write_stage::WriteStage;
pub struct Tpu {
pub blob_receiver: BlobReceiver,
pub thread_hdls: Vec<JoinHandle<()>>,
}
impl Tpu {
pub fn new<W: Write + Send + 'static>(
bank: Arc<Bank>,
tick_duration: Option<Duration>,
transactions_socket: UdpSocket,
blob_recycler: BlobRecycler,
exit: Arc<AtomicBool>,
writer: W,
) -> Self {
let packet_recycler = PacketRecycler::default();
let fetch_stage =
FetchStage::new(transactions_socket, exit.clone(), packet_recycler.clone());
let sigverify_stage = SigVerifyStage::new(exit.clone(), fetch_stage.packet_receiver);
let banking_stage = BankingStage::new(
bank.clone(),
exit.clone(),
sigverify_stage.verified_receiver,
packet_recycler.clone(),
);
let record_stage = match tick_duration {
Some(tick_duration) => RecordStage::new_with_clock(
banking_stage.signal_receiver,
&bank.last_id(),
tick_duration,
),
None => RecordStage::new(banking_stage.signal_receiver, &bank.last_id()),
};
let write_stage = WriteStage::new(
bank.clone(),
exit.clone(),
blob_recycler.clone(),
Mutex::new(writer),
record_stage.entry_receiver,
);
let mut thread_hdls = vec![
fetch_stage.thread_hdl,
banking_stage.thread_hdl,
record_stage.thread_hdl,
write_stage.thread_hdl,
];
thread_hdls.extend(sigverify_stage.thread_hdls.into_iter());
Tpu {
blob_receiver: write_stage.blob_receiver,
thread_hdls,
}
}
}
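
A minimal sketch of wiring up a Tpu by hand, assuming the modules shown in this diff (bank, mint, packet, tpu) are public at the crate root; entries are written to `sink()` here, whereas a real node would pass a ledger writer.

extern crate solana;
use std::io::sink;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::time::Duration;
use solana::bank::Bank;
use solana::mint::Mint;
use solana::packet::BlobRecycler;
use solana::tpu::Tpu;

fn main() {
    let mint = Mint::new(10_000);
    let bank = Arc::new(Bank::new(&mint));
    let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
    let exit = Arc::new(AtomicBool::new(false));
    // Tick every 30 ms so the record stage emits entries even when idle.
    let tpu = Tpu::new(
        bank.clone(),
        Some(Duration::from_millis(30)),
        transactions_socket,
        BlobRecycler::default(),
        exit.clone(),
        sink(),
    );
    // `tpu.blob_receiver` would feed the broadcast side; shut down by flipping
    // the exit flag and joining the stage threads.
    exit.store(true, Ordering::Relaxed);
    for t in tpu.thread_hdls {
        t.join().unwrap();
    }
}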

327
src/transaction.rs Normal file

@ -0,0 +1,327 @@
//! The `transaction` module provides functionality for creating log transactions.
use bincode::serialize;
use budget::{Budget, Condition};
use chrono::prelude::*;
use hash::Hash;
use payment_plan::{Payment, PaymentPlan, Witness};
use signature::{KeyPair, KeyPairUtil, PublicKey, Signature, SignatureUtil};
pub const SIGNED_DATA_OFFSET: usize = 112;
pub const SIG_OFFSET: usize = 8;
pub const PUB_KEY_OFFSET: usize = 80;
/// The type of payment plan. Each item must implement the PaymentPlan trait.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Plan {
/// The builtin contract language Budget.
Budget(Budget),
}
// A proxy for the underlying DSL.
impl PaymentPlan for Plan {
fn final_payment(&self) -> Option<Payment> {
match self {
Plan::Budget(budget) => budget.final_payment(),
}
}
fn verify(&self, spendable_tokens: i64) -> bool {
match self {
Plan::Budget(budget) => budget.verify(spendable_tokens),
}
}
fn apply_witness(&mut self, witness: &Witness) {
match self {
Plan::Budget(budget) => budget.apply_witness(witness),
}
}
}
/// A smart contract.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Contract {
/// The number of tokens allocated to the `Plan` and any transaction fees.
pub tokens: i64,
pub plan: Plan,
}
/// An instruction to progress the smart contract.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Instruction {
/// Declare and instantiate a `Contract`.
NewContract(Contract),
/// Tell the payment plan to acknowledge that the given `DateTime` has passed.
ApplyTimestamp(DateTime<Utc>),
/// Tell the payment plan that the `NewContract` with `Signature` has been
/// signed by the containing transaction's `PublicKey`.
ApplySignature(Signature),
}
/// An instruction signed by a client with `PublicKey`.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Transaction {
/// A digital signature of `instruction`, `last_id` and `fee`, signed by `PublicKey`.
pub sig: Signature,
/// The `PublicKey` of the entity that signed the transaction data.
pub from: PublicKey,
/// The action the server should take.
pub instruction: Instruction,
/// The ID of a recent ledger entry.
pub last_id: Hash,
/// The number of tokens paid for processing and storage of this transaction.
pub fee: i64,
}
impl Transaction {
/// Create a signed transaction from the given `Instruction`.
fn new_from_instruction(
from_keypair: &KeyPair,
instruction: Instruction,
last_id: Hash,
fee: i64,
) -> Self {
let from = from_keypair.pubkey();
let mut tx = Transaction {
sig: Signature::default(),
instruction,
last_id,
from,
fee,
};
tx.sign(from_keypair);
tx
}
/// Create and sign a new Transaction. Used for unit-testing.
pub fn new_taxed(
from_keypair: &KeyPair,
to: PublicKey,
tokens: i64,
fee: i64,
last_id: Hash,
) -> Self {
let payment = Payment {
tokens: tokens - fee,
to,
};
let budget = Budget::Pay(payment);
let plan = Plan::Budget(budget);
let instruction = Instruction::NewContract(Contract { plan, tokens });
Self::new_from_instruction(from_keypair, instruction, last_id, fee)
}
/// Create and sign a new Transaction. Used for unit-testing.
pub fn new(from_keypair: &KeyPair, to: PublicKey, tokens: i64, last_id: Hash) -> Self {
Self::new_taxed(from_keypair, to, tokens, 0, last_id)
}
/// Create and sign a new Witness Timestamp. Used for unit-testing.
pub fn new_timestamp(from_keypair: &KeyPair, dt: DateTime<Utc>, last_id: Hash) -> Self {
let instruction = Instruction::ApplyTimestamp(dt);
Self::new_from_instruction(from_keypair, instruction, last_id, 0)
}
/// Create and sign a new Witness Signature. Used for unit-testing.
pub fn new_signature(from_keypair: &KeyPair, tx_sig: Signature, last_id: Hash) -> Self {
let instruction = Instruction::ApplySignature(tx_sig);
Self::new_from_instruction(from_keypair, instruction, last_id, 0)
}
/// Create and sign a postdated Transaction. Used for unit-testing.
pub fn new_on_date(
from_keypair: &KeyPair,
to: PublicKey,
dt: DateTime<Utc>,
tokens: i64,
last_id: Hash,
) -> Self {
let from = from_keypair.pubkey();
let budget = Budget::Or(
(Condition::Timestamp(dt), Payment { tokens, to }),
(Condition::Signature(from), Payment { tokens, to: from }),
);
let plan = Plan::Budget(budget);
let instruction = Instruction::NewContract(Contract { plan, tokens });
Self::new_from_instruction(from_keypair, instruction, last_id, 0)
}
/// Get the transaction data to sign.
fn get_sign_data(&self) -> Vec<u8> {
let mut data = serialize(&(&self.instruction)).expect("serialize Contract");
let last_id_data = serialize(&(&self.last_id)).expect("serialize last_id");
data.extend_from_slice(&last_id_data);
let fee_data = serialize(&(&self.fee)).expect("serialize fee");
data.extend_from_slice(&fee_data);
data
}
/// Sign this transaction.
pub fn sign(&mut self, keypair: &KeyPair) {
let sign_data = self.get_sign_data();
self.sig = Signature::clone_from_slice(keypair.sign(&sign_data).as_ref());
}
/// Verify only the transaction signature.
pub fn verify_sig(&self) -> bool {
warn!("transaction signature verification called");
self.sig.verify(&self.from, &self.get_sign_data())
}
/// Verify only the payment plan.
pub fn verify_plan(&self) -> bool {
if let Instruction::NewContract(contract) = &self.instruction {
self.fee >= 0 && self.fee <= contract.tokens
&& contract.plan.verify(contract.tokens - self.fee)
} else {
true
}
}
}
#[cfg(test)]
pub fn test_tx() -> Transaction {
let keypair1 = KeyPair::new();
let pubkey1 = keypair1.pubkey();
let zero = Hash::default();
Transaction::new(&keypair1, pubkey1, 42, zero)
}
#[cfg(test)]
pub fn memfind<A: Eq>(a: &[A], b: &[A]) -> Option<usize> {
assert!(a.len() >= b.len());
let end = a.len() - b.len() + 1;
for i in 0..end {
if a[i..i + b.len()] == b[..] {
return Some(i);
}
}
None
}
#[cfg(test)]
mod tests {
use super::*;
use bincode::{deserialize, serialize};
#[test]
fn test_claim() {
let keypair = KeyPair::new();
let zero = Hash::default();
let tx0 = Transaction::new(&keypair, keypair.pubkey(), 42, zero);
assert!(tx0.verify_plan());
}
#[test]
fn test_transfer() {
let zero = Hash::default();
let keypair0 = KeyPair::new();
let keypair1 = KeyPair::new();
let pubkey1 = keypair1.pubkey();
let tx0 = Transaction::new(&keypair0, pubkey1, 42, zero);
assert!(tx0.verify_plan());
}
#[test]
fn test_transfer_with_fee() {
let zero = Hash::default();
let keypair0 = KeyPair::new();
let pubkey1 = KeyPair::new().pubkey();
assert!(Transaction::new_taxed(&keypair0, pubkey1, 1, 1, zero).verify_plan());
assert!(!Transaction::new_taxed(&keypair0, pubkey1, 1, 2, zero).verify_plan());
assert!(!Transaction::new_taxed(&keypair0, pubkey1, 1, -1, zero).verify_plan());
}
#[test]
fn test_serialize_claim() {
let budget = Budget::Pay(Payment {
tokens: 0,
to: Default::default(),
});
let plan = Plan::Budget(budget);
let instruction = Instruction::NewContract(Contract { plan, tokens: 0 });
let claim0 = Transaction {
instruction,
from: Default::default(),
last_id: Default::default(),
sig: Default::default(),
fee: 0,
};
let buf = serialize(&claim0).unwrap();
let claim1: Transaction = deserialize(&buf).unwrap();
assert_eq!(claim1, claim0);
}
#[test]
fn test_token_attack() {
let zero = Hash::default();
let keypair = KeyPair::new();
let pubkey = keypair.pubkey();
let mut tx = Transaction::new(&keypair, pubkey, 42, zero);
if let Instruction::NewContract(contract) = &mut tx.instruction {
contract.tokens = 1_000_000; // <-- attack, part 1!
if let Plan::Budget(Budget::Pay(ref mut payment)) = contract.plan {
payment.tokens = contract.tokens; // <-- attack, part 2!
}
}
assert!(tx.verify_plan());
assert!(!tx.verify_sig());
}
#[test]
fn test_hijack_attack() {
let keypair0 = KeyPair::new();
let keypair1 = KeyPair::new();
let thief_keypair = KeyPair::new();
let pubkey1 = keypair1.pubkey();
let zero = Hash::default();
let mut tx = Transaction::new(&keypair0, pubkey1, 42, zero);
if let Instruction::NewContract(contract) = &mut tx.instruction {
if let Plan::Budget(Budget::Pay(ref mut payment)) = contract.plan {
payment.to = thief_keypair.pubkey(); // <-- attack!
}
}
assert!(tx.verify_plan());
assert!(!tx.verify_sig());
}
#[test]
fn test_layout() {
let tx = test_tx();
let sign_data = tx.get_sign_data();
let tx_bytes = serialize(&tx).unwrap();
assert_matches!(memfind(&tx_bytes, &sign_data), Some(SIGNED_DATA_OFFSET));
assert_matches!(memfind(&tx_bytes, &tx.sig), Some(SIG_OFFSET));
assert_matches!(memfind(&tx_bytes, &tx.from), Some(PUB_KEY_OFFSET));
}
#[test]
fn test_overspend_attack() {
let keypair0 = KeyPair::new();
let keypair1 = KeyPair::new();
let zero = Hash::default();
let mut tx = Transaction::new(&keypair0, keypair1.pubkey(), 1, zero);
if let Instruction::NewContract(contract) = &mut tx.instruction {
if let Plan::Budget(Budget::Pay(ref mut payment)) = contract.plan {
payment.tokens = 2; // <-- attack!
}
}
assert!(!tx.verify_plan());
// Also, ensure all branches of the plan spend all tokens
if let Instruction::NewContract(contract) = &mut tx.instruction {
if let Plan::Budget(Budget::Pay(ref mut payment)) = contract.plan {
payment.tokens = 0; // <-- whoops!
}
}
assert!(!tx.verify_plan());
}
}
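
A short sketch of building and verifying transactions with this API; `Hash::default()` stands in for a real `last_id`, which a client would normally fetch from the network, and the date is arbitrary.

extern crate chrono;
extern crate solana;
use chrono::prelude::*;
use solana::hash::Hash;
use solana::signature::{KeyPair, KeyPairUtil};
use solana::transaction::Transaction;

fn main() {
    let alice = KeyPair::new();
    let bob = KeyPair::new().pubkey();
    let last_id = Hash::default(); // placeholder, not a real ledger entry ID
    // A plain payment of 42 tokens, signed by alice.
    let tx = Transaction::new(&alice, bob, 42, last_id);
    assert!(tx.verify_plan());
    assert!(tx.verify_sig());
    // A postdated payment: pays bob at the timestamp, or refunds alice if she
    // signs a cancellation first.
    let dt = Utc.ymd(2018, 7, 1).and_hms(0, 0, 0);
    let tx_on_date = Transaction::new_on_date(&alice, bob, dt, 42, last_id);
    assert!(tx_on_date.verify_plan());
}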

304
src/tvu.rs Normal file

@ -0,0 +1,304 @@
//! The `tvu` module implements the Transaction Validation Unit, a
//! 5-stage transaction validation pipeline in software.
//! 1. streamer
//! - Incoming blobs are picked up from the replicate socket.
//! 2. verifier
//! - TODO Blobs are sent to the GPU, and while the memory is there the PoH stream is verified
//! along with the ecdsa signature for the blob and each signature in all the transactions. Blobs
//! with errors are dropped, or marked for slashing.
//! 3.a retransmit
//! - Blobs originating from the parent (the leader, at the moment, is the only parent) are retransmitted to all the
//! peers in the crdt. Peers are everyone who is not me or the leader and who has a known replicate
//! address.
//! 3.b window
//! - Verified blobs are placed into a window, indexed by the counter set by the leader. This could
//! be the PoH counter if it's monotonically increasing in each blob. Erasure coding is used to
//! recover any missing packets, and requests are made at random to peers and parents to retransmit
//! a missing packet.
//! 4. accountant
//! - Contiguous blobs are sent to the accountant for transaction processing
//! 5. validator
//! - TODO Validation messages are sent back to the leader
use bank::Bank;
use crdt::{Crdt, ReplicatedData};
use ncp::Ncp;
use packet;
use replicate_stage::ReplicateStage;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::channel;
use std::sync::{Arc, RwLock};
use std::thread::JoinHandle;
use streamer;
pub struct Tvu {
pub thread_hdls: Vec<JoinHandle<()>>,
}
impl Tvu {
/// This service receives messages from a leader in the network and processes the transactions
/// on the bank state.
/// # Arguments
/// * `bank` - The bank state.
/// * `me` - my configuration
/// * `gossip_listen_socket` - my gossip listen socket
/// * `replicate` - my replicate socket
/// * `leader` - leader configuration
/// * `exit` - The exit signal.
pub fn new(
bank: Arc<Bank>,
me: ReplicatedData,
gossip_listen_socket: UdpSocket,
replicate: UdpSocket,
repair_socket: UdpSocket,
leader: ReplicatedData,
exit: Arc<AtomicBool>,
) -> Self {
//replicate pipeline
let crdt = Arc::new(RwLock::new(Crdt::new(me)));
crdt.write()
.expect("'crdt' write lock in pub fn replicate")
.set_leader(leader.id);
crdt.write()
.expect("'crdt' write lock before insert() in pub fn replicate")
.insert(&leader);
let window = streamer::default_window();
let gossip_send_socket = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
let ncp = Ncp::new(
crdt.clone(),
window.clone(),
gossip_listen_socket,
gossip_send_socket,
exit.clone(),
).expect("Ncp::new");
// TODO pull this socket out through the public interface
// make sure we are on the same interface
let mut local = replicate.local_addr().expect("tvu: get local address");
local.set_port(0);
let write = UdpSocket::bind(local).expect("tvu: bind to local socket");
let blob_recycler = packet::BlobRecycler::default();
let (blob_sender, blob_receiver) = channel();
let t_blob_receiver = streamer::blob_receiver(
exit.clone(),
blob_recycler.clone(),
replicate,
blob_sender.clone(),
).expect("tvu: blob receiver creation");
let (window_sender, window_receiver) = channel();
let (retransmit_sender, retransmit_receiver) = channel();
let t_retransmit = streamer::retransmitter(
write,
exit.clone(),
crdt.clone(),
blob_recycler.clone(),
retransmit_receiver,
);
let t_repair_receiver = streamer::blob_receiver(
exit.clone(),
blob_recycler.clone(),
repair_socket,
blob_sender.clone(),
).expect("tvu: blob repair receiver fail");
//TODO
//the packets coming out of blob_receiver need to be sent to the GPU and verified
//then sent to the window, which does the erasure coding reconstruction
let t_window = streamer::window(
exit.clone(),
crdt.clone(),
window,
blob_recycler.clone(),
blob_receiver,
window_sender,
retransmit_sender,
);
let replicate_stage = ReplicateStage::new(
bank.clone(),
exit.clone(),
window_receiver,
blob_recycler.clone(),
);
let mut threads = vec![
//replicate threads
t_blob_receiver,
t_retransmit,
t_window,
t_repair_receiver,
replicate_stage.thread_hdl,
];
threads.extend(ncp.thread_hdls.into_iter());
Tvu {
thread_hdls: threads,
}
}
}
#[cfg(test)]
pub mod tests {
use bank::Bank;
use bincode::serialize;
use crdt::{Crdt, TestNode};
use entry::Entry;
use hash::{hash, Hash};
use logger;
use mint::Mint;
use ncp::Ncp;
use packet::BlobRecycler;
use result::Result;
use signature::{KeyPair, KeyPairUtil};
use std::collections::VecDeque;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::channel;
use std::sync::{Arc, RwLock};
use std::time::Duration;
use streamer;
use transaction::Transaction;
use tvu::Tvu;
fn new_replicator(
crdt: Arc<RwLock<Crdt>>,
listen: UdpSocket,
exit: Arc<AtomicBool>,
) -> Result<Ncp> {
let window = streamer::default_window();
let send_sock = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
Ncp::new(crdt, window, listen, send_sock, exit)
}
/// Test that a message sent from the leader to target1 is replicated to target2
#[test]
fn test_replicate() {
logger::setup();
let leader = TestNode::new();
let target1 = TestNode::new();
let target2 = TestNode::new();
let exit = Arc::new(AtomicBool::new(false));
//start crdt_leader
let mut crdt_l = Crdt::new(leader.data.clone());
crdt_l.set_leader(leader.data.id);
let cref_l = Arc::new(RwLock::new(crdt_l));
let dr_l = new_replicator(cref_l, leader.sockets.gossip, exit.clone()).unwrap();
//start crdt2
let mut crdt2 = Crdt::new(target2.data.clone());
crdt2.insert(&leader.data);
crdt2.set_leader(leader.data.id);
let leader_id = leader.data.id;
let cref2 = Arc::new(RwLock::new(crdt2));
let dr_2 = new_replicator(cref2, target2.sockets.gossip, exit.clone()).unwrap();
// set up some blob services to send blobs into the socket to simulate the
// source peer, and to pull blobs out of the socket to simulate the target peer
let recv_recycler = BlobRecycler::default();
let resp_recycler = BlobRecycler::default();
let (s_reader, r_reader) = channel();
let t_receiver = streamer::blob_receiver(
exit.clone(),
recv_recycler.clone(),
target2.sockets.replicate,
s_reader,
).unwrap();
// simulate leader sending messages
let (s_responder, r_responder) = channel();
let t_responder = streamer::responder(
leader.sockets.requests,
exit.clone(),
resp_recycler.clone(),
r_responder,
);
let starting_balance = 10_000;
let mint = Mint::new(starting_balance);
let replicate_addr = target1.data.replicate_addr;
let bank = Arc::new(Bank::new(&mint));
let tvu = Tvu::new(
bank.clone(),
target1.data,
target1.sockets.gossip,
target1.sockets.replicate,
target1.sockets.repair,
leader.data,
exit.clone(),
);
let mut alice_ref_balance = starting_balance;
let mut msgs = VecDeque::new();
let mut cur_hash = Hash::default();
let num_blobs = 10;
let transfer_amount = 501;
let bob_keypair = KeyPair::new();
for i in 0..num_blobs {
let b = resp_recycler.allocate();
let b_ = b.clone();
let mut w = b.write().unwrap();
w.set_index(i).unwrap();
w.set_id(leader_id).unwrap();
let entry0 = Entry::new(&cur_hash, i, vec![]);
bank.register_entry_id(&cur_hash);
cur_hash = hash(&cur_hash);
let tx0 = Transaction::new(
&mint.keypair(),
bob_keypair.pubkey(),
transfer_amount,
cur_hash,
);
bank.register_entry_id(&cur_hash);
cur_hash = hash(&cur_hash);
let entry1 = Entry::new(&cur_hash, i + num_blobs, vec![tx0]);
bank.register_entry_id(&cur_hash);
cur_hash = hash(&cur_hash);
alice_ref_balance -= transfer_amount;
let serialized_entry = serialize(&vec![entry0, entry1]).unwrap();
w.data_mut()[..serialized_entry.len()].copy_from_slice(&serialized_entry);
w.set_size(serialized_entry.len());
w.meta.set_addr(&replicate_addr);
drop(w);
msgs.push_back(b_);
}
// send the blobs into the socket
s_responder.send(msgs).expect("send");
// receive retransmitted messages
let timer = Duration::new(1, 0);
let mut msgs: Vec<_> = Vec::new();
while let Ok(msg) = r_reader.recv_timeout(timer) {
trace!("msg: {:?}", msg);
msgs.push(msg);
}
let alice_balance = bank.get_balance(&mint.keypair().pubkey()).unwrap();
assert_eq!(alice_balance, alice_ref_balance);
let bob_balance = bank.get_balance(&bob_keypair.pubkey()).unwrap();
assert_eq!(bob_balance, starting_balance - alice_ref_balance);
exit.store(true, Ordering::Relaxed);
for t in tvu.thread_hdls {
t.join().expect("join");
}
for t in dr_l.thread_hdls {
t.join().expect("join");
}
for t in dr_2.thread_hdls {
t.join().expect("join");
}
t_receiver.join().expect("join");
t_responder.join().expect("join");
}
}
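
A condensed sketch of standing up a validator-side Tvu against a known leader, mirroring the test above; `TestNode` is the in-crate test helper, so this is illustrative wiring rather than a deployment recipe.

extern crate solana;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use solana::bank::Bank;
use solana::crdt::TestNode;
use solana::mint::Mint;
use solana::tvu::Tvu;

fn main() {
    let leader = TestNode::new();
    let me = TestNode::new();
    let exit = Arc::new(AtomicBool::new(false));
    let bank = Arc::new(Bank::new(&Mint::new(10_000)));
    // Gossip, replicate, and repair sockets all come from this node's own config.
    let tvu = Tvu::new(
        bank.clone(),
        me.data,
        me.sockets.gossip,
        me.sockets.replicate,
        me.sockets.repair,
        leader.data,
        exit.clone(),
    );
    exit.store(true, Ordering::Relaxed);
    for t in tvu.thread_hdls {
        t.join().unwrap();
    }
}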

79
src/write_stage.rs Normal file

@ -0,0 +1,79 @@
//! The `write_stage` module implements the TPU's write stage. It
//! writes entries to the given writer, which is typically a file or
//! stdout, and then sends the Entry to its output channel.
use bank::Bank;
use entry::Entry;
use entry_writer::EntryWriter;
use packet;
use std::io::Write;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver};
use std::sync::{Arc, Mutex};
use std::thread::{Builder, JoinHandle};
use streamer;
pub struct WriteStage {
pub thread_hdl: JoinHandle<()>,
pub blob_receiver: streamer::BlobReceiver,
}
impl WriteStage {
/// Create a new WriteStage that writes entries to the given writer and forwards them on its blob channel.
pub fn new<W: Write + Send + 'static>(
bank: Arc<Bank>,
exit: Arc<AtomicBool>,
blob_recycler: packet::BlobRecycler,
writer: Mutex<W>,
entry_receiver: Receiver<Entry>,
) -> Self {
let (blob_sender, blob_receiver) = channel();
let thread_hdl = Builder::new()
.name("solana-writer".to_string())
.spawn(move || loop {
let entry_writer = EntryWriter::new(&bank);
let _ = entry_writer.write_and_send_entries(
&blob_sender,
&blob_recycler,
&writer,
&entry_receiver,
);
if exit.load(Ordering::Relaxed) {
info!("broadcat_service exiting");
break;
}
})
.unwrap();
WriteStage {
thread_hdl,
blob_receiver,
}
}
pub fn new_drain(
bank: Arc<Bank>,
exit: Arc<AtomicBool>,
entry_receiver: Receiver<Entry>,
) -> Self {
let (_blob_sender, blob_receiver) = channel();
let thread_hdl = Builder::new()
.name("solana-drain".to_string())
.spawn(move || {
let entry_writer = EntryWriter::new(&bank);
loop {
let _ = entry_writer.drain_entries(&entry_receiver);
if exit.load(Ordering::Relaxed) {
info!("drain_service exiting");
break;
}
}
})
.unwrap();
WriteStage {
thread_hdl,
blob_receiver,
}
}
}
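
A minimal sketch of driving a WriteStage directly; in the node it is constructed by Tpu::new and fed by the record stage, but here a bare channel and `sink()` stand in for those pieces (module paths assumed public as in the tests above).

extern crate solana;
use std::io::sink;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::channel;
use std::sync::{Arc, Mutex};
use solana::bank::Bank;
use solana::entry::Entry;
use solana::mint::Mint;
use solana::packet::BlobRecycler;
use solana::write_stage::WriteStage;

fn main() {
    let bank = Arc::new(Bank::new(&Mint::new(10_000)));
    let exit = Arc::new(AtomicBool::new(false));
    let (entry_sender, entry_receiver) = channel::<Entry>();
    let write_stage = WriteStage::new(
        bank.clone(),
        exit.clone(),
        BlobRecycler::default(),
        Mutex::new(sink()),
        entry_receiver,
    );
    // Entries sent on `entry_sender` are written to the sink and re-emitted as
    // blobs on `write_stage.blob_receiver` for broadcast.
    drop(entry_sender);
    exit.store(true, Ordering::Relaxed);
    write_stage.thread_hdl.join().unwrap();
}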

184
tests/data_replicator.rs Normal file

@ -0,0 +1,184 @@
#[macro_use]
extern crate log;
extern crate rayon;
extern crate solana;
use rayon::iter::*;
use solana::crdt::{Crdt, TestNode};
use solana::logger;
use solana::ncp::Ncp;
use solana::packet::Blob;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
use std::thread::sleep;
use std::time::Duration;
fn test_node(exit: Arc<AtomicBool>) -> (Arc<RwLock<Crdt>>, Ncp, UdpSocket) {
let tn = TestNode::new();
let crdt = Crdt::new(tn.data.clone());
let c = Arc::new(RwLock::new(crdt));
let w = Arc::new(RwLock::new(vec![]));
let d = Ncp::new(
c.clone(),
w,
tn.sockets.gossip,
tn.sockets.gossip_send,
exit,
).unwrap();
(c, d, tn.sockets.replicate)
}
/// Test that the network converges.
/// Run until every node in the network has a full ReplicatedData set.
/// Check that nodes stop sending updates after all the ReplicatedData has been shared.
/// The tests that actually use this function are below.
fn run_gossip_topo<F>(topo: F)
where
F: Fn(&Vec<(Arc<RwLock<Crdt>>, Ncp, UdpSocket)>) -> (),
{
let num: usize = 5;
let exit = Arc::new(AtomicBool::new(false));
let listen: Vec<_> = (0..num).map(|_| test_node(exit.clone())).collect();
topo(&listen);
let mut done = true;
for i in 0..(num * 32) {
done = false;
trace!("round {}", i);
for (c, _, _) in &listen {
if num == c.read().unwrap().convergence() as usize {
done = true;
break;
}
}
//at least 1 node converged
if done {
break;
}
sleep(Duration::new(1, 0));
}
exit.store(true, Ordering::Relaxed);
for (c, dr, _) in listen.into_iter() {
for j in dr.thread_hdls.into_iter() {
j.join().unwrap();
}
// make it clear what failed
// protocol is too chatty; updates should stop after everyone receives `num`
assert!(c.read().unwrap().update_index <= num as u64);
// protocol is not chatty enough, everyone should get `num` entries
assert_eq!(c.read().unwrap().table.len(), num);
}
assert!(done);
}
/// ring a -> b -> c -> d -> e -> a
#[test]
fn gossip_ring() {
logger::setup();
run_gossip_topo(|listen| {
let num = listen.len();
for n in 0..num {
let y = n % listen.len();
let x = (n + 1) % listen.len();
let mut xv = listen[x].0.write().unwrap();
let yv = listen[y].0.read().unwrap();
let mut d = yv.table[&yv.me].clone();
d.version = 0;
xv.insert(&d);
}
});
}
/// star a -> (b,c,d,e)
#[test]
fn gossip_star() {
logger::setup();
run_gossip_topo(|listen| {
let num = listen.len();
for n in 0..(num - 1) {
let x = 0;
let y = (n + 1) % listen.len();
let mut xv = listen[x].0.write().unwrap();
let yv = listen[y].0.read().unwrap();
let mut yd = yv.table[&yv.me].clone();
yd.version = 0;
xv.insert(&yd);
trace!("star leader {:?}", &xv.me[..4]);
}
});
}
/// rstar a <- (b,c,d,e)
#[test]
fn gossip_rstar() {
logger::setup();
run_gossip_topo(|listen| {
let num = listen.len();
let xd = {
let xv = listen[0].0.read().unwrap();
xv.table[&xv.me].clone()
};
trace!("rstar leader {:?}", &xd.id[..4]);
for n in 0..(num - 1) {
let y = (n + 1) % listen.len();
let mut yv = listen[y].0.write().unwrap();
yv.insert(&xd);
trace!("rstar insert {:?} into {:?}", &xd.id[..4], &yv.me[..4]);
}
});
}
#[test]
pub fn crdt_retransmit() {
logger::setup();
let exit = Arc::new(AtomicBool::new(false));
trace!("c1:");
let (c1, dr1, tn1) = test_node(exit.clone());
trace!("c2:");
let (c2, dr2, tn2) = test_node(exit.clone());
trace!("c3:");
let (c3, dr3, tn3) = test_node(exit.clone());
let c1_data = c1.read().unwrap().my_data().clone();
c1.write().unwrap().set_leader(c1_data.id);
c2.write().unwrap().insert(&c1_data);
c3.write().unwrap().insert(&c1_data);
c2.write().unwrap().set_leader(c1_data.id);
c3.write().unwrap().set_leader(c1_data.id);
//wait to converge
trace!("waiting to converge:");
let mut done = false;
for _ in 0..30 {
done = c1.read().unwrap().table.len() == 3 && c2.read().unwrap().table.len() == 3
&& c3.read().unwrap().table.len() == 3;
if done {
break;
}
sleep(Duration::new(1, 0));
}
assert!(done);
let mut b = Blob::default();
b.meta.size = 10;
Crdt::retransmit(&c1, &Arc::new(RwLock::new(b)), &tn1).unwrap();
let res: Vec<_> = [tn1, tn2, tn3]
.into_par_iter()
.map(|s| {
let mut b = Blob::default();
s.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
let res = s.recv_from(&mut b.data);
res.is_err() //true if failed to receive the retransmit packet
})
.collect();
//tn1 was the sender, so it should fail to receive the retransmit packet;
//tn2 and tn3 should both receive it
assert_eq!(res, [true, false, false]);
exit.store(true, Ordering::Relaxed);
let mut threads = vec![];
threads.extend(dr1.thread_hdls.into_iter());
threads.extend(dr2.thread_hdls.into_iter());
threads.extend(dr3.thread_hdls.into_iter());
for t in threads.into_iter() {
t.join().unwrap();
}
}

175
tests/multinode.rs Normal file

@ -0,0 +1,175 @@
#[macro_use]
extern crate log;
extern crate bincode;
extern crate solana;
use solana::bank::Bank;
use solana::crdt::TestNode;
use solana::crdt::{Crdt, ReplicatedData};
use solana::logger;
use solana::mint::Mint;
use solana::ncp::Ncp;
use solana::server::Server;
use solana::signature::{KeyPair, KeyPairUtil, PublicKey};
use solana::streamer::default_window;
use solana::thin_client::ThinClient;
use std::io;
use std::io::sink;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
use std::thread::sleep;
use std::thread::JoinHandle;
use std::time::Duration;
fn validator(
leader: &ReplicatedData,
exit: Arc<AtomicBool>,
alice: &Mint,
threads: &mut Vec<JoinHandle<()>>,
) {
let validator = TestNode::new();
let replicant_bank = Bank::new(&alice);
let mut ts = Server::new_validator(
replicant_bank,
validator.data.clone(),
validator.sockets.requests,
validator.sockets.respond,
validator.sockets.replicate,
validator.sockets.gossip,
validator.sockets.repair,
leader.clone(),
exit.clone(),
);
threads.append(&mut ts.thread_hdls);
}
fn converge(
leader: &ReplicatedData,
exit: Arc<AtomicBool>,
num_nodes: usize,
threads: &mut Vec<JoinHandle<()>>,
) -> Vec<ReplicatedData> {
//let's spy on the network
let mut spy = TestNode::new();
let daddr = "0.0.0.0:0".parse().unwrap();
let me = spy.data.id.clone();
spy.data.replicate_addr = daddr;
spy.data.requests_addr = daddr;
let mut spy_crdt = Crdt::new(spy.data);
spy_crdt.insert(&leader);
spy_crdt.set_leader(leader.id);
let spy_ref = Arc::new(RwLock::new(spy_crdt));
let spy_window = default_window();
let dr = Ncp::new(
spy_ref.clone(),
spy_window,
spy.sockets.gossip,
spy.sockets.gossip_send,
exit,
).unwrap();
//wait for the network to converge
let mut converged = false;
for _ in 0..30 {
let num = spy_ref.read().unwrap().convergence();
if num == num_nodes as u64 {
converged = true;
break;
}
sleep(Duration::new(1, 0));
}
assert!(converged);
threads.extend(dr.thread_hdls.into_iter());
let v: Vec<ReplicatedData> = spy_ref
.read()
.unwrap()
.table
.values()
.filter(|x| x.id != me)
.cloned()
.collect();
v
}
#[test]
fn test_multi_node() {
logger::setup();
const N: usize = 5;
trace!("test_multi_accountant_stub");
let leader = TestNode::new();
let alice = Mint::new(10_000);
let bob_pubkey = KeyPair::new().pubkey();
let exit = Arc::new(AtomicBool::new(false));
let leader_bank = Bank::new(&alice);
let server = Server::new_leader(
leader_bank,
None,
leader.data.clone(),
leader.sockets.requests,
leader.sockets.transaction,
leader.sockets.broadcast,
leader.sockets.respond,
leader.sockets.gossip,
exit.clone(),
sink(),
);
let mut threads = server.thread_hdls;
for _ in 0..N {
validator(&leader.data, exit.clone(), &alice, &mut threads);
}
let servers = converge(&leader.data, exit.clone(), N + 2, &mut threads);
//contains the leader addr as well
assert_eq!(servers.len(), N + 1);
//verify leader can do transfer
let leader_balance = tx_and_retry_get_balance(&leader.data, &alice, &bob_pubkey).unwrap();
assert_eq!(leader_balance, 500);
//verify validator has the same balance
let mut success = 0usize;
for server in servers.iter() {
let mut client = mk_client(server);
if let Ok(bal) = client.poll_get_balance(&bob_pubkey) {
trace!("validator balance {}", bal);
if bal == leader_balance {
success += 1;
}
}
}
assert_eq!(success, servers.len());
exit.store(true, Ordering::Relaxed);
for t in threads {
t.join().unwrap();
}
}
fn mk_client(leader: &ReplicatedData) -> ThinClient {
let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
requests_socket
.set_read_timeout(Some(Duration::new(1, 0)))
.unwrap();
let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
ThinClient::new(
leader.requests_addr,
requests_socket,
leader.transactions_addr,
transactions_socket,
)
}
fn tx_and_retry_get_balance(
leader: &ReplicatedData,
alice: &Mint,
bob_pubkey: &PublicKey,
) -> io::Result<i64> {
let mut client = mk_client(leader);
trace!("getting leader last_id");
let last_id = client.get_last_id();
info!("executing leader transer");
let _sig = client
.transfer(500, &alice.keypair(), *bob_pubkey, &last_id)
.unwrap();
client.poll_get_balance(bob_pubkey)
}