Compare commits

...

434 Commits

Author SHA1 Message Date
e6c3c215ab Add note about installing git-lfs 2018-04-30 15:26:31 -06:00
5c66bbde01 Add a note about running with GPU optimizations 2018-04-30 15:20:39 -06:00
6268d540a8 move CI specific scripts to solana-labs/buildkite repo
Former-commit-id: 77dd1bdd4a
2018-04-29 23:43:43 -07:00
3cfb571356 Version bump
Former-commit-id: f7385e866207b3ec2269bac36d52ef1e7f09337c
2018-04-27 15:49:48 -07:00
f6e5f2439d Add GPU library for Linux systems
To get solana to use the GPU, invoke cargo with "--features=cuda".


Former-commit-id: ea904df6e53d98a32e3f6103ee82cdf7ba08bf21
2018-04-27 15:47:22 -07:00
edf6272374 Merge pull request #154 from sakridge/replicator
Replicator
2018-04-27 14:30:52 -06:00
7f6a4b0ce3 Deserialize the Entry structs and process them 2018-04-27 13:15:19 -07:00
3be5f25f2f Work on test_replicate to test replicate service
generate some messages to send to replicator service
2018-04-27 08:21:34 -07:00
1b6cdd5637 Fix some compilation issues 2018-04-27 08:21:34 -07:00
f752e55929 update 2018-04-27 08:21:34 -07:00
ebb089b3f1 wip 2018-04-27 08:21:34 -07:00
ad6303f031 docs 2018-04-27 08:21:34 -07:00
828b9d6717 docs 2018-04-27 08:21:34 -07:00
444adcd1ca update 2018-04-27 08:21:34 -07:00
69ac305883 wip 2018-04-27 08:21:34 -07:00
2ff57df2a0 state replication 2018-04-27 08:21:34 -07:00
7077f4cbe2 Merge pull request #128 from garious/faster-demo
Utilize parallelized accountant in demo
2018-04-27 08:47:42 -06:00
266f85f607 Merge pull request #152 from aeyakovenko/star
recover full network from a star
2018-04-26 15:36:08 -06:00
d90ab90145 bind to all 2018-04-26 13:54:29 -07:00
48018b3f5b docs 2018-04-26 13:50:57 -07:00
15584e7062 recover full network from a star 2018-04-26 13:48:42 -07:00
d415b17146 sleepless demo to complement sleepless nights
18 ktps on macbook pro, no gpu
2018-04-26 13:17:38 -06:00
9ed953e8c3 Fix rebase fails 2018-04-26 09:35:10 -06:00
b60a98bd6e Startup log can reference IDs without itself 2018-04-26 08:42:34 -06:00
a15e30d4b3 Report transactions processed 2018-04-26 08:42:34 -06:00
d5d133353f Port blocking stub functions to new stateful ones 2018-04-26 08:42:34 -06:00
6badc98510 Add low-level response-handling functions to skel 2018-04-26 08:42:34 -06:00
ea8bfb46ce Add a way to subscribe for new entry metadata 2018-04-26 08:42:34 -06:00
58860ed19f WIP: New demo that makes better use of the parallelized accountant 2018-04-26 08:42:34 -06:00
583f652197 Generate genesis log for the demo
This log contains a bunch of transactions that generate new
accounts, so that transactions to and from them can be processed
in parallel.
2018-04-26 08:42:34 -06:00
3215dcff78 Update readme for new demo
Need to add a bunch of unrelated accounts to the genesis block
so that transactions can be processed in parallel without waiting
on write-locks. Then stuff the private keys of those accounts
into mint.json so that the client-demo can send the tokens from
those accounts.
2018-04-26 08:42:34 -06:00
38fdd17067 Add initializing log message to server
Handy when the genesis block is large.
2018-04-26 08:42:34 -06:00
807ccd15ba Add solana-mint-demo CLI
This extends solana-mint with additional data that will be used both
by solana-client-demo and when creating the demo's genesis block.
2018-04-26 08:42:34 -06:00
1c923d2f9e Fix entry hash when no events and num_hashes is one 2018-04-26 08:42:34 -06:00
2676b21400 Merge pull request #151 from rlkelly/139__forget_signature
added forget_signature method
2018-04-26 08:28:11 -06:00
fd5ef94b5a added forget signature method 2018-04-26 07:22:11 -04:00
02c7eea236 Merge pull request #150 from garious/141__add_futures
Add FutureResult to return a Future that immediately resolves
2018-04-25 20:44:40 -06:00
34d1805b54 Add FutureResult to return a Future that immediately resolves 2018-04-25 19:23:24 -07:00
753eaa8266 buildkite script 2018-04-24 11:32:00 -07:00
0b39c6f98e Merge pull request #145 from aeyakovenko/crdt
initial crdt implementation
2018-04-24 10:49:23 -07:00
55b8d0db4d cleanup 2018-04-23 23:33:21 -07:00
3d7969d8a2 initial crdt implementation 2018-04-23 23:06:28 -07:00
041de8082a Merge pull request #144 from rleungx/improve-error-messages
improve the error messages
2018-04-21 08:17:25 -06:00
3da1fa4d88 improve the error messages 2018-04-21 21:52:55 +08:00
39df21de30 Merge pull request #142 from ansrivas/master
git clone instruction
2018-04-20 16:05:21 -06:00
8cbb7d7362 git clone instruction 2018-04-20 23:02:10 +02:00
10a0c47210 Merge pull request #137 from garious/linux-hang
Workaround linux hang
2018-04-19 11:46:48 -06:00
89bf3765f3 Merge pull request #138 from sakridge/help_options
Add -h/--help options for client-demo and testnode
2018-04-19 11:40:07 -06:00
8181bc591b Add -h/--help options for client-demo and testnode 2018-04-19 10:22:31 -07:00
ca877e689c Merge pull request #136 from rleungx/report-errors-to-stderr
report serde parse errors to stderr
2018-04-19 11:15:57 -06:00
c6048e2bab Workaround linux hang
Without this patch, Linux systems would hang when running the demo.

The root cause (why Linux is acting differently than macOS) was
not determined, but we know the problem is caused by a known
issue in the transaction pipeline - that entries are not pulled
off the historian channel until after the full transaction batch
is processed. This patch makes the sync_channel large enough that
it should never block on a gigabit network.
2018-04-19 10:04:32 -07:00
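A sketch of the mechanism: `std::sync::mpsc::sync_channel` blocks its sender only when the buffer fills, so a generous bound keeps the pipeline moving (the 10_000 below is illustrative, not the patch's actual value):

```rust
use std::sync::mpsc::sync_channel;
use std::thread;

fn main() {
    // A generous bound lets the producer run far ahead of the consumer,
    // so it never blocks mid-batch the way a small buffer would.
    let (sender, receiver) = sync_channel::<u64>(10_000);
    let producer = thread::spawn(move || {
        for i in 0..1_000 {
            sender.send(i).unwrap(); // only blocks if the buffer fills
        }
    });
    producer.join().unwrap();
    assert_eq!(receiver.iter().count(), 1_000);
}
```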
60015aee04 report serde parse errors to stderr 2018-04-19 23:51:57 +08:00
43e6741071 Merge pull request #134 from rleungx/report-parse-errors-to-stderr
report parse errors to stderr
2018-04-19 08:38:38 -06:00
b91f6bcbff report parse errors to stderr 2018-04-19 22:24:46 +08:00
64e2f1b949 Merge pull request #133 from djKooks/rm-mut
Remove out for immutable variable
2018-04-19 08:24:32 -06:00
13a2f05776 Remove out for immutable variable 2018-04-19 23:00:16 +09:00
903374ae9b Merge pull request #132 from aeyakovenko/readme
readme and site update
2018-04-18 21:49:09 -06:00
d366a07403 add gregs abstract as an intro 2018-04-18 20:17:37 -07:00
e94921174a Merge pull request #131 from sakridge/erasure
Add erasure rust logic under feature flag
2018-04-18 21:10:06 -06:00
dea5ab2f79 Add erasure rust logic under feature flag 2018-04-18 19:42:09 -07:00
5e11078f34 Add Stephen 2018-04-18 17:22:58 -06:00
d7670cd4ff Merge pull request #129 from aeyakovenko/retransmit
Retransmit
2018-04-18 11:10:50 -04:00
29f3230089 docs 2018-04-17 19:53:18 -07:00
d003efb522 fix docs 2018-04-17 19:52:46 -07:00
97e772e87a docs 2018-04-17 19:46:50 -07:00
0b33615979 update 2018-04-17 12:48:06 -07:00
249cead13e docs 2018-04-17 11:07:43 -07:00
7c96dea359 fmt 2018-04-17 11:05:35 -07:00
374c9921fd comments 2018-04-17 11:05:15 -07:00
fb55ab8c33 format 2018-04-16 21:02:37 -07:00
13485074ac test cast 2018-04-16 20:57:15 -07:00
4944c965e4 update
heap

update

update

wip

use a vec and sort

builds

update

tests

update

fmt

update

progress

fmt

passes needs retransmit test

tests

cleanup

update

update

update

update

fmt
2018-04-16 20:33:09 -07:00
83c5b3bc38 Merge pull request #125 from garious/fix-parallelized-ledger
Tell verifiers when not to parallelize accounting
2018-04-13 08:43:36 -06:00
7fc42de758 Fix bench 2018-04-13 00:36:23 -04:00
0a30bd74c1 Tell verifiers when not to parallelize accounting
Without this patch, many batches of transactions could be tossed
into a single entry, but the parallelized accountant can only
guarantee that the transactions within a batch can be processed in
parallel.

This patch signals the historian to generate a new Entry after
each batch. Validators must maintain sequential consistency
across Entries.
2018-04-12 21:08:53 -06:00
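A hedged sketch of the signaling described here; the `Signal` enum and names are hypothetical stand-ins, not the repo's types:

```rust
use std::sync::mpsc::{channel, Sender};

// Hypothetical stand-ins for the historian's input messages.
enum Signal {
    Transactions(Vec<u64>), // a verified, parallelizable batch
    EndOfBatch,             // cut a new Entry here; batches stay sequential
}

fn process_batch(historian: &Sender<Signal>, batch: Vec<u64>) {
    historian.send(Signal::Transactions(batch)).unwrap();
    // An explicit boundary after each batch tells validators that
    // parallelism is safe only within an Entry, never across Entries.
    historian.send(Signal::EndOfBatch).unwrap();
}

fn main() {
    let (tx, rx) = channel();
    process_batch(&tx, vec![1, 2, 3]);
    drop(tx);
    assert_eq!(rx.iter().count(), 2);
}
```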
9b12a79c8d cargo +nightly fmt 2018-04-12 17:04:11 -06:00
0dcde23b05 Merge pull request #119 from sakridge/skel_verify_test
Add skel test which sends a bad transaction, verify it doesn't make it
2018-04-12 17:02:08 -06:00
8dc15b88eb Add skel test which sends a bad transaction, verify it doesn't make it 2018-04-12 15:01:59 -07:00
d20c952f92 Merge pull request #121 from aeyakovenko/helpers
requests to packets utility function
2018-04-12 12:58:30 -07:00
c2eeeb27fd bump timer 2018-04-12 11:12:10 -07:00
180d8b67e4 requests to packets function 2018-04-12 10:44:09 -07:00
9c989c46ee Merge pull request #123 from garious/cleanup-tests
Cleanup tests
2018-04-11 22:37:35 -06:00
51633f509d Fix test
The test was meant to ensure the signature covered the 'tokens'
field, but then when the 'plan' field was rolled in, Transaction::verify()
started failing because Plan::verify() failed. When Transaction::verify()
was split into two, the unexpected failure was exposed but went unnoticed.
This patch brings it back to its original intent, to ensure signature
verification fails if the network attempts to change the client's payment.
2018-04-11 22:17:21 -06:00
705228ecc2 Remove redundant signs 2018-04-11 22:17:21 -06:00
740f6d2258 Merge pull request #122 from garious/fix-ci
Fix the nightly build
2018-04-11 20:56:58 -06:00
3b9ef5ccab Fix the nightly build 2018-04-11 20:24:14 -06:00
ab74e7f24f Merge pull request #117 from garious/parallelize-accountant
Enable parallelized accountant
2018-04-11 19:39:45 -06:00
be9a670fb7 Add process_packets() benchmark 2018-04-11 18:02:45 -06:00
6e43e7a146 Enable parallelized accountant 2018-04-11 18:01:59 -06:00
ab2093926a Merge pull request #120 from aeyakovenko/fix_bench_compile
fix compile error
2018-04-11 18:01:13 -06:00
916b90f415 Merge pull request #118 from sakridge/ecdsa_tests
Add tests for ecdsa sig checking
2018-04-11 17:58:45 -06:00
2ef3db9fab fix compile error 2018-04-11 15:40:25 -07:00
6987b6fd58 Add tests for ecdsa sig checking 2018-04-11 12:29:44 -07:00
078179e9b8 Merge pull request #115 from garious/parallelize-accountant
More refactoring
2018-04-11 10:28:15 -06:00
50ccecdff5 Refactor 2018-04-11 09:02:33 -06:00
e838a8c28a Delete unused function 2018-04-10 21:56:13 -06:00
e5f7eeedbf Use iterators 2018-04-10 21:48:26 -06:00
d1948b5a00 Zip earlier
And remove redundant into_iter() calls.
2018-04-10 21:18:39 -06:00
c07f700c53 Merge pull request #113 from aeyakovenko/master_pclient
command-line options for testnode and client
2018-04-09 23:07:03 -06:00
c934a30f66 commandline options for client and testnode 2018-04-09 21:14:52 -07:00
310d01d8a2 Merge pull request #112 from aeyakovenko/recycler_test
Recycler test should verify that it's recycling
2018-04-07 09:29:50 -06:00
f330739bc7 Recycler test should verify that it's recycling 2018-04-07 07:08:42 -07:00
58626721ad Merge pull request #111 from garious/parallelize-accountant
Cleanup
2018-04-06 17:03:10 -06:00
584c8c07b8 Better symmetry
deserialize -> process -> serialize
2018-04-06 16:34:59 -06:00
a93ec03d2c Move creating blobs into its own function 2018-04-06 16:22:02 -06:00
7bd3a8e004 Reduce cyclomatic complexity 2018-04-06 16:12:13 -06:00
912a5f951e Why is msgs cloned here? 2018-04-06 15:58:11 -06:00
6869089111 Parallelize deserialize 2018-04-06 15:52:58 -06:00
6fd32fe850 Cleanup constants 2018-04-06 15:43:05 -06:00
81e2b36d38 Cleanup packet_verify 2018-04-06 15:24:15 -06:00
7d811afab1 Parallelize CPU sig verify 2018-04-06 15:21:49 -06:00
39f5aaab8b Merge pull request #110 from garious/parallelize-accountant
Parallel processing of arbitrary transactions
2018-04-06 09:02:36 -06:00
5fc81dd6c8 Fix the nightly build
Nightly uses a different (but backward compatible) version of rustfmt.
2018-04-05 22:39:29 -06:00
491a530d90 Support parallelization of arbitrary transactions
Still assumes witnesses are processed serially afterward.
2018-04-05 22:30:25 -06:00
c12da50f9b Fix race condition
Without this patch, it was possible for two transactions with the same
'from' address to drive its balance below zero. With the patch, we'll
hold a write lock from just before we verify sufficient funds until
after those funds are deducted from the account.
2018-04-05 22:30:25 -06:00
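The locking discipline reads roughly like this sketch (illustrative types, not the repo's code):

```rust
use std::collections::HashMap;
use std::sync::RwLock;

// Hold the write lock across the check *and* the deduction, so two
// transactions from the same key can't both pass the balance check.
fn debit(balances: &RwLock<HashMap<String, i64>>, from: &str, tokens: i64) -> bool {
    let mut table = balances.write().unwrap();
    match table.get_mut(from) {
        Some(balance) if *balance >= tokens => {
            *balance -= tokens;
            true
        }
        _ => false, // insufficient funds or unknown key
    }
}

fn main() {
    let balances = RwLock::new(HashMap::from([("alice".to_string(), 10)]));
    assert!(debit(&balances, "alice", 7));
    assert!(!debit(&balances, "alice", 7)); // would go negative; rejected
}
```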
41e8500fc5 Break up process_verified_transaction() 2018-04-05 22:29:13 -06:00
a7f59ef3c1 Merge pull request #109 from sakridge/wip_gpu
Change for cuda verify integration
2018-04-05 22:24:35 -06:00
f4466c8c0a Change for cuda verify integration 2018-04-05 20:00:44 -07:00
bc6d6b20fa Merge pull request #108 from garious/parallelize-accountant
Reject old transactions so that we can boot old signatures
2018-04-05 15:11:22 -06:00
01326936e6 Expire all transactions after some amount of time
Reject old transactions so that we can calculate an upper bound
for memory usage, and therefore ensure the server won't slow
down over time to crash due to memory exhaustion.
2018-04-05 10:26:45 -06:00
c960e8d351 Reject transactions with a last_id that isn't from this ledger
Before this patch, a client could put any value into `last_id`, which
was primarily there to ensure the transaction had a globally unique
signature. With this patch, the server can use `last_id` as an
indicator of how long it's been since the transaction was created.
The server may choose to reject sufficiently old transactions so
that it can forget about old signatures.
2018-04-05 09:54:03 -06:00
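Together with the expiration commit above, the policy might be sketched as a bounded set of recent entry IDs (all names illustrative):

```rust
use std::collections::HashSet;

type Hash = [u8; 32];

struct SigWindow {
    recent_ids: HashSet<Hash>, // entry IDs the server still remembers
}

impl SigWindow {
    // Reject any transaction whose last_id has aged out of the window;
    // this bounds how long signatures must be kept for duplicate checks.
    fn accepts(&self, last_id: &Hash) -> bool {
        self.recent_ids.contains(last_id)
    }
}

fn main() {
    let window = SigWindow { recent_ids: HashSet::from([[1u8; 32]]) };
    assert!(window.accepts(&[1u8; 32]));
    assert!(!window.accepts(&[9u8; 32])); // too old or not from this ledger
}
```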
fc69d31914 Merge pull request #106 from garious/parallelize-accountant
Parallelize accountant
2018-04-04 22:42:28 -06:00
8d425e127b Update benchmark to avoid write locks in sig duplicate detection 2018-04-04 17:29:22 -06:00
3cfb07ea38 Sort signatures by last_id
This will allow for additional concurrency as well as give the server
a means of garbage-collecting old signatures.
2018-04-04 17:06:31 -06:00
76679ffb92 Per-cell locking
This allows us to use read-locks for balances most of the time. We
only lock the full table if we need to add one.
2018-04-04 16:31:13 -06:00
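A sketch of per-cell locking as described here and in the "Make accountant thread-safe" commit below: readers share the table lock, each balance updates atomically, and the table is write-locked only to insert a new cell (illustrative types):

```rust
use std::collections::HashMap;
use std::sync::atomic::{AtomicI64, Ordering};
use std::sync::RwLock;

struct Balances {
    table: RwLock<HashMap<String, AtomicI64>>,
}

impl Balances {
    fn credit(&self, key: &str, tokens: i64) {
        // Fast path: a shared read lock plus an atomic cell update.
        if let Some(cell) = self.table.read().unwrap().get(key) {
            cell.fetch_add(tokens, Ordering::SeqCst);
            return;
        }
        // Slow path: write-lock the table only to add the missing cell.
        self.table
            .write()
            .unwrap()
            .entry(key.to_string())
            .or_insert_with(|| AtomicI64::new(0))
            .fetch_add(tokens, Ordering::SeqCst);
    }
}

fn main() {
    let balances = Balances { table: RwLock::new(HashMap::new()) };
    balances.credit("alice", 2);
    balances.credit("alice", 3);
    let table = balances.table.read().unwrap();
    assert_eq!(table["alice"].load(Ordering::SeqCst), 5);
}
```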
dc2ec925d7 Better test 2018-04-04 16:01:43 -06:00
81d6ba3ec5 Merge pull request #105 from garious/coverage-comments
Add the 'why' for code coverage to readme
2018-04-04 14:34:26 -07:00
014bdaa355 Add benchmark for parallel transaction processing 2018-04-04 12:43:27 -06:00
0c60fdd2ce Make accountant thread-safe
Before this change, parallel transaction processing required locking
the full accountant. Since we only call one method,
process_verified_transaction, the global lock equates to doing no
parallelization at all.  With this change, we only lock the data that's
being written to.
2018-04-04 12:33:03 -06:00
43d986d14e Add the 'why' for code coverage to readme 2018-04-04 09:26:38 -06:00
123d7c6a37 Merge pull request #99 from aeyakovenko/subscribers
Blobs and windows
2018-04-03 17:12:53 -06:00
5ac7df17f9 Implement window service
Batch out-of-order blobs until we have a contiguous window.
2018-04-03 13:53:19 -07:00
bc0dde696a Merge pull request #102 from garious/rollback
Fix clippy warnings
2018-04-03 10:08:42 -06:00
c323bd3c87 Fix clippy warnings 2018-04-03 09:55:33 -06:00
5c672adc21 Merge pull request #101 from garious/rollback
Move tests
2018-04-02 21:58:10 -06:00
2f80747dc7 Move tests
After we restructured for parallel verification, the tests here
were unreferenced by the accountant, but still meaningful to
transaction verification.
2018-04-02 21:45:21 -06:00
95749ed0e3 Merge pull request #100 from garious/rollback
Cleanup use of event signatures and entry hashing
2018-04-02 21:17:37 -06:00
94eea3abec fmt 2018-04-02 21:15:21 -06:00
fe32159673 Add a test to ensure witness data continues to be hashed 2018-04-02 21:07:38 -06:00
07aa2e1260 Add witness data to entry hash
Otherwise, witnesses can be dropped or reordered by a malicious
generator.
2018-04-02 20:47:51 -06:00
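A hedged sketch of the idea using the sha2 crate's current API (the repo's actual hashing code differs):

```rust
use sha2::{Digest, Sha256};

// Mixing the serialized witness bytes into the running hash means a
// malicious generator can't drop or reorder them without detection.
fn extend_hash(prev_hash: &[u8; 32], witness_bytes: &[u8]) -> [u8; 32] {
    let mut hasher = Sha256::new();
    hasher.update(prev_hash);
    hasher.update(witness_bytes);
    hasher.finalize().into()
}

fn main() {
    let a = extend_hash(&[0u8; 32], b"pay alice 5");
    let b = extend_hash(&[0u8; 32], b"pay alice 9");
    assert_ne!(a, b); // tampered witness data changes the entry hash
}
```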
6fec8fad57 Adding from to the signature is redundant 2018-04-02 20:34:18 -06:00
84df487f7d Merge pull request #97 from garious/rollback
Refactoring for rollback
2018-04-02 15:41:33 -06:00
49708e92d3 Use last_id instead of seed
It doesn't really matter, but was confusing since the seed points
to an entry before the mint's deposit.
2018-04-02 15:06:42 -06:00
daadae7987 Move replaying ledger out of accountant 2018-04-02 14:51:55 -06:00
2b788d06b7 Move the historian up to accountant_skel 2018-04-02 14:41:07 -06:00
90cd9bd533 Move balance check so that log_* methods are only used to add logging 2018-04-02 14:14:49 -06:00
d63506f98c No longer allow deposits outside the constructor 2018-04-02 14:00:42 -06:00
17de6876bb Add simpler accountant constructor 2018-04-02 13:51:44 -06:00
fc540395f9 Update docs 2018-04-02 11:51:56 -06:00
da2b4962a9 Move verify_slice() into a trait 2018-04-02 11:43:38 -06:00
3abe305a21 Move reserve_signatures into accountant
Reasons Transaction signatures need to be unique:

1. guard against duplicates
2. accountant uses them as IDs to link Witness signatures to transactions via the
`pending` hash map
2018-04-02 09:38:36 -06:00
46e8c09bd8 Revoke API access to first_id 2018-04-02 09:30:10 -06:00
e683c34a89 Version bump 2018-03-31 14:44:43 -06:00
54e4f75081 Merge pull request #95 from jackson-sandland/source-documentation-review
94: source doc review
2018-03-30 14:50:51 -06:00
9f256f0929 94 - snakecase mod names 2018-03-30 13:10:27 -07:00
ef169a6652 94: source doc review 2018-03-30 10:43:38 -07:00
eaec25f940 Version bump 2018-03-29 15:05:38 -06:00
6a87d8975c Merge pull request #93 from garious/par-req-processing
Better benchmark, fix logging
2018-03-29 14:02:40 -06:00
b8cf5f9427 Fix transaction logging 2018-03-29 13:50:32 -06:00
2f1e585446 Better benchmark
Tolerates dropped UDP packets
2018-03-29 13:41:11 -06:00
f9309b46aa Merge pull request #92 from garious/par-req-processing
Parallel request verification
2018-03-29 13:28:21 -06:00
22f5985f1b Do request verification in parallel, and then process the verified requests 2018-03-29 13:18:08 -06:00
c59c38e50e Refactor for batch verification 2018-03-29 13:09:21 -06:00
232e1bb8a3 Colocate packet dependencies 2018-03-29 12:55:41 -06:00
1fbb34620c Fix compiler warning 2018-03-29 12:54:10 -06:00
89f5b803c9 Merge pull request #91 from garious/more-docs
Add more documentation
2018-03-29 12:39:03 -06:00
55179101cd Add more documentation 2018-03-29 12:20:54 -06:00
132495b1fc A simple consensus diagram to guide rollback/coalescing
Diagram for what's described in #84 for rollback support.
2018-03-29 10:52:02 -06:00
a03d7bf5cd Missed a couple 2018-03-28 22:20:31 -06:00
3bf225e85f Don't require install to run demo 2018-03-28 22:18:33 -06:00
cc2bb290c4 Merge pull request #89 from garious/sig-verify-bench
Add microbenchmark for signature verification
2018-03-28 22:15:10 -06:00
878ca8c5c5 Add microbenchmark for signature verification 2018-03-28 22:02:47 -06:00
4bc41d81ee Fix compiler warning 2018-03-28 21:05:21 -06:00
f6ca176fc8 Merge pull request #88 from garious/revert-tcp-client
Revert TCP sync of ledger
2018-03-28 20:28:05 -06:00
0bec360a31 Revert TCP sync of ledger
The feature was too rushed. We technically don't need it until we
implement consensus. It'll come back another day (with many more tests!)
2018-03-28 20:16:15 -06:00
04f30710c5 Merge pull request #87 from garious/tcp-client
tx confirmed/sec ---> tx processed/sec
2018-03-28 17:04:36 -06:00
98c0a2af87 tx confirmed/sec ---> tx processed/sec
Before this patch, we were waiting until the full log was
sent back across the wire, parsed, and interpreted. That was giving
us a metric of "transactions confirmed per second" instead of
"transactions processed per second". Instead, we'll just send one
tiny packet back with the balance. As soon as the balance is what
we expect it to be, we end the benchmark.
2018-03-28 16:51:21 -06:00
9db42c1769 Merge pull request #86 from garious/tcp-client
Fix up client demo
2018-03-28 14:57:09 -06:00
849bced602 Fix up client demo 2018-03-28 14:40:58 -06:00
27f29019ef Merge pull request #83 from garious/tcp-client
TCP subscription service
2018-03-28 13:19:38 -06:00
8642a41f2b See if Travis will tolerate executing some of the test 2018-03-28 10:25:16 -06:00
bf902ef5bc Ignore accountant_stub test
TODO: Figure out why this test fails on TravisCI
2018-03-28 10:05:00 -06:00
7656b55c22 nit 2018-03-27 17:22:31 -06:00
7d3d4b9443 nit 2018-03-27 17:20:23 -06:00
15c093c5e2 typo 2018-03-27 16:31:19 -06:00
116166f62d Rename project: silk -> solana 2018-03-27 16:25:12 -06:00
26b19dde75 Rename project: silk -> solana 2018-03-27 16:19:28 -06:00
c8ddc68f13 Rename project: silk -> solana 2018-03-27 16:16:27 -06:00
7c9681007c Drop support for random access to the ledger
No longer store the ledger locally.
2018-03-27 14:47:03 -06:00
13206e4976 Let clients subscribe to the ledger over TCP
TODO: Add more tests

Fixes #27
2018-03-27 14:46:24 -06:00
2f18302d32 Merge pull request #80 from garious/fix-ci
Fix CI
2018-03-26 22:13:11 -06:00
ddb21d151d Nightly rustfmt
Format code with the nightly version of rustfmt, which sorts imports.
2018-03-26 22:03:28 -06:00
c64a9fb456 Give Travis a little more time to start threads 2018-03-26 22:02:05 -06:00
ee19b4f86e See if CI hangs because of wait_on_signature() 2018-03-26 21:53:30 -06:00
14239e584f fix writer 2018-03-26 21:36:29 -06:00
112aecf6eb Merge pull request #77 from aeyakovenko/responder
Responder
2018-03-25 17:01:53 -07:00
c1783d77d7 fixed test 2018-03-25 16:18:27 -07:00
f089abb3c5 fix bench 2018-03-25 15:37:00 -07:00
8e551f5e32 debug trait tests 2018-03-25 08:22:04 -07:00
290960c3b5 wip 2018-03-25 08:06:33 -07:00
62af09adbe wip 2018-03-25 08:05:03 -07:00
e39c0b34e5 update 2018-03-25 00:06:48 -07:00
8ad90807ee responder with larger block size 2018-03-24 23:46:25 -07:00
533b3170a7 responder 2018-03-24 23:31:54 -07:00
7732f3f5fb services 2018-03-24 18:01:54 -07:00
f52f02a434 services 2018-03-24 18:01:40 -07:00
4d7d4d673e Merge pull request #75 from garious/fix-testnode
Revive silk-testnode
2018-03-23 22:16:59 -06:00
9a437f0d38 Revive silk-testnode 2018-03-23 21:49:28 -06:00
c385f8bb6e Merge pull request #73 from garious/yes-clippy
Automated mentoring by clippy
2018-03-22 15:22:12 -06:00
fa44be2a9d Ignore some clippy advice 2018-03-22 14:59:25 -06:00
117ab0c141 Clippy review 2018-03-22 14:50:24 -06:00
7488d19ae6 Clippy review 2018-03-22 14:40:28 -06:00
60524ad5f2 Clippy review 2018-03-22 14:38:06 -06:00
fad7ff8bf0 Clippy review 2018-03-22 14:31:58 -06:00
383d445ba1 Clippy review 2018-03-22 14:15:29 -06:00
803dcb0800 Mutex<bool> -> AtomicBool 2018-03-22 14:05:23 -06:00
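The swap, as a minimal standalone sketch (the exit-flag scenario is illustrative):

```rust
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;

fn main() {
    // For a lone boolean flag, an AtomicBool avoids the locking (and
    // poisoning) overhead of a Mutex<bool>.
    let exit = Arc::new(AtomicBool::new(false));
    let flag = exit.clone();
    let worker = thread::spawn(move || {
        while !flag.load(Ordering::Relaxed) {
            thread::yield_now(); // stand-in for real work
        }
    });
    exit.store(true, Ordering::Relaxed);
    worker.join().unwrap();
}
```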
fde320e2f2 Merge pull request #71 from garious/rework-recorder
Replicate the ledger
2018-03-21 17:23:55 -06:00
8ea97141ea Update the test to replicate the ledger 2018-03-21 17:15:32 -06:00
9f232bac58 Allow clients to sync the ledger
Fixes #4
2018-03-21 15:46:49 -06:00
8295cc11c0 Move JSON printing up the stack 2018-03-20 23:15:44 -06:00
70f80adb9a Merge pull request #70 from garious/planevent-to-witness
Cleanup
2018-03-20 19:13:02 -06:00
9a7cac1e07 Use the Entry API to remove the double lookup 2018-03-20 18:07:54 -06:00
c584a25ec9 Move complete_transaction from method to function
So that we can hold separate mutable references to the pending queue
and the map of balances.
2018-03-20 17:47:57 -06:00
bff32bf7bc Cleanup 2018-03-20 17:32:02 -06:00
d0e7450389 Add docs 2018-03-20 16:58:14 -06:00
4da89ac8a9 Cleanup naming 2018-03-20 16:53:41 -06:00
f7032f7d9a Cleanup: replace bool retval with is_complete() method 2018-03-20 16:52:47 -06:00
7c7e3931a0 Better docs 2018-03-20 15:52:46 -06:00
6be3d62d89 Remove Action from spending plans 2018-03-20 15:43:07 -06:00
6f509a8a1e Reorder 2018-03-20 15:31:28 -06:00
4379fabf16 PlanEvent -> Witness
The term used by the Simplicity smart contract language
2018-03-20 15:25:50 -06:00
6b66e1a077 Merge pull request #69 from garious/move-streamer-benchmark
Move streamer benchmark out of unit tests
2018-03-19 17:33:45 -06:00
c11a3e0fdc Move streamer benchmark out of unit tests 2018-03-19 17:10:01 -06:00
3418033c55 Merge pull request #68 from garious/fix-bench
Fix bench
2018-03-19 16:52:41 -06:00
caa9a846ed Boot sha2-asm
Stick with pure Rust until someone can write a benchmark that
demonstrates that sha2-asm adds value. If we go with a GPU
implementation first, we may never need to do that.
2018-03-19 16:42:30 -06:00
8ee76bcea0 Fix benchmark build 2018-03-19 16:41:01 -06:00
47325cbe01 Merge pull request #67 from garious/cleanup-naming
Cleanup naming
2018-03-19 16:29:08 -06:00
e0c8417297 Apply renames to docs 2018-03-19 10:23:43 -06:00
9238ee9572 No longer rename log crate 2018-03-19 10:18:51 -06:00
64af37e0cd logger -> recorder
Free up namespace for a traditional runtime logger.
2018-03-19 10:16:21 -06:00
9f9b79f30b log -> ledger
Free up namespace for traditional runtime logs.
2018-03-19 10:09:19 -06:00
265f41887f asset -> tokens 2018-03-19 10:03:41 -06:00
4f09e5d04c Merge pull request #66 from garious/conditional-plan
Simplify contract language
2018-03-18 21:12:26 -06:00
434f321336 Add spending plan tests 2018-03-18 21:02:28 -06:00
f4e0d1be58 Make conditions explicit in races
And boot recursive spending plans. That path required heap allocations.
Since we don't have a need for this generality right now, reduce the
language to the smallest one that can pass our test suite.
2018-03-17 20:43:05 -06:00
e5bae0604b Specialize transaction assets to i64
Proof-of-history is generic, but now that we're using it entirely
for tokens, we can specialize the type and start doing more interesting
things than just Eq and Serialize operations.
2018-03-17 19:56:15 -06:00
e7da083c31 Move spending plans to their own crate 2018-03-17 19:56:15 -06:00
367c32dabe Guard spending plans, not just payments 2018-03-17 19:56:15 -06:00
e054238af6 Merge pull request #65 from aeyakovenko/fixtest
fix test
2018-03-14 12:21:08 -07:00
e8faf6d59a trait test 2018-03-14 11:28:05 -07:00
baa4ea3cd8 wfmt 2018-03-14 11:14:40 -07:00
75ef0f0329 fix test 2018-03-14 11:02:38 -07:00
65185c0011 Merge pull request #63 from aeyakovenko/streamer-integrated
Streamer integrated
2018-03-12 08:38:59 -06:00
eb94613d7d Use streaming socket interface within accountant
Pull messages from the streamer, process them, and forward them to the sender.
2018-03-11 23:41:09 -05:00
67f4f4fb49 Merge pull request #64 from garious/dumb-contracts
Entry-level smart contracts
2018-03-11 13:23:11 -06:00
a7ecf4ac4c Merge pull request #57 from aeyakovenko/streamer
Streamer
2018-03-11 13:22:49 -06:00
45765b625a Don't let users accidentally burn their funds either 2018-03-11 12:04:49 -06:00
aa0a184ebe Ensure the server isn't passed a Plan that spends more than is bonded 2018-03-11 11:53:45 -06:00
069f9f0d5d add ipv6 flag to cargo.toml 2018-03-11 12:53:16 -05:00
c82b520ea8 remove unnecessary returns 2018-03-11 11:45:17 -05:00
9d6e5bde4a ipv6 test with a separate flag 2018-03-11 11:22:21 -05:00
0eb3669fbf cleanup timestamp processing 2018-03-11 00:30:01 -07:00
30449b6054 cleanup sig processing 2018-03-11 00:11:08 -07:00
f5f71a19b8 First go at smart contracts
Needs lots of cleanup.
2018-03-10 22:00:48 -07:00
0135971769 Fast UdpSocket reader
* message needs to fit into 256 bytes
* allocator to keep track of blocks of messages
* udp socket receiver server that fills up the block as fast as possible
* udp socket sender server that sends out the block as fast as possible
2018-03-10 21:09:23 -06:00
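A stripped-down sketch of the receive loop those bullets describe, with fixed 256-byte slots in a preallocated block (the allocator and block recycling are omitted; names are illustrative):

```rust
use std::io::ErrorKind;
use std::net::UdpSocket;
use std::time::Duration;

const MSG_SIZE: usize = 256;

// Read datagrams into fixed-size slots until the block is full or the
// socket goes quiet; a real implementation recycles blocks via an allocator.
fn fill_block(socket: &UdpSocket, block: &mut [u8]) -> std::io::Result<usize> {
    let mut count = 0;
    for slot in block.chunks_mut(MSG_SIZE) {
        match socket.recv_from(slot) {
            Ok(_) => count += 1,
            Err(e) if e.kind() == ErrorKind::WouldBlock
                || e.kind() == ErrorKind::TimedOut => break, // quiet
            Err(e) => return Err(e),
        }
    }
    Ok(count)
}

fn main() -> std::io::Result<()> {
    let receiver = UdpSocket::bind("127.0.0.1:0")?;
    receiver.set_read_timeout(Some(Duration::from_millis(100)))?;
    let sender = UdpSocket::bind("127.0.0.1:0")?;
    sender.send_to(&[1u8; MSG_SIZE], receiver.local_addr()?)?;
    let mut block = vec![0u8; MSG_SIZE * 1024]; // room for 1024 messages
    println!("filled {} slots", fill_block(&receiver, &mut block)?);
    Ok(())
}
```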
8579795c40 Ensure transactions won't get canceled after next refactor 2018-03-10 19:44:45 -07:00
9d77fd7eec Store only spending plans, not full transactions 2018-03-10 18:35:10 -07:00
8c40d1bd72 Move spending endpoints into expressions 2018-03-10 17:41:18 -07:00
7a0bc7d888 Move smart contract fields into their own struct 2018-03-10 16:55:39 -07:00
1e07014f86 Merge pull request #62 from garious/batch-events
Batch events
2018-03-09 17:37:02 -07:00
49281b24e5 Move Tick out of Event
Every Entry is now a Tick and the entries contain events.
2018-03-09 17:22:17 -07:00
a8b1980de4 Restore reorder attack test 2018-03-09 17:02:17 -07:00
b8cd5f0482 Boot Cargo.lock from git
Only add Cargo.lock to downstream dependencies.
2018-03-09 16:26:26 -07:00
cc9f0788aa Batch events
It's now a Tick that locks down event order. Before this change, the
event order would be locked down in the order the server sees it.

Fixes #59
Fixes #61
2018-03-09 16:16:33 -07:00
209910299d Version bump
The next release probably won't have an entry log compatible with
the 0.3.x line.
2018-03-09 14:33:37 -07:00
17926ff5d9 Merge pull request #58 from garious/deterministic-historian
Deterministic historian/accountant hashes
2018-03-09 07:06:40 -07:00
957fb0667c Deterministic historian/accountant hashes
When in tick-less mode, no longer continuously hash on the
background thread. That mode is just used for testing and
genesis log generation, and those extra hashes are just noise.

Note that without the extra hashes, we lose the duration between
events. Effectively, we distinguish proof-of-order from proof-of-time.
2018-03-09 06:58:40 -07:00
8d17aed785 Process timestamps as they are added 2018-03-08 15:39:03 -07:00
7ef8d5ddde Lock down dependencies 2018-03-08 13:25:40 -07:00
9930a2e167 With v0.3.1 published to crates.io, you can now run silk without git 2018-03-08 11:42:06 -07:00
a86be9ebf2 Merge pull request #56 from garious/add-conditions
Add conditions to transactions
2018-03-08 11:15:31 -07:00
ad6665c8b6 Complete timestamp and signature transactions 2018-03-08 11:06:52 -07:00
923162ae9d WIP: process timestamps 2018-03-08 10:19:54 -07:00
dd2bd67049 Add a barebones test for transaction conditions 2018-03-08 08:58:34 -07:00
d500bbff04 Add public key to mint
This turns the mint into a handy way to generate public keys
without throwing the private key away.
2018-03-08 08:33:00 -07:00
e759bd1a99 Add conditions to the signature to reject duplicates 2018-03-08 08:18:34 -07:00
94daf4cea4 Add Cancel and Timestamp events
Fixes #31, #34, #39
2018-03-08 08:17:34 -07:00
2379792e0a Add DateTime and Cancel conditions
Fixes #32, #33
2018-03-08 08:17:08 -07:00
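A hedged sketch of the two condition kinds these commits introduce (field types are guesses for exposition; chrono is already among the crate's dependencies):

```rust
use chrono::{DateTime, Utc};

type PublicKey = [u8; 32];

// A payment unlocks when its condition is satisfied by a witness.
enum Condition {
    Timestamp(DateTime<Utc>), // payee may claim once this time is witnessed
    Signature(PublicKey),     // payer may cancel by signing with this key
}

fn is_satisfied(cond: &Condition, now: DateTime<Utc>, signer: &PublicKey) -> bool {
    match cond {
        Condition::Timestamp(dt) => now >= *dt,
        Condition::Signature(key) => key == signer,
    }
}

fn main() {
    let key = [7u8; 32];
    let cancel = Condition::Signature(key);
    assert!(is_satisfied(&cancel, Utc::now(), &key));
}
```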
dba6d7a8a6 Update README.md 2018-03-07 17:20:40 -07:00
086c206b76 Merge pull request #55 from garious/the-mint
More intuitive demo, introducing The Mint
2018-03-07 17:18:24 -07:00
5dd567deef Rename Genesis to Mint
Genesis is a story of creation. We should only use that term for
the event log that bootstraps the system.
2018-03-07 17:08:15 -07:00
b6d8f737ca Introducing, the mint
Use the mint to pair a new private key with new tokens.
2018-03-07 16:58:04 -07:00
491ba9da84 Add accessors to keypairs and signatures 2018-03-07 15:32:22 -07:00
a420a9293f Fix demo 2018-03-07 11:37:30 -07:00
c1bc5f6a07 Merge pull request #54 from garious/imperative-genesis
Boot genesis block helper
2018-03-07 11:19:16 -07:00
9834c251d0 Boot genesis block helper
Before this change, if you wanted to use a new Transaction
feature in the genesis block, you'd need to extend its
Creator object and associated methods.  With yesterday's
additions to Transaction, it's now so easy to work with
Transactions directly that we can get rid of the middleman.

Also added a KeyPair type alias, so that ring could be easily swapped
out with a competing library, if needed.
2018-03-07 11:10:15 -07:00
54340ed4c6 Delete debugging println
Thanks @jackson-sandland!
2018-03-06 21:17:41 -07:00
96a0a9202c Update README.md 2018-03-06 21:12:50 -07:00
a4c081d3a1 Merge pull request #53 from garious/monorphic-entry
Monomorphize Entry and Event
2018-03-06 20:39:11 -07:00
d1b6206858 Monomorphize Entry and Event
Transaction turned out to be the only struct worth making generic.
2018-03-06 20:29:18 -07:00
0eb6849fe3 Merge pull request #52 from garious/add-transaction-struct
Break dependency cycle
2018-03-06 17:53:48 -07:00
b725fdb093 Sha256Hash -> Hash
Because in Loom, there's just the one. Hopefully no worries that it
shares a name with std::Hash.
2018-03-06 17:40:01 -07:00
1436bb1ff2 Move entry into its own module
Hmm, Logger doesn't depend on log.
2018-03-06 17:40:01 -07:00
5a44c36b1f Move hash into its own module 2018-03-06 17:40:01 -07:00
5d990502cb Merge pull request #51 from jackson-sandland/50-proof-read-README
Issue #50 - proof read README
2018-03-06 17:39:33 -07:00
64735da716 Issue #50 - proof read README 2018-03-06 16:21:45 -08:00
95b82aa6dc Merge pull request #49 from garious/add-transaction-struct
DRY up transaction signing
2018-03-06 16:48:27 -07:00
f09952f3d7 DRY up transaction signing
Cleanup the big mess I copy-pasted myself into.
2018-03-06 16:34:25 -07:00
b98e04dc56 Update README.md 2018-03-06 15:03:06 -07:00
cb436250da Merge pull request #48 from garious/add-transaction-struct
data -> asset
2018-03-06 15:01:56 -07:00
4376032e3a data -> asset
'data' is too vague.
2018-03-06 14:50:32 -07:00
c231331e05 Merge pull request #47 from garious/add-transaction-struct
Reorg
2018-03-06 12:57:49 -07:00
624c151ca2 Add signature module
Because things other than transactions can be signed.
2018-03-06 12:48:28 -07:00
5d0356f74b Move verify_entry to a method as well 2018-03-06 12:35:12 -07:00
b019416518 Move verify into methods
A little overly-coupled to Serialize, but makes the code a lot tighter
2018-03-06 12:27:08 -07:00
4fcd9e3bd6 Give Transaction its own module 2018-03-06 12:18:17 -07:00
66bf889c39 Rename Transfer to Transaction
struct names should be nouns
2018-03-06 11:54:47 -07:00
a2811842c8 More cleanup
Far fewer branches when we process transfers outside the context
of events.
2018-03-06 11:43:55 -07:00
1929601425 Cleanup
Now that Transfer is out of the enum, we don't need to pattern
match to access its fields.
2018-03-06 11:19:59 -07:00
282afee47e Use Transfer struct on the client side too
Sharing is caring.
2018-03-06 11:03:43 -07:00
e701ccc949 Rename Request::Transfer to Request::Transaction 2018-03-06 10:59:47 -07:00
6543497c17 Move Transaction data into its own struct
This will allow us to add additional transfer types to the log.
2018-03-06 10:50:32 -07:00
7d9af5a937 Merge pull request #46 from garious/be-negative
Allow balances to be negative
2018-03-05 23:47:02 -07:00
720c54a5bb Allow balances to be negative
* Will allow owners to loan tokens to others.
* Will allow for parallel verification of balances without spilling
  over 64 bits.

Fixes #43
2018-03-05 17:30:53 -07:00
5dca3c41f2 Update README.md 2018-03-05 16:19:26 -07:00
929546f60b Update README.md 2018-03-05 16:18:46 -07:00
cb0ce9986c Merge pull request #45 from garious/init-from-log
Towards sending the log to clients
2018-03-05 16:17:41 -07:00
064eba00fd Update readme 2018-03-05 16:05:16 -07:00
a4336a39d6 Initialize the testnode from a log
$ cargo run --bin silk-genesis-file-demo > demo-genesis.json
$ cat demo-genesis.json | cargo run --bin silk-genesis-block > demo-genesis.log
$ cat demo-genesis.log | cargo run --bin silk-testnode
2018-03-05 15:34:44 -07:00
298989c4b9 Generate log from Genesis 2018-03-05 13:03:56 -07:00
48c28c2267 Transactions now require a hash of the last entry they've seen
This ensures the transaction cannot be processed on a chain
that forked before that ID. It will also provide a basis for
expiration constraints. A client may want their transaction
to expire, and the generators may want to reject transactions
that have been floating in the ether for years.
2018-03-05 12:48:14 -07:00
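The shape described here, as an illustrative sketch (field names and types are guesses, not the repo's definitions):

```rust
type Hash = [u8; 32];
type PublicKey = [u8; 32];

// Pinning last_id ties the transaction to one fork of the ledger and
// gives the server a rough measure of the transaction's age.
#[allow(dead_code)]
struct Transaction {
    from: PublicKey,
    to: PublicKey,
    tokens: i64,
    last_id: Hash, // hash of the last entry the client has seen
    signature: Vec<u8>,
}

fn main() {
    let tx = Transaction {
        from: [1; 32],
        to: [2; 32],
        tokens: 42,
        last_id: [0; 32], // would be a real entry ID from the ledger
        signature: vec![],
    };
    assert_eq!(tx.tokens, 42);
}
```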
d76ecbc9c9 Don't block the server 2018-03-05 11:39:59 -07:00
79fb9c00aa Boot wait_on_signature() from accountant
Instead, there should be a way to query for a page of log data,
and checking whether it has a signature should be done client-side.
2018-03-05 10:45:18 -07:00
c9e03f37ce Logger now only speaks when spoken to
Before this change, the logger's send channel could quickly be
flooded with Tick events. Those events should only be passed to
a writer.

Also, the log_event() function no longer sends entries. That
functionality moved to the new process_events() function. This
will allow us to initialize the logger with the genesis block without
flooding the send channel with events the historian won't read.
2018-03-05 10:33:12 -07:00
aa5f1699a7 Update the set of unique signatures when loading an existing log. 2018-03-04 22:31:12 -07:00
e1e9126d03 Merge pull request #44 from garious/genesis
Finally, genesis block generation without channels
2018-03-04 14:39:28 -07:00
672a4b3723 Update historian diagram 2018-03-04 14:36:55 -07:00
955f76baab Finally, genesis block generation without channels 2018-03-04 14:32:30 -07:00
7da8a5e2d1 Merge pull request #42 from garious/genesis
Make num_hashes more intuitive
2018-03-04 13:05:38 -07:00
ff82fbf112 Make num_hashes mean the num_hashes since the last ID
Before this change, num_hashes meant the number of hashes since
the last ID, minus any hashing done on the event data. It made
no difference for Tick events, but logged Transaction events with
one less hash than actually occurred.
2018-03-04 09:52:36 -07:00
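Under the new definition, slice verification can recompute each ID directly; a hedged sketch that ignores event data mixed into the final hash:

```rust
use sha2::{Digest, Sha256};

// With num_hashes counting every hash since the last ID, an entry's id
// is reachable from prev_id by exactly num_hashes applications of SHA-256.
fn verify_entry(prev_id: [u8; 32], num_hashes: u64, id: [u8; 32]) -> bool {
    let mut current = prev_id;
    for _ in 0..num_hashes {
        current = Sha256::digest(&current).into();
    }
    current == id
}

fn main() {
    let start = [0u8; 32];
    let next: [u8; 32] = Sha256::digest(&start).into();
    assert!(verify_entry(start, 1, next));
}
```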
8503a0a58f Refactor 2018-03-04 09:21:45 -07:00
b1e9512f44 Rename end_hash to id 2018-03-04 07:50:26 -07:00
608def9c78 Consolidate imports 2018-03-04 07:28:51 -07:00
bcb21bc1d8 Delete dead code 2018-03-04 07:20:17 -07:00
f63096620a Merge pull request #41 from garious/genesis
Add command-line tool for generating a genesis block
2018-03-04 01:27:59 -07:00
9b26892bae Add a demo app to generate the genesis file 2018-03-04 01:21:40 -07:00
572475ce14 Load the genesis block 2018-03-04 00:15:17 -07:00
876d7995e1 Refactor to support loading an existing ledger 2018-03-03 22:25:40 -07:00
b8655e30d4 Make client-demo standalone
And remove deposit() methods from the API. Those should only be
used on the server to bootstrap.
2018-03-03 21:15:51 -07:00
7cf0d55546 Remove optional 'from' field 2018-03-03 20:41:07 -07:00
ce60b960c0 Special case sending money to self
In the genesis block, let matching 'from' and 'to' keys be used
to mint new coin.
2018-03-03 20:27:12 -07:00
cebcb5b92d Start genesis with a Tick, so that its hash can be used to bootstrap verification 2018-03-03 19:57:22 -07:00
11a0f96f5e Add command-line tool for generating a genesis block 2018-03-03 17:35:05 -07:00
74ebaf1744 Merge pull request #40 from garious/add-logger
Add logger
2018-03-03 14:37:15 -07:00
f7496ea6d1 Make create_logger a static method
Allows us to share the super long type signature in impl.
2018-03-03 14:26:59 -07:00
bebba7dc1f Give logger its own crate 2018-03-03 14:24:32 -07:00
afb2bf442c Use Instant instead of SystemTime for more precise ticking
And convert log_event from function to method
2018-03-03 14:08:53 -07:00
c7de48c982 Convert log_events from function to method 2018-03-03 14:00:37 -07:00
f906112c03 Move logging thread's state into a struct 2018-03-03 13:52:57 -07:00
8ef864fb39 Merge pull request #37 from garious/split-benchmark
Split benchmark
2018-03-03 12:13:54 -07:00
1c9b5ab53c Report performance of signature verification too 2018-03-03 11:59:34 -07:00
c10faae3b5 More readable metrics 2018-03-03 11:52:50 -07:00
2104dd5a0a Fix benchmark
Was measuring the creation of the iterator, not running it.
2018-03-03 11:45:23 -07:00
fbe64037db Merge pull request #35 from garious/split-benchmark
Move key generation and signing from transaction benchmark
2018-03-03 11:25:58 -07:00
d8c50b150c Move key generation and signing from transaction benchmark
Key generation, signing and verification are not the performance
bottleneck. Something is probably wrong here.
2018-03-03 11:11:46 -07:00
8871bb2d8e Merge pull request #30 from garious/simplify
Unify Claim and Transaction handling
2018-03-02 12:24:44 -07:00
a148454376 Update readme 2018-03-02 12:07:05 -07:00
be518b569b Remove cyclic dependency between event and log 2018-03-02 12:03:59 -07:00
c998fbe2ae Sign the owner's public key
Without this, the accountant will reject transfers from different
entities if they are for the same amount and to the same entity.
2018-03-02 11:56:42 -07:00
9f12cd0c09 Purge the Claim event type
It's now represented as a Transaction from an unknown party.
2018-03-02 11:48:58 -07:00
0d0fee1ca1 Sign Claim's 'to' field
Otherwise, the accountant will treat deposits of the same amount as
duplicates.
2018-03-02 11:46:22 -07:00
a0410c4677 Pipe all Claim constructors through a function 2018-03-02 10:58:43 -07:00
8fe464cfa3 Rename Claim's key field to match same field in Transaction 2018-03-02 10:47:21 -07:00
3e2d6d9e8b Generalize Transaction to express a Claim
If a Transaction doesn't have an existing address, it's being used
to create new funds.
2018-03-02 10:41:15 -07:00
32d677787b Reduce transactions sent by demo
We don't do retries yet, so keep tx count to something that won't
trigger any packet loss.
2018-03-02 10:35:38 -07:00
dfd1c4eab3 Don't process transaction if channel.send() fails.
Do all input validation first, then log (which can fail). If all
goes swimmingly, process the transaction.
2018-03-02 10:17:52 -07:00
36bb1f989d More defense against a double-spend attack
Before this change, a client could spend funds before the accountant
processed a previous spend. With this change in place, the accountant
updates balances immediately, but that comes at an architectural cost.
The accountant now verifies signatures on behalf of the historian, so
that it can ensure logging will not fail.
2018-03-02 09:55:44 -07:00
684f4c59e0 Delete commented out code
accountant crate shouldn't verify the log. Instead, it should
only add valid entries and leave verification to network nodes.
2018-03-02 08:51:29 -07:00
1b77e8a69a Move Event into its own crate
The log crate was starting to be the catch-all for all things
related to entries, events, signatures, and hashes. This split
shows us that:

* Event depends only on signatures, not on hashes [directly]
* All event testing was done via log testing (shame on me)
* Accounting depends only on events
2018-03-02 08:43:57 -07:00
662e10c3e0 Merge pull request #29 from garious/simplify
Remove Discovery event
2018-03-01 18:53:25 -07:00
c935fdb12f Move signature duplicate detection into the historian 2018-03-01 17:44:10 -07:00
9e16937914 Delete the Discovery event
Not useful to the accountant.
2018-03-01 17:02:41 -07:00
f705202381 No need to hash data that's already hashed to create the signature 2018-03-01 16:39:09 -07:00
f5532ad9f7 Merge pull request #28 from garious/go-udp
Switch to UDP
2018-03-01 14:25:20 -07:00
570e71f050 Check for duplicate signatures
TODO: have client add recent hash to each message
2018-03-01 14:07:39 -07:00
c9cc4b4369 Switch to UDP from TCP
And remove all the sleep()'ing around.
2018-03-01 13:47:53 -07:00
7111aa3b18 Copy disclaimer from the loom repository
Per @aeyakovenko, added Loom's disclaimer.
2018-03-01 09:16:39 -07:00
12eba4bcc7 Merge pull request #26 from garious/add-accountant
Add testnode and client-demo
2018-02-28 19:48:05 -07:00
4610de8fdd Switch to sync_channel to preserve order 2018-02-28 19:33:28 -07:00
3fcc2dd944 Add testnode
Fixes #20
2018-02-28 18:05:20 -07:00
8299bae2d4 Add accountant stub 2018-02-28 16:01:12 -07:00
604ccf7552 Add network interface for accountant 2018-02-28 14:00:04 -07:00
f3dd47948a Merge pull request #25 from garious/verify-historian-input
Verify event signatures before adding log entries
2018-02-28 10:34:10 -07:00
c3bb207488 Verify event signatures before adding log entries 2018-02-28 10:23:01 -07:00
9009d1bfb3 Merge pull request #24 from garious/add-accountant
Add accountant
2018-02-27 11:41:40 -07:00
fa4d9e8bcb Add more tests 2018-02-27 11:28:10 -07:00
34b77efc87 Sleep longer for TravisCI 2018-02-27 11:08:28 -07:00
5ca0ccbcd2 Add accountant 2018-02-27 10:54:06 -07:00
6aa4e52480 Merge pull request #23 from garious/add-transaction
Generalize the event log
2018-02-26 17:40:55 -07:00
f98e9a2ad7 Fix overuse of search-and-replace 2018-02-26 17:03:50 -07:00
c6134cc25b Allow the historian to track ownership of any type of data 2018-02-26 17:01:22 -07:00
0443b39264 Allow event log to hold events of any serializable (hashable) type 2018-02-26 16:42:31 -07:00
8b0b8efbcb Allow Entry to hold events of any kind of data 2018-02-26 15:37:33 -07:00
97449cee43 Allow events to hold any kind of data 2018-02-26 15:31:01 -07:00
ab5252c750 Move entry verification out of Entry impl 2018-02-26 14:39:01 -07:00
05a27cb34d Merge pull request #22 from garious/add-transaction
Extend the event log with a Transaction event to transfer possession
2018-02-26 11:26:58 -07:00
b02eab57d2 Extend the event log with a Transaction event to transfer possession
This implementation assumes 'from' is the current owner of 'data'.
Once that's verified, the signature ensures that nobody modified
'data' (the asset being transferred) or 'to' the entity taking
ownership.

Fixes #14
2018-02-26 11:09:11 -07:00
b8d52cc3e4 Make the Discovery event into a struct instead of a tuple 2018-02-24 11:15:03 -07:00
7d9bab9508 Update rendered demo diagram 2018-02-24 11:09:00 -07:00
944181a30e Version bump 2018-02-24 11:06:08 -07:00
d8dd50505a Merge pull request #21 from garious/add-signatures
Add signatures
2018-02-24 10:47:25 -07:00
d78082f5e4 Test bad signature 2018-02-24 10:27:51 -07:00
08e501e57b Extend the event log with a Claim event to claim possession
Unlike a Discovery event, a Claim event associates a public key
with a hash. It's intended to be used to claim ownership of
some hashable data. For example, a graphic designer could claim
copyright by hashing some image they created, signing it with
their private key, and publishing the hash-signature pair via
the historian. If someone else tries to claim it as their own,
the designer can point to the historian's log as cryptographically
secure evidence that the designer's copy existed before anyone
else's.

Note there's nothing here that verifies the first claimant is the actual
content owner, only that the first claim almost certainly happened
before a second.
2018-02-24 10:09:49 -07:00
29a607427d Rename UserDataKey to Discovery
From the perspective of the log, when some data's hash is added,
that data is "discovered" by the historian.  Another event
might be a "claim" that some signed data belongs to the owner of a
public key.
2018-02-24 05:25:19 -07:00
afb830c91f Merge pull request #18 from garious/add-historian
self-ticking logger
2018-02-21 12:30:10 -07:00
c1326ac3d5 Up the time to sleep so that ticks are generated 2018-02-21 12:22:23 -07:00
513a1adf57 Version bump 2018-02-21 12:01:17 -07:00
7871b38c80 Update demo to use self-ticking logger 2018-02-21 11:52:03 -07:00
b34d2d7dee Allow the logger to inject Tick events on its own 2018-02-21 11:33:42 -07:00
d7dfa8c22d Readme cleanup 2018-02-21 10:07:32 -07:00
8df274f0af Add hash seed to verify_slice() 2018-02-21 09:43:34 -07:00
07c4ebb7f2 Add message sequence chart for readme demo
Fixes #17
2018-02-21 09:33:50 -07:00
49605b257d Merge pull request #16 from garious/add-serde
Add serialization/deserialization support to the event log
2018-02-20 16:55:46 -07:00
fa4e232d73 Add serialization/deserialization support to the event log
See bincode and serde_json for usage:
https://github.com/TyOverby/bincode

Fixes #1
2018-02-20 16:26:13 -07:00
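A small round-trip sketch of what this enables, using bincode's `serialize`/`deserialize` (the struct is illustrative, not the repo's Entry):

```rust
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct LogEntry {
    num_hashes: u64,
    id: [u8; 32],
}

fn main() {
    let entry = LogEntry { num_hashes: 7, id: [0u8; 32] };
    // Serialize to a compact binary form and back; serde_json would give
    // a human-readable encoding of the same data.
    let bytes = bincode::serialize(&entry).unwrap();
    let back: LogEntry = bincode::deserialize(&bytes).unwrap();
    assert_eq!(entry, back);
}
```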
bd84cf6586 Merge pull request #15 from garious/add-historian
Demo proof-of-history and reordering attack
2018-02-20 15:05:20 -07:00
6e37f70d55 Test reorder attack 2018-02-20 14:46:36 -07:00
d97112d7f0 Explain proof-of-history in the readme
Also:
* Hash userdata so that verification works as the readme describes.
* Drop itertools package. Found a way to use std::iter instead.

Fixes #8
2018-02-20 14:04:49 -07:00
e57bba17c1 Version bump 2018-02-19 16:59:41 -07:00
959da300cc Shorten readme lines 2018-02-19 16:53:58 -07:00
ba90e43f72 Update benchmark
* Add asm, though it doesn't make it faster. TODO: use avx instructions.
* Do 10x fewer hashes, since sha256 is more expensive.
2018-02-19 16:51:35 -07:00
6effd64ab0 Update readme with sha256 usage 2018-02-19 16:48:29 -07:00
e18da7c7c1 Merge pull request #13 from garious/sha256-hash
Use sha256 hashes instead of Rust's builtin hasher.
2018-02-19 16:43:26 -07:00
0297edaf1f Use sha256 hashes instead of Rust's builtin hasher.
Causes a 20x performance degradation. Enabling asm did not
speed things up.
2018-02-19 16:23:53 -07:00
b317d13b44 Add codecov configuration 2018-02-19 13:02:59 -07:00
40 changed files with 5591 additions and 347 deletions

2
.codecov.yml Normal file

@@ -0,0 +1,2 @@
ignore:
- "src/bin"

1
.gitattributes vendored Normal file

@@ -0,0 +1 @@
*.a filter=lfs diff=lfs merge=lfs -text

3
.gitignore vendored

@@ -1,4 +1,3 @@
Cargo.lock
/target/
**/*.rs.bk
Cargo.lock

Cargo.toml

@@ -1,26 +1,70 @@
[package]
name = "silk"
description = "A silky smooth implementation of the Loom architecture"
version = "0.2.0"
documentation = "https://docs.rs/silk"
homepage = "http://loomprotocol.com/"
repository = "https://github.com/loomprotocol/silk"
name = "solana"
description = "High Performance Blockchain"
version = "0.5.0-beta"
documentation = "https://docs.rs/solana"
homepage = "http://solana.io/"
repository = "https://github.com/solana-labs/solana"
authors = [
"Anatoly Yakovenko <aeyakovenko@gmail.com>",
"Greg Fitzgerald <garious@gmail.com>",
"Anatoly Yakovenko <anatoly@solana.io>",
"Greg Fitzgerald <greg@solana.io>",
"Stephen Akridge <stephen@solana.io>",
]
license = "Apache-2.0"
[[bin]]
name = "silk-demo"
path = "src/bin/demo.rs"
name = "solana-historian-demo"
path = "src/bin/historian-demo.rs"
[[bin]]
name = "solana-client-demo"
path = "src/bin/client-demo.rs"
[[bin]]
name = "solana-testnode"
path = "src/bin/testnode.rs"
[[bin]]
name = "solana-genesis"
path = "src/bin/genesis.rs"
[[bin]]
name = "solana-genesis-demo"
path = "src/bin/genesis-demo.rs"
[[bin]]
name = "solana-mint"
path = "src/bin/mint.rs"
[[bin]]
name = "solana-mint-demo"
path = "src/bin/mint-demo.rs"
[badges]
codecov = { repository = "loomprotocol/silk", branch = "master", service = "github" }
codecov = { repository = "solana-labs/solana", branch = "master", service = "github" }
[features]
unstable = []
ipv6 = []
cuda = []
erasure = []
[dependencies]
rayon = "1.0.0"
itertools = "0.7.6"
sha2 = "0.7.0"
generic-array = { version = "0.9.0", default-features = false, features = ["serde"] }
serde = "1.0.27"
serde_derive = "1.0.27"
serde_json = "1.0.10"
ring = "0.12.1"
untrusted = "0.5.1"
bincode = "1.0.0"
chrono = { version = "0.4.0", features = ["serde"] }
log = "^0.4.1"
env_logger = "^0.4.1"
matches = "^0.1.6"
byteorder = "^1.2.1"
libc = "^0.2.1"
getopts = "^0.2"
isatty = "0.1"
futures = "0.1"

LICENSE

@@ -1,4 +1,4 @@
Copyright 2018 Anatoly Yakovenko <anatoly@loomprotocol.com> and Greg Fitzgerald <garious@gmail.com>
Copyright 2018 Anatoly Yakovenko, Greg Fitzgerald and Stephen Akridge
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

163
README.md

@@ -1,69 +1,99 @@
[![Silk crate](https://img.shields.io/crates/v/silk.svg)](https://crates.io/crates/silk)
[![Silk documentation](https://docs.rs/silk/badge.svg)](https://docs.rs/silk)
[![Build Status](https://travis-ci.org/loomprotocol/silk.svg?branch=master)](https://travis-ci.org/loomprotocol/silk)
[![codecov](https://codecov.io/gh/loomprotocol/silk/branch/master/graph/badge.svg)](https://codecov.io/gh/loomprotocol/silk)
[![Solana crate](https://img.shields.io/crates/v/solana.svg)](https://crates.io/crates/solana)
[![Solana documentation](https://docs.rs/solana/badge.svg)](https://docs.rs/solana)
[![Build Status](https://travis-ci.org/solana-labs/solana.svg?branch=master)](https://travis-ci.org/solana-labs/solana)
[![codecov](https://codecov.io/gh/solana-labs/solana/branch/master/graph/badge.svg)](https://codecov.io/gh/solana-labs/solana)
# Silk, a silky smooth implementation of the Loom specification
Disclaimer
===
Loom is a new architecture for a high performance blockchain. Its whitepaper boasts a theoretical
throughput of 710k transactions per second on a 1 gbps network. The specification is implemented
in two git repositories. Research is performed in the loom repository. That work drives the
Loom specification forward. This repository, on the other hand, aims to implement the specification
as-is. We care a great deal about quality, clarity and a short learning curve. We avoid the use
of `unsafe` Rust and write tests for *everything*. Optimizations are only added when
corresponding benchmarks are also added that demonstrate real performance boosts. We expect the
feature set here will always be a ways behind the loom repo, but that this is an implementation
you can take to the bank, literally.
All claims, content, designs, algorithms, estimates, roadmaps, specifications, and performance measurements described in this project are done with the author's best effort. It is up to the reader to check and validate their accuracy and truthfulness. Furthermore, nothing in this project constitutes a solicitation for investment.
# Usage
Solana: High Performance Blockchain
===
Add the latest [silk package](https://crates.io/crates/silk) to the `[dependencies]` section
of your Cargo.toml.
Solana&trade; is a new architecture for a high performance blockchain. It aims to support
over 700 thousand transactions per second on a gigabit network.
Create a *Historian* and send it *events* to generate an *event log*, where each log *entry*
is tagged with the historian's latest *hash*. Then ensure the order of events was not tampered
with by verifying each entry's hash can be generated from the hash in the previous entry:
Introduction
===
```rust
extern crate silk;
It's possible for a centralized database to process 710,000 transactions per second on a standard gigabit network if the transactions are, on average, no more than 178 bytes. A centralized database can also replicate itself and maintain high availability without significantly compromising that transaction rate using the distributed system technique known as Optimistic Concurrency Control [H.T.Kung, J.T.Robinson (1981)]. At Solana, we're demonstrating that these same theoretical limits apply just as well to blockchain on an adversarial network. The key ingredient? Finding a way to share time when nodes can't trust one another. Once nodes can trust time, suddenly ~40 years of distributed systems research becomes applicable to blockchain! Furthermore, and much to our surprise, it can be implemented using a mechanism that has existed in Bitcoin since day one. The Bitcoin feature is called nLocktime and it can be used to postdate transactions using block height instead of a timestamp. As a Bitcoin client, you'd use block height instead of a timestamp if you don't trust the network. Block height turns out to be an instance of what's being called a Verifiable Delay Function in cryptography circles. It's a cryptographically secure way to say time has passed. In Solana, we use a far more granular verifiable delay function, a SHA-256 hash chain, to checkpoint the ledger and coordinate consensus. With it, we implement Optimistic Concurrency Control and are now well en route toward that theoretical limit of 710,000 transactions per second.
use silk::historian::Historian;
use silk::log::{verify_slice, Entry, Event};
use std::{thread, time};
use std::sync::mpsc::SendError;
Running the demo
===
fn create_log(hist: &Historian) -> Result<(), SendError<Event>> {
    hist.sender.send(Event::Tick)?;
    thread::sleep(time::Duration::new(0, 100_000));
    hist.sender.send(Event::UserDataKey(0xdeadbeef))?;
    thread::sleep(time::Duration::new(0, 100_000));
    hist.sender.send(Event::Tick)?;
    Ok(())
}
First, install Rust's package manager Cargo.
fn main() {
    let seed = 0;
    let hist = Historian::new(seed);
    create_log(&hist).expect("send error");
    drop(hist.sender);
    let entries: Vec<Entry> = hist.receiver.iter().collect();
    for entry in &entries {
        println!("{:?}", entry);
    }
    assert!(verify_slice(&entries, seed));
}
```
```bash
$ curl https://sh.rustup.rs -sSf | sh
$ source $HOME/.cargo/env
```
Running the program should produce a log similar to:
If you plan to run with GPU optimizations enabled (not recommended), you'll need a CUDA library stored in git LFS. Install git-lfs here:
```
Entry { num_hashes: 0, end_hash: 0, event: Tick }
Entry { num_hashes: 245, end_hash: 11504657626326377539, event: UserDataKey(3735928559) }
Entry { num_hashes: 154, end_hash: 13410333856574024888, event: Tick }
```
https://git-lfs.github.com/
Now check out the code from GitHub:
```bash
$ git clone https://github.com/solana-labs/solana.git
$ cd solana
```
The testnode server is initialized with a ledger from stdin and
generates new ledger entries on stdout. To create the input ledger, we'll need
to create *the mint* and use it to generate a *genesis ledger*. It's done in
two steps because the mint-demo.json file contains private keys that will be
used later in this demo.
# Developing
```bash
$ echo 1000000000 | cargo run --release --bin solana-mint-demo > mint-demo.json
$ cat mint-demo.json | cargo run --release --bin solana-genesis-demo > genesis.log
```
Now you can start the server:
```bash
$ cat genesis.log | cargo run --release --bin solana-testnode > transactions0.log
```
Wait a few seconds for the server to initialize. It will print "Ready." when it's safe
to start sending it transactions.
Then, in a separate shell, let's execute some transactions. Note we pass in
the JSON configuration file here, not the genesis ledger.
```bash
$ cat mint-demo.json | cargo run --release --bin solana-client-demo
```
Now kill the server with Ctrl-C, and take a look at the ledger. You should
see something similar to:
```json
{"num_hashes":27,"id":[0, "..."],"event":"Tick"}
{"num_hashes":3,"id":[67, "..."],"event":{"Transaction":{"tokens":42}}}
{"num_hashes":27,"id":[0, "..."],"event":"Tick"}
```
Now restart the server from where we left off. Pass it both the genesis ledger, and
the transaction ledger.
```bash
$ cat genesis.log transactions0.log | cargo run --release --bin solana-testnode > transactions1.log
```
Lastly, run the client demo again, and verify that all funds were spent in the
previous round, so no additional transactions will be added.
```bash
$ cat mint-demo.json | cargo run --release --bin solana-client-demo
```
Stop the server again, and verify that the new ledger contains only Tick entries and no Transaction entries.
Developing
===
Building
---
@ -79,8 +109,8 @@ $ rustup component add rustfmt-preview
Download the source code:
```bash
$ git clone https://github.com/solana-labs/solana.git
$ cd solana
```
Testing
@ -106,3 +136,30 @@ Run the benchmarks:
```bash
$ cargo +nightly bench --features="unstable"
```
To run the benchmarks on Linux with GPU optimizations enabled:
```bash
$ cargo +nightly bench --features="unstable,cuda"
```
Code coverage
---
To generate code coverage statistics, run kcov via Docker:
```bash
$ docker run -it --rm --security-opt seccomp=unconfined --volume "$PWD:/volume" elmtai/docker-rust-kcov
```
Why coverage? While most see coverage as a code quality metric, we see it primarily as a developer
productivity metric. When a developer makes a change to the codebase, presumably it's a *solution* to
some problem. Our unit-test suite is how we encode the set of *problems* the codebase solves. Running
the test suite should indicate that your change didn't *infringe* on anyone else's solutions. Adding a
test *protects* your solution from future changes. If you don't understand why a line of code exists,
try deleting it and running the unit-tests. The nearest test failure should tell you what problem
was solved by that code. If no test fails, go ahead and submit a Pull Request that asks, "what
problem is solved by this code?" On the other hand, if a test does fail and you can think of a
better way to solve the same problem, a Pull Request with your solution would most certainly be
welcome! Likewise, if rewriting a test can better communicate what code it's protecting, please
send us that patch!
15
build.rs Normal file
@ -0,0 +1,15 @@
use std::env;
fn main() {
println!("cargo:rustc-link-search=native=.");
if !env::var("CARGO_FEATURE_CUDA").is_err() {
println!("cargo:rustc-link-lib=static=cuda_verify_ed25519");
println!("cargo:rustc-link-search=native=/usr/local/cuda/lib64");
println!("cargo:rustc-link-lib=dylib=cudart");
println!("cargo:rustc-link-lib=dylib=cuda");
println!("cargo:rustc-link-lib=dylib=cudadevrt");
}
if !env::var("CARGO_FEATURE_ERASURE").is_err() {
println!("cargo:rustc-link-lib=dylib=Jerasure");
}
}
15
doc/consensus.msc Normal file
@ -0,0 +1,15 @@
msc {
client,leader,verifier_a,verifier_b,verifier_c;
client=>leader [ label = "SUBMIT" ] ;
leader=>client [ label = "CONFIRMED" ] ;
leader=>verifier_a [ label = "CONFIRMED" ] ;
leader=>verifier_b [ label = "CONFIRMED" ] ;
leader=>verifier_c [ label = "CONFIRMED" ] ;
verifier_a=>leader [ label = "VERIFIED" ] ;
verifier_b=>leader [ label = "VERIFIED" ] ;
leader=>client [ label = "FINALIZED" ] ;
leader=>verifier_a [ label = "FINALIZED" ] ;
leader=>verifier_b [ label = "FINALIZED" ] ;
leader=>verifier_c [ label = "FINALIZED" ] ;
}
65
doc/historian.md Normal file
@ -0,0 +1,65 @@
The Historian
===
Create a *Historian* and send it *events* to generate an *event log*, where each *entry*
is tagged with the historian's latest *hash*. Then ensure the order of events was not tampered
with by verifying each entry's hash can be generated from the hash in the previous entry:
![historian](https://user-images.githubusercontent.com/55449/36950845-459bdb58-1fb9-11e8-850e-894586f3729b.png)
```rust
extern crate solana;
use solana::historian::Historian;
use solana::ledger::{Block, Entry, Hash};
use solana::event::{generate_keypair, get_pubkey, sign_claim_data, Event};
use std::thread::sleep;
use std::time::Duration;
use std::sync::mpsc::SendError;
fn create_ledger(hist: &Historian<Hash>) -> Result<(), SendError<Event<Hash>>> {
sleep(Duration::from_millis(15));
let tokens = 42;
let keypair = generate_keypair();
let event0 = Event::new_claim(get_pubkey(&keypair), tokens, sign_claim_data(&tokens, &keypair));
hist.sender.send(event0)?;
sleep(Duration::from_millis(10));
Ok(())
}
fn main() {
let seed = Hash::default();
let hist = Historian::new(&seed, Some(10));
create_ledger(&hist).expect("send error");
drop(hist.sender);
let entries: Vec<Entry<Hash>> = hist.receiver.iter().collect();
for entry in &entries {
println!("{:?}", entry);
}
// Proof-of-History: Verify the historian learned about the events
// in the same order they appear in the vector.
assert!(entries[..].verify(&seed));
}
```
Running the program should produce a ledger similar to:
```rust
Entry { num_hashes: 0, id: [0, ...], event: Tick }
Entry { num_hashes: 3, id: [67, ...], event: Transaction { tokens: 42 } }
Entry { num_hashes: 3, id: [123, ...], event: Tick }
```
Proof-of-History
---
Take note of the last line:
```rust
assert!(entries[..].verify(&seed));
```
[It's a proof!](https://en.wikipedia.org/wiki/Curry%E2%80%93Howard_correspondence) For each entry returned by the
historian, we can verify that `id` is the result of applying a sha256 hash to the previous `id`
exactly `num_hashes` times, and then hashing the event data on top of that. Because the event data is
included in the hash, the events cannot be reordered without regenerating all the hashes.
18
doc/historian.msc Normal file
@ -0,0 +1,18 @@
msc {
client,historian,recorder;
recorder=>historian [ label = "e0 = Entry{id: h0, n: 0, event: Tick}" ] ;
recorder=>recorder [ label = "h1 = hash(h0)" ] ;
recorder=>recorder [ label = "h2 = hash(h1)" ] ;
client=>historian [ label = "Transaction(d0)" ] ;
historian=>recorder [ label = "Transaction(d0)" ] ;
recorder=>recorder [ label = "h3 = hash(h2 + d0)" ] ;
recorder=>historian [ label = "e1 = Entry{id: hash(h3), n: 3, event: Transaction(d0)}" ] ;
recorder=>recorder [ label = "h4 = hash(h3)" ] ;
recorder=>recorder [ label = "h5 = hash(h4)" ] ;
recorder=>recorder [ label = "h6 = hash(h5)" ] ;
recorder=>historian [ label = "e2 = Entry{id: h6, n: 3, event: Tick}" ] ;
client=>historian [ label = "collect()" ] ;
historian=>client [ label = "entries = [e0, e1, e2]" ] ;
client=>client [ label = "entries.verify(h0)" ] ;
}
526
src/accountant.rs Normal file
@ -0,0 +1,526 @@
//! The `accountant` module tracks client balances, and the progress of pending
//! transactions. It offers a high-level public API that signs transactions
//! on behalf of the caller, and a private low-level API for when they have
//! already been signed and verified.
extern crate libc;
use chrono::prelude::*;
use event::Event;
use hash::Hash;
use mint::Mint;
use plan::{Payment, Plan, Witness};
use rayon::prelude::*;
use signature::{KeyPair, PublicKey, Signature};
use std::collections::hash_map::Entry::Occupied;
use std::collections::{HashMap, HashSet, VecDeque};
use std::result;
use std::sync::RwLock;
use transaction::Transaction;
pub const MAX_ENTRY_IDS: usize = 1024 * 4;
#[derive(Debug, PartialEq, Eq)]
pub enum AccountingError {
AccountNotFound,
InsufficientFunds,
InvalidTransferSignature,
}
pub type Result<T> = result::Result<T, AccountingError>;
/// Commit funds to the 'to' party.
fn apply_payment(balances: &RwLock<HashMap<PublicKey, RwLock<i64>>>, payment: &Payment) {
if balances.read().unwrap().contains_key(&payment.to) {
let bals = balances.read().unwrap();
*bals[&payment.to].write().unwrap() += payment.tokens;
} else {
let mut bals = balances.write().unwrap();
bals.insert(payment.to, RwLock::new(payment.tokens));
}
}
pub struct Accountant {
balances: RwLock<HashMap<PublicKey, RwLock<i64>>>,
pending: RwLock<HashMap<Signature, Plan>>,
last_ids: RwLock<VecDeque<(Hash, RwLock<HashSet<Signature>>)>>,
time_sources: RwLock<HashSet<PublicKey>>,
last_time: RwLock<DateTime<Utc>>,
}
impl Accountant {
/// Create an Accountant using a deposit.
pub fn new_from_deposit(deposit: &Payment) -> Self {
let balances = RwLock::new(HashMap::new());
apply_payment(&balances, deposit);
Accountant {
balances,
pending: RwLock::new(HashMap::new()),
last_ids: RwLock::new(VecDeque::new()),
time_sources: RwLock::new(HashSet::new()),
last_time: RwLock::new(Utc.timestamp(0, 0)),
}
}
/// Create an Accountant with only a Mint. Typically used by unit tests.
pub fn new(mint: &Mint) -> Self {
let deposit = Payment {
to: mint.pubkey(),
tokens: mint.tokens,
};
let acc = Self::new_from_deposit(&deposit);
acc.register_entry_id(&mint.last_id());
acc
}
fn reserve_signature(signatures: &RwLock<HashSet<Signature>>, sig: &Signature) -> bool {
if signatures.read().unwrap().contains(sig) {
return false;
}
signatures.write().unwrap().insert(*sig);
true
}
fn forget_signature(signatures: &RwLock<HashSet<Signature>>, sig: &Signature) -> bool {
signatures.write().unwrap().remove(sig)
}
fn forget_signature_with_last_id(&self, sig: &Signature, last_id: &Hash) -> bool {
if let Some(entry) = self.last_ids
.read()
.unwrap()
.iter()
.rev()
.find(|x| x.0 == *last_id)
{
return Self::forget_signature(&entry.1, sig);
}
false
}
fn reserve_signature_with_last_id(&self, sig: &Signature, last_id: &Hash) -> bool {
if let Some(entry) = self.last_ids
.read()
.unwrap()
.iter()
.rev()
.find(|x| x.0 == *last_id)
{
return Self::reserve_signature(&entry.1, sig);
}
false
}
/// Tell the accountant which Entry IDs exist on the ledger. This function
/// assumes subsequent calls correspond to later entries, and will boot
/// the oldest ones once its internal cache is full. Once booted, the
/// accountant will reject transactions using that `last_id`.
pub fn register_entry_id(&self, last_id: &Hash) {
let mut last_ids = self.last_ids.write().unwrap();
if last_ids.len() >= MAX_ENTRY_IDS {
last_ids.pop_front();
}
last_ids.push_back((*last_id, RwLock::new(HashSet::new())));
}
/// Deduct tokens from the 'from' address if the account has sufficient
/// funds and the transaction isn't a duplicate.
pub fn process_verified_transaction_debits(&self, tr: &Transaction) -> Result<()> {
let bals = self.balances.read().unwrap();
// Hold a write lock before the condition check, so that a debit can't occur
// between checking the balance and the withdraw.
let option = bals.get(&tr.from);
if option.is_none() {
return Err(AccountingError::AccountNotFound);
}
let mut bal = option.unwrap().write().unwrap();
if !self.reserve_signature_with_last_id(&tr.sig, &tr.data.last_id) {
return Err(AccountingError::InvalidTransferSignature);
}
if *bal < tr.data.tokens {
self.forget_signature_with_last_id(&tr.sig, &tr.data.last_id);
return Err(AccountingError::InsufficientFunds);
}
*bal -= tr.data.tokens;
Ok(())
}
pub fn process_verified_transaction_credits(&self, tr: &Transaction) {
let mut plan = tr.data.plan.clone();
plan.apply_witness(&Witness::Timestamp(*self.last_time.read().unwrap()));
if let Some(ref payment) = plan.final_payment() {
apply_payment(&self.balances, payment);
} else {
let mut pending = self.pending.write().unwrap();
pending.insert(tr.sig, plan);
}
}
/// Process a Transaction that has already been verified.
pub fn process_verified_transaction(&self, tr: &Transaction) -> Result<()> {
self.process_verified_transaction_debits(tr)?;
self.process_verified_transaction_credits(tr);
Ok(())
}
/// Process a batch of verified transactions.
pub fn process_verified_transactions(&self, trs: Vec<Transaction>) -> Vec<Result<Transaction>> {
// Run all debits first to filter out any transactions that can't be processed
// in parallel deterministically.
let results: Vec<_> = trs.into_par_iter()
.map(|tr| self.process_verified_transaction_debits(&tr).map(|_| tr))
.collect(); // Calling collect() here forces all debits to complete before moving on.
results
.into_par_iter()
.map(|result| {
result.map(|tr| {
self.process_verified_transaction_credits(&tr);
tr
})
})
.collect()
}
fn partition_events(events: Vec<Event>) -> (Vec<Transaction>, Vec<Event>) {
let mut trs = vec![];
let mut rest = vec![];
for event in events {
match event {
Event::Transaction(tr) => trs.push(tr),
_ => rest.push(event),
}
}
(trs, rest)
}
pub fn process_verified_events(&self, events: Vec<Event>) -> Result<()> {
let (trs, rest) = Self::partition_events(events);
self.process_verified_transactions(trs);
for event in rest {
self.process_verified_event(&event)?;
}
Ok(())
}
/// Process a Witness Signature that has already been verified.
fn process_verified_sig(&self, from: PublicKey, tx_sig: Signature) -> Result<()> {
if let Occupied(mut e) = self.pending.write().unwrap().entry(tx_sig) {
e.get_mut().apply_witness(&Witness::Signature(from));
if let Some(ref payment) = e.get().final_payment() {
apply_payment(&self.balances, payment);
e.remove_entry();
}
};
Ok(())
}
/// Process a Witness Timestamp that has already been verified.
fn process_verified_timestamp(&self, from: PublicKey, dt: DateTime<Utc>) -> Result<()> {
// If this is the first timestamp we've seen, it probably came from the genesis block,
// so we'll trust it.
if *self.last_time.read().unwrap() == Utc.timestamp(0, 0) {
self.time_sources.write().unwrap().insert(from);
}
if self.time_sources.read().unwrap().contains(&from) {
if dt > *self.last_time.read().unwrap() {
*self.last_time.write().unwrap() = dt;
}
} else {
return Ok(());
}
// Check to see if any timelocked transactions can be completed.
let mut completed = vec![];
// Hold 'pending' write lock until the end of this function. Otherwise another thread can
// double-spend if it enters before the modified plan is removed from 'pending'.
let mut pending = self.pending.write().unwrap();
for (key, plan) in pending.iter_mut() {
plan.apply_witness(&Witness::Timestamp(*self.last_time.read().unwrap()));
if let Some(ref payment) = plan.final_payment() {
apply_payment(&self.balances, payment);
completed.push(key.clone());
}
}
for key in completed {
pending.remove(&key);
}
Ok(())
}
/// Process a Transaction or Witness that has already been verified.
pub fn process_verified_event(&self, event: &Event) -> Result<()> {
match *event {
Event::Transaction(ref tr) => self.process_verified_transaction(tr),
Event::Signature { from, tx_sig, .. } => self.process_verified_sig(from, tx_sig),
Event::Timestamp { from, dt, .. } => self.process_verified_timestamp(from, dt),
}
}
/// Create, sign, and process a Transaction from `keypair` to `to` of
/// `n` tokens where `last_id` is the last Entry ID observed by the client.
pub fn transfer(
&self,
n: i64,
keypair: &KeyPair,
to: PublicKey,
last_id: Hash,
) -> Result<Signature> {
let tr = Transaction::new(keypair, to, n, last_id);
let sig = tr.sig;
self.process_verified_transaction(&tr).map(|_| sig)
}
/// Create, sign, and process a postdated Transaction from `keypair`
/// to `to` of `n` tokens on `dt` where `last_id` is the last Entry ID
/// observed by the client.
pub fn transfer_on_date(
&self,
n: i64,
keypair: &KeyPair,
to: PublicKey,
dt: DateTime<Utc>,
last_id: Hash,
) -> Result<Signature> {
let tr = Transaction::new_on_date(keypair, to, dt, n, last_id);
let sig = tr.sig;
self.process_verified_transaction(&tr).map(|_| sig)
}
pub fn get_balance(&self, pubkey: &PublicKey) -> Option<i64> {
let bals = self.balances.read().unwrap();
bals.get(pubkey).map(|x| *x.read().unwrap())
}
}
#[cfg(test)]
mod tests {
use super::*;
use bincode::serialize;
use hash::hash;
use signature::KeyPairUtil;
#[test]
fn test_accountant() {
let alice = Mint::new(10_000);
let bob_pubkey = KeyPair::new().pubkey();
let acc = Accountant::new(&alice);
acc.transfer(1_000, &alice.keypair(), bob_pubkey, alice.last_id())
.unwrap();
assert_eq!(acc.get_balance(&bob_pubkey).unwrap(), 1_000);
acc.transfer(500, &alice.keypair(), bob_pubkey, alice.last_id())
.unwrap();
assert_eq!(acc.get_balance(&bob_pubkey).unwrap(), 1_500);
}
#[test]
fn test_account_not_found() {
let mint = Mint::new(1);
let acc = Accountant::new(&mint);
assert_eq!(
acc.transfer(1, &KeyPair::new(), mint.pubkey(), mint.last_id()),
Err(AccountingError::AccountNotFound)
);
}
#[test]
fn test_invalid_transfer() {
let alice = Mint::new(11_000);
let acc = Accountant::new(&alice);
let bob_pubkey = KeyPair::new().pubkey();
acc.transfer(1_000, &alice.keypair(), bob_pubkey, alice.last_id())
.unwrap();
assert_eq!(
acc.transfer(10_001, &alice.keypair(), bob_pubkey, alice.last_id()),
Err(AccountingError::InsufficientFunds)
);
let alice_pubkey = alice.keypair().pubkey();
assert_eq!(acc.get_balance(&alice_pubkey).unwrap(), 10_000);
assert_eq!(acc.get_balance(&bob_pubkey).unwrap(), 1_000);
}
#[test]
fn test_transfer_to_newb() {
let alice = Mint::new(10_000);
let acc = Accountant::new(&alice);
let alice_keypair = alice.keypair();
let bob_pubkey = KeyPair::new().pubkey();
acc.transfer(500, &alice_keypair, bob_pubkey, alice.last_id())
.unwrap();
assert_eq!(acc.get_balance(&bob_pubkey).unwrap(), 500);
}
#[test]
fn test_transfer_on_date() {
let alice = Mint::new(1);
let acc = Accountant::new(&alice);
let alice_keypair = alice.keypair();
let bob_pubkey = KeyPair::new().pubkey();
let dt = Utc::now();
acc.transfer_on_date(1, &alice_keypair, bob_pubkey, dt, alice.last_id())
.unwrap();
// Alice's balance will be zero because all funds are locked up.
assert_eq!(acc.get_balance(&alice.pubkey()), Some(0));
// Bob's balance will be None because the funds have not been
// sent.
assert_eq!(acc.get_balance(&bob_pubkey), None);
// Now, acknowledge that the time specified in the condition has occurred
// and that Bob's funds are now available.
acc.process_verified_timestamp(alice.pubkey(), dt).unwrap();
assert_eq!(acc.get_balance(&bob_pubkey), Some(1));
acc.process_verified_timestamp(alice.pubkey(), dt).unwrap(); // <-- Attack! Attempt to process completed transaction.
assert_ne!(acc.get_balance(&bob_pubkey), Some(2));
}
#[test]
fn test_transfer_after_date() {
let alice = Mint::new(1);
let acc = Accountant::new(&alice);
let alice_keypair = alice.keypair();
let bob_pubkey = KeyPair::new().pubkey();
let dt = Utc::now();
acc.process_verified_timestamp(alice.pubkey(), dt).unwrap();
// The given timestamp is already in the past, so this transfer should be processed immediately.
acc.transfer_on_date(1, &alice_keypair, bob_pubkey, dt, alice.last_id())
.unwrap();
assert_eq!(acc.get_balance(&alice.pubkey()), Some(0));
assert_eq!(acc.get_balance(&bob_pubkey), Some(1));
}
#[test]
fn test_cancel_transfer() {
let alice = Mint::new(1);
let acc = Accountant::new(&alice);
let alice_keypair = alice.keypair();
let bob_pubkey = KeyPair::new().pubkey();
let dt = Utc::now();
let sig = acc.transfer_on_date(1, &alice_keypair, bob_pubkey, dt, alice.last_id())
.unwrap();
// Alice's balance will be zero because all funds are locked up.
assert_eq!(acc.get_balance(&alice.pubkey()), Some(0));
// Bob's balance will be None because the funds have not been
// sent.
assert_eq!(acc.get_balance(&bob_pubkey), None);
// Now, cancel the transaction. Alice gets her funds back, Bob never sees them.
acc.process_verified_sig(alice.pubkey(), sig).unwrap();
assert_eq!(acc.get_balance(&alice.pubkey()), Some(1));
assert_eq!(acc.get_balance(&bob_pubkey), None);
acc.process_verified_sig(alice.pubkey(), sig).unwrap(); // <-- Attack! Attempt to cancel completed transaction.
assert_ne!(acc.get_balance(&alice.pubkey()), Some(2));
}
#[test]
fn test_duplicate_event_signature() {
let alice = Mint::new(1);
let acc = Accountant::new(&alice);
let sig = Signature::default();
assert!(acc.reserve_signature_with_last_id(&sig, &alice.last_id()));
assert!(!acc.reserve_signature_with_last_id(&sig, &alice.last_id()));
}
#[test]
fn test_forget_signature() {
let alice = Mint::new(1);
let acc = Accountant::new(&alice);
let sig = Signature::default();
acc.reserve_signature_with_last_id(&sig, &alice.last_id());
assert!(acc.forget_signature_with_last_id(&sig, &alice.last_id()));
assert!(!acc.forget_signature_with_last_id(&sig, &alice.last_id()));
}
#[test]
fn test_max_entry_ids() {
let alice = Mint::new(1);
let acc = Accountant::new(&alice);
let sig = Signature::default();
for i in 0..MAX_ENTRY_IDS {
let last_id = hash(&serialize(&i).unwrap()); // Unique hash
acc.register_entry_id(&last_id);
}
// Assert we're no longer able to use the oldest entry ID.
assert!(!acc.reserve_signature_with_last_id(&sig, &alice.last_id()));
}
#[test]
fn test_debits_before_credits() {
let mint = Mint::new(2);
let acc = Accountant::new(&mint);
let alice = KeyPair::new();
let tr0 = Transaction::new(&mint.keypair(), alice.pubkey(), 2, mint.last_id());
let tr1 = Transaction::new(&alice, mint.pubkey(), 1, mint.last_id());
let trs = vec![tr0, tr1];
assert!(acc.process_verified_transactions(trs)[1].is_err());
}
}
#[cfg(all(feature = "unstable", test))]
mod bench {
extern crate test;
use self::test::Bencher;
use accountant::*;
use bincode::serialize;
use hash::hash;
use signature::KeyPairUtil;
#[bench]
fn process_verified_event_bench(bencher: &mut Bencher) {
let mint = Mint::new(100_000_000);
let acc = Accountant::new(&mint);
// Create transactions between unrelated parties.
let transactions: Vec<_> = (0..4096)
.into_par_iter()
.map(|i| {
// Seed the 'from' account.
let rando0 = KeyPair::new();
let tr = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, mint.last_id());
acc.process_verified_transaction(&tr).unwrap();
// Seed the 'to' account and a cell for its signature.
let last_id = hash(&serialize(&i).unwrap()); // Unique hash
acc.register_entry_id(&last_id);
let rando1 = KeyPair::new();
let tr = Transaction::new(&rando0, rando1.pubkey(), 1, last_id);
acc.process_verified_transaction(&tr).unwrap();
// Finally, return a transaction that's unique
Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
})
.collect();
bencher.iter(|| {
// Since the benchmarker runs this multiple times, we need to clear the signatures.
for sigs in acc.last_ids.read().unwrap().iter() {
sigs.1.write().unwrap().clear();
}
assert!(
acc.process_verified_transactions(transactions.clone())
.iter()
.all(|x| x.is_ok())
);
});
}
}
810
src/accountant_skel.rs Normal file
@ -0,0 +1,810 @@
//! The `accountant_skel` module is a microservice that exposes the high-level
//! Accountant API to the network. Its message encoding is currently
//! in flux. Clients should use AccountantStub to interact with it.
use accountant::Accountant;
use bincode::{deserialize, serialize};
use ecdsa;
use entry::Entry;
use event::Event;
use hash::Hash;
use historian::Historian;
use packet;
use packet::SharedPackets;
use rayon::prelude::*;
use recorder::Signal;
use result::Result;
use serde_json;
use signature::PublicKey;
use std::cmp::max;
use std::collections::VecDeque;
use std::io::Write;
use std::net::{SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::{Arc, Mutex, RwLock};
use std::thread::{spawn, JoinHandle};
use std::time::Duration;
use streamer;
use transaction::Transaction;
use subscribers;
pub struct AccountantSkel<W: Write + Send + 'static> {
acc: Accountant,
last_id: Hash,
writer: W,
historian: Historian,
entry_info_subscribers: Vec<SocketAddr>,
}
#[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum Request {
Transaction(Transaction),
GetBalance { key: PublicKey },
GetLastId,
Subscribe { subscriptions: Vec<Subscription> },
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum Subscription {
EntryInfo,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct EntryInfo {
pub id: Hash,
pub num_hashes: u64,
pub num_events: u64,
}
impl Request {
/// Verify the request is valid.
pub fn verify(&self) -> bool {
match *self {
Request::Transaction(ref tr) => tr.verify_plan(),
_ => true,
}
}
}
#[derive(Serialize, Deserialize, Debug)]
pub enum Response {
Balance { key: PublicKey, val: Option<i64> },
EntryInfo(EntryInfo),
LastId { id: Hash },
}
impl<W: Write + Send + 'static> AccountantSkel<W> {
/// Create a new AccountantSkel that wraps the given Accountant.
pub fn new(acc: Accountant, last_id: Hash, writer: W, historian: Historian) -> Self {
AccountantSkel {
acc,
last_id,
writer,
historian,
entry_info_subscribers: vec![],
}
}
fn notify_entry_info_subscribers(&mut self, entry: &Entry) {
// TODO: No need to bind().
let socket = UdpSocket::bind("127.0.0.1:0").expect("bind");
for addr in &self.entry_info_subscribers {
let entry_info = EntryInfo {
id: entry.id,
num_hashes: entry.num_hashes,
num_events: entry.events.len() as u64,
};
let data = serialize(&Response::EntryInfo(entry_info)).expect("serialize EntryInfo");
let _res = socket.send_to(&data, addr);
}
}
/// Process any Entry items that have been published by the Historian.
pub fn sync(&mut self) -> Hash {
while let Ok(entry) = self.historian.receiver.try_recv() {
self.last_id = entry.id;
self.acc.register_entry_id(&self.last_id);
writeln!(self.writer, "{}", serde_json::to_string(&entry).unwrap()).unwrap();
self.notify_entry_info_subscribers(&entry);
}
self.last_id
}
/// Process Request items sent by clients.
pub fn process_request(
&mut self,
msg: Request,
rsp_addr: SocketAddr,
) -> Option<(Response, SocketAddr)> {
match msg {
Request::GetBalance { key } => {
let val = self.acc.get_balance(&key);
Some((Response::Balance { key, val }, rsp_addr))
}
Request::GetLastId => Some((Response::LastId { id: self.sync() }, rsp_addr)),
Request::Transaction(_) => unreachable!(),
Request::Subscribe { subscriptions } => {
for subscription in subscriptions {
match subscription {
Subscription::EntryInfo => self.entry_info_subscribers.push(rsp_addr),
}
}
None
}
}
}
fn recv_batch(recvr: &streamer::PacketReceiver) -> Result<Vec<SharedPackets>> {
let timer = Duration::new(1, 0);
let msgs = recvr.recv_timeout(timer)?;
trace!("got msgs");
let mut batch = vec![msgs];
while let Ok(more) = recvr.try_recv() {
trace!("got more msgs");
batch.push(more);
}
info!("batch len {}", batch.len());
Ok(batch)
}
fn verify_batch(batch: Vec<SharedPackets>) -> Vec<Vec<(SharedPackets, Vec<u8>)>> {
let chunk_size = max(1, (batch.len() + 3) / 4);
let batches: Vec<_> = batch.chunks(chunk_size).map(|x| x.to_vec()).collect();
batches
.into_par_iter()
.map(|batch| {
let r = ecdsa::ed25519_verify(&batch);
batch.into_iter().zip(r).collect()
})
.collect()
}
fn verifier(
recvr: &streamer::PacketReceiver,
sendr: &Sender<Vec<(SharedPackets, Vec<u8>)>>,
) -> Result<()> {
let batch = Self::recv_batch(recvr)?;
let verified_batches = Self::verify_batch(batch);
for xs in verified_batches {
sendr.send(xs)?;
}
Ok(())
}
pub fn deserialize_packets(p: &packet::Packets) -> Vec<Option<(Request, SocketAddr)>> {
p.packets
.par_iter()
.map(|x| {
deserialize(&x.data[0..x.meta.size])
.map(|req| (req, x.meta.addr()))
.ok()
})
.collect()
}
/// Split Request list into verified transactions and the rest
fn partition_requests(
req_vers: Vec<(Request, SocketAddr, u8)>,
) -> (Vec<Transaction>, Vec<(Request, SocketAddr)>) {
let mut trs = vec![];
let mut reqs = vec![];
for (msg, rsp_addr, verify) in req_vers {
match msg {
Request::Transaction(tr) => {
if verify != 0 {
trs.push(tr);
}
}
_ => reqs.push((msg, rsp_addr)),
}
}
(trs, reqs)
}
fn process_packets(
&mut self,
req_vers: Vec<(Request, SocketAddr, u8)>,
) -> Result<Vec<(Response, SocketAddr)>> {
let (trs, reqs) = Self::partition_requests(req_vers);
// Process the transactions in parallel and then log the successful ones.
for result in self.acc.process_verified_transactions(trs) {
if let Ok(tr) = result {
self.historian
.sender
.send(Signal::Event(Event::Transaction(tr)))?;
}
}
// Let validators know they should not attempt to process additional
// transactions in parallel.
self.historian.sender.send(Signal::Tick)?;
// Process the remaining requests serially.
let rsps = reqs.into_iter()
.filter_map(|(req, rsp_addr)| self.process_request(req, rsp_addr))
.collect();
Ok(rsps)
}
fn serialize_response(
resp: Response,
rsp_addr: SocketAddr,
blob_recycler: &packet::BlobRecycler,
) -> Result<packet::SharedBlob> {
let blob = blob_recycler.allocate();
{
let mut b = blob.write().unwrap();
let v = serialize(&resp)?;
let len = v.len();
b.data[..len].copy_from_slice(&v);
b.meta.size = len;
b.meta.set_addr(&rsp_addr);
}
Ok(blob)
}
fn serialize_responses(
rsps: Vec<(Response, SocketAddr)>,
blob_recycler: &packet::BlobRecycler,
) -> Result<VecDeque<packet::SharedBlob>> {
let mut blobs = VecDeque::new();
for (resp, rsp_addr) in rsps {
blobs.push_back(Self::serialize_response(resp, rsp_addr, blob_recycler)?);
}
Ok(blobs)
}
fn process(
obj: &Arc<Mutex<AccountantSkel<W>>>,
verified_receiver: &Receiver<Vec<(SharedPackets, Vec<u8>)>>,
blob_sender: &streamer::BlobSender,
packet_recycler: &packet::PacketRecycler,
blob_recycler: &packet::BlobRecycler,
) -> Result<()> {
let timer = Duration::new(1, 0);
let mms = verified_receiver.recv_timeout(timer)?;
for (msgs, vers) in mms {
let reqs = Self::deserialize_packets(&msgs.read().unwrap());
let req_vers = reqs.into_iter()
.zip(vers)
.filter_map(|(req, ver)| req.map(|(msg, addr)| (msg, addr, ver)))
.filter(|x| x.0.verify())
.collect();
let rsps = obj.lock().unwrap().process_packets(req_vers)?;
let blobs = Self::serialize_responses(rsps, blob_recycler)?;
if !blobs.is_empty() {
// Don't wake up the other side if there is nothing to send.
blob_sender.send(blobs)?;
}
packet_recycler.recycle(msgs);
// Write new entries to the ledger and notify subscribers.
obj.lock().unwrap().sync();
}
Ok(())
}
/// Process verified blobs, already in order
/// Respond with a signed hash of the state
fn replicate_state(
obj: &Arc<Mutex<AccountantSkel<W>>>,
verified_receiver: &streamer::BlobReceiver,
blob_recycler: &packet::BlobRecycler,
) -> Result<()> {
let timer = Duration::new(1, 0);
let blobs = verified_receiver.recv_timeout(timer)?;
for msgs in &blobs {
let blob = msgs.read().unwrap();
let entries: Vec<Entry> = deserialize(&blob.data()[..blob.meta.size]).unwrap();
for entry in entries {
obj.lock().unwrap().acc.register_entry_id(&entry.id);
obj.lock()
.unwrap()
.acc
.process_verified_events(entry.events)?;
}
//TODO respond back to leader with hash of the state
}
for blob in blobs {
blob_recycler.recycle(blob);
}
Ok(())
}
/// Create a UDP microservice that forwards messages to the given AccountantSkel.
/// This service is the network leader.
/// Set `exit` to shut down its threads.
pub fn serve(
obj: &Arc<Mutex<AccountantSkel<W>>>,
addr: &str,
exit: Arc<AtomicBool>,
) -> Result<Vec<JoinHandle<()>>> {
let read = UdpSocket::bind(addr)?;
// make sure we are on the same interface
let mut local = read.local_addr()?;
local.set_port(0);
let write = UdpSocket::bind(local)?;
let packet_recycler = packet::PacketRecycler::default();
let blob_recycler = packet::BlobRecycler::default();
let (packet_sender, packet_receiver) = channel();
let t_receiver =
streamer::receiver(read, exit.clone(), packet_recycler.clone(), packet_sender)?;
let (blob_sender, blob_receiver) = channel();
let t_responder =
streamer::responder(write, exit.clone(), blob_recycler.clone(), blob_receiver);
let (verified_sender, verified_receiver) = channel();
let exit_ = exit.clone();
let t_verifier = spawn(move || loop {
let e = Self::verifier(&packet_receiver, &verified_sender);
if e.is_err() && exit_.load(Ordering::Relaxed) {
break;
}
});
let skel = obj.clone();
let t_server = spawn(move || loop {
let e = Self::process(
&skel,
&verified_receiver,
&blob_sender,
&packet_recycler,
&blob_recycler,
);
if e.is_err() {
// Assume this was a timeout, so sync any empty entries.
skel.lock().unwrap().sync();
if exit.load(Ordering::Relaxed) {
break;
}
}
});
Ok(vec![t_receiver, t_responder, t_server, t_verifier])
}
/// This service receives messages from a leader in the network and processes the transactions
/// on the accountant state.
/// # Arguments
/// * `obj` - The accountant state.
/// * `rsubs` - The subscribers.
/// * `exit` - The exit signal.
/// # Remarks
/// The pipeline is constructed as follows:
/// 1. receive blobs from the network, these are out of order
/// 2. verify blobs, PoH, signatures (TODO)
/// 3. reconstruct contiguous window
/// a. order the blobs
/// b. use erasure coding to reconstruct missing blobs
/// c. ask the network for missing blobs, if erasure coding is insufficient
/// d. make sure that the blobs PoH sequences connect (TODO)
/// 4. process the transaction state machine
/// 5. respond with the hash of the state back to the leader
pub fn replicate(
obj: &Arc<Mutex<AccountantSkel<W>>>,
rsubs: subscribers::Subscribers,
exit: Arc<AtomicBool>,
) -> Result<Vec<JoinHandle<()>>> {
let read = UdpSocket::bind(rsubs.me.addr)?;
// make sure we are on the same interface
let mut local = read.local_addr()?;
local.set_port(0);
let write = UdpSocket::bind(local)?;
let blob_recycler = packet::BlobRecycler::default();
let (blob_sender, blob_receiver) = channel();
let t_blob_receiver = streamer::blob_receiver(
exit.clone(),
blob_recycler.clone(),
read,
blob_sender.clone(),
)?;
let (window_sender, window_receiver) = channel();
let (retransmit_sender, retransmit_receiver) = channel();
let subs = Arc::new(RwLock::new(rsubs));
let t_retransmit = streamer::retransmitter(
write,
exit.clone(),
subs.clone(),
blob_recycler.clone(),
retransmit_receiver,
);
//TODO
//the packets coming out of blob_receiver need to be sent to the GPU and verified
//then sent to the window, which does the erasure coding reconstruction
let t_window = streamer::window(
exit.clone(),
subs,
blob_recycler.clone(),
blob_receiver,
window_sender,
retransmit_sender,
);
let skel = obj.clone();
let t_server = spawn(move || loop {
let e = Self::replicate_state(&skel, &window_receiver, &blob_recycler);
if e.is_err() && exit.load(Ordering::Relaxed) {
break;
}
});
Ok(vec![t_blob_receiver, t_retransmit, t_window, t_server])
}
}
#[cfg(test)]
pub fn to_packets(r: &packet::PacketRecycler, reqs: Vec<Request>) -> Vec<SharedPackets> {
let mut out = vec![];
for rrs in reqs.chunks(packet::NUM_PACKETS) {
let p = r.allocate();
p.write()
.unwrap()
.packets
.resize(rrs.len(), Default::default());
for (i, o) in rrs.iter().zip(p.write().unwrap().packets.iter_mut()) {
let v = serialize(&i).expect("serialize request");
let len = v.len();
o.data[..len].copy_from_slice(&v);
o.meta.size = len;
}
out.push(p);
}
out
}
#[cfg(test)]
mod tests {
use accountant_skel::{to_packets, Request};
use bincode::serialize;
use ecdsa;
use packet::{BlobRecycler, PacketRecycler, NUM_PACKETS};
use transaction::{memfind, test_tx};
use accountant::Accountant;
use accountant_skel::AccountantSkel;
use accountant_stub::AccountantStub;
use entry::Entry;
use futures::Future;
use historian::Historian;
use mint::Mint;
use plan::Plan;
use recorder::Signal;
use signature::{KeyPair, KeyPairUtil};
use std::io::sink;
use std::net::{SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use std::thread::sleep;
use std::time::Duration;
use transaction::Transaction;
use subscribers::{Node, Subscribers};
use streamer;
use std::sync::mpsc::channel;
use std::collections::VecDeque;
use hash::{hash, Hash};
use event::Event;
use entry;
use chrono::prelude::*;
#[test]
fn test_layout() {
let tr = test_tx();
let tx = serialize(&tr).unwrap();
let packet = serialize(&Request::Transaction(tr)).unwrap();
assert_matches!(memfind(&packet, &tx), Some(ecdsa::TX_OFFSET));
assert_matches!(memfind(&packet, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), None);
}
#[test]
fn test_to_packets() {
let tr = Request::Transaction(test_tx());
let re = PacketRecycler::default();
let rv = to_packets(&re, vec![tr.clone(); 1]);
assert_eq!(rv.len(), 1);
assert_eq!(rv[0].read().unwrap().packets.len(), 1);
let rv = to_packets(&re, vec![tr.clone(); NUM_PACKETS]);
assert_eq!(rv.len(), 1);
assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);
let rv = to_packets(&re, vec![tr.clone(); NUM_PACKETS + 1]);
assert_eq!(rv.len(), 2);
assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);
assert_eq!(rv[1].read().unwrap().packets.len(), 1);
}
#[test]
fn test_accounting_sequential_consistency() {
// In this attack we'll demonstrate that a verifier can interpret the ledger
// differently if the server doesn't signal the ledger to add an Entry,
// or if the verifier tries to parallelize across multiple Entries.
let mint = Mint::new(2);
let acc = Accountant::new(&mint);
let rsp_addr: SocketAddr = "0.0.0.0:0".parse().expect("socket address");
let historian = Historian::new(&mint.last_id(), None);
let mut skel = AccountantSkel::new(acc, mint.last_id(), sink(), historian);
// Process a batch that includes a transaction that receives two tokens.
let alice = KeyPair::new();
let tr = Transaction::new(&mint.keypair(), alice.pubkey(), 2, mint.last_id());
let req_vers = vec![(Request::Transaction(tr), rsp_addr, 1_u8)];
assert!(skel.process_packets(req_vers).is_ok());
// Process a second batch that spends one of those tokens.
let tr = Transaction::new(&alice, mint.pubkey(), 1, mint.last_id());
let req_vers = vec![(Request::Transaction(tr), rsp_addr, 1_u8)];
assert!(skel.process_packets(req_vers).is_ok());
// Collect the ledger and feed it to a new accountant.
skel.historian.sender.send(Signal::Tick).unwrap();
drop(skel.historian.sender);
let entries: Vec<Entry> = skel.historian.receiver.iter().collect();
// Assert the user holds one token, not two. If the server only output one
// entry, then the second transaction will be rejected, because it drives
// the account balance below zero before the credit is added.
let acc = Accountant::new(&mint);
for entry in entries {
acc.process_verified_events(entry.events).unwrap();
}
assert_eq!(acc.get_balance(&alice.pubkey()), Some(1));
}
#[test]
fn test_accountant_bad_sig() {
let serve_port = 9002;
let send_port = 9003;
let addr = format!("127.0.0.1:{}", serve_port);
let send_addr = format!("127.0.0.1:{}", send_port);
let alice = Mint::new(10_000);
let acc = Accountant::new(&alice);
let bob_pubkey = KeyPair::new().pubkey();
let exit = Arc::new(AtomicBool::new(false));
let historian = Historian::new(&alice.last_id(), Some(30));
let acc = Arc::new(Mutex::new(AccountantSkel::new(
acc,
alice.last_id(),
sink(),
historian,
)));
let _threads = AccountantSkel::serve(&acc, &addr, exit.clone()).unwrap();
sleep(Duration::from_millis(300));
let socket = UdpSocket::bind(send_addr).unwrap();
socket.set_read_timeout(Some(Duration::new(5, 0))).unwrap();
let mut acc = AccountantStub::new(&addr, socket);
let last_id = acc.get_last_id().wait().unwrap();
let tr = Transaction::new(&alice.keypair(), bob_pubkey, 500, last_id);
let _sig = acc.transfer_signed(tr).unwrap();
let last_id = acc.get_last_id().wait().unwrap();
let mut tr2 = Transaction::new(&alice.keypair(), bob_pubkey, 501, last_id);
tr2.data.tokens = 502;
tr2.data.plan = Plan::new_payment(502, bob_pubkey);
let _sig = acc.transfer_signed(tr2).unwrap();
assert_eq!(acc.get_balance(&bob_pubkey).wait().unwrap(), 500);
exit.store(true, Ordering::Relaxed);
}
use std::sync::{Once, ONCE_INIT};
extern crate env_logger;
static INIT: Once = ONCE_INIT;
/// Setup function that is only run once, even if called multiple times.
fn setup() {
INIT.call_once(|| {
env_logger::init().unwrap();
});
}
#[test]
fn test_replicate() {
setup();
let leader_sock = UdpSocket::bind("127.0.0.1:0").expect("bind");
let leader_addr = leader_sock.local_addr().unwrap();
let me_addr = "127.0.0.1:9010".parse().unwrap();
let target_peer_sock = UdpSocket::bind("127.0.0.1:0").expect("bind");
let target_peer_addr = target_peer_sock.local_addr().unwrap();
let source_peer_sock = UdpSocket::bind("127.0.0.1:0").expect("bind");
let exit = Arc::new(AtomicBool::new(false));
let node_me = Node::new([0, 0, 0, 0, 0, 0, 0, 1], 10, me_addr);
let node_subs = vec![Node::new([0, 0, 0, 0, 0, 0, 0, 2], 8, target_peer_addr); 1];
let node_leader = Node::new([0, 0, 0, 0, 0, 0, 0, 3], 20, leader_addr);
let subs = Subscribers::new(node_me, node_leader, &node_subs);
// setup some blob services to send blobs into the socket
// to simulate the source peer and get blobs out of the socket to
// simulate target peer
let recv_recycler = BlobRecycler::default();
let resp_recycler = BlobRecycler::default();
let (s_reader, r_reader) = channel();
let t_receiver = streamer::blob_receiver(
exit.clone(),
recv_recycler.clone(),
target_peer_sock,
s_reader,
).unwrap();
let (s_responder, r_responder) = channel();
let t_responder = streamer::responder(
source_peer_sock,
exit.clone(),
resp_recycler.clone(),
r_responder,
);
let starting_balance = 10_000;
let alice = Mint::new(starting_balance);
let acc = Accountant::new(&alice);
let historian = Historian::new(&alice.last_id(), Some(30));
let acc = Arc::new(Mutex::new(AccountantSkel::new(
acc,
alice.last_id(),
sink(),
historian,
)));
let _threads = AccountantSkel::replicate(&acc, subs, exit.clone()).unwrap();
let mut alice_ref_balance = starting_balance;
let mut msgs = VecDeque::new();
let mut cur_hash = Hash::default();
let num_blobs = 10;
let transfer_amount = 501;
let bob_keypair = KeyPair::new();
for i in 0..num_blobs {
let b = resp_recycler.allocate();
let b_ = b.clone();
let mut w = b.write().unwrap();
w.set_index(i).unwrap();
let tr0 = Event::new_timestamp(&bob_keypair, Utc::now());
let entry0 = entry::create_entry(&cur_hash, i, vec![tr0]);
acc.lock().unwrap().acc.register_entry_id(&cur_hash);
cur_hash = hash(&cur_hash);
let tr1 = Transaction::new(
&alice.keypair(),
bob_keypair.pubkey(),
transfer_amount,
cur_hash,
);
acc.lock().unwrap().acc.register_entry_id(&cur_hash);
cur_hash = hash(&cur_hash);
let entry1 =
entry::create_entry(&cur_hash, i + num_blobs, vec![Event::Transaction(tr1)]);
acc.lock().unwrap().acc.register_entry_id(&cur_hash);
cur_hash = hash(&cur_hash);
alice_ref_balance -= transfer_amount;
let serialized_entry = serialize(&vec![entry0, entry1]).unwrap();
w.data_mut()[..serialized_entry.len()].copy_from_slice(&serialized_entry);
w.set_size(serialized_entry.len());
w.meta.set_addr(&me_addr);
drop(w);
msgs.push_back(b_);
}
// send the blobs into the socket
s_responder.send(msgs).expect("send");
// receive retransmitted messages
let timer = Duration::new(1, 0);
let mut msgs: Vec<_> = Vec::new();
while let Ok(msg) = r_reader.recv_timeout(timer) {
trace!("msg: {:?}", msg);
msgs.push(msg);
}
let alice_balance = acc.lock()
.unwrap()
.acc
.get_balance(&alice.keypair().pubkey())
.unwrap();
assert_eq!(alice_balance, alice_ref_balance);
let bob_balance = acc.lock()
.unwrap()
.acc
.get_balance(&bob_keypair.pubkey())
.unwrap();
assert_eq!(bob_balance, starting_balance - alice_ref_balance);
exit.store(true, Ordering::Relaxed);
t_receiver.join().expect("join");
t_responder.join().expect("join");
}
}
#[cfg(all(feature = "unstable", test))]
mod bench {
extern crate test;
use self::test::Bencher;
use accountant::{Accountant, MAX_ENTRY_IDS};
use accountant_skel::*;
use bincode::serialize;
use hash::hash;
use mint::Mint;
use signature::{KeyPair, KeyPairUtil};
use std::collections::HashSet;
use std::io::sink;
use std::time::Instant;
use transaction::Transaction;
#[bench]
fn process_packets_bench(_bencher: &mut Bencher) {
let mint = Mint::new(100_000_000);
let acc = Accountant::new(&mint);
let rsp_addr: SocketAddr = "0.0.0.0:0".parse().expect("socket address");
// Create transactions between unrelated parties.
let txs = 100_000;
let last_ids: Mutex<HashSet<Hash>> = Mutex::new(HashSet::new());
let transactions: Vec<_> = (0..txs)
.into_par_iter()
.map(|i| {
// Seed the 'to' account and a cell for its signature.
let dummy_id = i % (MAX_ENTRY_IDS as i32);
let last_id = hash(&serialize(&dummy_id).unwrap()); // Semi-unique hash
{
let mut last_ids = last_ids.lock().unwrap();
if !last_ids.contains(&last_id) {
last_ids.insert(last_id);
acc.register_entry_id(&last_id);
}
}
// Seed the 'from' account.
let rando0 = KeyPair::new();
let tr = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, last_id);
acc.process_verified_transaction(&tr).unwrap();
let rando1 = KeyPair::new();
let tr = Transaction::new(&rando0, rando1.pubkey(), 2, last_id);
acc.process_verified_transaction(&tr).unwrap();
// Finally, return a transaction that's unique
Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
})
.collect();
let req_vers = transactions
.into_iter()
.map(|tr| (Request::Transaction(tr), rsp_addr, 1_u8))
.collect();
let historian = Historian::new(&mint.last_id(), None);
let mut skel = AccountantSkel::new(acc, mint.last_id(), sink(), historian);
let now = Instant::now();
assert!(skel.process_packets(req_vers).is_ok());
let duration = now.elapsed();
let sec = duration.as_secs() as f64 + duration.subsec_nanos() as f64 / 1_000_000_000.0;
let tps = txs as f64 / sec;
// Ensure that all transactions were successfully logged.
drop(skel.historian.sender);
let entries: Vec<Entry> = skel.historian.receiver.iter().collect();
assert_eq!(entries.len(), 1);
assert_eq!(entries[0].events.len(), txs as usize);
println!("{} tps", tps);
}
}
201
src/accountant_stub.rs Normal file
@ -0,0 +1,201 @@
//! The `accountant_stub` module is a client-side object that interfaces with a server-side Accountant
//! object via the network interface exposed by AccountantSkel. Client code should use
//! this object instead of writing messages to the network directly. The binary
//! encoding of its messages is unstable and may change in future releases.
use accountant_skel::{Request, Response, Subscription};
use bincode::{deserialize, serialize};
use futures::future::{ok, FutureResult};
use hash::Hash;
use signature::{KeyPair, PublicKey, Signature};
use std::collections::HashMap;
use std::io;
use std::net::UdpSocket;
use transaction::Transaction;
pub struct AccountantStub {
pub addr: String,
pub socket: UdpSocket,
last_id: Option<Hash>,
num_events: u64,
balances: HashMap<PublicKey, Option<i64>>,
}
impl AccountantStub {
/// Create a new AccountantStub that will interface with AccountantSkel
/// over `socket`. To receive responses, the caller must bind `socket`
/// to a public address before invoking AccountantStub methods.
pub fn new(addr: &str, socket: UdpSocket) -> Self {
let stub = AccountantStub {
addr: addr.to_string(),
socket,
last_id: None,
num_events: 0,
balances: HashMap::new(),
};
stub.init();
stub
}
pub fn init(&self) {
let subscriptions = vec![Subscription::EntryInfo];
let req = Request::Subscribe { subscriptions };
let data = serialize(&req).expect("serialize Subscribe");
let _res = self.socket.send_to(&data, &self.addr);
}
pub fn recv_response(&self) -> io::Result<Response> {
let mut buf = vec![0u8; 1024];
self.socket.recv_from(&mut buf)?;
let resp = deserialize(&buf).expect("deserialize balance");
Ok(resp)
}
pub fn process_response(&mut self, resp: Response) {
match resp {
Response::Balance { key, val } => {
self.balances.insert(key, val);
}
Response::LastId { id } => {
self.last_id = Some(id);
}
Response::EntryInfo(entry_info) => {
self.last_id = Some(entry_info.id);
self.num_events += entry_info.num_events;
}
}
}
/// Send a signed Transaction to the server for processing. This method
/// does not wait for a response.
pub fn transfer_signed(&self, tr: Transaction) -> io::Result<usize> {
let req = Request::Transaction(tr);
let data = serialize(&req).unwrap();
self.socket.send_to(&data, &self.addr)
}
/// Creates, signs, and processes a Transaction. Useful for writing unit-tests.
pub fn transfer(
&self,
n: i64,
keypair: &KeyPair,
to: PublicKey,
last_id: &Hash,
) -> io::Result<Signature> {
let tr = Transaction::new(keypair, to, n, *last_id);
let sig = tr.sig;
self.transfer_signed(tr).map(|_| sig)
}
/// Request the balance of the user holding `pubkey`. This method blocks
/// until the server sends a response. If the response packet is dropped
/// by the network, this method will hang indefinitely.
pub fn get_balance(&mut self, pubkey: &PublicKey) -> FutureResult<i64, i64> {
let req = Request::GetBalance { key: *pubkey };
let data = serialize(&req).expect("serialize GetBalance");
self.socket
.send_to(&data, &self.addr)
.expect("buffer error");
let mut done = false;
while !done {
let resp = self.recv_response().expect("recv response");
if let &Response::Balance { ref key, .. } = &resp {
done = key == pubkey;
}
self.process_response(resp);
}
ok(self.balances[pubkey].unwrap())
}
/// Request the last Entry ID from the server. This method blocks
/// until the server sends a response. At the time of this writing,
/// it also has the side-effect of causing the server to log any
/// entries that have been published by the Historian.
pub fn get_last_id(&mut self) -> FutureResult<Hash, ()> {
let req = Request::GetLastId;
let data = serialize(&req).expect("serialize GetId");
self.socket
.send_to(&data, &self.addr)
.expect("buffer error");
let mut done = false;
while !done {
let resp = self.recv_response().expect("recv response");
if let &Response::LastId { .. } = &resp {
done = true;
}
self.process_response(resp);
}
ok(self.last_id.unwrap_or(Hash::default()))
}
/// Return the number of transactions the server processed since creating
/// this stub instance.
pub fn transaction_count(&mut self) -> u64 {
// Wait for at least one EntryInfo.
let mut done = false;
while !done {
let resp = self.recv_response().expect("recv response");
if let &Response::EntryInfo(_) = &resp {
done = true;
}
self.process_response(resp);
}
// Then take the rest.
self.socket.set_nonblocking(true).expect("set nonblocking");
loop {
match self.recv_response() {
Err(_) => break,
Ok(resp) => self.process_response(resp),
}
}
self.socket.set_nonblocking(false).expect("set blocking");
self.num_events
}
}
#[cfg(test)]
mod tests {
use super::*;
use accountant::Accountant;
use accountant_skel::AccountantSkel;
use futures::Future;
use historian::Historian;
use mint::Mint;
use signature::{KeyPair, KeyPairUtil};
use std::io::sink;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use std::thread::sleep;
use std::time::Duration;
// TODO: Figure out why this test sometimes hangs on TravisCI.
#[test]
fn test_accountant_stub() {
let addr = "127.0.0.1:9000";
let send_addr = "127.0.0.1:9001";
let alice = Mint::new(10_000);
let acc = Accountant::new(&alice);
let bob_pubkey = KeyPair::new().pubkey();
let exit = Arc::new(AtomicBool::new(false));
let historian = Historian::new(&alice.last_id(), Some(30));
let acc = Arc::new(Mutex::new(AccountantSkel::new(
acc,
alice.last_id(),
sink(),
historian,
)));
let _threads = AccountantSkel::serve(&acc, addr, exit.clone()).unwrap();
sleep(Duration::from_millis(300));
let socket = UdpSocket::bind(send_addr).unwrap();
socket.set_read_timeout(Some(Duration::new(5, 0))).unwrap();
let mut acc = AccountantStub::new(addr, socket);
let last_id = acc.get_last_id().wait().unwrap();
let _sig = acc.transfer(500, &alice.keypair(), bob_pubkey, &last_id)
.unwrap();
assert_eq!(acc.get_balance(&bob_pubkey).wait().unwrap(), 500);
exit.store(true, Ordering::Relaxed);
}
}
143
src/bin/client-demo.rs Normal file
@ -0,0 +1,143 @@
extern crate futures;
extern crate getopts;
extern crate isatty;
extern crate rayon;
extern crate serde_json;
extern crate solana;
extern crate untrusted;
use futures::Future;
use getopts::Options;
use isatty::stdin_isatty;
use rayon::prelude::*;
use solana::accountant_stub::AccountantStub;
use solana::mint::MintDemo;
use solana::signature::{KeyPair, KeyPairUtil};
use solana::transaction::Transaction;
use std::env;
use std::io::{stdin, Read};
use std::net::UdpSocket;
use std::process::exit;
use std::time::Instant;
use untrusted::Input;
fn print_usage(program: &str, opts: Options) {
let mut brief = format!("Usage: cat <mint.json> | {} [options]\n\n", program);
brief += " Solana client demo creates a number of transactions and\n";
brief += " sends them to a target node.";
brief += " Takes json formatted mint file to stdin.";
print!("{}", opts.usage(&brief));
}
fn main() {
let mut threads = 4usize;
let mut addr: String = "127.0.0.1:8000".to_string();
let mut send_addr: String = "127.0.0.1:8001".to_string();
let mut opts = Options::new();
opts.optopt("s", "", "server address", "host:port");
opts.optopt("c", "", "client address", "host:port");
opts.optopt("t", "", "number of threads", "4");
opts.optflag("h", "help", "print help");
let args: Vec<String> = env::args().collect();
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(e) => {
eprintln!("{}", e);
exit(1);
}
};
if matches.opt_present("h") {
let program = args[0].clone();
print_usage(&program, opts);
return;
}
if matches.opt_present("s") {
addr = matches.opt_str("s").unwrap();
}
if matches.opt_present("c") {
send_addr = matches.opt_str("c").unwrap();
}
if matches.opt_present("t") {
threads = matches.opt_str("t").unwrap().parse().expect("integer");
}
if stdin_isatty() {
eprintln!("nothing found on stdin, expected a json file");
exit(1);
}
let mut buffer = String::new();
let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
if num_bytes == 0 {
eprintln!("empty file on stdin, expected a json file");
exit(1);
}
println!("Parsing stdin...");
let demo: MintDemo = serde_json::from_str(&buffer).unwrap_or_else(|e| {
eprintln!("failed to parse json: {}", e);
exit(1);
});
let socket = UdpSocket::bind(&send_addr).unwrap();
let mut acc = AccountantStub::new(&addr, socket);
println!("Get last ID...");
let last_id = acc.get_last_id().wait().unwrap();
println!("Creating keypairs...");
let txs = demo.users.len() / 2;
let keypairs: Vec<_> = demo.users
.into_par_iter()
.map(|(pkcs8, _)| KeyPair::from_pkcs8(Input::from(&pkcs8)).unwrap())
.collect();
let keypair_pairs: Vec<_> = keypairs.chunks(2).collect();
println!("Signing transactions...");
let now = Instant::now();
let transactions: Vec<_> = keypair_pairs
.into_par_iter()
.map(|chunk| Transaction::new(&chunk[0], chunk[1].pubkey(), 1, last_id))
.collect();
let duration = now.elapsed();
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
let bsps = txs as f64 / ns as f64;
let nsps = ns as f64 / txs as f64;
println!(
"Done. {} thousand signatures per second, {}us per signature",
bsps * 1_000_000_f64,
nsps / 1_000_f64
);
let initial_tx_count = acc.transaction_count();
println!("Transfering {} transactions in {} batches", txs, threads);
let now = Instant::now();
let sz = transactions.len() / threads;
let chunks: Vec<_> = transactions.chunks(sz).collect();
chunks.into_par_iter().for_each(|trs| {
println!("Transferring 1 unit {} times...", trs.len());
let send_addr = "0.0.0.0:0";
let socket = UdpSocket::bind(send_addr).unwrap();
let acc = AccountantStub::new(&addr, socket);
for tr in trs {
acc.transfer_signed(tr.clone()).unwrap();
}
});
println!("Waiting for half the transactions to complete...",);
let mut tx_count = acc.transaction_count();
while tx_count < transactions.len() as u64 / 2 {
tx_count = acc.transaction_count();
}
let txs = tx_count - initial_tx_count;
println!("Transactions processed {}", txs);
let duration = now.elapsed();
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
let tps = (txs * 1_000_000_000) as f64 / ns as f64;
println!("Done. {} tps", tps);
}
@ -1,27 +0,0 @@
extern crate silk;
use silk::historian::Historian;
use silk::log::{verify_slice, Entry, Event};
use std::{thread, time};
use std::sync::mpsc::SendError;
fn create_log(hist: &Historian) -> Result<(), SendError<Event>> {
hist.sender.send(Event::Tick)?;
thread::sleep(time::Duration::new(0, 100_000));
hist.sender.send(Event::UserDataKey(0xdeadbeef))?;
thread::sleep(time::Duration::new(0, 100_000));
hist.sender.send(Event::Tick)?;
Ok(())
}
fn main() {
let seed = 0;
let hist = Historian::new(seed);
create_log(&hist).expect("send error");
drop(hist.sender);
let entries: Vec<Entry> = hist.receiver.iter().collect();
for entry in &entries {
println!("{:?}", entry);
}
assert!(verify_slice(&entries, seed));
}
73
src/bin/genesis-demo.rs Normal file
@ -0,0 +1,73 @@
extern crate isatty;
extern crate rayon;
extern crate ring;
extern crate serde_json;
extern crate solana;
extern crate untrusted;
use isatty::stdin_isatty;
use rayon::prelude::*;
use solana::accountant::MAX_ENTRY_IDS;
use solana::entry::{create_entry, next_tick};
use solana::event::Event;
use solana::mint::MintDemo;
use solana::signature::{KeyPair, KeyPairUtil};
use solana::transaction::Transaction;
use std::io::{stdin, Read};
use std::process::exit;
use untrusted::Input;
// Generate a ledger with lots and lots of accounts.
fn main() {
if stdin_isatty() {
eprintln!("nothing found on stdin, expected a json file");
exit(1);
}
let mut buffer = String::new();
let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
if num_bytes == 0 {
eprintln!("empty file on stdin, expected a json file");
exit(1);
}
let demo: MintDemo = serde_json::from_str(&buffer).unwrap_or_else(|e| {
eprintln!("failed to parse json: {}", e);
exit(1);
});
let num_accounts = demo.users.len();
let last_id = demo.mint.last_id();
let mint_keypair = demo.mint.keypair();
eprintln!("Signing {} transactions...", num_accounts);
let events: Vec<_> = demo.users
.into_par_iter()
.map(|(pkcs8, tokens)| {
let rando = KeyPair::from_pkcs8(Input::from(&pkcs8)).unwrap();
let tr = Transaction::new(&mint_keypair, rando.pubkey(), tokens, last_id);
Event::Transaction(tr)
})
.collect();
for entry in demo.mint.create_entries() {
println!("{}", serde_json::to_string(&entry).unwrap());
}
eprintln!("Logging the creation of {} accounts...", num_accounts);
let entry = create_entry(&last_id, 0, events);
println!("{}", serde_json::to_string(&entry).unwrap());
eprintln!("Creating {} empty entries...", MAX_ENTRY_IDS);
// Offer client lots of entry IDs to use for each transaction's last_id.
let mut last_id = last_id;
for _ in 0..MAX_ENTRY_IDS {
let entry = next_tick(&last_id, 1);
last_id = entry.id;
let serialized = serde_json::to_string(&entry).unwrap_or_else(|e| {
eprintln!("failed to serialize: {}", e);
exit(1);
});
println!("{}", serialized);
}
}

src/bin/genesis.rs Normal file

@ -0,0 +1,36 @@
//! A command-line executable for generating the chain's genesis block.
extern crate isatty;
extern crate serde_json;
extern crate solana;
use isatty::stdin_isatty;
use solana::mint::Mint;
use std::io::{stdin, Read};
use std::process::exit;
fn main() {
if stdin_isatty() {
eprintln!("nothing found on stdin, expected a json file");
exit(1);
}
let mut buffer = String::new();
let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
if num_bytes == 0 {
eprintln!("empty file on stdin, expected a json file");
exit(1);
}
let mint: Mint = serde_json::from_str(&buffer).unwrap_or_else(|e| {
eprintln!("failed to parse json: {}", e);
exit(1);
});
for x in mint.create_entries() {
let serialized = serde_json::to_string(&x).unwrap_or_else(|e| {
eprintln!("failed to serialize: {}", e);
exit(1);
});
println!("{}", serialized);
}
}

src/bin/historian-demo.rs Normal file

@ -0,0 +1,37 @@
extern crate solana;
use solana::entry::Entry;
use solana::event::Event;
use solana::hash::Hash;
use solana::historian::Historian;
use solana::ledger::Block;
use solana::recorder::Signal;
use solana::signature::{KeyPair, KeyPairUtil};
use solana::transaction::Transaction;
use std::sync::mpsc::SendError;
use std::thread::sleep;
use std::time::Duration;
fn create_ledger(hist: &Historian, seed: &Hash) -> Result<(), SendError<Signal>> {
sleep(Duration::from_millis(15));
let keypair = KeyPair::new();
let tr = Transaction::new(&keypair, keypair.pubkey(), 42, *seed);
let signal0 = Signal::Event(Event::Transaction(tr));
hist.sender.send(signal0)?;
sleep(Duration::from_millis(10));
Ok(())
}
fn main() {
let seed = Hash::default();
let hist = Historian::new(&seed, Some(10));
create_ledger(&hist, &seed).expect("send error");
drop(hist.sender);
let entries: Vec<Entry> = hist.receiver.iter().collect();
for entry in &entries {
println!("{:?}", entry);
}
// Proof-of-History: Verify the historian learned about the events
// in the same order they appear in the vector.
assert!(entries[..].verify(&seed));
}

src/bin/mint-demo.rs Normal file

@ -0,0 +1,33 @@
extern crate rayon;
extern crate ring;
extern crate serde_json;
extern crate solana;
use rayon::prelude::*;
use ring::rand::SystemRandom;
use solana::mint::{Mint, MintDemo};
use solana::signature::KeyPair;
use std::io;
fn main() {
let mut input_text = String::new();
io::stdin().read_line(&mut input_text).unwrap();
let trimmed = input_text.trim();
let tokens = trimmed.parse::<i64>().unwrap();
let mint = Mint::new(tokens);
let tokens_per_user = 1_000;
let num_accounts = tokens / tokens_per_user;
let rnd = SystemRandom::new();
let users: Vec<_> = (0..num_accounts)
.into_par_iter()
.map(|_| {
let pkcs8 = KeyPair::generate_pkcs8(&rnd).unwrap().to_vec();
(pkcs8, tokens_per_user)
})
.collect();
let demo = MintDemo { mint, users };
println!("{}", serde_json::to_string(&demo).unwrap());
}

src/bin/mint.rs Normal file

@ -0,0 +1,29 @@
extern crate isatty;
extern crate serde_json;
extern crate solana;
use isatty::stdin_isatty;
use solana::mint::Mint;
use std::io;
use std::process::exit;
fn main() {
let mut input_text = String::new();
if stdin_isatty() {
eprintln!("nothing found on stdin, expected a token number");
exit(1);
}
io::stdin().read_line(&mut input_text).unwrap();
let trimmed = input_text.trim();
let tokens = trimmed.parse::<i64>().unwrap_or_else(|e| {
eprintln!("{}", e);
exit(1);
});
let mint = Mint::new(tokens);
let serialized = serde_json::to_string(&mint).unwrap_or_else(|e| {
eprintln!("failed to serialize: {}", e);
exit(1);
});
println!("{}", serialized);
}

src/bin/testnode.rs Normal file

@ -0,0 +1,111 @@
extern crate env_logger;
extern crate getopts;
extern crate isatty;
extern crate serde_json;
extern crate solana;
use getopts::Options;
use isatty::stdin_isatty;
use solana::accountant::Accountant;
use solana::accountant_skel::AccountantSkel;
use solana::entry::Entry;
use solana::event::Event;
use solana::historian::Historian;
use std::env;
use std::io::{stdin, stdout, Read};
use std::process::exit;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, Mutex};
fn print_usage(program: &str, opts: Options) {
let mut brief = format!("Usage: cat <transaction.log> | {} [options]\n\n", program);
brief += " Run a Solana node to handle transactions and\n";
brief += " write a new transaction log to stdout.\n";
brief += " Takes existing transaction log from stdin.";
print!("{}", opts.usage(&brief));
}
fn main() {
env_logger::init().unwrap();
let mut port = 8000u16;
let mut opts = Options::new();
opts.optopt("p", "", "port", "port");
opts.optflag("h", "help", "print help");
let args: Vec<String> = env::args().collect();
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(e) => {
eprintln!("{}", e);
exit(1);
}
};
if matches.opt_present("h") {
let program = args[0].clone();
print_usage(&program, opts);
return;
}
if matches.opt_present("p") {
port = matches.opt_str("p").unwrap().parse().expect("port");
}
let addr = format!("0.0.0.0:{}", port);
if stdin_isatty() {
eprintln!("nothing found on stdin, expected a log file");
exit(1);
}
let mut buffer = String::new();
let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
if num_bytes == 0 {
eprintln!("empty file on stdin, expected a log file");
exit(1);
}
eprintln!("Initializing...");
let mut entries = buffer.lines().map(|line| {
serde_json::from_str(&line).unwrap_or_else(|e| {
eprintln!("failed to parse json: {}", e);
exit(1);
})
});
// The first item in the ledger is required to be an entry with zero num_hashes,
// which implies its id can be used as the ledger's seed.
let entry0 = entries.next().unwrap();
// The second item in the ledger is a special transaction where the to and from
// fields are the same. That entry should be treated as a deposit, not a
// transfer to oneself.
let entry1: Entry = entries.next().unwrap();
let deposit = if let Event::Transaction(ref tr) = entry1.events[0] {
tr.data.plan.final_payment()
} else {
None
};
let acc = Accountant::new_from_deposit(&deposit.unwrap());
acc.register_entry_id(&entry0.id);
acc.register_entry_id(&entry1.id);
let mut last_id = entry1.id;
for entry in entries {
last_id = entry.id;
acc.process_verified_events(entry.events).unwrap();
acc.register_entry_id(&last_id);
}
let historian = Historian::new(&last_id, Some(1000));
let exit = Arc::new(AtomicBool::new(false));
let skel = Arc::new(Mutex::new(AccountantSkel::new(
acc,
last_id,
stdout(),
historian,
)));
let threads = AccountantSkel::serve(&skel, &addr, exit.clone()).unwrap();
eprintln!("Ready. Listening on {}", addr);
for t in threads {
t.join().expect("join");
}
}

src/crdt.rs Normal file

@ -0,0 +1,372 @@
//! The `crdt` module defines a data structure that is shared by all the nodes in the network over
//! a gossip control plane. The goal is to share small bits of off-chain information and detect and
//! repair partitions.
//!
//! This CRDT only supports a very limited set of types. A map of PublicKey -> Versioned Struct.
//! The last version is always picked during an update.
use bincode::{deserialize, serialize};
use byteorder::{LittleEndian, ReadBytesExt};
use hash::Hash;
use result::Result;
use ring::rand::{SecureRandom, SystemRandom};
use signature::{PublicKey, Signature};
use std::collections::HashMap;
use std::io::Cursor;
use std::net::{SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
use std::thread::{sleep, spawn, JoinHandle};
use std::time::Duration;
/// Structure to be replicated by the network
#[derive(Serialize, Deserialize, Clone)]
pub struct ReplicatedData {
id: PublicKey,
sig: Signature,
/// should always be increasing
version: u64,
/// address to connect to for gossip
gossip_addr: SocketAddr,
/// address to connect to for replication
replicate_addr: SocketAddr,
/// address to connect to when this node is leader
lead_addr: SocketAddr,
/// current leader identity
current_leader_id: PublicKey,
/// last verified hash that was submitted to the leader
last_verified_hash: Hash,
/// last verified count, always increasing
last_verified_count: u64,
}
impl ReplicatedData {
pub fn new(id: PublicKey, gossip_addr: SocketAddr) -> ReplicatedData {
let daddr = "0.0.0.0:0".parse().unwrap();
ReplicatedData {
id,
sig: Signature::default(),
version: 0,
gossip_addr,
replicate_addr: daddr,
lead_addr: daddr,
current_leader_id: PublicKey::default(),
last_verified_hash: Hash::default(),
last_verified_count: 0,
}
}
}
/// `Crdt` structure keeps a table of `ReplicatedData` structs
/// # Properties
/// * `table` - map of public id's to versioned and signed ReplicatedData structs
/// * `local` - map of public id's to the `self.update_index` at which `self.table` was last updated
/// * `remote` - map of public id's to the last `remote.update_index` received from them
/// * `update_index` - my update index
/// # Remarks
/// This implements two services, `gossip` and `listen`.
/// * `gossip` - asynchronously ask nodes to send updates
/// * `listen` - listen for requests and responses
/// No attempt to keep track of timeouts or dropped requests is made, or should be.
pub struct Crdt {
table: HashMap<PublicKey, ReplicatedData>,
/// Value of my update index when entry in table was updated.
/// Nodes will ask for updates since `update_index`, and this node
/// should respond with all the identities that are greater than the
/// request's `update_index` in this list
local: HashMap<PublicKey, u64>,
/// The value of the remote update index that I have last seen
/// This node will ask external nodes for updates since the value in this list
remote: HashMap<PublicKey, u64>,
update_index: u64,
me: PublicKey,
timeout: Duration,
}
// TODO These messages should be signed, and go through the gpu pipeline for spam filtering
#[derive(Serialize, Deserialize)]
enum Protocol {
/// forward your own latest data structure when requesting an update
/// this doesn't update the `remote` update index, but it allows the
/// recipient of this request to add knowledge of this node to the network
RequestUpdates(u64, ReplicatedData),
//TODO might need a since?
/// from id, the sender's last update index, and the update data
ReceiveUpdates(PublicKey, u64, Vec<ReplicatedData>),
}
impl Crdt {
pub fn new(me: ReplicatedData) -> Crdt {
assert_eq!(me.version, 0);
let mut g = Crdt {
table: HashMap::new(),
local: HashMap::new(),
remote: HashMap::new(),
me: me.id,
update_index: 1,
timeout: Duration::new(0, 100_000),
};
g.local.insert(me.id, g.update_index);
g.table.insert(me.id, me);
g
}
pub fn import(&mut self, v: &ReplicatedData) {
// TODO check that last_verified types are always increasing
// TODO probably an error or attack
if self.me != v.id {
self.insert(v);
}
}
pub fn insert(&mut self, v: &ReplicatedData) {
if self.table.get(&v.id).is_none() || (v.version > self.table[&v.id].version) {
trace!("insert! {}", v.version);
self.update_index += 1;
let _ = self.table.insert(v.id, v.clone());
let _ = self.local.insert(v.id, self.update_index);
} else {
trace!("INSERT FAILED {}", v.version);
}
}
fn random() -> u64 {
let rnd = SystemRandom::new();
let mut buf = [0u8; 8];
rnd.fill(&mut buf).unwrap();
let mut rdr = Cursor::new(&buf);
rdr.read_u64::<LittleEndian>().unwrap()
}
fn get_updates_since(&self, v: u64) -> (PublicKey, u64, Vec<ReplicatedData>) {
trace!("get updates since {}", v);
let data = self.table
.values()
.filter(|x| self.local[&x.id] > v)
.cloned()
.collect();
let id = self.me;
let ups = self.update_index;
(id, ups, data)
}
/// Create a random gossip request
/// # Returns
/// (A,B,C)
/// * A - Remote gossip address
/// * B - My gossip address
/// * C - Remote update index to request updates since
fn gossip_request(&self) -> (SocketAddr, Protocol) {
let n = (Self::random() as usize) % self.table.len();
trace!("random {:?} {}", &self.me[0..1], n);
let v = self.table.values().nth(n).unwrap().clone();
let remote_update_index = *self.remote.get(&v.id).unwrap_or(&0);
let req = Protocol::RequestUpdates(remote_update_index, self.table[&self.me].clone());
(v.gossip_addr, req)
}
/// At random pick a node and try to get updated changes from them
fn run_gossip(obj: &Arc<RwLock<Self>>) -> Result<()> {
//TODO we need to keep track of stakes and weight the selection by stake size
//TODO cache sockets
// Lock the object only to do this operation and not for any longer
// especially not when doing the `sock.send_to`
let (remote_gossip_addr, req) = obj.read().unwrap().gossip_request();
let sock = UdpSocket::bind("0.0.0.0:0")?;
// TODO this will get chatty, so we need to first ask for number of updates since
// then only ask for specific data that we don't have
let r = serialize(&req)?;
sock.send_to(&r, remote_gossip_addr)?;
Ok(())
}
/// Apply updates that we received from the identity `from`
/// # Arguments
/// * `from` - identity of the sender of the updates
/// * `update_index` - the number of updates that `from` has completed and this set of `data` represents
/// * `data` - the update data
fn apply_updates(&mut self, from: PublicKey, update_index: u64, data: &[ReplicatedData]) {
trace!("got updates {}", data.len());
// TODO we need to punish/spam resist here
// sig verify the whole update and slash anyone who sends a bad update
for v in data {
self.import(&v);
}
*self.remote.entry(from).or_insert(update_index) = update_index;
}
/// randomly pick a node and ask them for updates asynchronously
pub fn gossip(obj: Arc<RwLock<Self>>, exit: Arc<AtomicBool>) -> JoinHandle<()> {
spawn(move || loop {
let _ = Self::run_gossip(&obj);
if exit.load(Ordering::Relaxed) {
return;
}
//TODO this should be a tuned parameter
sleep(obj.read().unwrap().timeout);
})
}
/// Process messages from the network
fn run_listen(obj: &Arc<RwLock<Self>>, sock: &UdpSocket) -> Result<()> {
//TODO cache connections
let mut buf = vec![0u8; 1024 * 64];
let (amt, src) = sock.recv_from(&mut buf)?;
trace!("got request from {}", src);
buf.resize(amt, 0);
let r = deserialize(&buf)?;
match r {
// TODO sigverify these
Protocol::RequestUpdates(v, reqdata) => {
trace!("RequestUpdates {}", v);
let addr = reqdata.gossip_addr;
// only lock for this call, don't lock during IO `sock.send_to` or `sock.recv_from`
let (from, ups, data) = obj.read().unwrap().get_updates_since(v);
trace!("get updates since response {} {}", v, data.len());
let rsp = serialize(&Protocol::ReceiveUpdates(from, ups, data))?;
trace!("send_to {}", addr);
//TODO verify reqdata belongs to sender
obj.write().unwrap().import(&reqdata);
sock.send_to(&rsp, addr).unwrap();
trace!("send_to done!");
}
Protocol::ReceiveUpdates(from, ups, data) => {
trace!("ReceivedUpdates");
obj.write().unwrap().apply_updates(from, ups, &data);
}
}
Ok(())
}
pub fn listen(
obj: Arc<RwLock<Self>>,
sock: UdpSocket,
exit: Arc<AtomicBool>,
) -> JoinHandle<()> {
sock.set_read_timeout(Some(Duration::new(2, 0))).unwrap();
spawn(move || loop {
let _ = Self::run_listen(&obj, &sock);
if exit.load(Ordering::Relaxed) {
return;
}
})
}
}
#[cfg(test)]
mod test {
use crdt::{Crdt, ReplicatedData};
use signature::KeyPair;
use signature::KeyPairUtil;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
use std::thread::{sleep, JoinHandle};
use std::time::Duration;
/// Test that the network converges.
/// Run until every node in the network has a full ReplicatedData set.
/// Check that nodes stop sending updates after all the ReplicatedData has been shared.
/// tests that actually use this function are below
fn run_gossip_topo<F>(topo: F)
where
F: Fn(&Vec<(Arc<RwLock<Crdt>>, JoinHandle<()>)>) -> (),
{
let num: usize = 5;
let exit = Arc::new(AtomicBool::new(false));
let listen: Vec<_> = (0..num)
.map(|_| {
let listener = UdpSocket::bind("0.0.0.0:0").unwrap();
let pubkey = KeyPair::new().pubkey();
let d = ReplicatedData::new(pubkey, listener.local_addr().unwrap());
let crdt = Crdt::new(d);
let c = Arc::new(RwLock::new(crdt));
let l = Crdt::listen(c.clone(), listener, exit.clone());
(c, l)
})
.collect();
topo(&listen);
let gossip: Vec<_> = listen
.iter()
.map(|&(ref c, _)| Crdt::gossip(c.clone(), exit.clone()))
.collect();
let mut done = true;
for _ in 0..(num * 16) {
done = true;
for &(ref c, _) in listen.iter() {
trace!(
"done updates {} {}",
c.read().unwrap().table.len(),
c.read().unwrap().update_index
);
//make sure the number of updates doesn't grow unbounded
assert!(c.read().unwrap().update_index <= num as u64);
//make sure we got all the updates
if c.read().unwrap().table.len() != num {
done = false;
}
}
if done {
break;
}
sleep(Duration::new(1, 0));
}
exit.store(true, Ordering::Relaxed);
for j in gossip {
j.join().unwrap();
}
for (c, j) in listen.into_iter() {
j.join().unwrap();
// make it clear what failed
// protocol is too chatty, updates should stop after everyone receives `num`
assert!(c.read().unwrap().update_index <= num as u64);
// protocol is not chatty enough, everyone should get `num` entries
assert_eq!(c.read().unwrap().table.len(), num);
}
assert!(done);
}
/// ring a -> b -> c -> d -> e -> a
#[test]
fn gossip_ring_test() {
run_gossip_topo(|listen| {
let num = listen.len();
for n in 0..num {
let y = n % listen.len();
let x = (n + 1) % listen.len();
let mut xv = listen[x].0.write().unwrap();
let yv = listen[y].0.read().unwrap();
let mut d = yv.table[&yv.me].clone();
d.version = 0;
xv.insert(&d);
}
});
}
/// star (b,c,d,e) -> a
#[test]
fn gossip_star_test() {
run_gossip_topo(|listen| {
let num = listen.len();
for n in 0..(num - 1) {
let x = 0;
let y = (n + 1) % listen.len();
let mut xv = listen[x].0.write().unwrap();
let yv = listen[y].0.read().unwrap();
let mut d = yv.table[&yv.me].clone();
d.version = 0;
xv.insert(&d);
}
});
}
/// Test that insert drops messages that are older
#[test]
fn insert_test() {
let mut d = ReplicatedData::new(KeyPair::new().pubkey(), "127.0.0.1:1234".parse().unwrap());
assert_eq!(d.version, 0);
let mut crdt = Crdt::new(d.clone());
assert_eq!(crdt.table[&d.id].version, 0);
d.version = 2;
crdt.insert(&d);
assert_eq!(crdt.table[&d.id].version, 2);
d.version = 1;
crdt.insert(&d);
assert_eq!(crdt.table[&d.id].version, 2);
}
}
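For context, a minimal sketch of wiring up the two services described in the `Crdt` docs; it only uses constructors and spawn functions that appear in this diff, and is not a definitive node implementation:

use solana::crdt::{Crdt, ReplicatedData};
use solana::signature::{KeyPair, KeyPairUtil};
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, RwLock};

fn main() {
    let exit = Arc::new(AtomicBool::new(false));
    // Bind the gossip socket and advertise its address as this node's identity.
    let sock = UdpSocket::bind("0.0.0.0:0").unwrap();
    let me = ReplicatedData::new(KeyPair::new().pubkey(), sock.local_addr().unwrap());
    let crdt = Arc::new(RwLock::new(Crdt::new(me)));
    // `listen` answers RequestUpdates from peers; `gossip` polls a random peer.
    let t_listen = Crdt::listen(crdt.clone(), sock, exit.clone());
    let t_gossip = Crdt::gossip(crdt.clone(), exit.clone());
    t_gossip.join().unwrap();
    t_listen.join().unwrap();
}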

src/ecdsa.rs Normal file

@ -0,0 +1,194 @@
use packet::{Packet, SharedPackets};
use std::mem::size_of;
use transaction::{PUB_KEY_OFFSET, SIGNED_DATA_OFFSET, SIG_OFFSET};
pub const TX_OFFSET: usize = 4;
#[cfg(feature = "cuda")]
#[repr(C)]
struct Elems {
elems: *const Packet,
num: u32,
}
#[cfg(feature = "cuda")]
#[link(name = "cuda_verify_ed25519")]
extern "C" {
fn ed25519_verify_many(
vecs: *const Elems,
num: u32, //number of vecs
message_size: u32, //size of each element inside the elems field of the vec
public_key_offset: u32,
signature_offset: u32,
signed_message_offset: u32,
signed_message_len_offset: u32,
out: *mut u8, //combined length of all the items in vecs
) -> u32;
}
#[cfg(not(feature = "cuda"))]
fn verify_packet(packet: &Packet) -> u8 {
use ring::signature;
use signature::{PublicKey, Signature};
use untrusted;
let msg_start = TX_OFFSET + SIGNED_DATA_OFFSET;
let sig_start = TX_OFFSET + SIG_OFFSET;
let sig_end = sig_start + size_of::<Signature>();
let pub_key_start = TX_OFFSET + PUB_KEY_OFFSET;
let pub_key_end = pub_key_start + size_of::<PublicKey>();
if packet.meta.size <= msg_start {
return 0;
}
let msg_end = packet.meta.size;
signature::verify(
&signature::ED25519,
untrusted::Input::from(&packet.data[pub_key_start..pub_key_end]),
untrusted::Input::from(&packet.data[msg_start..msg_end]),
untrusted::Input::from(&packet.data[sig_start..sig_end]),
).is_ok() as u8
}
#[cfg(not(feature = "cuda"))]
pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
use rayon::prelude::*;
batches
.into_par_iter()
.map(|p| {
p.read()
.unwrap()
.packets
.par_iter()
.map(verify_packet)
.collect()
})
.collect()
}
#[cfg(feature = "cuda")]
pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
use packet::PACKET_DATA_SIZE;
let mut out = Vec::new();
let mut elems = Vec::new();
let mut locks = Vec::new();
let mut rvs = Vec::new();
for packets in batches {
locks.push(packets.read().unwrap());
}
let mut num = 0;
for p in locks {
elems.push(Elems {
elems: p.packets.as_ptr(),
num: p.packets.len() as u32,
});
let mut v = Vec::new();
v.resize(p.packets.len(), 0);
rvs.push(v);
num += p.packets.len();
}
out.resize(num, 0);
trace!("Starting verify num packets: {}", num);
trace!("elem len: {}", elems.len() as u32);
trace!("packet sizeof: {}", size_of::<Packet>() as u32);
trace!("pub key: {}", (TX_OFFSET + PUB_KEY_OFFSET) as u32);
trace!("sig offset: {}", (TX_OFFSET + SIG_OFFSET) as u32);
trace!("sign data: {}", (TX_OFFSET + SIGNED_DATA_OFFSET) as u32);
trace!("len offset: {}", PACKET_DATA_SIZE as u32);
unsafe {
let res = ed25519_verify_many(
elems.as_ptr(),
elems.len() as u32,
size_of::<Packet>() as u32,
(TX_OFFSET + PUB_KEY_OFFSET) as u32,
(TX_OFFSET + SIG_OFFSET) as u32,
(TX_OFFSET + SIGNED_DATA_OFFSET) as u32,
PACKET_DATA_SIZE as u32,
out.as_mut_ptr(),
);
if res != 0 {
trace!("RETURN!!!: {}", res);
}
}
trace!("done verify");
let mut num = 0;
for vs in rvs.iter_mut() {
for mut v in vs.iter_mut() {
*v = out[num];
if *v != 0 {
trace!("VERIFIED PACKET!!!!!");
}
num += 1;
}
}
rvs
}
#[cfg(test)]
mod tests {
use accountant_skel::Request;
use bincode::serialize;
use ecdsa;
use packet::{Packet, Packets, SharedPackets};
use std::sync::RwLock;
use transaction::test_tx;
use transaction::Transaction;
fn make_packet_from_transaction(tr: Transaction) -> Packet {
let tx = serialize(&Request::Transaction(tr)).unwrap();
let mut packet = Packet::default();
packet.meta.size = tx.len();
packet.data[..packet.meta.size].copy_from_slice(&tx);
return packet;
}
fn test_verify_n(n: usize, modify_data: bool) {
let tr = test_tx();
let mut packet = make_packet_from_transaction(tr);
// jumble some data to test failure
if modify_data {
packet.data[20] = 10;
}
// generate packet vector
let mut packets = Packets::default();
packets.packets = Vec::new();
for _ in 0..n {
packets.packets.push(packet.clone());
}
let shared_packets = SharedPackets::new(RwLock::new(packets));
let batches = vec![shared_packets.clone(), shared_packets.clone()];
// verify packets
let ans = ecdsa::ed25519_verify(&batches);
// check result
let ref_ans = if modify_data { 0u8 } else { 1u8 };
assert_eq!(ans, vec![vec![ref_ans; n], vec![ref_ans; n]]);
}
#[test]
fn test_verify_zero() {
test_verify_n(0, false);
}
#[test]
fn test_verify_one() {
test_verify_n(1, false);
}
#[test]
fn test_verify_seventy_one() {
test_verify_n(71, false);
}
#[test]
fn test_verify_fail() {
test_verify_n(5, true);
}
}
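Both verify paths return one flag per packet (1 = signature valid), shaped exactly like the input batches. A hedged sketch of consuming that output (the helper name is illustrative, not part of this change):

use solana::packet::SharedPackets;

// Count (or forward) only the packets whose ed25519 signature verified.
fn count_valid(batches: &[SharedPackets], flags: &[Vec<u8>]) -> usize {
    let mut valid = 0;
    for (batch, batch_flags) in batches.iter().zip(flags) {
        let packets = batch.read().unwrap();
        for (_p, f) in packets.packets.iter().zip(batch_flags) {
            if *f != 0 {
                valid += 1; // a real pipeline would forward `_p` here
            }
        }
    }
    valid
}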

src/entry.rs Normal file

@ -0,0 +1,176 @@
//! The `entry` module is a fundamental building block of Proof of History. Each Entry
//! carries a unique ID that is the hash of the Entry before it, plus the hash of the
//! transactions within it. Entries cannot be reordered, and the `num_hashes` field
//! represents an approximate amount of time since the previous Entry was created.
use event::Event;
use hash::{extend_and_hash, hash, Hash};
use rayon::prelude::*;
/// Each Entry contains three pieces of data. The `num_hashes` field is the number
/// of hashes performed since the previous entry. The `id` field is the result
/// of hashing `id` from the previous entry `num_hashes` times. The `events`
/// field points to Events that took place shortly after `id` was generated.
///
/// If you divide `num_hashes` by the amount of time it takes to generate a new hash, you
/// get a duration estimate since the last Entry. Since processing power increases
/// over time, one should expect the duration `num_hashes` represents to decrease proportionally.
/// Though processing power varies across nodes, the network gives priority to the
/// fastest processor. Duration should therefore be estimated by assuming that the hash
/// was generated by the fastest processor at the time the entry was recorded.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Entry {
pub num_hashes: u64,
pub id: Hash,
pub events: Vec<Event>,
}
impl Entry {
/// Creates a Entry from the number of hashes `num_hashes` since the previous event
/// and that resulting `id`.
pub fn new_tick(num_hashes: u64, id: &Hash) -> Self {
Entry {
num_hashes,
id: *id,
events: vec![],
}
}
/// Verifies self.id is the result of hashing a `start_hash` `self.num_hashes` times.
/// If the event is not a Tick, then hash that as well.
pub fn verify(&self, start_hash: &Hash) -> bool {
self.events.par_iter().all(|event| event.verify())
&& self.id == next_hash(start_hash, self.num_hashes, &self.events)
}
}
fn add_event_data(hash_data: &mut Vec<u8>, event: &Event) {
match *event {
Event::Transaction(ref tr) => {
hash_data.push(0u8);
hash_data.extend_from_slice(&tr.sig);
}
Event::Signature { ref sig, .. } => {
hash_data.push(1u8);
hash_data.extend_from_slice(sig);
}
Event::Timestamp { ref sig, .. } => {
hash_data.push(2u8);
hash_data.extend_from_slice(sig);
}
}
}
/// Creates the hash `num_hashes` after `start_hash`. If the event contains
/// a signature, the final hash will be a hash of both the previous ID and
/// the signature.
pub fn next_hash(start_hash: &Hash, num_hashes: u64, events: &[Event]) -> Hash {
let mut id = *start_hash;
for _ in 1..num_hashes {
id = hash(&id);
}
// Hash all the event data
let mut hash_data = vec![];
for event in events {
add_event_data(&mut hash_data, event);
}
if !hash_data.is_empty() {
extend_and_hash(&id, &hash_data)
} else if num_hashes != 0 {
hash(&id)
} else {
id
}
}
/// Creates the next Entry `num_hashes` after `start_hash`.
pub fn create_entry(start_hash: &Hash, cur_hashes: u64, events: Vec<Event>) -> Entry {
let num_hashes = cur_hashes + if events.is_empty() { 0 } else { 1 };
let id = next_hash(start_hash, 0, &events);
Entry {
num_hashes,
id,
events,
}
}
/// Creates the next Entry `num_hashes` after `start_hash`, updating `start_hash` and `cur_hashes` in place.
pub fn create_entry_mut(start_hash: &mut Hash, cur_hashes: &mut u64, events: Vec<Event>) -> Entry {
let entry = create_entry(start_hash, *cur_hashes, events);
*start_hash = entry.id;
*cur_hashes = 0;
entry
}
/// Creates the next Tick Entry `num_hashes` after `start_hash`.
pub fn next_tick(start_hash: &Hash, num_hashes: u64) -> Entry {
Entry {
num_hashes,
id: next_hash(start_hash, num_hashes, &[]),
events: vec![],
}
}
#[cfg(test)]
mod tests {
use super::*;
use chrono::prelude::*;
use entry::create_entry;
use event::Event;
use hash::hash;
use signature::{KeyPair, KeyPairUtil};
use transaction::Transaction;
#[test]
fn test_entry_verify() {
let zero = Hash::default();
let one = hash(&zero);
assert!(Entry::new_tick(0, &zero).verify(&zero)); // base case
assert!(!Entry::new_tick(0, &zero).verify(&one)); // base case, bad
assert!(next_tick(&zero, 1).verify(&zero)); // inductive step
assert!(!next_tick(&zero, 1).verify(&one)); // inductive step, bad
}
#[test]
fn test_event_reorder_attack() {
let zero = Hash::default();
// First, verify entries
let keypair = KeyPair::new();
let tr0 = Event::Transaction(Transaction::new(&keypair, keypair.pubkey(), 0, zero));
let tr1 = Event::Transaction(Transaction::new(&keypair, keypair.pubkey(), 1, zero));
let mut e0 = create_entry(&zero, 0, vec![tr0.clone(), tr1.clone()]);
assert!(e0.verify(&zero));
// Next, swap two events and ensure verification fails.
e0.events[0] = tr1; // <-- attack
e0.events[1] = tr0;
assert!(!e0.verify(&zero));
}
#[test]
fn test_witness_reorder_attack() {
let zero = Hash::default();
// First, verify entries
let keypair = KeyPair::new();
let tr0 = Event::new_timestamp(&keypair, Utc::now());
let tr1 = Event::new_signature(&keypair, Default::default());
let mut e0 = create_entry(&zero, 0, vec![tr0.clone(), tr1.clone()]);
assert!(e0.verify(&zero));
// Next, swap two witness events and ensure verification fails.
e0.events[0] = tr1; // <-- attack
e0.events[1] = tr0;
assert!(!e0.verify(&zero));
}
#[test]
fn test_next_tick() {
let zero = Hash::default();
let tick = next_tick(&zero, 1);
assert_eq!(tick.num_hashes, 1);
assert_ne!(tick.id, zero);
}
}
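As a concrete illustration of the chain described in the module docs, each tick commits to its predecessor's id, so verification only needs the previous hash. A minimal sketch using the functions above:

use solana::entry::next_tick;
use solana::hash::Hash;

fn main() {
    let seed = Hash::default();
    let tick0 = next_tick(&seed, 1_000);     // hashes `seed` 1000 times
    let tick1 = next_tick(&tick0.id, 1_000); // chains off tick0's id
    assert!(tick0.verify(&seed));
    assert!(tick1.verify(&tick0.id));
    assert!(!tick1.verify(&seed)); // verifying against the wrong parent fails
}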

src/erasure.rs Normal file

@ -0,0 +1,420 @@
// Support erasure coding
use packet::{BlobRecycler, SharedBlob};
use std::result;
//TODO(sakridge) pick these values
const NUM_CODED: usize = 10;
const MAX_MISSING: usize = 2;
const NUM_DATA: usize = NUM_CODED - MAX_MISSING;
#[derive(Debug, PartialEq, Eq)]
pub enum ErasureError {
NotEnoughBlocksToDecode,
DecodeError,
InvalidBlockSize,
}
pub type Result<T> = result::Result<T, ErasureError>;
// k = number of data devices
// m = number of coding devices
// w = word size
extern "C" {
fn jerasure_matrix_encode(
k: i32,
m: i32,
w: i32,
matrix: *const i32,
data_ptrs: *const *const u8,
coding_ptrs: *const *mut u8,
size: i32,
);
fn jerasure_matrix_decode(
k: i32,
m: i32,
w: i32,
matrix: *const i32,
row_k_ones: i32,
erasures: *const i32,
data_ptrs: *const *mut u8,
coding_ptrs: *const *const u8,
size: i32,
) -> i32;
fn galois_single_divide(a: i32, b: i32, w: i32) -> i32;
}
fn get_matrix(m: i32, k: i32, w: i32) -> Vec<i32> {
let mut matrix = vec![0; (m * k) as usize];
for i in 0..m {
for j in 0..k {
unsafe {
matrix[(i * k + j) as usize] = galois_single_divide(1, i ^ (m + j), w);
}
}
}
matrix
}
pub const ERASURE_W: i32 = 32;
// Generate coding blocks into coding
// There are some alignment restrictions: blocks should be aligned to 16 bytes,
// which means their size should be >= 16 bytes
pub fn generate_coding_blocks(coding: &mut [&mut [u8]], data: &[&[u8]]) -> Result<()> {
if data.len() == 0 {
return Ok(());
}
let m = coding.len() as i32;
let block_len = data[0].len();
let matrix: Vec<i32> = get_matrix(m, data.len() as i32, ERASURE_W);
let mut coding_arg = Vec::new();
let mut data_arg = Vec::new();
for block in data {
if block_len != block.len() {
return Err(ErasureError::InvalidBlockSize);
}
data_arg.push(block.as_ptr());
}
for mut block in coding {
if block_len != block.len() {
return Err(ErasureError::InvalidBlockSize);
}
coding_arg.push(block.as_mut_ptr());
}
unsafe {
jerasure_matrix_encode(
data.len() as i32,
m,
ERASURE_W,
matrix.as_ptr(),
data_arg.as_ptr(),
coding_arg.as_ptr(),
data[0].len() as i32,
);
}
Ok(())
}
// Recover data + coding blocks into data blocks
// data: array of blocks to recover into
// coding: array of coding blocks
// erasures: list of indices in data where blocks should be recovered
pub fn decode_blocks(data: &mut [&mut [u8]], coding: &[&[u8]], erasures: &[i32]) -> Result<()> {
if data.len() == 0 {
return Ok(());
}
let block_len = data[0].len();
let matrix: Vec<i32> = get_matrix(coding.len() as i32, data.len() as i32, ERASURE_W);
// generate coding pointers, blocks should be the same size
let mut coding_arg: Vec<*const u8> = Vec::new();
for x in coding.iter() {
if x.len() != block_len {
return Err(ErasureError::InvalidBlockSize);
}
coding_arg.push(x.as_ptr());
}
// generate data pointers, blocks should be the same size
let mut data_arg: Vec<*mut u8> = Vec::new();
for x in data.iter_mut() {
if x.len() != block_len {
return Err(ErasureError::InvalidBlockSize);
}
data_arg.push(x.as_mut_ptr());
}
unsafe {
let ret = jerasure_matrix_decode(
data.len() as i32,
coding.len() as i32,
ERASURE_W,
matrix.as_ptr(),
0,
erasures.as_ptr(),
data_arg.as_ptr(),
coding_arg.as_ptr(),
data[0].len() as i32,
);
trace!("jerasure_matrix_decode ret: {}", ret);
for x in data[erasures[0] as usize][0..8].iter() {
trace!("{} ", x)
}
trace!("");
if ret < 0 {
return Err(ErasureError::DecodeError);
}
}
Ok(())
}
// Generate coding blocks in window from consumed to consumed+NUM_DATA
pub fn generate_coding(
re: &BlobRecycler,
window: &mut Vec<Option<SharedBlob>>,
consumed: usize,
) -> Result<()> {
let mut data_blobs = Vec::new();
let mut coding_blobs = Vec::new();
let mut data_locks = Vec::new();
let mut data_ptrs: Vec<&[u8]> = Vec::new();
let mut coding_locks = Vec::new();
let mut coding_ptrs: Vec<&mut [u8]> = Vec::new();
for i in consumed..consumed + NUM_DATA {
let n = i % window.len();
data_blobs.push(window[n].clone().unwrap());
}
for b in &data_blobs {
data_locks.push(b.write().unwrap());
}
for (i, l) in data_locks.iter_mut().enumerate() {
trace!("i: {} data: {}", i, l.data[0]);
data_ptrs.push(&l.data);
}
// generate coding ptr array
let coding_start = consumed + NUM_DATA;
let coding_end = consumed + NUM_CODED;
for i in coding_start..coding_end {
let n = i % window.len();
window[n] = Some(re.allocate());
coding_blobs.push(window[n].clone().unwrap());
}
for b in &coding_blobs {
coding_locks.push(b.write().unwrap());
}
for (i, l) in coding_locks.iter_mut().enumerate() {
trace!("i: {} data: {}", i, l.data[0]);
coding_ptrs.push(&mut l.data);
}
generate_coding_blocks(coding_ptrs.as_mut_slice(), &data_ptrs)?;
trace!("consumed: {}", consumed);
Ok(())
}
// Recover missing blocks into window
// Missing blocks should be None; `re` is used to allocate
// new ones. Returns an error if too many blocks are missing
// for the coding blocks present to restore the data.
pub fn recover(
re: &BlobRecycler,
window: &mut Vec<Option<SharedBlob>>,
consumed: usize,
) -> Result<()> {
//recover with erasure coding
let mut data_missing = 0;
let mut coded_missing = 0;
let coding_start = consumed + NUM_DATA;
let coding_end = consumed + NUM_CODED;
for i in consumed..coding_end {
let n = i % window.len();
if window[n].is_none() {
if i >= coding_start {
coded_missing += 1;
} else {
data_missing += 1;
}
}
}
trace!("missing: data: {} coding: {}", data_missing, coded_missing);
if data_missing > 0 {
if (data_missing + coded_missing) <= MAX_MISSING {
let mut blobs: Vec<SharedBlob> = Vec::new();
let mut locks = Vec::new();
let mut data_ptrs: Vec<&mut [u8]> = Vec::new();
let mut coding_ptrs: Vec<&[u8]> = Vec::new();
let mut erasures: Vec<i32> = Vec::new();
for i in consumed..coding_end {
let j = i % window.len();
let mut b = &mut window[j];
if b.is_some() {
blobs.push(b.clone().unwrap());
continue;
}
let n = re.allocate();
*b = Some(n.clone());
//mark the missing memory
blobs.push(n);
erasures.push((i - consumed) as i32);
}
erasures.push(-1);
trace!("erasures: {:?}", erasures);
//lock everything
for b in &blobs {
locks.push(b.write().unwrap());
}
for (i, l) in locks.iter_mut().enumerate() {
if i >= NUM_DATA {
trace!("pushing coding: {}", i);
coding_ptrs.push(&l.data);
} else {
trace!("pushing data: {}", i);
data_ptrs.push(&mut l.data);
}
}
trace!(
"coding_ptrs.len: {} data_ptrs.len {}",
coding_ptrs.len(),
data_ptrs.len()
);
decode_blocks(data_ptrs.as_mut_slice(), &coding_ptrs, &erasures)?;
} else {
return Err(ErasureError::NotEnoughBlocksToDecode);
}
}
Ok(())
}
#[cfg(test)]
mod test {
use erasure;
use packet::{BlobRecycler, SharedBlob, PACKET_DATA_SIZE};
extern crate env_logger;
#[test]
pub fn test_coding() {
let zero_vec = vec![0; 16];
let mut vs: Vec<Vec<u8>> = (0..4).map(|i| (i..(16 + i)).collect()).collect();
let v_orig: Vec<u8> = vs[0].clone();
let m = 2;
let mut coding_blocks: Vec<_> = (0..m).map(|_| vec![0u8; 16]).collect();
{
let mut coding_blocks_slices: Vec<_> =
coding_blocks.iter_mut().map(|x| x.as_mut_slice()).collect();
let v_slices: Vec<_> = vs.iter().map(|x| x.as_slice()).collect();
assert!(
erasure::generate_coding_blocks(
coding_blocks_slices.as_mut_slice(),
v_slices.as_slice()
).is_ok()
);
}
trace!("coding blocks:");
for b in &coding_blocks {
trace!("{:?}", b);
}
let erasure: i32 = 1;
let erasures = vec![erasure, -1];
// clear an entry
vs[erasure as usize].copy_from_slice(zero_vec.as_slice());
{
let coding_blocks_slices: Vec<_> = coding_blocks.iter().map(|x| x.as_slice()).collect();
let mut v_slices: Vec<_> = vs.iter_mut().map(|x| x.as_mut_slice()).collect();
assert!(
erasure::decode_blocks(
v_slices.as_mut_slice(),
coding_blocks_slices.as_slice(),
erasures.as_slice(),
).is_ok()
);
}
trace!("vs:");
for v in &vs {
trace!("{:?}", v);
}
assert_eq!(v_orig, vs[0]);
}
fn print_window(window: &Vec<Option<SharedBlob>>) {
for (i, w) in window.iter().enumerate() {
print!("window({}): ", i);
if w.is_some() {
let window_lock = w.clone().unwrap();
let window_data = window_lock.read().unwrap().data;
for i in 0..8 {
print!("{} ", window_data[i]);
}
} else {
print!("null");
}
println!("");
}
}
#[test]
pub fn test_window_recover() {
let mut window = Vec::new();
let blob_recycler = BlobRecycler::default();
let offset = 4;
for i in 0..(4 * erasure::NUM_CODED + 1) {
let b = blob_recycler.allocate();
let b_ = b.clone();
let data_len = b.read().unwrap().data.len();
let mut w = b.write().unwrap();
w.set_index(i as u64).unwrap();
assert_eq!(i as u64, w.get_index().unwrap());
w.meta.size = PACKET_DATA_SIZE;
for k in 0..data_len {
w.data[k] = (k + i) as u8;
}
window.push(Some(b_));
}
println!("** after-gen:");
print_window(&window);
assert!(erasure::generate_coding(&blob_recycler, &mut window, offset).is_ok());
assert!(
erasure::generate_coding(&blob_recycler, &mut window, offset + erasure::NUM_CODED)
.is_ok()
);
assert!(
erasure::generate_coding(
&blob_recycler,
&mut window,
offset + (2 * erasure::NUM_CODED)
).is_ok()
);
assert!(
erasure::generate_coding(
&blob_recycler,
&mut window,
offset + (3 * erasure::NUM_CODED)
).is_ok()
);
println!("** after-coding:");
print_window(&window);
let refwindow = window[offset + 1].clone();
window[offset + 1] = None;
window[offset + 2] = None;
window[offset + erasure::NUM_CODED + 3] = None;
window[offset + (2 * erasure::NUM_CODED) + 0] = None;
window[offset + (2 * erasure::NUM_CODED) + 1] = None;
window[offset + (2 * erasure::NUM_CODED) + 2] = None;
let window_l0 = &(window[offset + (3 * erasure::NUM_CODED)]).clone().unwrap();
window_l0.write().unwrap().data[0] = 55;
println!("** after-nulling:");
print_window(&window);
assert!(erasure::recover(&blob_recycler, &mut window, offset).is_ok());
assert!(erasure::recover(&blob_recycler, &mut window, offset + erasure::NUM_CODED).is_ok());
assert!(
erasure::recover(
&blob_recycler,
&mut window,
offset + (2 * erasure::NUM_CODED)
).is_err()
);
assert!(
erasure::recover(
&blob_recycler,
&mut window,
offset + (3 * erasure::NUM_CODED)
).is_ok()
);
println!("** after-restore:");
print_window(&window);
let window_l = window[offset + 1].clone().unwrap();
let ref_l = refwindow.clone().unwrap();
assert_eq!(
window_l.read().unwrap().data.to_vec(),
ref_l.read().unwrap().data.to_vec()
);
}
}
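With NUM_CODED = 10 and MAX_MISSING = 2, each group of 10 blobs carries 8 data blobs plus 2 coding blobs, so any 2 losses per group are recoverable. A minimal sketch of the raw block API, adapted from the test above (requires the `erasure` cargo feature):

use solana::erasure;

fn main() {
    // Four 16-byte data blocks protected by two coding blocks.
    let mut data: Vec<Vec<u8>> = (0u8..4).map(|i| vec![i; 16]).collect();
    let mut coding: Vec<Vec<u8>> = (0..2).map(|_| vec![0u8; 16]).collect();
    {
        let d: Vec<&[u8]> = data.iter().map(|x| x.as_slice()).collect();
        let mut c: Vec<&mut [u8]> = coding.iter_mut().map(|x| x.as_mut_slice()).collect();
        erasure::generate_coding_blocks(c.as_mut_slice(), &d).unwrap();
    }
    data[1] = vec![0u8; 16];    // erase one data block
    let erasures = vec![1, -1]; // -1 terminates the erasure list
    {
        let c: Vec<&[u8]> = coding.iter().map(|x| x.as_slice()).collect();
        let mut d: Vec<&mut [u8]> = data.iter_mut().map(|x| x.as_mut_slice()).collect();
        erasure::decode_blocks(d.as_mut_slice(), &c, &erasures).unwrap();
    }
    assert_eq!(data[1], vec![1u8; 16]); // block restored
}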

src/event.rs Normal file

@ -0,0 +1,67 @@
//! The `event` module handles events, which may be a `Transaction`, or a `Witness` used to process a pending
//! Transaction.
use bincode::serialize;
use chrono::prelude::*;
use signature::{KeyPair, KeyPairUtil, PublicKey, Signature, SignatureUtil};
use transaction::Transaction;
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Event {
Transaction(Transaction),
Signature {
from: PublicKey,
tx_sig: Signature,
sig: Signature,
},
Timestamp {
from: PublicKey,
dt: DateTime<Utc>,
sig: Signature,
},
}
impl Event {
/// Create and sign a new Witness Timestamp. Used for unit-testing.
pub fn new_timestamp(from: &KeyPair, dt: DateTime<Utc>) -> Self {
let sign_data = serialize(&dt).unwrap();
let sig = Signature::clone_from_slice(from.sign(&sign_data).as_ref());
Event::Timestamp {
from: from.pubkey(),
dt,
sig,
}
}
/// Create and sign a new Witness Signature. Used for unit-testing.
pub fn new_signature(from: &KeyPair, tx_sig: Signature) -> Self {
let sig = Signature::clone_from_slice(from.sign(&tx_sig).as_ref());
Event::Signature {
from: from.pubkey(),
tx_sig,
sig,
}
}
/// Verify that the Event's signatures are valid and, if it is a transaction, that its
/// spending plan is valid.
pub fn verify(&self) -> bool {
match *self {
Event::Transaction(ref tr) => tr.verify_sig(),
Event::Signature { from, tx_sig, sig } => sig.verify(&from, &tx_sig),
Event::Timestamp { from, dt, sig } => sig.verify(&from, &serialize(&dt).unwrap()),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use signature::{KeyPair, KeyPairUtil};
#[test]
fn test_event_verify() {
assert!(Event::new_timestamp(&KeyPair::new(), Utc::now()).verify());
assert!(Event::new_signature(&KeyPair::new(), Signature::default()).verify());
}
}
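A short sketch of creating and verifying the two witness variants defined above (mirrors the unit test):

use chrono::prelude::*;
use solana::event::Event;
use solana::signature::{KeyPair, KeyPairUtil, Signature};

fn main() {
    let keypair = KeyPair::new();
    // A Timestamp witness signs the serialized datetime.
    let ts = Event::new_timestamp(&keypair, Utc::now());
    assert!(ts.verify());
    // A Signature witness signs another transaction's signature.
    let ws = Event::new_signature(&keypair, Signature::default());
    assert!(ws.verify());
}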

src/hash.rs Normal file

@ -0,0 +1,21 @@
//! The `hash` module provides functions for creating SHA-256 hashes.
use generic_array::typenum::U32;
use generic_array::GenericArray;
use sha2::{Digest, Sha256};
pub type Hash = GenericArray<u8, U32>;
/// Return a Sha256 hash for the given data.
pub fn hash(val: &[u8]) -> Hash {
let mut hasher = Sha256::default();
hasher.input(val);
hasher.result()
}
/// Return the hash of the given hash extended with the given value.
pub fn extend_and_hash(id: &Hash, val: &[u8]) -> Hash {
let mut hash_data = id.to_vec();
hash_data.extend_from_slice(val);
hash(&hash_data)
}
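For instance, chaining `hash` with `extend_and_hash` reproduces the two cases `next_hash` distinguishes in entry.rs: plain hash-chain steps versus steps that fold in event data. A minimal sketch:

use solana::hash::{extend_and_hash, hash, Hash};

fn main() {
    let h0 = Hash::default();
    let h1 = hash(&h0);                           // one hash-chain step
    let h2 = extend_and_hash(&h1, b"event data"); // mix event data into the chain
    assert_ne!(h1, h0);
    assert_ne!(h2, hash(&h1)); // differs from a plain step
}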

src/historian.rs

@ -1,137 +1,113 @@
//! The `historian` crate provides a microservice for generating a Proof-of-History.
//! It logs Event items on behalf of its users. It continuously generates
//! new hashes, only stopping to check if it has been sent an Event item. It
//! tags each Event with an Entry and sends it back. The Entry includes the
//! Event, the latest hash, and the number of hashes since the last event.
//! The resulting stream of entries represents ordered events in time.
//! The `historian` module provides a microservice for generating a Proof of History.
//! It manages a thread containing a Proof of History Recorder.
use std::thread::JoinHandle;
use std::sync::mpsc::{Receiver, Sender};
use log::{hash, Entry, Event};
use entry::Entry;
use hash::Hash;
use recorder::{ExitReason, Recorder, Signal};
use std::sync::mpsc::{sync_channel, Receiver, SyncSender};
use std::thread::{spawn, JoinHandle};
use std::time::Instant;
pub struct Historian {
pub sender: Sender<Event>,
pub sender: SyncSender<Signal>,
pub receiver: Receiver<Entry>,
pub thread_hdl: JoinHandle<(Entry, ExitReason)>,
}
#[derive(Debug, PartialEq, Eq)]
pub enum ExitReason {
RecvDisconnected,
SendDisconnected,
}
fn log_events(
receiver: &Receiver<Event>,
sender: &Sender<Entry>,
num_hashes: u64,
end_hash: u64,
) -> Result<u64, (Entry, ExitReason)> {
use std::sync::mpsc::TryRecvError;
let mut num_hashes = num_hashes;
loop {
match receiver.try_recv() {
Ok(event) => {
let entry = Entry {
end_hash,
num_hashes,
event,
};
if let Err(_) = sender.send(entry.clone()) {
return Err((entry, ExitReason::SendDisconnected));
}
num_hashes = 0;
}
Err(TryRecvError::Empty) => {
return Ok(num_hashes);
}
Err(TryRecvError::Disconnected) => {
let entry = Entry {
end_hash,
num_hashes,
event: Event::Tick,
};
return Err((entry, ExitReason::RecvDisconnected));
}
}
}
}
/// A background thread that will continue tagging received Event messages and
/// sending back Entry messages until either the receiver or sender channel is closed.
pub fn create_logger(
start_hash: u64,
receiver: Receiver<Event>,
sender: Sender<Entry>,
) -> JoinHandle<(Entry, ExitReason)> {
use std::thread;
thread::spawn(move || {
let mut end_hash = start_hash;
let mut num_hashes = 0;
loop {
match log_events(&receiver, &sender, num_hashes, end_hash) {
Ok(n) => num_hashes = n,
Err(err) => return err,
}
end_hash = hash(end_hash);
num_hashes += 1;
}
})
pub thread_hdl: JoinHandle<ExitReason>,
}
impl Historian {
pub fn new(start_hash: u64) -> Self {
use std::sync::mpsc::channel;
let (sender, event_receiver) = channel();
let (entry_sender, receiver) = channel();
let thread_hdl = create_logger(start_hash, event_receiver, entry_sender);
pub fn new(start_hash: &Hash, ms_per_tick: Option<u64>) -> Self {
let (sender, event_receiver) = sync_channel(10_000);
let (entry_sender, receiver) = sync_channel(10_000);
let thread_hdl =
Historian::create_recorder(*start_hash, ms_per_tick, event_receiver, entry_sender);
Historian {
sender,
receiver,
thread_hdl,
}
}
/// A background thread that will continue tagging received Event messages and
/// sending back Entry messages until either the receiver or sender channel is closed.
fn create_recorder(
start_hash: Hash,
ms_per_tick: Option<u64>,
receiver: Receiver<Signal>,
sender: SyncSender<Entry>,
) -> JoinHandle<ExitReason> {
spawn(move || {
let mut recorder = Recorder::new(receiver, sender, start_hash);
let now = Instant::now();
loop {
if let Err(err) = recorder.process_events(now, ms_per_tick) {
return err;
}
if ms_per_tick.is_some() {
recorder.hash();
}
}
})
}
}
#[cfg(test)]
mod tests {
use super::*;
use log::*;
use ledger::Block;
use std::thread::sleep;
use std::time::Duration;
#[test]
fn test_historian() {
use std::thread::sleep;
use std::time::Duration;
let zero = Hash::default();
let hist = Historian::new(&zero, None);
let hist = Historian::new(0);
hist.sender.send(Event::Tick).unwrap();
hist.sender.send(Signal::Tick).unwrap();
sleep(Duration::new(0, 1_000_000));
hist.sender.send(Event::UserDataKey(0xdeadbeef)).unwrap();
hist.sender.send(Signal::Tick).unwrap();
sleep(Duration::new(0, 1_000_000));
hist.sender.send(Event::Tick).unwrap();
hist.sender.send(Signal::Tick).unwrap();
let entry0 = hist.receiver.recv().unwrap();
let entry1 = hist.receiver.recv().unwrap();
let entry2 = hist.receiver.recv().unwrap();
assert_eq!(entry0.num_hashes, 0);
assert_eq!(entry1.num_hashes, 0);
assert_eq!(entry2.num_hashes, 0);
drop(hist.sender);
assert_eq!(
hist.thread_hdl.join().unwrap().1,
hist.thread_hdl.join().unwrap(),
ExitReason::RecvDisconnected
);
assert!(verify_slice(&[entry0, entry1, entry2], 0));
assert!([entry0, entry1, entry2].verify(&zero));
}
#[test]
fn test_historian_closed_sender() {
let hist = Historian::new(0);
let zero = Hash::default();
let hist = Historian::new(&zero, None);
drop(hist.receiver);
hist.sender.send(Event::Tick).unwrap();
hist.sender.send(Signal::Tick).unwrap();
assert_eq!(
hist.thread_hdl.join().unwrap().1,
hist.thread_hdl.join().unwrap(),
ExitReason::SendDisconnected
);
}
#[test]
fn test_ticking_historian() {
let zero = Hash::default();
let hist = Historian::new(&zero, Some(20));
sleep(Duration::from_millis(300));
hist.sender.send(Signal::Tick).unwrap();
drop(hist.sender);
let entries: Vec<Entry> = hist.receiver.iter().collect();
assert!(entries.len() > 1);
// Ensure the ID is not the seed.
assert_ne!(entries[0].id, zero);
}
}
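The `ms_per_tick` knob makes the recorder self-clock; a minimal sketch of consuming the auto-generated entries (mirrors `test_ticking_historian`):

use solana::entry::Entry;
use solana::hash::Hash;
use solana::historian::Historian;
use std::thread::sleep;
use std::time::Duration;

fn main() {
    let seed = Hash::default();
    let hist = Historian::new(&seed, Some(20)); // tick roughly every 20ms
    sleep(Duration::from_millis(100));
    drop(hist.sender); // closing the channel shuts the recorder down
    let entries: Vec<Entry> = hist.receiver.iter().collect();
    assert!(!entries.is_empty()); // ticks arrived without any Signal being sent
}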

src/ledger.rs Normal file

@ -0,0 +1,67 @@
//! The `ledger` module provides functions for parallel verification of the
//! Proof of History ledger.
use entry::{next_tick, Entry};
use hash::Hash;
use rayon::prelude::*;
pub trait Block {
/// Verifies the hashes and counts of a slice of events are all consistent.
fn verify(&self, start_hash: &Hash) -> bool;
}
impl Block for [Entry] {
fn verify(&self, start_hash: &Hash) -> bool {
let genesis = [Entry::new_tick(0, start_hash)];
let entry_pairs = genesis.par_iter().chain(self).zip(self);
entry_pairs.all(|(x0, x1)| x1.verify(&x0.id))
}
}
/// Create a vector of Ticks of length `len` from `start_hash` hash and `num_hashes`.
pub fn next_ticks(start_hash: &Hash, num_hashes: u64, len: usize) -> Vec<Entry> {
let mut id = *start_hash;
let mut ticks = vec![];
for _ in 0..len {
let entry = next_tick(&id, num_hashes);
id = entry.id;
ticks.push(entry);
}
ticks
}
#[cfg(test)]
mod tests {
use super::*;
use hash::hash;
#[test]
fn test_verify_slice() {
let zero = Hash::default();
let one = hash(&zero);
assert!(vec![][..].verify(&zero)); // base case
assert!(vec![Entry::new_tick(0, &zero)][..].verify(&zero)); // singleton case 1
assert!(!vec![Entry::new_tick(0, &zero)][..].verify(&one)); // singleton case 2, bad
assert!(next_ticks(&zero, 0, 2)[..].verify(&zero)); // inductive step
let mut bad_ticks = next_ticks(&zero, 0, 2);
bad_ticks[1].id = one;
assert!(!bad_ticks.verify(&zero)); // inductive step, bad
}
}
#[cfg(all(feature = "unstable", test))]
mod bench {
extern crate test;
use self::test::Bencher;
use ledger::*;
#[bench]
fn event_bench(bencher: &mut Bencher) {
let start_hash = Hash::default();
let entries = next_ticks(&start_hash, 10_000, 8);
bencher.iter(|| {
assert!(entries.verify(&start_hash));
});
}
}
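Since `Block::verify` chains each entry to its predecessor and checks them in parallel, validating a slice of the ledger is a single call (sketch):

use solana::hash::Hash;
use solana::ledger::{next_ticks, Block};

fn main() {
    let seed = Hash::default();
    let ticks = next_ticks(&seed, 100, 16); // 16 ticks, 100 hashes apart
    assert!(ticks[..].verify(&seed));
}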

src/lib.rs

@ -1,5 +1,43 @@
#![cfg_attr(feature = "unstable", feature(test))]
pub mod log;
pub mod accountant;
pub mod accountant_skel;
pub mod accountant_stub;
pub mod crdt;
pub mod ecdsa;
pub mod entry;
#[cfg(feature = "erasure")]
pub mod erasure;
pub mod event;
pub mod hash;
pub mod historian;
extern crate itertools;
pub mod ledger;
pub mod mint;
pub mod packet;
pub mod plan;
pub mod recorder;
pub mod result;
pub mod signature;
pub mod streamer;
pub mod subscribers;
pub mod transaction;
extern crate bincode;
extern crate byteorder;
extern crate chrono;
extern crate generic_array;
extern crate libc;
#[macro_use]
extern crate log;
extern crate rayon;
extern crate ring;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate serde_json;
extern crate sha2;
extern crate untrusted;
extern crate futures;
#[cfg(test)]
#[macro_use]
extern crate matches;

src/log.rs

@ -1,157 +0,0 @@
//! The `log` crate provides the foundational data structures for Proof-of-History,
//! an ordered log of events in time.
/// Each log entry contains three pieces of data. The 'num_hashes' field is the number
/// of hashes performed since the previous entry. The 'end_hash' field is the result
/// of hashing 'end_hash' from the previous entry 'num_hashes' times. The 'event'
/// field points to an Event that took place shortly after 'end_hash' was generated.
///
/// If you divide 'num_hashes' by the amount of time it takes to generate a new hash, you
/// get a duration estimate since the last event. Since processing power increases
/// over time, one should expect the duration 'num_hashes' represents to decrease proportionally.
/// Though processing power varies across nodes, the network gives priority to the
/// fastest processor. Duration should therefore be estimated by assuming that the hash
/// was generated by the fastest processor at the time the entry was logged.
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct Entry {
pub num_hashes: u64,
pub end_hash: u64,
pub event: Event,
}
/// When 'event' is Tick, the event represents a simple clock tick, and exists for the
/// sole purpose of improving the performance of event log verification. A tick can
/// be generated in 'num_hashes' hashes and verified in 'num_hashes' hashes. By logging
/// a hash alongside the tick, each tick can be verified in parallel using the 'end_hash'
/// of the preceding tick to seed its hashing.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum Event {
Tick,
UserDataKey(u64),
}
impl Entry {
/// Creates a Entry from the number of hashes 'num_hashes' since the previous event
/// and that resulting 'end_hash'.
pub fn new_tick(num_hashes: u64, end_hash: u64) -> Self {
let event = Event::Tick;
Entry {
num_hashes,
end_hash,
event,
}
}
/// Verifies self.end_hash is the result of hashing a 'start_hash' 'self.num_hashes' times.
pub fn verify(self: &Self, start_hash: u64) -> bool {
self.end_hash == next_tick(start_hash, self.num_hashes).end_hash
}
}
pub fn hash(val: u64) -> u64 {
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
let mut hasher = DefaultHasher::new();
val.hash(&mut hasher);
hasher.finish()
}
/// Creates the next Tick Entry 'num_hashes' after 'start_hash'.
pub fn next_tick(start_hash: u64, num_hashes: u64) -> Entry {
let mut end_hash = start_hash;
for _ in 0..num_hashes {
end_hash = hash(end_hash);
}
Entry::new_tick(num_hashes, end_hash)
}
/// Verifies the hashes and counts of a slice of events are all consistent.
pub fn verify_slice(events: &[Entry], start_hash: u64) -> bool {
use rayon::prelude::*;
let genesis = [Entry::new_tick(0, start_hash)];
let event_pairs = genesis.par_iter().chain(events).zip(events);
event_pairs.all(|(x0, x1)| x1.verify(x0.end_hash))
}
/// Verifies the hashes and events serially. Exists only for reference.
pub fn verify_slice_seq(events: &[Entry], start_hash: u64) -> bool {
let genesis = [Entry::new_tick(0, start_hash)];
let mut event_pairs = genesis.iter().chain(events).zip(events);
event_pairs.all(|(x0, x1)| x1.verify(x0.end_hash))
}
/// Create a vector of Ticks of length 'len' from 'start_hash' hash and 'num_hashes'.
pub fn create_ticks(start_hash: u64, num_hashes: u64, len: usize) -> Vec<Entry> {
use itertools::unfold;
let mut events = unfold(start_hash, |state| {
let event = next_tick(*state, num_hashes);
*state = event.end_hash;
return Some(event);
});
events.by_ref().take(len).collect()
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_event_verify() {
assert!(Entry::new_tick(0, 0).verify(0)); // base case
assert!(!Entry::new_tick(0, 0).verify(1)); // base case, bad
assert!(next_tick(0, 1).verify(0)); // inductive step
assert!(!next_tick(0, 1).verify(1)); // inductive step, bad
}
#[test]
fn test_next_tick() {
assert_eq!(next_tick(0, 1).num_hashes, 1)
}
fn verify_slice_generic(verify_slice: fn(&[Entry], u64) -> bool) {
assert!(verify_slice(&vec![], 0)); // base case
assert!(verify_slice(&vec![Entry::new_tick(0, 0)], 0)); // singleton case 1
assert!(!verify_slice(&vec![Entry::new_tick(0, 0)], 1)); // singleton case 2, bad
assert!(verify_slice(&create_ticks(0, 0, 2), 0)); // inductive step
let mut bad_ticks = create_ticks(0, 0, 2);
bad_ticks[1].end_hash = 1;
assert!(!verify_slice(&bad_ticks, 0)); // inductive step, bad
}
#[test]
fn test_verify_slice() {
verify_slice_generic(verify_slice);
}
#[test]
fn test_verify_slice_seq() {
verify_slice_generic(verify_slice_seq);
}
}
#[cfg(all(feature = "unstable", test))]
mod bench {
extern crate test;
use self::test::Bencher;
use log::*;
#[bench]
fn event_bench(bencher: &mut Bencher) {
let start_hash = 0;
let events = create_ticks(start_hash, 100_000, 8);
bencher.iter(|| {
assert!(verify_slice(&events, start_hash));
});
}
#[bench]
fn event_bench_seq(bencher: &mut Bencher) {
let start_hash = 0;
let events = create_ticks(start_hash, 100_000, 8);
bencher.iter(|| {
assert!(verify_slice_seq(&events, start_hash));
});
}
}

src/mint.rs Normal file

@ -0,0 +1,89 @@
//! The `mint` module is a library for generating the chain's genesis block.
use entry::create_entry;
use entry::Entry;
use event::Event;
use hash::{hash, Hash};
use ring::rand::SystemRandom;
use signature::{KeyPair, KeyPairUtil, PublicKey};
use transaction::Transaction;
use untrusted::Input;
#[derive(Serialize, Deserialize, Debug)]
pub struct Mint {
pub pkcs8: Vec<u8>,
pubkey: PublicKey,
pub tokens: i64,
}
impl Mint {
pub fn new(tokens: i64) -> Self {
let rnd = SystemRandom::new();
let pkcs8 = KeyPair::generate_pkcs8(&rnd).unwrap().to_vec();
let keypair = KeyPair::from_pkcs8(Input::from(&pkcs8)).unwrap();
let pubkey = keypair.pubkey();
Mint {
pkcs8,
pubkey,
tokens,
}
}
pub fn seed(&self) -> Hash {
hash(&self.pkcs8)
}
pub fn last_id(&self) -> Hash {
self.create_entries()[1].id
}
pub fn keypair(&self) -> KeyPair {
KeyPair::from_pkcs8(Input::from(&self.pkcs8)).unwrap()
}
pub fn pubkey(&self) -> PublicKey {
self.pubkey
}
pub fn create_events(&self) -> Vec<Event> {
let keypair = self.keypair();
let tr = Transaction::new(&keypair, self.pubkey(), self.tokens, self.seed());
vec![Event::Transaction(tr)]
}
pub fn create_entries(&self) -> Vec<Entry> {
let e0 = create_entry(&self.seed(), 0, vec![]);
let e1 = create_entry(&e0.id, 0, self.create_events());
vec![e0, e1]
}
}
#[derive(Serialize, Deserialize, Debug)]
pub struct MintDemo {
pub mint: Mint,
pub users: Vec<(Vec<u8>, i64)>,
}
#[cfg(test)]
mod tests {
use super::*;
use ledger::Block;
use plan::Plan;
#[test]
fn test_create_events() {
let mut events = Mint::new(100).create_events().into_iter();
if let Event::Transaction(tr) = events.next().unwrap() {
if let Plan::Pay(payment) = tr.data.plan {
assert_eq!(tr.from, payment.to);
}
}
assert_eq!(events.next(), None);
}
#[test]
fn test_verify_entries() {
let entries = Mint::new(100).create_entries();
assert!(entries[..].verify(&entries[0].id));
}
}
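Putting the pieces together, a Mint's two entries form the smallest verifiable ledger: the first seeds the hash chain and the second deposits the token supply (sketch):

use solana::ledger::Block;
use solana::mint::Mint;

fn main() {
    let mint = Mint::new(1_000_000);
    let entries = mint.create_entries();
    // entries[0] seeds the chain; entries[1] carries the self-deposit.
    assert!(entries[..].verify(&entries[0].id));
    println!("last_id: {:?}", mint.last_id());
}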

src/packet.rs Normal file

@ -0,0 +1,396 @@
//! The `packet` module defines data structures and methods to pull data from the network.
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use result::{Error, Result};
use std::collections::VecDeque;
use std::fmt;
use std::io;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket};
use std::sync::{Arc, Mutex, RwLock};
use std::mem::size_of;
pub type SharedPackets = Arc<RwLock<Packets>>;
pub type SharedBlob = Arc<RwLock<Blob>>;
pub type PacketRecycler = Recycler<Packets>;
pub type BlobRecycler = Recycler<Blob>;
pub const NUM_PACKETS: usize = 1024 * 8;
const BLOB_SIZE: usize = 64 * 1024;
pub const PACKET_DATA_SIZE: usize = 256;
pub const NUM_BLOBS: usize = (NUM_PACKETS * PACKET_DATA_SIZE) / BLOB_SIZE;
#[derive(Clone, Default)]
#[repr(C)]
pub struct Meta {
pub size: usize,
pub addr: [u16; 8],
pub port: u16,
pub v6: bool,
}
#[derive(Clone)]
#[repr(C)]
pub struct Packet {
pub data: [u8; PACKET_DATA_SIZE],
pub meta: Meta,
}
impl fmt::Debug for Packet {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"Packet {{ size: {:?}, addr: {:?} }}",
self.meta.size,
self.meta.addr()
)
}
}
impl Default for Packet {
fn default() -> Packet {
Packet {
data: [0u8; PACKET_DATA_SIZE],
meta: Meta::default(),
}
}
}
impl Meta {
pub fn addr(&self) -> SocketAddr {
if !self.v6 {
let addr = [
self.addr[0] as u8,
self.addr[1] as u8,
self.addr[2] as u8,
self.addr[3] as u8,
];
let ipv4: Ipv4Addr = From::<[u8; 4]>::from(addr);
SocketAddr::new(IpAddr::V4(ipv4), self.port)
} else {
let ipv6: Ipv6Addr = From::<[u16; 8]>::from(self.addr);
SocketAddr::new(IpAddr::V6(ipv6), self.port)
}
}
pub fn set_addr(&mut self, a: &SocketAddr) {
match *a {
SocketAddr::V4(v4) => {
let ip = v4.ip().octets();
self.addr[0] = u16::from(ip[0]);
self.addr[1] = u16::from(ip[1]);
self.addr[2] = u16::from(ip[2]);
self.addr[3] = u16::from(ip[3]);
self.port = a.port();
}
SocketAddr::V6(v6) => {
self.addr = v6.ip().segments();
self.port = a.port();
self.v6 = true;
}
}
}
}
#[derive(Debug)]
pub struct Packets {
pub packets: Vec<Packet>,
}
//auto derive doesn't support large arrays
impl Default for Packets {
fn default() -> Packets {
Packets {
packets: vec![Packet::default(); NUM_PACKETS],
}
}
}
#[derive(Clone)]
pub struct Blob {
pub data: [u8; BLOB_SIZE],
pub meta: Meta,
}
impl fmt::Debug for Blob {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"Blob {{ size: {:?}, addr: {:?} }}",
self.meta.size,
self.meta.addr()
)
}
}
//auto derive doesn't support large arrays
impl Default for Blob {
fn default() -> Blob {
Blob {
data: [0u8; BLOB_SIZE],
meta: Meta::default(),
}
}
}
pub struct Recycler<T> {
gc: Arc<Mutex<Vec<Arc<RwLock<T>>>>>,
}
impl<T: Default> Default for Recycler<T> {
fn default() -> Recycler<T> {
Recycler {
gc: Arc::new(Mutex::new(vec![])),
}
}
}
impl<T: Default> Clone for Recycler<T> {
fn clone(&self) -> Recycler<T> {
Recycler {
gc: self.gc.clone(),
}
}
}
impl<T: Default> Recycler<T> {
pub fn allocate(&self) -> Arc<RwLock<T>> {
let mut gc = self.gc.lock().expect("recycler lock");
gc.pop()
.unwrap_or_else(|| Arc::new(RwLock::new(Default::default())))
}
pub fn recycle(&self, msgs: Arc<RwLock<T>>) {
let mut gc = self.gc.lock().expect("recycler lock");
gc.push(msgs);
}
}
impl Packets {
fn run_read_from(&mut self, socket: &UdpSocket) -> Result<usize> {
self.packets.resize(NUM_PACKETS, Packet::default());
let mut i = 0;
//DOCUMENTED SIDE-EFFECT
//Performance out of the IO without poll
// * block on the socket until it's readable
// * set the socket to non-blocking once the first datagram arrives
// * read until it fails
// * the socket is reset to blocking at the start of the next call
socket.set_nonblocking(false)?;
for p in &mut self.packets {
p.meta.size = 0;
match socket.recv_from(&mut p.data) {
Err(_) if i > 0 => {
trace!("got {:?} messages", i);
break;
}
Err(e) => {
info!("recv_from err {:?}", e);
return Err(Error::IO(e));
}
Ok((nrecv, from)) => {
p.meta.size = nrecv;
p.meta.set_addr(&from);
if i == 0 {
socket.set_nonblocking(true)?;
}
}
}
i += 1;
}
Ok(i)
}
pub fn recv_from(&mut self, socket: &UdpSocket) -> Result<()> {
let sz = self.run_read_from(socket)?;
self.packets.resize(sz, Packet::default());
Ok(())
}
pub fn send_to(&self, socket: &UdpSocket) -> Result<()> {
for p in &self.packets {
let a = p.meta.addr();
socket.send_to(&p.data[..p.meta.size], &a)?;
}
Ok(())
}
}
const BLOB_INDEX_SIZE: usize = size_of::<u64>();
impl Blob {
pub fn get_index(&self) -> Result<u64> {
let mut rdr = io::Cursor::new(&self.data[0..BLOB_INDEX_SIZE]);
let r = rdr.read_u64::<LittleEndian>()?;
Ok(r)
}
pub fn set_index(&mut self, ix: u64) -> Result<()> {
let mut wtr = vec![];
wtr.write_u64::<LittleEndian>(ix)?;
self.data[..BLOB_INDEX_SIZE].clone_from_slice(&wtr);
Ok(())
}
pub fn data(&self) -> &[u8] {
&self.data[BLOB_INDEX_SIZE..]
}
pub fn data_mut(&mut self) -> &mut [u8] {
&mut self.data[BLOB_INDEX_SIZE..]
}
pub fn set_size(&mut self, size: usize) {
self.meta.size = size + BLOB_INDEX_SIZE;
}
pub fn recv_from(re: &BlobRecycler, socket: &UdpSocket) -> Result<VecDeque<SharedBlob>> {
let mut v = VecDeque::new();
//DOCUMENTED SIDE-EFFECT
//Performance out of the IO without poll
// * block on the socket until it's readable
// * set the socket to non-blocking once the first datagram arrives
// * read until it fails
// * the socket is reset to blocking at the start of the next call
socket.set_nonblocking(false)?;
for i in 0..NUM_BLOBS {
let r = re.allocate();
{
let mut p = r.write().unwrap();
match socket.recv_from(&mut p.data) {
Err(_) if i > 0 => {
trace!("got {:?} messages", i);
break;
}
Err(e) => {
info!("recv_from err {:?}", e);
return Err(Error::IO(e));
}
Ok((nrecv, from)) => {
p.meta.size = nrecv;
p.meta.set_addr(&from);
if i == 0 {
socket.set_nonblocking(true)?;
}
}
}
}
v.push_back(r);
}
Ok(v)
}
pub fn send_to(
re: &BlobRecycler,
socket: &UdpSocket,
v: &mut VecDeque<SharedBlob>,
) -> Result<()> {
while let Some(r) = v.pop_front() {
{
let p = r.read().unwrap();
let a = p.meta.addr();
socket.send_to(&p.data[..p.meta.size], &a)?;
}
re.recycle(r);
}
Ok(())
}
}
#[cfg(test)]
mod test {
use packet::{Blob, BlobRecycler, Packet, PacketRecycler, Packets};
use std::collections::VecDeque;
use std::io;
use std::io::Write;
use std::net::UdpSocket;
#[test]
pub fn packet_recycler_test() {
let r = PacketRecycler::default();
let p = r.allocate();
r.recycle(p);
assert_eq!(r.gc.lock().unwrap().len(), 1);
let _ = r.allocate();
assert_eq!(r.gc.lock().unwrap().len(), 0);
}
#[test]
pub fn blob_recycler_test() {
let r = BlobRecycler::default();
let p = r.allocate();
r.recycle(p);
assert_eq!(r.gc.lock().unwrap().len(), 1);
let _ = r.allocate();
assert_eq!(r.gc.lock().unwrap().len(), 0);
}
#[test]
pub fn packet_send_recv() {
let reader = UdpSocket::bind("127.0.0.1:0").expect("bind");
let addr = reader.local_addr().unwrap();
let sender = UdpSocket::bind("127.0.0.1:0").expect("bind");
let saddr = sender.local_addr().unwrap();
let r = PacketRecycler::default();
let p = r.allocate();
p.write().unwrap().packets.resize(10, Packet::default());
for m in p.write().unwrap().packets.iter_mut() {
m.meta.set_addr(&addr);
m.meta.size = 256;
}
p.read().unwrap().send_to(&sender).unwrap();
p.write().unwrap().recv_from(&reader).unwrap();
for m in p.write().unwrap().packets.iter_mut() {
assert_eq!(m.meta.size, 256);
assert_eq!(m.meta.addr(), saddr);
}
r.recycle(p);
}
#[test]
pub fn blob_send_recv() {
trace!("start");
let reader = UdpSocket::bind("127.0.0.1:0").expect("bind");
let addr = reader.local_addr().unwrap();
let sender = UdpSocket::bind("127.0.0.1:0").expect("bind");
let r = BlobRecycler::default();
let p = r.allocate();
p.write().unwrap().meta.set_addr(&addr);
p.write().unwrap().meta.size = 1024;
let mut v = VecDeque::new();
v.push_back(p);
assert_eq!(v.len(), 1);
Blob::send_to(&r, &sender, &mut v).unwrap();
trace!("send_to");
assert_eq!(v.len(), 0);
let mut rv = Blob::recv_from(&r, &reader).unwrap();
trace!("recv_from");
assert_eq!(rv.len(), 1);
let rp = rv.pop_front().unwrap();
assert_eq!(rp.write().unwrap().meta.size, 1024);
r.recycle(rp);
}
#[cfg(all(feature = "ipv6", test))]
#[test]
pub fn blob_ipv6_send_recv() {
let reader = UdpSocket::bind("[::1]:0").expect("bind");
let addr = reader.local_addr().unwrap();
let sender = UdpSocket::bind("[::1]:0").expect("bind");
let r = BlobRecycler::default();
let p = r.allocate();
p.write().unwrap().meta.set_addr(&addr);
p.write().unwrap().meta.size = 1024;
let mut v = VecDeque::default();
v.push_back(p);
Blob::send_to(&r, &sender, &mut v).unwrap();
let mut rv = Blob::recv_from(&r, &reader).unwrap();
let rp = rv.pop_front().unwrap();
assert_eq!(rp.write().unwrap().meta.size, 1024);
r.recycle(rp);
}
#[test]
pub fn debug_trait() {
write!(io::sink(), "{:?}", Packet::default()).unwrap();
write!(io::sink(), "{:?}", Packets::default()).unwrap();
write!(io::sink(), "{:?}", Blob::default()).unwrap();
}
#[test]
pub fn blob_test() {
let mut b = Blob::default();
b.set_index(<u64>::max_value()).unwrap();
assert_eq!(b.get_index().unwrap(), <u64>::max_value());
b.data_mut()[0] = 1;
assert_eq!(b.data()[0], 1);
assert_eq!(b.get_index().unwrap(), <u64>::max_value());
}
}
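
A short sketch of the two pieces callers touch most, Meta's address packing and the Recycler's buffer reuse (crate path `solana` assumed):

extern crate solana;

use solana::packet::{Packet, PacketRecycler};
use std::net::SocketAddr;

fn main() {
    // IPv4 octets are widened into the first four u16 slots of Meta::addr
    // and recovered by Meta::addr(); IPv6 addresses use all eight slots.
    let mut p = Packet::default();
    let a: SocketAddr = "10.0.0.1:8000".parse().unwrap();
    p.meta.set_addr(&a);
    assert_eq!(p.meta.addr(), a);

    // The recycler hands previously allocated Arc<RwLock<Packets>> buffers
    // back out, so hot loops avoid reallocating NUM_PACKETS-sized vectors.
    let recycler = PacketRecycler::default();
    let shared = recycler.allocate();
    recycler.recycle(shared); // the next allocate() reuses this buffer
}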

176 src/plan.rs Normal file
@@ -0,0 +1,176 @@
//! The `plan` module provides a domain-specific language for payment plans. Users create Plan objects that
//! are given to an interpreter. The interpreter listens for `Witness` events,
//! which it uses to reduce the payment plan. When the plan is reduced to a
//! `Payment`, the payment is executed.
use chrono::prelude::*;
use signature::PublicKey;
use std::mem;
pub enum Witness {
Timestamp(DateTime<Utc>),
Signature(PublicKey),
}
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Condition {
Timestamp(DateTime<Utc>),
Signature(PublicKey),
}
impl Condition {
/// Return true if the given Witness satisfies this Condition.
pub fn is_satisfied(&self, witness: &Witness) -> bool {
match (self, witness) {
(&Condition::Signature(ref pubkey), &Witness::Signature(ref from)) => pubkey == from,
(&Condition::Timestamp(ref dt), &Witness::Timestamp(ref last_time)) => dt <= last_time,
_ => false,
}
}
}
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Payment {
pub tokens: i64,
pub to: PublicKey,
}
#[repr(C)]
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Plan {
Pay(Payment),
After(Condition, Payment),
Race((Condition, Payment), (Condition, Payment)),
}
impl Plan {
/// Create the simplest spending plan - one that pays `tokens` to PublicKey.
pub fn new_payment(tokens: i64, to: PublicKey) -> Self {
Plan::Pay(Payment { tokens, to })
}
/// Create a spending plan that pays `tokens` to `to` after being witnessed by `from`.
pub fn new_authorized_payment(from: PublicKey, tokens: i64, to: PublicKey) -> Self {
Plan::After(Condition::Signature(from), Payment { tokens, to })
}
/// Create a spending plan that pays `tokens` to `to` after the given DateTime.
pub fn new_future_payment(dt: DateTime<Utc>, tokens: i64, to: PublicKey) -> Self {
Plan::After(Condition::Timestamp(dt), Payment { tokens, to })
}
/// Create a spending plan that pays `tokens` to `to` after the given DateTime
/// unless cancelled by `from`.
pub fn new_cancelable_future_payment(
dt: DateTime<Utc>,
from: PublicKey,
tokens: i64,
to: PublicKey,
) -> Self {
Plan::Race(
(Condition::Timestamp(dt), Payment { tokens, to }),
(Condition::Signature(from), Payment { tokens, to: from }),
)
}
/// Return Payment if the spending plan requires no additional Witnesses.
pub fn final_payment(&self) -> Option<Payment> {
match *self {
Plan::Pay(ref payment) => Some(payment.clone()),
_ => None,
}
}
/// Return true if the plan spends exactly `spendable_tokens`.
pub fn verify(&self, spendable_tokens: i64) -> bool {
match *self {
Plan::Pay(ref payment) | Plan::After(_, ref payment) => {
payment.tokens == spendable_tokens
}
Plan::Race(ref a, ref b) => {
a.1.tokens == spendable_tokens && b.1.tokens == spendable_tokens
}
}
}
/// Apply a witness to the spending plan to see if the plan can be reduced.
/// If so, modify the plan in-place.
pub fn apply_witness(&mut self, witness: &Witness) {
let new_payment = match *self {
Plan::After(ref cond, ref payment) if cond.is_satisfied(witness) => Some(payment),
Plan::Race((ref cond, ref payment), _) if cond.is_satisfied(witness) => Some(payment),
Plan::Race(_, (ref cond, ref payment)) if cond.is_satisfied(witness) => Some(payment),
_ => None,
}.cloned();
if let Some(payment) = new_payment {
mem::replace(self, Plan::Pay(payment));
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_signature_satisfied() {
let sig = PublicKey::default();
assert!(Condition::Signature(sig).is_satisfied(&Witness::Signature(sig)));
}
#[test]
fn test_timestamp_satisfied() {
let dt1 = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
let dt2 = Utc.ymd(2014, 11, 14).and_hms(10, 9, 8);
assert!(Condition::Timestamp(dt1).is_satisfied(&Witness::Timestamp(dt1)));
assert!(Condition::Timestamp(dt1).is_satisfied(&Witness::Timestamp(dt2)));
assert!(!Condition::Timestamp(dt2).is_satisfied(&Witness::Timestamp(dt1)));
}
#[test]
fn test_verify_plan() {
let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
let from = PublicKey::default();
let to = PublicKey::default();
assert!(Plan::new_payment(42, to).verify(42));
assert!(Plan::new_authorized_payment(from, 42, to).verify(42));
assert!(Plan::new_future_payment(dt, 42, to).verify(42));
assert!(Plan::new_cancelable_future_payment(dt, from, 42, to).verify(42));
}
#[test]
fn test_authorized_payment() {
let from = PublicKey::default();
let to = PublicKey::default();
let mut plan = Plan::new_authorized_payment(from, 42, to);
plan.apply_witness(&Witness::Signature(from));
assert_eq!(plan, Plan::new_payment(42, to));
}
#[test]
fn test_future_payment() {
let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
let to = PublicKey::default();
let mut plan = Plan::new_future_payment(dt, 42, to);
plan.apply_witness(&Witness::Timestamp(dt));
assert_eq!(plan, Plan::new_payment(42, to));
}
#[test]
fn test_cancelable_future_payment() {
let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
let from = PublicKey::default();
let to = PublicKey::default();
let mut plan = Plan::new_cancelable_future_payment(dt, from, 42, to);
plan.apply_witness(&Witness::Timestamp(dt));
assert_eq!(plan, Plan::new_payment(42, to));
let mut plan = Plan::new_cancelable_future_payment(dt, from, 42, to);
plan.apply_witness(&Witness::Signature(from));
assert_eq!(plan, Plan::new_payment(42, from));
}
}
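
A short sketch of how an interpreter would reduce a plan with witnesses (crate path `solana` assumed):

extern crate chrono;
extern crate solana;

use chrono::prelude::*;
use solana::plan::{Plan, Witness};
use solana::signature::{KeyPair, KeyPairUtil};

fn main() {
    let alice = KeyPair::new();
    let bob = KeyPair::new();
    let dt = Utc.ymd(2018, 5, 1).and_hms(0, 0, 0);

    // Pay bob 42 tokens after `dt`, unless alice cancels first.
    let mut plan = Plan::new_cancelable_future_payment(dt, alice.pubkey(), 42, bob.pubkey());
    assert_eq!(plan.final_payment(), None); // still waiting on a witness

    // The timestamp witness wins the race; the plan reduces to Plan::Pay.
    plan.apply_witness(&Witness::Timestamp(dt));
    assert_eq!(plan.final_payment().unwrap().to, bob.pubkey());
}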

89 src/recorder.rs Normal file
@@ -0,0 +1,89 @@
//! The `recorder` module provides an object for generating a Proof of History.
//! It records Event items on behalf of its users. It continuously generates
//! new hashes, only stopping to check if it has been sent an Event item. It
//! bundles any pending Events into an Entry and sends it back. The Entry
//! includes the Events, the latest hash, and the number of hashes since the
//! last entry. The resulting stream of entries represents ordered events in time.
use entry::{create_entry_mut, Entry};
use event::Event;
use hash::{hash, Hash};
use std::mem;
use std::sync::mpsc::{Receiver, SyncSender, TryRecvError};
use std::time::{Duration, Instant};
#[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))]
pub enum Signal {
Tick,
Event(Event),
}
#[derive(Debug, PartialEq, Eq)]
pub enum ExitReason {
RecvDisconnected,
SendDisconnected,
}
pub struct Recorder {
sender: SyncSender<Entry>,
receiver: Receiver<Signal>,
last_hash: Hash,
events: Vec<Event>,
num_hashes: u64,
num_ticks: u64,
}
impl Recorder {
pub fn new(receiver: Receiver<Signal>, sender: SyncSender<Entry>, last_hash: Hash) -> Self {
Recorder {
receiver,
sender,
last_hash,
events: vec![],
num_hashes: 0,
num_ticks: 0,
}
}
pub fn hash(&mut self) {
self.last_hash = hash(&self.last_hash);
self.num_hashes += 1;
}
pub fn record_entry(&mut self) -> Result<(), ExitReason> {
let events = mem::replace(&mut self.events, vec![]);
let entry = create_entry_mut(&mut self.last_hash, &mut self.num_hashes, events);
self.sender
.send(entry)
.or(Err(ExitReason::SendDisconnected))?;
Ok(())
}
pub fn process_events(
&mut self,
epoch: Instant,
ms_per_tick: Option<u64>,
) -> Result<(), ExitReason> {
loop {
if let Some(ms) = ms_per_tick {
if epoch.elapsed() > Duration::from_millis((self.num_ticks + 1) * ms) {
self.record_entry()?;
self.num_ticks += 1;
}
}
match self.receiver.try_recv() {
Ok(signal) => match signal {
Signal::Tick => {
self.record_entry()?;
}
Signal::Event(event) => {
self.events.push(event);
}
},
Err(TryRecvError::Empty) => return Ok(()),
Err(TryRecvError::Disconnected) => return Err(ExitReason::RecvDisconnected),
};
}
}
}
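
A minimal sketch of driving the Recorder from channels (crate path `solana` assumed):

extern crate solana;

use solana::hash::hash;
use solana::recorder::{Recorder, Signal};
use std::sync::mpsc::{channel, sync_channel};
use std::time::Instant;

fn main() {
    let (signal_sender, signal_receiver) = channel();
    let (entry_sender, entry_receiver) = sync_channel(100);
    let mut recorder = Recorder::new(signal_receiver, entry_sender, hash(b"genesis"));

    recorder.hash(); // extend the hash chain while idle
    signal_sender.send(Signal::Tick).unwrap();
    // With ms_per_tick = None, process_events only drains the channel.
    recorder.process_events(Instant::now(), None).unwrap();
    let _entry = entry_receiver.recv().unwrap(); // one Entry per Tick
}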

132 src/result.rs Normal file
@@ -0,0 +1,132 @@
//! The `result` module exposes a Result type that propagates one of many different Error types.
use bincode;
use serde_json;
use std;
use std::any::Any;
use accountant;
#[derive(Debug)]
pub enum Error {
IO(std::io::Error),
JSON(serde_json::Error),
AddrParse(std::net::AddrParseError),
JoinError(Box<Any + Send + 'static>),
RecvError(std::sync::mpsc::RecvError),
RecvTimeoutError(std::sync::mpsc::RecvTimeoutError),
Serialize(std::boxed::Box<bincode::ErrorKind>),
AccountingError(accountant::AccountingError),
SendError,
Services,
}
pub type Result<T> = std::result::Result<T, Error>;
impl std::convert::From<std::sync::mpsc::RecvError> for Error {
fn from(e: std::sync::mpsc::RecvError) -> Error {
Error::RecvError(e)
}
}
impl std::convert::From<std::sync::mpsc::RecvTimeoutError> for Error {
fn from(e: std::sync::mpsc::RecvTimeoutError) -> Error {
Error::RecvTimeoutError(e)
}
}
impl std::convert::From<accountant::AccountingError> for Error {
fn from(e: accountant::AccountingError) -> Error {
Error::AccountingError(e)
}
}
impl<T> std::convert::From<std::sync::mpsc::SendError<T>> for Error {
fn from(_e: std::sync::mpsc::SendError<T>) -> Error {
Error::SendError
}
}
impl std::convert::From<Box<Any + Send + 'static>> for Error {
fn from(e: Box<Any + Send + 'static>) -> Error {
Error::JoinError(e)
}
}
impl std::convert::From<std::io::Error> for Error {
fn from(e: std::io::Error) -> Error {
Error::IO(e)
}
}
impl std::convert::From<serde_json::Error> for Error {
fn from(e: serde_json::Error) -> Error {
Error::JSON(e)
}
}
impl std::convert::From<std::net::AddrParseError> for Error {
fn from(e: std::net::AddrParseError) -> Error {
Error::AddrParse(e)
}
}
impl std::convert::From<std::boxed::Box<bincode::ErrorKind>> for Error {
fn from(e: std::boxed::Box<bincode::ErrorKind>) -> Error {
Error::Serialize(e)
}
}
#[cfg(test)]
mod tests {
use result::Error;
use result::Result;
use serde_json;
use std::io;
use std::io::Write;
use std::net::SocketAddr;
use std::sync::mpsc::channel;
use std::sync::mpsc::RecvError;
use std::sync::mpsc::RecvTimeoutError;
use std::thread;
fn addr_parse_error() -> Result<SocketAddr> {
let r = "12fdfasfsafsadfs".parse()?;
Ok(r)
}
fn join_error() -> Result<()> {
let r = thread::spawn(|| panic!("hi")).join()?;
Ok(r)
}
fn json_error() -> Result<()> {
let r = serde_json::from_slice("=342{;;;;:}".as_bytes())?;
Ok(r)
}
fn send_error() -> Result<()> {
let (s, r) = channel();
drop(r);
s.send(())?;
Ok(())
}
#[test]
fn from_test() {
assert_matches!(addr_parse_error(), Err(Error::AddrParse(_)));
assert_matches!(Error::from(RecvError {}), Error::RecvError(_));
assert_matches!(
Error::from(RecvTimeoutError::Timeout),
Error::RecvTimeoutError(_)
);
assert_matches!(send_error(), Err(Error::SendError));
assert_matches!(join_error(), Err(Error::JoinError(_)));
let ioe = io::Error::new(io::ErrorKind::NotFound, "hi");
assert_matches!(Error::from(ioe), Error::IO(_));
}
#[test]
fn fmt_test() {
write!(io::sink(), "{:?}", addr_parse_error()).unwrap();
write!(io::sink(), "{:?}", Error::from(RecvError {})).unwrap();
write!(io::sink(), "{:?}", Error::from(RecvTimeoutError::Timeout)).unwrap();
write!(io::sink(), "{:?}", send_error()).unwrap();
write!(io::sink(), "{:?}", join_error()).unwrap();
write!(io::sink(), "{:?}", json_error()).unwrap();
write!(
io::sink(),
"{:?}",
Error::from(io::Error::new(io::ErrorKind::NotFound, "hi"))
).unwrap();
}
}
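
A small sketch of what the From impls buy: one function can use `?` across io and channel errors alike (crate path `solana` assumed):

extern crate solana;

use solana::result::Result;
use std::net::UdpSocket;
use std::sync::mpsc::channel;

fn bind_and_signal() -> Result<()> {
    let sock = UdpSocket::bind("127.0.0.1:0")?; // io::Error -> Error::IO
    let (s, r) = channel();
    s.send(sock.local_addr()?)?; // SendError<T> -> Error::SendError
    let _addr = r.recv()?;       // RecvError -> Error::RecvError
    Ok(())
}

fn main() {
    bind_and_signal().unwrap();
}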

43 src/signature.rs Normal file
@@ -0,0 +1,43 @@
//! The `signature` module provides functionality for public and private keys.
use generic_array::typenum::{U32, U64};
use generic_array::GenericArray;
use ring::signature::Ed25519KeyPair;
use ring::{rand, signature};
use untrusted;
pub type KeyPair = Ed25519KeyPair;
pub type PublicKey = GenericArray<u8, U32>;
pub type Signature = GenericArray<u8, U64>;
pub trait KeyPairUtil {
fn new() -> Self;
fn pubkey(&self) -> PublicKey;
}
impl KeyPairUtil for Ed25519KeyPair {
/// Return a new ED25519 keypair
fn new() -> Self {
let rng = rand::SystemRandom::new();
let pkcs8_bytes = signature::Ed25519KeyPair::generate_pkcs8(&rng).unwrap();
signature::Ed25519KeyPair::from_pkcs8(untrusted::Input::from(&pkcs8_bytes)).unwrap()
}
/// Return the public key for the given keypair
fn pubkey(&self) -> PublicKey {
GenericArray::clone_from_slice(self.public_key_bytes())
}
}
pub trait SignatureUtil {
fn verify(&self, peer_public_key_bytes: &[u8], msg_bytes: &[u8]) -> bool;
}
impl SignatureUtil for GenericArray<u8, U64> {
fn verify(&self, peer_public_key_bytes: &[u8], msg_bytes: &[u8]) -> bool {
let peer_public_key = untrusted::Input::from(peer_public_key_bytes);
let msg = untrusted::Input::from(msg_bytes);
let sig = untrusted::Input::from(self);
signature::verify(&signature::ED25519, peer_public_key, msg, sig).is_ok()
}
}
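
A round-trip signing sketch using the aliases above (crate path `solana` assumed):

extern crate solana;

use solana::signature::{KeyPair, KeyPairUtil, Signature, SignatureUtil};

fn main() {
    let keypair = KeyPair::new();
    let msg = b"hello";
    // ring returns its own signature type; copy the 64 bytes into the
    // GenericArray alias so SignatureUtil::verify can consume it.
    let sig = Signature::clone_from_slice(keypair.sign(msg).as_ref());
    assert!(sig.verify(&keypair.pubkey(), msg));
}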

549 src/streamer.rs Normal file
@@ -0,0 +1,549 @@
//! The `streamer` module defines a set of services for efficiently pulling data from UDP sockets.
use packet::{Blob, BlobRecycler, PacketRecycler, SharedBlob, SharedPackets, NUM_BLOBS};
use result::Result;
use std::collections::VecDeque;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc;
use std::sync::{Arc, RwLock};
use std::thread::{spawn, JoinHandle};
use std::time::Duration;
use subscribers::Subscribers;
pub type PacketReceiver = mpsc::Receiver<SharedPackets>;
pub type PacketSender = mpsc::Sender<SharedPackets>;
pub type BlobSender = mpsc::Sender<VecDeque<SharedBlob>>;
pub type BlobReceiver = mpsc::Receiver<VecDeque<SharedBlob>>;
fn recv_loop(
sock: &UdpSocket,
exit: &Arc<AtomicBool>,
re: &PacketRecycler,
channel: &PacketSender,
) -> Result<()> {
loop {
let msgs = re.allocate();
let msgs_ = msgs.clone();
loop {
match msgs.write().unwrap().recv_from(sock) {
Ok(()) => {
channel.send(msgs_)?;
break;
}
Err(_) => {
if exit.load(Ordering::Relaxed) {
re.recycle(msgs_);
return Ok(());
}
}
}
}
}
}
pub fn receiver(
sock: UdpSocket,
exit: Arc<AtomicBool>,
recycler: PacketRecycler,
channel: PacketSender,
) -> Result<JoinHandle<()>> {
let timer = Duration::new(1, 0);
sock.set_read_timeout(Some(timer))?;
Ok(spawn(move || {
let _ = recv_loop(&sock, &exit, &recycler, &channel);
()
}))
}
fn recv_send(sock: &UdpSocket, recycler: &BlobRecycler, r: &BlobReceiver) -> Result<()> {
let timer = Duration::new(1, 0);
let mut msgs = r.recv_timeout(timer)?;
Blob::send_to(recycler, sock, &mut msgs)?;
Ok(())
}
pub fn responder(
sock: UdpSocket,
exit: Arc<AtomicBool>,
recycler: BlobRecycler,
r: BlobReceiver,
) -> JoinHandle<()> {
spawn(move || loop {
if recv_send(&sock, &recycler, &r).is_err() && exit.load(Ordering::Relaxed) {
break;
}
})
}
//TODO: we need to perform block authentication before we create the
//window.
fn recv_blobs(recycler: &BlobRecycler, sock: &UdpSocket, s: &BlobSender) -> Result<()> {
let dq = Blob::recv_from(recycler, sock)?;
if !dq.is_empty() {
s.send(dq)?;
}
Ok(())
}
pub fn blob_receiver(
exit: Arc<AtomicBool>,
recycler: BlobRecycler,
sock: UdpSocket,
s: BlobSender,
) -> Result<JoinHandle<()>> {
//DOCUMENTED SIDE-EFFECT
//1 second timeout on socket read
let timer = Duration::new(1, 0);
sock.set_read_timeout(Some(timer))?;
let t = spawn(move || loop {
if exit.load(Ordering::Relaxed) {
break;
}
let ret = recv_blobs(&recycler, &sock, &s);
if ret.is_err() {
break;
}
});
Ok(t)
}
fn recv_window(
window: &mut Vec<Option<SharedBlob>>,
subs: &Arc<RwLock<Subscribers>>,
recycler: &BlobRecycler,
consumed: &mut usize,
r: &BlobReceiver,
s: &BlobSender,
retransmit: &BlobSender,
) -> Result<()> {
let timer = Duration::new(1, 0);
let mut dq = r.recv_timeout(timer)?;
while let Ok(mut nq) = r.try_recv() {
dq.append(&mut nq)
}
{
//retransmit all leader blocks
let mut retransmitq = VecDeque::new();
let rsubs = subs.read().unwrap();
for b in &dq {
let p = b.read().unwrap();
//TODO this check isn't safe against adversarial packets
//we need to maintain a sequence window
trace!(
"idx: {} addr: {:?} leader: {:?}",
p.get_index().unwrap(),
p.meta.addr(),
rsubs.leader.addr
);
if p.meta.addr() == rsubs.leader.addr {
//TODO
//need to copy the retransmitted blob
//otherwise we get into races over which thread
//should do the recycling
//
//a better abstraction would be to recycle when the blob
//is dropped via a weak reference to the recycler
let nv = recycler.allocate();
{
let mut mnv = nv.write().unwrap();
let sz = p.meta.size;
mnv.meta.size = sz;
mnv.data[..sz].copy_from_slice(&p.data[..sz]);
}
retransmitq.push_back(nv);
}
}
if !retransmitq.is_empty() {
retransmit.send(retransmitq)?;
}
}
//send a contiguous set of blocks
let mut contq = VecDeque::new();
while let Some(b) = dq.pop_front() {
let b_ = b.clone();
let p = b.write().unwrap();
let pix = p.get_index()? as usize;
let w = pix % NUM_BLOBS;
//TODO: after the blocks are authenticated, receiving different
//blocks at the same index indicates a network failure/attack
trace!("window w: {} size: {}", w, p.meta.size);
{
if window[w].is_none() {
window[w] = Some(b_);
} else {
debug!("duplicate blob at index {:}", w);
}
loop {
let k = *consumed % NUM_BLOBS;
trace!("k: {} consumed: {}", k, *consumed);
if window[k].is_none() {
break;
}
contq.push_back(window[k].clone().unwrap());
window[k] = None;
*consumed += 1;
}
}
}
trace!("sending contq.len: {}", contq.len());
if !contq.is_empty() {
s.send(contq)?;
}
Ok(())
}
pub fn window(
exit: Arc<AtomicBool>,
subs: Arc<RwLock<Subscribers>>,
recycler: BlobRecycler,
r: BlobReceiver,
s: BlobSender,
retransmit: BlobSender,
) -> JoinHandle<()> {
spawn(move || {
let mut window = vec![None; NUM_BLOBS];
let mut consumed = 0;
loop {
if exit.load(Ordering::Relaxed) {
break;
}
let _ = recv_window(
&mut window,
&subs,
&recycler,
&mut consumed,
&r,
&s,
&retransmit,
);
}
})
}
fn retransmit(
subs: &Arc<RwLock<Subscribers>>,
recycler: &BlobRecycler,
r: &BlobReceiver,
sock: &UdpSocket,
) -> Result<()> {
let timer = Duration::new(1, 0);
let mut dq = r.recv_timeout(timer)?;
while let Ok(mut nq) = r.try_recv() {
dq.append(&mut nq);
}
{
let wsubs = subs.read().unwrap();
for b in &dq {
let mut mb = b.write().unwrap();
wsubs.retransmit(&mut mb, sock)?;
}
}
while let Some(b) = dq.pop_front() {
recycler.recycle(b);
}
Ok(())
}
/// Service to retransmit messages from the leader to layer 1 nodes.
/// See `subscribers` for network layer definitions.
/// # Arguments
/// * `sock` - Socket used to send the retransmitted blobs to layer 1 nodes.
/// * `exit` - Boolean to signal system exit.
/// * `subs` - Shared Subscribers structure. This structure needs to be updated and populated by
/// the accountant.
/// * `recycler` - Blob recycler.
/// * `r` - Receive channel for blobs to be retransmitted to all the layer 1 nodes.
pub fn retransmitter(
sock: UdpSocket,
exit: Arc<AtomicBool>,
subs: Arc<RwLock<Subscribers>>,
recycler: BlobRecycler,
r: BlobReceiver,
) -> JoinHandle<()> {
spawn(move || loop {
if exit.load(Ordering::Relaxed) {
break;
}
let _ = retransmit(&subs, &recycler, &r, &sock);
})
}
#[cfg(all(feature = "unstable", test))]
mod bench {
extern crate test;
use self::test::Bencher;
use packet::{Packet, PacketRecycler, PACKET_DATA_SIZE};
use result::Result;
use std::net::{SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::channel;
use std::sync::{Arc, Mutex};
use std::thread::sleep;
use std::thread::{spawn, JoinHandle};
use std::time::Duration;
use std::time::SystemTime;
use streamer::{receiver, PacketReceiver};
fn producer(
addr: &SocketAddr,
recycler: PacketRecycler,
exit: Arc<AtomicBool>,
) -> JoinHandle<()> {
let send = UdpSocket::bind("0.0.0.0:0").unwrap();
let msgs = recycler.allocate();
let msgs_ = msgs.clone();
msgs.write().unwrap().packets.resize(10, Packet::default());
for w in msgs.write().unwrap().packets.iter_mut() {
w.meta.size = PACKET_DATA_SIZE;
w.meta.set_addr(&addr);
}
spawn(move || loop {
if exit.load(Ordering::Relaxed) {
return;
}
let mut num = 0;
for p in msgs_.read().unwrap().packets.iter() {
let a = p.meta.addr();
send.send_to(&p.data[..p.meta.size], &a).unwrap();
num += 1;
}
assert_eq!(num, 10);
})
}
fn sink(
recycler: PacketRecycler,
exit: Arc<AtomicBool>,
rvs: Arc<Mutex<usize>>,
r: PacketReceiver,
) -> JoinHandle<()> {
spawn(move || loop {
if exit.load(Ordering::Relaxed) {
return;
}
let timer = Duration::new(1, 0);
match r.recv_timeout(timer) {
Ok(msgs) => {
let msgs_ = msgs.clone();
*rvs.lock().unwrap() += msgs.read().unwrap().packets.len();
recycler.recycle(msgs_);
}
_ => (),
}
})
}
fn run_streamer_bench() -> Result<()> {
let read = UdpSocket::bind("127.0.0.1:0")?;
let addr = read.local_addr()?;
let exit = Arc::new(AtomicBool::new(false));
let pack_recycler = PacketRecycler::default();
let (s_reader, r_reader) = channel();
let t_reader = receiver(read, exit.clone(), pack_recycler.clone(), s_reader)?;
let t_producer1 = producer(&addr, pack_recycler.clone(), exit.clone());
let t_producer2 = producer(&addr, pack_recycler.clone(), exit.clone());
let t_producer3 = producer(&addr, pack_recycler.clone(), exit.clone());
let rvs = Arc::new(Mutex::new(0));
let t_sink = sink(pack_recycler.clone(), exit.clone(), rvs.clone(), r_reader);
let start = SystemTime::now();
let start_val = *rvs.lock().unwrap();
sleep(Duration::new(5, 0));
let elapsed = start.elapsed().unwrap();
let end_val = *rvs.lock().unwrap();
let time = elapsed.as_secs() * 1_000_000_000 + elapsed.subsec_nanos() as u64;
let ftime = (time as f64) / 1_000_000_000f64;
let fcount = (end_val - start_val) as f64;
println!("performance: {:?}", fcount / ftime);
exit.store(true, Ordering::Relaxed);
t_reader.join()?;
t_producer1.join()?;
t_producer2.join()?;
t_producer3.join()?;
t_sink.join()?;
Ok(())
}
#[bench]
pub fn streamer_bench(_bench: &mut Bencher) {
run_streamer_bench().unwrap();
}
}
#[cfg(test)]
mod test {
use packet::{Blob, BlobRecycler, Packet, PacketRecycler, Packets, PACKET_DATA_SIZE};
use std::collections::VecDeque;
use std::io;
use std::io::Write;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::channel;
use std::sync::{Arc, RwLock};
use std::time::Duration;
use streamer::{blob_receiver, receiver, responder, retransmitter, window, BlobReceiver,
PacketReceiver};
use subscribers::{Node, Subscribers};
fn get_msgs(r: PacketReceiver, num: &mut usize) {
for _t in 0..5 {
let timer = Duration::new(1, 0);
match r.recv_timeout(timer) {
Ok(m) => *num += m.read().unwrap().packets.len(),
e => println!("error {:?}", e),
}
if *num == 10 {
break;
}
}
}
#[test]
pub fn streamer_debug() {
write!(io::sink(), "{:?}", Packet::default()).unwrap();
write!(io::sink(), "{:?}", Packets::default()).unwrap();
write!(io::sink(), "{:?}", Blob::default()).unwrap();
}
#[test]
pub fn streamer_send_test() {
let read = UdpSocket::bind("127.0.0.1:0").expect("bind");
let addr = read.local_addr().unwrap();
let send = UdpSocket::bind("127.0.0.1:0").expect("bind");
let exit = Arc::new(AtomicBool::new(false));
let pack_recycler = PacketRecycler::default();
let resp_recycler = BlobRecycler::default();
let (s_reader, r_reader) = channel();
let t_receiver = receiver(read, exit.clone(), pack_recycler.clone(), s_reader).unwrap();
let (s_responder, r_responder) = channel();
let t_responder = responder(send, exit.clone(), resp_recycler.clone(), r_responder);
let mut msgs = VecDeque::new();
for i in 0..10 {
let b = resp_recycler.allocate();
let b_ = b.clone();
let mut w = b.write().unwrap();
w.data[0] = i as u8;
w.meta.size = PACKET_DATA_SIZE;
w.meta.set_addr(&addr);
msgs.push_back(b_);
}
s_responder.send(msgs).expect("send");
let mut num = 0;
get_msgs(r_reader, &mut num);
assert_eq!(num, 10);
exit.store(true, Ordering::Relaxed);
t_receiver.join().expect("join");
t_responder.join().expect("join");
}
fn get_blobs(r: BlobReceiver, num: &mut usize) {
for _t in 0..5 {
let timer = Duration::new(1, 0);
match r.recv_timeout(timer) {
Ok(m) => {
for (i, v) in m.iter().enumerate() {
assert_eq!(v.read().unwrap().get_index().unwrap() as usize, *num + i);
}
*num += m.len();
}
e => println!("error {:?}", e),
}
if *num == 10 {
break;
}
}
}
#[test]
pub fn window_send_test() {
let read = UdpSocket::bind("127.0.0.1:0").expect("bind");
let addr = read.local_addr().unwrap();
let send = UdpSocket::bind("127.0.0.1:0").expect("bind");
let exit = Arc::new(AtomicBool::new(false));
let subs = Arc::new(RwLock::new(Subscribers::new(
Node::default(),
Node::new([0; 8], 0, send.local_addr().unwrap()),
&[],
)));
let resp_recycler = BlobRecycler::default();
let (s_reader, r_reader) = channel();
let t_receiver =
blob_receiver(exit.clone(), resp_recycler.clone(), read, s_reader).unwrap();
let (s_window, r_window) = channel();
let (s_retransmit, r_retransmit) = channel();
let t_window = window(
exit.clone(),
subs,
resp_recycler.clone(),
r_reader,
s_window,
s_retransmit,
);
let (s_responder, r_responder) = channel();
let t_responder = responder(send, exit.clone(), resp_recycler.clone(), r_responder);
let mut msgs = VecDeque::new();
for v in 0..10 {
let i = 9 - v;
let b = resp_recycler.allocate();
let b_ = b.clone();
let mut w = b.write().unwrap();
w.set_index(i).unwrap();
assert_eq!(i, w.get_index().unwrap());
w.meta.size = PACKET_DATA_SIZE;
w.meta.set_addr(&addr);
msgs.push_back(b_);
}
s_responder.send(msgs).expect("send");
let mut num = 0;
get_blobs(r_window, &mut num);
assert_eq!(num, 10);
let mut q = r_retransmit.recv().unwrap();
while let Ok(mut nq) = r_retransmit.try_recv() {
q.append(&mut nq);
}
assert_eq!(q.len(), 10);
exit.store(true, Ordering::Relaxed);
t_receiver.join().expect("join");
t_responder.join().expect("join");
t_window.join().expect("join");
}
#[test]
pub fn retransmit() {
let read = UdpSocket::bind("127.0.0.1:0").expect("bind");
let send = UdpSocket::bind("127.0.0.1:0").expect("bind");
let exit = Arc::new(AtomicBool::new(false));
let subs = Arc::new(RwLock::new(Subscribers::new(
Node::default(),
Node::default(),
&[Node::new([0; 8], 1, read.local_addr().unwrap())],
)));
let (s_retransmit, r_retransmit) = channel();
let blob_recycler = BlobRecycler::default();
let saddr = send.local_addr().unwrap();
let t_retransmit = retransmitter(
send,
exit.clone(),
subs,
blob_recycler.clone(),
r_retransmit,
);
let mut bq = VecDeque::new();
let b = blob_recycler.allocate();
b.write().unwrap().meta.size = 10;
bq.push_back(b);
s_retransmit.send(bq).unwrap();
let (s_blob_receiver, r_blob_receiver) = channel();
let t_receiver =
blob_receiver(exit.clone(), blob_recycler.clone(), read, s_blob_receiver).unwrap();
let mut oq = r_blob_receiver.recv().unwrap();
assert_eq!(oq.len(), 1);
let o = oq.pop_front().unwrap();
let ro = o.read().unwrap();
assert_eq!(ro.meta.size, 10);
assert_eq!(ro.meta.addr(), saddr);
exit.store(true, Ordering::Relaxed);
t_receiver.join().expect("join");
t_retransmit.join().expect("join");
}
}
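
A minimal sketch of starting and cleanly stopping one of the services above (crate path `solana` assumed):

extern crate solana;

use solana::packet::PacketRecycler;
use solana::streamer::receiver;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::channel;
use std::sync::Arc;

fn main() {
    let sock = UdpSocket::bind("127.0.0.1:0").unwrap();
    let exit = Arc::new(AtomicBool::new(false));
    let recycler = PacketRecycler::default();
    let (sender, _packets) = channel();
    // The spawned thread batches datagrams into SharedPackets on `sender`
    // and notices `exit` whenever a read times out (1 second).
    let handle = receiver(sock, exit.clone(), recycler, sender).unwrap();
    exit.store(true, Ordering::Relaxed);
    handle.join().unwrap();
}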

149 src/subscribers.rs Normal file
@@ -0,0 +1,149 @@
//! The `subscribers` module defines data structures to keep track of nodes on the network.
//! The network is arranged in layers:
//!
//! * layer 0 - Leader.
//! * layer 1 - As many nodes as we can fit to quickly get reliable `2/3+1` finality
//! * layer 2 - Everyone else; if layer 1 holds `2^10` nodes, layer 2 should be able to fit `2^20` nodes.
//!
//! It's up to the external state machine to keep this updated.
use packet::Blob;
use rayon::prelude::*;
use result::{Error, Result};
use std::net::{SocketAddr, UdpSocket};
use std::fmt;
#[derive(Clone, PartialEq)]
pub struct Node {
pub id: [u64; 8],
pub weight: u64,
pub addr: SocketAddr,
}
//SocketAddr doesn't implement Default
impl Default for Node {
fn default() -> Node {
Node {
id: [0; 8],
weight: 0,
addr: "0.0.0.0:0".parse().unwrap(),
}
}
}
impl Node {
pub fn new(id: [u64; 8], weight: u64, addr: SocketAddr) -> Node {
Node { id, weight, addr }
}
fn key(&self) -> i64 {
(self.weight as i64).checked_neg().unwrap()
}
}
impl fmt::Debug for Node {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Node {{ weight: {} addr: {} }}", self.weight, self.addr)
}
}
pub struct Subscribers {
data: Vec<Node>,
pub me: Node,
pub leader: Node,
}
impl Subscribers {
pub fn new(me: Node, leader: Node, network: &[Node]) -> Subscribers {
let mut h = Subscribers {
data: vec![],
me: me.clone(),
leader: leader.clone(),
};
h.insert(&[me, leader]);
h.insert(network);
h
}
/// retransmit messages from the leader to layer 1 nodes
pub fn retransmit(&self, blob: &mut Blob, s: &UdpSocket) -> Result<()> {
let errs: Vec<_> = self.data
.par_iter()
.map(|i| {
if self.me == *i {
return Ok(0);
}
if self.leader == *i {
return Ok(0);
}
trace!("retransmit blob to {}", i.addr);
s.send_to(&blob.data[..blob.meta.size], &i.addr)
})
.collect();
for e in errs {
trace!("retransmit result {:?}", e);
match e {
Err(e) => return Err(Error::IO(e)),
_ => (),
}
}
Ok(())
}
pub fn insert(&mut self, ns: &[Node]) {
self.data.extend_from_slice(ns);
self.data.sort_by_key(Node::key);
}
}
#[cfg(test)]
mod test {
use packet::Blob;
use rayon::prelude::*;
use std::net::UdpSocket;
use std::time::Duration;
use subscribers::{Node, Subscribers};
#[test]
pub fn subscriber() {
let mut me = Node::default();
me.weight = 10;
let mut leader = Node::default();
leader.weight = 11;
let mut s = Subscribers::new(me, leader, &[]);
assert_eq!(s.data.len(), 2);
assert_eq!(s.data[0].weight, 11);
assert_eq!(s.data[1].weight, 10);
let mut n = Node::default();
n.weight = 12;
s.insert(&[n]);
assert_eq!(s.data.len(), 3);
assert_eq!(s.data[0].weight, 12);
}
#[test]
pub fn retransmit() {
let s1 = UdpSocket::bind("127.0.0.1:0").expect("bind");
let s2 = UdpSocket::bind("127.0.0.1:0").expect("bind");
let s3 = UdpSocket::bind("127.0.0.1:0").expect("bind");
let n1 = Node::new([0; 8], 0, s1.local_addr().unwrap());
let n2 = Node::new([0; 8], 0, s2.local_addr().unwrap());
let mut s = Subscribers::new(n1.clone(), n2.clone(), &[]);
let n3 = Node::new([0; 8], 0, s3.local_addr().unwrap());
s.insert(&[n3]);
let mut b = Blob::default();
b.meta.size = 10;
let s4 = UdpSocket::bind("127.0.0.1:0").expect("bind");
s.retransmit(&mut b, &s4).unwrap();
let res: Vec<_> = [s1, s2, s3]
.into_par_iter()
.map(|s| {
let mut b = Blob::default();
s.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
s.recv_from(&mut b.data).is_err()
})
.collect();
assert_eq!(res, [true, true, false]);
let mut n4 = Node::default();
n4.addr = "255.255.255.255:1".parse().unwrap();
s.insert(&[n4]);
assert!(s.retransmit(&mut b, &s4).is_err());
}
}
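
A small sketch of building the network view above (crate path `solana` assumed); note the descending-weight ordering from Node::key():

extern crate solana;

use solana::subscribers::{Node, Subscribers};

fn main() {
    let mut me = Node::default();
    me.weight = 5;
    let mut leader = Node::default();
    leader.weight = 100;
    // key() negates the weight, so sort_by_key keeps the heaviest
    // (most reliable) nodes at the front of the retransmit list.
    let subs = Subscribers::new(me, leader.clone(), &[]);
    assert_eq!(subs.leader.weight, leader.weight);
}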

259 src/transaction.rs Normal file
@@ -0,0 +1,259 @@
//! The `transaction` module provides functionality for creating log transactions.
use bincode::serialize;
use chrono::prelude::*;
use hash::Hash;
use plan::{Condition, Payment, Plan};
use rayon::prelude::*;
use signature::{KeyPair, KeyPairUtil, PublicKey, Signature, SignatureUtil};
pub const SIGNED_DATA_OFFSET: usize = 112;
pub const SIG_OFFSET: usize = 8;
pub const PUB_KEY_OFFSET: usize = 80;
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct TransactionData {
pub tokens: i64,
pub last_id: Hash,
pub plan: Plan,
}
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Transaction {
pub sig: Signature,
pub from: PublicKey,
pub data: TransactionData,
}
impl Transaction {
/// Create and sign a new Transaction. Used for unit-testing.
pub fn new(from_keypair: &KeyPair, to: PublicKey, tokens: i64, last_id: Hash) -> Self {
let from = from_keypair.pubkey();
let plan = Plan::Pay(Payment { tokens, to });
let mut tr = Transaction {
sig: Signature::default(),
data: TransactionData {
plan,
tokens,
last_id,
},
from,
};
tr.sign(from_keypair);
tr
}
/// Create and sign a postdated Transaction. Used for unit-testing.
pub fn new_on_date(
from_keypair: &KeyPair,
to: PublicKey,
dt: DateTime<Utc>,
tokens: i64,
last_id: Hash,
) -> Self {
let from = from_keypair.pubkey();
let plan = Plan::Race(
(Condition::Timestamp(dt), Payment { tokens, to }),
(Condition::Signature(from), Payment { tokens, to: from }),
);
let mut tr = Transaction {
data: TransactionData {
plan,
tokens,
last_id,
},
from,
sig: Signature::default(),
};
tr.sign(from_keypair);
tr
}
fn get_sign_data(&self) -> Vec<u8> {
serialize(&(&self.data)).unwrap()
}
/// Sign this transaction.
pub fn sign(&mut self, keypair: &KeyPair) {
let sign_data = self.get_sign_data();
self.sig = Signature::clone_from_slice(keypair.sign(&sign_data).as_ref());
}
pub fn verify_sig(&self) -> bool {
self.sig.verify(&self.from, &self.get_sign_data())
}
pub fn verify_plan(&self) -> bool {
self.data.plan.verify(self.data.tokens)
}
}
#[cfg(test)]
pub fn test_tx() -> Transaction {
let keypair1 = KeyPair::new();
let pubkey1 = keypair1.pubkey();
let zero = Hash::default();
Transaction::new(&keypair1, pubkey1, 42, zero)
}
#[cfg(test)]
pub fn memfind<A: Eq>(a: &[A], b: &[A]) -> Option<usize> {
assert!(a.len() >= b.len());
let end = a.len() - b.len() + 1;
for i in 0..end {
if a[i..i + b.len()] == b[..] {
return Some(i);
}
}
None
}
/// Verify a batch of signatures.
pub fn verify_signatures(transactions: &[Transaction]) -> bool {
transactions.par_iter().all(|tr| tr.verify_sig())
}
/// Verify a batch of spending plans.
pub fn verify_plans(transactions: &[Transaction]) -> bool {
transactions.par_iter().all(|tr| tr.verify_plan())
}
/// Verify a batch of transactions.
pub fn verify_transactions(transactions: &[Transaction]) -> bool {
verify_signatures(transactions) && verify_plans(transactions)
}
#[cfg(test)]
mod tests {
use super::*;
use bincode::{deserialize, serialize};
#[test]
fn test_claim() {
let keypair = KeyPair::new();
let zero = Hash::default();
let tr0 = Transaction::new(&keypair, keypair.pubkey(), 42, zero);
assert!(tr0.verify_plan());
}
#[test]
fn test_transfer() {
let zero = Hash::default();
let keypair0 = KeyPair::new();
let keypair1 = KeyPair::new();
let pubkey1 = keypair1.pubkey();
let tr0 = Transaction::new(&keypair0, pubkey1, 42, zero);
assert!(tr0.verify_plan());
}
#[test]
fn test_serialize_claim() {
let plan = Plan::Pay(Payment {
tokens: 0,
to: Default::default(),
});
let claim0 = Transaction {
data: TransactionData {
plan,
tokens: 0,
last_id: Default::default(),
},
from: Default::default(),
sig: Default::default(),
};
let buf = serialize(&claim0).unwrap();
let claim1: Transaction = deserialize(&buf).unwrap();
assert_eq!(claim1, claim0);
}
#[test]
fn test_token_attack() {
let zero = Hash::default();
let keypair = KeyPair::new();
let pubkey = keypair.pubkey();
let mut tr = Transaction::new(&keypair, pubkey, 42, zero);
tr.data.tokens = 1_000_000; // <-- attack, part 1!
if let Plan::Pay(ref mut payment) = tr.data.plan {
payment.tokens = tr.data.tokens; // <-- attack, part 2!
};
assert!(tr.verify_plan());
assert!(!tr.verify_sig());
}
#[test]
fn test_hijack_attack() {
let keypair0 = KeyPair::new();
let keypair1 = KeyPair::new();
let thief_keypair = KeyPair::new();
let pubkey1 = keypair1.pubkey();
let zero = Hash::default();
let mut tr = Transaction::new(&keypair0, pubkey1, 42, zero);
if let Plan::Pay(ref mut payment) = tr.data.plan {
payment.to = thief_keypair.pubkey(); // <-- attack!
};
assert!(tr.verify_plan());
assert!(!tr.verify_sig());
}
#[test]
fn test_layout() {
let tr = test_tx();
let sign_data = tr.get_sign_data();
let tx = serialize(&tr).unwrap();
assert_matches!(memfind(&tx, &sign_data), Some(SIGNED_DATA_OFFSET));
assert_matches!(memfind(&tx, &tr.sig), Some(SIG_OFFSET));
assert_matches!(memfind(&tx, &tr.from), Some(PUB_KEY_OFFSET));
}
#[test]
fn test_overspend_attack() {
let keypair0 = KeyPair::new();
let keypair1 = KeyPair::new();
let zero = Hash::default();
let mut tr = Transaction::new(&keypair0, keypair1.pubkey(), 1, zero);
if let Plan::Pay(ref mut payment) = tr.data.plan {
payment.tokens = 2; // <-- attack!
}
assert!(!tr.verify_plan());
// Also, ensure all branches of the plan spend all tokens
if let Plan::Pay(ref mut payment) = tr.data.plan {
payment.tokens = 0; // <-- whoops!
}
assert!(!tr.verify_plan());
}
#[test]
fn test_verify_transactions() {
let alice_keypair = KeyPair::new();
let bob_pubkey = KeyPair::new().pubkey();
let carol_pubkey = KeyPair::new().pubkey();
let last_id = Hash::default();
let tr0 = Transaction::new(&alice_keypair, bob_pubkey, 1, last_id);
let tr1 = Transaction::new(&alice_keypair, carol_pubkey, 1, last_id);
let transactions = vec![tr0, tr1];
assert!(verify_transactions(&transactions));
}
}
#[cfg(all(feature = "unstable", test))]
mod bench {
extern crate test;
use self::test::Bencher;
use hash::Hash;
use rayon::prelude::*;
use signature::{KeyPair, KeyPairUtil};
use transaction::*;
#[bench]
fn verify_signatures_bench(bencher: &mut Bencher) {
let alice_keypair = KeyPair::new();
let last_id = Hash::default();
let transactions: Vec<_> = (0..64)
.into_par_iter()
.map(|_| {
let rando_pubkey = KeyPair::new().pubkey();
Transaction::new(&alice_keypair, rando_pubkey, 1, last_id)
})
.collect();
bencher.iter(|| {
assert!(verify_signatures(&transactions));
});
}
}
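
A batch-verification sketch tying the helpers above together (crate path `solana` assumed):

extern crate solana;

use solana::hash::Hash;
use solana::signature::{KeyPair, KeyPairUtil};
use solana::transaction::{verify_transactions, Transaction};

fn main() {
    let alice = KeyPair::new();
    let last_id = Hash::default();
    let txs: Vec<_> = (0..4)
        .map(|_| Transaction::new(&alice, KeyPair::new().pubkey(), 1, last_id))
        .collect();
    // Signatures and spending plans are each checked with rayon's
    // par_iter under the hood.
    assert!(verify_transactions(&txs));
}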