Compare commits

...

152 Commits

Author SHA1 Message Date
e6c3c215ab Add note about installing git-lfs 2018-04-30 15:26:31 -06:00
5c66bbde01 Add a note about running with GPU optimizations 2018-04-30 15:20:39 -06:00
6268d540a8 move CI specific scripts to solana-labs/buildkite repo
Former-commit-id: 77dd1bdd4a
2018-04-29 23:43:43 -07:00
3cfb571356 Version bump
Former-commit-id: f7385e866207b3ec2269bac36d52ef1e7f09337c
2018-04-27 15:49:48 -07:00
f6e5f2439d Add GPU library for Linux systems
To get solana to use the GPU, invoke cargo with "--features=cuda".


Former-commit-id: ea904df6e53d98a32e3f6103ee82cdf7ba08bf21
2018-04-27 15:47:22 -07:00
edf6272374 Merge pull request #154 from sakridge/replicator
Replicator
2018-04-27 14:30:52 -06:00
7f6a4b0ce3 Deserialize the Entry structs and process them 2018-04-27 13:15:19 -07:00
3be5f25f2f Work on test_replicate to test replicate service
generate some messages to send to replicator service
2018-04-27 08:21:34 -07:00
1b6cdd5637 Fix some compilation issues 2018-04-27 08:21:34 -07:00
f752e55929 update 2018-04-27 08:21:34 -07:00
ebb089b3f1 wip 2018-04-27 08:21:34 -07:00
ad6303f031 docs 2018-04-27 08:21:34 -07:00
828b9d6717 docs 2018-04-27 08:21:34 -07:00
444adcd1ca update 2018-04-27 08:21:34 -07:00
69ac305883 wip 2018-04-27 08:21:34 -07:00
2ff57df2a0 state replication 2018-04-27 08:21:34 -07:00
7077f4cbe2 Merge pull request #128 from garious/faster-demo
Utilize parallelized accountant in demo
2018-04-27 08:47:42 -06:00
266f85f607 Merge pull request #152 from aeyakovenko/star
recover full network from a star
2018-04-26 15:36:08 -06:00
d90ab90145 bind to all 2018-04-26 13:54:29 -07:00
48018b3f5b docs 2018-04-26 13:50:57 -07:00
15584e7062 recover full network from a star 2018-04-26 13:48:42 -07:00
d415b17146 sleepless demo to complement sleepless nights
18 ktps on macbook pro, no gpu
2018-04-26 13:17:38 -06:00
9ed953e8c3 Fix rebase fails 2018-04-26 09:35:10 -06:00
b60a98bd6e Startup log can reference IDs without itself 2018-04-26 08:42:34 -06:00
a15e30d4b3 Report transactions processed 2018-04-26 08:42:34 -06:00
d5d133353f Port blocking stub functions to new stateful ones 2018-04-26 08:42:34 -06:00
6badc98510 Add low-level response-handling functions to skel 2018-04-26 08:42:34 -06:00
ea8bfb46ce Add a way to subscribe for new entry metadata 2018-04-26 08:42:34 -06:00
58860ed19f WIP: New demo that makes better use of the parallelized accountant 2018-04-26 08:42:34 -06:00
583f652197 Generate genesis log for the demo
This log contains a bunch of transactions that generate new
accounts, so that transactions to and from them can be processed
in parallel.
2018-04-26 08:42:34 -06:00
3215dcff78 Update readme for new demo
Need to create a bunch of unrelated accounts to the genesis block
so that transactions can be processed in parallel without waiting
on write-locks. And then stuff the private keys of those accounts
into mint.json so that the client-demo can send the tokens from
those accounts.
2018-04-26 08:42:34 -06:00
38fdd17067 Add initializing log message to server
Handy when the genesis block is large.
2018-04-26 08:42:34 -06:00
807ccd15ba Add solana-mint-demo CLI
This extends solana-mint with additional data that will be used by
both solana-client-demo and creating the demo's genesis block.
2018-04-26 08:42:34 -06:00
1c923d2f9e Fix entry hash when no events and num_hashes is one 2018-04-26 08:42:34 -06:00
2676b21400 Merge pull request #151 from rlkelly/139__forget_signature
added forget_signature method
2018-04-26 08:28:11 -06:00
fd5ef94b5a added forget signature method 2018-04-26 07:22:11 -04:00
02c7eea236 Merge pull request #150 from garious/141__add_futures
Add FutureResult to return a Future that immediately resolves
2018-04-25 20:44:40 -06:00
34d1805b54 Add FutureResult to return a Future that immediately resolves 2018-04-25 19:23:24 -07:00
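A sketch of the idea against futures 0.1 (the version the Cargo.toml diff below pins); the getter and its error type here are made up for illustration:

```rust
extern crate futures;

use futures::future::{result, Future, FutureResult};

// Hypothetical RPC-style getter: wrap an already-known Result in a
// FutureResult so it fits a futures-based interface while resolving
// immediately, with no executor round trip.
fn get_balance(val: Option<i64>) -> FutureResult<i64, ()> {
    result(val.ok_or(()))
}

fn main() {
    assert_eq!(get_balance(Some(42)).wait(), Ok(42));
    assert_eq!(get_balance(None).wait(), Err(()));
}
```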
753eaa8266 buildkite script 2018-04-24 11:32:00 -07:00
0b39c6f98e Merge pull request #145 from aeyakovenko/crdt
initial crdt implementation
2018-04-24 10:49:23 -07:00
55b8d0db4d cleanup 2018-04-23 23:33:21 -07:00
3d7969d8a2 initial crdt implementation 2018-04-23 23:06:28 -07:00
041de8082a Merge pull request #144 from rleungx/improve-error-messages
improve the error messages
2018-04-21 08:17:25 -06:00
3da1fa4d88 improve the error messages 2018-04-21 21:52:55 +08:00
39df21de30 Merge pull request #142 from ansrivas/master
git clone instruction
2018-04-20 16:05:21 -06:00
8cbb7d7362 git clone instruction 2018-04-20 23:02:10 +02:00
10a0c47210 Merge pull request #137 from garious/linux-hang
Workaround linux hang
2018-04-19 11:46:48 -06:00
89bf3765f3 Merge pull request #138 from sakridge/help_options
Add -h/--help options for client-demo and testnode
2018-04-19 11:40:07 -06:00
8181bc591b Add -h/--help options for client-demo and testnode 2018-04-19 10:22:31 -07:00
ca877e689c Merge pull request #136 from rleungx/report-errors-to-stderr
report serde parse errors to stderr
2018-04-19 11:15:57 -06:00
c6048e2bab Workaround linux hang
Without this patch, Linux systems would hang when running the demo.

The root cause (why Linux is acting differently than macOS) was
not determined, but we know the problem is caused by a known
issue in the transaction pipeline - that entries are not pulled
off the historian channel until after the full transaction batch
is processed. This patch makes the sync_channel large enough that
it should never block on a gigabit network.
2018-04-19 10:04:32 -07:00
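The shape of the workaround in miniature, using the std `sync_channel` the message refers to; the capacity below is an arbitrary stand-in for the "large enough" bound, not the repo's actual constant:

```rust
use std::sync::mpsc::sync_channel;

fn main() {
    // A sync_channel blocks the sender once `capacity` items are queued.
    // Sizing it well above the largest expected batch keeps the producer
    // from stalling while a full batch drains downstream.
    let (sender, receiver) = sync_channel::<u64>(100_000);
    for i in 0..10 {
        sender.send(i).unwrap(); // never blocks at this queue depth
    }
    drop(sender);
    assert_eq!(receiver.iter().sum::<u64>(), 45);
}
```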
60015aee04 report serde parse errors to stderr 2018-04-19 23:51:57 +08:00
43e6741071 Merge pull request #134 from rleungx/report-parse-errors-to-stderr
report parse errors to stderr
2018-04-19 08:38:38 -06:00
b91f6bcbff report parse errors to stderr 2018-04-19 22:24:46 +08:00
64e2f1b949 Merge pull request #133 from djKooks/rm-mut
Remove out for immutable variable
2018-04-19 08:24:32 -06:00
13a2f05776 Remove out for immutable variable 2018-04-19 23:00:16 +09:00
903374ae9b Merge pull request #132 from aeyakovenko/readme
readme and site update
2018-04-18 21:49:09 -06:00
d366a07403 add gregs abstract as an intro 2018-04-18 20:17:37 -07:00
e94921174a Merge pull request #131 from sakridge/erasure
Add erasure rust logic under feature flag
2018-04-18 21:10:06 -06:00
dea5ab2f79 Add erasure rust logic under feature flag 2018-04-18 19:42:09 -07:00
5e11078f34 Add Stephen 2018-04-18 17:22:58 -06:00
d7670cd4ff Merge pull request #129 from aeyakovenko/retransmit
Retransmit
2018-04-18 11:10:50 -04:00
29f3230089 docs 2018-04-17 19:53:18 -07:00
d003efb522 fix docs 2018-04-17 19:52:46 -07:00
97e772e87a docs 2018-04-17 19:46:50 -07:00
0b33615979 update 2018-04-17 12:48:06 -07:00
249cead13e docs 2018-04-17 11:07:43 -07:00
7c96dea359 fmt 2018-04-17 11:05:35 -07:00
374c9921fd comments 2018-04-17 11:05:15 -07:00
fb55ab8c33 format 2018-04-16 21:02:37 -07:00
13485074ac test cast 2018-04-16 20:57:15 -07:00
4944c965e4 update
heap

update

update

wip

use a vec and sort

builds

update

tests

update

fmt

update

progress

fmt

passes needs retransmit test

tests

cleanup

update

update

update

update

fmt
2018-04-16 20:33:09 -07:00
83c5b3bc38 Merge pull request #125 from garious/fix-parallelized-ledger
Tell verifiers when not to parallelize accounting
2018-04-13 08:43:36 -06:00
7fc42de758 Fix bench 2018-04-13 00:36:23 -04:00
0a30bd74c1 Tell verifiers when not to parallelize accounting
Without this patch, many batches of transactions could be tossed
into a single entry, but the parallelized accountant can only
guarantee the transactions in the batch can be processed in
parallel.

This patch signals the historian to generate a new Entry after
each batch. Validators must maintain sequential consistency
across Entries.
2018-04-12 21:08:53 -06:00
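In outline, the signaling works like the sketch below. `Signal::Tick` and the historian sender are the repo's own names (they appear in the accountant_skel.rs diff further down); the batch loop itself is illustrative:

```rust
use std::sync::mpsc::{channel, Sender};

enum Signal {
    Tick, // ask the recorder to cut a new Entry here
}

// After each verified batch is applied (in parallel internally), emit a
// Tick so the Entry boundary matches the batch boundary. Validators may
// then parallelize freely within an Entry, but never across Entries.
fn process_batches(batches: Vec<Vec<u64>>, historian: &Sender<Signal>) {
    for batch in batches {
        for _tr in batch {
            // parallel transaction processing elided
        }
        historian.send(Signal::Tick).unwrap();
    }
}

fn main() {
    let (sender, receiver) = channel();
    process_batches(vec![vec![1, 2, 3], vec![4, 5]], &sender);
    drop(sender);
    assert_eq!(receiver.iter().count(), 2); // one Tick per batch
}
```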
9b12a79c8d cargo +nightly fmt 2018-04-12 17:04:11 -06:00
0dcde23b05 Merge pull request #119 from sakridge/skel_verify_test
Add skel test which sends a bad transaction, verify it doesn't make it
2018-04-12 17:02:08 -06:00
8dc15b88eb Add skel test which sends a bad transaction, verify it doesn't make it 2018-04-12 15:01:59 -07:00
d20c952f92 Merge pull request #121 from aeyakovenko/helpers
requests to packets utility function
2018-04-12 12:58:30 -07:00
c2eeeb27fd bump timer 2018-04-12 11:12:10 -07:00
180d8b67e4 requests to packets function 2018-04-12 10:44:09 -07:00
9c989c46ee Merge pull request #123 from garious/cleanup-tests
Cleanup tests
2018-04-11 22:37:35 -06:00
51633f509d Fix test
The test was meant to ensure the signature covered the 'tokens'
field, but then when the 'plan' field was rolled in, Transaction::verify()
started failing because Plan::verify() failed. When Transaction::verify()
was split into two, the unexpected failure was exposed but went unnoticed.
This patch brings it back to its original intent, to ensure signature
verification fails if the network attempts to change the client's payment.
2018-04-11 22:17:21 -06:00
705228ecc2 Remove redundant signs 2018-04-11 22:17:21 -06:00
740f6d2258 Merge pull request #122 from garious/fix-ci
Fix the nightly build
2018-04-11 20:56:58 -06:00
3b9ef5ccab Fix the nightly build 2018-04-11 20:24:14 -06:00
ab74e7f24f Merge pull request #117 from garious/parallelize-accountant
Enable parallelized accountant
2018-04-11 19:39:45 -06:00
be9a670fb7 Add process_packets() benchmark 2018-04-11 18:02:45 -06:00
6e43e7a146 Enable parallelized accountant 2018-04-11 18:01:59 -06:00
ab2093926a Merge pull request #120 from aeyakovenko/fix_bench_compile
fix compile error
2018-04-11 18:01:13 -06:00
916b90f415 Merge pull request #118 from sakridge/ecdsa_tests
Add tests for ecdsa sig checking
2018-04-11 17:58:45 -06:00
2ef3db9fab fix compile error 2018-04-11 15:40:25 -07:00
6987b6fd58 Add tests for ecdsa sig checking 2018-04-11 12:29:44 -07:00
078179e9b8 Merge pull request #115 from garious/parallelize-accountant
More refactoring
2018-04-11 10:28:15 -06:00
50ccecdff5 Refactor 2018-04-11 09:02:33 -06:00
e838a8c28a Delete unused function 2018-04-10 21:56:13 -06:00
e5f7eeedbf Use iterators 2018-04-10 21:48:26 -06:00
d1948b5a00 Zip earlier
And remove redundant into_iter() calls.
2018-04-10 21:18:39 -06:00
c07f700c53 Merge pull request #113 from aeyakovenko/master_pclient
command-line options for testnode and client
2018-04-09 23:07:03 -06:00
c934a30f66 commandline options for client and testnode 2018-04-09 21:14:52 -07:00
310d01d8a2 Merge pull request #112 from aeyakovenko/recycler_test
Recycler test should verify that it's recycling
2018-04-07 09:29:50 -06:00
f330739bc7 Recycler test should verify that it's recycling 2018-04-07 07:08:42 -07:00
58626721ad Merge pull request #111 from garious/parallelize-accountant
Cleanup
2018-04-06 17:03:10 -06:00
584c8c07b8 Better symmetry
deserialize -> process -> serialize
2018-04-06 16:34:59 -06:00
a93ec03d2c Move creating blobs into its own function 2018-04-06 16:22:02 -06:00
7bd3a8e004 Reduce cyclomatic complexity 2018-04-06 16:12:13 -06:00
912a5f951e Why is msgs cloned here? 2018-04-06 15:58:11 -06:00
6869089111 Parallelize deserialize 2018-04-06 15:52:58 -06:00
6fd32fe850 Cleanup constants 2018-04-06 15:43:05 -06:00
81e2b36d38 Cleanup packet_verify 2018-04-06 15:24:15 -06:00
7d811afab1 Parallelize CPU sig verify 2018-04-06 15:21:49 -06:00
39f5aaab8b Merge pull request #110 from garious/parallelize-accountant
Parallel processing of arbitrary transactions
2018-04-06 09:02:36 -06:00
5fc81dd6c8 Fix the nightly build
Nightly uses a different (but backward compatible) version of rustfmt.
2018-04-05 22:39:29 -06:00
491a530d90 Support parallelization of arbitrary transactions
Still assumes witnesses are processed serially afterward.
2018-04-05 22:30:25 -06:00
c12da50f9b Fix race condition
Without this patch, it was possible for two transactions with the same
'from' address to drive its balance below zero. With the patch, we'll
hold a write lock from just before we verify sufficient funds until
after those funds are deducted from the account.
2018-04-05 22:30:25 -06:00
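A minimal sketch of the locking discipline described above (it mirrors the `process_verified_transaction_debits` function in the accountant.rs diff below), with plain integers standing in for the real key and balance types:

```rust
use std::collections::HashMap;
use std::sync::RwLock;

type Balances = RwLock<HashMap<u64, RwLock<i64>>>;

// Take the per-account write lock *before* checking funds, and hold it
// through the deduction, so no other thread can debit in between.
fn debit(balances: &Balances, from: u64, tokens: i64) -> Result<(), &'static str> {
    let bals = balances.read().unwrap();
    let mut bal = bals.get(&from).ok_or("account not found")?.write().unwrap();
    if *bal < tokens {
        return Err("insufficient funds");
    }
    *bal -= tokens; // still under the same write lock
    Ok(())
}

fn main() {
    let balances: Balances = RwLock::new(HashMap::new());
    balances.write().unwrap().insert(7, RwLock::new(10));
    assert!(debit(&balances, 7, 8).is_ok());
    assert!(debit(&balances, 7, 8).is_err()); // only 2 tokens left
}
```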
41e8500fc5 Break up process_verified_transaction() 2018-04-05 22:29:13 -06:00
a7f59ef3c1 Merge pull request #109 from sakridge/wip_gpu
Change for cuda verify integration
2018-04-05 22:24:35 -06:00
f4466c8c0a Change for cuda verify integration 2018-04-05 20:00:44 -07:00
bc6d6b20fa Merge pull request #108 from garious/parallelize-accountant
Reject old transactions so that we can boot old signatures
2018-04-05 15:11:22 -06:00
01326936e6 Expire all transactions after some amount of time
Reject old transactions so that we can calculate an upper bound
for memory usage, and therefore ensure the server won't slow
down over time to crash due to memory exhaustion.
2018-04-05 10:26:45 -06:00
c960e8d351 Reject transactions with a last_id that isn't from this ledger
Before this patch, a client could put any value into `last_id`; the field
was primarily there to ensure the transaction had a globally unique
signature. With this patch, the server can use `last_id` as an
indicator of how long it's been since the transaction was created.
The server may choose to reject sufficiently old transactions so
that it can forget about old signatures.
2018-04-05 09:54:03 -06:00
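The bookkeeping this enables is sketched below. `MAX_ENTRY_IDS` matches the constant introduced in the accountant.rs diff further down; the `Hash` and `Signature` types here are narrow stand-ins:

```rust
use std::collections::{HashSet, VecDeque};

const MAX_ENTRY_IDS: usize = 1024 * 4; // same bound the diff below introduces

type Hash = u64; // stand-in for the real 32-byte hash
type Signature = u64; // stand-in for the real signature

// A bounded queue of recent entry IDs, each carrying the signatures seen
// against it. Registering a new ID evicts the oldest one, and with it
// every signature reserved under that last_id.
struct RecentIds {
    last_ids: VecDeque<(Hash, HashSet<Signature>)>,
}

impl RecentIds {
    fn register_entry_id(&mut self, id: Hash) {
        if self.last_ids.len() >= MAX_ENTRY_IDS {
            self.last_ids.pop_front(); // forget the oldest id's signatures
        }
        self.last_ids.push_back((id, HashSet::new()));
    }
}

fn main() {
    let mut ids = RecentIds { last_ids: VecDeque::new() };
    for i in 0..=(MAX_ENTRY_IDS as u64) {
        ids.register_entry_id(i);
    }
    // Id 0 was evicted, so any signatures reserved under it are gone.
    assert!(ids.last_ids.iter().all(|entry| entry.0 != 0));
}
```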
fc69d31914 Merge pull request #106 from garious/parallelize-accountant
Parallelize accountant
2018-04-04 22:42:28 -06:00
8d425e127b Update benchmark to avoid write locks in sig duplicate detection 2018-04-04 17:29:22 -06:00
3cfb07ea38 Sort signatures by last_id
This will allow for additional concurrency as well as give the server
a means of garbage-collecting old signatures.
2018-04-04 17:06:31 -06:00
76679ffb92 Per-cell locking
This allows us to use read-locks for balances most of the time. We
only lock the full table if we need to add one.
2018-04-04 16:31:13 -06:00
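In miniature, the read-mostly pattern looks like this; it mirrors the `apply_payment` function visible in the accountant.rs diff below:

```rust
use std::collections::HashMap;
use std::sync::RwLock;

// Common case: the account exists, so a table-wide read lock plus a
// per-cell write lock suffices. Only inserting a new account takes the
// table-wide write lock.
fn credit(balances: &RwLock<HashMap<u64, RwLock<i64>>>, to: u64, tokens: i64) {
    if balances.read().unwrap().contains_key(&to) {
        let bals = balances.read().unwrap();
        *bals[&to].write().unwrap() += tokens;
    } else {
        let mut bals = balances.write().unwrap();
        bals.insert(to, RwLock::new(tokens));
    }
}

fn main() {
    let balances = RwLock::new(HashMap::new());
    credit(&balances, 1, 5); // insert path: table write lock
    credit(&balances, 1, 5); // fast path: read lock + cell write lock
    assert_eq!(*balances.read().unwrap()[&1].read().unwrap(), 10);
}
```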
dc2ec925d7 Better test 2018-04-04 16:01:43 -06:00
81d6ba3ec5 Merge pull request #105 from garious/coverage-comments
Add the 'why' for code coverage to readme
2018-04-04 14:34:26 -07:00
014bdaa355 Add benchmark for parallel transaction processing 2018-04-04 12:43:27 -06:00
0c60fdd2ce Make accountant thread-safe
Before this change, parallel transaction processing required locking
the full accountant. Since we only call one method,
process_verified_transaction, the global lock equates to doing no
parallelization at all.  With this change, we only lock the data that's
being written to.
2018-04-04 12:33:03 -06:00
43d986d14e Add the 'why' for code coverage to readme 2018-04-04 09:26:38 -06:00
123d7c6a37 Merge pull request #99 from aeyakovenko/subscribers
Blobs and windows
2018-04-03 17:12:53 -06:00
5ac7df17f9 Implement window service
Batch out-of-order blobs until we have a contiguous window.
2018-04-03 13:53:19 -07:00
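A sketch of the windowing idea, with a toy `Blob` type standing in for the real packet structures; the retransmit and erasure-reconstruction steps of the real service are elided:

```rust
struct Blob {
    index: u64,
}

struct Window {
    slots: Vec<Option<Blob>>,
    consumed: u64, // next index expected in sequence
}

impl Window {
    // Slot each arriving blob by its sequence index.
    fn insert(&mut self, blob: Blob) {
        let i = (blob.index as usize) % self.slots.len();
        self.slots[i] = Some(blob);
    }

    // Drain only the contiguous prefix; stop at the first gap.
    fn consume_contiguous(&mut self) -> Vec<Blob> {
        let mut out = vec![];
        loop {
            let i = (self.consumed as usize) % self.slots.len();
            match self.slots[i].take() {
                Some(blob) if blob.index == self.consumed => {
                    self.consumed += 1;
                    out.push(blob);
                }
                other => {
                    self.slots[i] = other; // gap not filled yet; put back
                    break;
                }
            }
        }
        out
    }
}

fn main() {
    let mut window = Window { slots: (0..4).map(|_| None).collect(), consumed: 0 };
    window.insert(Blob { index: 1 }); // arrives out of order
    assert!(window.consume_contiguous().is_empty()); // index 0 still missing
    window.insert(Blob { index: 0 });
    assert_eq!(window.consume_contiguous().len(), 2); // 0 and 1 now contiguous
}
```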
bc0dde696a Merge pull request #102 from garious/rollback
Fix clippy warnings
2018-04-03 10:08:42 -06:00
c323bd3c87 Fix clippy warnings 2018-04-03 09:55:33 -06:00
5c672adc21 Merge pull request #101 from garious/rollback
Move tests
2018-04-02 21:58:10 -06:00
2f80747dc7 Move tests
After we restructured for parallel verification, the tests here
were unreferenced by the accountant, but still meaningful to
transaction verification.
2018-04-02 21:45:21 -06:00
95749ed0e3 Merge pull request #100 from garious/rollback
Cleanup use of event signatures and entry hashing
2018-04-02 21:17:37 -06:00
94eea3abec fmt 2018-04-02 21:15:21 -06:00
fe32159673 Add a test to ensure witness data continues to be hashed 2018-04-02 21:07:38 -06:00
07aa2e1260 Add witness data to entry hash
Otherwise, witnesses can be dropped or reordered by a malicious
generator.
2018-04-02 20:47:51 -06:00
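A sketch of the fix, assuming the sha2 crate; event serialization is elided, and the byte-slice inputs are stand-ins for the real Event encoding:

```rust
use sha2::{Digest, Sha256};

// Fold every event's bytes into the entry id, so dropping or reordering
// a witness changes the resulting hash and breaks the chain.
fn next_id(prev_id: &[u8; 32], events: &[&[u8]]) -> [u8; 32] {
    let mut hasher = Sha256::new();
    hasher.update(prev_id);
    for event in events {
        hasher.update(event);
    }
    hasher.finalize().into()
}

fn main() {
    let seed = [0u8; 32];
    let a = next_id(&seed, &[b"tick", b"witness"]);
    let b = next_id(&seed, &[b"witness", b"tick"]); // reordered
    assert_ne!(a, b);
}
```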
6fec8fad57 Adding from to the signature is redundant 2018-04-02 20:34:18 -06:00
84df487f7d Merge pull request #97 from garious/rollback
Refactoring for rollback
2018-04-02 15:41:33 -06:00
49708e92d3 Use last_id instead of seed
It doesn't really matter, but was confusing since the seed points
to an entry before the mint's deposit.
2018-04-02 15:06:42 -06:00
daadae7987 Move replaying ledger out of accountant 2018-04-02 14:51:55 -06:00
2b788d06b7 Move the historian up to accountant_skel 2018-04-02 14:41:07 -06:00
90cd9bd533 Move balance check so that log_* methods are only used to add logging 2018-04-02 14:14:49 -06:00
d63506f98c No longer allow deposits outside the constructor 2018-04-02 14:00:42 -06:00
17de6876bb Add simpler accountant constructor 2018-04-02 13:51:44 -06:00
fc540395f9 Update docs 2018-04-02 11:51:56 -06:00
da2b4962a9 Move verify_slice() into a trait 2018-04-02 11:43:38 -06:00
3abe305a21 Move reserve_signatures into accountant
Reasons Transaction signatures need to be unique:

1. guard against duplicates
2. accountant uses them as IDs to link Witness signatures to transactions via the
`pending` hash map
2018-04-02 09:38:36 -06:00
46e8c09bd8 Revoke API access to first_id 2018-04-02 09:30:10 -06:00
35 changed files with 3670 additions and 769 deletions

.gitattributes Normal file

@ -0,0 +1 @@
*.a filter=lfs diff=lfs merge=lfs -text

Cargo.toml

@ -1,13 +1,14 @@
[package]
name = "solana"
description = "High Performance Blockchain"
version = "0.4.0"
version = "0.5.0-beta"
documentation = "https://docs.rs/solana"
homepage = "http://loomprotocol.com/"
homepage = "http://solana.io/"
repository = "https://github.com/solana-labs/solana"
authors = [
"Anatoly Yakovenko <anatoly@solana.co>",
"Greg Fitzgerald <greg@solana.co>",
"Anatoly Yakovenko <anatoly@solana.io>",
"Greg Fitzgerald <greg@solana.io>",
"Stephen Akridge <stephen@solana.io>",
]
license = "Apache-2.0"
@ -35,12 +36,18 @@ path = "src/bin/genesis-demo.rs"
name = "solana-mint"
path = "src/bin/mint.rs"
[[bin]]
name = "solana-mint-demo"
path = "src/bin/mint-demo.rs"
[badges]
codecov = { repository = "solana-labs/solana", branch = "master", service = "github" }
[features]
unstable = []
ipv6 = []
cuda = []
erasure = []
[dependencies]
rayon = "1.0.0"
@ -54,4 +61,10 @@ untrusted = "0.5.1"
bincode = "1.0.0"
chrono = { version = "0.4.0", features = ["serde"] }
log = "^0.4.1"
env_logger = "^0.4.1"
matches = "^0.1.6"
byteorder = "^1.2.1"
libc = "^0.2.1"
getopts = "^0.2"
isatty = "0.1"
futures = "0.1"

LICENSE

@ -1,4 +1,4 @@
Copyright 2018 Anatoly Yakovenko <anatoly@loomprotocol.com> and Greg Fitzgerald <garious@gmail.com>
Copyright 2018 Anatoly Yakovenko, Greg Fitzgerald and Stephen Akridge
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

README.md

@ -14,6 +14,11 @@ Solana: High Performance Blockchain
Solana&trade; is a new architecture for a high performance blockchain. It aims to support
over 700 thousand transactions per second on a gigabit network.
Introduction
===
It's possible for a centralized database to process 710,000 transactions per second on a standard gigabit network if the transactions are, on average, no more than 178 bytes. A centralized database can also replicate itself and maintain high availability without significantly compromising that transaction rate using the distributed system technique known as Optimistic Concurrency Control [H. T. Kung, J. T. Robinson (1981)]. At Solana, we're demonstrating that these same theoretical limits apply just as well to blockchain on an adversarial network. The key ingredient? Finding a way to share time when nodes can't trust one another. Once nodes can trust time, suddenly ~40 years of distributed systems research becomes applicable to blockchain! Furthermore, and much to our surprise, it can be implemented using a mechanism that has existed in Bitcoin since day one. The Bitcoin feature is called nLocktime and it can be used to postdate transactions using block height instead of a timestamp. As a Bitcoin client, you'd use block height instead of a timestamp if you don't trust the network. Block height turns out to be an instance of what's being called a Verifiable Delay Function in cryptography circles. It's a cryptographically secure way to say time has passed. In Solana, we use a far more granular verifiable delay function, a SHA-256 hash chain, to checkpoint the ledger and coordinate consensus. With it, we implement Optimistic Concurrency Control and are now well on our way toward that theoretical limit of 710,000 transactions per second.
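For the curious, the "far more granular verifiable delay function" reduces to something like the sketch below, assuming the sha2 crate; the real implementation lives in the repo's hashing and recorder modules:

```rust
use sha2::{Digest, Sha256};

// Each hash depends on the previous one, so computing num_hashes steps
// cannot be parallelized: the chain's length is evidence that real time
// elapsed between two checkpoints.
fn extend_chain(mut id: [u8; 32], num_hashes: u64) -> [u8; 32] {
    for _ in 0..num_hashes {
        id = Sha256::digest(&id).into();
    }
    id
}

fn main() {
    let seed = [0u8; 32];
    let checkpoint = extend_chain(seed, 1_000);
    // Anyone can re-run the chain to verify the checkpoint.
    assert_eq!(checkpoint, extend_chain(seed, 1_000));
}
```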
Running the demo
===
@ -24,28 +29,42 @@ $ curl https://sh.rustup.rs -sSf | sh
$ source $HOME/.cargo/env
```
If you plan to run with GPU optimizations enabled (not recommended), you'll need a CUDA library stored in git LFS. Install git-lfs here:
https://git-lfs.github.com/
Now checkout the code from github:
```bash
$ git clone https://github.com/solana-labs/solana.git
$ cd solana
```
The testnode server is initialized with a ledger from stdin and
generates new ledger entries on stdout. To create the input ledger, we'll need
to create *the mint* and use it to generate a *genesis ledger*. It's done in
two steps because the mint.json file contains a private key that will be
two steps because the mint-demo.json file contains private keys that will be
used later in this demo.
```bash
$ echo 1000000000 | cargo run --release --bin solana-mint | tee mint.json
$ cat mint.json | cargo run --release --bin solana-genesis | tee genesis.log
$ echo 1000000000 | cargo run --release --bin solana-mint-demo > mint-demo.json
$ cat mint-demo.json | cargo run --release --bin solana-genesis-demo > genesis.log
```
Now you can start the server:
```bash
$ cat genesis.log | cargo run --release --bin solana-testnode | tee transactions0.log
$ cat genesis.log | cargo run --release --bin solana-testnode > transactions0.log
```
Wait a few seconds for the server to initialize. It will print "Ready." when it's safe
to start sending it transactions.
Then, in a separate shell, let's execute some transactions. Note we pass in
the JSON configuration file here, not the genesis ledger.
```bash
$ cat mint.json | cargo run --release --bin solana-client-demo
$ cat mint-demo.json | cargo run --release --bin solana-client-demo
```
Now kill the server with Ctrl-C, and take a look at the ledger. You should
@ -61,14 +80,14 @@ Now restart the server from where we left off. Pass it both the genesis ledger,
the transaction ledger.
```bash
$ cat genesis.log transactions0.log | cargo run --release --bin solana-testnode | tee transactions1.log
$ cat genesis.log transactions0.log | cargo run --release --bin solana-testnode > transactions1.log
```
Lastly, run the client demo again, and verify that all funds were spent in the
previous round, and so no additional transactions are added.
```bash
$ cat mint.json | cargo run --release --bin solana-client-demo
$ cat mint-demo.json | cargo run --release --bin solana-client-demo
```
Stop the server again, and verify there are only Tick entries, and no Transaction entries.
@ -117,3 +136,30 @@ Run the benchmarks:
```bash
$ cargo +nightly bench --features="unstable"
```
To run the benchmarks on Linux with GPU optimizations enabled:
```bash
$ cargo +nightly bench --features="unstable,cuda"
```
Code coverage
---
To generate code coverage statistics, run kcov via Docker:
```bash
$ docker run -it --rm --security-opt seccomp=unconfined --volume "$PWD:/volume" elmtai/docker-rust-kcov
```
Why coverage? While most see coverage as a code quality metric, we see it primarily as a developer
productivity metric. When a developer makes a change to the codebase, presumably it's a *solution* to
some problem. Our unit-test suite is how we encode the set of *problems* the codebase solves. Running
the test suite should indicate that your change didn't *infringe* on anyone else's solutions. Adding a
test *protects* your solution from future changes. Say you don't understand why a line of code exists,
try deleting it and running the unit-tests. The nearest test failure should tell you what problem
was solved by that code. If no test fails, go ahead and submit a Pull Request that asks, "what
problem is solved by this code?" On the other hand, if a test does fail and you can think of a
better way to solve the same problem, a Pull Request with your solution would most certainly be
welcome! Likewise, if rewriting a test can better communicate what code it's protecting, please
send us that patch!

build.rs Normal file

@ -0,0 +1,15 @@
use std::env;
fn main() {
println!("cargo:rustc-link-search=native=.");
if !env::var("CARGO_FEATURE_CUDA").is_err() {
println!("cargo:rustc-link-lib=static=cuda_verify_ed25519");
println!("cargo:rustc-link-search=native=/usr/local/cuda/lib64");
println!("cargo:rustc-link-lib=dylib=cudart");
println!("cargo:rustc-link-lib=dylib=cuda");
println!("cargo:rustc-link-lib=dylib=cudadevrt");
}
if !env::var("CARGO_FEATURE_ERASURE").is_err() {
println!("cargo:rustc-link-lib=dylib=Jerasure");
}
}


@ -11,7 +11,7 @@ with by verifying each entry's hash can be generated from the hash in the previo
extern crate solana;
use solana::historian::Historian;
use solana::ledger::{verify_slice, Entry, Hash};
use solana::ledger::{Block, Entry, Hash};
use solana::event::{generate_keypair, get_pubkey, sign_claim_data, Event};
use std::thread::sleep;
use std::time::Duration;
@ -38,7 +38,7 @@ fn main() {
}
// Proof-of-History: Verify the historian learned about the events
// in the same order they appear in the vector.
assert!(verify_slice(&entries, &seed));
assert!(entries[..].verify(&seed));
}
```
@ -56,7 +56,7 @@ Proof-of-History
Take note of the last line:
```rust
assert!(verify_slice(&entries, &seed));
assert!(entries[..].verify(&seed));
```
[It's a proof!](https://en.wikipedia.org/wiki/Curry–Howard_correspondence) For each entry returned by the


@ -14,5 +14,5 @@ msc {
recorder=>historian [ label = "e2 = Entry{id: h6, n: 3, event: Tick}" ] ;
client=>historian [ label = "collect()" ] ;
historian=>client [ label = "entries = [e0, e1, e2]" ] ;
client=>client [ label = "verify_slice(entries, h0)" ] ;
client=>client [ label = "entries.verify(h0)" ] ;
}

src/accountant.rs

@ -3,157 +3,218 @@
//! on behalf of the caller, and a private low-level API for when they have
//! already been signed and verified.
extern crate libc;
use chrono::prelude::*;
use entry::Entry;
use event::Event;
use hash::Hash;
use historian::Historian;
use mint::Mint;
use plan::{Plan, Witness};
use recorder::Signal;
use plan::{Payment, Plan, Witness};
use rayon::prelude::*;
use signature::{KeyPair, PublicKey, Signature};
use std::collections::hash_map::Entry::Occupied;
use std::collections::{HashMap, HashSet};
use std::collections::{HashMap, HashSet, VecDeque};
use std::result;
use std::sync::mpsc::SendError;
use std::sync::RwLock;
use transaction::Transaction;
pub const MAX_ENTRY_IDS: usize = 1024 * 4;
#[derive(Debug, PartialEq, Eq)]
pub enum AccountingError {
AccountNotFound,
InsufficientFunds,
InvalidTransfer,
InvalidTransferSignature,
SendError,
}
pub type Result<T> = result::Result<T, AccountingError>;
/// Commit funds to the 'to' party.
fn complete_transaction(balances: &mut HashMap<PublicKey, i64>, plan: &Plan) {
if let Plan::Pay(ref payment) = *plan {
*balances.entry(payment.to).or_insert(0) += payment.tokens;
fn apply_payment(balances: &RwLock<HashMap<PublicKey, RwLock<i64>>>, payment: &Payment) {
if balances.read().unwrap().contains_key(&payment.to) {
let bals = balances.read().unwrap();
*bals[&payment.to].write().unwrap() += payment.tokens;
} else {
let mut bals = balances.write().unwrap();
bals.insert(payment.to, RwLock::new(payment.tokens));
}
}
pub struct Accountant {
pub historian: Historian,
pub balances: HashMap<PublicKey, i64>,
pub first_id: Hash,
pending: HashMap<Signature, Plan>,
time_sources: HashSet<PublicKey>,
last_time: DateTime<Utc>,
balances: RwLock<HashMap<PublicKey, RwLock<i64>>>,
pending: RwLock<HashMap<Signature, Plan>>,
last_ids: RwLock<VecDeque<(Hash, RwLock<HashSet<Signature>>)>>,
time_sources: RwLock<HashSet<PublicKey>>,
last_time: RwLock<DateTime<Utc>>,
}
impl Accountant {
/// Create an Accountant using an existing ledger.
pub fn new_from_entries<I>(entries: I, ms_per_tick: Option<u64>) -> Self
where
I: IntoIterator<Item = Entry>,
{
let mut entries = entries.into_iter();
// The first item in the ledger is required to be an entry with zero num_hashes,
// which implies its id can be used as the ledger's seed.
let entry0 = entries.next().unwrap();
let start_hash = entry0.id;
let hist = Historian::new(&start_hash, ms_per_tick);
let mut acc = Accountant {
historian: hist,
balances: HashMap::new(),
first_id: start_hash,
pending: HashMap::new(),
time_sources: HashSet::new(),
last_time: Utc.timestamp(0, 0),
};
// The second item in the ledger is a special transaction where the to and from
// fields are the same. That entry should be treated as a deposit, not a
// transfer to oneself.
let entry1 = entries.next().unwrap();
acc.process_verified_event(&entry1.events[0], true).unwrap();
for entry in entries {
for event in entry.events {
acc.process_verified_event(&event, false).unwrap();
}
/// Create an Accountant using a deposit.
pub fn new_from_deposit(deposit: &Payment) -> Self {
let balances = RwLock::new(HashMap::new());
apply_payment(&balances, deposit);
Accountant {
balances,
pending: RwLock::new(HashMap::new()),
last_ids: RwLock::new(VecDeque::new()),
time_sources: RwLock::new(HashSet::new()),
last_time: RwLock::new(Utc.timestamp(0, 0)),
}
acc
}
/// Create an Accountant with only a Mint. Typically used by unit tests.
pub fn new(mint: &Mint, ms_per_tick: Option<u64>) -> Self {
Self::new_from_entries(mint.create_entries(), ms_per_tick)
pub fn new(mint: &Mint) -> Self {
let deposit = Payment {
to: mint.pubkey(),
tokens: mint.tokens,
};
let acc = Self::new_from_deposit(&deposit);
acc.register_entry_id(&mint.last_id());
acc
}
fn is_deposit(allow_deposits: bool, from: &PublicKey, plan: &Plan) -> bool {
if let Plan::Pay(ref payment) = *plan {
allow_deposits && *from == payment.to
} else {
false
fn reserve_signature(signatures: &RwLock<HashSet<Signature>>, sig: &Signature) -> bool {
if signatures.read().unwrap().contains(sig) {
return false;
}
signatures.write().unwrap().insert(*sig);
true
}
/// Process and log the given Transaction.
pub fn log_verified_transaction(&mut self, tr: Transaction) -> Result<()> {
if self.get_balance(&tr.from).unwrap_or(0) < tr.tokens {
fn forget_signature(signatures: &RwLock<HashSet<Signature>>, sig: &Signature) -> bool {
signatures.write().unwrap().remove(sig)
}
fn forget_signature_with_last_id(&self, sig: &Signature, last_id: &Hash) -> bool {
if let Some(entry) = self.last_ids
.read()
.unwrap()
.iter()
.rev()
.find(|x| x.0 == *last_id)
{
return Self::forget_signature(&entry.1, sig);
}
return false;
}
fn reserve_signature_with_last_id(&self, sig: &Signature, last_id: &Hash) -> bool {
if let Some(entry) = self.last_ids
.read()
.unwrap()
.iter()
.rev()
.find(|x| x.0 == *last_id)
{
return Self::reserve_signature(&entry.1, sig);
}
false
}
/// Tell the accountant which Entry IDs exist on the ledger. This function
/// assumes subsequent calls correspond to later entries, and will boot
/// the oldest ones once its internal cache is full. Once booted, the
/// accountant will reject transactions using that `last_id`.
pub fn register_entry_id(&self, last_id: &Hash) {
let mut last_ids = self.last_ids.write().unwrap();
if last_ids.len() >= MAX_ENTRY_IDS {
last_ids.pop_front();
}
last_ids.push_back((*last_id, RwLock::new(HashSet::new())));
}
/// Deduct tokens from the 'from' address if the account has sufficient
/// funds and the signature isn't a duplicate.
pub fn process_verified_transaction_debits(&self, tr: &Transaction) -> Result<()> {
let bals = self.balances.read().unwrap();
// Hold a write lock before the condition check, so that a debit can't occur
// between checking the balance and the withdraw.
let option = bals.get(&tr.from);
if option.is_none() {
return Err(AccountingError::AccountNotFound);
}
let mut bal = option.unwrap().write().unwrap();
if !self.reserve_signature_with_last_id(&tr.sig, &tr.data.last_id) {
return Err(AccountingError::InvalidTransferSignature);
}
if *bal < tr.data.tokens {
self.forget_signature_with_last_id(&tr.sig, &tr.data.last_id);
return Err(AccountingError::InsufficientFunds);
}
self.process_verified_transaction(&tr, false)?;
if let Err(SendError(_)) = self.historian
.sender
.send(Signal::Event(Event::Transaction(tr)))
{
return Err(AccountingError::SendError);
}
*bal -= tr.data.tokens;
Ok(())
}
/// Verify and process the given Transaction.
pub fn log_transaction(&mut self, tr: Transaction) -> Result<()> {
if !tr.verify() {
return Err(AccountingError::InvalidTransfer);
}
pub fn process_verified_transaction_credits(&self, tr: &Transaction) {
let mut plan = tr.data.plan.clone();
plan.apply_witness(&Witness::Timestamp(*self.last_time.read().unwrap()));
self.log_verified_transaction(tr)
if let Some(ref payment) = plan.final_payment() {
apply_payment(&self.balances, payment);
} else {
let mut pending = self.pending.write().unwrap();
pending.insert(tr.sig, plan);
}
}
/// Process a Transaction that has already been verified.
fn process_verified_transaction(
self: &mut Self,
tr: &Transaction,
allow_deposits: bool,
) -> Result<()> {
if !self.historian.reserve_signature(&tr.sig) {
return Err(AccountingError::InvalidTransferSignature);
}
pub fn process_verified_transaction(&self, tr: &Transaction) -> Result<()> {
self.process_verified_transaction_debits(tr)?;
self.process_verified_transaction_credits(tr);
Ok(())
}
if !Self::is_deposit(allow_deposits, &tr.from, &tr.plan) {
if let Some(x) = self.balances.get_mut(&tr.from) {
*x -= tr.tokens;
/// Process a batch of verified transactions.
pub fn process_verified_transactions(&self, trs: Vec<Transaction>) -> Vec<Result<Transaction>> {
// Run all debits first to filter out any transactions that can't be processed
// in parallel deterministically.
let results: Vec<_> = trs.into_par_iter()
.map(|tr| self.process_verified_transaction_debits(&tr).map(|_| tr))
.collect(); // Calling collect() here forces all debits to complete before moving on.
results
.into_par_iter()
.map(|result| {
result.map(|tr| {
self.process_verified_transaction_credits(&tr);
tr
})
})
.collect()
}
fn partition_events(events: Vec<Event>) -> (Vec<Transaction>, Vec<Event>) {
let mut trs = vec![];
let mut rest = vec![];
for event in events {
match event {
Event::Transaction(tr) => trs.push(tr),
_ => rest.push(event),
}
}
(trs, rest)
}
let mut plan = tr.plan.clone();
plan.apply_witness(&Witness::Timestamp(self.last_time));
if plan.is_complete() {
complete_transaction(&mut self.balances, &plan);
} else {
self.pending.insert(tr.sig, plan);
pub fn process_verified_events(&self, events: Vec<Event>) -> Result<()> {
let (trs, rest) = Self::partition_events(events);
self.process_verified_transactions(trs);
for event in rest {
self.process_verified_event(&event)?;
}
Ok(())
}
/// Process a Witness Signature that has already been verified.
fn process_verified_sig(&mut self, from: PublicKey, tx_sig: Signature) -> Result<()> {
if let Occupied(mut e) = self.pending.entry(tx_sig) {
fn process_verified_sig(&self, from: PublicKey, tx_sig: Signature) -> Result<()> {
if let Occupied(mut e) = self.pending.write().unwrap().entry(tx_sig) {
e.get_mut().apply_witness(&Witness::Signature(from));
if e.get().is_complete() {
complete_transaction(&mut self.balances, e.get());
if let Some(ref payment) = e.get().final_payment() {
apply_payment(&self.balances, payment);
e.remove_entry();
}
};
@ -162,16 +223,16 @@ impl Accountant {
}
/// Process a Witness Timestamp that has already been verified.
fn process_verified_timestamp(&mut self, from: PublicKey, dt: DateTime<Utc>) -> Result<()> {
fn process_verified_timestamp(&self, from: PublicKey, dt: DateTime<Utc>) -> Result<()> {
// If this is the first timestamp we've seen, it probably came from the genesis block,
// so we'll trust it.
if self.last_time == Utc.timestamp(0, 0) {
self.time_sources.insert(from);
if *self.last_time.read().unwrap() == Utc.timestamp(0, 0) {
self.time_sources.write().unwrap().insert(from);
}
if self.time_sources.contains(&from) {
if dt > self.last_time {
self.last_time = dt;
if self.time_sources.read().unwrap().contains(&from) {
if dt > *self.last_time.read().unwrap() {
*self.last_time.write().unwrap() = dt;
}
} else {
return Ok(());
@ -179,25 +240,29 @@ impl Accountant {
// Check to see if any timelocked transactions can be completed.
let mut completed = vec![];
for (key, plan) in &mut self.pending {
plan.apply_witness(&Witness::Timestamp(self.last_time));
if plan.is_complete() {
complete_transaction(&mut self.balances, plan);
// Hold 'pending' write lock until the end of this function. Otherwise another thread can
// double-spend if it enters before the modified plan is removed from 'pending'.
let mut pending = self.pending.write().unwrap();
for (key, plan) in pending.iter_mut() {
plan.apply_witness(&Witness::Timestamp(*self.last_time.read().unwrap()));
if let Some(ref payment) = plan.final_payment() {
apply_payment(&self.balances, payment);
completed.push(key.clone());
}
}
for key in completed {
self.pending.remove(&key);
pending.remove(&key);
}
Ok(())
}
/// Process a Transaction or Witness that has already been verified.
fn process_verified_event(self: &mut Self, event: &Event, allow_deposits: bool) -> Result<()> {
pub fn process_verified_event(&self, event: &Event) -> Result<()> {
match *event {
Event::Transaction(ref tr) => self.process_verified_transaction(tr, allow_deposits),
Event::Transaction(ref tr) => self.process_verified_transaction(tr),
Event::Signature { from, tx_sig, .. } => self.process_verified_sig(from, tx_sig),
Event::Timestamp { from, dt, .. } => self.process_verified_timestamp(from, dt),
}
@ -206,7 +271,7 @@ impl Accountant {
/// Create, sign, and process a Transaction from `keypair` to `to` of
/// `n` tokens where `last_id` is the last Entry ID observed by the client.
pub fn transfer(
self: &mut Self,
&self,
n: i64,
keypair: &KeyPair,
to: PublicKey,
@ -214,14 +279,14 @@ impl Accountant {
) -> Result<Signature> {
let tr = Transaction::new(keypair, to, n, last_id);
let sig = tr.sig;
self.log_transaction(tr).map(|_| sig)
self.process_verified_transaction(&tr).map(|_| sig)
}
/// Create, sign, and process a postdated Transaction from `keypair`
/// to `to` of `n` tokens on `dt` where `last_id` is the last Entry ID
/// observed by the client.
pub fn transfer_on_date(
self: &mut Self,
&self,
n: i64,
keypair: &KeyPair,
to: PublicKey,
@ -230,112 +295,82 @@ impl Accountant {
) -> Result<Signature> {
let tr = Transaction::new_on_date(keypair, to, dt, n, last_id);
let sig = tr.sig;
self.log_transaction(tr).map(|_| sig)
self.process_verified_transaction(&tr).map(|_| sig)
}
pub fn get_balance(self: &Self, pubkey: &PublicKey) -> Option<i64> {
self.balances.get(pubkey).cloned()
pub fn get_balance(&self, pubkey: &PublicKey) -> Option<i64> {
let bals = self.balances.read().unwrap();
bals.get(pubkey).map(|x| *x.read().unwrap())
}
}
#[cfg(test)]
mod tests {
use super::*;
use recorder::ExitReason;
use bincode::serialize;
use hash::hash;
use signature::KeyPairUtil;
#[test]
fn test_accountant() {
let alice = Mint::new(10_000);
let bob_pubkey = KeyPair::new().pubkey();
let mut acc = Accountant::new(&alice, Some(2));
acc.transfer(1_000, &alice.keypair(), bob_pubkey, alice.seed())
let acc = Accountant::new(&alice);
acc.transfer(1_000, &alice.keypair(), bob_pubkey, alice.last_id())
.unwrap();
assert_eq!(acc.get_balance(&bob_pubkey).unwrap(), 1_000);
acc.transfer(500, &alice.keypair(), bob_pubkey, alice.seed())
acc.transfer(500, &alice.keypair(), bob_pubkey, alice.last_id())
.unwrap();
assert_eq!(acc.get_balance(&bob_pubkey).unwrap(), 1_500);
}
drop(acc.historian.sender);
#[test]
fn test_account_not_found() {
let mint = Mint::new(1);
let acc = Accountant::new(&mint);
assert_eq!(
acc.historian.thread_hdl.join().unwrap(),
ExitReason::RecvDisconnected
acc.transfer(1, &KeyPair::new(), mint.pubkey(), mint.last_id()),
Err(AccountingError::AccountNotFound)
);
}
#[test]
fn test_invalid_transfer() {
let alice = Mint::new(11_000);
let mut acc = Accountant::new(&alice, Some(2));
let acc = Accountant::new(&alice);
let bob_pubkey = KeyPair::new().pubkey();
acc.transfer(1_000, &alice.keypair(), bob_pubkey, alice.seed())
acc.transfer(1_000, &alice.keypair(), bob_pubkey, alice.last_id())
.unwrap();
assert_eq!(
acc.transfer(10_001, &alice.keypair(), bob_pubkey, alice.seed()),
acc.transfer(10_001, &alice.keypair(), bob_pubkey, alice.last_id()),
Err(AccountingError::InsufficientFunds)
);
let alice_pubkey = alice.keypair().pubkey();
assert_eq!(acc.get_balance(&alice_pubkey).unwrap(), 10_000);
assert_eq!(acc.get_balance(&bob_pubkey).unwrap(), 1_000);
drop(acc.historian.sender);
assert_eq!(
acc.historian.thread_hdl.join().unwrap(),
ExitReason::RecvDisconnected
);
}
#[test]
fn test_overspend_attack() {
let alice = Mint::new(1);
let mut acc = Accountant::new(&alice, None);
let bob_pubkey = KeyPair::new().pubkey();
let mut tr = Transaction::new(&alice.keypair(), bob_pubkey, 1, alice.seed());
if let Plan::Pay(ref mut payment) = tr.plan {
payment.tokens = 2; // <-- attack!
}
assert_eq!(
acc.log_transaction(tr.clone()),
Err(AccountingError::InvalidTransfer)
);
// Also, ensure all branches of the plan spend all tokens
if let Plan::Pay(ref mut payment) = tr.plan {
payment.tokens = 0; // <-- whoops!
}
assert_eq!(
acc.log_transaction(tr.clone()),
Err(AccountingError::InvalidTransfer)
);
}
#[test]
fn test_transfer_to_newb() {
let alice = Mint::new(10_000);
let mut acc = Accountant::new(&alice, Some(2));
let acc = Accountant::new(&alice);
let alice_keypair = alice.keypair();
let bob_pubkey = KeyPair::new().pubkey();
acc.transfer(500, &alice_keypair, bob_pubkey, alice.seed())
acc.transfer(500, &alice_keypair, bob_pubkey, alice.last_id())
.unwrap();
assert_eq!(acc.get_balance(&bob_pubkey).unwrap(), 500);
drop(acc.historian.sender);
assert_eq!(
acc.historian.thread_hdl.join().unwrap(),
ExitReason::RecvDisconnected
);
}
#[test]
fn test_transfer_on_date() {
let alice = Mint::new(1);
let mut acc = Accountant::new(&alice, Some(2));
let acc = Accountant::new(&alice);
let alice_keypair = alice.keypair();
let bob_pubkey = KeyPair::new().pubkey();
let dt = Utc::now();
acc.transfer_on_date(1, &alice_keypair, bob_pubkey, dt, alice.seed())
acc.transfer_on_date(1, &alice_keypair, bob_pubkey, dt, alice.last_id())
.unwrap();
// Alice's balance will be zero because all funds are locked up.
@ -357,14 +392,14 @@ mod tests {
#[test]
fn test_transfer_after_date() {
let alice = Mint::new(1);
let mut acc = Accountant::new(&alice, Some(2));
let acc = Accountant::new(&alice);
let alice_keypair = alice.keypair();
let bob_pubkey = KeyPair::new().pubkey();
let dt = Utc::now();
acc.process_verified_timestamp(alice.pubkey(), dt).unwrap();
// It's now past now, so this transfer should be processed immediately.
acc.transfer_on_date(1, &alice_keypair, bob_pubkey, dt, alice.seed())
acc.transfer_on_date(1, &alice_keypair, bob_pubkey, dt, alice.last_id())
.unwrap();
assert_eq!(acc.get_balance(&alice.pubkey()), Some(0));
@ -374,11 +409,11 @@ mod tests {
#[test]
fn test_cancel_transfer() {
let alice = Mint::new(1);
let mut acc = Accountant::new(&alice, Some(2));
let acc = Accountant::new(&alice);
let alice_keypair = alice.keypair();
let bob_pubkey = KeyPair::new().pubkey();
let dt = Utc::now();
let sig = acc.transfer_on_date(1, &alice_keypair, bob_pubkey, dt, alice.seed())
let sig = acc.transfer_on_date(1, &alice_keypair, bob_pubkey, dt, alice.last_id())
.unwrap();
// Alice's balance will be zero because all funds are locked up.
@ -396,4 +431,96 @@ mod tests {
acc.process_verified_sig(alice.pubkey(), sig).unwrap(); // <-- Attack! Attempt to cancel completed transaction.
assert_ne!(acc.get_balance(&alice.pubkey()), Some(2));
}
#[test]
fn test_duplicate_event_signature() {
let alice = Mint::new(1);
let acc = Accountant::new(&alice);
let sig = Signature::default();
assert!(acc.reserve_signature_with_last_id(&sig, &alice.last_id()));
assert!(!acc.reserve_signature_with_last_id(&sig, &alice.last_id()));
}
#[test]
fn test_forget_signature() {
let alice = Mint::new(1);
let acc = Accountant::new(&alice);
let sig = Signature::default();
acc.reserve_signature_with_last_id(&sig, &alice.last_id());
assert!(acc.forget_signature_with_last_id(&sig, &alice.last_id()));
assert!(!acc.forget_signature_with_last_id(&sig, &alice.last_id()));
}
#[test]
fn test_max_entry_ids() {
let alice = Mint::new(1);
let acc = Accountant::new(&alice);
let sig = Signature::default();
for i in 0..MAX_ENTRY_IDS {
let last_id = hash(&serialize(&i).unwrap()); // Unique hash
acc.register_entry_id(&last_id);
}
// Assert we're no longer able to use the oldest entry ID.
assert!(!acc.reserve_signature_with_last_id(&sig, &alice.last_id()));
}
#[test]
fn test_debits_before_credits() {
let mint = Mint::new(2);
let acc = Accountant::new(&mint);
let alice = KeyPair::new();
let tr0 = Transaction::new(&mint.keypair(), alice.pubkey(), 2, mint.last_id());
let tr1 = Transaction::new(&alice, mint.pubkey(), 1, mint.last_id());
let trs = vec![tr0, tr1];
assert!(acc.process_verified_transactions(trs)[1].is_err());
}
}
#[cfg(all(feature = "unstable", test))]
mod bench {
extern crate test;
use self::test::Bencher;
use accountant::*;
use bincode::serialize;
use hash::hash;
use signature::KeyPairUtil;
#[bench]
fn process_verified_event_bench(bencher: &mut Bencher) {
let mint = Mint::new(100_000_000);
let acc = Accountant::new(&mint);
// Create transactions between unrelated parties.
let transactions: Vec<_> = (0..4096)
.into_par_iter()
.map(|i| {
// Seed the 'from' account.
let rando0 = KeyPair::new();
let tr = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, mint.last_id());
acc.process_verified_transaction(&tr).unwrap();
// Seed the 'to' account and a cell for its signature.
let last_id = hash(&serialize(&i).unwrap()); // Unique hash
acc.register_entry_id(&last_id);
let rando1 = KeyPair::new();
let tr = Transaction::new(&rando0, rando1.pubkey(), 1, last_id);
acc.process_verified_transaction(&tr).unwrap();
// Finally, return a transaction that's unique
Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
})
.collect();
bencher.iter(|| {
// Since the benchmarker runs this multiple times, we need to clear the signatures.
for sigs in acc.last_ids.read().unwrap().iter() {
sigs.1.write().unwrap().clear();
}
assert!(
acc.process_verified_transactions(transactions.clone())
.iter()
.all(|x| x.is_ok())
);
});
}
}

src/accountant_skel.rs

@ -4,154 +4,325 @@
use accountant::Accountant;
use bincode::{deserialize, serialize};
use ecdsa;
use entry::Entry;
use event::Event;
use hash::Hash;
use historian::Historian;
use packet;
use packet::SharedPackets;
use rayon::prelude::*;
use recorder::Signal;
use result::Result;
use serde_json;
use signature::PublicKey;
use std::default::Default;
use std::cmp::max;
use std::collections::VecDeque;
use std::io::Write;
use std::net::{SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::channel;
use std::sync::{Arc, Mutex};
use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::{Arc, Mutex, RwLock};
use std::thread::{spawn, JoinHandle};
use std::time::Duration;
use streamer;
use transaction::Transaction;
use rayon::prelude::*;
use subscribers;
pub struct AccountantSkel<W: Write + Send + 'static> {
pub acc: Accountant,
pub last_id: Hash,
acc: Accountant,
last_id: Hash,
writer: W,
historian: Historian,
entry_info_subscribers: Vec<SocketAddr>,
}
#[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))]
#[derive(Serialize, Deserialize, Debug)]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum Request {
Transaction(Transaction),
GetBalance { key: PublicKey },
GetId { is_last: bool },
GetLastId,
Subscribe { subscriptions: Vec<Subscription> },
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum Subscription {
EntryInfo,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct EntryInfo {
pub id: Hash,
pub num_hashes: u64,
pub num_events: u64,
}
impl Request {
/// Verify the request is valid.
pub fn verify(&self) -> bool {
match *self {
Request::Transaction(ref tr) => tr.verify(),
Request::Transaction(ref tr) => tr.verify_plan(),
_ => true,
}
}
}
/// Parallel verification of a batch of requests.
fn filter_valid_requests(reqs: Vec<(Request, SocketAddr)>) -> Vec<(Request, SocketAddr)> {
reqs.into_par_iter().filter({ |x| x.0.verify() }).collect()
}
#[derive(Serialize, Deserialize, Debug)]
pub enum Response {
Balance { key: PublicKey, val: Option<i64> },
Entries { entries: Vec<Entry> },
Id { id: Hash, is_last: bool },
EntryInfo(EntryInfo),
LastId { id: Hash },
}
impl<W: Write + Send + 'static> AccountantSkel<W> {
/// Create a new AccountantSkel that wraps the given Accountant.
pub fn new(acc: Accountant, w: W) -> Self {
let last_id = acc.first_id;
pub fn new(acc: Accountant, last_id: Hash, writer: W, historian: Historian) -> Self {
AccountantSkel {
acc,
last_id,
writer: w,
writer,
historian,
entry_info_subscribers: vec![],
}
}
fn notify_entry_info_subscribers(&mut self, entry: &Entry) {
// TODO: No need to bind().
let socket = UdpSocket::bind("127.0.0.1:0").expect("bind");
for addr in &self.entry_info_subscribers {
let entry_info = EntryInfo {
id: entry.id,
num_hashes: entry.num_hashes,
num_events: entry.events.len() as u64,
};
let data = serialize(&Response::EntryInfo(entry_info)).expect("serialize EntryInfo");
let _res = socket.send_to(&data, addr);
}
}
/// Process any Entry items that have been published by the Historian.
pub fn sync(&mut self) -> Hash {
while let Ok(entry) = self.acc.historian.receiver.try_recv() {
while let Ok(entry) = self.historian.receiver.try_recv() {
self.last_id = entry.id;
self.acc.register_entry_id(&self.last_id);
writeln!(self.writer, "{}", serde_json::to_string(&entry).unwrap()).unwrap();
self.notify_entry_info_subscribers(&entry);
}
self.last_id
}
/// Process Request items sent by clients.
pub fn log_verified_request(&mut self, msg: Request) -> Option<Response> {
pub fn process_request(
&mut self,
msg: Request,
rsp_addr: SocketAddr,
) -> Option<(Response, SocketAddr)> {
match msg {
Request::Transaction(tr) => {
if let Err(err) = self.acc.log_verified_transaction(tr) {
eprintln!("Transaction error: {:?}", err);
Request::GetBalance { key } => {
let val = self.acc.get_balance(&key);
Some((Response::Balance { key, val }, rsp_addr))
}
Request::GetLastId => Some((Response::LastId { id: self.sync() }, rsp_addr)),
Request::Transaction(_) => unreachable!(),
Request::Subscribe { subscriptions } => {
for subscription in subscriptions {
match subscription {
Subscription::EntryInfo => self.entry_info_subscribers.push(rsp_addr),
}
}
None
}
Request::GetBalance { key } => {
let val = self.acc.get_balance(&key);
Some(Response::Balance { key, val })
}
Request::GetId { is_last } => Some(Response::Id {
id: if is_last {
self.sync()
} else {
self.acc.first_id
},
is_last,
}),
}
}
fn recv_batch(recvr: &streamer::PacketReceiver) -> Result<Vec<SharedPackets>> {
let timer = Duration::new(1, 0);
let msgs = recvr.recv_timeout(timer)?;
trace!("got msgs");
let mut batch = vec![msgs];
while let Ok(more) = recvr.try_recv() {
trace!("got more msgs");
batch.push(more);
}
info!("batch len {}", batch.len());
Ok(batch)
}
fn verify_batch(batch: Vec<SharedPackets>) -> Vec<Vec<(SharedPackets, Vec<u8>)>> {
let chunk_size = max(1, (batch.len() + 3) / 4);
let batches: Vec<_> = batch.chunks(chunk_size).map(|x| x.to_vec()).collect();
batches
.into_par_iter()
.map(|batch| {
let r = ecdsa::ed25519_verify(&batch);
batch.into_iter().zip(r).collect()
})
.collect()
}
fn verifier(
recvr: &streamer::PacketReceiver,
sendr: &Sender<Vec<(SharedPackets, Vec<u8>)>>,
) -> Result<()> {
let batch = Self::recv_batch(recvr)?;
let verified_batches = Self::verify_batch(batch);
for xs in verified_batches {
sendr.send(xs)?;
}
Ok(())
}
pub fn deserialize_packets(p: &packet::Packets) -> Vec<Option<(Request, SocketAddr)>> {
p.packets
.par_iter()
.map(|x| {
deserialize(&x.data[0..x.meta.size])
.map(|req| (req, x.meta.addr()))
.ok()
})
.collect()
}
/// Split Request list into verified transactions and the rest
fn partition_requests(
req_vers: Vec<(Request, SocketAddr, u8)>,
) -> (Vec<Transaction>, Vec<(Request, SocketAddr)>) {
let mut trs = vec![];
let mut reqs = vec![];
for (msg, rsp_addr, verify) in req_vers {
match msg {
Request::Transaction(tr) => {
if verify != 0 {
trs.push(tr);
}
}
_ => reqs.push((msg, rsp_addr)),
}
}
(trs, reqs)
}
fn process_packets(
&mut self,
req_vers: Vec<(Request, SocketAddr, u8)>,
) -> Result<Vec<(Response, SocketAddr)>> {
let (trs, reqs) = Self::partition_requests(req_vers);
// Process the transactions in parallel and then log the successful ones.
for result in self.acc.process_verified_transactions(trs) {
if let Ok(tr) = result {
self.historian
.sender
.send(Signal::Event(Event::Transaction(tr)))?;
}
}
// Let validators know they should not attempt to process additional
// transactions in parallel.
self.historian.sender.send(Signal::Tick)?;
// Process the remaining requests serially.
let rsps = reqs.into_iter()
.filter_map(|(req, rsp_addr)| self.process_request(req, rsp_addr))
.collect();
Ok(rsps)
}
fn serialize_response(
resp: Response,
rsp_addr: SocketAddr,
blob_recycler: &packet::BlobRecycler,
) -> Result<packet::SharedBlob> {
let blob = blob_recycler.allocate();
{
let mut b = blob.write().unwrap();
let v = serialize(&resp)?;
let len = v.len();
b.data[..len].copy_from_slice(&v);
b.meta.size = len;
b.meta.set_addr(&rsp_addr);
}
Ok(blob)
}
fn serialize_responses(
rsps: Vec<(Response, SocketAddr)>,
blob_recycler: &packet::BlobRecycler,
) -> Result<VecDeque<packet::SharedBlob>> {
let mut blobs = VecDeque::new();
for (resp, rsp_addr) in rsps {
blobs.push_back(Self::serialize_response(resp, rsp_addr, blob_recycler)?);
}
Ok(blobs)
}
fn process(
obj: &Arc<Mutex<AccountantSkel<W>>>,
r_reader: &streamer::Receiver,
s_responder: &streamer::Responder,
packet_recycler: &streamer::PacketRecycler,
response_recycler: &streamer::ResponseRecycler,
verified_receiver: &Receiver<Vec<(SharedPackets, Vec<u8>)>>,
blob_sender: &streamer::BlobSender,
packet_recycler: &packet::PacketRecycler,
blob_recycler: &packet::BlobRecycler,
) -> Result<()> {
let timer = Duration::new(1, 0);
let msgs = r_reader.recv_timeout(timer)?;
let msgs_ = msgs.clone();
let rsps = streamer::allocate(response_recycler);
let rsps_ = rsps.clone();
{
let mut reqs = vec![];
for packet in &msgs.read().unwrap().packets {
let rsp_addr = packet.meta.get_addr();
let sz = packet.meta.size;
let req = deserialize(&packet.data[0..sz])?;
reqs.push((req, rsp_addr));
let mms = verified_receiver.recv_timeout(timer)?;
for (msgs, vers) in mms {
let reqs = Self::deserialize_packets(&msgs.read().unwrap());
let req_vers = reqs.into_iter()
.zip(vers)
.filter_map(|(req, ver)| req.map(|(msg, addr)| (msg, addr, ver)))
.filter(|x| x.0.verify())
.collect();
let rsps = obj.lock().unwrap().process_packets(req_vers)?;
let blobs = Self::serialize_responses(rsps, blob_recycler)?;
if !blobs.is_empty() {
//don't wake up the other side if there is nothing
blob_sender.send(blobs)?;
}
let reqs = filter_valid_requests(reqs);
packet_recycler.recycle(msgs);
let mut num = 0;
let mut ursps = rsps.write().unwrap();
for (req, rsp_addr) in reqs {
if let Some(resp) = obj.lock().unwrap().log_verified_request(req) {
if ursps.responses.len() <= num {
ursps
.responses
.resize((num + 1) * 2, streamer::Response::default());
}
let rsp = &mut ursps.responses[num];
let v = serialize(&resp)?;
let len = v.len();
rsp.data[..len].copy_from_slice(&v);
rsp.meta.size = len;
rsp.meta.set_addr(&rsp_addr);
num += 1;
}
}
ursps.responses.resize(num, streamer::Response::default());
// Write new entries to the ledger and notify subscribers.
obj.lock().unwrap().sync();
}
Ok(())
}
/// Process verified blobs, already in order
/// Respond with a signed hash of the state
fn replicate_state(
obj: &Arc<Mutex<AccountantSkel<W>>>,
verified_receiver: &streamer::BlobReceiver,
blob_recycler: &packet::BlobRecycler,
) -> Result<()> {
let timer = Duration::new(1, 0);
let blobs = verified_receiver.recv_timeout(timer)?;
for msgs in &blobs {
let blob = msgs.read().unwrap();
let entries: Vec<Entry> = deserialize(&blob.data()[..blob.meta.size]).unwrap();
for entry in entries {
obj.lock().unwrap().acc.register_entry_id(&entry.id);
obj.lock()
.unwrap()
.acc
.process_verified_events(entry.events)?;
}
//TODO respond back to leader with hash of the state
}
for blob in blobs {
blob_recycler.recycle(blob);
}
s_responder.send(rsps_)?;
streamer::recycle(packet_recycler, msgs_);
Ok(())
}
/// Create a UDP microservice that forwards messages to the given AccountantSkel.
/// This service is the network leader
/// Set `exit` to shutdown its threads.
pub fn serve(
obj: Arc<Mutex<AccountantSkel<W>>>,
obj: &Arc<Mutex<AccountantSkel<W>>>,
addr: &str,
exit: Arc<AtomicBool>,
) -> Result<Vec<JoinHandle<()>>> {
@ -161,29 +332,479 @@ impl<W: Write + Send + 'static> AccountantSkel<W> {
local.set_port(0);
let write = UdpSocket::bind(local)?;
let packet_recycler = Arc::new(Mutex::new(Vec::new()));
let response_recycler = Arc::new(Mutex::new(Vec::new()));
let (s_reader, r_reader) = channel();
let t_receiver = streamer::receiver(read, exit.clone(), packet_recycler.clone(), s_reader)?;
let (s_responder, r_responder) = channel();
let packet_recycler = packet::PacketRecycler::default();
let blob_recycler = packet::BlobRecycler::default();
let (packet_sender, packet_receiver) = channel();
let t_receiver =
streamer::receiver(read, exit.clone(), packet_recycler.clone(), packet_sender)?;
let (blob_sender, blob_receiver) = channel();
let t_responder =
streamer::responder(write, exit.clone(), blob_recycler.clone(), blob_receiver);
let (verified_sender, verified_receiver) = channel();
let exit_ = exit.clone();
let t_verifier = spawn(move || loop {
let e = Self::verifier(&packet_receiver, &verified_sender);
if e.is_err() && exit_.load(Ordering::Relaxed) {
break;
}
});
let skel = obj.clone();
let t_server = spawn(move || loop {
let e = Self::process(
&skel,
&verified_receiver,
&blob_sender,
&packet_recycler,
&blob_recycler,
);
if e.is_err() {
// Assume this was a timeout, so sync any empty entries.
skel.lock().unwrap().sync();
if exit.load(Ordering::Relaxed) {
break;
}
}
});
Ok(vec![t_receiver, t_responder, t_server, t_verifier])
}
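// Taken together, serve() stands up a four-stage pipeline, with
// packet_receiver, verified_receiver, and blob_receiver as the channels
// between the stages:
//
//   UDP read socket
//     -> t_receiver   (streamer::receiver: batches raw packets)
//     -> t_verifier   (Self::verifier: signature verification)
//     -> t_server     (Self::process: process_packets, then sync() to ledger)
//     -> t_responder  (streamer::responder: sends response blobs)
//   -> UDP write socket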
/// This service receives messages from a leader in the network and processes the transactions
/// on the accountant state.
/// # Arguments
/// * `obj` - The accountant state.
/// * `rsubs` - The subscribers.
/// * `exit` - The exit signal.
/// # Remarks
/// The pipeline is constructed as follows:
/// 1. receive blobs from the network, these are out of order
/// 2. verify blobs, PoH, signatures (TODO)
/// 3. reconstruct contiguous window
/// a. order the blobs
/// b. use erasure coding to reconstruct missing blobs
/// c. ask the network for missing blobs, if erasure coding is insufficient
/// d. make sure that the blobs' PoH sequences connect (TODO)
/// 4. process the transaction state machine
/// 5. respond with the hash of the state back to the leader
pub fn replicate(
obj: &Arc<Mutex<AccountantSkel<W>>>,
rsubs: subscribers::Subscribers,
exit: Arc<AtomicBool>,
) -> Result<Vec<JoinHandle<()>>> {
let read = UdpSocket::bind(rsubs.me.addr)?;
// make sure we are on the same interface
let mut local = read.local_addr()?;
local.set_port(0);
let write = UdpSocket::bind(local)?;
let blob_recycler = packet::BlobRecycler::default();
let (blob_sender, blob_receiver) = channel();
let t_blob_receiver = streamer::blob_receiver(
exit.clone(),
blob_recycler.clone(),
read,
blob_sender.clone(),
)?;
let (window_sender, window_receiver) = channel();
let (retransmit_sender, retransmit_receiver) = channel();
let subs = Arc::new(RwLock::new(rsubs));
let t_retransmit = streamer::retransmitter(
write,
exit.clone(),
subs.clone(),
blob_recycler.clone(),
retransmit_receiver,
);
//TODO
//the packets coming out of blob_receiver need to be sent to the GPU and verified
//then sent to the window, which does the erasure coding reconstruction
let t_window = streamer::window(
exit.clone(),
subs,
blob_recycler.clone(),
blob_receiver,
window_sender,
retransmit_sender,
);
let skel = obj.clone();
let t_server = spawn(move || loop {
let e = Self::replicate_state(&skel, &window_receiver, &blob_recycler);
if e.is_err() && exit.load(Ordering::Relaxed) {
break;
}
});
Ok(vec![t_blob_receiver, t_retransmit, t_window, t_server])
}
}
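// Step 3 of the replicate() pipeline above (reconstructing a contiguous
// window from out-of-order blobs) is delegated to streamer::window. What
// follows is only a minimal sketch of the slotting idea, with a hypothetical
// `Blob` type and no retransmission or erasure handling; it assumes the
// window slice is exactly WINDOW_SIZE entries long.

const WINDOW_SIZE: u64 = 1024;

struct Blob {
    index: u64, // sequence number assigned by the leader
}

/// Slot one out-of-order blob into the ring, then drain the contiguous run
/// starting at `*consumed`; a gap waits for retransmission or erasure repair.
fn slot_into_window(window: &mut [Option<Blob>], consumed: &mut u64, b: Blob) -> Vec<Blob> {
    // Ignore stale duplicates; everything below `consumed` was already delivered.
    if b.index >= *consumed {
        window[(b.index % WINDOW_SIZE) as usize] = Some(b);
    }
    let mut run = Vec::new();
    loop {
        let k = (*consumed % WINDOW_SIZE) as usize;
        match window[k].take() {
            Some(blob) if blob.index == *consumed => {
                run.push(blob);
                *consumed += 1;
            }
            other => {
                window[k] = other; // not the next blob in sequence; stop at the gap
                break;
            }
        }
    }
    run
}

// Only the contiguous run starting at `consumed` is released downstream;
// the gaps are what the erasure code in erasure.rs repairs toward.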
#[cfg(test)]
pub fn to_packets(r: &packet::PacketRecycler, reqs: Vec<Request>) -> Vec<SharedPackets> {
let mut out = vec![];
for rrs in reqs.chunks(packet::NUM_PACKETS) {
let p = r.allocate();
p.write()
.unwrap()
.packets
.resize(rrs.len(), Default::default());
for (i, o) in rrs.iter().zip(p.write().unwrap().packets.iter_mut()) {
let v = serialize(&i).expect("serialize request");
let len = v.len();
o.data[..len].copy_from_slice(&v);
o.meta.size = len;
}
out.push(p);
}
out
}
#[cfg(test)]
mod tests {
use accountant_skel::{to_packets, Request};
use bincode::serialize;
use ecdsa;
use packet::{BlobRecycler, PacketRecycler, NUM_PACKETS};
use transaction::{memfind, test_tx};
use accountant::Accountant;
use accountant_skel::AccountantSkel;
use accountant_stub::AccountantStub;
use entry::Entry;
use futures::Future;
use historian::Historian;
use mint::Mint;
use plan::Plan;
use recorder::Signal;
use signature::{KeyPair, KeyPairUtil};
use std::io::sink;
use std::net::{SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use std::thread::sleep;
use std::time::Duration;
use transaction::Transaction;
use subscribers::{Node, Subscribers};
use streamer;
use std::sync::mpsc::channel;
use std::collections::VecDeque;
use hash::{hash, Hash};
use event::Event;
use entry;
use chrono::prelude::*;
#[test]
fn test_layout() {
let tr = test_tx();
let tx = serialize(&tr).unwrap();
let packet = serialize(&Request::Transaction(tr)).unwrap();
assert_matches!(memfind(&packet, &tx), Some(ecdsa::TX_OFFSET));
assert_matches!(memfind(&packet, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), None);
}
#[test]
fn test_to_packets() {
let tr = Request::Transaction(test_tx());
let re = PacketRecycler::default();
let rv = to_packets(&re, vec![tr.clone(); 1]);
assert_eq!(rv.len(), 1);
assert_eq!(rv[0].read().unwrap().packets.len(), 1);
let rv = to_packets(&re, vec![tr.clone(); NUM_PACKETS]);
assert_eq!(rv.len(), 1);
assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);
let rv = to_packets(&re, vec![tr.clone(); NUM_PACKETS + 1]);
assert_eq!(rv.len(), 2);
assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);
assert_eq!(rv[1].read().unwrap().packets.len(), 1);
}
#[test]
fn test_accounting_sequential_consistency() {
// In this attack we'll demonstrate that a verifier can interpret the ledger
// differently if either the server doesn't signal the ledger to add an
// Entry OR if the verifier tries to parallelize across multiple Entries.
let mint = Mint::new(2);
let acc = Accountant::new(&mint);
let rsp_addr: SocketAddr = "0.0.0.0:0".parse().expect("socket address");
let historian = Historian::new(&mint.last_id(), None);
let mut skel = AccountantSkel::new(acc, mint.last_id(), sink(), historian);
// Process a batch that includes a transaction that receives two tokens.
let alice = KeyPair::new();
let tr = Transaction::new(&mint.keypair(), alice.pubkey(), 2, mint.last_id());
let req_vers = vec![(Request::Transaction(tr), rsp_addr, 1_u8)];
assert!(skel.process_packets(req_vers).is_ok());
// Process a second batch that spends one of those tokens.
let tr = Transaction::new(&alice, mint.pubkey(), 1, mint.last_id());
let req_vers = vec![(Request::Transaction(tr), rsp_addr, 1_u8)];
assert!(skel.process_packets(req_vers).is_ok());
// Collect the ledger and feed it to a new accountant.
skel.historian.sender.send(Signal::Tick).unwrap();
drop(skel.historian.sender);
let entries: Vec<Entry> = skel.historian.receiver.iter().collect();
// Assert the user holds one token, not two. If the server only output one
// entry, then the second transaction will be rejected, because it drives
// the account balance below zero before the credit is added.
let acc = Accountant::new(&mint);
for entry in entries {
acc.process_verified_events(entry.events).unwrap();
}
assert_eq!(acc.get_balance(&alice.pubkey()), Some(1));
}
#[test]
fn test_accountant_bad_sig() {
let serve_port = 9002;
let send_port = 9003;
let addr = format!("127.0.0.1:{}", serve_port);
let send_addr = format!("127.0.0.1:{}", send_port);
let alice = Mint::new(10_000);
let acc = Accountant::new(&alice);
let bob_pubkey = KeyPair::new().pubkey();
let exit = Arc::new(AtomicBool::new(false));
let historian = Historian::new(&alice.last_id(), Some(30));
let acc = Arc::new(Mutex::new(AccountantSkel::new(
acc,
alice.last_id(),
sink(),
historian,
)));
let _threads = AccountantSkel::serve(&acc, &addr, exit.clone()).unwrap();
sleep(Duration::from_millis(300));
let socket = UdpSocket::bind(send_addr).unwrap();
socket.set_read_timeout(Some(Duration::new(5, 0))).unwrap();
let mut acc = AccountantStub::new(&addr, socket);
let last_id = acc.get_last_id().wait().unwrap();
let tr = Transaction::new(&alice.keypair(), bob_pubkey, 500, last_id);
let _sig = acc.transfer_signed(tr).unwrap();
let last_id = acc.get_last_id().wait().unwrap();
let mut tr2 = Transaction::new(&alice.keypair(), bob_pubkey, 501, last_id);
tr2.data.tokens = 502;
tr2.data.plan = Plan::new_payment(502, bob_pubkey);
let _sig = acc.transfer_signed(tr2).unwrap();
assert_eq!(acc.get_balance(&bob_pubkey).wait().unwrap(), 500);
exit.store(true, Ordering::Relaxed);
}
use std::sync::{Once, ONCE_INIT};
extern crate env_logger;
static INIT: Once = ONCE_INIT;
/// Setup function that is only run once, even if called multiple times.
fn setup() {
INIT.call_once(|| {
env_logger::init().unwrap();
});
}
#[test]
fn test_replicate() {
setup();
let leader_sock = UdpSocket::bind("127.0.0.1:0").expect("bind");
let leader_addr = leader_sock.local_addr().unwrap();
let me_addr = "127.0.0.1:9010".parse().unwrap();
let target_peer_sock = UdpSocket::bind("127.0.0.1:0").expect("bind");
let target_peer_addr = target_peer_sock.local_addr().unwrap();
let source_peer_sock = UdpSocket::bind("127.0.0.1:0").expect("bind");
let exit = Arc::new(AtomicBool::new(false));
let node_me = Node::new([0, 0, 0, 0, 0, 0, 0, 1], 10, me_addr);
let node_subs = vec![Node::new([0, 0, 0, 0, 0, 0, 0, 2], 8, target_peer_addr); 1];
let node_leader = Node::new([0, 0, 0, 0, 0, 0, 0, 3], 20, leader_addr);
let subs = Subscribers::new(node_me, node_leader, &node_subs);
// setup some blob services to send blobs into the socket
// to simulate the source peer and get blobs out of the socket to
// simulate target peer
let recv_recycler = BlobRecycler::default();
let resp_recycler = BlobRecycler::default();
let (s_reader, r_reader) = channel();
let t_receiver = streamer::blob_receiver(
exit.clone(),
recv_recycler.clone(),
target_peer_sock,
s_reader,
).unwrap();
let (s_responder, r_responder) = channel();
let t_responder = streamer::responder(
source_peer_sock,
exit.clone(),
resp_recycler.clone(),
r_responder,
);
let starting_balance = 10_000;
let alice = Mint::new(starting_balance);
let acc = Accountant::new(&alice);
let historian = Historian::new(&alice.last_id(), Some(30));
let acc = Arc::new(Mutex::new(AccountantSkel::new(
acc,
alice.last_id(),
sink(),
historian,
)));
let _threads = AccountantSkel::replicate(&acc, subs, exit.clone()).unwrap();
let mut alice_ref_balance = starting_balance;
let mut msgs = VecDeque::new();
let mut cur_hash = Hash::default();
let num_blobs = 10;
let transfer_amount = 501;
let bob_keypair = KeyPair::new();
for i in 0..num_blobs {
let b = resp_recycler.allocate();
let b_ = b.clone();
let mut w = b.write().unwrap();
w.set_index(i).unwrap();
let tr0 = Event::new_timestamp(&bob_keypair, Utc::now());
let entry0 = entry::create_entry(&cur_hash, i, vec![tr0]);
acc.lock().unwrap().acc.register_entry_id(&cur_hash);
cur_hash = hash(&cur_hash);
let tr1 = Transaction::new(
&alice.keypair(),
bob_keypair.pubkey(),
transfer_amount,
cur_hash,
);
acc.lock().unwrap().acc.register_entry_id(&cur_hash);
cur_hash = hash(&cur_hash);
let entry1 =
entry::create_entry(&cur_hash, i + num_blobs, vec![Event::Transaction(tr1)]);
acc.lock().unwrap().acc.register_entry_id(&cur_hash);
cur_hash = hash(&cur_hash);
alice_ref_balance -= transfer_amount;
let serialized_entry = serialize(&vec![entry0, entry1]).unwrap();
w.data_mut()[..serialized_entry.len()].copy_from_slice(&serialized_entry);
w.set_size(serialized_entry.len());
w.meta.set_addr(&me_addr);
drop(w);
msgs.push_back(b_);
}
// send the blobs into the socket
s_responder.send(msgs).expect("send");
// receive retransmitted messages
let timer = Duration::new(1, 0);
let mut msgs: Vec<_> = Vec::new();
while let Ok(msg) = r_reader.recv_timeout(timer) {
trace!("msg: {:?}", msg);
msgs.push(msg);
}
let alice_balance = acc.lock()
.unwrap()
.acc
.get_balance(&alice.keypair().pubkey())
.unwrap();
assert_eq!(alice_balance, alice_ref_balance);
let bob_balance = acc.lock()
.unwrap()
.acc
.get_balance(&bob_keypair.pubkey())
.unwrap();
assert_eq!(bob_balance, starting_balance - alice_ref_balance);
exit.store(true, Ordering::Relaxed);
t_receiver.join().expect("join");
t_responder.join().expect("join");
}
}
#[cfg(all(feature = "unstable", test))]
mod bench {
extern crate test;
use self::test::Bencher;
use accountant::{Accountant, MAX_ENTRY_IDS};
use accountant_skel::*;
use bincode::serialize;
use entry::Entry;
use hash::{hash, Hash};
use historian::Historian;
use mint::Mint;
use rayon::prelude::*;
use signature::{KeyPair, KeyPairUtil};
use std::collections::HashSet;
use std::io::sink;
use std::net::SocketAddr;
use std::sync::Mutex;
use std::time::Instant;
use transaction::Transaction;
#[bench]
fn process_packets_bench(_bencher: &mut Bencher) {
let mint = Mint::new(100_000_000);
let acc = Accountant::new(&mint);
let rsp_addr: SocketAddr = "0.0.0.0:0".parse().expect("socket address");
// Create transactions between unrelated parties.
let txs = 100_000;
let last_ids: Mutex<HashSet<Hash>> = Mutex::new(HashSet::new());
let transactions: Vec<_> = (0..txs)
.into_par_iter()
.map(|i| {
// Seed the 'to' account and a cell for its signature.
let dummy_id = i % (MAX_ENTRY_IDS as i32);
let last_id = hash(&serialize(&dummy_id).unwrap()); // Semi-unique hash
{
let mut last_ids = last_ids.lock().unwrap();
if !last_ids.contains(&last_id) {
last_ids.insert(last_id);
acc.register_entry_id(&last_id);
}
}
// Seed the 'from' account.
let rando0 = KeyPair::new();
let tr = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, last_id);
acc.process_verified_transaction(&tr).unwrap();
let rando1 = KeyPair::new();
let tr = Transaction::new(&rando0, rando1.pubkey(), 2, last_id);
acc.process_verified_transaction(&tr).unwrap();
// Finally, return a transaction that's unique
Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
})
.collect();
let req_vers = transactions
.into_iter()
.map(|tr| (Request::Transaction(tr), rsp_addr, 1_u8))
.collect();
let historian = Historian::new(&mint.last_id(), None);
let mut skel = AccountantSkel::new(acc, mint.last_id(), sink(), historian);
let now = Instant::now();
assert!(skel.process_packets(req_vers).is_ok());
let duration = now.elapsed();
let sec = duration.as_secs() as f64 + duration.subsec_nanos() as f64 / 1_000_000_000.0;
let tps = txs as f64 / sec;
// Ensure that all transactions were successfully logged.
drop(skel.historian.sender);
let entries: Vec<Entry> = skel.historian.receiver.iter().collect();
assert_eq!(entries.len(), 1);
assert_eq!(entries[0].events.len(), txs as usize);
println!("{} tps", tps);
}
}

View File

@ -3,10 +3,12 @@
//! this object instead of writing messages to the network directly. The binary
//! encoding of its messages is unstable and may change in future releases.
use accountant_skel::{Request, Response, Subscription};
use bincode::{deserialize, serialize};
use futures::future::{ok, FutureResult};
use hash::Hash;
use signature::{KeyPair, PublicKey, Signature};
use std::collections::HashMap;
use std::io;
use std::net::UdpSocket;
use transaction::Transaction;
@ -14,6 +16,9 @@ use transaction::Transaction;
pub struct AccountantStub {
pub addr: String,
pub socket: UdpSocket,
last_id: Option<Hash>,
num_events: u64,
balances: HashMap<PublicKey, Option<i64>>,
}
impl AccountantStub {
@ -21,9 +26,43 @@ impl AccountantStub {
/// over `socket`. To receive responses, the caller must bind `socket`
/// to a public address before invoking AccountantStub methods.
pub fn new(addr: &str, socket: UdpSocket) -> Self {
let stub = AccountantStub {
addr: addr.to_string(),
socket,
last_id: None,
num_events: 0,
balances: HashMap::new(),
};
stub.init();
stub
}
pub fn init(&self) {
let subscriptions = vec![Subscription::EntryInfo];
let req = Request::Subscribe { subscriptions };
let data = serialize(&req).expect("serialize Subscribe");
let _res = self.socket.send_to(&data, &self.addr);
}
pub fn recv_response(&self) -> io::Result<Response> {
let mut buf = vec![0u8; 1024];
self.socket.recv_from(&mut buf)?;
let resp = deserialize(&buf).expect("deserialize balance");
Ok(resp)
}
pub fn process_response(&mut self, resp: Response) {
match resp {
Response::Balance { key, val } => {
self.balances.insert(key, val);
}
Response::LastId { id } => {
self.last_id = Some(id);
}
Response::EntryInfo(entry_info) => {
self.last_id = Some(entry_info.id);
self.num_events += entry_info.num_events;
}
}
}
@ -51,40 +90,67 @@ impl AccountantStub {
/// Request the balance of the user holding `pubkey`. This method blocks
/// until the server sends a response. If the response packet is dropped
/// by the network, this method will hang indefinitely.
pub fn get_balance(&mut self, pubkey: &PublicKey) -> FutureResult<i64, i64> {
let req = Request::GetBalance { key: *pubkey };
let data = serialize(&req).expect("serialize GetBalance");
self.socket
.send_to(&data, &self.addr)
.expect("buffer error");
let mut done = false;
while !done {
let resp = self.recv_response().expect("recv response");
if let &Response::Balance { ref key, .. } = &resp {
done = key == pubkey;
}
self.process_response(resp);
}
ok(self.balances[pubkey].unwrap())
}
/// Request the last Entry ID from the server. This method blocks
/// until the server sends a response. At the time of this writing,
/// it also has the side-effect of causing the server to log any
/// entries that have been published by the Historian.
pub fn get_last_id(&mut self) -> FutureResult<Hash, ()> {
let req = Request::GetLastId;
let data = serialize(&req).expect("serialize GetId");
self.socket
.send_to(&data, &self.addr)
.expect("buffer error");
let mut done = false;
while !done {
let resp = self.recv_response().expect("recv response");
if let &Response::LastId { .. } = &resp {
done = true;
}
self.process_response(resp);
}
ok(self.last_id.unwrap_or(Hash::default()))
}
/// Return the number of transactions the server processed since creating
/// this stub instance.
pub fn transaction_count(&mut self) -> u64 {
// Wait for at least one EntryInfo.
let mut done = false;
while !done {
let resp = self.recv_response().expect("recv response");
if let &Response::EntryInfo(_) = &resp {
done = true;
}
self.process_response(resp);
}
// Then take the rest.
self.socket.set_nonblocking(true).expect("set nonblocking");
loop {
match self.recv_response() {
Err(_) => break,
Ok(resp) => self.process_response(resp),
}
}
self.socket.set_nonblocking(false).expect("set blocking");
self.num_events
}
}
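// Note the pattern shared by get_balance, get_last_id, and transaction_count
// above: each call blocks until the response variant it cares about arrives,
// but routes every response it sees through process_response, so the cached
// balances, last_id, and num_events stay current as a side effect of any
// call. transaction_count additionally drains whatever else is queued in
// nonblocking mode so the count it reports is as fresh as possible.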
@ -93,6 +159,8 @@ mod tests {
use super::*;
use accountant::Accountant;
use accountant_skel::AccountantSkel;
use futures::Future;
use historian::Historian;
use mint::Mint;
use signature::{KeyPair, KeyPairUtil};
use std::io::sink;
@ -107,20 +175,27 @@ mod tests {
let addr = "127.0.0.1:9000";
let send_addr = "127.0.0.1:9001";
let alice = Mint::new(10_000);
let acc = Accountant::new(&alice);
let bob_pubkey = KeyPair::new().pubkey();
let exit = Arc::new(AtomicBool::new(false));
let historian = Historian::new(&alice.last_id(), Some(30));
let acc = Arc::new(Mutex::new(AccountantSkel::new(
acc,
alice.last_id(),
sink(),
historian,
)));
let _threads = AccountantSkel::serve(&acc, addr, exit.clone()).unwrap();
sleep(Duration::from_millis(300));
let socket = UdpSocket::bind(send_addr).unwrap();
socket.set_read_timeout(Some(Duration::new(5, 0))).unwrap();
let mut acc = AccountantStub::new(addr, socket);
let last_id = acc.get_last_id().wait().unwrap();
let _sig = acc.transfer(500, &alice.keypair(), bob_pubkey, &last_id)
.unwrap();
assert_eq!(acc.get_balance(&bob_pubkey).wait().unwrap(), 500);
exit.store(true, Ordering::Relaxed);
}
}

View File

@ -1,41 +1,106 @@
extern crate futures;
extern crate getopts;
extern crate isatty;
extern crate rayon;
extern crate serde_json;
extern crate solana;
extern crate untrusted;
use futures::Future;
use getopts::Options;
use isatty::stdin_isatty;
use rayon::prelude::*;
use solana::accountant_stub::AccountantStub;
use solana::mint::MintDemo;
use solana::signature::{KeyPair, KeyPairUtil};
use solana::transaction::Transaction;
use std::env;
use std::io::{stdin, Read};
use std::net::UdpSocket;
use std::process::exit;
use std::time::Instant;
use untrusted::Input;
fn print_usage(program: &str, opts: Options) {
let mut brief = format!("Usage: cat <mint.json> | {} [options]\n\n", program);
brief += " Solana client demo creates a number of transactions and\n";
brief += " sends them to a target node.";
brief += " Takes json formatted mint file to stdin.";
print!("{}", opts.usage(&brief));
}
fn main() {
let addr = "127.0.0.1:8000";
let send_addr = "127.0.0.1:8001";
let mut threads = 4usize;
let mut addr: String = "127.0.0.1:8000".to_string();
let mut send_addr: String = "127.0.0.1:8001".to_string();
let mut opts = Options::new();
opts.optopt("s", "", "server address", "host:port");
opts.optopt("c", "", "client address", "host:port");
opts.optopt("t", "", "number of threads", "4");
opts.optflag("h", "help", "print help");
let args: Vec<String> = env::args().collect();
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(e) => {
eprintln!("{}", e);
exit(1);
}
};
if matches.opt_present("h") {
let program = args[0].clone();
print_usage(&program, opts);
return;
}
if matches.opt_present("s") {
addr = matches.opt_str("s").unwrap();
}
if matches.opt_present("c") {
send_addr = matches.opt_str("c").unwrap();
}
if matches.opt_present("t") {
threads = matches.opt_str("t").unwrap().parse().expect("integer");
}
if stdin_isatty() {
eprintln!("nothing found on stdin, expected a json file");
exit(1);
}
let mut buffer = String::new();
let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
if num_bytes == 0 {
eprintln!("empty file on stdin, expected a json file");
exit(1);
}
println!("Parsing stdin...");
let demo: MintDemo = serde_json::from_str(&buffer).unwrap_or_else(|e| {
eprintln!("failed to parse json: {}", e);
exit(1);
});
let socket = UdpSocket::bind(&send_addr).unwrap();
let mut acc = AccountantStub::new(&addr, socket);
println!("Get last ID...");
let last_id = acc.get_last_id().wait().unwrap();
println!("Creating keypairs...");
let txs = demo.users.len() / 2;
let keypairs: Vec<_> = demo.users
.into_par_iter()
.map(|(pkcs8, _)| KeyPair::from_pkcs8(Input::from(&pkcs8)).unwrap())
.collect();
let keypair_pairs: Vec<_> = keypairs.chunks(2).collect();
println!("Signing transactions...");
let now = Instant::now();
let transactions: Vec<_> = keypair_pairs
.into_par_iter()
.map(|chunk| Transaction::new(&chunk[0], chunk[1].pubkey(), 1, last_id))
.collect();
let duration = now.elapsed();
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
@ -47,27 +112,32 @@ fn main() {
nsps / 1_000_f64
);
println!("Transferring 1 unit {} times...", txs);
let initial_tx_count = acc.transaction_count();
println!("Transfering {} transactions in {} batches", txs, threads);
let now = Instant::now();
let sz = transactions.len() / threads;
let chunks: Vec<_> = transactions.chunks(sz).collect();
chunks.into_par_iter().for_each(|trs| {
println!("Transferring 1 unit {} times...", trs.len());
let send_addr = "0.0.0.0:0";
let socket = UdpSocket::bind(send_addr).unwrap();
let acc = AccountantStub::new(&addr, socket);
for tr in trs {
acc.transfer_signed(tr.clone()).unwrap();
}
});
println!("Waiting for half the transactions to complete...",);
let mut tx_count = acc.transaction_count();
while tx_count < transactions.len() as u64 / 2 {
tx_count = acc.transaction_count();
}
println!("Waiting for last transaction to be confirmed...",);
let mut val = mint_balance;
let mut prev = 0;
while val != prev {
sleep(Duration::from_millis(20));
prev = val;
val = acc.get_balance(&mint_pubkey).unwrap().unwrap();
}
println!("Mint's Final Balance {}", val);
let txs = mint_balance - val;
println!("Successful transactions {}", txs);
let txs = tx_count - initial_tx_count;
println!("Transactions processed {}", txs);
let duration = now.elapsed();
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
let tps = (txs * 1_000_000_000) as f64 / ns as f64;
println!("Done. {} tps!", tps);
println!("Done. {} tps", tps);
}

View File

@ -1,30 +1,73 @@
extern crate isatty;
extern crate rayon;
extern crate ring;
extern crate serde_json;
extern crate solana;
extern crate untrusted;
use isatty::stdin_isatty;
use rayon::prelude::*;
use solana::accountant::MAX_ENTRY_IDS;
use solana::entry::{create_entry, next_tick};
use solana::event::Event;
use solana::hash::Hash;
use solana::mint::MintDemo;
use solana::signature::{KeyPair, KeyPairUtil};
use solana::transaction::Transaction;
use std::io::{stdin, Read};
use std::process::exit;
use untrusted::Input;
// Generate a ledger with lots and lots of accounts.
fn main() {
if stdin_isatty() {
eprintln!("nothing found on stdin, expected a json file");
exit(1);
}
let mut buffer = String::new();
let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
if num_bytes == 0 {
eprintln!("empty file on stdin, expected a json file");
exit(1);
}
let demo: MintDemo = serde_json::from_str(&buffer).unwrap_or_else(|e| {
eprintln!("failed to parse json: {}", e);
exit(1);
});
let num_accounts = demo.users.len();
let last_id = demo.mint.last_id();
let mint_keypair = demo.mint.keypair();
eprintln!("Signing {} transactions...", num_accounts);
let events: Vec<_> = demo.users
.into_par_iter()
.map(|(pkcs8, tokens)| {
let rando = KeyPair::from_pkcs8(Input::from(&pkcs8)).unwrap();
let tr = Transaction::new(&mint_keypair, rando.pubkey(), tokens, last_id);
Event::Transaction(tr)
})
.collect();
for entry in demo.mint.create_entries() {
println!("{}", serde_json::to_string(&entry).unwrap());
}
eprintln!("Logging the creation of {} accounts...", num_accounts);
let entry = create_entry(&last_id, 0, events);
println!("{}", serde_json::to_string(&entry).unwrap());
eprintln!("Creating {} empty entries...", MAX_ENTRY_IDS);
// Offer client lots of entry IDs to use for each transaction's last_id.
let mut last_id = last_id;
for _ in 0..MAX_ENTRY_IDS {
let entry = next_tick(&last_id, 1);
last_id = entry.id;
let serialized = serde_json::to_string(&entry).unwrap_or_else(|e| {
eprintln!("failed to serialize: {}", e);
exit(1);
});
println!("{}", serialized);
}
}

View File

@ -1,14 +1,36 @@
//! A command-line executable for generating the chain's genesis block.
extern crate isatty;
extern crate serde_json;
extern crate solana;
use isatty::stdin_isatty;
use solana::mint::Mint;
use std::io::{stdin, Read};
use std::process::exit;
fn main() {
if stdin_isatty() {
eprintln!("nothing found on stdin, expected a json file");
exit(1);
}
let mut buffer = String::new();
let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
if num_bytes == 0 {
eprintln!("empty file on stdin, expected a json file");
exit(1);
}
let mint: Mint = serde_json::from_str(&buffer).unwrap_or_else(|e| {
eprintln!("failed to parse json: {}", e);
exit(1);
});
for x in mint.create_entries() {
println!("{}", serde_json::to_string(&x).unwrap());
let serialized = serde_json::to_string(&x).unwrap_or_else(|e| {
eprintln!("failed to serialize: {}", e);
exit(1);
});
println!("{}", serialized);
}
}

View File

@ -4,7 +4,7 @@ use solana::entry::Entry;
use solana::event::Event;
use solana::hash::Hash;
use solana::historian::Historian;
use solana::ledger::Block;
use solana::recorder::Signal;
use solana::signature::{KeyPair, KeyPairUtil};
use solana::transaction::Transaction;
@ -33,5 +33,5 @@ fn main() {
}
// Proof-of-History: Verify the historian learned about the events
// in the same order they appear in the vector.
assert!(entries[..].verify(&seed));
}

33
src/bin/mint-demo.rs Normal file
View File

@ -0,0 +1,33 @@
extern crate rayon;
extern crate ring;
extern crate serde_json;
extern crate solana;
use rayon::prelude::*;
use ring::rand::SystemRandom;
use solana::mint::{Mint, MintDemo};
use solana::signature::KeyPair;
use std::io;
fn main() {
let mut input_text = String::new();
io::stdin().read_line(&mut input_text).unwrap();
let trimmed = input_text.trim();
let tokens = trimmed.parse::<i64>().unwrap();
let mint = Mint::new(tokens);
let tokens_per_user = 1_000;
let num_accounts = tokens / tokens_per_user;
let rnd = SystemRandom::new();
let users: Vec<_> = (0..num_accounts)
.into_par_iter()
.map(|_| {
let pkcs8 = KeyPair::generate_pkcs8(&rnd).unwrap().to_vec();
(pkcs8, tokens_per_user)
})
.collect();
let demo = MintDemo { mint, users };
println!("{}", serde_json::to_string(&demo).unwrap());
}
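// Judging from the usage strings of the neighboring binaries, the demo tools
// chain together: mint-demo turns a token count on stdin into a MintDemo JSON
// document on stdout; genesis-demo consumes that JSON to print the ledger
// (the mint's entries, one account-creation entry, and MAX_ENTRY_IDS empty
// entries); and client-demo consumes the same JSON to recover the pre-funded
// keypairs it signs transfers with.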

View File

@ -1,15 +1,29 @@
extern crate isatty;
extern crate serde_json;
extern crate solana;
use isatty::stdin_isatty;
use solana::mint::Mint;
use std::io;
use std::process::exit;
fn main() {
let mut input_text = String::new();
if stdin_isatty() {
eprintln!("nothing found on stdin, expected a token number");
exit(1);
}
io::stdin().read_line(&mut input_text).unwrap();
let trimmed = input_text.trim();
let tokens = trimmed.parse::<i64>().unwrap_or_else(|e| {
eprintln!("{}", e);
exit(1);
});
let mint = Mint::new(tokens);
println!("{}", serde_json::to_string(&mint).unwrap());
let serialized = serde_json::to_string(&mint).unwrap_or_else(|e| {
eprintln!("failed to serialize: {}", e);
exit(1);
});
println!("{}", serialized);
}

View File

@ -1,24 +1,110 @@
extern crate env_logger;
extern crate getopts;
extern crate isatty;
extern crate serde_json;
extern crate solana;
use getopts::Options;
use isatty::stdin_isatty;
use solana::accountant::Accountant;
use solana::accountant_skel::AccountantSkel;
use solana::entry::Entry;
use solana::event::Event;
use solana::historian::Historian;
use std::env;
use std::io::{stdin, stdout, Read};
use std::process::exit;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, Mutex};
fn print_usage(program: &str, opts: Options) {
let mut brief = format!("Usage: cat <transaction.log> | {} [options]\n\n", program);
brief += " Run a Solana node to handle transactions and\n";
brief += " write a new transaction log to stdout.\n";
brief += " Takes existing transaction log from stdin.";
print!("{}", opts.usage(&brief));
}
fn main() {
let addr = "127.0.0.1:8000";
let stdin = io::stdin();
let entries = stdin
.lock()
.lines()
.map(|line| serde_json::from_str(&line.unwrap()).unwrap());
let acc = Accountant::new_from_entries(entries, Some(1000));
env_logger::init().unwrap();
let mut port = 8000u16;
let mut opts = Options::new();
opts.optopt("p", "", "port", "port");
opts.optflag("h", "help", "print help");
let args: Vec<String> = env::args().collect();
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(e) => {
eprintln!("{}", e);
exit(1);
}
};
if matches.opt_present("h") {
let program = args[0].clone();
print_usage(&program, opts);
return;
}
if matches.opt_present("p") {
port = matches.opt_str("p").unwrap().parse().expect("port");
}
let addr = format!("0.0.0.0:{}", port);
if stdin_isatty() {
eprintln!("nothing found on stdin, expected a log file");
exit(1);
}
let mut buffer = String::new();
let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
if num_bytes == 0 {
eprintln!("empty file on stdin, expected a log file");
exit(1);
}
eprintln!("Initializing...");
let mut entries = buffer.lines().map(|line| {
serde_json::from_str(&line).unwrap_or_else(|e| {
eprintln!("failed to parse json: {}", e);
exit(1);
})
});
// The first item in the ledger is required to be an entry with zero num_hashes,
// which implies its id can be used as the ledger's seed.
let entry0 = entries.next().unwrap();
// The second item in the ledger is a special transaction where the to and from
// fields are the same. That entry should be treated as a deposit, not a
// transfer to oneself.
let entry1: Entry = entries.next().unwrap();
let deposit = if let Event::Transaction(ref tr) = entry1.events[0] {
tr.data.plan.final_payment()
} else {
None
};
let acc = Accountant::new_from_deposit(&deposit.unwrap());
acc.register_entry_id(&entry0.id);
acc.register_entry_id(&entry1.id);
let mut last_id = entry1.id;
for entry in entries {
last_id = entry.id;
acc.process_verified_events(entry.events).unwrap();
acc.register_entry_id(&last_id);
}
let historian = Historian::new(&last_id, Some(1000));
let exit = Arc::new(AtomicBool::new(false));
let skel = Arc::new(Mutex::new(AccountantSkel::new(
acc,
last_id,
stdout(),
historian,
)));
let threads = AccountantSkel::serve(&skel, &addr, exit.clone()).unwrap();
eprintln!("Ready. Listening on {}", addr);
for t in threads {
t.join().expect("join");
}

372
src/crdt.rs Normal file
View File

@ -0,0 +1,372 @@
//! The `crdt` module defines a data structure that is shared by all the nodes in the network over
//! a gossip control plane. The goal is to share small bits of off-chain information and detect and
//! repair partitions.
//!
//! This CRDT only supports a very limited set of types. A map of PublicKey -> Versioned Struct.
//! The last version is always picked during an update.
use bincode::{deserialize, serialize};
use byteorder::{LittleEndian, ReadBytesExt};
use hash::Hash;
use result::Result;
use ring::rand::{SecureRandom, SystemRandom};
use signature::{PublicKey, Signature};
use std::collections::HashMap;
use std::io::Cursor;
use std::net::{SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
use std::thread::{sleep, spawn, JoinHandle};
use std::time::Duration;
/// Structure to be replicated by the network
#[derive(Serialize, Deserialize, Clone)]
pub struct ReplicatedData {
id: PublicKey,
sig: Signature,
/// should always be increasing
version: u64,
/// address to connect to for gossip
gossip_addr: SocketAddr,
/// address to connect to for replication
replicate_addr: SocketAddr,
/// address to connect to when this node is leader
lead_addr: SocketAddr,
/// current leader identity
current_leader_id: PublicKey,
/// last verified hash that was submitted to the leader
last_verified_hash: Hash,
/// last verified count, always increasing
last_verified_count: u64,
}
impl ReplicatedData {
pub fn new(id: PublicKey, gossip_addr: SocketAddr) -> ReplicatedData {
let daddr = "0.0.0.0:0".parse().unwrap();
ReplicatedData {
id,
sig: Signature::default(),
version: 0,
gossip_addr,
replicate_addr: daddr,
lead_addr: daddr,
current_leader_id: PublicKey::default(),
last_verified_hash: Hash::default(),
last_verified_count: 0,
}
}
}
/// `Crdt` structure keeps a table of `ReplicatedData` structs
/// # Properties
/// * `table` - map of public id's to versioned and signed ReplicatedData structs
/// * `local` - map of public id's to the value of `self.update_index` when the entry in `self.table` was last updated
/// * `remote` - map of public id's to the last `update_index` they sent us
/// * `update_index` - my update index
/// # Remarks
/// This implements two services, `gossip` and `listen`.
/// * `gossip` - asynchronously ask nodes to send updates
/// * `listen` - listen for requests and responses
/// No attempt to keep track of timeouts or dropped requests is made, or should be.
pub struct Crdt {
table: HashMap<PublicKey, ReplicatedData>,
/// Value of my update index when entry in table was updated.
/// Nodes will ask for updates since `update_index`, and this node
/// should respond with all the identities that are greater than the
/// request's `update_index` in this list
local: HashMap<PublicKey, u64>,
/// The value of the remote update index that I have last seen
/// This Node will ask external nodes for updates since the value in this list
remote: HashMap<PublicKey, u64>,
update_index: u64,
me: PublicKey,
timeout: Duration,
}
// TODO These messages should be signed, and go through the gpu pipeline for spam filtering
#[derive(Serialize, Deserialize)]
enum Protocol {
/// forward your own latest data structure when requesting an update
/// this doesn't update the `remote` update index, but it allows the
/// recipient of this request to add knowledge of this node to the network
RequestUpdates(u64, ReplicatedData),
//TODO might need a since?
/// from id, from's last update index, ReplicatedData
ReceiveUpdates(PublicKey, u64, Vec<ReplicatedData>),
}
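// An illustrative exchange (identifiers invented for the example): if node A
// last saw B at remote[b_id] == 3, A sends RequestUpdates(3, a_data) to B's
// gossip_addr. B replies with ReceiveUpdates(b_id, b_update_index, updates),
// where `updates` holds every table entry whose local[id] > 3. A imports the
// entries and records remote[b_id] = b_update_index, so its next request asks
// only for newer data.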
impl Crdt {
pub fn new(me: ReplicatedData) -> Crdt {
assert_eq!(me.version, 0);
let mut g = Crdt {
table: HashMap::new(),
local: HashMap::new(),
remote: HashMap::new(),
me: me.id,
update_index: 1,
timeout: Duration::new(0, 100_000),
};
g.local.insert(me.id, g.update_index);
g.table.insert(me.id, me);
g
}
pub fn import(&mut self, v: &ReplicatedData) {
// TODO check that last_verified types are always increasing
// TODO probably an error or attack
if self.me != v.id {
self.insert(v);
}
}
pub fn insert(&mut self, v: &ReplicatedData) {
if self.table.get(&v.id).is_none() || (v.version > self.table[&v.id].version) {
trace!("insert! {}", v.version);
self.update_index += 1;
let _ = self.table.insert(v.id, v.clone());
let _ = self.local.insert(v.id, self.update_index);
} else {
trace!("INSERT FAILED {}", v.version);
}
}
fn random() -> u64 {
let rnd = SystemRandom::new();
let mut buf = [0u8; 8];
rnd.fill(&mut buf).unwrap();
let mut rdr = Cursor::new(&buf);
rdr.read_u64::<LittleEndian>().unwrap()
}
fn get_updates_since(&self, v: u64) -> (PublicKey, u64, Vec<ReplicatedData>) {
trace!("get updates since {}", v);
let data = self.table
.values()
.filter(|x| self.local[&x.id] > v)
.cloned()
.collect();
let id = self.me;
let ups = self.update_index;
(id, ups, data)
}
/// Create a random gossip request
/// # Returns
/// (A, B)
/// * A - Remote gossip address
/// * B - The RequestUpdates protocol message, carrying this node's latest data
/// and the remote update index to request updates since
fn gossip_request(&self) -> (SocketAddr, Protocol) {
let n = (Self::random() as usize) % self.table.len();
trace!("random {:?} {}", &self.me[0..1], n);
let v = self.table.values().nth(n).unwrap().clone();
let remote_update_index = *self.remote.get(&v.id).unwrap_or(&0);
let req = Protocol::RequestUpdates(remote_update_index, self.table[&self.me].clone());
(v.gossip_addr, req)
}
/// At random pick a node and try to get updated changes from them
fn run_gossip(obj: &Arc<RwLock<Self>>) -> Result<()> {
//TODO we need to keep track of stakes and weight the selection by stake size
//TODO cache sockets
// Lock the object only to do this operation and not for any longer
// especially not when doing the `sock.send_to`
let (remote_gossip_addr, req) = obj.read().unwrap().gossip_request();
let sock = UdpSocket::bind("0.0.0.0:0")?;
// TODO this will get chatty, so we need to first ask for number of updates since
// then only ask for specific data that we don't have
let r = serialize(&req)?;
sock.send_to(&r, remote_gossip_addr)?;
Ok(())
}
/// Apply updates that we received from the identity `from`
/// # Arguments
/// * `from` - identity of the sender of the updates
/// * `update_index` - the number of updates that `from` has completed and this set of `data` represents
/// * `data` - the update data
fn apply_updates(&mut self, from: PublicKey, update_index: u64, data: &[ReplicatedData]) {
trace!("got updates {}", data.len());
// TODO we need to punish/spam resist here
// sig verify the whole update and slash anyone who sends a bad update
for v in data {
self.import(&v);
}
*self.remote.entry(from).or_insert(update_index) = update_index;
}
/// randomly pick a node and ask them for updates asynchronously
pub fn gossip(obj: Arc<RwLock<Self>>, exit: Arc<AtomicBool>) -> JoinHandle<()> {
spawn(move || loop {
let _ = Self::run_gossip(&obj);
if exit.load(Ordering::Relaxed) {
return;
}
//TODO this should be a tuned parameter
sleep(obj.read().unwrap().timeout);
})
}
/// Process messages from the network
fn run_listen(obj: &Arc<RwLock<Self>>, sock: &UdpSocket) -> Result<()> {
//TODO cache connections
let mut buf = vec![0u8; 1024 * 64];
let (amt, src) = sock.recv_from(&mut buf)?;
trace!("got request from {}", src);
buf.resize(amt, 0);
let r = deserialize(&buf)?;
match r {
// TODO sigverify these
Protocol::RequestUpdates(v, reqdata) => {
trace!("RequestUpdates {}", v);
let addr = reqdata.gossip_addr;
// only lock for this call, don't lock during IO `sock.send_to` or `sock.recv_from`
let (from, ups, data) = obj.read().unwrap().get_updates_since(v);
trace!("get updates since response {} {}", v, data.len());
let rsp = serialize(&Protocol::ReceiveUpdates(from, ups, data))?;
trace!("send_to {}", addr);
//TODO verify reqdata belongs to sender
obj.write().unwrap().import(&reqdata);
sock.send_to(&rsp, addr).unwrap();
trace!("send_to done!");
}
Protocol::ReceiveUpdates(from, ups, data) => {
trace!("ReceivedUpdates");
obj.write().unwrap().apply_updates(from, ups, &data);
}
}
Ok(())
}
pub fn listen(
obj: Arc<RwLock<Self>>,
sock: UdpSocket,
exit: Arc<AtomicBool>,
) -> JoinHandle<()> {
sock.set_read_timeout(Some(Duration::new(2, 0))).unwrap();
spawn(move || loop {
let _ = Self::run_listen(&obj, &sock);
if exit.load(Ordering::Relaxed) {
return;
}
})
}
}
#[cfg(test)]
mod test {
use crdt::{Crdt, ReplicatedData};
use signature::KeyPair;
use signature::KeyPairUtil;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
use std::thread::{sleep, JoinHandle};
use std::time::Duration;
/// Test that the network converges.
/// Run until every node in the network has a full ReplicatedData set.
/// Check that nodes stop sending updates after all the ReplicatedData has been shared.
/// Tests that actually use this function are below.
fn run_gossip_topo<F>(topo: F)
where
F: Fn(&Vec<(Arc<RwLock<Crdt>>, JoinHandle<()>)>) -> (),
{
let num: usize = 5;
let exit = Arc::new(AtomicBool::new(false));
let listen: Vec<_> = (0..num)
.map(|_| {
let listener = UdpSocket::bind("0.0.0.0:0").unwrap();
let pubkey = KeyPair::new().pubkey();
let d = ReplicatedData::new(pubkey, listener.local_addr().unwrap());
let crdt = Crdt::new(d);
let c = Arc::new(RwLock::new(crdt));
let l = Crdt::listen(c.clone(), listener, exit.clone());
(c, l)
})
.collect();
topo(&listen);
let gossip: Vec<_> = listen
.iter()
.map(|&(ref c, _)| Crdt::gossip(c.clone(), exit.clone()))
.collect();
let mut done = true;
for _ in 0..(num * 16) {
done = true;
for &(ref c, _) in listen.iter() {
trace!(
"done updates {} {}",
c.read().unwrap().table.len(),
c.read().unwrap().update_index
);
//make sure the number of updates doesn't grow unbounded
assert!(c.read().unwrap().update_index <= num as u64);
//make sure we got all the updates
if c.read().unwrap().table.len() != num {
done = false;
}
}
if done {
break;
}
sleep(Duration::new(1, 0));
}
exit.store(true, Ordering::Relaxed);
for j in gossip {
j.join().unwrap();
}
for (c, j) in listen.into_iter() {
j.join().unwrap();
// make it clear what failed
// protocol is too chatty, updates should stop after everyone receives `num`
assert!(c.read().unwrap().update_index <= num as u64);
// protocol is not chatty enough, everyone should get `num` entries
assert_eq!(c.read().unwrap().table.len(), num);
}
assert!(done);
}
/// ring a -> b -> c -> d -> e -> a
#[test]
fn gossip_ring_test() {
run_gossip_topo(|listen| {
let num = listen.len();
for n in 0..num {
let y = n % listen.len();
let x = (n + 1) % listen.len();
let mut xv = listen[x].0.write().unwrap();
let yv = listen[y].0.read().unwrap();
let mut d = yv.table[&yv.me].clone();
d.version = 0;
xv.insert(&d);
}
});
}
/// star (b,c,d,e) -> a
#[test]
fn gossip_star_test() {
run_gossip_topo(|listen| {
let num = listen.len();
for n in 0..(num - 1) {
let x = 0;
let y = (n + 1) % listen.len();
let mut xv = listen[x].0.write().unwrap();
let yv = listen[y].0.read().unwrap();
let mut d = yv.table[&yv.me].clone();
d.version = 0;
xv.insert(&d);
}
});
}
/// Test that insert drops messages that are older
#[test]
fn insert_test() {
let mut d = ReplicatedData::new(KeyPair::new().pubkey(), "127.0.0.1:1234".parse().unwrap());
assert_eq!(d.version, 0);
let mut crdt = Crdt::new(d.clone());
assert_eq!(crdt.table[&d.id].version, 0);
d.version = 2;
crdt.insert(&d);
assert_eq!(crdt.table[&d.id].version, 2);
d.version = 1;
crdt.insert(&d);
assert_eq!(crdt.table[&d.id].version, 2);
}
}

194
src/ecdsa.rs Normal file
View File

@ -0,0 +1,194 @@
use packet::{Packet, SharedPackets};
use std::mem::size_of;
use transaction::{PUB_KEY_OFFSET, SIGNED_DATA_OFFSET, SIG_OFFSET};
pub const TX_OFFSET: usize = 4;
#[cfg(feature = "cuda")]
#[repr(C)]
struct Elems {
elems: *const Packet,
num: u32,
}
#[cfg(feature = "cuda")]
#[link(name = "cuda_verify_ed25519")]
extern "C" {
fn ed25519_verify_many(
vecs: *const Elems,
num: u32, //number of vecs
message_size: u32, //size of each element inside the elems field of the vec
public_key_offset: u32,
signature_offset: u32,
signed_message_offset: u32,
signed_message_len_offset: u32,
out: *mut u8, //combined length of all the items in vecs
) -> u32;
}
#[cfg(not(feature = "cuda"))]
fn verify_packet(packet: &Packet) -> u8 {
use ring::signature;
use signature::{PublicKey, Signature};
use untrusted;
let msg_start = TX_OFFSET + SIGNED_DATA_OFFSET;
let sig_start = TX_OFFSET + SIG_OFFSET;
let sig_end = sig_start + size_of::<Signature>();
let pub_key_start = TX_OFFSET + PUB_KEY_OFFSET;
let pub_key_end = pub_key_start + size_of::<PublicKey>();
if packet.meta.size <= msg_start {
return 0;
}
let msg_end = packet.meta.size;
signature::verify(
&signature::ED25519,
untrusted::Input::from(&packet.data[pub_key_start..pub_key_end]),
untrusted::Input::from(&packet.data[msg_start..msg_end]),
untrusted::Input::from(&packet.data[sig_start..sig_end]),
).is_ok() as u8
}
#[cfg(not(feature = "cuda"))]
pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
use rayon::prelude::*;
batches
.into_par_iter()
.map(|p| {
p.read()
.unwrap()
.packets
.par_iter()
.map(verify_packet)
.collect()
})
.collect()
}
#[cfg(feature = "cuda")]
pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
use packet::PACKET_DATA_SIZE;
let mut out = Vec::new();
let mut elems = Vec::new();
let mut locks = Vec::new();
let mut rvs = Vec::new();
for packets in batches {
locks.push(packets.read().unwrap());
}
let mut num = 0;
for p in locks {
elems.push(Elems {
elems: p.packets.as_ptr(),
num: p.packets.len() as u32,
});
let mut v = Vec::new();
v.resize(p.packets.len(), 0);
rvs.push(v);
num += p.packets.len();
}
out.resize(num, 0);
trace!("Starting verify num packets: {}", num);
trace!("elem len: {}", elems.len() as u32);
trace!("packet sizeof: {}", size_of::<Packet>() as u32);
trace!("pub key: {}", (TX_OFFSET + PUB_KEY_OFFSET) as u32);
trace!("sig offset: {}", (TX_OFFSET + SIG_OFFSET) as u32);
trace!("sign data: {}", (TX_OFFSET + SIGNED_DATA_OFFSET) as u32);
trace!("len offset: {}", PACKET_DATA_SIZE as u32);
unsafe {
let res = ed25519_verify_many(
elems.as_ptr(),
elems.len() as u32,
size_of::<Packet>() as u32,
(TX_OFFSET + PUB_KEY_OFFSET) as u32,
(TX_OFFSET + SIG_OFFSET) as u32,
(TX_OFFSET + SIGNED_DATA_OFFSET) as u32,
PACKET_DATA_SIZE as u32,
out.as_mut_ptr(),
);
if res != 0 {
trace!("RETURN!!!: {}", res);
}
}
trace!("done verify");
let mut num = 0;
for vs in rvs.iter_mut() {
for v in vs.iter_mut() {
*v = out[num];
if *v != 0 {
trace!("VERIFIED PACKET!!!!!");
}
num += 1;
}
}
rvs
}
#[cfg(test)]
mod tests {
use accountant_skel::Request;
use bincode::serialize;
use ecdsa;
use packet::{Packet, Packets, SharedPackets};
use std::sync::RwLock;
use transaction::test_tx;
use transaction::Transaction;
fn make_packet_from_transaction(tr: Transaction) -> Packet {
let tx = serialize(&Request::Transaction(tr)).unwrap();
let mut packet = Packet::default();
packet.meta.size = tx.len();
packet.data[..packet.meta.size].copy_from_slice(&tx);
return packet;
}
fn test_verify_n(n: usize, modify_data: bool) {
let tr = test_tx();
let mut packet = make_packet_from_transaction(tr);
// jumble some data to test failure
if modify_data {
packet.data[20] = 10;
}
// generate packet vector
let mut packets = Packets::default();
packets.packets = Vec::new();
for _ in 0..n {
packets.packets.push(packet.clone());
}
let shared_packets = SharedPackets::new(RwLock::new(packets));
let batches = vec![shared_packets.clone(), shared_packets.clone()];
// verify packets
let ans = ecdsa::ed25519_verify(&batches);
// check result
let ref_ans = if modify_data { 0u8 } else { 1u8 };
assert_eq!(ans, vec![vec![ref_ans; n], vec![ref_ans; n]]);
}
#[test]
fn test_verify_zero() {
test_verify_n(0, false);
}
#[test]
fn test_verify_one() {
test_verify_n(1, false);
}
#[test]
fn test_verify_seventy_one() {
test_verify_n(71, false);
}
#[test]
fn test_verify_fail() {
test_verify_n(5, true);
}
}

View File

@ -43,8 +43,25 @@ impl Entry {
}
}
fn add_event_data(hash_data: &mut Vec<u8>, event: &Event) {
match *event {
Event::Transaction(ref tr) => {
hash_data.push(0u8);
hash_data.extend_from_slice(&tr.sig);
}
Event::Signature { ref sig, .. } => {
hash_data.push(1u8);
hash_data.extend_from_slice(sig);
}
Event::Timestamp { ref sig, .. } => {
hash_data.push(2u8);
hash_data.extend_from_slice(sig);
}
}
}
/// Creates the hash `num_hashes` after `start_hash`. If the event contains
/// a signature, the final hash will be a hash of both the previous ID and
/// the signature.
pub fn next_hash(start_hash: &Hash, num_hashes: u64, events: &[Event]) -> Hash {
let mut id = *start_hash;
@ -55,17 +72,16 @@ pub fn next_hash(start_hash: &Hash, num_hashes: u64, events: &[Event]) -> Hash {
// Hash all the event data
let mut hash_data = vec![];
for event in events {
add_event_data(&mut hash_data, event);
}
if !hash_data.is_empty() {
extend_and_hash(&id, &hash_data)
} else if num_hashes != 0 {
hash(&id)
} else {
id
}
}
/// Creates the next Entry `num_hashes` after `start_hash`.
@ -99,6 +115,7 @@ pub fn next_tick(start_hash: &Hash, num_hashes: u64) -> Entry {
#[cfg(test)]
mod tests {
use super::*;
use chrono::prelude::*;
use entry::create_entry;
use event::Event;
use hash::hash;
@ -132,9 +149,28 @@ mod tests {
assert!(!e0.verify(&zero));
}
#[test]
fn test_witness_reorder_attack() {
let zero = Hash::default();
// First, verify entries
let keypair = KeyPair::new();
let tr0 = Event::new_timestamp(&keypair, Utc::now());
let tr1 = Event::new_signature(&keypair, Default::default());
let mut e0 = create_entry(&zero, 0, vec![tr0.clone(), tr1.clone()]);
assert!(e0.verify(&zero));
// Next, swap two witness events and ensure verification fails.
e0.events[0] = tr1; // <-- attack
e0.events[1] = tr0;
assert!(!e0.verify(&zero));
}
#[test]
fn test_next_tick() {
let zero = Hash::default();
assert_eq!(next_tick(&zero, 1).num_hashes, 1)
let tick = next_tick(&zero, 1);
assert_eq!(tick.num_hashes, 1);
assert_ne!(tick.id, zero);
}
}

420
src/erasure.rs Normal file
View File

@ -0,0 +1,420 @@
// Support erasure coding
use packet::{BlobRecycler, SharedBlob};
use std::result;
//TODO(sakridge) pick these values
const NUM_CODED: usize = 10;
const MAX_MISSING: usize = 2;
const NUM_DATA: usize = NUM_CODED - MAX_MISSING;
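// In concrete terms: each group of NUM_DATA = 8 data blobs is extended with
// MAX_MISSING = 2 coding blobs to form a group of NUM_CODED = 10, and
// recover() below can rebuild the group as long as no more than 2 of the 10
// blobs are lost.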
#[derive(Debug, PartialEq, Eq)]
pub enum ErasureError {
NotEnoughBlocksToDecode,
DecodeError,
InvalidBlockSize,
}
pub type Result<T> = result::Result<T, ErasureError>;
// k = number of data devices
// m = number of coding devices
// w = word size
extern "C" {
fn jerasure_matrix_encode(
k: i32,
m: i32,
w: i32,
matrix: *const i32,
data_ptrs: *const *const u8,
coding_ptrs: *const *mut u8,
size: i32,
);
fn jerasure_matrix_decode(
k: i32,
m: i32,
w: i32,
matrix: *const i32,
row_k_ones: i32,
erasures: *const i32,
data_ptrs: *const *mut u8,
coding_ptrs: *const *const u8,
size: i32,
) -> i32;
fn galois_single_divide(a: i32, b: i32, w: i32) -> i32;
}
fn get_matrix(m: i32, k: i32, w: i32) -> Vec<i32> {
let mut matrix = vec![0; (m * k) as usize];
for i in 0..m {
for j in 0..k {
unsafe {
matrix[(i * k + j) as usize] = galois_single_divide(1, i ^ (m + j), w);
}
}
}
matrix
}
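// For reference, this is the standard Cauchy construction over GF(2^w),
// where field addition is XOR:
//
//   matrix[i][j] = 1 / (i ^ (m + j))   in GF(2^w),  0 <= i < m, 0 <= j < k
//
// Since i < m <= m + j, the denominator is never zero, and every square
// submatrix of a Cauchy matrix is invertible, which is what lets
// decode_blocks() solve for any MAX_MISSING erasures.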
pub const ERASURE_W: i32 = 32;
// Generate coding blocks into coding
// There are some alignment restrictions: blocks should be aligned to 16 bytes,
// which means their size should be >= 16 bytes
pub fn generate_coding_blocks(coding: &mut [&mut [u8]], data: &[&[u8]]) -> Result<()> {
if data.len() == 0 {
return Ok(());
}
let m = coding.len() as i32;
let block_len = data[0].len();
let matrix: Vec<i32> = get_matrix(m, data.len() as i32, ERASURE_W);
let mut coding_arg = Vec::new();
let mut data_arg = Vec::new();
for block in data {
if block_len != block.len() {
return Err(ErasureError::InvalidBlockSize);
}
data_arg.push(block.as_ptr());
}
for mut block in coding {
if block_len != block.len() {
return Err(ErasureError::InvalidBlockSize);
}
coding_arg.push(block.as_mut_ptr());
}
unsafe {
jerasure_matrix_encode(
data.len() as i32,
m,
ERASURE_W,
matrix.as_ptr(),
data_arg.as_ptr(),
coding_arg.as_ptr(),
data[0].len() as i32,
);
}
Ok(())
}
// Recover data + coding blocks into data blocks
// data: array of blocks to recover into
// coding: array of coding blocks
// erasures: list of indices in data where blocks should be recovered
pub fn decode_blocks(data: &mut [&mut [u8]], coding: &[&[u8]], erasures: &[i32]) -> Result<()> {
if data.len() == 0 {
return Ok(());
}
let block_len = data[0].len();
let matrix: Vec<i32> = get_matrix(coding.len() as i32, data.len() as i32, ERASURE_W);
// generate coding pointers, blocks should be the same size
let mut coding_arg: Vec<*const u8> = Vec::new();
for x in coding.iter() {
if x.len() != block_len {
return Err(ErasureError::InvalidBlockSize);
}
coding_arg.push(x.as_ptr());
}
// generate data pointers, blocks should be the same size
let mut data_arg: Vec<*mut u8> = Vec::new();
for x in data.iter_mut() {
if x.len() != block_len {
return Err(ErasureError::InvalidBlockSize);
}
data_arg.push(x.as_mut_ptr());
}
unsafe {
let ret = jerasure_matrix_decode(
data.len() as i32,
coding.len() as i32,
ERASURE_W,
matrix.as_ptr(),
0,
erasures.as_ptr(),
data_arg.as_ptr(),
coding_arg.as_ptr(),
data[0].len() as i32,
);
trace!("jerasure_matrix_decode ret: {}", ret);
for x in data[erasures[0] as usize][0..8].iter() {
trace!("{} ", x)
}
trace!("");
if ret < 0 {
return Err(ErasureError::DecodeError);
}
}
Ok(())
}
// Generate coding blocks in window from consumed to consumed+NUM_DATA
pub fn generate_coding(
re: &BlobRecycler,
window: &mut Vec<Option<SharedBlob>>,
consumed: usize,
) -> Result<()> {
let mut data_blobs = Vec::new();
let mut coding_blobs = Vec::new();
let mut data_locks = Vec::new();
let mut data_ptrs: Vec<&[u8]> = Vec::new();
let mut coding_locks = Vec::new();
let mut coding_ptrs: Vec<&mut [u8]> = Vec::new();
for i in consumed..consumed + NUM_DATA {
let n = i % window.len();
data_blobs.push(window[n].clone().unwrap());
}
for b in &data_blobs {
data_locks.push(b.write().unwrap());
}
for (i, l) in data_locks.iter_mut().enumerate() {
trace!("i: {} data: {}", i, l.data[0]);
data_ptrs.push(&l.data);
}
// generate coding ptr array
let coding_start = consumed + NUM_DATA;
let coding_end = consumed + NUM_CODED;
for i in coding_start..coding_end {
let n = i % window.len();
window[n] = Some(re.allocate());
coding_blobs.push(window[n].clone().unwrap());
}
for b in &coding_blobs {
coding_locks.push(b.write().unwrap());
}
for (i, l) in coding_locks.iter_mut().enumerate() {
trace!("i: {} data: {}", i, l.data[0]);
coding_ptrs.push(&mut l.data);
}
generate_coding_blocks(coding_ptrs.as_mut_slice(), &data_ptrs)?;
trace!("consumed: {}", consumed);
Ok(())
}
// Recover missing blocks into window.
// Missing blocks should be None; `re` is used to allocate
// new ones. Returns an error if not enough coding blocks
// are present to restore.
pub fn recover(
re: &BlobRecycler,
window: &mut Vec<Option<SharedBlob>>,
consumed: usize,
) -> Result<()> {
//recover with erasure coding
let mut data_missing = 0;
let mut coded_missing = 0;
let coding_start = consumed + NUM_DATA;
let coding_end = consumed + NUM_CODED;
for i in consumed..coding_end {
let n = i % window.len();
if window[n].is_none() {
if i >= coding_start {
coded_missing += 1;
} else {
data_missing += 1;
}
}
}
trace!("missing: data: {} coding: {}", data_missing, coded_missing);
if data_missing > 0 {
if (data_missing + coded_missing) <= MAX_MISSING {
let mut blobs: Vec<SharedBlob> = Vec::new();
let mut locks = Vec::new();
let mut data_ptrs: Vec<&mut [u8]> = Vec::new();
let mut coding_ptrs: Vec<&[u8]> = Vec::new();
let mut erasures: Vec<i32> = Vec::new();
for i in consumed..coding_end {
let j = i % window.len();
let mut b = &mut window[j];
if b.is_some() {
blobs.push(b.clone().unwrap());
continue;
}
let n = re.allocate();
*b = Some(n.clone());
//mark the missing memory
blobs.push(n);
erasures.push((i - consumed) as i32);
}
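//jerasure expects the erasures array to be terminated with -1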
erasures.push(-1);
trace!("erasures: {:?}", erasures);
//lock everything
for b in &blobs {
locks.push(b.write().unwrap());
}
for (i, l) in locks.iter_mut().enumerate() {
if i >= NUM_DATA {
trace!("pushing coding: {}", i);
coding_ptrs.push(&l.data);
} else {
trace!("pushing data: {}", i);
data_ptrs.push(&mut l.data);
}
}
trace!(
"coding_ptrs.len: {} data_ptrs.len {}",
coding_ptrs.len(),
data_ptrs.len()
);
decode_blocks(data_ptrs.as_mut_slice(), &coding_ptrs, &erasures)?;
} else {
return Err(ErasureError::NotEnoughBlocksToDecode);
}
}
Ok(())
}
#[cfg(test)]
mod test {
use erasure;
use packet::{BlobRecycler, SharedBlob, PACKET_DATA_SIZE};
extern crate env_logger;
#[test]
pub fn test_coding() {
let zero_vec = vec![0; 16];
let mut vs: Vec<Vec<u8>> = (0..4).map(|i| (i..(16 + i)).collect()).collect();
let v_orig: Vec<u8> = vs[0].clone();
let m = 2;
let mut coding_blocks: Vec<_> = (0..m).map(|_| vec![0u8; 16]).collect();
{
let mut coding_blocks_slices: Vec<_> =
coding_blocks.iter_mut().map(|x| x.as_mut_slice()).collect();
let v_slices: Vec<_> = vs.iter().map(|x| x.as_slice()).collect();
assert!(
erasure::generate_coding_blocks(
coding_blocks_slices.as_mut_slice(),
v_slices.as_slice()
).is_ok()
);
}
trace!("coding blocks:");
for b in &coding_blocks {
trace!("{:?}", b);
}
let erasure: i32 = 1;
let erasures = vec![erasure, -1];
// clear an entry
vs[erasure as usize].copy_from_slice(zero_vec.as_slice());
{
let coding_blocks_slices: Vec<_> = coding_blocks.iter().map(|x| x.as_slice()).collect();
let mut v_slices: Vec<_> = vs.iter_mut().map(|x| x.as_mut_slice()).collect();
assert!(
erasure::decode_blocks(
v_slices.as_mut_slice(),
coding_blocks_slices.as_slice(),
erasures.as_slice(),
).is_ok()
);
}
trace!("vs:");
for v in &vs {
trace!("{:?}", v);
}
assert_eq!(v_orig, vs[0]);
}
fn print_window(window: &Vec<Option<SharedBlob>>) {
for (i, w) in window.iter().enumerate() {
print!("window({}): ", i);
if w.is_some() {
let window_lock = w.clone().unwrap();
let window_data = window_lock.read().unwrap().data;
for i in 0..8 {
print!("{} ", window_data[i]);
}
} else {
print!("null");
}
println!("");
}
}
#[test]
pub fn test_window_recover() {
let mut window = Vec::new();
let blob_recycler = BlobRecycler::default();
let offset = 4;
for i in 0..(4 * erasure::NUM_CODED + 1) {
let b = blob_recycler.allocate();
let b_ = b.clone();
let data_len = b.read().unwrap().data.len();
let mut w = b.write().unwrap();
w.set_index(i as u64).unwrap();
assert_eq!(i as u64, w.get_index().unwrap());
w.meta.size = PACKET_DATA_SIZE;
for k in 0..data_len {
w.data[k] = (k + i) as u8;
}
window.push(Some(b_));
}
println!("** after-gen:");
print_window(&window);
assert!(erasure::generate_coding(&blob_recycler, &mut window, offset).is_ok());
assert!(
erasure::generate_coding(&blob_recycler, &mut window, offset + erasure::NUM_CODED)
.is_ok()
);
assert!(
erasure::generate_coding(
&blob_recycler,
&mut window,
offset + (2 * erasure::NUM_CODED)
).is_ok()
);
assert!(
erasure::generate_coding(
&blob_recycler,
&mut window,
offset + (3 * erasure::NUM_CODED)
).is_ok()
);
println!("** after-coding:");
print_window(&window);
let refwindow = window[offset + 1].clone();
window[offset + 1] = None;
window[offset + 2] = None;
window[offset + erasure::NUM_CODED + 3] = None;
window[offset + (2 * erasure::NUM_CODED) + 0] = None;
window[offset + (2 * erasure::NUM_CODED) + 1] = None;
window[offset + (2 * erasure::NUM_CODED) + 2] = None;
let window_l0 = &(window[offset + (3 * erasure::NUM_CODED)]).clone().unwrap();
window_l0.write().unwrap().data[0] = 55;
println!("** after-nulling:");
print_window(&window);
assert!(erasure::recover(&blob_recycler, &mut window, offset).is_ok());
assert!(erasure::recover(&blob_recycler, &mut window, offset + erasure::NUM_CODED).is_ok());
assert!(
erasure::recover(
&blob_recycler,
&mut window,
offset + (2 * erasure::NUM_CODED)
).is_err()
);
assert!(
erasure::recover(
&blob_recycler,
&mut window,
offset + (3 * erasure::NUM_CODED)
).is_ok()
);
println!("** after-restore:");
print_window(&window);
let window_l = window[offset + 1].clone().unwrap();
let ref_l = refwindow.clone().unwrap();
assert_eq!(
window_l.read().unwrap().data.to_vec(),
ref_l.read().unwrap().data.to_vec()
);
}
}
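jerasure handles the general matrix case above, but the shape of the scheme is easiest to see in the single-parity special case: one coding block equal to the XOR of all data blocks can repair any one erasure. A minimal self-contained sketch of that case (generate_parity and recover_one are illustrative names, not part of this module):

fn generate_parity(data: &[Vec<u8>]) -> Vec<u8> {
    // The parity block is the byte-wise XOR of every data block.
    let len = data[0].len();
    let mut parity = vec![0u8; len];
    for block in data {
        assert_eq!(block.len(), len); // same rule as InvalidBlockSize above
        for (p, b) in parity.iter_mut().zip(block) {
            *p ^= b;
        }
    }
    parity
}

fn recover_one(data: &[Vec<u8>], parity: &[u8], erasure: usize) -> Vec<u8> {
    // XORing the parity with every surviving block leaves the missing one.
    let mut out = parity.to_vec();
    for (i, block) in data.iter().enumerate() {
        if i != erasure {
            for (o, b) in out.iter_mut().zip(block) {
                *o ^= b;
            }
        }
    }
    out
}

fn main() {
    let mut data: Vec<Vec<u8>> = (0u8..4).map(|i| (i..i + 16).collect()).collect();
    let parity = generate_parity(&data);
    let lost = data[1].clone();
    data[1] = vec![0; 16]; // clear an entry, as test_coding does
    assert_eq!(recover_one(&data, &parity, 1), lost);
}

The module generalizes this from one XOR row to MAX_MISSING Cauchy-matrix rows, which is why it tolerates two erasures instead of one.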

View File

@ -33,12 +33,13 @@ impl Event {
}
}
// TODO: Rename this to transaction_signature().
/// If the Event is a Transaction, return its Signature.
pub fn get_signature(&self) -> Option<Signature> {
match *self {
Event::Transaction(ref tr) => Some(tr.sig),
Event::Signature { .. } | Event::Timestamp { .. } => None,
/// Create and sign a new Witness Signature. Used for unit-testing.
pub fn new_signature(from: &KeyPair, tx_sig: Signature) -> Self {
let sig = Signature::clone_from_slice(from.sign(&tx_sig).as_ref());
Event::Signature {
from: from.pubkey(),
tx_sig,
sig,
}
}
@ -46,9 +47,21 @@ impl Event {
/// spending plan is valid.
pub fn verify(&self) -> bool {
match *self {
Event::Transaction(ref tr) => tr.verify(),
Event::Transaction(ref tr) => tr.verify_sig(),
Event::Signature { from, tx_sig, sig } => sig.verify(&from, &tx_sig),
Event::Timestamp { from, dt, sig } => sig.verify(&from, &serialize(&dt).unwrap()),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use signature::{KeyPair, KeyPairUtil};
#[test]
fn test_event_verify() {
assert!(Event::new_timestamp(&KeyPair::new(), Utc::now()).verify());
assert!(Event::new_signature(&KeyPair::new(), Signature::default()).verify());
}
}

View File

@ -1,7 +1,7 @@
//! The `hash` module provides functions for creating SHA-256 hashes.
use generic_array::GenericArray;
use generic_array::typenum::U32;
use generic_array::GenericArray;
use sha2::{Digest, Sha256};
pub type Hash = GenericArray<u8, U32>;

View File

@ -4,8 +4,6 @@
use entry::Entry;
use hash::Hash;
use recorder::{ExitReason, Recorder, Signal};
use signature::Signature;
use std::collections::HashSet;
use std::sync::mpsc::{sync_channel, Receiver, SyncSender};
use std::thread::{spawn, JoinHandle};
use std::time::Instant;
@ -14,32 +12,21 @@ pub struct Historian {
pub sender: SyncSender<Signal>,
pub receiver: Receiver<Entry>,
pub thread_hdl: JoinHandle<ExitReason>,
pub signatures: HashSet<Signature>,
}
impl Historian {
pub fn new(start_hash: &Hash, ms_per_tick: Option<u64>) -> Self {
let (sender, event_receiver) = sync_channel(1000);
let (entry_sender, receiver) = sync_channel(1000);
let (sender, event_receiver) = sync_channel(10_000);
let (entry_sender, receiver) = sync_channel(10_000);
let thread_hdl =
Historian::create_recorder(*start_hash, ms_per_tick, event_receiver, entry_sender);
let signatures = HashSet::new();
Historian {
sender,
receiver,
thread_hdl,
signatures,
}
}
pub fn reserve_signature(&mut self, sig: &Signature) -> bool {
if self.signatures.contains(sig) {
return false;
}
self.signatures.insert(*sig);
true
}
/// A background thread that will continue tagging received Event messages and
/// sending back Entry messages until either the receiver or sender channel is closed.
fn create_recorder(
@ -66,7 +53,7 @@ impl Historian {
#[cfg(test)]
mod tests {
use super::*;
use ledger::*;
use ledger::Block;
use std::thread::sleep;
use std::time::Duration;
@ -95,7 +82,7 @@ mod tests {
ExitReason::RecvDisconnected
);
assert!(verify_slice(&[entry0, entry1, entry2], &zero));
assert!([entry0, entry1, entry2].verify(&zero));
}
#[test]
@ -110,20 +97,11 @@ mod tests {
);
}
#[test]
fn test_duplicate_event_signature() {
let zero = Hash::default();
let mut hist = Historian::new(&zero, None);
let sig = Signature::default();
assert!(hist.reserve_signature(&sig));
assert!(!hist.reserve_signature(&sig));
}
#[test]
fn test_ticking_historian() {
let zero = Hash::default();
let hist = Historian::new(&zero, Some(20));
sleep(Duration::from_millis(30));
sleep(Duration::from_millis(300));
hist.sender.send(Signal::Tick).unwrap();
drop(hist.sender);
let entries: Vec<Entry> = hist.receiver.iter().collect();
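Note the channel bump from 1000 to 10_000 slots above: std's sync_channel is a bounded queue whose send blocks once the buffer fills, so the capacity decides how far event producers can run ahead of the recorder thread. A minimal sketch of that backpressure, independent of the Historian types:

use std::sync::mpsc::sync_channel;
use std::thread;

fn main() {
    // Capacity 2: the third send blocks until the receiver drains an item.
    let (tx, rx) = sync_channel(2);
    let producer = thread::spawn(move || {
        for i in 0..5 {
            tx.send(i).unwrap(); // blocks whenever 2 items are in flight
        }
        // tx is dropped here, which ends the receiver's iterator below
    });
    for got in rx.iter() {
        println!("got {}", got);
    }
    producer.join().unwrap();
}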

View File

@ -5,11 +5,17 @@ use entry::{next_tick, Entry};
use hash::Hash;
use rayon::prelude::*;
/// Verifies that the hashes and counts of a slice of events are all consistent.
pub fn verify_slice(entries: &[Entry], start_hash: &Hash) -> bool {
let genesis = [Entry::new_tick(Default::default(), start_hash)];
let entry_pairs = genesis.par_iter().chain(entries).zip(entries);
entry_pairs.all(|(x0, x1)| x1.verify(&x0.id))
pub trait Block {
/// Verifies that the hashes and counts of a slice of events are all consistent.
fn verify(&self, start_hash: &Hash) -> bool;
}
impl Block for [Entry] {
fn verify(&self, start_hash: &Hash) -> bool {
let genesis = [Entry::new_tick(0, start_hash)];
let entry_pairs = genesis.par_iter().chain(self).zip(self);
entry_pairs.all(|(x0, x1)| x1.verify(&x0.id))
}
}
/// Create a vector of Ticks of length `len` from `start_hash` and `num_hashes`.
@ -33,14 +39,14 @@ mod tests {
fn test_verify_slice() {
let zero = Hash::default();
let one = hash(&zero);
assert!(verify_slice(&vec![], &zero)); // base case
assert!(verify_slice(&vec![Entry::new_tick(0, &zero)], &zero)); // singleton case 1
assert!(!verify_slice(&vec![Entry::new_tick(0, &zero)], &one)); // singleton case 2, bad
assert!(verify_slice(&next_ticks(&zero, 0, 2), &zero)); // inductive step
assert!(vec![][..].verify(&zero)); // base case
assert!(vec![Entry::new_tick(0, &zero)][..].verify(&zero)); // singleton case 1
assert!(!vec![Entry::new_tick(0, &zero)][..].verify(&one)); // singleton case 2, bad
assert!(next_ticks(&zero, 0, 2)[..].verify(&zero)); // inductive step
let mut bad_ticks = next_ticks(&zero, 0, 2);
bad_ticks[1].id = one;
assert!(!verify_slice(&bad_ticks, &zero)); // inductive step, bad
assert!(!bad_ticks.verify(&zero)); // inductive step, bad
}
}
@ -52,10 +58,10 @@ mod bench {
#[bench]
fn event_bench(bencher: &mut Bencher) {
let start_hash = Default::default();
let events = next_ticks(&start_hash, 10_000, 8);
let start_hash = Hash::default();
let entries = next_ticks(&start_hash, 10_000, 8);
bencher.iter(|| {
assert!(verify_slice(&events, &start_hash));
assert!(entries.verify(&start_hash));
});
}
}
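Block::verify is pairwise: a synthetic genesis tick is prepended, the slice is zipped against itself shifted by one, and each (predecessor, entry) pair is checked on its own, which is exactly what lets rayon run the pairs in parallel. A toy sketch of the same pairing over integers instead of hashes (Link and its id rule are invented for illustration):

struct Link {
    prev: u64, // stands in for an entry's back-reference to the prior id
    id: u64,
}

fn verify_chain(links: &[Link], start: u64) -> bool {
    // Mirror ledger.rs: genesis.iter().chain(self).zip(self) pairs each
    // link with its predecessor, covering index 0 with a synthetic genesis.
    let genesis = [Link { prev: 0, id: start }];
    genesis
        .iter()
        .chain(links)
        .zip(links)
        .all(|(prev, cur)| cur.prev == prev.id && cur.id == cur.prev + 1)
}

fn main() {
    let links: Vec<Link> = (0u64..4).map(|i| Link { prev: i, id: i + 1 }).collect();
    assert!(verify_chain(&links, 0));
    assert!(!verify_chain(&links, 7)); // a wrong start hash breaks the first pair
}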

View File

@ -2,21 +2,29 @@
pub mod accountant;
pub mod accountant_skel;
pub mod accountant_stub;
pub mod crdt;
pub mod ecdsa;
pub mod entry;
#[cfg(feature = "erasure")]
pub mod erasure;
pub mod event;
pub mod hash;
pub mod historian;
pub mod ledger;
pub mod mint;
pub mod packet;
pub mod plan;
pub mod recorder;
pub mod result;
pub mod signature;
pub mod streamer;
pub mod subscribers;
pub mod transaction;
extern crate bincode;
extern crate byteorder;
extern crate chrono;
extern crate generic_array;
extern crate libc;
#[macro_use]
extern crate log;
extern crate rayon;
@ -28,6 +36,8 @@ extern crate serde_json;
extern crate sha2;
extern crate untrusted;
extern crate futures;
#[cfg(test)]
#[macro_use]
extern crate matches;

View File

@ -1,7 +1,7 @@
//! The `mint` module is a library for generating the chain's genesis block.
use entry::Entry;
use entry::create_entry;
use entry::Entry;
use event::Event;
use hash::{hash, Hash};
use ring::rand::SystemRandom;
@ -33,6 +33,10 @@ impl Mint {
hash(&self.pkcs8)
}
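/// The id (hash) of the final entry in the genesis ledger; new transactions
/// reference it as their last_id.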
pub fn last_id(&self) -> Hash {
self.create_entries()[1].id
}
pub fn keypair(&self) -> KeyPair {
KeyPair::from_pkcs8(Input::from(&self.pkcs8)).unwrap()
}
@ -54,17 +58,23 @@ impl Mint {
}
}
#[derive(Serialize, Deserialize, Debug)]
pub struct MintDemo {
pub mint: Mint,
pub users: Vec<(Vec<u8>, i64)>,
}
#[cfg(test)]
mod tests {
use super::*;
use ledger::verify_slice;
use ledger::Block;
use plan::Plan;
#[test]
fn test_create_events() {
let mut events = Mint::new(100).create_events().into_iter();
if let Event::Transaction(tr) = events.next().unwrap() {
if let Plan::Pay(payment) = tr.plan {
if let Plan::Pay(payment) = tr.data.plan {
assert_eq!(tr.from, payment.to);
}
}
@ -74,6 +84,6 @@ mod tests {
#[test]
fn test_verify_entries() {
let entries = Mint::new(100).create_entries();
assert!(verify_slice(&entries, &entries[0].id));
assert!(entries[..].verify(&entries[0].id));
}
}

396
src/packet.rs Normal file
View File

@ -0,0 +1,396 @@
//! The `packet` module defines data structures and methods to pull data from the network.
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use result::{Error, Result};
use std::collections::VecDeque;
use std::fmt;
use std::io;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket};
use std::sync::{Arc, Mutex, RwLock};
use std::mem::size_of;
pub type SharedPackets = Arc<RwLock<Packets>>;
pub type SharedBlob = Arc<RwLock<Blob>>;
pub type PacketRecycler = Recycler<Packets>;
pub type BlobRecycler = Recycler<Blob>;
pub const NUM_PACKETS: usize = 1024 * 8;
const BLOB_SIZE: usize = 64 * 1024;
pub const PACKET_DATA_SIZE: usize = 256;
pub const NUM_BLOBS: usize = (NUM_PACKETS * PACKET_DATA_SIZE) / BLOB_SIZE;
#[derive(Clone, Default)]
#[repr(C)]
pub struct Meta {
pub size: usize,
pub addr: [u16; 8],
pub port: u16,
pub v6: bool,
}
#[derive(Clone)]
#[repr(C)]
pub struct Packet {
pub data: [u8; PACKET_DATA_SIZE],
pub meta: Meta,
}
impl fmt::Debug for Packet {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"Packet {{ size: {:?}, addr: {:?} }}",
self.meta.size,
self.meta.addr()
)
}
}
impl Default for Packet {
fn default() -> Packet {
Packet {
data: [0u8; PACKET_DATA_SIZE],
meta: Meta::default(),
}
}
}
impl Meta {
pub fn addr(&self) -> SocketAddr {
if !self.v6 {
let addr = [
self.addr[0] as u8,
self.addr[1] as u8,
self.addr[2] as u8,
self.addr[3] as u8,
];
let ipv4: Ipv4Addr = From::<[u8; 4]>::from(addr);
SocketAddr::new(IpAddr::V4(ipv4), self.port)
} else {
let ipv6: Ipv6Addr = From::<[u16; 8]>::from(self.addr);
SocketAddr::new(IpAddr::V6(ipv6), self.port)
}
}
pub fn set_addr(&mut self, a: &SocketAddr) {
match *a {
SocketAddr::V4(v4) => {
let ip = v4.ip().octets();
self.addr[0] = u16::from(ip[0]);
self.addr[1] = u16::from(ip[1]);
self.addr[2] = u16::from(ip[2]);
self.addr[3] = u16::from(ip[3]);
self.port = a.port();
}
SocketAddr::V6(v6) => {
self.addr = v6.ip().segments();
self.port = a.port();
self.v6 = true;
}
}
}
}
#[derive(Debug)]
pub struct Packets {
pub packets: Vec<Packet>,
}
//auto derive doesn't support large arrays
impl Default for Packets {
fn default() -> Packets {
Packets {
packets: vec![Packet::default(); NUM_PACKETS],
}
}
}
#[derive(Clone)]
pub struct Blob {
pub data: [u8; BLOB_SIZE],
pub meta: Meta,
}
impl fmt::Debug for Blob {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"Blob {{ size: {:?}, addr: {:?} }}",
self.meta.size,
self.meta.addr()
)
}
}
//auto derive doesn't support large arrays
impl Default for Blob {
fn default() -> Blob {
Blob {
data: [0u8; BLOB_SIZE],
meta: Meta::default(),
}
}
}
pub struct Recycler<T> {
gc: Arc<Mutex<Vec<Arc<RwLock<T>>>>>,
}
impl<T: Default> Default for Recycler<T> {
fn default() -> Recycler<T> {
Recycler {
gc: Arc::new(Mutex::new(vec![])),
}
}
}
impl<T: Default> Clone for Recycler<T> {
fn clone(&self) -> Recycler<T> {
Recycler {
gc: self.gc.clone(),
}
}
}
impl<T: Default> Recycler<T> {
pub fn allocate(&self) -> Arc<RwLock<T>> {
let mut gc = self.gc.lock().expect("recycler lock");
gc.pop()
.unwrap_or_else(|| Arc::new(RwLock::new(Default::default())))
}
pub fn recycle(&self, msgs: Arc<RwLock<T>>) {
let mut gc = self.gc.lock().expect("recycler lock");
gc.push(msgs);
}
}
impl Packets {
fn run_read_from(&mut self, socket: &UdpSocket) -> Result<usize> {
self.packets.resize(NUM_PACKETS, Packet::default());
let mut i = 0;
//DOCUMENTED SIDE-EFFECT
//Squeezes performance out of the IO without poll():
// * block on the socket until it's readable
// * set the socket to non-blocking
// * read until it fails
// * set it back to blocking before returning
socket.set_nonblocking(false)?;
for p in &mut self.packets {
p.meta.size = 0;
match socket.recv_from(&mut p.data) {
Err(_) if i > 0 => {
trace!("got {:?} messages", i);
break;
}
Err(e) => {
info!("recv_from err {:?}", e);
return Err(Error::IO(e));
}
Ok((nrecv, from)) => {
p.meta.size = nrecv;
p.meta.set_addr(&from);
if i == 0 {
socket.set_nonblocking(true)?;
}
}
}
i += 1;
}
Ok(i)
}
pub fn recv_from(&mut self, socket: &UdpSocket) -> Result<()> {
let sz = self.run_read_from(socket)?;
self.packets.resize(sz, Packet::default());
Ok(())
}
pub fn send_to(&self, socket: &UdpSocket) -> Result<()> {
for p in &self.packets {
let a = p.meta.addr();
socket.send_to(&p.data[..p.meta.size], &a)?;
}
Ok(())
}
}
const BLOB_INDEX_SIZE: usize = size_of::<u64>();
impl Blob {
pub fn get_index(&self) -> Result<u64> {
let mut rdr = io::Cursor::new(&self.data[0..BLOB_INDEX_SIZE]);
let r = rdr.read_u64::<LittleEndian>()?;
Ok(r)
}
pub fn set_index(&mut self, ix: u64) -> Result<()> {
let mut wtr = vec![];
wtr.write_u64::<LittleEndian>(ix)?;
self.data[..BLOB_INDEX_SIZE].clone_from_slice(&wtr);
Ok(())
}
pub fn data(&self) -> &[u8] {
&self.data[BLOB_INDEX_SIZE..]
}
pub fn data_mut(&mut self) -> &mut [u8] {
&mut self.data[BLOB_INDEX_SIZE..]
}
pub fn set_size(&mut self, size: usize) {
self.meta.size = size + BLOB_INDEX_SIZE;
}
pub fn recv_from(re: &BlobRecycler, socket: &UdpSocket) -> Result<VecDeque<SharedBlob>> {
let mut v = VecDeque::new();
//DOCUMENTED SIDE-EFFECT
//Squeezes performance out of the IO without poll():
// * block on the socket until it's readable
// * set the socket to non-blocking
// * read until it fails
// * set it back to blocking before returning
socket.set_nonblocking(false)?;
for i in 0..NUM_BLOBS {
let r = re.allocate();
{
let mut p = r.write().unwrap();
match socket.recv_from(&mut p.data) {
Err(_) if i > 0 => {
trace!("got {:?} messages", i);
break;
}
Err(e) => {
info!("recv_from err {:?}", e);
return Err(Error::IO(e));
}
Ok((nrecv, from)) => {
p.meta.size = nrecv;
p.meta.set_addr(&from);
if i == 0 {
socket.set_nonblocking(true)?;
}
}
}
}
v.push_back(r);
}
Ok(v)
}
pub fn send_to(
re: &BlobRecycler,
socket: &UdpSocket,
v: &mut VecDeque<SharedBlob>,
) -> Result<()> {
while let Some(r) = v.pop_front() {
{
let p = r.read().unwrap();
let a = p.meta.addr();
socket.send_to(&p.data[..p.meta.size], &a)?;
}
re.recycle(r);
}
Ok(())
}
}
#[cfg(test)]
mod test {
use packet::{Blob, BlobRecycler, Packet, PacketRecycler, Packets};
use std::collections::VecDeque;
use std::io;
use std::io::Write;
use std::net::UdpSocket;
#[test]
pub fn packet_recycler_test() {
let r = PacketRecycler::default();
let p = r.allocate();
r.recycle(p);
assert_eq!(r.gc.lock().unwrap().len(), 1);
let _ = r.allocate();
assert_eq!(r.gc.lock().unwrap().len(), 0);
}
#[test]
pub fn blob_recycler_test() {
let r = BlobRecycler::default();
let p = r.allocate();
r.recycle(p);
assert_eq!(r.gc.lock().unwrap().len(), 1);
let _ = r.allocate();
assert_eq!(r.gc.lock().unwrap().len(), 0);
}
#[test]
pub fn packet_send_recv() {
let reader = UdpSocket::bind("127.0.0.1:0").expect("bind");
let addr = reader.local_addr().unwrap();
let sender = UdpSocket::bind("127.0.0.1:0").expect("bind");
let saddr = sender.local_addr().unwrap();
let r = PacketRecycler::default();
let p = r.allocate();
p.write().unwrap().packets.resize(10, Packet::default());
for m in p.write().unwrap().packets.iter_mut() {
m.meta.set_addr(&addr);
m.meta.size = 256;
}
p.read().unwrap().send_to(&sender).unwrap();
p.write().unwrap().recv_from(&reader).unwrap();
for m in p.write().unwrap().packets.iter_mut() {
assert_eq!(m.meta.size, 256);
assert_eq!(m.meta.addr(), saddr);
}
r.recycle(p);
}
#[test]
pub fn blob_send_recv() {
trace!("start");
let reader = UdpSocket::bind("127.0.0.1:0").expect("bind");
let addr = reader.local_addr().unwrap();
let sender = UdpSocket::bind("127.0.0.1:0").expect("bind");
let r = BlobRecycler::default();
let p = r.allocate();
p.write().unwrap().meta.set_addr(&addr);
p.write().unwrap().meta.size = 1024;
let mut v = VecDeque::new();
v.push_back(p);
assert_eq!(v.len(), 1);
Blob::send_to(&r, &sender, &mut v).unwrap();
trace!("send_to");
assert_eq!(v.len(), 0);
let mut rv = Blob::recv_from(&r, &reader).unwrap();
trace!("recv_from");
assert_eq!(rv.len(), 1);
let rp = rv.pop_front().unwrap();
assert_eq!(rp.write().unwrap().meta.size, 1024);
r.recycle(rp);
}
#[cfg(all(feature = "ipv6", test))]
#[test]
pub fn blob_ipv6_send_recv() {
let reader = UdpSocket::bind("[::1]:0").expect("bind");
let addr = reader.local_addr().unwrap();
let sender = UdpSocket::bind("[::1]:0").expect("bind");
let r = BlobRecycler::default();
let p = r.allocate();
p.write().unwrap().meta.set_addr(&addr);
p.write().unwrap().meta.size = 1024;
let mut v = VecDeque::default();
v.push_back(p);
Blob::send_to(&r, &sender, &mut v).unwrap();
let mut rv = Blob::recv_from(&r, &reader).unwrap();
let rp = rv.pop_front().unwrap();
assert_eq!(rp.write().unwrap().meta.size, 1024);
r.recycle(rp);
}
#[test]
pub fn debug_trait() {
write!(io::sink(), "{:?}", Packet::default()).unwrap();
write!(io::sink(), "{:?}", Packets::default()).unwrap();
write!(io::sink(), "{:?}", Blob::default()).unwrap();
}
#[test]
pub fn blob_test() {
let mut b = Blob::default();
b.set_index(<u64>::max_value()).unwrap();
assert_eq!(b.get_index().unwrap(), <u64>::max_value());
b.data_mut()[0] = 1;
assert_eq!(b.data()[0], 1);
assert_eq!(b.get_index().unwrap(), <u64>::max_value());
}
}
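Blob stores its payload behind an 8-byte little-endian index (BLOB_INDEX_SIZE); set_index and get_index do this with the byteorder crate. The same framing in std-only Rust, as a sketch (INDEX_SIZE and the helpers are illustrative, not this crate's API):

const INDEX_SIZE: usize = 8;

fn set_index(buf: &mut [u8], ix: u64) {
    // First 8 bytes are the little-endian index, as in Blob::set_index.
    buf[..INDEX_SIZE].copy_from_slice(&ix.to_le_bytes());
}

fn get_index(buf: &[u8]) -> u64 {
    let mut b = [0u8; INDEX_SIZE];
    b.copy_from_slice(&buf[..INDEX_SIZE]);
    u64::from_le_bytes(b)
}

fn main() {
    let mut blob = [0u8; 64];
    set_index(&mut blob, u64::max_value());
    blob[INDEX_SIZE] = 1; // first payload byte, i.e. data_mut()[0]
    assert_eq!(get_index(&blob), u64::max_value());
    assert_eq!(blob[INDEX_SIZE], 1);
}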

View File

@ -35,6 +35,7 @@ pub struct Payment {
pub to: PublicKey,
}
#[repr(C)]
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Plan {
Pay(Payment),
@ -72,11 +73,11 @@ impl Plan {
)
}
/// Return true if the spending plan requires no additional Witnesses.
pub fn is_complete(&self) -> bool {
/// Return Payment if the spending plan requires no additional Witnesses.
pub fn final_payment(&self) -> Option<Payment> {
match *self {
Plan::Pay(_) => true,
_ => false,
Plan::Pay(ref payment) => Some(payment.clone()),
_ => None,
}
}

View File

@ -34,11 +34,11 @@ pub struct Recorder {
}
impl Recorder {
pub fn new(receiver: Receiver<Signal>, sender: SyncSender<Entry>, start_hash: Hash) -> Self {
pub fn new(receiver: Receiver<Signal>, sender: SyncSender<Entry>, last_hash: Hash) -> Self {
Recorder {
receiver,
sender,
last_hash: start_hash,
last_hash,
events: vec![],
num_hashes: 0,
num_ticks: 0,

View File

@ -4,6 +4,7 @@ use bincode;
use serde_json;
use std;
use std::any::Any;
use accountant;
#[derive(Debug)]
pub enum Error {
@ -14,6 +15,7 @@ pub enum Error {
RecvError(std::sync::mpsc::RecvError),
RecvTimeoutError(std::sync::mpsc::RecvTimeoutError),
Serialize(std::boxed::Box<bincode::ErrorKind>),
AccountingError(accountant::AccountingError),
SendError,
Services,
}
@ -30,6 +32,11 @@ impl std::convert::From<std::sync::mpsc::RecvTimeoutError> for Error {
Error::RecvTimeoutError(e)
}
}
impl std::convert::From<accountant::AccountingError> for Error {
fn from(e: accountant::AccountingError) -> Error {
Error::AccountingError(e)
}
}
impl<T> std::convert::From<std::sync::mpsc::SendError<T>> for Error {
fn from(_e: std::sync::mpsc::SendError<T>) -> Error {
Error::SendError
@ -70,9 +77,9 @@ mod tests {
use std::io;
use std::io::Write;
use std::net::SocketAddr;
use std::sync::mpsc::channel;
use std::sync::mpsc::RecvError;
use std::sync::mpsc::RecvTimeoutError;
use std::sync::mpsc::channel;
use std::thread;
fn addr_parse_error() -> Result<SocketAddr> {

View File

@ -1,7 +1,7 @@
//! The `signature` module provides functionality for public, and private keys.
use generic_array::GenericArray;
use generic_array::typenum::{U32, U64};
use generic_array::GenericArray;
use ring::signature::Ed25519KeyPair;
use ring::{rand, signature};
use untrusted;

View File

@ -1,240 +1,38 @@
//! The `streamer` module allows for efficient batch processing of UDP packets.
use result::{Error, Result};
use std::fmt;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket};
//! The `streamer` module defines a set of services for efficiently pulling data from UDP sockets.
use packet::{Blob, BlobRecycler, PacketRecycler, SharedBlob, SharedPackets, NUM_BLOBS};
use result::Result;
use std::collections::VecDeque;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc;
use std::sync::{Arc, Mutex, RwLock};
use std::sync::{Arc, RwLock};
use std::thread::{spawn, JoinHandle};
use std::time::Duration;
use subscribers::Subscribers;
const BLOCK_SIZE: usize = 1024 * 8;
pub const PACKET_SIZE: usize = 256;
pub const RESP_SIZE: usize = 64 * 1024;
pub const NUM_RESP: usize = (BLOCK_SIZE * PACKET_SIZE) / RESP_SIZE;
#[derive(Clone, Default)]
pub struct Meta {
pub size: usize,
pub addr: [u16; 8],
pub port: u16,
pub v6: bool,
}
#[derive(Clone)]
pub struct Packet {
pub data: [u8; PACKET_SIZE],
pub meta: Meta,
}
impl fmt::Debug for Packet {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"Packet {{ size: {:?}, addr: {:?} }}",
self.meta.size,
self.meta.get_addr()
)
}
}
impl Default for Packet {
fn default() -> Packet {
Packet {
data: [0u8; PACKET_SIZE],
meta: Meta::default(),
}
}
}
impl Meta {
pub fn get_addr(&self) -> SocketAddr {
if !self.v6 {
let ipv4 = Ipv4Addr::new(
self.addr[0] as u8,
self.addr[1] as u8,
self.addr[2] as u8,
self.addr[3] as u8,
);
SocketAddr::new(IpAddr::V4(ipv4), self.port)
} else {
let ipv6 = Ipv6Addr::new(
self.addr[0],
self.addr[1],
self.addr[2],
self.addr[3],
self.addr[4],
self.addr[5],
self.addr[6],
self.addr[7],
);
SocketAddr::new(IpAddr::V6(ipv6), self.port)
}
}
pub fn set_addr(&mut self, a: &SocketAddr) {
match *a {
SocketAddr::V4(v4) => {
let ip = v4.ip().octets();
self.addr[0] = u16::from(ip[0]);
self.addr[1] = u16::from(ip[1]);
self.addr[2] = u16::from(ip[2]);
self.addr[3] = u16::from(ip[3]);
self.port = a.port();
}
SocketAddr::V6(v6) => {
self.addr = v6.ip().segments();
self.port = a.port();
self.v6 = true;
}
}
}
}
#[derive(Debug)]
pub struct Packets {
pub packets: Vec<Packet>,
}
impl Default for Packets {
fn default() -> Packets {
Packets {
packets: vec![Packet::default(); BLOCK_SIZE],
}
}
}
#[derive(Clone)]
pub struct Response {
pub data: [u8; RESP_SIZE],
pub meta: Meta,
}
impl fmt::Debug for Response {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"Response {{ size: {:?}, addr: {:?} }}",
self.meta.size,
self.meta.get_addr()
)
}
}
impl Default for Response {
fn default() -> Response {
Response {
data: [0u8; RESP_SIZE],
meta: Meta::default(),
}
}
}
#[derive(Debug)]
pub struct Responses {
pub responses: Vec<Response>,
}
impl Default for Responses {
fn default() -> Responses {
Responses {
responses: vec![Response::default(); NUM_RESP],
}
}
}
pub type SharedPackets = Arc<RwLock<Packets>>;
pub type PacketRecycler = Arc<Mutex<Vec<SharedPackets>>>;
pub type Receiver = mpsc::Receiver<SharedPackets>;
pub type Sender = mpsc::Sender<SharedPackets>;
pub type SharedResponses = Arc<RwLock<Responses>>;
pub type ResponseRecycler = Arc<Mutex<Vec<SharedResponses>>>;
pub type Responder = mpsc::Sender<SharedResponses>;
pub type ResponseReceiver = mpsc::Receiver<SharedResponses>;
impl Packets {
fn run_read_from(&mut self, socket: &UdpSocket) -> Result<usize> {
self.packets.resize(BLOCK_SIZE, Packet::default());
let mut i = 0;
socket.set_nonblocking(false)?;
for p in &mut self.packets {
p.meta.size = 0;
match socket.recv_from(&mut p.data) {
Err(_) if i > 0 => {
trace!("got {:?} messages", i);
break;
}
Err(e) => {
info!("recv_from err {:?}", e);
return Err(Error::IO(e));
}
Ok((nrecv, from)) => {
p.meta.size = nrecv;
p.meta.set_addr(&from);
if i == 0 {
socket.set_nonblocking(true)?;
}
}
}
i += 1;
}
Ok(i)
}
fn read_from(&mut self, socket: &UdpSocket) -> Result<()> {
let sz = self.run_read_from(socket)?;
self.packets.resize(sz, Packet::default());
Ok(())
}
}
impl Responses {
fn send_to(&self, socket: &UdpSocket, num: &mut usize) -> Result<()> {
for p in &self.responses {
let a = p.meta.get_addr();
socket.send_to(&p.data[..p.meta.size], &a)?;
//TODO(anatoly): wtf do we do about errors?
*num += 1;
}
Ok(())
}
}
pub fn allocate<T>(recycler: &Arc<Mutex<Vec<Arc<RwLock<T>>>>>) -> Arc<RwLock<T>>
where
T: Default,
{
let mut gc = recycler.lock().expect("lock");
gc.pop()
.unwrap_or_else(|| Arc::new(RwLock::new(Default::default())))
}
pub fn recycle<T>(recycler: &Arc<Mutex<Vec<Arc<RwLock<T>>>>>, msgs: Arc<RwLock<T>>)
where
T: Default,
{
let mut gc = recycler.lock().expect("lock");
gc.push(msgs);
}
pub type PacketReceiver = mpsc::Receiver<SharedPackets>;
pub type PacketSender = mpsc::Sender<SharedPackets>;
pub type BlobSender = mpsc::Sender<VecDeque<SharedBlob>>;
pub type BlobReceiver = mpsc::Receiver<VecDeque<SharedBlob>>;
fn recv_loop(
sock: &UdpSocket,
exit: &Arc<AtomicBool>,
recycler: &PacketRecycler,
channel: &Sender,
re: &PacketRecycler,
channel: &PacketSender,
) -> Result<()> {
loop {
let msgs = allocate(recycler);
let msgs = re.allocate();
let msgs_ = msgs.clone();
loop {
match msgs.write().unwrap().read_from(sock) {
match msgs.write().unwrap().recv_from(sock) {
Ok(()) => {
channel.send(msgs_)?;
break;
}
Err(_) => {
if exit.load(Ordering::Relaxed) {
recycle(recycler, msgs_);
re.recycle(msgs_);
return Ok(());
}
}
@ -247,7 +45,7 @@ pub fn receiver(
sock: UdpSocket,
exit: Arc<AtomicBool>,
recycler: PacketRecycler,
channel: Sender,
channel: PacketSender,
) -> Result<JoinHandle<()>> {
let timer = Duration::new(1, 0);
sock.set_read_timeout(Some(timer))?;
@ -257,21 +55,18 @@ pub fn receiver(
}))
}
fn recv_send(sock: &UdpSocket, recycler: &ResponseRecycler, r: &ResponseReceiver) -> Result<()> {
fn recv_send(sock: &UdpSocket, recycler: &BlobRecycler, r: &BlobReceiver) -> Result<()> {
let timer = Duration::new(1, 0);
let msgs = r.recv_timeout(timer)?;
let msgs_ = msgs.clone();
let mut num = 0;
msgs.read().unwrap().send_to(sock, &mut num)?;
recycle(recycler, msgs_);
let mut msgs = r.recv_timeout(timer)?;
Blob::send_to(recycler, sock, &mut msgs)?;
Ok(())
}
pub fn responder(
sock: UdpSocket,
exit: Arc<AtomicBool>,
recycler: ResponseRecycler,
r: ResponseReceiver,
recycler: BlobRecycler,
r: BlobReceiver,
) -> JoinHandle<()> {
spawn(move || loop {
if recv_send(&sock, &recycler, &r).is_err() && exit.load(Ordering::Relaxed) {
@ -280,10 +75,205 @@ pub fn responder(
})
}
//TODO: we would need to add block authentication before we create the
//window.
fn recv_blobs(recycler: &BlobRecycler, sock: &UdpSocket, s: &BlobSender) -> Result<()> {
let dq = Blob::recv_from(recycler, sock)?;
if !dq.is_empty() {
s.send(dq)?;
}
Ok(())
}
pub fn blob_receiver(
exit: Arc<AtomicBool>,
recycler: BlobRecycler,
sock: UdpSocket,
s: BlobSender,
) -> Result<JoinHandle<()>> {
//DOCUMENTED SIDE-EFFECT
//1 second timeout on socket read
let timer = Duration::new(1, 0);
sock.set_read_timeout(Some(timer))?;
let t = spawn(move || loop {
if exit.load(Ordering::Relaxed) {
break;
}
let ret = recv_blobs(&recycler, &sock, &s);
if ret.is_err() {
break;
}
});
Ok(t)
}
fn recv_window(
window: &mut Vec<Option<SharedBlob>>,
subs: &Arc<RwLock<Subscribers>>,
recycler: &BlobRecycler,
consumed: &mut usize,
r: &BlobReceiver,
s: &BlobSender,
retransmit: &BlobSender,
) -> Result<()> {
let timer = Duration::new(1, 0);
let mut dq = r.recv_timeout(timer)?;
while let Ok(mut nq) = r.try_recv() {
dq.append(&mut nq)
}
{
//retransmit all leader blocks
let mut retransmitq = VecDeque::new();
let rsubs = subs.read().unwrap();
for b in &dq {
let p = b.read().unwrap();
//TODO this check isn't safe against adversarial packets
//we need to maintain a sequence window
trace!(
"idx: {} addr: {:?} leader: {:?}",
p.get_index().unwrap(),
p.meta.addr(),
rsubs.leader.addr
);
if p.meta.addr() == rsubs.leader.addr {
//TODO
//need to copy the retransmitted blob
//otherwise we get into races with which thread
//should do the recycling
//
//a better abstraction would be to recycle when the blob
//is dropped via a weakref to the recycler
let nv = recycler.allocate();
{
let mut mnv = nv.write().unwrap();
let sz = p.meta.size;
mnv.meta.size = sz;
mnv.data[..sz].copy_from_slice(&p.data[..sz]);
}
retransmitq.push_back(nv);
}
}
if !retransmitq.is_empty() {
retransmit.send(retransmitq)?;
}
}
//send a contiguous set of blocks
let mut contq = VecDeque::new();
while let Some(b) = dq.pop_front() {
let b_ = b.clone();
let p = b.write().unwrap();
let pix = p.get_index()? as usize;
let w = pix % NUM_BLOBS;
//TODO, after the blocks are authenticated
//if we get different blocks at the same index
//that is a network failure/attack
trace!("window w: {} size: {}", w, p.meta.size);
{
if window[w].is_none() {
window[w] = Some(b_);
} else {
debug!("duplicate blob at index {:}", w);
}
loop {
let k = *consumed % NUM_BLOBS;
trace!("k: {} consumed: {}", k, *consumed);
if window[k].is_none() {
break;
}
contq.push_back(window[k].clone().unwrap());
window[k] = None;
*consumed += 1;
}
}
}
trace!("sending contq.len: {}", contq.len());
if !contq.is_empty() {
s.send(contq)?;
}
Ok(())
}
pub fn window(
exit: Arc<AtomicBool>,
subs: Arc<RwLock<Subscribers>>,
recycler: BlobRecycler,
r: BlobReceiver,
s: BlobSender,
retransmit: BlobSender,
) -> JoinHandle<()> {
spawn(move || {
let mut window = vec![None; NUM_BLOBS];
let mut consumed = 0;
loop {
if exit.load(Ordering::Relaxed) {
break;
}
let _ = recv_window(
&mut window,
&subs,
&recycler,
&mut consumed,
&r,
&s,
&retransmit,
);
}
})
}
fn retransmit(
subs: &Arc<RwLock<Subscribers>>,
recycler: &BlobRecycler,
r: &BlobReceiver,
sock: &UdpSocket,
) -> Result<()> {
let timer = Duration::new(1, 0);
let mut dq = r.recv_timeout(timer)?;
while let Ok(mut nq) = r.try_recv() {
dq.append(&mut nq);
}
{
let wsubs = subs.read().unwrap();
for b in &dq {
let mut mb = b.write().unwrap();
wsubs.retransmit(&mut mb, sock)?;
}
}
while let Some(b) = dq.pop_front() {
recycler.recycle(b);
}
Ok(())
}
/// Service to retransmit messages from the leader to layer 1 nodes.
/// See `subscribers` for network layer definitions.
/// # Arguments
/// * `sock` - Socket used to send retransmitted blobs to the layer 1 nodes.
/// * `exit` - Boolean to signal system exit.
/// * `subs` - Shared Subscribers structure. This structure needs to be updated and populated
/// by the accountant.
/// * `recycler` - Blob recycler.
/// * `r` - Receive channel for blobs to be retransmitted to all the layer 1 nodes.
pub fn retransmitter(
sock: UdpSocket,
exit: Arc<AtomicBool>,
subs: Arc<RwLock<Subscribers>>,
recycler: BlobRecycler,
r: BlobReceiver,
) -> JoinHandle<()> {
spawn(move || loop {
if exit.load(Ordering::Relaxed) {
break;
}
let _ = retransmit(&subs, &recycler, &r, &sock);
})
}
#[cfg(all(feature = "unstable", test))]
mod bench {
extern crate test;
use self::test::Bencher;
use packet::{Packet, PacketRecycler, PACKET_DATA_SIZE};
use result::Result;
use std::net::{SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, Ordering};
@ -293,7 +283,7 @@ mod bench {
use std::thread::{spawn, JoinHandle};
use std::time::Duration;
use std::time::SystemTime;
use streamer::{allocate, receiver, recycle, Packet, PacketRecycler, Receiver, PACKET_SIZE};
use streamer::{receiver, PacketReceiver};
fn producer(
addr: &SocketAddr,
@ -301,11 +291,11 @@ mod bench {
exit: Arc<AtomicBool>,
) -> JoinHandle<()> {
let send = UdpSocket::bind("0.0.0.0:0").unwrap();
let msgs = allocate(&recycler);
let msgs = recycler.allocate();
let msgs_ = msgs.clone();
msgs.write().unwrap().packets.resize(10, Packet::default());
for w in msgs.write().unwrap().packets.iter_mut() {
w.meta.size = PACKET_SIZE;
w.meta.size = PACKET_DATA_SIZE;
w.meta.set_addr(&addr);
}
spawn(move || loop {
@ -314,7 +304,7 @@ mod bench {
}
let mut num = 0;
for p in msgs_.read().unwrap().packets.iter() {
let a = p.meta.get_addr();
let a = p.meta.addr();
send.send_to(&p.data[..p.meta.size], &a).unwrap();
num += 1;
}
@ -326,7 +316,7 @@ mod bench {
recycler: PacketRecycler,
exit: Arc<AtomicBool>,
rvs: Arc<Mutex<usize>>,
r: Receiver,
r: PacketReceiver,
) -> JoinHandle<()> {
spawn(move || loop {
if exit.load(Ordering::Relaxed) {
@ -337,7 +327,7 @@ mod bench {
Ok(msgs) => {
let msgs_ = msgs.clone();
*rvs.lock().unwrap() += msgs.read().unwrap().packets.len();
recycle(&recycler, msgs_);
recycler.recycle(msgs_);
}
_ => (),
}
@ -347,16 +337,16 @@ mod bench {
let read = UdpSocket::bind("127.0.0.1:0")?;
let addr = read.local_addr()?;
let exit = Arc::new(AtomicBool::new(false));
let recycler = Arc::new(Mutex::new(Vec::new()));
let pack_recycler = PacketRecycler::default();
let (s_reader, r_reader) = channel();
let t_reader = receiver(read, exit.clone(), recycler.clone(), s_reader)?;
let t_producer1 = producer(&addr, recycler.clone(), exit.clone());
let t_producer2 = producer(&addr, recycler.clone(), exit.clone());
let t_producer3 = producer(&addr, recycler.clone(), exit.clone());
let t_reader = receiver(read, exit.clone(), pack_recycler.clone(), s_reader)?;
let t_producer1 = producer(&addr, pack_recycler.clone(), exit.clone());
let t_producer2 = producer(&addr, pack_recycler.clone(), exit.clone());
let t_producer3 = producer(&addr, pack_recycler.clone(), exit.clone());
let rvs = Arc::new(Mutex::new(0));
let t_sink = sink(recycler.clone(), exit.clone(), rvs.clone(), r_reader);
let t_sink = sink(pack_recycler.clone(), exit.clone(), rvs.clone(), r_reader);
let start = SystemTime::now();
let start_val = *rvs.lock().unwrap();
@ -383,17 +373,20 @@ mod bench {
#[cfg(test)]
mod test {
use packet::{Blob, BlobRecycler, Packet, PacketRecycler, Packets, PACKET_DATA_SIZE};
use std::collections::VecDeque;
use std::io;
use std::io::Write;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::channel;
use std::sync::{Arc, Mutex};
use std::sync::{Arc, RwLock};
use std::time::Duration;
use streamer::{allocate, receiver, responder, Packet, Packets, Receiver, Response, Responses,
PACKET_SIZE};
use streamer::{blob_receiver, receiver, responder, retransmitter, window, BlobReceiver,
PacketReceiver};
use subscribers::{Node, Subscribers};
fn get_msgs(r: Receiver, num: &mut usize) {
fn get_msgs(r: PacketReceiver, num: &mut usize) {
for _t in 0..5 {
let timer = Duration::new(1, 0);
match r.recv_timeout(timer) {
@ -405,40 +398,11 @@ mod test {
}
}
}
#[cfg(ipv6)]
#[test]
pub fn streamer_send_test_ipv6() {
let read = UdpSocket::bind("[::1]:0").expect("bind");
let addr = read.local_addr().unwrap();
let send = UdpSocket::bind("[::1]:0").expect("bind");
let exit = Arc::new(Mutex::new(false));
let recycler = Arc::new(Mutex::new(Vec::new()));
let (s_reader, r_reader) = channel();
let t_receiver = receiver(read, exit.clone(), recycler.clone(), s_reader).unwrap();
let (s_responder, r_responder) = channel();
let t_responder = responder(send, exit.clone(), recycler.clone(), r_responder);
let msgs = allocate(&recycler);
msgs.write().unwrap().packets.resize(10, Packet::default());
for (i, w) in msgs.write().unwrap().packets.iter_mut().enumerate() {
w.data[0] = i as u8;
w.size = PACKET_SIZE;
w.set_addr(&addr);
assert_eq!(w.get_addr(), addr);
}
s_responder.send(msgs).expect("send");
let mut num = 0;
get_msgs(r_reader, &mut num);
assert_eq!(num, 10);
exit.store(true, Ordering::Relaxed);
t_receiver.join().expect("join");
t_responder.join().expect("join");
}
#[test]
pub fn streamer_debug() {
write!(io::sink(), "{:?}", Packet::default()).unwrap();
write!(io::sink(), "{:?}", Packets::default()).unwrap();
write!(io::sink(), "{:?}", Response::default()).unwrap();
write!(io::sink(), "{:?}", Responses::default()).unwrap();
write!(io::sink(), "{:?}", Blob::default()).unwrap();
}
#[test]
pub fn streamer_send_test() {
@ -446,22 +410,21 @@ mod test {
let addr = read.local_addr().unwrap();
let send = UdpSocket::bind("127.0.0.1:0").expect("bind");
let exit = Arc::new(AtomicBool::new(false));
let packet_recycler = Arc::new(Mutex::new(Vec::new()));
let resp_recycler = Arc::new(Mutex::new(Vec::new()));
let pack_recycler = PacketRecycler::default();
let resp_recycler = BlobRecycler::default();
let (s_reader, r_reader) = channel();
let t_receiver = receiver(read, exit.clone(), packet_recycler.clone(), s_reader).unwrap();
let t_receiver = receiver(read, exit.clone(), pack_recycler.clone(), s_reader).unwrap();
let (s_responder, r_responder) = channel();
let t_responder = responder(send, exit.clone(), resp_recycler.clone(), r_responder);
let msgs = allocate(&resp_recycler);
msgs.write()
.unwrap()
.responses
.resize(10, Response::default());
for (i, w) in msgs.write().unwrap().responses.iter_mut().enumerate() {
let mut msgs = VecDeque::new();
for i in 0..10 {
let b = resp_recycler.allocate();
let b_ = b.clone();
let mut w = b.write().unwrap();
w.data[0] = i as u8;
w.meta.size = PACKET_SIZE;
w.meta.size = PACKET_DATA_SIZE;
w.meta.set_addr(&addr);
assert_eq!(w.meta.get_addr(), addr);
msgs.push_back(b_);
}
s_responder.send(msgs).expect("send");
let mut num = 0;
@ -471,4 +434,116 @@ mod test {
t_receiver.join().expect("join");
t_responder.join().expect("join");
}
fn get_blobs(r: BlobReceiver, num: &mut usize) {
for _t in 0..5 {
let timer = Duration::new(1, 0);
match r.recv_timeout(timer) {
Ok(m) => {
for (i, v) in m.iter().enumerate() {
assert_eq!(v.read().unwrap().get_index().unwrap() as usize, *num + i);
}
*num += m.len();
}
e => println!("error {:?}", e),
}
if *num == 10 {
break;
}
}
}
#[test]
pub fn window_send_test() {
let read = UdpSocket::bind("127.0.0.1:0").expect("bind");
let addr = read.local_addr().unwrap();
let send = UdpSocket::bind("127.0.0.1:0").expect("bind");
let exit = Arc::new(AtomicBool::new(false));
let subs = Arc::new(RwLock::new(Subscribers::new(
Node::default(),
Node::new([0; 8], 0, send.local_addr().unwrap()),
&[],
)));
let resp_recycler = BlobRecycler::default();
let (s_reader, r_reader) = channel();
let t_receiver =
blob_receiver(exit.clone(), resp_recycler.clone(), read, s_reader).unwrap();
let (s_window, r_window) = channel();
let (s_retransmit, r_retransmit) = channel();
let t_window = window(
exit.clone(),
subs,
resp_recycler.clone(),
r_reader,
s_window,
s_retransmit,
);
let (s_responder, r_responder) = channel();
let t_responder = responder(send, exit.clone(), resp_recycler.clone(), r_responder);
let mut msgs = VecDeque::new();
for v in 0..10 {
let i = 9 - v;
let b = resp_recycler.allocate();
let b_ = b.clone();
let mut w = b.write().unwrap();
w.set_index(i).unwrap();
assert_eq!(i, w.get_index().unwrap());
w.meta.size = PACKET_DATA_SIZE;
w.meta.set_addr(&addr);
msgs.push_back(b_);
}
s_responder.send(msgs).expect("send");
let mut num = 0;
get_blobs(r_window, &mut num);
assert_eq!(num, 10);
let mut q = r_retransmit.recv().unwrap();
while let Ok(mut nq) = r_retransmit.try_recv() {
q.append(&mut nq);
}
assert_eq!(q.len(), 10);
exit.store(true, Ordering::Relaxed);
t_receiver.join().expect("join");
t_responder.join().expect("join");
t_window.join().expect("join");
}
#[test]
pub fn retransmit() {
let read = UdpSocket::bind("127.0.0.1:0").expect("bind");
let send = UdpSocket::bind("127.0.0.1:0").expect("bind");
let exit = Arc::new(AtomicBool::new(false));
let subs = Arc::new(RwLock::new(Subscribers::new(
Node::default(),
Node::default(),
&[Node::new([0; 8], 1, read.local_addr().unwrap())],
)));
let (s_retransmit, r_retransmit) = channel();
let blob_recycler = BlobRecycler::default();
let saddr = send.local_addr().unwrap();
let t_retransmit = retransmitter(
send,
exit.clone(),
subs,
blob_recycler.clone(),
r_retransmit,
);
let mut bq = VecDeque::new();
let b = blob_recycler.allocate();
b.write().unwrap().meta.size = 10;
bq.push_back(b);
s_retransmit.send(bq).unwrap();
let (s_blob_receiver, r_blob_receiver) = channel();
let t_receiver =
blob_receiver(exit.clone(), blob_recycler.clone(), read, s_blob_receiver).unwrap();
let mut oq = r_blob_receiver.recv().unwrap();
assert_eq!(oq.len(), 1);
let o = oq.pop_front().unwrap();
let ro = o.read().unwrap();
assert_eq!(ro.meta.size, 10);
assert_eq!(ro.meta.addr(), saddr);
exit.store(true, Ordering::Relaxed);
t_receiver.join().expect("join");
t_retransmit.join().expect("join");
}
}
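The heart of recv_window is a fixed-size reassembly ring: every blob lands in slot index % NUM_BLOBS, and whenever the slot at consumed is occupied a contiguous run is drained and forwarded. A toy sketch of just that bookkeeping, with u64 sequence numbers standing in for blobs (Window here is invented, not the streamer's type):

const N: usize = 8;

struct Window {
    slots: Vec<Option<u64>>,
    consumed: usize,
}

impl Window {
    fn new() -> Window {
        Window { slots: vec![None; N], consumed: 0 }
    }

    // File one arrival, then drain the contiguous run starting at consumed,
    // mirroring the inner loop of recv_window.
    fn recv(&mut self, ix: u64) -> Vec<u64> {
        self.slots[ix as usize % N] = Some(ix);
        let mut contiguous = Vec::new();
        while let Some(v) = self.slots[self.consumed % N].take() {
            contiguous.push(v);
            self.consumed += 1;
        }
        contiguous
    }
}

fn main() {
    let mut w = Window::new();
    assert!(w.recv(1).is_empty()); // arrives early, buffered out of order
    assert_eq!(w.recv(0), vec![0, 1]); // 0 fills the gap, both drain in order
}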

149
src/subscribers.rs Normal file
View File

@ -0,0 +1,149 @@
//! The `subscribers` module defines data structures to keep track of nodes on the network.
//! The network is arranged in layers:
//!
//! * layer 0 - Leader.
//! * layer 1 - As many nodes as we can fit to quickly get reliable `2/3+1` finality
//! * layer 2 - Everyone else; if layer 1 is `2^10` nodes, layer 2 should be able to fit `2^20` nodes.
//!
//! It's up to the external state machine to keep this updated.
use packet::Blob;
use rayon::prelude::*;
use result::{Error, Result};
use std::net::{SocketAddr, UdpSocket};
use std::fmt;
#[derive(Clone, PartialEq)]
pub struct Node {
pub id: [u64; 8],
pub weight: u64,
pub addr: SocketAddr,
}
//sockaddr doesn't implement default
impl Default for Node {
fn default() -> Node {
Node {
id: [0; 8],
weight: 0,
addr: "0.0.0.0:0".parse().unwrap(),
}
}
}
impl Node {
pub fn new(id: [u64; 8], weight: u64, addr: SocketAddr) -> Node {
Node { id, weight, addr }
}
fn key(&self) -> i64 {
(self.weight as i64).checked_neg().unwrap()
}
}
impl fmt::Debug for Node {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Node {{ weight: {} addr: {} }}", self.weight, self.addr)
}
}
pub struct Subscribers {
data: Vec<Node>,
pub me: Node,
pub leader: Node,
}
impl Subscribers {
pub fn new(me: Node, leader: Node, network: &[Node]) -> Subscribers {
let mut h = Subscribers {
data: vec![],
me: me.clone(),
leader: leader.clone(),
};
h.insert(&[me, leader]);
h.insert(network);
h
}
/// retransmit messages from the leader to layer 1 nodes
pub fn retransmit(&self, blob: &mut Blob, s: &UdpSocket) -> Result<()> {
let errs: Vec<_> = self.data
.par_iter()
.map(|i| {
if self.me == *i {
return Ok(0);
}
if self.leader == *i {
return Ok(0);
}
trace!("retransmit blob to {}", i.addr);
s.send_to(&blob.data[..blob.meta.size], &i.addr)
})
.collect();
for e in errs {
trace!("retransmit result {:?}", e);
match e {
Err(e) => return Err(Error::IO(e)),
_ => (),
}
}
Ok(())
}
pub fn insert(&mut self, ns: &[Node]) {
self.data.extend_from_slice(ns);
self.data.sort_by_key(Node::key);
}
}
#[cfg(test)]
mod test {
use packet::Blob;
use rayon::prelude::*;
use std::net::UdpSocket;
use std::time::Duration;
use subscribers::{Node, Subscribers};
#[test]
pub fn subscriber() {
let mut me = Node::default();
me.weight = 10;
let mut leader = Node::default();
leader.weight = 11;
let mut s = Subscribers::new(me, leader, &[]);
assert_eq!(s.data.len(), 2);
assert_eq!(s.data[0].weight, 11);
assert_eq!(s.data[1].weight, 10);
let mut n = Node::default();
n.weight = 12;
s.insert(&[n]);
assert_eq!(s.data.len(), 3);
assert_eq!(s.data[0].weight, 12);
}
#[test]
pub fn retransmit() {
let s1 = UdpSocket::bind("127.0.0.1:0").expect("bind");
let s2 = UdpSocket::bind("127.0.0.1:0").expect("bind");
let s3 = UdpSocket::bind("127.0.0.1:0").expect("bind");
let n1 = Node::new([0; 8], 0, s1.local_addr().unwrap());
let n2 = Node::new([0; 8], 0, s2.local_addr().unwrap());
let mut s = Subscribers::new(n1.clone(), n2.clone(), &[]);
let n3 = Node::new([0; 8], 0, s3.local_addr().unwrap());
s.insert(&[n3]);
let mut b = Blob::default();
b.meta.size = 10;
let s4 = UdpSocket::bind("127.0.0.1:0").expect("bind");
s.retransmit(&mut b, &s4).unwrap();
let res: Vec<_> = [s1, s2, s3]
.into_par_iter()
.map(|s| {
let mut b = Blob::default();
s.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
s.recv_from(&mut b.data).is_err()
})
.collect();
assert_eq!(res, [true, true, false]);
let mut n4 = Node::default();
n4.addr = "255.255.255.255:1".parse().unwrap();
s.insert(&[n4]);
assert!(s.retransmit(&mut b, &s4).is_err());
}
}
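Subscribers::insert keeps data sorted heaviest-first by sorting on the negated weight from Node::key. std::cmp::Reverse expresses the same descending order without the i64 cast; a one-line sketch:

use std::cmp::Reverse;

fn main() {
    // The ordering the subscriber test expects: 12, 11, 10.
    let mut weights: Vec<u64> = vec![10, 11, 12];
    weights.sort_by_key(|w| Reverse(*w));
    assert_eq!(weights, vec![12, 11, 10]);
}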

View File

@ -2,18 +2,27 @@
use bincode::serialize;
use chrono::prelude::*;
use rayon::prelude::*;
use hash::Hash;
use plan::{Condition, Payment, Plan};
use rayon::prelude::*;
use signature::{KeyPair, KeyPairUtil, PublicKey, Signature, SignatureUtil};
pub const SIGNED_DATA_OFFSET: usize = 112;
pub const SIG_OFFSET: usize = 8;
pub const PUB_KEY_OFFSET: usize = 80;
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct TransactionData {
pub tokens: i64,
pub last_id: Hash,
pub plan: Plan,
}
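// Grouping the signed fields in one struct lets get_sign_data() serialize
// exactly this region, so the signed bytes sit at a fixed offset in the
// wire format (see SIGNED_DATA_OFFSET and test_layout below).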
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Transaction {
pub from: PublicKey,
pub plan: Plan,
pub tokens: i64,
pub last_id: Hash,
pub sig: Signature,
pub from: PublicKey,
pub data: TransactionData,
}
impl Transaction {
@ -22,11 +31,13 @@ impl Transaction {
let from = from_keypair.pubkey();
let plan = Plan::Pay(Payment { tokens, to });
let mut tr = Transaction {
from,
plan,
tokens,
last_id,
sig: Signature::default(),
data: TransactionData {
plan,
tokens,
last_id,
},
from: from,
};
tr.sign(from_keypair);
tr
@ -46,10 +57,12 @@ impl Transaction {
(Condition::Signature(from), Payment { tokens, to: from }),
);
let mut tr = Transaction {
from,
plan,
tokens,
last_id,
data: TransactionData {
plan,
tokens,
last_id,
},
from: from,
sig: Signature::default(),
};
tr.sign(from_keypair);
@ -57,7 +70,7 @@ impl Transaction {
}
fn get_sign_data(&self) -> Vec<u8> {
serialize(&(&self.from, &self.plan, &self.tokens, &self.last_id)).unwrap()
serialize(&(&self.data)).unwrap()
}
/// Sign this transaction.
@ -66,20 +79,43 @@ impl Transaction {
self.sig = Signature::clone_from_slice(keypair.sign(&sign_data).as_ref());
}
/// Verify this transaction's signature and its spending plan.
pub fn verify(&self) -> bool {
self.sig.verify(&self.from, &self.get_sign_data()) && self.plan.verify(self.tokens)
pub fn verify_sig(&self) -> bool {
self.sig.verify(&self.from, &self.get_sign_data())
}
pub fn verify_plan(&self) -> bool {
self.data.plan.verify(self.data.tokens)
}
}
#[cfg(test)]
pub fn test_tx() -> Transaction {
let keypair1 = KeyPair::new();
let pubkey1 = keypair1.pubkey();
let zero = Hash::default();
Transaction::new(&keypair1, pubkey1, 42, zero)
}
#[cfg(test)]
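// Naive subsequence search, e.g. memfind(b"solana", b"lan") == Some(2);
// test_layout uses it to pin down the serialized field offsets.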
pub fn memfind<A: Eq>(a: &[A], b: &[A]) -> Option<usize> {
assert!(a.len() >= b.len());
let end = a.len() - b.len() + 1;
for i in 0..end {
if a[i..i + b.len()] == b[..] {
return Some(i);
}
}
None
}
/// Verify a batch of signatures.
pub fn verify_signatures(transactions: &[Transaction]) -> bool {
transactions.par_iter().all(|tr| tr.verify())
transactions.par_iter().all(|tr| tr.verify_sig())
}
/// Verify a batch of spending plans.
pub fn verify_plans(transactions: &[Transaction]) -> bool {
transactions.par_iter().all(|tr| tr.plan.verify(tr.tokens))
transactions.par_iter().all(|tr| tr.verify_plan())
}
/// Verify a batch of transactions.
@ -97,7 +133,7 @@ mod tests {
let keypair = KeyPair::new();
let zero = Hash::default();
let tr0 = Transaction::new(&keypair, keypair.pubkey(), 42, zero);
assert!(tr0.verify());
assert!(tr0.verify_plan());
}
#[test]
@ -107,7 +143,7 @@ mod tests {
let keypair1 = KeyPair::new();
let pubkey1 = keypair1.pubkey();
let tr0 = Transaction::new(&keypair0, pubkey1, 42, zero);
assert!(tr0.verify());
assert!(tr0.verify_plan());
}
#[test]
@ -117,10 +153,12 @@ mod tests {
to: Default::default(),
});
let claim0 = Transaction {
data: TransactionData {
plan,
tokens: 0,
last_id: Default::default(),
},
from: Default::default(),
plan,
tokens: 0,
last_id: Default::default(),
sig: Default::default(),
};
let buf = serialize(&claim0).unwrap();
@ -129,14 +167,17 @@ mod tests {
}
#[test]
fn test_bad_event_signature() {
fn test_token_attack() {
let zero = Hash::default();
let keypair = KeyPair::new();
let pubkey = keypair.pubkey();
let mut tr = Transaction::new(&keypair, pubkey, 42, zero);
tr.sign(&keypair);
tr.tokens = 1_000_000; // <-- attack!
assert!(!tr.verify());
tr.data.tokens = 1_000_000; // <-- attack, part 1!
if let Plan::Pay(ref mut payment) = tr.data.plan {
payment.tokens = tr.data.tokens; // <-- attack, part 2!
};
assert!(tr.verify_plan());
assert!(!tr.verify_sig());
}
#[test]
@ -147,11 +188,38 @@ mod tests {
let pubkey1 = keypair1.pubkey();
let zero = Hash::default();
let mut tr = Transaction::new(&keypair0, pubkey1, 42, zero);
tr.sign(&keypair0);
if let Plan::Pay(ref mut payment) = tr.plan {
if let Plan::Pay(ref mut payment) = tr.data.plan {
payment.to = thief_keypair.pubkey(); // <-- attack!
};
assert!(!tr.verify());
assert!(tr.verify_plan());
assert!(!tr.verify_sig());
}
#[test]
fn test_layout() {
let tr = test_tx();
let sign_data = tr.get_sign_data();
let tx = serialize(&tr).unwrap();
assert_matches!(memfind(&tx, &sign_data), Some(SIGNED_DATA_OFFSET));
assert_matches!(memfind(&tx, &tr.sig), Some(SIG_OFFSET));
assert_matches!(memfind(&tx, &tr.from), Some(PUB_KEY_OFFSET));
}
#[test]
fn test_overspend_attack() {
let keypair0 = KeyPair::new();
let keypair1 = KeyPair::new();
let zero = Hash::default();
let mut tr = Transaction::new(&keypair0, keypair1.pubkey(), 1, zero);
if let Plan::Pay(ref mut payment) = tr.data.plan {
payment.tokens = 2; // <-- attack!
}
assert!(!tr.verify_plan());
// Also, ensure all branches of the plan spend all tokens
if let Plan::Pay(ref mut payment) = tr.data.plan {
payment.tokens = 0; // <-- whoops!
}
assert!(!tr.verify_plan());
}
#[test]