Compare commits: v0.6.0...v0.7.0-alpha (216 commits)
(codecov configuration)
@@ -1,2 +1,5 @@
 ignore:
   - "src/bin"
+coverage:
+  status:
+    patch: off
32  Cargo.toml

@@ -1,9 +1,10 @@
 [package]
 name = "solana"
-description = "Blockchain Rebuilt for Scale"
-version = "0.6.0"
+description = "Blockchain, Rebuilt for Scale"
+version = "0.7.0-alpha"
 documentation = "https://docs.rs/solana"
 homepage = "http://solana.com/"
 readme = "README.md"
 repository = "https://github.com/solana-labs/solana"
 authors = [
     "Anatoly Yakovenko <anatoly@solana.com>",

@@ -40,6 +41,10 @@ path = "src/bin/mint.rs"
 name = "solana-mint-demo"
 path = "src/bin/mint-demo.rs"

+[[bin]]
+name = "solana-drone"
+path = "src/bin/drone.rs"
+
 [badges]
 codecov = { repository = "solana-labs/solana", branch = "master", service = "github" }

@@ -52,7 +57,7 @@ erasure = []
 [dependencies]
 rayon = "1.0.0"
 sha2 = "0.7.0"
-generic-array = { version = "0.9.0", default-features = false, features = ["serde"] }
+generic-array = { version = "0.11.1", default-features = false, features = ["serde"] }
 serde = "1.0.27"
 serde_derive = "1.0.27"
 serde_json = "1.0.10"

@@ -60,12 +65,15 @@ ring = "0.12.1"
 untrusted = "0.5.1"
 bincode = "1.0.0"
 chrono = { version = "0.4.0", features = ["serde"] }
-log = "^0.4.1"
-env_logger = "^0.4.1"
-matches = "^0.1.6"
-byteorder = "^1.2.1"
-libc = "^0.2.1"
-getopts = "^0.2"
-isatty = "0.1"
-rand = "0.4.2"
-pnet = "^0.21.0"
+log = "0.4.2"
+env_logger = "0.5.10"
+matches = "0.1.6"
+byteorder = "1.2.1"
+libc = "0.2.1"
+getopts = "0.2"
+atty = "0.2"
+rand = "0.5.1"
+pnet_datalink = "0.21.0"
+tokio = "0.1"
+tokio-codec = "0.1"
+tokio-io = "0.1"
2  LICENSE

@@ -1,4 +1,4 @@
-Copyright 2018 Anatoly Yakovenko, Greg Fitzgerald and Stephen Akridge
+Copyright 2018 Solana Labs, Inc.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
123  README.md

@@ -1,19 +1,19 @@
[](https://crates.io/crates/solana)
[](https://docs.rs/solana)
[](https://buildkite.com/solana-labs/solana)
[](https://solana-ci-gate.herokuapp.com/buildkite_public_log?https://buildkite.com/solana-labs/solana/builds/latest/master)
[](https://codecov.io/gh/solana-labs/solana)

Blockchain, Rebuilt for Scale
===

Solana™ is a new blockchain architecture built from the ground up for scale. The architecture supports
up to 710 thousand transactions per second on a gigabit network.

Disclaimer
===

All claims, content, designs, algorithms, estimates, roadmaps, specifications, and performance measurements described in this project are done with the author's best effort. It is up to the reader to check and validate their accuracy and truthfulness. Furthermore nothing in this project constitutes a solicitation for investment.

Solana: Blockchain Rebuilt for Scale
===

Solana™ is a new blockchain architecture built from the ground up for scale. The architecture supports
up to 710 thousand transactions per second on a gigabit network.

Introduction
===

@@ -58,50 +58,39 @@ your odds of success if you check out the
before proceeding:

```bash
$ git checkout v0.6.0
$ git checkout v0.6.1
```

Configuration Setup
---

The network is initialized with a genesis ledger and leader/validator configuration files.
These files can be generated by running the following script.

```bash
$ ./multinode-demo/setup.sh
```

Singlenode Testnet
---

The fullnode server is initialized with a ledger from stdin and
generates new ledger entries on stdout. To create the input ledger, we'll need
to create *the mint* and use it to generate a *genesis ledger*. It's done in
two steps because the mint-demo.json file contains private keys that will be
used later in this demo.

```bash
$ echo 1000000000 | cargo run --release --bin solana-mint-demo > mint-demo.json
$ cat mint-demo.json | cargo run --release --bin solana-genesis-demo > genesis.log
```

Before you start a fullnode, make sure you know the IP address of the machine you
want to be the leader for the demo, and make sure that udp ports 8000-10000 are
open on all the machines you want to test with.

Generate a leader configuration file with:

```bash
cargo run --release --bin solana-fullnode-config > leader.json
```

Now start the server:

```bash
$ cat ./multinode-demo/leader.sh
#!/bin/bash
export RUST_LOG=solana=info
sudo sysctl -w net.core.rmem_max=26214400
cargo run --release --bin solana-fullnode -- -l leader.json < genesis.log
$ ./multinode-demo/leader.sh > leader-txs.log
$ ./multinode-demo/leader.sh
```

To run a performance-enhanced fullnode on Linux, download `libcuda_verify_ed25519.a`. Enable
it by adding `--features=cuda` to the line that runs `solana-fullnode` in `leader.sh`.
it by adding `--features=cuda` to the line that runs `solana-fullnode` in
`leader.sh`. [CUDA 9.2](https://developer.nvidia.com/cuda-downloads) must be installed on your system.

```bash
$ wget https://solana-build-artifacts.s3.amazonaws.com/v0.6.0/libcuda_verify_ed25519.a
cargo run --release --features=cuda --bin solana-fullnode -- -l leader.json < genesis.log
$ ./fetch-perf-libs.sh
$ cargo run --release --features=cuda --bin solana-fullnode -- -l leader.json < genesis.log
```

Wait a few seconds for the server to initialize. It will print "Ready." when it's ready to

@@ -113,22 +102,14 @@ Multinode Testnet
To run a multinode testnet, after starting a leader node, spin up some validator nodes:

```bash
$ cat ./multinode-demo/validator.sh
#!/bin/bash
rsync -v -e ssh $1/mint-demo.json .
rsync -v -e ssh $1/leader.json .
rsync -v -e ssh $1/genesis.log .
export RUST_LOG=solana=info
sudo sysctl -w net.core.rmem_max=26214400
cargo run --release --bin solana-fullnode -- -l validator.json -v leader.json -b 9000 -d < genesis.log
$ ./multinode-demo/validator.sh ubuntu@10.0.1.51:~/solana > validator-txs.log #The leader machine
$ ./multinode-demo/validator.sh ubuntu@10.0.1.51:~/solana #The leader machine
```

As with the leader node, you can run a performance-enhanced validator fullnode by adding
`--features=cuda` to the line that runs `solana-fullnode` in `validator.sh`.

```bash
cargo run --release --features=cuda --bin solana-fullnode -- -l validator.json -v leader.json -b 9000 -d < genesis.log
$ cargo run --release --features=cuda --bin solana-fullnode -- -l validator.json -v leader.json < genesis.log
```

@@ -139,13 +120,7 @@ Now that your singlenode or multinode testnet is up and running, in a separate s
the JSON configuration file here, not the genesis ledger.

```bash
$ cat ./multinode-demo/client.sh
#!/bin/bash
export RUST_LOG=solana=info
rsync -v -e ssh $1/leader.json .
rsync -v -e ssh $1/mint-demo.json .
cat mint-demo.json | cargo run --release --bin solana-client-demo -- -l leader.json
$ ./multinode-demo/client.sh ubuntu@10.0.1.51:~/solana #The leader machine
$ ./multinode-demo/client.sh ubuntu@10.0.1.51:~/solana 2 #The leader machine and the total number of nodes in the network
```

What just happened? The client demo spins up several threads to send 500,000 transactions
@@ -157,6 +132,26 @@ demo completes after it has convinced itself the testnet won't process any addit
transactions. You should see several TPS measurements printed to the screen. In the
multinode variation, you'll see TPS measurements for each validator node as well.

Linux Snap
---
A Linux [Snap](https://snapcraft.io/) is available, which can be used to
easily get Solana running on supported Linux systems without building anything
from source. The `edge` Snap channel is updated daily with the latest
development from the `master` branch. To install:
```bash
$ sudo snap install solana --edge --devmode
```
(`--devmode` flag is required only for `solana.fullnode-cuda`)

Once installed the usual Solana programs will be available as `solana.*` instead
of `solana-*`. For example, `solana.fullnode` instead of `solana-fullnode`.

Update to the latest version at any time with
```bash
$ snap info solana
$ sudo snap refresh solana --devmode
```

Developing
===

@@ -210,6 +205,17 @@ to see the debug and info sections for streamer and server respectively. General
we are using debug for infrequent debug messages, trace for potentially frequent messages and
info for performance-related logging.
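As an editorial aside on that logging convention (this example is not part of the diff), here is a minimal Rust sketch using the `log` and `env_logger` crates that appear in the dependency list above; the function and messages are invented for illustration only.

```rust
#[macro_use]
extern crate log;
extern crate env_logger;

// Illustrative only: one stage following the convention described above.
fn process_batch(batch: &[u8]) {
    trace!("received {} bytes", batch.len()); // potentially very frequent
    if batch.is_empty() {
        debug!("empty batch, nothing to do"); // infrequent debugging detail
    }
    info!("processed batch of {} bytes", batch.len()); // performance-related
}

fn main() {
    env_logger::init();
    process_batch(&[0u8; 64]);
}
```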
Attaching to a running process with gdb

```
$ sudo gdb
attach <PID>
set logging on
thread apply all bt
```

This will dump all the threads' stack traces into gdb.txt

Benchmarking
---

@@ -228,12 +234,21 @@ $ cargo +nightly bench --features="unstable"
Code coverage
---

To generate code coverage statistics, run kcov via Docker:
To generate code coverage statistics, install cargo-cov. Note: the tool currently only works
in Rust nightly.

```bash
$ ./ci/coverage.sh
$ cargo +nightly install cargo-cov
```
The coverage report will be written to `./target/cov/index.html`

Run cargo-cov and generate a report:

```bash
$ cargo +nightly cov test
$ cargo +nightly cov report --open
```

The coverage report will be written to `./target/cov/report/index.html`

Why coverage? While most see coverage as a code quality metric, we see it primarily as a developer
1  build.rs

@@ -11,5 +11,6 @@ fn main() {
    }
    if !env::var("CARGO_FEATURE_ERASURE").is_err() {
        println!("cargo:rustc-link-lib=dylib=Jerasure");
        println!("cargo:rustc-link-lib=dylib=gf_complete");
    }
}
1  ci/.gitignore (vendored)

@@ -1,2 +1,3 @@
 /node_modules/
 /package-lock.json
+/snapcraft.credentials
88  ci/README.md  Normal file

@@ -0,0 +1,88 @@
Our CI infrastructure is built around [BuildKite](https://buildkite.com) with some
additional GitHub integration provided by https://github.com/mvines/ci-gate

## Agent Queues

We define two [Agent Queues](https://buildkite.com/docs/agent/v3/queues):
`queue=default` and `queue=cuda`. The `default` queue should be favored and
runs on lower-cost CPU instances. The `cuda` queue is only necessary for
running **tests** that depend on GPU (via CUDA) access -- CUDA builds may still
be run on the `default` queue, and the [buildkite artifact
system](https://buildkite.com/docs/builds/artifacts) can be used to transfer build
products over to a GPU instance for testing.

## Buildkite Agent Management

### Buildkite GCP Setup

CI runs on Google Cloud Platform via two Compute Engine Instance groups:
`ci-default` and `ci-cuda`. Autoscaling is currently disabled and the number of
VM Instances in each group is manually adjusted.

#### Updating a CI Disk Image

Each Instance group has its own disk image, `ci-default-vX` and
`ci-cuda-vY`, where *X* and *Y* are incremented each time the image is changed.

The process to update a disk image is as follows (TODO: make this less manual):

1. Create a new VM Instance using the disk image to modify.
2. Once the VM boots, ssh to it and modify the disk as desired.
3. Stop the VM Instance running the modified disk. Remember the name of the VM disk.
4. From another machine, `gcloud auth login`, then create a new Disk Image based
   off the modified VM Instance:
   ```
   $ gcloud compute images create ci-default-v5 --source-disk xxx --source-disk-zone us-east1-b
   ```
   or
   ```
   $ gcloud compute images create ci-cuda-v5 --source-disk xxx --source-disk-zone us-east1-b
   ```
5. Delete the new VM instance.
6. Go to the Instance templates tab, find the existing template named
   `ci-default-vX` or `ci-cuda-vY` and select it. Use the "Copy" button to create
   a new Instance template called `ci-default-vX+1` or `ci-cuda-vY+1` with the
   newly created Disk image.
7. Go to the Instance Groups tab and find the applicable group, `ci-default` or
   `ci-cuda`. Edit the Instance Group in two steps: (a) Set the number of
   instances to 0 and wait for them all to terminate, (b) Update the Instance
   template and restore the number of instances to the original value.
8. Clean up the previous version by deleting it from Instance Templates and
   Images.

## Reference

### Buildkite AWS CloudFormation Setup

**AWS CloudFormation is currently inactive, although it may be restored in the
future**

AWS CloudFormation can be used to scale machines up and down based on the
current CI load. If no machine is currently running it can take up to 60
seconds to spin up a new instance, so please remain calm during this time.

#### AMI
We use a custom AWS AMI built via https://github.com/solana-labs/elastic-ci-stack-for-aws/tree/solana/cuda.

Use the following process to update this AMI as dependencies change:
```bash
$ export AWS_ACCESS_KEY_ID=my_access_key
$ export AWS_SECRET_ACCESS_KEY=my_secret_access_key
$ git clone https://github.com/solana-labs/elastic-ci-stack-for-aws.git -b solana/cuda
$ cd elastic-ci-stack-for-aws/
$ make build
$ make build-ami
```

Watch for the *"amazon-ebs: AMI:"* log message to extract the name of the new
AMI. For example:
```
amazon-ebs: AMI: ami-07118545e8b4ce6dc
```
The new AMI should also now be visible in your EC2 Dashboard. Go to the desired
AWS CloudFormation stack, update the **ImageId** field to the new AMI id, and
*apply* the stack changes.
@@ -1,16 +1,31 @@
steps:
  - command: "ci/coverage.sh"
    name: "coverage [public]"
  - command: "ci/docker-run.sh rust ci/test-stable.sh"
    name: "stable [public]"
  - command: "ci/docker-run.sh rustlang/rust:nightly ci/test-nightly.sh || true"
    name: "nightly - FAILURES IGNORED [public]"
  - command: "ci/docker-run.sh rust ci/test-ignored.sh"
    name: "ignored [public]"
  - command: "ci/test-cuda.sh"
    name: "cuda"
    timeout_in_minutes: 20
  - command: "ci/shellcheck.sh"
    name: "shellcheck [public]"
    timeout_in_minutes: 20
  - wait
  - command: "ci/publish.sh"
    name: "publish release artifacts"
  - command: "ci/docker-run.sh rustlang/rust:nightly ci/test-nightly.sh"
    name: "nightly [public]"
    timeout_in_minutes: 20
  - command: "ci/test-stable-perf.sh"
    name: "stable-perf [public]"
    timeout_in_minutes: 20
    retry:
      automatic:
        - exit_status: "*"
          limit: 2
    agents:
      - "queue=cuda"
  - command: "ci/snap.sh [public]"
    timeout_in_minutes: 20
    name: "snap [public]"
  - wait
  - command: "ci/publish-crate.sh [public]"
    timeout_in_minutes: 20
    name: "publish crate"
  - command: "ci/hoover.sh [public]"
    timeout_in_minutes: 20
    name: "clean agent"
@@ -1,21 +0,0 @@
|
||||
#!/bin/bash -e
|
||||
|
||||
cd "$(dirname "$0")/.."
|
||||
|
||||
ci/docker-run.sh evilmachines/rust-cargo-kcov \
|
||||
bash -exc "\
|
||||
export RUST_BACKTRACE=1; \
|
||||
cargo build --verbose; \
|
||||
cargo kcov --lib --verbose; \
|
||||
"
|
||||
|
||||
echo Coverage report:
|
||||
ls -l target/cov/index.html
|
||||
|
||||
if [[ -z "$CODECOV_TOKEN" ]]; then
|
||||
echo CODECOV_TOKEN undefined
|
||||
else
|
||||
bash <(curl -s https://codecov.io/bash)
|
||||
fi
|
||||
|
||||
exit 0
|
@@ -19,7 +19,12 @@ fi
|
||||
docker pull "$IMAGE"
|
||||
shift
|
||||
|
||||
ARGS=(--workdir /solana --volume "$PWD:/solana" --rm)
|
||||
ARGS=(
|
||||
--workdir /solana
|
||||
--volume "$PWD:/solana"
|
||||
--env "HOME=/solana"
|
||||
--rm
|
||||
)
|
||||
|
||||
ARGS+=(--env "CARGO_HOME=/solana/.cargo")
|
||||
|
||||
@@ -28,14 +33,18 @@ ARGS+=(--env "CARGO_HOME=/solana/.cargo")
|
||||
ARGS+=(--security-opt "seccomp=unconfined")
|
||||
|
||||
# Ensure files are created with the current host uid/gid
|
||||
ARGS+=(--user "$(id -u):$(id -g)")
|
||||
if [[ -z "$SOLANA_DOCKER_RUN_NOSETUID" ]]; then
|
||||
ARGS+=(--user "$(id -u):$(id -g)")
|
||||
fi
|
||||
|
||||
# Environment variables to propagate into the container
|
||||
ARGS+=(
|
||||
--env BUILDKITE_BRANCH
|
||||
--env BUILDKITE_TAG
|
||||
--env CODECOV_TOKEN
|
||||
--env CRATES_IO_TOKEN
|
||||
--env SNAPCRAFT_CREDENTIALS_KEY
|
||||
)
|
||||
|
||||
set -x
|
||||
docker run "${ARGS[@]}" "$IMAGE" "$@"
|
||||
exec docker run "${ARGS[@]}" "$IMAGE" "$@"
|
||||
|
7  ci/docker-snapcraft/Dockerfile  Normal file

@@ -0,0 +1,7 @@
+FROM snapcraft/xenial-amd64
+
+# Update snapcraft to latest version
+RUN apt-get update -qq \
+    && apt-get install -y snapcraft \
+    && rm -rf /var/lib/apt/lists/* \
+    && snapcraft --version
6  ci/docker-snapcraft/build.sh  Executable file

@@ -0,0 +1,6 @@
+#!/bin/bash -ex
+
+cd "$(dirname "$0")"
+
+docker build -t solanalabs/snapcraft .
+docker push solanalabs/snapcraft
57
ci/hoover.sh
Executable file
57
ci/hoover.sh
Executable file
@@ -0,0 +1,57 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Regular maintenance performed on a buildkite agent to control disk usage
|
||||
#
|
||||
|
||||
echo --- Delete all exited containers first
|
||||
(
|
||||
set -x
|
||||
exited=$(docker ps -aq --no-trunc --filter "status=exited")
|
||||
if [[ -n "$exited" ]]; then
|
||||
# shellcheck disable=SC2086 # Don't want to double quote "$exited"
|
||||
docker rm $exited
|
||||
fi
|
||||
)
|
||||
|
||||
echo --- Delete untagged images
|
||||
(
|
||||
set -x
|
||||
untagged=$(docker images | grep '<none>'| awk '{ print $3 }')
|
||||
if [[ -n "$untagged" ]]; then
|
||||
# shellcheck disable=SC2086 # Don't want to double quote "$untagged"
|
||||
docker rmi $untagged
|
||||
fi
|
||||
)
|
||||
|
||||
echo --- Delete all dangling images
|
||||
(
|
||||
set -x
|
||||
dangling=$(docker images --filter 'dangling=true' -q --no-trunc | sort | uniq)
|
||||
if [[ -n "$dangling" ]]; then
|
||||
# shellcheck disable=SC2086 # Don't want to double quote "$dangling"
|
||||
docker rmi $dangling
|
||||
fi
|
||||
)
|
||||
|
||||
echo --- Remove unused docker networks
|
||||
(
|
||||
set -x
|
||||
docker network prune -f
|
||||
)
|
||||
|
||||
echo "--- Delete /tmp files older than 1 day owned by $(whoami)"
|
||||
(
|
||||
set -x
|
||||
find /tmp -maxdepth 1 -user "$(whoami)" -mtime +1 -print0 | xargs -0 rm -rf
|
||||
)
|
||||
|
||||
echo --- System Status
|
||||
(
|
||||
set -x
|
||||
docker images
|
||||
docker ps
|
||||
docker network ls
|
||||
df -h
|
||||
)
|
||||
|
||||
exit 0
|
@@ -16,4 +16,4 @@ if [[ ! -x $BKRUN ]]; then
|
||||
fi
|
||||
|
||||
set -x
|
||||
./ci/node_modules/.bin/bkrun ci/buildkite.yml
|
||||
exec ./ci/node_modules/.bin/bkrun ci/buildkite.yml
|
||||
|
40
ci/snap.sh
Executable file
40
ci/snap.sh
Executable file
@@ -0,0 +1,40 @@
|
||||
#!/bin/bash -e
|
||||
|
||||
cd "$(dirname "$0")/.."
|
||||
|
||||
DRYRUN=
|
||||
if [[ -z $BUILDKITE_BRANCH || $BUILDKITE_BRANCH =~ pull/* ]]; then
|
||||
DRYRUN="echo"
|
||||
fi
|
||||
|
||||
if [[ -z "$BUILDKITE_TAG" ]]; then
|
||||
SNAP_CHANNEL=edge
|
||||
else
|
||||
SNAP_CHANNEL=beta
|
||||
fi
|
||||
|
||||
if [[ -z $DRYRUN ]]; then
|
||||
[[ -n $SNAPCRAFT_CREDENTIALS_KEY ]] || {
|
||||
echo SNAPCRAFT_CREDENTIALS_KEY not defined
|
||||
exit 1;
|
||||
}
|
||||
(
|
||||
openssl aes-256-cbc -d \
|
||||
-in ci/snapcraft.credentials.enc \
|
||||
-out ci/snapcraft.credentials \
|
||||
-k "$SNAPCRAFT_CREDENTIALS_KEY"
|
||||
|
||||
snapcraft login --with ci/snapcraft.credentials
|
||||
) || {
|
||||
rm -f ci/snapcraft.credentials;
|
||||
exit 1
|
||||
}
|
||||
fi
|
||||
|
||||
set -x
|
||||
|
||||
echo --- build
|
||||
snapcraft
|
||||
|
||||
echo --- publish
|
||||
$DRYRUN snapcraft push solana_*.snap --release $SNAP_CHANNEL
|
BIN  ci/snapcraft.credentials.enc  Normal file
Binary file not shown.
@@ -1,22 +0,0 @@
|
||||
#!/bin/bash -e
|
||||
|
||||
cd "$(dirname "$0")/.."
|
||||
|
||||
LIB=libcuda_verify_ed25519.a
|
||||
if [[ ! -r $LIB ]]; then
|
||||
if [[ -z "${libcuda_verify_ed25519_URL:-}" ]]; then
|
||||
echo "$0 skipped. Unable to locate $LIB"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
export LD_LIBRARY_PATH=/usr/local/cuda/lib64
|
||||
export PATH=$PATH:/usr/local/cuda/bin
|
||||
curl -X GET -o $LIB "$libcuda_verify_ed25519_URL"
|
||||
fi
|
||||
|
||||
# shellcheck disable=SC1090 # <-- shellcheck can't follow ~
|
||||
source ~/.cargo/env
|
||||
export RUST_BACKTRACE=1
|
||||
cargo test --features=cuda
|
||||
|
||||
exit 0
|
@@ -1,9 +0,0 @@
|
||||
#!/bin/bash -e
|
||||
|
||||
cd "$(dirname "$0")/.."
|
||||
|
||||
rustc --version
|
||||
cargo --version
|
||||
|
||||
export RUST_BACKTRACE=1
|
||||
cargo test -- --ignored
|
@@ -2,13 +2,31 @@
|
||||
|
||||
cd "$(dirname "$0")/.."
|
||||
|
||||
export RUST_BACKTRACE=1
|
||||
rustc --version
|
||||
cargo --version
|
||||
|
||||
export RUST_BACKTRACE=1
|
||||
rustup component add rustfmt-preview
|
||||
cargo build --verbose --features unstable
|
||||
cargo test --verbose --features unstable
|
||||
cargo bench --verbose --features unstable
|
||||
_() {
|
||||
echo "--- $*"
|
||||
"$@"
|
||||
}
|
||||
|
||||
_ cargo build --verbose --features unstable
|
||||
_ cargo test --verbose --features unstable
|
||||
_ cargo bench --verbose --features unstable
|
||||
|
||||
|
||||
# Coverage ...
|
||||
_ cargo install --force cargo-cov
|
||||
_ cargo cov test
|
||||
_ cargo cov report
|
||||
|
||||
echo --- Coverage report:
|
||||
ls -l target/cov/report/index.html
|
||||
|
||||
if [[ -z "$CODECOV_TOKEN" ]]; then
|
||||
echo CODECOV_TOKEN undefined
|
||||
else
|
||||
bash <(curl -s https://codecov.io/bash) -x 'llvm-cov gcov'
|
||||
fi
|
||||
|
||||
exit 0
|
||||
|
12  ci/test-stable-perf.sh  Executable file

@@ -0,0 +1,12 @@
+#!/bin/bash -e
+
+cd "$(dirname "$0")/.."
+
+./fetch-perf-libs.sh
+
+export LD_LIBRARY_PATH=$PWD:/usr/local/cuda/lib64
+export PATH=$PATH:/usr/local/cuda/bin
+export RUST_BACKTRACE=1
+
+set -x
+exec cargo test --features=cuda,erasure
@@ -2,13 +2,17 @@
|
||||
|
||||
cd "$(dirname "$0")/.."
|
||||
|
||||
export RUST_BACKTRACE=1
|
||||
rustc --version
|
||||
cargo --version
|
||||
|
||||
export RUST_BACKTRACE=1
|
||||
rustup component add rustfmt-preview
|
||||
cargo fmt -- --write-mode=diff
|
||||
cargo build --verbose
|
||||
cargo test --verbose
|
||||
_() {
|
||||
echo "--- $*"
|
||||
"$@"
|
||||
}
|
||||
|
||||
exit 0
|
||||
_ rustup component add rustfmt-preview
|
||||
_ cargo fmt -- --write-mode=diff
|
||||
_ cargo build --verbose
|
||||
_ cargo test --verbose
|
||||
_ cargo test -- --ignored
|
||||
|
@@ -1,65 +0,0 @@
|
||||
The Historian
|
||||
===
|
||||
|
||||
Create a *Historian* and send it *events* to generate an *event log*, where each *entry*
|
||||
is tagged with the historian's latest *hash*. Then ensure the order of events was not tampered
|
||||
with by verifying each entry's hash can be generated from the hash in the previous entry:
|
||||
|
||||

|
||||
|
||||
```rust
|
||||
extern crate solana;
|
||||
|
||||
use solana::historian::Historian;
|
||||
use solana::ledger::{Block, Entry, Hash};
|
||||
use solana::event::{generate_keypair, get_pubkey, sign_claim_data, Event};
|
||||
use std::thread::sleep;
|
||||
use std::time::Duration;
|
||||
use std::sync::mpsc::SendError;
|
||||
|
||||
fn create_ledger(hist: &Historian<Hash>) -> Result<(), SendError<Event<Hash>>> {
|
||||
sleep(Duration::from_millis(15));
|
||||
let tokens = 42;
|
||||
let keypair = generate_keypair();
|
||||
let event0 = Event::new_claim(get_pubkey(&keypair), tokens, sign_claim_data(&tokens, &keypair));
|
||||
hist.sender.send(event0)?;
|
||||
sleep(Duration::from_millis(10));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn main() {
|
||||
let seed = Hash::default();
|
||||
let hist = Historian::new(&seed, Some(10));
|
||||
create_ledger(&hist).expect("send error");
|
||||
drop(hist.sender);
|
||||
let entries: Vec<Entry<Hash>> = hist.receiver.iter().collect();
|
||||
for entry in &entries {
|
||||
println!("{:?}", entry);
|
||||
}
|
||||
// Proof-of-History: Verify the historian learned about the events
|
||||
// in the same order they appear in the vector.
|
||||
assert!(entries[..].verify(&seed));
|
||||
}
|
||||
```
|
||||
|
||||
Running the program should produce a ledger similar to:
|
||||
|
||||
```rust
|
||||
Entry { num_hashes: 0, id: [0, ...], event: Tick }
|
||||
Entry { num_hashes: 3, id: [67, ...], event: Transaction { tokens: 42 } }
|
||||
Entry { num_hashes: 3, id: [123, ...], event: Tick }
|
||||
```
|
||||
|
||||
Proof-of-History
|
||||
---
|
||||
|
||||
Take note of the last line:
|
||||
|
||||
```rust
|
||||
assert!(entries[..].verify(&seed));
|
||||
```
|
||||
|
||||
[It's a proof!](https://en.wikipedia.org/wiki/Curry–Howard_correspondence) For each entry returned by the
|
||||
historian, we can verify that `id` is the result of applying a sha256 hash to the previous `id`
|
||||
exactly `num_hashes` times, and then hashing then event data on top of that. Because the event data is
|
||||
included in the hash, the events cannot be reordered without regenerating all the hashes.
|
@@ -1,18 +0,0 @@
|
||||
msc {
|
||||
client,historian,recorder;
|
||||
|
||||
recorder=>historian [ label = "e0 = Entry{id: h0, n: 0, event: Tick}" ] ;
|
||||
recorder=>recorder [ label = "h1 = hash(h0)" ] ;
|
||||
recorder=>recorder [ label = "h2 = hash(h1)" ] ;
|
||||
client=>historian [ label = "Transaction(d0)" ] ;
|
||||
historian=>recorder [ label = "Transaction(d0)" ] ;
|
||||
recorder=>recorder [ label = "h3 = hash(h2 + d0)" ] ;
|
||||
recorder=>historian [ label = "e1 = Entry{id: hash(h3), n: 3, event: Transaction(d0)}" ] ;
|
||||
recorder=>recorder [ label = "h4 = hash(h3)" ] ;
|
||||
recorder=>recorder [ label = "h5 = hash(h4)" ] ;
|
||||
recorder=>recorder [ label = "h6 = hash(h5)" ] ;
|
||||
recorder=>historian [ label = "e2 = Entry{id: h6, n: 3, event: Tick}" ] ;
|
||||
client=>historian [ label = "collect()" ] ;
|
||||
historian=>client [ label = "entries = [e0, e1, e2]" ] ;
|
||||
client=>client [ label = "entries.verify(h0)" ] ;
|
||||
}
|
37
fetch-perf-libs.sh
Executable file
37
fetch-perf-libs.sh
Executable file
@@ -0,0 +1,37 @@
|
||||
#!/bin/bash -e
|
||||
|
||||
if [[ $(uname) != Linux ]]; then
|
||||
echo Performance libraries are only available for Linux
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ $(uname -m) != x86_64 ]]; then
|
||||
echo Performance libraries are only available for x86_64 architecture
|
||||
exit 1
|
||||
fi
|
||||
|
||||
(
|
||||
set -x
|
||||
curl -o solana-perf.tgz \
|
||||
https://solana-perf.s3.amazonaws.com/master/x86_64-unknown-linux-gnu/solana-perf.tgz
|
||||
tar zxvf solana-perf.tgz
|
||||
)
|
||||
|
||||
if [[ -r /usr/local/cuda/version.txt && -r cuda-version.txt ]]; then
|
||||
if ! diff /usr/local/cuda/version.txt cuda-version.txt > /dev/null; then
|
||||
echo ==============================================
|
||||
echo Warning: possible CUDA version mismatch
|
||||
echo
|
||||
echo "Expected version: $(cat cuda-version.txt)"
|
||||
echo "Detected version: $(cat /usr/local/cuda/version.txt)"
|
||||
echo ==============================================
|
||||
fi
|
||||
else
|
||||
echo ==============================================
|
||||
echo Warning: unable to validate CUDA version
|
||||
echo ==============================================
|
||||
fi
|
||||
|
||||
echo "Downloaded solana-perf version: $(cat solana-perf-HEAD.txt)"
|
||||
|
||||
exit 0
|
@@ -1,16 +1,17 @@
|
||||
#!/bin/bash -e
|
||||
#!/bin/bash
|
||||
|
||||
if [[ -z "$1" ]]; then
|
||||
echo "usage: $0 [network path to solana repo on leader machine]"
|
||||
exit 1
|
||||
if [[ -z $1 ]]; then
|
||||
echo "usage: $0 [network path to solana repo on leader machine] <number of nodes in the network>"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
LEADER="$1"
|
||||
LEADER=$1
|
||||
COUNT=${2:-1}
|
||||
|
||||
set -x
|
||||
export RUST_LOG=solana=info
|
||||
rsync -v -e ssh "$LEADER/leader.json" .
|
||||
rsync -v -e ssh "$LEADER/mint-demo.json" .
|
||||
rsync -vz "$LEADER"/{leader.json,mint-demo.json} . || exit $?
|
||||
|
||||
# if RUST_LOG is unset, default to info
|
||||
export RUST_LOG=${RUST_LOG:-solana=info}
|
||||
|
||||
cargo run --release --bin solana-client-demo -- \
|
||||
-l leader.json < mint-demo.json 2>&1 | tee client.log
|
||||
-n "$COUNT" -l leader.json -d < mint-demo.json 2>&1 | tee client.log
|
||||
|
@@ -1,4 +1,28 @@
|
||||
#!/bin/bash
|
||||
export RUST_LOG=solana=info
|
||||
sudo sysctl -w net.core.rmem_max=26214400
|
||||
cargo run --release --bin solana-fullnode -- -l leader.json < genesis.log
|
||||
here=$(dirname "$0")
|
||||
|
||||
# shellcheck source=/dev/null
|
||||
. "${here}"/myip.sh
|
||||
|
||||
myip=$(myip) || exit $?
|
||||
|
||||
[[ -f leader-"${myip}".json ]] || {
|
||||
echo "I can't find a matching leader config file for \"${myip}\"...
|
||||
Please run ${here}/setup.sh first.
|
||||
"
|
||||
exit 1
|
||||
}
|
||||
|
||||
# if RUST_LOG is unset, default to info
|
||||
export RUST_LOG=${RUST_LOG:-solana=info}
|
||||
|
||||
[[ $(uname) = Linux ]] && sudo sysctl -w net.core.rmem_max=26214400 1>/dev/null 2>/dev/null
|
||||
|
||||
# this makes a leader.json file available alongside genesis, etc. for
|
||||
# validators and clients
|
||||
cp leader-"${myip}".json leader.json
|
||||
|
||||
cargo run --release --bin solana-fullnode -- \
|
||||
-l leader-"${myip}".json \
|
||||
< genesis.log tx-*.log \
|
||||
> tx-"$(date -u +%Y%m%d%H%M%S%N)".log
|
||||
|
58
multinode-demo/myip.sh
Executable file
58
multinode-demo/myip.sh
Executable file
@@ -0,0 +1,58 @@
|
||||
#!/bin/bash
|
||||
|
||||
function myip()
|
||||
{
|
||||
declare ipaddrs=( )
|
||||
|
||||
# query interwebs
|
||||
mapfile -t ipaddrs < <(curl -s ifconfig.co)
|
||||
|
||||
# machine's interfaces
|
||||
mapfile -t -O "${#ipaddrs[*]}" ipaddrs < \
|
||||
<(ifconfig | awk '/inet(6)? (addr:)?/ {print $2}')
|
||||
|
||||
ipaddrs=( "${extips[@]}" "${ipaddrs[@]}" )
|
||||
|
||||
if (( ! ${#ipaddrs[*]} ))
|
||||
then
|
||||
echo "
|
||||
myip: error: I'm having trouble determining what our IP address is...
|
||||
Are we connected to a network?
|
||||
|
||||
"
|
||||
return 1
|
||||
fi
|
||||
|
||||
|
||||
declare prompt="
|
||||
Please choose the IP address you want to advertise to the network:
|
||||
|
||||
0) ${ipaddrs[0]} <====== this one was returned by the interwebs...
|
||||
"
|
||||
|
||||
for ((i=1; i < ${#ipaddrs[*]}; i++))
|
||||
do
|
||||
prompt+=" $i) ${ipaddrs[i]}
|
||||
"
|
||||
done
|
||||
|
||||
while read -r -p "${prompt}
|
||||
please enter a number [0 for default]: " which
|
||||
do
|
||||
[[ -z ${which} ]] && break;
|
||||
[[ ${which} =~ [0-9]+ ]] && (( which < ${#ipaddrs[*]} )) && break;
|
||||
echo "Ug. invalid entry \"${which}\"...
|
||||
"
|
||||
sleep 1
|
||||
done
|
||||
|
||||
which=${which:-0}
|
||||
|
||||
echo "${ipaddrs[which]}"
|
||||
|
||||
}
|
||||
|
||||
if [[ ${0} == "${BASH_SOURCE[0]}" ]]
|
||||
then
|
||||
myip "$@"
|
||||
fi
|
15  multinode-demo/setup.sh  Executable file

@@ -0,0 +1,15 @@
+#!/bin/bash
+here=$(dirname "$0")
+
+# shellcheck source=/dev/null
+. "${here}"/myip.sh
+
+myip=$(myip) || exit $?
+
+num_tokens=${1:-1000000000}
+
+cargo run --release --bin solana-mint-demo <<<"${num_tokens}" > mint-demo.json
+cargo run --release --bin solana-genesis-demo < mint-demo.json > genesis.log
+
+cargo run --release --bin solana-fullnode-config -- -d > leader-"${myip}".json
+cargo run --release --bin solana-fullnode-config -- -b 9000 -d > validator-"${myip}".json
@@ -1,21 +1,32 @@
|
||||
#!/bin/bash -e
|
||||
#!/bin/bash
|
||||
here=$(dirname "$0")
|
||||
|
||||
if [[ -z "$1" ]]; then
|
||||
# shellcheck source=/dev/null
|
||||
. "${here}"/myip.sh
|
||||
|
||||
leader=$1
|
||||
|
||||
[[ -z ${leader} ]] && {
|
||||
echo "usage: $0 [network path to solana repo on leader machine]"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
LEADER="$1"
|
||||
myip=$(myip) || exit $?
|
||||
|
||||
set -x
|
||||
[[ -f validator-"$myip".json ]] || {
|
||||
echo "I can't find a matching validator config file for \"${myip}\"...
|
||||
Please run ${here}/setup.sh first.
|
||||
"
|
||||
exit 1
|
||||
}
|
||||
|
||||
rsync -v -e ssh "$LEADER/mint-demo.json" .
|
||||
rsync -v -e ssh "$LEADER/leader.json" .
|
||||
rsync -v -e ssh "$LEADER/genesis.log" .
|
||||
rsync -vz "${leader}"/{mint-demo.json,leader.json,genesis.log,tx-*.log} . || exit $?
|
||||
|
||||
export RUST_LOG=solana=info
|
||||
[[ $(uname) = Linux ]] && sudo sysctl -w net.core.rmem_max=26214400 1>/dev/null 2>/dev/null
|
||||
|
||||
sudo sysctl -w net.core.rmem_max=26214400
|
||||
# if RUST_LOG is unset, default to info
|
||||
export RUST_LOG=${RUST_LOG:-solana=info}
|
||||
|
||||
cargo run --release --features=cuda --bin solana-fullnode -- \
|
||||
-l validator.json -v leader.json -b 9000 -d < genesis.log
|
||||
cargo run --release --bin solana-fullnode -- \
|
||||
-l validator-"${myip}".json -v leader.json \
|
||||
< genesis.log tx-*.log
|
||||
|
182  rfcs/rfc-001-smart-contracts-engine.md  Normal file

@@ -0,0 +1,182 @@
# Smart Contracts Engine

The goal of this RFC is to define a set of constraints for APIs and runtime such that we can execute our smart contracts safely on massively parallel hardware such as a GPU. Our runtime is built around an OS *syscall* primitive. The difference in blockchain is that now the OS does a cryptographic check of memory region ownership before accessing the memory in the Solana kernel.

## Toolchain Stack

    +---------------------+       +---------------------+
    |                     |       |                     |
    |   +------------+    |       |   +------------+    |
    |   |            |    |       |   |            |    |
    |   |  frontend  |    |       |   |  verifier  |    |
    |   |            |    |       |   |            |    |
    |   +-----+------+    |       |   +-----+------+    |
    |         |           |       |         |           |
    |         |           |       |         |           |
    |   +-----+------+    |       |   +-----+------+    |
    |   |            |    |       |   |            |    |
    |   |    llvm    |    |       |   |   loader   |    |
    |   |            |    +------>+   |            |    |
    |   +-----+------+    |       |   +-----+------+    |
    |         |           |       |         |           |
    |         |           |       |         |           |
    |   +-----+------+    |       |   +-----+------+    |
    |   |            |    |       |   |            |    |
    |   |    ELF     |    |       |   |  runtime   |    |
    |   |            |    |       |   |            |    |
    |   +------------+    |       |   +------------+    |
    |                     |       |                     |
    |       client        |       |       solana        |
    +---------------------+       +---------------------+

[Figure 1. Smart Contracts Stack]

In Figure 1, an untrusted client creates a program in the front-end language of her choice (like C/C++/Rust/Lua), and compiles it with LLVM to a position-independent shared object ELF, targeting BPF bytecode. Solana will safely load and execute the ELF.

## Bytecode

Our bytecode is based on Berkeley Packet Filter. The requirements for BPF overlap almost exactly with the requirements we have:

1. Deterministic amount of time to execute the code
2. Bytecode that is portable between machine instruction sets
3. Verified memory accesses
4. Fast to load the object, verify the bytecode and JIT to local machine instruction set

For 1, that means that loops are unrolled, and any jumps back can be guarded with a check against the number of instructions that have been executed at this point. If the limit is reached, the program yields its execution. This involves saving the stack and current instruction index.

For 2, the BPF bytecode already easily maps to x86–64, arm64 and other instruction sets.

For 3, every load and store that is relative can be checked to be within the expected memory that is passed into the ELF. Dynamic loads and stores can do a runtime check against available memory; these will be slow and should be avoided.

For 4, we use a fully linked PIC ELF with just a single RX segment. Effectively we are linking a shared object with `-fpic -target bpf` and with a linker script to collect everything into a single RX segment. Writable globals are not supported.
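To make requirement 1 concrete, here is a minimal, hypothetical Rust sketch of the guard described above: an interpreter that charges every executed instruction against a budget and yields by saving its program counter. None of these names come from the Solana codebase; it is an illustration only.

```rust
/// Hypothetical bytecode: just enough to demonstrate the guard.
enum Instruction {
    Nop,
    JumpBack(usize),
}

enum Step {
    Done,
    /// Budget exhausted: record the program counter (a real VM would also
    /// save the stack) so execution can resume later.
    Yielded { resume_at: usize },
}

fn execute(ins: &Instruction, pc: usize) -> usize {
    match *ins {
        Instruction::Nop => pc + 1,
        Instruction::JumpBack(target) => target, // the backward jump the budget guards
    }
}

fn run(program: &[Instruction], mut pc: usize, mut budget: u64) -> Step {
    while pc < program.len() {
        if budget == 0 {
            return Step::Yielded { resume_at: pc };
        }
        budget -= 1;
        pc = execute(&program[pc], pc);
    }
    Step::Done
}

fn main() {
    let program = vec![Instruction::Nop, Instruction::JumpBack(0)]; // infinite loop
    match run(&program, 0, 100) {
        Step::Done => println!("finished"),
        Step::Yielded { resume_at } => println!("yielded at instruction {}", resume_at),
    }
}
```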
## Loader
The loader is our first smart contract. The job of this contract is to load the actual program with its own instance data. The loader will verify the bytecode and that the object implements the expected entry points.

Since there is only one RX segment, the context for the contract instance is passed into each entry point as well as the event data for that entry point.

A client will create a transaction to create a new loader instance:

`Solana_NewLoader(Loader Instance PubKey, proof of key ownership, space I need for my elf)`

A client will then do a bunch of transactions to load its elf into the loader instance they created:

`Loader_UploadElf(Loader Instance PubKey, proof of key ownership, pos start, pos end, data)`

At this point the client can create a new instance of the module with its own instance address:

`Loader_NewInstance(Loader Instance PubKey, proof of key ownership, Instance PubKey, proof of key ownership)`

Once the instance has been created, the client may need to upload more user data to solana to configure this instance:

`Instance_UploadModuleData(Instance PubKey, proof of key ownership, pos start, pos end, data)`

Now clients can `start` the instance:

`Instance_Start(Instance PubKey, proof of key ownership)`
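Purely as an editorial illustration of the message sequence above (this is not an interface defined by the RFC or present in the repository), the five transactions could be modeled as a Rust enum; every type and field name here is invented:

```rust
/// Hypothetical encoding of the loader/instance messages listed above.
type PubKey = [u8; 32];
type Proof = [u8; 64]; // stands in for "proof of key ownership"

enum LoaderMessage {
    NewLoader { loader: PubKey, proof: Proof, elf_size: u64 },
    UploadElf { loader: PubKey, proof: Proof, start: u64, end: u64, data: Vec<u8> },
    NewInstance { loader: PubKey, loader_proof: Proof, instance: PubKey, instance_proof: Proof },
    UploadModuleData { instance: PubKey, proof: Proof, start: u64, end: u64, data: Vec<u8> },
    Start { instance: PubKey, proof: Proof },
}

fn main() {
    // A client would issue the variants in order: NewLoader, repeated UploadElf
    // chunks, NewInstance, optional UploadModuleData, then Start.
    let _last_step = LoaderMessage::Start { instance: [0u8; 32], proof: [0u8; 64] };
}
```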
## Runtime

Our goal with the runtime is to have a general purpose execution environment that is highly parallelizable and doesn't require dynamic resource management. We want to execute as many contracts as we can in parallel, and have them pass or fail without a destructive state change.

### State and Entry Point

State is addressed by an account which is at the moment simply the PubKey. Our goal is to eliminate dynamic memory allocation in the smart contract itself, so the contract is a function that takes a mapping of [(PubKey,State)] and returns [(PubKey, State')]. The output set of keys is a subset of the input. Three basic kinds of state exist:

* Instance State
* Participant State
* Caller State

There isn't any difference in how each is implemented, but conceptually Participant State is memory that is allocated for each participant in the contract. Instance State is memory that is allocated for the contract itself, and Caller State is memory that the transaction's caller has allocated.
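A minimal Rust sketch of that "contract as a pure function over keyed state" idea follows; the types and the particular update performed are invented for illustration and are not the RFC's actual interface.

```rust
use std::collections::HashMap;

/// Illustrative stand-ins: the RFC only says state is addressed by a PubKey.
type PubKey = [u8; 32];
type State = Vec<u8>;

/// A contract maps the keyed state it was handed to new values for a subset
/// of those keys; it never allocates entries for keys it was not given.
fn contract(inputs: &HashMap<PubKey, State>) -> HashMap<PubKey, State> {
    let mut outputs = HashMap::new();
    for (key, state) in inputs {
        if state.is_empty() {
            continue; // output keys are always a subset of the input keys
        }
        let mut new_state = state.clone();
        new_state[0] = new_state[0].wrapping_add(1); // some pure, deterministic update
        outputs.insert(*key, new_state);
    }
    outputs
}

fn main() {
    let mut inputs = HashMap::new();
    inputs.insert([1u8; 32], vec![0u8; 8]);
    let outputs = contract(&inputs);
    assert!(outputs.keys().all(|k| inputs.contains_key(k)));
}
```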
### Call

```
void call(
    const struct instance_data *data,
    const uint8_t kind[],  //instance|participant|caller|read|write
    const uint8_t *keys[],
    uint8_t *data[],
    int num,
    uint8_t dirty[],       //dirty memory bits
    uint8_t *userdata,     //current transaction data
);
```

To call this operation, the transaction that is destined to the contract instance specifies what keyed state it should present to the `call` function. To allocate the state memory or a call context, the client has to first call a function on the contract with the designed address that will own the state.

At its core, this is a system call that requires cryptographic proof of ownership of memory regions instead of an OS that checks page tables for access rights.

* `Instance_AllocateContext(Instance PubKey, My PubKey, Proof of key ownership)`

Any transaction can then call `call` on the contract with a set of keys. It's up to the contract itself to manage ownership:

* `Instance_Call(Instance PubKey, [Context PubKeys], proofs of ownership, userdata...)`

Contracts should be able to read any state that is part of solana, but only write to state that the contract allocated.

#### Caller State

Caller `state` is memory allocated for the `call` that belongs to the public key that is issuing the `call`. This is the caller's context.

#### Instance State

Instance `state` is memory that belongs to this contract instance. We may also need module-wide `state` as well.

#### Participant State

Participant `state` is any other memory. In some cases it may make sense to have these allocated as part of the call by the caller.

### Reduce

Some operations on the contract will require iteration over all the keys. To make this parallelizable, the iteration is broken up into reduce calls which are combined.

```
void reduce_m(
    const struct instance_data *data,
    const uint8_t *keys[],
    const uint8_t *data[],
    int num,
    uint8_t *reduce_data,
);

void reduce_r(
    const struct instance_data *data,
    const uint8_t *reduce_data[],
    int num,
    uint8_t *reduce_data,
);
```

### Execution

Transactions are batched and processed in parallel at each stage.

```
+-----------+    +--------------+      +-----------+    +---------------+
| sigverify |-+->| debit commit |---+->| execution |-+->| memory commit |
+-----------+ |  +--------------+   |  +-----------+ |  +---------------+
              |                     |                |
              |  +---------------+  |                |  +--------------+
              |->| memory verify |->+                +->| debit undo   |
                 +---------------+                   |  +--------------+
                                                     |
                                                     |  +---------------+
                                                     +->| credit commit |
                                                        +---------------+
```

The `debit verify` stage is very similar to `memory verify`. Proof of key ownership is used to check if the caller's key has some state allocated with the contract, then the memory is loaded and executed. After the execution stage, the dirty pages are written back by the contract. Because we know all the memory accesses during execution, we can batch transactions that do not interfere with each other. We can also apply the `debit undo` and `credit commit` stages of the transaction. `debit undo` is run in case of an exception during contract execution; only transfers may be reversed, and fees are committed to solana.

### GPU execution

A single contract can read and write to separate key pairs without interference. These separate calls to the same contract can execute on the same GPU thread over different memory using different SIMD lanes.

## Notes

1. There is no dynamic memory allocation.
2. Persistent Memory is allocated to a Key with ownership
3. Contracts can `call` to update key-owned state
4. Contracts can `reduce` over the memory to aggregate state
5. `call` is just a *syscall* that does a cryptographic check of memory ownership
43
scripts/perf-plot.py
Executable file
43
scripts/perf-plot.py
Executable file
@@ -0,0 +1,43 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
import matplotlib
|
||||
matplotlib.use('Agg')
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
import json
|
||||
import sys
|
||||
|
||||
stages_to_counters = {}
|
||||
stages_to_time = {}
|
||||
|
||||
if len(sys.argv) != 2:
|
||||
print("USAGE: {} <input file>".format(sys.argv[0]))
|
||||
sys.exit(1)
|
||||
|
||||
with open(sys.argv[1]) as fh:
|
||||
for line in fh.readlines():
|
||||
if "COUNTER" in line:
|
||||
json_part = line[line.find("{"):]
|
||||
x = json.loads(json_part)
|
||||
counter = x['name']
|
||||
if not (counter in stages_to_counters):
|
||||
stages_to_counters[counter] = []
|
||||
stages_to_time[counter] = []
|
||||
stages_to_counters[counter].append(x['counts'])
|
||||
stages_to_time[counter].append(x['now'])
|
||||
|
||||
fig, ax = plt.subplots()
|
||||
|
||||
for stage in stages_to_counters.keys():
|
||||
plt.plot(stages_to_time[stage], stages_to_counters[stage], label=stage)
|
||||
|
||||
plt.xlabel('ms')
|
||||
plt.ylabel('count')
|
||||
|
||||
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
|
||||
ncol=2, mode="expand", borderaxespad=0.)
|
||||
|
||||
plt.locator_params(axis='x', nbins=10)
|
||||
plt.grid(True)
|
||||
|
||||
plt.savefig("perf.pdf")
|
69
snap/snapcraft.yaml
Normal file
69
snap/snapcraft.yaml
Normal file
@@ -0,0 +1,69 @@
|
||||
name: solana
|
||||
version: git
|
||||
summary: Blockchain, Rebuilt for Scale
|
||||
description: |
|
||||
710,000 tx/s with off-the-shelf hardware and no sharding.
|
||||
Scales with Moore's Law.
|
||||
grade: devel
|
||||
|
||||
# TODO: solana-perf-fullnode does not yet run with 'strict' confinement due to the
|
||||
# CUDA dependency, so use 'devmode' confinement for now
|
||||
confinement: devmode
|
||||
|
||||
apps:
|
||||
drone:
|
||||
command: solana-drone
|
||||
plugs:
|
||||
- network
|
||||
- network-bind
|
||||
fullnode:
|
||||
command: solana-fullnode
|
||||
plugs:
|
||||
- network
|
||||
- network-bind
|
||||
- home
|
||||
fullnode-cuda:
|
||||
command: solana-fullnode-cuda
|
||||
plugs:
|
||||
- network
|
||||
- network-bind
|
||||
- home
|
||||
fullnode-config:
|
||||
command: solana-fullnode-config
|
||||
plugs:
|
||||
- network
|
||||
- network-bind
|
||||
genesis:
|
||||
command: solana-genesis
|
||||
genesis-demo:
|
||||
command: solana-genesis-demo
|
||||
mint:
|
||||
command: solana-mint
|
||||
mint-demo:
|
||||
command: solana-mint-demo
|
||||
client-demo:
|
||||
command: solana-client-demo
|
||||
|
||||
parts:
|
||||
solana-cuda:
|
||||
plugin: rust
|
||||
rust-channel: stable
|
||||
rust-features:
|
||||
- erasure
|
||||
- cuda
|
||||
prime:
|
||||
- bin/solana-fullnode-cuda
|
||||
- usr/lib/libgf_complete.so.1
|
||||
- usr/lib/libJerasure.so.2
|
||||
override-build: |
|
||||
./fetch-perf-libs.sh
|
||||
snapcraftctl build
|
||||
mv $SNAPCRAFT_PART_INSTALL/bin/solana-fullnode $SNAPCRAFT_PART_INSTALL
|
||||
rm -rf $SNAPCRAFT_PART_INSTALL/bin/*
|
||||
mv $SNAPCRAFT_PART_INSTALL/solana-fullnode $SNAPCRAFT_PART_INSTALL/bin/solana-fullnode-cuda
|
||||
mkdir -p $SNAPCRAFT_PART_INSTALL/usr/lib/
|
||||
cp -f libJerasure.so $SNAPCRAFT_PART_INSTALL/usr/lib/libJerasure.so.2
|
||||
cp -f libgf_complete.so $SNAPCRAFT_PART_INSTALL/usr/lib/libgf_complete.so.1
|
||||
solana:
|
||||
plugin: rust
|
||||
rust-channel: stable
|
315  src/bank.rs

@@ -1,6 +1,6 @@
//! The `bank` module tracks client balances, and the progress of pending
//! transactions. It offers a high-level public API that signs transactions
//! on behalf of the caller, and a private low-level API for when they have
//! The `bank` module tracks client balances and the progress of smart
//! contracts. It offers a high-level API that signs transactions
//! on behalf of the caller, and a low-level API for when they have
//! already been signed and verified.

extern crate libc;

@@ -10,34 +10,82 @@ use entry::Entry;
use hash::Hash;
use mint::Mint;
use payment_plan::{Payment, PaymentPlan, Witness};
use rayon::prelude::*;
use signature::{KeyPair, PublicKey, Signature};
use std::collections::hash_map::Entry::Occupied;
use std::collections::{HashMap, HashSet, VecDeque};
use std::result;
use std::sync::atomic::{AtomicIsize, AtomicUsize, Ordering};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::RwLock;
use std::time::Instant;
use timing::duration_as_us;
use transaction::{Instruction, Plan, Transaction};

pub const MAX_ENTRY_IDS: usize = 1024 * 4;
/// The number of most recent `last_id` values that the bank will track the signatures
/// of. Once the bank discards a `last_id`, it will reject any transactions that use
/// that `last_id` in a transaction. Lowering this value reduces memory consumption,
/// but requires clients to update their `last_id` more frequently. Raising the value
/// lengthens the time a client must wait to be certain a missing transaction will
/// not be processed by the network.
pub const MAX_ENTRY_IDS: usize = 1024 * 16;

/// Reasons a transaction might be rejected.
#[derive(Debug, PartialEq, Eq)]
pub enum BankError {
    /// Attempt to debit from `PublicKey`, but found no record of a prior credit.
    AccountNotFound(PublicKey),

    /// The requested debit from `PublicKey` has the potential to draw the balance
    /// below zero. This can occur when a debit and credit are processed in parallel.
    /// The bank may reject the debit or push it to a future entry.
    InsufficientFunds(PublicKey),

    /// The bank has seen `Signature` before. This can occur under normal operation
    /// when a UDP packet is duplicated, as a user error from a client not updating
    /// its `last_id`, or as a double-spend attack.
    DuplicateSiganture(Signature),

    /// The bank has not seen the given `last_id` or the transaction is too old and
    /// the `last_id` has been discarded.
    LastIdNotFound(Hash),

    /// The transaction is invalid and has requested a debit or credit of negative
    /// tokens.
    NegativeTokens,
}

pub type Result<T> = result::Result<T, BankError>;
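An editorial aside, not part of the diff: given the `Result` alias and `BankError` variants above, a caller could branch on the rejection reason roughly as follows. The snippet assumes the surrounding module's types and an invented `result` value; the messages are illustrative only.

```rust
// Illustrative only: reacting to each documented rejection reason.
fn report(result: Result<()>) {
    match result {
        Ok(()) => println!("transaction accepted"),
        Err(BankError::AccountNotFound(key)) => println!("no prior credit for {:?}", key),
        Err(BankError::InsufficientFunds(key)) => println!("{:?} could go below zero", key),
        Err(BankError::DuplicateSiganture(sig)) => println!("signature {:?} already seen", sig),
        Err(BankError::LastIdNotFound(id)) => println!("unknown or discarded last_id {:?}", id),
        Err(BankError::NegativeTokens) => println!("negative token amount rejected"),
    }
}
```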
/// The state of all accounts and contracts after processing its entries.
|
||||
pub struct Bank {
|
||||
balances: RwLock<HashMap<PublicKey, AtomicIsize>>,
|
||||
/// A map of account public keys to the balance in that account.
|
||||
balances: RwLock<HashMap<PublicKey, i64>>,
|
||||
|
||||
/// A map of smart contract transaction signatures to what remains of its payment
|
||||
/// plan. Each transaction that targets the plan should cause it to be reduced.
|
||||
/// Once it cannot be reduced, final payments are made and it is discarded.
|
||||
pending: RwLock<HashMap<Signature, Plan>>,
|
||||
last_ids: RwLock<VecDeque<(Hash, RwLock<HashSet<Signature>>)>>,
|
||||
|
||||
/// A FIFO queue of `last_id` items, where each item is a set of signatures
|
||||
/// that have been processed using that `last_id`. Rejected `last_id`
|
||||
/// values are so old that the `last_id` has been pulled out of the queue.
|
||||
last_ids: RwLock<VecDeque<Hash>>,
|
||||
|
||||
// Mapping of hashes to signature sets. The bank uses this data to
|
||||
/// reject transactions with signatures its seen before
|
||||
last_ids_sigs: RwLock<HashMap<Hash, HashSet<Signature>>>,
|
||||
|
||||
/// The set of trusted timekeepers. A Timestamp transaction from a `PublicKey`
|
||||
/// outside this set will be discarded. Note that if validators do not have the
|
||||
/// same set as leaders, they may interpret the ledger differently.
|
||||
time_sources: RwLock<HashSet<PublicKey>>,
|
||||
|
||||
/// The most recent timestamp from a trusted timekeeper. This timestamp is applied
|
||||
/// to every smart contract when it enters the system. If it is waiting on a
|
||||
/// timestamp witness before that timestamp, the bank will execute it immediately.
|
||||
last_time: RwLock<DateTime<Utc>>,
|
||||
|
||||
/// The number of transactions the bank has processed without error since the
|
||||
/// start of the ledger.
|
||||
transaction_count: AtomicUsize,
|
||||
}
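Taken together, the typical flow through this struct is: build a `Bank` from a `Mint`, register entry IDs as the ledger advances, and feed it transactions. A minimal test-style sketch using only APIs that appear elsewhere in this diff (amounts are arbitrary):

#[test]
fn demo_bank_flow() {
    let mint = Mint::new(10_000);
    let bank = Bank::new(&mint);
    let alice = KeyPair::new().pubkey();

    // Move 1_000 tokens from the mint to Alice, referencing the mint's last_id.
    let tx = Transaction::new(&mint.keypair(), alice, 1_000, mint.last_id());
    bank.process_transaction(&tx).unwrap();

    assert_eq!(bank.get_balance(&alice).unwrap(), 1_000);
    assert_eq!(bank.transaction_count(), 1);
}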
|
||||
|
||||
@@ -48,11 +96,12 @@ impl Bank {
|
||||
balances: RwLock::new(HashMap::new()),
|
||||
pending: RwLock::new(HashMap::new()),
|
||||
last_ids: RwLock::new(VecDeque::new()),
|
||||
last_ids_sigs: RwLock::new(HashMap::new()),
|
||||
time_sources: RwLock::new(HashSet::new()),
|
||||
last_time: RwLock::new(Utc.timestamp(0, 0)),
|
||||
transaction_count: AtomicUsize::new(0),
|
||||
};
|
||||
bank.apply_payment(deposit);
|
||||
bank.apply_payment(deposit, &mut bank.balances.write().unwrap());
|
||||
bank
|
||||
}
|
||||
|
||||
@@ -67,78 +116,54 @@ impl Bank {
|
||||
bank
|
||||
}
|
||||
|
||||
/// Commit funds to the 'to' party.
|
||||
fn apply_payment(&self, payment: &Payment) {
|
||||
// First we check balances with a read lock to maximize potential parallelization.
|
||||
if self.balances
|
||||
.read()
|
||||
.expect("'balances' read lock in apply_payment")
|
||||
.contains_key(&payment.to)
|
||||
{
|
||||
let bals = self.balances.read().expect("'balances' read lock");
|
||||
bals[&payment.to].fetch_add(payment.tokens as isize, Ordering::Relaxed);
|
||||
/// Commit funds to the `payment.to` party.
|
||||
fn apply_payment(&self, payment: &Payment, balances: &mut HashMap<PublicKey, i64>) {
|
||||
if balances.contains_key(&payment.to) {
|
||||
*balances.get_mut(&payment.to).unwrap() += payment.tokens;
|
||||
} else {
|
||||
// Now we know the key wasn't present a nanosecond ago, but it might be there
|
||||
// by the time we acquire a write lock, so we'll have to check again.
|
||||
let mut bals = self.balances.write().expect("'balances' write lock");
|
||||
if bals.contains_key(&payment.to) {
|
||||
bals[&payment.to].fetch_add(payment.tokens as isize, Ordering::Relaxed);
|
||||
} else {
|
||||
bals.insert(payment.to, AtomicIsize::new(payment.tokens as isize));
|
||||
}
|
||||
balances.insert(payment.to, payment.tokens);
|
||||
}
|
||||
}
|
||||
|
||||
/// Return the last entry ID registered
|
||||
/// Return the last entry ID registered.
|
||||
pub fn last_id(&self) -> Hash {
|
||||
let last_ids = self.last_ids.read().expect("'last_ids' read lock");
|
||||
let last_item = last_ids.iter().last().expect("empty 'last_ids' list");
|
||||
last_item.0
|
||||
*last_item
|
||||
}
|
||||
|
||||
fn reserve_signature(signatures: &RwLock<HashSet<Signature>>, sig: &Signature) -> Result<()> {
|
||||
if signatures
|
||||
.read()
|
||||
.expect("'signatures' read lock")
|
||||
.contains(sig)
|
||||
{
|
||||
/// Store the given signature. The bank will reject any transaction with the same signature.
|
||||
fn reserve_signature(signatures: &mut HashSet<Signature>, sig: &Signature) -> Result<()> {
|
||||
if let Some(sig) = signatures.get(sig) {
|
||||
return Err(BankError::DuplicateSiganture(*sig));
|
||||
}
|
||||
signatures
|
||||
.write()
|
||||
.expect("'signatures' write lock")
|
||||
.insert(*sig);
|
||||
signatures.insert(*sig);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn forget_signature(signatures: &RwLock<HashSet<Signature>>, sig: &Signature) {
|
||||
signatures
|
||||
.write()
|
||||
.expect("'signatures' write lock in forget_signature")
|
||||
.remove(sig);
|
||||
/// Forget the given `signature` because its transaction was rejected.
|
||||
fn forget_signature(signatures: &mut HashSet<Signature>, signature: &Signature) {
|
||||
signatures.remove(signature);
|
||||
}
|
||||
|
||||
fn forget_signature_with_last_id(&self, sig: &Signature, last_id: &Hash) {
|
||||
if let Some(entry) = self.last_ids
|
||||
.read()
|
||||
/// Forget the given `signature` with `last_id` because the transaction was rejected.
|
||||
fn forget_signature_with_last_id(&self, signature: &Signature, last_id: &Hash) {
|
||||
if let Some(entry) = self.last_ids_sigs
|
||||
.write()
|
||||
.expect("'last_ids' read lock in forget_signature_with_last_id")
|
||||
.iter()
|
||||
.rev()
|
||||
.find(|x| x.0 == *last_id)
|
||||
.get_mut(last_id)
|
||||
{
|
||||
Self::forget_signature(&entry.1, sig);
|
||||
Self::forget_signature(entry, signature);
|
||||
}
|
||||
}
|
||||
|
||||
fn reserve_signature_with_last_id(&self, sig: &Signature, last_id: &Hash) -> Result<()> {
|
||||
if let Some(entry) = self.last_ids
|
||||
.read()
|
||||
fn reserve_signature_with_last_id(&self, signature: &Signature, last_id: &Hash) -> Result<()> {
|
||||
if let Some(entry) = self.last_ids_sigs
|
||||
.write()
|
||||
.expect("'last_ids' read lock in reserve_signature_with_last_id")
|
||||
.iter()
|
||||
.rev()
|
||||
.find(|x| x.0 == *last_id)
|
||||
.get_mut(last_id)
|
||||
{
|
||||
return Self::reserve_signature(&entry.1, sig);
|
||||
return Self::reserve_signature(entry, signature);
|
||||
}
|
||||
Err(BankError::LastIdNotFound(*last_id))
|
||||
}
|
||||
@@ -151,63 +176,46 @@ impl Bank {
|
||||
let mut last_ids = self.last_ids
|
||||
.write()
|
||||
.expect("'last_ids' write lock in register_entry_id");
|
||||
let mut last_ids_sigs = self.last_ids_sigs
|
||||
.write()
|
||||
.expect("last_ids_sigs write lock");
|
||||
if last_ids.len() >= MAX_ENTRY_IDS {
|
||||
last_ids.pop_front();
|
||||
let id = last_ids.pop_front().unwrap();
|
||||
last_ids_sigs.remove(&id);
|
||||
}
|
||||
last_ids.push_back((*last_id, RwLock::new(HashSet::new())));
|
||||
last_ids_sigs.insert(*last_id, HashSet::new());
|
||||
last_ids.push_back(*last_id);
|
||||
}
|
||||
|
||||
/// Deduct tokens from the `from` address if the account has sufficient
/// funds and the transaction isn't a duplicate.
|
||||
fn apply_debits(&self, tx: &Transaction) -> Result<()> {
|
||||
if let Instruction::NewContract(contract) = &tx.instruction {
|
||||
trace!("Transaction {}", contract.tokens);
|
||||
if contract.tokens < 0 {
|
||||
return Err(BankError::NegativeTokens);
|
||||
}
|
||||
}
|
||||
let bals = self.balances
|
||||
.read()
|
||||
.expect("'balances' read lock in apply_debits");
|
||||
let option = bals.get(&tx.from);
|
||||
|
||||
fn apply_debits(&self, tx: &Transaction, bals: &mut HashMap<PublicKey, i64>) -> Result<()> {
|
||||
let option = bals.get_mut(&tx.from);
|
||||
if option.is_none() {
|
||||
return Err(BankError::AccountNotFound(tx.from));
|
||||
}
|
||||
let bal = option.unwrap();
|
||||
|
||||
self.reserve_signature_with_last_id(&tx.sig, &tx.last_id)?;
|
||||
|
||||
loop {
|
||||
let result = if let Instruction::NewContract(contract) = &tx.instruction {
|
||||
let bal = option.expect("assignment of option to bal");
|
||||
let current = bal.load(Ordering::Relaxed) as i64;
|
||||
if let Instruction::NewContract(contract) = &tx.instruction {
|
||||
if contract.tokens < 0 {
|
||||
return Err(BankError::NegativeTokens);
|
||||
}
|
||||
|
||||
if current < contract.tokens {
|
||||
self.forget_signature_with_last_id(&tx.sig, &tx.last_id);
|
||||
return Err(BankError::InsufficientFunds(tx.from));
|
||||
}
|
||||
if *bal < contract.tokens {
|
||||
self.forget_signature_with_last_id(&tx.sig, &tx.last_id);
|
||||
return Err(BankError::InsufficientFunds(tx.from));
|
||||
}
|
||||
|
||||
bal.compare_exchange(
|
||||
current as isize,
|
||||
(current - contract.tokens) as isize,
|
||||
Ordering::Relaxed,
|
||||
Ordering::Relaxed,
|
||||
)
|
||||
} else {
|
||||
Ok(0)
|
||||
};
|
||||
|
||||
match result {
|
||||
Ok(_) => {
|
||||
self.transaction_count.fetch_add(1, Ordering::Relaxed);
|
||||
return Ok(());
|
||||
}
|
||||
Err(_) => continue,
|
||||
};
|
||||
}
|
||||
*bal -= contract.tokens;
|
||||
};
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn apply_credits(&self, tx: &Transaction) {
|
||||
/// Apply only a transaction's credits. Credits from multiple transactions
|
||||
/// may safely be applied in parallel.
|
||||
fn apply_credits(&self, tx: &Transaction, balances: &mut HashMap<PublicKey, i64>) {
|
||||
match &tx.instruction {
|
||||
Instruction::NewContract(contract) => {
|
||||
let mut plan = contract.plan.clone();
|
||||
@@ -215,8 +223,8 @@ impl Bank {
|
||||
.read()
|
||||
.expect("timestamp creation in apply_credits")));
|
||||
|
||||
if let Some(ref payment) = plan.final_payment() {
|
||||
self.apply_payment(payment);
|
||||
if let Some(payment) = plan.final_payment() {
|
||||
self.apply_payment(&payment, balances);
|
||||
} else {
|
||||
let mut pending = self.pending
|
||||
.write()
|
||||
@@ -233,47 +241,78 @@ impl Bank {
|
||||
}
|
||||
}
|
||||
|
||||
/// Process a Transaction.
|
||||
/// Process a Transaction. If it contains a payment plan that requires a witness
|
||||
/// to progress, the payment plan will be stored in the bank.
|
||||
fn process_transaction(&self, tx: &Transaction) -> Result<()> {
|
||||
self.apply_debits(tx)?;
|
||||
self.apply_credits(tx);
|
||||
let bals = &mut self.balances.write().unwrap();
|
||||
self.apply_debits(tx, bals)?;
|
||||
self.apply_credits(tx, bals);
|
||||
self.transaction_count.fetch_add(1, Ordering::Relaxed);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Process a batch of transactions.
|
||||
#[must_use]
|
||||
pub fn process_transactions(&self, txs: Vec<Transaction>) -> Vec<Result<Transaction>> {
|
||||
// Run all debits first to filter out any transactions that can't be processed
|
||||
// in parallel deterministically.
|
||||
info!("processing Transactions {}", txs.len());
|
||||
let results: Vec<_> = txs.into_par_iter()
|
||||
.map(|tx| self.apply_debits(&tx).map(|_| tx))
|
||||
let bals = &mut self.balances.write().unwrap();
|
||||
debug!("processing Transactions {}", txs.len());
|
||||
let txs_len = txs.len();
|
||||
let now = Instant::now();
|
||||
let results: Vec<_> = txs.into_iter()
|
||||
.map(|tx| self.apply_debits(&tx, bals).map(|_| tx))
|
||||
.collect(); // Calling collect() here forces all debits to complete before moving on.
|
||||
|
||||
results
|
||||
.into_par_iter()
|
||||
let debits = now.elapsed();
|
||||
let now = Instant::now();
|
||||
|
||||
let res: Vec<_> = results
|
||||
.into_iter()
|
||||
.map(|result| {
|
||||
result.map(|tx| {
|
||||
self.apply_credits(&tx);
|
||||
self.apply_credits(&tx, bals);
|
||||
tx
|
||||
})
|
||||
})
|
||||
.collect()
|
||||
.collect();
|
||||
|
||||
debug!(
|
||||
"debits: {} us credits: {:?} us tx: {}",
|
||||
duration_as_us(&debits),
|
||||
duration_as_us(&now.elapsed()),
|
||||
txs_len
|
||||
);
|
||||
|
||||
let mut tx_count = 0;
|
||||
for r in &res {
|
||||
if r.is_ok() {
|
||||
tx_count += 1;
|
||||
} else {
|
||||
info!("tx error: {:?}", r);
|
||||
}
|
||||
}
|
||||
self.transaction_count
|
||||
.fetch_add(tx_count, Ordering::Relaxed);
|
||||
res
|
||||
}
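The batch API returns a result per transaction instead of failing the whole batch, so a caller can forward the successes and log the rest; that is exactly what the banking stage below does. An illustrative helper (hypothetical name) built on that behavior:

fn forward_good_transactions(bank: &Bank, txs: Vec<Transaction>) -> Vec<Transaction> {
    // Each element is Ok(tx) if the debit succeeded, or an error describing the rejection.
    bank.process_transactions(txs)
        .into_iter()
        .filter_map(|r| r.ok())
        .collect()
}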
|
||||
|
||||
/// Process an ordered list of entries.
|
||||
pub fn process_entries<I>(&self, entries: I) -> Result<()>
|
||||
where
|
||||
I: IntoIterator<Item = Entry>,
|
||||
{
|
||||
for entry in entries {
|
||||
for result in self.process_transactions(entry.transactions) {
|
||||
result?;
|
||||
if !entry.transactions.is_empty() {
|
||||
for result in self.process_transactions(entry.transactions) {
|
||||
result?;
|
||||
}
|
||||
}
|
||||
self.register_entry_id(&entry.id);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Process a Witness Signature.
|
||||
/// Process a Witness Signature. Any payment plans waiting on this signature
|
||||
/// will progress one step.
|
||||
fn apply_signature(&self, from: PublicKey, tx_sig: Signature) -> Result<()> {
|
||||
if let Occupied(mut e) = self.pending
|
||||
.write()
|
||||
@@ -282,7 +321,7 @@ impl Bank {
|
||||
{
|
||||
e.get_mut().apply_witness(&Witness::Signature(from));
|
||||
if let Some(payment) = e.get().final_payment() {
|
||||
self.apply_payment(&payment);
|
||||
self.apply_payment(&payment, &mut self.balances.write().unwrap());
|
||||
e.remove_entry();
|
||||
}
|
||||
};
|
||||
@@ -290,7 +329,8 @@ impl Bank {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Process a Witness Timestamp.
|
||||
/// Process a Witness Timestamp. Any payment plans waiting on this timestamp
|
||||
/// will progress one step.
|
||||
fn apply_timestamp(&self, from: PublicKey, dt: DateTime<Utc>) -> Result<()> {
|
||||
// If this is the first timestamp we've seen, it probably came from the genesis block,
|
||||
// so we'll trust it.
|
||||
@@ -329,8 +369,8 @@ impl Bank {
|
||||
plan.apply_witness(&Witness::Timestamp(*self.last_time
|
||||
.read()
|
||||
.expect("'last_time' read lock when creating timestamp")));
|
||||
if let Some(ref payment) = plan.final_payment() {
|
||||
self.apply_payment(payment);
|
||||
if let Some(payment) = plan.final_payment() {
|
||||
self.apply_payment(&payment, &mut self.balances.write().unwrap());
|
||||
completed.push(key.clone());
|
||||
}
|
||||
}
|
||||
@@ -376,7 +416,7 @@ impl Bank {
|
||||
let bals = self.balances
|
||||
.read()
|
||||
.expect("'balances' read lock in get_balance");
|
||||
bals.get(pubkey).map(|x| x.load(Ordering::Relaxed) as i64)
|
||||
bals.get(pubkey).map(|x| *x)
|
||||
}
|
||||
|
||||
pub fn transaction_count(&self) -> usize {
|
||||
@@ -388,11 +428,12 @@ impl Bank {
|
||||
mod tests {
|
||||
use super::*;
|
||||
use bincode::serialize;
|
||||
use entry::next_entry;
|
||||
use hash::hash;
|
||||
use signature::KeyPairUtil;
|
||||
|
||||
#[test]
|
||||
fn test_bank() {
|
||||
fn test_two_payments_to_one_party() {
|
||||
let mint = Mint::new(10_000);
|
||||
let pubkey = KeyPair::new().pubkey();
|
||||
let bank = Bank::new(&mint);
|
||||
@@ -409,7 +450,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_invalid_tokens() {
|
||||
fn test_negative_tokens() {
|
||||
let mint = Mint::new(1);
|
||||
let pubkey = KeyPair::new().pubkey();
|
||||
let bank = Bank::new(&mint);
|
||||
@@ -433,7 +474,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_invalid_transfer() {
|
||||
fn test_insufficient_funds() {
|
||||
let mint = Mint::new(11_000);
|
||||
let bank = Bank::new(&mint);
|
||||
let pubkey = KeyPair::new().pubkey();
|
||||
@@ -570,7 +611,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_max_entry_ids() {
|
||||
fn test_reject_old_last_id() {
|
||||
let mint = Mint::new(1);
|
||||
let bank = Bank::new(&mint);
|
||||
let sig = Signature::default();
|
||||
@@ -599,6 +640,25 @@ mod tests {
|
||||
// Assert bad transactions aren't counted.
|
||||
assert_eq!(bank.transaction_count(), 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_process_empty_entry_is_registered() {
|
||||
let mint = Mint::new(1);
|
||||
let bank = Bank::new(&mint);
|
||||
let keypair = KeyPair::new();
|
||||
let entry = next_entry(&mint.last_id(), 1, vec![]);
|
||||
let tx = Transaction::new(&mint.keypair(), keypair.pubkey(), 1, entry.id);
|
||||
|
||||
// First, ensure the TX is rejected because of the unregistered last ID
|
||||
assert_eq!(
|
||||
bank.process_transaction(&tx),
|
||||
Err(BankError::LastIdNotFound(entry.id))
|
||||
);
|
||||
|
||||
// Now ensure the TX is accepted despite pointing to the ID of an empty entry.
|
||||
bank.process_entries(vec![entry]).unwrap();
|
||||
assert!(bank.process_transaction(&tx).is_ok());
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(all(feature = "unstable", test))]
|
||||
@@ -608,6 +668,7 @@ mod bench {
|
||||
use bank::*;
|
||||
use bincode::serialize;
|
||||
use hash::hash;
|
||||
use rayon::prelude::*;
|
||||
use signature::KeyPairUtil;
|
||||
|
||||
#[bench]
|
||||
@@ -637,8 +698,8 @@ mod bench {
|
||||
.collect();
|
||||
bencher.iter(|| {
|
||||
// Since the benchmarker runs this multiple times, we need to clear the signatures.
|
||||
for sigs in bank.last_ids.read().unwrap().iter() {
|
||||
sigs.1.write().unwrap().clear();
|
||||
for (_, sigs) in bank.last_ids_sigs.write().unwrap().iter_mut() {
|
||||
sigs.clear();
|
||||
}
|
||||
|
||||
assert!(
|
||||
|
@@ -1,14 +1,17 @@
|
||||
//! The `banking_stage` processes Transaction messages.
|
||||
//! The `banking_stage` processes Transaction messages. It is intended to be used
|
||||
//! to construct a software pipeline. The stage uses all available CPU cores and
|
||||
//! can do its processing in parallel with signature verification on the GPU.
|
||||
|
||||
use bank::Bank;
|
||||
use bincode::deserialize;
|
||||
use counter::Counter;
|
||||
use packet;
|
||||
use packet::SharedPackets;
|
||||
use rayon::prelude::*;
|
||||
use record_stage::Signal;
|
||||
use result::Result;
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
|
||||
use std::sync::mpsc::{channel, Receiver, Sender};
|
||||
use std::sync::Arc;
|
||||
use std::thread::{Builder, JoinHandle};
|
||||
@@ -17,12 +20,20 @@ use std::time::Instant;
|
||||
use timing;
|
||||
use transaction::Transaction;
|
||||
|
||||
/// Stores the stage's thread handle and output receiver.
|
||||
pub struct BankingStage {
|
||||
/// Handle to the stage's thread.
|
||||
pub thread_hdl: JoinHandle<()>,
|
||||
|
||||
/// Output receiver for the following stage.
|
||||
pub signal_receiver: Receiver<Signal>,
|
||||
}
|
||||
|
||||
impl BankingStage {
|
||||
/// Create the stage using `bank`. Exit when either `exit` is set or
|
||||
/// when `verified_receiver` or the stage's output receiver is dropped.
|
||||
/// Discard input packets using `packet_recycler` to minimize memory
|
||||
/// allocations in a previous stage such as the `fetch_stage`.
|
||||
pub fn new(
|
||||
bank: Arc<Bank>,
|
||||
exit: Arc<AtomicBool>,
|
||||
@@ -52,6 +63,8 @@ impl BankingStage {
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert the transactions from a blob of binary data to a vector of transactions and
|
||||
/// an unused `SocketAddr` that could be used to send a response.
|
||||
fn deserialize_transactions(p: &packet::Packets) -> Vec<Option<(Transaction, SocketAddr)>> {
|
||||
p.packets
|
||||
.par_iter()
|
||||
@@ -63,6 +76,8 @@ impl BankingStage {
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Process the incoming packets and send output `Signal` messages to `signal_sender`.
|
||||
/// Discard packets via `packet_recycler`.
|
||||
fn process_packets(
|
||||
bank: Arc<Bank>,
|
||||
verified_receiver: &Receiver<Vec<(SharedPackets, Vec<u8>)>>,
|
||||
@@ -80,6 +95,8 @@ impl BankingStage {
|
||||
timing::duration_as_ms(&recv_start.elapsed()),
|
||||
mms.len(),
|
||||
);
|
||||
let count = mms.iter().map(|x| x.1.len()).sum();
|
||||
static mut COUNTER: Counter = create_counter!("banking_stage_process_packets", 1);
|
||||
let proc_start = Instant::now();
|
||||
for (msgs, vers) in mms {
|
||||
let transactions = Self::deserialize_transactions(&msgs.read().unwrap());
|
||||
@@ -100,7 +117,7 @@ impl BankingStage {
|
||||
debug!("process_transactions");
|
||||
let results = bank.process_transactions(transactions);
|
||||
let transactions = results.into_iter().filter_map(|x| x.ok()).collect();
|
||||
signal_sender.send(Signal::Events(transactions))?;
|
||||
signal_sender.send(Signal::Transactions(transactions))?;
|
||||
debug!("done process_transactions");
|
||||
|
||||
packet_recycler.recycle(msgs);
|
||||
@@ -115,6 +132,7 @@ impl BankingStage {
|
||||
reqs_len,
|
||||
(reqs_len as f32) / (total_time_s)
|
||||
);
|
||||
inc_counter!(COUNTER, count, proc_start);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -253,29 +271,141 @@ mod bench {
|
||||
use self::test::Bencher;
|
||||
use bank::*;
|
||||
use banking_stage::BankingStage;
|
||||
use logger;
|
||||
use mint::Mint;
|
||||
use packet::{to_packets, PacketRecycler};
|
||||
use packet::{to_packets_chunked, PacketRecycler};
|
||||
use rayon::prelude::*;
|
||||
use record_stage::Signal;
|
||||
use signature::{KeyPair, KeyPairUtil};
|
||||
use std::iter;
|
||||
use std::sync::mpsc::channel;
|
||||
use std::sync::mpsc::{channel, Receiver};
|
||||
use std::sync::Arc;
|
||||
use transaction::Transaction;
|
||||
|
||||
fn check_txs(batches: usize, receiver: &Receiver<Signal>, ref_tx_count: usize) {
|
||||
let mut total = 0;
|
||||
for _ in 0..batches {
|
||||
let signal = receiver.recv().unwrap();
|
||||
if let Signal::Transactions(transactions) = signal {
|
||||
total += transactions.len();
|
||||
} else {
|
||||
assert!(false);
|
||||
}
|
||||
}
|
||||
assert_eq!(total, ref_tx_count);
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_stage(bencher: &mut Bencher) {
|
||||
let tx = 100_usize;
|
||||
let mint = Mint::new(1_000_000_000);
|
||||
let pubkey = KeyPair::new().pubkey();
|
||||
fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
|
||||
logger::setup();
|
||||
let tx = 30_000_usize;
|
||||
let mint_total = 1_000_000_000_000;
|
||||
let mint = Mint::new(mint_total);
|
||||
let num_dst_accounts = 8 * 1024;
|
||||
let num_src_accounts = 8 * 1024;
|
||||
|
||||
let srckeys: Vec<_> = (0..num_src_accounts).map(|_| KeyPair::new()).collect();
|
||||
let dstkeys: Vec<_> = (0..num_dst_accounts)
|
||||
.map(|_| KeyPair::new().pubkey())
|
||||
.collect();
|
||||
|
||||
info!("created keys src: {} dst: {}", srckeys.len(), dstkeys.len());
|
||||
|
||||
let transactions: Vec<_> = (0..tx)
|
||||
.map(|i| Transaction::new(&mint.keypair(), pubkey, i as i64, mint.last_id()))
|
||||
.map(|i| {
|
||||
Transaction::new(
|
||||
&srckeys[i % num_src_accounts],
|
||||
dstkeys[i % num_dst_accounts],
|
||||
i as i64,
|
||||
mint.last_id(),
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
|
||||
info!("created transactions");
|
||||
|
||||
let (verified_sender, verified_receiver) = channel();
|
||||
let (signal_sender, signal_receiver) = channel();
|
||||
let packet_recycler = PacketRecycler::default();
|
||||
let verified: Vec<_> = to_packets_chunked(&packet_recycler, transactions, 192)
|
||||
.into_iter()
|
||||
.map(|x| {
|
||||
let len = (*x).read().unwrap().packets.len();
|
||||
(x, iter::repeat(1).take(len).collect())
|
||||
})
|
||||
.collect();
|
||||
|
||||
let setup_transactions: Vec<_> = (0..num_src_accounts)
|
||||
.map(|i| {
|
||||
Transaction::new(
|
||||
&mint.keypair(),
|
||||
srckeys[i].pubkey(),
|
||||
mint_total / num_src_accounts as i64,
|
||||
mint.last_id(),
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
|
||||
let verified_setup: Vec<_> = to_packets_chunked(&packet_recycler, setup_transactions, tx)
|
||||
.into_iter()
|
||||
.map(|x| {
|
||||
let len = (*x).read().unwrap().packets.len();
|
||||
(x, iter::repeat(1).take(len).collect())
|
||||
})
|
||||
.collect();
|
||||
|
||||
bencher.iter(move || {
|
||||
let bank = Arc::new(Bank::new(&mint));
|
||||
|
||||
verified_sender.send(verified_setup.clone()).unwrap();
|
||||
BankingStage::process_packets(
|
||||
bank.clone(),
|
||||
&verified_receiver,
|
||||
&signal_sender,
|
||||
&packet_recycler,
|
||||
).unwrap();
|
||||
|
||||
check_txs(verified_setup.len(), &signal_receiver, num_src_accounts);
|
||||
|
||||
verified_sender.send(verified.clone()).unwrap();
|
||||
BankingStage::process_packets(
|
||||
bank.clone(),
|
||||
&verified_receiver,
|
||||
&signal_sender,
|
||||
&packet_recycler,
|
||||
).unwrap();
|
||||
|
||||
check_txs(verified.len(), &signal_receiver, tx);
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_banking_stage_single_from(bencher: &mut Bencher) {
|
||||
logger::setup();
|
||||
let tx = 20_000_usize;
|
||||
let mint = Mint::new(1_000_000_000_000);
|
||||
let mut pubkeys = Vec::new();
|
||||
let num_keys = 8;
|
||||
for _ in 0..num_keys {
|
||||
pubkeys.push(KeyPair::new().pubkey());
|
||||
}
|
||||
|
||||
let transactions: Vec<_> = (0..tx)
|
||||
.into_par_iter()
|
||||
.map(|i| {
|
||||
Transaction::new(
|
||||
&mint.keypair(),
|
||||
pubkeys[i % num_keys],
|
||||
i as i64,
|
||||
mint.last_id(),
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
|
||||
let (verified_sender, verified_receiver) = channel();
|
||||
let (signal_sender, signal_receiver) = channel();
|
||||
let packet_recycler = PacketRecycler::default();
|
||||
let verified: Vec<_> = to_packets(&packet_recycler, transactions)
|
||||
let verified: Vec<_> = to_packets_chunked(&packet_recycler, transactions, tx)
|
||||
.into_iter()
|
||||
.map(|x| {
|
||||
let len = (*x).read().unwrap().packets.len();
|
||||
@@ -292,12 +422,9 @@ mod bench {
|
||||
&signal_sender,
|
||||
&packet_recycler,
|
||||
).unwrap();
|
||||
let signal = signal_receiver.recv().unwrap();
|
||||
if let Signal::Events(ref transactions) = signal {
|
||||
assert_eq!(transactions.len(), tx);
|
||||
} else {
|
||||
assert!(false);
|
||||
}
|
||||
|
||||
check_txs(verified.len(), &signal_receiver, tx);
|
||||
});
|
||||
}
|
||||
|
||||
}
|
||||
|
@@ -1,20 +1,21 @@
|
||||
extern crate atty;
|
||||
extern crate env_logger;
|
||||
extern crate getopts;
|
||||
extern crate isatty;
|
||||
extern crate pnet;
|
||||
extern crate rayon;
|
||||
extern crate serde_json;
|
||||
extern crate solana;
|
||||
|
||||
use atty::{is, Stream};
|
||||
use getopts::Options;
|
||||
use isatty::stdin_isatty;
|
||||
use pnet::datalink;
|
||||
use rayon::prelude::*;
|
||||
use solana::crdt::{Crdt, ReplicatedData};
|
||||
use solana::data_replicator::DataReplicator;
|
||||
use solana::crdt::{get_ip_addr, Crdt, ReplicatedData};
|
||||
use solana::hash::Hash;
|
||||
use solana::mint::MintDemo;
|
||||
use solana::ncp::Ncp;
|
||||
use solana::signature::{GenKeys, KeyPair, KeyPairUtil};
|
||||
use solana::streamer::default_window;
|
||||
use solana::thin_client::ThinClient;
|
||||
use solana::timing::{duration_as_ms, duration_as_s};
|
||||
use solana::transaction::Transaction;
|
||||
use std::env;
|
||||
use std::fs::File;
|
||||
@@ -24,6 +25,7 @@ use std::process::exit;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::thread::sleep;
|
||||
use std::thread::Builder;
|
||||
use std::thread::JoinHandle;
|
||||
use std::time::Duration;
|
||||
use std::time::Instant;
|
||||
@@ -37,26 +39,119 @@ fn print_usage(program: &str, opts: Options) {
|
||||
print!("{}", opts.usage(&brief));
|
||||
}
|
||||
|
||||
fn get_ip_addr() -> Option<IpAddr> {
|
||||
for iface in datalink::interfaces() {
|
||||
for p in iface.ips {
|
||||
if !p.ip().is_loopback() && !p.ip().is_multicast() {
|
||||
return Some(p.ip());
|
||||
}
|
||||
fn sample_tx_count(
|
||||
thread_addr: Arc<RwLock<SocketAddr>>,
|
||||
exit: Arc<AtomicBool>,
|
||||
maxes: Arc<RwLock<Vec<(f64, u64)>>>,
|
||||
first_count: u64,
|
||||
v: ReplicatedData,
|
||||
sample_period: u64,
|
||||
) {
|
||||
let mut client = mk_client(&thread_addr, &v);
|
||||
let mut now = Instant::now();
|
||||
let mut initial_tx_count = client.transaction_count();
|
||||
let mut max_tps = 0.0;
|
||||
let mut total;
|
||||
loop {
|
||||
let tx_count = client.transaction_count();
|
||||
let duration = now.elapsed();
|
||||
now = Instant::now();
|
||||
let sample = tx_count - initial_tx_count;
|
||||
initial_tx_count = tx_count;
|
||||
println!("{}: Transactions processed {}", v.transactions_addr, sample);
|
||||
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
|
||||
let tps = (sample * 1_000_000_000) as f64 / ns as f64;
|
||||
if tps > max_tps {
|
||||
max_tps = tps;
|
||||
}
|
||||
println!("{}: {:.2} tps", v.transactions_addr, tps);
|
||||
total = tx_count - first_count;
|
||||
println!(
|
||||
"{}: Total Transactions processed {}",
|
||||
v.transactions_addr, total
|
||||
);
|
||||
sleep(Duration::new(sample_period, 0));
|
||||
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
println!("exiting validator thread");
|
||||
maxes.write().unwrap().push((max_tps, total));
|
||||
break;
|
||||
}
|
||||
}
|
||||
None
|
||||
}
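The TPS figure printed above is just the sampled transaction count scaled by the elapsed time in nanoseconds; 5,000 transactions observed over 1.25 s (1_250_000_000 ns), for instance, works out to 4,000 TPS. The same computation as a small helper (hypothetical name, mirroring the expression in `sample_tx_count`):

use std::time::Duration;

fn tps(sample: u64, duration: &Duration) -> f64 {
    let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
    (sample * 1_000_000_000) as f64 / ns as f64
}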
|
||||
|
||||
fn generate_and_send_txs(
|
||||
client: &mut ThinClient,
|
||||
keypair_pairs: &Vec<&[KeyPair]>,
|
||||
leader: &ReplicatedData,
|
||||
txs: i64,
|
||||
last_id: &mut Hash,
|
||||
threads: usize,
|
||||
client_addr: Arc<RwLock<SocketAddr>>,
|
||||
) {
|
||||
println!(
|
||||
"Signing transactions... {} {}",
|
||||
keypair_pairs.len(),
|
||||
keypair_pairs[0].len()
|
||||
);
|
||||
let signing_start = Instant::now();
|
||||
let transactions: Vec<_> = keypair_pairs
|
||||
.par_iter()
|
||||
.map(|chunk| Transaction::new(&chunk[0], chunk[1].pubkey(), 1, *last_id))
|
||||
.collect();
|
||||
|
||||
let duration = signing_start.elapsed();
|
||||
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
|
||||
let bsps = txs as f64 / ns as f64;
|
||||
let nsps = ns as f64 / txs as f64;
|
||||
println!(
|
||||
"Done. {:.2} thousand signatures per second, {:.2} us per signature, {} ms total time",
|
||||
bsps * 1_000_000_f64,
|
||||
nsps / 1_000_f64,
|
||||
duration_as_ms(&duration),
|
||||
);
|
||||
|
||||
println!("Transfering {} transactions in {} batches", txs, threads);
|
||||
let transfer_start = Instant::now();
|
||||
let sz = transactions.len() / threads;
|
||||
let chunks: Vec<_> = transactions.chunks(sz).collect();
|
||||
chunks.into_par_iter().for_each(|txs| {
|
||||
println!(
|
||||
"Transferring 1 unit {} times... to {:?}",
|
||||
txs.len(),
|
||||
leader.transactions_addr
|
||||
);
|
||||
let client = mk_client(&client_addr, &leader);
|
||||
for tx in txs {
|
||||
client.transfer_signed(tx.clone()).unwrap();
|
||||
}
|
||||
});
|
||||
println!(
|
||||
"Transfer done. {:?} ms {} tps",
|
||||
duration_as_ms(&transfer_start.elapsed()),
|
||||
txs as f32 / (duration_as_s(&transfer_start.elapsed()))
|
||||
);
|
||||
|
||||
*last_id = client.get_last_id();
|
||||
}
|
||||
|
||||
fn main() {
|
||||
env_logger::init();
|
||||
let mut threads = 4usize;
|
||||
let mut num_nodes = 1usize;
|
||||
let mut time_sec = 60;
|
||||
|
||||
let mut opts = Options::new();
|
||||
opts.optopt("l", "", "leader", "leader.json");
|
||||
opts.optopt("c", "", "client port", "port");
|
||||
opts.optopt("t", "", "number of threads", &format!("{}", threads));
|
||||
opts.optflag("d", "dyn", "detect network address dynamically");
|
||||
opts.optopt(
|
||||
"s",
|
||||
"",
|
||||
"send transactions for this many seconds",
|
||||
&format!("{}", time_sec),
|
||||
);
|
||||
opts.optopt(
|
||||
"n",
|
||||
"",
|
||||
@@ -93,6 +188,9 @@ fn main() {
|
||||
if matches.opt_present("n") {
|
||||
num_nodes = matches.opt_str("n").unwrap().parse().expect("integer");
|
||||
}
|
||||
if matches.opt_present("s") {
|
||||
time_sec = matches.opt_str("s").unwrap().parse().expect("integer");
|
||||
}
|
||||
|
||||
let leader = if matches.opt_present("l") {
|
||||
read_leader(matches.opt_str("l").unwrap())
|
||||
@@ -107,11 +205,12 @@ fn main() {
|
||||
&client_addr,
|
||||
&leader,
|
||||
signal.clone(),
|
||||
num_nodes + 2,
|
||||
num_nodes,
|
||||
&mut c_threads,
|
||||
);
|
||||
assert_eq!(validators.len(), num_nodes);
|
||||
|
||||
if stdin_isatty() {
|
||||
if is(Stream::Stdin) {
|
||||
eprintln!("nothing found on stdin, expected a json file");
|
||||
exit(1);
|
||||
}
|
||||
@@ -131,83 +230,81 @@ fn main() {
|
||||
let mut client = mk_client(&client_addr, &leader);
|
||||
|
||||
println!("Get last ID...");
|
||||
let last_id = client.get_last_id();
|
||||
let mut last_id = client.get_last_id();
|
||||
println!("Got last ID {:?}", last_id);
|
||||
|
||||
let rnd = GenKeys::new(demo.mint.keypair().public_key_bytes());
|
||||
let mut seed = [0u8; 32];
|
||||
seed.copy_from_slice(&demo.mint.keypair().public_key_bytes()[..32]);
|
||||
let rnd = GenKeys::new(seed);
|
||||
|
||||
println!("Creating keypairs...");
|
||||
let txs = demo.num_accounts / 2;
|
||||
let keypairs = rnd.gen_n_keypairs(demo.num_accounts);
|
||||
let keypair_pairs: Vec<_> = keypairs.chunks(2).collect();
|
||||
|
||||
println!("Signing transactions...");
|
||||
let now = Instant::now();
|
||||
let transactions: Vec<_> = keypair_pairs
|
||||
.into_par_iter()
|
||||
.map(|chunk| Transaction::new(&chunk[0], chunk[1].pubkey(), 1, last_id))
|
||||
.collect();
|
||||
let duration = now.elapsed();
|
||||
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
|
||||
let bsps = txs as f64 / ns as f64;
|
||||
let nsps = ns as f64 / txs as f64;
|
||||
println!(
|
||||
"Done. {} thousand signatures per second, {}us per signature",
|
||||
bsps * 1_000_000_f64,
|
||||
nsps / 1_000_f64
|
||||
);
|
||||
|
||||
let first_count = client.transaction_count();
|
||||
println!("initial count {}", first_count);
|
||||
|
||||
println!("Transfering {} transactions in {} batches", txs, threads);
|
||||
let sz = transactions.len() / threads;
|
||||
let chunks: Vec<_> = transactions.chunks(sz).collect();
|
||||
chunks.into_par_iter().for_each(|txs| {
|
||||
println!(
|
||||
"Transferring 1 unit {} times... to {:?}",
|
||||
txs.len(),
|
||||
leader.transactions_addr
|
||||
);
|
||||
let client = mk_client(&client_addr, &leader);
|
||||
for tx in txs {
|
||||
client.transfer_signed(tx.clone()).unwrap();
|
||||
}
|
||||
});
|
||||
|
||||
println!("Sampling tps every second...",);
|
||||
validators.into_par_iter().for_each(|val| {
|
||||
let mut client = mk_client(&client_addr, &val);
|
||||
let mut now = Instant::now();
|
||||
let mut initial_tx_count = client.transaction_count();
|
||||
for i in 0..100 {
|
||||
let tx_count = client.transaction_count();
|
||||
let duration = now.elapsed();
|
||||
now = Instant::now();
|
||||
let sample = tx_count - initial_tx_count;
|
||||
initial_tx_count = tx_count;
|
||||
println!(
|
||||
"{}: Transactions processed {}",
|
||||
val.transactions_addr, sample
|
||||
);
|
||||
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
|
||||
let tps = (sample * 1_000_000_000) as f64 / ns as f64;
|
||||
println!("{}: {} tps", val.transactions_addr, tps);
|
||||
let total = tx_count - first_count;
|
||||
println!(
|
||||
"{}: Total Transactions processed {}",
|
||||
val.transactions_addr, total
|
||||
);
|
||||
if total == transactions.len() as u64 {
|
||||
break;
|
||||
}
|
||||
if i > 20 && sample == 0 {
|
||||
break;
|
||||
}
|
||||
sleep(Duration::new(1, 0));
|
||||
}
|
||||
});
|
||||
|
||||
// Set up a thread per validator to sample every period and
// collect the max transaction rate and total tx count seen.
|
||||
let maxes = Arc::new(RwLock::new(Vec::new()));
|
||||
let sample_period = 1; // in seconds
|
||||
let v_threads: Vec<_> = validators
|
||||
.into_iter()
|
||||
.map(|v| {
|
||||
let exit = signal.clone();
|
||||
let thread_addr = client_addr.clone();
|
||||
let maxes = maxes.clone();
|
||||
Builder::new()
|
||||
.name("solana-client-sample".to_string())
|
||||
.spawn(move || {
|
||||
sample_tx_count(thread_addr, exit, maxes, first_count, v, sample_period);
|
||||
})
|
||||
.unwrap()
|
||||
})
|
||||
.collect();
|
||||
|
||||
// generate and send transactions for the specified duration
|
||||
let time = Duration::new(time_sec, 0);
|
||||
let now = Instant::now();
|
||||
while now.elapsed() < time {
|
||||
generate_and_send_txs(
|
||||
&mut client,
|
||||
&keypair_pairs,
|
||||
&leader,
|
||||
txs,
|
||||
&mut last_id,
|
||||
threads,
|
||||
client_addr.clone(),
|
||||
);
|
||||
}
|
||||
|
||||
// Stop the sampling threads so they will collect the stats
|
||||
signal.store(true, Ordering::Relaxed);
|
||||
for t in v_threads {
|
||||
t.join().unwrap();
|
||||
}
|
||||
|
||||
// Compute/report stats
|
||||
let mut max_of_maxes = 0.0;
|
||||
let mut total_txs = 0;
|
||||
for (max, txs) in maxes.read().unwrap().iter() {
|
||||
if *max > max_of_maxes {
|
||||
max_of_maxes = *max;
|
||||
}
|
||||
total_txs += *txs;
|
||||
}
|
||||
println!(
|
||||
"\nHighest TPS: {:.2} sampling period {}s total transactions: {} clients: {}",
|
||||
max_of_maxes,
|
||||
sample_period,
|
||||
total_txs,
|
||||
maxes.read().unwrap().len()
|
||||
);
|
||||
|
||||
// join the crdt client threads
|
||||
for t in c_threads {
|
||||
t.join().unwrap();
|
||||
}
|
||||
@@ -219,6 +316,10 @@ fn mk_client(locked_addr: &Arc<RwLock<SocketAddr>>, r: &ReplicatedData) -> ThinC
|
||||
let transactions_socket = UdpSocket::bind(addr.clone()).unwrap();
|
||||
addr.set_port(port + 1);
|
||||
let requests_socket = UdpSocket::bind(addr.clone()).unwrap();
|
||||
requests_socket
|
||||
.set_read_timeout(Some(Duration::new(1, 0)))
|
||||
.unwrap();
|
||||
|
||||
addr.set_port(port + 2);
|
||||
ThinClient::new(
|
||||
r.requests_addr,
|
||||
@@ -235,7 +336,14 @@ fn spy_node(client_addr: &Arc<RwLock<SocketAddr>>) -> (ReplicatedData, UdpSocket
|
||||
addr.set_port(port + 1);
|
||||
let daddr = "0.0.0.0:0".parse().unwrap();
|
||||
let pubkey = KeyPair::new().pubkey();
|
||||
let node = ReplicatedData::new(pubkey, gossip.local_addr().unwrap(), daddr, daddr, daddr);
|
||||
let node = ReplicatedData::new(
|
||||
pubkey,
|
||||
gossip.local_addr().unwrap(),
|
||||
daddr,
|
||||
daddr,
|
||||
daddr,
|
||||
daddr,
|
||||
);
|
||||
(node, gossip)
|
||||
}
|
||||
|
||||
@@ -255,36 +363,37 @@ fn converge(
|
||||
let spy_ref = Arc::new(RwLock::new(spy_crdt));
|
||||
let window = default_window();
|
||||
let gossip_send_socket = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
|
||||
let data_replicator = DataReplicator::new(
|
||||
let ncp = Ncp::new(
|
||||
spy_ref.clone(),
|
||||
window.clone(),
|
||||
spy_gossip,
|
||||
gossip_send_socket,
|
||||
exit.clone(),
|
||||
).expect("DataReplicator::new");
|
||||
//wait for the network to converge
|
||||
let mut rv = vec![];
|
||||
//wait for the network to converge, 30 seconds should be plenty
|
||||
for _ in 0..30 {
|
||||
let min = spy_ref.read().unwrap().convergence();
|
||||
if num_nodes as u64 == min {
|
||||
println!("converged!");
|
||||
let v: Vec<ReplicatedData> = spy_ref
|
||||
.read()
|
||||
.unwrap()
|
||||
.table
|
||||
.values()
|
||||
.into_iter()
|
||||
.filter(|x| x.requests_addr != daddr)
|
||||
.cloned()
|
||||
.collect();
|
||||
if v.len() >= num_nodes {
|
||||
println!("CONVERGED!");
|
||||
rv.extend(v.into_iter());
|
||||
break;
|
||||
}
|
||||
sleep(Duration::new(1, 0));
|
||||
}
|
||||
threads.extend(data_replicator.thread_hdls.into_iter());
|
||||
let v: Vec<ReplicatedData> = spy_ref
|
||||
.read()
|
||||
.unwrap()
|
||||
.table
|
||||
.values()
|
||||
.into_iter()
|
||||
.filter(|x| x.requests_addr != daddr)
|
||||
.map(|x| x.clone())
|
||||
.collect();
|
||||
v.clone()
|
||||
threads.extend(ncp.thread_hdls.into_iter());
|
||||
rv
|
||||
}
|
||||
|
||||
fn read_leader(path: String) -> ReplicatedData {
|
||||
let file = File::open(path).expect("file");
|
||||
serde_json::from_reader(file).expect("parse")
|
||||
let file = File::open(path.clone()).expect(&format!("file not found: {}", path));
|
||||
serde_json::from_reader(file).expect(&format!("failed to parse {}", path))
|
||||
}
|
||||
|
168  src/bin/drone.rs  Normal file
@@ -0,0 +1,168 @@
|
||||
extern crate atty;
|
||||
extern crate bincode;
|
||||
extern crate env_logger;
|
||||
extern crate getopts;
|
||||
extern crate serde_json;
|
||||
extern crate solana;
|
||||
extern crate tokio;
|
||||
extern crate tokio_codec;
|
||||
extern crate tokio_io;
|
||||
|
||||
use atty::{is, Stream as atty_stream};
|
||||
use bincode::deserialize;
|
||||
use getopts::Options;
|
||||
use solana::crdt::{get_ip_addr, ReplicatedData};
|
||||
use solana::drone::{Drone, DroneRequest};
|
||||
use solana::mint::MintDemo;
|
||||
use std::env;
|
||||
use std::fs::File;
|
||||
use std::io::{stdin, Read};
|
||||
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
|
||||
use std::process::exit;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::thread;
|
||||
use tokio::net::TcpListener;
|
||||
use tokio::prelude::*;
|
||||
use tokio_codec::{BytesCodec, Decoder};
|
||||
|
||||
fn print_usage(program: &str, opts: Options) {
|
||||
let mut brief = format!("Usage: cat <mint-demo.json> | {} [options]\n\n", program);
|
||||
brief += " Run a Solana Drone to act as the custodian of the mint's remaining tokens\n";
|
||||
|
||||
print!("{}", opts.usage(&brief));
|
||||
}
|
||||
|
||||
fn main() {
|
||||
env_logger::init();
|
||||
let mut opts = Options::new();
|
||||
opts.optopt(
|
||||
"t",
|
||||
"",
|
||||
"time",
|
||||
"time slice over which to limit token requests to drone",
|
||||
);
|
||||
opts.optopt("c", "", "cap", "request limit for time slice");
|
||||
opts.optopt("l", "", "leader", "leader.json");
|
||||
opts.optflag("h", "help", "print help");
|
||||
let args: Vec<String> = env::args().collect();
|
||||
let matches = match opts.parse(&args[1..]) {
|
||||
Ok(m) => m,
|
||||
Err(e) => {
|
||||
eprintln!("{}", e);
|
||||
exit(1);
|
||||
}
|
||||
};
|
||||
if matches.opt_present("h") {
|
||||
let program = args[0].clone();
|
||||
print_usage(&program, opts);
|
||||
return;
|
||||
}
|
||||
let time_slice: Option<u64>;
|
||||
if matches.opt_present("t") {
|
||||
time_slice = matches
|
||||
.opt_str("t")
|
||||
.expect("unexpected string from input")
|
||||
.parse()
|
||||
.ok();
|
||||
} else {
|
||||
time_slice = None;
|
||||
}
|
||||
let request_cap: Option<u64>;
|
||||
if matches.opt_present("c") {
|
||||
request_cap = matches
|
||||
.opt_str("c")
|
||||
.expect("unexpected string from input")
|
||||
.parse()
|
||||
.ok();
|
||||
} else {
|
||||
request_cap = None;
|
||||
}
|
||||
let leader = if matches.opt_present("l") {
|
||||
read_leader(matches.opt_str("l").unwrap())
|
||||
} else {
|
||||
let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
|
||||
ReplicatedData::new_leader(&server_addr)
|
||||
};
|
||||
|
||||
if is(atty_stream::Stdin) {
|
||||
eprintln!("nothing found on stdin, expected a json file");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
let mut buffer = String::new();
|
||||
let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
|
||||
if num_bytes == 0 {
|
||||
eprintln!("empty file on stdin, expected a json file");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
let demo: MintDemo = serde_json::from_str(&buffer).unwrap_or_else(|e| {
|
||||
eprintln!("failed to parse json: {}", e);
|
||||
exit(1);
|
||||
});
|
||||
|
||||
let mint_keypair = demo.mint.keypair();
|
||||
|
||||
let mut drone_addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
|
||||
drone_addr.set_ip(get_ip_addr().unwrap());
|
||||
|
||||
let drone = Arc::new(Mutex::new(Drone::new(
|
||||
mint_keypair,
|
||||
drone_addr,
|
||||
leader.transactions_addr,
|
||||
leader.requests_addr,
|
||||
time_slice,
|
||||
request_cap,
|
||||
)));
|
||||
|
||||
let drone1 = drone.clone();
|
||||
thread::spawn(move || loop {
|
||||
let time = drone1.lock().unwrap().time_slice;
|
||||
thread::sleep(time);
|
||||
drone1.lock().unwrap().clear_request_count();
|
||||
});
|
||||
|
||||
let socket = TcpListener::bind(&drone_addr).unwrap();
|
||||
println!("Drone started. Listening on: {}", drone_addr);
|
||||
let done = socket
|
||||
.incoming()
|
||||
.map_err(|e| println!("failed to accept socket; error = {:?}", e))
|
||||
.for_each(move |socket| {
|
||||
let drone2 = drone.clone();
|
||||
// let client_ip = socket.peer_addr().expect("drone peer_addr").ip();
|
||||
let framed = BytesCodec::new().framed(socket);
|
||||
let (_writer, reader) = framed.split();
|
||||
|
||||
let processor = reader
|
||||
.for_each(move |bytes| {
|
||||
let req: DroneRequest =
|
||||
deserialize(&bytes).expect("deserialize packet in drone");
|
||||
println!("Airdrop requested...");
|
||||
// let res = drone2.lock().unwrap().check_rate_limit(client_ip);
|
||||
let res1 = drone2.lock().unwrap().send_airdrop(req);
|
||||
match res1 {
|
||||
Ok(_) => println!("Airdrop sent!"),
|
||||
Err(_) => println!("Request limit reached for this time slice"),
|
||||
}
|
||||
Ok(())
|
||||
})
|
||||
.and_then(|()| {
|
||||
println!("Socket received FIN packet and closed connection");
|
||||
Ok(())
|
||||
})
|
||||
.or_else(|err| {
|
||||
println!("Socket closed with error: {:?}", err);
|
||||
Err(err)
|
||||
})
|
||||
.then(|result| {
|
||||
println!("Socket closed with result: {:?}", result);
|
||||
Ok(())
|
||||
});
|
||||
tokio::spawn(processor)
|
||||
});
|
||||
tokio::run(done);
|
||||
}
|
||||
fn read_leader(path: String) -> ReplicatedData {
|
||||
let file = File::open(path.clone()).expect(&format!("file not found: {}", path));
|
||||
serde_json::from_reader(file).expect(&format!("failed to parse {}", path))
|
||||
}
|
@@ -1,13 +1,12 @@
|
||||
extern crate atty;
|
||||
extern crate env_logger;
|
||||
extern crate getopts;
|
||||
extern crate isatty;
|
||||
extern crate log;
|
||||
extern crate serde_json;
|
||||
extern crate solana;
|
||||
#[macro_use]
|
||||
extern crate log;
|
||||
|
||||
use atty::{is, Stream};
|
||||
use getopts::Options;
|
||||
use isatty::stdin_isatty;
|
||||
use solana::bank::Bank;
|
||||
use solana::crdt::ReplicatedData;
|
||||
use solana::entry::Entry;
|
||||
@@ -16,7 +15,7 @@ use solana::server::Server;
|
||||
use solana::transaction::Instruction;
|
||||
use std::env;
|
||||
use std::fs::File;
|
||||
use std::io::{stdin, Read};
|
||||
use std::io::{stdin, stdout, BufRead, Write};
|
||||
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
|
||||
use std::process::exit;
|
||||
use std::sync::atomic::AtomicBool;
|
||||
@@ -33,16 +32,23 @@ fn print_usage(program: &str, opts: Options) {
|
||||
}
|
||||
|
||||
fn main() {
|
||||
env_logger::init().unwrap();
|
||||
env_logger::init();
|
||||
let mut opts = Options::new();
|
||||
opts.optopt("l", "", "load", "load my identity to path.json");
|
||||
opts.optflag("h", "help", "print help");
|
||||
opts.optopt("l", "", "run with the identity found in FILE", "FILE");
|
||||
opts.optopt(
|
||||
"v",
|
||||
"t",
|
||||
"",
|
||||
"validator",
|
||||
"run as replicate with path to leader.json",
|
||||
"testnet; connect to the network at this gossip entry point",
|
||||
"HOST:PORT",
|
||||
);
|
||||
opts.optopt(
|
||||
"o",
|
||||
"",
|
||||
"output log to FILE, defaults to stdout (ignored by validators)",
|
||||
"FILE",
|
||||
);
|
||||
|
||||
let args: Vec<String> = env::args().collect();
|
||||
let matches = match opts.parse(&args[1..]) {
|
||||
Ok(m) => m,
|
||||
@@ -56,46 +62,42 @@ fn main() {
|
||||
print_usage(&program, opts);
|
||||
return;
|
||||
}
|
||||
if stdin_isatty() {
|
||||
if is(Stream::Stdin) {
|
||||
eprintln!("nothing found on stdin, expected a log file");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
let mut buffer = String::new();
|
||||
let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
|
||||
if num_bytes == 0 {
|
||||
eprintln!("empty file on stdin, expected a log file");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
eprintln!("Initializing...");
|
||||
let mut entries = buffer.lines().map(|line| {
|
||||
serde_json::from_str(&line).unwrap_or_else(|e| {
|
||||
let stdin = stdin();
|
||||
let mut entries = stdin.lock().lines().map(|line| {
|
||||
let entry: Entry = serde_json::from_str(&line.unwrap()).unwrap_or_else(|e| {
|
||||
eprintln!("failed to parse json: {}", e);
|
||||
exit(1);
|
||||
})
|
||||
});
|
||||
entry
|
||||
});
|
||||
|
||||
eprintln!("done parsing...");
|
||||
|
||||
// The first item in the ledger is required to be an entry with zero num_hashes,
|
||||
// which implies its id can be used as the ledger's seed.
|
||||
let entry0 = entries.next().unwrap();
|
||||
let entry0 = entries.next().expect("invalid ledger: empty");
|
||||
|
||||
// The second item in the ledger is a special transaction where the to and from
|
||||
// fields are the same. That entry should be treated as a deposit, not a
|
||||
// transfer to oneself.
|
||||
let entry1: Entry = entries.next().unwrap();
|
||||
let entry1 = entries
|
||||
.next()
|
||||
.expect("invalid ledger: need at least 2 entries");
|
||||
let tx = &entry1.transactions[0];
|
||||
let deposit = if let Instruction::NewContract(contract) = &tx.instruction {
|
||||
contract.plan.final_payment()
|
||||
} else {
|
||||
None
|
||||
};
|
||||
}.expect("invalid ledger, needs to start with a contract");
|
||||
|
||||
eprintln!("creating bank...");
|
||||
|
||||
let bank = Bank::new_from_deposit(&deposit.unwrap());
|
||||
let bank = Bank::new_from_deposit(&deposit);
|
||||
bank.register_entry_id(&entry0.id);
|
||||
bank.register_entry_id(&entry1.id);
|
||||
|
||||
@@ -104,7 +106,6 @@ fn main() {
|
||||
|
||||
eprintln!("creating networking stack...");
|
||||
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let bind_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
|
||||
let mut repl_data = ReplicatedData::new_leader(&bind_addr);
|
||||
if matches.opt_present("l") {
|
||||
@@ -113,15 +114,24 @@ fn main() {
|
||||
if let Ok(data) = serde_json::from_reader(file) {
|
||||
repl_data = data;
|
||||
} else {
|
||||
warn!("failed to parse leader {}, generating new identity", path);
|
||||
eprintln!("failed to parse {}", path);
|
||||
exit(1);
|
||||
}
|
||||
} else {
|
||||
eprintln!("failed to read {}", path);
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
let threads = if matches.opt_present("v") {
|
||||
eprintln!("starting validator... {}", repl_data.requests_addr);
|
||||
let path = matches.opt_str("v").unwrap();
|
||||
let file = File::open(path).expect("file");
|
||||
let leader = serde_json::from_reader(file).expect("parse");
|
||||
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let threads = if matches.opt_present("t") {
|
||||
let testnet_address_string = matches.opt_str("t").unwrap();
|
||||
eprintln!(
|
||||
"starting validator... {} connecting to {}",
|
||||
repl_data.requests_addr, testnet_address_string
|
||||
);
|
||||
let testnet_addr = testnet_address_string.parse().unwrap();
|
||||
let newtwork_entry_point = ReplicatedData::new_entry_point(testnet_addr);
|
||||
let s = Server::new_validator(
|
||||
bank,
|
||||
repl_data.clone(),
|
||||
@@ -129,14 +139,24 @@ fn main() {
|
||||
UdpSocket::bind("0.0.0.0:0").unwrap(),
|
||||
UdpSocket::bind(repl_data.replicate_addr).unwrap(),
|
||||
UdpSocket::bind(repl_data.gossip_addr).unwrap(),
|
||||
leader,
|
||||
UdpSocket::bind(repl_data.repair_addr).unwrap(),
|
||||
newtwork_entry_point,
|
||||
exit.clone(),
|
||||
);
|
||||
s.thread_hdls
|
||||
} else {
|
||||
eprintln!("starting leader... {}", repl_data.requests_addr);
|
||||
repl_data.current_leader_id = repl_data.id.clone();
|
||||
let file = File::create("leader.log").expect("leader.log create");
|
||||
|
||||
let outfile: Box<Write + Send + 'static> = if matches.opt_present("o") {
|
||||
let path = matches.opt_str("o").unwrap();
|
||||
Box::new(
|
||||
File::create(&path).expect(&format!("unable to open output file \"{}\"", path)),
|
||||
)
|
||||
} else {
|
||||
Box::new(stdout())
|
||||
};
|
||||
|
||||
let server = Server::new_leader(
|
||||
bank,
|
||||
//Some(Duration::from_millis(1000)),
|
||||
@@ -148,7 +168,7 @@ fn main() {
|
||||
UdpSocket::bind("0.0.0.0:0").unwrap(),
|
||||
UdpSocket::bind(repl_data.gossip_addr).unwrap(),
|
||||
exit.clone(),
|
||||
file,
|
||||
outfile,
|
||||
);
|
||||
server.thread_hdls
|
||||
};
|
||||
|
@@ -1,12 +1,13 @@
|
||||
extern crate isatty;
|
||||
extern crate atty;
|
||||
extern crate rayon;
|
||||
extern crate serde_json;
|
||||
extern crate solana;
|
||||
|
||||
use isatty::stdin_isatty;
|
||||
use atty::{is, Stream};
|
||||
use rayon::prelude::*;
|
||||
use solana::bank::MAX_ENTRY_IDS;
|
||||
use solana::entry::{next_entry, Entry};
|
||||
use solana::entry::next_entry;
|
||||
use solana::ledger::next_entries;
|
||||
use solana::mint::MintDemo;
|
||||
use solana::signature::{GenKeys, KeyPairUtil};
|
||||
use solana::transaction::Transaction;
|
||||
@@ -15,7 +16,7 @@ use std::process::exit;
|
||||
|
||||
// Generate a ledger with lots and lots of accounts.
|
||||
fn main() {
|
||||
if stdin_isatty() {
|
||||
if is(Stream::Stdin) {
|
||||
eprintln!("nothing found on stdin, expected a json file");
|
||||
exit(1);
|
||||
}
|
||||
@@ -32,9 +33,11 @@ fn main() {
|
||||
exit(1);
|
||||
});
|
||||
|
||||
let rnd = GenKeys::new(demo.mint.keypair().public_key_bytes());
|
||||
let mut seed = [0u8; 32];
|
||||
seed.copy_from_slice(&demo.mint.keypair().public_key_bytes()[..32]);
|
||||
let rnd = GenKeys::new(seed);
|
||||
let num_accounts = demo.num_accounts;
|
||||
let tokens_per_user = 1_000;
|
||||
let tokens_per_user = 500;
|
||||
|
||||
let keypairs = rnd.gen_n_keypairs(num_accounts);
|
||||
|
||||
@@ -72,6 +75,8 @@ fn main() {
|
||||
.collect();
|
||||
|
||||
eprintln!("Logging the creation of {} accounts...", num_accounts);
|
||||
let entry = Entry::new(&last_id, 0, transactions);
|
||||
println!("{}", serde_json::to_string(&entry).unwrap());
|
||||
let entries = next_entries(&last_id, 0, transactions);
|
||||
for entry in entries {
|
||||
println!("{}", serde_json::to_string(&entry).unwrap());
|
||||
}
|
||||
}
|
||||
|
@@ -1,16 +1,16 @@
|
||||
//! A command-line executable for generating the chain's genesis block.
|
||||
|
||||
extern crate isatty;
|
||||
extern crate atty;
|
||||
extern crate serde_json;
|
||||
extern crate solana;
|
||||
|
||||
use isatty::stdin_isatty;
|
||||
use atty::{is, Stream};
|
||||
use solana::mint::Mint;
|
||||
use std::io::{stdin, Read};
|
||||
use std::process::exit;
|
||||
|
||||
fn main() {
|
||||
if stdin_isatty() {
|
||||
if is(Stream::Stdin) {
|
||||
eprintln!("nothing found on stdin, expected a json file");
|
||||
exit(1);
|
||||
}
|
||||
|
@@ -1,13 +1,21 @@
|
||||
extern crate atty;
|
||||
extern crate rayon;
|
||||
extern crate ring;
|
||||
extern crate serde_json;
|
||||
extern crate solana;
|
||||
|
||||
use atty::{is, Stream};
|
||||
use solana::mint::{Mint, MintDemo};
|
||||
use std::io;
|
||||
use std::process::exit;
|
||||
|
||||
fn main() {
|
||||
let mut input_text = String::new();
|
||||
if is(Stream::Stdin) {
|
||||
eprintln!("nothing found on stdin, expected a token number");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
io::stdin().read_line(&mut input_text).unwrap();
|
||||
let trimmed = input_text.trim();
|
||||
let tokens = trimmed.parse::<i64>().unwrap();
|
||||
|
@@ -1,15 +1,15 @@
|
||||
extern crate isatty;
|
||||
extern crate atty;
|
||||
extern crate serde_json;
|
||||
extern crate solana;
|
||||
|
||||
use isatty::stdin_isatty;
|
||||
use atty::{is, Stream};
|
||||
use solana::mint::Mint;
|
||||
use std::io;
|
||||
use std::process::exit;
|
||||
|
||||
fn main() {
|
||||
let mut input_text = String::new();
|
||||
if stdin_isatty() {
|
||||
if is(Stream::Stdin) {
|
||||
eprintln!("nothing found on stdin, expected a token number");
|
||||
exit(1);
|
||||
}
|
||||
|
47  src/blob_fetch_stage.rs  Normal file
@@ -0,0 +1,47 @@
|
||||
//! The `blob_fetch_stage` pulls blobs from UDP sockets and sends them to a channel.
|
||||
|
||||
use packet;
|
||||
use std::net::UdpSocket;
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::sync::mpsc::channel;
|
||||
use std::sync::Arc;
|
||||
use std::thread::JoinHandle;
|
||||
use streamer;
|
||||
|
||||
pub struct BlobFetchStage {
|
||||
pub blob_receiver: streamer::BlobReceiver,
|
||||
pub thread_hdls: Vec<JoinHandle<()>>,
|
||||
}
|
||||
|
||||
impl BlobFetchStage {
|
||||
pub fn new(
|
||||
socket: UdpSocket,
|
||||
exit: Arc<AtomicBool>,
|
||||
blob_recycler: packet::BlobRecycler,
|
||||
) -> Self {
|
||||
Self::new_multi_socket(vec![socket], exit, blob_recycler)
|
||||
}
|
||||
pub fn new_multi_socket(
|
||||
sockets: Vec<UdpSocket>,
|
||||
exit: Arc<AtomicBool>,
|
||||
blob_recycler: packet::BlobRecycler,
|
||||
) -> Self {
|
||||
let (blob_sender, blob_receiver) = channel();
|
||||
let thread_hdls: Vec<_> = sockets
|
||||
.into_iter()
|
||||
.map(|socket| {
|
||||
streamer::blob_receiver(
|
||||
exit.clone(),
|
||||
blob_recycler.clone(),
|
||||
socket,
|
||||
blob_sender.clone(),
|
||||
).expect("blob receiver init")
|
||||
})
|
||||
.collect();
|
||||
|
||||
BlobFetchStage {
|
||||
blob_receiver,
|
||||
thread_hdls,
|
||||
}
|
||||
}
|
||||
}
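Wiring the stage up only needs a bound socket, a shared exit flag, and a blob recycler; blobs then arrive on `blob_receiver` for the next stage. A hedged usage sketch against the constructor above (it assumes `BlobRecycler`, like the packet recycler used elsewhere in this diff, provides `default()`; the shutdown policy is illustrative):

use packet::BlobRecycler;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;

fn demo_blob_fetch() {
    let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
    let exit = Arc::new(AtomicBool::new(false));
    let blob_recycler = BlobRecycler::default();
    let stage = BlobFetchStage::new(socket, exit.clone(), blob_recycler);

    // ... hand `stage.blob_receiver` to the next stage in the pipeline ...

    // Shut down: flip the exit flag, then join the receiver threads.
    exit.store(true, Ordering::Relaxed);
    for t in stage.thread_hdls {
        t.join().unwrap();
    }
}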
|
@@ -8,9 +8,13 @@ use payment_plan::{Payment, PaymentPlan, Witness};
|
||||
use signature::PublicKey;
|
||||
use std::mem;
|
||||
|
||||
/// A data type representing a `Witness` that the payment plan is waiting on.
|
||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
|
||||
pub enum Condition {
|
||||
/// Wait for a `Timestamp` `Witness` at or after the given `DateTime`.
|
||||
Timestamp(DateTime<Utc>),
|
||||
|
||||
/// Wait for a `Signature` `Witness` from `PublicKey`.
|
||||
Signature(PublicKey),
|
||||
}
|
||||
|
||||
@@ -18,19 +22,26 @@ impl Condition {
|
||||
/// Return true if the given Witness satisfies this Condition.
|
||||
pub fn is_satisfied(&self, witness: &Witness) -> bool {
|
||||
match (self, witness) {
|
||||
(&Condition::Signature(ref pubkey), &Witness::Signature(ref from)) => pubkey == from,
|
||||
(&Condition::Timestamp(ref dt), &Witness::Timestamp(ref last_time)) => dt <= last_time,
|
||||
(Condition::Signature(pubkey), Witness::Signature(from)) => pubkey == from,
|
||||
(Condition::Timestamp(dt), Witness::Timestamp(last_time)) => dt <= last_time,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A data type representing a payment plan.
|
||||
#[repr(C)]
|
||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
|
||||
pub enum Budget {
|
||||
/// Make a payment.
|
||||
Pay(Payment),
|
||||
|
||||
/// Make a payment after some condition.
|
||||
After(Condition, Payment),
|
||||
Race((Condition, Payment), (Condition, Payment)),
|
||||
|
||||
/// Either make a payment after one condition or a different payment after another
|
||||
/// condition, which ever condition is satisfied first.
|
||||
Or((Condition, Payment), (Condition, Payment)),
|
||||
}
|
||||
|
||||
impl Budget {
|
||||
@@ -57,7 +68,7 @@ impl Budget {
|
||||
tokens: i64,
|
||||
to: PublicKey,
|
||||
) -> Self {
|
||||
Budget::Race(
|
||||
Budget::Or(
|
||||
(Condition::Timestamp(dt), Payment { tokens, to }),
|
||||
(Condition::Signature(from), Payment { tokens, to: from }),
|
||||
)
|
||||
@@ -67,31 +78,27 @@ impl Budget {
|
||||
impl PaymentPlan for Budget {
|
||||
/// Return Payment if the budget requires no additional Witnesses.
|
||||
fn final_payment(&self) -> Option<Payment> {
|
||||
match *self {
|
||||
Budget::Pay(ref payment) => Some(payment.clone()),
|
||||
match self {
|
||||
Budget::Pay(payment) => Some(payment.clone()),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Return true if the budget spends exactly `spendable_tokens`.
|
||||
fn verify(&self, spendable_tokens: i64) -> bool {
|
||||
match *self {
|
||||
Budget::Pay(ref payment) | Budget::After(_, ref payment) => {
|
||||
payment.tokens == spendable_tokens
|
||||
}
|
||||
Budget::Race(ref a, ref b) => {
|
||||
a.1.tokens == spendable_tokens && b.1.tokens == spendable_tokens
|
||||
}
|
||||
match self {
|
||||
Budget::Pay(payment) | Budget::After(_, payment) => payment.tokens == spendable_tokens,
|
||||
Budget::Or(a, b) => a.1.tokens == spendable_tokens && b.1.tokens == spendable_tokens,
|
||||
}
|
||||
}
|
||||
|
||||
/// Apply a witness to the budget to see if the budget can be reduced.
|
||||
/// If so, modify the budget in-place.
|
||||
fn apply_witness(&mut self, witness: &Witness) {
|
||||
let new_payment = match *self {
|
||||
Budget::After(ref cond, ref payment) if cond.is_satisfied(witness) => Some(payment),
|
||||
Budget::Race((ref cond, ref payment), _) if cond.is_satisfied(witness) => Some(payment),
|
||||
Budget::Race(_, (ref cond, ref payment)) if cond.is_satisfied(witness) => Some(payment),
|
||||
let new_payment = match self {
|
||||
Budget::After(cond, payment) if cond.is_satisfied(witness) => Some(payment),
|
||||
Budget::Or((cond, payment), _) if cond.is_satisfied(witness) => Some(payment),
|
||||
Budget::Or(_, (cond, payment)) if cond.is_satisfied(witness) => Some(payment),
|
||||
_ => None,
|
||||
}.cloned();
|
||||
|
||||
|
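The `apply_witness` change above reduces a two-armed `Budget::Or` to a plain `Budget::Pay` as soon as either arm's condition is satisfied by a witness. Below is a minimal standalone sketch of that reduction; `Payment`, `Condition`, `Witness`, and `Budget` here are simplified stand-ins (string keys, integer timestamps), not the crate's real types.

    // Simplified stand-ins for the crate's budget types; illustration only.
    #[derive(Clone, Debug, PartialEq)]
    struct Payment { tokens: i64, to: &'static str }

    #[derive(Clone, Debug, PartialEq)]
    enum Condition { Timestamp(u64), Signature(&'static str) }

    #[derive(Debug, PartialEq)]
    enum Witness { Timestamp(u64), Signature(&'static str) }

    #[derive(Debug, PartialEq)]
    enum Budget {
        Pay(Payment),
        Or((Condition, Payment), (Condition, Payment)),
    }

    impl Condition {
        fn is_satisfied(&self, witness: &Witness) -> bool {
            match (self, witness) {
                (Condition::Signature(k), Witness::Signature(f)) => k == f,
                (Condition::Timestamp(dt), Witness::Timestamp(t)) => dt <= t,
                _ => false,
            }
        }
    }

    impl Budget {
        // Mirrors apply_witness: collapse to Pay once either arm's condition is met.
        fn apply_witness(&mut self, witness: &Witness) {
            let new_payment = match self {
                Budget::Or((cond, payment), _) if cond.is_satisfied(witness) => Some(payment.clone()),
                Budget::Or(_, (cond, payment)) if cond.is_satisfied(witness) => Some(payment.clone()),
                _ => None,
            };
            if let Some(payment) = new_payment {
                *self = Budget::Pay(payment);
            }
        }
    }

    fn main() {
        // A cancelable payment: pay alice after a timestamp, or refund bob on his signature.
        let mut budget = Budget::Or(
            (Condition::Timestamp(100), Payment { tokens: 42, to: "alice" }),
            (Condition::Signature("bob"), Payment { tokens: 42, to: "bob" }),
        );
        budget.apply_witness(&Witness::Timestamp(150));
        assert_eq!(budget, Budget::Pay(Payment { tokens: 42, to: "alice" }));
    }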
69
src/counter.rs
Normal file
@@ -0,0 +1,69 @@
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::time::Duration;
|
||||
use timing;
|
||||
|
||||
pub struct Counter {
|
||||
pub name: &'static str,
|
||||
pub counts: AtomicUsize,
|
||||
pub nanos: AtomicUsize,
|
||||
pub times: AtomicUsize,
|
||||
pub lograte: usize,
|
||||
}
|
||||
|
||||
macro_rules! create_counter {
|
||||
($name:expr, $lograte:expr) => {
|
||||
Counter {
|
||||
name: $name,
|
||||
counts: AtomicUsize::new(0),
|
||||
nanos: AtomicUsize::new(0),
|
||||
times: AtomicUsize::new(0),
|
||||
lograte: $lograte,
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
macro_rules! inc_counter {
|
||||
($name:expr, $count:expr, $start:expr) => {
|
||||
unsafe { $name.inc($count, $start.elapsed()) };
|
||||
};
|
||||
}
|
||||
|
||||
impl Counter {
|
||||
pub fn inc(&mut self, events: usize, dur: Duration) {
|
||||
let total = dur.as_secs() * 1_000_000_000 + dur.subsec_nanos() as u64;
|
||||
let counts = self.counts.fetch_add(events, Ordering::Relaxed);
|
||||
let nanos = self.nanos.fetch_add(total as usize, Ordering::Relaxed);
|
||||
let times = self.times.fetch_add(1, Ordering::Relaxed);
|
||||
if times % self.lograte == 0 && times > 0 {
|
||||
info!(
|
||||
"COUNTER:{{\"name\": \"{}\", \"counts\": {}, \"nanos\": {}, \"samples\": {}, \"rate\": {}, \"now\": {}}}",
|
||||
self.name,
|
||||
counts,
|
||||
nanos,
|
||||
times,
|
||||
counts as f64 * 1e9 / nanos as f64,
|
||||
timing::timestamp(),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use counter::Counter;
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::time::Instant;
|
||||
#[test]
|
||||
fn test_counter() {
|
||||
static mut COUNTER: Counter = create_counter!("test", 100);
|
||||
let start = Instant::now();
|
||||
let count = 1;
|
||||
inc_counter!(COUNTER, count, start);
|
||||
unsafe {
|
||||
assert_eq!(COUNTER.counts.load(Ordering::Relaxed), 1);
|
||||
assert_ne!(COUNTER.nanos.load(Ordering::Relaxed), 0);
|
||||
assert_eq!(COUNTER.times.load(Ordering::Relaxed), 1);
|
||||
assert_eq!(COUNTER.lograte, 100);
|
||||
assert_eq!(COUNTER.name, "test");
|
||||
}
|
||||
}
|
||||
}
|
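The new `counter.rs` keeps event totals in atomics and emits a rate log line every `lograte` samples. A standalone sketch of the same pattern follows; it drops the `create_counter!`/`inc_counter!` macros and the `log` crate in favor of a `const fn` constructor and `println!`, so it is illustrative rather than the crate's API.

    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::time::{Duration, Instant};

    // Counter pattern: shared atomics, no locks, periodic rate reporting.
    struct Counter {
        name: &'static str,
        counts: AtomicUsize,
        nanos: AtomicUsize,
        times: AtomicUsize,
        lograte: usize,
    }

    impl Counter {
        const fn new(name: &'static str, lograte: usize) -> Self {
            Counter {
                name,
                counts: AtomicUsize::new(0),
                nanos: AtomicUsize::new(0),
                times: AtomicUsize::new(0),
                lograte,
            }
        }

        fn inc(&self, events: usize, dur: Duration) {
            let total = dur.as_secs() as usize * 1_000_000_000 + dur.subsec_nanos() as usize;
            let counts = self.counts.fetch_add(events, Ordering::Relaxed) + events;
            let nanos = self.nanos.fetch_add(total, Ordering::Relaxed) + total;
            let times = self.times.fetch_add(1, Ordering::Relaxed) + 1;
            if times % self.lograte == 0 {
                // The real code emits this as an info! log line.
                println!(
                    "COUNTER {}: counts={} nanos={} samples={} rate={:.0}/s",
                    self.name, counts, nanos, times,
                    counts as f64 * 1e9 / nanos as f64
                );
            }
        }
    }

    static PACKETS: Counter = Counter::new("packets", 1);

    fn main() {
        let start = Instant::now();
        // ... do some work, then record how many events it covered ...
        PACKETS.inc(10, start.elapsed());
    }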
568
src/crdt.rs
@@ -17,12 +17,11 @@ use bincode::{deserialize, serialize};
|
||||
use byteorder::{LittleEndian, ReadBytesExt};
|
||||
use hash::Hash;
|
||||
use packet::{to_blob, Blob, BlobRecycler, SharedBlob, BLOB_SIZE};
|
||||
use pnet::datalink;
|
||||
use pnet_datalink as datalink;
|
||||
use rayon::prelude::*;
|
||||
use result::{Error, Result};
|
||||
use ring::rand::{SecureRandom, SystemRandom};
|
||||
use signature::{KeyPair, KeyPairUtil};
|
||||
use signature::{PublicKey, Signature};
|
||||
use signature::{KeyPair, KeyPairUtil, PublicKey, Signature};
|
||||
use std;
|
||||
use std::collections::HashMap;
|
||||
use std::collections::VecDeque;
|
||||
@@ -32,7 +31,14 @@ use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::thread::{sleep, Builder, JoinHandle};
|
||||
use std::time::Duration;
|
||||
use streamer::{BlobReceiver, BlobSender};
|
||||
use streamer::{BlobReceiver, BlobSender, Window};
|
||||
use timing::timestamp;
|
||||
|
||||
/// milliseconds we sleep for between gossip requests
|
||||
const GOSSIP_SLEEP_MILLIS: u64 = 100;
|
||||
|
||||
/// minimum membership table size before we start purging dead nodes
|
||||
const MIN_TABLE_SIZE: usize = 2;
|
||||
|
||||
pub fn parse_port_or_addr(optstr: Option<String>) -> SocketAddr {
|
||||
let daddr: SocketAddr = "0.0.0.0:8000".parse().expect("default socket address");
|
||||
@@ -55,7 +61,20 @@ pub fn get_ip_addr() -> Option<IpAddr> {
|
||||
for iface in datalink::interfaces() {
|
||||
for p in iface.ips {
|
||||
if !p.ip().is_loopback() && !p.ip().is_multicast() {
|
||||
return Some(p.ip());
|
||||
match p.ip() {
|
||||
IpAddr::V4(addr) => {
|
||||
if !addr.is_link_local() {
|
||||
return Some(p.ip());
|
||||
}
|
||||
}
|
||||
IpAddr::V6(_addr) => {
|
||||
// Select an IPv6 address only when the `ipv6` feature is enabled
|
||||
#[cfg(feature = "ipv6")]
|
||||
{
|
||||
return Some(p.ip());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
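The reworked `get_ip_addr` above skips loopback, multicast, and IPv4 link-local addresses, and only returns an IPv6 address when the `ipv6` feature is enabled. A standalone sketch of that filter over a fixed list of `IpAddr` values (standing in for `pnet_datalink::interfaces()`, which the real code iterates) could look like:

    use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};

    // Return the first usable address: not loopback, not multicast, and for IPv4
    // not link-local. IPv6 is skipped here, mirroring the default (non-"ipv6") build.
    fn first_usable(addrs: &[IpAddr]) -> Option<IpAddr> {
        for ip in addrs {
            if ip.is_loopback() || ip.is_multicast() {
                continue;
            }
            match ip {
                IpAddr::V4(v4) if !v4.is_link_local() => return Some(*ip),
                _ => {}
            }
        }
        None
    }

    fn main() {
        let addrs = [
            IpAddr::V4(Ipv4Addr::LOCALHOST),            // 127.0.0.1, skipped
            IpAddr::V4(Ipv4Addr::new(169, 254, 0, 1)),  // link-local, skipped
            IpAddr::V6(Ipv6Addr::LOCALHOST),            // skipped without the ipv6 feature
            IpAddr::V4(Ipv4Addr::new(10, 0, 0, 2)),     // picked
        ];
        assert_eq!(first_usable(&addrs), Some(IpAddr::V4(Ipv4Addr::new(10, 0, 0, 2))));
    }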
@@ -77,6 +96,9 @@ pub struct ReplicatedData {
|
||||
pub requests_addr: SocketAddr,
|
||||
/// transactions address
|
||||
pub transactions_addr: SocketAddr,
|
||||
/// repair address, we use this to jump ahead of the packets
|
||||
/// destined to the replicate_addr
|
||||
pub repair_addr: SocketAddr,
|
||||
/// current leader identity
|
||||
pub current_leader_id: PublicKey,
|
||||
/// last verified hash that was submitted to the leader
|
||||
@@ -92,6 +114,7 @@ impl ReplicatedData {
|
||||
replicate_addr: SocketAddr,
|
||||
requests_addr: SocketAddr,
|
||||
transactions_addr: SocketAddr,
|
||||
repair_addr: SocketAddr,
|
||||
) -> ReplicatedData {
|
||||
ReplicatedData {
|
||||
id,
|
||||
@@ -101,6 +124,7 @@ impl ReplicatedData {
|
||||
replicate_addr,
|
||||
requests_addr,
|
||||
transactions_addr,
|
||||
repair_addr,
|
||||
current_leader_id: PublicKey::default(),
|
||||
last_verified_hash: Hash::default(),
|
||||
last_verified_count: 0,
|
||||
@@ -118,6 +142,7 @@ impl ReplicatedData {
|
||||
let gossip_addr = Self::next_port(&bind_addr, 1);
|
||||
let replicate_addr = Self::next_port(&bind_addr, 2);
|
||||
let requests_addr = Self::next_port(&bind_addr, 3);
|
||||
let repair_addr = Self::next_port(&bind_addr, 4);
|
||||
let pubkey = KeyPair::new().pubkey();
|
||||
ReplicatedData::new(
|
||||
pubkey,
|
||||
@@ -125,6 +150,18 @@ impl ReplicatedData {
|
||||
replicate_addr,
|
||||
requests_addr,
|
||||
transactions_addr,
|
||||
repair_addr,
|
||||
)
|
||||
}
|
||||
pub fn new_entry_point(gossip_addr: SocketAddr) -> Self {
|
||||
let daddr: SocketAddr = "0.0.0.0:0".parse().unwrap();
|
||||
ReplicatedData::new(
|
||||
PublicKey::default(),
|
||||
gossip_addr,
|
||||
daddr.clone(),
|
||||
daddr.clone(),
|
||||
daddr.clone(),
|
||||
daddr,
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -150,12 +187,12 @@ pub struct Crdt {
|
||||
/// The value of the remote update index that I have last seen
|
||||
/// This Node will ask external nodes for updates since the value in this list
|
||||
pub remote: HashMap<PublicKey, u64>,
|
||||
pub alive: HashMap<PublicKey, u64>,
|
||||
pub update_index: u64,
|
||||
pub me: PublicKey,
|
||||
timeout: Duration,
|
||||
}
|
||||
// TODO These messages should be signed, and go through the gpu pipeline for spam filtering
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
enum Protocol {
|
||||
/// forward your own latest data structure when requesting an update
|
||||
/// this doesn't update the `remote` update index, but it allows the
|
||||
@@ -175,9 +212,9 @@ impl Crdt {
|
||||
table: HashMap::new(),
|
||||
local: HashMap::new(),
|
||||
remote: HashMap::new(),
|
||||
alive: HashMap::new(),
|
||||
me: me.id,
|
||||
update_index: 1,
|
||||
timeout: Duration::from_millis(100),
|
||||
};
|
||||
g.local.insert(me.id, g.update_index);
|
||||
g.table.insert(me.id, me);
|
||||
@@ -220,6 +257,63 @@ impl Crdt {
|
||||
self.table[&v.id].version
|
||||
);
|
||||
}
|
||||
//update the liveness table
|
||||
let now = timestamp();
|
||||
*self.alive.entry(v.id).or_insert(now) = now;
|
||||
}
|
||||
|
||||
/// purge old validators
|
||||
/// TODO: we need a robust membership protocol
|
||||
/// http://asc.di.fct.unl.pt/~jleitao/pdf/dsn07-leitao.pdf
|
||||
/// challenging part is that we are on a permissionless network
|
||||
pub fn purge(&mut self, now: u64) {
|
||||
if self.table.len() <= MIN_TABLE_SIZE {
|
||||
return;
|
||||
}
|
||||
//wait for 4x as long as it would randomly take to reach our node
|
||||
//assuming everyone is waiting the same amount of time as this node
|
||||
let limit = self.table.len() as u64 * GOSSIP_SLEEP_MILLIS * 4;
|
||||
let dead_ids: Vec<PublicKey> = self.alive
|
||||
.iter()
|
||||
.filter_map(|(&k, v)| {
|
||||
if k != self.me && (now - v) > limit {
|
||||
info!("purge {:?} {}", &k[..4], now - v);
|
||||
Some(k)
|
||||
} else {
|
||||
trace!("purge skipped {:?} {} {}", &k[..4], now - v, limit);
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
for id in dead_ids.iter() {
|
||||
self.alive.remove(id);
|
||||
self.table.remove(id);
|
||||
self.remote.remove(id);
|
||||
self.local.remove(id);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn index_blobs(
|
||||
obj: &Arc<RwLock<Self>>,
|
||||
blobs: &Vec<SharedBlob>,
|
||||
receive_index: &mut u64,
|
||||
) -> Result<()> {
|
||||
let me: ReplicatedData = {
|
||||
let robj = obj.read().expect("'obj' read lock in crdt::index_blobs");
|
||||
debug!("broadcast table {}", robj.table.len());
|
||||
robj.table[&robj.me].clone()
|
||||
};
|
||||
|
||||
// enumerate all the blobs, those are the indices
|
||||
for (i, b) in blobs.iter().enumerate() {
|
||||
// only leader should be broadcasting
|
||||
let mut blob = b.write().expect("'blob' write lock in crdt::index_blobs");
|
||||
blob.set_id(me.id).expect("set_id in pub fn broadcast");
|
||||
blob.set_index(*receive_index + i as u64)
|
||||
.expect("set_index in pub fn broadcast");
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// broadcast messages from the leader to layer 1 nodes
|
||||
@@ -227,9 +321,10 @@ impl Crdt {
|
||||
/// We need to avoid having obj locked while doing any io, such as the `send_to`
|
||||
pub fn broadcast(
|
||||
obj: &Arc<RwLock<Self>>,
|
||||
blobs: &Vec<SharedBlob>,
|
||||
window: &Window,
|
||||
s: &UdpSocket,
|
||||
transmit_index: &mut u64,
|
||||
received_index: u64,
|
||||
) -> Result<()> {
|
||||
let (me, table): (ReplicatedData, Vec<ReplicatedData>) = {
|
||||
// copy to avoid locking during IO
|
||||
@@ -259,31 +354,35 @@ impl Crdt {
|
||||
return Err(Error::CrdtTooSmall);
|
||||
}
|
||||
trace!("nodes table {}", nodes.len());
|
||||
trace!("blobs table {}", blobs.len());
|
||||
// enumerate all the blobs, those are the indices
|
||||
|
||||
// enumerate all the blobs in the window, those are the indices
|
||||
// transmit them to nodes, starting from a different node
|
||||
let orders: Vec<_> = blobs
|
||||
.iter()
|
||||
.enumerate()
|
||||
.zip(
|
||||
nodes
|
||||
.iter()
|
||||
.cycle()
|
||||
.skip((*transmit_index as usize) % nodes.len()),
|
||||
)
|
||||
.collect();
|
||||
let mut orders = Vec::new();
|
||||
let window_l = window.write().unwrap();
|
||||
for i in *transmit_index..received_index {
|
||||
let is = i as usize;
|
||||
let k = is % window_l.len();
|
||||
assert!(window_l[k].is_some());
|
||||
|
||||
orders.push((window_l[k].clone(), nodes[is % nodes.len()]));
|
||||
}
|
||||
|
||||
trace!("orders table {}", orders.len());
|
||||
let errs: Vec<_> = orders
|
||||
.into_iter()
|
||||
.map(|((i, b), v)| {
|
||||
.map(|(b, v)| {
|
||||
// only leader should be broadcasting
|
||||
assert!(me.current_leader_id != v.id);
|
||||
let mut blob = b.write().expect("'b' write lock in pub fn broadcast");
|
||||
blob.set_id(me.id).expect("set_id in pub fn broadcast");
|
||||
blob.set_index(*transmit_index + i as u64)
|
||||
.expect("set_index in pub fn broadcast");
|
||||
let bl = b.unwrap();
|
||||
let blob = bl.read().expect("blob read lock in streamer::broadcast");
|
||||
//TODO profile this, may need multiple sockets for par_iter
|
||||
trace!("broadcast {} to {}", blob.meta.size, v.replicate_addr);
|
||||
trace!(
|
||||
"broadcast idx: {} sz: {} to {} coding: {}",
|
||||
blob.get_index().unwrap(),
|
||||
blob.meta.size,
|
||||
v.replicate_addr,
|
||||
blob.is_coding()
|
||||
);
|
||||
assert!(blob.meta.size < BLOB_SIZE);
|
||||
let e = s.send_to(&blob.data[..blob.meta.size], &v.replicate_addr);
|
||||
trace!("done broadcast {} to {}", blob.meta.size, v.replicate_addr);
|
||||
@@ -378,7 +477,7 @@ impl Crdt {
|
||||
//trace!("get updates since {}", v);
|
||||
let data = self.table
|
||||
.values()
|
||||
.filter(|x| self.local[&x.id] > v)
|
||||
.filter(|x| x.id != PublicKey::default() && self.local[&x.id] > v)
|
||||
.cloned()
|
||||
.collect();
|
||||
let id = self.me;
|
||||
@@ -390,7 +489,7 @@ impl Crdt {
|
||||
let daddr = "0.0.0.0:0".parse().unwrap();
|
||||
let valid: Vec<_> = self.table
|
||||
.values()
|
||||
.filter(|r| r.id != self.me && r.replicate_addr != daddr)
|
||||
.filter(|r| r.id != self.me && r.repair_addr != daddr)
|
||||
.collect();
|
||||
if valid.is_empty() {
|
||||
return Err(Error::CrdtTooSmall);
|
||||
@@ -452,6 +551,32 @@ impl Crdt {
|
||||
blob_sender.send(q)?;
|
||||
Ok(())
|
||||
}
|
||||
/// TODO: This is obviously the wrong way to do this. Need to implement leader selection
|
||||
fn top_leader(&self) -> Option<PublicKey> {
|
||||
let mut table = HashMap::new();
|
||||
let def = PublicKey::default();
|
||||
let cur = self.table.values().filter(|x| x.current_leader_id != def);
|
||||
for v in cur {
|
||||
let cnt = table.entry(&v.current_leader_id).or_insert(0);
|
||||
*cnt += 1;
|
||||
trace!("leader {:?} {}", &v.current_leader_id[..4], *cnt);
|
||||
}
|
||||
let mut sorted: Vec<(&PublicKey, usize)> = table.into_iter().collect();
|
||||
sorted.sort_by_key(|a| a.1);
|
||||
sorted.last().map(|a| *a.0)
|
||||
}
|
||||
|
||||
/// TODO: This is obviously the wrong way to do this. Need to implement leader selection
|
||||
/// A t-shirt for the first person to actually use this bad behavior to attack the alpha testnet
|
||||
fn update_leader(&mut self) {
|
||||
if let Some(leader_id) = self.top_leader() {
|
||||
if self.my_data().current_leader_id != leader_id {
|
||||
if self.table.get(&leader_id).is_some() {
|
||||
self.set_leader(leader_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Apply updates that we received from the identity `from`
|
||||
/// # Arguments
|
||||
@@ -478,63 +603,85 @@ impl Crdt {
|
||||
Builder::new()
|
||||
.name("solana-gossip".to_string())
|
||||
.spawn(move || loop {
|
||||
let start = timestamp();
|
||||
let _ = Self::run_gossip(&obj, &blob_sender, &blob_recycler);
|
||||
obj.write().unwrap().purge(timestamp());
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
return;
|
||||
}
|
||||
//TODO this should be a tuned parameter
|
||||
sleep(
|
||||
obj.read()
|
||||
.expect("'obj' read lock in pub fn gossip")
|
||||
.timeout,
|
||||
);
|
||||
//TODO: possibly tune this parameter
|
||||
//we saw a deadlock passing an obj.read().unwrap().timeout into sleep
|
||||
let _ = obj.write().unwrap().update_leader();
|
||||
let elapsed = timestamp() - start;
|
||||
if GOSSIP_SLEEP_MILLIS > elapsed {
|
||||
let time_left = GOSSIP_SLEEP_MILLIS - elapsed;
|
||||
sleep(Duration::from_millis(time_left));
|
||||
}
|
||||
})
|
||||
.unwrap()
|
||||
}
|
||||
fn run_window_request(
|
||||
window: &Arc<RwLock<Vec<Option<SharedBlob>>>>,
|
||||
window: &Window,
|
||||
me: &ReplicatedData,
|
||||
from: &ReplicatedData,
|
||||
ix: u64,
|
||||
blob_recycler: &BlobRecycler,
|
||||
) -> Option<SharedBlob> {
|
||||
let pos = (ix as usize) % window.read().unwrap().len();
|
||||
if let &Some(ref blob) = &window.read().unwrap()[pos] {
|
||||
let rblob = blob.read().unwrap();
|
||||
let blob_ix = rblob.get_index().expect("run_window_request get_index");
|
||||
if let Some(blob) = &window.read().unwrap()[pos] {
|
||||
let mut wblob = blob.write().unwrap();
|
||||
let blob_ix = wblob.get_index().expect("run_window_request get_index");
|
||||
if blob_ix == ix {
|
||||
let num_retransmits = wblob.meta.num_retransmits;
|
||||
wblob.meta.num_retransmits += 1;
|
||||
// Setting the sender id to the requester id
|
||||
// prevents the requester from retransmitting this response
|
||||
// to other peers
|
||||
let mut sender_id = from.id;
|
||||
|
||||
// Allow retransmission of this response if the node
|
||||
// is the leader and the number of repair requests equals
|
||||
// a power of two
|
||||
if me.current_leader_id == me.id
|
||||
&& (num_retransmits == 0 || num_retransmits.is_power_of_two())
|
||||
{
|
||||
sender_id = me.id
|
||||
}
|
||||
|
||||
let out = blob_recycler.allocate();
|
||||
|
||||
// copy to avoid doing IO inside the lock
|
||||
{
|
||||
let mut outblob = out.write().unwrap();
|
||||
let sz = rblob.meta.size;
|
||||
let sz = wblob.meta.size;
|
||||
outblob.meta.size = sz;
|
||||
outblob.data[..sz].copy_from_slice(&rblob.data[..sz]);
|
||||
outblob.meta.set_addr(&from.replicate_addr);
|
||||
//TODO, set the sender id to the requester so we dont retransmit
|
||||
//come up with a cleaner solution for this when sender signatures are checked
|
||||
outblob.set_id(from.id).expect("blob set_id");
|
||||
outblob.data[..sz].copy_from_slice(&wblob.data[..sz]);
|
||||
outblob.meta.set_addr(&from.repair_addr);
|
||||
outblob.set_id(sender_id).expect("blob set_id");
|
||||
}
|
||||
|
||||
return Some(out);
|
||||
}
|
||||
} else {
|
||||
assert!(window.read().unwrap()[pos].is_none());
|
||||
info!("failed RequestWindowIndex {} {}", ix, from.replicate_addr);
|
||||
info!("failed RequestWindowIndex {} {}", ix, from.repair_addr);
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
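The repair path above normally tags the response with the requester's id so the requester will not retransmit it further, but the leader re-enables retransmission whenever the repeat count is zero or a power of two, which amounts to an exponential backoff on re-flooding a repaired blob. A small sketch of just that decision, with node ids reduced to plain integers:

    // Retransmit backoff: the leader re-allows retransmission on the 0th, 1st,
    // 2nd, 4th, 8th, ... repair of the same blob; otherwise the response is
    // tagged with the requester's id, which stops further retransmission.
    fn response_sender_id(i_am_leader: bool, num_retransmits: u32, my_id: u64, requester_id: u64) -> u64 {
        if i_am_leader && (num_retransmits == 0 || num_retransmits.is_power_of_two()) {
            my_id
        } else {
            requester_id
        }
    }

    fn main() {
        let (me, peer) = (1u64, 2u64);
        let allowed: Vec<u32> = (0..10)
            .filter(|&n| response_sender_id(true, n, me, peer) == me)
            .collect();
        assert_eq!(allowed, vec![0, 1, 2, 4, 8]);
    }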
|
||||
//TODO we should first coalesce all the requests
|
||||
fn handle_blob(
|
||||
obj: &Arc<RwLock<Self>>,
|
||||
window: &Arc<RwLock<Vec<Option<SharedBlob>>>>,
|
||||
window: &Window,
|
||||
blob_recycler: &BlobRecycler,
|
||||
blob: &Blob,
|
||||
) -> Option<SharedBlob> {
|
||||
match deserialize(&blob.data[..blob.meta.size]) {
|
||||
// TODO sigverify these
|
||||
Ok(Protocol::RequestUpdates(v, reqdata)) => {
|
||||
Ok(Protocol::RequestUpdates(v, from_rd)) => {
|
||||
trace!("RequestUpdates {}", v);
|
||||
let addr = reqdata.gossip_addr;
|
||||
let addr = from_rd.gossip_addr;
|
||||
// only lock for this call, dont lock during IO `sock.send_to` or `sock.recv_from`
|
||||
let (from, ups, data) = obj.read()
|
||||
.expect("'obj' read lock in RequestUpdates")
|
||||
@@ -542,7 +689,7 @@ impl Crdt {
|
||||
trace!("get updates since response {} {}", v, data.len());
|
||||
let len = data.len();
|
||||
let rsp = Protocol::ReceiveUpdates(from, ups, data);
|
||||
obj.write().unwrap().insert(&reqdata);
|
||||
obj.write().unwrap().insert(&from_rd);
|
||||
if len < 1 {
|
||||
let me = obj.read().unwrap();
|
||||
trace!(
|
||||
@@ -557,7 +704,7 @@ impl Crdt {
|
||||
"sending updates me {:?} len {} to {:?} {}",
|
||||
&obj.read().unwrap().me[..4],
|
||||
len,
|
||||
&reqdata.id[..4],
|
||||
&from_rd.id[..4],
|
||||
addr,
|
||||
);
|
||||
Some(r)
|
||||
@@ -574,17 +721,19 @@ impl Crdt {
|
||||
None
|
||||
}
|
||||
Ok(Protocol::RequestWindowIndex(from, ix)) => {
|
||||
//TODO this doesn't depend on CRDT module, can be moved
|
||||
//but we are using the listen thread to service these request
|
||||
//TODO verify from is signed
|
||||
obj.write().unwrap().insert(&from);
|
||||
let me = obj.read().unwrap().my_data().clone();
|
||||
trace!(
|
||||
"received RequestWindowIndex {} {} myaddr {}",
|
||||
ix,
|
||||
from.replicate_addr,
|
||||
me.replicate_addr
|
||||
from.repair_addr,
|
||||
me.repair_addr
|
||||
);
|
||||
assert_ne!(from.replicate_addr, me.replicate_addr);
|
||||
Self::run_window_request(&window, &from, ix, blob_recycler)
|
||||
assert_ne!(from.repair_addr, me.repair_addr);
|
||||
Self::run_window_request(&window, &me, &from, ix, blob_recycler)
|
||||
}
|
||||
Err(_) => {
|
||||
warn!("deserialize crdt packet failed");
|
||||
@@ -596,7 +745,7 @@ impl Crdt {
|
||||
/// Process messages from the network
|
||||
fn run_listen(
|
||||
obj: &Arc<RwLock<Self>>,
|
||||
window: &Arc<RwLock<Vec<Option<SharedBlob>>>>,
|
||||
window: &Window,
|
||||
blob_recycler: &BlobRecycler,
|
||||
requests_receiver: &BlobReceiver,
|
||||
response_sender: &BlobSender,
|
||||
@@ -618,7 +767,7 @@ impl Crdt {
|
||||
}
|
||||
pub fn listen(
|
||||
obj: Arc<RwLock<Self>>,
|
||||
window: Arc<RwLock<Vec<Option<SharedBlob>>>>,
|
||||
window: Window,
|
||||
blob_recycler: BlobRecycler,
|
||||
requests_receiver: BlobReceiver,
|
||||
response_sender: BlobSender,
|
||||
@@ -656,6 +805,8 @@ pub struct Sockets {
|
||||
pub transaction: UdpSocket,
|
||||
pub respond: UdpSocket,
|
||||
pub broadcast: UdpSocket,
|
||||
pub repair: UdpSocket,
|
||||
pub retransmit: UdpSocket,
|
||||
}
|
||||
|
||||
pub struct TestNode {
|
||||
@@ -665,13 +816,15 @@ pub struct TestNode {
|
||||
|
||||
impl TestNode {
|
||||
pub fn new() -> TestNode {
|
||||
let gossip = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
let gossip_send = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
let requests = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
let transaction = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
let gossip = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
let replicate = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
let requests = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
let repair = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
let gossip_send = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
let respond = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
let broadcast = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
let retransmit = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
let pubkey = KeyPair::new().pubkey();
|
||||
let data = ReplicatedData::new(
|
||||
pubkey,
|
||||
@@ -679,6 +832,7 @@ impl TestNode {
|
||||
replicate.local_addr().unwrap(),
|
||||
requests.local_addr().unwrap(),
|
||||
transaction.local_addr().unwrap(),
|
||||
repair.local_addr().unwrap(),
|
||||
);
|
||||
TestNode {
|
||||
data: data,
|
||||
@@ -690,6 +844,8 @@ impl TestNode {
|
||||
transaction,
|
||||
respond,
|
||||
broadcast,
|
||||
repair,
|
||||
retransmit,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -697,8 +853,17 @@ impl TestNode {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crdt::{parse_port_or_addr, Crdt, ReplicatedData};
|
||||
use crdt::{parse_port_or_addr, Crdt, ReplicatedData, GOSSIP_SLEEP_MILLIS, MIN_TABLE_SIZE};
|
||||
use logger;
|
||||
use packet::BlobRecycler;
|
||||
use result::Error;
|
||||
use signature::{KeyPair, KeyPairUtil};
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::mpsc::channel;
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::thread::sleep;
|
||||
use std::time::Duration;
|
||||
use streamer::default_window;
|
||||
|
||||
#[test]
|
||||
fn test_parse_port_or_addr() {
|
||||
@@ -709,8 +874,6 @@ mod tests {
|
||||
let p3 = parse_port_or_addr(None);
|
||||
assert_eq!(p3.port(), 8000);
|
||||
}
|
||||
|
||||
/// Test that insert drops messages that are older
|
||||
#[test]
|
||||
fn insert_test() {
|
||||
let mut d = ReplicatedData::new(
|
||||
@@ -719,6 +882,7 @@ mod tests {
|
||||
"127.0.0.1:1235".parse().unwrap(),
|
||||
"127.0.0.1:1236".parse().unwrap(),
|
||||
"127.0.0.1:1237".parse().unwrap(),
|
||||
"127.0.0.1:1238".parse().unwrap(),
|
||||
);
|
||||
assert_eq!(d.version, 0);
|
||||
let mut crdt = Crdt::new(d.clone());
|
||||
@@ -736,6 +900,15 @@ mod tests {
|
||||
copy
|
||||
}
|
||||
#[test]
|
||||
fn replicated_data_new_leader() {
|
||||
let d1 = ReplicatedData::new_leader(&"127.0.0.1:1234".parse().unwrap());
|
||||
assert_eq!(d1.gossip_addr, "127.0.0.1:1235".parse().unwrap());
|
||||
assert_eq!(d1.replicate_addr, "127.0.0.1:1236".parse().unwrap());
|
||||
assert_eq!(d1.requests_addr, "127.0.0.1:1237".parse().unwrap());
|
||||
assert_eq!(d1.transactions_addr, "127.0.0.1:1234".parse().unwrap());
|
||||
assert_eq!(d1.repair_addr, "127.0.0.1:1238".parse().unwrap());
|
||||
}
|
||||
#[test]
|
||||
fn update_test() {
|
||||
let d1 = ReplicatedData::new(
|
||||
KeyPair::new().pubkey(),
|
||||
@@ -743,6 +916,7 @@ mod tests {
|
||||
"127.0.0.1:1235".parse().unwrap(),
|
||||
"127.0.0.1:1236".parse().unwrap(),
|
||||
"127.0.0.1:1237".parse().unwrap(),
|
||||
"127.0.0.1:1238".parse().unwrap(),
|
||||
);
|
||||
let d2 = ReplicatedData::new(
|
||||
KeyPair::new().pubkey(),
|
||||
@@ -750,6 +924,7 @@ mod tests {
|
||||
"127.0.0.1:1235".parse().unwrap(),
|
||||
"127.0.0.1:1236".parse().unwrap(),
|
||||
"127.0.0.1:1237".parse().unwrap(),
|
||||
"127.0.0.1:1238".parse().unwrap(),
|
||||
);
|
||||
let d3 = ReplicatedData::new(
|
||||
KeyPair::new().pubkey(),
|
||||
@@ -757,6 +932,7 @@ mod tests {
|
||||
"127.0.0.1:1235".parse().unwrap(),
|
||||
"127.0.0.1:1236".parse().unwrap(),
|
||||
"127.0.0.1:1237".parse().unwrap(),
|
||||
"127.0.0.1:1238".parse().unwrap(),
|
||||
);
|
||||
let mut crdt = Crdt::new(d1.clone());
|
||||
let (key, ix, ups) = crdt.get_updates_since(0);
|
||||
@@ -775,7 +951,10 @@ mod tests {
|
||||
assert_eq!(key, d1.id);
|
||||
assert_eq!(ix, 3);
|
||||
assert_eq!(ups.len(), 3);
|
||||
assert_eq!(sorted(&ups), sorted(&vec![d2.clone(), d1, d3]));
|
||||
assert_eq!(
|
||||
sorted(&ups),
|
||||
sorted(&vec![d1.clone(), d2.clone(), d3.clone()])
|
||||
);
|
||||
let mut crdt2 = Crdt::new(d2.clone());
|
||||
crdt2.apply_updates(key, ix, &ups);
|
||||
assert_eq!(crdt2.table.values().len(), 3);
|
||||
@@ -783,6 +962,267 @@ mod tests {
|
||||
sorted(&crdt2.table.values().map(|x| x.clone()).collect()),
|
||||
sorted(&crdt.table.values().map(|x| x.clone()).collect())
|
||||
);
|
||||
let d4 = ReplicatedData::new_entry_point("127.0.0.4:1234".parse().unwrap());
|
||||
crdt.insert(&d4);
|
||||
let (_key, _ix, ups) = crdt.get_updates_since(0);
|
||||
assert_eq!(sorted(&ups), sorted(&vec![d2.clone(), d1, d3]));
|
||||
}
|
||||
#[test]
|
||||
fn window_index_request() {
|
||||
let me = ReplicatedData::new(
|
||||
KeyPair::new().pubkey(),
|
||||
"127.0.0.1:1234".parse().unwrap(),
|
||||
"127.0.0.1:1235".parse().unwrap(),
|
||||
"127.0.0.1:1236".parse().unwrap(),
|
||||
"127.0.0.1:1237".parse().unwrap(),
|
||||
"127.0.0.1:1238".parse().unwrap(),
|
||||
);
|
||||
let mut crdt = Crdt::new(me.clone());
|
||||
let rv = crdt.window_index_request(0);
|
||||
assert_matches!(rv, Err(Error::CrdtTooSmall));
|
||||
let nxt = ReplicatedData::new(
|
||||
KeyPair::new().pubkey(),
|
||||
"127.0.0.1:1234".parse().unwrap(),
|
||||
"127.0.0.1:1235".parse().unwrap(),
|
||||
"127.0.0.1:1236".parse().unwrap(),
|
||||
"127.0.0.1:1237".parse().unwrap(),
|
||||
"0.0.0.0:0".parse().unwrap(),
|
||||
);
|
||||
crdt.insert(&nxt);
|
||||
let rv = crdt.window_index_request(0);
|
||||
assert_matches!(rv, Err(Error::CrdtTooSmall));
|
||||
let nxt = ReplicatedData::new(
|
||||
KeyPair::new().pubkey(),
|
||||
"127.0.0.2:1234".parse().unwrap(),
|
||||
"127.0.0.1:1235".parse().unwrap(),
|
||||
"127.0.0.1:1236".parse().unwrap(),
|
||||
"127.0.0.1:1237".parse().unwrap(),
|
||||
"127.0.0.1:1238".parse().unwrap(),
|
||||
);
|
||||
crdt.insert(&nxt);
|
||||
let rv = crdt.window_index_request(0).unwrap();
|
||||
assert_eq!(nxt.gossip_addr, "127.0.0.2:1234".parse().unwrap());
|
||||
assert_eq!(rv.0, "127.0.0.2:1234".parse().unwrap());
|
||||
|
||||
let nxt = ReplicatedData::new(
|
||||
KeyPair::new().pubkey(),
|
||||
"127.0.0.3:1234".parse().unwrap(),
|
||||
"127.0.0.1:1235".parse().unwrap(),
|
||||
"127.0.0.1:1236".parse().unwrap(),
|
||||
"127.0.0.1:1237".parse().unwrap(),
|
||||
"127.0.0.1:1238".parse().unwrap(),
|
||||
);
|
||||
crdt.insert(&nxt);
|
||||
let mut one = false;
|
||||
let mut two = false;
|
||||
while !one || !two {
|
||||
//this randomly picks an option, so eventually it should pick both
|
||||
let rv = crdt.window_index_request(0).unwrap();
|
||||
if rv.0 == "127.0.0.2:1234".parse().unwrap() {
|
||||
one = true;
|
||||
}
|
||||
if rv.0 == "127.0.0.3:1234".parse().unwrap() {
|
||||
two = true;
|
||||
}
|
||||
}
|
||||
assert!(one && two);
|
||||
}
|
||||
|
||||
/// test that gossip requests are eventually generated for all nodes
|
||||
#[test]
|
||||
fn gossip_request() {
|
||||
let me = ReplicatedData::new(
|
||||
KeyPair::new().pubkey(),
|
||||
"127.0.0.1:1234".parse().unwrap(),
|
||||
"127.0.0.1:1235".parse().unwrap(),
|
||||
"127.0.0.1:1236".parse().unwrap(),
|
||||
"127.0.0.1:1237".parse().unwrap(),
|
||||
"127.0.0.1:1238".parse().unwrap(),
|
||||
);
|
||||
let mut crdt = Crdt::new(me.clone());
|
||||
let rv = crdt.gossip_request();
|
||||
assert_matches!(rv, Err(Error::CrdtTooSmall));
|
||||
let nxt1 = ReplicatedData::new(
|
||||
KeyPair::new().pubkey(),
|
||||
"127.0.0.2:1234".parse().unwrap(),
|
||||
"127.0.0.1:1235".parse().unwrap(),
|
||||
"127.0.0.1:1236".parse().unwrap(),
|
||||
"127.0.0.1:1237".parse().unwrap(),
|
||||
"127.0.0.1:1238".parse().unwrap(),
|
||||
);
|
||||
|
||||
crdt.insert(&nxt1);
|
||||
|
||||
let rv = crdt.gossip_request().unwrap();
|
||||
assert_eq!(rv.0, nxt1.gossip_addr);
|
||||
|
||||
let nxt2 = ReplicatedData::new_entry_point("127.0.0.3:1234".parse().unwrap());
|
||||
crdt.insert(&nxt2);
|
||||
// check that the service works
|
||||
// and that it eventually produces a request for both nodes
|
||||
let (sender, reader) = channel();
|
||||
let recycler = BlobRecycler::default();
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let obj = Arc::new(RwLock::new(crdt));
|
||||
let thread = Crdt::gossip(obj, recycler, sender, exit.clone());
|
||||
let mut one = false;
|
||||
let mut two = false;
|
||||
for _ in 0..30 {
|
||||
//50% chance each try that we get a repeat
|
||||
let mut rv = reader.recv_timeout(Duration::new(1, 0)).unwrap();
|
||||
while let Ok(mut more) = reader.try_recv() {
|
||||
rv.append(&mut more);
|
||||
}
|
||||
assert!(rv.len() > 0);
|
||||
for i in rv.iter() {
|
||||
if i.read().unwrap().meta.addr() == nxt1.gossip_addr {
|
||||
one = true;
|
||||
} else if i.read().unwrap().meta.addr() == nxt2.gossip_addr {
|
||||
two = true;
|
||||
} else {
|
||||
//unexpected request
|
||||
assert!(false);
|
||||
}
|
||||
}
|
||||
if one && two {
|
||||
break;
|
||||
}
|
||||
}
|
||||
exit.store(true, Ordering::Relaxed);
|
||||
thread.join().unwrap();
|
||||
//created requests to both
|
||||
assert!(one && two);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn purge_test() {
|
||||
let me = ReplicatedData::new_leader(&"127.0.0.1:1234".parse().unwrap());
|
||||
let mut crdt = Crdt::new(me.clone());
|
||||
let nxt = ReplicatedData::new_leader(&"127.0.0.2:1234".parse().unwrap());
|
||||
assert_ne!(me.id, nxt.id);
|
||||
crdt.insert(&nxt);
|
||||
let rv = crdt.gossip_request().unwrap();
|
||||
assert_eq!(rv.0, nxt.gossip_addr);
|
||||
let now = crdt.alive[&nxt.id];
|
||||
let len = crdt.table.len() as u64;
|
||||
crdt.purge(now);
|
||||
let rv = crdt.gossip_request().unwrap();
|
||||
assert_eq!(rv.0, nxt.gossip_addr);
|
||||
|
||||
crdt.purge(now + len * GOSSIP_SLEEP_MILLIS * 4);
|
||||
let rv = crdt.gossip_request().unwrap();
|
||||
assert_eq!(rv.0, nxt.gossip_addr);
|
||||
|
||||
crdt.purge(now + len * GOSSIP_SLEEP_MILLIS * 4 + 1);
|
||||
let rv = crdt.gossip_request().unwrap();
|
||||
assert_eq!(rv.0, nxt.gossip_addr);
|
||||
|
||||
let nxt2 = ReplicatedData::new_leader(&"127.0.0.2:1234".parse().unwrap());
|
||||
assert_ne!(me.id, nxt2.id);
|
||||
assert_ne!(nxt.id, nxt2.id);
|
||||
crdt.insert(&nxt2);
|
||||
while now == crdt.alive[&nxt2.id] {
|
||||
sleep(Duration::from_millis(GOSSIP_SLEEP_MILLIS));
|
||||
crdt.insert(&nxt2);
|
||||
}
|
||||
let len = crdt.table.len() as u64;
|
||||
assert!((MIN_TABLE_SIZE as u64) < len);
|
||||
crdt.purge(now + len * GOSSIP_SLEEP_MILLIS * 4);
|
||||
assert_eq!(len as usize, crdt.table.len());
|
||||
crdt.purge(now + len * GOSSIP_SLEEP_MILLIS * 4 + 1);
|
||||
let rv = crdt.gossip_request().unwrap();
|
||||
assert_eq!(rv.0, nxt.gossip_addr);
|
||||
assert_eq!(2, crdt.table.len());
|
||||
}
|
||||
|
||||
/// test window requests respond with the right blob, and do not overrun
|
||||
#[test]
|
||||
fn run_window_request() {
|
||||
let window = default_window();
|
||||
let me = ReplicatedData::new(
|
||||
KeyPair::new().pubkey(),
|
||||
"127.0.0.1:1234".parse().unwrap(),
|
||||
"127.0.0.1:1235".parse().unwrap(),
|
||||
"127.0.0.1:1236".parse().unwrap(),
|
||||
"127.0.0.1:1237".parse().unwrap(),
|
||||
"127.0.0.1:1238".parse().unwrap(),
|
||||
);
|
||||
let recycler = BlobRecycler::default();
|
||||
let rv = Crdt::run_window_request(&window, &me, &me, 0, &recycler);
|
||||
assert!(rv.is_none());
|
||||
let out = recycler.allocate();
|
||||
out.write().unwrap().meta.size = 200;
|
||||
window.write().unwrap()[0] = Some(out);
|
||||
let rv = Crdt::run_window_request(&window, &me, &me, 0, &recycler);
|
||||
assert!(rv.is_some());
|
||||
let v = rv.unwrap();
|
||||
//test we copied the blob
|
||||
assert_eq!(v.read().unwrap().meta.size, 200);
|
||||
let len = window.read().unwrap().len() as u64;
|
||||
let rv = Crdt::run_window_request(&window, &me, &me, len, &recycler);
|
||||
assert!(rv.is_none());
|
||||
}
|
||||
|
||||
/// test window requests respond with the right blob, and do not overrun
|
||||
#[test]
|
||||
fn run_window_request_with_backoff() {
|
||||
let window = default_window();
|
||||
|
||||
let mut me = ReplicatedData::new_leader(&"127.0.0.1:1234".parse().unwrap());
|
||||
me.current_leader_id = me.id;
|
||||
|
||||
let mock_peer = ReplicatedData::new_leader(&"127.0.0.1:1234".parse().unwrap());
|
||||
|
||||
let recycler = BlobRecycler::default();
|
||||
|
||||
// Simulate handling a repair request from mock_peer
|
||||
let rv = Crdt::run_window_request(&window, &me, &mock_peer, 0, &recycler);
|
||||
assert!(rv.is_none());
|
||||
let blob = recycler.allocate();
|
||||
let blob_size = 200;
|
||||
blob.write().unwrap().meta.size = blob_size;
|
||||
window.write().unwrap()[0] = Some(blob);
|
||||
|
||||
let num_requests: u32 = 64;
|
||||
for i in 0..num_requests {
|
||||
let shared_blob =
|
||||
Crdt::run_window_request(&window, &me, &mock_peer, 0, &recycler).unwrap();
|
||||
let blob = shared_blob.read().unwrap();
|
||||
// Test we copied the blob
|
||||
assert_eq!(blob.meta.size, blob_size);
|
||||
|
||||
let id = if i == 0 || i.is_power_of_two() {
|
||||
me.id
|
||||
} else {
|
||||
mock_peer.id
|
||||
};
|
||||
assert_eq!(blob.get_id().unwrap(), id);
|
||||
}
|
||||
}
|
||||
/// TODO: This is obviously the wrong way to do this. Need to implement leader selection,
|
||||
/// delete this test after leader selection is correctly implemented
|
||||
#[test]
|
||||
fn test_update_leader() {
|
||||
logger::setup();
|
||||
let me = ReplicatedData::new_leader(&"127.0.0.1:1234".parse().unwrap());
|
||||
let leader0 = ReplicatedData::new_leader(&"127.0.0.1:1234".parse().unwrap());
|
||||
let leader1 = ReplicatedData::new_leader(&"127.0.0.1:1234".parse().unwrap());
|
||||
let mut crdt = Crdt::new(me.clone());
|
||||
assert_eq!(crdt.top_leader(), None);
|
||||
crdt.set_leader(leader0.id);
|
||||
assert_eq!(crdt.top_leader().unwrap(), leader0.id);
|
||||
//add a bunch of nodes with a new leader
|
||||
for _ in 0..10 {
|
||||
let mut dum = ReplicatedData::new_entry_point("127.0.0.1:1234".parse().unwrap());
|
||||
dum.id = KeyPair::new().pubkey();
|
||||
dum.current_leader_id = leader1.id;
|
||||
crdt.insert(&dum);
|
||||
}
|
||||
assert_eq!(crdt.top_leader().unwrap(), leader1.id);
|
||||
crdt.update_leader();
|
||||
assert_eq!(crdt.my_data().current_leader_id, leader0.id);
|
||||
crdt.insert(&leader1);
|
||||
crdt.update_leader();
|
||||
assert_eq!(crdt.my_data().current_leader_id, leader1.id);
|
||||
}
|
||||
}
|
||||
|
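`top_leader` above is the stop-gap leader selection the new test exercises: tally the leader id advertised by each peer and pick the most common one. A standalone sketch with peer ids reduced to `u64` values, not the crate's `Crdt` method:

    use std::collections::HashMap;

    // Count each advertised leader and return the one with the most votes
    // (ties are resolved arbitrarily, as in the code above).
    fn top_leader(advertised: &[u64]) -> Option<u64> {
        let mut counts: HashMap<u64, usize> = HashMap::new();
        for &leader in advertised {
            *counts.entry(leader).or_insert(0) += 1;
        }
        let mut sorted: Vec<(u64, usize)> = counts.into_iter().collect();
        sorted.sort_by_key(|&(_, n)| n);
        sorted.last().map(|&(leader, _)| leader)
    }

    fn main() {
        // Ten peers point at leader 7, one still points at leader 3.
        let mut votes = vec![7u64; 10];
        votes.push(3);
        assert_eq!(top_leader(&votes), Some(7));
    }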
312
src/drone.rs
Normal file
@@ -0,0 +1,312 @@
|
||||
//! The `drone` module provides an object for launching a Solana Drone,
|
||||
//! which is the custodian of any remaining tokens in a mint.
|
||||
//! The Solana Drone builds and sends airdrop transactions,
|
||||
//! checking requests against a request cap for a given time slice
|
||||
//! and (to come) an IP rate limit.
|
||||
|
||||
use signature::{KeyPair, PublicKey};
|
||||
use std::io;
|
||||
use std::io::{Error, ErrorKind};
|
||||
use std::net::{IpAddr, SocketAddr, UdpSocket};
|
||||
use std::time::Duration;
|
||||
use thin_client::ThinClient;
|
||||
use transaction::Transaction;
|
||||
|
||||
pub const TIME_SLICE: u64 = 60;
|
||||
pub const REQUEST_CAP: u64 = 150_000;
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub enum DroneRequest {
|
||||
GetAirdrop {
|
||||
airdrop_request_amount: u64,
|
||||
client_public_key: PublicKey,
|
||||
},
|
||||
}
|
||||
|
||||
pub struct Drone {
|
||||
mint_keypair: KeyPair,
|
||||
ip_cache: Vec<IpAddr>,
|
||||
_airdrop_addr: SocketAddr,
|
||||
transactions_addr: SocketAddr,
|
||||
requests_addr: SocketAddr,
|
||||
pub time_slice: Duration,
|
||||
request_cap: u64,
|
||||
pub request_current: u64,
|
||||
}
|
||||
|
||||
impl Drone {
|
||||
pub fn new(
|
||||
mint_keypair: KeyPair,
|
||||
_airdrop_addr: SocketAddr,
|
||||
transactions_addr: SocketAddr,
|
||||
requests_addr: SocketAddr,
|
||||
time_input: Option<u64>,
|
||||
request_cap_input: Option<u64>,
|
||||
) -> Drone {
|
||||
let time_slice = match time_input {
|
||||
Some(time) => Duration::new(time, 0),
|
||||
None => Duration::new(TIME_SLICE, 0),
|
||||
};
|
||||
let request_cap = match request_cap_input {
|
||||
Some(cap) => cap,
|
||||
None => REQUEST_CAP,
|
||||
};
|
||||
Drone {
|
||||
mint_keypair,
|
||||
ip_cache: Vec::new(),
|
||||
_airdrop_addr,
|
||||
transactions_addr,
|
||||
requests_addr,
|
||||
time_slice,
|
||||
request_cap,
|
||||
request_current: 0,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn check_request_limit(&mut self, request_amount: u64) -> bool {
|
||||
(self.request_current + request_amount) <= self.request_cap
|
||||
}
|
||||
|
||||
pub fn clear_request_count(&mut self) {
|
||||
self.request_current = 0;
|
||||
}
|
||||
|
||||
pub fn add_ip_to_cache(&mut self, ip: IpAddr) {
|
||||
self.ip_cache.push(ip);
|
||||
}
|
||||
|
||||
pub fn clear_ip_cache(&mut self) {
|
||||
self.ip_cache.clear();
|
||||
}
|
||||
|
||||
pub fn check_rate_limit(&mut self, ip: IpAddr) -> Result<IpAddr, IpAddr> {
|
||||
// [WIP] This is placeholder code for a proper rate limiter.
|
||||
// Right now it will only allow one total drone request per IP
|
||||
if self.ip_cache.contains(&ip) {
|
||||
// Add proper error handling here
|
||||
Err(ip)
|
||||
} else {
|
||||
self.add_ip_to_cache(ip);
|
||||
Ok(ip)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn send_airdrop(&mut self, req: DroneRequest) -> Result<usize, io::Error> {
|
||||
let tx: Transaction;
|
||||
let request_amount: u64;
|
||||
let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
|
||||
let mut client = ThinClient::new(
|
||||
self.requests_addr,
|
||||
requests_socket,
|
||||
self.transactions_addr,
|
||||
transactions_socket,
|
||||
);
|
||||
let last_id = client.get_last_id();
|
||||
|
||||
match req {
|
||||
DroneRequest::GetAirdrop {
|
||||
airdrop_request_amount,
|
||||
client_public_key,
|
||||
} => {
|
||||
request_amount = airdrop_request_amount.clone();
|
||||
tx = Transaction::new(
|
||||
&self.mint_keypair,
|
||||
client_public_key,
|
||||
airdrop_request_amount as i64,
|
||||
last_id,
|
||||
);
|
||||
}
|
||||
}
|
||||
if self.check_request_limit(request_amount) {
|
||||
self.request_current += request_amount;
|
||||
client.transfer_signed(tx)
|
||||
} else {
|
||||
Err(Error::new(ErrorKind::Other, "token limit reached"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use bank::Bank;
|
||||
use crdt::{get_ip_addr, TestNode};
|
||||
use drone::{Drone, DroneRequest, REQUEST_CAP, TIME_SLICE};
|
||||
use logger;
|
||||
use mint::Mint;
|
||||
use server::Server;
|
||||
use signature::{KeyPair, KeyPairUtil};
|
||||
use std::io::sink;
|
||||
use std::net::{SocketAddr, UdpSocket};
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::Arc;
|
||||
use std::thread::sleep;
|
||||
use std::time::Duration;
|
||||
use thin_client::ThinClient;
|
||||
|
||||
#[test]
|
||||
fn test_check_request_limit() {
|
||||
let keypair = KeyPair::new();
|
||||
let mut addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
|
||||
addr.set_ip(get_ip_addr().unwrap());
|
||||
let transactions_addr = "0.0.0.0:0".parse().unwrap();
|
||||
let requests_addr = "0.0.0.0:0".parse().unwrap();
|
||||
let mut drone = Drone::new(
|
||||
keypair,
|
||||
addr,
|
||||
transactions_addr,
|
||||
requests_addr,
|
||||
None,
|
||||
Some(3),
|
||||
);
|
||||
assert!(drone.check_request_limit(1));
|
||||
drone.request_current = 3;
|
||||
assert!(!drone.check_request_limit(1));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_clear_request_count() {
|
||||
let keypair = KeyPair::new();
|
||||
let mut addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
|
||||
addr.set_ip(get_ip_addr().unwrap());
|
||||
let transactions_addr = "0.0.0.0:0".parse().unwrap();
|
||||
let requests_addr = "0.0.0.0:0".parse().unwrap();
|
||||
let mut drone = Drone::new(keypair, addr, transactions_addr, requests_addr, None, None);
|
||||
drone.request_current = drone.request_current + 256;
|
||||
assert_eq!(drone.request_current, 256);
|
||||
drone.clear_request_count();
|
||||
assert_eq!(drone.request_current, 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_add_ip_to_cache() {
|
||||
let keypair = KeyPair::new();
|
||||
let mut addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
|
||||
addr.set_ip(get_ip_addr().unwrap());
|
||||
let transactions_addr = "0.0.0.0:0".parse().unwrap();
|
||||
let requests_addr = "0.0.0.0:0".parse().unwrap();
|
||||
let mut drone = Drone::new(keypair, addr, transactions_addr, requests_addr, None, None);
|
||||
let ip = "127.0.0.1".parse().expect("create IpAddr from string");
|
||||
assert_eq!(drone.ip_cache.len(), 0);
|
||||
drone.add_ip_to_cache(ip);
|
||||
assert_eq!(drone.ip_cache.len(), 1);
|
||||
assert!(drone.ip_cache.contains(&ip));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_clear_ip_cache() {
|
||||
let keypair = KeyPair::new();
|
||||
let mut addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
|
||||
addr.set_ip(get_ip_addr().unwrap());
|
||||
let transactions_addr = "0.0.0.0:0".parse().unwrap();
|
||||
let requests_addr = "0.0.0.0:0".parse().unwrap();
|
||||
let mut drone = Drone::new(keypair, addr, transactions_addr, requests_addr, None, None);
|
||||
let ip = "127.0.0.1".parse().expect("create IpAddr from string");
|
||||
assert_eq!(drone.ip_cache.len(), 0);
|
||||
drone.add_ip_to_cache(ip);
|
||||
assert_eq!(drone.ip_cache.len(), 1);
|
||||
drone.clear_ip_cache();
|
||||
assert_eq!(drone.ip_cache.len(), 0);
|
||||
assert!(drone.ip_cache.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_drone_default_init() {
|
||||
let keypair = KeyPair::new();
|
||||
let mut addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
|
||||
addr.set_ip(get_ip_addr().unwrap());
|
||||
let transactions_addr = "0.0.0.0:0".parse().unwrap();
|
||||
let requests_addr = "0.0.0.0:0".parse().unwrap();
|
||||
let time_slice: Option<u64> = None;
|
||||
let request_cap: Option<u64> = None;
|
||||
let drone = Drone::new(
|
||||
keypair,
|
||||
addr,
|
||||
transactions_addr,
|
||||
requests_addr,
|
||||
time_slice,
|
||||
request_cap,
|
||||
);
|
||||
assert_eq!(drone.time_slice, Duration::new(TIME_SLICE, 0));
|
||||
assert_eq!(drone.request_cap, REQUEST_CAP);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_send_airdrop() {
|
||||
const SMALL_BATCH: i64 = 50;
|
||||
const TPS_BATCH: i64 = 5_000_000;
|
||||
|
||||
logger::setup();
|
||||
let leader = TestNode::new();
|
||||
|
||||
let alice = Mint::new(10_000_000);
|
||||
let bank = Bank::new(&alice);
|
||||
let bob_pubkey = KeyPair::new().pubkey();
|
||||
let carlos_pubkey = KeyPair::new().pubkey();
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
|
||||
let server = Server::new_leader(
|
||||
bank,
|
||||
Some(Duration::from_millis(30)),
|
||||
leader.data.clone(),
|
||||
leader.sockets.requests,
|
||||
leader.sockets.transaction,
|
||||
leader.sockets.broadcast,
|
||||
leader.sockets.respond,
|
||||
leader.sockets.gossip,
|
||||
exit.clone(),
|
||||
sink(),
|
||||
);
|
||||
sleep(Duration::from_millis(900));
|
||||
|
||||
let mut addr: SocketAddr = "0.0.0.0:9900".parse().expect("bind to drone socket");
|
||||
addr.set_ip(get_ip_addr().expect("drone get_ip_addr"));
|
||||
let mut drone = Drone::new(
|
||||
alice.keypair(),
|
||||
addr,
|
||||
leader.data.transactions_addr,
|
||||
leader.data.requests_addr,
|
||||
None,
|
||||
Some(5_000_050),
|
||||
);
|
||||
|
||||
let bob_req = DroneRequest::GetAirdrop {
|
||||
airdrop_request_amount: 50,
|
||||
client_public_key: bob_pubkey,
|
||||
};
|
||||
let bob_result = drone.send_airdrop(bob_req).expect("send airdrop test");
|
||||
assert!(bob_result > 0);
|
||||
|
||||
let carlos_req = DroneRequest::GetAirdrop {
|
||||
airdrop_request_amount: 5_000_000,
|
||||
client_public_key: carlos_pubkey,
|
||||
};
|
||||
let carlos_result = drone.send_airdrop(carlos_req).expect("send airdrop test");
|
||||
assert!(carlos_result > 0);
|
||||
|
||||
let requests_socket = UdpSocket::bind("0.0.0.0:0").expect("drone bind to requests socket");
|
||||
let transactions_socket =
|
||||
UdpSocket::bind("0.0.0.0:0").expect("drone bind to transactions socket");
|
||||
|
||||
let mut client = ThinClient::new(
|
||||
leader.data.requests_addr,
|
||||
requests_socket,
|
||||
leader.data.transactions_addr,
|
||||
transactions_socket,
|
||||
);
|
||||
|
||||
let bob_balance = client.poll_get_balance(&bob_pubkey);
|
||||
info!("Small request balance: {:?}", bob_balance);
|
||||
assert_eq!(bob_balance.unwrap(), SMALL_BATCH);
|
||||
|
||||
let carlos_balance = client.poll_get_balance(&carlos_pubkey);
|
||||
info!("TPS request balance: {:?}", carlos_balance);
|
||||
assert_eq!(carlos_balance.unwrap(), TPS_BATCH);
|
||||
|
||||
exit.store(true, Ordering::Relaxed);
|
||||
for t in server.thread_hdls {
|
||||
t.join().unwrap();
|
||||
}
|
||||
}
|
||||
}
|
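The drone above enforces two guards: a running token total per time slice checked against `request_cap`, and a placeholder per-IP cache that allows one airdrop per address until the cache is cleared. A minimal standalone sketch of those checks (the `Limits` type below is a stand-in, not the crate's `Drone`):

    use std::net::IpAddr;

    // Field names follow the diff above, but this is illustration only.
    struct Limits {
        request_cap: u64,
        request_current: u64,
        ip_cache: Vec<IpAddr>,
    }

    impl Limits {
        // Refuse any airdrop that would push the running total past the cap.
        fn check_request_limit(&self, request_amount: u64) -> bool {
            self.request_current + request_amount <= self.request_cap
        }

        // Placeholder rate limiter: one airdrop per IP until the cache is cleared.
        fn check_rate_limit(&mut self, ip: IpAddr) -> Result<IpAddr, IpAddr> {
            if self.ip_cache.contains(&ip) {
                Err(ip)
            } else {
                self.ip_cache.push(ip);
                Ok(ip)
            }
        }
    }

    fn main() {
        let mut limits = Limits { request_cap: 150_000, request_current: 0, ip_cache: Vec::new() };
        assert!(limits.check_request_limit(50));
        let ip: IpAddr = "127.0.0.1".parse().unwrap();
        assert!(limits.check_rate_limit(ip).is_ok());
        assert!(limits.check_rate_limit(ip).is_err()); // second request from same IP is refused
    }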
55
src/entry.rs
@@ -2,25 +2,38 @@
|
||||
//! unique ID that is the hash of the Entry before it, plus the hash of the
|
||||
//! transactions within it. Entries cannot be reordered, and its field `num_hashes`
|
||||
//! represents an approximate amount of time since the last Entry was created.
|
||||
use bincode::serialized_size;
|
||||
use hash::{extend_and_hash, hash, Hash};
|
||||
use packet::BLOB_DATA_SIZE;
|
||||
use rayon::prelude::*;
|
||||
use transaction::Transaction;
|
||||
|
||||
/// Each Entry contains three pieces of data. The `num_hashes` field is the number
|
||||
/// of hashes performed since the previous entry. The `id` field is the result
|
||||
/// of hashing `id` from the previous entry `num_hashes` times. The `transactions`
|
||||
/// field points to Events that took place shortly after `id` was generated.
|
||||
/// field points to Transactions that took place shortly before `id` was generated.
|
||||
///
|
||||
/// If you divide `num_hashes` by the amount of time it takes to generate a new hash, you
|
||||
/// get a duration estimate since the last Entry. Since processing power increases
|
||||
/// over time, one should expect the duration `num_hashes` represents to decrease proportionally.
|
||||
/// Though processing power varies across nodes, the network gives priority to the
|
||||
/// fastest processor. Duration should therefore be estimated by assuming that the hash
|
||||
/// was generated by the fastest processor at the time the entry was recorded.
|
||||
/// An upper bound on Duration can be estimated by assuming each hash was generated by the
|
||||
/// world's fastest processor at the time the entry was recorded. Or said another way, it
|
||||
/// is physically not possible for a shorter duration to have occurred if one assumes the
|
||||
/// hash was computed by the world's fastest processor at that time. The hash chain is both
|
||||
/// a Verifiable Delay Function (VDF) and a Proof of Work (not to be confused with Proof of
|
||||
/// Work consensus!)
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
|
||||
pub struct Entry {
|
||||
/// The number of hashes since the previous Entry ID.
|
||||
pub num_hashes: u64,
|
||||
|
||||
/// The SHA-256 hash `num_hashes` after the previous Entry ID.
|
||||
pub id: Hash,
|
||||
|
||||
/// An unordered list of transactions that were observed before the Entry ID was
|
||||
/// generated. They may have been observed before a previous Entry ID but were
|
||||
/// pushed back into this list to ensure deterministic interpretation of the ledger.
|
||||
pub transactions: Vec<Transaction>,
|
||||
}
|
||||
|
||||
@@ -29,11 +42,13 @@ impl Entry {
|
||||
pub fn new(start_hash: &Hash, cur_hashes: u64, transactions: Vec<Transaction>) -> Self {
|
||||
let num_hashes = cur_hashes + if transactions.is_empty() { 0 } else { 1 };
|
||||
let id = next_hash(start_hash, 0, &transactions);
|
||||
Entry {
|
||||
let entry = Entry {
|
||||
num_hashes,
|
||||
id,
|
||||
transactions,
|
||||
}
|
||||
};
|
||||
assert!(serialized_size(&entry).unwrap() <= BLOB_DATA_SIZE as u64);
|
||||
entry
|
||||
}
|
||||
|
||||
/// Creates the next Tick Entry `num_hashes` after `start_hash`.
|
||||
@@ -45,6 +60,7 @@ impl Entry {
|
||||
let entry = Self::new(start_hash, *cur_hashes, transactions);
|
||||
*start_hash = entry.id;
|
||||
*cur_hashes = 0;
|
||||
assert!(serialized_size(&entry).unwrap() <= BLOB_DATA_SIZE as u64);
|
||||
entry
|
||||
}
|
||||
|
||||
@@ -73,8 +89,9 @@ fn add_transaction_data(hash_data: &mut Vec<u8>, tx: &Transaction) {
|
||||
|
||||
/// Creates the hash `num_hashes` after `start_hash`. If the transaction contains
|
||||
/// a signature, the final hash will be a hash of both the previous ID and
|
||||
/// the signature.
|
||||
pub fn next_hash(start_hash: &Hash, num_hashes: u64, transactions: &[Transaction]) -> Hash {
|
||||
/// the signature. If num_hashes is zero and there's no transaction data,
|
||||
/// start_hash is returned.
|
||||
fn next_hash(start_hash: &Hash, num_hashes: u64, transactions: &[Transaction]) -> Hash {
|
||||
let mut id = *start_hash;
|
||||
for _ in 1..num_hashes {
|
||||
id = hash(&id);
|
||||
@@ -95,8 +112,9 @@ pub fn next_hash(start_hash: &Hash, num_hashes: u64, transactions: &[Transaction
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates the next Tick or Event Entry `num_hashes` after `start_hash`.
|
||||
/// Creates the next Tick or Transaction Entry `num_hashes` after `start_hash`.
|
||||
pub fn next_entry(start_hash: &Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Entry {
|
||||
assert!(num_hashes > 0 || transactions.len() == 0);
|
||||
Entry {
|
||||
num_hashes,
|
||||
id: next_hash(start_hash, num_hashes, &transactions),
|
||||
@@ -163,5 +181,24 @@ mod tests {
|
||||
let tick = next_entry(&zero, 1, vec![]);
|
||||
assert_eq!(tick.num_hashes, 1);
|
||||
assert_ne!(tick.id, zero);
|
||||
|
||||
let tick = next_entry(&zero, 0, vec![]);
|
||||
assert_eq!(tick.num_hashes, 0);
|
||||
assert_eq!(tick.id, zero);
|
||||
|
||||
let keypair = KeyPair::new();
|
||||
let tx0 = Transaction::new_timestamp(&keypair, Utc::now(), zero);
|
||||
let entry0 = next_entry(&zero, 1, vec![tx0.clone()]);
|
||||
assert_eq!(entry0.num_hashes, 1);
|
||||
assert_eq!(entry0.id, next_hash(&zero, 1, &vec![tx0]));
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn test_next_entry_panic() {
|
||||
let zero = Hash::default();
|
||||
let keypair = KeyPair::new();
|
||||
let tx = Transaction::new(&keypair, keypair.pubkey(), 0, zero);
|
||||
next_entry(&zero, 0, vec![tx]);
|
||||
}
|
||||
}
|
||||
|
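An Entry's `id` above is produced by hashing the previous id `num_hashes` times and folding the transaction data into the last step, so verifying the chain is just replaying the same hashes. The sketch below reimplements that chain with the `sha2` crate standing in for the crate's own `hash` module; the dependency and the exact byte layout are assumptions for illustration only.

    // Cargo.toml (assumption): sha2 = "0.10"
    use sha2::{Digest, Sha256};

    type Hash = [u8; 32];

    fn hash(data: &[u8]) -> Hash {
        let digest = Sha256::digest(data);
        let mut out = [0u8; 32];
        out.copy_from_slice(digest.as_slice());
        out
    }

    // The next id is the previous id hashed `num_hashes` times, with any
    // transaction bytes mixed into the final step (the real code mixes in signatures).
    fn next_hash(start_hash: &Hash, num_hashes: u64, tx_data: &[u8]) -> Hash {
        let mut id = *start_hash;
        for _ in 1..num_hashes {
            id = hash(&id);
        }
        if num_hashes == 0 && tx_data.is_empty() {
            return id; // nothing to do: the chain does not advance
        }
        let mut last = Vec::with_capacity(id.len() + tx_data.len());
        last.extend_from_slice(&id);
        last.extend_from_slice(tx_data);
        hash(&last)
    }

    fn main() {
        let zero = [0u8; 32];
        let tick = next_hash(&zero, 1, &[]);
        assert_ne!(tick, zero);                     // one hash advances the chain
        assert_eq!(next_hash(&zero, 0, &[]), zero); // zero hashes, no data: unchanged
        assert_eq!(next_hash(&zero, 1, &[]), tick); // verification is just replaying
    }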
@@ -1,4 +1,6 @@
|
||||
//! The `entry_writer` module helps implement the TPU's write stage.
|
||||
//! The `entry_writer` module helps implement the TPU's write stage. It
|
||||
//! writes entries to the given writer, which is typically a file or
|
||||
//! stdout, and then sends the Entry to its output channel.
|
||||
|
||||
use bank::Bank;
|
||||
use entry::Entry;
|
||||
|
516
src/erasure.rs
@@ -1,17 +1,18 @@
|
||||
// Support erasure coding
|
||||
|
||||
use packet::{BlobRecycler, SharedBlob};
|
||||
use packet::{BlobRecycler, SharedBlob, BLOB_HEADER_SIZE};
|
||||
use std::result;
|
||||
|
||||
//TODO(sakridge) pick these values
|
||||
const NUM_CODED: usize = 10;
|
||||
const MAX_MISSING: usize = 2;
|
||||
pub const NUM_CODED: usize = 20;
|
||||
pub const MAX_MISSING: usize = 4;
|
||||
const NUM_DATA: usize = NUM_CODED - MAX_MISSING;
|
||||
|
||||
#[derive(Debug, PartialEq, Eq)]
|
||||
pub enum ErasureError {
|
||||
NotEnoughBlocksToDecode,
|
||||
DecodeError,
|
||||
EncodeError,
|
||||
InvalidBlockSize,
|
||||
}
|
||||
|
||||
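With `NUM_CODED = 20` and `MAX_MISSING = 4`, each group of `NUM_DATA = 16` data blobs is followed by 4 coding blobs, so up to 4 blobs of every 20 can be lost and still recovered. `add_coding_blobs` later in this file places the coding group at indices where `(index + MAX_MISSING) % NUM_CODED == 0`; a small sketch of that placement rule:

    pub const NUM_CODED: usize = 20;
    pub const MAX_MISSING: usize = 4;
    pub const NUM_DATA: usize = NUM_CODED - MAX_MISSING;

    // For a run of blob indices, report where a group of MAX_MISSING coding blobs
    // would be inserted: right after each NUM_DATA data blobs, as in add_coding_blobs.
    fn coding_slots(start: usize, count: usize) -> Vec<usize> {
        (start..start + count)
            .filter(|&i| i != 0 && (i + MAX_MISSING) % NUM_CODED == 0)
            .collect()
    }

    fn main() {
        // Indices 16, 36, 56, ... close out each 16-blob data group.
        assert_eq!(coding_slots(0, 60), vec![16, 36, 56]);
        assert_eq!(NUM_DATA, 16);
    }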
@@ -73,12 +74,22 @@ pub fn generate_coding_blocks(coding: &mut [&mut [u8]], data: &[&[u8]]) -> Resul
|
||||
let mut data_arg = Vec::new();
|
||||
for block in data {
|
||||
if block_len != block.len() {
|
||||
trace!(
|
||||
"data block size incorrect {} expected {}",
|
||||
block.len(),
|
||||
block_len
|
||||
);
|
||||
return Err(ErasureError::InvalidBlockSize);
|
||||
}
|
||||
data_arg.push(block.as_ptr());
|
||||
}
|
||||
for mut block in coding {
|
||||
if block_len != block.len() {
|
||||
trace!(
|
||||
"coding block size incorrect {} expected {}",
|
||||
block.len(),
|
||||
block_len
|
||||
);
|
||||
return Err(ErasureError::InvalidBlockSize);
|
||||
}
|
||||
coding_arg.push(block.as_mut_ptr());
|
||||
@@ -150,59 +161,128 @@ pub fn decode_blocks(data: &mut [&mut [u8]], coding: &[&[u8]], erasures: &[i32])
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Generate coding blocks in window from consumed to consumed+NUM_DATA
|
||||
// Allocate some coding blobs and insert into the blobs array
|
||||
pub fn add_coding_blobs(recycler: &BlobRecycler, blobs: &mut Vec<SharedBlob>, consumed: u64) {
|
||||
let mut added = 0;
|
||||
let blobs_len = blobs.len() as u64;
|
||||
for i in consumed..consumed + blobs_len {
|
||||
let is = i as usize;
|
||||
if is != 0 && ((is + MAX_MISSING) % NUM_CODED) == 0 {
|
||||
for _ in 0..MAX_MISSING {
|
||||
trace!("putting coding at {}", (i - consumed));
|
||||
let new_blob = recycler.allocate();
|
||||
let new_blob_clone = new_blob.clone();
|
||||
let mut new_blob_l = new_blob_clone.write().unwrap();
|
||||
new_blob_l.set_size(0);
|
||||
new_blob_l.set_coding().unwrap();
|
||||
drop(new_blob_l);
|
||||
blobs.insert((i - consumed) as usize, new_blob);
|
||||
added += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
info!(
|
||||
"add_coding consumed: {} blobs.len(): {} added: {}",
|
||||
consumed,
|
||||
blobs.len(),
|
||||
added
|
||||
);
|
||||
}
|
||||
|
||||
// Generate coding blocks in window starting from consumed
|
||||
pub fn generate_coding(
|
||||
re: &BlobRecycler,
|
||||
window: &mut Vec<SharedBlob>,
|
||||
window: &mut Vec<Option<SharedBlob>>,
|
||||
consumed: usize,
|
||||
num_blobs: usize,
|
||||
) -> Result<()> {
|
||||
let mut data_blobs = Vec::new();
|
||||
let mut coding_blobs = Vec::new();
|
||||
let mut data_locks = Vec::new();
|
||||
let mut data_ptrs: Vec<&[u8]> = Vec::new();
|
||||
let mut coding_locks = Vec::new();
|
||||
let mut coding_ptrs: Vec<&mut [u8]> = Vec::new();
|
||||
for i in consumed..consumed + NUM_DATA {
|
||||
let n = i % window.len();
|
||||
data_blobs.push(
|
||||
window[n]
|
||||
.clone()
|
||||
.expect("'data_blobs' arr in pub fn generate_coding"),
|
||||
);
|
||||
}
|
||||
for b in &data_blobs {
|
||||
data_locks.push(b.write().expect("'b' write lock in pub fn generate_coding"));
|
||||
}
|
||||
for (i, l) in data_locks.iter_mut().enumerate() {
|
||||
trace!("i: {} data: {}", i, l.data[0]);
|
||||
data_ptrs.push(&l.data);
|
||||
}
|
||||
let mut block_start = consumed - (consumed % NUM_CODED);
|
||||
|
||||
// generate coding ptr array
|
||||
let coding_start = consumed + NUM_DATA;
|
||||
let coding_end = consumed + NUM_CODED;
|
||||
for i in coding_start..coding_end {
|
||||
let n = i % window.len();
|
||||
window[n] = re.allocate();
|
||||
coding_blobs.push(
|
||||
window[n]
|
||||
.clone()
|
||||
.expect("'coding_blobs' arr in pub fn generate_coding"),
|
||||
);
|
||||
}
|
||||
for b in &coding_blobs {
|
||||
coding_locks.push(
|
||||
b.write()
|
||||
.expect("'coding_locks' arr in pub fn generate_coding"),
|
||||
);
|
||||
}
|
||||
for (i, l) in coding_locks.iter_mut().enumerate() {
|
||||
trace!("i: {} data: {}", i, l.data[0]);
|
||||
coding_ptrs.push(&mut l.data);
|
||||
}
|
||||
for i in consumed..consumed + num_blobs {
|
||||
if (i % NUM_CODED) == (NUM_CODED - 1) {
|
||||
let mut data_blobs = Vec::new();
|
||||
let mut coding_blobs = Vec::new();
|
||||
let mut data_locks = Vec::new();
|
||||
let mut data_ptrs: Vec<&[u8]> = Vec::new();
|
||||
let mut coding_locks = Vec::new();
|
||||
let mut coding_ptrs: Vec<&mut [u8]> = Vec::new();
|
||||
|
||||
generate_coding_blocks(coding_ptrs.as_mut_slice(), &data_ptrs)?;
|
||||
trace!("consumed: {}", consumed);
|
||||
info!(
|
||||
"generate_coding start: {} end: {} consumed: {} num_blobs: {}",
|
||||
block_start,
|
||||
block_start + NUM_DATA,
|
||||
consumed,
|
||||
num_blobs
|
||||
);
|
||||
for i in block_start..block_start + NUM_DATA {
|
||||
let n = i % window.len();
|
||||
trace!("window[{}] = {:?}", n, window[n]);
|
||||
if window[n].is_none() {
|
||||
trace!("data block is null @ {}", n);
|
||||
return Ok(());
|
||||
}
|
||||
data_blobs.push(
|
||||
window[n]
|
||||
.clone()
|
||||
.expect("'data_blobs' arr in pub fn generate_coding"),
|
||||
);
|
||||
}
|
||||
let mut max_data_size = 0;
|
||||
for b in &data_blobs {
|
||||
let lck = b.write().expect("'b' write lock in pub fn generate_coding");
|
||||
if lck.meta.size > max_data_size {
|
||||
max_data_size = lck.meta.size;
|
||||
}
|
||||
data_locks.push(lck);
|
||||
}
|
||||
trace!("max_data_size: {}", max_data_size);
|
||||
for (i, l) in data_locks.iter_mut().enumerate() {
|
||||
trace!("i: {} data: {}", i, l.data[0]);
|
||||
data_ptrs.push(&l.data[..max_data_size]);
|
||||
}
|
||||
|
||||
// generate coding ptr array
|
||||
let coding_start = block_start + NUM_DATA;
|
||||
let coding_end = block_start + NUM_CODED;
|
||||
for i in coding_start..coding_end {
|
||||
let n = i % window.len();
|
||||
if window[n].is_none() {
|
||||
trace!("coding block is null @ {}", n);
|
||||
return Ok(());
|
||||
}
|
||||
let w_l = window[n].clone().unwrap();
|
||||
w_l.write().unwrap().set_size(max_data_size);
|
||||
if w_l.write().unwrap().set_coding().is_err() {
|
||||
return Err(ErasureError::EncodeError);
|
||||
}
|
||||
coding_blobs.push(
|
||||
window[n]
|
||||
.clone()
|
||||
.expect("'coding_blobs' arr in pub fn generate_coding"),
|
||||
);
|
||||
}
|
||||
for b in &coding_blobs {
|
||||
coding_locks.push(
|
||||
b.write()
|
||||
.expect("'coding_locks' arr in pub fn generate_coding"),
|
||||
);
|
||||
}
|
||||
for (i, l) in coding_locks.iter_mut().enumerate() {
|
||||
trace!("i: {} coding: {} size: {}", i, l.data[0], max_data_size);
|
||||
coding_ptrs.push(&mut l.data_mut()[..max_data_size]);
|
||||
}
|
||||
|
||||
generate_coding_blocks(coding_ptrs.as_mut_slice(), &data_ptrs)?;
|
||||
debug!(
|
||||
"consumed: {} data: {}:{} coding: {}:{}",
|
||||
consumed,
|
||||
block_start,
|
||||
block_start + NUM_DATA,
|
||||
coding_start,
|
||||
coding_end
|
||||
);
|
||||
block_start += NUM_CODED;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -214,75 +294,142 @@ pub fn recover(
|
||||
re: &BlobRecycler,
|
||||
window: &mut Vec<Option<SharedBlob>>,
|
||||
consumed: usize,
|
||||
received: usize,
|
||||
) -> Result<()> {
|
||||
//recover with erasure coding
|
||||
let mut data_missing = 0;
|
||||
let mut coded_missing = 0;
|
||||
let coding_start = consumed + NUM_DATA;
|
||||
let coding_end = consumed + NUM_CODED;
|
||||
for i in consumed..coding_end {
|
||||
let n = i % window.len();
|
||||
if window[n].is_none() {
|
||||
if i >= coding_start {
|
||||
coded_missing += 1;
|
||||
} else {
|
||||
data_missing += 1;
|
||||
}
|
||||
}
|
||||
if received <= consumed {
|
||||
return Ok(());
|
||||
}
|
||||
trace!("missing: data: {} coding: {}", data_missing, coded_missing);
|
||||
if data_missing > 0 {
|
||||
if (data_missing + coded_missing) <= MAX_MISSING {
|
||||
let mut blobs: Vec<SharedBlob> = Vec::new();
|
||||
let mut locks = Vec::new();
|
||||
let mut data_ptrs: Vec<&mut [u8]> = Vec::new();
|
||||
let mut coding_ptrs: Vec<&[u8]> = Vec::new();
|
||||
let mut erasures: Vec<i32> = Vec::new();
|
||||
for i in consumed..coding_end {
|
||||
let j = i % window.len();
|
||||
let mut b = &mut window[j];
|
||||
if b.is_some() {
|
||||
blobs.push(b.clone().expect("'blobs' arr in pb fn recover"));
|
||||
continue;
|
||||
}
|
||||
let n = re.allocate();
|
||||
*b = Some(n.clone());
|
||||
//mark the missing memory
|
||||
blobs.push(n);
|
||||
erasures.push((i - consumed) as i32);
|
||||
}
|
||||
erasures.push(-1);
|
||||
trace!("erasures: {:?}", erasures);
|
||||
//lock everything
|
||||
for b in &blobs {
|
||||
locks.push(b.write().expect("'locks' arr in pb fn recover"));
|
||||
}
|
||||
for (i, l) in locks.iter_mut().enumerate() {
|
||||
if i >= NUM_DATA {
|
||||
trace!("pushing coding: {}", i);
|
||||
coding_ptrs.push(&l.data);
|
||||
} else {
|
||||
trace!("pushing data: {}", i);
|
||||
data_ptrs.push(&mut l.data);
|
||||
}
|
||||
}
|
||||
trace!(
|
||||
"coding_ptrs.len: {} data_ptrs.len {}",
|
||||
coding_ptrs.len(),
|
||||
data_ptrs.len()
|
||||
);
|
||||
decode_blocks(data_ptrs.as_mut_slice(), &coding_ptrs, &erasures)?;
|
||||
} else {
|
||||
return Err(ErasureError::NotEnoughBlocksToDecode);
|
||||
let num_blocks = (received - consumed) / NUM_CODED;
|
||||
let mut block_start = consumed - (consumed % NUM_CODED);
|
||||
|
||||
if num_blocks > 0 {
|
||||
debug!(
|
||||
"num_blocks: {} received: {} consumed: {}",
|
||||
num_blocks, received, consumed
|
||||
);
|
||||
}
|
||||
|
||||
for i in 0..num_blocks {
|
||||
if i > 100 {
|
||||
break;
|
||||
}
|
||||
let mut data_missing = 0;
|
||||
let mut coded_missing = 0;
|
||||
let coding_start = block_start + NUM_DATA;
|
||||
let coding_end = block_start + NUM_CODED;
|
||||
trace!(
|
||||
"recover: block_start: {} coding_start: {} coding_end: {}",
|
||||
block_start,
|
||||
coding_start,
|
||||
coding_end
|
||||
);
|
||||
for i in block_start..coding_end {
|
||||
let n = i % window.len();
|
||||
if window[n].is_none() {
|
||||
if i >= coding_start {
|
||||
coded_missing += 1;
|
||||
} else {
|
||||
data_missing += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (data_missing + coded_missing) != NUM_CODED && (data_missing + coded_missing) != 0 {
|
||||
debug!(
|
||||
"1: start: {} recovering: data: {} coding: {}",
|
||||
block_start, data_missing, coded_missing
|
||||
);
|
||||
}
|
||||
if data_missing > 0 {
|
||||
if (data_missing + coded_missing) <= MAX_MISSING {
|
||||
debug!(
|
||||
"2: recovering: data: {} coding: {}",
|
||||
data_missing, coded_missing
|
||||
);
|
||||
let mut blobs: Vec<SharedBlob> = Vec::new();
|
||||
let mut locks = Vec::new();
|
||||
let mut erasures: Vec<i32> = Vec::new();
|
||||
let mut meta = None;
|
||||
let mut size = None;
|
||||
for i in block_start..coding_end {
|
||||
let j = i % window.len();
|
||||
let mut b = &mut window[j];
|
||||
if b.is_some() {
|
||||
if i >= NUM_DATA && size.is_none() {
|
||||
let bl = b.clone().unwrap();
|
||||
size = Some(bl.read().unwrap().meta.size - BLOB_HEADER_SIZE);
|
||||
}
|
||||
if meta.is_none() {
|
||||
let bl = b.clone().unwrap();
|
||||
meta = Some(bl.read().unwrap().meta.clone());
|
||||
}
|
||||
blobs.push(b.clone().expect("'blobs' arr in pb fn recover"));
|
||||
continue;
|
||||
}
|
||||
let n = re.allocate();
|
||||
*b = Some(n.clone());
|
||||
//mark the missing memory
|
||||
blobs.push(n);
|
||||
erasures.push((i - block_start) as i32);
|
||||
}
|
||||
erasures.push(-1);
|
||||
trace!(
|
||||
"erasures: {:?} data_size: {} header_size: {}",
|
||||
erasures,
|
||||
size.unwrap(),
|
||||
BLOB_HEADER_SIZE
|
||||
);
|
||||
//lock everything
|
||||
for b in &blobs {
|
||||
locks.push(b.write().expect("'locks' arr in pb fn recover"));
|
||||
}
|
||||
{
|
||||
let mut coding_ptrs: Vec<&[u8]> = Vec::new();
|
||||
let mut data_ptrs: Vec<&mut [u8]> = Vec::new();
|
||||
for (i, l) in locks.iter_mut().enumerate() {
|
||||
if i >= NUM_DATA {
|
||||
trace!("pushing coding: {}", i);
|
||||
coding_ptrs.push(&l.data()[..size.unwrap()]);
|
||||
} else {
|
||||
trace!("pushing data: {}", i);
|
||||
data_ptrs.push(&mut l.data[..size.unwrap()]);
|
||||
}
|
||||
}
|
||||
trace!(
|
||||
"coding_ptrs.len: {} data_ptrs.len {}",
|
||||
coding_ptrs.len(),
|
||||
data_ptrs.len()
|
||||
);
|
||||
decode_blocks(data_ptrs.as_mut_slice(), &coding_ptrs, &erasures)?;
|
||||
}
|
||||
for i in &erasures[..erasures.len() - 1] {
|
||||
let idx = *i as usize;
|
||||
let data_size = locks[idx].get_data_size().unwrap() - BLOB_HEADER_SIZE as u64;
|
||||
locks[idx].meta = meta.clone().unwrap();
|
||||
locks[idx].set_size(data_size as usize);
|
||||
trace!(
|
||||
"erasures[{}] size: {} data[0]: {}",
|
||||
*i,
|
||||
data_size,
|
||||
locks[idx].data()[0]
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
block_start += NUM_CODED;
|
||||
}
|
||||
Ok(())
|
||||
}
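// Worked arithmetic (illustration only) for the block walk in `recover` above, with
// NUM_CODED = 20: scanning starts at the NUM_CODED-aligned slot at or below `consumed`,
// and the number of blocks examined is how many whole NUM_CODED spans fit between
// `consumed` and `received`. `recover_blocks` is a hypothetical helper.
fn recover_blocks(consumed: usize, received: usize, num_coded: usize) -> (usize, usize) {
    let block_start = consumed - (consumed % num_coded);
    let num_blocks = (received - consumed) / num_coded;
    (block_start, num_blocks)
}

fn main() {
    // consumed = 25, received = 67: start at slot 20 and examine 2 full blocks.
    assert_eq!(recover_blocks(25, 67, 20), (20, 2));
}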
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use crdt;
|
||||
use erasure;
|
||||
use packet::{BlobRecycler, SharedBlob, PACKET_DATA_SIZE};
|
||||
use logger;
|
||||
use packet::{BlobRecycler, SharedBlob, BLOB_HEADER_SIZE};
|
||||
use signature::KeyPair;
|
||||
use signature::KeyPairUtil;
|
||||
use std::sync::{Arc, RwLock};
|
||||
|
||||
#[test]
|
||||
pub fn test_coding() {
|
||||
@@ -338,10 +485,15 @@ mod test {
|
||||
for (i, w) in window.iter().enumerate() {
|
||||
print!("window({}): ", i);
|
||||
if w.is_some() {
|
||||
let window_lock = w.clone().unwrap();
|
||||
let window_data = window_lock.read().unwrap().data;
|
||||
let window_l1 = w.clone().unwrap();
|
||||
let window_l2 = window_l1.read().unwrap();
|
||||
print!(
|
||||
"index: {:?} meta.size: {} data: ",
|
||||
window_l2.get_index(),
|
||||
window_l2.meta.size
|
||||
);
|
||||
for i in 0..8 {
|
||||
print!("{} ", window_data[i]);
|
||||
print!("{} ", window_l2.data()[i]);
|
||||
}
|
||||
} else {
|
||||
print!("null");
|
||||
@@ -350,45 +502,102 @@ mod test {
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_window_recover() {
|
||||
let mut window = Vec::new();
|
||||
let blob_recycler = BlobRecycler::default();
|
||||
let offset = 4;
|
||||
for i in 0..(4 * erasure::NUM_CODED + 1) {
|
||||
fn generate_window(
|
||||
data_len: usize,
|
||||
blob_recycler: &BlobRecycler,
|
||||
offset: usize,
|
||||
num_blobs: usize,
|
||||
) -> (Vec<Option<SharedBlob>>, usize) {
|
||||
let mut window = vec![None; 32];
|
||||
let mut blobs = Vec::new();
|
||||
for i in 0..num_blobs {
|
||||
let b = blob_recycler.allocate();
|
||||
let b_ = b.clone();
|
||||
let data_len = b.read().unwrap().data.len();
|
||||
let mut w = b.write().unwrap();
|
||||
w.set_index(i as u64).unwrap();
|
||||
assert_eq!(i as u64, w.get_index().unwrap());
|
||||
w.meta.size = PACKET_DATA_SIZE;
|
||||
w.set_size(data_len);
|
||||
for k in 0..data_len {
|
||||
w.data[k] = (k + i) as u8;
|
||||
w.data_mut()[k] = (k + i) as u8;
|
||||
}
|
||||
window.push(Some(b_));
|
||||
blobs.push(b_);
|
||||
}
|
||||
erasure::add_coding_blobs(blob_recycler, &mut blobs, offset as u64);
|
||||
let blobs_len = blobs.len();
|
||||
|
||||
let d = crdt::ReplicatedData::new(
|
||||
KeyPair::new().pubkey(),
|
||||
"127.0.0.1:1234".parse().unwrap(),
|
||||
"127.0.0.1:1235".parse().unwrap(),
|
||||
"127.0.0.1:1236".parse().unwrap(),
|
||||
"127.0.0.1:1237".parse().unwrap(),
|
||||
"127.0.0.1:1238".parse().unwrap(),
|
||||
);
|
||||
let crdt = Arc::new(RwLock::new(crdt::Crdt::new(d.clone())));
|
||||
|
||||
assert!(crdt::Crdt::index_blobs(&crdt, &blobs, &mut (offset as u64)).is_ok());
|
||||
for b in blobs {
|
||||
let idx = b.read().unwrap().get_index().unwrap() as usize;
|
||||
window[idx] = Some(b);
|
||||
}
|
||||
(window, blobs_len)
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_window_recover_basic() {
|
||||
logger::setup();
|
||||
let data_len = 16;
|
||||
let blob_recycler = BlobRecycler::default();
|
||||
|
||||
// Generate a window
|
||||
let offset = 1;
|
||||
let num_blobs = erasure::NUM_DATA + 2;
|
||||
let (mut window, blobs_len) = generate_window(data_len, &blob_recycler, 0, num_blobs);
|
||||
println!("** after-gen-window:");
|
||||
print_window(&window);
|
||||
|
||||
// Generate the coding blocks
|
||||
assert!(erasure::generate_coding(&mut window, offset, blobs_len).is_ok());
|
||||
println!("** after-gen-coding:");
|
||||
print_window(&window);
|
||||
|
||||
let erase_offset = offset;
|
||||
// Create a hole in the window
|
||||
let refwindow = window[erase_offset].clone();
|
||||
window[erase_offset] = None;
|
||||
|
||||
// Recover it from coding
|
||||
assert!(erasure::recover(&blob_recycler, &mut window, offset, offset + blobs_len).is_ok());
|
||||
println!("** after-recover:");
|
||||
print_window(&window);
|
||||
|
||||
// Check the result
|
||||
let window_l = window[erase_offset].clone().unwrap();
|
||||
let window_l2 = window_l.read().unwrap();
|
||||
let ref_l = refwindow.clone().unwrap();
|
||||
let ref_l2 = ref_l.read().unwrap();
|
||||
assert_eq!(
|
||||
window_l2.data[..(data_len + BLOB_HEADER_SIZE)],
|
||||
ref_l2.data[..(data_len + BLOB_HEADER_SIZE)]
|
||||
);
|
||||
assert_eq!(window_l2.meta.size, ref_l2.meta.size);
|
||||
assert_eq!(window_l2.meta.addr, ref_l2.meta.addr);
|
||||
assert_eq!(window_l2.meta.port, ref_l2.meta.port);
|
||||
assert_eq!(window_l2.meta.v6, ref_l2.meta.v6);
|
||||
assert_eq!(window_l2.get_index().unwrap(), erase_offset as u64);
|
||||
}
|
||||
|
||||
//TODO This needs to be reworked
|
||||
#[test]
|
||||
#[ignore]
|
||||
pub fn test_window_recover() {
|
||||
logger::setup();
|
||||
let blob_recycler = BlobRecycler::default();
|
||||
let offset = 4;
|
||||
let data_len = 16;
|
||||
let num_blobs = erasure::NUM_DATA + 2;
|
||||
let (mut window, blobs_len) = generate_window(data_len, &blob_recycler, offset, num_blobs);
|
||||
println!("** after-gen:");
|
||||
print_window(&window);
|
||||
assert!(erasure::generate_coding(&blob_recycler, &mut window, offset).is_ok());
|
||||
assert!(
|
||||
erasure::generate_coding(&blob_recycler, &mut window, offset + erasure::NUM_CODED)
|
||||
.is_ok()
|
||||
);
|
||||
assert!(
|
||||
erasure::generate_coding(
|
||||
&blob_recycler,
|
||||
&mut window,
|
||||
offset + (2 * erasure::NUM_CODED)
|
||||
).is_ok()
|
||||
);
|
||||
assert!(
|
||||
erasure::generate_coding(
|
||||
&blob_recycler,
|
||||
&mut window,
|
||||
offset + (3 * erasure::NUM_CODED)
|
||||
).is_ok()
|
||||
);
|
||||
assert!(erasure::generate_coding(&mut window, offset, blobs_len).is_ok());
|
||||
println!("** after-coding:");
|
||||
print_window(&window);
|
||||
let refwindow = window[offset + 1].clone();
|
||||
@@ -402,29 +611,14 @@ mod test {
|
||||
window_l0.write().unwrap().data[0] = 55;
|
||||
println!("** after-nulling:");
|
||||
print_window(&window);
|
||||
assert!(erasure::recover(&blob_recycler, &mut window, offset).is_ok());
|
||||
assert!(erasure::recover(&blob_recycler, &mut window, offset + erasure::NUM_CODED).is_ok());
|
||||
assert!(
|
||||
erasure::recover(
|
||||
&blob_recycler,
|
||||
&mut window,
|
||||
offset + (2 * erasure::NUM_CODED)
|
||||
).is_err()
|
||||
);
|
||||
assert!(
|
||||
erasure::recover(
|
||||
&blob_recycler,
|
||||
&mut window,
|
||||
offset + (3 * erasure::NUM_CODED)
|
||||
).is_ok()
|
||||
);
|
||||
assert!(erasure::recover(&blob_recycler, &mut window, offset, offset + blobs_len).is_ok());
|
||||
println!("** after-restore:");
|
||||
print_window(&window);
|
||||
let window_l = window[offset + 1].clone().unwrap();
|
||||
let ref_l = refwindow.clone().unwrap();
|
||||
assert_eq!(
|
||||
window_l.read().unwrap().data.to_vec(),
|
||||
ref_l.read().unwrap().data.to_vec()
|
||||
window_l.read().unwrap().data()[..data_len],
|
||||
ref_l.read().unwrap().data()[..data_len]
|
||||
);
|
||||
}
|
||||
}
|
||||
|
@@ -10,7 +10,7 @@ use streamer;
|
||||
|
||||
pub struct FetchStage {
|
||||
pub packet_receiver: streamer::PacketReceiver,
|
||||
pub thread_hdl: JoinHandle<()>,
|
||||
pub thread_hdls: Vec<JoinHandle<()>>,
|
||||
}
|
||||
|
||||
impl FetchStage {
|
||||
@@ -18,14 +18,30 @@ impl FetchStage {
|
||||
socket: UdpSocket,
|
||||
exit: Arc<AtomicBool>,
|
||||
packet_recycler: packet::PacketRecycler,
|
||||
) -> Self {
|
||||
Self::new_multi_socket(vec![socket], exit, packet_recycler)
|
||||
}
|
||||
pub fn new_multi_socket(
|
||||
sockets: Vec<UdpSocket>,
|
||||
exit: Arc<AtomicBool>,
|
||||
packet_recycler: packet::PacketRecycler,
|
||||
) -> Self {
|
||||
let (packet_sender, packet_receiver) = channel();
|
||||
let thread_hdl =
|
||||
streamer::receiver(socket, exit.clone(), packet_recycler.clone(), packet_sender);
|
||||
let thread_hdls: Vec<_> = sockets
|
||||
.into_iter()
|
||||
.map(|socket| {
|
||||
streamer::receiver(
|
||||
socket,
|
||||
exit.clone(),
|
||||
packet_recycler.clone(),
|
||||
packet_sender.clone(),
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
|
||||
FetchStage {
|
||||
packet_receiver,
|
||||
thread_hdl,
|
||||
thread_hdls,
|
||||
}
|
||||
}
|
||||
}
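// Standalone sketch (std-only; not the streamer API used above) of the fan-in pattern
// behind `new_multi_socket`: one worker thread per input, all feeding one mpsc channel
// through cloned senders, with the JoinHandles collected so the stage can be joined.
use std::sync::mpsc::{channel, Receiver};
use std::thread::{self, JoinHandle};

fn fan_in(inputs: Vec<u32>) -> (Receiver<u32>, Vec<JoinHandle<()>>) {
    let (sender, receiver) = channel();
    let handles: Vec<_> = inputs
        .into_iter()
        .map(|input| {
            let sender = sender.clone();
            thread::spawn(move || {
                sender.send(input * 10).unwrap();
            })
        })
        .collect();
    (receiver, handles)
}

fn main() {
    let (rx, handles) = fan_in(vec![1, 2, 3]);
    for h in handles {
        h.join().unwrap();
    }
    let mut got: Vec<u32> = rx.iter().collect();
    got.sort();
    assert_eq!(got, vec![10, 20, 30]);
}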
|
||||
|
@@ -10,7 +10,10 @@ pub type Hash = GenericArray<u8, U32>;
|
||||
pub fn hash(val: &[u8]) -> Hash {
|
||||
let mut hasher = Sha256::default();
|
||||
hasher.input(val);
|
||||
hasher.result()
|
||||
|
||||
// At the time of this writing, the sha2 library is stuck on an old version
|
||||
// of generic_array (0.9.0). Decouple ourselves with a clone to our version.
|
||||
GenericArray::clone_from_slice(hasher.result().as_slice())
|
||||
}
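// Minimal sketch (assumes the `Hash` alias and `GenericArray` import shown above): the
// version-bridging trick in `hash` is just "go through a byte slice". Any 32-byte digest
// can be copied into our `Hash` the same way; `to_our_hash` is a hypothetical helper.
fn to_our_hash(digest_bytes: &[u8]) -> Hash {
    assert_eq!(digest_bytes.len(), 32); // clone_from_slice panics on a length mismatch
    GenericArray::clone_from_slice(digest_bytes)
}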
|
||||
|
||||
/// Return the hash of the given hash extended with the given value.
|
||||
|
src/ledger.rs
@@ -1,18 +1,17 @@
|
||||
//! The `ledger` module provides functions for parallel verification of the
|
||||
//! Proof of History ledger.
|
||||
|
||||
use bincode::{deserialize, serialize_into};
|
||||
use entry::{next_entry, Entry};
|
||||
use bincode::{self, deserialize, serialize_into, serialized_size};
|
||||
use entry::Entry;
|
||||
use hash::Hash;
|
||||
use packet;
|
||||
use packet::{SharedBlob, BLOB_DATA_SIZE, BLOB_SIZE};
|
||||
use packet::{self, SharedBlob, BLOB_DATA_SIZE, BLOB_SIZE};
|
||||
use rayon::prelude::*;
|
||||
use std::cmp::min;
|
||||
use std::collections::VecDeque;
|
||||
use std::io::Cursor;
|
||||
use std::mem::size_of;
|
||||
use transaction::Transaction;
|
||||
|
||||
// a Block is a slice of Entries
|
||||
|
||||
pub trait Block {
|
||||
/// Verifies the hashes and counts of a slice of transactions are all consistent.
|
||||
fn verify(&self, start_hash: &Hash) -> bool;
|
||||
@@ -27,112 +26,115 @@ impl Block for [Entry] {
|
||||
}
|
||||
|
||||
fn to_blobs(&self, blob_recycler: &packet::BlobRecycler, q: &mut VecDeque<SharedBlob>) {
|
||||
let mut start = 0;
|
||||
let mut end = 0;
|
||||
while start < self.len() {
|
||||
let mut entries: Vec<Vec<Entry>> = Vec::new();
|
||||
let mut total = 0;
|
||||
for i in &self[start..] {
|
||||
total += size_of::<Transaction>() * i.transactions.len();
|
||||
total += size_of::<Entry>();
|
||||
if total >= BLOB_DATA_SIZE {
|
||||
break;
|
||||
}
|
||||
end += 1;
|
||||
}
|
||||
// See if we need to split the transactions
|
||||
if end <= start {
|
||||
let mut transaction_start = 0;
|
||||
let num_transactions_per_blob = BLOB_DATA_SIZE / size_of::<Transaction>();
|
||||
let total_entry_chunks = (self[end].transactions.len() + num_transactions_per_blob
|
||||
- 1) / num_transactions_per_blob;
|
||||
trace!(
|
||||
"splitting transactions end: {} total_chunks: {}",
|
||||
end,
|
||||
total_entry_chunks
|
||||
);
|
||||
for _ in 0..total_entry_chunks {
|
||||
let transaction_end = min(
|
||||
transaction_start + num_transactions_per_blob,
|
||||
self[end].transactions.len(),
|
||||
);
|
||||
let mut entry = Entry {
|
||||
num_hashes: self[end].num_hashes,
|
||||
id: self[end].id,
|
||||
transactions: self[end].transactions[transaction_start..transaction_end]
|
||||
.to_vec(),
|
||||
};
|
||||
entries.push(vec![entry]);
|
||||
transaction_start = transaction_end;
|
||||
}
|
||||
end += 1;
|
||||
} else {
|
||||
entries.push(self[start..end].to_vec());
|
||||
}
|
||||
|
||||
for entry in entries {
|
||||
let b = blob_recycler.allocate();
|
||||
let pos = {
|
||||
let mut bd = b.write().unwrap();
|
||||
let mut out = Cursor::new(bd.data_mut());
|
||||
serialize_into(&mut out, &entry).expect("failed to serialize output");
|
||||
out.position() as usize
|
||||
};
|
||||
assert!(pos < BLOB_SIZE);
|
||||
b.write().unwrap().set_size(pos);
|
||||
q.push_back(b);
|
||||
}
|
||||
start = end;
|
||||
for entry in self {
|
||||
let blob = blob_recycler.allocate();
|
||||
let pos = {
|
||||
let mut bd = blob.write().unwrap();
|
||||
let mut out = Cursor::new(bd.data_mut());
|
||||
serialize_into(&mut out, &entry).expect("failed to serialize output");
|
||||
out.position() as usize
|
||||
};
|
||||
assert!(pos < BLOB_SIZE);
|
||||
blob.write().unwrap().set_size(pos);
|
||||
q.push_back(blob);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a vector of Entries of length `transaction_batches.len()` from `start_hash` hash, `num_hashes`, and `transaction_batches`.
|
||||
pub fn reconstruct_entries_from_blobs(
|
||||
blobs: VecDeque<SharedBlob>,
|
||||
blob_recycler: &packet::BlobRecycler,
|
||||
) -> bincode::Result<Vec<Entry>> {
|
||||
let mut entries: Vec<Entry> = Vec::with_capacity(blobs.len());
|
||||
|
||||
for blob in blobs {
|
||||
let entry = {
|
||||
let msg = blob.read().unwrap();
|
||||
deserialize(&msg.data()[..msg.meta.size])
|
||||
};
|
||||
blob_recycler.recycle(blob);
|
||||
|
||||
match entry {
|
||||
Ok(entry) => entries.push(entry),
|
||||
Err(err) => {
|
||||
trace!("reconstruct_entry_from_blobs: {}", err);
|
||||
return Err(err);
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(entries)
|
||||
}
|
||||
|
||||
/// Creates the next entries for given transactions, outputs
|
||||
/// updates start_hash to id of last Entry, sets cur_hashes to 0
|
||||
pub fn next_entries_mut(
|
||||
start_hash: &mut Hash,
|
||||
cur_hashes: &mut u64,
|
||||
transactions: Vec<Transaction>,
|
||||
) -> Vec<Entry> {
|
||||
if transactions.is_empty() {
|
||||
vec![Entry::new_mut(start_hash, cur_hashes, transactions)]
|
||||
} else {
|
||||
let mut chunk_len = transactions.len();
|
||||
|
||||
// check for fit, make sure they can be serialized
|
||||
while serialized_size(&Entry {
|
||||
num_hashes: 0,
|
||||
id: Hash::default(),
|
||||
transactions: transactions[0..chunk_len].to_vec(),
|
||||
}).unwrap() > BLOB_DATA_SIZE as u64
|
||||
{
|
||||
chunk_len /= 2;
|
||||
}
|
||||
|
||||
let mut entries = Vec::with_capacity(transactions.len() / chunk_len + 1);
|
||||
|
||||
for chunk in transactions.chunks(chunk_len) {
|
||||
entries.push(Entry::new_mut(start_hash, cur_hashes, chunk.to_vec()));
|
||||
}
|
||||
entries
|
||||
}
|
||||
}
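// Standalone sketch (not in the diff) of the fitting strategy `next_entries_mut` uses
// above: put everything in one chunk, then halve the chunk length until one chunk's
// serialized size fits under the blob limit. `measure` stands in for
// `serialized_size(&Entry { .. })`; the `chunk_len > 1` guard is added here for safety.
fn fit_chunk_len(total: usize, limit: u64, measure: impl Fn(usize) -> u64) -> usize {
    let mut chunk_len = total.max(1);
    while measure(chunk_len) > limit && chunk_len > 1 {
        chunk_len /= 2;
    }
    chunk_len
}

fn main() {
    // If each item serializes to ~300 bytes and the limit is 1_000 bytes,
    // a 10-item batch splits into chunks of 2 (10 -> 5 -> 2).
    assert_eq!(fit_chunk_len(10, 1_000, |n| 300 * n as u64), 2);
}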
|
||||
|
||||
/// Creates the next Entries for given transactions
|
||||
pub fn next_entries(
|
||||
start_hash: &Hash,
|
||||
num_hashes: u64,
|
||||
transaction_batches: Vec<Vec<Transaction>>,
|
||||
cur_hashes: u64,
|
||||
transactions: Vec<Transaction>,
|
||||
) -> Vec<Entry> {
|
||||
let mut id = *start_hash;
|
||||
let mut entries = vec![];
|
||||
for transactions in &transaction_batches {
|
||||
let transactions = transactions.clone();
|
||||
let entry = next_entry(&id, num_hashes, transactions);
|
||||
id = entry.id;
|
||||
entries.push(entry);
|
||||
}
|
||||
entries
|
||||
}
|
||||
|
||||
pub fn reconstruct_entries_from_blobs(blobs: &VecDeque<SharedBlob>) -> Vec<Entry> {
|
||||
let mut entries_to_apply: Vec<Entry> = Vec::new();
|
||||
let mut last_id = Hash::default();
|
||||
for msgs in blobs {
|
||||
let blob = msgs.read().unwrap();
|
||||
let entries: Vec<Entry> = deserialize(&blob.data()[..blob.meta.size]).unwrap();
|
||||
for entry in entries {
|
||||
if entry.id == last_id {
|
||||
if let Some(last_entry) = entries_to_apply.last_mut() {
|
||||
last_entry.transactions.extend(entry.transactions);
|
||||
}
|
||||
} else {
|
||||
last_id = entry.id;
|
||||
entries_to_apply.push(entry);
|
||||
}
|
||||
}
|
||||
//TODO respond back to leader with hash of the state
|
||||
}
|
||||
entries_to_apply
|
||||
let mut num_hashes = cur_hashes;
|
||||
next_entries_mut(&mut id, &mut num_hashes, transactions)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use entry::{next_entry, Entry};
|
||||
use hash::hash;
|
||||
use packet::BlobRecycler;
|
||||
use signature::{KeyPair, KeyPairUtil};
|
||||
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
|
||||
use transaction::Transaction;
|
||||
|
||||
/// Create a vector of Entries of length `transaction_batches.len()`
|
||||
/// from `start_hash` hash, `num_hashes`, and `transaction_batches`.
|
||||
fn next_entries_batched(
|
||||
start_hash: &Hash,
|
||||
cur_hashes: u64,
|
||||
transaction_batches: Vec<Vec<Transaction>>,
|
||||
) -> Vec<Entry> {
|
||||
let mut id = *start_hash;
|
||||
let mut entries = vec![];
|
||||
let mut num_hashes = cur_hashes;
|
||||
|
||||
for transactions in transaction_batches {
|
||||
let mut entry_batch = next_entries_mut(&mut id, &mut num_hashes, transactions);
|
||||
entries.append(&mut entry_batch);
|
||||
}
|
||||
entries
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_verify_slice() {
|
||||
let zero = Hash::default();
|
||||
@@ -140,45 +142,57 @@ mod tests {
|
||||
assert!(vec![][..].verify(&zero)); // base case
|
||||
assert!(vec![Entry::new_tick(0, &zero)][..].verify(&zero)); // singleton case 1
|
||||
assert!(!vec![Entry::new_tick(0, &zero)][..].verify(&one)); // singleton case 2, bad
|
||||
assert!(next_entries(&zero, 0, vec![vec![]; 2])[..].verify(&zero)); // inductive step
|
||||
assert!(next_entries_batched(&zero, 0, vec![vec![]; 2])[..].verify(&zero)); // inductive step
|
||||
|
||||
let mut bad_ticks = next_entries(&zero, 0, vec![vec![]; 2]);
|
||||
let mut bad_ticks = next_entries_batched(&zero, 0, vec![vec![]; 2]);
|
||||
bad_ticks[1].id = one;
|
||||
assert!(!bad_ticks.verify(&zero)); // inductive step, bad
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_entry_to_blobs() {
|
||||
fn test_entries_to_blobs() {
|
||||
let zero = Hash::default();
|
||||
let one = hash(&zero);
|
||||
let keypair = KeyPair::new();
|
||||
let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, one);
|
||||
let transactions = vec![tx0.clone(); 10000];
|
||||
let e0 = Entry::new(&zero, 0, transactions);
|
||||
let transactions = vec![tx0; 10_000];
|
||||
let entries = next_entries(&zero, 0, transactions);
|
||||
|
||||
let entries = vec![e0.clone(); 1];
|
||||
let blob_recycler = BlobRecycler::default();
|
||||
let mut blob_q = VecDeque::new();
|
||||
entries.to_blobs(&blob_recycler, &mut blob_q);
|
||||
|
||||
assert_eq!(reconstruct_entries_from_blobs(&blob_q), entries);
|
||||
assert_eq!(
|
||||
reconstruct_entries_from_blobs(blob_q, &blob_recycler).unwrap(),
|
||||
entries
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_next_entries() {
|
||||
fn test_bad_blobs_attack() {
|
||||
let blob_recycler = BlobRecycler::default();
|
||||
let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
|
||||
let blobs_q = packet::to_blobs(vec![(0, addr)], &blob_recycler).unwrap(); // <-- attack!
|
||||
assert!(reconstruct_entries_from_blobs(blobs_q, &blob_recycler).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_next_entries_batched() {
|
||||
// this also tests next_entries, ugly, but is an easy way to do vec of vec (batch)
|
||||
let mut id = Hash::default();
|
||||
let next_id = hash(&id);
|
||||
let keypair = KeyPair::new();
|
||||
let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, next_id);
|
||||
let transactions = vec![tx0.clone(); 5];
|
||||
|
||||
let transactions = vec![tx0; 5];
|
||||
let transaction_batches = vec![transactions.clone(); 5];
|
||||
let entries0 = next_entries(&id, 0, transaction_batches);
|
||||
let entries0 = next_entries_batched(&id, 0, transaction_batches);
|
||||
|
||||
assert_eq!(entries0.len(), 5);
|
||||
|
||||
let mut entries1 = vec![];
|
||||
for _ in 0..5 {
|
||||
let entry = next_entry(&id, 0, transactions.clone());
|
||||
let entry = next_entry(&id, 1, transactions.clone());
|
||||
id = entry.id;
|
||||
entries1.push(entry);
|
||||
}
|
||||
@@ -190,14 +204,30 @@ mod tests {
|
||||
mod bench {
|
||||
extern crate test;
|
||||
use self::test::Bencher;
|
||||
use hash::hash;
|
||||
use ledger::*;
|
||||
use packet::BlobRecycler;
|
||||
use signature::{KeyPair, KeyPairUtil};
|
||||
use transaction::Transaction;
|
||||
|
||||
#[bench]
|
||||
fn bench_next_entries(bencher: &mut Bencher) {
|
||||
let start_hash = Hash::default();
|
||||
let entries = next_entries(&start_hash, 10_000, vec![vec![]; 8]);
|
||||
fn bench_block_to_blobs_to_block(bencher: &mut Bencher) {
|
||||
let zero = Hash::default();
|
||||
let one = hash(&zero);
|
||||
let keypair = KeyPair::new();
|
||||
let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, one);
|
||||
let transactions = vec![tx0; 10];
|
||||
let entries = next_entries(&zero, 1, transactions);
|
||||
|
||||
let blob_recycler = BlobRecycler::default();
|
||||
bencher.iter(|| {
|
||||
assert!(entries.verify(&start_hash));
|
||||
let mut blob_q = VecDeque::new();
|
||||
entries.to_blobs(&blob_recycler, &mut blob_q);
|
||||
assert_eq!(
|
||||
reconstruct_entries_from_blobs(blob_q, &blob_recycler).unwrap(),
|
||||
entries
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
}
|
||||
|
src/lib.rs
@@ -1,9 +1,20 @@
|
||||
//! The `solana` library implements the Solana high-performance blockchain architecture.
|
||||
//! It includes a full Rust implementation of the architecture (see
|
||||
//! [Server](server/struct.Server.html)) as well as hooks to GPU implementations of its most
|
||||
//! parallelizable components (i.e. [SigVerify](sigverify/index.html)). It also includes
|
||||
//! command-line tools to spin up fullnodes and a Rust library
|
||||
//! (see [ThinClient](thin_client/struct.ThinClient.html)) to interact with them.
|
||||
//!
|
||||
|
||||
#![cfg_attr(feature = "unstable", feature(test))]
|
||||
#[macro_use]
|
||||
pub mod counter;
|
||||
pub mod bank;
|
||||
pub mod banking_stage;
|
||||
pub mod blob_fetch_stage;
|
||||
pub mod budget;
|
||||
pub mod crdt;
|
||||
pub mod data_replicator;
|
||||
pub mod drone;
|
||||
pub mod entry;
|
||||
pub mod entry_writer;
|
||||
#[cfg(feature = "erasure")]
|
||||
@@ -13,6 +24,7 @@ pub mod hash;
|
||||
pub mod ledger;
|
||||
pub mod logger;
|
||||
pub mod mint;
|
||||
pub mod ncp;
|
||||
pub mod packet;
|
||||
pub mod payment_plan;
|
||||
pub mod record_stage;
|
||||
@@ -33,6 +45,7 @@ pub mod timing;
|
||||
pub mod tpu;
|
||||
pub mod transaction;
|
||||
pub mod tvu;
|
||||
pub mod window_stage;
|
||||
pub mod write_stage;
|
||||
extern crate bincode;
|
||||
extern crate byteorder;
|
||||
@@ -46,7 +59,7 @@ extern crate ring;
|
||||
extern crate serde;
|
||||
#[macro_use]
|
||||
extern crate serde_derive;
|
||||
extern crate pnet;
|
||||
extern crate pnet_datalink;
|
||||
extern crate serde_json;
|
||||
extern crate sha2;
|
||||
extern crate untrusted;
|
||||
|
@@ -1,3 +1,6 @@
|
||||
//! The `logger` module provides a setup function for `env_logger`. Its only function,
|
||||
//! `setup()`, may be called multiple times.
|
||||
|
||||
use std::sync::{Once, ONCE_INIT};
|
||||
extern crate env_logger;
|
||||
|
||||
|
@@ -1,3 +1,5 @@
|
||||
//! The `ncp` module implements the network control plane.
|
||||
|
||||
use crdt;
|
||||
use packet;
|
||||
use result::Result;
|
||||
@@ -8,22 +10,22 @@ use std::sync::{Arc, RwLock};
|
||||
use std::thread::JoinHandle;
|
||||
use streamer;
|
||||
|
||||
pub struct DataReplicator {
|
||||
pub struct Ncp {
|
||||
pub thread_hdls: Vec<JoinHandle<()>>,
|
||||
}
|
||||
|
||||
impl DataReplicator {
|
||||
impl Ncp {
|
||||
pub fn new(
|
||||
crdt: Arc<RwLock<crdt::Crdt>>,
|
||||
window: Arc<RwLock<Vec<Option<packet::SharedBlob>>>>,
|
||||
gossip_listen_socket: UdpSocket,
|
||||
gossip_send_socket: UdpSocket,
|
||||
exit: Arc<AtomicBool>,
|
||||
) -> Result<DataReplicator> {
|
||||
) -> Result<Ncp> {
|
||||
let blob_recycler = packet::BlobRecycler::default();
|
||||
let (request_sender, request_receiver) = channel();
|
||||
trace!(
|
||||
"DataReplicator: id: {:?}, listening on: {:?}",
|
||||
"Ncp: id: {:?}, listening on: {:?}",
|
||||
&crdt.read().unwrap().me[..4],
|
||||
gossip_listen_socket.local_addr().unwrap()
|
||||
);
|
||||
@@ -50,26 +52,29 @@ impl DataReplicator {
|
||||
);
|
||||
let t_gossip = crdt::Crdt::gossip(crdt.clone(), blob_recycler, response_sender, exit);
|
||||
let thread_hdls = vec![t_receiver, t_responder, t_listen, t_gossip];
|
||||
Ok(DataReplicator { thread_hdls })
|
||||
Ok(Ncp { thread_hdls })
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crdt::{Crdt, TestNode};
|
||||
use data_replicator::DataReplicator;
|
||||
use ncp::Ncp;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::{Arc, RwLock};
|
||||
|
||||
#[test]
|
||||
#[ignore]
|
||||
// test that stage will exit when flag is set
|
||||
// TODO: Troubleshoot Docker-based coverage build and re-enable
|
||||
// this test. It is probably failing due to too many threads.
|
||||
fn test_exit() {
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let tn = TestNode::new();
|
||||
let crdt = Crdt::new(tn.data.clone());
|
||||
let c = Arc::new(RwLock::new(crdt));
|
||||
let w = Arc::new(RwLock::new(vec![]));
|
||||
let d = DataReplicator::new(
|
||||
let d = Ncp::new(
|
||||
c.clone(),
|
||||
w,
|
||||
tn.sockets.gossip,
|
@@ -1,6 +1,7 @@
|
||||
//! The `packet` module defines data structures and methods to pull data from the network.
|
||||
use bincode::{deserialize, serialize};
|
||||
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
|
||||
use counter::Counter;
|
||||
use result::{Error, Result};
|
||||
use serde::Serialize;
|
||||
use signature::PublicKey;
|
||||
@@ -9,7 +10,9 @@ use std::fmt;
|
||||
use std::io;
|
||||
use std::mem::size_of;
|
||||
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket};
|
||||
use std::sync::atomic::AtomicUsize;
|
||||
use std::sync::{Arc, Mutex, RwLock};
|
||||
use std::time::Instant;
|
||||
|
||||
pub type SharedPackets = Arc<RwLock<Packets>>;
|
||||
pub type SharedBlob = Arc<RwLock<Blob>>;
|
||||
@@ -18,7 +21,7 @@ pub type BlobRecycler = Recycler<Blob>;
|
||||
|
||||
pub const NUM_PACKETS: usize = 1024 * 8;
|
||||
pub const BLOB_SIZE: usize = 64 * 1024;
|
||||
pub const BLOB_DATA_SIZE: usize = BLOB_SIZE - BLOB_ID_END;
|
||||
pub const BLOB_DATA_SIZE: usize = BLOB_SIZE - BLOB_HEADER_SIZE;
|
||||
pub const PACKET_DATA_SIZE: usize = 256;
|
||||
pub const NUM_BLOBS: usize = (NUM_PACKETS * PACKET_DATA_SIZE) / BLOB_SIZE;
|
||||
|
||||
@@ -26,6 +29,7 @@ pub const NUM_BLOBS: usize = (NUM_PACKETS * PACKET_DATA_SIZE) / BLOB_SIZE;
|
||||
#[repr(C)]
|
||||
pub struct Meta {
|
||||
pub size: usize,
|
||||
pub num_retransmits: u64,
|
||||
pub addr: [u16; 8],
|
||||
pub port: u16,
|
||||
pub v6: bool,
|
||||
@@ -169,6 +173,7 @@ impl<T: Default> Recycler<T> {
|
||||
|
||||
impl Packets {
|
||||
fn run_read_from(&mut self, socket: &UdpSocket) -> Result<usize> {
|
||||
static mut COUNTER: Counter = create_counter!("packets", 10);
|
||||
self.packets.resize(NUM_PACKETS, Packet::default());
|
||||
let mut i = 0;
|
||||
//DOCUMENTED SIDE-EFFECT
|
||||
@@ -178,11 +183,13 @@ impl Packets {
|
||||
// * read until it fails
|
||||
// * set it back to blocking before returning
|
||||
socket.set_nonblocking(false)?;
|
||||
let mut start = Instant::now();
|
||||
for p in &mut self.packets {
|
||||
p.meta.size = 0;
|
||||
trace!("receiving on {}", socket.local_addr().unwrap());
|
||||
match socket.recv_from(&mut p.data) {
|
||||
Err(_) if i > 0 => {
|
||||
inc_counter!(COUNTER, i, start);
|
||||
debug!("got {:?} messages on {}", i, socket.local_addr().unwrap());
|
||||
break;
|
||||
}
|
||||
@@ -194,6 +201,7 @@ impl Packets {
|
||||
p.meta.size = nrecv;
|
||||
p.meta.set_addr(&from);
|
||||
if i == 0 {
|
||||
start = Instant::now();
|
||||
socket.set_nonblocking(true)?;
|
||||
}
|
||||
}
|
||||
@@ -217,9 +225,13 @@ impl Packets {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn to_packets<T: Serialize>(r: &PacketRecycler, xs: Vec<T>) -> Vec<SharedPackets> {
|
||||
pub fn to_packets_chunked<T: Serialize>(
|
||||
r: &PacketRecycler,
|
||||
xs: Vec<T>,
|
||||
chunks: usize,
|
||||
) -> Vec<SharedPackets> {
|
||||
let mut out = vec![];
|
||||
for x in xs.chunks(NUM_PACKETS) {
|
||||
for x in xs.chunks(chunks) {
|
||||
let p = r.allocate();
|
||||
p.write()
|
||||
.unwrap()
|
||||
@@ -236,6 +248,10 @@ pub fn to_packets<T: Serialize>(r: &PacketRecycler, xs: Vec<T>) -> Vec<SharedPac
|
||||
return out;
|
||||
}
|
||||
|
||||
pub fn to_packets<T: Serialize>(r: &PacketRecycler, xs: Vec<T>) -> Vec<SharedPackets> {
|
||||
to_packets_chunked(r, xs, NUM_PACKETS)
|
||||
}
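// Illustration (not part of the diff): `to_packets_chunked` above is the generic
// "split a Vec into fixed-size batches" pattern, and `to_packets` is the same call with
// the batch size pinned to NUM_PACKETS. `chunked` is a hypothetical standalone version.
fn chunked<T: Clone>(xs: &[T], chunk_size: usize) -> Vec<Vec<T>> {
    xs.chunks(chunk_size).map(|c| c.to_vec()).collect()
}

fn main() {
    let batches = chunked(&(0..10).collect::<Vec<u32>>(), 4);
    assert_eq!(batches.len(), 3); // 4 + 4 + 2 items
}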
|
||||
|
||||
pub fn to_blob<T: Serialize>(
|
||||
resp: T,
|
||||
rsp_addr: SocketAddr,
|
||||
@@ -246,10 +262,6 @@ pub fn to_blob<T: Serialize>(
|
||||
let mut b = blob.write().unwrap();
|
||||
let v = serialize(&resp)?;
|
||||
let len = v.len();
|
||||
// TODO: we are not using .data_mut() method here because
|
||||
// the raw bytes are being serialized and sent, this isn't the
|
||||
// right interface, and we should create a separate path for
|
||||
// sending request responses in the RPU
|
||||
assert!(len < BLOB_SIZE);
|
||||
b.data[..len].copy_from_slice(&v);
|
||||
b.meta.size = len;
|
||||
@@ -271,6 +283,17 @@ pub fn to_blobs<T: Serialize>(
|
||||
|
||||
const BLOB_INDEX_END: usize = size_of::<u64>();
|
||||
const BLOB_ID_END: usize = BLOB_INDEX_END + size_of::<usize>() + size_of::<PublicKey>();
|
||||
const BLOB_FLAGS_END: usize = BLOB_ID_END + size_of::<u32>();
|
||||
const BLOB_SIZE_END: usize = BLOB_FLAGS_END + size_of::<u64>();
|
||||
|
||||
macro_rules! align {
|
||||
($x:expr, $align:expr) => {
|
||||
$x + ($align - 1) & !($align - 1)
|
||||
};
|
||||
}
|
||||
|
||||
pub const BLOB_FLAG_IS_CODING: u32 = 0x1;
|
||||
pub const BLOB_HEADER_SIZE: usize = align!(BLOB_SIZE_END, 64);
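// Worked example (illustration only): align!(x, 64) rounds x up to the next multiple of
// 64, so BLOB_HEADER_SIZE is the first 64-byte boundary at or past BLOB_SIZE_END. Note
// that `+` binds tighter than `&` in Rust, so the macro body parses as
// `($x + ($align - 1)) & !($align - 1)`.
fn main() {
    macro_rules! align {
        ($x:expr, $align:expr) => {
            $x + ($align - 1) & !($align - 1)
        };
    }
    assert_eq!(align!(0usize, 64), 0);
    assert_eq!(align!(1usize, 64), 64);
    assert_eq!(align!(64usize, 64), 64);
    assert_eq!(align!(65usize, 64), 128);
}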
|
||||
|
||||
impl Blob {
|
||||
pub fn get_index(&self) -> Result<u64> {
|
||||
@@ -297,14 +320,51 @@ impl Blob {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn get_flags(&self) -> Result<u32> {
|
||||
let mut rdr = io::Cursor::new(&self.data[BLOB_ID_END..BLOB_FLAGS_END]);
|
||||
let r = rdr.read_u32::<LittleEndian>()?;
|
||||
Ok(r)
|
||||
}
|
||||
|
||||
pub fn set_flags(&mut self, ix: u32) -> Result<()> {
|
||||
let mut wtr = vec![];
|
||||
wtr.write_u32::<LittleEndian>(ix)?;
|
||||
self.data[BLOB_ID_END..BLOB_FLAGS_END].clone_from_slice(&wtr);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn is_coding(&self) -> bool {
|
||||
return (self.get_flags().unwrap() & BLOB_FLAG_IS_CODING) != 0;
|
||||
}
|
||||
|
||||
pub fn set_coding(&mut self) -> Result<()> {
|
||||
let flags = self.get_flags().unwrap();
|
||||
self.set_flags(flags | BLOB_FLAG_IS_CODING)
|
||||
}
|
||||
|
||||
pub fn get_data_size(&self) -> Result<u64> {
|
||||
let mut rdr = io::Cursor::new(&self.data[BLOB_FLAGS_END..BLOB_SIZE_END]);
|
||||
let r = rdr.read_u64::<LittleEndian>()?;
|
||||
Ok(r)
|
||||
}
|
||||
|
||||
pub fn set_data_size(&mut self, ix: u64) -> Result<()> {
|
||||
let mut wtr = vec![];
|
||||
wtr.write_u64::<LittleEndian>(ix)?;
|
||||
self.data[BLOB_FLAGS_END..BLOB_SIZE_END].clone_from_slice(&wtr);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn data(&self) -> &[u8] {
|
||||
&self.data[BLOB_ID_END..]
|
||||
&self.data[BLOB_HEADER_SIZE..]
|
||||
}
|
||||
pub fn data_mut(&mut self) -> &mut [u8] {
|
||||
&mut self.data[BLOB_ID_END..]
|
||||
&mut self.data[BLOB_HEADER_SIZE..]
|
||||
}
|
||||
pub fn set_size(&mut self, size: usize) {
|
||||
self.meta.size = size + BLOB_ID_END;
|
||||
let new_size = size + BLOB_HEADER_SIZE;
|
||||
self.meta.size = new_size;
|
||||
self.set_data_size(new_size as u64).unwrap();
|
||||
}
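// Minimal standalone sketch (mirrors the accessors above; not part of the diff): each
// header field is a little-endian integer at a fixed byte range inside `data`, so a
// getter is a Cursor read and a setter serializes into a Vec and copies it back.
// `get_u32_at`/`set_u32_at` are hypothetical helpers; the byteorder crate is assumed.
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use std::io;

fn get_u32_at(data: &[u8], start: usize) -> io::Result<u32> {
    let mut rdr = io::Cursor::new(&data[start..start + 4]);
    rdr.read_u32::<LittleEndian>()
}

fn set_u32_at(data: &mut [u8], start: usize, v: u32) -> io::Result<()> {
    let mut wtr = vec![];
    wtr.write_u32::<LittleEndian>(v)?;
    data[start..start + 4].copy_from_slice(&wtr);
    Ok(())
}

fn main() -> io::Result<()> {
    let mut data = [0u8; 16];
    set_u32_at(&mut data, 8, 0x1)?; // e.g. a flags word like BLOB_FLAG_IS_CODING
    assert_eq!(get_u32_at(&data, 8)?, 0x1);
    Ok(())
}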
|
||||
pub fn recv_from(re: &BlobRecycler, socket: &UdpSocket) -> Result<VecDeque<SharedBlob>> {
|
||||
let mut v = VecDeque::new();
|
||||
|
@@ -6,18 +6,27 @@
|
||||
use chrono::prelude::*;
|
||||
use signature::PublicKey;
|
||||
|
||||
/// The types of events a payment plan can process.
|
||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
|
||||
pub enum Witness {
|
||||
/// The current time.
|
||||
Timestamp(DateTime<Utc>),
|
||||
|
||||
/// A signature from PublicKey.
|
||||
Signature(PublicKey),
|
||||
}
|
||||
|
||||
/// Some amount of tokens that should be sent to the `to` `PublicKey`.
|
||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
|
||||
pub struct Payment {
|
||||
/// Amount to be paid.
|
||||
pub tokens: i64,
|
||||
|
||||
/// The `PublicKey` that `tokens` should be paid to.
|
||||
pub to: PublicKey,
|
||||
}
|
||||
|
||||
/// Interface to smart contracts.
|
||||
pub trait PaymentPlan {
|
||||
/// Return Payment if the payment plan requires no additional Witnesses.
|
||||
fn final_payment(&self) -> Option<Payment>;
|
||||
|
@@ -1,8 +1,8 @@
|
||||
//! The `record_stage` module provides an object for generating a Proof of History.
|
||||
//! It records Event items on behalf of its users. It continuously generates
|
||||
//! new hashes, only stopping to check if it has been sent an Event item. It
|
||||
//! tags each Event with an Entry, and sends it back. The Entry includes the
|
||||
//! Event, the latest hash, and the number of hashes since the last transaction.
|
||||
//! It records Transaction items on behalf of its users. It continuously generates
|
||||
//! new hashes, only stopping to check if it has been sent a Transaction item. It
|
||||
//! tags each Transaction with an Entry, and sends it back. The Entry includes the
|
||||
//! Transaction, the latest hash, and the number of hashes since the last transaction.
|
||||
//! The resulting stream of entries represents ordered transactions in time.
|
||||
|
||||
use entry::Entry;
|
||||
@@ -16,7 +16,7 @@ use transaction::Transaction;
|
||||
#[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))]
|
||||
pub enum Signal {
|
||||
Tick,
|
||||
Events(Vec<Transaction>),
|
||||
Transactions(Vec<Transaction>),
|
||||
}
|
||||
|
||||
pub struct RecordStage {
|
||||
@@ -25,7 +25,7 @@ pub struct RecordStage {
|
||||
}
|
||||
|
||||
impl RecordStage {
|
||||
/// A background thread that will continue tagging received Event messages and
|
||||
/// A background thread that will continue tagging received Transaction messages and
|
||||
/// sending back Entry messages until either the receiver or sender channel is closed.
|
||||
pub fn new(signal_receiver: Receiver<Signal>, start_hash: &Hash) -> Self {
|
||||
let (entry_sender, entry_receiver) = channel();
|
||||
@@ -85,13 +85,20 @@ impl RecordStage {
|
||||
recorder: &mut Recorder,
|
||||
sender: &Sender<Entry>,
|
||||
) -> Result<(), ()> {
|
||||
let txs = if let Signal::Events(txs) = signal {
|
||||
let txs = if let Signal::Transactions(txs) = signal {
|
||||
txs
|
||||
} else {
|
||||
vec![]
|
||||
};
|
||||
let entry = recorder.record(txs);
|
||||
sender.send(entry).map_err(|_| ())
|
||||
let entries = recorder.record(txs);
|
||||
let mut result = Ok(());
|
||||
for entry in entries {
|
||||
result = sender.send(entry).map_err(|_| ());
|
||||
if result.is_err() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
result
|
||||
}
|
||||
|
||||
fn process_signals(
|
||||
@@ -180,7 +187,9 @@ mod tests {
|
||||
let bob_pubkey = KeyPair::new().pubkey();
|
||||
let tx0 = Transaction::new(&alice_keypair, bob_pubkey, 1, zero);
|
||||
let tx1 = Transaction::new(&alice_keypair, bob_pubkey, 2, zero);
|
||||
tx_sender.send(Signal::Events(vec![tx0, tx1])).unwrap();
|
||||
tx_sender
|
||||
.send(Signal::Transactions(vec![tx0, tx1]))
|
||||
.unwrap();
|
||||
drop(tx_sender);
|
||||
let entries: Vec<_> = record_stage.entry_receiver.iter().collect();
|
||||
assert_eq!(entries.len(), 1);
|
||||
|
@@ -1,8 +1,9 @@
|
||||
//! The `recorder` module provides an object for generating a Proof of History.
|
||||
//! It records Event items on behalf of its users.
|
||||
//! It records Transaction items on behalf of its users.
|
||||
|
||||
use entry::Entry;
|
||||
use hash::{hash, Hash};
|
||||
use ledger::next_entries_mut;
|
||||
use std::time::{Duration, Instant};
|
||||
use transaction::Transaction;
|
||||
|
||||
@@ -26,15 +27,19 @@ impl Recorder {
|
||||
self.num_hashes += 1;
|
||||
}
|
||||
|
||||
pub fn record(&mut self, transactions: Vec<Transaction>) -> Entry {
|
||||
Entry::new_mut(&mut self.last_hash, &mut self.num_hashes, transactions)
|
||||
pub fn record(&mut self, transactions: Vec<Transaction>) -> Vec<Entry> {
|
||||
next_entries_mut(&mut self.last_hash, &mut self.num_hashes, transactions)
|
||||
}
|
||||
|
||||
pub fn tick(&mut self, start_time: Instant, tick_duration: Duration) -> Option<Entry> {
|
||||
if start_time.elapsed() > tick_duration * (self.num_ticks + 1) {
|
||||
// TODO: don't let this overflow u32
|
||||
self.num_ticks += 1;
|
||||
Some(self.record(vec![]))
|
||||
Some(Entry::new_mut(
|
||||
&mut self.last_hash,
|
||||
&mut self.num_hashes,
|
||||
vec![],
|
||||
))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
|
@@ -15,23 +15,21 @@ pub struct ReplicateStage {
|
||||
}
|
||||
|
||||
impl ReplicateStage {
|
||||
/// Process verified blobs, already in order
|
||||
/// Process entry blobs, already in order
|
||||
fn replicate_requests(
|
||||
bank: &Arc<Bank>,
|
||||
verified_receiver: &streamer::BlobReceiver,
|
||||
blob_receiver: &streamer::BlobReceiver,
|
||||
blob_recycler: &packet::BlobRecycler,
|
||||
) -> Result<()> {
|
||||
let timer = Duration::new(1, 0);
|
||||
let blobs = verified_receiver.recv_timeout(timer)?;
|
||||
let entries = ledger::reconstruct_entries_from_blobs(&blobs);
|
||||
let blobs = blob_receiver.recv_timeout(timer)?;
|
||||
let blobs_len = blobs.len();
|
||||
let entries = ledger::reconstruct_entries_from_blobs(blobs, &blob_recycler)?;
|
||||
let res = bank.process_entries(entries);
|
||||
if res.is_err() {
|
||||
error!("process_entries {} {:?}", blobs.len(), res);
|
||||
error!("process_entries {} {:?}", blobs_len, res);
|
||||
}
|
||||
res?;
|
||||
for blob in blobs {
|
||||
blob_recycler.recycle(blob);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
@@ -43,7 +43,7 @@ impl RequestStage {
|
||||
) -> Result<()> {
|
||||
let (batch, batch_len) = streamer::recv_batch(packet_receiver)?;
|
||||
|
||||
info!(
|
||||
debug!(
|
||||
"@{:?} request_stage: processing: {}",
|
||||
timing::timestamp(),
|
||||
batch_len
|
||||
@@ -70,7 +70,7 @@ impl RequestStage {
|
||||
}
|
||||
let total_time_s = timing::duration_as_s(&proc_start.elapsed());
|
||||
let total_time_ms = timing::duration_as_ms(&proc_start.elapsed());
|
||||
info!(
|
||||
debug!(
|
||||
"@{:?} done process batches: {} time: {:?}ms reqs: {} reqs/s: {}",
|
||||
timing::timestamp(),
|
||||
batch_len,
|
||||
|
src/rpu.rs
@@ -1,5 +1,27 @@
|
||||
//! The `rpu` module implements the Request Processing Unit, a
|
||||
//! 5-stage transaction processing pipeline in software.
|
||||
//! 3-stage transaction processing pipeline in software. It listens
|
||||
//! for `Request` messages from clients and replies with `Response`
|
||||
//! messages.
|
||||
//!
|
||||
//! ```text
|
||||
//! .------.
|
||||
//! | Bank |
|
||||
//! `---+--`
|
||||
//! |
|
||||
//! .------------------|-------------------.
|
||||
//! | RPU | |
|
||||
//! | v |
|
||||
//! .---------. | .-------. .---------. .---------. | .---------.
|
||||
//! | Alice |--->| | | | | +---->| Alice |
|
||||
//! `---------` | | Fetch | | Request | | Respond | | `---------`
|
||||
//! | | Stage |->| Stage |->| Stage | |
|
||||
//! .---------. | | | | | | | | .---------.
|
||||
//! | Bob |--->| | | | | +---->| Bob |
|
||||
//! `---------` | `-------` `---------` `---------` | `---------`
|
||||
//! | |
|
||||
//! | |
|
||||
//! `--------------------------------------`
|
||||
//! ```
|
||||
|
||||
use bank::Bank;
|
||||
use packet;
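// Minimal sketch (std-only; hypothetical types, not the RPU's actual stages) of the
// 3-stage pipeline drawn above: each stage owns a thread, reads from the previous
// stage's channel, and sends into the next one -- Fetch -> Request -> Respond.
use std::sync::mpsc::channel;
use std::thread;

fn main() {
    let (fetch_tx, fetch_rx) = channel();
    let (req_tx, req_rx) = channel();
    let (resp_tx, resp_rx) = channel();

    // Fetch stage: pull raw input (numbers here) off the wire and forward it.
    thread::spawn(move || {
        for x in 1..=3 {
            fetch_tx.send(x).unwrap();
        }
    });
    // Request stage: process each item against some shared state (here, double it).
    thread::spawn(move || {
        for x in fetch_rx {
            req_tx.send(x * 2).unwrap();
        }
    });
    // Respond stage: format a reply to send back to the client.
    thread::spawn(move || {
        for x in req_rx {
            resp_tx.send(format!("response: {}", x)).unwrap();
        }
    });

    let replies: Vec<String> = resp_rx.iter().collect();
    assert_eq!(replies, vec!["response: 2", "response: 4", "response: 6"]);
}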
|
||||
|
src/server.rs
@@ -2,7 +2,7 @@
|
||||
|
||||
use bank::Bank;
|
||||
use crdt::{Crdt, ReplicatedData};
|
||||
use data_replicator::DataReplicator;
|
||||
use ncp::Ncp;
|
||||
use packet;
|
||||
use rpu::Rpu;
|
||||
use std::io::Write;
|
||||
@@ -20,6 +20,30 @@ pub struct Server {
|
||||
}
|
||||
|
||||
impl Server {
|
||||
/// Create a server instance acting as a leader.
|
||||
///
|
||||
/// ```text
|
||||
/// .---------------------.
|
||||
/// | Leader |
|
||||
/// | |
|
||||
/// .--------. | .-----. |
|
||||
/// | |---->| | |
|
||||
/// | Client | | | RPU | |
|
||||
/// | |<----| | |
|
||||
/// `----+---` | `-----` |
|
||||
/// | | ^ |
|
||||
/// | | | |
|
||||
/// | | .--+---. |
|
||||
/// | | | Bank | |
|
||||
/// | | `------` |
|
||||
/// | | ^ |
|
||||
/// | | | | .------------.
|
||||
/// | | .--+--. .-----. | | |
|
||||
/// `-------->| TPU +-->| NCP +------>| Validators |
|
||||
/// | `-----` `-----` | | |
|
||||
/// | | `------------`
|
||||
/// `---------------------`
|
||||
/// ```
|
||||
pub fn new_leader<W: Write + Send + 'static>(
|
||||
bank: Bank,
|
||||
tick_duration: Option<Duration>,
|
||||
@@ -51,14 +75,14 @@ impl Server {
|
||||
let crdt = Arc::new(RwLock::new(Crdt::new(me)));
|
||||
let window = streamer::default_window();
|
||||
let gossip_send_socket = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
|
||||
let data_replicator = DataReplicator::new(
|
||||
let ncp = Ncp::new(
|
||||
crdt.clone(),
|
||||
window.clone(),
|
||||
gossip_socket,
|
||||
gossip_send_socket,
|
||||
exit.clone(),
|
||||
).expect("DataReplicator::new");
|
||||
thread_hdls.extend(data_replicator.thread_hdls);
|
||||
).expect("Ncp::new");
|
||||
thread_hdls.extend(ncp.thread_hdls);
|
||||
|
||||
let t_broadcast = streamer::broadcaster(
|
||||
broadcast_socket,
|
||||
@@ -72,29 +96,109 @@ impl Server {
|
||||
|
||||
Server { thread_hdls }
|
||||
}
|
||||
|
||||
/// Create a server instance acting as a validator.
|
||||
///
|
||||
/// ```text
|
||||
/// .-------------------------------.
|
||||
/// | Validator |
|
||||
/// | |
|
||||
/// .--------. | .-----. |
|
||||
/// | |-------------->| | |
|
||||
/// | Client | | | RPU | |
|
||||
/// | |<--------------| | |
|
||||
/// `--------` | `-----` |
|
||||
/// | ^ |
|
||||
/// | | |
|
||||
/// | .--+---. |
|
||||
/// | | Bank | |
|
||||
/// | `------` |
|
||||
/// | ^ |
|
||||
/// .--------. | | | .------------.
|
||||
/// | | | .--+--. | | |
|
||||
/// | Leader |<------------->| TVU +<--------------->| |
|
||||
/// | | | `-----` | | Validators |
|
||||
/// | | | ^ | | |
|
||||
/// | | | | | | |
|
||||
/// | | | .--+--. | | |
|
||||
/// | |<------------->| NCP +<--------------->| |
|
||||
/// | | | `-----` | | |
|
||||
/// `--------` | | `------------`
|
||||
/// `-------------------------------`
|
||||
/// ```
|
||||
pub fn new_validator(
|
||||
bank: Bank,
|
||||
me: ReplicatedData,
|
||||
requests_socket: UdpSocket,
|
||||
respond_socket: UdpSocket,
|
||||
replicate_socket: UdpSocket,
|
||||
gossip_socket: UdpSocket,
|
||||
leader_repl_data: ReplicatedData,
|
||||
gossip_listen_socket: UdpSocket,
|
||||
repair_socket: UdpSocket,
|
||||
entry_point: ReplicatedData,
|
||||
exit: Arc<AtomicBool>,
|
||||
) -> Self {
|
||||
let bank = Arc::new(bank);
|
||||
let mut thread_hdls = vec![];
|
||||
let rpu = Rpu::new(bank.clone(), requests_socket, respond_socket, exit.clone());
|
||||
thread_hdls.extend(rpu.thread_hdls);
|
||||
|
||||
let crdt = Arc::new(RwLock::new(Crdt::new(me)));
|
||||
crdt.write()
|
||||
.expect("'crdt' write lock before insert() in pub fn replicate")
|
||||
.insert(&entry_point);
|
||||
let window = streamer::default_window();
|
||||
let gossip_send_socket = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
|
||||
let retransmit_socket = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
|
||||
let ncp = Ncp::new(
|
||||
crdt.clone(),
|
||||
window.clone(),
|
||||
gossip_listen_socket,
|
||||
gossip_send_socket,
|
||||
exit.clone(),
|
||||
).expect("Ncp::new");
|
||||
|
||||
let tvu = Tvu::new(
|
||||
bank.clone(),
|
||||
me,
|
||||
gossip_socket,
|
||||
crdt.clone(),
|
||||
window.clone(),
|
||||
replicate_socket,
|
||||
leader_repl_data,
|
||||
repair_socket,
|
||||
retransmit_socket,
|
||||
exit.clone(),
|
||||
);
|
||||
thread_hdls.extend(tvu.thread_hdls);
|
||||
thread_hdls.extend(ncp.thread_hdls);
|
||||
Server { thread_hdls }
|
||||
}
|
||||
}
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use bank::Bank;
|
||||
use crdt::TestNode;
|
||||
use mint::Mint;
|
||||
use server::Server;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::Arc;
|
||||
#[test]
|
||||
fn validator_exit() {
|
||||
let tn = TestNode::new();
|
||||
let alice = Mint::new(10_000);
|
||||
let bank = Bank::new(&alice);
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let v = Server::new_validator(
|
||||
bank,
|
||||
tn.data.clone(),
|
||||
tn.sockets.requests,
|
||||
tn.sockets.respond,
|
||||
tn.sockets.replicate,
|
||||
tn.sockets.gossip,
|
||||
tn.sockets.repair,
|
||||
tn.data,
|
||||
exit.clone(),
|
||||
);
|
||||
exit.store(true, Ordering::Relaxed);
|
||||
for t in v.thread_hdls {
|
||||
t.join().unwrap();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -56,9 +56,8 @@ pub struct GenKeys {
|
||||
}
|
||||
|
||||
impl GenKeys {
|
||||
pub fn new(seed: &[u8]) -> GenKeys {
|
||||
let seed32: Vec<_> = seed.iter().map(|&x| x as u32).collect();
|
||||
let rng = ChaChaRng::from_seed(&seed32);
|
||||
pub fn new(seed: [u8; 32]) -> GenKeys {
|
||||
let rng = ChaChaRng::from_seed(seed);
|
||||
GenKeys {
|
||||
generator: RefCell::new(rng),
|
||||
}
|
||||
@@ -68,7 +67,7 @@ impl GenKeys {
|
||||
KeyPair::generate_pkcs8(self).unwrap().to_vec()
|
||||
}
|
||||
|
||||
pub fn gen_n_seeds(&self, n: i64) -> Vec<[u8; 16]> {
|
||||
pub fn gen_n_seeds(&self, n: i64) -> Vec<[u8; 32]> {
|
||||
let mut rng = self.generator.borrow_mut();
|
||||
(0..n).map(|_| rng.gen()).collect()
|
||||
}
|
||||
@@ -77,7 +76,7 @@ impl GenKeys {
|
||||
self.gen_n_seeds(n)
|
||||
.into_par_iter()
|
||||
.map(|seed| {
|
||||
let pkcs8 = GenKeys::new(&seed).new_key();
|
||||
let pkcs8 = GenKeys::new(seed).new_key();
|
||||
KeyPair::from_pkcs8(untrusted::Input::from(&pkcs8)).unwrap()
|
||||
})
|
||||
.collect()
|
||||
@@ -87,7 +86,7 @@ impl GenKeys {
|
||||
impl SecureRandom for GenKeys {
|
||||
fn fill(&self, dest: &mut [u8]) -> Result<(), Unspecified> {
|
||||
let mut rng = self.generator.borrow_mut();
|
||||
rng.fill_bytes(dest);
|
||||
rng.fill(dest);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -99,17 +98,17 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_new_key_is_deterministic() {
|
||||
let seed = [1, 2, 3, 4];
|
||||
let rng0 = GenKeys::new(&seed);
|
||||
let rng1 = GenKeys::new(&seed);
|
||||
let seed = [0u8; 32];
|
||||
let rng0 = GenKeys::new(seed);
|
||||
let rng1 = GenKeys::new(seed);
|
||||
|
||||
for _ in 0..100 {
|
||||
assert_eq!(rng0.new_key(), rng1.new_key());
|
||||
}
|
||||
}
|
||||
|
||||
fn gen_n_pubkeys(seed: &[u8], n: i64) -> HashSet<PublicKey> {
|
||||
GenKeys::new(&seed)
|
||||
fn gen_n_pubkeys(seed: [u8; 32], n: i64) -> HashSet<PublicKey> {
|
||||
GenKeys::new(seed)
|
||||
.gen_n_keypairs(n)
|
||||
.into_iter()
|
||||
.map(|x| x.pubkey())
|
||||
@@ -118,8 +117,8 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_gen_n_pubkeys_deterministic() {
|
||||
let seed = [1, 2, 3, 4];
|
||||
assert_eq!(gen_n_pubkeys(&seed, 50), gen_n_pubkeys(&seed, 50));
|
||||
let seed = [0u8; 32];
|
||||
assert_eq!(gen_n_pubkeys(seed, 50), gen_n_pubkeys(seed, 50));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -132,8 +131,7 @@ mod bench {
|
||||
|
||||
#[bench]
|
||||
fn bench_gen_keys(b: &mut Bencher) {
|
||||
let seed: &[_] = &[1, 2, 3, 4];
|
||||
let rnd = GenKeys::new(seed);
|
||||
let rnd = GenKeys::new([0u8; 32]);
|
||||
b.iter(|| rnd.gen_n_keypairs(1000));
|
||||
}
|
||||
}
|
||||
|
@@ -1,5 +1,14 @@
|
||||
//! The `sigverify` module provides digital signature verification functions.
|
||||
//! By default, signatures are verified in parallel using all available CPU
|
||||
//! cores. When `--features=cuda` is enabled, signature verification is
|
||||
//! offloaded to the GPU.
|
||||
//!
|
||||
|
||||
use counter::Counter;
|
||||
use packet::{Packet, SharedPackets};
|
||||
use std::mem::size_of;
|
||||
use std::sync::atomic::AtomicUsize;
|
||||
use std::time::Instant;
|
||||
use transaction::{PUB_KEY_OFFSET, SIGNED_DATA_OFFSET, SIG_OFFSET};
|
||||
|
||||
pub const TX_OFFSET: usize = 0;
|
||||
@@ -61,8 +70,11 @@ fn batch_size(batches: &Vec<SharedPackets>) -> usize {
|
||||
#[cfg(not(feature = "cuda"))]
|
||||
pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
|
||||
use rayon::prelude::*;
|
||||
static mut COUNTER: Counter = create_counter!("ed25519_verify", 1);
|
||||
let start = Instant::now();
|
||||
let count = batch_size(batches);
|
||||
info!("CPU ECDSA for {}", batch_size(batches));
|
||||
batches
|
||||
let rv = batches
|
||||
.into_par_iter()
|
||||
.map(|p| {
|
||||
p.read()
|
||||
@@ -72,13 +84,17 @@ pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
|
||||
.map(verify_packet)
|
||||
.collect()
|
||||
})
|
||||
.collect()
|
||||
.collect();
|
||||
inc_counter!(COUNTER, count, start);
|
||||
rv
|
||||
}
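The CPU path fans the work out with rayon: one parallel task per batch, then a sequential map over the packets inside each batch, producing one result byte per packet in the same shape as the input. A self-contained sketch of that pattern, using stand-in types rather than the crate's `SharedPackets`:

```rust
extern crate rayon;
use rayon::prelude::*;

// Stand-in for a packet; the real code verifies an ed25519 signature here.
struct Packet {
    data: Vec<u8>,
}

fn verify_packet(p: &Packet) -> u8 {
    (!p.data.is_empty()) as u8 // 1 = "valid" for this toy check
}

// One rayon task per batch; the result preserves the batch structure so the
// next stage can pair every packet with its verdict.
fn verify_batches(batches: &[Vec<Packet>]) -> Vec<Vec<u8>> {
    batches
        .par_iter()
        .map(|batch| batch.iter().map(verify_packet).collect())
        .collect()
}

fn main() {
    let batches = vec![vec![Packet { data: vec![1] }, Packet { data: vec![] }]];
    assert_eq!(verify_batches(&batches), vec![vec![1, 0]]);
}
```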
|
||||
|
||||
#[cfg(feature = "cuda")]
|
||||
pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
|
||||
use packet::PACKET_DATA_SIZE;
|
||||
|
||||
static mut COUNTER: Counter = create_counter!("ed25519_verify_cuda", 1);
|
||||
let start = Instant::now();
|
||||
let count = batch_size(batches);
|
||||
info!("CUDA ECDSA for {}", batch_size(batches));
|
||||
let mut out = Vec::new();
|
||||
let mut elems = Vec::new();
|
||||
@@ -137,6 +153,7 @@ pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
|
||||
num += 1;
|
||||
}
|
||||
}
|
||||
inc_counter!(COUNTER, count, start);
|
||||
rvs
|
||||
}
|
||||
|
||||
|
@@ -1,4 +1,9 @@
|
||||
//! The `sigverify_stage` implements the signature verification stage of the TPU.
|
||||
//! The `sigverify_stage` implements the signature verification stage of the TPU. It
|
||||
//! receives a list of lists of packets and outputs the same list, but tags each
|
||||
//! top-level list with a list of booleans, telling the next stage whether the
|
||||
//! signature in that packet is valid. It assumes each packet contains one
|
||||
//! transaction. All processing is done on the CPU by default and on a GPU
|
||||
//! if the `cuda` feature is enabled with `--features=cuda`.
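A minimal illustration of the contract this doc comment describes, with stand-in types rather than the crate's `SharedPackets`: each incoming batch leaves the stage paired with one verdict per packet.

```rust
// Stand-ins: a unit packet and a toy verifier that accepts everything.
struct Packet;
fn verify(_p: &Packet) -> u8 {
    1
}

// The stage's output keeps the batches intact and attaches the verdicts.
fn tag_batches(batches: Vec<Vec<Packet>>) -> Vec<(Vec<Packet>, Vec<u8>)> {
    batches
        .into_iter()
        .map(|batch| {
            let verdicts: Vec<u8> = batch.iter().map(verify).collect();
            (batch, verdicts)
        })
        .collect()
}
```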
|
||||
|
||||
use packet::SharedPackets;
|
||||
use rand::{thread_rng, Rng};
|
||||
|
213  src/streamer.rs
@@ -18,6 +18,7 @@ pub type PacketReceiver = mpsc::Receiver<SharedPackets>;
|
||||
pub type PacketSender = mpsc::Sender<SharedPackets>;
|
||||
pub type BlobSender = mpsc::Sender<VecDeque<SharedBlob>>;
|
||||
pub type BlobReceiver = mpsc::Receiver<VecDeque<SharedBlob>>;
|
||||
pub type Window = Arc<RwLock<Vec<Option<SharedBlob>>>>;
|
||||
|
||||
fn recv_loop(
|
||||
sock: &UdpSocket,
|
||||
@@ -143,7 +144,7 @@ pub fn blob_receiver(
|
||||
}
|
||||
|
||||
fn find_next_missing(
|
||||
locked_window: &Arc<RwLock<Vec<Option<SharedBlob>>>>,
|
||||
locked_window: &Window,
|
||||
crdt: &Arc<RwLock<Crdt>>,
|
||||
consumed: &mut usize,
|
||||
received: &mut usize,
|
||||
@@ -168,14 +169,26 @@ fn find_next_missing(
|
||||
}
|
||||
|
||||
fn repair_window(
|
||||
locked_window: &Arc<RwLock<Vec<Option<SharedBlob>>>>,
|
||||
locked_window: &Window,
|
||||
crdt: &Arc<RwLock<Crdt>>,
|
||||
_recycler: &BlobRecycler,
|
||||
last: &mut usize,
|
||||
times: &mut usize,
|
||||
consumed: &mut usize,
|
||||
received: &mut usize,
|
||||
) -> Result<()> {
|
||||
let reqs = find_next_missing(locked_window, crdt, consumed, received)?;
|
||||
#[cfg(feature = "erasure")]
|
||||
{
|
||||
if erasure::recover(
|
||||
_recycler,
|
||||
&mut locked_window.write().unwrap(),
|
||||
*consumed,
|
||||
*received,
|
||||
).is_err()
|
||||
{
|
||||
trace!("erasure::recover failed");
|
||||
}
|
||||
}
|
||||
//exponential backoff
|
||||
if *last != *consumed {
|
||||
*times = 0;
|
||||
@@ -187,6 +200,7 @@ fn repair_window(
|
||||
trace!("repair_window counter {} {}", *times, *consumed);
|
||||
return Ok(());
|
||||
}
|
||||
let reqs = find_next_missing(locked_window, crdt, consumed, received)?;
|
||||
let sock = UdpSocket::bind("0.0.0.0:0")?;
|
||||
for (to, req) in reqs {
|
||||
//todo cache socket
|
||||
@@ -198,7 +212,7 @@ fn repair_window(
|
||||
}
|
||||
|
||||
fn recv_window(
|
||||
locked_window: &Arc<RwLock<Vec<Option<SharedBlob>>>>,
|
||||
locked_window: &Window,
|
||||
crdt: &Arc<RwLock<Crdt>>,
|
||||
recycler: &BlobRecycler,
|
||||
consumed: &mut usize,
|
||||
@@ -261,16 +275,26 @@ fn recv_window(
|
||||
if pix > *received {
|
||||
*received = pix;
|
||||
}
|
||||
// Got a blob which has already been consumed, skip it
|
||||
// probably from a repair window request
|
||||
if pix < *consumed {
|
||||
debug!(
|
||||
"received: {} but older than consumed: {} skipping..",
|
||||
pix, *consumed
|
||||
);
|
||||
continue;
|
||||
}
|
||||
let w = pix % WINDOW_SIZE;
|
||||
//TODO: after the blocks are authenticated
//TODO: after the blocks are authenticated
|
||||
//if we get different blocks at the same index
|
||||
//that is a network failure/attack
|
||||
trace!("window w: {} size: {}", w, p.meta.size);
|
||||
drop(p);
|
||||
{
|
||||
let mut window = locked_window.write().unwrap();
|
||||
if window[w].is_none() {
|
||||
window[w] = Some(b_);
|
||||
} else if let &Some(ref cblob) = &window[w] {
|
||||
} else if let Some(cblob) = &window[w] {
|
||||
if cblob.read().unwrap().get_index().unwrap() != pix as u64 {
|
||||
warn!("overrun blob at index {:}", w);
|
||||
} else {
|
||||
@@ -283,31 +307,45 @@ fn recv_window(
|
||||
if window[k].is_none() {
|
||||
break;
|
||||
}
|
||||
contq.push_back(window[k].clone().expect("clone in fn recv_window"));
|
||||
window[k] = None;
|
||||
*consumed += 1;
|
||||
let mut is_coding = false;
|
||||
if let &Some(ref cblob) = &window[k] {
|
||||
if cblob
|
||||
.read()
|
||||
.expect("blob read lock for flags streamer::window")
|
||||
.is_coding()
|
||||
{
|
||||
is_coding = true;
|
||||
}
|
||||
}
|
||||
if !is_coding {
|
||||
contq.push_back(window[k].clone().expect("clone in fn recv_window"));
|
||||
*consumed += 1;
|
||||
|
||||
#[cfg(not(feature = "erasure"))]
|
||||
{
|
||||
window[k] = None;
|
||||
}
|
||||
} else {
|
||||
#[cfg(feature = "erasure")]
|
||||
{
|
||||
let block_start = *consumed - (*consumed % erasure::NUM_CODED);
|
||||
let coding_end = block_start + erasure::NUM_CODED;
|
||||
// We've received all this block's data blobs, go and null out the window now
|
||||
for j in block_start..coding_end {
|
||||
window[j % WINDOW_SIZE] = None;
|
||||
}
|
||||
|
||||
*consumed += erasure::MAX_MISSING;
|
||||
debug!(
|
||||
"skipping processing coding blob k: {} consumed: {}",
|
||||
k, *consumed
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
{
|
||||
let buf: Vec<_> = locked_window
|
||||
.read()
|
||||
.unwrap()
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(i, v)| {
|
||||
if i == (*consumed % WINDOW_SIZE) {
|
||||
assert!(v.is_none());
|
||||
"_"
|
||||
} else if v.is_none() {
|
||||
"0"
|
||||
} else {
|
||||
"1"
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
trace!("WINDOW: {}", buf.join(""));
|
||||
}
|
||||
print_window(locked_window, *consumed);
|
||||
trace!("sending contq.len: {}", contq.len());
|
||||
if !contq.is_empty() {
|
||||
trace!("sending contq.len: {}", contq.len());
|
||||
@@ -316,14 +354,43 @@ fn recv_window(
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn default_window() -> Arc<RwLock<Vec<Option<SharedBlob>>>> {
|
||||
fn print_window(locked_window: &Window, consumed: usize) {
|
||||
{
|
||||
let buf: Vec<_> = locked_window
|
||||
.read()
|
||||
.unwrap()
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(i, v)| {
|
||||
if i == (consumed % WINDOW_SIZE) {
|
||||
"_"
|
||||
} else if v.is_none() {
|
||||
"0"
|
||||
} else {
|
||||
if let &Some(ref cblob) = &v {
|
||||
if cblob.read().unwrap().is_coding() {
|
||||
"C"
|
||||
} else {
|
||||
"1"
|
||||
}
|
||||
} else {
|
||||
"0"
|
||||
}
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
debug!("WINDOW ({}): {}", consumed, buf.join(""));
|
||||
}
|
||||
}
|
||||
|
||||
pub fn default_window() -> Window {
|
||||
Arc::new(RwLock::new(vec![None; WINDOW_SIZE]))
|
||||
}
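The `Window` is a fixed-size ring indexed by blob index modulo `WINDOW_SIZE`; `recv_window` stores arrivals there and only releases a contiguous run starting at `consumed`. A stand-alone sketch of that idea, with simplified types and a small window:

```rust
const WINDOW_SIZE: usize = 8; // the real module uses a much larger constant

// Store a blob at its index modulo WINDOW_SIZE, then release the contiguous
// run that starts at `consumed` (if any) to the next stage.
fn insert_and_consume(
    window: &mut Vec<Option<u64>>,
    consumed: &mut usize,
    blob_index: u64,
) -> Vec<u64> {
    let w = blob_index as usize % WINDOW_SIZE;
    if window[w].is_none() {
        window[w] = Some(blob_index);
    }
    let mut contiguous = Vec::new();
    while let Some(b) = window[*consumed % WINDOW_SIZE].take() {
        contiguous.push(b);
        *consumed += 1;
    }
    contiguous
}

fn main() {
    let mut window = vec![None; WINDOW_SIZE];
    let mut consumed = 0;
    assert!(insert_and_consume(&mut window, &mut consumed, 1).is_empty()); // gap at 0
    assert_eq!(insert_and_consume(&mut window, &mut consumed, 0), vec![0, 1]); // gap filled
}
```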
|
||||
|
||||
pub fn window(
|
||||
exit: Arc<AtomicBool>,
|
||||
crdt: Arc<RwLock<Crdt>>,
|
||||
window: Arc<RwLock<Vec<Option<SharedBlob>>>>,
|
||||
window: Window,
|
||||
recycler: BlobRecycler,
|
||||
r: BlobReceiver,
|
||||
s: BlobSender,
|
||||
@@ -353,6 +420,7 @@ pub fn window(
|
||||
let _ = repair_window(
|
||||
&window,
|
||||
&crdt,
|
||||
&recycler,
|
||||
&mut last,
|
||||
&mut times,
|
||||
&mut consumed,
|
||||
@@ -365,22 +433,31 @@ pub fn window(
|
||||
|
||||
fn broadcast(
|
||||
crdt: &Arc<RwLock<Crdt>>,
|
||||
window: &Arc<RwLock<Vec<Option<SharedBlob>>>>,
|
||||
window: &Window,
|
||||
recycler: &BlobRecycler,
|
||||
r: &BlobReceiver,
|
||||
sock: &UdpSocket,
|
||||
transmit_index: &mut u64,
|
||||
receive_index: &mut u64,
|
||||
) -> Result<()> {
|
||||
let timer = Duration::new(1, 0);
|
||||
let mut dq = r.recv_timeout(timer)?;
|
||||
while let Ok(mut nq) = r.try_recv() {
|
||||
dq.append(&mut nq);
|
||||
}
|
||||
let mut blobs = dq.into_iter().collect();
|
||||
/// appends codes to the list of blobs allowing us to reconstruct the stream
|
||||
let mut blobs: Vec<_> = dq.into_iter().collect();
|
||||
|
||||
print_window(window, *receive_index as usize);
|
||||
|
||||
// Insert the coding blobs into the blob stream
|
||||
#[cfg(feature = "erasure")]
|
||||
erasure::generate_coding(re, blobs, consumed);
|
||||
Crdt::broadcast(crdt, &blobs, &sock, transmit_index)?;
|
||||
erasure::add_coding_blobs(recycler, &mut blobs, *receive_index);
|
||||
|
||||
let blobs_len = blobs.len();
|
||||
info!("broadcast blobs.len: {}", blobs_len);
|
||||
|
||||
// Index the blobs
|
||||
Crdt::index_blobs(crdt, &blobs, receive_index)?;
|
||||
// keep the cache of blobs that are broadcast
|
||||
{
|
||||
let mut win = window.write().unwrap();
|
||||
@@ -407,6 +484,24 @@ fn broadcast(
|
||||
win[pos] = Some(b);
|
||||
}
|
||||
}
|
||||
|
||||
// Fill in the coding blob data from the window data blobs
|
||||
#[cfg(feature = "erasure")]
|
||||
{
|
||||
if erasure::generate_coding(
|
||||
&mut window.write().unwrap(),
|
||||
*receive_index as usize,
|
||||
blobs_len,
|
||||
).is_err()
|
||||
{
|
||||
return Err(Error::GenericError);
|
||||
}
|
||||
}
|
||||
|
||||
*receive_index += blobs_len as u64;
|
||||
|
||||
// Send blobs out from the window
|
||||
Crdt::broadcast(crdt, &window, &sock, transmit_index, *receive_index)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -423,7 +518,7 @@ pub fn broadcaster(
|
||||
sock: UdpSocket,
|
||||
exit: Arc<AtomicBool>,
|
||||
crdt: Arc<RwLock<Crdt>>,
|
||||
window: Arc<RwLock<Vec<Option<SharedBlob>>>>,
|
||||
window: Window,
|
||||
recycler: BlobRecycler,
|
||||
r: BlobReceiver,
|
||||
) -> JoinHandle<()> {
|
||||
@@ -431,11 +526,20 @@ pub fn broadcaster(
|
||||
.name("solana-broadcaster".to_string())
|
||||
.spawn(move || {
|
||||
let mut transmit_index = 0;
|
||||
let mut receive_index = 0;
|
||||
loop {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
let _ = broadcast(&crdt, &window, &recycler, &r, &sock, &mut transmit_index);
|
||||
let _ = broadcast(
|
||||
&crdt,
|
||||
&window,
|
||||
&recycler,
|
||||
&r,
|
||||
&sock,
|
||||
&mut transmit_index,
|
||||
&mut receive_index,
|
||||
);
|
||||
}
|
||||
})
|
||||
.unwrap()
|
||||
@@ -602,10 +706,8 @@ mod bench {
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use crdt::{Crdt, ReplicatedData};
|
||||
use crdt::{Crdt, TestNode};
|
||||
use packet::{Blob, BlobRecycler, Packet, PacketRecycler, Packets, PACKET_DATA_SIZE};
|
||||
use signature::KeyPair;
|
||||
use signature::KeyPairUtil;
|
||||
use std::collections::VecDeque;
|
||||
use std::io;
|
||||
use std::io::Write;
|
||||
@@ -688,29 +790,21 @@ mod test {
|
||||
|
||||
#[test]
|
||||
pub fn window_send_test() {
|
||||
let pubkey_me = KeyPair::new().pubkey();
|
||||
let read = UdpSocket::bind("127.0.0.1:0").expect("bind");
|
||||
let addr = read.local_addr().unwrap();
|
||||
let send = UdpSocket::bind("127.0.0.1:0").expect("bind");
|
||||
let serve = UdpSocket::bind("127.0.0.1:0").expect("bind");
|
||||
let transaction = UdpSocket::bind("127.0.0.1:0").expect("bind");
|
||||
let tn = TestNode::new();
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let rep_data = ReplicatedData::new(
|
||||
pubkey_me,
|
||||
read.local_addr().unwrap(),
|
||||
send.local_addr().unwrap(),
|
||||
serve.local_addr().unwrap(),
|
||||
transaction.local_addr().unwrap(),
|
||||
);
|
||||
let mut crdt_me = Crdt::new(rep_data);
|
||||
let mut crdt_me = Crdt::new(tn.data.clone());
|
||||
let me_id = crdt_me.my_data().id;
|
||||
crdt_me.set_leader(me_id);
|
||||
let subs = Arc::new(RwLock::new(crdt_me));
|
||||
|
||||
let resp_recycler = BlobRecycler::default();
|
||||
let (s_reader, r_reader) = channel();
|
||||
let t_receiver =
|
||||
blob_receiver(exit.clone(), resp_recycler.clone(), read, s_reader).unwrap();
|
||||
let t_receiver = blob_receiver(
|
||||
exit.clone(),
|
||||
resp_recycler.clone(),
|
||||
tn.sockets.gossip,
|
||||
s_reader,
|
||||
).unwrap();
|
||||
let (s_window, r_window) = channel();
|
||||
let (s_retransmit, r_retransmit) = channel();
|
||||
let win = default_window();
|
||||
@@ -724,7 +818,12 @@ mod test {
|
||||
s_retransmit,
|
||||
);
|
||||
let (s_responder, r_responder) = channel();
|
||||
let t_responder = responder(send, exit.clone(), resp_recycler.clone(), r_responder);
|
||||
let t_responder = responder(
|
||||
tn.sockets.replicate,
|
||||
exit.clone(),
|
||||
resp_recycler.clone(),
|
||||
r_responder,
|
||||
);
|
||||
let mut msgs = VecDeque::new();
|
||||
for v in 0..10 {
|
||||
let i = 9 - v;
|
||||
@@ -735,7 +834,7 @@ mod test {
|
||||
w.set_id(me_id).unwrap();
|
||||
assert_eq!(i, w.get_index().unwrap());
|
||||
w.meta.size = PACKET_DATA_SIZE;
|
||||
w.meta.set_addr(&addr);
|
||||
w.meta.set_addr(&tn.data.gossip_addr);
|
||||
msgs.push_back(b_);
|
||||
}
|
||||
s_responder.send(msgs).expect("send");
|
||||
|
@@ -12,6 +12,7 @@ use std::io;
|
||||
use std::net::{SocketAddr, UdpSocket};
|
||||
use transaction::Transaction;
|
||||
|
||||
/// An object for querying and sending transactions to the network.
|
||||
pub struct ThinClient {
|
||||
requests_addr: SocketAddr,
|
||||
requests_socket: UdpSocket,
|
||||
@@ -105,7 +106,7 @@ impl ThinClient {
|
||||
while !done {
|
||||
let resp = self.recv_response()?;
|
||||
trace!("recv_response {:?}", resp);
|
||||
if let &Response::Balance { ref key, .. } = &resp {
|
||||
if let Response::Balance { key, .. } = &resp {
|
||||
done = key == pubkey;
|
||||
}
|
||||
self.process_response(resp);
|
||||
@@ -120,17 +121,19 @@ impl ThinClient {
|
||||
let req = Request::GetTransactionCount;
|
||||
let data =
|
||||
serialize(&req).expect("serialize GetTransactionCount in pub fn transaction_count");
|
||||
self.requests_socket
|
||||
.send_to(&data, &self.requests_addr)
|
||||
.expect("buffer error in pub fn transaction_count");
|
||||
let mut done = false;
|
||||
while !done {
|
||||
let resp = self.recv_response().expect("transaction count dropped");
|
||||
info!("recv_response {:?}", resp);
|
||||
if let &Response::TransactionCount { .. } = &resp {
|
||||
done = true;
|
||||
self.requests_socket
|
||||
.send_to(&data, &self.requests_addr)
|
||||
.expect("buffer error in pub fn transaction_count");
|
||||
|
||||
if let Ok(resp) = self.recv_response() {
|
||||
info!("recv_response {:?}", resp);
|
||||
if let &Response::TransactionCount { .. } = &resp {
|
||||
done = true;
|
||||
}
|
||||
self.process_response(resp);
|
||||
}
|
||||
self.process_response(resp);
|
||||
}
|
||||
self.transaction_count
|
||||
}
|
||||
@@ -141,16 +144,18 @@ impl ThinClient {
|
||||
info!("get_last_id");
|
||||
let req = Request::GetLastId;
|
||||
let data = serialize(&req).expect("serialize GetLastId in pub fn get_last_id");
|
||||
self.requests_socket
|
||||
.send_to(&data, &self.requests_addr)
|
||||
.expect("buffer error in pub fn get_last_id");
|
||||
let mut done = false;
|
||||
while !done {
|
||||
let resp = self.recv_response().expect("get_last_id response");
|
||||
if let &Response::LastId { .. } = &resp {
|
||||
done = true;
|
||||
self.requests_socket
|
||||
.send_to(&data, &self.requests_addr)
|
||||
.expect("buffer error in pub fn get_last_id");
|
||||
|
||||
if let Ok(resp) = self.recv_response() {
|
||||
if let &Response::LastId { .. } = &resp {
|
||||
done = true;
|
||||
}
|
||||
self.process_response(resp);
|
||||
}
|
||||
self.process_response(resp);
|
||||
}
|
||||
self.last_id.expect("some last_id")
|
||||
}
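Both queries now re-send their request on every pass and treat a receive error (for example, a timeout) as "try again" rather than a reason to panic. The general shape, sketched with hypothetical closures standing in for the client's socket calls:

```rust
// Re-send the request each iteration and tolerate receive failures; only stop
// once a response of the expected kind arrives. `send_request` and
// `try_recv_response` are stand-ins, not the crate's API.
fn query_until_answered<Req, Resp>(
    req: &Req,
    send_request: impl Fn(&Req),
    try_recv_response: impl Fn() -> Result<Resp, std::io::Error>,
    is_answer: impl Fn(&Resp) -> bool,
) -> Resp {
    loop {
        send_request(req); // the previous datagram may have been dropped
        if let Ok(resp) = try_recv_response() {
            if is_answer(&resp) {
                return resp;
            }
        }
        // on timeout or an unrelated response, loop and ask again
    }
}
```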
|
||||
|
@@ -1,6 +1,11 @@
|
||||
//! The `timing` module provides std::time utility functions.
|
||||
use std::time::Duration;
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
pub fn duration_as_us(d: &Duration) -> u64 {
|
||||
return (d.as_secs() * 1000 * 1000) + (d.subsec_nanos() as u64 / 1_000);
|
||||
}
|
||||
|
||||
pub fn duration_as_ms(d: &Duration) -> u64 {
|
||||
return (d.as_secs() * 1000) + (d.subsec_nanos() as u64 / 1_000_000);
|
||||
}
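For reference, a tiny example of the helper above in use (it assumes `duration_as_ms` from this module is in scope):

```rust
use std::thread::sleep;
use std::time::{Duration, Instant};

fn main() {
    let start = Instant::now();
    sleep(Duration::from_millis(2)); // stand-in for real work
    // `elapsed()` yields a Duration, which the helper converts to whole ms.
    println!("took {} ms", duration_as_ms(&start.elapsed()));
}
```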
|
||||
|
26  src/tpu.rs
@@ -1,5 +1,29 @@
|
||||
//! The `tpu` module implements the Transaction Processing Unit, a
|
||||
//! 5-stage transaction processing pipeline in software.
|
||||
//!
|
||||
//! ```text
|
||||
//! .---------------------------------------------------------------.
|
||||
//! | TPU .-----. |
|
||||
//! | | PoH | |
|
||||
//! | `--+--` |
|
||||
//! | | |
|
||||
//! | v |
|
||||
//! | .-------. .-----------. .---------. .--------. .-------. |
|
||||
//! .---------. | | Fetch | | SigVerify | | Banking | | Record | | Write | | .------------.
|
||||
//! | Clients |--->| Stage |->| Stage |->| Stage |->| Stage |->| Stage +--->| Validators |
|
||||
//! `---------` | | | | | | | | | | | | `------------`
|
||||
//! | `-------` `-----------` `----+----` `--------` `---+---` |
|
||||
//! | | | |
|
||||
//! | | | |
|
||||
//! | | | |
|
||||
//! | | | |
|
||||
//! `---------------------------------|-----------------------|-----`
|
||||
//! | |
|
||||
//! v v
|
||||
//! .------. .--------.
|
||||
//! | Bank | | Ledger |
|
||||
//! `------` `--------`
|
||||
//! ```
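The diagram describes a chain of stages, each owning a thread and talking to its neighbours over channels. A minimal, generic sketch of that pattern (the stage names and the `u64` payload are illustrative, not the crate's types):

```rust
use std::sync::mpsc::{channel, Receiver, Sender};
use std::thread;

// A stage pulls items from its input channel, does its work, and pushes the
// result to the next stage's channel. The thread exits when upstream hangs up.
fn stage(name: &'static str, input: Receiver<u64>, output: Sender<u64>) -> thread::JoinHandle<()> {
    thread::spawn(move || {
        for item in input {
            println!("{} processed {}", name, item);
            if output.send(item).is_err() {
                break; // downstream hung up
            }
        }
    })
}

fn main() {
    let (client_tx, fetch_rx) = channel();
    let (fetch_tx, verify_rx) = channel();
    let (verify_tx, out_rx) = channel();

    let hdls = vec![
        stage("fetch", fetch_rx, fetch_tx),
        stage("sigverify", verify_rx, verify_tx),
    ];

    client_tx.send(1).unwrap();
    drop(client_tx); // close the pipeline so the stage threads exit
    assert_eq!(out_rx.recv().unwrap(), 1);
    for h in hdls {
        h.join().unwrap();
    }
}
```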
|
||||
|
||||
use bank::Bank;
|
||||
use banking_stage::BankingStage;
|
||||
@@ -61,11 +85,11 @@ impl Tpu {
|
||||
record_stage.entry_receiver,
|
||||
);
|
||||
let mut thread_hdls = vec![
|
||||
fetch_stage.thread_hdl,
|
||||
banking_stage.thread_hdl,
|
||||
record_stage.thread_hdl,
|
||||
write_stage.thread_hdl,
|
||||
];
|
||||
thread_hdls.extend(fetch_stage.thread_hdls.into_iter());
|
||||
thread_hdls.extend(sigverify_stage.thread_hdls.into_iter());
|
||||
Tpu {
|
||||
blob_receiver: write_stage.blob_receiver,
|
||||
|
@@ -11,8 +11,10 @@ pub const SIGNED_DATA_OFFSET: usize = 112;
|
||||
pub const SIG_OFFSET: usize = 8;
|
||||
pub const PUB_KEY_OFFSET: usize = 80;
|
||||
|
||||
/// The type of payment plan. Each item must implement the PaymentPlan trait.
|
||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
|
||||
pub enum Plan {
|
||||
/// The builtin contract language Budget.
|
||||
Budget(Budget),
|
||||
}
|
||||
|
||||
@@ -37,29 +39,49 @@ impl PaymentPlan for Plan {
|
||||
}
|
||||
}
|
||||
|
||||
/// A smart contract.
|
||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
|
||||
pub struct Contract {
|
||||
/// The number of tokens allocated to the `Plan` and any transaction fees.
|
||||
pub tokens: i64,
|
||||
pub plan: Plan,
|
||||
}
|
||||
|
||||
/// An instruction to progress the smart contract.
|
||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
|
||||
pub enum Instruction {
|
||||
/// Declare and instantiate a `Contract`.
|
||||
NewContract(Contract),
|
||||
|
||||
/// Tell the payment plan to acknowledge that the given `DateTime` has passed.
|
||||
ApplyTimestamp(DateTime<Utc>),
|
||||
|
||||
/// Tell the payment plan that the `NewContract` with `Signature` has been
|
||||
/// signed by the containing transaction's `PublicKey`.
|
||||
ApplySignature(Signature),
|
||||
}
|
||||
|
||||
/// An instruction signed by a client with `PublicKey`.
|
||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
|
||||
pub struct Transaction {
|
||||
/// A digital signature of `instruction`, `last_id` and `fee`, signed by `PublicKey`.
|
||||
pub sig: Signature,
|
||||
|
||||
/// The `PublicKey` of the entity that signed the transaction data.
|
||||
pub from: PublicKey,
|
||||
|
||||
/// The action the server should take.
|
||||
pub instruction: Instruction,
|
||||
|
||||
/// The ID of a recent ledger entry.
|
||||
pub last_id: Hash,
|
||||
|
||||
/// The number of tokens paid for processing and storage of this transaction.
|
||||
pub fee: i64,
|
||||
}
|
||||
|
||||
impl Transaction {
|
||||
/// Create a signed transaction from the given `Instruction`.
|
||||
fn new_from_instruction(
|
||||
from_keypair: &KeyPair,
|
||||
instruction: Instruction,
|
||||
@@ -122,7 +144,7 @@ impl Transaction {
|
||||
last_id: Hash,
|
||||
) -> Self {
|
||||
let from = from_keypair.pubkey();
|
||||
let budget = Budget::Race(
|
||||
let budget = Budget::Or(
|
||||
(Condition::Timestamp(dt), Payment { tokens, to }),
|
||||
(Condition::Signature(from), Payment { tokens, to: from }),
|
||||
);
|
||||
@@ -131,6 +153,7 @@ impl Transaction {
|
||||
Self::new_from_instruction(from_keypair, instruction, last_id, 0)
|
||||
}
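`Budget::Race` is renamed to `Budget::Or`, and the two arms built above read as: pay `tokens` to `to` once the date `dt` is witnessed, or pay the same `tokens` back to `from` if the sender signs first. With illustrative bindings (`jan_1_2021`, `alice`, and `bob` are hypothetical, not values from this diff):

```rust
// Alice pays Bob 100 tokens on a future date, but may reclaim them by signing
// a cancellation before that date is witnessed.
let budget = Budget::Or(
    (Condition::Timestamp(jan_1_2021), Payment { tokens: 100, to: bob }),
    (Condition::Signature(alice), Payment { tokens: 100, to: alice }),
);
```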
|
||||
|
||||
/// Get the transaction data to sign.
|
||||
fn get_sign_data(&self) -> Vec<u8> {
|
||||
let mut data = serialize(&(&self.instruction)).expect("serialize Contract");
|
||||
let last_id_data = serialize(&(&self.last_id)).expect("serialize last_id");
|
||||
@@ -148,14 +171,17 @@ impl Transaction {
|
||||
self.sig = Signature::clone_from_slice(keypair.sign(&sign_data).as_ref());
|
||||
}
|
||||
|
||||
/// Verify only the transaction signature.
|
||||
pub fn verify_sig(&self) -> bool {
|
||||
warn!("transaction signature verification called");
|
||||
self.sig.verify(&self.from, &self.get_sign_data())
|
||||
}
|
||||
|
||||
/// Verify only the payment plan.
|
||||
pub fn verify_plan(&self) -> bool {
|
||||
if let Instruction::NewContract(contract) = &self.instruction {
|
||||
self.fee >= 0 && self.fee <= contract.tokens
|
||||
self.fee >= 0
|
||||
&& self.fee <= contract.tokens
|
||||
&& contract.plan.verify(contract.tokens - self.fee)
|
||||
} else {
|
||||
true
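The extra clause means the fee bounds alone no longer suffice: the payment plan is also asked to verify against the tokens that remain after the fee. A worked example with illustrative numbers:

```rust
// A NewContract carrying 500 tokens with a fee of 1 only passes verify_plan
// if its plan accounts for the remaining 499 tokens.
let tokens: i64 = 500;
let fee: i64 = 1;
assert!(fee >= 0 && fee <= tokens); // the old check
let remainder = tokens - fee;       // 499: the amount handed to contract.plan.verify()
assert_eq!(remainder, 499);
```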
|
||||
|
220  src/tvu.rs
@@ -1,36 +1,50 @@
|
||||
//! The `tvu` module implements the Transaction Validation Unit, a
|
||||
//! 5-stage transaction validation pipeline in software.
|
||||
//! 1. streamer
|
||||
//! - Incoming blobs are picked up from the replicate socket.
|
||||
//! 2. verifier
|
||||
//! - TODO Blobs are sent to the GPU, and while the memory is there the PoH stream is verified
|
||||
//! along with the ecdsa signature for the blob and each signature in all the transactions. Blobs
|
||||
//! with errors are dropped, or marked for slashing.
|
||||
//! 3.a retransmit
|
||||
//! - Blobs originating from the parent (the leader is currently the only parent) are retransmitted to all the
|
||||
//! peers in the crdt. Peers are all nodes that are not me or the leader and have a known replicate
|
||||
//! address.
|
||||
//! 3.b window
|
||||
//! - Verified blobs are placed into a window, indexed by the counter set by the leader. This could
|
||||
//! be the PoH counter if it's monotonically increasing in each blob. Erasure coding is used to
|
||||
//! recover any missing packets, and requests are made at random to peers and parents to retransmit
|
||||
//! a missing packet.
|
||||
//! 4. accountant
|
||||
//! - Contiguous blobs are sent to the accountant for processing transactions
|
||||
//! 5. validator
|
||||
//! - TODO Validation messages are sent back to the leader
|
||||
//! 3-stage transaction validation pipeline in software.
|
||||
//!
|
||||
//! ```text
|
||||
//! .------------------------------------------.
|
||||
//! | TVU |
|
||||
//! | |
|
||||
//! | | .------------.
|
||||
//! | .------------------------>| Validators |
|
||||
//! | .-------. | | `------------`
|
||||
//! .--------. | | | .----+---. .-----------. |
|
||||
//! | Leader |--------->| Blob | | Window | | Replicate | |
|
||||
//! `--------` | | Fetch |-->| Stage |-->| Stage | |
|
||||
//! .------------. | | Stage | | | | | |
|
||||
//! | Validators |----->| | `--------` `----+------` |
|
||||
//! `------------` | `-------` | |
|
||||
//! | | |
|
||||
//! | | |
|
||||
//! | | |
|
||||
//! `--------------------------------|---------`
|
||||
//! |
|
||||
//! v
|
||||
//! .------.
|
||||
//! | Bank |
|
||||
//! `------`
|
||||
//! ```
|
||||
//!
|
||||
//! 1. Fetch Stage
|
||||
//! - Incoming blobs are picked up from the replicate socket and repair socket.
|
||||
//! 2. Window Stage
|
||||
//! - Blobs are windowed until a contiguous chunk is available. This stage also repairs and
|
||||
//! retransmits blobs that are in the queue.
|
||||
//! 3. Replicate Stage
|
||||
//! - Transactions in blobs are processed and applied to the bank.
|
||||
//! - TODO We need to verify the signatures in the blobs.
|
||||
|
||||
use bank::Bank;
|
||||
use crdt::{Crdt, ReplicatedData};
|
||||
use data_replicator::DataReplicator;
|
||||
use blob_fetch_stage::BlobFetchStage;
|
||||
use crdt::Crdt;
|
||||
use packet;
|
||||
use replicate_stage::ReplicateStage;
|
||||
use std::net::UdpSocket;
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::sync::mpsc::channel;
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::thread::JoinHandle;
|
||||
use streamer;
|
||||
use window_stage::WindowStage;
|
||||
|
||||
pub struct Tvu {
|
||||
pub thread_hdls: Vec<JoinHandle<()>>,
|
||||
@@ -41,90 +55,45 @@ impl Tvu {
|
||||
/// on the bank state.
|
||||
/// # Arguments
|
||||
/// * `bank` - The bank state.
|
||||
/// * `me` - my configuration
|
||||
/// * `gossip` - my gossip socket
|
||||
/// * `replicate` - my replicate socket
|
||||
/// * `leader` - leader configuration
|
||||
/// * `crdt` - The crdt state.
|
||||
/// * `window` - The window state.
|
||||
/// * `replicate_socket` - my replicate socket
|
||||
/// * `repair_socket` - my repair socket
|
||||
/// * `retransmit_socket` - my retransmit socket
|
||||
/// * `exit` - The exit signal.
|
||||
pub fn new(
|
||||
bank: Arc<Bank>,
|
||||
me: ReplicatedData,
|
||||
gossip_listen_socket: UdpSocket,
|
||||
replicate: UdpSocket,
|
||||
leader: ReplicatedData,
|
||||
crdt: Arc<RwLock<Crdt>>,
|
||||
window: streamer::Window,
|
||||
replicate_socket: UdpSocket,
|
||||
repair_socket: UdpSocket,
|
||||
retransmit_socket: UdpSocket,
|
||||
exit: Arc<AtomicBool>,
|
||||
) -> Self {
|
||||
//replicate pipeline
|
||||
let crdt = Arc::new(RwLock::new(Crdt::new(me)));
|
||||
crdt.write()
|
||||
.expect("'crdt' write lock in pub fn replicate")
|
||||
.set_leader(leader.id);
|
||||
crdt.write()
|
||||
.expect("'crdt' write lock before insert() in pub fn replicate")
|
||||
.insert(&leader);
|
||||
let window = streamer::default_window();
|
||||
let gossip_send_socket = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
|
||||
let data_replicator = DataReplicator::new(
|
||||
crdt.clone(),
|
||||
window.clone(),
|
||||
gossip_listen_socket,
|
||||
gossip_send_socket,
|
||||
exit.clone(),
|
||||
).expect("DataReplicator::new");
|
||||
|
||||
// TODO pull this socket out through the public interface
|
||||
// make sure we are on the same interface
|
||||
let mut local = replicate.local_addr().expect("tvu: get local address");
|
||||
local.set_port(0);
|
||||
let write = UdpSocket::bind(local).expect("tvu: bind to local socket");
|
||||
|
||||
let blob_recycler = packet::BlobRecycler::default();
|
||||
let (blob_sender, blob_receiver) = channel();
|
||||
let t_blob_receiver = streamer::blob_receiver(
|
||||
let fetch_stage = BlobFetchStage::new_multi_socket(
|
||||
vec![replicate_socket, repair_socket],
|
||||
exit.clone(),
|
||||
blob_recycler.clone(),
|
||||
replicate,
|
||||
blob_sender.clone(),
|
||||
).expect("tvu: blob receiver creation");
|
||||
let (window_sender, window_receiver) = channel();
|
||||
let (retransmit_sender, retransmit_receiver) = channel();
|
||||
|
||||
let t_retransmit = streamer::retransmitter(
|
||||
write,
|
||||
exit.clone(),
|
||||
crdt.clone(),
|
||||
blob_recycler.clone(),
|
||||
retransmit_receiver,
|
||||
);
|
||||
|
||||
//TODO
|
||||
//the packets coming out of blob_receiver need to be sent to the GPU and verified
|
||||
//then sent to the window, which does the erasure coding reconstruction
|
||||
let t_window = streamer::window(
|
||||
exit.clone(),
|
||||
crdt.clone(),
|
||||
let window_stage = WindowStage::new(
|
||||
crdt,
|
||||
window,
|
||||
blob_recycler.clone(),
|
||||
blob_receiver,
|
||||
window_sender,
|
||||
retransmit_sender,
|
||||
);
|
||||
|
||||
let replicate_stage = ReplicateStage::new(
|
||||
bank.clone(),
|
||||
retransmit_socket,
|
||||
exit.clone(),
|
||||
window_receiver,
|
||||
blob_recycler.clone(),
|
||||
fetch_stage.blob_receiver,
|
||||
);
|
||||
|
||||
let mut threads = vec![
|
||||
//replicate threads
|
||||
t_blob_receiver,
|
||||
t_retransmit,
|
||||
t_window,
|
||||
replicate_stage.thread_hdl,
|
||||
];
|
||||
threads.extend(data_replicator.thread_hdls.into_iter());
|
||||
let replicate_stage =
|
||||
ReplicateStage::new(bank, exit, window_stage.blob_receiver, blob_recycler);
|
||||
|
||||
let mut threads = vec![replicate_stage.thread_hdl];
|
||||
threads.extend(fetch_stage.thread_hdls.into_iter());
|
||||
threads.extend(window_stage.thread_hdls.into_iter());
|
||||
Tvu {
|
||||
thread_hdls: threads,
|
||||
}
|
||||
@@ -136,11 +105,11 @@ pub mod tests {
|
||||
use bank::Bank;
|
||||
use bincode::serialize;
|
||||
use crdt::{Crdt, TestNode};
|
||||
use data_replicator::DataReplicator;
|
||||
use entry::Entry;
|
||||
use hash::{hash, Hash};
|
||||
use logger;
|
||||
use mint::Mint;
|
||||
use ncp::Ncp;
|
||||
use packet::BlobRecycler;
|
||||
use result::Result;
|
||||
use signature::{KeyPair, KeyPairUtil};
|
||||
@@ -154,14 +123,15 @@ pub mod tests {
|
||||
use transaction::Transaction;
|
||||
use tvu::Tvu;
|
||||
|
||||
fn new_replicator(
|
||||
fn new_ncp(
|
||||
crdt: Arc<RwLock<Crdt>>,
|
||||
listen: UdpSocket,
|
||||
exit: Arc<AtomicBool>,
|
||||
) -> Result<DataReplicator> {
|
||||
) -> Result<(Ncp, streamer::Window)> {
|
||||
let window = streamer::default_window();
|
||||
let send_sock = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
|
||||
DataReplicator::new(crdt, window, listen, send_sock, exit)
|
||||
let ncp = Ncp::new(crdt, window.clone(), listen, send_sock, exit)?;
|
||||
Ok((ncp, window))
|
||||
}
|
||||
/// Test that a message sent from the leader to target1 is replicated to target2
|
||||
#[test]
|
||||
@@ -177,7 +147,7 @@ pub mod tests {
|
||||
crdt_l.set_leader(leader.data.id);
|
||||
|
||||
let cref_l = Arc::new(RwLock::new(crdt_l));
|
||||
let dr_l = new_replicator(cref_l, leader.sockets.gossip, exit.clone()).unwrap();
|
||||
let dr_l = new_ncp(cref_l, leader.sockets.gossip, exit.clone()).unwrap();
|
||||
|
||||
//start crdt2
|
||||
let mut crdt2 = Crdt::new(target2.data.clone());
|
||||
@@ -185,7 +155,7 @@ pub mod tests {
|
||||
crdt2.set_leader(leader.data.id);
|
||||
let leader_id = leader.data.id;
|
||||
let cref2 = Arc::new(RwLock::new(crdt2));
|
||||
let dr_2 = new_replicator(cref2, target2.sockets.gossip, exit.clone()).unwrap();
|
||||
let dr_2 = new_ncp(cref2, target2.sockets.gossip, exit.clone()).unwrap();
|
||||
|
||||
// setup some blob services to send blobs into the socket
|
||||
// to simulate the source peer and get blobs out of the socket to
|
||||
@@ -213,28 +183,32 @@ pub mod tests {
|
||||
let mint = Mint::new(starting_balance);
|
||||
let replicate_addr = target1.data.replicate_addr;
|
||||
let bank = Arc::new(Bank::new(&mint));
|
||||
|
||||
//start crdt1
|
||||
let mut crdt1 = Crdt::new(target1.data.clone());
|
||||
crdt1.insert(&leader.data);
|
||||
crdt1.set_leader(leader.data.id);
|
||||
let cref1 = Arc::new(RwLock::new(crdt1));
|
||||
let dr_1 = new_ncp(cref1.clone(), target1.sockets.gossip, exit.clone()).unwrap();
|
||||
|
||||
let tvu = Tvu::new(
|
||||
bank.clone(),
|
||||
target1.data,
|
||||
target1.sockets.gossip,
|
||||
cref1,
|
||||
dr_1.1,
|
||||
target1.sockets.replicate,
|
||||
leader.data,
|
||||
target1.sockets.repair,
|
||||
target1.sockets.retransmit,
|
||||
exit.clone(),
|
||||
);
|
||||
|
||||
let mut alice_ref_balance = starting_balance;
|
||||
let mut msgs = VecDeque::new();
|
||||
let mut cur_hash = Hash::default();
|
||||
let num_blobs = 10;
|
||||
let mut blob_id = 0;
|
||||
let num_transfers = 10;
|
||||
let transfer_amount = 501;
|
||||
let bob_keypair = KeyPair::new();
|
||||
for i in 0..num_blobs {
|
||||
let b = resp_recycler.allocate();
|
||||
let b_ = b.clone();
|
||||
let mut w = b.write().unwrap();
|
||||
w.set_index(i).unwrap();
|
||||
w.set_id(leader_id).unwrap();
|
||||
|
||||
for i in 0..num_transfers {
|
||||
let entry0 = Entry::new(&cur_hash, i, vec![]);
|
||||
bank.register_entry_id(&cur_hash);
|
||||
cur_hash = hash(&cur_hash);
|
||||
@@ -247,19 +221,28 @@ pub mod tests {
|
||||
);
|
||||
bank.register_entry_id(&cur_hash);
|
||||
cur_hash = hash(&cur_hash);
|
||||
let entry1 = Entry::new(&cur_hash, i + num_blobs, vec![tx0]);
|
||||
let entry1 = Entry::new(&cur_hash, i + num_transfers, vec![tx0]);
|
||||
bank.register_entry_id(&cur_hash);
|
||||
cur_hash = hash(&cur_hash);
|
||||
|
||||
alice_ref_balance -= transfer_amount;
|
||||
|
||||
let serialized_entry = serialize(&vec![entry0, entry1]).unwrap();
|
||||
for entry in vec![entry0, entry1] {
|
||||
let b = resp_recycler.allocate();
|
||||
let b_ = b.clone();
|
||||
let mut w = b.write().unwrap();
|
||||
w.set_index(blob_id).unwrap();
|
||||
blob_id += 1;
|
||||
w.set_id(leader_id).unwrap();
|
||||
|
||||
w.data_mut()[..serialized_entry.len()].copy_from_slice(&serialized_entry);
|
||||
w.set_size(serialized_entry.len());
|
||||
w.meta.set_addr(&replicate_addr);
|
||||
drop(w);
|
||||
msgs.push_back(b_);
|
||||
let serialized_entry = serialize(&entry).unwrap();
|
||||
|
||||
w.data_mut()[..serialized_entry.len()].copy_from_slice(&serialized_entry);
|
||||
w.set_size(serialized_entry.len());
|
||||
w.meta.set_addr(&replicate_addr);
|
||||
drop(w);
|
||||
msgs.push_back(b_);
|
||||
}
|
||||
}
|
||||
|
||||
// send the blobs into the socket
|
||||
@@ -267,10 +250,8 @@ pub mod tests {
|
||||
|
||||
// receive retransmitted messages
|
||||
let timer = Duration::new(1, 0);
|
||||
let mut msgs: Vec<_> = Vec::new();
|
||||
while let Ok(msg) = r_reader.recv_timeout(timer) {
|
||||
trace!("msg: {:?}", msg);
|
||||
msgs.push(msg);
|
||||
}
|
||||
|
||||
let alice_balance = bank.get_balance(&mint.keypair().pubkey()).unwrap();
|
||||
@@ -283,10 +264,13 @@ pub mod tests {
|
||||
for t in tvu.thread_hdls {
|
||||
t.join().expect("join");
|
||||
}
|
||||
for t in dr_l.thread_hdls {
|
||||
for t in dr_l.0.thread_hdls {
|
||||
t.join().expect("join");
|
||||
}
|
||||
for t in dr_2.thread_hdls {
|
||||
for t in dr_2.0.thread_hdls {
|
||||
t.join().expect("join");
|
||||
}
|
||||
for t in dr_1.0.thread_hdls {
|
||||
t.join().expect("join");
|
||||
}
|
||||
t_receiver.join().expect("join");
|
||||
|
52  src/window_stage.rs (new file)
@@ -0,0 +1,52 @@
|
||||
//! The `window_stage` maintains the blob window
|
||||
|
||||
use crdt::Crdt;
|
||||
use packet;
|
||||
use std::net::UdpSocket;
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::sync::mpsc::channel;
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::thread::JoinHandle;
|
||||
use streamer;
|
||||
|
||||
pub struct WindowStage {
|
||||
pub blob_receiver: streamer::BlobReceiver,
|
||||
pub thread_hdls: Vec<JoinHandle<()>>,
|
||||
}
|
||||
|
||||
impl WindowStage {
|
||||
pub fn new(
|
||||
crdt: Arc<RwLock<Crdt>>,
|
||||
window: streamer::Window,
|
||||
retransmit_socket: UdpSocket,
|
||||
exit: Arc<AtomicBool>,
|
||||
blob_recycler: packet::BlobRecycler,
|
||||
fetch_stage_receiver: streamer::BlobReceiver,
|
||||
) -> Self {
|
||||
let (retransmit_sender, retransmit_receiver) = channel();
|
||||
|
||||
let t_retransmit = streamer::retransmitter(
|
||||
retransmit_socket,
|
||||
exit.clone(),
|
||||
crdt.clone(),
|
||||
blob_recycler.clone(),
|
||||
retransmit_receiver,
|
||||
);
|
||||
let (blob_sender, blob_receiver) = channel();
|
||||
let t_window = streamer::window(
|
||||
exit.clone(),
|
||||
crdt.clone(),
|
||||
window,
|
||||
blob_recycler.clone(),
|
||||
fetch_stage_receiver,
|
||||
blob_sender,
|
||||
retransmit_sender,
|
||||
);
|
||||
let thread_hdls = vec![t_retransmit, t_window];
|
||||
|
||||
WindowStage {
|
||||
blob_receiver,
|
||||
thread_hdls,
|
||||
}
|
||||
}
|
||||
}
|
@@ -1,4 +1,6 @@
|
||||
//! The `write_stage` module implements write stage of the RPU.
|
||||
//! The `write_stage` module implements the TPU's write stage. It
|
||||
//! writes entries to the given writer, which is typically a file or
|
||||
//! stdout, and then sends the Entry to its output channel.
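A rough sketch of the behaviour this doc comment describes, with stand-in types (`String` instead of `Entry`): write each entry to the supplied writer, then forward it to the next stage over a channel.

```rust
use std::io::Write;
use std::sync::mpsc::Sender;

fn write_and_forward<W: Write>(
    entries: Vec<String>,            // stand-in for Vec<Entry>
    writer: &mut W,
    output: &Sender<String>,
) -> std::io::Result<()> {
    for entry in entries {
        writeln!(writer, "{}", entry)?;           // persist: file, stdout, ...
        output.send(entry).expect("send entry");  // hand off to the next stage
    }
    Ok(())
}
```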
|
||||
|
||||
use bank::Bank;
|
||||
use entry::Entry;
|
||||
|
@@ -5,8 +5,8 @@ extern crate solana;
|
||||
|
||||
use rayon::iter::*;
|
||||
use solana::crdt::{Crdt, TestNode};
|
||||
use solana::data_replicator::DataReplicator;
|
||||
use solana::logger;
|
||||
use solana::ncp::Ncp;
|
||||
use solana::packet::Blob;
|
||||
use std::net::UdpSocket;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
@@ -14,12 +14,12 @@ use std::sync::{Arc, RwLock};
|
||||
use std::thread::sleep;
|
||||
use std::time::Duration;
|
||||
|
||||
fn test_node(exit: Arc<AtomicBool>) -> (Arc<RwLock<Crdt>>, DataReplicator, UdpSocket) {
|
||||
fn test_node(exit: Arc<AtomicBool>) -> (Arc<RwLock<Crdt>>, Ncp, UdpSocket) {
|
||||
let tn = TestNode::new();
|
||||
let crdt = Crdt::new(tn.data.clone());
|
||||
let c = Arc::new(RwLock::new(crdt));
|
||||
let w = Arc::new(RwLock::new(vec![]));
|
||||
let d = DataReplicator::new(
|
||||
let d = Ncp::new(
|
||||
c.clone(),
|
||||
w,
|
||||
tn.sockets.gossip,
|
||||
@@ -35,7 +35,7 @@ fn test_node(exit: Arc<AtomicBool>) -> (Arc<RwLock<Crdt>>, DataReplicator, UdpSo
|
||||
/// tests that actually use this function are below
|
||||
fn run_gossip_topo<F>(topo: F)
|
||||
where
|
||||
F: Fn(&Vec<(Arc<RwLock<Crdt>>, DataReplicator, UdpSocket)>) -> (),
|
||||
F: Fn(&Vec<(Arc<RwLock<Crdt>>, Ncp, UdpSocket)>) -> (),
|
||||
{
|
||||
let num: usize = 5;
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
@@ -45,7 +45,7 @@ where
|
||||
for i in 0..(num * 32) {
|
||||
done = false;
|
||||
trace!("round {}", i);
|
||||
for &(ref c, _, _) in listen.iter() {
|
||||
for (c, _, _) in &listen {
|
||||
if num == c.read().unwrap().convergence() as usize {
|
||||
done = true;
|
||||
break;
|
||||
@@ -150,7 +150,8 @@ pub fn crdt_retransmit() {
|
||||
trace!("waiting to converge:");
|
||||
let mut done = false;
|
||||
for _ in 0..30 {
|
||||
done = c1.read().unwrap().table.len() == 3 && c2.read().unwrap().table.len() == 3
|
||||
done = c1.read().unwrap().table.len() == 3
|
||||
&& c2.read().unwrap().table.len() == 3
|
||||
&& c3.read().unwrap().table.len() == 3;
|
||||
if done {
|
||||
break;
|
||||
|
@@ -6,9 +6,9 @@ extern crate solana;
|
||||
use solana::bank::Bank;
|
||||
use solana::crdt::TestNode;
|
||||
use solana::crdt::{Crdt, ReplicatedData};
|
||||
use solana::data_replicator::DataReplicator;
|
||||
use solana::logger;
|
||||
use solana::mint::Mint;
|
||||
use solana::ncp::Ncp;
|
||||
use solana::server::Server;
|
||||
use solana::signature::{KeyPair, KeyPairUtil, PublicKey};
|
||||
use solana::streamer::default_window;
|
||||
@@ -37,6 +37,7 @@ fn validator(
|
||||
validator.sockets.respond,
|
||||
validator.sockets.replicate,
|
||||
validator.sockets.gossip,
|
||||
validator.sockets.repair,
|
||||
leader.clone(),
|
||||
exit.clone(),
|
||||
);
|
||||
@@ -60,7 +61,7 @@ fn converge(
|
||||
spy_crdt.set_leader(leader.id);
|
||||
let spy_ref = Arc::new(RwLock::new(spy_crdt));
|
||||
let spy_window = default_window();
|
||||
let dr = DataReplicator::new(
|
||||
let dr = Ncp::new(
|
||||
spy_ref.clone(),
|
||||
spy_window,
|
||||
spy.sockets.gossip,
|
||||
|