Compare commits: v0.6.0-alpha...v0.6.0-beta
96 commits

Commit SHA1s:

53ecaa03f1  205c1aa505  9b54c1542b  93d5d1b2ad  4c0f3ed6f3  2580155bf2
6ab0dd4df9  4b8c36b6b9  359a8397c0  c9fd5d74b5  391744af97  587ab29e09
80f07dadc5  60609a44ba  30c8fa46b4  7aab7d2f82  a8e1c44663  a2b92c35e1
9f2086c772  3eb005d492  68955bfcf4  9ac7070e08  e44e81bd17  f5eedd2d19
46059a37eb  adc655a3a2  3058f80489  df98cae4b6  d327e0aabd  17d3a6763c
02c5b0343b  2888e45fea  f1311075d9  6c380e04a3  cef1c208a5  ef8eac92e3
9c9c63572b  6c0c6de1d0  b57aecc24c  290dde60a0  38623785f9  256ecc7208
76b06b47ba  cf15cf587f  134c7add57  ac0791826a  d2622b7798  f82cbf3a27
aa7e3df8d6  ad00d7bd9c  8d1f82c34d  0cb2036e3a  2b1e90b0a5  f2ccc133a2
5e824b39dd  41efcae64b  cf5671d058  2570bba6b1  71cb7d5c97  0df6541d5e
52145caf7e  86a50ae9e1  c64cfb74f3  26153d9919  5af922722f  b70d730b32
bf4b856e0c  0cf0ae6755  29061cff39  b7eec4c89f  a3854c229e  dcde256433
931bdbd5cd  b7bd59c344  2dbf9a6017  fe93bba457  6e35f54738  089294a85e
25c0b44641  58c1589688  bb53f69016  75659ca042  fc00594ea4  8d26be8b89
af4e95ae0f  ffb4a7aa78  dcaeacc507  4f377e6710  122db85727  a598e4aa74
733b31ebbd  dac9775de0  46c19a5783  aaeb5ba52f  9f5a3d6064  4cdf873f98
.travis.yml (36 changes, file deleted)

@@ -1,36 +0,0 @@
-language: rust
-required: sudo
-services:
-  - docker
-matrix:
-  allow_failures:
-    - rust: nightly
-  include:
-    - rust: stable
-    - rust: nightly
-      env:
-        - FEATURES='unstable'
-before_script: |
-  export PATH="$PATH:$HOME/.cargo/bin"
-  rustup component add rustfmt-preview
-script:
-  - cargo fmt -- --write-mode=diff
-  - cargo build --verbose --features "$FEATURES"
-  - cargo test --verbose --features "$FEATURES"
-after_success: |
-  docker run -it --rm --security-opt seccomp=unconfined --volume "$PWD:/volume" elmtai/docker-rust-kcov
-  bash <(curl -s https://codecov.io/bash) -s target/cov
-before_deploy:
-  - cargo package
-deploy:
-  provider: releases
-  api-key:
-    secure: j3cPAbOuGjXuSl+j+JL/4GWxD6dA0/f5NQ0Od4LBVewPmnKiqimGOJ1xj3eKth+ZzwuCpcHwBIIR54NEDSJgHaYDXiukc05qCeToIPqOc0wGJ+GcUrWAy8M7Wo981I/0SVYDAnLv4+ivvJxYE7b2Jr3pHsQAzH7ClY8g2xu9HlNkScEsc4cizA9Sf3zIqtIoi480vxtQ5ghGOUCkwZuG3+Dg+IGnnjvE4qQOYey1del+KIDkmbHjry7iFWPF6fWK2187JNt6XiO2/2tZt6BkMEmdRnkw1r/wL9tj0AbqLgyBjzlI4QQfkBwsuX3ZFeNGArn71s7WmAUGyVOl0DJXfwN/BEUxMTd+lkMjuMNUxaU/hxVZ7zAWH55KJK+qf6B95DLVWr7ypjfJLLBcds+JfkBNoReWLM1XoDUKAU+wBf1b+PKiywNfNascjZTcz6QGe94sa7l/T4PxtHDSREmflFgu1Hysg61WuODDwTTHGrsg9ZuvlINnqQhXsJo9r9+TMIGwwWHcvLQDNz2TPALCfcLtd+RsevdOeXItYa0KD3D4gKGv36bwAVDpCIoZnSeiaT/PUyjilFtJjBpKz9BbOKgOtQhHGrHucn0WOF+bu/t3SFaJKQf/W+hLwO3NV8yiL5LQyHVm/TPY62nBfne2KEqi/LOFxgKG35aACouP0ig=
-  file: target/package/solana-$TRAVIS_TAG.crate
-  skip_cleanup: true
-  on:
-    tags: true
-    condition: "$TRAVIS_RUST_VERSION = stable"
-
-after_deploy:
-  - cargo publish --token "$CRATES_IO_TOKEN"
Cargo.toml (11 changes)

@@ -1,7 +1,7 @@
 [package]
 name = "solana"
 description = "The World's Fastest Blockchain"
-version = "0.6.0-alpha"
+version = "0.6.0-beta.1"
 documentation = "https://docs.rs/solana"
 homepage = "http://solana.com/"
 repository = "https://github.com/solana-labs/solana"
@@ -17,12 +17,12 @@ name = "solana-client-demo"
 path = "src/bin/client-demo.rs"
 
 [[bin]]
-name = "solana-multinode-demo"
-path = "src/bin/multinode-demo.rs"
+name = "solana-fullnode"
+path = "src/bin/fullnode.rs"
 
 [[bin]]
-name = "solana-testnode"
-path = "src/bin/testnode.rs"
+name = "solana-fullnode-config"
+path = "src/bin/fullnode-config.rs"
 
 [[bin]]
 name = "solana-genesis"
@@ -67,6 +67,5 @@ byteorder = "^1.2.1"
 libc = "^0.2.1"
 getopts = "^0.2"
 isatty = "0.1"
-futures = "0.1"
 rand = "0.4.2"
 pnet = "^0.21.0"
README.md (96 changes)

@@ -1,6 +1,6 @@
 [](https://crates.io/crates/solana)
 [](https://docs.rs/solana)
 [](https://buildkite.com/solana-labs/solana)
 [](https://codecov.io/gh/solana-labs/solana)
 
 Disclaimer
@@ -36,57 +36,72 @@ $ git clone https://github.com/solana-labs/solana.git
 $ cd solana
 ```
 
-The testnode server is initialized with a ledger from stdin and
+The fullnode server is initialized with a ledger from stdin and
 generates new ledger entries on stdout. To create the input ledger, we'll need
 to create *the mint* and use it to generate a *genesis ledger*. It's done in
 two steps because the mint-demo.json file contains private keys that will be
 used later in this demo.
 
 ```bash
 $ echo 1000000000 | cargo run --release --bin solana-mint-demo > mint-demo.json
 $ cat mint-demo.json | cargo run --release --bin solana-genesis-demo > genesis.log
 ```
 
-Now you can start the server:
+Before you start the server, make sure you know the IP address of the machine you
+want to be the leader for the demo, and make sure that udp ports 8000-10000 are
+open on all the machines you want to test with.
+
+Generate a leader configuration file with:
 
 ```bash
-$ cat genesis.log | cargo run --release --bin solana-testnode > transactions0.log
+cargo run --release --bin solana-fullnode-config > leader.json
+```
+
+Now start the server:
+
+```bash
+$ cat ./multinode-demo/leader.sh
+#!/bin/bash
+export RUST_LOG=solana=info
+sudo sysctl -w net.core.rmem_max=26214400
+cat genesis.log | cargo run --release --bin solana-fullnode -- -l leader.json
+$ ./multinode-demo/leader.sh > leader-txs.log
 ```
 
 Wait a few seconds for the server to initialize. It will print "Ready." when it's safe
 to start sending it transactions.
 
+Now you can start some validators:
+
+```bash
+$ cat ./multinode-demo/validator.sh
+#!/bin/bash
+rsync -v -e ssh $1/mint-demo.json .
+rsync -v -e ssh $1/leader.json .
+rsync -v -e ssh $1/genesis.log .
+export RUST_LOG=solana=info
+sudo sysctl -w net.core.rmem_max=26214400
+cat genesis.log | cargo run --release --bin solana-fullnode -- -l validator.json -v leader.json -b 9000 -d
+$ ./multinode-demo/validator.sh ubuntu@10.0.1.51:~/solana > validator-txs.log #The leader machine
+```
+
 Then, in a separate shell, let's execute some transactions. Note we pass in
 the JSON configuration file here, not the genesis ledger.
 
 ```bash
-$ cat mint-demo.json | cargo run --release --bin solana-client-demo
+$ cat ./multinode-demo/client.sh
+#!/bin/bash
+export RUST_LOG=solana=info
+rsync -v -e ssh $1/leader.json .
+rsync -v -e ssh $1/mint-demo.json .
+cat mint-demo.json | cargo run --release --bin solana-client-demo -- -l leader.json -c 8100 -n 1
+$ ./multinode-demo/client.sh ubuntu@10.0.1.51:~/solana #The leader machine
 ```
 
-Now kill the server with Ctrl-C, and take a look at the ledger. You should
-see something similar to:
-
-```json
-{"num_hashes":27,"id":[0, "..."],"event":"Tick"}
-{"num_hashes":3,"id":[67, "..."],"event":{"Transaction":{"tokens":42}}}
-{"num_hashes":27,"id":[0, "..."],"event":"Tick"}
-```
-
-Now restart the server from where we left off. Pass it both the genesis ledger, and
-the transaction ledger.
-
-```bash
-$ cat genesis.log transactions0.log | cargo run --release --bin solana-testnode > transactions1.log
-```
-
-Lastly, run the client demo again, and verify that all funds were spent in the
-previous round, and so no additional transactions are added.
-
-```bash
-$ cat mint-demo.json | cargo run --release --bin solana-client-demo
-```
-
-Stop the server again, and verify there are only Tick entries, and no Transaction entries.
+Try starting more validators and rerunning the client demo, changing the `-n 1` option in `client.sh`!
+
+To enable cuda, download the cuda library (see #Benchmarking) and add `--features=cuda` to the leader and validator scripts (`--release --features=cuda`).
 
 Developing
 ===
@@ -102,7 +117,7 @@ $ source $HOME/.cargo/env
 $ rustup component add rustfmt-preview
 ```
 
-If your rustc version is lower than 1.25.0, please update it:
+If your rustc version is lower than 1.26.1, please update it:
 
 ```bash
 $ rustup update
@@ -121,18 +136,23 @@ Testing
 Run the test suite:
 
 ```bash
-cargo test
+$ cargo test
+```
+
+To emulate all the tests that will run on a Pull Request, run:
+```bash
+$ ./ci/run-local.sh
 ```
 
 Debugging
 ---
 
 There are some useful debug messages in the code, you can enable them on a per-module and per-level
-basis with the normal RUST\_LOG environment variable. Run the testnode with this syntax:
+basis with the normal RUST\_LOG environment variable. Run the fullnode with this syntax:
 ```bash
-$ RUST_LOG=solana::streamer=debug,solana::accountant_skel=info cat genesis.log | ./target/release/solana-testnode > transactions0.log
+$ RUST_LOG=solana::streamer=debug,solana::server=info cat genesis.log | ./target/release/solana-fullnode > transactions0.log
 ```
-to see the debug and info sections for streamer and accountant\_skel respectively. Generally
+to see the debug and info sections for streamer and server respectively. Generally
 we are using debug for infrequent debug messages, trace for potentially frequent messages and
 info for performance-related logging.
 
@@ -154,7 +174,7 @@ $ cargo +nightly bench --features="unstable"
 To run the benchmarks on Linux with GPU optimizations enabled:
 
 ```bash
-$ wget https://solana-build-artifacts.s3.amazonaws.com/v0.5.0/libcuda_verify_ed25519.a
+$ wget https://solana-build-artifacts.s3.amazonaws.com/v0.6.0/libcuda_verify_ed25519.a
 $ cargo +nightly bench --features="unstable,cuda"
 ```
 
@@ -164,8 +184,10 @@ Code coverage
 To generate code coverage statistics, run kcov via Docker:
 
 ```bash
-$ docker run -it --rm --security-opt seccomp=unconfined --volume "$PWD:/volume" elmtai/docker-rust-kcov
+$ ./ci/coverage.sh
 ```
+The coverage report will be written to `./target/cov/index.html`
 
 Why coverage? While most see coverage as a code quality metric, we see it primarily as a developer
 productivity metric. When a developer makes a change to the codebase, presumably it's a *solution* to
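A detail worth noting in the scripts above: `sudo sysctl -w net.core.rmem_max=26214400` raises the kernel's ceiling on UDP socket receive buffers to 25 MiB, so a node can absorb transaction bursts without dropping packets; this, together with opening udp ports 8000-10000, is the extra system setup the multinode demo needs and the reason `leader.sh` and `validator.sh` invoke sudo.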
ci/.gitignore (vendored, new file, 2 additions)

@@ -0,0 +1,2 @@
+/node_modules/
+/package-lock.json
ci/buildkite.yml

@@ -1,37 +1,16 @@
 steps:
-  - command: "ci/coverage.sh || true"
-    label: "coverage"
-  # TODO: Run coverage in a docker image rather than assuming kcov/cargo-kcov
-  # is installed on the build agent...
-  #plugins:
-  #  docker#v1.1.1:
-  #    image: "rust"
-  #    user: "998:997" # buildkite-agent:buildkite-agent
-  #    environment:
-  #      - CODECOV_TOKEN=$CODECOV_TOKEN
-  - command: "ci/test-stable.sh"
-    label: "stable [public]"
-    plugins:
-      docker#v1.1.1:
-        image: "rust"
-        user: "998:997" # buildkite-agent:buildkite-agent
-  - command: "ci/test-nightly.sh || true"
-    label: "nightly - FAILURES IGNORED [public]"
-    plugins:
-      docker#v1.1.1:
-        image: "rustlang/rust:nightly"
-        user: "998:997" # buildkite-agent:buildkite-agent
-  - command: "ci/test-ignored.sh || true"
-    label: "ignored - FAILURES IGNORED [public]"
+  - command: "ci/coverage.sh"
+    name: "coverage [public]"
+  - command: "ci/docker-run.sh rust ci/test-stable.sh"
+    name: "stable [public]"
+  - command: "ci/docker-run.sh rustlang/rust:nightly ci/test-nightly.sh || true"
+    name: "nightly - FAILURES IGNORED [public]"
+  - command: "ci/docker-run.sh rust ci/test-ignored.sh"
+    name: "ignored [public]"
   - command: "ci/test-cuda.sh"
-    label: "cuda"
+    name: "cuda"
+  - command: "ci/shellcheck.sh || true"
+    name: "shellcheck [public]"
   - wait
   - command: "ci/publish.sh"
-    label: "publish release artifacts"
-    plugins:
-      docker#v1.1.1:
-        image: "rust"
-        user: "998:997" # buildkite-agent:buildkite-agent
-        environment:
-          - BUILDKITE_TAG=$BUILDKITE_TAG
-          - CRATES_IO_TOKEN=$CRATES_IO_TOKEN
+    name: "publish release artifacts"
ci/coverage.sh

@@ -1,25 +1,21 @@
 #!/bin/bash -e
 
-cd $(dirname $0)/..
+cd "$(dirname "$0")/.."
 
-if [[ -r ~/.cargo/env ]]; then
-  # Pick up local install of kcov/cargo-kcov
-  source ~/.cargo/env
-fi
-
-rustc --version
-cargo --version
-kcov --version
-cargo-kcov --version
-
-export RUST_BACKTRACE=1
-cargo build
-cargo kcov
+ci/docker-run.sh evilmachines/rust-cargo-kcov \
+  bash -exc "\
+    export RUST_BACKTRACE=1; \
+    cargo build --verbose; \
+    cargo kcov --lib --verbose; \
+  "
+
+echo Coverage report:
+ls -l target/cov/index.html
 
 if [[ -z "$CODECOV_TOKEN" ]]; then
   echo CODECOV_TOKEN undefined
-  exit 1
+else
+  bash <(curl -s https://codecov.io/bash)
 fi
 
-bash <(curl -s https://codecov.io/bash)
 exit 0
ci/docker-run.sh (new executable file, 41 additions)

@@ -0,0 +1,41 @@
+#!/bin/bash -e
+
+usage() {
+  echo "Usage: $0 [docker image name] [command]"
+  echo
+  echo Runs command in the specified docker image with
+  echo a CI-appropriate environment
+  echo
+}
+
+cd "$(dirname "$0")/.."
+
+IMAGE="$1"
+if [[ -z "$IMAGE" ]]; then
+  echo Error: image not defined
+  exit 1
+fi
+
+docker pull "$IMAGE"
+shift
+
+ARGS=(--workdir /solana --volume "$PWD:/solana" --rm)
+
+ARGS+=(--env "CARGO_HOME=/solana/.cargo")
+
+# kcov tries to set the personality of the binary which docker
+# doesn't allow by default.
+ARGS+=(--security-opt "seccomp=unconfined")
+
+# Ensure files are created with the current host uid/gid
+ARGS+=(--user "$(id -u):$(id -g)")
+
+# Environment variables to propagate into the container
+ARGS+=(
+  --env BUILDKITE_TAG
+  --env CODECOV_TOKEN
+  --env CRATES_IO_TOKEN
+)
+
+set -x
+docker run "${ARGS[@]}" "$IMAGE" "$@"
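The rest of this changeset routes CI through this wrapper: ci/buildkite.yml invokes `ci/docker-run.sh rust ci/test-stable.sh`, ci/coverage.sh runs kcov inside the `evilmachines/rust-cargo-kcov` image, and ci/publish.sh packages and publishes from a `rust` container, replacing the per-step Docker plugin configuration that was previously inlined in the pipeline.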
ci/publish.sh

@@ -1,6 +1,6 @@
 #!/bin/bash -e
 
-cd $(dirname $0)/..
+cd "$(dirname "$0")/.."
 
 if [[ -z "$BUILDKITE_TAG" ]]; then
   # Skip publish if this is not a tagged release
@@ -12,8 +12,8 @@ if [[ -z "$CRATES_IO_TOKEN" ]]; then
   exit 1
 fi
 
-cargo package
 # TODO: Ensure the published version matches the contents of BUILDKITE_TAG
-cargo publish --token "$CRATES_IO_TOKEN"
+ci/docker-run.sh rust \
+  bash -exc "cargo package; cargo publish --token $CRATES_IO_TOKEN"
 
 exit 0
ci/run-local.sh (new executable file, 19 additions)

@@ -0,0 +1,19 @@
+#!/bin/bash -e
+#
+# Run the entire buildkite CI pipeline locally for pre-testing before sending a
+# Github pull request
+#
+
+cd "$(dirname "$0")/.."
+BKRUN=ci/node_modules/.bin/bkrun
+
+if [[ ! -x $BKRUN ]]; then
+  (
+    set -x
+    cd ci/
+    npm install bkrun
+  )
+fi
+
+set -x
+./ci/node_modules/.bin/bkrun ci/buildkite.yml
ci/shellcheck.sh (new executable file, 11 additions)

@@ -0,0 +1,11 @@
+#!/bin/bash -e
+#
+# Reference: https://github.com/koalaman/shellcheck/wiki/Directive
+
+cd "$(dirname "$0")/.."
+
+set -x
+find . -name "*.sh" -not -regex ".*/.cargo/.*" -not -regex ".*/node_modules/.*" -print0 \
+  | xargs -0 \
+      ci/docker-run.sh koalaman/shellcheck --color=always --external-sources --shell=bash
+exit 0
ci/test-cuda.sh

@@ -1,17 +1,22 @@
 #!/bin/bash -e
 
-cd $(dirname $0)/..
+cd "$(dirname "$0")/.."
 
-if [[ -z "$libcuda_verify_ed25519_URL" ]]; then
-  echo libcuda_verify_ed25519_URL undefined
-  exit 1
+LIB=libcuda_verify_ed25519.a
+if [[ ! -r $LIB ]]; then
+  if [[ -z "${libcuda_verify_ed25519_URL:-}" ]]; then
+    echo "$0 skipped. Unable to locate $LIB"
+    exit 0
+  fi
+
+  export LD_LIBRARY_PATH=/usr/local/cuda/lib64
+  export PATH=$PATH:/usr/local/cuda/bin
+  curl -X GET -o $LIB "$libcuda_verify_ed25519_URL"
 fi
 
-export LD_LIBRARY_PATH=/usr/local/cuda/lib64
-export PATH=$PATH:/usr/local/cuda/bin
-curl -X GET -o libcuda_verify_ed25519.a "$libcuda_verify_ed25519_URL"
-
-source $HOME/.cargo/env
+# shellcheck disable=SC1090 # <-- shellcheck can't follow ~
+source ~/.cargo/env
+export RUST_BACKTRACE=1
 
 cargo test --features=cuda
 
 exit 0
ci/test-ignored.sh

@@ -1,6 +1,6 @@
 #!/bin/bash -e
 
-cd $(dirname $0)/..
+cd "$(dirname "$0")/.."
 
 rustc --version
 cargo --version
ci/test-nightly.sh

@@ -1,13 +1,14 @@
 #!/bin/bash -e
 
-cd $(dirname $0)/..
+cd "$(dirname "$0")/.."
 
 rustc --version
 cargo --version
 
+export RUST_BACKTRACE=1
 rustup component add rustfmt-preview
-cargo fmt -- --write-mode=diff
 cargo build --verbose --features unstable
 cargo test --verbose --features unstable
+cargo bench --verbose --features unstable
 
 exit 0
ci/test-stable.sh

@@ -1,10 +1,11 @@
 #!/bin/bash -e
 
-cd $(dirname $0)/..
+cd "$(dirname "$0")/.."
 
 rustc --version
 cargo --version
 
+export RUST_BACKTRACE=1
 rustup component add rustfmt-preview
 cargo fmt -- --write-mode=diff
 cargo build --verbose
multinode-demo/client.sh

@@ -1,7 +1,16 @@
-#!/bin/bash
-cd /home/ubuntu/solana
-#git pull
-export RUST_LOG=solana::crdt=trace
-# scp ubuntu@18.206.1.146:~/solana/leader.json .
-# scp ubuntu@18.206.1.146:~/solana/mint-demo.json .
-cat mint-demo.json | cargo run --release --bin solana-multinode-demo -- -l leader.json -c 10.0.5.179:8100 -n 3
+#!/bin/bash -e
+
+if [[ -z "$1" ]]; then
+  echo "usage: $0 [network path to solana repo on leader machine]"
+  exit 1
+fi
+
+LEADER="$1"
+
+set -x
+export RUST_LOG=solana=info
+rsync -v -e ssh "$LEADER/leader.json" .
+rsync -v -e ssh "$LEADER/mint-demo.json" .
+
+cargo run --release --bin solana-client-demo -- \
+  -l leader.json < mint-demo.json 2>&1 | tee client.log
multinode-demo/leader.sh

@@ -1,6 +1,4 @@
 #!/bin/bash
-cd /home/ubuntu/solana
-git pull
 export RUST_LOG=solana=info
-cat genesis.log | cargo run --release --features cuda --bin solana-testnode -- -s leader.json -b 8000 -d | grep INFO
-#cat genesis.log | cargo run --release --bin solana-testnode -- -s leader.json -b 8000 -d
+sudo sysctl -w net.core.rmem_max=26214400
+cat genesis.log | cargo run --bin solana-fullnode -- -l leader.json
multinode-demo/validator.sh

@@ -1,10 +1,23 @@
-#!/bin/bash
-cd /home/ubuntu/solana
-git pull
-scp ubuntu@18.206.1.146:~/solana/mint-demo.json .
-scp ubuntu@18.206.1.146:~/solana/leader.json .
-scp ubuntu@18.206.1.146:~/solana/genesis.log .
-scp ubuntu@18.206.1.146:~/solana/libcuda_verify_ed25519.a .
-export RUST_LOG=solana=info
-cat genesis.log | cargo run --release --features cuda --bin solana-testnode -- -s replicator.json -v leader.json -b 9000 -d 2>&1 | tee validator.log
+#!/bin/bash -e
+
+if [[ -z "$1" ]]; then
+  echo "usage: $0 [network path to solana repo on leader machine]"
+  exit 1
+fi
+
+LEADER="$1"
+
+set -x
+
+rsync -v -e ssh "$LEADER/mint-demo.json" .
+rsync -v -e ssh "$LEADER/leader.json" .
+rsync -v -e ssh "$LEADER/genesis.log" .
+rsync -v -e ssh "$LEADER/libcuda_verify_ed25519.a" .
+
+export RUST_LOG=solana=info
+
+sudo sysctl -w net.core.rmem_max=26214400
+
+cat genesis.log | \
+  cargo run --release --features=cuda --bin solana-fullnode -- \
+    -l validator.json -s validator.json -v leader.json -b 9000 -d 2>&1 | tee validator-tee.log
src/bank.rs (287 changes)

@@ -7,18 +7,17 @@ extern crate libc;
 
 use chrono::prelude::*;
 use entry::Entry;
-use event::Event;
 use hash::Hash;
 use mint::Mint;
-use plan::{Payment, Plan, Witness};
+use payment_plan::{Payment, PaymentPlan, Witness};
 use rayon::prelude::*;
 use signature::{KeyPair, PublicKey, Signature};
 use std::collections::hash_map::Entry::Occupied;
 use std::collections::{HashMap, HashSet, VecDeque};
 use std::result;
-use std::sync::RwLock;
 use std::sync::atomic::{AtomicIsize, AtomicUsize, Ordering};
-use transaction::{Instruction, Transaction};
+use std::sync::RwLock;
+use transaction::{Instruction, Plan, Transaction};
 
 pub const MAX_ENTRY_IDS: usize = 1024 * 4;
@@ -26,33 +25,13 @@ pub const MAX_ENTRY_IDS: usize = 1024 * 4;
 pub enum BankError {
     AccountNotFound(PublicKey),
     InsufficientFunds(PublicKey),
-    InvalidTransferSignature(Signature),
+    DuplicateSiganture(Signature),
+    LastIdNotFound(Hash),
+    NegativeTokens,
 }
 
 pub type Result<T> = result::Result<T, BankError>;
 
-/// Commit funds to the 'to' party.
-fn apply_payment(balances: &RwLock<HashMap<PublicKey, AtomicIsize>>, payment: &Payment) {
-    // First we check balances with a read lock to maximize potential parallelization.
-    if balances
-        .read()
-        .expect("'balances' read lock in apply_payment")
-        .contains_key(&payment.to)
-    {
-        let bals = balances.read().expect("'balances' read lock");
-        bals[&payment.to].fetch_add(payment.tokens as isize, Ordering::Relaxed);
-    } else {
-        // Now we know the key wasn't present a nanosecond ago, but it might be there
-        // by the time we aquire a write lock, so we'll have to check again.
-        let mut bals = balances.write().expect("'balances' write lock");
-        if bals.contains_key(&payment.to) {
-            bals[&payment.to].fetch_add(payment.tokens as isize, Ordering::Relaxed);
-        } else {
-            bals.insert(payment.to, AtomicIsize::new(payment.tokens as isize));
-        }
-    }
-}
-
 pub struct Bank {
     balances: RwLock<HashMap<PublicKey, AtomicIsize>>,
     pending: RwLock<HashMap<Signature, Plan>>,
@@ -65,16 +44,16 @@ pub struct Bank {
 impl Bank {
     /// Create an Bank using a deposit.
     pub fn new_from_deposit(deposit: &Payment) -> Self {
-        let balances = RwLock::new(HashMap::new());
-        apply_payment(&balances, deposit);
-        Bank {
-            balances,
+        let bank = Bank {
+            balances: RwLock::new(HashMap::new()),
             pending: RwLock::new(HashMap::new()),
             last_ids: RwLock::new(VecDeque::new()),
             time_sources: RwLock::new(HashSet::new()),
             last_time: RwLock::new(Utc.timestamp(0, 0)),
             transaction_count: AtomicUsize::new(0),
-        }
+        };
+        bank.apply_payment(deposit);
+        bank
     }
 
     /// Create an Bank with only a Mint. Typically used by unit tests.
@@ -88,6 +67,28 @@ impl Bank {
         bank
     }
 
+    /// Commit funds to the 'to' party.
+    fn apply_payment(&self, payment: &Payment) {
+        // First we check balances with a read lock to maximize potential parallelization.
+        if self.balances
+            .read()
+            .expect("'balances' read lock in apply_payment")
+            .contains_key(&payment.to)
+        {
+            let bals = self.balances.read().expect("'balances' read lock");
+            bals[&payment.to].fetch_add(payment.tokens as isize, Ordering::Relaxed);
+        } else {
+            // Now we know the key wasn't present a nanosecond ago, but it might be there
+            // by the time we aquire a write lock, so we'll have to check again.
+            let mut bals = self.balances.write().expect("'balances' write lock");
+            if bals.contains_key(&payment.to) {
+                bals[&payment.to].fetch_add(payment.tokens as isize, Ordering::Relaxed);
+            } else {
+                bals.insert(payment.to, AtomicIsize::new(payment.tokens as isize));
+            }
+        }
+    }
+
     /// Return the last entry ID registered
     pub fn last_id(&self) -> Hash {
         let last_ids = self.last_ids.read().expect("'last_ids' read lock");
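The relocated `apply_payment` keeps its double-checked locking idiom: probe under a read lock so concurrent credits to existing accounts stay parallel, and only take the write lock (re-checking, since another thread may have raced in) when a new account must be inserted. A minimal self-contained sketch of that idiom, with `u64` keys standing in for the real `PublicKey` type:

```rust
use std::collections::HashMap;
use std::sync::atomic::{AtomicIsize, Ordering};
use std::sync::RwLock;

// Credit `tokens` to `key`. Existing entries are bumped atomically under a
// read lock, so concurrent credits to hot accounts don't serialize on the map.
fn credit(balances: &RwLock<HashMap<u64, AtomicIsize>>, key: u64, tokens: isize) {
    if let Some(bal) = balances.read().unwrap().get(&key) {
        bal.fetch_add(tokens, Ordering::Relaxed);
        return;
    }
    // The key was absent a moment ago, but another thread may have inserted
    // it before we acquired the write lock, so check again via the entry API.
    let mut bals = balances.write().unwrap();
    bals.entry(key)
        .or_insert_with(|| AtomicIsize::new(0))
        .fetch_add(tokens, Ordering::Relaxed);
}

fn main() {
    let balances = RwLock::new(HashMap::new());
    credit(&balances, 42, 10);
    credit(&balances, 42, 5);
    assert_eq!(balances.read().unwrap()[&42].load(Ordering::Relaxed), 15);
}
```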
@@ -95,29 +96,29 @@ impl Bank {
         last_item.0
     }
 
-    fn reserve_signature(signatures: &RwLock<HashSet<Signature>>, sig: &Signature) -> bool {
+    fn reserve_signature(signatures: &RwLock<HashSet<Signature>>, sig: &Signature) -> Result<()> {
         if signatures
             .read()
             .expect("'signatures' read lock")
             .contains(sig)
         {
-            return false;
+            return Err(BankError::DuplicateSiganture(*sig));
         }
         signatures
             .write()
             .expect("'signatures' write lock")
             .insert(*sig);
-        true
+        Ok(())
     }
 
-    fn forget_signature(signatures: &RwLock<HashSet<Signature>>, sig: &Signature) -> bool {
+    fn forget_signature(signatures: &RwLock<HashSet<Signature>>, sig: &Signature) {
         signatures
             .write()
             .expect("'signatures' write lock in forget_signature")
-            .remove(sig)
+            .remove(sig);
     }
 
-    fn forget_signature_with_last_id(&self, sig: &Signature, last_id: &Hash) -> bool {
+    fn forget_signature_with_last_id(&self, sig: &Signature, last_id: &Hash) {
         if let Some(entry) = self.last_ids
             .read()
             .expect("'last_ids' read lock in forget_signature_with_last_id")
@@ -125,12 +126,11 @@ impl Bank {
             .rev()
             .find(|x| x.0 == *last_id)
         {
-            return Self::forget_signature(&entry.1, sig);
+            Self::forget_signature(&entry.1, sig);
         }
-        return false;
     }
 
-    fn reserve_signature_with_last_id(&self, sig: &Signature, last_id: &Hash) -> bool {
+    fn reserve_signature_with_last_id(&self, sig: &Signature, last_id: &Hash) -> Result<()> {
         if let Some(entry) = self.last_ids
             .read()
             .expect("'last_ids' read lock in reserve_signature_with_last_id")
@@ -140,7 +140,7 @@ impl Bank {
         {
             return Self::reserve_signature(&entry.1, sig);
         }
-        false
+        Err(BankError::LastIdNotFound(*last_id))
     }
 
     /// Tell the bank which Entry IDs exist on the ledger. This function
|
|||||||
|
|
||||||
/// Deduct tokens from the 'from' address the account has sufficient
|
/// Deduct tokens from the 'from' address the account has sufficient
|
||||||
/// funds and isn't a duplicate.
|
/// funds and isn't a duplicate.
|
||||||
pub fn process_verified_transaction_debits(&self, tr: &Transaction) -> Result<()> {
|
fn apply_debits(&self, tx: &Transaction) -> Result<()> {
|
||||||
if let Instruction::NewContract(contract) = &tr.instruction {
|
if let Instruction::NewContract(contract) = &tx.instruction {
|
||||||
trace!("Transaction {}", contract.tokens);
|
trace!("Transaction {}", contract.tokens);
|
||||||
|
if contract.tokens < 0 {
|
||||||
|
return Err(BankError::NegativeTokens);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
let bals = self.balances
|
let bals = self.balances
|
||||||
.read()
|
.read()
|
||||||
.expect("'balances' read lock in process_verified_transaction_debits");
|
.expect("'balances' read lock in apply_debits");
|
||||||
let option = bals.get(&tr.from);
|
let option = bals.get(&tx.from);
|
||||||
|
|
||||||
if option.is_none() {
|
if option.is_none() {
|
||||||
return Err(BankError::AccountNotFound(tr.from));
|
return Err(BankError::AccountNotFound(tx.from));
|
||||||
}
|
}
|
||||||
|
|
||||||
if !self.reserve_signature_with_last_id(&tr.sig, &tr.last_id) {
|
self.reserve_signature_with_last_id(&tx.sig, &tx.last_id)?;
|
||||||
return Err(BankError::InvalidTransferSignature(tr.sig));
|
|
||||||
}
|
|
||||||
|
|
||||||
loop {
|
loop {
|
||||||
let result = if let Instruction::NewContract(contract) = &tr.instruction {
|
let result = if let Instruction::NewContract(contract) = &tx.instruction {
|
||||||
let bal = option.expect("assignment of option to bal");
|
let bal = option.expect("assignment of option to bal");
|
||||||
let current = bal.load(Ordering::Relaxed) as i64;
|
let current = bal.load(Ordering::Relaxed) as i64;
|
||||||
|
|
||||||
if current < contract.tokens {
|
if current < contract.tokens {
|
||||||
self.forget_signature_with_last_id(&tr.sig, &tr.last_id);
|
self.forget_signature_with_last_id(&tx.sig, &tx.last_id);
|
||||||
return Err(BankError::InsufficientFunds(tr.from));
|
return Err(BankError::InsufficientFunds(tx.from));
|
||||||
}
|
}
|
||||||
|
|
||||||
bal.compare_exchange(
|
bal.compare_exchange(
|
||||||
@@ -206,103 +207,82 @@ impl Bank {
         }
     }
 
-    pub fn process_verified_transaction_credits(&self, tr: &Transaction) {
-        match &tr.instruction {
+    fn apply_credits(&self, tx: &Transaction) {
+        match &tx.instruction {
             Instruction::NewContract(contract) => {
                 let mut plan = contract.plan.clone();
                 plan.apply_witness(&Witness::Timestamp(*self.last_time
                     .read()
-                    .expect("timestamp creation in process_verified_transaction_credits")));
+                    .expect("timestamp creation in apply_credits")));
 
                 if let Some(ref payment) = plan.final_payment() {
-                    apply_payment(&self.balances, payment);
+                    self.apply_payment(payment);
                 } else {
                     let mut pending = self.pending
                         .write()
-                        .expect("'pending' write lock in process_verified_transaction_credits");
-                    pending.insert(tr.sig, plan);
+                        .expect("'pending' write lock in apply_credits");
+                    pending.insert(tx.sig, plan);
                 }
             }
             Instruction::ApplyTimestamp(dt) => {
-                let _ = self.process_verified_timestamp(tr.from, *dt);
+                let _ = self.apply_timestamp(tx.from, *dt);
             }
             Instruction::ApplySignature(tx_sig) => {
-                let _ = self.process_verified_sig(tr.from, *tx_sig);
+                let _ = self.apply_signature(tx.from, *tx_sig);
             }
         }
     }
 
-    /// Process a Transaction that has already been verified.
-    pub fn process_verified_transaction(&self, tr: &Transaction) -> Result<()> {
-        self.process_verified_transaction_debits(tr)?;
-        self.process_verified_transaction_credits(tr);
+    /// Process a Transaction.
+    fn process_transaction(&self, tx: &Transaction) -> Result<()> {
+        self.apply_debits(tx)?;
+        self.apply_credits(tx);
         Ok(())
     }
 
-    /// Process a batch of verified transactions.
-    pub fn process_verified_transactions(&self, trs: Vec<Transaction>) -> Vec<Result<Transaction>> {
+    /// Process a batch of transactions.
+    pub fn process_transactions(&self, txs: Vec<Transaction>) -> Vec<Result<Transaction>> {
         // Run all debits first to filter out any transactions that can't be processed
         // in parallel deterministically.
-        info!("processing Transactions {}", trs.len());
-        let results: Vec<_> = trs.into_par_iter()
-            .map(|tr| self.process_verified_transaction_debits(&tr).map(|_| tr))
+        info!("processing Transactions {}", txs.len());
+        let results: Vec<_> = txs.into_par_iter()
+            .map(|tx| self.apply_debits(&tx).map(|_| tx))
             .collect(); // Calling collect() here forces all debits to complete before moving on.
 
         results
             .into_par_iter()
             .map(|result| {
-                result.map(|tr| {
-                    self.process_verified_transaction_credits(&tr);
-                    tr
+                result.map(|tx| {
+                    self.apply_credits(&tx);
+                    tx
                 })
             })
             .collect()
     }
 
-    fn partition_events(events: Vec<Event>) -> (Vec<Transaction>, Vec<Event>) {
-        (
-            events
-                .into_iter()
-                .map(|Event::Transaction(tr)| tr)
-                .collect(),
-            vec![],
-        )
-    }
-
-    pub fn process_verified_events(&self, events: Vec<Event>) -> Vec<Result<Event>> {
-        let (trs, rest) = Self::partition_events(events);
-        let mut results: Vec<_> = self.process_verified_transactions(trs)
-            .into_iter()
-            .map(|x| x.map(Event::Transaction))
-            .collect();
-
-        for event in rest {
-            results.push(self.process_verified_event(event));
-        }
-
-        results
-    }
-
-    pub fn process_verified_entries(&self, entries: Vec<Entry>) -> Result<()> {
+    pub fn process_entries<I>(&self, entries: I) -> Result<()>
+    where
+        I: IntoIterator<Item = Entry>,
+    {
         for entry in entries {
-            self.register_entry_id(&entry.id);
-            for result in self.process_verified_events(entry.events) {
+            for result in self.process_transactions(entry.transactions) {
                 result?;
             }
+            self.register_entry_id(&entry.id);
         }
         Ok(())
     }
 
-    /// Process a Witness Signature that has already been verified.
-    fn process_verified_sig(&self, from: PublicKey, tx_sig: Signature) -> Result<()> {
+    /// Process a Witness Signature.
+    fn apply_signature(&self, from: PublicKey, tx_sig: Signature) -> Result<()> {
         if let Occupied(mut e) = self.pending
             .write()
-            .expect("write() in process_verified_sig")
+            .expect("write() in apply_signature")
             .entry(tx_sig)
         {
             e.get_mut().apply_witness(&Witness::Signature(from));
             if let Some(payment) = e.get().final_payment() {
-                apply_payment(&self.balances, &payment);
+                self.apply_payment(&payment);
                 e.remove_entry();
             }
         };
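`process_transactions` relies on `collect()` acting as a barrier between two parallel phases: every debit finishes (and every invalid transaction is filtered out) before any credit runs, which keeps parallel execution deterministic. A toy sketch of that two-phase shape, using plain `i64` amounts and the same rayon crate the real code uses (add `rayon` to Cargo.toml to run it):

```rust
use rayon::prelude::*;

// Phase-1 work: reject "insufficient funds" (negative amounts here).
fn apply_debit(tx: i64) -> Result<i64, ()> {
    if tx >= 0 { Ok(tx) } else { Err(()) }
}

fn main() {
    let txs = vec![5, -3, 7];

    // Phase 1: debits in parallel. collect() forces every debit to
    // complete before any phase-2 work begins.
    let results: Vec<_> = txs.into_par_iter().map(apply_debit).collect();

    // Phase 2: credits in parallel, only for transactions whose debit
    // succeeded. rayon's collect() preserves the original order.
    let credited: Vec<i64> = results
        .into_par_iter()
        .filter_map(|r| r.ok())
        .collect();

    assert_eq!(credited, vec![5, 7]);
}
```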
@@ -310,8 +290,8 @@ impl Bank {
         Ok(())
     }
 
-    /// Process a Witness Timestamp that has already been verified.
-    fn process_verified_timestamp(&self, from: PublicKey, dt: DateTime<Utc>) -> Result<()> {
+    /// Process a Witness Timestamp.
+    fn apply_timestamp(&self, from: PublicKey, dt: DateTime<Utc>) -> Result<()> {
         // If this is the first timestamp we've seen, it probably came from the genesis block,
         // so we'll trust it.
         if *self.last_time
@@ -344,13 +324,13 @@ impl Bank {
         // double-spend if it enters before the modified plan is removed from 'pending'.
         let mut pending = self.pending
             .write()
-            .expect("'pending' write lock in process_verified_timestamp");
+            .expect("'pending' write lock in apply_timestamp");
         for (key, plan) in pending.iter_mut() {
             plan.apply_witness(&Witness::Timestamp(*self.last_time
                 .read()
                 .expect("'last_time' read lock when creating timestamp")));
             if let Some(ref payment) = plan.final_payment() {
-                apply_payment(&self.balances, payment);
+                self.apply_payment(payment);
                 completed.push(key.clone());
             }
         }
@@ -362,14 +342,6 @@ impl Bank {
         Ok(())
     }
 
-    /// Process an Transaction or Witness that has already been verified.
-    pub fn process_verified_event(&self, event: Event) -> Result<Event> {
-        match event {
-            Event::Transaction(ref tr) => self.process_verified_transaction(tr),
-        }?;
-        Ok(event)
-    }
-
     /// Create, sign, and process a Transaction from `keypair` to `to` of
     /// `n` tokens where `last_id` is the last Entry ID observed by the client.
     pub fn transfer(
@@ -379,9 +351,9 @@ impl Bank {
         to: PublicKey,
         last_id: Hash,
     ) -> Result<Signature> {
-        let tr = Transaction::new(keypair, to, n, last_id);
-        let sig = tr.sig;
-        self.process_verified_transaction(&tr).map(|_| sig)
+        let tx = Transaction::new(keypair, to, n, last_id);
+        let sig = tx.sig;
+        self.process_transaction(&tx).map(|_| sig)
     }
 
     /// Create, sign, and process a postdated Transaction from `keypair`
@@ -395,9 +367,9 @@ impl Bank {
         dt: DateTime<Utc>,
         last_id: Hash,
     ) -> Result<Signature> {
-        let tr = Transaction::new_on_date(keypair, to, dt, n, last_id);
-        let sig = tr.sig;
-        self.process_verified_transaction(&tr).map(|_| sig)
+        let tx = Transaction::new_on_date(keypair, to, dt, n, last_id);
+        let sig = tx.sig;
+        self.process_transaction(&tx).map(|_| sig)
     }
 
     pub fn get_balance(&self, pubkey: &PublicKey) -> Option<i64> {
@@ -436,6 +408,18 @@ mod tests {
         assert_eq!(bank.transaction_count(), 2);
     }
 
+    #[test]
+    fn test_invalid_tokens() {
+        let mint = Mint::new(1);
+        let pubkey = KeyPair::new().pubkey();
+        let bank = Bank::new(&mint);
+        assert_eq!(
+            bank.transfer(-1, &mint.keypair(), pubkey, mint.last_id()),
+            Err(BankError::NegativeTokens)
+        );
+        assert_eq!(bank.transaction_count(), 0);
+    }
+
     #[test]
     fn test_account_not_found() {
         let mint = Mint::new(1);
@@ -498,14 +482,14 @@ mod tests {
 
         // Now, acknowledge the time in the condition occurred and
         // that pubkey's funds are now available.
-        bank.process_verified_timestamp(mint.pubkey(), dt).unwrap();
+        bank.apply_timestamp(mint.pubkey(), dt).unwrap();
         assert_eq!(bank.get_balance(&pubkey), Some(1));
 
-        // tx count is still 1, because we chose not to count timestamp events
+        // tx count is still 1, because we chose not to count timestamp transactions
        // tx count.
         assert_eq!(bank.transaction_count(), 1);
 
-        bank.process_verified_timestamp(mint.pubkey(), dt).unwrap(); // <-- Attack! Attempt to process completed transaction.
+        bank.apply_timestamp(mint.pubkey(), dt).unwrap(); // <-- Attack! Attempt to process completed transaction.
         assert_ne!(bank.get_balance(&pubkey), Some(2));
     }
 
@@ -515,7 +499,7 @@ mod tests {
         let bank = Bank::new(&mint);
         let pubkey = KeyPair::new().pubkey();
         let dt = Utc::now();
-        bank.process_verified_timestamp(mint.pubkey(), dt).unwrap();
+        bank.apply_timestamp(mint.pubkey(), dt).unwrap();
 
         // It's now past now, so this transfer should be processed immediately.
         bank.transfer_on_date(1, &mint.keypair(), pubkey, dt, mint.last_id())
@@ -545,24 +529,30 @@ mod tests {
         assert_eq!(bank.get_balance(&pubkey), None);
 
         // Now, cancel the trancaction. Mint gets her funds back, pubkey never sees them.
-        bank.process_verified_sig(mint.pubkey(), sig).unwrap();
+        bank.apply_signature(mint.pubkey(), sig).unwrap();
         assert_eq!(bank.get_balance(&mint.pubkey()), Some(1));
         assert_eq!(bank.get_balance(&pubkey), None);
 
         // Assert cancel doesn't cause count to go backward.
         assert_eq!(bank.transaction_count(), 1);
 
-        bank.process_verified_sig(mint.pubkey(), sig).unwrap(); // <-- Attack! Attempt to cancel completed transaction.
+        bank.apply_signature(mint.pubkey(), sig).unwrap(); // <-- Attack! Attempt to cancel completed transaction.
         assert_ne!(bank.get_balance(&mint.pubkey()), Some(2));
     }
 
     #[test]
-    fn test_duplicate_event_signature() {
+    fn test_duplicate_transaction_signature() {
         let mint = Mint::new(1);
         let bank = Bank::new(&mint);
         let sig = Signature::default();
-        assert!(bank.reserve_signature_with_last_id(&sig, &mint.last_id()));
-        assert!(!bank.reserve_signature_with_last_id(&sig, &mint.last_id()));
+        assert!(
+            bank.reserve_signature_with_last_id(&sig, &mint.last_id())
+                .is_ok()
+        );
+        assert_eq!(
+            bank.reserve_signature_with_last_id(&sig, &mint.last_id()),
+            Err(BankError::DuplicateSiganture(sig))
+        );
     }
 
     #[test]
@@ -570,9 +560,13 @@ mod tests {
         let mint = Mint::new(1);
         let bank = Bank::new(&mint);
         let sig = Signature::default();
-        bank.reserve_signature_with_last_id(&sig, &mint.last_id());
-        assert!(bank.forget_signature_with_last_id(&sig, &mint.last_id()));
-        assert!(!bank.forget_signature_with_last_id(&sig, &mint.last_id()));
+        bank.reserve_signature_with_last_id(&sig, &mint.last_id())
+            .unwrap();
+        bank.forget_signature_with_last_id(&sig, &mint.last_id());
+        assert!(
+            bank.reserve_signature_with_last_id(&sig, &mint.last_id())
+                .is_ok()
+        );
     }
 
     #[test]
@@ -585,7 +579,10 @@ mod tests {
             bank.register_entry_id(&last_id);
         }
         // Assert we're no longer able to use the oldest entry ID.
-        assert!(!bank.reserve_signature_with_last_id(&sig, &mint.last_id()));
+        assert_eq!(
+            bank.reserve_signature_with_last_id(&sig, &mint.last_id()),
+            Err(BankError::LastIdNotFound(mint.last_id()))
+        );
     }
 
     #[test]
@@ -593,10 +590,10 @@ mod tests {
         let mint = Mint::new(2);
        let bank = Bank::new(&mint);
         let keypair = KeyPair::new();
-        let tr0 = Transaction::new(&mint.keypair(), keypair.pubkey(), 2, mint.last_id());
-        let tr1 = Transaction::new(&keypair, mint.pubkey(), 1, mint.last_id());
-        let trs = vec![tr0, tr1];
-        let results = bank.process_verified_transactions(trs);
+        let tx0 = Transaction::new(&mint.keypair(), keypair.pubkey(), 2, mint.last_id());
+        let tx1 = Transaction::new(&keypair, mint.pubkey(), 1, mint.last_id());
+        let txs = vec![tx0, tx1];
+        let results = bank.process_transactions(txs);
         assert!(results[1].is_err());
 
         // Assert bad transactions aren't counted.
@@ -614,7 +611,7 @@ mod bench {
     use signature::KeyPairUtil;
 
     #[bench]
-    fn process_verified_event_bench(bencher: &mut Bencher) {
+    fn bench_process_transaction(bencher: &mut Bencher) {
         let mint = Mint::new(100_000_000);
         let bank = Bank::new(&mint);
         // Create transactions between unrelated parties.
@@ -623,16 +620,16 @@ mod bench {
             .map(|i| {
                 // Seed the 'from' account.
                 let rando0 = KeyPair::new();
-                let tr = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, mint.last_id());
-                bank.process_verified_transaction(&tr).unwrap();
+                let tx = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, mint.last_id());
+                bank.process_transaction(&tx).unwrap();
 
                 // Seed the 'to' account and a cell for its signature.
                 let last_id = hash(&serialize(&i).unwrap()); // Unique hash
                 bank.register_entry_id(&last_id);
 
                 let rando1 = KeyPair::new();
-                let tr = Transaction::new(&rando0, rando1.pubkey(), 1, last_id);
-                bank.process_verified_transaction(&tr).unwrap();
+                let tx = Transaction::new(&rando0, rando1.pubkey(), 1, last_id);
+                bank.process_transaction(&tx).unwrap();
 
                 // Finally, return a transaction that's unique
                 Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
@@ -645,7 +642,7 @@ mod bench {
     }
 
         assert!(
-            bank.process_verified_transactions(transactions.clone())
+            bank.process_transactions(transactions.clone())
                 .iter()
                 .all(|x| x.is_ok())
         );
@@ -1,21 +1,21 @@
-//! The `banking_stage` processes Event messages.
+//! The `banking_stage` processes Transaction messages.

 use bank::Bank;
 use bincode::deserialize;
-use event::Event;
 use packet;
 use packet::SharedPackets;
 use rayon::prelude::*;
 use record_stage::Signal;
 use result::Result;
 use std::net::SocketAddr;
-use std::sync::Arc;
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::mpsc::{channel, Receiver, Sender};
-use std::thread::{spawn, JoinHandle};
+use std::sync::Arc;
+use std::thread::{Builder, JoinHandle};
 use std::time::Duration;
 use std::time::Instant;
 use timing;
+use transaction::Transaction;

 pub struct BankingStage {
     pub thread_hdl: JoinHandle<()>,
@@ -30,26 +30,29 @@ impl BankingStage {
         packet_recycler: packet::PacketRecycler,
     ) -> Self {
         let (signal_sender, signal_receiver) = channel();
-        let thread_hdl = spawn(move || loop {
-            let e = Self::process_packets(
-                bank.clone(),
-                &verified_receiver,
-                &signal_sender,
-                &packet_recycler,
-            );
-            if e.is_err() {
-                if exit.load(Ordering::Relaxed) {
-                    break;
-                }
-            }
-        });
+        let thread_hdl = Builder::new()
+            .name("solana-banking-stage".to_string())
+            .spawn(move || loop {
+                let e = Self::process_packets(
+                    bank.clone(),
+                    &verified_receiver,
+                    &signal_sender,
+                    &packet_recycler,
+                );
+                if e.is_err() {
+                    if exit.load(Ordering::Relaxed) {
+                        break;
+                    }
+                }
+            })
+            .unwrap();
         BankingStage {
             thread_hdl,
             signal_receiver,
         }
     }

-    fn deserialize_events(p: &packet::Packets) -> Vec<Option<(Event, SocketAddr)>> {
+    fn deserialize_transactions(p: &packet::Packets) -> Vec<Option<(Transaction, SocketAddr)>> {
         p.packets
             .par_iter()
             .map(|x| {
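Note: the hunk above swaps bare thread::spawn for std::thread::Builder so the stage can name its worker thread. A minimal, runnable illustration of that std-library pattern (the thread name is the one used in the diff):

use std::thread::Builder;

fn main() {
    // Named threads show up in panic messages, debuggers, and profilers,
    // which is the point of switching from thread::spawn to Builder.
    let thread_hdl = Builder::new()
        .name("solana-banking-stage".to_string())
        .spawn(|| println!("banking stage worker running"))
        .unwrap(); // Builder::spawn returns io::Result<JoinHandle<T>>
    thread_hdl.join().unwrap();
}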
@@ -79,33 +82,33 @@ impl BankingStage {
         );
         let proc_start = Instant::now();
         for (msgs, vers) in mms {
-            let events = Self::deserialize_events(&msgs.read().unwrap());
-            reqs_len += events.len();
-            let events = events
+            let transactions = Self::deserialize_transactions(&msgs.read().unwrap());
+            reqs_len += transactions.len();
+            let transactions = transactions
                 .into_iter()
                 .zip(vers)
-                .filter_map(|(event, ver)| match event {
+                .filter_map(|(tx, ver)| match tx {
                     None => None,
-                    Some((event, _addr)) => if event.verify() && ver != 0 {
-                        Some(event)
+                    Some((tx, _addr)) => if tx.verify_plan() && ver != 0 {
+                        Some(tx)
                     } else {
                         None
                     },
                 })
                 .collect();

-            debug!("process_events");
-            let results = bank.process_verified_events(events);
-            let events = results.into_iter().filter_map(|x| x.ok()).collect();
-            signal_sender.send(Signal::Events(events))?;
-            debug!("done process_events");
+            debug!("process_transactions");
+            let results = bank.process_transactions(transactions);
+            let transactions = results.into_iter().filter_map(|x| x.ok()).collect();
+            signal_sender.send(Signal::Events(transactions))?;
+            debug!("done process_transactions");

             packet_recycler.recycle(msgs);
         }
         let total_time_s = timing::duration_as_s(&proc_start.elapsed());
         let total_time_ms = timing::duration_as_ms(&proc_start.elapsed());
         info!(
-            "@{:?} done processing event batches: {} time: {:?}ms reqs: {} reqs/s: {}",
+            "@{:?} done processing transaction batches: {} time: {:?}ms reqs: {} reqs/s: {}",
             timing::timestamp(),
             mms_len,
             total_time_ms,
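Note: deserialize_transactions maps each packet to an Option in parallel, so one malformed packet is dropped rather than failing the whole batch. A standalone sketch of that shape with stand-in types (assumes rayon 1.x, bincode 1.x, and serde_derive; the versions are an assumption, not pinned by this diff):

extern crate bincode;
extern crate rayon;
#[macro_use]
extern crate serde_derive;

use rayon::prelude::*;

#[derive(Serialize, Deserialize, Debug)]
struct Tx {
    from: u64,
    to: u64,
    tokens: i64,
}

fn main() {
    let txs = vec![Tx { from: 1, to: 2, tokens: 5 }];
    let mut packets: Vec<Vec<u8>> = txs.iter().map(|t| bincode::serialize(t).unwrap()).collect();
    packets.push(vec![0xff]); // a malformed packet

    // Bad packets become None instead of aborting the batch,
    // mirroring deserialize_transactions above.
    let decoded: Vec<Option<Tx>> = packets
        .par_iter()
        .map(|bytes| bincode::deserialize(bytes).ok())
        .collect();
    assert_eq!(decoded.iter().filter(|t| t.is_some()).count(), 1);
}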
@@ -120,7 +123,6 @@ impl BankingStage {

 //use bank::Bank;
 //use entry::Entry;
-//use event::Event;
 //use hash::Hash;
 //use record_stage::RecordStage;
 //use record_stage::Signal;
@@ -128,18 +130,17 @@ impl BankingStage {
 //use std::sync::mpsc::{channel, Sender};
 //use std::sync::{Arc, Mutex};
 //use std::time::Duration;
+//use transaction::Transaction;
 //
 //#[cfg(test)]
 //mod tests {
 //    use bank::Bank;
-//    use event::Event;
-//    use event_processor::EventProcessor;
 //    use mint::Mint;
 //    use signature::{KeyPair, KeyPairUtil};
 //    use transaction::Transaction;
 //
 //    #[test]
-//    // TODO: Move this test banking_stage. Calling process_events() directly
+//    // TODO: Move this test banking_stage. Calling process_transactions() directly
 //    // defeats the purpose of this test.
 //    fn test_banking_sequential_consistency() {
 //        // In this attack we'll demonstrate that a verifier can interpret the ledger
@@ -147,18 +148,18 @@ impl BankingStage {
 //        // Entry OR if the verifier tries to parallelize across multiple Entries.
 //        let mint = Mint::new(2);
 //        let bank = Bank::new(&mint);
-//        let event_processor = EventProcessor::new(bank, &mint.last_id(), None);
+//        let banking_stage = EventProcessor::new(bank, &mint.last_id(), None);
 //
 //        // Process a batch that includes a transaction that receives two tokens.
 //        let alice = KeyPair::new();
-//        let tr = Transaction::new(&mint.keypair(), alice.pubkey(), 2, mint.last_id());
-//        let events = vec![Event::Transaction(tr)];
-//        let entry0 = event_processor.process_events(events).unwrap();
+//        let tx = Transaction::new(&mint.keypair(), alice.pubkey(), 2, mint.last_id());
+//        let transactions = vec![tx];
+//        let entry0 = banking_stage.process_transactions(transactions).unwrap();
 //
 //        // Process a second batch that spends one of those tokens.
-//        let tr = Transaction::new(&alice, mint.pubkey(), 1, mint.last_id());
-//        let events = vec![Event::Transaction(tr)];
-//        let entry1 = event_processor.process_events(events).unwrap();
+//        let tx = Transaction::new(&alice, mint.pubkey(), 1, mint.last_id());
+//        let transactions = vec![tx];
+//        let entry1 = banking_stage.process_transactions(transactions).unwrap();
 //
 //        // Collect the ledger and feed it to a new bank.
 //        let entries = vec![entry0, entry1];
@@ -170,7 +171,7 @@ impl BankingStage {
 //        for entry in entries {
 //            assert!(
 //                bank
-//                    .process_verified_events(entry.events)
+//                    .process_transactions(entry.transactions)
 //                    .into_iter()
 //                    .all(|x| x.is_ok())
 //            );
@@ -185,7 +186,6 @@ impl BankingStage {
 //    use self::test::Bencher;
 //    use bank::{Bank, MAX_ENTRY_IDS};
 //    use bincode::serialize;
-//    use event_processor::*;
 //    use hash::hash;
 //    use mint::Mint;
 //    use rayon::prelude::*;
@@ -195,7 +195,7 @@ impl BankingStage {
 //    use transaction::Transaction;
 //
 //    #[bench]
-//    fn process_events_bench(_bencher: &mut Bencher) {
+//    fn bench_process_transactions(_bencher: &mut Bencher) {
 //        let mint = Mint::new(100_000_000);
 //        let bank = Bank::new(&mint);
 //        // Create transactions between unrelated parties.
@@ -217,36 +217,31 @@ impl BankingStage {
 //
 //                // Seed the 'from' account.
 //                let rando0 = KeyPair::new();
-//                let tr = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, last_id);
-//                bank.process_verified_transaction(&tr).unwrap();
+//                let tx = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, last_id);
+//                bank.process_transaction(&tx).unwrap();
 //
 //                let rando1 = KeyPair::new();
-//                let tr = Transaction::new(&rando0, rando1.pubkey(), 2, last_id);
-//                bank.process_verified_transaction(&tr).unwrap();
+//                let tx = Transaction::new(&rando0, rando1.pubkey(), 2, last_id);
+//                bank.process_transaction(&tx).unwrap();
 //
 //                // Finally, return a transaction that's unique
 //                Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
 //            })
 //            .collect();
 //
-//        let events: Vec<_> = transactions
-//            .into_iter()
-//            .map(|tr| Event::Transaction(tr))
-//            .collect();
-//
-//        let event_processor = EventProcessor::new(bank, &mint.last_id(), None);
+//        let banking_stage = EventProcessor::new(bank, &mint.last_id(), None);
 //
 //        let now = Instant::now();
-//        assert!(event_processor.process_events(events).is_ok());
+//        assert!(banking_stage.process_transactions(transactions).is_ok());
 //        let duration = now.elapsed();
 //        let sec = duration.as_secs() as f64 + duration.subsec_nanos() as f64 / 1_000_000_000.0;
 //        let tps = txs as f64 / sec;
 //
 //        // Ensure that all transactions were successfully logged.
-//        drop(event_processor.historian_input);
-//        let entries: Vec<Entry> = event_processor.output.lock().unwrap().iter().collect();
+//        drop(banking_stage.historian_input);
+//        let entries: Vec<Entry> = banking_stage.output.lock().unwrap().iter().collect();
 //        assert_eq!(entries.len(), 1);
-//        assert_eq!(entries[0].events.len(), txs as usize);
+//        assert_eq!(entries[0].transactions.len(), txs as usize);
 //
 //        println!("{} tps", tps);
 //    }
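Note: the commented-out test_banking_sequential_consistency above guards against a real hazard: the second batch spends a token received in the first, so a verifier that reorders or parallelizes the two sees an invalid spend. A toy, self-contained illustration of why that ordering matters:

use std::collections::HashMap;

fn transfer(
    balances: &mut HashMap<&'static str, i64>,
    from: &'static str,
    to: &'static str,
    tokens: i64,
) -> Result<(), ()> {
    if balances.get(from).cloned().unwrap_or(0) < tokens {
        return Err(()); // insufficient funds
    }
    *balances.entry(from).or_insert(0) -= tokens;
    *balances.entry(to).or_insert(0) += tokens;
    Ok(())
}

fn main() {
    // In ledger order both transfers succeed: mint pays alice two tokens,
    // then alice spends one of them.
    let mut balances = HashMap::new();
    balances.insert("mint", 2);
    assert!(transfer(&mut balances, "mint", "alice", 2).is_ok());
    assert!(transfer(&mut balances, "alice", "mint", 1).is_ok());

    // Reordered, the spend precedes the deposit and is rejected, which is
    // the inconsistency the test is designed to catch.
    let mut balances = HashMap::new();
    balances.insert("mint", 2);
    assert!(transfer(&mut balances, "alice", "mint", 1).is_err());
}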
@@ -258,29 +253,29 @@ mod bench {
     use self::test::Bencher;
     use bank::*;
     use banking_stage::BankingStage;
-    use event::Event;
     use mint::Mint;
     use packet::{to_packets, PacketRecycler};
     use record_stage::Signal;
     use signature::{KeyPair, KeyPairUtil};
     use std::iter;
-    use std::sync::Arc;
     use std::sync::mpsc::channel;
+    use std::sync::Arc;
+    use transaction::Transaction;

     #[bench]
-    fn stage_bench(bencher: &mut Bencher) {
+    fn bench_stage(bencher: &mut Bencher) {
         let tx = 100_usize;
         let mint = Mint::new(1_000_000_000);
         let pubkey = KeyPair::new().pubkey();

-        let events: Vec<_> = (0..tx)
-            .map(|i| Event::new_transaction(&mint.keypair(), pubkey, i as i64, mint.last_id()))
+        let transactions: Vec<_> = (0..tx)
+            .map(|i| Transaction::new(&mint.keypair(), pubkey, i as i64, mint.last_id()))
             .collect();

         let (verified_sender, verified_receiver) = channel();
         let (signal_sender, signal_receiver) = channel();
         let packet_recycler = PacketRecycler::default();
-        let verified: Vec<_> = to_packets(&packet_recycler, events)
+        let verified: Vec<_> = to_packets(&packet_recycler, transactions)
             .into_iter()
             .map(|x| {
                 let len = (*x).read().unwrap().packets.len();
@@ -298,8 +293,8 @@ mod bench {
             &packet_recycler,
         ).unwrap();
         let signal = signal_receiver.recv().unwrap();
-        if let Signal::Events(ref events) = signal {
-            assert_eq!(events.len(), tx);
+        if let Signal::Events(ref transactions) = signal {
+            assert_eq!(transactions.len(), tx);
         } else {
             assert!(false);
         }
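Note: in the reworked bench, the stage still reports each processed batch through the same Signal::Events variant; only the payload type changed from Event to Transaction. A freestanding sketch of that channel handshake (this Signal enum is a stand-in for record_stage::Signal):

use std::sync::mpsc::channel;

#[allow(dead_code)]
enum Signal {
    Tick,
    Events(Vec<u64>), // the real variant carries Vec<Transaction>
}

fn main() {
    let (signal_sender, signal_receiver) = channel();
    signal_sender.send(Signal::Events(vec![1, 2, 3])).unwrap();

    // The receiver pattern-matches on the signal, as bench_stage does.
    if let Signal::Events(ref transactions) = signal_receiver.recv().unwrap() {
        assert_eq!(transactions.len(), 3);
    } else {
        panic!("expected an Events batch");
    }
}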
@@ -1,23 +1,30 @@
-extern crate futures;
 extern crate getopts;
 extern crate isatty;
+extern crate pnet;
 extern crate rayon;
 extern crate serde_json;
 extern crate solana;

-use futures::Future;
 use getopts::Options;
 use isatty::stdin_isatty;
+use pnet::datalink;
 use rayon::prelude::*;
+use solana::crdt::{Crdt, ReplicatedData};
+use solana::data_replicator::DataReplicator;
 use solana::mint::MintDemo;
-use solana::signature::{GenKeys, KeyPairUtil};
+use solana::signature::{GenKeys, KeyPair, KeyPairUtil};
+use solana::streamer::default_window;
 use solana::thin_client::ThinClient;
 use solana::transaction::Transaction;
 use std::env;
+use std::fs::File;
 use std::io::{stdin, Read};
-use std::net::{SocketAddr, UdpSocket};
+use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
 use std::process::exit;
+use std::sync::atomic::{AtomicBool, Ordering};
+use std::sync::{Arc, RwLock};
 use std::thread::sleep;
+use std::thread::JoinHandle;
 use std::time::Duration;
 use std::time::Instant;
@@ -30,15 +37,32 @@ fn print_usage(program: &str, opts: Options) {
     print!("{}", opts.usage(&brief));
 }

+fn get_ip_addr() -> Option<IpAddr> {
+    for iface in datalink::interfaces() {
+        for p in iface.ips {
+            if !p.ip().is_loopback() && !p.ip().is_multicast() {
+                return Some(p.ip());
+            }
+        }
+    }
+    None
+}
+
 fn main() {
     let mut threads = 4usize;
-    let mut server_addr: String = "127.0.0.1:8000".to_string();
-    let mut requests_addr: String = "127.0.0.1:8010".to_string();
+    let mut num_nodes = 1usize;

     let mut opts = Options::new();
-    opts.optopt("s", "", "server address", "host:port");
-    opts.optopt("c", "", "client address", "host:port");
+    opts.optopt("l", "", "leader", "leader.json");
+    opts.optopt("c", "", "client port", "port");
     opts.optopt("t", "", "number of threads", &format!("{}", threads));
+    opts.optflag("d", "dyn", "detect network address dynamically");
+    opts.optopt(
+        "n",
+        "",
+        "number of nodes to converge to",
+        &format!("{}", num_nodes),
+    );
     opts.optflag("h", "help", "print help");
     let args: Vec<String> = env::args().collect();
     let matches = match opts.parse(&args[1..]) {
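Note: the new -d flag relies on get_ip_addr above, which scans the machine's interfaces via pnet::datalink and advertises the first address that is neither loopback nor multicast. Extracted as a runnable program (assumes a pnet dependency; the version is not pinned by this diff):

extern crate pnet;

use pnet::datalink;
use std::net::IpAddr;

// Same selection rule as the diff: first non-loopback, non-multicast IP.
fn get_ip_addr() -> Option<IpAddr> {
    for iface in datalink::interfaces() {
        for p in iface.ips {
            if !p.ip().is_loopback() && !p.ip().is_multicast() {
                return Some(p.ip());
            }
        }
    }
    None
}

fn main() {
    match get_ip_addr() {
        Some(ip) => println!("advertising {}", ip),
        None => eprintln!("no routable interface found"),
    }
}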
@@ -54,19 +78,38 @@ fn main() {
         print_usage(&program, opts);
         return;
     }
-    if matches.opt_present("s") {
-        server_addr = matches.opt_str("s").unwrap();
-    }
+    let mut addr: SocketAddr = "0.0.0.0:8100".parse().unwrap();
     if matches.opt_present("c") {
-        requests_addr = matches.opt_str("c").unwrap();
+        let port = matches.opt_str("c").unwrap().parse().unwrap();
+        addr.set_port(port);
     }
+    if matches.opt_present("d") {
+        addr.set_ip(get_ip_addr().unwrap());
+    }
+    let client_addr: Arc<RwLock<SocketAddr>> = Arc::new(RwLock::new(addr));
     if matches.opt_present("t") {
         threads = matches.opt_str("t").unwrap().parse().expect("integer");
     }
+    if matches.opt_present("n") {
+        num_nodes = matches.opt_str("n").unwrap().parse().expect("integer");
+    }

-    let mut events_addr: SocketAddr = requests_addr.parse().unwrap();
-    let requests_port = events_addr.port();
-    events_addr.set_port(requests_port + 1);
+    let leader = if matches.opt_present("l") {
+        read_leader(matches.opt_str("l").unwrap())
+    } else {
+        let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
+        ReplicatedData::new_leader(&server_addr)
+    };
+
+    let signal = Arc::new(AtomicBool::new(false));
+    let mut c_threads = vec![];
+    let validators = converge(
+        &client_addr,
+        &leader,
+        signal.clone(),
+        num_nodes + 2,
+        &mut c_threads,
+    );

     if stdin_isatty() {
         eprintln!("nothing found on stdin, expected a json file");
@@ -85,26 +128,10 @@ fn main() {
         eprintln!("failed to parse json: {}", e);
         exit(1);
     });
+    let mut client = mk_client(&client_addr, &leader);

-    println!("Binding to {}", requests_addr);
-    let requests_socket = UdpSocket::bind(&requests_addr).unwrap();
-    requests_socket
-        .set_read_timeout(Some(Duration::new(5, 0)))
-        .unwrap();
-    let events_socket = UdpSocket::bind(&events_addr).unwrap();
-    let requests_addr: SocketAddr = server_addr.parse().unwrap();
-    let requests_port = requests_addr.port();
-    let mut events_server_addr = requests_addr.clone();
-    events_server_addr.set_port(requests_port + 3);
-    let mut client = ThinClient::new(
-        requests_addr,
-        requests_socket,
-        events_server_addr,
-        events_socket,
-    );
-
     println!("Get last ID...");
-    let last_id = client.get_last_id().wait().unwrap();
+    let last_id = client.get_last_id();
     println!("Got last ID {:?}", last_id);

     let rnd = GenKeys::new(demo.mint.keypair().public_key_bytes());
@@ -120,7 +147,7 @@ fn main() {
         .into_par_iter()
         .map(|chunk| Transaction::new(&chunk[0], chunk[1].pubkey(), 1, last_id))
         .collect();
-    let mut duration = now.elapsed();
+    let duration = now.elapsed();
     let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
     let bsps = txs as f64 / ns as f64;
     let nsps = ns as f64 / txs as f64;
@@ -130,46 +157,134 @@ fn main() {
         nsps / 1_000_f64
     );

-    let initial_tx_count = client.transaction_count();
-    println!("initial count {}", initial_tx_count);
+    let first_count = client.transaction_count();
+    println!("initial count {}", first_count);

     println!("Transfering {} transactions in {} batches", txs, threads);
-    let now = Instant::now();
     let sz = transactions.len() / threads;
     let chunks: Vec<_> = transactions.chunks(sz).collect();
-    chunks.into_par_iter().for_each(|trs| {
-        println!("Transferring 1 unit {} times... to", trs.len());
-        let requests_addr: SocketAddr = server_addr.parse().unwrap();
-        let mut requests_cb_addr = requests_addr.clone();
-        requests_cb_addr.set_port(0);
-        let requests_socket = UdpSocket::bind(requests_cb_addr).unwrap();
-        requests_socket
-            .set_read_timeout(Some(Duration::new(5, 0)))
-            .unwrap();
-        let mut events_addr: SocketAddr = requests_addr.clone();
-        events_addr.set_port(0);
-        let events_socket = UdpSocket::bind(&events_addr).unwrap();
-        let client = ThinClient::new(
-            requests_addr,
-            requests_socket,
-            events_server_addr,
-            events_socket,
-        );
-        for tr in trs {
-            client.transfer_signed(tr.clone()).unwrap();
-        }
+    chunks.into_par_iter().for_each(|txs| {
+        println!(
+            "Transferring 1 unit {} times... to {:?}",
+            txs.len(),
+            leader.transactions_addr
+        );
+        let client = mk_client(&client_addr, &leader);
+        for tx in txs {
+            client.transfer_signed(tx.clone()).unwrap();
+        }
     });

-    println!("Waiting for transactions to complete...",);
-    let mut tx_count;
-    for _ in 0..10 {
-        tx_count = client.transaction_count();
-        duration = now.elapsed();
-        let txs = tx_count - initial_tx_count;
-        println!("Transactions processed {}", txs);
-        let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
-        let tps = (txs * 1_000_000_000) as f64 / ns as f64;
-        println!("{} tps", tps);
-        sleep(Duration::new(1, 0));
-    }
+    println!("Sampling tps every second...",);
+    validators.into_par_iter().for_each(|val| {
+        let mut client = mk_client(&client_addr, &val);
+        let mut now = Instant::now();
+        let mut initial_tx_count = client.transaction_count();
+        for i in 0..100 {
+            let tx_count = client.transaction_count();
+            let duration = now.elapsed();
+            now = Instant::now();
+            let sample = tx_count - initial_tx_count;
+            initial_tx_count = tx_count;
+            println!(
+                "{}: Transactions processed {}",
+                val.transactions_addr, sample
+            );
+            let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
+            let tps = (sample * 1_000_000_000) as f64 / ns as f64;
+            println!("{}: {} tps", val.transactions_addr, tps);
+            let total = tx_count - first_count;
+            println!(
+                "{}: Total Transactions processed {}",
+                val.transactions_addr, total
+            );
+            if total == transactions.len() as u64 {
+                break;
+            }
+            if i > 20 && sample == 0 {
+                break;
+            }
+            sleep(Duration::new(1, 0));
+        }
+    });
+    signal.store(true, Ordering::Relaxed);
+    for t in c_threads {
+        t.join().unwrap();
     }
 }
+
+fn mk_client(locked_addr: &Arc<RwLock<SocketAddr>>, r: &ReplicatedData) -> ThinClient {
+    let mut addr = locked_addr.write().unwrap();
+    let port = addr.port();
+    let transactions_socket = UdpSocket::bind(addr.clone()).unwrap();
+    addr.set_port(port + 1);
+    let requests_socket = UdpSocket::bind(addr.clone()).unwrap();
+    addr.set_port(port + 2);
+    ThinClient::new(
+        r.requests_addr,
+        requests_socket,
+        r.transactions_addr,
+        transactions_socket,
+    )
+}
+
+fn spy_node(client_addr: &Arc<RwLock<SocketAddr>>) -> (ReplicatedData, UdpSocket) {
+    let mut addr = client_addr.write().unwrap();
+    let port = addr.port();
+    let gossip = UdpSocket::bind(addr.clone()).unwrap();
+    addr.set_port(port + 1);
+    let daddr = "0.0.0.0:0".parse().unwrap();
+    let pubkey = KeyPair::new().pubkey();
+    let node = ReplicatedData::new(pubkey, gossip.local_addr().unwrap(), daddr, daddr, daddr);
+    (node, gossip)
+}
+
+fn converge(
+    client_addr: &Arc<RwLock<SocketAddr>>,
+    leader: &ReplicatedData,
+    exit: Arc<AtomicBool>,
+    num_nodes: usize,
+    threads: &mut Vec<JoinHandle<()>>,
+) -> Vec<ReplicatedData> {
+    //lets spy on the network
+    let daddr = "0.0.0.0:0".parse().unwrap();
+    let (spy, spy_gossip) = spy_node(client_addr);
+    let mut spy_crdt = Crdt::new(spy);
+    spy_crdt.insert(&leader);
+    spy_crdt.set_leader(leader.id);
+    let spy_ref = Arc::new(RwLock::new(spy_crdt));
+    let window = default_window();
+    let gossip_send_socket = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
+    let data_replicator = DataReplicator::new(
+        spy_ref.clone(),
+        window.clone(),
+        spy_gossip,
+        gossip_send_socket,
+        exit.clone(),
+    ).expect("DataReplicator::new");
+    //wait for the network to converge
+    for _ in 0..30 {
+        let min = spy_ref.read().unwrap().convergence();
+        if num_nodes as u64 == min {
+            println!("converged!");
+            break;
+        }
+        sleep(Duration::new(1, 0));
+    }
+    threads.extend(data_replicator.thread_hdls.into_iter());
+    let v: Vec<ReplicatedData> = spy_ref
+        .read()
+        .unwrap()
+        .table
+        .values()
+        .into_iter()
+        .filter(|x| x.requests_addr != daddr)
+        .map(|x| x.clone())
+        .collect();
+    v.clone()
+}
+
+fn read_leader(path: String) -> ReplicatedData {
+    let file = File::open(path).expect("file");
+    serde_json::from_reader(file).expect("parse")
+}
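Note: the sampling loop above converts each one-second window into a rate using nanoseconds so sub-second windows do not truncate to zero. The arithmetic, isolated as a runnable program:

use std::time::Duration;

// tps = sample / seconds, computed in nanoseconds, exactly as in the
// per-validator sampling loop above.
fn tps(sample: u64, elapsed: Duration) -> f64 {
    let ns = elapsed.as_secs() * 1_000_000_000 + u64::from(elapsed.subsec_nanos());
    (sample * 1_000_000_000) as f64 / ns as f64
}

fn main() {
    assert_eq!(tps(500, Duration::new(2, 0)), 250.0);
    assert_eq!(tps(100, Duration::new(0, 500_000_000)), 200.0);
}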
src/bin/fullnode-config.rs (new file, 52 lines)
@@ -0,0 +1,52 @@
+extern crate getopts;
+extern crate serde_json;
+extern crate solana;
+
+use getopts::Options;
+use solana::crdt::{get_ip_addr, parse_port_or_addr, ReplicatedData};
+use std::env;
+use std::io;
+use std::net::SocketAddr;
+use std::process::exit;
+
+fn print_usage(program: &str, opts: Options) {
+    let mut brief = format!("Usage: {} [options]\n\n", program);
+    brief += "  Create a solana fullnode config file\n";
+
+    print!("{}", opts.usage(&brief));
+}
+
+fn main() {
+    let mut opts = Options::new();
+    opts.optopt("b", "", "bind", "bind to port or address");
+    opts.optflag("d", "dyn", "detect network address dynamically");
+    opts.optflag("h", "help", "print help");
+    let args: Vec<String> = env::args().collect();
+    let matches = match opts.parse(&args[1..]) {
+        Ok(m) => m,
+        Err(e) => {
+            eprintln!("{}", e);
+            exit(1);
+        }
+    };
+    if matches.opt_present("h") {
+        let program = args[0].clone();
+        print_usage(&program, opts);
+        return;
+    }
+
+    let bind_addr: SocketAddr = {
+        let mut bind_addr = parse_port_or_addr(matches.opt_str("b"));
+        if matches.opt_present("d") {
+            let ip = get_ip_addr().unwrap();
+            bind_addr.set_ip(ip);
+        }
+        bind_addr
+    };
+
+    // we need all the receiving sockets to be bound within the expected
+    // port range that we open on aws
+    let repl_data = ReplicatedData::new_leader(&bind_addr);
+    let stdout = io::stdout();
+    serde_json::to_writer(stdout, &repl_data).expect("serialize");
+}
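Note: fullnode-config pulls parse_port_or_addr and get_ip_addr from solana::crdt; their bodies appear later in this diff, where they are removed from the fullnode binary. parse_port_or_addr accepts either a bare port or a full address and falls back to 0.0.0.0:8000; its behavior, restated as a runnable program:

use std::net::SocketAddr;

// Mirrors solana::crdt::parse_port_or_addr as shown in this diff: a bare
// port overrides the default port, a full address replaces the default
// entirely, and anything else falls back to 0.0.0.0:8000.
fn parse_port_or_addr(optstr: Option<String>) -> SocketAddr {
    let daddr: SocketAddr = "0.0.0.0:8000".parse().expect("default socket address");
    if let Some(addrstr) = optstr {
        if let Ok(port) = addrstr.parse() {
            let mut addr = daddr;
            addr.set_port(port);
            addr
        } else if let Ok(addr) = addrstr.parse() {
            addr
        } else {
            daddr
        }
    } else {
        daddr
    }
}

fn main() {
    assert_eq!(parse_port_or_addr(Some("9000".to_string())).port(), 9000);
    assert_eq!(parse_port_or_addr(Some("127.0.0.1:7000".to_string())).port(), 7000);
    assert_eq!(parse_port_or_addr(None).port(), 8000);
}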
@@ -1,28 +1,27 @@
 extern crate env_logger;
 extern crate getopts;
 extern crate isatty;
-extern crate pnet;
 extern crate serde_json;
 extern crate solana;
+#[macro_use]
+extern crate log;

 use getopts::Options;
 use isatty::stdin_isatty;
-use pnet::datalink;
 use solana::bank::Bank;
 use solana::crdt::ReplicatedData;
 use solana::entry::Entry;
-use solana::event::Event;
+use solana::payment_plan::PaymentPlan;
 use solana::server::Server;
-use solana::signature::{KeyPair, KeyPairUtil};
 use solana::transaction::Instruction;
 use std::env;
 use std::fs::File;
-use std::io::{stdin, stdout, Read};
-use std::net::{IpAddr, SocketAddr, UdpSocket};
+use std::io::{stdin, Read};
+use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
 use std::process::exit;
-use std::sync::Arc;
 use std::sync::atomic::AtomicBool;
-use std::time::Duration;
+use std::sync::Arc;
+//use std::time::Duration;

 fn print_usage(program: &str, opts: Options) {
     let mut brief = format!("Usage: cat <transaction.log> | {} [options]\n\n", program);
@@ -36,9 +35,7 @@ fn print_usage(program: &str, opts: Options) {
 fn main() {
     env_logger::init().unwrap();
     let mut opts = Options::new();
-    opts.optopt("b", "", "bind", "bind to port or address");
-    opts.optflag("d", "dyn", "detect network address dynamically");
-    opts.optopt("s", "", "save", "save my identity to path.json");
+    opts.optopt("l", "", "load", "load my identity to path.json");
     opts.optflag("h", "help", "print help");
     opts.optopt(
         "v",
@@ -59,14 +56,6 @@ fn main() {
         print_usage(&program, opts);
         return;
     }
-    let bind_addr: SocketAddr = {
-        let mut bind_addr = parse_port_or_addr(matches.opt_str("b"));
-        if matches.opt_present("d") {
-            let ip = get_ip_addr().unwrap();
-            bind_addr.set_ip(ip);
-        }
-        bind_addr
-    };
     if stdin_isatty() {
         eprintln!("nothing found on stdin, expected a log file");
         exit(1);
@@ -97,8 +86,8 @@ fn main() {
     // fields are the same. That entry should be treated as a deposit, not a
     // transfer to oneself.
     let entry1: Entry = entries.next().unwrap();
-    let Event::Transaction(ref tr) = entry1.events[0];
-    let deposit = if let Instruction::NewContract(contract) = &tr.instruction {
+    let tx = &entry1.transactions[0];
+    let deposit = if let Instruction::NewContract(contract) = &tx.instruction {
         contract.plan.final_payment()
     } else {
         None
@@ -111,26 +100,23 @@ fn main() {
     bank.register_entry_id(&entry1.id);

     eprintln!("processing entries...");
-    let mut last_id = entry1.id;
-    for entry in entries {
-        last_id = entry.id;
-        let results = bank.process_verified_events(entry.events);
-        for result in results {
-            if let Err(e) = result {
-                eprintln!("failed to process event {:?}", e);
-                exit(1);
-            }
-        }
-        bank.register_entry_id(&last_id);
-    }
+    bank.process_entries(entries).expect("process_entries");

     eprintln!("creating networking stack...");

     let exit = Arc::new(AtomicBool::new(false));
-    // we need all the receiving sockets to be bound within the expected
-    // port range that we open on aws
-    let mut repl_data = make_repl_data(&bind_addr);
+    let bind_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
+    let mut repl_data = ReplicatedData::new_leader(&bind_addr);
+    if matches.opt_present("l") {
+        let path = matches.opt_str("l").unwrap();
+        if let Ok(file) = File::open(path.clone()) {
+            if let Ok(data) = serde_json::from_reader(file) {
+                repl_data = data;
+            } else {
+                warn!("failed to parse leader {}, generating new identity", path);
+            }
+        }
+    }
     let threads = if matches.opt_present("v") {
         eprintln!("starting validator... {}", repl_data.requests_addr);
         let path = matches.opt_str("v").unwrap();
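Note: the hunk above collapses the manual entry loop into a single bank.process_entries(entries) call. What the removed loop did, in a self-contained toy model (real Bank and Entry live in this crate; these stand-ins only show the control flow):

struct Entry {
    id: u64,
    transactions: Vec<i64>,
}

struct Bank {
    last_id: u64,
    balance: i64,
}

impl Bank {
    fn process_transaction(&mut self, tokens: i64) -> Result<(), ()> {
        if self.balance + tokens < 0 {
            return Err(()); // would overdraw the toy account
        }
        self.balance += tokens;
        Ok(())
    }

    // Apply every transaction in every entry, fail fast on the first
    // error, and register each entry id: the contract of the removed loop.
    fn process_entries(&mut self, entries: Vec<Entry>) -> Result<(), ()> {
        for entry in entries {
            for tx in entry.transactions {
                self.process_transaction(tx)?;
            }
            self.last_id = entry.id;
        }
        Ok(())
    }
}

fn main() {
    let mut bank = Bank { last_id: 0, balance: 10 };
    let entries = vec![Entry { id: 1, transactions: vec![-3, 5] }];
    bank.process_entries(entries).expect("process_entries");
    assert_eq!(bank.last_id, 1);
    assert_eq!(bank.balance, 12);
}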
@@ -150,88 +136,25 @@ fn main() {
     } else {
         eprintln!("starting leader... {}", repl_data.requests_addr);
         repl_data.current_leader_id = repl_data.id.clone();
+        let file = File::create("leader.log").expect("leader.log create");
         let server = Server::new_leader(
             bank,
-            last_id,
-            Some(Duration::from_millis(1000)),
+            //Some(Duration::from_millis(1000)),
+            None,
             repl_data.clone(),
             UdpSocket::bind(repl_data.requests_addr).unwrap(),
-            UdpSocket::bind(repl_data.events_addr).unwrap(),
+            UdpSocket::bind(repl_data.transactions_addr).unwrap(),
             UdpSocket::bind("0.0.0.0:0").unwrap(),
             UdpSocket::bind("0.0.0.0:0").unwrap(),
             UdpSocket::bind(repl_data.gossip_addr).unwrap(),
             exit.clone(),
-            stdout(),
+            file,
         );
         server.thread_hdls
     };
-    if matches.opt_present("s") {
-        let path = matches.opt_str("s").unwrap();
-        let file = File::create(path).expect("file");
-        serde_json::to_writer(file, &repl_data).expect("serialize");
-    }
-    eprintln!("Ready. Listening on {}", bind_addr);
+    eprintln!("Ready. Listening on {}", repl_data.transactions_addr);

     for t in threads {
         t.join().expect("join");
     }
 }
-
-fn next_port(server_addr: &SocketAddr, nxt: u16) -> SocketAddr {
-    let mut gossip_addr = server_addr.clone();
-    gossip_addr.set_port(server_addr.port() + nxt);
-    gossip_addr
-}
-
-fn make_repl_data(bind_addr: &SocketAddr) -> ReplicatedData {
-    let events_addr = bind_addr.clone();
-    let gossip_addr = next_port(&bind_addr, 1);
-    let replicate_addr = next_port(&bind_addr, 2);
-    let requests_addr = next_port(&bind_addr, 3);
-    let pubkey = KeyPair::new().pubkey();
-    ReplicatedData::new(
-        pubkey,
-        gossip_addr,
-        replicate_addr,
-        requests_addr,
-        events_addr,
-    )
-}
-
-fn parse_port_or_addr(optstr: Option<String>) -> SocketAddr {
-    let daddr: SocketAddr = "0.0.0.0:8000".parse().expect("default socket address");
-    if let Some(addrstr) = optstr {
-        if let Ok(port) = addrstr.parse() {
-            let mut addr = daddr.clone();
-            addr.set_port(port);
-            addr
-        } else if let Ok(addr) = addrstr.parse() {
-            addr
-        } else {
-            daddr
-        }
-    } else {
-        daddr
-    }
-}
-
-fn get_ip_addr() -> Option<IpAddr> {
-    for iface in datalink::interfaces() {
-        for p in iface.ips {
-            if !p.ip().is_loopback() && !p.ip().is_multicast() {
-                return Some(p.ip());
-            }
-        }
-    }
-    None
-}
-
-#[test]
-fn test_parse_port_or_addr() {
-    let p1 = parse_port_or_addr(Some("9000".to_string()));
-    assert_eq!(p1.port(), 9000);
-    let p2 = parse_port_or_addr(Some("127.0.0.1:7000".to_string()));
-    assert_eq!(p2.port(), 7000);
-    let p3 = parse_port_or_addr(None);
-    assert_eq!(p3.port(), 8000);
-}
@@ -7,9 +7,9 @@ use isatty::stdin_isatty;
 use rayon::prelude::*;
 use solana::bank::MAX_ENTRY_IDS;
 use solana::entry::{next_entry, Entry};
-use solana::event::Event;
 use solana::mint::MintDemo;
 use solana::signature::{GenKeys, KeyPairUtil};
+use solana::transaction::Transaction;
 use std::io::{stdin, Read};
 use std::process::exit;
@@ -41,33 +41,37 @@ fn main() {
     let mint_keypair = demo.mint.keypair();
     let last_id = demo.mint.last_id();

-    eprintln!("Signing {} transactions...", num_accounts);
-    let events: Vec<_> = keypairs
-        .into_par_iter()
-        .map(|rando| {
-            let last_id = demo.mint.last_id();
-            Event::new_transaction(&mint_keypair, rando.pubkey(), tokens_per_user, last_id)
-        })
-        .collect();
-
     for entry in demo.mint.create_entries() {
         println!("{}", serde_json::to_string(&entry).unwrap());
     }

-    eprintln!("Logging the creation of {} accounts...", num_accounts);
-    let entry = Entry::new(&last_id, 0, events);
-    println!("{}", serde_json::to_string(&entry).unwrap());
-
     eprintln!("Creating {} empty entries...", MAX_ENTRY_IDS);

     // Offer client lots of entry IDs to use for each transaction's last_id.
     let mut last_id = last_id;
+    let mut last_ids = vec![];
     for _ in 0..MAX_ENTRY_IDS {
         let entry = next_entry(&last_id, 1, vec![]);
         last_id = entry.id;
+        last_ids.push(last_id);
         let serialized = serde_json::to_string(&entry).unwrap_or_else(|e| {
             eprintln!("failed to serialize: {}", e);
             exit(1);
         });
         println!("{}", serialized);
     }
+
+    eprintln!("Creating {} transactions...", num_accounts);
+    let transactions: Vec<_> = keypairs
+        .into_par_iter()
+        .enumerate()
+        .map(|(i, rando)| {
+            let last_id = last_ids[i % MAX_ENTRY_IDS];
+            Transaction::new(&mint_keypair, rando.pubkey(), tokens_per_user, last_id)
+        })
+        .collect();
+
+    eprintln!("Logging the creation of {} accounts...", num_accounts);
+    let entry = Entry::new(&last_id, 0, transactions);
+    println!("{}", serde_json::to_string(&entry).unwrap());
 }
@@ -1,261 +0,0 @@
-extern crate futures;
-extern crate getopts;
-extern crate isatty;
-extern crate rayon;
-extern crate serde_json;
-extern crate solana;
-
-use futures::Future;
-use getopts::Options;
-use isatty::stdin_isatty;
-use rayon::prelude::*;
-use solana::crdt::{Crdt, ReplicatedData};
-use solana::mint::MintDemo;
-use solana::signature::{GenKeys, KeyPair, KeyPairUtil};
-use solana::streamer::default_window;
-use solana::thin_client::ThinClient;
-use solana::transaction::Transaction;
-use std::env;
-use std::fs::File;
-use std::io::{stdin, Read};
-use std::net::{SocketAddr, UdpSocket};
-use std::process::exit;
-use std::sync::atomic::{AtomicBool, Ordering};
-use std::sync::{Arc, RwLock};
-use std::thread::JoinHandle;
-use std::thread::sleep;
-use std::time::Duration;
-use std::time::Instant;
-
-fn print_usage(program: &str, opts: Options) {
-    let mut brief = format!("Usage: cat <mint.json> | {} [options]\n\n", program);
-    brief += "  Solana client demo creates a number of transactions and\n";
-    brief += "  sends them to a target node.";
-    brief += "  Takes json formatted mint file to stdin.";
-
-    print!("{}", opts.usage(&brief));
-}
-
-fn main() {
-    let mut threads = 4usize;
-    let mut num_nodes = 10usize;
-    let mut leader = "leader.json".to_string();
-
-    let mut opts = Options::new();
-    opts.optopt("l", "", "leader", "leader.json");
-    opts.optopt("c", "", "client address", "host:port");
-    opts.optopt("t", "", "number of threads", &format!("{}", threads));
-    opts.optopt(
-        "n",
-        "",
-        "number of nodes to converge to",
-        &format!("{}", num_nodes),
-    );
-    opts.optflag("h", "help", "print help");
-    let args: Vec<String> = env::args().collect();
-    let matches = match opts.parse(&args[1..]) {
-        Ok(m) => m,
-        Err(e) => {
-            eprintln!("{}", e);
-            exit(1);
-        }
-    };
-
-    if matches.opt_present("h") {
-        let program = args[0].clone();
-        print_usage(&program, opts);
-        return;
-    }
-    if matches.opt_present("l") {
-        leader = matches.opt_str("l").unwrap();
-    }
-    let client_addr: Arc<RwLock<SocketAddr>> = if matches.opt_present("c") {
-        let addr = matches.opt_str("c").unwrap().parse().unwrap();
-        Arc::new(RwLock::new(addr))
-    } else {
-        Arc::new(RwLock::new("127.0.0.1:8010".parse().unwrap()))
-    };
-    if matches.opt_present("t") {
-        threads = matches.opt_str("t").unwrap().parse().expect("integer");
-    }
-    if matches.opt_present("n") {
-        num_nodes = matches.opt_str("n").unwrap().parse().expect("integer");
-    }
-
-    let leader: ReplicatedData = read_leader(leader);
-    let signal = Arc::new(AtomicBool::new(false));
-    let mut c_threads = vec![];
-    let validators = converge(
-        &client_addr,
-        &leader,
-        signal.clone(),
-        num_nodes + 2,
-        &mut c_threads,
-    );
-
-    if stdin_isatty() {
-        eprintln!("nothing found on stdin, expected a json file");
-        exit(1);
-    }
-
-    let mut buffer = String::new();
-    let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
-    if num_bytes == 0 {
-        eprintln!("empty file on stdin, expected a json file");
-        exit(1);
-    }
-
-    println!("Parsing stdin...");
-    let demo: MintDemo = serde_json::from_str(&buffer).unwrap_or_else(|e| {
-        eprintln!("failed to parse json: {}", e);
-        exit(1);
-    });
-    let mut client = mk_client(&client_addr, &leader);
-
-    println!("Get last ID...");
-    let last_id = client.get_last_id().wait().unwrap();
-    println!("Got last ID {:?}", last_id);
-
-    let rnd = GenKeys::new(demo.mint.keypair().public_key_bytes());
-
-    println!("Creating keypairs...");
-    let txs = demo.num_accounts / 2;
-    let keypairs = rnd.gen_n_keypairs(demo.num_accounts);
-    let keypair_pairs: Vec<_> = keypairs.chunks(2).collect();
-
-    println!("Signing transactions...");
-    let now = Instant::now();
-    let transactions: Vec<_> = keypair_pairs
-        .into_par_iter()
-        .map(|chunk| Transaction::new(&chunk[0], chunk[1].pubkey(), 1, last_id))
-        .collect();
-    let duration = now.elapsed();
-    let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
-    let bsps = txs as f64 / ns as f64;
-    let nsps = ns as f64 / txs as f64;
-    println!(
-        "Done. {} thousand signatures per second, {}us per signature",
-        bsps * 1_000_000_f64,
-        nsps / 1_000_f64
-    );
-
-    let first_count = client.transaction_count();
-    println!("initial count {}", first_count);
-
-    println!("Transfering {} transactions in {} batches", txs, threads);
-    let sz = transactions.len() / threads;
-    let chunks: Vec<_> = transactions.chunks(sz).collect();
-    chunks.into_par_iter().for_each(|trs| {
-        println!("Transferring 1 unit {} times... to", trs.len());
-        let client = mk_client(&client_addr, &leader);
-        for tr in trs {
-            client.transfer_signed(tr.clone()).unwrap();
-        }
-    });
-
-    println!("Sampling tps every second...",);
-    validators.into_par_iter().for_each(|val| {
-        let mut client = mk_client(&client_addr, &val);
-        let mut now = Instant::now();
-        let mut initial_tx_count = client.transaction_count();
-        for i in 0..100 {
-            let tx_count = client.transaction_count();
-            let duration = now.elapsed();
-            now = Instant::now();
-            let sample = tx_count - initial_tx_count;
-            initial_tx_count = tx_count;
-            println!("{}: Transactions processed {}", val.events_addr, sample);
-            let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
-            let tps = (sample * 1_000_000_000) as f64 / ns as f64;
-            println!("{}: {} tps", val.events_addr, tps);
-            let total = tx_count - first_count;
-            println!(
-                "{}: Total Transactions processed {}",
-                val.events_addr, total
-            );
-            if total == transactions.len() as u64 {
-                break;
-            }
-            if i > 20 && sample == 0 {
-                break;
-            }
-            sleep(Duration::new(1, 0));
-        }
-    });
-    signal.store(true, Ordering::Relaxed);
-    for t in c_threads {
-        t.join().unwrap();
-    }
-}
-
-fn mk_client(locked_addr: &Arc<RwLock<SocketAddr>>, r: &ReplicatedData) -> ThinClient {
-    let mut addr = locked_addr.write().unwrap();
-    let port = addr.port();
-    let events_socket = UdpSocket::bind(addr.clone()).unwrap();
-    addr.set_port(port + 1);
-    let requests_socket = UdpSocket::bind(addr.clone()).unwrap();
-    addr.set_port(port + 2);
-    ThinClient::new(
-        r.requests_addr,
-        requests_socket,
-        r.events_addr,
-        events_socket,
-    )
-}
-
-fn spy_node(client_addr: &Arc<RwLock<SocketAddr>>) -> (ReplicatedData, UdpSocket) {
-    let mut addr = client_addr.write().unwrap();
-    let port = addr.port();
-    let gossip = UdpSocket::bind(addr.clone()).unwrap();
-    addr.set_port(port + 1);
-    let daddr = "0.0.0.0:0".parse().unwrap();
-    let pubkey = KeyPair::new().pubkey();
-    let node = ReplicatedData::new(pubkey, gossip.local_addr().unwrap(), daddr, daddr, daddr);
-    (node, gossip)
-}
-
-fn converge(
-    client_addr: &Arc<RwLock<SocketAddr>>,
-    leader: &ReplicatedData,
-    exit: Arc<AtomicBool>,
-    num_nodes: usize,
-    threads: &mut Vec<JoinHandle<()>>,
-) -> Vec<ReplicatedData> {
-    //lets spy on the network
-    let daddr = "0.0.0.0:0".parse().unwrap();
-    let (spy, spy_gossip) = spy_node(client_addr);
-    let mut spy_crdt = Crdt::new(spy);
-    spy_crdt.insert(&leader);
-    spy_crdt.set_leader(leader.id);
-
-    let spy_ref = Arc::new(RwLock::new(spy_crdt));
-    let spy_window = default_window();
-    let t_spy_listen = Crdt::listen(spy_ref.clone(), spy_window, spy_gossip, exit.clone());
-    let t_spy_gossip = Crdt::gossip(spy_ref.clone(), exit.clone());
-    //wait for the network to converge
-    for _ in 0..30 {
-        let min = spy_ref.read().unwrap().convergence();
-        if num_nodes as u64 == min {
-            println!("converged!");
-            break;
-        }
-        sleep(Duration::new(1, 0));
-    }
-    threads.push(t_spy_listen);
-    threads.push(t_spy_gossip);
-    let v: Vec<ReplicatedData> = spy_ref
-        .read()
-        .unwrap()
-        .table
-        .values()
-        .into_iter()
-        .filter(|x| x.requests_addr != daddr)
-        .map(|x| x.clone())
-        .collect();
-    v.clone()
-}
-
-fn read_leader(path: String) -> ReplicatedData {
-    let file = File::open(path).expect("file");
-    serde_json::from_reader(file).expect("parse")
-}
@@ -1,18 +1,13 @@
|
|||||||
//! The `plan` module provides a domain-specific language for payment plans. Users create Plan objects that
|
//! The `budget` module provides a domain-specific language for payment plans. Users create Budget objects that
|
||||||
//! are given to an interpreter. The interpreter listens for `Witness` events,
|
//! are given to an interpreter. The interpreter listens for `Witness` transactions,
|
||||||
//! which it uses to reduce the payment plan. When the plan is reduced to a
|
//! which it uses to reduce the payment plan. When the budget is reduced to a
|
||||||
//! `Payment`, the payment is executed.
|
//! `Payment`, the payment is executed.
|
||||||
|
|
||||||
use chrono::prelude::*;
|
use chrono::prelude::*;
|
||||||
|
use payment_plan::{Payment, PaymentPlan, Witness};
|
||||||
use signature::PublicKey;
|
use signature::PublicKey;
|
||||||
use std::mem;
|
use std::mem;
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
|
|
||||||
pub enum Witness {
|
|
||||||
Timestamp(DateTime<Utc>),
|
|
||||||
Signature(PublicKey),
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
|
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
|
||||||
pub enum Condition {
|
pub enum Condition {
|
||||||
Timestamp(DateTime<Utc>),
|
Timestamp(DateTime<Utc>),
|
||||||
@@ -30,37 +25,31 @@ impl Condition {
     }
 }

-#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
-pub struct Payment {
-    pub tokens: i64,
-    pub to: PublicKey,
-}
-
 #[repr(C)]
 #[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
-pub enum Plan {
+pub enum Budget {
     Pay(Payment),
     After(Condition, Payment),
     Race((Condition, Payment), (Condition, Payment)),
 }

-impl Plan {
-    /// Create the simplest spending plan - one that pays `tokens` to PublicKey.
+impl Budget {
+    /// Create the simplest budget - one that pays `tokens` to PublicKey.
     pub fn new_payment(tokens: i64, to: PublicKey) -> Self {
-        Plan::Pay(Payment { tokens, to })
+        Budget::Pay(Payment { tokens, to })
     }

-    /// Create a spending plan that pays `tokens` to `to` after being witnessed by `from`.
+    /// Create a budget that pays `tokens` to `to` after being witnessed by `from`.
     pub fn new_authorized_payment(from: PublicKey, tokens: i64, to: PublicKey) -> Self {
-        Plan::After(Condition::Signature(from), Payment { tokens, to })
+        Budget::After(Condition::Signature(from), Payment { tokens, to })
     }

-    /// Create a spending plan that pays `tokens` to `to` after the given DateTime.
+    /// Create a budget that pays `tokens` to `to` after the given DateTime.
     pub fn new_future_payment(dt: DateTime<Utc>, tokens: i64, to: PublicKey) -> Self {
-        Plan::After(Condition::Timestamp(dt), Payment { tokens, to })
+        Budget::After(Condition::Timestamp(dt), Payment { tokens, to })
     }

-    /// Create a spending plan that pays `tokens` to `to` after the given DateTime
+    /// Create a budget that pays `tokens` to `to` after the given DateTime
     /// unless cancelled by `from`.
     pub fn new_cancelable_future_payment(
         dt: DateTime<Utc>,
@@ -68,44 +57,46 @@ impl Plan {
         tokens: i64,
         to: PublicKey,
     ) -> Self {
-        Plan::Race(
+        Budget::Race(
             (Condition::Timestamp(dt), Payment { tokens, to }),
             (Condition::Signature(from), Payment { tokens, to: from }),
         )
     }
+}

-    /// Return Payment if the spending plan requires no additional Witnesses.
-    pub fn final_payment(&self) -> Option<Payment> {
+impl PaymentPlan for Budget {
+    /// Return Payment if the budget requires no additional Witnesses.
+    fn final_payment(&self) -> Option<Payment> {
         match *self {
-            Plan::Pay(ref payment) => Some(payment.clone()),
+            Budget::Pay(ref payment) => Some(payment.clone()),
             _ => None,
         }
     }

-    /// Return true if the plan spends exactly `spendable_tokens`.
-    pub fn verify(&self, spendable_tokens: i64) -> bool {
+    /// Return true if the budget spends exactly `spendable_tokens`.
+    fn verify(&self, spendable_tokens: i64) -> bool {
         match *self {
-            Plan::Pay(ref payment) | Plan::After(_, ref payment) => {
+            Budget::Pay(ref payment) | Budget::After(_, ref payment) => {
                 payment.tokens == spendable_tokens
             }
-            Plan::Race(ref a, ref b) => {
+            Budget::Race(ref a, ref b) => {
                 a.1.tokens == spendable_tokens && b.1.tokens == spendable_tokens
             }
         }
     }

-    /// Apply a witness to the spending plan to see if the plan can be reduced.
-    /// If so, modify the plan in-place.
-    pub fn apply_witness(&mut self, witness: &Witness) {
+    /// Apply a witness to the budget to see if the budget can be reduced.
+    /// If so, modify the budget in-place.
+    fn apply_witness(&mut self, witness: &Witness) {
         let new_payment = match *self {
-            Plan::After(ref cond, ref payment) if cond.is_satisfied(witness) => Some(payment),
-            Plan::Race((ref cond, ref payment), _) if cond.is_satisfied(witness) => Some(payment),
-            Plan::Race(_, (ref cond, ref payment)) if cond.is_satisfied(witness) => Some(payment),
+            Budget::After(ref cond, ref payment) if cond.is_satisfied(witness) => Some(payment),
+            Budget::Race((ref cond, ref payment), _) if cond.is_satisfied(witness) => Some(payment),
+            Budget::Race(_, (ref cond, ref payment)) if cond.is_satisfied(witness) => Some(payment),
             _ => None,
         }.cloned();

         if let Some(payment) = new_payment {
-            mem::replace(self, Plan::Pay(payment));
+            mem::replace(self, Budget::Pay(payment));
         }
     }
 }
@@ -130,14 +121,14 @@ mod tests {
     }

     #[test]
-    fn test_verify_plan() {
+    fn test_verify() {
         let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
         let from = PublicKey::default();
         let to = PublicKey::default();
-        assert!(Plan::new_payment(42, to).verify(42));
-        assert!(Plan::new_authorized_payment(from, 42, to).verify(42));
-        assert!(Plan::new_future_payment(dt, 42, to).verify(42));
-        assert!(Plan::new_cancelable_future_payment(dt, from, 42, to).verify(42));
+        assert!(Budget::new_payment(42, to).verify(42));
+        assert!(Budget::new_authorized_payment(from, 42, to).verify(42));
+        assert!(Budget::new_future_payment(dt, 42, to).verify(42));
+        assert!(Budget::new_cancelable_future_payment(dt, from, 42, to).verify(42));
     }

     #[test]
@@ -145,9 +136,9 @@ mod tests {
         let from = PublicKey::default();
         let to = PublicKey::default();

-        let mut plan = Plan::new_authorized_payment(from, 42, to);
-        plan.apply_witness(&Witness::Signature(from));
-        assert_eq!(plan, Plan::new_payment(42, to));
+        let mut budget = Budget::new_authorized_payment(from, 42, to);
+        budget.apply_witness(&Witness::Signature(from));
+        assert_eq!(budget, Budget::new_payment(42, to));
     }

     #[test]
@@ -155,9 +146,9 @@ mod tests {
         let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
         let to = PublicKey::default();

-        let mut plan = Plan::new_future_payment(dt, 42, to);
-        plan.apply_witness(&Witness::Timestamp(dt));
-        assert_eq!(plan, Plan::new_payment(42, to));
+        let mut budget = Budget::new_future_payment(dt, 42, to);
+        budget.apply_witness(&Witness::Timestamp(dt));
+        assert_eq!(budget, Budget::new_payment(42, to));
     }

     #[test]
@@ -166,12 +157,12 @@ mod tests {
         let from = PublicKey::default();
         let to = PublicKey::default();

-        let mut plan = Plan::new_cancelable_future_payment(dt, from, 42, to);
-        plan.apply_witness(&Witness::Timestamp(dt));
-        assert_eq!(plan, Plan::new_payment(42, to));
+        let mut budget = Budget::new_cancelable_future_payment(dt, from, 42, to);
+        budget.apply_witness(&Witness::Timestamp(dt));
+        assert_eq!(budget, Budget::new_payment(42, to));

-        let mut plan = Plan::new_cancelable_future_payment(dt, from, 42, to);
-        plan.apply_witness(&Witness::Signature(from));
-        assert_eq!(plan, Plan::new_payment(42, from));
+        let mut budget = Budget::new_cancelable_future_payment(dt, from, 42, to);
+        budget.apply_witness(&Witness::Signature(from));
+        assert_eq!(budget, Budget::new_payment(42, from));
     }
 }
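The `Plan` to `Budget` rename above is mechanical, but moving `final_payment`, `verify`, and `apply_witness` into the `PaymentPlan` trait changes call sites: the trait must now be in scope. A minimal sketch of the new call pattern, condensed from the tests above and assuming the `budget`, `payment_plan`, and `signature` module paths shown in this diff:

use budget::Budget;
use payment_plan::{PaymentPlan, Witness}; // trait in scope for verify/apply_witness
use signature::PublicKey;

fn demo() {
    let from = PublicKey::default();
    let to = PublicKey::default();
    // A payment gated on a signature witness from `from`.
    let mut budget = Budget::new_authorized_payment(from, 42, to);
    assert!(budget.verify(42));
    // The witness reduces the budget to an unconditional payment.
    budget.apply_witness(&Witness::Signature(from));
    assert_eq!(budget, Budget::new_payment(42, to));
}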
src/crdt.rs
@@ -16,35 +16,67 @@
 use bincode::{deserialize, serialize};
 use byteorder::{LittleEndian, ReadBytesExt};
 use hash::Hash;
-use packet::SharedBlob;
+use packet::{to_blob, Blob, BlobRecycler, SharedBlob, BLOB_SIZE};
+use pnet::datalink;
 use rayon::prelude::*;
 use result::{Error, Result};
 use ring::rand::{SecureRandom, SystemRandom};
+use signature::{KeyPair, KeyPairUtil};
 use signature::{PublicKey, Signature};
 use std;
 use std::collections::HashMap;
+use std::collections::VecDeque;
 use std::io::Cursor;
-use std::net::{SocketAddr, UdpSocket};
+use std::net::{IpAddr, SocketAddr, UdpSocket};
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::{Arc, RwLock};
-use std::thread::{sleep, spawn, JoinHandle};
+use std::thread::{sleep, Builder, JoinHandle};
 use std::time::Duration;
+use streamer::{BlobReceiver, BlobSender};

+pub fn parse_port_or_addr(optstr: Option<String>) -> SocketAddr {
+    let daddr: SocketAddr = "0.0.0.0:8000".parse().expect("default socket address");
+    if let Some(addrstr) = optstr {
+        if let Ok(port) = addrstr.parse() {
+            let mut addr = daddr.clone();
+            addr.set_port(port);
+            addr
+        } else if let Ok(addr) = addrstr.parse() {
+            addr
+        } else {
+            daddr
+        }
+    } else {
+        daddr
+    }
+}
+
+pub fn get_ip_addr() -> Option<IpAddr> {
+    for iface in datalink::interfaces() {
+        for p in iface.ips {
+            if !p.ip().is_loopback() && !p.ip().is_multicast() {
+                return Some(p.ip());
+            }
+        }
+    }
+    None
+}
+
 /// Structure to be replicated by the network
-#[derive(Serialize, Deserialize, Clone, Debug)]
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
 pub struct ReplicatedData {
     pub id: PublicKey,
     sig: Signature,
     /// should always be increasing
-    version: u64,
+    pub version: u64,
     /// address to connect to for gossip
     pub gossip_addr: SocketAddr,
     /// address to connect to for replication
     pub replicate_addr: SocketAddr,
     /// address to connect to when this node is leader
     pub requests_addr: SocketAddr,
-    /// events address
-    pub events_addr: SocketAddr,
+    /// transactions address
+    pub transactions_addr: SocketAddr,
     /// current leader identity
     pub current_leader_id: PublicKey,
     /// last verified hash that was submitted to the leader
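The new `parse_port_or_addr` helper accepts either a bare port or a full socket address, falling back to `0.0.0.0:8000`, and `get_ip_addr` returns the first non-loopback, non-multicast interface address. A usage sketch (the port behavior matches the test added at the bottom of this file):

use crdt::{get_ip_addr, parse_port_or_addr};

fn demo() {
    // A bare port keeps the 0.0.0.0 default host.
    assert_eq!(parse_port_or_addr(Some("9000".to_string())).port(), 9000);
    // A full address is used verbatim.
    assert_eq!(parse_port_or_addr(Some("127.0.0.1:7000".to_string())).port(), 7000);
    // None (or an unparsable string) falls back to 0.0.0.0:8000.
    assert_eq!(parse_port_or_addr(None).port(), 8000);
    if let Some(ip) = get_ip_addr() {
        println!("advertising {}", ip);
    }
}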
@@ -59,7 +91,7 @@ impl ReplicatedData {
         gossip_addr: SocketAddr,
         replicate_addr: SocketAddr,
         requests_addr: SocketAddr,
-        events_addr: SocketAddr,
+        transactions_addr: SocketAddr,
     ) -> ReplicatedData {
         ReplicatedData {
             id,
@@ -68,12 +100,33 @@ impl ReplicatedData {
             gossip_addr,
             replicate_addr,
             requests_addr,
-            events_addr,
+            transactions_addr,
             current_leader_id: PublicKey::default(),
             last_verified_hash: Hash::default(),
             last_verified_count: 0,
         }
     }
+
+    fn next_port(addr: &SocketAddr, nxt: u16) -> SocketAddr {
+        let mut nxt_addr = addr.clone();
+        nxt_addr.set_port(addr.port() + nxt);
+        nxt_addr
+    }
+
+    pub fn new_leader(bind_addr: &SocketAddr) -> Self {
+        let transactions_addr = bind_addr.clone();
+        let gossip_addr = Self::next_port(&bind_addr, 1);
+        let replicate_addr = Self::next_port(&bind_addr, 2);
+        let requests_addr = Self::next_port(&bind_addr, 3);
+        let pubkey = KeyPair::new().pubkey();
+        ReplicatedData::new(
+            pubkey,
+            gossip_addr,
+            replicate_addr,
+            requests_addr,
+            transactions_addr,
+        )
+    }
 }

 /// `Crdt` structure keeps a table of `ReplicatedData` structs
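`new_leader` derives a node's whole socket layout from a single bind address: transactions on the given port, then gossip, replicate, and requests on the next three ports. A sketch of the port arithmetic, mirroring the `next_port` helper above:

use std::net::SocketAddr;

// Mirrors ReplicatedData::next_port: same host, port offset by `nxt`.
fn next_port(addr: &SocketAddr, nxt: u16) -> SocketAddr {
    let mut nxt_addr = addr.clone();
    nxt_addr.set_port(addr.port() + nxt);
    nxt_addr
}

fn demo() {
    let bind: SocketAddr = "127.0.0.1:8000".parse().unwrap();
    assert_eq!(next_port(&bind, 1).port(), 8001); // gossip
    assert_eq!(next_port(&bind, 2).port(), 8002); // replicate
    assert_eq!(next_port(&bind, 3).port(), 8003); // requests
}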
@@ -149,15 +202,20 @@ impl Crdt {
         if self.table.get(&v.id).is_none() || (v.version > self.table[&v.id].version) {
             //somehow we signed a message for our own identity with a higher version that
             // we have stored ourselves
-            trace!("me: {:?}", self.me[0]);
-            trace!("v.id: {:?}", v.id[0]);
-            trace!("insert! {}", v.version);
+            trace!(
+                "me: {:?} v.id: {:?} version: {}",
+                &self.me[..4],
+                &v.id[..4],
+                v.version
+            );
             self.update_index += 1;
             let _ = self.table.insert(v.id.clone(), v.clone());
             let _ = self.local.insert(v.id, self.update_index);
         } else {
             trace!(
-                "INSERT FAILED new.version: {} me.version: {}",
+                "INSERT FAILED me: {:?} data: {:?} new.version: {} me.version: {}",
+                &self.me[..4],
+                &v.id[..4],
                 v.version,
                 self.table[&v.id].version
             );
@@ -226,6 +284,7 @@ impl Crdt {
                     .expect("set_index in pub fn broadcast");
                 //TODO profile this, may need multiple sockets for par_iter
                 trace!("broadcast {} to {}", blob.meta.size, v.replicate_addr);
+                assert!(blob.meta.size < BLOB_SIZE);
                 let e = s.send_to(&blob.data[..blob.meta.size], &v.replicate_addr);
                 trace!("done broadcast {} to {}", blob.meta.size, v.replicate_addr);
                 e
@@ -285,6 +344,7 @@ impl Crdt {
                     v.replicate_addr
                 );
                 //TODO profile this, may need multiple sockets for par_iter
+                assert!(rblob.meta.size < BLOB_SIZE);
                 s.send_to(&rblob.data[..rblob.meta.size], &v.replicate_addr)
             })
             .collect();
@@ -327,14 +387,16 @@ impl Crdt {
     }

     pub fn window_index_request(&self, ix: u64) -> Result<(SocketAddr, Vec<u8>)> {
-        if self.table.len() <= 1 {
+        let daddr = "0.0.0.0:0".parse().unwrap();
+        let valid: Vec<_> = self.table
+            .values()
+            .filter(|r| r.id != self.me && r.replicate_addr != daddr)
+            .collect();
+        if valid.is_empty() {
             return Err(Error::CrdtTooSmall);
         }
-        let mut n = (Self::random() as usize) % self.table.len();
-        while self.table.values().nth(n).unwrap().id == self.me {
-            n = (Self::random() as usize) % self.table.len();
-        }
-        let addr = self.table.values().nth(n).unwrap().gossip_addr.clone();
+        let n = (Self::random() as usize) % valid.len();
+        let addr = valid[n].gossip_addr.clone();
         let req = Protocol::RequestWindowIndex(self.table[&self.me].clone(), ix);
         let out = serialize(&req)?;
         Ok((addr, out))
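The old loop re-rolled a random index until it landed on a peer other than `self`, which never terminates when no such peer exists. The rewrite filters the candidate set first, dropping `self` and peers that still advertise the default `0.0.0.0:0` address, and only then samples. The pattern in isolation, as a generic sketch:

// Restrict the candidate set, then index with one random draw; an empty
// set becomes an explicit error path instead of an infinite loop.
fn pick_one<T>(candidates: &[T], rand: u64) -> Option<&T> {
    if candidates.is_empty() {
        return None;
    }
    Some(&candidates[(rand as usize) % candidates.len()])
}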
@@ -348,18 +410,32 @@ impl Crdt {
     fn gossip_request(&self) -> Result<(SocketAddr, Protocol)> {
         let options: Vec<_> = self.table.values().filter(|v| v.id != self.me).collect();
         if options.len() < 1 {
-            trace!("crdt too small for gossip");
+            trace!(
+                "crdt too small for gossip {:?} {}",
+                &self.me[..4],
+                self.table.len()
+            );
             return Err(Error::CrdtTooSmall);
         }
         let n = (Self::random() as usize) % options.len();
         let v = options[n].clone();
         let remote_update_index = *self.remote.get(&v.id).unwrap_or(&0);
         let req = Protocol::RequestUpdates(remote_update_index, self.table[&self.me].clone());
+        trace!(
+            "created gossip request from {:?} to {:?} {}",
+            &self.me[..4],
+            &v.id[..4],
+            v.gossip_addr
+        );
         Ok((v.gossip_addr, req))
     }

     /// At random pick a node and try to get updated changes from them
-    fn run_gossip(obj: &Arc<RwLock<Self>>) -> Result<()> {
+    fn run_gossip(
+        obj: &Arc<RwLock<Self>>,
+        blob_sender: &BlobSender,
+        blob_recycler: &BlobRecycler,
+    ) -> Result<()> {
         //TODO we need to keep track of stakes and weight the selection by stake size
         //TODO cache sockets

@@ -368,12 +444,12 @@ impl Crdt {
         let (remote_gossip_addr, req) = obj.read()
             .expect("'obj' read lock in fn run_gossip")
             .gossip_request()?;
-        let sock = UdpSocket::bind("0.0.0.0:0")?;
         // TODO this will get chatty, so we need to first ask for number of updates since
         // then only ask for specific data that we dont have
-        let r = serialize(&req)?;
-        trace!("sending gossip request to {}", remote_gossip_addr);
-        sock.send_to(&r, remote_gossip_addr)?;
+        let blob = to_blob(req, remote_gossip_addr, blob_recycler)?;
+        let mut q: VecDeque<SharedBlob> = VecDeque::new();
+        q.push_back(blob);
+        blob_sender.send(q)?;
         Ok(())
     }
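`run_gossip` no longer binds a throwaway `UdpSocket` per request; it serializes the request into a recycled blob and hands it to the shared responder thread over a channel. A self-contained sketch of that pattern, with a plain `Vec<u8>` standing in for the crate's blob type:

use std::collections::VecDeque;
use std::net::{SocketAddr, UdpSocket};
use std::sync::mpsc::channel;
use std::thread;

fn demo() -> std::io::Result<()> {
    let socket = UdpSocket::bind("0.0.0.0:0")?;
    let (sender, receiver) = channel::<VecDeque<(Vec<u8>, SocketAddr)>>();
    // One long-lived thread owns the socket; producers only touch the channel.
    let responder = thread::spawn(move || {
        while let Ok(mut batch) = receiver.recv() {
            while let Some((bytes, addr)) = batch.pop_front() {
                let _ = socket.send_to(&bytes, addr);
            }
        }
    });
    let mut q = VecDeque::new();
    q.push_back((vec![1u8, 2, 3], "127.0.0.1:8000".parse().unwrap()));
    sender.send(q).unwrap();
    drop(sender); // closing the channel lets the responder exit
    responder.join().unwrap();
    Ok(())
}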
@@ -393,64 +469,70 @@ impl Crdt {
     }

     /// randomly pick a node and ask them for updates asynchronously
-    pub fn gossip(obj: Arc<RwLock<Self>>, exit: Arc<AtomicBool>) -> JoinHandle<()> {
-        spawn(move || loop {
-            let _ = Self::run_gossip(&obj);
-            if exit.load(Ordering::Relaxed) {
-                return;
-            }
-            //TODO this should be a tuned parameter
-            sleep(
-                obj.read()
-                    .expect("'obj' read lock in pub fn gossip")
-                    .timeout,
-            );
-        })
+    pub fn gossip(
+        obj: Arc<RwLock<Self>>,
+        blob_recycler: BlobRecycler,
+        blob_sender: BlobSender,
+        exit: Arc<AtomicBool>,
+    ) -> JoinHandle<()> {
+        Builder::new()
+            .name("solana-gossip".to_string())
+            .spawn(move || loop {
+                let _ = Self::run_gossip(&obj, &blob_sender, &blob_recycler);
+                if exit.load(Ordering::Relaxed) {
+                    return;
+                }
+                //TODO this should be a tuned parameter
+                sleep(
+                    obj.read()
+                        .expect("'obj' read lock in pub fn gossip")
+                        .timeout,
+                );
+            })
+            .unwrap()
     }
     fn run_window_request(
         window: &Arc<RwLock<Vec<Option<SharedBlob>>>>,
-        sock: &UdpSocket,
         from: &ReplicatedData,
         ix: u64,
-    ) -> Result<()> {
+        blob_recycler: &BlobRecycler,
+    ) -> Option<SharedBlob> {
         let pos = (ix as usize) % window.read().unwrap().len();
-        let mut outblob = vec![];
         if let &Some(ref blob) = &window.read().unwrap()[pos] {
             let rblob = blob.read().unwrap();
             let blob_ix = rblob.get_index().expect("run_window_request get_index");
             if blob_ix == ix {
+                let out = blob_recycler.allocate();
                 // copy to avoid doing IO inside the lock
-                outblob.extend(&rblob.data[..rblob.meta.size]);
+                {
+                    let mut outblob = out.write().unwrap();
+                    let sz = rblob.meta.size;
+                    outblob.meta.size = sz;
+                    outblob.data[..sz].copy_from_slice(&rblob.data[..sz]);
+                    outblob.meta.set_addr(&from.replicate_addr);
+                    //TODO, set the sender id to the requester so we dont retransmit
+                    //come up with a cleaner solution for this when sender signatures are checked
+                    outblob.set_id(from.id).expect("blob set_id");
+                }
+                return Some(out);
             }
         } else {
             assert!(window.read().unwrap()[pos].is_none());
             info!("failed RequestWindowIndex {} {}", ix, from.replicate_addr);
         }
-        if outblob.len() > 0 {
-            info!(
-                "responding RequestWindowIndex {} {}",
-                ix, from.replicate_addr
-            );
-            sock.send_to(&outblob, from.replicate_addr)?;
-        }
-        Ok(())
+        None
     }
-    /// Process messages from the network
-    fn run_listen(
+
+    //TODO we should first coalesce all the requests
+    fn handle_blob(
         obj: &Arc<RwLock<Self>>,
         window: &Arc<RwLock<Vec<Option<SharedBlob>>>>,
-        sock: &UdpSocket,
-    ) -> Result<()> {
-        //TODO cache connections
-        let mut buf = vec![0u8; 1024 * 64];
-        trace!("recv_from on {}", sock.local_addr().unwrap());
-        let (amt, src) = sock.recv_from(&mut buf)?;
-        trace!("got request from {}", src);
-        buf.resize(amt, 0);
-        let r = deserialize(&buf)?;
-        match r {
+        blob_recycler: &BlobRecycler,
+        blob: &Blob,
+    ) -> Option<SharedBlob> {
+        match deserialize(&blob.data[..blob.meta.size]) {
             // TODO sigverify these
-            Protocol::RequestUpdates(v, reqdata) => {
+            Ok(Protocol::RequestUpdates(v, reqdata)) => {
                 trace!("RequestUpdates {}", v);
                 let addr = reqdata.gossip_addr;
                 // only lock for this call, dont lock during IO `sock.send_to` or `sock.recv_from`
@@ -458,23 +540,40 @@ impl Crdt {
                     .expect("'obj' read lock in RequestUpdates")
                     .get_updates_since(v);
                 trace!("get updates since response {} {}", v, data.len());
-                let rsp = serialize(&Protocol::ReceiveUpdates(from, ups, data))?;
-                trace!("send_to {}", addr);
-                //TODO verify reqdata belongs to sender
-                obj.write()
-                    .expect("'obj' write lock in RequestUpdates")
-                    .insert(&reqdata);
-                sock.send_to(&rsp, addr)
-                    .expect("'sock.send_to' in RequestUpdates");
-                trace!("send_to done!");
+                let len = data.len();
+                let rsp = Protocol::ReceiveUpdates(from, ups, data);
+                obj.write().unwrap().insert(&reqdata);
+                if len < 1 {
+                    let me = obj.read().unwrap();
+                    trace!(
+                        "no updates me {:?} ix {} since {}",
+                        &me.me[..4],
+                        me.update_index,
+                        v
+                    );
+                    None
+                } else if let Ok(r) = to_blob(rsp, addr, &blob_recycler) {
+                    trace!(
+                        "sending updates me {:?} len {} to {:?} {}",
+                        &obj.read().unwrap().me[..4],
+                        len,
+                        &reqdata.id[..4],
+                        addr,
+                    );
+                    Some(r)
+                } else {
+                    warn!("to_blob failed");
+                    None
+                }
             }
-            Protocol::ReceiveUpdates(from, ups, data) => {
-                trace!("ReceivedUpdates");
+            Ok(Protocol::ReceiveUpdates(from, ups, data)) => {
+                trace!("ReceivedUpdates {:?} {} {}", &from[0..4], ups, data.len());
                 obj.write()
                     .expect("'obj' write lock in ReceiveUpdates")
                     .apply_updates(from, ups, &data);
+                None
             }
-            Protocol::RequestWindowIndex(from, ix) => {
+            Ok(Protocol::RequestWindowIndex(from, ix)) => {
                 //TODO verify from is signed
                 obj.write().unwrap().insert(&from);
                 let me = obj.read().unwrap().my_data().clone();
@@ -485,161 +584,130 @@ impl Crdt {
                     me.replicate_addr
                 );
                 assert_ne!(from.replicate_addr, me.replicate_addr);
-                let _ = Self::run_window_request(window, sock, &from, ix);
+                Self::run_window_request(&window, &from, ix, blob_recycler)
             }
+            Err(_) => {
+                warn!("deserialize crdt packet failed");
+                None
+            }
+        }
+    }
+
+    /// Process messages from the network
+    fn run_listen(
+        obj: &Arc<RwLock<Self>>,
+        window: &Arc<RwLock<Vec<Option<SharedBlob>>>>,
+        blob_recycler: &BlobRecycler,
+        requests_receiver: &BlobReceiver,
+        response_sender: &BlobSender,
+    ) -> Result<()> {
+        //TODO cache connections
+        let timeout = Duration::new(1, 0);
+        let mut reqs = requests_receiver.recv_timeout(timeout)?;
+        while let Ok(mut more) = requests_receiver.try_recv() {
+            reqs.append(&mut more);
+        }
+        let resp: VecDeque<_> = reqs.iter()
+            .filter_map(|b| Self::handle_blob(obj, window, blob_recycler, &b.read().unwrap()))
+            .collect();
+        response_sender.send(resp)?;
+        while let Some(r) = reqs.pop_front() {
+            blob_recycler.recycle(r);
         }
         Ok(())
     }
     pub fn listen(
         obj: Arc<RwLock<Self>>,
         window: Arc<RwLock<Vec<Option<SharedBlob>>>>,
-        sock: UdpSocket,
+        blob_recycler: BlobRecycler,
+        requests_receiver: BlobReceiver,
+        response_sender: BlobSender,
         exit: Arc<AtomicBool>,
     ) -> JoinHandle<()> {
-        sock.set_read_timeout(Some(Duration::new(2, 0)))
-            .expect("'sock.set_read_timeout' in crdt.rs");
-        spawn(move || loop {
-            let e = Self::run_listen(&obj, &window, &sock);
-            if e.is_err() {
-                info!(
-                    "run_listen timeout, table size: {}",
-                    obj.read().unwrap().table.len()
-                );
-            }
-            if exit.load(Ordering::Relaxed) {
-                return;
-            }
-        })
+        Builder::new()
+            .name("solana-listen".to_string())
+            .spawn(move || loop {
+                let e = Self::run_listen(
+                    &obj,
+                    &window,
+                    &blob_recycler,
+                    &requests_receiver,
+                    &response_sender,
+                );
+                if e.is_err() {
+                    info!(
+                        "run_listen timeout, table size: {}",
+                        obj.read().unwrap().table.len()
+                    );
+                }
+                if exit.load(Ordering::Relaxed) {
+                    return;
+                }
+            })
+            .unwrap()
     }
 }

+pub struct Sockets {
+    pub gossip: UdpSocket,
+    pub gossip_send: UdpSocket,
+    pub requests: UdpSocket,
+    pub replicate: UdpSocket,
+    pub transaction: UdpSocket,
+    pub respond: UdpSocket,
+    pub broadcast: UdpSocket,
+}
+
+pub struct TestNode {
+    pub data: ReplicatedData,
+    pub sockets: Sockets,
+}
+
+impl TestNode {
+    pub fn new() -> TestNode {
+        let gossip = UdpSocket::bind("0.0.0.0:0").unwrap();
+        let gossip_send = UdpSocket::bind("0.0.0.0:0").unwrap();
+        let requests = UdpSocket::bind("0.0.0.0:0").unwrap();
+        let transaction = UdpSocket::bind("0.0.0.0:0").unwrap();
+        let replicate = UdpSocket::bind("0.0.0.0:0").unwrap();
+        let respond = UdpSocket::bind("0.0.0.0:0").unwrap();
+        let broadcast = UdpSocket::bind("0.0.0.0:0").unwrap();
+        let pubkey = KeyPair::new().pubkey();
+        let data = ReplicatedData::new(
+            pubkey,
+            gossip.local_addr().unwrap(),
+            replicate.local_addr().unwrap(),
+            requests.local_addr().unwrap(),
+            transaction.local_addr().unwrap(),
+        );
+        TestNode {
+            data: data,
+            sockets: Sockets {
+                gossip,
+                gossip_send,
+                requests,
+                replicate,
+                transaction,
+                respond,
+                broadcast,
+            },
+        }
+    }
+}

 #[cfg(test)]
 mod tests {
-    use crdt::{Crdt, ReplicatedData};
-    use logger;
-    use packet::Blob;
-    use rayon::iter::*;
-    use signature::KeyPair;
-    use signature::KeyPairUtil;
-    use std::net::UdpSocket;
-    use std::sync::atomic::{AtomicBool, Ordering};
-    use std::sync::{Arc, RwLock};
-    use std::thread::{sleep, JoinHandle};
-    use std::time::Duration;
-
-    fn test_node() -> (Crdt, UdpSocket, UdpSocket, UdpSocket) {
-        let gossip = UdpSocket::bind("0.0.0.0:0").unwrap();
-        let replicate = UdpSocket::bind("0.0.0.0:0").unwrap();
-        let serve = UdpSocket::bind("0.0.0.0:0").unwrap();
-        let events = UdpSocket::bind("0.0.0.0:0").unwrap();
-        let pubkey = KeyPair::new().pubkey();
-        let d = ReplicatedData::new(
-            pubkey,
-            gossip.local_addr().unwrap(),
-            replicate.local_addr().unwrap(),
-            serve.local_addr().unwrap(),
-            events.local_addr().unwrap(),
-        );
-        let crdt = Crdt::new(d);
-        trace!(
-            "id: {} gossip: {} replicate: {} serve: {}",
-            crdt.my_data().id[0],
-            gossip.local_addr().unwrap(),
-            replicate.local_addr().unwrap(),
-            serve.local_addr().unwrap(),
-        );
-        (crdt, gossip, replicate, serve)
-    }
-
-    /// Test that the network converges.
-    /// Run until every node in the network has a full ReplicatedData set.
-    /// Check that nodes stop sending updates after all the ReplicatedData has been shared.
-    /// tests that actually use this function are below
-    fn run_gossip_topo<F>(topo: F)
-    where
-        F: Fn(&Vec<(Arc<RwLock<Crdt>>, JoinHandle<()>)>) -> (),
-    {
-        let num: usize = 5;
-        let exit = Arc::new(AtomicBool::new(false));
-        let listen: Vec<_> = (0..num)
-            .map(|_| {
-                let (crdt, gossip, _, _) = test_node();
-                let c = Arc::new(RwLock::new(crdt));
-                let w = Arc::new(RwLock::new(vec![]));
-                let l = Crdt::listen(c.clone(), w, gossip, exit.clone());
-                (c, l)
-            })
-            .collect();
-        topo(&listen);
-        let gossip: Vec<_> = listen
-            .iter()
-            .map(|&(ref c, _)| Crdt::gossip(c.clone(), exit.clone()))
-            .collect();
-        let mut done = true;
-        for i in 0..(num * 32) {
-            done = false;
-            trace!("round {}", i);
-            for &(ref c, _) in listen.iter() {
-                if num == c.read().unwrap().convergence() as usize {
-                    done = true;
-                    break;
-                }
-            }
-            //at least 1 node converged
-            if done == true {
-                break;
-            }
-            sleep(Duration::new(1, 0));
-        }
-        exit.store(true, Ordering::Relaxed);
-        for j in gossip {
-            j.join().unwrap();
-        }
-        for (c, j) in listen.into_iter() {
-            j.join().unwrap();
-            // make it clear what failed
-            // protocol is to chatty, updates should stop after everyone receives `num`
-            assert!(c.read().unwrap().update_index <= num as u64);
-            // protocol is not chatty enough, everyone should get `num` entries
-            assert_eq!(c.read().unwrap().table.len(), num);
-        }
-        assert!(done);
-    }
-    /// ring a -> b -> c -> d -> e -> a
+    use crdt::{parse_port_or_addr, Crdt, ReplicatedData};
+    use signature::{KeyPair, KeyPairUtil};
     #[test]
-    #[ignore]
-    fn gossip_ring_test() {
-        logger::setup();
-        run_gossip_topo(|listen| {
-            let num = listen.len();
-            for n in 0..num {
-                let y = n % listen.len();
-                let x = (n + 1) % listen.len();
-                let mut xv = listen[x].0.write().unwrap();
-                let yv = listen[y].0.read().unwrap();
-                let mut d = yv.table[&yv.me].clone();
-                d.version = 0;
-                xv.insert(&d);
-            }
-        });
-    }
-
-    /// star (b,c,d,e) -> a
-    #[test]
-    #[ignore]
-    fn gossip_star_test() {
-        run_gossip_topo(|listen| {
-            let num = listen.len();
-            for n in 0..(num - 1) {
-                let x = 0;
-                let y = (n + 1) % listen.len();
-                let mut xv = listen[x].0.write().unwrap();
-                let yv = listen[y].0.read().unwrap();
-                let mut d = yv.table[&yv.me].clone();
-                d.version = 0;
-                xv.insert(&d);
-            }
-        });
-    }
+    fn test_parse_port_or_addr() {
+        let p1 = parse_port_or_addr(Some("9000".to_string()));
+        assert_eq!(p1.port(), 9000);
+        let p2 = parse_port_or_addr(Some("127.0.0.1:7000".to_string()));
+        assert_eq!(p2.port(), 7000);
+        let p3 = parse_port_or_addr(None);
+        assert_eq!(p3.port(), 8000);
+    }

     /// Test that insert drops messages that are older
@@ -662,77 +730,59 @@ mod tests {
         crdt.insert(&d);
         assert_eq!(crdt.table[&d.id].version, 2);
     }
-
-    #[test]
-    #[ignore]
-    pub fn test_crdt_retransmit() {
-        logger::setup();
-        trace!("c1:");
-        let (mut c1, s1, r1, e1) = test_node();
-        trace!("c2:");
-        let (mut c2, s2, r2, _) = test_node();
-        trace!("c3:");
-        let (mut c3, s3, r3, _) = test_node();
-        let c1_id = c1.my_data().id;
-        c1.set_leader(c1_id);
-
-        c2.insert(&c1.my_data());
-        c3.insert(&c1.my_data());
-
-        c2.set_leader(c1.my_data().id);
-        c3.set_leader(c1.my_data().id);
-
-        let exit = Arc::new(AtomicBool::new(false));
-
-        // Create listen threads
-        let win1 = Arc::new(RwLock::new(vec![]));
-        let a1 = Arc::new(RwLock::new(c1));
-        let t1 = Crdt::listen(a1.clone(), win1, s1, exit.clone());
-
-        let a2 = Arc::new(RwLock::new(c2));
-        let win2 = Arc::new(RwLock::new(vec![]));
-        let t2 = Crdt::listen(a2.clone(), win2, s2, exit.clone());
-
-        let a3 = Arc::new(RwLock::new(c3));
-        let win3 = Arc::new(RwLock::new(vec![]));
-        let t3 = Crdt::listen(a3.clone(), win3, s3, exit.clone());
-
-        // Create gossip threads
-        let t1_gossip = Crdt::gossip(a1.clone(), exit.clone());
-        let t2_gossip = Crdt::gossip(a2.clone(), exit.clone());
-        let t3_gossip = Crdt::gossip(a3.clone(), exit.clone());
-
-        //wait to converge
-        trace!("waitng to converge:");
-        let mut done = false;
-        for _ in 0..30 {
-            done = a1.read().unwrap().table.len() == 3 && a2.read().unwrap().table.len() == 3
-                && a3.read().unwrap().table.len() == 3;
-            if done {
-                break;
-            }
-            sleep(Duration::new(1, 0));
-        }
-        assert!(done);
-        let mut b = Blob::default();
-        b.meta.size = 10;
-        Crdt::retransmit(&a1, &Arc::new(RwLock::new(b)), &e1).unwrap();
-        let res: Vec<_> = [r1, r2, r3]
-            .into_par_iter()
-            .map(|s| {
-                let mut b = Blob::default();
-                s.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
-                let res = s.recv_from(&mut b.data);
-                res.is_err() //true if failed to receive the retransmit packet
-            })
-            .collect();
-        //true if failed receive the retransmit packet, r2, and r3 should succeed
-        //r1 was the sender, so it should fail to receive the packet
-        assert_eq!(res, [true, false, false]);
-        exit.store(true, Ordering::Relaxed);
-        let threads = vec![t1, t2, t3, t1_gossip, t2_gossip, t3_gossip];
-        for t in threads.into_iter() {
-            t.join().unwrap();
-        }
-    }
+    fn sorted(ls: &Vec<ReplicatedData>) -> Vec<ReplicatedData> {
+        let mut copy: Vec<_> = ls.iter().cloned().collect();
+        copy.sort_by(|x, y| x.id.cmp(&y.id));
+        copy
+    }
+    #[test]
+    fn update_test() {
+        let d1 = ReplicatedData::new(
+            KeyPair::new().pubkey(),
+            "127.0.0.1:1234".parse().unwrap(),
+            "127.0.0.1:1235".parse().unwrap(),
+            "127.0.0.1:1236".parse().unwrap(),
+            "127.0.0.1:1237".parse().unwrap(),
+        );
+        let d2 = ReplicatedData::new(
+            KeyPair::new().pubkey(),
+            "127.0.0.1:1234".parse().unwrap(),
+            "127.0.0.1:1235".parse().unwrap(),
+            "127.0.0.1:1236".parse().unwrap(),
+            "127.0.0.1:1237".parse().unwrap(),
+        );
+        let d3 = ReplicatedData::new(
+            KeyPair::new().pubkey(),
+            "127.0.0.1:1234".parse().unwrap(),
+            "127.0.0.1:1235".parse().unwrap(),
+            "127.0.0.1:1236".parse().unwrap(),
+            "127.0.0.1:1237".parse().unwrap(),
+        );
+        let mut crdt = Crdt::new(d1.clone());
+        let (key, ix, ups) = crdt.get_updates_since(0);
+        assert_eq!(key, d1.id);
+        assert_eq!(ix, 1);
+        assert_eq!(ups.len(), 1);
+        assert_eq!(sorted(&ups), sorted(&vec![d1.clone()]));
+        crdt.insert(&d2);
+        let (key, ix, ups) = crdt.get_updates_since(0);
+        assert_eq!(key, d1.id);
+        assert_eq!(ix, 2);
+        assert_eq!(ups.len(), 2);
+        assert_eq!(sorted(&ups), sorted(&vec![d1.clone(), d2.clone()]));
+        crdt.insert(&d3);
+        let (key, ix, ups) = crdt.get_updates_since(0);
+        assert_eq!(key, d1.id);
+        assert_eq!(ix, 3);
+        assert_eq!(ups.len(), 3);
+        assert_eq!(sorted(&ups), sorted(&vec![d2.clone(), d1, d3]));
+        let mut crdt2 = Crdt::new(d2.clone());
+        crdt2.apply_updates(key, ix, &ups);
+        assert_eq!(crdt2.table.values().len(), 3);
+        assert_eq!(
+            sorted(&crdt2.table.values().map(|x| x.clone()).collect()),
+            sorted(&crdt.table.values().map(|x| x.clone()).collect())
+        );
+    }
 }
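The new `update_test` pins down the exchange contract: `get_updates_since(v)` returns the sender's key, its current update index, and every entry inserted after index `v`, and `apply_updates` replays that batch on the receiving side. A round-trip sketch, condensed from the test above:

use crdt::{Crdt, ReplicatedData};
use signature::{KeyPair, KeyPairUtil};

fn demo() {
    let d1 = ReplicatedData::new(
        KeyPair::new().pubkey(),
        "127.0.0.1:1234".parse().unwrap(),
        "127.0.0.1:1235".parse().unwrap(),
        "127.0.0.1:1236".parse().unwrap(),
        "127.0.0.1:1237".parse().unwrap(),
    );
    let d2 = ReplicatedData::new(
        KeyPair::new().pubkey(),
        "127.0.0.1:1234".parse().unwrap(),
        "127.0.0.1:1235".parse().unwrap(),
        "127.0.0.1:1236".parse().unwrap(),
        "127.0.0.1:1237".parse().unwrap(),
    );
    let mut a = Crdt::new(d1.clone());
    a.insert(&d2);
    // Pull everything the other side hasn't seen (0 = from the beginning)...
    let (from, index, updates) = a.get_updates_since(0);
    // ...and replay it into a fresh table on the other node.
    let mut b = Crdt::new(d2);
    b.apply_updates(from, index, &updates);
    assert_eq!(b.table.len(), 2);
}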
src/data_replicator.rs (new file)
@@ -0,0 +1,84 @@
+use crdt;
+use packet;
+use result::Result;
+use std::net::UdpSocket;
+use std::sync::atomic::AtomicBool;
+use std::sync::mpsc::channel;
+use std::sync::{Arc, RwLock};
+use std::thread::JoinHandle;
+use streamer;
+
+pub struct DataReplicator {
+    pub thread_hdls: Vec<JoinHandle<()>>,
+}
+
+impl DataReplicator {
+    pub fn new(
+        crdt: Arc<RwLock<crdt::Crdt>>,
+        window: Arc<RwLock<Vec<Option<packet::SharedBlob>>>>,
+        gossip_listen_socket: UdpSocket,
+        gossip_send_socket: UdpSocket,
+        exit: Arc<AtomicBool>,
+    ) -> Result<DataReplicator> {
+        let blob_recycler = packet::BlobRecycler::default();
+        let (request_sender, request_receiver) = channel();
+        trace!(
+            "DataReplicator: id: {:?}, listening on: {:?}",
+            &crdt.read().unwrap().me[..4],
+            gossip_listen_socket.local_addr().unwrap()
+        );
+        let t_receiver = streamer::blob_receiver(
+            exit.clone(),
+            blob_recycler.clone(),
+            gossip_listen_socket,
+            request_sender,
+        )?;
+        let (response_sender, response_receiver) = channel();
+        let t_responder = streamer::responder(
+            gossip_send_socket,
+            exit.clone(),
+            blob_recycler.clone(),
+            response_receiver,
+        );
+        let t_listen = crdt::Crdt::listen(
+            crdt.clone(),
+            window,
+            blob_recycler.clone(),
+            request_receiver,
+            response_sender.clone(),
+            exit.clone(),
+        );
+        let t_gossip = crdt::Crdt::gossip(crdt.clone(), blob_recycler, response_sender, exit);
+        let thread_hdls = vec![t_receiver, t_responder, t_listen, t_gossip];
+        Ok(DataReplicator { thread_hdls })
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crdt::{Crdt, TestNode};
+    use data_replicator::DataReplicator;
+    use std::sync::atomic::{AtomicBool, Ordering};
+    use std::sync::{Arc, RwLock};
+
+    #[test]
+    // test that stage will exit when flag is set
+    fn test_exit() {
+        let exit = Arc::new(AtomicBool::new(false));
+        let tn = TestNode::new();
+        let crdt = Crdt::new(tn.data.clone());
+        let c = Arc::new(RwLock::new(crdt));
+        let w = Arc::new(RwLock::new(vec![]));
+        let d = DataReplicator::new(
+            c.clone(),
+            w,
+            tn.sockets.gossip,
+            tn.sockets.gossip_send,
+            exit.clone(),
+        ).unwrap();
+        exit.store(true, Ordering::Relaxed);
+        for t in d.thread_hdls {
+            t.join().expect("thread join");
+        }
+    }
+}
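Every stage in this wiring (receiver, responder, listen, gossip) polls the same shared `AtomicBool` and returns when it flips, so shutdown is "set the flag, then join the handles" as in the test above. The pattern in isolation, as a std-only sketch:

use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;

fn demo() {
    let exit = Arc::new(AtomicBool::new(false));
    let flag = exit.clone();
    let handle = thread::spawn(move || {
        while !flag.load(Ordering::Relaxed) {
            // ... do one unit of work, with a timeout so the flag is re-checked ...
        }
    });
    exit.store(true, Ordering::Relaxed);
    handle.join().expect("thread join");
}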
src/entry.rs
@@ -2,13 +2,13 @@
 //! unique ID that is the hash of the Entry before it, plus the hash of the
 //! transactions within it. Entries cannot be reordered, and its field `num_hashes`
 //! represents an approximate amount of time since the last Entry was created.
-use event::Event;
 use hash::{extend_and_hash, hash, Hash};
 use rayon::prelude::*;
+use transaction::Transaction;

 /// Each Entry contains three pieces of data. The `num_hashes` field is the number
 /// of hashes performed since the previous entry. The `id` field is the result
-/// of hashing `id` from the previous entry `num_hashes` times. The `events`
+/// of hashing `id` from the previous entry `num_hashes` times. The `transactions`
 /// field points to Events that took place shortly after `id` was generated.
 ///
 /// If you divide `num_hashes` by the amount of time it takes to generate a new hash, you
@@ -21,69 +21,69 @@ use rayon::prelude::*;
 pub struct Entry {
     pub num_hashes: u64,
     pub id: Hash,
-    pub events: Vec<Event>,
+    pub transactions: Vec<Transaction>,
 }

 impl Entry {
     /// Creates the next Entry `num_hashes` after `start_hash`.
-    pub fn new(start_hash: &Hash, cur_hashes: u64, events: Vec<Event>) -> Self {
-        let num_hashes = cur_hashes + if events.is_empty() { 0 } else { 1 };
-        let id = next_hash(start_hash, 0, &events);
+    pub fn new(start_hash: &Hash, cur_hashes: u64, transactions: Vec<Transaction>) -> Self {
+        let num_hashes = cur_hashes + if transactions.is_empty() { 0 } else { 1 };
+        let id = next_hash(start_hash, 0, &transactions);
         Entry {
             num_hashes,
             id,
-            events,
+            transactions,
         }
     }

     /// Creates the next Tick Entry `num_hashes` after `start_hash`.
-    pub fn new_mut(start_hash: &mut Hash, cur_hashes: &mut u64, events: Vec<Event>) -> Self {
-        let entry = Self::new(start_hash, *cur_hashes, events);
+    pub fn new_mut(
+        start_hash: &mut Hash,
+        cur_hashes: &mut u64,
+        transactions: Vec<Transaction>,
+    ) -> Self {
+        let entry = Self::new(start_hash, *cur_hashes, transactions);
         *start_hash = entry.id;
         *cur_hashes = 0;
         entry
     }

-    /// Creates a Entry from the number of hashes `num_hashes` since the previous event
+    /// Creates a Entry from the number of hashes `num_hashes` since the previous transaction
     /// and that resulting `id`.
     pub fn new_tick(num_hashes: u64, id: &Hash) -> Self {
         Entry {
             num_hashes,
             id: *id,
-            events: vec![],
+            transactions: vec![],
         }
     }

     /// Verifies self.id is the result of hashing a `start_hash` `self.num_hashes` times.
-    /// If the event is not a Tick, then hash that as well.
+    /// If the transaction is not a Tick, then hash that as well.
     pub fn verify(&self, start_hash: &Hash) -> bool {
-        self.events.par_iter().all(|event| event.verify())
-            && self.id == next_hash(start_hash, self.num_hashes, &self.events)
+        self.transactions.par_iter().all(|tx| tx.verify_plan())
+            && self.id == next_hash(start_hash, self.num_hashes, &self.transactions)
     }
 }

-fn add_event_data(hash_data: &mut Vec<u8>, event: &Event) {
-    match *event {
-        Event::Transaction(ref tr) => {
-            hash_data.push(0u8);
-            hash_data.extend_from_slice(&tr.sig);
-        }
-    }
+fn add_transaction_data(hash_data: &mut Vec<u8>, tx: &Transaction) {
+    hash_data.push(0u8);
+    hash_data.extend_from_slice(&tx.sig);
 }

-/// Creates the hash `num_hashes` after `start_hash`. If the event contains
+/// Creates the hash `num_hashes` after `start_hash`. If the transaction contains
 /// a signature, the final hash will be a hash of both the previous ID and
 /// the signature.
-pub fn next_hash(start_hash: &Hash, num_hashes: u64, events: &[Event]) -> Hash {
+pub fn next_hash(start_hash: &Hash, num_hashes: u64, transactions: &[Transaction]) -> Hash {
     let mut id = *start_hash;
     for _ in 1..num_hashes {
         id = hash(&id);
     }

-    // Hash all the event data
+    // Hash all the transaction data
     let mut hash_data = vec![];
-    for event in events {
-        add_event_data(&mut hash_data, event);
+    for tx in transactions {
+        add_transaction_data(&mut hash_data, tx);
     }

     if !hash_data.is_empty() {
@@ -96,11 +96,11 @@ pub fn next_hash(start_hash: &Hash, num_hashes: u64, events: &[Event]) -> Hash {
 }

 /// Creates the next Tick or Event Entry `num_hashes` after `start_hash`.
-pub fn next_entry(start_hash: &Hash, num_hashes: u64, events: Vec<Event>) -> Entry {
+pub fn next_entry(start_hash: &Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Entry {
     Entry {
         num_hashes,
-        id: next_hash(start_hash, num_hashes, &events),
-        events: events,
+        id: next_hash(start_hash, num_hashes, &transactions),
+        transactions,
     }
 }

@@ -109,7 +109,6 @@ mod tests {
     use super::*;
     use chrono::prelude::*;
    use entry::Entry;
-    use event::Event;
     use hash::hash;
     use signature::{KeyPair, KeyPairUtil};
     use transaction::Transaction;
@@ -125,19 +124,19 @@ mod tests {
     }

     #[test]
-    fn test_event_reorder_attack() {
+    fn test_transaction_reorder_attack() {
         let zero = Hash::default();

         // First, verify entries
         let keypair = KeyPair::new();
-        let tr0 = Event::new_transaction(&keypair, keypair.pubkey(), 0, zero);
-        let tr1 = Event::new_transaction(&keypair, keypair.pubkey(), 1, zero);
-        let mut e0 = Entry::new(&zero, 0, vec![tr0.clone(), tr1.clone()]);
+        let tx0 = Transaction::new(&keypair, keypair.pubkey(), 0, zero);
+        let tx1 = Transaction::new(&keypair, keypair.pubkey(), 1, zero);
+        let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()]);
         assert!(e0.verify(&zero));

-        // Next, swap two events and ensure verification fails.
-        e0.events[0] = tr1; // <-- attack
-        e0.events[1] = tr0;
+        // Next, swap two transactions and ensure verification fails.
+        e0.transactions[0] = tx1; // <-- attack
+        e0.transactions[1] = tx0;
         assert!(!e0.verify(&zero));
     }

@@ -147,18 +146,14 @@ mod tests {

         // First, verify entries
         let keypair = KeyPair::new();
-        let tr0 = Event::Transaction(Transaction::new_timestamp(&keypair, Utc::now(), zero));
-        let tr1 = Event::Transaction(Transaction::new_signature(
-            &keypair,
-            Default::default(),
-            zero,
-        ));
-        let mut e0 = Entry::new(&zero, 0, vec![tr0.clone(), tr1.clone()]);
+        let tx0 = Transaction::new_timestamp(&keypair, Utc::now(), zero);
+        let tx1 = Transaction::new_signature(&keypair, Default::default(), zero);
+        let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()]);
         assert!(e0.verify(&zero));

-        // Next, swap two witness events and ensure verification fails.
-        e0.events[0] = tr1; // <-- attack
-        e0.events[1] = tr0;
+        // Next, swap two witness transactions and ensure verification fails.
+        e0.transactions[0] = tx1; // <-- attack
+        e0.transactions[1] = tx0;
         assert!(!e0.verify(&zero));
     }
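The reorder-attack tests work because `next_hash` folds each transaction's signature bytes into the entry id in order, on top of the previous id, so swapping two transactions changes the id and breaks verification. A sketch of that property, using the constructors shown in this diff:

use entry::next_entry;
use hash::Hash;
use signature::{KeyPair, KeyPairUtil};
use transaction::Transaction;

fn demo() {
    let zero = Hash::default();
    let keypair = KeyPair::new();
    let tx0 = Transaction::new(&keypair, keypair.pubkey(), 0, zero);
    // The entry id commits to the previous id *and* the ordered signatures.
    let e = next_entry(&zero, 1, vec![tx0]);
    assert!(e.verify(&zero));
}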
@@ -2,13 +2,13 @@

 use bank::Bank;
 use entry::Entry;
-use ledger;
+use ledger::Block;
 use packet;
 use result::Result;
 use serde_json;
 use std::collections::VecDeque;
-use std::io::Write;
 use std::io::sink;
+use std::io::Write;
 use std::sync::mpsc::Receiver;
 use std::sync::{Arc, Mutex};
 use std::time::Duration;
@@ -63,7 +63,7 @@ impl<'a> EntryWriter<'a> {
         let mut q = VecDeque::new();
         let list = self.write_entries(writer, entry_receiver)?;
         trace!("New blobs? {}", list.len());
-        ledger::process_entry_list_into_blobs(&list, blob_recycler, &mut q);
+        list.to_blobs(blob_recycler, &mut q);
         if !q.is_empty() {
             trace!("broadcasting {}", q.len());
             broadcast.send(q)?;
|
31
src/event.rs
31
src/event.rs
@@ -1,31 +0,0 @@
|
|||||||
//! The `event` module handles events, which may be a `Transaction`, or a `Witness` used to process a pending
|
|
||||||
//! Transaction.
|
|
||||||
|
|
||||||
use hash::Hash;
|
|
||||||
use signature::{KeyPair, PublicKey};
|
|
||||||
use transaction::Transaction;
|
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
|
|
||||||
pub enum Event {
|
|
||||||
Transaction(Transaction),
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Event {
|
|
||||||
pub fn new_transaction(
|
|
||||||
from_keypair: &KeyPair,
|
|
||||||
to: PublicKey,
|
|
||||||
tokens: i64,
|
|
||||||
last_id: Hash,
|
|
||||||
) -> Self {
|
|
||||||
let tr = Transaction::new(from_keypair, to, tokens, last_id);
|
|
||||||
Event::Transaction(tr)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Verify the Event's signature's are valid and if a transaction, that its
|
|
||||||
/// spending plan is valid.
|
|
||||||
pub fn verify(&self) -> bool {
|
|
||||||
match *self {
|
|
||||||
Event::Transaction(ref tr) => tr.verify_plan(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
31 src/fetch_stage.rs Normal file
@@ -0,0 +1,31 @@
+//! The `fetch_stage` batches input from a UDP socket and sends it to a channel.
+
+use packet;
+use std::net::UdpSocket;
+use std::sync::atomic::AtomicBool;
+use std::sync::mpsc::channel;
+use std::sync::Arc;
+use std::thread::JoinHandle;
+use streamer;
+
+pub struct FetchStage {
+    pub packet_receiver: streamer::PacketReceiver,
+    pub thread_hdl: JoinHandle<()>,
+}
+
+impl FetchStage {
+    pub fn new(
+        socket: UdpSocket,
+        exit: Arc<AtomicBool>,
+        packet_recycler: packet::PacketRecycler,
+    ) -> Self {
+        let (packet_sender, packet_receiver) = channel();
+        let thread_hdl =
+            streamer::receiver(socket, exit.clone(), packet_recycler.clone(), packet_sender);
+
+        FetchStage {
+            packet_receiver,
+            thread_hdl,
+        }
+    }
+}
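For orientation, a minimal sketch of how a caller might drive the new stage, assuming the crate is consumed as `solana` (per the package name in CI) and that the receiver loop polls the exit flag; the port is illustrative:

    use solana::fetch_stage::FetchStage;
    use solana::packet::PacketRecycler;
    use std::net::UdpSocket;
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::Arc;

    fn main() {
        // Bind the UDP port the stage will batch input from.
        let socket = UdpSocket::bind("0.0.0.0:8000").expect("bind");
        let exit = Arc::new(AtomicBool::new(false));
        let stage = FetchStage::new(socket, exit.clone(), PacketRecycler::default());
        // Downstream stages consume packet batches from stage.packet_receiver.
        exit.store(true, Ordering::Relaxed); // signal shutdown
        stage.thread_hdl.join().expect("fetch stage thread");
    }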
@@ -1,7 +1,7 @@
 //! The `hash` module provides functions for creating SHA-256 hashes.

-use generic_array::GenericArray;
 use generic_array::typenum::U32;
+use generic_array::GenericArray;
 use sha2::{Digest, Sha256};

 pub type Hash = GenericArray<u8, U32>;
166 src/ledger.rs
@@ -3,7 +3,6 @@
 use bincode::{deserialize, serialize_into};
 use entry::{next_entry, Entry};
-use event::Event;
 use hash::Hash;
 use packet;
 use packet::{SharedBlob, BLOB_DATA_SIZE, BLOB_SIZE};
@@ -12,10 +11,12 @@ use std::cmp::min;
 use std::collections::VecDeque;
 use std::io::Cursor;
 use std::mem::size_of;
+use transaction::Transaction;

 pub trait Block {
-    /// Verifies the hashes and counts of a slice of events are all consistent.
+    /// Verifies the hashes and counts of a slice of transactions are all consistent.
     fn verify(&self, start_hash: &Hash) -> bool;
+    fn to_blobs(&self, blob_recycler: &packet::BlobRecycler, q: &mut VecDeque<SharedBlob>);
 }

 impl Block for [Entry] {
@@ -24,81 +25,85 @@ impl Block for [Entry] {
         let entry_pairs = genesis.par_iter().chain(self).zip(self);
         entry_pairs.all(|(x0, x1)| x1.verify(&x0.id))
     }
+
+    fn to_blobs(&self, blob_recycler: &packet::BlobRecycler, q: &mut VecDeque<SharedBlob>) {
+        let mut start = 0;
+        let mut end = 0;
+        while start < self.len() {
+            let mut entries: Vec<Vec<Entry>> = Vec::new();
+            let mut total = 0;
+            for i in &self[start..] {
+                total += size_of::<Transaction>() * i.transactions.len();
+                total += size_of::<Entry>();
+                if total >= BLOB_DATA_SIZE {
+                    break;
+                }
+                end += 1;
+            }
+            // See if we need to split the transactions
+            if end <= start {
+                let mut transaction_start = 0;
+                let num_transactions_per_blob = BLOB_DATA_SIZE / size_of::<Transaction>();
+                let total_entry_chunks = (self[end].transactions.len() + num_transactions_per_blob
+                    - 1) / num_transactions_per_blob;
+                trace!(
+                    "splitting transactions end: {} total_chunks: {}",
+                    end,
+                    total_entry_chunks
+                );
+                for _ in 0..total_entry_chunks {
+                    let transaction_end = min(
+                        transaction_start + num_transactions_per_blob,
+                        self[end].transactions.len(),
+                    );
+                    let mut entry = Entry {
+                        num_hashes: self[end].num_hashes,
+                        id: self[end].id,
+                        transactions: self[end].transactions[transaction_start..transaction_end]
+                            .to_vec(),
+                    };
+                    entries.push(vec![entry]);
+                    transaction_start = transaction_end;
+                }
+                end += 1;
+            } else {
+                entries.push(self[start..end].to_vec());
+            }
+
+            for entry in entries {
+                let b = blob_recycler.allocate();
+                let pos = {
+                    let mut bd = b.write().unwrap();
+                    let mut out = Cursor::new(bd.data_mut());
+                    serialize_into(&mut out, &entry).expect("failed to serialize output");
+                    out.position() as usize
+                };
+                assert!(pos < BLOB_SIZE);
+                b.write().unwrap().set_size(pos);
+                q.push_back(b);
+            }
+            start = end;
+        }
+    }
 }

-/// Create a vector of Entries of length `event_set.len()` from `start_hash` hash, `num_hashes`, and `event_set`.
-pub fn next_entries(start_hash: &Hash, num_hashes: u64, event_set: Vec<Vec<Event>>) -> Vec<Entry> {
+/// Create a vector of Entries of length `transaction_batches.len()` from `start_hash` hash, `num_hashes`, and `transaction_batches`.
+pub fn next_entries(
+    start_hash: &Hash,
+    num_hashes: u64,
+    transaction_batches: Vec<Vec<Transaction>>,
+) -> Vec<Entry> {
     let mut id = *start_hash;
     let mut entries = vec![];
-    for event_list in &event_set {
-        let events = event_list.clone();
-        let entry = next_entry(&id, num_hashes, events);
+    for transactions in &transaction_batches {
+        let transactions = transactions.clone();
+        let entry = next_entry(&id, num_hashes, transactions);
         id = entry.id;
         entries.push(entry);
     }
     entries
 }
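With blob serialization now owned by the `Block` trait, a slice of entries converts itself. A minimal round-trip sketch, mirroring the updated test later in this file:

    use solana::hash::Hash;
    use solana::ledger::{next_entries, reconstruct_entries_from_blobs, Block};
    use solana::packet::BlobRecycler;
    use std::collections::VecDeque;

    // Build a few empty entries, pack them into blobs, and recover them.
    let start_hash = Hash::default();
    let entries = next_entries(&start_hash, 0, vec![vec![]; 3]);
    let blob_recycler = BlobRecycler::default();
    let mut blob_q = VecDeque::new();
    entries.to_blobs(&blob_recycler, &mut blob_q);
    assert_eq!(reconstruct_entries_from_blobs(&blob_q), entries);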
-pub fn process_entry_list_into_blobs(
-    list: &Vec<Entry>,
-    blob_recycler: &packet::BlobRecycler,
-    q: &mut VecDeque<SharedBlob>,
-) {
-    let mut start = 0;
-    let mut end = 0;
-    while start < list.len() {
-        let mut entries: Vec<Vec<Entry>> = Vec::new();
-        let mut total = 0;
-        for i in &list[start..] {
-            total += size_of::<Event>() * i.events.len();
-            total += size_of::<Entry>();
-            if total >= BLOB_DATA_SIZE {
-                break;
-            }
-            end += 1;
-        }
-        // See if we need to split the events
-        if end <= start {
-            let mut event_start = 0;
-            let num_events_per_blob = BLOB_DATA_SIZE / size_of::<Event>();
-            let total_entry_chunks =
-                (list[end].events.len() + num_events_per_blob - 1) / num_events_per_blob;
-            trace!(
-                "splitting events end: {} total_chunks: {}",
-                end,
-                total_entry_chunks
-            );
-            for _ in 0..total_entry_chunks {
-                let event_end = min(event_start + num_events_per_blob, list[end].events.len());
-                let mut entry = Entry {
-                    num_hashes: list[end].num_hashes,
-                    id: list[end].id,
-                    events: list[end].events[event_start..event_end].to_vec(),
-                };
-                entries.push(vec![entry]);
-                event_start = event_end;
-            }
-            end += 1;
-        } else {
-            entries.push(list[start..end].to_vec());
-        }
-
-        for entry in entries {
-            let b = blob_recycler.allocate();
-            let pos = {
-                let mut bd = b.write().unwrap();
-                let mut out = Cursor::new(bd.data_mut());
-                serialize_into(&mut out, &entry).expect("failed to serialize output");
-                out.position() as usize
-            };
-            assert!(pos < BLOB_SIZE);
-            b.write().unwrap().set_size(pos);
-            q.push_back(b);
-        }
-        start = end;
-    }
-}
-
 pub fn reconstruct_entries_from_blobs(blobs: &VecDeque<SharedBlob>) -> Vec<Entry> {
     let mut entries_to_apply: Vec<Entry> = Vec::new();
     let mut last_id = Hash::default();
@@ -108,7 +113,7 @@ pub fn reconstruct_entries_from_blobs(blobs: &VecDeque<SharedBlob>) -> Vec<Entry
     for entry in entries {
         if entry.id == last_id {
             if let Some(last_entry) = entries_to_apply.last_mut() {
-                last_entry.events.extend(entry.events);
+                last_entry.transactions.extend(entry.transactions);
             }
         } else {
             last_id = entry.id;
@@ -147,17 +152,16 @@ mod tests {
         let zero = Hash::default();
         let one = hash(&zero);
         let keypair = KeyPair::new();
-        let tr0 = Event::Transaction(Transaction::new(&keypair, keypair.pubkey(), 1, one));
-        let events = vec![tr0.clone(); 10000];
-        let e0 = Entry::new(&zero, 0, events);
+        let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, one);
+        let transactions = vec![tx0.clone(); 10000];
+        let e0 = Entry::new(&zero, 0, transactions);

-        let entry_list = vec![e0.clone(); 1];
+        let entries = vec![e0.clone(); 1];
         let blob_recycler = BlobRecycler::default();
         let mut blob_q = VecDeque::new();
-        process_entry_list_into_blobs(&entry_list, &blob_recycler, &mut blob_q);
-        let entries = reconstruct_entries_from_blobs(&blob_q);
+        entries.to_blobs(&blob_recycler, &mut blob_q);

-        assert_eq!(entry_list, entries);
+        assert_eq!(reconstruct_entries_from_blobs(&blob_q), entries);
     }

     #[test]
@@ -165,16 +169,16 @@
         let mut id = Hash::default();
         let next_id = hash(&id);
         let keypair = KeyPair::new();
-        let tr0 = Event::Transaction(Transaction::new(&keypair, keypair.pubkey(), 1, next_id));
-        let events = vec![tr0.clone(); 5];
-        let event_set = vec![events.clone(); 5];
-        let entries0 = next_entries(&id, 0, event_set);
+        let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, next_id);
+        let transactions = vec![tx0.clone(); 5];
+        let transaction_batches = vec![transactions.clone(); 5];
+        let entries0 = next_entries(&id, 0, transaction_batches);

         assert_eq!(entries0.len(), 5);

         let mut entries1 = vec![];
         for _ in 0..5 {
-            let entry = next_entry(&id, 0, events.clone());
+            let entry = next_entry(&id, 0, transactions.clone());
             id = entry.id;
             entries1.push(entry);
         }
@@ -189,7 +193,7 @@ mod bench {
     use ledger::*;

     #[bench]
-    fn event_bench(bencher: &mut Bencher) {
+    fn bench_next_entries(bencher: &mut Bencher) {
         let start_hash = Hash::default();
         let entries = next_entries(&start_hash, 10_000, vec![vec![]; 8]);
         bencher.iter(|| {
13 src/lib.rs
@@ -1,19 +1,20 @@
 #![cfg_attr(feature = "unstable", feature(test))]
 pub mod bank;
 pub mod banking_stage;
+pub mod budget;
 pub mod crdt;
-pub mod ecdsa;
+pub mod data_replicator;
 pub mod entry;
 pub mod entry_writer;
 #[cfg(feature = "erasure")]
 pub mod erasure;
-pub mod event;
+pub mod fetch_stage;
 pub mod hash;
 pub mod ledger;
 pub mod logger;
 pub mod mint;
 pub mod packet;
-pub mod plan;
+pub mod payment_plan;
 pub mod record_stage;
 pub mod recorder;
 pub mod replicate_stage;
@@ -23,8 +24,9 @@ pub mod request_stage;
 pub mod result;
 pub mod rpu;
 pub mod server;
-pub mod sig_verify_stage;
 pub mod signature;
+pub mod sigverify;
+pub mod sigverify_stage;
 pub mod streamer;
 pub mod thin_client;
 pub mod timing;
@@ -44,12 +46,11 @@ extern crate ring;
 extern crate serde;
 #[macro_use]
 extern crate serde_derive;
+extern crate pnet;
 extern crate serde_json;
 extern crate sha2;
 extern crate untrusted;

-extern crate futures;
-
 #[cfg(test)]
 #[macro_use]
 extern crate matches;
27 src/mint.rs
@@ -1,7 +1,6 @@
 //! The `mint` module is a library for generating the chain's genesis block.

 use entry::Entry;
-use event::Event;
 use hash::{hash, Hash};
 use ring::rand::SystemRandom;
 use signature::{KeyPair, KeyPairUtil, PublicKey};
@@ -47,15 +46,15 @@ impl Mint {
         self.pubkey
     }

-    pub fn create_events(&self) -> Vec<Event> {
+    pub fn create_transactions(&self) -> Vec<Transaction> {
         let keypair = self.keypair();
-        let tr = Transaction::new(&keypair, self.pubkey(), self.tokens, self.seed());
-        vec![Event::Transaction(tr)]
+        let tx = Transaction::new(&keypair, self.pubkey(), self.tokens, self.seed());
+        vec![tx]
     }

     pub fn create_entries(&self) -> Vec<Entry> {
         let e0 = Entry::new(&self.seed(), 0, vec![]);
-        let e1 = Entry::new(&e0.id, 0, self.create_events());
+        let e1 = Entry::new(&e0.id, 0, self.create_transactions());
         vec![e0, e1]
     }
 }
@@ -69,20 +68,20 @@ pub struct MintDemo {
 #[cfg(test)]
 mod tests {
     use super::*;
+    use budget::Budget;
     use ledger::Block;
-    use plan::Plan;
-    use transaction::Instruction;
+    use transaction::{Instruction, Plan};

     #[test]
-    fn test_create_events() {
-        let mut events = Mint::new(100).create_events().into_iter();
-        let Event::Transaction(tr) = events.next().unwrap();
-        if let Instruction::NewContract(contract) = tr.instruction {
-            if let Plan::Pay(payment) = contract.plan {
-                assert_eq!(tr.from, payment.to);
+    fn test_create_transactions() {
+        let mut transactions = Mint::new(100).create_transactions().into_iter();
+        let tx = transactions.next().unwrap();
+        if let Instruction::NewContract(contract) = tx.instruction {
+            if let Plan::Budget(Budget::Pay(payment)) = contract.plan {
+                assert_eq!(tx.from, payment.to);
             }
         }
-        assert_eq!(events.next(), None);
+        assert_eq!(transactions.next(), None);
     }

     #[test]
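A quick sketch of the genesis flow these methods support; the token amount is illustrative, taken from the tests:

    use solana::ledger::Block;
    use solana::mint::Mint;

    // The mint pays its whole supply to itself in the second genesis entry.
    let mint = Mint::new(100);
    let entries = mint.create_entries();
    // Entries chain from the mint's seed hash, so the ledger verifies from it.
    assert!(entries[..].verify(&mint.seed()));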
@@ -173,17 +173,17 @@ impl Packets {
         let mut i = 0;
         //DOCUMENTED SIDE-EFFECT
         //Performance out of the IO without poll
-        // * block on the socket until its readable
+        // * block on the socket until it's readable
         // * set the socket to non blocking
         // * read until it fails
         // * set it back to blocking before returning
         socket.set_nonblocking(false)?;
         for p in &mut self.packets {
             p.meta.size = 0;
-            trace!("receiving");
+            trace!("receiving on {}", socket.local_addr().unwrap());
             match socket.recv_from(&mut p.data) {
                 Err(_) if i > 0 => {
-                    debug!("got {:?} messages", i);
+                    debug!("got {:?} messages on {}", i, socket.local_addr().unwrap());
                     break;
                 }
                 Err(e) => {
|
|||||||
return out;
|
return out;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn to_blob<T: Serialize>(
|
||||||
|
resp: T,
|
||||||
|
rsp_addr: SocketAddr,
|
||||||
|
blob_recycler: &BlobRecycler,
|
||||||
|
) -> Result<SharedBlob> {
|
||||||
|
let blob = blob_recycler.allocate();
|
||||||
|
{
|
||||||
|
let mut b = blob.write().unwrap();
|
||||||
|
let v = serialize(&resp)?;
|
||||||
|
let len = v.len();
|
||||||
|
// TODO: we are not using .data_mut() method here because
|
||||||
|
// the raw bytes are being serialized and sent, this isn't the
|
||||||
|
// right interface, and we should create a separate path for
|
||||||
|
// sending request responses in the RPU
|
||||||
|
assert!(len < BLOB_SIZE);
|
||||||
|
b.data[..len].copy_from_slice(&v);
|
||||||
|
b.meta.size = len;
|
||||||
|
b.meta.set_addr(&rsp_addr);
|
||||||
|
}
|
||||||
|
Ok(blob)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn to_blobs<T: Serialize>(
|
||||||
|
rsps: Vec<(T, SocketAddr)>,
|
||||||
|
blob_recycler: &BlobRecycler,
|
||||||
|
) -> Result<VecDeque<SharedBlob>> {
|
||||||
|
let mut blobs = VecDeque::new();
|
||||||
|
for (resp, rsp_addr) in rsps {
|
||||||
|
blobs.push_back(to_blob(resp, rsp_addr, blob_recycler)?);
|
||||||
|
}
|
||||||
|
Ok(blobs)
|
||||||
|
}
|
||||||
|
|
||||||
const BLOB_INDEX_END: usize = size_of::<u64>();
|
const BLOB_INDEX_END: usize = size_of::<u64>();
|
||||||
const BLOB_ID_END: usize = BLOB_INDEX_END + size_of::<usize>() + size_of::<PublicKey>();
|
const BLOB_ID_END: usize = BLOB_INDEX_END + size_of::<usize>() + size_of::<PublicKey>();
|
||||||
|
|
||||||
@@ -251,7 +284,8 @@ impl Blob {
|
|||||||
self.data[..BLOB_INDEX_END].clone_from_slice(&wtr);
|
self.data[..BLOB_INDEX_END].clone_from_slice(&wtr);
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
/// sender id, we use this for identifying if its a blob from the leader that we should
|
||||||
|
/// retransmit. eventually blobs should have a signature that we can use ffor spam filtering
|
||||||
pub fn get_id(&self) -> Result<PublicKey> {
|
pub fn get_id(&self) -> Result<PublicKey> {
|
||||||
let e = deserialize(&self.data[BLOB_INDEX_END..BLOB_ID_END])?;
|
let e = deserialize(&self.data[BLOB_INDEX_END..BLOB_ID_END])?;
|
||||||
Ok(e)
|
Ok(e)
|
||||||
@@ -276,7 +310,7 @@ impl Blob {
|
|||||||
let mut v = VecDeque::new();
|
let mut v = VecDeque::new();
|
||||||
//DOCUMENTED SIDE-EFFECT
|
//DOCUMENTED SIDE-EFFECT
|
||||||
//Performance out of the IO without poll
|
//Performance out of the IO without poll
|
||||||
// * block on the socket until its readable
|
// * block on the socket until it's readable
|
||||||
// * set the socket to non blocking
|
// * set the socket to non blocking
|
||||||
// * read until it fails
|
// * read until it fails
|
||||||
// * set it back to blocking before returning
|
// * set it back to blocking before returning
|
||||||
@@ -285,9 +319,10 @@ impl Blob {
|
|||||||
let r = re.allocate();
|
let r = re.allocate();
|
||||||
{
|
{
|
||||||
let mut p = r.write().expect("'r' write lock in pub fn recv_from");
|
let mut p = r.write().expect("'r' write lock in pub fn recv_from");
|
||||||
|
trace!("receiving on {}", socket.local_addr().unwrap());
|
||||||
match socket.recv_from(&mut p.data) {
|
match socket.recv_from(&mut p.data) {
|
||||||
Err(_) if i > 0 => {
|
Err(_) if i > 0 => {
|
||||||
trace!("got {:?} messages", i);
|
trace!("got {:?} messages on {}", i, socket.local_addr().unwrap());
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
@@ -378,17 +413,17 @@ mod test {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_to_packets() {
|
fn test_to_packets() {
|
||||||
let tr = Request::GetTransactionCount;
|
let tx = Request::GetTransactionCount;
|
||||||
let re = PacketRecycler::default();
|
let re = PacketRecycler::default();
|
||||||
let rv = to_packets(&re, vec![tr.clone(); 1]);
|
let rv = to_packets(&re, vec![tx.clone(); 1]);
|
||||||
assert_eq!(rv.len(), 1);
|
assert_eq!(rv.len(), 1);
|
||||||
assert_eq!(rv[0].read().unwrap().packets.len(), 1);
|
assert_eq!(rv[0].read().unwrap().packets.len(), 1);
|
||||||
|
|
||||||
let rv = to_packets(&re, vec![tr.clone(); NUM_PACKETS]);
|
let rv = to_packets(&re, vec![tx.clone(); NUM_PACKETS]);
|
||||||
assert_eq!(rv.len(), 1);
|
assert_eq!(rv.len(), 1);
|
||||||
assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);
|
assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);
|
||||||
|
|
||||||
let rv = to_packets(&re, vec![tr.clone(); NUM_PACKETS + 1]);
|
let rv = to_packets(&re, vec![tx.clone(); NUM_PACKETS + 1]);
|
||||||
assert_eq!(rv.len(), 2);
|
assert_eq!(rv.len(), 2);
|
||||||
assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);
|
assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);
|
||||||
assert_eq!(rv[1].read().unwrap().packets.len(), 1);
|
assert_eq!(rv[1].read().unwrap().packets.len(), 1);
|
||||||
|
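The new `to_blob`/`to_blobs` helpers give response serialization a single home (the RPU previously kept a private copy; see the `request_processor.rs` hunk below). A sketch of the call shape; the payload and address are made up for illustration:

    use solana::packet::{to_blobs, BlobRecycler};
    use std::net::SocketAddr;

    // Any Serialize type pairs with the address it should be sent to.
    let addr: SocketAddr = "127.0.0.1:8000".parse().unwrap();
    let rsps = vec![(42u64, addr)];
    let blob_recycler = BlobRecycler::default();
    // Each (response, addr) pair becomes one addressed blob, ready to send.
    let blobs = to_blobs(rsps, &blob_recycler).expect("serialize responses");
    assert_eq!(blobs.len(), 1);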
31 src/payment_plan.rs Normal file
@@ -0,0 +1,31 @@
+//! The `plan` module provides a domain-specific language for payment plans. Users create Budget objects that
+//! are given to an interpreter. The interpreter listens for `Witness` transactions,
+//! which it uses to reduce the payment plan. When the plan is reduced to a
+//! `Payment`, the payment is executed.
+
+use chrono::prelude::*;
+use signature::PublicKey;
+
+#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
+pub enum Witness {
+    Timestamp(DateTime<Utc>),
+    Signature(PublicKey),
+}
+
+#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
+pub struct Payment {
+    pub tokens: i64,
+    pub to: PublicKey,
+}
+
+pub trait PaymentPlan {
+    /// Return Payment if the payment plan requires no additional Witnesses.
+    fn final_payment(&self) -> Option<Payment>;
+
+    /// Return true if the plan spends exactly `spendable_tokens`.
+    fn verify(&self, spendable_tokens: i64) -> bool;
+
+    /// Apply a witness to the payment plan to see if the plan can be reduced.
+    /// If so, modify the plan in-place.
+    fn apply_witness(&mut self, witness: &Witness);
+}
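To make the trait's contract concrete, here is a toy implementor: an unconditional plan that is born fully reduced. It is illustrative only; the real implementor in this release is the new `Budget` type:

    use solana::payment_plan::{Payment, PaymentPlan, Witness};

    // A plan with no conditions attached.
    struct Unconditional(Payment);

    impl PaymentPlan for Unconditional {
        fn final_payment(&self) -> Option<Payment> {
            // No witnesses required, so the payment is always available.
            Some(self.0.clone())
        }
        fn verify(&self, spendable_tokens: i64) -> bool {
            // The plan must spend exactly what the transaction holds.
            self.0.tokens == spendable_tokens
        }
        fn apply_witness(&mut self, _witness: &Witness) {
            // Nothing left to reduce; witnesses are ignored.
        }
    }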
@@ -2,21 +2,21 @@
 //! It records Event items on behalf of its users. It continuously generates
 //! new hashes, only stopping to check if it has been sent an Event item. It
 //! tags each Event with an Entry, and sends it back. The Entry includes the
-//! Event, the latest hash, and the number of hashes since the last event.
-//! The resulting stream of entries represents ordered events in time.
+//! Event, the latest hash, and the number of hashes since the last transaction.
+//! The resulting stream of entries represents ordered transactions in time.

 use entry::Entry;
-use event::Event;
 use hash::Hash;
 use recorder::Recorder;
-use std::sync::mpsc::{channel, Receiver, Sender, TryRecvError};
-use std::thread::{spawn, JoinHandle};
+use std::sync::mpsc::{channel, Receiver, RecvError, Sender, TryRecvError};
+use std::thread::{Builder, JoinHandle};
 use std::time::{Duration, Instant};
+use transaction::Transaction;

 #[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))]
 pub enum Signal {
     Tick,
-    Events(Vec<Event>),
+    Events(Vec<Transaction>),
 }

 pub struct RecordStage {
@@ -27,31 +27,17 @@ pub struct RecordStage {
 impl RecordStage {
     /// A background thread that will continue tagging received Event messages and
     /// sending back Entry messages until either the receiver or sender channel is closed.
-    pub fn new(
-        event_receiver: Receiver<Signal>,
-        start_hash: &Hash,
-        tick_duration: Option<Duration>,
-    ) -> Self {
+    pub fn new(signal_receiver: Receiver<Signal>, start_hash: &Hash) -> Self {
         let (entry_sender, entry_receiver) = channel();
         let start_hash = start_hash.clone();

-        let thread_hdl = spawn(move || {
-            let mut recorder = Recorder::new(start_hash);
-            let duration_data = tick_duration.map(|dur| (Instant::now(), dur));
-            loop {
-                if let Err(_) = Self::process_events(
-                    &mut recorder,
-                    duration_data,
-                    &event_receiver,
-                    &entry_sender,
-                ) {
-                    return;
-                }
-                if duration_data.is_some() {
-                    recorder.hash();
-                }
-            }
-        });
+        let thread_hdl = Builder::new()
+            .name("solana-record-stage".to_string())
+            .spawn(move || {
+                let mut recorder = Recorder::new(start_hash);
+                let _ = Self::process_signals(&mut recorder, &signal_receiver, &entry_sender);
+            })
+            .unwrap();

         RecordStage {
             entry_receiver,
@@ -59,29 +45,81 @@ impl RecordStage {
         }
     }

-    pub fn process_events(
+    /// Same as `RecordStage::new`, but will automatically produce entries every `tick_duration`.
+    pub fn new_with_clock(
+        signal_receiver: Receiver<Signal>,
+        start_hash: &Hash,
+        tick_duration: Duration,
+    ) -> Self {
+        let (entry_sender, entry_receiver) = channel();
+        let start_hash = start_hash.clone();
+
+        let thread_hdl = Builder::new()
+            .name("solana-record-stage".to_string())
+            .spawn(move || {
+                let mut recorder = Recorder::new(start_hash);
+                let start_time = Instant::now();
+                loop {
+                    if let Err(_) = Self::try_process_signals(
+                        &mut recorder,
+                        start_time,
+                        tick_duration,
+                        &signal_receiver,
+                        &entry_sender,
+                    ) {
+                        return;
+                    }
+                    recorder.hash();
+                }
+            })
+            .unwrap();
+
+        RecordStage {
+            entry_receiver,
+            thread_hdl,
+        }
+    }
+
+    fn process_signal(
+        signal: Signal,
+        recorder: &mut Recorder,
+        sender: &Sender<Entry>,
+    ) -> Result<(), ()> {
+        let txs = if let Signal::Events(txs) = signal {
+            txs
+        } else {
+            vec![]
+        };
+        let entry = recorder.record(txs);
+        sender.send(entry).map_err(|_| ())
+    }
+
+    fn process_signals(
         recorder: &mut Recorder,
-        duration_data: Option<(Instant, Duration)>,
         receiver: &Receiver<Signal>,
         sender: &Sender<Entry>,
     ) -> Result<(), ()> {
         loop {
-            if let Some((start_time, tick_duration)) = duration_data {
-                if let Some(entry) = recorder.tick(start_time, tick_duration) {
-                    sender.send(entry).or(Err(()))?;
-                }
+            match receiver.recv() {
+                Ok(signal) => Self::process_signal(signal, recorder, sender)?,
+                Err(RecvError) => return Err(()),
             }
+        }
+    }
+
+    fn try_process_signals(
+        recorder: &mut Recorder,
+        start_time: Instant,
+        tick_duration: Duration,
+        receiver: &Receiver<Signal>,
+        sender: &Sender<Entry>,
+    ) -> Result<(), ()> {
+        loop {
+            if let Some(entry) = recorder.tick(start_time, tick_duration) {
+                sender.send(entry).or(Err(()))?;
+            }
             match receiver.try_recv() {
-                Ok(signal) => match signal {
-                    Signal::Tick => {
-                        let entry = recorder.record(vec![]);
-                        sender.send(entry).or(Err(()))?;
-                    }
-                    Signal::Events(events) => {
-                        let entry = recorder.record(events);
-                        sender.send(entry).or(Err(()))?;
-                    }
-                },
+                Ok(signal) => Self::process_signal(signal, recorder, sender)?,
                 Err(TryRecvError::Empty) => return Ok(()),
                 Err(TryRecvError::Disconnected) => return Err(()),
             };
@@ -99,15 +137,15 @@ mod tests {

     #[test]
     fn test_historian() {
-        let (input, event_receiver) = channel();
+        let (tx_sender, tx_receiver) = channel();
         let zero = Hash::default();
-        let record_stage = RecordStage::new(event_receiver, &zero, None);
+        let record_stage = RecordStage::new(tx_receiver, &zero);

-        input.send(Signal::Tick).unwrap();
+        tx_sender.send(Signal::Tick).unwrap();
         sleep(Duration::new(0, 1_000_000));
-        input.send(Signal::Tick).unwrap();
+        tx_sender.send(Signal::Tick).unwrap();
         sleep(Duration::new(0, 1_000_000));
-        input.send(Signal::Tick).unwrap();
+        tx_sender.send(Signal::Tick).unwrap();

         let entry0 = record_stage.entry_receiver.recv().unwrap();
         let entry1 = record_stage.entry_receiver.recv().unwrap();
@@ -117,7 +155,7 @@ mod tests {
         assert_eq!(entry1.num_hashes, 0);
         assert_eq!(entry2.num_hashes, 0);

-        drop(input);
+        drop(tx_sender);
         assert_eq!(record_stage.thread_hdl.join().unwrap(), ());

         assert!([entry0, entry1, entry2].verify(&zero));
@@ -125,38 +163,38 @@ mod tests {

     #[test]
     fn test_historian_closed_sender() {
-        let (input, event_receiver) = channel();
+        let (tx_sender, tx_receiver) = channel();
         let zero = Hash::default();
-        let record_stage = RecordStage::new(event_receiver, &zero, None);
+        let record_stage = RecordStage::new(tx_receiver, &zero);
         drop(record_stage.entry_receiver);
-        input.send(Signal::Tick).unwrap();
+        tx_sender.send(Signal::Tick).unwrap();
         assert_eq!(record_stage.thread_hdl.join().unwrap(), ());
     }

     #[test]
-    fn test_events() {
-        let (input, signal_receiver) = channel();
+    fn test_transactions() {
+        let (tx_sender, signal_receiver) = channel();
         let zero = Hash::default();
-        let record_stage = RecordStage::new(signal_receiver, &zero, None);
+        let record_stage = RecordStage::new(signal_receiver, &zero);
         let alice_keypair = KeyPair::new();
         let bob_pubkey = KeyPair::new().pubkey();
-        let event0 = Event::new_transaction(&alice_keypair, bob_pubkey, 1, zero);
-        let event1 = Event::new_transaction(&alice_keypair, bob_pubkey, 2, zero);
-        input.send(Signal::Events(vec![event0, event1])).unwrap();
-        drop(input);
+        let tx0 = Transaction::new(&alice_keypair, bob_pubkey, 1, zero);
+        let tx1 = Transaction::new(&alice_keypair, bob_pubkey, 2, zero);
+        tx_sender.send(Signal::Events(vec![tx0, tx1])).unwrap();
+        drop(tx_sender);
         let entries: Vec<_> = record_stage.entry_receiver.iter().collect();
         assert_eq!(entries.len(), 1);
     }

     #[test]
-    #[ignore]
-    fn test_ticking_historian() {
-        let (input, event_receiver) = channel();
+    fn test_clock() {
+        let (tx_sender, tx_receiver) = channel();
         let zero = Hash::default();
-        let record_stage = RecordStage::new(event_receiver, &zero, Some(Duration::from_millis(20)));
+        let record_stage =
+            RecordStage::new_with_clock(tx_receiver, &zero, Duration::from_millis(20));
         sleep(Duration::from_millis(900));
-        input.send(Signal::Tick).unwrap();
-        drop(input);
+        tx_sender.send(Signal::Tick).unwrap();
+        drop(tx_sender);
         let entries: Vec<Entry> = record_stage.entry_receiver.iter().collect();
         assert!(entries.len() > 1);
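A compact sketch of driving the reworked stage, mirroring `test_transactions` above (keypairs and the amount are illustrative):

    use solana::hash::Hash;
    use solana::record_stage::{RecordStage, Signal};
    use solana::signature::{KeyPair, KeyPairUtil};
    use solana::transaction::Transaction;
    use std::sync::mpsc::channel;

    let (signal_sender, signal_receiver) = channel();
    let zero = Hash::default();
    // Without a clock, entries are produced only in response to signals.
    let stage = RecordStage::new(signal_receiver, &zero);
    let alice = KeyPair::new();
    let bob = KeyPair::new().pubkey();
    signal_sender
        .send(Signal::Events(vec![Transaction::new(&alice, bob, 1, zero)]))
        .unwrap();
    drop(signal_sender); // closing the channel lets the thread exit
    let entries: Vec<_> = stage.entry_receiver.iter().collect();
    assert_eq!(entries.len(), 1);

Note also the switch from `thread::spawn` to `thread::Builder` with a `solana-record-stage` name, a pattern repeated across the stages in this release; named threads make panics and profiler output far easier to attribute.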
@@ -2,9 +2,9 @@
 //! It records Event items on behalf of its users.

 use entry::Entry;
-use event::Event;
 use hash::{hash, Hash};
 use std::time::{Duration, Instant};
+use transaction::Transaction;

 pub struct Recorder {
     last_hash: Hash,
@@ -26,8 +26,8 @@ impl Recorder {
         self.num_hashes += 1;
     }

-    pub fn record(&mut self, events: Vec<Event>) -> Entry {
-        Entry::new_mut(&mut self.last_hash, &mut self.num_hashes, events)
+    pub fn record(&mut self, transactions: Vec<Transaction>) -> Entry {
+        Entry::new_mut(&mut self.last_hash, &mut self.num_hashes, transactions)
     }

     pub fn tick(&mut self, start_time: Instant, tick_duration: Duration) -> Option<Entry> {
@@ -4,9 +4,9 @@ use bank::Bank;
 use ledger;
 use packet;
 use result::Result;
-use std::sync::Arc;
 use std::sync::atomic::{AtomicBool, Ordering};
-use std::thread::{spawn, JoinHandle};
+use std::sync::Arc;
+use std::thread::{Builder, JoinHandle};
 use std::time::Duration;
 use streamer;

@@ -24,9 +24,9 @@ impl ReplicateStage {
         let timer = Duration::new(1, 0);
         let blobs = verified_receiver.recv_timeout(timer)?;
         let entries = ledger::reconstruct_entries_from_blobs(&blobs);
-        let res = bank.process_verified_entries(entries);
+        let res = bank.process_entries(entries);
         if res.is_err() {
-            error!("process_verified_entries {} {:?}", blobs.len(), res);
+            error!("process_entries {} {:?}", blobs.len(), res);
         }
         res?;
         for blob in blobs {
@@ -41,12 +41,15 @@ impl ReplicateStage {
         window_receiver: streamer::BlobReceiver,
         blob_recycler: packet::BlobRecycler,
     ) -> Self {
-        let thread_hdl = spawn(move || loop {
-            let e = Self::replicate_requests(&bank, &window_receiver, &blob_recycler);
-            if e.is_err() && exit.load(Ordering::Relaxed) {
-                break;
-            }
-        });
+        let thread_hdl = Builder::new()
+            .name("solana-replicate-stage".to_string())
+            .spawn(move || loop {
+                let e = Self::replicate_requests(&bank, &window_receiver, &blob_recycler);
+                if e.is_err() && exit.load(Ordering::Relaxed) {
+                    break;
+                }
+            })
+            .unwrap();
         ReplicateStage { thread_hdl }
     }
 }
@@ -1,20 +1,9 @@
-//! The `request_stage` processes thin client Request messages.
+//! The `request_processor` processes thin client Request messages.

 use bank::Bank;
-use bincode::{deserialize, serialize};
-use event::Event;
-use packet;
-use packet::SharedPackets;
-use rayon::prelude::*;
 use request::{Request, Response};
-use result::Result;
-use std::collections::VecDeque;
 use std::net::SocketAddr;
 use std::sync::Arc;
-use std::sync::mpsc::Receiver;
-use std::time::Instant;
-use streamer;
-use timing;

 pub struct RequestProcessor {
     bank: Arc<Bank>,
@@ -62,104 +51,4 @@ impl RequestProcessor {
             .filter_map(|(req, rsp_addr)| self.process_request(req, rsp_addr))
             .collect()
     }
-
-    fn deserialize_requests(p: &packet::Packets) -> Vec<Option<(Request, SocketAddr)>> {
-        p.packets
-            .par_iter()
-            .map(|x| {
-                deserialize(&x.data[0..x.meta.size])
-                    .map(|req| (req, x.meta.addr()))
-                    .ok()
-            })
-            .collect()
-    }
-
-    // Copy-paste of deserialize_requests() because I can't figure out how to
-    // route the lifetimes in a generic version.
-    pub fn deserialize_events(p: &packet::Packets) -> Vec<Option<(Event, SocketAddr)>> {
-        p.packets
-            .par_iter()
-            .map(|x| {
-                deserialize(&x.data[0..x.meta.size])
-                    .map(|req| (req, x.meta.addr()))
-                    .ok()
-            })
-            .collect()
-    }
-
-    /// Split Request list into verified transactions and the rest
-    fn serialize_response(
-        resp: Response,
-        rsp_addr: SocketAddr,
-        blob_recycler: &packet::BlobRecycler,
-    ) -> Result<packet::SharedBlob> {
-        let blob = blob_recycler.allocate();
-        {
-            let mut b = blob.write().unwrap();
-            let v = serialize(&resp)?;
-            let len = v.len();
-            b.data[..len].copy_from_slice(&v);
-            b.meta.size = len;
-            b.meta.set_addr(&rsp_addr);
-        }
-        Ok(blob)
-    }
-
-    fn serialize_responses(
-        rsps: Vec<(Response, SocketAddr)>,
-        blob_recycler: &packet::BlobRecycler,
-    ) -> Result<VecDeque<packet::SharedBlob>> {
-        let mut blobs = VecDeque::new();
-        for (resp, rsp_addr) in rsps {
-            blobs.push_back(Self::serialize_response(resp, rsp_addr, blob_recycler)?);
-        }
-        Ok(blobs)
-    }
-
-    pub fn process_request_packets(
-        &self,
-        packet_receiver: &Receiver<SharedPackets>,
-        blob_sender: &streamer::BlobSender,
-        packet_recycler: &packet::PacketRecycler,
-        blob_recycler: &packet::BlobRecycler,
-    ) -> Result<()> {
-        let (batch, batch_len) = streamer::recv_batch(packet_receiver)?;
-
-        info!(
-            "@{:?} request_stage: processing: {}",
-            timing::timestamp(),
-            batch_len
-        );
-
-        let mut reqs_len = 0;
-        let proc_start = Instant::now();
-        for msgs in batch {
-            let reqs: Vec<_> = Self::deserialize_requests(&msgs.read().unwrap())
-                .into_iter()
-                .filter_map(|x| x)
-                .collect();
-            reqs_len += reqs.len();
-
-            let rsps = self.process_requests(reqs);
-
-            let blobs = Self::serialize_responses(rsps, blob_recycler)?;
-            if !blobs.is_empty() {
-                info!("process: sending blobs: {}", blobs.len());
-                //don't wake up the other side if there is nothing
-                blob_sender.send(blobs)?;
-            }
-            packet_recycler.recycle(msgs);
-        }
-        let total_time_s = timing::duration_as_s(&proc_start.elapsed());
-        let total_time_ms = timing::duration_as_ms(&proc_start.elapsed());
-        info!(
-            "@{:?} done process batches: {} time: {:?}ms reqs: {} reqs/s: {}",
-            timing::timestamp(),
-            batch_len,
-            total_time_ms,
-            reqs_len,
-            (reqs_len as f32) / (total_time_s)
-        );
-        Ok(())
-    }
 }
@@ -1,13 +1,20 @@
 //! The `request_stage` processes thin client Request messages.

+use bincode::deserialize;
 use packet;
 use packet::SharedPackets;
+use rayon::prelude::*;
+use request::Request;
 use request_processor::RequestProcessor;
-use std::sync::Arc;
+use result::Result;
+use std::net::SocketAddr;
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::mpsc::{channel, Receiver};
-use std::thread::{spawn, JoinHandle};
+use std::sync::Arc;
+use std::thread::{Builder, JoinHandle};
+use std::time::Instant;
 use streamer;
+use timing;

 pub struct RequestStage {
     pub thread_hdl: JoinHandle<()>,
@@ -16,6 +23,63 @@ pub struct RequestStage {
 }

 impl RequestStage {
+    pub fn deserialize_requests(p: &packet::Packets) -> Vec<Option<(Request, SocketAddr)>> {
+        p.packets
+            .par_iter()
+            .map(|x| {
+                deserialize(&x.data[0..x.meta.size])
+                    .map(|req| (req, x.meta.addr()))
+                    .ok()
+            })
+            .collect()
+    }
+
+    pub fn process_request_packets(
+        request_processor: &RequestProcessor,
+        packet_receiver: &Receiver<SharedPackets>,
+        blob_sender: &streamer::BlobSender,
+        packet_recycler: &packet::PacketRecycler,
+        blob_recycler: &packet::BlobRecycler,
+    ) -> Result<()> {
+        let (batch, batch_len) = streamer::recv_batch(packet_receiver)?;
+
+        info!(
+            "@{:?} request_stage: processing: {}",
+            timing::timestamp(),
+            batch_len
+        );
+
+        let mut reqs_len = 0;
+        let proc_start = Instant::now();
+        for msgs in batch {
+            let reqs: Vec<_> = Self::deserialize_requests(&msgs.read().unwrap())
+                .into_iter()
+                .filter_map(|x| x)
+                .collect();
+            reqs_len += reqs.len();
+
+            let rsps = request_processor.process_requests(reqs);
+
+            let blobs = packet::to_blobs(rsps, blob_recycler)?;
+            if !blobs.is_empty() {
+                info!("process: sending blobs: {}", blobs.len());
+                //don't wake up the other side if there is nothing
+                blob_sender.send(blobs)?;
+            }
+            packet_recycler.recycle(msgs);
+        }
+        let total_time_s = timing::duration_as_s(&proc_start.elapsed());
+        let total_time_ms = timing::duration_as_ms(&proc_start.elapsed());
+        info!(
+            "@{:?} done process batches: {} time: {:?}ms reqs: {} reqs/s: {}",
+            timing::timestamp(),
+            batch_len,
+            total_time_ms,
+            reqs_len,
+            (reqs_len as f32) / (total_time_s)
+        );
+        Ok(())
+    }
     pub fn new(
         request_processor: RequestProcessor,
         exit: Arc<AtomicBool>,
@@ -26,19 +90,23 @@
         let request_processor = Arc::new(request_processor);
         let request_processor_ = request_processor.clone();
         let (blob_sender, blob_receiver) = channel();
-        let thread_hdl = spawn(move || loop {
-            let e = request_processor_.process_request_packets(
-                &packet_receiver,
-                &blob_sender,
-                &packet_recycler,
-                &blob_recycler,
-            );
-            if e.is_err() {
-                if exit.load(Ordering::Relaxed) {
-                    break;
-                }
-            }
-        });
+        let thread_hdl = Builder::new()
+            .name("solana-request-stage".to_string())
+            .spawn(move || loop {
+                let e = Self::process_request_packets(
+                    &request_processor_,
+                    &packet_receiver,
+                    &blob_sender,
+                    &packet_recycler,
+                    &blob_recycler,
+                );
+                if e.is_err() {
+                    if exit.load(Ordering::Relaxed) {
+                        break;
+                    }
+                }
+            })
+            .unwrap();
         RequestStage {
            thread_hdl,
            blob_receiver,
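Taken together with the `request_processor.rs` hunk above, this refactor narrows `RequestProcessor` to answering requests: batching, deserialization, and timing logs now live in the stage that owns the channels, and response serialization is shared through `packet::to_blobs` rather than a private copy. The old `deserialize_events` copy-paste disappears entirely now that the `Event` wrapper is gone.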
@@ -80,9 +80,9 @@ mod tests {
     use std::io::Write;
     use std::net::SocketAddr;
     use std::panic;
+    use std::sync::mpsc::channel;
     use std::sync::mpsc::RecvError;
     use std::sync::mpsc::RecvTimeoutError;
-    use std::sync::mpsc::channel;
     use std::thread;

     fn addr_parse_error() -> Result<SocketAddr> {
@@ -6,9 +6,9 @@ use packet;
 use request_processor::RequestProcessor;
 use request_stage::RequestStage;
 use std::net::UdpSocket;
-use std::sync::Arc;
 use std::sync::atomic::AtomicBool;
 use std::sync::mpsc::channel;
+use std::sync::Arc;
 use std::thread::JoinHandle;
 use streamer;
@@ -1,15 +1,17 @@
 //! The `server` module hosts all the server microservices.

 use bank::Bank;
-use crdt::ReplicatedData;
-use hash::Hash;
+use crdt::{Crdt, ReplicatedData};
+use data_replicator::DataReplicator;
+use packet;
 use rpu::Rpu;
 use std::io::Write;
 use std::net::UdpSocket;
-use std::sync::Arc;
 use std::sync::atomic::AtomicBool;
+use std::sync::{Arc, RwLock};
 use std::thread::JoinHandle;
 use std::time::Duration;
+use streamer;
 use tpu::Tpu;
 use tvu::Tvu;

@@ -20,11 +22,10 @@ pub struct Server {
 impl Server {
     pub fn new_leader<W: Write + Send + 'static>(
         bank: Bank,
-        start_hash: Hash,
         tick_duration: Option<Duration>,
         me: ReplicatedData,
         requests_socket: UdpSocket,
-        events_socket: UdpSocket,
+        transactions_socket: UdpSocket,
         broadcast_socket: UdpSocket,
         respond_socket: UdpSocket,
         gossip_socket: UdpSocket,
@@ -35,18 +36,40 @@ impl Server {
         let mut thread_hdls = vec![];
         let rpu = Rpu::new(bank.clone(), requests_socket, respond_socket, exit.clone());
         thread_hdls.extend(rpu.thread_hdls);

+        let blob_recycler = packet::BlobRecycler::default();
         let tpu = Tpu::new(
             bank.clone(),
-            start_hash,
             tick_duration,
-            me,
-            events_socket,
-            broadcast_socket,
-            gossip_socket,
+            transactions_socket,
+            blob_recycler.clone(),
             exit.clone(),
             writer,
         );
         thread_hdls.extend(tpu.thread_hdls);

+        let crdt = Arc::new(RwLock::new(Crdt::new(me)));
+        let window = streamer::default_window();
+        let gossip_send_socket = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
+        let data_replicator = DataReplicator::new(
+            crdt.clone(),
+            window.clone(),
+            gossip_socket,
+            gossip_send_socket,
+            exit.clone(),
+        ).expect("DataReplicator::new");
+        thread_hdls.extend(data_replicator.thread_hdls);
+
+        let t_broadcast = streamer::broadcaster(
+            broadcast_socket,
+            exit.clone(),
+            crdt,
+            window,
+            blob_recycler.clone(),
+            tpu.blob_receiver,
+        );
+        thread_hdls.extend(vec![t_broadcast]);
+
         Server { thread_hdls }
     }
     pub fn new_validator(
@@ -1,7 +1,7 @@
 //! The `signature` module provides functionality for public, and private keys.

-use generic_array::GenericArray;
 use generic_array::typenum::{U32, U64};
+use generic_array::GenericArray;
 use rand::{ChaChaRng, Rng, SeedableRng};
 use rayon::prelude::*;
 use ring::error::Unspecified;
|
@@ -2,7 +2,7 @@ use packet::{Packet, SharedPackets};
|
|||||||
use std::mem::size_of;
|
use std::mem::size_of;
|
||||||
use transaction::{PUB_KEY_OFFSET, SIGNED_DATA_OFFSET, SIG_OFFSET};
|
use transaction::{PUB_KEY_OFFSET, SIGNED_DATA_OFFSET, SIG_OFFSET};
|
||||||
|
|
||||||
pub const TX_OFFSET: usize = 4;
|
pub const TX_OFFSET: usize = 0;
|
||||||
|
|
||||||
#[cfg(feature = "cuda")]
|
#[cfg(feature = "cuda")]
|
||||||
#[repr(C)]
|
#[repr(C)]
|
||||||
@@ -55,7 +55,7 @@ fn batch_size(batches: &Vec<SharedPackets>) -> usize {
|
|||||||
batches
|
batches
|
||||||
.iter()
|
.iter()
|
||||||
.map(|p| p.read().unwrap().packets.len())
|
.map(|p| p.read().unwrap().packets.len())
|
||||||
.fold(0, |x, y| x + y)
|
.sum()
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(not(feature = "cuda"))]
|
#[cfg(not(feature = "cuda"))]
|
||||||
@@ -143,33 +143,32 @@ pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
 #[cfg(test)]
 mod tests {
     use bincode::serialize;
-    use ecdsa;
-    use event::Event;
     use packet::{Packet, Packets, SharedPackets};
+    use sigverify;
     use std::sync::RwLock;
     use transaction::Transaction;
     use transaction::{memfind, test_tx};

     #[test]
     fn test_layout() {
-        let tr = test_tx();
-        let tx = serialize(&tr).unwrap();
-        let packet = serialize(&Event::Transaction(tr)).unwrap();
-        assert_matches!(memfind(&packet, &tx), Some(ecdsa::TX_OFFSET));
+        let tx = test_tx();
+        let tx_bytes = serialize(&tx).unwrap();
+        let packet = serialize(&tx).unwrap();
+        assert_matches!(memfind(&packet, &tx_bytes), Some(sigverify::TX_OFFSET));
         assert_matches!(memfind(&packet, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), None);
     }

-    fn make_packet_from_transaction(tr: Transaction) -> Packet {
-        let tx = serialize(&Event::Transaction(tr)).unwrap();
+    fn make_packet_from_transaction(tx: Transaction) -> Packet {
+        let tx_bytes = serialize(&tx).unwrap();
         let mut packet = Packet::default();
-        packet.meta.size = tx.len();
-        packet.data[..packet.meta.size].copy_from_slice(&tx);
+        packet.meta.size = tx_bytes.len();
+        packet.data[..packet.meta.size].copy_from_slice(&tx_bytes);
         return packet;
     }

     fn test_verify_n(n: usize, modify_data: bool) {
-        let tr = test_tx();
-        let mut packet = make_packet_from_transaction(tr);
+        let tx = test_tx();
+        let mut packet = make_packet_from_transaction(tx);

         // jumble some data to test failure
         if modify_data {
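Why TX_OFFSET drops from 4 to 0: the old code wrapped each transaction in Event::Transaction before serializing, so the payload sat past the enum tag (presumably bincode's default 4-byte discriminant, hence the old offset of 4), while packets now carry the serialized Transaction directly at the start. A self-contained memfind, shaped like the one in transaction.rs, makes the arithmetic concrete; the byte values here are made up:

fn memfind<A: Eq>(a: &[A], b: &[A]) -> Option<usize> {
    a.windows(b.len()).position(|window| window == b)
}

fn main() {
    let tx_bytes = [7u8, 8, 9];
    let wrapped = [0u8, 0, 0, 0, 7, 8, 9]; // old layout: 4-byte enum tag, then payload
    let bare = [7u8, 8, 9];                // new layout: payload at offset 0
    assert_eq!(memfind(&wrapped, &tx_bytes), Some(4));
    assert_eq!(memfind(&bare, &tx_bytes), Some(0));
}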
@@ -186,7 +185,7 @@ mod tests {
         let batches = vec![shared_packets.clone(), shared_packets.clone()];

         // verify packets
-        let ans = ecdsa::ed25519_verify(&batches);
+        let ans = sigverify::ed25519_verify(&batches);

         // check result
         let ref_ans = if modify_data { 0u8 } else { 1u8 };

@@ -1,9 +1,9 @@
-//! The `sig_verify_stage` implements the signature verification stage of the TPU.
+//! The `sigverify_stage` implements the signature verification stage of the TPU.

-use ecdsa;
 use packet::SharedPackets;
 use rand::{thread_rng, Rng};
 use result::Result;
+use sigverify;
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::mpsc::{channel, Receiver, Sender};
 use std::sync::{Arc, Mutex};

@@ -28,7 +28,7 @@ impl SigVerifyStage {
     }

 fn verify_batch(batch: Vec<SharedPackets>) -> Vec<(SharedPackets, Vec<u8>)> {
-    let r = ecdsa::ed25519_verify(&batch);
+    let r = sigverify::ed25519_verify(&batch);
     batch.into_iter().zip(r).collect()
 }
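verify_batch pairs each packet batch with the per-packet pass/fail bytes that ed25519_verify returns for it, one Vec<u8> per batch. The same zip-and-collect shape in miniature (the data here is made up):

fn main() {
    let batches = vec!["batch0", "batch1"];
    let flags = vec![vec![1u8, 1], vec![0u8]];
    let paired: Vec<(&str, Vec<u8>)> = batches.into_iter().zip(flags).collect();
    assert_eq!(paired[1], ("batch1", vec![0u8]));
}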
src/streamer.rs (280 changes)
@@ -1,15 +1,16 @@
-//! The `streamer` module defines a set of services for effecently pulling data from udp sockets.
+//! The `streamer` module defines a set of services for efficiently pulling data from UDP sockets.
+//!
 use crdt::Crdt;
 #[cfg(feature = "erasure")]
 use erasure;
-use packet::{Blob, BlobRecycler, PacketRecycler, SharedBlob, SharedPackets};
+use packet::{Blob, BlobRecycler, PacketRecycler, SharedBlob, SharedPackets, BLOB_SIZE};
 use result::{Error, Result};
 use std::collections::VecDeque;
 use std::net::{SocketAddr, UdpSocket};
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::mpsc;
 use std::sync::{Arc, RwLock};
-use std::thread::{spawn, JoinHandle};
+use std::thread::{Builder, JoinHandle};
 use std::time::Duration;

 pub const WINDOW_SIZE: usize = 2 * 1024;

@@ -57,10 +58,13 @@ pub fn receiver(
     if res.is_err() {
         panic!("streamer::receiver set_read_timeout error");
     }
-    spawn(move || {
-        let _ = recv_loop(&sock, &exit, &recycler, &packet_sender);
-        ()
-    })
+    Builder::new()
+        .name("solana-receiver".to_string())
+        .spawn(move || {
+            let _ = recv_loop(&sock, &exit, &recycler, &packet_sender);
+            ()
+        })
+        .unwrap()
 }

 fn recv_send(sock: &UdpSocket, recycler: &BlobRecycler, r: &BlobReceiver) -> Result<()> {
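Every bare spawn in this file moves to std::thread::Builder so the thread gets a name, which then shows up in panic messages and debuggers. The minimal version of the new pattern (thread name and body are placeholders):

use std::thread::{Builder, JoinHandle};

fn spawn_named() -> JoinHandle<()> {
    Builder::new()
        .name("solana-example".to_string())
        .spawn(move || {
            // worker loop elided
        })
        .unwrap() // Builder::spawn returns io::Result, hence the trailing .unwrap()
}

fn main() {
    spawn_named().join().unwrap();
}

Unlike std::thread::spawn, Builder::spawn can fail (for example if the OS refuses a new thread), which is why each converted call site gains an .unwrap().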
@@ -95,16 +99,20 @@ pub fn responder(
     recycler: BlobRecycler,
     r: BlobReceiver,
 ) -> JoinHandle<()> {
-    spawn(move || loop {
-        if recv_send(&sock, &recycler, &r).is_err() && exit.load(Ordering::Relaxed) {
-            break;
-        }
-    })
+    Builder::new()
+        .name("solana-responder".to_string())
+        .spawn(move || loop {
+            if recv_send(&sock, &recycler, &r).is_err() && exit.load(Ordering::Relaxed) {
+                break;
+            }
+        })
+        .unwrap()
 }

 //TODO, we would need to stick block authentication before we create the
 //window.
 fn recv_blobs(recycler: &BlobRecycler, sock: &UdpSocket, s: &BlobSender) -> Result<()> {
+    trace!("receiving on {}", sock.local_addr().unwrap());
     let dq = Blob::recv_from(recycler, sock)?;
     if !dq.is_empty() {
         s.send(dq)?;

@@ -122,12 +130,15 @@ pub fn blob_receiver(
     //1 second timeout on socket read
     let timer = Duration::new(1, 0);
     sock.set_read_timeout(Some(timer))?;
-    let t = spawn(move || loop {
-        if exit.load(Ordering::Relaxed) {
-            break;
-        }
-        let _ = recv_blobs(&recycler, &sock, &s);
-    });
+    let t = Builder::new()
+        .name("solana-blob_receiver".to_string())
+        .spawn(move || loop {
+            if exit.load(Ordering::Relaxed) {
+                break;
+            }
+            let _ = recv_blobs(&recycler, &sock, &s);
+        })
+        .unwrap();
     Ok(t)
 }

@@ -176,10 +187,11 @@ fn repair_window(
         trace!("repair_window counter {} {}", *times, *consumed);
         return Ok(());
     }
-    info!("repair_window request {} {}", *consumed, *received);
     let sock = UdpSocket::bind("0.0.0.0:0")?;
     for (to, req) in reqs {
         //todo cache socket
+        info!("repair_window request {} {} {}", *consumed, *received, to);
+        assert!(req.len() < BLOB_SIZE);
         sock.send_to(&req, to)?;
     }
     Ok(())
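The new assert in repair_window bounds each request against BLOB_SIZE before it goes out: the receive side reads into BLOB_SIZE-byte buffers, so a larger datagram would arrive truncated. A hedged sketch of the guard; the real BLOB_SIZE lives in packet.rs and the 64 KiB value below is only a stand-in:

use std::io;
use std::net::{SocketAddr, UdpSocket};

const BLOB_SIZE: usize = 64 * 1024; // assumption; see packet.rs for the real constant

fn send_repair_request(sock: &UdpSocket, req: &[u8], to: &SocketAddr) -> io::Result<usize> {
    assert!(req.len() < BLOB_SIZE); // fail fast instead of truncating on the wire
    sock.send_to(req, to)
}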
@@ -220,11 +232,11 @@ fn recv_window(
         );
         if p.get_id().expect("get_id in fn recv_window") == leader_id {
             //TODO
-            //need to copy the retransmited blob
+            //need to copy the retransmitted blob
             //otherwise we get into races with which thread
             //should do the recycling
             //
-            //a better absraction would be to recycle when the blob
+            //a better abstraction would be to recycle when the blob
             //is dropped via a weakref to the recycler
             let nv = recycler.allocate();
             {

@@ -317,35 +329,38 @@ pub fn window(
     s: BlobSender,
     retransmit: BlobSender,
 ) -> JoinHandle<()> {
-    spawn(move || {
-        let mut consumed = 0;
-        let mut received = 0;
-        let mut last = 0;
-        let mut times = 0;
-        loop {
-            if exit.load(Ordering::Relaxed) {
-                break;
-            }
-            let _ = recv_window(
-                &window,
-                &crdt,
-                &recycler,
-                &mut consumed,
-                &mut received,
-                &r,
-                &s,
-                &retransmit,
-            );
-            let _ = repair_window(
-                &window,
-                &crdt,
-                &mut last,
-                &mut times,
-                &mut consumed,
-                &mut received,
-            );
-        }
-    })
+    Builder::new()
+        .name("solana-window".to_string())
+        .spawn(move || {
+            let mut consumed = 0;
+            let mut received = 0;
+            let mut last = 0;
+            let mut times = 0;
+            loop {
+                if exit.load(Ordering::Relaxed) {
+                    break;
+                }
+                let _ = recv_window(
+                    &window,
+                    &crdt,
+                    &recycler,
+                    &mut consumed,
+                    &mut received,
+                    &r,
+                    &s,
+                    &retransmit,
+                );
+                let _ = repair_window(
+                    &window,
+                    &crdt,
+                    &mut last,
+                    &mut times,
+                    &mut consumed,
+                    &mut received,
+                );
+            }
+        })
+        .unwrap()
 }

 fn broadcast(

@@ -412,15 +427,18 @@ pub fn broadcaster(
     recycler: BlobRecycler,
     r: BlobReceiver,
 ) -> JoinHandle<()> {
-    spawn(move || {
-        let mut transmit_index = 0;
-        loop {
-            if exit.load(Ordering::Relaxed) {
-                break;
-            }
-            let _ = broadcast(&crdt, &window, &recycler, &r, &sock, &mut transmit_index);
-        }
-    })
+    Builder::new()
+        .name("solana-broadcaster".to_string())
+        .spawn(move || {
+            let mut transmit_index = 0;
+            loop {
+                if exit.load(Ordering::Relaxed) {
+                    break;
+                }
+                let _ = broadcast(&crdt, &window, &recycler, &r, &sock, &mut transmit_index);
+            }
+        })
+        .unwrap()
 }

 fn retransmit(

@@ -460,24 +478,27 @@ pub fn retransmitter(
     recycler: BlobRecycler,
     r: BlobReceiver,
 ) -> JoinHandle<()> {
-    spawn(move || {
-        trace!("retransmitter started");
-        loop {
-            if exit.load(Ordering::Relaxed) {
-                break;
-            }
-            // TODO: handle this error
-            let _ = retransmit(&crdt, &recycler, &r, &sock);
-        }
-        trace!("exiting retransmitter");
-    })
+    Builder::new()
+        .name("solana-retransmitter".to_string())
+        .spawn(move || {
+            trace!("retransmitter started");
+            loop {
+                if exit.load(Ordering::Relaxed) {
+                    break;
+                }
+                // TODO: handle this error
+                let _ = retransmit(&crdt, &recycler, &r, &sock);
+            }
+            trace!("exiting retransmitter");
+        })
+        .unwrap()
 }

 #[cfg(all(feature = "unstable", test))]
 mod bench {
     extern crate test;
     use self::test::Bencher;
-    use packet::{Packet, PacketRecycler, PACKET_DATA_SIZE};
+    use packet::{Packet, PacketRecycler, BLOB_SIZE, PACKET_DATA_SIZE};
     use result::Result;
     use std::net::{SocketAddr, UdpSocket};
     use std::sync::atomic::{AtomicBool, Ordering};

@@ -509,6 +530,7 @@ mod bench {
         let mut num = 0;
         for p in msgs_.read().unwrap().packets.iter() {
            let a = p.meta.addr();
+            assert!(p.meta.size < BLOB_SIZE);
             send.send_to(&p.data[..p.meta.size], &a).unwrap();
             num += 1;
         }
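The window, broadcaster, and retransmitter threads above all share one shutdown idiom: poll a shared AtomicBool each iteration and break once it flips. Standalone, that looks like this (names are placeholders):

use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread::Builder;

fn main() {
    let exit = Arc::new(AtomicBool::new(false));
    let exit_flag = exit.clone();
    let t = Builder::new()
        .name("solana-example".to_string())
        .spawn(move || loop {
            if exit_flag.load(Ordering::Relaxed) {
                break;
            }
            // one unit of service work per iteration
        })
        .unwrap();
    exit.store(true, Ordering::Relaxed); // signal shutdown
    t.join().unwrap();
}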
@@ -537,7 +559,8 @@ mod bench {
         }
         })
     }
-    fn run_streamer_bench() -> Result<()> {
+
+    fn bench_streamer_with_result() -> Result<()> {
         let read = UdpSocket::bind("127.0.0.1:0")?;
         read.set_read_timeout(Some(Duration::new(1, 0)))?;

@@ -572,15 +595,14 @@ mod bench {
         Ok(())
     }
     #[bench]
-    pub fn streamer_bench(_bench: &mut Bencher) {
-        run_streamer_bench().unwrap();
+    pub fn bench_streamer(_bench: &mut Bencher) {
+        bench_streamer_with_result().unwrap();
     }
 }

 #[cfg(test)]
 mod test {
     use crdt::{Crdt, ReplicatedData};
-    use logger;
     use packet::{Blob, BlobRecycler, Packet, PacketRecycler, Packets, PACKET_DATA_SIZE};
     use signature::KeyPair;
     use signature::KeyPairUtil;

@@ -591,10 +613,9 @@ mod test {
     use std::sync::atomic::{AtomicBool, Ordering};
     use std::sync::mpsc::channel;
     use std::sync::{Arc, RwLock};
-    use std::thread::sleep;
     use std::time::Duration;
+    use streamer::{blob_receiver, receiver, responder, window};
     use streamer::{default_window, BlobReceiver, PacketReceiver};
-    use streamer::{blob_receiver, receiver, responder, retransmitter, window};

     fn get_msgs(r: PacketReceiver, num: &mut usize) {
         for _t in 0..5 {

@@ -672,14 +693,14 @@ mod test {
         let addr = read.local_addr().unwrap();
         let send = UdpSocket::bind("127.0.0.1:0").expect("bind");
         let serve = UdpSocket::bind("127.0.0.1:0").expect("bind");
-        let event = UdpSocket::bind("127.0.0.1:0").expect("bind");
+        let transaction = UdpSocket::bind("127.0.0.1:0").expect("bind");
         let exit = Arc::new(AtomicBool::new(false));
         let rep_data = ReplicatedData::new(
             pubkey_me,
             read.local_addr().unwrap(),
             send.local_addr().unwrap(),
             serve.local_addr().unwrap(),
-            event.local_addr().unwrap(),
+            transaction.local_addr().unwrap(),
         );
         let mut crdt_me = Crdt::new(rep_data);
         let me_id = crdt_me.my_data().id;

@@ -731,111 +752,4 @@ mod test {
         t_responder.join().expect("join");
         t_window.join().expect("join");
     }
-
-    fn test_node() -> (Arc<RwLock<Crdt>>, UdpSocket, UdpSocket, UdpSocket) {
-        let gossip = UdpSocket::bind("127.0.0.1:0").unwrap();
-        let replicate = UdpSocket::bind("127.0.0.1:0").unwrap();
-        let serve = UdpSocket::bind("127.0.0.1:0").unwrap();
-        let event = UdpSocket::bind("127.0.0.1:0").unwrap();
-        let pubkey = KeyPair::new().pubkey();
-        let d = ReplicatedData::new(
-            pubkey,
-            gossip.local_addr().unwrap(),
-            replicate.local_addr().unwrap(),
-            serve.local_addr().unwrap(),
-            event.local_addr().unwrap(),
-        );
-        trace!("data: {:?}", d);
-        let crdt = Crdt::new(d);
-        (Arc::new(RwLock::new(crdt)), gossip, replicate, serve)
-    }
-
-    #[test]
-    #[ignore]
-    //retransmit from leader to replicate target
-    pub fn retransmit() {
-        logger::setup();
-        trace!("retransmit test start");
-        let exit = Arc::new(AtomicBool::new(false));
-        let (crdt_leader, sock_gossip_leader, _, sock_leader) = test_node();
-        let (crdt_target, sock_gossip_target, sock_replicate_target, _) = test_node();
-        let leader_data = crdt_leader.read().unwrap().my_data().clone();
-        crdt_leader.write().unwrap().insert(&leader_data);
-        crdt_leader.write().unwrap().set_leader(leader_data.id);
-        let t_crdt_leader_g = Crdt::gossip(crdt_leader.clone(), exit.clone());
-        let window_leader = Arc::new(RwLock::new(vec![]));
-        let t_crdt_leader_l = Crdt::listen(
-            crdt_leader.clone(),
-            window_leader,
-            sock_gossip_leader,
-            exit.clone(),
-        );
-
-        crdt_target.write().unwrap().insert(&leader_data);
-        crdt_target.write().unwrap().set_leader(leader_data.id);
-        let t_crdt_target_g = Crdt::gossip(crdt_target.clone(), exit.clone());
-        let window_target = Arc::new(RwLock::new(vec![]));
-        let t_crdt_target_l = Crdt::listen(
-            crdt_target.clone(),
-            window_target,
-            sock_gossip_target,
-            exit.clone(),
-        );
-        //leader retransmitter
-        let (s_retransmit, r_retransmit) = channel();
-        let blob_recycler = BlobRecycler::default();
-        let saddr = sock_leader.local_addr().unwrap();
-        let t_retransmit = retransmitter(
-            sock_leader,
-            exit.clone(),
-            crdt_leader.clone(),
-            blob_recycler.clone(),
-            r_retransmit,
-        );
-
-        //target receiver
-        let (s_blob_receiver, r_blob_receiver) = channel();
-        let t_receiver = blob_receiver(
-            exit.clone(),
-            blob_recycler.clone(),
-            sock_replicate_target,
-            s_blob_receiver,
-        ).unwrap();
-        for _ in 0..10 {
-            let done = crdt_target.read().unwrap().update_index == 2
-                && crdt_leader.read().unwrap().update_index == 2;
-            if done {
-                break;
-            }
-            let timer = Duration::new(1, 0);
-            sleep(timer);
-        }
-
-        //send the data through
-        let mut bq = VecDeque::new();
-        let b = blob_recycler.allocate();
-        b.write().unwrap().meta.size = 10;
-        bq.push_back(b);
-        s_retransmit.send(bq).unwrap();
-        let timer = Duration::new(5, 0);
-        trace!("Waiting for timeout");
-        let mut oq = r_blob_receiver.recv_timeout(timer).unwrap();
-        assert_eq!(oq.len(), 1);
-        let o = oq.pop_front().unwrap();
-        let ro = o.read().unwrap();
-        assert_eq!(ro.meta.size, 10);
-        assert_eq!(ro.meta.addr(), saddr);
-        exit.store(true, Ordering::Relaxed);
-        let threads = vec![
-            t_receiver,
-            t_retransmit,
-            t_crdt_target_g,
-            t_crdt_target_l,
-            t_crdt_leader_g,
-            t_crdt_leader_l,
-        ];
-        for t in threads {
-            t.join().unwrap();
-        }
-    }
 }
@@ -4,8 +4,6 @@
 //! unstable and may change in future releases.

 use bincode::{deserialize, serialize};
-use event::Event;
-use futures::future::{ok, FutureResult};
 use hash::Hash;
 use request::{Request, Response};
 use signature::{KeyPair, PublicKey, Signature};

@@ -17,8 +15,8 @@ use transaction::Transaction;
 pub struct ThinClient {
     requests_addr: SocketAddr,
     requests_socket: UdpSocket,
-    events_addr: SocketAddr,
-    events_socket: UdpSocket,
+    transactions_addr: SocketAddr,
+    transactions_socket: UdpSocket,
     last_id: Option<Hash>,
     transaction_count: u64,
     balances: HashMap<PublicKey, Option<i64>>,

@@ -26,19 +24,19 @@ pub struct ThinClient {

 impl ThinClient {
     /// Create a new ThinClient that will interface with Rpu
-    /// over `requests_socket` and `events_socket`. To receive responses, the caller must bind `socket`
+    /// over `requests_socket` and `transactions_socket`. To receive responses, the caller must bind `socket`
     /// to a public address before invoking ThinClient methods.
     pub fn new(
         requests_addr: SocketAddr,
         requests_socket: UdpSocket,
-        events_addr: SocketAddr,
-        events_socket: UdpSocket,
+        transactions_addr: SocketAddr,
+        transactions_socket: UdpSocket,
     ) -> Self {
         let client = ThinClient {
             requests_addr,
             requests_socket,
-            events_addr,
-            events_socket,
+            transactions_addr,
+            transactions_socket,
             last_id: None,
             transaction_count: 0,
             balances: HashMap::new(),

@@ -74,10 +72,10 @@ impl ThinClient {

     /// Send a signed Transaction to the server for processing. This method
     /// does not wait for a response.
-    pub fn transfer_signed(&self, tr: Transaction) -> io::Result<usize> {
-        let event = Event::Transaction(tr);
-        let data = serialize(&event).expect("serialize Transaction in pub fn transfer_signed");
-        self.events_socket.send_to(&data, &self.events_addr)
+    pub fn transfer_signed(&self, tx: Transaction) -> io::Result<usize> {
+        let data = serialize(&tx).expect("serialize Transaction in pub fn transfer_signed");
+        self.transactions_socket
+            .send_to(&data, &self.transactions_addr)
     }

     /// Creates, signs, and processes a Transaction. Useful for writing unit-tests.

@@ -88,9 +86,9 @@ impl ThinClient {
         to: PublicKey,
         last_id: &Hash,
     ) -> io::Result<Signature> {
-        let tr = Transaction::new(keypair, to, n, *last_id);
-        let sig = tr.sig;
-        self.transfer_signed(tr).map(|_| sig)
+        let tx = Transaction::new(keypair, to, n, *last_id);
+        let sig = tx.sig;
+        self.transfer_signed(tx).map(|_| sig)
     }

     /// Request the balance of the user holding `pubkey`. This method blocks

@@ -139,7 +137,7 @@ impl ThinClient {

     /// Request the last Entry ID from the server. This method blocks
     /// until the server sends a response.
-    pub fn get_last_id(&mut self) -> FutureResult<Hash, ()> {
+    pub fn get_last_id(&mut self) -> Hash {
         info!("get_last_id");
         let req = Request::GetLastId;
         let data = serialize(&req).expect("serialize GetLastId in pub fn get_last_id");

@@ -154,46 +152,41 @@ impl ThinClient {
             }
             self.process_response(resp);
         }
-        ok(self.last_id.expect("some last_id"))
+        self.last_id.expect("some last_id")
     }
-}

-#[cfg(test)]
-pub fn poll_get_balance(client: &mut ThinClient, pubkey: &PublicKey) -> io::Result<i64> {
-    use std::time::Instant;
+    pub fn poll_get_balance(&mut self, pubkey: &PublicKey) -> io::Result<i64> {
+        use std::time::Instant;

-    let mut balance;
-    let now = Instant::now();
-    loop {
-        balance = client.get_balance(pubkey);
-        if balance.is_ok() || now.elapsed().as_secs() > 1 {
-            break;
-        }
-    }
+        let mut balance;
+        let now = Instant::now();
+        loop {
+            balance = self.get_balance(pubkey);
+            if balance.is_ok() || now.elapsed().as_secs() > 1 {
+                break;
+            }
+        }

-    balance
+        balance
+    }
 }

 #[cfg(test)]
 mod tests {
     use super::*;
     use bank::Bank;
-    use crdt::{Crdt, ReplicatedData};
-    use futures::Future;
+    use budget::Budget;
+    use crdt::TestNode;
     use logger;
     use mint::Mint;
-    use plan::Plan;
     use server::Server;
     use signature::{KeyPair, KeyPairUtil};
     use std::io::sink;
     use std::sync::atomic::{AtomicBool, Ordering};
-    use std::sync::{Arc, RwLock};
-    use std::thread::JoinHandle;
+    use std::sync::Arc;
     use std::thread::sleep;
     use std::time::Duration;
-    use streamer::default_window;
-    use transaction::Instruction;
-    use tvu::tests::TestNode;
+    use transaction::{Instruction, Plan};

     #[test]
     fn test_thin_client() {
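Two client-facing changes land together here: get_last_id returns Hash directly now that the futures dependency is gone (callers drop the .wait().unwrap()), and the test-only poll_get_balance free function becomes a ThinClient method. The retry loop it uses is plain deadline polling; a standalone sketch with the same shape, all names invented:

use std::time::Instant;

// Retry `op` until it succeeds or roughly one second has elapsed,
// returning the last result either way, like poll_get_balance does.
fn poll_until_ok<T, E>(mut op: impl FnMut() -> Result<T, E>) -> Result<T, E> {
    let now = Instant::now();
    loop {
        let result = op();
        if result.is_ok() || now.elapsed().as_secs() > 1 {
            return result;
        }
    }
}

fn main() {
    let mut calls = 0;
    let balance = poll_until_ok(|| {
        calls += 1;
        if calls < 3 { Err("not yet") } else { Ok(500) }
    });
    assert_eq!(balance, Ok(500));
}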
@@ -207,11 +200,10 @@ mod tests {

         let server = Server::new_leader(
             bank,
-            alice.last_id(),
             Some(Duration::from_millis(30)),
             leader.data.clone(),
             leader.sockets.requests,
-            leader.sockets.event,
+            leader.sockets.transaction,
             leader.sockets.broadcast,
             leader.sockets.respond,
             leader.sockets.gossip,

@@ -221,19 +213,19 @@ mod tests {
         sleep(Duration::from_millis(900));

         let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
-        let events_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
+        let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();

         let mut client = ThinClient::new(
             leader.data.requests_addr,
             requests_socket,
-            leader.data.events_addr,
-            events_socket,
+            leader.data.transactions_addr,
+            transactions_socket,
         );
-        let last_id = client.get_last_id().wait().unwrap();
+        let last_id = client.get_last_id();
         let _sig = client
             .transfer(500, &alice.keypair(), bob_pubkey, &last_id)
             .unwrap();
-        let balance = poll_get_balance(&mut client, &bob_pubkey);
+        let balance = client.poll_get_balance(&bob_pubkey);
         assert_eq!(balance.unwrap(), 500);
         exit.store(true, Ordering::Relaxed);
         for t in server.thread_hdls {

@@ -252,11 +244,10 @@ mod tests {

         let server = Server::new_leader(
             bank,
-            alice.last_id(),
             Some(Duration::from_millis(30)),
             leader.data.clone(),
             leader.sockets.requests,
-            leader.sockets.event,
+            leader.sockets.transaction,
             leader.sockets.broadcast,
             leader.sockets.respond,
             leader.sockets.gossip,

@@ -269,186 +260,33 @@ mod tests {
         requests_socket
             .set_read_timeout(Some(Duration::new(5, 0)))
             .unwrap();
-        let events_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
+        let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
         let mut client = ThinClient::new(
             leader.data.requests_addr,
             requests_socket,
-            leader.data.events_addr,
-            events_socket,
+            leader.data.transactions_addr,
+            transactions_socket,
         );
-        let last_id = client.get_last_id().wait().unwrap();
+        let last_id = client.get_last_id();

-        let tr = Transaction::new(&alice.keypair(), bob_pubkey, 500, last_id);
+        let tx = Transaction::new(&alice.keypair(), bob_pubkey, 500, last_id);

-        let _sig = client.transfer_signed(tr).unwrap();
+        let _sig = client.transfer_signed(tx).unwrap();

-        let last_id = client.get_last_id().wait().unwrap();
+        let last_id = client.get_last_id();

         let mut tr2 = Transaction::new(&alice.keypair(), bob_pubkey, 501, last_id);
         if let Instruction::NewContract(contract) = &mut tr2.instruction {
             contract.tokens = 502;
-            contract.plan = Plan::new_payment(502, bob_pubkey);
+            contract.plan = Plan::Budget(Budget::new_payment(502, bob_pubkey));
         }
         let _sig = client.transfer_signed(tr2).unwrap();

-        let balance = poll_get_balance(&mut client, &bob_pubkey);
+        let balance = client.poll_get_balance(&bob_pubkey);
         assert_eq!(balance.unwrap(), 500);
         exit.store(true, Ordering::Relaxed);
         for t in server.thread_hdls {
             t.join().unwrap();
         }
     }
-
-    fn validator(
-        leader: &ReplicatedData,
-        exit: Arc<AtomicBool>,
-        alice: &Mint,
-        threads: &mut Vec<JoinHandle<()>>,
-    ) {
-        let validator = TestNode::new();
-        let replicant_bank = Bank::new(&alice);
-        let mut ts = Server::new_validator(
-            replicant_bank,
-            validator.data.clone(),
-            validator.sockets.requests,
-            validator.sockets.respond,
-            validator.sockets.replicate,
-            validator.sockets.gossip,
-            leader.clone(),
-            exit.clone(),
-        );
-        threads.append(&mut ts.thread_hdls);
-    }
-
-    fn converge(
-        leader: &ReplicatedData,
-        exit: Arc<AtomicBool>,
-        num_nodes: usize,
-        threads: &mut Vec<JoinHandle<()>>,
-    ) -> Vec<ReplicatedData> {
-        //lets spy on the network
-        let mut spy = TestNode::new();
-        let daddr = "0.0.0.0:0".parse().unwrap();
-        let me = spy.data.id.clone();
-        spy.data.replicate_addr = daddr;
-        spy.data.requests_addr = daddr;
-        let mut spy_crdt = Crdt::new(spy.data);
-        spy_crdt.insert(&leader);
-        spy_crdt.set_leader(leader.id);
-
-        let spy_ref = Arc::new(RwLock::new(spy_crdt));
-        let spy_window = default_window();
-        let t_spy_listen = Crdt::listen(
-            spy_ref.clone(),
-            spy_window,
-            spy.sockets.gossip,
-            exit.clone(),
-        );
-        let t_spy_gossip = Crdt::gossip(spy_ref.clone(), exit.clone());
-        //wait for the network to converge
-        let mut converged = false;
-        for _ in 0..30 {
-            let num = spy_ref.read().unwrap().convergence();
-            if num == num_nodes as u64 {
-                converged = true;
-                break;
-            }
-            sleep(Duration::new(1, 0));
-        }
-        assert!(converged);
-        threads.push(t_spy_listen);
-        threads.push(t_spy_gossip);
-        let v: Vec<ReplicatedData> = spy_ref
-            .read()
-            .unwrap()
-            .table
-            .values()
-            .into_iter()
-            .filter(|x| x.id != me)
-            .map(|x| x.clone())
-            .collect();
-        v.clone()
-    }
-
-    #[test]
-    fn test_multi_node() {
-        logger::setup();
-        const N: usize = 5;
-        trace!("test_multi_accountant_stub");
-        let leader = TestNode::new();
-        let alice = Mint::new(10_000);
-        let bob_pubkey = KeyPair::new().pubkey();
-        let exit = Arc::new(AtomicBool::new(false));
-
-        let leader_bank = Bank::new(&alice);
-        let server = Server::new_leader(
-            leader_bank,
-            alice.last_id(),
-            None,
-            leader.data.clone(),
-            leader.sockets.requests,
-            leader.sockets.event,
-            leader.sockets.broadcast,
-            leader.sockets.respond,
-            leader.sockets.gossip,
-            exit.clone(),
-            sink(),
-        );
-
-        let mut threads = server.thread_hdls;
-        for _ in 0..N {
-            validator(&leader.data, exit.clone(), &alice, &mut threads);
-        }
-        let servers = converge(&leader.data, exit.clone(), N + 2, &mut threads);
-        //contains the leader addr as well
-        assert_eq!(servers.len(), N + 1);
-        //verify leader can do transfer
-        let leader_balance = tx_and_retry_get_balance(&leader.data, &alice, &bob_pubkey).unwrap();
-        assert_eq!(leader_balance, 500);
-        //verify validator has the same balance
-        let mut success = 0usize;
-        for server in servers.iter() {
-            let mut client = mk_client(server);
-            if let Ok(bal) = poll_get_balance(&mut client, &bob_pubkey) {
-                trace!("validator balance {}", bal);
-                if bal == leader_balance {
-                    success += 1;
-                }
-            }
-        }
-        assert_eq!(success, servers.len());
-        exit.store(true, Ordering::Relaxed);
-        for t in threads {
-            t.join().unwrap();
-        }
-    }
-
-    fn mk_client(leader: &ReplicatedData) -> ThinClient {
-        let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
-        requests_socket
-            .set_read_timeout(Some(Duration::new(1, 0)))
-            .unwrap();
-        let events_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
-
-        ThinClient::new(
-            leader.requests_addr,
-            requests_socket,
-            leader.events_addr,
-            events_socket,
-        )
-    }
-
-    fn tx_and_retry_get_balance(
-        leader: &ReplicatedData,
-        alice: &Mint,
-        bob_pubkey: &PublicKey,
-    ) -> io::Result<i64> {
-        let mut client = mk_client(leader);
-        trace!("getting leader last_id");
-        let last_id = client.get_last_id().wait().unwrap();
-        info!("executing leader transer");
-        let _sig = client
-            .transfer(500, &alice.keypair(), *bob_pubkey, &last_id)
-            .unwrap();
-        poll_get_balance(&mut client, bob_pubkey)
-    }
 }
src/tpu.rs (74 changes)
@@ -3,58 +3,55 @@

 use bank::Bank;
 use banking_stage::BankingStage;
-use crdt::{Crdt, ReplicatedData};
-use hash::Hash;
-use packet;
+use fetch_stage::FetchStage;
+use packet::{BlobRecycler, PacketRecycler};
 use record_stage::RecordStage;
-use sig_verify_stage::SigVerifyStage;
+use sigverify_stage::SigVerifyStage;
 use std::io::Write;
 use std::net::UdpSocket;
 use std::sync::atomic::AtomicBool;
-use std::sync::mpsc::channel;
-use std::sync::{Arc, Mutex, RwLock};
+use std::sync::{Arc, Mutex};
 use std::thread::JoinHandle;
 use std::time::Duration;
-use streamer;
+use streamer::BlobReceiver;
 use write_stage::WriteStage;

 pub struct Tpu {
+    pub blob_receiver: BlobReceiver,
     pub thread_hdls: Vec<JoinHandle<()>>,
 }

 impl Tpu {
     pub fn new<W: Write + Send + 'static>(
         bank: Arc<Bank>,
-        start_hash: Hash,
         tick_duration: Option<Duration>,
-        me: ReplicatedData,
-        events_socket: UdpSocket,
-        broadcast_socket: UdpSocket,
-        gossip: UdpSocket,
+        transactions_socket: UdpSocket,
+        blob_recycler: BlobRecycler,
         exit: Arc<AtomicBool>,
         writer: W,
     ) -> Self {
-        let packet_recycler = packet::PacketRecycler::default();
-        let (packet_sender, packet_receiver) = channel();
-        let t_receiver = streamer::receiver(
-            events_socket,
-            exit.clone(),
-            packet_recycler.clone(),
-            packet_sender,
-        );
-
-        let sig_verify_stage = SigVerifyStage::new(exit.clone(), packet_receiver);
+        let packet_recycler = PacketRecycler::default();
+        let fetch_stage =
+            FetchStage::new(transactions_socket, exit.clone(), packet_recycler.clone());
+
+        let sigverify_stage = SigVerifyStage::new(exit.clone(), fetch_stage.packet_receiver);

-        let blob_recycler = packet::BlobRecycler::default();
         let banking_stage = BankingStage::new(
             bank.clone(),
             exit.clone(),
-            sig_verify_stage.verified_receiver,
+            sigverify_stage.verified_receiver,
             packet_recycler.clone(),
         );

-        let record_stage =
-            RecordStage::new(banking_stage.signal_receiver, &start_hash, tick_duration);
+        let record_stage = match tick_duration {
+            Some(tick_duration) => RecordStage::new_with_clock(
+                banking_stage.signal_receiver,
+                &bank.last_id(),
+                tick_duration,
+            ),
+            None => RecordStage::new(banking_stage.signal_receiver, &bank.last_id()),
+        };

         let write_stage = WriteStage::new(
             bank.clone(),

@@ -63,31 +60,16 @@ impl Tpu {
             Mutex::new(writer),
             record_stage.entry_receiver,
         );
-
-        let crdt = Arc::new(RwLock::new(Crdt::new(me)));
-        let t_gossip = Crdt::gossip(crdt.clone(), exit.clone());
-        let window = streamer::default_window();
-        let t_listen = Crdt::listen(crdt.clone(), window.clone(), gossip, exit.clone());
-
-        let t_broadcast = streamer::broadcaster(
-            broadcast_socket,
-            exit.clone(),
-            crdt.clone(),
-            window,
-            blob_recycler.clone(),
-            write_stage.blob_receiver,
-        );
-
         let mut thread_hdls = vec![
-            t_receiver,
+            fetch_stage.thread_hdl,
             banking_stage.thread_hdl,
             record_stage.thread_hdl,
             write_stage.thread_hdl,
-            t_gossip,
-            t_listen,
-            t_broadcast,
         ];
-        thread_hdls.extend(sig_verify_stage.thread_hdls.into_iter());
-        Tpu { thread_hdls }
+        thread_hdls.extend(sigverify_stage.thread_hdls.into_iter());
+        Tpu {
+            blob_receiver: write_stage.blob_receiver,
+            thread_hdls,
+        }
     }
 }
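The reworked Tpu no longer owns sockets, gossip, or broadcast; it is now a pure pipeline of stages handing work along mpsc channels (FetchStage, SigVerifyStage, BankingStage, RecordStage, WriteStage), and it exposes write_stage.blob_receiver so the caller can attach the broadcaster, as the server.rs hunk earlier does. A toy two-stage pipeline with the same channel-per-hop shape (all names invented):

use std::sync::mpsc::channel;
use std::thread::spawn;

fn main() {
    let (fetch_out, verify_in) = channel();
    let (verify_out, pipeline_out) = channel();

    // Stage 1: produce work (stands in for FetchStage).
    let fetch = spawn(move || {
        for packet in 0..3 {
            fetch_out.send(packet).unwrap();
        }
    });
    // Stage 2: transform work (stands in for SigVerifyStage).
    let verify = spawn(move || {
        for packet in verify_in {
            verify_out.send(packet * 2).unwrap();
        }
    });

    fetch.join().unwrap();
    verify.join().unwrap();
    assert_eq!(pipeline_out.iter().sum::<i32>(), 6); // 0 + 2 + 4
}

Dropping each sender when its stage finishes is what lets the downstream receiver's iterator terminate, which is the same property the real stages rely on for shutdown.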
@@ -1,16 +1,42 @@
 //! The `transaction` module provides functionality for creating log transactions.

 use bincode::serialize;
+use budget::{Budget, Condition};
 use chrono::prelude::*;
 use hash::Hash;
-use plan::{Condition, Payment, Plan};
-use rayon::prelude::*;
+use payment_plan::{Payment, PaymentPlan, Witness};
 use signature::{KeyPair, KeyPairUtil, PublicKey, Signature, SignatureUtil};

 pub const SIGNED_DATA_OFFSET: usize = 112;
 pub const SIG_OFFSET: usize = 8;
 pub const PUB_KEY_OFFSET: usize = 80;

+#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
+pub enum Plan {
+    Budget(Budget),
+}
+
+// A proxy for the underlying DSL.
+impl PaymentPlan for Plan {
+    fn final_payment(&self) -> Option<Payment> {
+        match self {
+            Plan::Budget(budget) => budget.final_payment(),
+        }
+    }
+
+    fn verify(&self, spendable_tokens: i64) -> bool {
+        match self {
+            Plan::Budget(budget) => budget.verify(spendable_tokens),
+        }
+    }
+
+    fn apply_witness(&mut self, witness: &Witness) {
+        match self {
+            Plan::Budget(budget) => budget.apply_witness(witness),
+        }
+    }
+}
+
 #[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
 pub struct Contract {
     pub tokens: i64,
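The new Plan enum is a one-variant proxy: it forwards the PaymentPlan trait to the wrapped Budget, leaving room for other payment DSLs without touching Transaction. A standalone sketch of that pattern (all names below are illustrative, not the project's):

// Proxy-enum pattern: delegate a trait impl to the wrapped type per variant.
trait Verify {
    fn verify(&self, tokens: i64) -> bool;
}

struct Budget {
    tokens: i64,
}

impl Verify for Budget {
    fn verify(&self, tokens: i64) -> bool {
        self.tokens == tokens
    }
}

enum Plan {
    Budget(Budget),
}

impl Verify for Plan {
    fn verify(&self, tokens: i64) -> bool {
        match self {
            Plan::Budget(b) => b.verify(tokens), // new DSLs add arms here
        }
    }
}

fn main() {
    let plan = Plan::Budget(Budget { tokens: 42 });
    assert!(plan.verify(42));
}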
@@ -30,6 +56,7 @@ pub struct Transaction {
     pub from: PublicKey,
     pub instruction: Instruction,
     pub last_id: Hash,
+    pub fee: i64,
 }

 impl Transaction {

@@ -37,35 +64,53 @@ impl Transaction {
         from_keypair: &KeyPair,
         instruction: Instruction,
         last_id: Hash,
+        fee: i64,
     ) -> Self {
         let from = from_keypair.pubkey();
-        let mut tr = Transaction {
+        let mut tx = Transaction {
             sig: Signature::default(),
             instruction,
             last_id,
             from,
+            fee,
         };
-        tr.sign(from_keypair);
-        tr
+        tx.sign(from_keypair);
+        tx
+    }
+
+    /// Create and sign a new Transaction. Used for unit-testing.
+    pub fn new_taxed(
+        from_keypair: &KeyPair,
+        to: PublicKey,
+        tokens: i64,
+        fee: i64,
+        last_id: Hash,
+    ) -> Self {
+        let payment = Payment {
+            tokens: tokens - fee,
+            to,
+        };
+        let budget = Budget::Pay(payment);
+        let plan = Plan::Budget(budget);
+        let instruction = Instruction::NewContract(Contract { plan, tokens });
+        Self::new_from_instruction(from_keypair, instruction, last_id, fee)
     }

     /// Create and sign a new Transaction. Used for unit-testing.
     pub fn new(from_keypair: &KeyPair, to: PublicKey, tokens: i64, last_id: Hash) -> Self {
-        let plan = Plan::Pay(Payment { tokens, to });
-        let instruction = Instruction::NewContract(Contract { plan, tokens });
-        Self::new_from_instruction(from_keypair, instruction, last_id)
+        Self::new_taxed(from_keypair, to, tokens, 0, last_id)
     }

     /// Create and sign a new Witness Timestamp. Used for unit-testing.
     pub fn new_timestamp(from_keypair: &KeyPair, dt: DateTime<Utc>, last_id: Hash) -> Self {
         let instruction = Instruction::ApplyTimestamp(dt);
-        Self::new_from_instruction(from_keypair, instruction, last_id)
+        Self::new_from_instruction(from_keypair, instruction, last_id, 0)
     }

     /// Create and sign a new Witness Signature. Used for unit-testing.
     pub fn new_signature(from_keypair: &KeyPair, tx_sig: Signature, last_id: Hash) -> Self {
         let instruction = Instruction::ApplySignature(tx_sig);
-        Self::new_from_instruction(from_keypair, instruction, last_id)
+        Self::new_from_instruction(from_keypair, instruction, last_id, 0)
     }

     /// Create and sign a postdated Transaction. Used for unit-testing.

@@ -77,25 +122,23 @@ impl Transaction {
         last_id: Hash,
     ) -> Self {
         let from = from_keypair.pubkey();
-        let plan = Plan::Race(
+        let budget = Budget::Race(
             (Condition::Timestamp(dt), Payment { tokens, to }),
             (Condition::Signature(from), Payment { tokens, to: from }),
         );
+        let plan = Plan::Budget(budget);
         let instruction = Instruction::NewContract(Contract { plan, tokens });
-        let mut tr = Transaction {
-            instruction,
-            from,
-            last_id,
-            sig: Signature::default(),
-        };
-        tr.sign(from_keypair);
-        tr
+        Self::new_from_instruction(from_keypair, instruction, last_id, 0)
     }

     fn get_sign_data(&self) -> Vec<u8> {
         let mut data = serialize(&(&self.instruction)).expect("serialize Contract");
         let last_id_data = serialize(&(&self.last_id)).expect("serialize last_id");
         data.extend_from_slice(&last_id_data);
+
+        let fee_data = serialize(&(&self.fee)).expect("serialize last_id");
+        data.extend_from_slice(&fee_data);
+
         data
     }

@@ -106,12 +149,14 @@ impl Transaction {
     }

     pub fn verify_sig(&self) -> bool {
+        warn!("transaction signature verification called");
         self.sig.verify(&self.from, &self.get_sign_data())
     }

     pub fn verify_plan(&self) -> bool {
         if let Instruction::NewContract(contract) = &self.instruction {
-            contract.plan.verify(contract.tokens)
+            self.fee >= 0 && self.fee <= contract.tokens
+                && contract.plan.verify(contract.tokens - self.fee)
         } else {
             true
         }
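The fee is now part of the signed data and of plan verification. new_taxed above builds the payment with tokens - fee, and verify_plan accepts the transaction only when 0 <= fee <= tokens and the plan accounts for exactly the remainder (the same cases test_transfer_with_fee below exercises). A standalone restatement of the arithmetic, with the plan's own check reduced to an equality for illustration:

fn verify_fee(tokens: i64, fee: i64, plan_tokens: i64) -> bool {
    fee >= 0 && fee <= tokens && plan_tokens == tokens - fee
}

fn main() {
    assert!(verify_fee(1, 1, 0));   // tokens 1, fee 1: the plan pays out 0
    assert!(!verify_fee(1, 2, -1)); // fee larger than tokens is rejected
    assert!(!verify_fee(1, -1, 2)); // negative fee is rejected
}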
@@ -138,21 +183,6 @@ pub fn memfind<A: Eq>(a: &[A], b: &[A]) -> Option<usize> {
|
|||||||
None
|
None
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Verify a batch of signatures.
|
|
||||||
pub fn verify_signatures(transactions: &[Transaction]) -> bool {
|
|
||||||
transactions.par_iter().all(|tr| tr.verify_sig())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Verify a batch of spending plans.
|
|
||||||
pub fn verify_plans(transactions: &[Transaction]) -> bool {
|
|
||||||
transactions.par_iter().all(|tr| tr.verify_plan())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Verify a batch of transactions.
|
|
||||||
pub fn verify_transactions(transactions: &[Transaction]) -> bool {
|
|
||||||
verify_signatures(transactions) && verify_plans(transactions)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use super::*;
|
use super::*;
|
||||||
@@ -162,8 +192,8 @@ mod tests {
|
|||||||
fn test_claim() {
|
fn test_claim() {
|
||||||
let keypair = KeyPair::new();
|
let keypair = KeyPair::new();
|
||||||
let zero = Hash::default();
|
let zero = Hash::default();
|
||||||
let tr0 = Transaction::new(&keypair, keypair.pubkey(), 42, zero);
|
let tx0 = Transaction::new(&keypair, keypair.pubkey(), 42, zero);
|
||||||
assert!(tr0.verify_plan());
|
assert!(tx0.verify_plan());
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@@ -172,22 +202,34 @@ mod tests {
|
|||||||
let keypair0 = KeyPair::new();
|
let keypair0 = KeyPair::new();
|
||||||
let keypair1 = KeyPair::new();
|
let keypair1 = KeyPair::new();
|
||||||
let pubkey1 = keypair1.pubkey();
|
let pubkey1 = keypair1.pubkey();
|
||||||
let tr0 = Transaction::new(&keypair0, pubkey1, 42, zero);
|
let tx0 = Transaction::new(&keypair0, pubkey1, 42, zero);
|
||||||
assert!(tr0.verify_plan());
|
assert!(tx0.verify_plan());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_transfer_with_fee() {
|
||||||
|
let zero = Hash::default();
|
||||||
|
let keypair0 = KeyPair::new();
|
||||||
|
let pubkey1 = KeyPair::new().pubkey();
|
||||||
|
assert!(Transaction::new_taxed(&keypair0, pubkey1, 1, 1, zero).verify_plan());
|
||||||
|
assert!(!Transaction::new_taxed(&keypair0, pubkey1, 1, 2, zero).verify_plan());
|
||||||
|
assert!(!Transaction::new_taxed(&keypair0, pubkey1, 1, -1, zero).verify_plan());
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_serialize_claim() {
|
fn test_serialize_claim() {
|
||||||
let plan = Plan::Pay(Payment {
|
let budget = Budget::Pay(Payment {
|
||||||
tokens: 0,
|
tokens: 0,
|
||||||
to: Default::default(),
|
to: Default::default(),
|
||||||
});
|
});
|
||||||
|
let plan = Plan::Budget(budget);
|
||||||
let instruction = Instruction::NewContract(Contract { plan, tokens: 0 });
|
let instruction = Instruction::NewContract(Contract { plan, tokens: 0 });
|
||||||
let claim0 = Transaction {
|
let claim0 = Transaction {
|
||||||
instruction,
|
instruction,
|
||||||
from: Default::default(),
|
from: Default::default(),
|
||||||
last_id: Default::default(),
|
last_id: Default::default(),
|
||||||
sig: Default::default(),
|
sig: Default::default(),
|
||||||
|
fee: 0,
|
||||||
};
|
};
|
||||||
let buf = serialize(&claim0).unwrap();
|
let buf = serialize(&claim0).unwrap();
|
||||||
let claim1: Transaction = deserialize(&buf).unwrap();
|
let claim1: Transaction = deserialize(&buf).unwrap();
|
||||||
@@ -199,15 +241,15 @@ mod tests {
|
|||||||
let zero = Hash::default();
|
let zero = Hash::default();
|
||||||
let keypair = KeyPair::new();
|
let keypair = KeyPair::new();
|
||||||
let pubkey = keypair.pubkey();
|
let pubkey = keypair.pubkey();
|
||||||
let mut tr = Transaction::new(&keypair, pubkey, 42, zero);
|
let mut tx = Transaction::new(&keypair, pubkey, 42, zero);
|
||||||
if let Instruction::NewContract(contract) = &mut tr.instruction {
|
if let Instruction::NewContract(contract) = &mut tx.instruction {
|
||||||
contract.tokens = 1_000_000; // <-- attack, part 1!
|
contract.tokens = 1_000_000; // <-- attack, part 1!
|
||||||
if let Plan::Pay(ref mut payment) = contract.plan {
|
if let Plan::Budget(Budget::Pay(ref mut payment)) = contract.plan {
|
||||||
payment.tokens = contract.tokens; // <-- attack, part 2!
|
payment.tokens = contract.tokens; // <-- attack, part 2!
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
assert!(tr.verify_plan());
|
assert!(tx.verify_plan());
|
||||||
assert!(!tr.verify_sig());
|
assert!(!tx.verify_sig());
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@@ -217,23 +259,23 @@ mod tests {
|
|||||||
let thief_keypair = KeyPair::new();
|
let thief_keypair = KeyPair::new();
|
||||||
let pubkey1 = keypair1.pubkey();
|
let pubkey1 = keypair1.pubkey();
|
||||||
let zero = Hash::default();
|
let zero = Hash::default();
|
||||||
let mut tr = Transaction::new(&keypair0, pubkey1, 42, zero);
|
let mut tx = Transaction::new(&keypair0, pubkey1, 42, zero);
|
||||||
if let Instruction::NewContract(contract) = &mut tr.instruction {
|
if let Instruction::NewContract(contract) = &mut tx.instruction {
|
||||||
if let Plan::Pay(ref mut payment) = contract.plan {
|
if let Plan::Budget(Budget::Pay(ref mut payment)) = contract.plan {
|
||||||
payment.to = thief_keypair.pubkey(); // <-- attack!
|
payment.to = thief_keypair.pubkey(); // <-- attack!
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
assert!(tr.verify_plan());
|
assert!(tx.verify_plan());
|
||||||
assert!(!tr.verify_sig());
|
assert!(!tx.verify_sig());
|
||||||
}
|
}
|
||||||
#[test]
|
#[test]
|
||||||
fn test_layout() {
|
fn test_layout() {
|
||||||
let tr = test_tx();
|
let tx = test_tx();
|
||||||
let sign_data = tr.get_sign_data();
|
let sign_data = tx.get_sign_data();
|
||||||
let tx = serialize(&tr).unwrap();
|
let tx_bytes = serialize(&tx).unwrap();
|
||||||
assert_matches!(memfind(&tx, &sign_data), Some(SIGNED_DATA_OFFSET));
|
assert_matches!(memfind(&tx_bytes, &sign_data), Some(SIGNED_DATA_OFFSET));
|
||||||
assert_matches!(memfind(&tx, &tr.sig), Some(SIG_OFFSET));
|
assert_matches!(memfind(&tx_bytes, &tx.sig), Some(SIG_OFFSET));
|
||||||
assert_matches!(memfind(&tx, &tr.from), Some(PUB_KEY_OFFSET));
|
assert_matches!(memfind(&tx_bytes, &tx.from), Some(PUB_KEY_OFFSET));
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@@ -241,55 +283,20 @@ mod tests {
         let keypair0 = KeyPair::new();
         let keypair1 = KeyPair::new();
         let zero = Hash::default();
-        let mut tr = Transaction::new(&keypair0, keypair1.pubkey(), 1, zero);
+        let mut tx = Transaction::new(&keypair0, keypair1.pubkey(), 1, zero);
-        if let Instruction::NewContract(contract) = &mut tr.instruction {
+        if let Instruction::NewContract(contract) = &mut tx.instruction {
-            if let Plan::Pay(ref mut payment) = contract.plan {
+            if let Plan::Budget(Budget::Pay(ref mut payment)) = contract.plan {
                 payment.tokens = 2; // <-- attack!
             }
         }
-        assert!(!tr.verify_plan());
+        assert!(!tx.verify_plan());

         // Also, ensure all branches of the plan spend all tokens
-        if let Instruction::NewContract(contract) = &mut tr.instruction {
+        if let Instruction::NewContract(contract) = &mut tx.instruction {
-            if let Plan::Pay(ref mut payment) = contract.plan {
+            if let Plan::Budget(Budget::Pay(ref mut payment)) = contract.plan {
                 payment.tokens = 0; // <-- whoops!
             }
         }
-        assert!(!tr.verify_plan());
+        assert!(!tx.verify_plan());
-    }
-
-    #[test]
-    fn test_verify_transactions() {
-        let alice_keypair = KeyPair::new();
-        let bob_pubkey = KeyPair::new().pubkey();
-        let carol_pubkey = KeyPair::new().pubkey();
-        let last_id = Hash::default();
-        let tr0 = Transaction::new(&alice_keypair, bob_pubkey, 1, last_id);
-        let tr1 = Transaction::new(&alice_keypair, carol_pubkey, 1, last_id);
-        let transactions = vec![tr0, tr1];
-        assert!(verify_transactions(&transactions));
-    }
-}
-
-#[cfg(all(feature = "unstable", test))]
-mod bench {
-    extern crate test;
-    use self::test::Bencher;
-    use transaction::*;
-
-    #[bench]
-    fn verify_signatures_bench(bencher: &mut Bencher) {
-        let alice_keypair = KeyPair::new();
-        let last_id = Hash::default();
-        let transactions: Vec<_> = (0..64)
-            .into_par_iter()
-            .map(|_| {
-                let rando_pubkey = KeyPair::new().pubkey();
-                Transaction::new(&alice_keypair, rando_pubkey, 1, last_id)
-            })
-            .collect();
-        bencher.iter(|| {
-            assert!(verify_signatures(&transactions));
-        });
-    }
-}
     }
 }
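The three tests above probe one invariant from different angles: tampering with a signed transaction's token count or payee must leave it either internally inconsistent (caught by `verify_plan`) or out of sync with its signature (caught by `verify_sig`). Below is a minimal, self-contained sketch of that two-check split; the types and the hash-based stand-in for the real ed25519 signature are illustrative only, not the crate's actual definitions.

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Toy stand-ins; the real crate signs the serialized payload with ed25519.
#[derive(Hash)]
struct Payment {
    tokens: u64,
    to: u64, // stand-in for a PublicKey
}

struct Tx {
    tokens: u64,   // what the contract debits
    plan: Payment, // what the plan pays out
    sig: u64,      // stand-in signature: hash of the signed fields
}

// "Sign" by hashing exactly the fields the signature is supposed to cover.
fn sign(tokens: u64, plan: &Payment) -> u64 {
    let mut h = DefaultHasher::new();
    tokens.hash(&mut h);
    plan.hash(&mut h);
    h.finish()
}

impl Tx {
    fn new(tokens: u64, to: u64) -> Self {
        let plan = Payment { tokens, to };
        let sig = sign(tokens, &plan);
        Tx { tokens, plan, sig }
    }
    // verify_plan: the plan must spend exactly what the contract debits.
    fn verify_plan(&self) -> bool {
        self.plan.tokens == self.tokens
    }
    // verify_sig: the signature must still match the signed payload.
    fn verify_sig(&self) -> bool {
        self.sig == sign(self.tokens, &self.plan)
    }
}

fn main() {
    let mut tx = Tx::new(42, 7);
    // Attack: inflate both fields consistently, as in the test above.
    tx.tokens = 1_000_000;
    tx.plan.tokens = 1_000_000;
    assert!(tx.verify_plan()); // internally consistent...
    assert!(!tx.verify_sig()); // ...but the signature no longer matches
}
```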
132 src/tvu.rs
@@ -7,12 +7,12 @@
 //! along with the ecdsa signature for the blob and each signature in all the transactions. Blobs
 //! with errors are dropped, or marked for slashing.
 //! 3.a retransmit
-//! - Blobs originating from the parent (leader atm is the only parent), are retransmit to all the
+//! - Blobs originating from the parent (the leader, at the moment, is the only parent) are retransmitted to all the
 //!   peers in the crdt. Peers are everyone who is not me or the leader that has a known replicate
 //!   address.
 //! 3.b window
 //! - Verified blobs are placed into a window, indexed by the counter set by the leader. This could
-//!   be the PoH counter if its monitonically increasing in each blob. Easure coding is used to
+//!   be the PoH counter if it's monotonically increasing in each blob. Erasure coding is used to
 //!   recover any missing packets, and requests are made at random to peers and parents to retransmit
 //!   a missing packet.
 //! 4. accountant
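The window in step 3.b is, in essence, a circular buffer keyed by the leader-assigned counter: a blob's index picks its slot, and consumption advances strictly in counter order, so a gap marks exactly where an erasure-recovery or retransmit request is needed. A minimal sketch of that indexing scheme follows; the `Window` type and its methods are hypothetical, and the real window stores blobs and pairs with erasure coding rather than generic payloads.

```rust
// Assumed capacity for the sketch; the real window size is an implementation detail.
const WINDOW_SIZE: usize = 2 * 1024;

struct Window<T> {
    slots: Vec<Option<(u64, T)>>, // (leader-assigned index, payload)
    consumed: u64,                // next index expected in order
}

impl<T> Window<T> {
    fn new() -> Self {
        Window {
            slots: (0..WINDOW_SIZE).map(|_| None).collect(),
            consumed: 0,
        }
    }

    /// Place a blob at the slot derived from its leader-assigned index.
    fn insert(&mut self, index: u64, payload: T) {
        let slot = (index as usize) % WINDOW_SIZE;
        self.slots[slot] = Some((index, payload));
    }

    /// Drain blobs in counter order; a gap (missing index) stops consumption,
    /// which is where a retransmit request or erasure recovery would kick in.
    fn consume(&mut self) -> Vec<T> {
        let mut out = vec![];
        loop {
            let slot = (self.consumed as usize) % WINDOW_SIZE;
            match self.slots[slot].take() {
                Some((i, p)) if i == self.consumed => {
                    out.push(p);
                    self.consumed += 1;
                }
                other => {
                    self.slots[slot] = other; // put back anything stale
                    break;
                }
            }
        }
        out
    }
}

fn main() {
    let mut w = Window::new();
    w.insert(1, "b");
    w.insert(0, "a");
    assert_eq!(w.consume(), vec!["a", "b"]); // in order despite arrival order
}
```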
@@ -22,6 +22,7 @@

 use bank::Bank;
 use crdt::{Crdt, ReplicatedData};
+use data_replicator::DataReplicator;
 use packet;
 use replicate_stage::ReplicateStage;
 use std::net::UdpSocket;
@@ -41,14 +42,14 @@ impl Tvu {
     /// # Arguments
     /// * `bank` - The bank state.
     /// * `me` - my configuration
-    /// * `gossip` - my gosisp socket
+    /// * `gossip` - my gossip socket
-    /// * `replicte` - my replicte socket
+    /// * `replicate` - my replicate socket
     /// * `leader` - leader configuration
     /// * `exit` - The exit signal.
     pub fn new(
         bank: Arc<Bank>,
         me: ReplicatedData,
-        gossip: UdpSocket,
+        gossip_listen_socket: UdpSocket,
         replicate: UdpSocket,
         leader: ReplicatedData,
         exit: Arc<AtomicBool>,
@@ -61,9 +62,15 @@ impl Tvu {
         crdt.write()
             .expect("'crdt' write lock before insert() in pub fn replicate")
             .insert(&leader);
-        let t_gossip = Crdt::gossip(crdt.clone(), exit.clone());
         let window = streamer::default_window();
-        let t_listen = Crdt::listen(crdt.clone(), window.clone(), gossip, exit.clone());
+        let gossip_send_socket = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
+        let data_replicator = DataReplicator::new(
+            crdt.clone(),
+            window.clone(),
+            gossip_listen_socket,
+            gossip_send_socket,
+            exit.clone(),
+        ).expect("DataReplicator::new");

         // TODO pull this socket out through the public interface
         // make sure we are on the same interface
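The new wiring above follows a pattern used throughout this release: a service binds its own ephemeral send socket, takes ownership of the caller's listen socket, and hands back `thread_hdls` so the caller can fold them into a single join list. A self-contained sketch of that shape, with a hypothetical `Service` standing in for `DataReplicator`:

```rust
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread::{spawn, JoinHandle};
use std::time::Duration;

struct Service {
    thread_hdls: Vec<JoinHandle<()>>,
}

impl Service {
    fn new(listen: UdpSocket, exit: Arc<AtomicBool>) -> std::io::Result<Service> {
        // Ephemeral port for outbound traffic, like gossip_send_socket above.
        let send = UdpSocket::bind("0.0.0.0:0")?;
        let e1 = exit.clone();
        let t_listen = spawn(move || {
            let mut buf = [0u8; 1024];
            listen
                .set_read_timeout(Some(Duration::from_millis(100)))
                .unwrap();
            while !e1.load(Ordering::Relaxed) {
                let _ = listen.recv_from(&mut buf); // timeouts are fine; just re-check exit
            }
        });
        let t_send = spawn(move || {
            while !exit.load(Ordering::Relaxed) {
                // Stand-in for outbound gossip; port 9 is the discard port.
                let _ = send.send_to(b"ping", "127.0.0.1:9");
                std::thread::sleep(Duration::from_millis(100));
            }
        });
        Ok(Service { thread_hdls: vec![t_listen, t_send] })
    }
}

fn main() -> std::io::Result<()> {
    let exit = Arc::new(AtomicBool::new(false));
    let listen = UdpSocket::bind("127.0.0.1:0")?;
    let svc = Service::new(listen, exit.clone())?;
    let mut threads: Vec<JoinHandle<()>> = vec![/* other stage handles */];
    threads.extend(svc.thread_hdls.into_iter()); // same shape as Tvu::new above
    exit.store(true, Ordering::Relaxed);
    for t in threads {
        t.join().unwrap();
    }
    Ok(())
}
```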
@@ -110,58 +117,32 @@ impl Tvu {
             blob_recycler.clone(),
         );

-        let threads = vec![
+        let mut threads = vec![
             //replicate threads
             t_blob_receiver,
             t_retransmit,
             t_window,
             replicate_stage.thread_hdl,
-            t_gossip,
-            t_listen,
         ];
+        threads.extend(data_replicator.thread_hdls.into_iter());
         Tvu {
             thread_hdls: threads,
         }
     }
 }
-
-#[cfg(test)]
-use std::time::Duration;
-
-#[cfg(test)]
-pub fn test_node() -> (ReplicatedData, UdpSocket, UdpSocket, UdpSocket, UdpSocket) {
-    use signature::{KeyPair, KeyPairUtil};
-
-    let events_socket = UdpSocket::bind("127.0.0.1:0").unwrap();
-    let gossip = UdpSocket::bind("127.0.0.1:0").unwrap();
-    let replicate = UdpSocket::bind("127.0.0.1:0").unwrap();
-    let requests_socket = UdpSocket::bind("127.0.0.1:0").unwrap();
-    requests_socket
-        .set_read_timeout(Some(Duration::new(1, 0)))
-        .unwrap();
-    let pubkey = KeyPair::new().pubkey();
-    let d = ReplicatedData::new(
-        pubkey,
-        gossip.local_addr().unwrap(),
-        replicate.local_addr().unwrap(),
-        requests_socket.local_addr().unwrap(),
-        events_socket.local_addr().unwrap(),
-    );
-    (d, gossip, replicate, requests_socket, events_socket)
-}
-
 #[cfg(test)]
 pub mod tests {
     use bank::Bank;
     use bincode::serialize;
-    use crdt::Crdt;
+    use crdt::{Crdt, TestNode};
-    use crdt::ReplicatedData;
+    use data_replicator::DataReplicator;
     use entry::Entry;
-    use event::Event;
     use hash::{hash, Hash};
     use logger;
     use mint::Mint;
     use packet::BlobRecycler;
+    use result::Result;
     use signature::{KeyPair, KeyPairUtil};
     use std::collections::VecDeque;
     use std::net::UdpSocket;
@@ -170,9 +151,19 @@ pub mod tests {
     use std::sync::{Arc, RwLock};
     use std::time::Duration;
     use streamer;
+    use transaction::Transaction;
     use tvu::Tvu;

-    /// Test that mesasge sent from leader to target1 and repliated to target2
+    fn new_replicator(
+        crdt: Arc<RwLock<Crdt>>,
+        listen: UdpSocket,
+        exit: Arc<AtomicBool>,
+    ) -> Result<DataReplicator> {
+        let window = streamer::default_window();
+        let send_sock = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
+        DataReplicator::new(crdt, window, listen, send_sock, exit)
+    }
+    /// Test that message sent from leader to target1 and replicated to target2
     #[test]
     fn test_replicate() {
         logger::setup();
@@ -186,9 +177,7 @@ pub mod tests {
         crdt_l.set_leader(leader.data.id);

         let cref_l = Arc::new(RwLock::new(crdt_l));
-        let t_l_gossip = Crdt::gossip(cref_l.clone(), exit.clone());
-        let window1 = streamer::default_window();
-        let t_l_listen = Crdt::listen(cref_l, window1, leader.sockets.gossip, exit.clone());
+        let dr_l = new_replicator(cref_l, leader.sockets.gossip, exit.clone()).unwrap();

         //start crdt2
         let mut crdt2 = Crdt::new(target2.data.clone());
@@ -196,9 +185,7 @@ pub mod tests {
         crdt2.set_leader(leader.data.id);
         let leader_id = leader.data.id;
         let cref2 = Arc::new(RwLock::new(crdt2));
-        let t2_gossip = Crdt::gossip(cref2.clone(), exit.clone());
-        let window2 = streamer::default_window();
-        let t2_listen = Crdt::listen(cref2, window2, target2.sockets.gossip, exit.clone());
+        let dr_2 = new_replicator(cref2, target2.sockets.gossip, exit.clone()).unwrap();

         // setup some blob services to send blobs into the socket
         // to simulate the source peer and get blobs out of the socket to
@@ -252,7 +239,7 @@ pub mod tests {
             bank.register_entry_id(&cur_hash);
             cur_hash = hash(&cur_hash);

-            let tr1 = Event::new_transaction(
+            let tx0 = Transaction::new(
                 &mint.keypair(),
                 bob_keypair.pubkey(),
                 transfer_amount,
@@ -260,7 +247,7 @@ pub mod tests {
             );
             bank.register_entry_id(&cur_hash);
             cur_hash = hash(&cur_hash);
-            let entry1 = Entry::new(&cur_hash, i + num_blobs, vec![tr1]);
+            let entry1 = Entry::new(&cur_hash, i + num_blobs, vec![tx0]);
             bank.register_entry_id(&cur_hash);
             cur_hash = hash(&cur_hash);

@@ -296,52 +283,13 @@ pub mod tests {
         for t in tvu.thread_hdls {
             t.join().expect("join");
         }
-        t2_gossip.join().expect("join");
-        t2_listen.join().expect("join");
+        for t in dr_l.thread_hdls {
+            t.join().expect("join");
+        }
+        for t in dr_2.thread_hdls {
+            t.join().expect("join");
+        }
         t_receiver.join().expect("join");
         t_responder.join().expect("join");
-        t_l_gossip.join().expect("join");
-        t_l_listen.join().expect("join");
-    }
-    pub struct Sockets {
-        pub gossip: UdpSocket,
-        pub requests: UdpSocket,
-        pub replicate: UdpSocket,
-        pub event: UdpSocket,
-        pub respond: UdpSocket,
-        pub broadcast: UdpSocket,
-    }
-    pub struct TestNode {
-        pub data: ReplicatedData,
-        pub sockets: Sockets,
-    }
-    impl TestNode {
-        pub fn new() -> TestNode {
-            let gossip = UdpSocket::bind("0.0.0.0:0").unwrap();
-            let requests = UdpSocket::bind("0.0.0.0:0").unwrap();
-            let event = UdpSocket::bind("0.0.0.0:0").unwrap();
-            let replicate = UdpSocket::bind("0.0.0.0:0").unwrap();
-            let respond = UdpSocket::bind("0.0.0.0:0").unwrap();
-            let broadcast = UdpSocket::bind("0.0.0.0:0").unwrap();
-            let pubkey = KeyPair::new().pubkey();
-            let data = ReplicatedData::new(
-                pubkey,
-                gossip.local_addr().unwrap(),
-                replicate.local_addr().unwrap(),
-                requests.local_addr().unwrap(),
-                event.local_addr().unwrap(),
-            );
-            TestNode {
-                data: data,
-                sockets: Sockets {
-                    gossip,
-                    requests,
-                    replicate,
-                    event,
-                    respond,
-                    broadcast,
-                },
-            }
-        }
-    }
     }
 }
@@ -8,7 +8,7 @@ use std::io::Write;
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::mpsc::{channel, Receiver};
 use std::sync::{Arc, Mutex};
-use std::thread::{spawn, JoinHandle};
+use std::thread::{Builder, JoinHandle};
 use streamer;

 pub struct WriteStage {
@@ -26,19 +26,22 @@ impl WriteStage {
         entry_receiver: Receiver<Entry>,
     ) -> Self {
         let (blob_sender, blob_receiver) = channel();
-        let thread_hdl = spawn(move || loop {
-            let entry_writer = EntryWriter::new(&bank);
-            let _ = entry_writer.write_and_send_entries(
-                &blob_sender,
-                &blob_recycler,
-                &writer,
-                &entry_receiver,
-            );
-            if exit.load(Ordering::Relaxed) {
-                info!("broadcat_service exiting");
-                break;
-            }
-        });
+        let thread_hdl = Builder::new()
+            .name("solana-writer".to_string())
+            .spawn(move || loop {
+                let entry_writer = EntryWriter::new(&bank);
+                let _ = entry_writer.write_and_send_entries(
+                    &blob_sender,
+                    &blob_recycler,
+                    &writer,
+                    &entry_receiver,
+                );
+                if exit.load(Ordering::Relaxed) {
+                    info!("broadcast_service exiting");
+                    break;
+                }
+            })
+            .unwrap();

         WriteStage {
             thread_hdl,
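The switch from a bare `spawn` to `std::thread::Builder` buys two things: the thread gets a name (which shows up in panic messages and debuggers), and spawn failure surfaces as an `io::Result` instead of a panic, hence the trailing `.unwrap()`. A standalone illustration:

```rust
use std::thread::Builder;

fn main() {
    // spawn() panics on failure and leaves the thread unnamed; Builder
    // names the thread and returns io::Result from spawn().
    let handle = Builder::new()
        .name("solana-example".to_string())
        .spawn(|| {
            let name = std::thread::current().name().map(String::from);
            println!("running on {:?}", name); // Some("solana-example")
        })
        .unwrap();
    handle.join().unwrap();
}
```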
@@ -52,16 +55,19 @@ impl WriteStage {
         entry_receiver: Receiver<Entry>,
     ) -> Self {
         let (_blob_sender, blob_receiver) = channel();
-        let thread_hdl = spawn(move || {
-            let entry_writer = EntryWriter::new(&bank);
-            loop {
-                let _ = entry_writer.drain_entries(&entry_receiver);
-                if exit.load(Ordering::Relaxed) {
-                    info!("drain_service exiting");
-                    break;
-                }
-            }
-        });
+        let thread_hdl = Builder::new()
+            .name("solana-drain".to_string())
+            .spawn(move || {
+                let entry_writer = EntryWriter::new(&bank);
+                loop {
+                    let _ = entry_writer.drain_entries(&entry_receiver);
+                    if exit.load(Ordering::Relaxed) {
+                        info!("drain_service exiting");
+                        break;
+                    }
+                }
+            })
+            .unwrap();

         WriteStage {
             thread_hdl,
184 tests/data_replicator.rs (new file)
@@ -0,0 +1,184 @@
+#[macro_use]
+extern crate log;
+extern crate rayon;
+extern crate solana;
+
+use rayon::iter::*;
+use solana::crdt::{Crdt, TestNode};
+use solana::data_replicator::DataReplicator;
+use solana::logger;
+use solana::packet::Blob;
+use std::net::UdpSocket;
+use std::sync::atomic::{AtomicBool, Ordering};
+use std::sync::{Arc, RwLock};
+use std::thread::sleep;
+use std::time::Duration;
+
+fn test_node(exit: Arc<AtomicBool>) -> (Arc<RwLock<Crdt>>, DataReplicator, UdpSocket) {
+    let tn = TestNode::new();
+    let crdt = Crdt::new(tn.data.clone());
+    let c = Arc::new(RwLock::new(crdt));
+    let w = Arc::new(RwLock::new(vec![]));
+    let d = DataReplicator::new(
+        c.clone(),
+        w,
+        tn.sockets.gossip,
+        tn.sockets.gossip_send,
+        exit,
+    ).unwrap();
+    (c, d, tn.sockets.replicate)
+}
+
+/// Test that the network converges.
+/// Run until every node in the network has a full ReplicatedData set.
+/// Check that nodes stop sending updates after all the ReplicatedData has been shared.
+/// Tests that actually use this function are below.
+fn run_gossip_topo<F>(topo: F)
+where
+    F: Fn(&Vec<(Arc<RwLock<Crdt>>, DataReplicator, UdpSocket)>) -> (),
+{
+    let num: usize = 5;
+    let exit = Arc::new(AtomicBool::new(false));
+    let listen: Vec<_> = (0..num).map(|_| test_node(exit.clone())).collect();
+    topo(&listen);
+    let mut done = true;
+    for i in 0..(num * 32) {
+        done = false;
+        trace!("round {}", i);
+        for &(ref c, _, _) in listen.iter() {
+            if num == c.read().unwrap().convergence() as usize {
+                done = true;
+                break;
+            }
+        }
+        //at least 1 node converged
+        if done == true {
+            break;
+        }
+        sleep(Duration::new(1, 0));
+    }
+    exit.store(true, Ordering::Relaxed);
+    for (c, dr, _) in listen.into_iter() {
+        for j in dr.thread_hdls.into_iter() {
+            j.join().unwrap();
+        }
+        // make it clear what failed
+        // protocol is too chatty, updates should stop after everyone receives `num`
+        assert!(c.read().unwrap().update_index <= num as u64);
+        // protocol is not chatty enough, everyone should get `num` entries
+        assert_eq!(c.read().unwrap().table.len(), num);
+    }
+    assert!(done);
+}
+
+/// ring a -> b -> c -> d -> e -> a
+#[test]
+fn gossip_ring() {
+    logger::setup();
+    run_gossip_topo(|listen| {
+        let num = listen.len();
+        for n in 0..num {
+            let y = n % listen.len();
+            let x = (n + 1) % listen.len();
+            let mut xv = listen[x].0.write().unwrap();
+            let yv = listen[y].0.read().unwrap();
+            let mut d = yv.table[&yv.me].clone();
+            d.version = 0;
+            xv.insert(&d);
+        }
+    });
+}
+
+/// star a -> (b,c,d,e)
+#[test]
+fn gossip_star() {
+    logger::setup();
+    run_gossip_topo(|listen| {
+        let num = listen.len();
+        for n in 0..(num - 1) {
+            let x = 0;
+            let y = (n + 1) % listen.len();
+            let mut xv = listen[x].0.write().unwrap();
+            let yv = listen[y].0.read().unwrap();
+            let mut yd = yv.table[&yv.me].clone();
+            yd.version = 0;
+            xv.insert(&yd);
+            trace!("star leader {:?}", &xv.me[..4]);
+        }
+    });
+}
+
+/// rstar a <- (b,c,d,e)
+#[test]
+fn gossip_rstar() {
+    logger::setup();
+    run_gossip_topo(|listen| {
+        let num = listen.len();
+        let xd = {
+            let xv = listen[0].0.read().unwrap();
+            xv.table[&xv.me].clone()
+        };
+        trace!("rstar leader {:?}", &xd.id[..4]);
+        for n in 0..(num - 1) {
+            let y = (n + 1) % listen.len();
+            let mut yv = listen[y].0.write().unwrap();
+            yv.insert(&xd);
+            trace!("rstar insert {:?} into {:?}", &xd.id[..4], &yv.me[..4]);
+        }
+    });
+}
+
+#[test]
+pub fn crdt_retransmit() {
+    logger::setup();
+    let exit = Arc::new(AtomicBool::new(false));
+    trace!("c1:");
+    let (c1, dr1, tn1) = test_node(exit.clone());
+    trace!("c2:");
+    let (c2, dr2, tn2) = test_node(exit.clone());
+    trace!("c3:");
+    let (c3, dr3, tn3) = test_node(exit.clone());
+    let c1_data = c1.read().unwrap().my_data().clone();
+    c1.write().unwrap().set_leader(c1_data.id);
+
+    c2.write().unwrap().insert(&c1_data);
+    c3.write().unwrap().insert(&c1_data);
+
+    c2.write().unwrap().set_leader(c1_data.id);
+    c3.write().unwrap().set_leader(c1_data.id);
+
+    //wait to converge
+    trace!("waiting to converge:");
+    let mut done = false;
+    for _ in 0..30 {
+        done = c1.read().unwrap().table.len() == 3 && c2.read().unwrap().table.len() == 3
+            && c3.read().unwrap().table.len() == 3;
+        if done {
+            break;
+        }
+        sleep(Duration::new(1, 0));
+    }
+    assert!(done);
+    let mut b = Blob::default();
+    b.meta.size = 10;
+    Crdt::retransmit(&c1, &Arc::new(RwLock::new(b)), &tn1).unwrap();
+    let res: Vec<_> = [tn1, tn2, tn3]
+        .into_par_iter()
+        .map(|s| {
+            let mut b = Blob::default();
+            s.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
+            let res = s.recv_from(&mut b.data);
+            res.is_err() //true if failed to receive the retransmit packet
+        })
+        .collect();
+    //r2 and r3 should succeed; r1 was the sender, so it should fail to receive the packet
+    assert_eq!(res, [true, false, false]);
+    exit.store(true, Ordering::Relaxed);
+    let mut threads = vec![];
+    threads.extend(dr1.thread_hdls.into_iter());
+    threads.extend(dr2.thread_hdls.into_iter());
+    threads.extend(dr3.thread_hdls.into_iter());
+    for t in threads.into_iter() {
+        t.join().unwrap();
+    }
+}
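`run_gossip_topo` above and `converge` in the next file share one harness shape: poll a convergence predicate for a bounded number of rounds, sleeping between attempts, then assert, so a wedged network fails the test instead of hanging it. A stripped-down sketch of that shape, with illustrative names only:

```rust
use std::thread::sleep;
use std::time::Duration;

/// Poll `pred` up to `rounds` times, sleeping `interval` between attempts.
/// Returns true as soon as the predicate holds, false if it never does.
fn poll_until<F: FnMut() -> bool>(rounds: usize, interval: Duration, mut pred: F) -> bool {
    for _ in 0..rounds {
        if pred() {
            return true;
        }
        sleep(interval);
    }
    false
}

fn main() {
    let mut count = 0;
    let done = poll_until(30, Duration::from_millis(1), || {
        count += 1; // stand-in for "how many nodes have a full table"
        count >= 5
    });
    assert!(done); // a non-converging network would fail here, not hang
}
```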
174 tests/multinode.rs (new file)
@@ -0,0 +1,174 @@
+#[macro_use]
+extern crate log;
+extern crate bincode;
+extern crate solana;
+
+use solana::bank::Bank;
+use solana::crdt::TestNode;
+use solana::crdt::{Crdt, ReplicatedData};
+use solana::data_replicator::DataReplicator;
+use solana::logger;
+use solana::mint::Mint;
+use solana::server::Server;
+use solana::signature::{KeyPair, KeyPairUtil, PublicKey};
+use solana::streamer::default_window;
+use solana::thin_client::ThinClient;
+use std::io;
+use std::io::sink;
+use std::net::UdpSocket;
+use std::sync::atomic::{AtomicBool, Ordering};
+use std::sync::{Arc, RwLock};
+use std::thread::sleep;
+use std::thread::JoinHandle;
+use std::time::Duration;
+
+fn validator(
+    leader: &ReplicatedData,
+    exit: Arc<AtomicBool>,
+    alice: &Mint,
+    threads: &mut Vec<JoinHandle<()>>,
+) {
+    let validator = TestNode::new();
+    let replicant_bank = Bank::new(&alice);
+    let mut ts = Server::new_validator(
+        replicant_bank,
+        validator.data.clone(),
+        validator.sockets.requests,
+        validator.sockets.respond,
+        validator.sockets.replicate,
+        validator.sockets.gossip,
+        leader.clone(),
+        exit.clone(),
+    );
+    threads.append(&mut ts.thread_hdls);
+}
+
+fn converge(
+    leader: &ReplicatedData,
+    exit: Arc<AtomicBool>,
+    num_nodes: usize,
+    threads: &mut Vec<JoinHandle<()>>,
+) -> Vec<ReplicatedData> {
+    //let's spy on the network
+    let mut spy = TestNode::new();
+    let daddr = "0.0.0.0:0".parse().unwrap();
+    let me = spy.data.id.clone();
+    spy.data.replicate_addr = daddr;
+    spy.data.requests_addr = daddr;
+    let mut spy_crdt = Crdt::new(spy.data);
+    spy_crdt.insert(&leader);
+    spy_crdt.set_leader(leader.id);
+    let spy_ref = Arc::new(RwLock::new(spy_crdt));
+    let spy_window = default_window();
+    let dr = DataReplicator::new(
+        spy_ref.clone(),
+        spy_window,
+        spy.sockets.gossip,
+        spy.sockets.gossip_send,
+        exit,
+    ).unwrap();
+    //wait for the network to converge
+    let mut converged = false;
+    for _ in 0..30 {
+        let num = spy_ref.read().unwrap().convergence();
+        if num == num_nodes as u64 {
+            converged = true;
+            break;
+        }
+        sleep(Duration::new(1, 0));
+    }
+    assert!(converged);
+    threads.extend(dr.thread_hdls.into_iter());
+    let v: Vec<ReplicatedData> = spy_ref
+        .read()
+        .unwrap()
+        .table
+        .values()
+        .into_iter()
+        .filter(|x| x.id != me)
+        .map(|x| x.clone())
+        .collect();
+    v.clone()
+}
+
+#[test]
+fn test_multi_node() {
+    logger::setup();
+    const N: usize = 5;
+    trace!("test_multi_accountant_stub");
+    let leader = TestNode::new();
+    let alice = Mint::new(10_000);
+    let bob_pubkey = KeyPair::new().pubkey();
+    let exit = Arc::new(AtomicBool::new(false));
+
+    let leader_bank = Bank::new(&alice);
+    let server = Server::new_leader(
+        leader_bank,
+        None,
+        leader.data.clone(),
+        leader.sockets.requests,
+        leader.sockets.transaction,
+        leader.sockets.broadcast,
+        leader.sockets.respond,
+        leader.sockets.gossip,
+        exit.clone(),
+        sink(),
+    );
+
+    let mut threads = server.thread_hdls;
+    for _ in 0..N {
+        validator(&leader.data, exit.clone(), &alice, &mut threads);
+    }
+    let servers = converge(&leader.data, exit.clone(), N + 2, &mut threads);
+    //contains the leader addr as well
+    assert_eq!(servers.len(), N + 1);
+    //verify leader can do transfer
+    let leader_balance = tx_and_retry_get_balance(&leader.data, &alice, &bob_pubkey).unwrap();
+    assert_eq!(leader_balance, 500);
+    //verify validator has the same balance
+    let mut success = 0usize;
+    for server in servers.iter() {
+        let mut client = mk_client(server);
+        if let Ok(bal) = client.poll_get_balance(&bob_pubkey) {
+            trace!("validator balance {}", bal);
+            if bal == leader_balance {
+                success += 1;
+            }
+        }
+    }
+    assert_eq!(success, servers.len());
+    exit.store(true, Ordering::Relaxed);
+    for t in threads {
+        t.join().unwrap();
+    }
+}
+
+fn mk_client(leader: &ReplicatedData) -> ThinClient {
+    let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
+    requests_socket
+        .set_read_timeout(Some(Duration::new(1, 0)))
+        .unwrap();
+    let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
+
+    ThinClient::new(
+        leader.requests_addr,
+        requests_socket,
+        leader.transactions_addr,
+        transactions_socket,
+    )
+}
+
+fn tx_and_retry_get_balance(
+    leader: &ReplicatedData,
+    alice: &Mint,
+    bob_pubkey: &PublicKey,
+) -> io::Result<i64> {
+    let mut client = mk_client(leader);
+    trace!("getting leader last_id");
+    let last_id = client.get_last_id();
+    info!("executing leader transfer");
+    let _sig = client
+        .transfer(500, &alice.keypair(), *bob_pubkey, &last_id)
+        .unwrap();
+    client.poll_get_balance(bob_pubkey)
+}