Compare commits
v0.6.0-alpha...v0.6.0-beta (61 commits)
Commits:
ef8eac92e3, 9c9c63572b, 6c0c6de1d0, b57aecc24c, 290dde60a0, 38623785f9, 256ecc7208,
76b06b47ba, cf15cf587f, 134c7add57, ac0791826a, d2622b7798, f82cbf3a27, aa7e3df8d6,
ad00d7bd9c, 8d1f82c34d, 0cb2036e3a, 2b1e90b0a5, f2ccc133a2, 5e824b39dd, 41efcae64b,
cf5671d058, 2570bba6b1, 71cb7d5c97, 0df6541d5e, 52145caf7e, 86a50ae9e1, c64cfb74f3,
26153d9919, 5af922722f, b70d730b32, bf4b856e0c, 0cf0ae6755, 29061cff39, b7eec4c89f,
a3854c229e, dcde256433, 931bdbd5cd, b7bd59c344, 2dbf9a6017, fe93bba457, 6e35f54738,
089294a85e, 25c0b44641, 58c1589688, bb53f69016, 75659ca042, fc00594ea4, 8d26be8b89,
af4e95ae0f, ffb4a7aa78, dcaeacc507, 4f377e6710, 122db85727, a598e4aa74, 733b31ebbd,
dac9775de0, 46c19a5783, aaeb5ba52f, 9f5a3d6064, 4cdf873f98
.travis.yml (deleted, 36 lines)
@@ -1,36 +0,0 @@
-language: rust
-required: sudo
-services:
-  - docker
-matrix:
-  allow_failures:
-    - rust: nightly
-  include:
-    - rust: stable
-    - rust: nightly
-      env:
-        - FEATURES='unstable'
-before_script: |
-  export PATH="$PATH:$HOME/.cargo/bin"
-  rustup component add rustfmt-preview
-script:
-  - cargo fmt -- --write-mode=diff
-  - cargo build --verbose --features "$FEATURES"
-  - cargo test --verbose --features "$FEATURES"
-after_success: |
-  docker run -it --rm --security-opt seccomp=unconfined --volume "$PWD:/volume" elmtai/docker-rust-kcov
-  bash <(curl -s https://codecov.io/bash) -s target/cov
-before_deploy:
-  - cargo package
-deploy:
-  provider: releases
-  api-key:
-    secure: j3cPAbOuGjXuSl+j+JL/4GWxD6dA0/f5NQ0Od4LBVewPmnKiqimGOJ1xj3eKth+ZzwuCpcHwBIIR54NEDSJgHaYDXiukc05qCeToIPqOc0wGJ+GcUrWAy8M7Wo981I/0SVYDAnLv4+ivvJxYE7b2Jr3pHsQAzH7ClY8g2xu9HlNkScEsc4cizA9Sf3zIqtIoi480vxtQ5ghGOUCkwZuG3+Dg+IGnnjvE4qQOYey1del+KIDkmbHjry7iFWPF6fWK2187JNt6XiO2/2tZt6BkMEmdRnkw1r/wL9tj0AbqLgyBjzlI4QQfkBwsuX3ZFeNGArn71s7WmAUGyVOl0DJXfwN/BEUxMTd+lkMjuMNUxaU/hxVZ7zAWH55KJK+qf6B95DLVWr7ypjfJLLBcds+JfkBNoReWLM1XoDUKAU+wBf1b+PKiywNfNascjZTcz6QGe94sa7l/T4PxtHDSREmflFgu1Hysg61WuODDwTTHGrsg9ZuvlINnqQhXsJo9r9+TMIGwwWHcvLQDNz2TPALCfcLtd+RsevdOeXItYa0KD3D4gKGv36bwAVDpCIoZnSeiaT/PUyjilFtJjBpKz9BbOKgOtQhHGrHucn0WOF+bu/t3SFaJKQf/W+hLwO3NV8yiL5LQyHVm/TPY62nBfne2KEqi/LOFxgKG35aACouP0ig=
-  file: target/package/solana-$TRAVIS_TAG.crate
-  skip_cleanup: true
-  on:
-    tags: true
-    condition: "$TRAVIS_RUST_VERSION = stable"
-
-after_deploy:
-  - cargo publish --token "$CRATES_IO_TOKEN"
Cargo.toml (10 changes)
@@ -1,7 +1,7 @@
 [package]
 name = "solana"
 description = "The World's Fastest Blockchain"
-version = "0.6.0-alpha"
+version = "0.6.0-beta"
 documentation = "https://docs.rs/solana"
 homepage = "http://solana.com/"
 repository = "https://github.com/solana-labs/solana"
@@ -17,12 +17,8 @@ name = "solana-client-demo"
 path = "src/bin/client-demo.rs"
 
 [[bin]]
-name = "solana-multinode-demo"
-path = "src/bin/multinode-demo.rs"
-
-[[bin]]
-name = "solana-testnode"
-path = "src/bin/testnode.rs"
+name = "solana-fullnode"
+path = "src/bin/fullnode.rs"
 
 [[bin]]
 name = "solana-genesis"
README.md (84 changes)
@@ -1,6 +1,6 @@
 [](https://crates.io/crates/solana)
 [](https://docs.rs/solana)
 [](https://buildkite.com/solana-labs/solana)
 [](https://codecov.io/gh/solana-labs/solana)
 
 Disclaimer
@@ -36,57 +36,62 @@ $ git clone https://github.com/solana-labs/solana.git
 $ cd solana
 ```
 
-The testnode server is initialized with a ledger from stdin and
+The fullnode server is initialized with a ledger from stdin and
 generates new ledger entries on stdout. To create the input ledger, we'll need
 to create *the mint* and use it to generate a *genesis ledger*. It's done in
 two steps because the mint-demo.json file contains private keys that will be
 used later in this demo.
 
 ```bash
 $ echo 1000000000 | cargo run --release --bin solana-mint-demo > mint-demo.json
 $ cat mint-demo.json | cargo run --release --bin solana-genesis-demo > genesis.log
 ```
 
-Now you can start the server:
+Before you start the server, make sure you know the IP address of the machine you want to be the leader for the demo, and make sure that udp ports 8000-10000 are open on all the machines you want to test with. Now you can start the server:
 
 ```bash
-$ cat genesis.log | cargo run --release --bin solana-testnode > transactions0.log
+$ cat ./multinode-demo/leader.sh
+#!/bin/bash
+export RUST_LOG=solana=info
+sudo sysctl -w net.core.rmem_max=26214400
+cat genesis.log leader.log | cargo run --release --features cuda --bin solana-fullnode -- -s leader.json -l leader.json -b 8000 -d 2>&1 | tee leader-tee.log
+$ ./multinode-demo/leader.sh
 ```
 
 Wait a few seconds for the server to initialize. It will print "Ready." when it's safe
 to start sending it transactions.
 
+Now you can start some validators:
+
+```bash
+$ cat ./multinode-demo/validator.sh
+#!/bin/bash
+rsync -v -e ssh $1:~/solana/mint-demo.json .
+rsync -v -e ssh $1:~/solana/leader.json .
+rsync -v -e ssh $1:~/solana/genesis.log .
+rsync -v -e ssh $1:~/solana/leader.log .
+rsync -v -e ssh $1:~/solana/libcuda_verify_ed25519.a .
+export RUST_LOG=solana=info
+sudo sysctl -w net.core.rmem_max=26214400
+cat genesis.log leader.log | cargo run --release --features cuda --bin solana-fullnode -- -l validator.json -s validator.json -v leader.json -b 9000 -d 2>&1 | tee validator-tee.log
+$ ./multinode-demo/validator.sh ubuntu@10.0.1.51 #The leader machine
+```
 
 Then, in a separate shell, let's execute some transactions. Note we pass in
 the JSON configuration file here, not the genesis ledger.
 
 ```bash
-$ cat mint-demo.json | cargo run --release --bin solana-client-demo
+$ cat ./multinode-demo/client.sh
+#!/bin/bash
+export RUST_LOG=solana=info
+rsync -v -e ssh $1:~/solana/leader.json .
+rsync -v -e ssh $1:~/solana/mint-demo.json .
+cat mint-demo.json | cargo run --release --bin solana-client-demo -- -l leader.json -c 8100 -n 1
+$ ./multinode-demo/client.sh ubuntu@10.0.1.51 #The leader machine
 ```
 
 Now kill the server with Ctrl-C, and take a look at the ledger. You should
 see something similar to:
 
 ```json
 {"num_hashes":27,"id":[0, "..."],"event":"Tick"}
 {"num_hashes":3,"id":[67, "..."],"event":{"Transaction":{"tokens":42}}}
 {"num_hashes":27,"id":[0, "..."],"event":"Tick"}
 ```
 
 Now restart the server from where we left off. Pass it both the genesis ledger, and
 the transaction ledger.
 
 ```bash
 $ cat genesis.log transactions0.log | cargo run --release --bin solana-testnode > transactions1.log
 ```
 
 Lastly, run the client demo again, and verify that all funds were spent in the
 previous round, and so no additional transactions are added.
 
 ```bash
 $ cat mint-demo.json | cargo run --release --bin solana-client-demo
 ```
 
 Stop the server again, and verify there are only Tick entries, and no Transaction entries.
+Try starting more validators and rerunning the client demo!
 
 Developing
 ===
@@ -121,18 +126,23 @@ Testing
 Run the test suite:
 
 ```bash
-cargo test
+$ cargo test
 ```
 
+To emulate all the tests that will run on a Pull Request, run:
+```bash
+$ ./ci/run-local.sh
+```
+
 Debugging
 ---
 
 There are some useful debug messages in the code, you can enable them on a per-module and per-level
-basis with the normal RUST\_LOG environment variable. Run the testnode with this syntax:
+basis with the normal RUST\_LOG environment variable. Run the fullnode with this syntax:
 ```bash
-$ RUST_LOG=solana::streamer=debug,solana::accountant_skel=info cat genesis.log | ./target/release/solana-testnode > transactions0.log
+$ RUST_LOG=solana::streamer=debug,solana::server=info cat genesis.log | ./target/release/solana-fullnode > transactions0.log
 ```
-to see the debug and info sections for streamer and accountant\_skel respectively. Generally
+to see the debug and info sections for streamer and server respectively. Generally
 we are using debug for infrequent debug messages, trace for potentially frequent messages and
 info for performance-related logging.
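As an aside, a minimal sketch of how that convention maps onto Rust's logging macros. This is illustrative only, not repository code; it assumes the `log` and `env_logger` crates, with the module paths in `RUST_LOG` selecting which statements print:

```rust
#[macro_use]
extern crate log; // assumed dependency: log
extern crate env_logger; // assumed dependency: env_logger

fn main() {
    // Reads RUST_LOG, e.g. RUST_LOG=solana::streamer=debug,solana::server=info
    env_logger::init();
    info!("performance-related logging, e.g. batch timings");
    debug!("infrequent debug messages");
    trace!("potentially frequent messages, e.g. per-packet detail");
}
```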
@@ -164,8 +174,10 @@ Code coverage
 
 To generate code coverage statistics, run kcov via Docker:
 
 ```bash
-$ docker run -it --rm --security-opt seccomp=unconfined --volume "$PWD:/volume" elmtai/docker-rust-kcov
+$ ./ci/coverage.sh
 ```
+The coverage report will be written to `./target/cov/index.html`
 
 Why coverage? While most see coverage as a code quality metric, we see it primarily as a developer
 productivity metric. When a developer makes a change to the codebase, presumably it's a *solution* to
ci/.gitignore (new vendored file, 2 additions)
@@ -0,0 +1,2 @@
+/node_modules/
+/package-lock.json
ci/buildkite.yml
@@ -1,37 +1,16 @@
 steps:
-  - command: "ci/coverage.sh || true"
-    label: "coverage"
-    # TODO: Run coverage in a docker image rather than assuming kcov/cargo-kcov
-    # is installed on the build agent...
-    #plugins:
-    #  docker#v1.1.1:
-    #    image: "rust"
-    #    user: "998:997" # buildkite-agent:buildkite-agent
-    #    environment:
-    #      - CODECOV_TOKEN=$CODECOV_TOKEN
-  - command: "ci/test-stable.sh"
-    label: "stable [public]"
-    plugins:
-      docker#v1.1.1:
-        image: "rust"
-        user: "998:997" # buildkite-agent:buildkite-agent
-  - command: "ci/test-nightly.sh || true"
-    label: "nightly - FAILURES IGNORED [public]"
-    plugins:
-      docker#v1.1.1:
-        image: "rustlang/rust:nightly"
-        user: "998:997" # buildkite-agent:buildkite-agent
-  - command: "ci/test-ignored.sh || true"
-    label: "ignored - FAILURES IGNORED [public]"
+  - command: "ci/coverage.sh"
+    name: "coverage [public]"
+  - command: "ci/docker-run.sh rust ci/test-stable.sh"
+    name: "stable [public]"
+  - command: "ci/docker-run.sh rustlang/rust:nightly ci/test-nightly.sh || true"
+    name: "nightly - FAILURES IGNORED [public]"
+  - command: "ci/docker-run.sh rust ci/test-ignored.sh"
+    name: "ignored [public]"
   - command: "ci/test-cuda.sh"
-    label: "cuda"
+    name: "cuda"
+  - command: "ci/shellcheck.sh"
+    name: "shellcheck [public]"
   - wait
   - command: "ci/publish.sh"
-    label: "publish release artifacts"
-    plugins:
-      docker#v1.1.1:
-        image: "rust"
-        user: "998:997" # buildkite-agent:buildkite-agent
-    environment:
-      - BUILDKITE_TAG=$BUILDKITE_TAG
-      - CRATES_IO_TOKEN=$CRATES_IO_TOKEN
+    name: "publish release artifacts"
ci/coverage.sh
@@ -1,25 +1,21 @@
 #!/bin/bash -e
 
-cd $(dirname $0)/..
+cd "$(dirname "$0")/.."
 
-if [[ -r ~/.cargo/env ]]; then
-  # Pick up local install of kcov/cargo-kcov
-  source ~/.cargo/env
-fi
+ci/docker-run.sh evilmachines/rust-cargo-kcov \
+  bash -exc "\
+    export RUST_BACKTRACE=1; \
+    cargo build --verbose; \
+    cargo kcov --lib --verbose; \
+  "
 
-rustc --version
-cargo --version
-kcov --version
-cargo-kcov --version
-
-export RUST_BACKTRACE=1
-cargo build
-cargo kcov
 echo Coverage report:
 ls -l target/cov/index.html
 
-if [[ -z "$CODECOV_TOKEN" ]]; then
-  echo CODECOV_TOKEN undefined
-  exit 1
-else
-  bash <(curl -s https://codecov.io/bash)
-fi
+bash <(curl -s https://codecov.io/bash)
+exit 0
ci/docker-run.sh (new executable file, 41 additions)
@@ -0,0 +1,41 @@
+#!/bin/bash -e
+
+usage() {
+  echo "Usage: $0 [docker image name] [command]"
+  echo
+  echo Runs command in the specified docker image with
+  echo a CI-appropriate environment
+  echo
+}
+
+cd "$(dirname "$0")/.."
+
+IMAGE="$1"
+if [[ -z "$IMAGE" ]]; then
+  echo Error: image not defined
+  exit 1
+fi
+
+docker pull "$IMAGE"
+shift
+
+ARGS=(--workdir /solana --volume "$PWD:/solana" --rm)
+
+ARGS+=(--env "CARGO_HOME=/solana/.cargo")
+
+# kcov tries to set the personality of the binary which docker
+# doesn't allow by default.
+ARGS+=(--security-opt "seccomp=unconfined")
+
+# Ensure files are created with the current host uid/gid
+ARGS+=(--user "$(id -u):$(id -g)")
+
+# Environment variables to propagate into the container
+ARGS+=(
+  --env BUILDKITE_TAG
+  --env CODECOV_TOKEN
+  --env CRATES_IO_TOKEN
+)
+
+set -x
+docker run "${ARGS[@]}" "$IMAGE" "$@"
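As the ci/buildkite.yml changes above show, the script takes the image name first and the containerized command after it, e.g. `ci/docker-run.sh rust ci/test-stable.sh`; the same wrapper is reused by ci/coverage.sh and ci/publish.sh below.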
ci/publish.sh
@@ -1,6 +1,6 @@
 #!/bin/bash -e
 
-cd $(dirname $0)/..
+cd "$(dirname "$0")/.."
 
 if [[ -z "$BUILDKITE_TAG" ]]; then
   # Skip publish if this is not a tagged release
@@ -12,8 +12,8 @@ if [[ -z "$CRATES_IO_TOKEN" ]]; then
   exit 1
 fi
 
-cargo package
 # TODO: Ensure the published version matches the contents of BUILDKITE_TAG
-cargo publish --token "$CRATES_IO_TOKEN"
+ci/docker-run.sh rust \
+  bash -exc "cargo package; cargo publish --token $CRATES_IO_TOKEN"
 
 exit 0
ci/run-local.sh (new executable file, 19 additions)
@@ -0,0 +1,19 @@
+#!/bin/bash -e
+#
+# Run the entire buildkite CI pipeline locally for pre-testing before sending a
+# Github pull request
+#
+
+cd "$(dirname "$0")/.."
+BKRUN=ci/node_modules/.bin/bkrun
+
+if [[ ! -x $BKRUN ]]; then
+  (
+    set -x
+    cd ci/
+    npm install bkrun
+  )
+fi
+
+set -x
+./ci/node_modules/.bin/bkrun ci/buildkite.yml
ci/shellcheck.sh (new executable file, 11 additions)
@@ -0,0 +1,11 @@
+#!/bin/bash -e
+#
+# Reference: https://github.com/koalaman/shellcheck/wiki/Directive
+
+cd "$(dirname "$0")/.."
+
+set -x
+find . -name "*.sh" -not -regex ".*/.cargo/.*" -not -regex ".*/node_modules/.*" -print0 \
+  | xargs -0 \
+      ci/docker-run.sh koalaman/shellcheck --color=always --external-sources --shell=bash
+exit 0
ci/test-cuda.sh
@@ -1,17 +1,22 @@
 #!/bin/bash -e
 
-cd $(dirname $0)/..
+cd "$(dirname "$0")/.."
 
-if [[ -z "$libcuda_verify_ed25519_URL" ]]; then
-  echo libcuda_verify_ed25519_URL undefined
-  exit 1
+LIB=libcuda_verify_ed25519.a
+if [[ ! -r $LIB ]]; then
+  if [[ -z "${libcuda_verify_ed25519_URL:-}" ]]; then
+    echo "$0 skipped. Unable to locate $LIB"
+    exit 0
+  fi
+
+  export LD_LIBRARY_PATH=/usr/local/cuda/lib64
+  export PATH=$PATH:/usr/local/cuda/bin
+  curl -X GET -o $LIB "$libcuda_verify_ed25519_URL"
 fi
 
-export LD_LIBRARY_PATH=/usr/local/cuda/lib64
-export PATH=$PATH:/usr/local/cuda/bin
-curl -X GET -o libcuda_verify_ed25519.a "$libcuda_verify_ed25519_URL"
-
-source $HOME/.cargo/env
+# shellcheck disable=SC1090 # <-- shellcheck can't follow ~
+source ~/.cargo/env
 export RUST_BACKTRACE=1
 cargo test --features=cuda
 
 exit 0
ci/test-ignored.sh
@@ -1,6 +1,6 @@
 #!/bin/bash -e
 
-cd $(dirname $0)/..
+cd "$(dirname "$0")/.."
 
 rustc --version
 cargo --version
ci/test-nightly.sh
@@ -1,13 +1,14 @@
 #!/bin/bash -e
 
-cd $(dirname $0)/..
+cd "$(dirname "$0")/.."
 
 rustc --version
 cargo --version
 
 export RUST_BACKTRACE=1
 rustup component add rustfmt-preview
 cargo fmt -- --write-mode=diff
 cargo build --verbose --features unstable
 cargo test --verbose --features unstable
 cargo bench --verbose --features unstable
+
+exit 0
ci/test-stable.sh
@@ -1,10 +1,11 @@
 #!/bin/bash -e
 
-cd $(dirname $0)/..
+cd "$(dirname "$0")/.."
 
 rustc --version
 cargo --version
 
 export RUST_BACKTRACE=1
 rustup component add rustfmt-preview
 cargo fmt -- --write-mode=diff
 cargo build --verbose
multinode-demo/client.sh
@@ -1,7 +1,16 @@
-#!/bin/bash
-cd /home/ubuntu/solana
-#git pull
-export RUST_LOG=solana::crdt=trace
-# scp ubuntu@18.206.1.146:~/solana/leader.json .
-# scp ubuntu@18.206.1.146:~/solana/mint-demo.json .
-cat mint-demo.json | cargo run --release --bin solana-multinode-demo -- -l leader.json -c 10.0.5.179:8100 -n 3
+#!/bin/bash -e
+
+if [[ -z "$1" ]]; then
+  echo "usage: $0 [leader machine]"
+  exit 1
+fi
+
+LEADER="$1"
+
+set -x
+export RUST_LOG=solana=info
+rsync -v -e ssh "$LEADER:~/solana/leader.json" .
+rsync -v -e ssh "$LEADER:~/solana/mint-demo.json" .
+
+cargo run --release --bin solana-client-demo -- \
+  -l leader.json -c 8100 -n 1 < mint-demo.json
multinode-demo/leader.sh
@@ -1,6 +1,4 @@
 #!/bin/bash
-cd /home/ubuntu/solana
-git pull
 export RUST_LOG=solana=info
-cat genesis.log | cargo run --release --features cuda --bin solana-testnode -- -s leader.json -b 8000 -d | grep INFO
-#cat genesis.log | cargo run --release --bin solana-testnode -- -s leader.json -b 8000 -d
+sudo sysctl -w net.core.rmem_max=26214400
+cat genesis.log leader.log | cargo run --release --features cuda --bin solana-fullnode -- -s leader.json -l leader.json -b 8000 -d 2>&1 | tee leader-tee.log
multinode-demo/validator.sh
@@ -1,10 +1,24 @@
-#!/bin/bash
-cd /home/ubuntu/solana
-git pull
-scp ubuntu@18.206.1.146:~/solana/mint-demo.json .
-scp ubuntu@18.206.1.146:~/solana/leader.json .
-scp ubuntu@18.206.1.146:~/solana/genesis.log .
-scp ubuntu@18.206.1.146:~/solana/libcuda_verify_ed25519.a .
-export RUST_LOG=solana=info
-cat genesis.log | cargo run --release --features cuda --bin solana-testnode -- -s replicator.json -v leader.json -b 9000 -d 2>&1 | tee validator.log
+#!/bin/bash -e
+
+if [[ -z "$1" ]]; then
+  echo "usage: $0 [leader machine]"
+  exit 1
+fi
+
+LEADER="$1"
+
+set -x
+
+rsync -v -e ssh "$LEADER:~/solana/mint-demo.json" .
+rsync -v -e ssh "$LEADER:~/solana/leader.json" .
+rsync -v -e ssh "$LEADER:~/solana/genesis.log" .
+rsync -v -e ssh "$LEADER:~/solana/leader.log" .
+rsync -v -e ssh "$LEADER:~/solana/libcuda_verify_ed25519.a" .
+
+export RUST_LOG=solana=info
+
+sudo sysctl -w net.core.rmem_max=26214400
+
+cat genesis.log leader.log | \
+  cargo run --release --features cuda --bin solana-fullnode -- \
+    -l validator.json -s validator.json -v leader.json -b 9000 -d 2>&1 | tee validator-tee.log
src/bank.rs (282 changes)
@@ -7,18 +7,17 @@ extern crate libc;
 
 use chrono::prelude::*;
 use entry::Entry;
-use event::Event;
 use hash::Hash;
 use mint::Mint;
-use plan::{Payment, Plan, Witness};
+use payment_plan::{Payment, PaymentPlan, Witness};
 use rayon::prelude::*;
 use signature::{KeyPair, PublicKey, Signature};
 use std::collections::hash_map::Entry::Occupied;
 use std::collections::{HashMap, HashSet, VecDeque};
 use std::result;
-use std::sync::RwLock;
 use std::sync::atomic::{AtomicIsize, AtomicUsize, Ordering};
-use transaction::{Instruction, Transaction};
+use std::sync::RwLock;
+use transaction::{Instruction, Plan, Transaction};
 
 pub const MAX_ENTRY_IDS: usize = 1024 * 4;
@@ -26,33 +25,13 @@ pub const MAX_ENTRY_IDS: usize = 1024 * 4;
 
 pub enum BankError {
     AccountNotFound(PublicKey),
     InsufficientFunds(PublicKey),
-    InvalidTransferSignature(Signature),
+    DuplicateSiganture(Signature),
+    LastIdNotFound(Hash),
     NegativeTokens,
 }
 
 pub type Result<T> = result::Result<T, BankError>;
 
-/// Commit funds to the 'to' party.
-fn apply_payment(balances: &RwLock<HashMap<PublicKey, AtomicIsize>>, payment: &Payment) {
-    // First we check balances with a read lock to maximize potential parallelization.
-    if balances
-        .read()
-        .expect("'balances' read lock in apply_payment")
-        .contains_key(&payment.to)
-    {
-        let bals = balances.read().expect("'balances' read lock");
-        bals[&payment.to].fetch_add(payment.tokens as isize, Ordering::Relaxed);
-    } else {
-        // Now we know the key wasn't present a nanosecond ago, but it might be there
-        // by the time we aquire a write lock, so we'll have to check again.
-        let mut bals = balances.write().expect("'balances' write lock");
-        if bals.contains_key(&payment.to) {
-            bals[&payment.to].fetch_add(payment.tokens as isize, Ordering::Relaxed);
-        } else {
-            bals.insert(payment.to, AtomicIsize::new(payment.tokens as isize));
-        }
-    }
-}
-
 pub struct Bank {
     balances: RwLock<HashMap<PublicKey, AtomicIsize>>,
     pending: RwLock<HashMap<Signature, Plan>>,
@@ -65,16 +44,16 @@ pub struct Bank {
 impl Bank {
     /// Create an Bank using a deposit.
     pub fn new_from_deposit(deposit: &Payment) -> Self {
-        let balances = RwLock::new(HashMap::new());
-        apply_payment(&balances, deposit);
-        Bank {
-            balances,
+        let bank = Bank {
+            balances: RwLock::new(HashMap::new()),
             pending: RwLock::new(HashMap::new()),
             last_ids: RwLock::new(VecDeque::new()),
             time_sources: RwLock::new(HashSet::new()),
            last_time: RwLock::new(Utc.timestamp(0, 0)),
             transaction_count: AtomicUsize::new(0),
-        }
+        };
+        bank.apply_payment(deposit);
+        bank
     }
 
     /// Create an Bank with only a Mint. Typically used by unit tests.
@@ -88,6 +67,28 @@ impl Bank {
         bank
     }
 
+    /// Commit funds to the 'to' party.
+    fn apply_payment(&self, payment: &Payment) {
+        // First we check balances with a read lock to maximize potential parallelization.
+        if self.balances
+            .read()
+            .expect("'balances' read lock in apply_payment")
+            .contains_key(&payment.to)
+        {
+            let bals = self.balances.read().expect("'balances' read lock");
+            bals[&payment.to].fetch_add(payment.tokens as isize, Ordering::Relaxed);
+        } else {
+            // Now we know the key wasn't present a nanosecond ago, but it might be there
+            // by the time we aquire a write lock, so we'll have to check again.
+            let mut bals = self.balances.write().expect("'balances' write lock");
+            if bals.contains_key(&payment.to) {
+                bals[&payment.to].fetch_add(payment.tokens as isize, Ordering::Relaxed);
+            } else {
+                bals.insert(payment.to, AtomicIsize::new(payment.tokens as isize));
+            }
+        }
+    }
+
     /// Return the last entry ID registered
     pub fn last_id(&self) -> Hash {
         let last_ids = self.last_ids.read().expect("'last_ids' read lock");
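An aside on the locking idiom `apply_payment` describes in its comments: check under a cheap shared read lock first, and only take the write lock on a miss, then re-check before inserting. A self-contained sketch of the same pattern (illustrative only, not repository code):

```rust
use std::collections::HashMap;
use std::sync::atomic::{AtomicIsize, Ordering};
use std::sync::RwLock;

fn credit(balances: &RwLock<HashMap<u64, AtomicIsize>>, key: u64, amount: isize) {
    // Fast path: the key exists, so a shared read lock suffices and many
    // threads can credit different accounts concurrently.
    if balances.read().unwrap().contains_key(&key) {
        balances.read().unwrap()[&key].fetch_add(amount, Ordering::Relaxed);
    } else {
        // Slow path: take the write lock, then re-check, because another
        // writer may have inserted the key while we waited for the lock.
        let mut map = balances.write().unwrap();
        if map.contains_key(&key) {
            map[&key].fetch_add(amount, Ordering::Relaxed);
        } else {
            map.insert(key, AtomicIsize::new(amount));
        }
    }
}

fn main() {
    let balances = RwLock::new(HashMap::new());
    credit(&balances, 7, 100);
    credit(&balances, 7, 1);
    assert_eq!(balances.read().unwrap()[&7].load(Ordering::Relaxed), 101);
}
```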
@@ -95,29 +96,29 @@ impl Bank {
         last_item.0
     }
 
-    fn reserve_signature(signatures: &RwLock<HashSet<Signature>>, sig: &Signature) -> bool {
+    fn reserve_signature(signatures: &RwLock<HashSet<Signature>>, sig: &Signature) -> Result<()> {
         if signatures
             .read()
             .expect("'signatures' read lock")
             .contains(sig)
         {
-            return false;
+            return Err(BankError::DuplicateSiganture(*sig));
         }
         signatures
             .write()
             .expect("'signatures' write lock")
             .insert(*sig);
-        true
+        Ok(())
     }
 
-    fn forget_signature(signatures: &RwLock<HashSet<Signature>>, sig: &Signature) -> bool {
+    fn forget_signature(signatures: &RwLock<HashSet<Signature>>, sig: &Signature) {
         signatures
             .write()
             .expect("'signatures' write lock in forget_signature")
-            .remove(sig)
+            .remove(sig);
     }
 
-    fn forget_signature_with_last_id(&self, sig: &Signature, last_id: &Hash) -> bool {
+    fn forget_signature_with_last_id(&self, sig: &Signature, last_id: &Hash) {
         if let Some(entry) = self.last_ids
             .read()
             .expect("'last_ids' read lock in forget_signature_with_last_id")
@@ -125,12 +126,11 @@ impl Bank {
             .rev()
             .find(|x| x.0 == *last_id)
         {
-            return Self::forget_signature(&entry.1, sig);
+            Self::forget_signature(&entry.1, sig);
         }
-        return false;
     }
 
-    fn reserve_signature_with_last_id(&self, sig: &Signature, last_id: &Hash) -> bool {
+    fn reserve_signature_with_last_id(&self, sig: &Signature, last_id: &Hash) -> Result<()> {
         if let Some(entry) = self.last_ids
             .read()
             .expect("'last_ids' read lock in reserve_signature_with_last_id")
@@ -140,7 +140,7 @@ impl Bank {
         {
             return Self::reserve_signature(&entry.1, sig);
         }
-        false
+        Err(BankError::LastIdNotFound(*last_id))
     }
 
     /// Tell the bank which Entry IDs exist on the ledger. This function
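The `bool`-to-`Result` change in the signature-reservation methods above is what lets `apply_debits` below replace an `if !reserve(...)` check with the `?` operator. A minimal, self-contained sketch of that refactor pattern, with hypothetical names rather than repository code:

```rust
use std::collections::HashSet;

#[derive(Debug, PartialEq)]
enum StoreError {
    Duplicate(u64), // the error now carries the offending value
}

fn reserve(seen: &mut HashSet<u64>, id: u64) -> Result<(), StoreError> {
    if seen.contains(&id) {
        return Err(StoreError::Duplicate(id)); // was: `return false;`
    }
    seen.insert(id);
    Ok(()) // was: `true`
}

fn process(seen: &mut HashSet<u64>, id: u64) -> Result<(), StoreError> {
    reserve(seen, id)?; // was: `if !reserve(..) { return Err(..); }`
    Ok(())
}

fn main() {
    let mut seen = HashSet::new();
    assert_eq!(process(&mut seen, 42), Ok(()));
    assert_eq!(process(&mut seen, 42), Err(StoreError::Duplicate(42)));
}
```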
@@ -159,31 +159,32 @@ impl Bank {
 
     /// Deduct tokens from the 'from' address the account has sufficient
     /// funds and isn't a duplicate.
-    pub fn process_verified_transaction_debits(&self, tr: &Transaction) -> Result<()> {
-        if let Instruction::NewContract(contract) = &tr.instruction {
+    fn apply_debits(&self, tx: &Transaction) -> Result<()> {
+        if let Instruction::NewContract(contract) = &tx.instruction {
             trace!("Transaction {}", contract.tokens);
             if contract.tokens < 0 {
                 return Err(BankError::NegativeTokens);
             }
         }
         let bals = self.balances
             .read()
-            .expect("'balances' read lock in process_verified_transaction_debits");
-        let option = bals.get(&tr.from);
+            .expect("'balances' read lock in apply_debits");
+        let option = bals.get(&tx.from);
 
         if option.is_none() {
-            return Err(BankError::AccountNotFound(tr.from));
+            return Err(BankError::AccountNotFound(tx.from));
         }
 
-        if !self.reserve_signature_with_last_id(&tr.sig, &tr.last_id) {
-            return Err(BankError::InvalidTransferSignature(tr.sig));
-        }
+        self.reserve_signature_with_last_id(&tx.sig, &tx.last_id)?;
 
         loop {
-            let result = if let Instruction::NewContract(contract) = &tr.instruction {
+            let result = if let Instruction::NewContract(contract) = &tx.instruction {
                 let bal = option.expect("assignment of option to bal");
                 let current = bal.load(Ordering::Relaxed) as i64;
 
                 if current < contract.tokens {
-                    self.forget_signature_with_last_id(&tr.sig, &tr.last_id);
-                    return Err(BankError::InsufficientFunds(tr.from));
+                    self.forget_signature_with_last_id(&tx.sig, &tx.last_id);
+                    return Err(BankError::InsufficientFunds(tx.from));
                 }
 
                 bal.compare_exchange(
@@ -206,103 +207,79 @@ impl Bank {
         }
     }
 
-    pub fn process_verified_transaction_credits(&self, tr: &Transaction) {
-        match &tr.instruction {
+    fn apply_credits(&self, tx: &Transaction) {
+        match &tx.instruction {
             Instruction::NewContract(contract) => {
                 let mut plan = contract.plan.clone();
                 plan.apply_witness(&Witness::Timestamp(*self.last_time
                     .read()
-                    .expect("timestamp creation in process_verified_transaction_credits")));
+                    .expect("timestamp creation in apply_credits")));
 
                 if let Some(ref payment) = plan.final_payment() {
-                    apply_payment(&self.balances, payment);
+                    self.apply_payment(payment);
                 } else {
                     let mut pending = self.pending
                         .write()
-                        .expect("'pending' write lock in process_verified_transaction_credits");
-                    pending.insert(tr.sig, plan);
+                        .expect("'pending' write lock in apply_credits");
+                    pending.insert(tx.sig, plan);
                 }
             }
             Instruction::ApplyTimestamp(dt) => {
-                let _ = self.process_verified_timestamp(tr.from, *dt);
+                let _ = self.apply_timestamp(tx.from, *dt);
             }
             Instruction::ApplySignature(tx_sig) => {
-                let _ = self.process_verified_sig(tr.from, *tx_sig);
+                let _ = self.apply_signature(tx.from, *tx_sig);
             }
         }
     }
 
-    /// Process a Transaction that has already been verified.
-    pub fn process_verified_transaction(&self, tr: &Transaction) -> Result<()> {
-        self.process_verified_transaction_debits(tr)?;
-        self.process_verified_transaction_credits(tr);
+    /// Process a Transaction.
+    fn process_transaction(&self, tx: &Transaction) -> Result<()> {
+        self.apply_debits(tx)?;
+        self.apply_credits(tx);
         Ok(())
     }
 
-    /// Process a batch of verified transactions.
-    pub fn process_verified_transactions(&self, trs: Vec<Transaction>) -> Vec<Result<Transaction>> {
+    /// Process a batch of transactions.
+    pub fn process_transactions(&self, txs: Vec<Transaction>) -> Vec<Result<Transaction>> {
         // Run all debits first to filter out any transactions that can't be processed
         // in parallel deterministically.
-        info!("processing Transactions {}", trs.len());
-        let results: Vec<_> = trs.into_par_iter()
-            .map(|tr| self.process_verified_transaction_debits(&tr).map(|_| tr))
+        info!("processing Transactions {}", txs.len());
+        let results: Vec<_> = txs.into_par_iter()
+            .map(|tx| self.apply_debits(&tx).map(|_| tx))
             .collect(); // Calling collect() here forces all debits to complete before moving on.
 
         results
             .into_par_iter()
             .map(|result| {
-                result.map(|tr| {
-                    self.process_verified_transaction_credits(&tr);
-                    tr
+                result.map(|tx| {
+                    self.apply_credits(&tx);
+                    tx
                 })
             })
            .collect()
     }
 
-    fn partition_events(events: Vec<Event>) -> (Vec<Transaction>, Vec<Event>) {
-        (
-            events
-                .into_iter()
-                .map(|Event::Transaction(tr)| tr)
-                .collect(),
-            vec![],
-        )
-    }
-
-    pub fn process_verified_events(&self, events: Vec<Event>) -> Vec<Result<Event>> {
-        let (trs, rest) = Self::partition_events(events);
-        let mut results: Vec<_> = self.process_verified_transactions(trs)
-            .into_iter()
-            .map(|x| x.map(Event::Transaction))
-            .collect();
-
-        for event in rest {
-            results.push(self.process_verified_event(event));
-        }
-
-        results
-    }
-
-    pub fn process_verified_entries(&self, entries: Vec<Entry>) -> Result<()> {
+    pub fn process_entries(&self, entries: Vec<Entry>) -> Result<()> {
         for entry in entries {
             self.register_entry_id(&entry.id);
-            for result in self.process_verified_events(entry.events) {
+            for result in self.process_transactions(entry.transactions) {
                 result?;
             }
         }
         Ok(())
     }
 
-    /// Process a Witness Signature that has already been verified.
-    fn process_verified_sig(&self, from: PublicKey, tx_sig: Signature) -> Result<()> {
+    /// Process a Witness Signature.
+    fn apply_signature(&self, from: PublicKey, tx_sig: Signature) -> Result<()> {
         if let Occupied(mut e) = self.pending
             .write()
-            .expect("write() in process_verified_sig")
+            .expect("write() in apply_signature")
             .entry(tx_sig)
         {
             e.get_mut().apply_witness(&Witness::Signature(from));
             if let Some(payment) = e.get().final_payment() {
-                apply_payment(&self.balances, &payment);
+                self.apply_payment(&payment);
                 e.remove_entry();
             }
         };
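A compact sketch of the two-phase batch idiom `process_transactions` uses above, where the intermediate `collect()` acts as a barrier between the parallel debit and credit phases. Illustrative only; it assumes the `rayon` crate, which this file already imports:

```rust
extern crate rayon; // assumed dependency: rayon

use rayon::prelude::*;

fn main() {
    let txs: Vec<i64> = vec![1, 2, 3, 4];

    // Phase 1: "debits" run in parallel; collect() forces every one of them
    // to settle before any "credit" runs.
    let debited: Vec<Result<i64, ()>> = txs
        .into_par_iter()
        .map(|tx| if tx % 2 == 0 { Ok(tx) } else { Err(()) })
        .collect(); // barrier between the two phases

    // Phase 2: "credits" are applied only to transactions whose debit succeeded.
    let credited: Vec<i64> = debited
        .into_par_iter()
        .filter_map(|res| res.ok())
        .collect();

    assert_eq!(credited.iter().sum::<i64>(), 6);
}
```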
@@ -310,8 +287,8 @@ impl Bank {
         Ok(())
     }
 
-    /// Process a Witness Timestamp that has already been verified.
-    fn process_verified_timestamp(&self, from: PublicKey, dt: DateTime<Utc>) -> Result<()> {
+    /// Process a Witness Timestamp.
+    fn apply_timestamp(&self, from: PublicKey, dt: DateTime<Utc>) -> Result<()> {
         // If this is the first timestamp we've seen, it probably came from the genesis block,
         // so we'll trust it.
         if *self.last_time
@@ -344,13 +321,13 @@ impl Bank {
         // double-spend if it enters before the modified plan is removed from 'pending'.
         let mut pending = self.pending
             .write()
-            .expect("'pending' write lock in process_verified_timestamp");
+            .expect("'pending' write lock in apply_timestamp");
         for (key, plan) in pending.iter_mut() {
             plan.apply_witness(&Witness::Timestamp(*self.last_time
                 .read()
                 .expect("'last_time' read lock when creating timestamp")));
             if let Some(ref payment) = plan.final_payment() {
-                apply_payment(&self.balances, payment);
+                self.apply_payment(payment);
                 completed.push(key.clone());
             }
         }
@@ -362,14 +339,6 @@ impl Bank {
         Ok(())
     }
 
-    /// Process an Transaction or Witness that has already been verified.
-    pub fn process_verified_event(&self, event: Event) -> Result<Event> {
-        match event {
-            Event::Transaction(ref tr) => self.process_verified_transaction(tr),
-        }?;
-        Ok(event)
-    }
-
     /// Create, sign, and process a Transaction from `keypair` to `to` of
     /// `n` tokens where `last_id` is the last Entry ID observed by the client.
     pub fn transfer(
@@ -379,9 +348,9 @@ impl Bank {
         to: PublicKey,
         last_id: Hash,
     ) -> Result<Signature> {
-        let tr = Transaction::new(keypair, to, n, last_id);
-        let sig = tr.sig;
-        self.process_verified_transaction(&tr).map(|_| sig)
+        let tx = Transaction::new(keypair, to, n, last_id);
+        let sig = tx.sig;
+        self.process_transaction(&tx).map(|_| sig)
     }
 
     /// Create, sign, and process a postdated Transaction from `keypair`
@@ -395,9 +364,9 @@ impl Bank {
         dt: DateTime<Utc>,
         last_id: Hash,
     ) -> Result<Signature> {
-        let tr = Transaction::new_on_date(keypair, to, dt, n, last_id);
-        let sig = tr.sig;
-        self.process_verified_transaction(&tr).map(|_| sig)
+        let tx = Transaction::new_on_date(keypair, to, dt, n, last_id);
+        let sig = tx.sig;
+        self.process_transaction(&tx).map(|_| sig)
     }
 
     pub fn get_balance(&self, pubkey: &PublicKey) -> Option<i64> {
@@ -436,6 +405,18 @@ mod tests {
         assert_eq!(bank.transaction_count(), 2);
     }
 
+    #[test]
+    fn test_invalid_tokens() {
+        let mint = Mint::new(1);
+        let pubkey = KeyPair::new().pubkey();
+        let bank = Bank::new(&mint);
+        assert_eq!(
+            bank.transfer(-1, &mint.keypair(), pubkey, mint.last_id()),
+            Err(BankError::NegativeTokens)
+        );
+        assert_eq!(bank.transaction_count(), 0);
+    }
+
     #[test]
     fn test_account_not_found() {
         let mint = Mint::new(1);
@@ -498,14 +479,14 @@ mod tests {
 
         // Now, acknowledge the time in the condition occurred and
         // that pubkey's funds are now available.
-        bank.process_verified_timestamp(mint.pubkey(), dt).unwrap();
+        bank.apply_timestamp(mint.pubkey(), dt).unwrap();
         assert_eq!(bank.get_balance(&pubkey), Some(1));
 
-        // tx count is still 1, because we chose not to count timestamp events
+        // tx count is still 1, because we chose not to count timestamp transactions
         // tx count.
         assert_eq!(bank.transaction_count(), 1);
 
-        bank.process_verified_timestamp(mint.pubkey(), dt).unwrap(); // <-- Attack! Attempt to process completed transaction.
+        bank.apply_timestamp(mint.pubkey(), dt).unwrap(); // <-- Attack! Attempt to process completed transaction.
         assert_ne!(bank.get_balance(&pubkey), Some(2));
     }
 
@@ -515,7 +496,7 @@ mod tests {
         let bank = Bank::new(&mint);
         let pubkey = KeyPair::new().pubkey();
         let dt = Utc::now();
-        bank.process_verified_timestamp(mint.pubkey(), dt).unwrap();
+        bank.apply_timestamp(mint.pubkey(), dt).unwrap();
 
         // It's now past now, so this transfer should be processed immediately.
         bank.transfer_on_date(1, &mint.keypair(), pubkey, dt, mint.last_id())
@@ -545,24 +526,30 @@ mod tests {
         assert_eq!(bank.get_balance(&pubkey), None);
 
         // Now, cancel the trancaction. Mint gets her funds back, pubkey never sees them.
-        bank.process_verified_sig(mint.pubkey(), sig).unwrap();
+        bank.apply_signature(mint.pubkey(), sig).unwrap();
         assert_eq!(bank.get_balance(&mint.pubkey()), Some(1));
         assert_eq!(bank.get_balance(&pubkey), None);
 
         // Assert cancel doesn't cause count to go backward.
         assert_eq!(bank.transaction_count(), 1);
 
-        bank.process_verified_sig(mint.pubkey(), sig).unwrap(); // <-- Attack! Attempt to cancel completed transaction.
+        bank.apply_signature(mint.pubkey(), sig).unwrap(); // <-- Attack! Attempt to cancel completed transaction.
         assert_ne!(bank.get_balance(&mint.pubkey()), Some(2));
     }
 
     #[test]
-    fn test_duplicate_event_signature() {
+    fn test_duplicate_transaction_signature() {
         let mint = Mint::new(1);
         let bank = Bank::new(&mint);
         let sig = Signature::default();
-        assert!(bank.reserve_signature_with_last_id(&sig, &mint.last_id()));
-        assert!(!bank.reserve_signature_with_last_id(&sig, &mint.last_id()));
+        assert!(
+            bank.reserve_signature_with_last_id(&sig, &mint.last_id())
+                .is_ok()
+        );
+        assert_eq!(
+            bank.reserve_signature_with_last_id(&sig, &mint.last_id()),
+            Err(BankError::DuplicateSiganture(sig))
+        );
     }
 
     #[test]
@@ -570,9 +557,13 @@ mod tests {
         let mint = Mint::new(1);
         let bank = Bank::new(&mint);
         let sig = Signature::default();
-        bank.reserve_signature_with_last_id(&sig, &mint.last_id());
-        assert!(bank.forget_signature_with_last_id(&sig, &mint.last_id()));
-        assert!(!bank.forget_signature_with_last_id(&sig, &mint.last_id()));
+        bank.reserve_signature_with_last_id(&sig, &mint.last_id())
+            .unwrap();
+        bank.forget_signature_with_last_id(&sig, &mint.last_id());
+        assert!(
+            bank.reserve_signature_with_last_id(&sig, &mint.last_id())
+                .is_ok()
+        );
     }
 
     #[test]
@@ -585,7 +576,10 @@ mod tests {
             bank.register_entry_id(&last_id);
         }
         // Assert we're no longer able to use the oldest entry ID.
-        assert!(!bank.reserve_signature_with_last_id(&sig, &mint.last_id()));
+        assert_eq!(
+            bank.reserve_signature_with_last_id(&sig, &mint.last_id()),
+            Err(BankError::LastIdNotFound(mint.last_id()))
+        );
     }
 
     #[test]
@@ -593,10 +587,10 @@ mod tests {
         let mint = Mint::new(2);
         let bank = Bank::new(&mint);
         let keypair = KeyPair::new();
-        let tr0 = Transaction::new(&mint.keypair(), keypair.pubkey(), 2, mint.last_id());
-        let tr1 = Transaction::new(&keypair, mint.pubkey(), 1, mint.last_id());
-        let trs = vec![tr0, tr1];
-        let results = bank.process_verified_transactions(trs);
+        let tx0 = Transaction::new(&mint.keypair(), keypair.pubkey(), 2, mint.last_id());
+        let tx1 = Transaction::new(&keypair, mint.pubkey(), 1, mint.last_id());
+        let txs = vec![tx0, tx1];
+        let results = bank.process_transactions(txs);
         assert!(results[1].is_err());
 
         // Assert bad transactions aren't counted.
@@ -614,7 +608,7 @@ mod bench {
     use signature::KeyPairUtil;
 
     #[bench]
-    fn process_verified_event_bench(bencher: &mut Bencher) {
+    fn bench_process_transaction(bencher: &mut Bencher) {
         let mint = Mint::new(100_000_000);
         let bank = Bank::new(&mint);
         // Create transactions between unrelated parties.
@@ -623,16 +617,16 @@ mod bench {
             .map(|i| {
                 // Seed the 'from' account.
                 let rando0 = KeyPair::new();
-                let tr = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, mint.last_id());
-                bank.process_verified_transaction(&tr).unwrap();
+                let tx = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, mint.last_id());
+                bank.process_transaction(&tx).unwrap();
 
                 // Seed the 'to' account and a cell for its signature.
                 let last_id = hash(&serialize(&i).unwrap()); // Unique hash
                 bank.register_entry_id(&last_id);
 
                 let rando1 = KeyPair::new();
-                let tr = Transaction::new(&rando0, rando1.pubkey(), 1, last_id);
-                bank.process_verified_transaction(&tr).unwrap();
+                let tx = Transaction::new(&rando0, rando1.pubkey(), 1, last_id);
+                bank.process_transaction(&tx).unwrap();
 
                 // Finally, return a transaction that's unique
                 Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
@@ -645,7 +639,7 @@ mod bench {
         }
 
         assert!(
-            bank.process_verified_transactions(transactions.clone())
+            bank.process_transactions(transactions.clone())
                 .iter()
                .all(|x| x.is_ok())
         );
src/banking_stage.rs
@@ -1,21 +1,21 @@
-//! The `banking_stage` processes Event messages.
+//! The `banking_stage` processes Transaction messages.
 
 use bank::Bank;
 use bincode::deserialize;
-use event::Event;
 use packet;
 use packet::SharedPackets;
 use rayon::prelude::*;
 use record_stage::Signal;
 use result::Result;
 use std::net::SocketAddr;
-use std::sync::Arc;
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::mpsc::{channel, Receiver, Sender};
+use std::sync::Arc;
 use std::thread::{spawn, JoinHandle};
 use std::time::Duration;
 use std::time::Instant;
 use timing;
+use transaction::Transaction;
 
 pub struct BankingStage {
     pub thread_hdl: JoinHandle<()>,
@@ -49,7 +49,7 @@ impl BankingStage {
         }
     }
 
-    fn deserialize_events(p: &packet::Packets) -> Vec<Option<(Event, SocketAddr)>> {
+    fn deserialize_transactions(p: &packet::Packets) -> Vec<Option<(Transaction, SocketAddr)>> {
         p.packets
             .par_iter()
            .map(|x| {
@@ -79,33 +79,33 @@ impl BankingStage {
         );
         let proc_start = Instant::now();
         for (msgs, vers) in mms {
-            let events = Self::deserialize_events(&msgs.read().unwrap());
-            reqs_len += events.len();
-            let events = events
+            let transactions = Self::deserialize_transactions(&msgs.read().unwrap());
+            reqs_len += transactions.len();
+            let transactions = transactions
                 .into_iter()
                 .zip(vers)
-                .filter_map(|(event, ver)| match event {
+                .filter_map(|(tx, ver)| match tx {
                     None => None,
-                    Some((event, _addr)) => if event.verify() && ver != 0 {
-                        Some(event)
+                    Some((tx, _addr)) => if tx.verify_plan() && ver != 0 {
+                        Some(tx)
                     } else {
                         None
                     },
                 })
                 .collect();
 
-            debug!("process_events");
-            let results = bank.process_verified_events(events);
-            let events = results.into_iter().filter_map(|x| x.ok()).collect();
-            signal_sender.send(Signal::Events(events))?;
-            debug!("done process_events");
+            debug!("process_transactions");
+            let results = bank.process_transactions(transactions);
+            let transactions = results.into_iter().filter_map(|x| x.ok()).collect();
+            signal_sender.send(Signal::Events(transactions))?;
+            debug!("done process_transactions");
 
             packet_recycler.recycle(msgs);
         }
         let total_time_s = timing::duration_as_s(&proc_start.elapsed());
         let total_time_ms = timing::duration_as_ms(&proc_start.elapsed());
         info!(
-            "@{:?} done processing event batches: {} time: {:?}ms reqs: {} reqs/s: {}",
+            "@{:?} done processing transaction batches: {} time: {:?}ms reqs: {} reqs/s: {}",
             timing::timestamp(),
             mms_len,
             total_time_ms,
@@ -120,7 +120,6 @@ impl BankingStage {
 
 //use bank::Bank;
 //use entry::Entry;
-//use event::Event;
 //use hash::Hash;
 //use record_stage::RecordStage;
 //use record_stage::Signal;
@@ -128,18 +127,17 @@ impl BankingStage {
 //use std::sync::mpsc::{channel, Sender};
 //use std::sync::{Arc, Mutex};
 //use std::time::Duration;
+//use transaction::Transaction;
 //
 //#[cfg(test)]
 //mod tests {
 //    use bank::Bank;
-//    use event::Event;
 //    use event_processor::EventProcessor;
 //    use mint::Mint;
 //    use signature::{KeyPair, KeyPairUtil};
-//    use transaction::Transaction;
 //
 //    #[test]
-//    // TODO: Move this test banking_stage. Calling process_events() directly
+//    // TODO: Move this test banking_stage. Calling process_transactions() directly
 //    // defeats the purpose of this test.
 //    fn test_banking_sequential_consistency() {
 //        // In this attack we'll demonstrate that a verifier can interpret the ledger
@@ -147,18 +145,18 @@ impl BankingStage {
 //        // Entry OR if the verifier tries to parallelize across multiple Entries.
 //        let mint = Mint::new(2);
 //        let bank = Bank::new(&mint);
-//        let event_processor = EventProcessor::new(bank, &mint.last_id(), None);
+//        let banking_stage = EventProcessor::new(bank, &mint.last_id(), None);
 //
 //        // Process a batch that includes a transaction that receives two tokens.
 //        let alice = KeyPair::new();
-//        let tr = Transaction::new(&mint.keypair(), alice.pubkey(), 2, mint.last_id());
-//        let events = vec![Event::Transaction(tr)];
-//        let entry0 = event_processor.process_events(events).unwrap();
+//        let tx = Transaction::new(&mint.keypair(), alice.pubkey(), 2, mint.last_id());
+//        let transactions = vec![tx];
+//        let entry0 = banking_stage.process_transactions(transactions).unwrap();
 //
 //        // Process a second batch that spends one of those tokens.
-//        let tr = Transaction::new(&alice, mint.pubkey(), 1, mint.last_id());
-//        let events = vec![Event::Transaction(tr)];
-//        let entry1 = event_processor.process_events(events).unwrap();
+//        let tx = Transaction::new(&alice, mint.pubkey(), 1, mint.last_id());
+//        let transactions = vec![tx];
+//        let entry1 = banking_stage.process_transactions(transactions).unwrap();
 //
 //        // Collect the ledger and feed it to a new bank.
 //        let entries = vec![entry0, entry1];
@@ -170,7 +168,7 @@ impl BankingStage {
 //        for entry in entries {
 //            assert!(
 //                bank
-//                    .process_verified_events(entry.events)
+//                    .process_transactions(entry.transactions)
 //                    .into_iter()
 //                    .all(|x| x.is_ok())
 //            );
@@ -185,7 +183,6 @@ impl BankingStage {
 //    use self::test::Bencher;
 //    use bank::{Bank, MAX_ENTRY_IDS};
 //    use bincode::serialize;
-//    use event_processor::*;
 //    use hash::hash;
 //    use mint::Mint;
 //    use rayon::prelude::*;
@@ -195,7 +192,7 @@ impl BankingStage {
 //    use transaction::Transaction;
 //
 //    #[bench]
-//    fn process_events_bench(_bencher: &mut Bencher) {
+//    fn bench_process_transactions(_bencher: &mut Bencher) {
 //        let mint = Mint::new(100_000_000);
 //        let bank = Bank::new(&mint);
 //        // Create transactions between unrelated parties.
@@ -217,36 +214,31 @@ impl BankingStage {
 //
 //            // Seed the 'from' account.
 //            let rando0 = KeyPair::new();
-//            let tr = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, last_id);
-//            bank.process_verified_transaction(&tr).unwrap();
+//            let tx = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, last_id);
+//            bank.process_transaction(&tx).unwrap();
 //
 //            let rando1 = KeyPair::new();
-//            let tr = Transaction::new(&rando0, rando1.pubkey(), 2, last_id);
-//            bank.process_verified_transaction(&tr).unwrap();
+//            let tx = Transaction::new(&rando0, rando1.pubkey(), 2, last_id);
+//            bank.process_transaction(&tx).unwrap();
 //
 //            // Finally, return a transaction that's unique
 //            Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
 //        })
 //        .collect();
 //
-//        let events: Vec<_> = transactions
-//            .into_iter()
-//            .map(|tr| Event::Transaction(tr))
-//            .collect();
-//
-//        let event_processor = EventProcessor::new(bank, &mint.last_id(), None);
+//        let banking_stage = EventProcessor::new(bank, &mint.last_id(), None);
 //
 //        let now = Instant::now();
-//        assert!(event_processor.process_events(events).is_ok());
+//        assert!(banking_stage.process_transactions(transactions).is_ok());
 //        let duration = now.elapsed();
 //        let sec = duration.as_secs() as f64 + duration.subsec_nanos() as f64 / 1_000_000_000.0;
 //        let tps = txs as f64 / sec;
 //
 //        // Ensure that all transactions were successfully logged.
-//        drop(event_processor.historian_input);
-//        let entries: Vec<Entry> = event_processor.output.lock().unwrap().iter().collect();
+//        drop(banking_stage.historian_input);
+//        let entries: Vec<Entry> = banking_stage.output.lock().unwrap().iter().collect();
 //        assert_eq!(entries.len(), 1);
-//        assert_eq!(entries[0].events.len(), txs as usize);
+//        assert_eq!(entries[0].transactions.len(), txs as usize);
 //
 //        println!("{} tps", tps);
 //    }
@@ -258,29 +250,29 @@ mod bench {
     use self::test::Bencher;
     use bank::*;
     use banking_stage::BankingStage;
-    use event::Event;
     use mint::Mint;
     use packet::{to_packets, PacketRecycler};
     use record_stage::Signal;
     use signature::{KeyPair, KeyPairUtil};
     use std::iter;
-    use std::sync::Arc;
     use std::sync::mpsc::channel;
+    use std::sync::Arc;
     use transaction::Transaction;
 
     #[bench]
-    fn stage_bench(bencher: &mut Bencher) {
+    fn bench_stage(bencher: &mut Bencher) {
         let tx = 100_usize;
         let mint = Mint::new(1_000_000_000);
         let pubkey = KeyPair::new().pubkey();
 
-        let events: Vec<_> = (0..tx)
-            .map(|i| Event::new_transaction(&mint.keypair(), pubkey, i as i64, mint.last_id()))
+        let transactions: Vec<_> = (0..tx)
+            .map(|i| Transaction::new(&mint.keypair(), pubkey, i as i64, mint.last_id()))
             .collect();
 
         let (verified_sender, verified_receiver) = channel();
         let (signal_sender, signal_receiver) = channel();
         let packet_recycler = PacketRecycler::default();
-        let verified: Vec<_> = to_packets(&packet_recycler, events)
+        let verified: Vec<_> = to_packets(&packet_recycler, transactions)
             .into_iter()
             .map(|x| {
                 let len = (*x).read().unwrap().packets.len();
@@ -298,8 +290,8 @@ mod bench {
             &packet_recycler,
         ).unwrap();
         let signal = signal_receiver.recv().unwrap();
-        if let Signal::Events(ref events) = signal {
-            assert_eq!(events.len(), tx);
+        if let Signal::Events(ref transactions) = signal {
+            assert_eq!(transactions.len(), tx);
         } else {
             assert!(false);
         }
src/bin/client-demo.rs
@@ -1,6 +1,7 @@
 extern crate futures;
 extern crate getopts;
 extern crate isatty;
+extern crate pnet;
 extern crate rayon;
 extern crate serde_json;
 extern crate solana;
@@ -8,16 +9,23 @@ extern crate solana;
 
 use futures::Future;
 use getopts::Options;
 use isatty::stdin_isatty;
+use pnet::datalink;
 use rayon::prelude::*;
+use solana::crdt::{Crdt, ReplicatedData};
 use solana::mint::MintDemo;
-use solana::signature::{GenKeys, KeyPairUtil};
+use solana::signature::{GenKeys, KeyPair, KeyPairUtil};
+use solana::streamer::default_window;
 use solana::thin_client::ThinClient;
 use solana::transaction::Transaction;
 use std::env;
 use std::fs::File;
 use std::io::{stdin, Read};
-use std::net::{SocketAddr, UdpSocket};
+use std::net::{IpAddr, SocketAddr, UdpSocket};
 use std::process::exit;
+use std::sync::atomic::{AtomicBool, Ordering};
+use std::sync::{Arc, RwLock};
+use std::thread::sleep;
+use std::thread::JoinHandle;
 use std::time::Duration;
 use std::time::Instant;
@@ -30,15 +38,32 @@ fn print_usage(program: &str, opts: Options) {
     print!("{}", opts.usage(&brief));
 }
 
+fn get_ip_addr() -> Option<IpAddr> {
+    for iface in datalink::interfaces() {
+        for p in iface.ips {
+            if !p.ip().is_loopback() && !p.ip().is_multicast() {
+                return Some(p.ip());
+            }
+        }
+    }
+    None
+}
+
 fn main() {
     let mut threads = 4usize;
-    let mut server_addr: String = "127.0.0.1:8000".to_string();
-    let mut requests_addr: String = "127.0.0.1:8010".to_string();
     let mut num_nodes = 10usize;
+    let mut leader = "leader.json".to_string();
 
     let mut opts = Options::new();
-    opts.optopt("s", "", "server address", "host:port");
-    opts.optopt("c", "", "client address", "host:port");
+    opts.optopt("l", "", "leader", "leader.json");
+    opts.optopt("c", "", "client port", "port");
     opts.optopt("t", "", "number of threads", &format!("{}", threads));
     opts.optopt(
         "n",
         "",
         "number of nodes to converge to",
         &format!("{}", num_nodes),
     );
     opts.optflag("h", "help", "print help");
     let args: Vec<String> = env::args().collect();
     let matches = match opts.parse(&args[1..]) {
@@ -54,19 +79,33 @@ fn main() {
         print_usage(&program, opts);
         return;
     }
-    if matches.opt_present("s") {
-        server_addr = matches.opt_str("s").unwrap();
+    if matches.opt_present("l") {
+        leader = matches.opt_str("l").unwrap();
     }
+    let mut addr: SocketAddr = "127.0.0.1:8010".parse().unwrap();
     if matches.opt_present("c") {
-        requests_addr = matches.opt_str("c").unwrap();
+        let port = matches.opt_str("c").unwrap().parse().unwrap();
+        addr.set_port(port);
     }
+    addr.set_ip(get_ip_addr().unwrap());
+    let client_addr: Arc<RwLock<SocketAddr>> = Arc::new(RwLock::new(addr));
     if matches.opt_present("t") {
         threads = matches.opt_str("t").unwrap().parse().expect("integer");
     }
     if matches.opt_present("n") {
         num_nodes = matches.opt_str("n").unwrap().parse().expect("integer");
     }
 
-    let mut events_addr: SocketAddr = requests_addr.parse().unwrap();
-    let requests_port = events_addr.port();
-    events_addr.set_port(requests_port + 1);
+    let leader: ReplicatedData = read_leader(leader);
+    let signal = Arc::new(AtomicBool::new(false));
+    let mut c_threads = vec![];
+    let validators = converge(
+        &client_addr,
+        &leader,
+        signal.clone(),
+        num_nodes + 2,
+        &mut c_threads,
+    );
 
     if stdin_isatty() {
         eprintln!("nothing found on stdin, expected a json file");
@@ -85,23 +124,7 @@ fn main() {
|
||||
eprintln!("failed to parse json: {}", e);
|
||||
exit(1);
|
||||
});
|
||||
|
||||
println!("Binding to {}", requests_addr);
|
||||
let requests_socket = UdpSocket::bind(&requests_addr).unwrap();
|
||||
requests_socket
|
||||
.set_read_timeout(Some(Duration::new(5, 0)))
|
||||
.unwrap();
|
||||
let events_socket = UdpSocket::bind(&events_addr).unwrap();
|
||||
let requests_addr: SocketAddr = server_addr.parse().unwrap();
|
||||
let requests_port = requests_addr.port();
|
||||
let mut events_server_addr = requests_addr.clone();
|
||||
events_server_addr.set_port(requests_port + 3);
|
||||
let mut client = ThinClient::new(
|
||||
requests_addr,
|
||||
requests_socket,
|
||||
events_server_addr,
|
||||
events_socket,
|
||||
);
|
||||
let mut client = mk_client(&client_addr, &leader);
|
||||
|
||||
println!("Get last ID...");
|
||||
let last_id = client.get_last_id().wait().unwrap();
|
||||
@@ -120,7 +143,7 @@ fn main() {
|
||||
.into_par_iter()
|
||||
.map(|chunk| Transaction::new(&chunk[0], chunk[1].pubkey(), 1, last_id))
|
||||
.collect();
|
||||
let mut duration = now.elapsed();
|
||||
let duration = now.elapsed();
|
||||
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
|
||||
let bsps = txs as f64 / ns as f64;
|
||||
let nsps = ns as f64 / txs as f64;
|
||||
@@ -130,46 +153,125 @@ fn main() {
|
||||
nsps / 1_000_f64
|
||||
);
|
||||
|
||||
let initial_tx_count = client.transaction_count();
|
||||
println!("initial count {}", initial_tx_count);
|
||||
let first_count = client.transaction_count();
|
||||
println!("initial count {}", first_count);
|
||||
|
||||
println!("Transfering {} transactions in {} batches", txs, threads);
|
||||
let now = Instant::now();
|
||||
let sz = transactions.len() / threads;
|
||||
let chunks: Vec<_> = transactions.chunks(sz).collect();
|
||||
chunks.into_par_iter().for_each(|trs| {
|
||||
println!("Transferring 1 unit {} times... to", trs.len());
|
||||
let requests_addr: SocketAddr = server_addr.parse().unwrap();
|
||||
let mut requests_cb_addr = requests_addr.clone();
|
||||
requests_cb_addr.set_port(0);
|
||||
let requests_socket = UdpSocket::bind(requests_cb_addr).unwrap();
|
||||
requests_socket
|
||||
.set_read_timeout(Some(Duration::new(5, 0)))
|
||||
.unwrap();
|
||||
let mut events_addr: SocketAddr = requests_addr.clone();
|
||||
events_addr.set_port(0);
|
||||
let events_socket = UdpSocket::bind(&events_addr).unwrap();
|
||||
let client = ThinClient::new(
|
||||
requests_addr,
|
||||
requests_socket,
|
||||
events_server_addr,
|
||||
events_socket,
|
||||
);
|
||||
for tr in trs {
|
||||
client.transfer_signed(tr.clone()).unwrap();
|
||||
chunks.into_par_iter().for_each(|txs| {
|
||||
println!("Transferring 1 unit {} times... to", txs.len());
|
||||
let client = mk_client(&client_addr, &leader);
|
||||
for tx in txs {
|
||||
client.transfer_signed(tx.clone()).unwrap();
|
||||
}
|
||||
});
|
||||
|
||||
println!("Waiting for transactions to complete...",);
|
||||
let mut tx_count;
|
||||
for _ in 0..10 {
|
||||
tx_count = client.transaction_count();
|
||||
duration = now.elapsed();
|
||||
let txs = tx_count - initial_tx_count;
|
||||
println!("Transactions processed {}", txs);
|
||||
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
|
||||
let tps = (txs * 1_000_000_000) as f64 / ns as f64;
|
||||
println!("{} tps", tps);
|
||||
sleep(Duration::new(1, 0));
|
||||
println!("Sampling tps every second...",);
|
||||
validators.into_par_iter().for_each(|val| {
|
||||
let mut client = mk_client(&client_addr, &val);
|
||||
let mut now = Instant::now();
|
||||
let mut initial_tx_count = client.transaction_count();
|
||||
for i in 0..100 {
|
||||
let tx_count = client.transaction_count();
|
||||
let duration = now.elapsed();
|
||||
now = Instant::now();
|
||||
let sample = tx_count - initial_tx_count;
|
||||
initial_tx_count = tx_count;
|
||||
println!(
|
||||
"{}: Transactions processed {}",
|
||||
val.transactions_addr, sample
|
||||
);
|
||||
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
|
||||
let tps = (sample * 1_000_000_000) as f64 / ns as f64;
|
||||
println!("{}: {} tps", val.transactions_addr, tps);
|
||||
let total = tx_count - first_count;
|
||||
println!(
|
||||
"{}: Total Transactions processed {}",
|
||||
val.transactions_addr, total
|
||||
);
|
||||
if total == transactions.len() as u64 {
|
||||
break;
|
||||
}
|
||||
if i > 20 && sample == 0 {
|
||||
break;
|
||||
}
|
||||
sleep(Duration::new(1, 0));
|
||||
}
|
||||
});
|
||||
signal.store(true, Ordering::Relaxed);
|
||||
for t in c_threads {
|
||||
t.join().unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
fn mk_client(locked_addr: &Arc<RwLock<SocketAddr>>, r: &ReplicatedData) -> ThinClient {
|
||||
let mut addr = locked_addr.write().unwrap();
|
||||
let port = addr.port();
|
||||
let transactions_socket = UdpSocket::bind(addr.clone()).unwrap();
|
||||
addr.set_port(port + 1);
|
||||
let requests_socket = UdpSocket::bind(addr.clone()).unwrap();
|
||||
addr.set_port(port + 2);
|
||||
ThinClient::new(
|
||||
r.requests_addr,
|
||||
requests_socket,
|
||||
r.transactions_addr,
|
||||
transactions_socket,
|
||||
)
|
||||
}

fn spy_node(client_addr: &Arc<RwLock<SocketAddr>>) -> (ReplicatedData, UdpSocket) {
    let mut addr = client_addr.write().unwrap();
    let port = addr.port();
    let gossip = UdpSocket::bind(addr.clone()).unwrap();
    addr.set_port(port + 1);
    let daddr = "0.0.0.0:0".parse().unwrap();
    let pubkey = KeyPair::new().pubkey();
    let node = ReplicatedData::new(pubkey, gossip.local_addr().unwrap(), daddr, daddr, daddr);
    (node, gossip)
}

fn converge(
    client_addr: &Arc<RwLock<SocketAddr>>,
    leader: &ReplicatedData,
    exit: Arc<AtomicBool>,
    num_nodes: usize,
    threads: &mut Vec<JoinHandle<()>>,
) -> Vec<ReplicatedData> {
    //lets spy on the network
    let daddr = "0.0.0.0:0".parse().unwrap();
    let (spy, spy_gossip) = spy_node(client_addr);
    let mut spy_crdt = Crdt::new(spy);
    spy_crdt.insert(&leader);
    spy_crdt.set_leader(leader.id);
    let spy_ref = Arc::new(RwLock::new(spy_crdt));
    let spy_window = default_window();
    let t_spy_listen = Crdt::listen(spy_ref.clone(), spy_window, spy_gossip, exit.clone());
    let t_spy_gossip = Crdt::gossip(spy_ref.clone(), exit.clone());
    //wait for the network to converge
    for _ in 0..30 {
        let min = spy_ref.read().unwrap().convergence();
        if num_nodes as u64 == min {
            println!("converged!");
            break;
        }
        sleep(Duration::new(1, 0));
    }
    threads.push(t_spy_listen);
    threads.push(t_spy_gossip);
    let v: Vec<ReplicatedData> = spy_ref
        .read()
        .unwrap()
        .table
        .values()
        .into_iter()
        .filter(|x| x.requests_addr != daddr)
        .map(|x| x.clone())
        .collect();
    v.clone()
}

fn read_leader(path: String) -> ReplicatedData {
    let file = File::open(path).expect("file");
    serde_json::from_reader(file).expect("parse")
}

@@ -11,18 +11,18 @@ use pnet::datalink;
use solana::bank::Bank;
use solana::crdt::ReplicatedData;
use solana::entry::Entry;
use solana::event::Event;
use solana::payment_plan::PaymentPlan;
use solana::server::Server;
use solana::signature::{KeyPair, KeyPairUtil};
use solana::transaction::Instruction;
use std::env;
use std::fs::File;
use std::io::{stdin, stdout, Read};
use std::io::{stdin, Read};
use std::net::{IpAddr, SocketAddr, UdpSocket};
use std::process::exit;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use std::time::Duration;
use std::sync::Arc;
//use std::time::Duration;

fn print_usage(program: &str, opts: Options) {
    let mut brief = format!("Usage: cat <transaction.log> | {} [options]\n\n", program);
@@ -39,6 +39,7 @@ fn main() {
    opts.optopt("b", "", "bind", "bind to port or address");
    opts.optflag("d", "dyn", "detect network address dynamically");
    opts.optopt("s", "", "save", "save my identity to path.json");
    opts.optopt("l", "", "load", "load my identity to path.json");
    opts.optflag("h", "help", "print help");
    opts.optopt(
        "v",
        "",
@@ -97,8 +98,8 @@ fn main() {
    // fields are the same. That entry should be treated as a deposit, not a
    // transfer to oneself.
    let entry1: Entry = entries.next().unwrap();
    let Event::Transaction(ref tr) = entry1.events[0];
    let deposit = if let Instruction::NewContract(contract) = &tr.instruction {
    let tx = &entry1.transactions[0];
    let deposit = if let Instruction::NewContract(contract) = &tx.instruction {
        contract.plan.final_payment()
    } else {
        None
@@ -115,10 +116,10 @@ fn main() {
    let mut last_id = entry1.id;
    for entry in entries {
        last_id = entry.id;
        let results = bank.process_verified_events(entry.events);
        let results = bank.process_transactions(entry.transactions);
        for result in results {
            if let Err(e) = result {
                eprintln!("failed to process event {:?}", e);
                eprintln!("failed to process transaction {:?}", e);
                exit(1);
            }
        }
@@ -131,6 +132,12 @@ fn main() {
    // we need all the receiving sockets to be bound within the expected
    // port range that we open on aws
    let mut repl_data = make_repl_data(&bind_addr);
    if matches.opt_present("l") {
        let path = matches.opt_str("l").unwrap();
        if let Ok(file) = File::open(path) {
            repl_data = serde_json::from_reader(file).expect("parse");
        }
    }
    let threads = if matches.opt_present("v") {
        eprintln!("starting validator... {}", repl_data.requests_addr);
        let path = matches.opt_str("v").unwrap();
@@ -150,18 +157,20 @@ fn main() {
    } else {
        eprintln!("starting leader... {}", repl_data.requests_addr);
        repl_data.current_leader_id = repl_data.id.clone();
        let file = File::create("leader.log").expect("leader.log create");
        let server = Server::new_leader(
            bank,
            last_id,
            Some(Duration::from_millis(1000)),
            //Some(Duration::from_millis(1000)),
            None,
            repl_data.clone(),
            UdpSocket::bind(repl_data.requests_addr).unwrap(),
            UdpSocket::bind(repl_data.events_addr).unwrap(),
            UdpSocket::bind(repl_data.transactions_addr).unwrap(),
            UdpSocket::bind("0.0.0.0:0").unwrap(),
            UdpSocket::bind("0.0.0.0:0").unwrap(),
            UdpSocket::bind(repl_data.gossip_addr).unwrap(),
            exit.clone(),
            stdout(),
            file,
        );
        server.thread_hdls
    };
@@ -170,7 +179,7 @@ fn main() {
        let file = File::create(path).expect("file");
        serde_json::to_writer(file, &repl_data).expect("serialize");
    }
    eprintln!("Ready. Listening on {}", bind_addr);
    eprintln!("Ready. Listening on {}", repl_data.transactions_addr);

    for t in threads {
        t.join().expect("join");
@@ -184,7 +193,7 @@ fn next_port(server_addr: &SocketAddr, nxt: u16) -> SocketAddr {
}

fn make_repl_data(bind_addr: &SocketAddr) -> ReplicatedData {
    let events_addr = bind_addr.clone();
    let transactions_addr = bind_addr.clone();
    let gossip_addr = next_port(&bind_addr, 1);
    let replicate_addr = next_port(&bind_addr, 2);
    let requests_addr = next_port(&bind_addr, 3);
@@ -194,7 +203,7 @@ fn make_repl_data(bind_addr: &SocketAddr) -> ReplicatedData {
        gossip_addr,
        replicate_addr,
        requests_addr,
        events_addr,
        transactions_addr,
    )
}
@@ -7,9 +7,9 @@ use isatty::stdin_isatty;
use rayon::prelude::*;
use solana::bank::MAX_ENTRY_IDS;
use solana::entry::{next_entry, Entry};
use solana::event::Event;
use solana::mint::MintDemo;
use solana::signature::{GenKeys, KeyPairUtil};
use solana::transaction::Transaction;
use std::io::{stdin, Read};
use std::process::exit;

@@ -42,11 +42,11 @@ fn main() {
    let last_id = demo.mint.last_id();

    eprintln!("Signing {} transactions...", num_accounts);
    let events: Vec<_> = keypairs
    let transactions: Vec<_> = keypairs
        .into_par_iter()
        .map(|rando| {
            let last_id = demo.mint.last_id();
            Event::new_transaction(&mint_keypair, rando.pubkey(), tokens_per_user, last_id)
            Transaction::new(&mint_keypair, rando.pubkey(), tokens_per_user, last_id)
        })
        .collect();

@@ -55,7 +55,7 @@ fn main() {
    }

    eprintln!("Logging the creation of {} accounts...", num_accounts);
    let entry = Entry::new(&last_id, 0, events);
    let entry = Entry::new(&last_id, 0, transactions);
    println!("{}", serde_json::to_string(&entry).unwrap());

    eprintln!("Creating {} empty entries...", MAX_ENTRY_IDS);
@@ -1,261 +0,0 @@
extern crate futures;
extern crate getopts;
extern crate isatty;
extern crate rayon;
extern crate serde_json;
extern crate solana;

use futures::Future;
use getopts::Options;
use isatty::stdin_isatty;
use rayon::prelude::*;
use solana::crdt::{Crdt, ReplicatedData};
use solana::mint::MintDemo;
use solana::signature::{GenKeys, KeyPair, KeyPairUtil};
use solana::streamer::default_window;
use solana::thin_client::ThinClient;
use solana::transaction::Transaction;
use std::env;
use std::fs::File;
use std::io::{stdin, Read};
use std::net::{SocketAddr, UdpSocket};
use std::process::exit;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
use std::thread::JoinHandle;
use std::thread::sleep;
use std::time::Duration;
use std::time::Instant;

fn print_usage(program: &str, opts: Options) {
    let mut brief = format!("Usage: cat <mint.json> | {} [options]\n\n", program);
    brief += " Solana client demo creates a number of transactions and\n";
    brief += " sends them to a target node.";
    brief += " Takes json formatted mint file to stdin.";

    print!("{}", opts.usage(&brief));
}

fn main() {
    let mut threads = 4usize;
    let mut num_nodes = 10usize;
    let mut leader = "leader.json".to_string();

    let mut opts = Options::new();
    opts.optopt("l", "", "leader", "leader.json");
    opts.optopt("c", "", "client address", "host:port");
    opts.optopt("t", "", "number of threads", &format!("{}", threads));
    opts.optopt(
        "n",
        "",
        "number of nodes to converge to",
        &format!("{}", num_nodes),
    );
    opts.optflag("h", "help", "print help");
    let args: Vec<String> = env::args().collect();
    let matches = match opts.parse(&args[1..]) {
        Ok(m) => m,
        Err(e) => {
            eprintln!("{}", e);
            exit(1);
        }
    };

    if matches.opt_present("h") {
        let program = args[0].clone();
        print_usage(&program, opts);
        return;
    }
    if matches.opt_present("l") {
        leader = matches.opt_str("l").unwrap();
    }
    let client_addr: Arc<RwLock<SocketAddr>> = if matches.opt_present("c") {
        let addr = matches.opt_str("c").unwrap().parse().unwrap();
        Arc::new(RwLock::new(addr))
    } else {
        Arc::new(RwLock::new("127.0.0.1:8010".parse().unwrap()))
    };
    if matches.opt_present("t") {
        threads = matches.opt_str("t").unwrap().parse().expect("integer");
    }
    if matches.opt_present("n") {
        num_nodes = matches.opt_str("n").unwrap().parse().expect("integer");
    }

    let leader: ReplicatedData = read_leader(leader);
    let signal = Arc::new(AtomicBool::new(false));
    let mut c_threads = vec![];
    let validators = converge(
        &client_addr,
        &leader,
        signal.clone(),
        num_nodes + 2,
        &mut c_threads,
    );

    if stdin_isatty() {
        eprintln!("nothing found on stdin, expected a json file");
        exit(1);
    }

    let mut buffer = String::new();
    let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
    if num_bytes == 0 {
        eprintln!("empty file on stdin, expected a json file");
        exit(1);
    }

    println!("Parsing stdin...");
    let demo: MintDemo = serde_json::from_str(&buffer).unwrap_or_else(|e| {
        eprintln!("failed to parse json: {}", e);
        exit(1);
    });
    let mut client = mk_client(&client_addr, &leader);

    println!("Get last ID...");
    let last_id = client.get_last_id().wait().unwrap();
    println!("Got last ID {:?}", last_id);

    let rnd = GenKeys::new(demo.mint.keypair().public_key_bytes());

    println!("Creating keypairs...");
    let txs = demo.num_accounts / 2;
    let keypairs = rnd.gen_n_keypairs(demo.num_accounts);
    let keypair_pairs: Vec<_> = keypairs.chunks(2).collect();

    println!("Signing transactions...");
    let now = Instant::now();
    let transactions: Vec<_> = keypair_pairs
        .into_par_iter()
        .map(|chunk| Transaction::new(&chunk[0], chunk[1].pubkey(), 1, last_id))
        .collect();
    let duration = now.elapsed();
    let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
    let bsps = txs as f64 / ns as f64;
    let nsps = ns as f64 / txs as f64;
    println!(
        "Done. {} thousand signatures per second, {}us per signature",
        bsps * 1_000_000_f64,
        nsps / 1_000_f64
    );

    let first_count = client.transaction_count();
    println!("initial count {}", first_count);

    println!("Transfering {} transactions in {} batches", txs, threads);
    let sz = transactions.len() / threads;
    let chunks: Vec<_> = transactions.chunks(sz).collect();
    chunks.into_par_iter().for_each(|trs| {
        println!("Transferring 1 unit {} times... to", trs.len());
        let client = mk_client(&client_addr, &leader);
        for tr in trs {
            client.transfer_signed(tr.clone()).unwrap();
        }
    });

    println!("Sampling tps every second...",);
    validators.into_par_iter().for_each(|val| {
        let mut client = mk_client(&client_addr, &val);
        let mut now = Instant::now();
        let mut initial_tx_count = client.transaction_count();
        for i in 0..100 {
            let tx_count = client.transaction_count();
            let duration = now.elapsed();
            now = Instant::now();
            let sample = tx_count - initial_tx_count;
            initial_tx_count = tx_count;
            println!("{}: Transactions processed {}", val.events_addr, sample);
            let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
            let tps = (sample * 1_000_000_000) as f64 / ns as f64;
            println!("{}: {} tps", val.events_addr, tps);
            let total = tx_count - first_count;
            println!(
                "{}: Total Transactions processed {}",
                val.events_addr, total
            );
            if total == transactions.len() as u64 {
                break;
            }
            if i > 20 && sample == 0 {
                break;
            }
            sleep(Duration::new(1, 0));
        }
    });
    signal.store(true, Ordering::Relaxed);
    for t in c_threads {
        t.join().unwrap();
    }
}

fn mk_client(locked_addr: &Arc<RwLock<SocketAddr>>, r: &ReplicatedData) -> ThinClient {
    let mut addr = locked_addr.write().unwrap();
    let port = addr.port();
    let events_socket = UdpSocket::bind(addr.clone()).unwrap();
    addr.set_port(port + 1);
    let requests_socket = UdpSocket::bind(addr.clone()).unwrap();
    addr.set_port(port + 2);
    ThinClient::new(
        r.requests_addr,
        requests_socket,
        r.events_addr,
        events_socket,
    )
}

fn spy_node(client_addr: &Arc<RwLock<SocketAddr>>) -> (ReplicatedData, UdpSocket) {
    let mut addr = client_addr.write().unwrap();
    let port = addr.port();
    let gossip = UdpSocket::bind(addr.clone()).unwrap();
    addr.set_port(port + 1);
    let daddr = "0.0.0.0:0".parse().unwrap();
    let pubkey = KeyPair::new().pubkey();
    let node = ReplicatedData::new(pubkey, gossip.local_addr().unwrap(), daddr, daddr, daddr);
    (node, gossip)
}

fn converge(
    client_addr: &Arc<RwLock<SocketAddr>>,
    leader: &ReplicatedData,
    exit: Arc<AtomicBool>,
    num_nodes: usize,
    threads: &mut Vec<JoinHandle<()>>,
) -> Vec<ReplicatedData> {
    //lets spy on the network
    let daddr = "0.0.0.0:0".parse().unwrap();
    let (spy, spy_gossip) = spy_node(client_addr);
    let mut spy_crdt = Crdt::new(spy);
    spy_crdt.insert(&leader);
    spy_crdt.set_leader(leader.id);

    let spy_ref = Arc::new(RwLock::new(spy_crdt));
    let spy_window = default_window();
    let t_spy_listen = Crdt::listen(spy_ref.clone(), spy_window, spy_gossip, exit.clone());
    let t_spy_gossip = Crdt::gossip(spy_ref.clone(), exit.clone());
    //wait for the network to converge
    for _ in 0..30 {
        let min = spy_ref.read().unwrap().convergence();
        if num_nodes as u64 == min {
            println!("converged!");
            break;
        }
        sleep(Duration::new(1, 0));
    }
    threads.push(t_spy_listen);
    threads.push(t_spy_gossip);
    let v: Vec<ReplicatedData> = spy_ref
        .read()
        .unwrap()
        .table
        .values()
        .into_iter()
        .filter(|x| x.requests_addr != daddr)
        .map(|x| x.clone())
        .collect();
    v.clone()
}

fn read_leader(path: String) -> ReplicatedData {
    let file = File::open(path).expect("file");
    serde_json::from_reader(file).expect("parse")
}
@@ -1,18 +1,13 @@
//! The `plan` module provides a domain-specific language for payment plans. Users create Plan objects that
//! are given to an interpreter. The interpreter listens for `Witness` events,
//! which it uses to reduce the payment plan. When the plan is reduced to a
//! The `budget` module provides a domain-specific language for payment plans. Users create Budget objects that
//! are given to an interpreter. The interpreter listens for `Witness` transactions,
//! which it uses to reduce the payment plan. When the budget is reduced to a
//! `Payment`, the payment is executed.

use chrono::prelude::*;
use payment_plan::{Payment, PaymentPlan, Witness};
use signature::PublicKey;
use std::mem;

#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Witness {
    Timestamp(DateTime<Utc>),
    Signature(PublicKey),
}

#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Condition {
    Timestamp(DateTime<Utc>),
@@ -30,37 +25,31 @@ impl Condition {
    }
}

#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Payment {
    pub tokens: i64,
    pub to: PublicKey,
}

#[repr(C)]
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Plan {
pub enum Budget {
    Pay(Payment),
    After(Condition, Payment),
    Race((Condition, Payment), (Condition, Payment)),
}

impl Plan {
    /// Create the simplest spending plan - one that pays `tokens` to PublicKey.
impl Budget {
    /// Create the simplest budget - one that pays `tokens` to PublicKey.
    pub fn new_payment(tokens: i64, to: PublicKey) -> Self {
        Plan::Pay(Payment { tokens, to })
        Budget::Pay(Payment { tokens, to })
    }

    /// Create a spending plan that pays `tokens` to `to` after being witnessed by `from`.
    /// Create a budget that pays `tokens` to `to` after being witnessed by `from`.
    pub fn new_authorized_payment(from: PublicKey, tokens: i64, to: PublicKey) -> Self {
        Plan::After(Condition::Signature(from), Payment { tokens, to })
        Budget::After(Condition::Signature(from), Payment { tokens, to })
    }

    /// Create a spending plan that pays `tokens` to `to` after the given DateTime.
    /// Create a budget that pays `tokens` to `to` after the given DateTime.
    pub fn new_future_payment(dt: DateTime<Utc>, tokens: i64, to: PublicKey) -> Self {
        Plan::After(Condition::Timestamp(dt), Payment { tokens, to })
        Budget::After(Condition::Timestamp(dt), Payment { tokens, to })
    }

    /// Create a spending plan that pays `tokens` to `to` after the given DateTime
    /// Create a budget that pays `tokens` to `to` after the given DateTime
    /// unless cancelled by `from`.
    pub fn new_cancelable_future_payment(
        dt: DateTime<Utc>,
@@ -68,44 +57,46 @@ impl Plan {
        tokens: i64,
        to: PublicKey,
    ) -> Self {
        Plan::Race(
        Budget::Race(
            (Condition::Timestamp(dt), Payment { tokens, to }),
            (Condition::Signature(from), Payment { tokens, to: from }),
        )
    }
}

    /// Return Payment if the spending plan requires no additional Witnesses.
    pub fn final_payment(&self) -> Option<Payment> {
impl PaymentPlan for Budget {
    /// Return Payment if the budget requires no additional Witnesses.
    fn final_payment(&self) -> Option<Payment> {
        match *self {
            Plan::Pay(ref payment) => Some(payment.clone()),
            Budget::Pay(ref payment) => Some(payment.clone()),
            _ => None,
        }
    }

    /// Return true if the plan spends exactly `spendable_tokens`.
    pub fn verify(&self, spendable_tokens: i64) -> bool {
    /// Return true if the budget spends exactly `spendable_tokens`.
    fn verify(&self, spendable_tokens: i64) -> bool {
        match *self {
            Plan::Pay(ref payment) | Plan::After(_, ref payment) => {
            Budget::Pay(ref payment) | Budget::After(_, ref payment) => {
                payment.tokens == spendable_tokens
            }
            Plan::Race(ref a, ref b) => {
            Budget::Race(ref a, ref b) => {
                a.1.tokens == spendable_tokens && b.1.tokens == spendable_tokens
            }
        }
    }

    /// Apply a witness to the spending plan to see if the plan can be reduced.
    /// If so, modify the plan in-place.
    pub fn apply_witness(&mut self, witness: &Witness) {
    /// Apply a witness to the budget to see if the budget can be reduced.
    /// If so, modify the budget in-place.
    fn apply_witness(&mut self, witness: &Witness) {
        let new_payment = match *self {
            Plan::After(ref cond, ref payment) if cond.is_satisfied(witness) => Some(payment),
            Plan::Race((ref cond, ref payment), _) if cond.is_satisfied(witness) => Some(payment),
            Plan::Race(_, (ref cond, ref payment)) if cond.is_satisfied(witness) => Some(payment),
            Budget::After(ref cond, ref payment) if cond.is_satisfied(witness) => Some(payment),
            Budget::Race((ref cond, ref payment), _) if cond.is_satisfied(witness) => Some(payment),
            Budget::Race(_, (ref cond, ref payment)) if cond.is_satisfied(witness) => Some(payment),
            _ => None,
        }.cloned();

        if let Some(payment) = new_payment {
            mem::replace(self, Plan::Pay(payment));
            mem::replace(self, Budget::Pay(payment));
        }
    }
}
@@ -130,14 +121,14 @@ mod tests {
    }

    #[test]
    fn test_verify_plan() {
    fn test_verify() {
        let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
        let from = PublicKey::default();
        let to = PublicKey::default();
        assert!(Plan::new_payment(42, to).verify(42));
        assert!(Plan::new_authorized_payment(from, 42, to).verify(42));
        assert!(Plan::new_future_payment(dt, 42, to).verify(42));
        assert!(Plan::new_cancelable_future_payment(dt, from, 42, to).verify(42));
        assert!(Budget::new_payment(42, to).verify(42));
        assert!(Budget::new_authorized_payment(from, 42, to).verify(42));
        assert!(Budget::new_future_payment(dt, 42, to).verify(42));
        assert!(Budget::new_cancelable_future_payment(dt, from, 42, to).verify(42));
    }

    #[test]
@@ -145,9 +136,9 @@ mod tests {
        let from = PublicKey::default();
        let to = PublicKey::default();

        let mut plan = Plan::new_authorized_payment(from, 42, to);
        plan.apply_witness(&Witness::Signature(from));
        assert_eq!(plan, Plan::new_payment(42, to));
        let mut budget = Budget::new_authorized_payment(from, 42, to);
        budget.apply_witness(&Witness::Signature(from));
        assert_eq!(budget, Budget::new_payment(42, to));
    }

    #[test]
@@ -155,9 +146,9 @@ mod tests {
        let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
        let to = PublicKey::default();

        let mut plan = Plan::new_future_payment(dt, 42, to);
        plan.apply_witness(&Witness::Timestamp(dt));
        assert_eq!(plan, Plan::new_payment(42, to));
        let mut budget = Budget::new_future_payment(dt, 42, to);
        budget.apply_witness(&Witness::Timestamp(dt));
        assert_eq!(budget, Budget::new_payment(42, to));
    }

    #[test]
@@ -166,12 +157,12 @@ mod tests {
        let from = PublicKey::default();
        let to = PublicKey::default();

        let mut plan = Plan::new_cancelable_future_payment(dt, from, 42, to);
        plan.apply_witness(&Witness::Timestamp(dt));
        assert_eq!(plan, Plan::new_payment(42, to));
        let mut budget = Budget::new_cancelable_future_payment(dt, from, 42, to);
        budget.apply_witness(&Witness::Timestamp(dt));
        assert_eq!(budget, Budget::new_payment(42, to));

        let mut plan = Plan::new_cancelable_future_payment(dt, from, 42, to);
        plan.apply_witness(&Witness::Signature(from));
        assert_eq!(plan, Plan::new_payment(42, from));
        let mut budget = Budget::new_cancelable_future_payment(dt, from, 42, to);
        budget.apply_witness(&Witness::Signature(from));
        assert_eq!(budget, Budget::new_payment(42, from));
    }
}
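
With the inherent methods moved behind the `PaymentPlan` trait, callers can now be written generically over any plan type rather than against `Budget` directly. A minimal sketch under that assumption (the `settle` helper is illustrative and not part of this diff):

    use payment_plan::{Payment, PaymentPlan, Witness};

    // Hypothetical helper: reduce a plan with one witness and return the
    // payment if the plan is now final. Works for Budget or any future
    // PaymentPlan implementation.
    fn settle<P: PaymentPlan>(plan: &mut P, witness: &Witness) -> Option<Payment> {
        plan.apply_witness(witness);
        plan.final_payment()
    }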

38 src/crdt.rs
@@ -16,7 +16,7 @@
use bincode::{deserialize, serialize};
use byteorder::{LittleEndian, ReadBytesExt};
use hash::Hash;
use packet::SharedBlob;
use packet::{SharedBlob, BLOB_SIZE};
use rayon::prelude::*;
use result::{Error, Result};
use ring::rand::{SecureRandom, SystemRandom};
@@ -43,8 +43,8 @@ pub struct ReplicatedData {
    pub replicate_addr: SocketAddr,
    /// address to connect to when this node is leader
    pub requests_addr: SocketAddr,
    /// events address
    pub events_addr: SocketAddr,
    /// transactions address
    pub transactions_addr: SocketAddr,
    /// current leader identity
    pub current_leader_id: PublicKey,
    /// last verified hash that was submitted to the leader
@@ -59,7 +59,7 @@ impl ReplicatedData {
        gossip_addr: SocketAddr,
        replicate_addr: SocketAddr,
        requests_addr: SocketAddr,
        events_addr: SocketAddr,
        transactions_addr: SocketAddr,
    ) -> ReplicatedData {
        ReplicatedData {
            id,
@@ -68,7 +68,7 @@ impl ReplicatedData {
            gossip_addr,
            replicate_addr,
            requests_addr,
            events_addr,
            transactions_addr,
            current_leader_id: PublicKey::default(),
            last_verified_hash: Hash::default(),
            last_verified_count: 0,
@@ -226,6 +226,7 @@ impl Crdt {
                .expect("set_index in pub fn broadcast");
            //TODO profile this, may need multiple sockets for par_iter
            trace!("broadcast {} to {}", blob.meta.size, v.replicate_addr);
            assert!(blob.meta.size < BLOB_SIZE);
            let e = s.send_to(&blob.data[..blob.meta.size], &v.replicate_addr);
            trace!("done broadcast {} to {}", blob.meta.size, v.replicate_addr);
            e
@@ -285,6 +286,7 @@ impl Crdt {
                    v.replicate_addr
                );
                //TODO profile this, may need multiple sockets for par_iter
                assert!(rblob.meta.size < BLOB_SIZE);
                s.send_to(&rblob.data[..rblob.meta.size], &v.replicate_addr)
            })
            .collect();
@@ -327,14 +329,16 @@ impl Crdt {
    }

    pub fn window_index_request(&self, ix: u64) -> Result<(SocketAddr, Vec<u8>)> {
        if self.table.len() <= 1 {
        let daddr = "0.0.0.0:0".parse().unwrap();
        let valid: Vec<_> = self.table
            .values()
            .filter(|r| r.id != self.me && r.replicate_addr != daddr)
            .collect();
        if valid.is_empty() {
            return Err(Error::CrdtTooSmall);
        }
        let mut n = (Self::random() as usize) % self.table.len();
        while self.table.values().nth(n).unwrap().id == self.me {
            n = (Self::random() as usize) % self.table.len();
        }
        let addr = self.table.values().nth(n).unwrap().gossip_addr.clone();
        let n = (Self::random() as usize) % valid.len();
        let addr = valid[n].gossip_addr.clone();
        let req = Protocol::RequestWindowIndex(self.table[&self.me].clone(), ix);
        let out = serialize(&req)?;
        Ok((addr, out))
@@ -431,6 +435,7 @@ impl Crdt {
            "responding RequestWindowIndex {} {}",
            ix, from.replicate_addr
        );
        assert!(outblob.len() < BLOB_SIZE);
        sock.send_to(&outblob, from.replicate_addr)?;
    }
    Ok(())
@@ -442,7 +447,7 @@ impl Crdt {
        sock: &UdpSocket,
    ) -> Result<()> {
        //TODO cache connections
        let mut buf = vec![0u8; 1024 * 64];
        let mut buf = vec![0u8; BLOB_SIZE];
        trace!("recv_from on {}", sock.local_addr().unwrap());
        let (amt, src) = sock.recv_from(&mut buf)?;
        trace!("got request from {}", src);
@@ -451,7 +456,7 @@ impl Crdt {
        match r {
            // TODO sigverify these
            Protocol::RequestUpdates(v, reqdata) => {
                trace!("RequestUpdates {}", v);
                trace!("RequestUpdates {} from {}", v, src);
                let addr = reqdata.gossip_addr;
                // only lock for this call, dont lock during IO `sock.send_to` or `sock.recv_from`
                let (from, ups, data) = obj.read()
@@ -464,12 +469,13 @@ impl Crdt {
                obj.write()
                    .expect("'obj' write lock in RequestUpdates")
                    .insert(&reqdata);
                assert!(rsp.len() < BLOB_SIZE);
                sock.send_to(&rsp, addr)
                    .expect("'sock.send_to' in RequestUpdates");
                trace!("send_to done!");
            }
            Protocol::ReceiveUpdates(from, ups, data) => {
                trace!("ReceivedUpdates");
                trace!("ReceivedUpdates {} from {}", ups, src);
                obj.write()
                    .expect("'obj' write lock in ReceiveUpdates")
                    .apply_updates(from, ups, &data);
@@ -531,14 +537,14 @@ mod tests {
        let gossip = UdpSocket::bind("0.0.0.0:0").unwrap();
        let replicate = UdpSocket::bind("0.0.0.0:0").unwrap();
        let serve = UdpSocket::bind("0.0.0.0:0").unwrap();
        let events = UdpSocket::bind("0.0.0.0:0").unwrap();
        let transactions = UdpSocket::bind("0.0.0.0:0").unwrap();
        let pubkey = KeyPair::new().pubkey();
        let d = ReplicatedData::new(
            pubkey,
            gossip.local_addr().unwrap(),
            replicate.local_addr().unwrap(),
            serve.local_addr().unwrap(),
            events.local_addr().unwrap(),
            transactions.local_addr().unwrap(),
        );
        let crdt = Crdt::new(d);
        trace!(

89 src/entry.rs
@@ -2,13 +2,13 @@
//! unique ID that is the hash of the Entry before it, plus the hash of the
//! transactions within it. Entries cannot be reordered, and its field `num_hashes`
//! represents an approximate amount of time since the last Entry was created.
use event::Event;
use hash::{extend_and_hash, hash, Hash};
use rayon::prelude::*;
use transaction::Transaction;

/// Each Entry contains three pieces of data. The `num_hashes` field is the number
/// of hashes performed since the previous entry. The `id` field is the result
/// of hashing `id` from the previous entry `num_hashes` times. The `events`
/// of hashing `id` from the previous entry `num_hashes` times. The `transactions`
/// field points to Events that took place shortly after `id` was generated.
///
/// If you divide `num_hashes` by the amount of time it takes to generate a new hash, you
@@ -21,69 +21,69 @@ use rayon::prelude::*;
pub struct Entry {
    pub num_hashes: u64,
    pub id: Hash,
    pub events: Vec<Event>,
    pub transactions: Vec<Transaction>,
}
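
The doc comment's timing claim can be made concrete: dividing `num_hashes` by an assumed hash rate estimates the wall-clock time an Entry represents. A small sketch (the rate is a made-up figure for illustration, not a benchmark from this codebase):

    // Sketch: estimate the real time an Entry stands for.
    fn entry_duration_secs(num_hashes: u64, hashes_per_sec: u64) -> f64 {
        num_hashes as f64 / hashes_per_sec as f64
    }
    // e.g. 2_000_000 hashes at an assumed 1_000_000 hashes/sec ~= 2.0 seconds.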

impl Entry {
    /// Creates the next Entry `num_hashes` after `start_hash`.
    pub fn new(start_hash: &Hash, cur_hashes: u64, events: Vec<Event>) -> Self {
        let num_hashes = cur_hashes + if events.is_empty() { 0 } else { 1 };
        let id = next_hash(start_hash, 0, &events);
    pub fn new(start_hash: &Hash, cur_hashes: u64, transactions: Vec<Transaction>) -> Self {
        let num_hashes = cur_hashes + if transactions.is_empty() { 0 } else { 1 };
        let id = next_hash(start_hash, 0, &transactions);
        Entry {
            num_hashes,
            id,
            events,
            transactions,
        }
    }

    /// Creates the next Tick Entry `num_hashes` after `start_hash`.
    pub fn new_mut(start_hash: &mut Hash, cur_hashes: &mut u64, events: Vec<Event>) -> Self {
        let entry = Self::new(start_hash, *cur_hashes, events);
    pub fn new_mut(
        start_hash: &mut Hash,
        cur_hashes: &mut u64,
        transactions: Vec<Transaction>,
    ) -> Self {
        let entry = Self::new(start_hash, *cur_hashes, transactions);
        *start_hash = entry.id;
        *cur_hashes = 0;
        entry
    }

    /// Creates a Entry from the number of hashes `num_hashes` since the previous event
    /// Creates a Entry from the number of hashes `num_hashes` since the previous transaction
    /// and that resulting `id`.
    pub fn new_tick(num_hashes: u64, id: &Hash) -> Self {
        Entry {
            num_hashes,
            id: *id,
            events: vec![],
            transactions: vec![],
        }
    }

    /// Verifies self.id is the result of hashing a `start_hash` `self.num_hashes` times.
    /// If the event is not a Tick, then hash that as well.
    /// If the transaction is not a Tick, then hash that as well.
    pub fn verify(&self, start_hash: &Hash) -> bool {
        self.events.par_iter().all(|event| event.verify())
            && self.id == next_hash(start_hash, self.num_hashes, &self.events)
        self.transactions.par_iter().all(|tx| tx.verify_plan())
            && self.id == next_hash(start_hash, self.num_hashes, &self.transactions)
    }
}

fn add_event_data(hash_data: &mut Vec<u8>, event: &Event) {
    match *event {
        Event::Transaction(ref tr) => {
            hash_data.push(0u8);
            hash_data.extend_from_slice(&tr.sig);
        }
    }
fn add_transaction_data(hash_data: &mut Vec<u8>, tx: &Transaction) {
    hash_data.push(0u8);
    hash_data.extend_from_slice(&tx.sig);
}

/// Creates the hash `num_hashes` after `start_hash`. If the event contains
/// Creates the hash `num_hashes` after `start_hash`. If the transaction contains
/// a signature, the final hash will be a hash of both the previous ID and
/// the signature.
pub fn next_hash(start_hash: &Hash, num_hashes: u64, events: &[Event]) -> Hash {
pub fn next_hash(start_hash: &Hash, num_hashes: u64, transactions: &[Transaction]) -> Hash {
    let mut id = *start_hash;
    for _ in 1..num_hashes {
        id = hash(&id);
    }

    // Hash all the event data
    // Hash all the transaction data
    let mut hash_data = vec![];
    for event in events {
        add_event_data(&mut hash_data, event);
    for tx in transactions {
        add_transaction_data(&mut hash_data, tx);
    }

    if !hash_data.is_empty() {
@@ -96,11 +96,11 @@ pub fn next_hash(start_hash: &Hash, num_hashes: u64, events: &[Event]) -> Hash {
}

/// Creates the next Tick or Event Entry `num_hashes` after `start_hash`.
pub fn next_entry(start_hash: &Hash, num_hashes: u64, events: Vec<Event>) -> Entry {
pub fn next_entry(start_hash: &Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Entry {
    Entry {
        num_hashes,
        id: next_hash(start_hash, num_hashes, &events),
        events: events,
        id: next_hash(start_hash, num_hashes, &transactions),
        transactions,
    }
}
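
Together, `next_hash` and `next_entry` give the chaining property described at the top of the module: each entry's `id` commits to its predecessor, so an entry verifies only against the id that directly precedes it. A minimal sketch using the functions above (the test name is illustrative; this is not part of the diff):

    #[test]
    fn sketch_chained_entries() {
        // Two empty (tick-like) entries chain through their ids.
        let zero = Hash::default();
        let e0 = next_entry(&zero, 2, vec![]);
        let e1 = next_entry(&e0.id, 2, vec![]);
        assert!(e0.verify(&zero));   // e0 chains from the seed hash
        assert!(e1.verify(&e0.id));  // e1 chains from e0's id
        assert!(!e1.verify(&zero));  // and from nothing else
    }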

@@ -109,7 +109,6 @@ mod tests {
    use super::*;
    use chrono::prelude::*;
    use entry::Entry;
    use event::Event;
    use hash::hash;
    use signature::{KeyPair, KeyPairUtil};
    use transaction::Transaction;
@@ -125,19 +124,19 @@ mod tests {
    }

    #[test]
    fn test_event_reorder_attack() {
    fn test_transaction_reorder_attack() {
        let zero = Hash::default();

        // First, verify entries
        let keypair = KeyPair::new();
        let tr0 = Event::new_transaction(&keypair, keypair.pubkey(), 0, zero);
        let tr1 = Event::new_transaction(&keypair, keypair.pubkey(), 1, zero);
        let mut e0 = Entry::new(&zero, 0, vec![tr0.clone(), tr1.clone()]);
        let tx0 = Transaction::new(&keypair, keypair.pubkey(), 0, zero);
        let tx1 = Transaction::new(&keypair, keypair.pubkey(), 1, zero);
        let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()]);
        assert!(e0.verify(&zero));

        // Next, swap two events and ensure verification fails.
        e0.events[0] = tr1; // <-- attack
        e0.events[1] = tr0;
        // Next, swap two transactions and ensure verification fails.
        e0.transactions[0] = tx1; // <-- attack
        e0.transactions[1] = tx0;
        assert!(!e0.verify(&zero));
    }

@@ -147,18 +146,14 @@ mod tests {

        // First, verify entries
        let keypair = KeyPair::new();
        let tr0 = Event::Transaction(Transaction::new_timestamp(&keypair, Utc::now(), zero));
        let tr1 = Event::Transaction(Transaction::new_signature(
            &keypair,
            Default::default(),
            zero,
        ));
        let mut e0 = Entry::new(&zero, 0, vec![tr0.clone(), tr1.clone()]);
        let tx0 = Transaction::new_timestamp(&keypair, Utc::now(), zero);
        let tx1 = Transaction::new_signature(&keypair, Default::default(), zero);
        let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()]);
        assert!(e0.verify(&zero));

        // Next, swap two witness events and ensure verification fails.
        e0.events[0] = tr1; // <-- attack
        e0.events[1] = tr0;
        // Next, swap two witness transactions and ensure verification fails.
        e0.transactions[0] = tx1; // <-- attack
        e0.transactions[1] = tx0;
        assert!(!e0.verify(&zero));
    }
@@ -2,13 +2,13 @@

use bank::Bank;
use entry::Entry;
use ledger;
use ledger::Block;
use packet;
use result::Result;
use serde_json;
use std::collections::VecDeque;
use std::io::Write;
use std::io::sink;
use std::io::Write;
use std::sync::mpsc::Receiver;
use std::sync::{Arc, Mutex};
use std::time::Duration;
@@ -63,7 +63,7 @@ impl<'a> EntryWriter<'a> {
        let mut q = VecDeque::new();
        let list = self.write_entries(writer, entry_receiver)?;
        trace!("New blobs? {}", list.len());
        ledger::process_entry_list_into_blobs(&list, blob_recycler, &mut q);
        list.to_blobs(blob_recycler, &mut q);
        if !q.is_empty() {
            trace!("broadcasting {}", q.len());
            broadcast.send(q)?;

31 src/event.rs
@@ -1,31 +0,0 @@
//! The `event` module handles events, which may be a `Transaction`, or a `Witness` used to process a pending
//! Transaction.

use hash::Hash;
use signature::{KeyPair, PublicKey};
use transaction::Transaction;

#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Event {
    Transaction(Transaction),
}

impl Event {
    pub fn new_transaction(
        from_keypair: &KeyPair,
        to: PublicKey,
        tokens: i64,
        last_id: Hash,
    ) -> Self {
        let tr = Transaction::new(from_keypair, to, tokens, last_id);
        Event::Transaction(tr)
    }

    /// Verify the Event's signature's are valid and if a transaction, that its
    /// spending plan is valid.
    pub fn verify(&self) -> bool {
        match *self {
            Event::Transaction(ref tr) => tr.verify_plan(),
        }
    }
}

31 src/fetch_stage.rs (new file)
@@ -0,0 +1,31 @@
//! The `fetch_stage` batches input from a UDP socket and sends it to a channel.

use packet;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread::JoinHandle;
use streamer;

pub struct FetchStage {
    pub packet_receiver: streamer::PacketReceiver,
    pub thread_hdl: JoinHandle<()>,
}

impl FetchStage {
    pub fn new(
        socket: UdpSocket,
        exit: Arc<AtomicBool>,
        packet_recycler: packet::PacketRecycler,
    ) -> Self {
        let (packet_sender, packet_receiver) = channel();
        let thread_hdl =
            streamer::receiver(socket, exit.clone(), packet_recycler.clone(), packet_sender);

        FetchStage {
            packet_receiver,
            thread_hdl,
        }
    }
}
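
A minimal usage sketch for the new stage, assuming `streamer::receiver` feeds packet batches into the channel as the module doc states, and that `PacketRecycler` implements `Default` as it does elsewhere in this tree (the function name is illustrative, not part of the diff):

    // Hypothetical smoke test: wire a FetchStage to a local UDP socket
    // and drain its channel.
    fn fetch_stage_smoke() {
        let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
        let exit = Arc::new(AtomicBool::new(false));
        let recycler = packet::PacketRecycler::default();
        let stage = FetchStage::new(socket, exit.clone(), recycler);
        // Each recv() yields one batch of packets read off the socket.
        if let Ok(_batch) = stage.packet_receiver.recv() {
            // hand the batch to the next stage...
        }
    }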

@@ -1,7 +1,7 @@
//! The `hash` module provides functions for creating SHA-256 hashes.

use generic_array::GenericArray;
use generic_array::typenum::U32;
use generic_array::GenericArray;
use sha2::{Digest, Sha256};

pub type Hash = GenericArray<u8, U32>;

166 src/ledger.rs
@@ -3,7 +3,6 @@

use bincode::{deserialize, serialize_into};
use entry::{next_entry, Entry};
use event::Event;
use hash::Hash;
use packet;
use packet::{SharedBlob, BLOB_DATA_SIZE, BLOB_SIZE};
@@ -12,10 +11,12 @@ use std::cmp::min;
use std::collections::VecDeque;
use std::io::Cursor;
use std::mem::size_of;
use transaction::Transaction;

pub trait Block {
    /// Verifies the hashes and counts of a slice of events are all consistent.
    /// Verifies the hashes and counts of a slice of transactions are all consistent.
    fn verify(&self, start_hash: &Hash) -> bool;
    fn to_blobs(&self, blob_recycler: &packet::BlobRecycler, q: &mut VecDeque<SharedBlob>);
}

impl Block for [Entry] {
@@ -24,81 +25,85 @@ impl Block for [Entry] {
        let entry_pairs = genesis.par_iter().chain(self).zip(self);
        entry_pairs.all(|(x0, x1)| x1.verify(&x0.id))
    }

    fn to_blobs(&self, blob_recycler: &packet::BlobRecycler, q: &mut VecDeque<SharedBlob>) {
        let mut start = 0;
        let mut end = 0;
        while start < self.len() {
            let mut entries: Vec<Vec<Entry>> = Vec::new();
            let mut total = 0;
            for i in &self[start..] {
                total += size_of::<Transaction>() * i.transactions.len();
                total += size_of::<Entry>();
                if total >= BLOB_DATA_SIZE {
                    break;
                }
                end += 1;
            }
            // See if we need to split the transactions
            if end <= start {
                let mut transaction_start = 0;
                let num_transactions_per_blob = BLOB_DATA_SIZE / size_of::<Transaction>();
                let total_entry_chunks = (self[end].transactions.len() + num_transactions_per_blob
                    - 1) / num_transactions_per_blob;
                trace!(
                    "splitting transactions end: {} total_chunks: {}",
                    end,
                    total_entry_chunks
                );
                for _ in 0..total_entry_chunks {
                    let transaction_end = min(
                        transaction_start + num_transactions_per_blob,
                        self[end].transactions.len(),
                    );
                    let mut entry = Entry {
                        num_hashes: self[end].num_hashes,
                        id: self[end].id,
                        transactions: self[end].transactions[transaction_start..transaction_end]
                            .to_vec(),
                    };
                    entries.push(vec![entry]);
                    transaction_start = transaction_end;
                }
                end += 1;
            } else {
                entries.push(self[start..end].to_vec());
            }

            for entry in entries {
                let b = blob_recycler.allocate();
                let pos = {
                    let mut bd = b.write().unwrap();
                    let mut out = Cursor::new(bd.data_mut());
                    serialize_into(&mut out, &entry).expect("failed to serialize output");
                    out.position() as usize
                };
                assert!(pos < BLOB_SIZE);
                b.write().unwrap().set_size(pos);
                q.push_back(b);
            }
            start = end;
        }
    }
}
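
The splitting logic above is ceiling division: an entry whose serialized transactions exceed `BLOB_DATA_SIZE` is carved into `ceil(len / per_blob)` single-entry blobs. A worked sketch with made-up sizes (not the real constants; the test name is illustrative):

    #[test]
    fn sketch_blob_capacity_arithmetic() {
        let blob_data_size = 64 * 1024; // stand-in for BLOB_DATA_SIZE
        let tx_size = 256; // stand-in for size_of::<Transaction>()
        let num_transactions_per_blob = blob_data_size / tx_size; // 256 per blob
        let entry_len = 10_000; // transactions in one oversized entry
        let total_entry_chunks =
            (entry_len + num_transactions_per_blob - 1) / num_transactions_per_blob;
        assert_eq!(total_entry_chunks, 40); // 40 blobs cover 10_000 transactions
    }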

/// Create a vector of Entries of length `event_set.len()` from `start_hash` hash, `num_hashes`, and `event_set`.
pub fn next_entries(start_hash: &Hash, num_hashes: u64, event_set: Vec<Vec<Event>>) -> Vec<Entry> {
/// Create a vector of Entries of length `transaction_batches.len()` from `start_hash` hash, `num_hashes`, and `transaction_batches`.
pub fn next_entries(
    start_hash: &Hash,
    num_hashes: u64,
    transaction_batches: Vec<Vec<Transaction>>,
) -> Vec<Entry> {
    let mut id = *start_hash;
    let mut entries = vec![];
    for event_list in &event_set {
        let events = event_list.clone();
        let entry = next_entry(&id, num_hashes, events);
    for transactions in &transaction_batches {
        let transactions = transactions.clone();
        let entry = next_entry(&id, num_hashes, transactions);
        id = entry.id;
        entries.push(entry);
    }
    entries
}

pub fn process_entry_list_into_blobs(
    list: &Vec<Entry>,
    blob_recycler: &packet::BlobRecycler,
    q: &mut VecDeque<SharedBlob>,
) {
    let mut start = 0;
    let mut end = 0;
    while start < list.len() {
        let mut entries: Vec<Vec<Entry>> = Vec::new();
        let mut total = 0;
        for i in &list[start..] {
            total += size_of::<Event>() * i.events.len();
            total += size_of::<Entry>();
            if total >= BLOB_DATA_SIZE {
                break;
            }
            end += 1;
        }
        // See if we need to split the events
        if end <= start {
            let mut event_start = 0;
            let num_events_per_blob = BLOB_DATA_SIZE / size_of::<Event>();
            let total_entry_chunks =
                (list[end].events.len() + num_events_per_blob - 1) / num_events_per_blob;
            trace!(
                "splitting events end: {} total_chunks: {}",
                end,
                total_entry_chunks
            );
            for _ in 0..total_entry_chunks {
                let event_end = min(event_start + num_events_per_blob, list[end].events.len());
                let mut entry = Entry {
                    num_hashes: list[end].num_hashes,
                    id: list[end].id,
                    events: list[end].events[event_start..event_end].to_vec(),
                };
                entries.push(vec![entry]);
                event_start = event_end;
            }
            end += 1;
        } else {
            entries.push(list[start..end].to_vec());
        }

        for entry in entries {
            let b = blob_recycler.allocate();
            let pos = {
                let mut bd = b.write().unwrap();
                let mut out = Cursor::new(bd.data_mut());
                serialize_into(&mut out, &entry).expect("failed to serialize output");
                out.position() as usize
            };
            assert!(pos < BLOB_SIZE);
            b.write().unwrap().set_size(pos);
            q.push_back(b);
        }
        start = end;
    }
}

pub fn reconstruct_entries_from_blobs(blobs: &VecDeque<SharedBlob>) -> Vec<Entry> {
    let mut entries_to_apply: Vec<Entry> = Vec::new();
    let mut last_id = Hash::default();
@@ -108,7 +113,7 @@ pub fn reconstruct_entries_from_blobs(blobs: &VecDeque<SharedBlob>) -> Vec<Entry
    for entry in entries {
        if entry.id == last_id {
            if let Some(last_entry) = entries_to_apply.last_mut() {
                last_entry.events.extend(entry.events);
                last_entry.transactions.extend(entry.transactions);
            }
        } else {
            last_id = entry.id;
@@ -147,17 +152,16 @@ mod tests {
        let zero = Hash::default();
        let one = hash(&zero);
        let keypair = KeyPair::new();
        let tr0 = Event::Transaction(Transaction::new(&keypair, keypair.pubkey(), 1, one));
        let events = vec![tr0.clone(); 10000];
        let e0 = Entry::new(&zero, 0, events);
        let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, one);
        let transactions = vec![tx0.clone(); 10000];
        let e0 = Entry::new(&zero, 0, transactions);

        let entry_list = vec![e0.clone(); 1];
        let entries = vec![e0.clone(); 1];
        let blob_recycler = BlobRecycler::default();
        let mut blob_q = VecDeque::new();
        process_entry_list_into_blobs(&entry_list, &blob_recycler, &mut blob_q);
        let entries = reconstruct_entries_from_blobs(&blob_q);
        entries.to_blobs(&blob_recycler, &mut blob_q);

        assert_eq!(entry_list, entries);
        assert_eq!(reconstruct_entries_from_blobs(&blob_q), entries);
    }

    #[test]
@@ -165,16 +169,16 @@ mod tests {
        let mut id = Hash::default();
        let next_id = hash(&id);
        let keypair = KeyPair::new();
        let tr0 = Event::Transaction(Transaction::new(&keypair, keypair.pubkey(), 1, next_id));
        let events = vec![tr0.clone(); 5];
        let event_set = vec![events.clone(); 5];
        let entries0 = next_entries(&id, 0, event_set);
        let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, next_id);
        let transactions = vec![tx0.clone(); 5];
        let transaction_batches = vec![transactions.clone(); 5];
        let entries0 = next_entries(&id, 0, transaction_batches);

        assert_eq!(entries0.len(), 5);

        let mut entries1 = vec![];
        for _ in 0..5 {
            let entry = next_entry(&id, 0, events.clone());
            let entry = next_entry(&id, 0, transactions.clone());
            id = entry.id;
            entries1.push(entry);
        }
@@ -189,7 +193,7 @@ mod bench {
    use ledger::*;

    #[bench]
    fn event_bench(bencher: &mut Bencher) {
    fn bench_next_entries(bencher: &mut Bencher) {
        let start_hash = Hash::default();
        let entries = next_entries(&start_hash, 10_000, vec![vec![]; 8]);
        bencher.iter(|| {
@@ -1,19 +1,19 @@
#![cfg_attr(feature = "unstable", feature(test))]
pub mod bank;
pub mod banking_stage;
pub mod budget;
pub mod crdt;
pub mod ecdsa;
pub mod entry;
pub mod entry_writer;
#[cfg(feature = "erasure")]
pub mod erasure;
pub mod event;
pub mod fetch_stage;
pub mod hash;
pub mod ledger;
pub mod logger;
pub mod mint;
pub mod packet;
pub mod plan;
pub mod payment_plan;
pub mod record_stage;
pub mod recorder;
pub mod replicate_stage;
@@ -23,8 +23,9 @@ pub mod request_stage;
pub mod result;
pub mod rpu;
pub mod server;
pub mod sig_verify_stage;
pub mod signature;
pub mod sigverify;
pub mod sigverify_stage;
pub mod streamer;
pub mod thin_client;
pub mod timing;
27
src/mint.rs
@@ -1,7 +1,6 @@
//! The `mint` module is a library for generating the chain's genesis block.

use entry::Entry;
use event::Event;
use hash::{hash, Hash};
use ring::rand::SystemRandom;
use signature::{KeyPair, KeyPairUtil, PublicKey};
@@ -47,15 +46,15 @@ impl Mint {
self.pubkey
}

pub fn create_events(&self) -> Vec<Event> {
pub fn create_transactions(&self) -> Vec<Transaction> {
let keypair = self.keypair();
let tr = Transaction::new(&keypair, self.pubkey(), self.tokens, self.seed());
vec![Event::Transaction(tr)]
let tx = Transaction::new(&keypair, self.pubkey(), self.tokens, self.seed());
vec![tx]
}

pub fn create_entries(&self) -> Vec<Entry> {
let e0 = Entry::new(&self.seed(), 0, vec![]);
let e1 = Entry::new(&e0.id, 0, self.create_events());
let e1 = Entry::new(&e0.id, 0, self.create_transactions());
vec![e0, e1]
}
}
@@ -69,20 +68,20 @@ pub struct MintDemo {
#[cfg(test)]
mod tests {
use super::*;
use budget::Budget;
use ledger::Block;
use plan::Plan;
use transaction::Instruction;
use transaction::{Instruction, Plan};

#[test]
fn test_create_events() {
let mut events = Mint::new(100).create_events().into_iter();
let Event::Transaction(tr) = events.next().unwrap();
if let Instruction::NewContract(contract) = tr.instruction {
if let Plan::Pay(payment) = contract.plan {
assert_eq!(tr.from, payment.to);
fn test_create_transactions() {
let mut transactions = Mint::new(100).create_transactions().into_iter();
let tx = transactions.next().unwrap();
if let Instruction::NewContract(contract) = tx.instruction {
if let Plan::Budget(Budget::Pay(payment)) = contract.plan {
assert_eq!(tx.from, payment.to);
}
}
assert_eq!(events.next(), None);
assert_eq!(transactions.next(), None);
}

#[test]
@@ -173,7 +173,7 @@ impl Packets {
let mut i = 0;
//DOCUMENTED SIDE-EFFECT
//Performance out of the IO without poll
// * block on the socket until its readable
// * block on the socket until it's readable
// * set the socket to non blocking
// * read until it fails
// * set it back to blocking before returning
@@ -236,6 +236,38 @@ pub fn to_packets<T: Serialize>(r: &PacketRecycler, xs: Vec<T>) -> Vec<SharedPac
return out;
}

pub fn to_blob<T: Serialize>(
resp: T,
rsp_addr: SocketAddr,
blob_recycler: &BlobRecycler,
) -> Result<SharedBlob> {
let blob = blob_recycler.allocate();
{
let mut b = blob.write().unwrap();
let v = serialize(&resp)?;
let len = v.len();
// TODO: we are not using .data_mut() method here because
// the raw bytes are being serialized and sent, this isn't the
// right interface, and we should create a separate path for
// sending request responses in the RPU
b.data[..len].copy_from_slice(&v);
b.meta.size = len;
b.meta.set_addr(&rsp_addr);
}
Ok(blob)
}

pub fn to_blobs<T: Serialize>(
rsps: Vec<(T, SocketAddr)>,
blob_recycler: &BlobRecycler,
) -> Result<VecDeque<SharedBlob>> {
let mut blobs = VecDeque::new();
for (resp, rsp_addr) in rsps {
blobs.push_back(to_blob(resp, rsp_addr, blob_recycler)?);
}
Ok(blobs)
}

const BLOB_INDEX_END: usize = size_of::<u64>();
const BLOB_ID_END: usize = BLOB_INDEX_END + size_of::<usize>() + size_of::<PublicKey>();

@@ -276,7 +308,7 @@ impl Blob {
let mut v = VecDeque::new();
//DOCUMENTED SIDE-EFFECT
//Performance out of the IO without poll
// * block on the socket until its readable
// * block on the socket until it's readable
// * set the socket to non blocking
// * read until it fails
// * set it back to blocking before returning
@@ -378,17 +410,17 @@ mod test {

#[test]
fn test_to_packets() {
let tr = Request::GetTransactionCount;
let tx = Request::GetTransactionCount;
let re = PacketRecycler::default();
let rv = to_packets(&re, vec![tr.clone(); 1]);
let rv = to_packets(&re, vec![tx.clone(); 1]);
assert_eq!(rv.len(), 1);
assert_eq!(rv[0].read().unwrap().packets.len(), 1);

let rv = to_packets(&re, vec![tr.clone(); NUM_PACKETS]);
let rv = to_packets(&re, vec![tx.clone(); NUM_PACKETS]);
assert_eq!(rv.len(), 1);
assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);

let rv = to_packets(&re, vec![tr.clone(); NUM_PACKETS + 1]);
let rv = to_packets(&re, vec![tx.clone(); NUM_PACKETS + 1]);
assert_eq!(rv.len(), 2);
assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);
assert_eq!(rv[1].read().unwrap().packets.len(), 1);
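A usage sketch for the to_blob()/to_blobs() helpers added above, assuming they and BlobRecycler are in scope; the "pong" payload and the address are placeholders, not values from the diff:

use std::net::SocketAddr;

// Serialize one response per (payload, addr) pair into reusable blobs.
fn respond_pong(blob_recycler: &BlobRecycler) -> Result<()> {
    let addr: SocketAddr = "127.0.0.1:8000".parse().unwrap();
    let rsps = vec![("pong".to_string(), addr)];
    let blobs = to_blobs(rsps, blob_recycler)?; // one blob per response
    assert_eq!(blobs.len(), 1);
    Ok(())
}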
31
src/payment_plan.rs
Normal file
@@ -0,0 +1,31 @@
//! The `plan` module provides a domain-specific language for payment plans. Users create Budget objects that
//! are given to an interpreter. The interpreter listens for `Witness` transactions,
//! which it uses to reduce the payment plan. When the plan is reduced to a
//! `Payment`, the payment is executed.

use chrono::prelude::*;
use signature::PublicKey;

#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Witness {
Timestamp(DateTime<Utc>),
Signature(PublicKey),
}

#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Payment {
pub tokens: i64,
pub to: PublicKey,
}

pub trait PaymentPlan {
/// Return Payment if the payment plan requires no additional Witnesses.
fn final_payment(&self) -> Option<Payment>;

/// Return true if the plan spends exactly `spendable_tokens`.
fn verify(&self, spendable_tokens: i64) -> bool;

/// Apply a witness to the payment plan to see if the plan can be reduced.
/// If so, modify the plan in-place.
fn apply_witness(&mut self, witness: &Witness);
}
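A toy implementation of the PaymentPlan trait above, assuming Witness, Payment, and PaymentPlan are in scope; SimplePlan is hypothetical and pays unconditionally, so no witness is ever needed:

// An unconditional plan: it is already reduced to a single Payment.
struct SimplePlan(Payment);

impl PaymentPlan for SimplePlan {
    fn final_payment(&self) -> Option<Payment> {
        Some(self.0.clone())
    }

    fn verify(&self, spendable_tokens: i64) -> bool {
        self.0.tokens == spendable_tokens
    }

    fn apply_witness(&mut self, _witness: &Witness) {
        // Nothing to reduce; the plan is already a payment.
    }
}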
@@ -2,21 +2,21 @@
//! It records Event items on behalf of its users. It continuously generates
//! new hashes, only stopping to check if it has been sent an Event item. It
//! tags each Event with an Entry, and sends it back. The Entry includes the
//! Event, the latest hash, and the number of hashes since the last event.
//! The resulting stream of entries represents ordered events in time.
//! Event, the latest hash, and the number of hashes since the last transaction.
//! The resulting stream of entries represents ordered transactions in time.

use entry::Entry;
use event::Event;
use hash::Hash;
use recorder::Recorder;
use std::sync::mpsc::{channel, Receiver, Sender, TryRecvError};
use std::thread::{spawn, JoinHandle};
use std::time::{Duration, Instant};
use transaction::Transaction;

#[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))]
pub enum Signal {
Tick,
Events(Vec<Event>),
Events(Vec<Transaction>),
}

pub struct RecordStage {
@@ -28,7 +28,7 @@ impl RecordStage {
/// A background thread that will continue tagging received Event messages and
/// sending back Entry messages until either the receiver or sender channel is closed.
pub fn new(
event_receiver: Receiver<Signal>,
transaction_receiver: Receiver<Signal>,
start_hash: &Hash,
tick_duration: Option<Duration>,
) -> Self {
@@ -39,10 +39,10 @@ impl RecordStage {
let mut recorder = Recorder::new(start_hash);
let duration_data = tick_duration.map(|dur| (Instant::now(), dur));
loop {
if let Err(_) = Self::process_events(
if let Err(_) = Self::process_transactions(
&mut recorder,
duration_data,
&event_receiver,
&transaction_receiver,
&entry_sender,
) {
return;
@@ -59,7 +59,7 @@ impl RecordStage {
}
}

pub fn process_events(
pub fn process_transactions(
recorder: &mut Recorder,
duration_data: Option<(Instant, Duration)>,
receiver: &Receiver<Signal>,
@@ -77,8 +77,8 @@ impl RecordStage {
let entry = recorder.record(vec![]);
sender.send(entry).or(Err(()))?;
}
Signal::Events(events) => {
let entry = recorder.record(events);
Signal::Events(transactions) => {
let entry = recorder.record(transactions);
sender.send(entry).or(Err(()))?;
}
},
@@ -99,15 +99,15 @@ mod tests {

#[test]
fn test_historian() {
let (input, event_receiver) = channel();
let (tx_sender, tx_receiver) = channel();
let zero = Hash::default();
let record_stage = RecordStage::new(event_receiver, &zero, None);
let record_stage = RecordStage::new(tx_receiver, &zero, None);

input.send(Signal::Tick).unwrap();
tx_sender.send(Signal::Tick).unwrap();
sleep(Duration::new(0, 1_000_000));
input.send(Signal::Tick).unwrap();
tx_sender.send(Signal::Tick).unwrap();
sleep(Duration::new(0, 1_000_000));
input.send(Signal::Tick).unwrap();
tx_sender.send(Signal::Tick).unwrap();

let entry0 = record_stage.entry_receiver.recv().unwrap();
let entry1 = record_stage.entry_receiver.recv().unwrap();
@@ -117,7 +117,7 @@ mod tests {
assert_eq!(entry1.num_hashes, 0);
assert_eq!(entry2.num_hashes, 0);

drop(input);
drop(tx_sender);
assert_eq!(record_stage.thread_hdl.join().unwrap(), ());

assert!([entry0, entry1, entry2].verify(&zero));
@@ -125,25 +125,25 @@ mod tests {

#[test]
fn test_historian_closed_sender() {
let (input, event_receiver) = channel();
let (tx_sender, tx_receiver) = channel();
let zero = Hash::default();
let record_stage = RecordStage::new(event_receiver, &zero, None);
let record_stage = RecordStage::new(tx_receiver, &zero, None);
drop(record_stage.entry_receiver);
input.send(Signal::Tick).unwrap();
tx_sender.send(Signal::Tick).unwrap();
assert_eq!(record_stage.thread_hdl.join().unwrap(), ());
}

#[test]
fn test_events() {
let (input, signal_receiver) = channel();
fn test_transactions() {
let (tx_sender, signal_receiver) = channel();
let zero = Hash::default();
let record_stage = RecordStage::new(signal_receiver, &zero, None);
let alice_keypair = KeyPair::new();
let bob_pubkey = KeyPair::new().pubkey();
let event0 = Event::new_transaction(&alice_keypair, bob_pubkey, 1, zero);
let event1 = Event::new_transaction(&alice_keypair, bob_pubkey, 2, zero);
input.send(Signal::Events(vec![event0, event1])).unwrap();
drop(input);
let tx0 = Transaction::new(&alice_keypair, bob_pubkey, 1, zero);
let tx1 = Transaction::new(&alice_keypair, bob_pubkey, 2, zero);
tx_sender.send(Signal::Events(vec![tx0, tx1])).unwrap();
drop(tx_sender);
let entries: Vec<_> = record_stage.entry_receiver.iter().collect();
assert_eq!(entries.len(), 1);
}
@@ -151,12 +151,12 @@ mod tests {
#[test]
#[ignore]
fn test_ticking_historian() {
let (input, event_receiver) = channel();
let (tx_sender, tx_receiver) = channel();
let zero = Hash::default();
let record_stage = RecordStage::new(event_receiver, &zero, Some(Duration::from_millis(20)));
let record_stage = RecordStage::new(tx_receiver, &zero, Some(Duration::from_millis(20)));
sleep(Duration::from_millis(900));
input.send(Signal::Tick).unwrap();
drop(input);
tx_sender.send(Signal::Tick).unwrap();
drop(tx_sender);
let entries: Vec<Entry> = record_stage.entry_receiver.iter().collect();
assert!(entries.len() > 1);
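A minimal runnable analogue of the RecordStage loop above: a worker thread drains a signal channel and emits one result per signal, exiting when either channel closes. Signal and the usize "entries" here are toy stand-ins, not the crate's types:

use std::sync::mpsc::channel;
use std::thread::spawn;

enum Signal {
    Tick,
    Events(Vec<u64>),
}

fn main() {
    let (signal_sender, signal_receiver) = channel();
    let (entry_sender, entry_receiver) = channel();
    let thread_hdl = spawn(move || {
        for signal in signal_receiver {
            let count = match signal {
                Signal::Tick => 0,
                Signal::Events(transactions) => transactions.len(),
            };
            // Mirror RecordStage: return once the entry receiver hangs up.
            if entry_sender.send(count).is_err() {
                return;
            }
        }
    });
    signal_sender.send(Signal::Tick).unwrap();
    signal_sender.send(Signal::Events(vec![1, 2])).unwrap();
    drop(signal_sender); // closing the sender ends the loop
    let entries: Vec<usize> = entry_receiver.iter().collect();
    assert_eq!(entries, vec![0, 2]);
    thread_hdl.join().unwrap();
}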
@@ -2,9 +2,9 @@
//! It records Event items on behalf of its users.

use entry::Entry;
use event::Event;
use hash::{hash, Hash};
use std::time::{Duration, Instant};
use transaction::Transaction;

pub struct Recorder {
last_hash: Hash,
@@ -26,8 +26,8 @@ impl Recorder {
self.num_hashes += 1;
}

pub fn record(&mut self, events: Vec<Event>) -> Entry {
Entry::new_mut(&mut self.last_hash, &mut self.num_hashes, events)
pub fn record(&mut self, transactions: Vec<Transaction>) -> Entry {
Entry::new_mut(&mut self.last_hash, &mut self.num_hashes, transactions)
}

pub fn tick(&mut self, start_time: Instant, tick_duration: Duration) -> Option<Entry> {
@@ -4,8 +4,8 @@ use bank::Bank;
use ledger;
use packet;
use result::Result;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread::{spawn, JoinHandle};
use std::time::Duration;
use streamer;
@@ -24,9 +24,9 @@ impl ReplicateStage {
let timer = Duration::new(1, 0);
let blobs = verified_receiver.recv_timeout(timer)?;
let entries = ledger::reconstruct_entries_from_blobs(&blobs);
let res = bank.process_verified_entries(entries);
let res = bank.process_entries(entries);
if res.is_err() {
error!("process_verified_entries {} {:?}", blobs.len(), res);
error!("process_entries {} {:?}", blobs.len(), res);
}
res?;
for blob in blobs {
@@ -1,20 +1,9 @@
//! The `request_stage` processes thin client Request messages.
//! The `request_processor` processes thin client Request messages.

use bank::Bank;
use bincode::{deserialize, serialize};
use event::Event;
use packet;
use packet::SharedPackets;
use rayon::prelude::*;
use request::{Request, Response};
use result::Result;
use std::collections::VecDeque;
use std::net::SocketAddr;
use std::sync::Arc;
use std::sync::mpsc::Receiver;
use std::time::Instant;
use streamer;
use timing;

pub struct RequestProcessor {
bank: Arc<Bank>,
@@ -62,104 +51,4 @@ impl RequestProcessor {
.filter_map(|(req, rsp_addr)| self.process_request(req, rsp_addr))
.collect()
}

fn deserialize_requests(p: &packet::Packets) -> Vec<Option<(Request, SocketAddr)>> {
p.packets
.par_iter()
.map(|x| {
deserialize(&x.data[0..x.meta.size])
.map(|req| (req, x.meta.addr()))
.ok()
})
.collect()
}

// Copy-paste of deserialize_requests() because I can't figure out how to
// route the lifetimes in a generic version.
pub fn deserialize_events(p: &packet::Packets) -> Vec<Option<(Event, SocketAddr)>> {
p.packets
.par_iter()
.map(|x| {
deserialize(&x.data[0..x.meta.size])
.map(|req| (req, x.meta.addr()))
.ok()
})
.collect()
}

/// Split Request list into verified transactions and the rest
fn serialize_response(
resp: Response,
rsp_addr: SocketAddr,
blob_recycler: &packet::BlobRecycler,
) -> Result<packet::SharedBlob> {
let blob = blob_recycler.allocate();
{
let mut b = blob.write().unwrap();
let v = serialize(&resp)?;
let len = v.len();
b.data[..len].copy_from_slice(&v);
b.meta.size = len;
b.meta.set_addr(&rsp_addr);
}
Ok(blob)
}

fn serialize_responses(
rsps: Vec<(Response, SocketAddr)>,
blob_recycler: &packet::BlobRecycler,
) -> Result<VecDeque<packet::SharedBlob>> {
let mut blobs = VecDeque::new();
for (resp, rsp_addr) in rsps {
blobs.push_back(Self::serialize_response(resp, rsp_addr, blob_recycler)?);
}
Ok(blobs)
}

pub fn process_request_packets(
&self,
packet_receiver: &Receiver<SharedPackets>,
blob_sender: &streamer::BlobSender,
packet_recycler: &packet::PacketRecycler,
blob_recycler: &packet::BlobRecycler,
) -> Result<()> {
let (batch, batch_len) = streamer::recv_batch(packet_receiver)?;

info!(
"@{:?} request_stage: processing: {}",
timing::timestamp(),
batch_len
);

let mut reqs_len = 0;
let proc_start = Instant::now();
for msgs in batch {
let reqs: Vec<_> = Self::deserialize_requests(&msgs.read().unwrap())
.into_iter()
.filter_map(|x| x)
.collect();
reqs_len += reqs.len();

let rsps = self.process_requests(reqs);

let blobs = Self::serialize_responses(rsps, blob_recycler)?;
if !blobs.is_empty() {
info!("process: sending blobs: {}", blobs.len());
//don't wake up the other side if there is nothing
blob_sender.send(blobs)?;
}
packet_recycler.recycle(msgs);
}
let total_time_s = timing::duration_as_s(&proc_start.elapsed());
let total_time_ms = timing::duration_as_ms(&proc_start.elapsed());
info!(
"@{:?} done process batches: {} time: {:?}ms reqs: {} reqs/s: {}",
timing::timestamp(),
batch_len,
total_time_ms,
reqs_len,
(reqs_len as f32) / (total_time_s)
);
Ok(())
}
}
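The deserialize_requests() pattern above (parallel deserialize, keep each packet's sender address, drop frames that fail) in isolation, assuming the bincode and rayon crates as used in this repo; the Req type and the byte frames are placeholders:

use rayon::prelude::*;
use serde::Deserialize;

#[derive(Deserialize)]
struct Req {
    n: u32,
}

// One Option per frame: None marks a frame that failed to deserialize.
fn deserialize_batch(frames: &[Vec<u8>]) -> Vec<Option<Req>> {
    frames
        .par_iter()
        .map(|bytes| bincode::deserialize(bytes).ok())
        .collect()
}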
@@ -1,13 +1,20 @@
//! The `request_stage` processes thin client Request messages.

use bincode::deserialize;
use packet;
use packet::SharedPackets;
use rayon::prelude::*;
use request::Request;
use request_processor::RequestProcessor;
use std::sync::Arc;
use result::Result;
use std::net::SocketAddr;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver};
use std::sync::Arc;
use std::thread::{spawn, JoinHandle};
use std::time::Instant;
use streamer;
use timing;

pub struct RequestStage {
pub thread_hdl: JoinHandle<()>,
@@ -16,6 +23,63 @@ pub struct RequestStage {
}

impl RequestStage {
pub fn deserialize_requests(p: &packet::Packets) -> Vec<Option<(Request, SocketAddr)>> {
p.packets
.par_iter()
.map(|x| {
deserialize(&x.data[0..x.meta.size])
.map(|req| (req, x.meta.addr()))
.ok()
})
.collect()
}

pub fn process_request_packets(
request_processor: &RequestProcessor,
packet_receiver: &Receiver<SharedPackets>,
blob_sender: &streamer::BlobSender,
packet_recycler: &packet::PacketRecycler,
blob_recycler: &packet::BlobRecycler,
) -> Result<()> {
let (batch, batch_len) = streamer::recv_batch(packet_receiver)?;

info!(
"@{:?} request_stage: processing: {}",
timing::timestamp(),
batch_len
);

let mut reqs_len = 0;
let proc_start = Instant::now();
for msgs in batch {
let reqs: Vec<_> = Self::deserialize_requests(&msgs.read().unwrap())
.into_iter()
.filter_map(|x| x)
.collect();
reqs_len += reqs.len();

let rsps = request_processor.process_requests(reqs);

let blobs = packet::to_blobs(rsps, blob_recycler)?;
if !blobs.is_empty() {
info!("process: sending blobs: {}", blobs.len());
//don't wake up the other side if there is nothing
blob_sender.send(blobs)?;
}
packet_recycler.recycle(msgs);
}
let total_time_s = timing::duration_as_s(&proc_start.elapsed());
let total_time_ms = timing::duration_as_ms(&proc_start.elapsed());
info!(
"@{:?} done process batches: {} time: {:?}ms reqs: {} reqs/s: {}",
timing::timestamp(),
batch_len,
total_time_ms,
reqs_len,
(reqs_len as f32) / (total_time_s)
);
Ok(())
}
pub fn new(
request_processor: RequestProcessor,
exit: Arc<AtomicBool>,
@@ -27,7 +91,8 @@ impl RequestStage {
let request_processor_ = request_processor.clone();
let (blob_sender, blob_receiver) = channel();
let thread_hdl = spawn(move || loop {
let e = request_processor_.process_request_packets(
let e = Self::process_request_packets(
&request_processor_,
&packet_receiver,
&blob_sender,
&packet_recycler,
@@ -80,9 +80,9 @@ mod tests {
use std::io::Write;
use std::net::SocketAddr;
use std::panic;
use std::sync::mpsc::channel;
use std::sync::mpsc::RecvError;
use std::sync::mpsc::RecvTimeoutError;
use std::sync::mpsc::channel;
use std::thread;

fn addr_parse_error() -> Result<SocketAddr> {
@@ -6,9 +6,9 @@ use packet;
use request_processor::RequestProcessor;
use request_stage::RequestStage;
use std::net::UdpSocket;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread::JoinHandle;
use streamer;

@@ -1,15 +1,17 @@
//! The `server` module hosts all the server microservices.

use bank::Bank;
use crdt::ReplicatedData;
use crdt::{Crdt, ReplicatedData};
use hash::Hash;
use packet;
use rpu::Rpu;
use std::io::Write;
use std::net::UdpSocket;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, RwLock};
use std::thread::JoinHandle;
use std::time::Duration;
use streamer;
use tpu::Tpu;
use tvu::Tvu;

@@ -24,7 +26,7 @@ impl Server {
tick_duration: Option<Duration>,
me: ReplicatedData,
requests_socket: UdpSocket,
events_socket: UdpSocket,
transactions_socket: UdpSocket,
broadcast_socket: UdpSocket,
respond_socket: UdpSocket,
gossip_socket: UdpSocket,
@@ -35,18 +37,34 @@ impl Server {
let mut thread_hdls = vec![];
let rpu = Rpu::new(bank.clone(), requests_socket, respond_socket, exit.clone());
thread_hdls.extend(rpu.thread_hdls);

let blob_recycler = packet::BlobRecycler::default();
let tpu = Tpu::new(
bank.clone(),
start_hash,
tick_duration,
me,
events_socket,
broadcast_socket,
gossip_socket,
transactions_socket,
blob_recycler.clone(),
exit.clone(),
writer,
);
thread_hdls.extend(tpu.thread_hdls);

let crdt = Arc::new(RwLock::new(Crdt::new(me)));
let t_gossip = Crdt::gossip(crdt.clone(), exit.clone());
let window = streamer::default_window();
let t_listen = Crdt::listen(crdt.clone(), window.clone(), gossip_socket, exit.clone());

let t_broadcast = streamer::broadcaster(
broadcast_socket,
exit.clone(),
crdt.clone(),
window,
blob_recycler.clone(),
tpu.blob_receiver,
);
thread_hdls.extend(vec![t_gossip, t_listen, t_broadcast]);

Server { thread_hdls }
}
pub fn new_validator(
@@ -1,7 +1,7 @@
//! The `signature` module provides functionality for public, and private keys.

use generic_array::GenericArray;
use generic_array::typenum::{U32, U64};
use generic_array::GenericArray;
use rand::{ChaChaRng, Rng, SeedableRng};
use rayon::prelude::*;
use ring::error::Unspecified;
@@ -2,7 +2,7 @@ use packet::{Packet, SharedPackets};
use std::mem::size_of;
use transaction::{PUB_KEY_OFFSET, SIGNED_DATA_OFFSET, SIG_OFFSET};

pub const TX_OFFSET: usize = 4;
pub const TX_OFFSET: usize = 0;

#[cfg(feature = "cuda")]
#[repr(C)]
@@ -55,7 +55,7 @@ fn batch_size(batches: &Vec<SharedPackets>) -> usize {
batches
.iter()
.map(|p| p.read().unwrap().packets.len())
.fold(0, |x, y| x + y)
.sum()
}

#[cfg(not(feature = "cuda"))]
@@ -143,33 +143,32 @@ pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
#[cfg(test)]
mod tests {
use bincode::serialize;
use ecdsa;
use event::Event;
use packet::{Packet, Packets, SharedPackets};
use sigverify;
use std::sync::RwLock;
use transaction::Transaction;
use transaction::{memfind, test_tx};

#[test]
fn test_layout() {
let tr = test_tx();
let tx = serialize(&tr).unwrap();
let packet = serialize(&Event::Transaction(tr)).unwrap();
assert_matches!(memfind(&packet, &tx), Some(ecdsa::TX_OFFSET));
let tx = test_tx();
let tx_bytes = serialize(&tx).unwrap();
let packet = serialize(&tx).unwrap();
assert_matches!(memfind(&packet, &tx_bytes), Some(sigverify::TX_OFFSET));
assert_matches!(memfind(&packet, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), None);
}

fn make_packet_from_transaction(tr: Transaction) -> Packet {
let tx = serialize(&Event::Transaction(tr)).unwrap();
fn make_packet_from_transaction(tx: Transaction) -> Packet {
let tx_bytes = serialize(&tx).unwrap();
let mut packet = Packet::default();
packet.meta.size = tx.len();
packet.data[..packet.meta.size].copy_from_slice(&tx);
packet.meta.size = tx_bytes.len();
packet.data[..packet.meta.size].copy_from_slice(&tx_bytes);
return packet;
}

fn test_verify_n(n: usize, modify_data: bool) {
let tr = test_tx();
let mut packet = make_packet_from_transaction(tr);
let tx = test_tx();
let mut packet = make_packet_from_transaction(tx);

// jumble some data to test failure
if modify_data {
@@ -186,7 +185,7 @@ mod tests {
let batches = vec![shared_packets.clone(), shared_packets.clone()];

// verify packets
let ans = ecdsa::ed25519_verify(&batches);
let ans = sigverify::ed25519_verify(&batches);

// check result
let ref_ans = if modify_data { 0u8 } else { 1u8 };
@@ -1,9 +1,9 @@
//! The `sig_verify_stage` implements the signature verification stage of the TPU.
//! The `sigverify_stage` implements the signature verification stage of the TPU.

use ecdsa;
use packet::SharedPackets;
use rand::{thread_rng, Rng};
use result::Result;
use sigverify;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::{Arc, Mutex};
@@ -28,7 +28,7 @@ impl SigVerifyStage {
}

fn verify_batch(batch: Vec<SharedPackets>) -> Vec<(SharedPackets, Vec<u8>)> {
let r = ecdsa::ed25519_verify(&batch);
let r = sigverify::ed25519_verify(&batch);
batch.into_iter().zip(r).collect()
}
@@ -1,8 +1,9 @@
//! The `streamer` module defines a set of services for effecently pulling data from udp sockets.
//! The `streamer` module defines a set of services for efficiently pulling data from UDP sockets.
//!
use crdt::Crdt;
#[cfg(feature = "erasure")]
use erasure;
use packet::{Blob, BlobRecycler, PacketRecycler, SharedBlob, SharedPackets};
use packet::{Blob, BlobRecycler, PacketRecycler, SharedBlob, SharedPackets, BLOB_SIZE};
use result::{Error, Result};
use std::collections::VecDeque;
use std::net::{SocketAddr, UdpSocket};
@@ -176,10 +177,11 @@ fn repair_window(
trace!("repair_window counter {} {}", *times, *consumed);
return Ok(());
}
info!("repair_window request {} {}", *consumed, *received);
let sock = UdpSocket::bind("0.0.0.0:0")?;
for (to, req) in reqs {
//todo cache socket
info!("repair_window request {} {} {}", *consumed, *received, to);
assert!(req.len() < BLOB_SIZE);
sock.send_to(&req, to)?;
}
Ok(())
@@ -220,11 +222,11 @@ fn recv_window(
);
if p.get_id().expect("get_id in fn recv_window") == leader_id {
//TODO
//need to copy the retransmited blob
//need to copy the retransmitted blob
//otherwise we get into races with which thread
//should do the recycling
//
//a better absraction would be to recycle when the blob
//a better abstraction would be to recycle when the blob
//is dropped via a weakref to the recycler
let nv = recycler.allocate();
{
@@ -477,7 +479,7 @@ pub fn retransmitter(
mod bench {
extern crate test;
use self::test::Bencher;
use packet::{Packet, PacketRecycler, PACKET_DATA_SIZE};
use packet::{Packet, PacketRecycler, BLOB_SIZE, PACKET_DATA_SIZE};
use result::Result;
use std::net::{SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, Ordering};
@@ -509,6 +511,7 @@ mod bench {
let mut num = 0;
for p in msgs_.read().unwrap().packets.iter() {
let a = p.meta.addr();
assert!(p.meta.size < BLOB_SIZE);
send.send_to(&p.data[..p.meta.size], &a).unwrap();
num += 1;
}
@@ -537,7 +540,8 @@ mod bench {
}
})
}
fn run_streamer_bench() -> Result<()> {

fn bench_streamer_with_result() -> Result<()> {
let read = UdpSocket::bind("127.0.0.1:0")?;
read.set_read_timeout(Some(Duration::new(1, 0)))?;

@@ -572,8 +576,8 @@ mod bench {
Ok(())
}
#[bench]
pub fn streamer_bench(_bench: &mut Bencher) {
run_streamer_bench().unwrap();
pub fn bench_streamer(_bench: &mut Bencher) {
bench_streamer_with_result().unwrap();
}
}

@@ -593,8 +597,8 @@ mod test {
use std::sync::{Arc, RwLock};
use std::thread::sleep;
use std::time::Duration;
use streamer::{default_window, BlobReceiver, PacketReceiver};
use streamer::{blob_receiver, receiver, responder, retransmitter, window};
use streamer::{default_window, BlobReceiver, PacketReceiver};

fn get_msgs(r: PacketReceiver, num: &mut usize) {
for _t in 0..5 {
@@ -672,14 +676,14 @@ mod test {
let addr = read.local_addr().unwrap();
let send = UdpSocket::bind("127.0.0.1:0").expect("bind");
let serve = UdpSocket::bind("127.0.0.1:0").expect("bind");
let event = UdpSocket::bind("127.0.0.1:0").expect("bind");
let transaction = UdpSocket::bind("127.0.0.1:0").expect("bind");
let exit = Arc::new(AtomicBool::new(false));
let rep_data = ReplicatedData::new(
pubkey_me,
read.local_addr().unwrap(),
send.local_addr().unwrap(),
serve.local_addr().unwrap(),
event.local_addr().unwrap(),
transaction.local_addr().unwrap(),
);
let mut crdt_me = Crdt::new(rep_data);
let me_id = crdt_me.my_data().id;
@@ -736,14 +740,14 @@ mod test {
let gossip = UdpSocket::bind("127.0.0.1:0").unwrap();
let replicate = UdpSocket::bind("127.0.0.1:0").unwrap();
let serve = UdpSocket::bind("127.0.0.1:0").unwrap();
let event = UdpSocket::bind("127.0.0.1:0").unwrap();
let transaction = UdpSocket::bind("127.0.0.1:0").unwrap();
let pubkey = KeyPair::new().pubkey();
let d = ReplicatedData::new(
pubkey,
gossip.local_addr().unwrap(),
replicate.local_addr().unwrap(),
serve.local_addr().unwrap(),
event.local_addr().unwrap(),
transaction.local_addr().unwrap(),
);
trace!("data: {:?}", d);
let crdt = Crdt::new(d);
@@ -4,7 +4,6 @@
//! unstable and may change in future releases.

use bincode::{deserialize, serialize};
use event::Event;
use futures::future::{ok, FutureResult};
use hash::Hash;
use request::{Request, Response};
@@ -17,8 +16,8 @@ use transaction::Transaction;
pub struct ThinClient {
requests_addr: SocketAddr,
requests_socket: UdpSocket,
events_addr: SocketAddr,
events_socket: UdpSocket,
transactions_addr: SocketAddr,
transactions_socket: UdpSocket,
last_id: Option<Hash>,
transaction_count: u64,
balances: HashMap<PublicKey, Option<i64>>,
@@ -26,19 +25,19 @@ pub struct ThinClient {

impl ThinClient {
/// Create a new ThinClient that will interface with Rpu
/// over `requests_socket` and `events_socket`. To receive responses, the caller must bind `socket`
/// over `requests_socket` and `transactions_socket`. To receive responses, the caller must bind `socket`
/// to a public address before invoking ThinClient methods.
pub fn new(
requests_addr: SocketAddr,
requests_socket: UdpSocket,
events_addr: SocketAddr,
events_socket: UdpSocket,
transactions_addr: SocketAddr,
transactions_socket: UdpSocket,
) -> Self {
let client = ThinClient {
requests_addr,
requests_socket,
events_addr,
events_socket,
transactions_addr,
transactions_socket,
last_id: None,
transaction_count: 0,
balances: HashMap::new(),
@@ -74,10 +73,10 @@ impl ThinClient {

/// Send a signed Transaction to the server for processing. This method
/// does not wait for a response.
pub fn transfer_signed(&self, tr: Transaction) -> io::Result<usize> {
let event = Event::Transaction(tr);
let data = serialize(&event).expect("serialize Transaction in pub fn transfer_signed");
self.events_socket.send_to(&data, &self.events_addr)
pub fn transfer_signed(&self, tx: Transaction) -> io::Result<usize> {
let data = serialize(&tx).expect("serialize Transaction in pub fn transfer_signed");
self.transactions_socket
.send_to(&data, &self.transactions_addr)
}

/// Creates, signs, and processes a Transaction. Useful for writing unit-tests.
@@ -88,9 +87,9 @@ impl ThinClient {
to: PublicKey,
last_id: &Hash,
) -> io::Result<Signature> {
let tr = Transaction::new(keypair, to, n, *last_id);
let sig = tr.sig;
self.transfer_signed(tr).map(|_| sig)
let tx = Transaction::new(keypair, to, n, *last_id);
let sig = tx.sig;
self.transfer_signed(tx).map(|_| sig)
}

/// Request the balance of the user holding `pubkey`. This method blocks
@@ -156,44 +155,40 @@ impl ThinClient {
}
ok(self.last_id.expect("some last_id"))
}
}

#[cfg(test)]
pub fn poll_get_balance(client: &mut ThinClient, pubkey: &PublicKey) -> io::Result<i64> {
use std::time::Instant;
pub fn poll_get_balance(&mut self, pubkey: &PublicKey) -> io::Result<i64> {
use std::time::Instant;

let mut balance;
let now = Instant::now();
loop {
balance = client.get_balance(pubkey);
if balance.is_ok() || now.elapsed().as_secs() > 1 {
break;
let mut balance;
let now = Instant::now();
loop {
balance = self.get_balance(pubkey);
if balance.is_ok() || now.elapsed().as_secs() > 1 {
break;
}
}
}

balance
balance
}
}
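poll_get_balance() above is a retry-until-ok-or-timeout loop (the method keeps a hard-coded one-second bound). The same shape as a generic helper, a sketch with hypothetical names:

use std::time::{Duration, Instant};

// Retry op() until it succeeds or `timeout` elapses, returning the last result.
fn poll_until<T, E>(timeout: Duration, mut op: impl FnMut() -> Result<T, E>) -> Result<T, E> {
    let start = Instant::now();
    loop {
        let result = op();
        if result.is_ok() || start.elapsed() > timeout {
            return result;
        }
    }
}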
#[cfg(test)]
mod tests {
use super::*;
use bank::Bank;
use crdt::{Crdt, ReplicatedData};
use budget::Budget;
use futures::Future;
use logger;
use mint::Mint;
use plan::Plan;
use server::Server;
use signature::{KeyPair, KeyPairUtil};
use std::io::sink;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
use std::thread::JoinHandle;
use std::sync::Arc;
use std::thread::sleep;
use std::time::Duration;
use streamer::default_window;
use transaction::Instruction;
use tvu::tests::TestNode;
use transaction::{Instruction, Plan};
use tvu::TestNode;

#[test]
fn test_thin_client() {
@@ -211,7 +206,7 @@ mod tests {
Some(Duration::from_millis(30)),
leader.data.clone(),
leader.sockets.requests,
leader.sockets.event,
leader.sockets.transaction,
leader.sockets.broadcast,
leader.sockets.respond,
leader.sockets.gossip,
@@ -221,19 +216,19 @@ mod tests {
sleep(Duration::from_millis(900));

let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let events_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();

let mut client = ThinClient::new(
leader.data.requests_addr,
requests_socket,
leader.data.events_addr,
events_socket,
leader.data.transactions_addr,
transactions_socket,
);
let last_id = client.get_last_id().wait().unwrap();
let _sig = client
.transfer(500, &alice.keypair(), bob_pubkey, &last_id)
.unwrap();
let balance = poll_get_balance(&mut client, &bob_pubkey);
let balance = client.poll_get_balance(&bob_pubkey);
assert_eq!(balance.unwrap(), 500);
exit.store(true, Ordering::Relaxed);
for t in server.thread_hdls {
@@ -256,7 +251,7 @@ mod tests {
Some(Duration::from_millis(30)),
leader.data.clone(),
leader.sockets.requests,
leader.sockets.event,
leader.sockets.transaction,
leader.sockets.broadcast,
leader.sockets.respond,
leader.sockets.gossip,
@@ -269,186 +264,33 @@ mod tests {
requests_socket
.set_read_timeout(Some(Duration::new(5, 0)))
.unwrap();
let events_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut client = ThinClient::new(
leader.data.requests_addr,
requests_socket,
leader.data.events_addr,
events_socket,
leader.data.transactions_addr,
transactions_socket,
);
let last_id = client.get_last_id().wait().unwrap();

let tr = Transaction::new(&alice.keypair(), bob_pubkey, 500, last_id);
let tx = Transaction::new(&alice.keypair(), bob_pubkey, 500, last_id);

let _sig = client.transfer_signed(tr).unwrap();
let _sig = client.transfer_signed(tx).unwrap();

let last_id = client.get_last_id().wait().unwrap();

let mut tr2 = Transaction::new(&alice.keypair(), bob_pubkey, 501, last_id);
if let Instruction::NewContract(contract) = &mut tr2.instruction {
contract.tokens = 502;
contract.plan = Plan::new_payment(502, bob_pubkey);
contract.plan = Plan::Budget(Budget::new_payment(502, bob_pubkey));
}
let _sig = client.transfer_signed(tr2).unwrap();

let balance = poll_get_balance(&mut client, &bob_pubkey);
let balance = client.poll_get_balance(&bob_pubkey);
assert_eq!(balance.unwrap(), 500);
exit.store(true, Ordering::Relaxed);
for t in server.thread_hdls {
t.join().unwrap();
}
}
fn validator(
leader: &ReplicatedData,
exit: Arc<AtomicBool>,
alice: &Mint,
threads: &mut Vec<JoinHandle<()>>,
) {
let validator = TestNode::new();
let replicant_bank = Bank::new(&alice);
let mut ts = Server::new_validator(
replicant_bank,
validator.data.clone(),
validator.sockets.requests,
validator.sockets.respond,
validator.sockets.replicate,
validator.sockets.gossip,
leader.clone(),
exit.clone(),
);
threads.append(&mut ts.thread_hdls);
}

fn converge(
leader: &ReplicatedData,
exit: Arc<AtomicBool>,
num_nodes: usize,
threads: &mut Vec<JoinHandle<()>>,
) -> Vec<ReplicatedData> {
//lets spy on the network
let mut spy = TestNode::new();
let daddr = "0.0.0.0:0".parse().unwrap();
let me = spy.data.id.clone();
spy.data.replicate_addr = daddr;
spy.data.requests_addr = daddr;
let mut spy_crdt = Crdt::new(spy.data);
spy_crdt.insert(&leader);
spy_crdt.set_leader(leader.id);

let spy_ref = Arc::new(RwLock::new(spy_crdt));
let spy_window = default_window();
let t_spy_listen = Crdt::listen(
spy_ref.clone(),
spy_window,
spy.sockets.gossip,
exit.clone(),
);
let t_spy_gossip = Crdt::gossip(spy_ref.clone(), exit.clone());
//wait for the network to converge
let mut converged = false;
for _ in 0..30 {
let num = spy_ref.read().unwrap().convergence();
if num == num_nodes as u64 {
converged = true;
break;
}
sleep(Duration::new(1, 0));
}
assert!(converged);
threads.push(t_spy_listen);
threads.push(t_spy_gossip);
let v: Vec<ReplicatedData> = spy_ref
.read()
.unwrap()
.table
.values()
.into_iter()
.filter(|x| x.id != me)
.map(|x| x.clone())
.collect();
v.clone()
}
#[test]
fn test_multi_node() {
logger::setup();
const N: usize = 5;
trace!("test_multi_accountant_stub");
let leader = TestNode::new();
let alice = Mint::new(10_000);
let bob_pubkey = KeyPair::new().pubkey();
let exit = Arc::new(AtomicBool::new(false));

let leader_bank = Bank::new(&alice);
let server = Server::new_leader(
leader_bank,
alice.last_id(),
None,
leader.data.clone(),
leader.sockets.requests,
leader.sockets.event,
leader.sockets.broadcast,
leader.sockets.respond,
leader.sockets.gossip,
exit.clone(),
sink(),
);

let mut threads = server.thread_hdls;
for _ in 0..N {
validator(&leader.data, exit.clone(), &alice, &mut threads);
}
let servers = converge(&leader.data, exit.clone(), N + 2, &mut threads);
//contains the leader addr as well
assert_eq!(servers.len(), N + 1);
//verify leader can do transfer
let leader_balance = tx_and_retry_get_balance(&leader.data, &alice, &bob_pubkey).unwrap();
assert_eq!(leader_balance, 500);
//verify validator has the same balance
let mut success = 0usize;
for server in servers.iter() {
let mut client = mk_client(server);
if let Ok(bal) = poll_get_balance(&mut client, &bob_pubkey) {
trace!("validator balance {}", bal);
if bal == leader_balance {
success += 1;
}
}
}
assert_eq!(success, servers.len());
exit.store(true, Ordering::Relaxed);
for t in threads {
t.join().unwrap();
}
}

fn mk_client(leader: &ReplicatedData) -> ThinClient {
let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
requests_socket
.set_read_timeout(Some(Duration::new(1, 0)))
.unwrap();
let events_socket = UdpSocket::bind("0.0.0.0:0").unwrap();

ThinClient::new(
leader.requests_addr,
requests_socket,
leader.events_addr,
events_socket,
)
}

fn tx_and_retry_get_balance(
leader: &ReplicatedData,
alice: &Mint,
bob_pubkey: &PublicKey,
) -> io::Result<i64> {
let mut client = mk_client(leader);
trace!("getting leader last_id");
let last_id = client.get_last_id().wait().unwrap();
info!("executing leader transer");
let _sig = client
.transfer(500, &alice.keypair(), *bob_pubkey, &last_id)
.unwrap();
poll_get_balance(&mut client, bob_pubkey)
}

}
61
src/tpu.rs
@@ -3,22 +3,22 @@

use bank::Bank;
use banking_stage::BankingStage;
use crdt::{Crdt, ReplicatedData};
use fetch_stage::FetchStage;
use hash::Hash;
use packet;
use packet::{BlobRecycler, PacketRecycler};
use record_stage::RecordStage;
use sig_verify_stage::SigVerifyStage;
use sigverify_stage::SigVerifyStage;
use std::io::Write;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::channel;
use std::sync::{Arc, Mutex, RwLock};
use std::sync::{Arc, Mutex};
use std::thread::JoinHandle;
use std::time::Duration;
use streamer;
use streamer::BlobReceiver;
use write_stage::WriteStage;

pub struct Tpu {
pub blob_receiver: BlobReceiver,
pub thread_hdls: Vec<JoinHandle<()>>,
}

@@ -27,29 +27,22 @@ impl Tpu {
bank: Arc<Bank>,
start_hash: Hash,
tick_duration: Option<Duration>,
me: ReplicatedData,
events_socket: UdpSocket,
broadcast_socket: UdpSocket,
gossip: UdpSocket,
transactions_socket: UdpSocket,
blob_recycler: BlobRecycler,
exit: Arc<AtomicBool>,
writer: W,
) -> Self {
let packet_recycler = packet::PacketRecycler::default();
let (packet_sender, packet_receiver) = channel();
let t_receiver = streamer::receiver(
events_socket,
exit.clone(),
packet_recycler.clone(),
packet_sender,
);
let packet_recycler = PacketRecycler::default();

let sig_verify_stage = SigVerifyStage::new(exit.clone(), packet_receiver);
let fetch_stage =
FetchStage::new(transactions_socket, exit.clone(), packet_recycler.clone());

let sigverify_stage = SigVerifyStage::new(exit.clone(), fetch_stage.packet_receiver);

let blob_recycler = packet::BlobRecycler::default();
let banking_stage = BankingStage::new(
bank.clone(),
exit.clone(),
sig_verify_stage.verified_receiver,
sigverify_stage.verified_receiver,
packet_recycler.clone(),
);

@@ -64,30 +57,16 @@ impl Tpu {
record_stage.entry_receiver,
);

let crdt = Arc::new(RwLock::new(Crdt::new(me)));
let t_gossip = Crdt::gossip(crdt.clone(), exit.clone());
let window = streamer::default_window();
let t_listen = Crdt::listen(crdt.clone(), window.clone(), gossip, exit.clone());

let t_broadcast = streamer::broadcaster(
broadcast_socket,
exit.clone(),
crdt.clone(),
window,
blob_recycler.clone(),
write_stage.blob_receiver,
);

let mut thread_hdls = vec![
t_receiver,
fetch_stage.thread_hdl,
banking_stage.thread_hdl,
record_stage.thread_hdl,
write_stage.thread_hdl,
t_gossip,
t_listen,
t_broadcast,
];
thread_hdls.extend(sig_verify_stage.thread_hdls.into_iter());
Tpu { thread_hdls }
thread_hdls.extend(sigverify_stage.thread_hdls.into_iter());
Tpu {
blob_receiver: write_stage.blob_receiver,
thread_hdls,
}
}
}
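Tpu::new() above now chains one channel hand-off per stage: FetchStage feeds SigVerifyStage, which feeds BankingStage, and so on through record and write. A runnable analogue of that wiring with toy stages standing in for the real ones (the crate's types are not used here):

use std::sync::mpsc::channel;
use std::thread::spawn;

fn main() {
    let (fetch_sender, fetch_receiver) = channel();
    let (verified_sender, verified_receiver) = channel();

    // "fetch stage": produce packets
    let t_fetch = spawn(move || {
        for packet in 0..3 {
            fetch_sender.send(packet).unwrap();
        }
    });

    // "sigverify stage": tag each packet with a verification result
    let t_sigverify = spawn(move || {
        for packet in fetch_receiver {
            verified_sender.send((packet, true)).unwrap();
        }
    });

    // "banking stage": consume only verified packets
    let t_banking = spawn(move || {
        for (packet, verified) in verified_receiver {
            assert!(verified);
            println!("processed packet {}", packet);
        }
    });

    for thread_hdl in vec![t_fetch, t_sigverify, t_banking] {
        thread_hdl.join().unwrap();
    }
}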
@@ -1,16 +1,42 @@
//! The `transaction` module provides functionality for creating log transactions.

use bincode::serialize;
use budget::{Budget, Condition};
use chrono::prelude::*;
use hash::Hash;
use plan::{Condition, Payment, Plan};
use rayon::prelude::*;
use payment_plan::{Payment, PaymentPlan, Witness};
use signature::{KeyPair, KeyPairUtil, PublicKey, Signature, SignatureUtil};

pub const SIGNED_DATA_OFFSET: usize = 112;
pub const SIG_OFFSET: usize = 8;
pub const PUB_KEY_OFFSET: usize = 80;

#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Plan {
Budget(Budget),
}

// A proxy for the underlying DSL.
impl PaymentPlan for Plan {
fn final_payment(&self) -> Option<Payment> {
match self {
Plan::Budget(budget) => budget.final_payment(),
}
}

fn verify(&self, spendable_tokens: i64) -> bool {
match self {
Plan::Budget(budget) => budget.verify(spendable_tokens),
}
}

fn apply_witness(&mut self, witness: &Witness) {
match self {
Plan::Budget(budget) => budget.apply_witness(witness),
}
}
}
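A sketch of the Plan proxy above in use, assuming (as the mint and transaction tests in this diff suggest) that Budget::Pay wraps a Payment that final_payment() yields immediately:

fn demo_plan_proxy() {
    let payment = Payment {
        tokens: 5,
        to: PublicKey::default(),
    };
    let plan = Plan::Budget(Budget::Pay(payment.clone()));
    assert_eq!(plan.final_payment(), Some(payment)); // assumed: Pay is already final
    assert!(plan.verify(5)); // the plan spends exactly its 5 tokens
}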
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
|
||||
pub struct Contract {
|
||||
pub tokens: i64,
|
||||
@@ -30,6 +56,7 @@ pub struct Transaction {
|
||||
pub from: PublicKey,
|
||||
pub instruction: Instruction,
|
||||
pub last_id: Hash,
|
||||
pub fee: i64,
|
||||
}
|
||||
|
||||
impl Transaction {
|
||||
@@ -37,35 +64,53 @@ impl Transaction {
|
||||
from_keypair: &KeyPair,
|
||||
instruction: Instruction,
|
||||
last_id: Hash,
|
||||
fee: i64,
|
||||
) -> Self {
|
||||
let from = from_keypair.pubkey();
|
||||
let mut tr = Transaction {
|
||||
let mut tx = Transaction {
|
||||
sig: Signature::default(),
|
||||
instruction,
|
||||
last_id,
|
||||
from,
|
||||
fee,
|
||||
};
|
||||
tr.sign(from_keypair);
|
||||
tr
|
||||
tx.sign(from_keypair);
|
||||
tx
|
||||
}
|
||||
|
||||
/// Create and sign a new Transaction. Used for unit-testing.
|
||||
pub fn new_taxed(
|
||||
from_keypair: &KeyPair,
|
||||
to: PublicKey,
|
||||
tokens: i64,
|
||||
fee: i64,
|
||||
last_id: Hash,
|
||||
) -> Self {
|
||||
let payment = Payment {
|
||||
tokens: tokens - fee,
|
||||
to,
|
||||
};
|
||||
let budget = Budget::Pay(payment);
|
||||
let plan = Plan::Budget(budget);
|
||||
let instruction = Instruction::NewContract(Contract { plan, tokens });
|
||||
Self::new_from_instruction(from_keypair, instruction, last_id, fee)
|
||||
}
|
||||
|
||||
/// Create and sign a new Transaction. Used for unit-testing.
|
||||
pub fn new(from_keypair: &KeyPair, to: PublicKey, tokens: i64, last_id: Hash) -> Self {
|
||||
let plan = Plan::Pay(Payment { tokens, to });
|
||||
let instruction = Instruction::NewContract(Contract { plan, tokens });
|
||||
Self::new_from_instruction(from_keypair, instruction, last_id)
|
||||
Self::new_taxed(from_keypair, to, tokens, 0, last_id)
|
||||
}
|
||||
|
||||
/// Create and sign a new Witness Timestamp. Used for unit-testing.
|
||||
pub fn new_timestamp(from_keypair: &KeyPair, dt: DateTime<Utc>, last_id: Hash) -> Self {
|
||||
let instruction = Instruction::ApplyTimestamp(dt);
|
||||
Self::new_from_instruction(from_keypair, instruction, last_id)
|
||||
Self::new_from_instruction(from_keypair, instruction, last_id, 0)
|
||||
}
|
||||
|
||||
/// Create and sign a new Witness Signature. Used for unit-testing.
|
||||
pub fn new_signature(from_keypair: &KeyPair, tx_sig: Signature, last_id: Hash) -> Self {
|
||||
let instruction = Instruction::ApplySignature(tx_sig);
|
||||
Self::new_from_instruction(from_keypair, instruction, last_id)
|
||||
Self::new_from_instruction(from_keypair, instruction, last_id, 0)
|
||||
}
|
||||
|
||||
/// Create and sign a postdated Transaction. Used for unit-testing.
|
||||
@@ -77,25 +122,23 @@ impl Transaction {
|
||||
last_id: Hash,
|
||||
) -> Self {
|
||||
let from = from_keypair.pubkey();
|
||||
let plan = Plan::Race(
|
||||
let budget = Budget::Race(
|
||||
(Condition::Timestamp(dt), Payment { tokens, to }),
|
||||
(Condition::Signature(from), Payment { tokens, to: from }),
|
||||
);
|
||||
let plan = Plan::Budget(budget);
|
||||
let instruction = Instruction::NewContract(Contract { plan, tokens });
|
||||
let mut tr = Transaction {
|
||||
instruction,
|
||||
from,
|
||||
last_id,
|
||||
sig: Signature::default(),
|
||||
};
|
||||
tr.sign(from_keypair);
|
||||
tr
|
||||
Self::new_from_instruction(from_keypair, instruction, last_id, 0)
|
||||
}
|
||||
|
||||
fn get_sign_data(&self) -> Vec<u8> {
|
||||
let mut data = serialize(&(&self.instruction)).expect("serialize Contract");
|
||||
let last_id_data = serialize(&(&self.last_id)).expect("serialize last_id");
|
||||
data.extend_from_slice(&last_id_data);
|
||||
|
||||
let fee_data = serialize(&(&self.fee)).expect("serialize last_id");
|
||||
data.extend_from_slice(&fee_data);
|
||||
|
||||
data
|
||||
}
|
||||
|
||||
@@ -111,7 +154,8 @@ impl Transaction {
|
||||
|
||||
pub fn verify_plan(&self) -> bool {
|
||||
if let Instruction::NewContract(contract) = &self.instruction {
|
||||
contract.plan.verify(contract.tokens)
|
||||
self.fee >= 0 && self.fee <= contract.tokens
|
||||
&& contract.plan.verify(contract.tokens - self.fee)
|
||||
} else {
|
||||
true
|
||||
}
|
||||
@@ -138,21 +182,6 @@ pub fn memfind<A: Eq>(a: &[A], b: &[A]) -> Option<usize> {
|
||||
None
|
||||
}
|
||||
|
||||
/// Verify a batch of signatures.
|
||||
pub fn verify_signatures(transactions: &[Transaction]) -> bool {
|
||||
transactions.par_iter().all(|tr| tr.verify_sig())
|
||||
}
|
||||
|
||||
/// Verify a batch of spending plans.
|
||||
pub fn verify_plans(transactions: &[Transaction]) -> bool {
|
||||
transactions.par_iter().all(|tr| tr.verify_plan())
|
||||
}
|
||||
|
||||
/// Verify a batch of transactions.
|
||||
pub fn verify_transactions(transactions: &[Transaction]) -> bool {
|
||||
verify_signatures(transactions) && verify_plans(transactions)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
@@ -162,8 +191,8 @@ mod tests {
|
||||
fn test_claim() {
|
||||
let keypair = KeyPair::new();
|
||||
let zero = Hash::default();
|
||||
let tr0 = Transaction::new(&keypair, keypair.pubkey(), 42, zero);
|
||||
assert!(tr0.verify_plan());
|
||||
let tx0 = Transaction::new(&keypair, keypair.pubkey(), 42, zero);
|
||||
assert!(tx0.verify_plan());
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -172,22 +201,34 @@ mod tests {
|
||||
let keypair0 = KeyPair::new();
|
||||
let keypair1 = KeyPair::new();
|
||||
let pubkey1 = keypair1.pubkey();
|
||||
let tr0 = Transaction::new(&keypair0, pubkey1, 42, zero);
|
||||
assert!(tr0.verify_plan());
|
||||
let tx0 = Transaction::new(&keypair0, pubkey1, 42, zero);
|
||||
assert!(tx0.verify_plan());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_transfer_with_fee() {
|
||||
let zero = Hash::default();
|
||||
let keypair0 = KeyPair::new();
|
||||
let pubkey1 = KeyPair::new().pubkey();
|
||||
assert!(Transaction::new_taxed(&keypair0, pubkey1, 1, 1, zero).verify_plan());
|
||||
assert!(!Transaction::new_taxed(&keypair0, pubkey1, 1, 2, zero).verify_plan());
|
||||
assert!(!Transaction::new_taxed(&keypair0, pubkey1, 1, -1, zero).verify_plan());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_serialize_claim() {
|
||||
let plan = Plan::Pay(Payment {
|
||||
let budget = Budget::Pay(Payment {
|
||||
tokens: 0,
|
||||
to: Default::default(),
|
||||
});
|
||||
let plan = Plan::Budget(budget);
|
||||
let instruction = Instruction::NewContract(Contract { plan, tokens: 0 });
|
||||
let claim0 = Transaction {
|
||||
instruction,
|
||||
from: Default::default(),
|
||||
last_id: Default::default(),
|
||||
sig: Default::default(),
|
||||
fee: 0,
|
||||
};
|
||||
let buf = serialize(&claim0).unwrap();
|
||||
let claim1: Transaction = deserialize(&buf).unwrap();
|
@@ -199,15 +240,15 @@ mod tests {
        let zero = Hash::default();
        let keypair = KeyPair::new();
        let pubkey = keypair.pubkey();
        let mut tr = Transaction::new(&keypair, pubkey, 42, zero);
        if let Instruction::NewContract(contract) = &mut tr.instruction {
        let mut tx = Transaction::new(&keypair, pubkey, 42, zero);
        if let Instruction::NewContract(contract) = &mut tx.instruction {
            contract.tokens = 1_000_000; // <-- attack, part 1!
            if let Plan::Pay(ref mut payment) = contract.plan {
            if let Plan::Budget(Budget::Pay(ref mut payment)) = contract.plan {
                payment.tokens = contract.tokens; // <-- attack, part 2!
            }
        }
        assert!(tr.verify_plan());
        assert!(!tr.verify_sig());
        assert!(tx.verify_plan());
        assert!(!tx.verify_sig());
    }

    #[test]
@@ -217,23 +258,23 @@ mod tests {
        let thief_keypair = KeyPair::new();
        let pubkey1 = keypair1.pubkey();
        let zero = Hash::default();
        let mut tr = Transaction::new(&keypair0, pubkey1, 42, zero);
        if let Instruction::NewContract(contract) = &mut tr.instruction {
            if let Plan::Pay(ref mut payment) = contract.plan {
        let mut tx = Transaction::new(&keypair0, pubkey1, 42, zero);
        if let Instruction::NewContract(contract) = &mut tx.instruction {
            if let Plan::Budget(Budget::Pay(ref mut payment)) = contract.plan {
                payment.to = thief_keypair.pubkey(); // <-- attack!
            }
        }
        assert!(tr.verify_plan());
        assert!(!tr.verify_sig());
        assert!(tx.verify_plan());
        assert!(!tx.verify_sig());
    }
    #[test]
    fn test_layout() {
        let tr = test_tx();
        let sign_data = tr.get_sign_data();
        let tx = serialize(&tr).unwrap();
        assert_matches!(memfind(&tx, &sign_data), Some(SIGNED_DATA_OFFSET));
        assert_matches!(memfind(&tx, &tr.sig), Some(SIG_OFFSET));
        assert_matches!(memfind(&tx, &tr.from), Some(PUB_KEY_OFFSET));
        let tx = test_tx();
        let sign_data = tx.get_sign_data();
        let tx_bytes = serialize(&tx).unwrap();
        assert_matches!(memfind(&tx_bytes, &sign_data), Some(SIGNED_DATA_OFFSET));
        assert_matches!(memfind(&tx_bytes, &tx.sig), Some(SIG_OFFSET));
        assert_matches!(memfind(&tx_bytes, &tx.from), Some(PUB_KEY_OFFSET));
    }
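
test_layout pins the byte offsets of the signed region, the signature, and the public key inside a serialized transaction, using memfind to locate each subslice. A memfind-style search can be sketched with slice windows (an illustrative stand-in, not the definition whose tail appears above):

    // Illustrative stand-in: first offset where needle occurs in haystack.
    // Assumes a non-empty needle; `windows` panics on length 0.
    fn find_subslice<A: Eq>(haystack: &[A], needle: &[A]) -> Option<usize> {
        haystack.windows(needle.len()).position(|w| w == needle)
    }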

    #[test]
@@ -241,55 +282,20 @@ mod tests {
        let keypair0 = KeyPair::new();
        let keypair1 = KeyPair::new();
        let zero = Hash::default();
        let mut tr = Transaction::new(&keypair0, keypair1.pubkey(), 1, zero);
        if let Instruction::NewContract(contract) = &mut tr.instruction {
            if let Plan::Pay(ref mut payment) = contract.plan {
        let mut tx = Transaction::new(&keypair0, keypair1.pubkey(), 1, zero);
        if let Instruction::NewContract(contract) = &mut tx.instruction {
            if let Plan::Budget(Budget::Pay(ref mut payment)) = contract.plan {
                payment.tokens = 2; // <-- attack!
            }
        }
        assert!(!tr.verify_plan());
        assert!(!tx.verify_plan());

        // Also, ensure all branches of the plan spend all tokens
        if let Instruction::NewContract(contract) = &mut tr.instruction {
            if let Plan::Pay(ref mut payment) = contract.plan {
        if let Instruction::NewContract(contract) = &mut tx.instruction {
            if let Plan::Budget(Budget::Pay(ref mut payment)) = contract.plan {
                payment.tokens = 0; // <-- whoops!
            }
        }
        assert!(!tr.verify_plan());
    }

    #[test]
    fn test_verify_transactions() {
        let alice_keypair = KeyPair::new();
        let bob_pubkey = KeyPair::new().pubkey();
        let carol_pubkey = KeyPair::new().pubkey();
        let last_id = Hash::default();
        let tr0 = Transaction::new(&alice_keypair, bob_pubkey, 1, last_id);
        let tr1 = Transaction::new(&alice_keypair, carol_pubkey, 1, last_id);
        let transactions = vec![tr0, tr1];
        assert!(verify_transactions(&transactions));
    }
}

#[cfg(all(feature = "unstable", test))]
mod bench {
    extern crate test;
    use self::test::Bencher;
    use transaction::*;

    #[bench]
    fn verify_signatures_bench(bencher: &mut Bencher) {
        let alice_keypair = KeyPair::new();
        let last_id = Hash::default();
        let transactions: Vec<_> = (0..64)
            .into_par_iter()
            .map(|_| {
                let rando_pubkey = KeyPair::new().pubkey();
                Transaction::new(&alice_keypair, rando_pubkey, 1, last_id)
            })
            .collect();
        bencher.iter(|| {
            assert!(verify_signatures(&transactions));
        });
        assert!(!tx.verify_plan());
    }
}

114
src/tvu.rs
@@ -7,12 +7,12 @@
//! along with the ecdsa signature for the blob and each signature in all the transactions. Blobs
//! with errors are dropped, or marked for slashing.
//! 3.a retransmit
//! - Blobs originating from the parent (leader atm is the only parent), are retransmit to all the
//! - Blobs originating from the parent (leader, at the moment, is the only parent), are retransmitted to all the
//! peers in the crdt. Peers is everyone who is not me or the leader that has a known replicate
//! address.
//! 3.b window
//! - Verified blobs are placed into a window, indexed by the counter set by the leader.sockets. This could
//! be the PoH counter if its monitonically increasing in each blob. Easure coding is used to
//! be the PoH counter if it's monotonically increasing in each blob. Erasure coding is used to
//! recover any missing packets, and requests are made at random to peers and parents to retransmit
//! a missing packet.
//! 4. accountant
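
The window described here is essentially a circular buffer keyed by the leader-assigned counter. A toy sketch of that indexing (illustrative only, with made-up WINDOW_SIZE and Blob types; this is not the tvu implementation):

    // Illustrative only: a circular window keyed by the blob counter.
    const WINDOW_SIZE: u64 = 1024; // made-up size for the sketch

    struct Blob { counter: u64 }

    fn store(window: &mut Vec<Option<Blob>>, blob: Blob) {
        let idx = (blob.counter % WINDOW_SIZE) as usize; // wrap around
        window[idx] = Some(blob); // a gap here is later filled by erasure
                                  // coding or a retransmit request
    }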
@@ -24,6 +24,7 @@ use bank::Bank;
use crdt::{Crdt, ReplicatedData};
use packet;
use replicate_stage::ReplicateStage;
use signature::{KeyPair, KeyPairUtil};
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::channel;
@@ -41,8 +42,8 @@ impl Tvu {
    /// # Arguments
    /// * `bank` - The bank state.
    /// * `me` - my configuration
    /// * `gossip` - my gosisp socket
    /// * `replicte` - my replicte socket
    /// * `gossip` - my gossip socket
    /// * `replicate` - my replicate socket
    /// * `leader` - leader configuration
    /// * `exit` - The exit signal.
    pub fn new(
@@ -125,14 +126,56 @@ impl Tvu {
    }
}

#[cfg(test)]
use std::time::Duration;
pub struct Sockets {
    pub gossip: UdpSocket,
    pub requests: UdpSocket,
    pub replicate: UdpSocket,
    pub transaction: UdpSocket,
    pub respond: UdpSocket,
    pub broadcast: UdpSocket,
}

pub struct TestNode {
    pub data: ReplicatedData,
    pub sockets: Sockets,
}

impl TestNode {
    pub fn new() -> TestNode {
        let gossip = UdpSocket::bind("0.0.0.0:0").unwrap();
        let requests = UdpSocket::bind("0.0.0.0:0").unwrap();
        let transaction = UdpSocket::bind("0.0.0.0:0").unwrap();
        let replicate = UdpSocket::bind("0.0.0.0:0").unwrap();
        let respond = UdpSocket::bind("0.0.0.0:0").unwrap();
        let broadcast = UdpSocket::bind("0.0.0.0:0").unwrap();
        let pubkey = KeyPair::new().pubkey();
        let data = ReplicatedData::new(
            pubkey,
            gossip.local_addr().unwrap(),
            replicate.local_addr().unwrap(),
            requests.local_addr().unwrap(),
            transaction.local_addr().unwrap(),
        );
        TestNode {
            data: data,
            sockets: Sockets {
                gossip,
                requests,
                replicate,
                transaction,
                respond,
                broadcast,
            },
        }
    }
}
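
Every socket is bound to an ephemeral port ("0.0.0.0:0"), so many TestNodes can coexist in one test process. A small usage sketch (hypothetical test; requests_addr is one of the ReplicatedData fields populated above):

    #[test]
    fn test_node_binds_ephemeral_ports() {
        // Hypothetical test, for illustration only.
        let node = TestNode::new();
        let requests_port = node.sockets.requests.local_addr().unwrap().port();
        assert_ne!(requests_port, 0); // the OS picked a real port
        assert_eq!(node.data.requests_addr.port(), requests_port);
    }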
#[cfg(test)]
pub fn test_node() -> (ReplicatedData, UdpSocket, UdpSocket, UdpSocket, UdpSocket) {
    use signature::{KeyPair, KeyPairUtil};
    use std::time::Duration;

    let events_socket = UdpSocket::bind("127.0.0.1:0").unwrap();
    let transactions_socket = UdpSocket::bind("127.0.0.1:0").unwrap();
    let gossip = UdpSocket::bind("127.0.0.1:0").unwrap();
    let replicate = UdpSocket::bind("127.0.0.1:0").unwrap();
    let requests_socket = UdpSocket::bind("127.0.0.1:0").unwrap();
@@ -145,9 +188,9 @@ pub fn test_node() -> (ReplicatedData, UdpSocket, UdpSocket, UdpSocket, UdpSocke
        gossip.local_addr().unwrap(),
        replicate.local_addr().unwrap(),
        requests_socket.local_addr().unwrap(),
        events_socket.local_addr().unwrap(),
        transactions_socket.local_addr().unwrap(),
    );
    (d, gossip, replicate, requests_socket, events_socket)
    (d, gossip, replicate, requests_socket, transactions_socket)
}
#[cfg(test)]
@@ -155,24 +198,22 @@ pub mod tests {
    use bank::Bank;
    use bincode::serialize;
    use crdt::Crdt;
    use crdt::ReplicatedData;
    use entry::Entry;
    use event::Event;
    use hash::{hash, Hash};
    use logger;
    use mint::Mint;
    use packet::BlobRecycler;
    use signature::{KeyPair, KeyPairUtil};
    use std::collections::VecDeque;
    use std::net::UdpSocket;
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::mpsc::channel;
    use std::sync::{Arc, RwLock};
    use std::time::Duration;
    use streamer;
    use tvu::Tvu;
    use transaction::Transaction;
    use tvu::{TestNode, Tvu};

    /// Test that mesasge sent from leader to target1 and repliated to target2
    /// Test that message sent from leader to target1 and replicated to target2
    #[test]
    fn test_replicate() {
        logger::setup();
@@ -252,7 +293,7 @@ pub mod tests {
        bank.register_entry_id(&cur_hash);
        cur_hash = hash(&cur_hash);

        let tr1 = Event::new_transaction(
        let tx0 = Transaction::new(
            &mint.keypair(),
            bob_keypair.pubkey(),
            transfer_amount,
@@ -260,7 +301,7 @@ pub mod tests {
        );
        bank.register_entry_id(&cur_hash);
        cur_hash = hash(&cur_hash);
        let entry1 = Entry::new(&cur_hash, i + num_blobs, vec![tr1]);
        let entry1 = Entry::new(&cur_hash, i + num_blobs, vec![tx0]);
        bank.register_entry_id(&cur_hash);
        cur_hash = hash(&cur_hash);
@@ -303,45 +344,4 @@ pub mod tests {
        t_l_gossip.join().expect("join");
        t_l_listen.join().expect("join");
    }
    pub struct Sockets {
        pub gossip: UdpSocket,
        pub requests: UdpSocket,
        pub replicate: UdpSocket,
        pub event: UdpSocket,
        pub respond: UdpSocket,
        pub broadcast: UdpSocket,
    }
    pub struct TestNode {
        pub data: ReplicatedData,
        pub sockets: Sockets,
    }
    impl TestNode {
        pub fn new() -> TestNode {
            let gossip = UdpSocket::bind("0.0.0.0:0").unwrap();
            let requests = UdpSocket::bind("0.0.0.0:0").unwrap();
            let event = UdpSocket::bind("0.0.0.0:0").unwrap();
            let replicate = UdpSocket::bind("0.0.0.0:0").unwrap();
            let respond = UdpSocket::bind("0.0.0.0:0").unwrap();
            let broadcast = UdpSocket::bind("0.0.0.0:0").unwrap();
            let pubkey = KeyPair::new().pubkey();
            let data = ReplicatedData::new(
                pubkey,
                gossip.local_addr().unwrap(),
                replicate.local_addr().unwrap(),
                requests.local_addr().unwrap(),
                event.local_addr().unwrap(),
            );
            TestNode {
                data: data,
                sockets: Sockets {
                    gossip,
                    requests,
                    replicate,
                    event,
                    respond,
                    broadcast,
                },
            }
        }
    }
}

178
tests/multinode.rs
Normal file
@@ -0,0 +1,178 @@
#[macro_use]
extern crate log;
extern crate bincode;
extern crate futures;
extern crate solana;

use futures::Future;
use solana::bank::Bank;
use solana::crdt::{Crdt, ReplicatedData};
use solana::logger;
use solana::mint::Mint;
use solana::server::Server;
use solana::signature::{KeyPair, KeyPairUtil, PublicKey};
use solana::streamer::default_window;
use solana::thin_client::ThinClient;
use solana::tvu::TestNode;
use std::io;
use std::io::sink;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
use std::thread::sleep;
use std::thread::JoinHandle;
use std::time::Duration;

fn validator(
    leader: &ReplicatedData,
    exit: Arc<AtomicBool>,
    alice: &Mint,
    threads: &mut Vec<JoinHandle<()>>,
) {
    let validator = TestNode::new();
    let replicant_bank = Bank::new(&alice);
    let mut ts = Server::new_validator(
        replicant_bank,
        validator.data.clone(),
        validator.sockets.requests,
        validator.sockets.respond,
        validator.sockets.replicate,
        validator.sockets.gossip,
        leader.clone(),
        exit.clone(),
    );
    threads.append(&mut ts.thread_hdls);
}

fn converge(
    leader: &ReplicatedData,
    exit: Arc<AtomicBool>,
    num_nodes: usize,
    threads: &mut Vec<JoinHandle<()>>,
) -> Vec<ReplicatedData> {
    //lets spy on the network
    let mut spy = TestNode::new();
    let daddr = "0.0.0.0:0".parse().unwrap();
    let me = spy.data.id.clone();
    spy.data.replicate_addr = daddr;
    spy.data.requests_addr = daddr;
    let mut spy_crdt = Crdt::new(spy.data);
    spy_crdt.insert(&leader);
    spy_crdt.set_leader(leader.id);

    let spy_ref = Arc::new(RwLock::new(spy_crdt));
    let spy_window = default_window();
    let t_spy_listen = Crdt::listen(
        spy_ref.clone(),
        spy_window,
        spy.sockets.gossip,
        exit.clone(),
    );
    let t_spy_gossip = Crdt::gossip(spy_ref.clone(), exit.clone());
    //wait for the network to converge
    let mut converged = false;
    for _ in 0..30 {
        let num = spy_ref.read().unwrap().convergence();
        if num == num_nodes as u64 {
            converged = true;
            break;
        }
        sleep(Duration::new(1, 0));
    }
    assert!(converged);
    threads.push(t_spy_listen);
    threads.push(t_spy_gossip);
    let v: Vec<ReplicatedData> = spy_ref
        .read()
        .unwrap()
        .table
        .values()
        .into_iter()
        .filter(|x| x.id != me)
        .map(|x| x.clone())
        .collect();
    v.clone()
}

#[test]
fn test_multi_node() {
    logger::setup();
    const N: usize = 5;
    trace!("test_multi_accountant_stub");
    let leader = TestNode::new();
    let alice = Mint::new(10_000);
    let bob_pubkey = KeyPair::new().pubkey();
    let exit = Arc::new(AtomicBool::new(false));

    let leader_bank = Bank::new(&alice);
    let server = Server::new_leader(
        leader_bank,
        alice.last_id(),
        None,
        leader.data.clone(),
        leader.sockets.requests,
        leader.sockets.transaction,
        leader.sockets.broadcast,
        leader.sockets.respond,
        leader.sockets.gossip,
        exit.clone(),
        sink(),
    );

    let mut threads = server.thread_hdls;
    for _ in 0..N {
        validator(&leader.data, exit.clone(), &alice, &mut threads);
    }
    let servers = converge(&leader.data, exit.clone(), N + 2, &mut threads);
    //contains the leader addr as well
    assert_eq!(servers.len(), N + 1);
    //verify leader can do transfer
    let leader_balance = tx_and_retry_get_balance(&leader.data, &alice, &bob_pubkey).unwrap();
    assert_eq!(leader_balance, 500);
    //verify validator has the same balance
    let mut success = 0usize;
    for server in servers.iter() {
        let mut client = mk_client(server);
        if let Ok(bal) = client.poll_get_balance(&bob_pubkey) {
            trace!("validator balance {}", bal);
            if bal == leader_balance {
                success += 1;
            }
        }
    }
    assert_eq!(success, servers.len());
    exit.store(true, Ordering::Relaxed);
    for t in threads {
        t.join().unwrap();
    }
}
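
A note on the arithmetic here: converge is asked to wait for N + 2 nodes, which presumably counts the leader, the N validators, and the spy node's own table entry; the returned list filters out the spy's id, leaving the leader plus N validators, hence the N + 1 assertion.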

fn mk_client(leader: &ReplicatedData) -> ThinClient {
    let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
    requests_socket
        .set_read_timeout(Some(Duration::new(1, 0)))
        .unwrap();
    let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();

    ThinClient::new(
        leader.requests_addr,
        requests_socket,
        leader.transactions_addr,
        transactions_socket,
    )
}

fn tx_and_retry_get_balance(
    leader: &ReplicatedData,
    alice: &Mint,
    bob_pubkey: &PublicKey,
) -> io::Result<i64> {
    let mut client = mk_client(leader);
    trace!("getting leader last_id");
    let last_id = client.get_last_id().wait().unwrap();
    info!("executing leader transfer");
    let _sig = client
        .transfer(500, &alice.keypair(), *bob_pubkey, &last_id)
        .unwrap();
    client.poll_get_balance(bob_pubkey)
}