Compare commits: v0.5.0-beta...v0.6.0-alpha
342 commits
.gitattributes (vendored, deleted file, 1 line)
@@ -1 +0,0 @@
*.a filter=lfs diff=lfs merge=lfs -text
.gitignore (vendored, 1 change)
@@ -1,3 +1,4 @@
 Cargo.lock
 /target/
 **/*.rs.bk
+.cargo
.travis.yml (14 changes)
@@ -20,3 +20,17 @@ script:
 after_success: |
   docker run -it --rm --security-opt seccomp=unconfined --volume "$PWD:/volume" elmtai/docker-rust-kcov
   bash <(curl -s https://codecov.io/bash) -s target/cov
+before_deploy:
+  - cargo package
+deploy:
+  provider: releases
+  api-key:
+    secure: j3cPAbOuGjXuSl+j+JL/4GWxD6dA0/f5NQ0Od4LBVewPmnKiqimGOJ1xj3eKth+ZzwuCpcHwBIIR54NEDSJgHaYDXiukc05qCeToIPqOc0wGJ+GcUrWAy8M7Wo981I/0SVYDAnLv4+ivvJxYE7b2Jr3pHsQAzH7ClY8g2xu9HlNkScEsc4cizA9Sf3zIqtIoi480vxtQ5ghGOUCkwZuG3+Dg+IGnnjvE4qQOYey1del+KIDkmbHjry7iFWPF6fWK2187JNt6XiO2/2tZt6BkMEmdRnkw1r/wL9tj0AbqLgyBjzlI4QQfkBwsuX3ZFeNGArn71s7WmAUGyVOl0DJXfwN/BEUxMTd+lkMjuMNUxaU/hxVZ7zAWH55KJK+qf6B95DLVWr7ypjfJLLBcds+JfkBNoReWLM1XoDUKAU+wBf1b+PKiywNfNascjZTcz6QGe94sa7l/T4PxtHDSREmflFgu1Hysg61WuODDwTTHGrsg9ZuvlINnqQhXsJo9r9+TMIGwwWHcvLQDNz2TPALCfcLtd+RsevdOeXItYa0KD3D4gKGv36bwAVDpCIoZnSeiaT/PUyjilFtJjBpKz9BbOKgOtQhHGrHucn0WOF+bu/t3SFaJKQf/W+hLwO3NV8yiL5LQyHVm/TPY62nBfne2KEqi/LOFxgKG35aACouP0ig=
+  file: target/package/solana-$TRAVIS_TAG.crate
+  skip_cleanup: true
+  on:
+    tags: true
+    condition: "$TRAVIS_RUST_VERSION = stable"
+
+after_deploy:
+  - cargo publish --token "$CRATES_IO_TOKEN"
Cargo.toml (22 changes)
@@ -1,25 +1,25 @@
 [package]
 name = "solana"
-description = "High Performance Blockchain"
-version = "0.5.0-beta"
+description = "The World's Fastest Blockchain"
+version = "0.6.0-alpha"
 documentation = "https://docs.rs/solana"
-homepage = "http://solana.io/"
+homepage = "http://solana.com/"
 repository = "https://github.com/solana-labs/solana"
 authors = [
-    "Anatoly Yakovenko <anatoly@solana.io>",
-    "Greg Fitzgerald <greg@solana.io>",
-    "Stephen Akridge <stephen@solana.io>",
+    "Anatoly Yakovenko <anatoly@solana.com>",
+    "Greg Fitzgerald <greg@solana.com>",
+    "Stephen Akridge <stephen@solana.com>",
 ]
 license = "Apache-2.0"

 [[bin]]
 name = "solana-historian-demo"
 path = "src/bin/historian-demo.rs"

 [[bin]]
 name = "solana-client-demo"
 path = "src/bin/client-demo.rs"

 [[bin]]
 name = "solana-multinode-demo"
 path = "src/bin/multinode-demo.rs"

 [[bin]]
 name = "solana-testnode"
 path = "src/bin/testnode.rs"
@@ -68,3 +68,5 @@ libc = "^0.2.1"
 getopts = "^0.2"
 isatty = "0.1"
 futures = "0.1"
+rand = "0.4.2"
+pnet = "^0.21.0"
README.md (27 changes)
@@ -1,6 +1,6 @@
 [](https://crates.io/crates/solana)
 [](https://docs.rs/solana)
-[](https://travis-ci.org/solana-labs/solana)
+[](https://buildkite.com/solana-labs/solana)
 [](https://codecov.io/gh/solana-labs/solana)

 Disclaimer
@@ -102,6 +102,12 @@ $ source $HOME/.cargo/env
 $ rustup component add rustfmt-preview
 ```

+If your rustc version is lower than 1.25.0, please update it:
+
+```bash
+$ rustup update
+```
+
 Download the source code:

 ```bash
@@ -118,6 +124,18 @@ Run the test suite:
 cargo test
 ```

+Debugging
+---
+
+There are some useful debug messages in the code, you can enable them on a per-module and per-level
+basis with the normal RUST\_LOG environment variable. Run the testnode with this syntax:
+```bash
+$ RUST_LOG=solana::streamer=debug,solana::accountant_skel=info cat genesis.log | ./target/release/solana-testnode > transactions0.log
+```
+to see the debug and info sections for streamer and accountant\_skel respectively. Generally
+we are using debug for infrequent debug messages, trace for potentially frequent messages and
+info for performance-related logging.
+
 Benchmarking
 ---
@@ -133,6 +151,13 @@ Run the benchmarks:
 $ cargo +nightly bench --features="unstable"
 ```

+To run the benchmarks on Linux with GPU optimizations enabled:
+
+```bash
+$ wget https://solana-build-artifacts.s3.amazonaws.com/v0.5.0/libcuda_verify_ed25519.a
+$ cargo +nightly bench --features="unstable,cuda"
+```
+
 Code coverage
 ---

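A note on the new Debugging section: the RUST_LOG filter it describes is the standard env_logger syntax, so a crate-wide default can be combined with per-module overrides in one variable. A minimal sketch (the module paths come from this codebase; the particular level mix is illustrative, not taken from the diff):

```bash
# Crate-wide info, plus trace for the chatty streamer module and debug
# for the accountant. Note the variable must be set on the testnode
# process itself, not on `cat`.
$ cat genesis.log | RUST_LOG=solana=info,solana::streamer=trace,solana::accountant=debug \
    ./target/release/solana-testnode > transactions0.log
```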
_config.yml (new file, 1 line)
@@ -0,0 +1 @@
theme: jekyll-theme-slate
ci/buildkite.yml (new file, 37 lines)
@@ -0,0 +1,37 @@
steps:
  - command: "ci/coverage.sh || true"
    label: "coverage"
    # TODO: Run coverage in a docker image rather than assuming kcov/cargo-kcov
    # is installed on the build agent...
    #plugins:
    #  docker#v1.1.1:
    #    image: "rust"
    #    user: "998:997" # buildkite-agent:buildkite-agent
    #    environment:
    #      - CODECOV_TOKEN=$CODECOV_TOKEN
  - command: "ci/test-stable.sh"
    label: "stable [public]"
    plugins:
      docker#v1.1.1:
        image: "rust"
        user: "998:997" # buildkite-agent:buildkite-agent
  - command: "ci/test-nightly.sh || true"
    label: "nightly - FAILURES IGNORED [public]"
    plugins:
      docker#v1.1.1:
        image: "rustlang/rust:nightly"
        user: "998:997" # buildkite-agent:buildkite-agent
  - command: "ci/test-ignored.sh || true"
    label: "ignored - FAILURES IGNORED [public]"
  - command: "ci/test-cuda.sh"
    label: "cuda"
  - wait
  - command: "ci/publish.sh"
    label: "publish release artifacts"
    plugins:
      docker#v1.1.1:
        image: "rust"
        user: "998:997" # buildkite-agent:buildkite-agent
        environment:
          - BUILDKITE_TAG=$BUILDKITE_TAG
          - CRATES_IO_TOKEN=$CRATES_IO_TOKEN
ci/coverage.sh (new executable file, 25 lines)
@@ -0,0 +1,25 @@
#!/bin/bash -e

cd $(dirname $0)/..

if [[ -r ~/.cargo/env ]]; then
  # Pick up local install of kcov/cargo-kcov
  source ~/.cargo/env
fi

rustc --version
cargo --version
kcov --version
cargo-kcov --version

export RUST_BACKTRACE=1
cargo build
cargo kcov

if [[ -z "$CODECOV_TOKEN" ]]; then
  echo CODECOV_TOKEN undefined
  exit 1
fi

bash <(curl -s https://codecov.io/bash)
exit 0
ci/publish.sh (new executable file, 19 lines)
@@ -0,0 +1,19 @@
#!/bin/bash -e

cd $(dirname $0)/..

if [[ -z "$BUILDKITE_TAG" ]]; then
  # Skip publish if this is not a tagged release
  exit 0
fi

if [[ -z "$CRATES_IO_TOKEN" ]]; then
  echo CRATES_IO_TOKEN undefined
  exit 1
fi

cargo package
# TODO: Ensure the published version matches the contents of BUILDKITE_TAG
cargo publish --token "$CRATES_IO_TOKEN"

exit 0
ci/test-cuda.sh (new executable file, 17 lines)
@@ -0,0 +1,17 @@
#!/bin/bash -e

cd $(dirname $0)/..

if [[ -z "$libcuda_verify_ed25519_URL" ]]; then
  echo libcuda_verify_ed25519_URL undefined
  exit 1
fi

export LD_LIBRARY_PATH=/usr/local/cuda/lib64
export PATH=$PATH:/usr/local/cuda/bin
curl -X GET -o libcuda_verify_ed25519.a "$libcuda_verify_ed25519_URL"

source $HOME/.cargo/env
cargo test --features=cuda

exit 0
ci/test-ignored.sh (new executable file, 9 lines)
@@ -0,0 +1,9 @@
#!/bin/bash -e

cd $(dirname $0)/..

rustc --version
cargo --version

export RUST_BACKTRACE=1
cargo test -- --ignored
ci/test-nightly.sh (new executable file, 13 lines)
@@ -0,0 +1,13 @@
#!/bin/bash -e

cd $(dirname $0)/..

rustc --version
cargo --version

rustup component add rustfmt-preview
cargo fmt -- --write-mode=diff
cargo build --verbose --features unstable
cargo test --verbose --features unstable

exit 0
ci/test-stable.sh (new executable file, 13 lines)
@@ -0,0 +1,13 @@
#!/bin/bash -e

cd $(dirname $0)/..

rustc --version
cargo --version

rustup component add rustfmt-preview
cargo fmt -- --write-mode=diff
cargo build --verbose
cargo test --verbose

exit 0
multinode-demo/client.sh (new executable file, 7 lines)
@@ -0,0 +1,7 @@
#!/bin/bash
cd /home/ubuntu/solana
#git pull
export RUST_LOG=solana::crdt=trace
# scp ubuntu@18.206.1.146:~/solana/leader.json .
# scp ubuntu@18.206.1.146:~/solana/mint-demo.json .
cat mint-demo.json | cargo run --release --bin solana-multinode-demo -- -l leader.json -c 10.0.5.179:8100 -n 3
multinode-demo/leader.sh (new executable file, 6 lines)
@@ -0,0 +1,6 @@
#!/bin/bash
cd /home/ubuntu/solana
git pull
export RUST_LOG=solana=info
cat genesis.log | cargo run --release --features cuda --bin solana-testnode -- -s leader.json -b 8000 -d | grep INFO
#cat genesis.log | cargo run --release --bin solana-testnode -- -s leader.json -b 8000 -d
multinode-demo/validator.sh (new executable file, 10 lines)
@@ -0,0 +1,10 @@
#!/bin/bash
cd /home/ubuntu/solana
git pull
scp ubuntu@18.206.1.146:~/solana/mint-demo.json .
scp ubuntu@18.206.1.146:~/solana/leader.json .
scp ubuntu@18.206.1.146:~/solana/genesis.log .
scp ubuntu@18.206.1.146:~/solana/libcuda_verify_ed25519.a .
export RUST_LOG=solana=info
cat genesis.log | cargo run --release --features cuda --bin solana-testnode -- -s replicator.json -v leader.json -b 9000 -d 2>&1 | tee validator.log
(deleted file, 6 lines; filename not preserved in this capture)
@@ -1,6 +0,0 @@
#!/bin/bash
export LD_LIBRARY_PATH=/usr/local/cuda/lib64
source $HOME/.cargo/env
export PATH=$PATH:/usr/local/cuda/bin
cp /tmp/libcuda_verify_ed25519.a .
cargo test --features=cuda
src/accountant.rs (deleted file, 526 lines; path inferred from the module name)
@@ -1,526 +0,0 @@
//! The `accountant` module tracks client balances, and the progress of pending
//! transactions. It offers a high-level public API that signs transactions
//! on behalf of the caller, and a private low-level API for when they have
//! already been signed and verified.

extern crate libc;

use chrono::prelude::*;
use event::Event;
use hash::Hash;
use mint::Mint;
use plan::{Payment, Plan, Witness};
use rayon::prelude::*;
use signature::{KeyPair, PublicKey, Signature};
use std::collections::hash_map::Entry::Occupied;
use std::collections::{HashMap, HashSet, VecDeque};
use std::result;
use std::sync::RwLock;
use transaction::Transaction;

pub const MAX_ENTRY_IDS: usize = 1024 * 4;

#[derive(Debug, PartialEq, Eq)]
pub enum AccountingError {
    AccountNotFound,
    InsufficientFunds,
    InvalidTransferSignature,
}

pub type Result<T> = result::Result<T, AccountingError>;

/// Commit funds to the 'to' party.
fn apply_payment(balances: &RwLock<HashMap<PublicKey, RwLock<i64>>>, payment: &Payment) {
    if balances.read().unwrap().contains_key(&payment.to) {
        let bals = balances.read().unwrap();
        *bals[&payment.to].write().unwrap() += payment.tokens;
    } else {
        let mut bals = balances.write().unwrap();
        bals.insert(payment.to, RwLock::new(payment.tokens));
    }
}

pub struct Accountant {
    balances: RwLock<HashMap<PublicKey, RwLock<i64>>>,
    pending: RwLock<HashMap<Signature, Plan>>,
    last_ids: RwLock<VecDeque<(Hash, RwLock<HashSet<Signature>>)>>,
    time_sources: RwLock<HashSet<PublicKey>>,
    last_time: RwLock<DateTime<Utc>>,
}

impl Accountant {
    /// Create an Accountant using a deposit.
    pub fn new_from_deposit(deposit: &Payment) -> Self {
        let balances = RwLock::new(HashMap::new());
        apply_payment(&balances, deposit);
        Accountant {
            balances,
            pending: RwLock::new(HashMap::new()),
            last_ids: RwLock::new(VecDeque::new()),
            time_sources: RwLock::new(HashSet::new()),
            last_time: RwLock::new(Utc.timestamp(0, 0)),
        }
    }

    /// Create an Accountant with only a Mint. Typically used by unit tests.
    pub fn new(mint: &Mint) -> Self {
        let deposit = Payment {
            to: mint.pubkey(),
            tokens: mint.tokens,
        };
        let acc = Self::new_from_deposit(&deposit);
        acc.register_entry_id(&mint.last_id());
        acc
    }

    fn reserve_signature(signatures: &RwLock<HashSet<Signature>>, sig: &Signature) -> bool {
        if signatures.read().unwrap().contains(sig) {
            return false;
        }
        signatures.write().unwrap().insert(*sig);
        true
    }

    fn forget_signature(signatures: &RwLock<HashSet<Signature>>, sig: &Signature) -> bool {
        signatures.write().unwrap().remove(sig)
    }

    fn forget_signature_with_last_id(&self, sig: &Signature, last_id: &Hash) -> bool {
        if let Some(entry) = self.last_ids
            .read()
            .unwrap()
            .iter()
            .rev()
            .find(|x| x.0 == *last_id)
        {
            return Self::forget_signature(&entry.1, sig);
        }
        return false;
    }

    fn reserve_signature_with_last_id(&self, sig: &Signature, last_id: &Hash) -> bool {
        if let Some(entry) = self.last_ids
            .read()
            .unwrap()
            .iter()
            .rev()
            .find(|x| x.0 == *last_id)
        {
            return Self::reserve_signature(&entry.1, sig);
        }
        false
    }

    /// Tell the accountant which Entry IDs exist on the ledger. This function
    /// assumes subsequent calls correspond to later entries, and will boot
    /// the oldest ones once its internal cache is full. Once booted, the
    /// accountant will reject transactions using that `last_id`.
    pub fn register_entry_id(&self, last_id: &Hash) {
        let mut last_ids = self.last_ids.write().unwrap();
        if last_ids.len() >= MAX_ENTRY_IDS {
            last_ids.pop_front();
        }
        last_ids.push_back((*last_id, RwLock::new(HashSet::new())));
    }

    /// Deduct tokens from the 'from' address if the account has sufficient
    /// funds and the transaction isn't a duplicate.
    pub fn process_verified_transaction_debits(&self, tr: &Transaction) -> Result<()> {
        let bals = self.balances.read().unwrap();

        // Hold a write lock before the condition check, so that a debit can't occur
        // between checking the balance and the withdraw.
        let option = bals.get(&tr.from);
        if option.is_none() {
            return Err(AccountingError::AccountNotFound);
        }
        let mut bal = option.unwrap().write().unwrap();

        if !self.reserve_signature_with_last_id(&tr.sig, &tr.data.last_id) {
            return Err(AccountingError::InvalidTransferSignature);
        }

        if *bal < tr.data.tokens {
            self.forget_signature_with_last_id(&tr.sig, &tr.data.last_id);
            return Err(AccountingError::InsufficientFunds);
        }

        *bal -= tr.data.tokens;

        Ok(())
    }

    pub fn process_verified_transaction_credits(&self, tr: &Transaction) {
        let mut plan = tr.data.plan.clone();
        plan.apply_witness(&Witness::Timestamp(*self.last_time.read().unwrap()));

        if let Some(ref payment) = plan.final_payment() {
            apply_payment(&self.balances, payment);
        } else {
            let mut pending = self.pending.write().unwrap();
            pending.insert(tr.sig, plan);
        }
    }

    /// Process a Transaction that has already been verified.
    pub fn process_verified_transaction(&self, tr: &Transaction) -> Result<()> {
        self.process_verified_transaction_debits(tr)?;
        self.process_verified_transaction_credits(tr);
        Ok(())
    }

    /// Process a batch of verified transactions.
    pub fn process_verified_transactions(&self, trs: Vec<Transaction>) -> Vec<Result<Transaction>> {
        // Run all debits first to filter out any transactions that can't be processed
        // in parallel deterministically.
        let results: Vec<_> = trs.into_par_iter()
            .map(|tr| self.process_verified_transaction_debits(&tr).map(|_| tr))
            .collect(); // Calling collect() here forces all debits to complete before moving on.

        results
            .into_par_iter()
            .map(|result| {
                result.map(|tr| {
                    self.process_verified_transaction_credits(&tr);
                    tr
                })
            })
            .collect()
    }

    fn partition_events(events: Vec<Event>) -> (Vec<Transaction>, Vec<Event>) {
        let mut trs = vec![];
        let mut rest = vec![];
        for event in events {
            match event {
                Event::Transaction(tr) => trs.push(tr),
                _ => rest.push(event),
            }
        }
        (trs, rest)
    }

    pub fn process_verified_events(&self, events: Vec<Event>) -> Result<()> {
        let (trs, rest) = Self::partition_events(events);
        self.process_verified_transactions(trs);
        for event in rest {
            self.process_verified_event(&event)?;
        }
        Ok(())
    }

    /// Process a Witness Signature that has already been verified.
    fn process_verified_sig(&self, from: PublicKey, tx_sig: Signature) -> Result<()> {
        if let Occupied(mut e) = self.pending.write().unwrap().entry(tx_sig) {
            e.get_mut().apply_witness(&Witness::Signature(from));
            if let Some(ref payment) = e.get().final_payment() {
                apply_payment(&self.balances, payment);
                e.remove_entry();
            }
        };

        Ok(())
    }

    /// Process a Witness Timestamp that has already been verified.
    fn process_verified_timestamp(&self, from: PublicKey, dt: DateTime<Utc>) -> Result<()> {
        // If this is the first timestamp we've seen, it probably came from the genesis block,
        // so we'll trust it.
        if *self.last_time.read().unwrap() == Utc.timestamp(0, 0) {
            self.time_sources.write().unwrap().insert(from);
        }

        if self.time_sources.read().unwrap().contains(&from) {
            if dt > *self.last_time.read().unwrap() {
                *self.last_time.write().unwrap() = dt;
            }
        } else {
            return Ok(());
        }

        // Check to see if any timelocked transactions can be completed.
        let mut completed = vec![];

        // Hold 'pending' write lock until the end of this function. Otherwise another thread can
        // double-spend if it enters before the modified plan is removed from 'pending'.
        let mut pending = self.pending.write().unwrap();
        for (key, plan) in pending.iter_mut() {
            plan.apply_witness(&Witness::Timestamp(*self.last_time.read().unwrap()));
            if let Some(ref payment) = plan.final_payment() {
                apply_payment(&self.balances, payment);
                completed.push(key.clone());
            }
        }

        for key in completed {
            pending.remove(&key);
        }

        Ok(())
    }

    /// Process a Transaction or Witness that has already been verified.
    pub fn process_verified_event(&self, event: &Event) -> Result<()> {
        match *event {
            Event::Transaction(ref tr) => self.process_verified_transaction(tr),
            Event::Signature { from, tx_sig, .. } => self.process_verified_sig(from, tx_sig),
            Event::Timestamp { from, dt, .. } => self.process_verified_timestamp(from, dt),
        }
    }

    /// Create, sign, and process a Transaction from `keypair` to `to` of
    /// `n` tokens where `last_id` is the last Entry ID observed by the client.
    pub fn transfer(
        &self,
        n: i64,
        keypair: &KeyPair,
        to: PublicKey,
        last_id: Hash,
    ) -> Result<Signature> {
        let tr = Transaction::new(keypair, to, n, last_id);
        let sig = tr.sig;
        self.process_verified_transaction(&tr).map(|_| sig)
    }

    /// Create, sign, and process a postdated Transaction from `keypair`
    /// to `to` of `n` tokens on `dt` where `last_id` is the last Entry ID
    /// observed by the client.
    pub fn transfer_on_date(
        &self,
        n: i64,
        keypair: &KeyPair,
        to: PublicKey,
        dt: DateTime<Utc>,
        last_id: Hash,
    ) -> Result<Signature> {
        let tr = Transaction::new_on_date(keypair, to, dt, n, last_id);
        let sig = tr.sig;
        self.process_verified_transaction(&tr).map(|_| sig)
    }

    pub fn get_balance(&self, pubkey: &PublicKey) -> Option<i64> {
        let bals = self.balances.read().unwrap();
        bals.get(pubkey).map(|x| *x.read().unwrap())
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use bincode::serialize;
    use hash::hash;
    use signature::KeyPairUtil;

    #[test]
    fn test_accountant() {
        let alice = Mint::new(10_000);
        let bob_pubkey = KeyPair::new().pubkey();
        let acc = Accountant::new(&alice);
        acc.transfer(1_000, &alice.keypair(), bob_pubkey, alice.last_id())
            .unwrap();
        assert_eq!(acc.get_balance(&bob_pubkey).unwrap(), 1_000);

        acc.transfer(500, &alice.keypair(), bob_pubkey, alice.last_id())
            .unwrap();
        assert_eq!(acc.get_balance(&bob_pubkey).unwrap(), 1_500);
    }

    #[test]
    fn test_account_not_found() {
        let mint = Mint::new(1);
        let acc = Accountant::new(&mint);
        assert_eq!(
            acc.transfer(1, &KeyPair::new(), mint.pubkey(), mint.last_id()),
            Err(AccountingError::AccountNotFound)
        );
    }

    #[test]
    fn test_invalid_transfer() {
        let alice = Mint::new(11_000);
        let acc = Accountant::new(&alice);
        let bob_pubkey = KeyPair::new().pubkey();
        acc.transfer(1_000, &alice.keypair(), bob_pubkey, alice.last_id())
            .unwrap();
        assert_eq!(
            acc.transfer(10_001, &alice.keypair(), bob_pubkey, alice.last_id()),
            Err(AccountingError::InsufficientFunds)
        );

        let alice_pubkey = alice.keypair().pubkey();
        assert_eq!(acc.get_balance(&alice_pubkey).unwrap(), 10_000);
        assert_eq!(acc.get_balance(&bob_pubkey).unwrap(), 1_000);
    }

    #[test]
    fn test_transfer_to_newb() {
        let alice = Mint::new(10_000);
        let acc = Accountant::new(&alice);
        let alice_keypair = alice.keypair();
        let bob_pubkey = KeyPair::new().pubkey();
        acc.transfer(500, &alice_keypair, bob_pubkey, alice.last_id())
            .unwrap();
        assert_eq!(acc.get_balance(&bob_pubkey).unwrap(), 500);
    }

    #[test]
    fn test_transfer_on_date() {
        let alice = Mint::new(1);
        let acc = Accountant::new(&alice);
        let alice_keypair = alice.keypair();
        let bob_pubkey = KeyPair::new().pubkey();
        let dt = Utc::now();
        acc.transfer_on_date(1, &alice_keypair, bob_pubkey, dt, alice.last_id())
            .unwrap();

        // Alice's balance will be zero because all funds are locked up.
        assert_eq!(acc.get_balance(&alice.pubkey()), Some(0));

        // Bob's balance will be None because the funds have not been
        // sent.
        assert_eq!(acc.get_balance(&bob_pubkey), None);

        // Now, acknowledge the time in the condition has occurred and
        // that bob's funds are now available.
        acc.process_verified_timestamp(alice.pubkey(), dt).unwrap();
        assert_eq!(acc.get_balance(&bob_pubkey), Some(1));

        acc.process_verified_timestamp(alice.pubkey(), dt).unwrap(); // <-- Attack! Attempt to process completed transaction.
        assert_ne!(acc.get_balance(&bob_pubkey), Some(2));
    }

    #[test]
    fn test_transfer_after_date() {
        let alice = Mint::new(1);
        let acc = Accountant::new(&alice);
        let alice_keypair = alice.keypair();
        let bob_pubkey = KeyPair::new().pubkey();
        let dt = Utc::now();
        acc.process_verified_timestamp(alice.pubkey(), dt).unwrap();

        // It's now past now, so this transfer should be processed immediately.
        acc.transfer_on_date(1, &alice_keypair, bob_pubkey, dt, alice.last_id())
            .unwrap();

        assert_eq!(acc.get_balance(&alice.pubkey()), Some(0));
        assert_eq!(acc.get_balance(&bob_pubkey), Some(1));
    }

    #[test]
    fn test_cancel_transfer() {
        let alice = Mint::new(1);
        let acc = Accountant::new(&alice);
        let alice_keypair = alice.keypair();
        let bob_pubkey = KeyPair::new().pubkey();
        let dt = Utc::now();
        let sig = acc.transfer_on_date(1, &alice_keypair, bob_pubkey, dt, alice.last_id())
            .unwrap();

        // Alice's balance will be zero because all funds are locked up.
        assert_eq!(acc.get_balance(&alice.pubkey()), Some(0));

        // Bob's balance will be None because the funds have not been
        // sent.
        assert_eq!(acc.get_balance(&bob_pubkey), None);

        // Now, cancel the transaction. Alice gets her funds back, Bob never sees them.
        acc.process_verified_sig(alice.pubkey(), sig).unwrap();
        assert_eq!(acc.get_balance(&alice.pubkey()), Some(1));
        assert_eq!(acc.get_balance(&bob_pubkey), None);

        acc.process_verified_sig(alice.pubkey(), sig).unwrap(); // <-- Attack! Attempt to cancel completed transaction.
        assert_ne!(acc.get_balance(&alice.pubkey()), Some(2));
    }

    #[test]
    fn test_duplicate_event_signature() {
        let alice = Mint::new(1);
        let acc = Accountant::new(&alice);
        let sig = Signature::default();
        assert!(acc.reserve_signature_with_last_id(&sig, &alice.last_id()));
        assert!(!acc.reserve_signature_with_last_id(&sig, &alice.last_id()));
    }

    #[test]
    fn test_forget_signature() {
        let alice = Mint::new(1);
        let acc = Accountant::new(&alice);
        let sig = Signature::default();
        acc.reserve_signature_with_last_id(&sig, &alice.last_id());
        assert!(acc.forget_signature_with_last_id(&sig, &alice.last_id()));
        assert!(!acc.forget_signature_with_last_id(&sig, &alice.last_id()));
    }

    #[test]
    fn test_max_entry_ids() {
        let alice = Mint::new(1);
        let acc = Accountant::new(&alice);
        let sig = Signature::default();
        for i in 0..MAX_ENTRY_IDS {
            let last_id = hash(&serialize(&i).unwrap()); // Unique hash
            acc.register_entry_id(&last_id);
        }
        // Assert we're no longer able to use the oldest entry ID.
        assert!(!acc.reserve_signature_with_last_id(&sig, &alice.last_id()));
    }

    #[test]
    fn test_debits_before_credits() {
        let mint = Mint::new(2);
        let acc = Accountant::new(&mint);
        let alice = KeyPair::new();
        let tr0 = Transaction::new(&mint.keypair(), alice.pubkey(), 2, mint.last_id());
        let tr1 = Transaction::new(&alice, mint.pubkey(), 1, mint.last_id());
        let trs = vec![tr0, tr1];
        assert!(acc.process_verified_transactions(trs)[1].is_err());
    }
}

#[cfg(all(feature = "unstable", test))]
mod bench {
    extern crate test;
    use self::test::Bencher;
    use accountant::*;
    use bincode::serialize;
    use hash::hash;
    use signature::KeyPairUtil;

    #[bench]
    fn process_verified_event_bench(bencher: &mut Bencher) {
        let mint = Mint::new(100_000_000);
        let acc = Accountant::new(&mint);
        // Create transactions between unrelated parties.
        let transactions: Vec<_> = (0..4096)
            .into_par_iter()
            .map(|i| {
                // Seed the 'from' account.
                let rando0 = KeyPair::new();
                let tr = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, mint.last_id());
                acc.process_verified_transaction(&tr).unwrap();

                // Seed the 'to' account and a cell for its signature.
                let last_id = hash(&serialize(&i).unwrap()); // Unique hash
                acc.register_entry_id(&last_id);

                let rando1 = KeyPair::new();
                let tr = Transaction::new(&rando0, rando1.pubkey(), 1, last_id);
                acc.process_verified_transaction(&tr).unwrap();

                // Finally, return a transaction that's unique
                Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
            })
            .collect();
        bencher.iter(|| {
            // Since benchmarker runs this multiple times, we need to clear the signatures.
            for sigs in acc.last_ids.read().unwrap().iter() {
                sigs.1.write().unwrap().clear();
            }

            assert!(
                acc.process_verified_transactions(transactions.clone())
                    .iter()
                    .all(|x| x.is_ok())
            );
        });
    }
}
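The deleted module's public surface is easiest to see from its own tests above. The following minimal sketch, assuming the v0.5-era crate layout (`solana::accountant`, `solana::mint`, `solana::signature`), shows the typical call sequence; it mirrors `test_accountant` rather than documenting a separate API:

```rust
extern crate solana;

use solana::accountant::Accountant;
use solana::mint::Mint;
use solana::signature::{KeyPair, KeyPairUtil};

fn main() {
    // A Mint seeds the ledger with the initial token supply.
    let alice = Mint::new(10_000);
    let acc = Accountant::new(&alice);

    // transfer() signs on the caller's behalf; last_id ties the transaction
    // to a recent entry ID so duplicate signatures can be rejected.
    let bob_pubkey = KeyPair::new().pubkey();
    acc.transfer(1_000, &alice.keypair(), bob_pubkey, alice.last_id())
        .unwrap();
    assert_eq!(acc.get_balance(&bob_pubkey), Some(1_000));
}
```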
src/accountant_skel.rs (deleted file, 810 lines; path inferred from the module name)
@@ -1,810 +0,0 @@
//! The `accountant_skel` module is a microservice that exposes the high-level
//! Accountant API to the network. Its message encoding is currently
//! in flux. Clients should use AccountantStub to interact with it.

use accountant::Accountant;
use bincode::{deserialize, serialize};
use ecdsa;
use entry::Entry;
use event::Event;
use hash::Hash;
use historian::Historian;
use packet;
use packet::SharedPackets;
use rayon::prelude::*;
use recorder::Signal;
use result::Result;
use serde_json;
use signature::PublicKey;
use std::cmp::max;
use std::collections::VecDeque;
use std::io::Write;
use std::net::{SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::{Arc, Mutex, RwLock};
use std::thread::{spawn, JoinHandle};
use std::time::Duration;
use streamer;
use transaction::Transaction;

use subscribers;

pub struct AccountantSkel<W: Write + Send + 'static> {
    acc: Accountant,
    last_id: Hash,
    writer: W,
    historian: Historian,
    entry_info_subscribers: Vec<SocketAddr>,
}

#[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum Request {
    Transaction(Transaction),
    GetBalance { key: PublicKey },
    GetLastId,
    Subscribe { subscriptions: Vec<Subscription> },
}

#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum Subscription {
    EntryInfo,
}

#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct EntryInfo {
    pub id: Hash,
    pub num_hashes: u64,
    pub num_events: u64,
}

impl Request {
    /// Verify the request is valid.
    pub fn verify(&self) -> bool {
        match *self {
            Request::Transaction(ref tr) => tr.verify_plan(),
            _ => true,
        }
    }
}

#[derive(Serialize, Deserialize, Debug)]
pub enum Response {
    Balance { key: PublicKey, val: Option<i64> },
    EntryInfo(EntryInfo),
    LastId { id: Hash },
}

impl<W: Write + Send + 'static> AccountantSkel<W> {
    /// Create a new AccountantSkel that wraps the given Accountant.
    pub fn new(acc: Accountant, last_id: Hash, writer: W, historian: Historian) -> Self {
        AccountantSkel {
            acc,
            last_id,
            writer,
            historian,
            entry_info_subscribers: vec![],
        }
    }

    fn notify_entry_info_subscribers(&mut self, entry: &Entry) {
        // TODO: No need to bind().
        let socket = UdpSocket::bind("127.0.0.1:0").expect("bind");

        for addr in &self.entry_info_subscribers {
            let entry_info = EntryInfo {
                id: entry.id,
                num_hashes: entry.num_hashes,
                num_events: entry.events.len() as u64,
            };
            let data = serialize(&Response::EntryInfo(entry_info)).expect("serialize EntryInfo");
            let _res = socket.send_to(&data, addr);
        }
    }

    /// Process any Entry items that have been published by the Historian.
    pub fn sync(&mut self) -> Hash {
        while let Ok(entry) = self.historian.receiver.try_recv() {
            self.last_id = entry.id;
            self.acc.register_entry_id(&self.last_id);
            writeln!(self.writer, "{}", serde_json::to_string(&entry).unwrap()).unwrap();
            self.notify_entry_info_subscribers(&entry);
        }
        self.last_id
    }

    /// Process Request items sent by clients.
    pub fn process_request(
        &mut self,
        msg: Request,
        rsp_addr: SocketAddr,
    ) -> Option<(Response, SocketAddr)> {
        match msg {
            Request::GetBalance { key } => {
                let val = self.acc.get_balance(&key);
                Some((Response::Balance { key, val }, rsp_addr))
            }
            Request::GetLastId => Some((Response::LastId { id: self.sync() }, rsp_addr)),
            Request::Transaction(_) => unreachable!(),
            Request::Subscribe { subscriptions } => {
                for subscription in subscriptions {
                    match subscription {
                        Subscription::EntryInfo => self.entry_info_subscribers.push(rsp_addr),
                    }
                }
                None
            }
        }
    }

    fn recv_batch(recvr: &streamer::PacketReceiver) -> Result<Vec<SharedPackets>> {
        let timer = Duration::new(1, 0);
        let msgs = recvr.recv_timeout(timer)?;
        trace!("got msgs");
        let mut batch = vec![msgs];
        while let Ok(more) = recvr.try_recv() {
            trace!("got more msgs");
            batch.push(more);
        }
        info!("batch len {}", batch.len());
        Ok(batch)
    }

    fn verify_batch(batch: Vec<SharedPackets>) -> Vec<Vec<(SharedPackets, Vec<u8>)>> {
        let chunk_size = max(1, (batch.len() + 3) / 4);
        let batches: Vec<_> = batch.chunks(chunk_size).map(|x| x.to_vec()).collect();
        batches
            .into_par_iter()
            .map(|batch| {
                let r = ecdsa::ed25519_verify(&batch);
                batch.into_iter().zip(r).collect()
            })
            .collect()
    }

    fn verifier(
        recvr: &streamer::PacketReceiver,
        sendr: &Sender<Vec<(SharedPackets, Vec<u8>)>>,
    ) -> Result<()> {
        let batch = Self::recv_batch(recvr)?;
        let verified_batches = Self::verify_batch(batch);
        for xs in verified_batches {
            sendr.send(xs)?;
        }
        Ok(())
    }

    pub fn deserialize_packets(p: &packet::Packets) -> Vec<Option<(Request, SocketAddr)>> {
        p.packets
            .par_iter()
            .map(|x| {
                deserialize(&x.data[0..x.meta.size])
                    .map(|req| (req, x.meta.addr()))
                    .ok()
            })
            .collect()
    }

    /// Split Request list into verified transactions and the rest
    fn partition_requests(
        req_vers: Vec<(Request, SocketAddr, u8)>,
    ) -> (Vec<Transaction>, Vec<(Request, SocketAddr)>) {
        let mut trs = vec![];
        let mut reqs = vec![];
        for (msg, rsp_addr, verify) in req_vers {
            match msg {
                Request::Transaction(tr) => {
                    if verify != 0 {
                        trs.push(tr);
                    }
                }
                _ => reqs.push((msg, rsp_addr)),
            }
        }
        (trs, reqs)
    }

    fn process_packets(
        &mut self,
        req_vers: Vec<(Request, SocketAddr, u8)>,
    ) -> Result<Vec<(Response, SocketAddr)>> {
        let (trs, reqs) = Self::partition_requests(req_vers);

        // Process the transactions in parallel and then log the successful ones.
        for result in self.acc.process_verified_transactions(trs) {
            if let Ok(tr) = result {
                self.historian
                    .sender
                    .send(Signal::Event(Event::Transaction(tr)))?;
            }
        }

        // Let validators know they should not attempt to process additional
        // transactions in parallel.
        self.historian.sender.send(Signal::Tick)?;

        // Process the remaining requests serially.
        let rsps = reqs.into_iter()
            .filter_map(|(req, rsp_addr)| self.process_request(req, rsp_addr))
            .collect();

        Ok(rsps)
    }

    fn serialize_response(
        resp: Response,
        rsp_addr: SocketAddr,
        blob_recycler: &packet::BlobRecycler,
    ) -> Result<packet::SharedBlob> {
        let blob = blob_recycler.allocate();
        {
            let mut b = blob.write().unwrap();
            let v = serialize(&resp)?;
            let len = v.len();
            b.data[..len].copy_from_slice(&v);
            b.meta.size = len;
            b.meta.set_addr(&rsp_addr);
        }
        Ok(blob)
    }

    fn serialize_responses(
        rsps: Vec<(Response, SocketAddr)>,
        blob_recycler: &packet::BlobRecycler,
    ) -> Result<VecDeque<packet::SharedBlob>> {
        let mut blobs = VecDeque::new();
        for (resp, rsp_addr) in rsps {
            blobs.push_back(Self::serialize_response(resp, rsp_addr, blob_recycler)?);
        }
        Ok(blobs)
    }

    fn process(
        obj: &Arc<Mutex<AccountantSkel<W>>>,
        verified_receiver: &Receiver<Vec<(SharedPackets, Vec<u8>)>>,
        blob_sender: &streamer::BlobSender,
        packet_recycler: &packet::PacketRecycler,
        blob_recycler: &packet::BlobRecycler,
    ) -> Result<()> {
        let timer = Duration::new(1, 0);
        let mms = verified_receiver.recv_timeout(timer)?;
        for (msgs, vers) in mms {
            let reqs = Self::deserialize_packets(&msgs.read().unwrap());
            let req_vers = reqs.into_iter()
                .zip(vers)
                .filter_map(|(req, ver)| req.map(|(msg, addr)| (msg, addr, ver)))
                .filter(|x| x.0.verify())
                .collect();
            let rsps = obj.lock().unwrap().process_packets(req_vers)?;
            let blobs = Self::serialize_responses(rsps, blob_recycler)?;
            if !blobs.is_empty() {
                //don't wake up the other side if there is nothing
                blob_sender.send(blobs)?;
            }
            packet_recycler.recycle(msgs);

            // Write new entries to the ledger and notify subscribers.
            obj.lock().unwrap().sync();
        }

        Ok(())
    }

    /// Process verified blobs, already in order
    /// Respond with a signed hash of the state
    fn replicate_state(
        obj: &Arc<Mutex<AccountantSkel<W>>>,
        verified_receiver: &streamer::BlobReceiver,
        blob_recycler: &packet::BlobRecycler,
    ) -> Result<()> {
        let timer = Duration::new(1, 0);
        let blobs = verified_receiver.recv_timeout(timer)?;
        for msgs in &blobs {
            let blob = msgs.read().unwrap();
            let entries: Vec<Entry> = deserialize(&blob.data()[..blob.meta.size]).unwrap();
            for entry in entries {
                obj.lock().unwrap().acc.register_entry_id(&entry.id);

                obj.lock()
                    .unwrap()
                    .acc
                    .process_verified_events(entry.events)?;
            }
            //TODO respond back to leader with hash of the state
        }
        for blob in blobs {
            blob_recycler.recycle(blob);
        }
        Ok(())
    }

    /// Create a UDP microservice that forwards messages to the given AccountantSkel.
    /// This service is the network leader.
    /// Set `exit` to shutdown its threads.
    pub fn serve(
        obj: &Arc<Mutex<AccountantSkel<W>>>,
        addr: &str,
        exit: Arc<AtomicBool>,
    ) -> Result<Vec<JoinHandle<()>>> {
        let read = UdpSocket::bind(addr)?;
        // make sure we are on the same interface
        let mut local = read.local_addr()?;
        local.set_port(0);
        let write = UdpSocket::bind(local)?;

        let packet_recycler = packet::PacketRecycler::default();
        let blob_recycler = packet::BlobRecycler::default();
        let (packet_sender, packet_receiver) = channel();
        let t_receiver =
            streamer::receiver(read, exit.clone(), packet_recycler.clone(), packet_sender)?;
        let (blob_sender, blob_receiver) = channel();
        let t_responder =
            streamer::responder(write, exit.clone(), blob_recycler.clone(), blob_receiver);
        let (verified_sender, verified_receiver) = channel();

        let exit_ = exit.clone();
        let t_verifier = spawn(move || loop {
            let e = Self::verifier(&packet_receiver, &verified_sender);
            if e.is_err() && exit_.load(Ordering::Relaxed) {
                break;
            }
        });

        let skel = obj.clone();
        let t_server = spawn(move || loop {
            let e = Self::process(
                &skel,
                &verified_receiver,
                &blob_sender,
                &packet_recycler,
                &blob_recycler,
            );
            if e.is_err() {
                // Assume this was a timeout, so sync any empty entries.
                skel.lock().unwrap().sync();

                if exit.load(Ordering::Relaxed) {
                    break;
                }
            }
        });
        Ok(vec![t_receiver, t_responder, t_server, t_verifier])
    }

    /// This service receives messages from a leader in the network and processes the transactions
    /// on the accountant state.
    /// # Arguments
    /// * `obj` - The accountant state.
    /// * `rsubs` - The subscribers.
    /// * `exit` - The exit signal.
    /// # Remarks
    /// The pipeline is constructed as follows:
    /// 1. receive blobs from the network, these are out of order
    /// 2. verify blobs, PoH, signatures (TODO)
    /// 3. reconstruct contiguous window
    ///    a. order the blobs
    ///    b. use erasure coding to reconstruct missing blobs
    ///    c. ask the network for missing blobs, if erasure coding is insufficient
    ///    d. make sure that the blobs PoH sequences connect (TODO)
    /// 4. process the transaction state machine
    /// 5. respond with the hash of the state back to the leader
    pub fn replicate(
        obj: &Arc<Mutex<AccountantSkel<W>>>,
        rsubs: subscribers::Subscribers,
        exit: Arc<AtomicBool>,
    ) -> Result<Vec<JoinHandle<()>>> {
        let read = UdpSocket::bind(rsubs.me.addr)?;
        // make sure we are on the same interface
        let mut local = read.local_addr()?;
        local.set_port(0);
        let write = UdpSocket::bind(local)?;

        let blob_recycler = packet::BlobRecycler::default();
        let (blob_sender, blob_receiver) = channel();
        let t_blob_receiver = streamer::blob_receiver(
            exit.clone(),
            blob_recycler.clone(),
            read,
            blob_sender.clone(),
        )?;
        let (window_sender, window_receiver) = channel();
        let (retransmit_sender, retransmit_receiver) = channel();

        let subs = Arc::new(RwLock::new(rsubs));
        let t_retransmit = streamer::retransmitter(
            write,
            exit.clone(),
            subs.clone(),
            blob_recycler.clone(),
            retransmit_receiver,
        );
        //TODO
        //the packets coming out of blob_receiver need to be sent to the GPU and verified
        //then sent to the window, which does the erasure coding reconstruction
        let t_window = streamer::window(
            exit.clone(),
            subs,
            blob_recycler.clone(),
            blob_receiver,
            window_sender,
            retransmit_sender,
        );

        let skel = obj.clone();
        let t_server = spawn(move || loop {
            let e = Self::replicate_state(&skel, &window_receiver, &blob_recycler);
            if e.is_err() && exit.load(Ordering::Relaxed) {
                break;
            }
        });
        Ok(vec![t_blob_receiver, t_retransmit, t_window, t_server])
    }
}

#[cfg(test)]
pub fn to_packets(r: &packet::PacketRecycler, reqs: Vec<Request>) -> Vec<SharedPackets> {
    let mut out = vec![];
    for rrs in reqs.chunks(packet::NUM_PACKETS) {
        let p = r.allocate();
        p.write()
            .unwrap()
            .packets
            .resize(rrs.len(), Default::default());
        for (i, o) in rrs.iter().zip(p.write().unwrap().packets.iter_mut()) {
            let v = serialize(&i).expect("serialize request");
            let len = v.len();
            o.data[..len].copy_from_slice(&v);
            o.meta.size = len;
        }
        out.push(p);
    }
    return out;
}

#[cfg(test)]
mod tests {
    use accountant_skel::{to_packets, Request};
    use bincode::serialize;
    use ecdsa;
    use packet::{BlobRecycler, PacketRecycler, NUM_PACKETS};
    use transaction::{memfind, test_tx};

    use accountant::Accountant;
    use accountant_skel::AccountantSkel;
    use accountant_stub::AccountantStub;
    use entry::Entry;
    use futures::Future;
    use historian::Historian;
    use mint::Mint;
    use plan::Plan;
    use recorder::Signal;
    use signature::{KeyPair, KeyPairUtil};
    use std::io::sink;
    use std::net::{SocketAddr, UdpSocket};
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::{Arc, Mutex};
    use std::thread::sleep;
    use std::time::Duration;
    use transaction::Transaction;

    use subscribers::{Node, Subscribers};
    use streamer;
    use std::sync::mpsc::channel;
    use std::collections::VecDeque;
    use hash::{hash, Hash};
    use event::Event;
    use entry;
    use chrono::prelude::*;

    #[test]
    fn test_layout() {
        let tr = test_tx();
        let tx = serialize(&tr).unwrap();
        let packet = serialize(&Request::Transaction(tr)).unwrap();
        assert_matches!(memfind(&packet, &tx), Some(ecdsa::TX_OFFSET));
        assert_matches!(memfind(&packet, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), None);
    }

    #[test]
    fn test_to_packets() {
        let tr = Request::Transaction(test_tx());
        let re = PacketRecycler::default();
        let rv = to_packets(&re, vec![tr.clone(); 1]);
        assert_eq!(rv.len(), 1);
        assert_eq!(rv[0].read().unwrap().packets.len(), 1);

        let rv = to_packets(&re, vec![tr.clone(); NUM_PACKETS]);
        assert_eq!(rv.len(), 1);
        assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);

        let rv = to_packets(&re, vec![tr.clone(); NUM_PACKETS + 1]);
        assert_eq!(rv.len(), 2);
        assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);
        assert_eq!(rv[1].read().unwrap().packets.len(), 1);
    }

    #[test]
    fn test_accounting_sequential_consistency() {
        // In this attack we'll demonstrate that a verifier can interpret the ledger
        // differently if either the server doesn't signal the ledger to add an
        // Entry OR if the verifier tries to parallelize across multiple Entries.
        let mint = Mint::new(2);
        let acc = Accountant::new(&mint);
        let rsp_addr: SocketAddr = "0.0.0.0:0".parse().expect("socket address");
        let historian = Historian::new(&mint.last_id(), None);
        let mut skel = AccountantSkel::new(acc, mint.last_id(), sink(), historian);

        // Process a batch that includes a transaction that receives two tokens.
        let alice = KeyPair::new();
        let tr = Transaction::new(&mint.keypair(), alice.pubkey(), 2, mint.last_id());
        let req_vers = vec![(Request::Transaction(tr), rsp_addr, 1_u8)];
        assert!(skel.process_packets(req_vers).is_ok());

        // Process a second batch that spends one of those tokens.
        let tr = Transaction::new(&alice, mint.pubkey(), 1, mint.last_id());
        let req_vers = vec![(Request::Transaction(tr), rsp_addr, 1_u8)];
        assert!(skel.process_packets(req_vers).is_ok());

        // Collect the ledger and feed it to a new accountant.
        skel.historian.sender.send(Signal::Tick).unwrap();
        drop(skel.historian.sender);
        let entries: Vec<Entry> = skel.historian.receiver.iter().collect();

        // Assert the user holds one token, not two. If the server only output one
        // entry, then the second transaction will be rejected, because it drives
        // the account balance below zero before the credit is added.
        let acc = Accountant::new(&mint);
        for entry in entries {
            acc.process_verified_events(entry.events).unwrap();
        }
        assert_eq!(acc.get_balance(&alice.pubkey()), Some(1));
    }

    #[test]
    fn test_accountant_bad_sig() {
        let serve_port = 9002;
        let send_port = 9003;
        let addr = format!("127.0.0.1:{}", serve_port);
        let send_addr = format!("127.0.0.1:{}", send_port);
        let alice = Mint::new(10_000);
        let acc = Accountant::new(&alice);
        let bob_pubkey = KeyPair::new().pubkey();
        let exit = Arc::new(AtomicBool::new(false));
        let historian = Historian::new(&alice.last_id(), Some(30));
        let acc = Arc::new(Mutex::new(AccountantSkel::new(
            acc,
            alice.last_id(),
            sink(),
            historian,
        )));
        let _threads = AccountantSkel::serve(&acc, &addr, exit.clone()).unwrap();
        sleep(Duration::from_millis(300));

        let socket = UdpSocket::bind(send_addr).unwrap();
        socket.set_read_timeout(Some(Duration::new(5, 0))).unwrap();

        let mut acc = AccountantStub::new(&addr, socket);
        let last_id = acc.get_last_id().wait().unwrap();

        let tr = Transaction::new(&alice.keypair(), bob_pubkey, 500, last_id);

        let _sig = acc.transfer_signed(tr).unwrap();

        let last_id = acc.get_last_id().wait().unwrap();

        let mut tr2 = Transaction::new(&alice.keypair(), bob_pubkey, 501, last_id);
        tr2.data.tokens = 502;
        tr2.data.plan = Plan::new_payment(502, bob_pubkey);
        let _sig = acc.transfer_signed(tr2).unwrap();

        assert_eq!(acc.get_balance(&bob_pubkey).wait().unwrap(), 500);
        exit.store(true, Ordering::Relaxed);
    }

    use std::sync::{Once, ONCE_INIT};
    extern crate env_logger;

    static INIT: Once = ONCE_INIT;

    /// Setup function that is only run once, even if called multiple times.
    fn setup() {
        INIT.call_once(|| {
            env_logger::init().unwrap();
        });
    }

    #[test]
    fn test_replicate() {
        setup();
        let leader_sock = UdpSocket::bind("127.0.0.1:0").expect("bind");
        let leader_addr = leader_sock.local_addr().unwrap();
        let me_addr = "127.0.0.1:9010".parse().unwrap();
        let target_peer_sock = UdpSocket::bind("127.0.0.1:0").expect("bind");
        let target_peer_addr = target_peer_sock.local_addr().unwrap();
        let source_peer_sock = UdpSocket::bind("127.0.0.1:0").expect("bind");
        let exit = Arc::new(AtomicBool::new(false));

        let node_me = Node::new([0, 0, 0, 0, 0, 0, 0, 1], 10, me_addr);
        let node_subs = vec![Node::new([0, 0, 0, 0, 0, 0, 0, 2], 8, target_peer_addr); 1];
        let node_leader = Node::new([0, 0, 0, 0, 0, 0, 0, 3], 20, leader_addr);
        let subs = Subscribers::new(node_me, node_leader, &node_subs);

        // setup some blob services to send blobs into the socket
        // to simulate the source peer and get blobs out of the socket to
        // simulate target peer
        let recv_recycler = BlobRecycler::default();
        let resp_recycler = BlobRecycler::default();
        let (s_reader, r_reader) = channel();
        let t_receiver = streamer::blob_receiver(
            exit.clone(),
            recv_recycler.clone(),
            target_peer_sock,
            s_reader,
        ).unwrap();
        let (s_responder, r_responder) = channel();
        let t_responder = streamer::responder(
            source_peer_sock,
            exit.clone(),
            resp_recycler.clone(),
            r_responder,
        );

        let starting_balance = 10_000;
        let alice = Mint::new(starting_balance);
        let acc = Accountant::new(&alice);
        let historian = Historian::new(&alice.last_id(), Some(30));
        let acc = Arc::new(Mutex::new(AccountantSkel::new(
            acc,
            alice.last_id(),
            sink(),
            historian,
        )));

        let _threads = AccountantSkel::replicate(&acc, subs, exit.clone()).unwrap();

        let mut alice_ref_balance = starting_balance;
        let mut msgs = VecDeque::new();
        let mut cur_hash = Hash::default();
        let num_blobs = 10;
        let transfer_amount = 501;
        let bob_keypair = KeyPair::new();
        for i in 0..num_blobs {
            let b = resp_recycler.allocate();
            let b_ = b.clone();
            let mut w = b.write().unwrap();
            w.set_index(i).unwrap();

            let tr0 = Event::new_timestamp(&bob_keypair, Utc::now());
            let entry0 = entry::create_entry(&cur_hash, i, vec![tr0]);
            acc.lock().unwrap().acc.register_entry_id(&cur_hash);
            cur_hash = hash(&cur_hash);

            let tr1 = Transaction::new(
                &alice.keypair(),
                bob_keypair.pubkey(),
                transfer_amount,
                cur_hash,
            );
            acc.lock().unwrap().acc.register_entry_id(&cur_hash);
            cur_hash = hash(&cur_hash);
            let entry1 =
                entry::create_entry(&cur_hash, i + num_blobs, vec![Event::Transaction(tr1)]);
            acc.lock().unwrap().acc.register_entry_id(&cur_hash);
            cur_hash = hash(&cur_hash);

            alice_ref_balance -= transfer_amount;
|
||||
|
||||
let serialized_entry = serialize(&vec![entry0, entry1]).unwrap();
|
||||
|
||||
w.data_mut()[..serialized_entry.len()].copy_from_slice(&serialized_entry);
|
||||
w.set_size(serialized_entry.len());
|
||||
w.meta.set_addr(&me_addr);
|
||||
drop(w);
|
||||
msgs.push_back(b_);
|
||||
}
|
||||
|
||||
// send the blobs into the socket
|
||||
s_responder.send(msgs).expect("send");
|
||||
|
||||
// receive retransmitted messages
|
||||
let timer = Duration::new(1, 0);
|
||||
let mut msgs: Vec<_> = Vec::new();
|
||||
while let Ok(msg) = r_reader.recv_timeout(timer) {
|
||||
trace!("msg: {:?}", msg);
|
||||
msgs.push(msg);
|
||||
}
|
||||
|
||||
let alice_balance = acc.lock()
|
||||
.unwrap()
|
||||
.acc
|
||||
.get_balance(&alice.keypair().pubkey())
|
||||
.unwrap();
|
||||
assert_eq!(alice_balance, alice_ref_balance);
|
||||
|
||||
let bob_balance = acc.lock()
|
||||
.unwrap()
|
||||
.acc
|
||||
.get_balance(&bob_keypair.pubkey())
|
||||
.unwrap();
|
||||
assert_eq!(bob_balance, starting_balance - alice_ref_balance);
|
||||
|
||||
exit.store(true, Ordering::Relaxed);
|
||||
t_receiver.join().expect("join");
|
||||
t_responder.join().expect("join");
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#[cfg(all(feature = "unstable", test))]
mod bench {
    extern crate test;
    use self::test::Bencher;
    use accountant::{Accountant, MAX_ENTRY_IDS};
    use accountant_skel::*;
    use bincode::serialize;
    use hash::hash;
    use mint::Mint;
    use signature::{KeyPair, KeyPairUtil};
    use std::collections::HashSet;
    use std::io::sink;
    use std::time::Instant;
    use transaction::Transaction;

    #[bench]
    fn process_packets_bench(_bencher: &mut Bencher) {
        let mint = Mint::new(100_000_000);
        let acc = Accountant::new(&mint);
        let rsp_addr: SocketAddr = "0.0.0.0:0".parse().expect("socket address");
        // Create transactions between unrelated parties.
        let txs = 100_000;
        let last_ids: Mutex<HashSet<Hash>> = Mutex::new(HashSet::new());
        let transactions: Vec<_> = (0..txs)
            .into_par_iter()
            .map(|i| {
                // Seed the 'to' account and a cell for its signature.
                let dummy_id = i % (MAX_ENTRY_IDS as i32);
                let last_id = hash(&serialize(&dummy_id).unwrap()); // Semi-unique hash
                {
                    let mut last_ids = last_ids.lock().unwrap();
                    if !last_ids.contains(&last_id) {
                        last_ids.insert(last_id);
                        acc.register_entry_id(&last_id);
                    }
                }

                // Seed the 'from' account.
                let rando0 = KeyPair::new();
                let tr = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, last_id);
                acc.process_verified_transaction(&tr).unwrap();

                let rando1 = KeyPair::new();
                let tr = Transaction::new(&rando0, rando1.pubkey(), 2, last_id);
                acc.process_verified_transaction(&tr).unwrap();

                // Finally, return a transaction that's unique.
                Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
            })
            .collect();

        let req_vers = transactions
            .into_iter()
            .map(|tr| (Request::Transaction(tr), rsp_addr, 1_u8))
            .collect();

        let historian = Historian::new(&mint.last_id(), None);
        let mut skel = AccountantSkel::new(acc, mint.last_id(), sink(), historian);

        let now = Instant::now();
        assert!(skel.process_packets(req_vers).is_ok());
        let duration = now.elapsed();
        let sec = duration.as_secs() as f64 + duration.subsec_nanos() as f64 / 1_000_000_000.0;
        let tps = txs as f64 / sec;

        // Ensure that all transactions were successfully logged.
        drop(skel.historian.sender);
        let entries: Vec<Entry> = skel.historian.receiver.iter().collect();
        assert_eq!(entries.len(), 1);
        assert_eq!(entries[0].events.len(), txs as usize);

        println!("{} tps", tps);
    }
}
@@ -1,201 +0,0 @@
//! The `accountant_stub` module is a client-side object that interfaces with a server-side Accountant
//! object via the network interface exposed by AccountantSkel. Client code should use
//! this object instead of writing messages to the network directly. The binary
//! encoding of its messages is unstable and may change in future releases.

use accountant_skel::{Request, Response, Subscription};
use bincode::{deserialize, serialize};
use futures::future::{ok, FutureResult};
use hash::Hash;
use signature::{KeyPair, PublicKey, Signature};
use std::collections::HashMap;
use std::io;
use std::net::UdpSocket;
use transaction::Transaction;

pub struct AccountantStub {
    pub addr: String,
    pub socket: UdpSocket,
    last_id: Option<Hash>,
    num_events: u64,
    balances: HashMap<PublicKey, Option<i64>>,
}

impl AccountantStub {
    /// Create a new AccountantStub that will interface with AccountantSkel
    /// over `socket`. To receive responses, the caller must bind `socket`
    /// to a public address before invoking AccountantStub methods.
    pub fn new(addr: &str, socket: UdpSocket) -> Self {
        let stub = AccountantStub {
            addr: addr.to_string(),
            socket,
            last_id: None,
            num_events: 0,
            balances: HashMap::new(),
        };
        stub.init();
        stub
    }

    pub fn init(&self) {
        let subscriptions = vec![Subscription::EntryInfo];
        let req = Request::Subscribe { subscriptions };
        let data = serialize(&req).expect("serialize Subscribe");
        let _res = self.socket.send_to(&data, &self.addr);
    }

    pub fn recv_response(&self) -> io::Result<Response> {
        let mut buf = vec![0u8; 1024];
        self.socket.recv_from(&mut buf)?;
        let resp = deserialize(&buf).expect("deserialize balance");
        Ok(resp)
    }

    pub fn process_response(&mut self, resp: Response) {
        match resp {
            Response::Balance { key, val } => {
                self.balances.insert(key, val);
            }
            Response::LastId { id } => {
                self.last_id = Some(id);
            }
            Response::EntryInfo(entry_info) => {
                self.last_id = Some(entry_info.id);
                self.num_events += entry_info.num_events;
            }
        }
    }

    /// Send a signed Transaction to the server for processing. This method
    /// does not wait for a response.
    pub fn transfer_signed(&self, tr: Transaction) -> io::Result<usize> {
        let req = Request::Transaction(tr);
        let data = serialize(&req).unwrap();
        self.socket.send_to(&data, &self.addr)
    }

    /// Creates, signs, and processes a Transaction. Useful for writing unit-tests.
    pub fn transfer(
        &self,
        n: i64,
        keypair: &KeyPair,
        to: PublicKey,
        last_id: &Hash,
    ) -> io::Result<Signature> {
        let tr = Transaction::new(keypair, to, n, *last_id);
        let sig = tr.sig;
        self.transfer_signed(tr).map(|_| sig)
    }

    /// Request the balance of the user holding `pubkey`. This method blocks
    /// until the server sends a response. If the response packet is dropped
    /// by the network, this method will hang indefinitely.
    pub fn get_balance(&mut self, pubkey: &PublicKey) -> FutureResult<i64, i64> {
        let req = Request::GetBalance { key: *pubkey };
        let data = serialize(&req).expect("serialize GetBalance");
        self.socket
            .send_to(&data, &self.addr)
            .expect("buffer error");
        let mut done = false;
        while !done {
            let resp = self.recv_response().expect("recv response");
            if let &Response::Balance { ref key, .. } = &resp {
                done = key == pubkey;
            }
            self.process_response(resp);
        }
        ok(self.balances[pubkey].unwrap())
    }

    /// Request the last Entry ID from the server. This method blocks
    /// until the server sends a response. At the time of this writing,
    /// it also has the side-effect of causing the server to log any
    /// entries that have been published by the Historian.
    pub fn get_last_id(&mut self) -> FutureResult<Hash, ()> {
        let req = Request::GetLastId;
        let data = serialize(&req).expect("serialize GetId");
        self.socket
            .send_to(&data, &self.addr)
            .expect("buffer error");
        let mut done = false;
        while !done {
            let resp = self.recv_response().expect("recv response");
            if let &Response::LastId { .. } = &resp {
                done = true;
            }
            self.process_response(resp);
        }
        ok(self.last_id.unwrap_or(Hash::default()))
    }

    /// Return the number of transactions the server processed since creating
    /// this stub instance.
    pub fn transaction_count(&mut self) -> u64 {
        // Wait for at least one EntryInfo.
        let mut done = false;
        while !done {
            let resp = self.recv_response().expect("recv response");
            if let &Response::EntryInfo(_) = &resp {
                done = true;
            }
            self.process_response(resp);
        }

        // Then take the rest.
        self.socket.set_nonblocking(true).expect("set nonblocking");
        loop {
            match self.recv_response() {
                Err(_) => break,
                Ok(resp) => self.process_response(resp),
            }
        }
        self.socket.set_nonblocking(false).expect("set blocking");
        self.num_events
    }
}
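
// A minimal sketch, not part of the original file: the stub's blocking calls
// (see get_balance above) hang if a response packet is lost, so a caller can
// bound the wait by putting a read timeout on the socket before handing it to
// the stub. `addr` is assumed to point at a running AccountantSkel. With the
// timeout set, a lost response makes recv_from return an error after five
// seconds (which the blocking loops surface via their expect) instead of
// hanging the thread forever.
#[allow(dead_code)]
fn example_stub_with_timeout(addr: &str) -> io::Result<AccountantStub> {
    let socket = UdpSocket::bind("0.0.0.0:0")?;
    // Bound every recv on this socket to five seconds.
    socket.set_read_timeout(Some(std::time::Duration::new(5, 0)))?;
    Ok(AccountantStub::new(addr, socket))
}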

#[cfg(test)]
mod tests {
    use super::*;
    use accountant::Accountant;
    use accountant_skel::AccountantSkel;
    use futures::Future;
    use historian::Historian;
    use mint::Mint;
    use signature::{KeyPair, KeyPairUtil};
    use std::io::sink;
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::{Arc, Mutex};
    use std::thread::sleep;
    use std::time::Duration;

    // TODO: Figure out why this test sometimes hangs on TravisCI.
    #[test]
    fn test_accountant_stub() {
        let addr = "127.0.0.1:9000";
        let send_addr = "127.0.0.1:9001";
        let alice = Mint::new(10_000);
        let acc = Accountant::new(&alice);
        let bob_pubkey = KeyPair::new().pubkey();
        let exit = Arc::new(AtomicBool::new(false));
        let historian = Historian::new(&alice.last_id(), Some(30));
        let acc = Arc::new(Mutex::new(AccountantSkel::new(
            acc,
            alice.last_id(),
            sink(),
            historian,
        )));
        let _threads = AccountantSkel::serve(&acc, addr, exit.clone()).unwrap();
        sleep(Duration::from_millis(300));

        let socket = UdpSocket::bind(send_addr).unwrap();
        socket.set_read_timeout(Some(Duration::new(5, 0))).unwrap();

        let mut acc = AccountantStub::new(addr, socket);
        let last_id = acc.get_last_id().wait().unwrap();
        let _sig = acc.transfer(500, &alice.keypair(), bob_pubkey, &last_id)
            .unwrap();
        assert_eq!(acc.get_balance(&bob_pubkey).wait().unwrap(), 500);
        exit.store(true, Ordering::Relaxed);
    }
}

654 src/bank.rs Normal file
@@ -0,0 +1,654 @@
//! The `bank` module tracks client balances, and the progress of pending
//! transactions. It offers a high-level public API that signs transactions
//! on behalf of the caller, and a private low-level API for when they have
//! already been signed and verified.

extern crate libc;

use chrono::prelude::*;
use entry::Entry;
use event::Event;
use hash::Hash;
use mint::Mint;
use plan::{Payment, Plan, Witness};
use rayon::prelude::*;
use signature::{KeyPair, PublicKey, Signature};
use std::collections::hash_map::Entry::Occupied;
use std::collections::{HashMap, HashSet, VecDeque};
use std::result;
use std::sync::RwLock;
use std::sync::atomic::{AtomicIsize, AtomicUsize, Ordering};
use transaction::{Instruction, Transaction};

pub const MAX_ENTRY_IDS: usize = 1024 * 4;

#[derive(Debug, PartialEq, Eq)]
pub enum BankError {
    AccountNotFound(PublicKey),
    InsufficientFunds(PublicKey),
    InvalidTransferSignature(Signature),
}

pub type Result<T> = result::Result<T, BankError>;

/// Commit funds to the 'to' party.
fn apply_payment(balances: &RwLock<HashMap<PublicKey, AtomicIsize>>, payment: &Payment) {
    // First we check balances with a read lock to maximize potential parallelization.
    if balances
        .read()
        .expect("'balances' read lock in apply_payment")
        .contains_key(&payment.to)
    {
        let bals = balances.read().expect("'balances' read lock");
        bals[&payment.to].fetch_add(payment.tokens as isize, Ordering::Relaxed);
    } else {
        // Now we know the key wasn't present a nanosecond ago, but it might be there
        // by the time we acquire a write lock, so we'll have to check again.
        let mut bals = balances.write().expect("'balances' write lock");
        if bals.contains_key(&payment.to) {
            bals[&payment.to].fetch_add(payment.tokens as isize, Ordering::Relaxed);
        } else {
            bals.insert(payment.to, AtomicIsize::new(payment.tokens as isize));
        }
    }
}
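
// A minimal sketch, not part of the original file, of the double-checked
// locking pattern apply_payment uses: take the cheap shared read lock on the
// common path, and re-test the condition after upgrading to the write lock,
// since another thread may have inserted the key in between the two locks.
#[allow(dead_code)]
fn example_double_checked_insert(
    balances: &RwLock<HashMap<PublicKey, AtomicIsize>>,
    key: PublicKey,
    delta: isize,
) {
    if balances.read().unwrap().contains_key(&key) {
        // Common case: the account exists, so a read lock suffices and
        // concurrent payments to different keys proceed in parallel.
        balances.read().unwrap()[&key].fetch_add(delta, Ordering::Relaxed);
    } else {
        // Rare case: take the exclusive lock, then check again before inserting.
        let mut bals = balances.write().unwrap();
        bals.entry(key)
            .or_insert_with(|| AtomicIsize::new(0))
            .fetch_add(delta, Ordering::Relaxed);
    }
}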

pub struct Bank {
    balances: RwLock<HashMap<PublicKey, AtomicIsize>>,
    pending: RwLock<HashMap<Signature, Plan>>,
    last_ids: RwLock<VecDeque<(Hash, RwLock<HashSet<Signature>>)>>,
    time_sources: RwLock<HashSet<PublicKey>>,
    last_time: RwLock<DateTime<Utc>>,
    transaction_count: AtomicUsize,
}

impl Bank {
    /// Create a Bank using a deposit.
    pub fn new_from_deposit(deposit: &Payment) -> Self {
        let balances = RwLock::new(HashMap::new());
        apply_payment(&balances, deposit);
        Bank {
            balances,
            pending: RwLock::new(HashMap::new()),
            last_ids: RwLock::new(VecDeque::new()),
            time_sources: RwLock::new(HashSet::new()),
            last_time: RwLock::new(Utc.timestamp(0, 0)),
            transaction_count: AtomicUsize::new(0),
        }
    }

    /// Create a Bank with only a Mint. Typically used by unit tests.
    pub fn new(mint: &Mint) -> Self {
        let deposit = Payment {
            to: mint.pubkey(),
            tokens: mint.tokens,
        };
        let bank = Self::new_from_deposit(&deposit);
        bank.register_entry_id(&mint.last_id());
        bank
    }

    /// Return the last entry ID registered.
    pub fn last_id(&self) -> Hash {
        let last_ids = self.last_ids.read().expect("'last_ids' read lock");
        let last_item = last_ids.iter().last().expect("empty 'last_ids' list");
        last_item.0
    }

    fn reserve_signature(signatures: &RwLock<HashSet<Signature>>, sig: &Signature) -> bool {
        if signatures
            .read()
            .expect("'signatures' read lock")
            .contains(sig)
        {
            return false;
        }
        signatures
            .write()
            .expect("'signatures' write lock")
            .insert(*sig);
        true
    }

    fn forget_signature(signatures: &RwLock<HashSet<Signature>>, sig: &Signature) -> bool {
        signatures
            .write()
            .expect("'signatures' write lock in forget_signature")
            .remove(sig)
    }

    fn forget_signature_with_last_id(&self, sig: &Signature, last_id: &Hash) -> bool {
        if let Some(entry) = self.last_ids
            .read()
            .expect("'last_ids' read lock in forget_signature_with_last_id")
            .iter()
            .rev()
            .find(|x| x.0 == *last_id)
        {
            return Self::forget_signature(&entry.1, sig);
        }
        false
    }

    fn reserve_signature_with_last_id(&self, sig: &Signature, last_id: &Hash) -> bool {
        if let Some(entry) = self.last_ids
            .read()
            .expect("'last_ids' read lock in reserve_signature_with_last_id")
            .iter()
            .rev()
            .find(|x| x.0 == *last_id)
        {
            return Self::reserve_signature(&entry.1, sig);
        }
        false
    }

    /// Tell the bank which Entry IDs exist on the ledger. This function
    /// assumes subsequent calls correspond to later entries, and will boot
    /// the oldest ones once its internal cache is full. Once an ID is booted,
    /// the bank will reject transactions that use it as a `last_id`.
    pub fn register_entry_id(&self, last_id: &Hash) {
        let mut last_ids = self.last_ids
            .write()
            .expect("'last_ids' write lock in register_entry_id");
        if last_ids.len() >= MAX_ENTRY_IDS {
            last_ids.pop_front();
        }
        last_ids.push_back((*last_id, RwLock::new(HashSet::new())));
    }
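
    // A minimal sketch, not part of the original file, of the client-side
    // consequence of the eviction above: a transfer must reference a last_id
    // still inside the MAX_ENTRY_IDS window, so long-lived callers should
    // refresh it (e.g. via last_id()) rather than reusing one indefinitely.
    #[allow(dead_code)]
    fn example_recent_transfer(&self, keypair: &KeyPair, to: PublicKey) -> Result<Signature> {
        // Using the most recently registered entry ID guarantees the transfer
        // isn't rejected because its last_id was booted from the window.
        let recent = self.last_id();
        self.transfer(1, keypair, to, recent)
    }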

    /// Deduct tokens from the 'from' address, provided the account has
    /// sufficient funds and the transaction isn't a duplicate.
    pub fn process_verified_transaction_debits(&self, tr: &Transaction) -> Result<()> {
        if let Instruction::NewContract(contract) = &tr.instruction {
            trace!("Transaction {}", contract.tokens);
        }
        let bals = self.balances
            .read()
            .expect("'balances' read lock in process_verified_transaction_debits");
        let option = bals.get(&tr.from);

        if option.is_none() {
            return Err(BankError::AccountNotFound(tr.from));
        }

        if !self.reserve_signature_with_last_id(&tr.sig, &tr.last_id) {
            return Err(BankError::InvalidTransferSignature(tr.sig));
        }

        loop {
            let result = if let Instruction::NewContract(contract) = &tr.instruction {
                let bal = option.expect("assignment of option to bal");
                let current = bal.load(Ordering::Relaxed) as i64;

                if current < contract.tokens {
                    self.forget_signature_with_last_id(&tr.sig, &tr.last_id);
                    return Err(BankError::InsufficientFunds(tr.from));
                }

                bal.compare_exchange(
                    current as isize,
                    (current - contract.tokens) as isize,
                    Ordering::Relaxed,
                    Ordering::Relaxed,
                )
            } else {
                Ok(0)
            };

            match result {
                Ok(_) => {
                    self.transaction_count.fetch_add(1, Ordering::Relaxed);
                    return Ok(());
                }
                Err(_) => continue,
            };
        }
    }
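
    // A minimal sketch, not part of the original file, of the lock-free debit
    // pattern used above: load the balance, refuse an overdraft, and retry the
    // compare_exchange whenever a concurrent debit changed the value between
    // the load and the swap.
    #[allow(dead_code)]
    fn example_checked_debit(balance: &AtomicIsize, tokens: isize) -> bool {
        loop {
            let current = balance.load(Ordering::Relaxed);
            if current < tokens {
                // Insufficient funds; the caller must roll back any earlier
                // bookkeeping (as the code above forgets the reserved signature).
                return false;
            }
            if balance
                .compare_exchange(current, current - tokens, Ordering::Relaxed, Ordering::Relaxed)
                .is_ok()
            {
                return true; // The debit was applied atomically.
            }
            // Lost the race to another debit; re-read and try again.
        }
    }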

    pub fn process_verified_transaction_credits(&self, tr: &Transaction) {
        match &tr.instruction {
            Instruction::NewContract(contract) => {
                let mut plan = contract.plan.clone();
                plan.apply_witness(&Witness::Timestamp(*self.last_time
                    .read()
                    .expect("timestamp creation in process_verified_transaction_credits")));

                if let Some(ref payment) = plan.final_payment() {
                    apply_payment(&self.balances, payment);
                } else {
                    let mut pending = self.pending
                        .write()
                        .expect("'pending' write lock in process_verified_transaction_credits");
                    pending.insert(tr.sig, plan);
                }
            }
            Instruction::ApplyTimestamp(dt) => {
                let _ = self.process_verified_timestamp(tr.from, *dt);
            }
            Instruction::ApplySignature(tx_sig) => {
                let _ = self.process_verified_sig(tr.from, *tx_sig);
            }
        }
    }

    /// Process a Transaction that has already been verified.
    pub fn process_verified_transaction(&self, tr: &Transaction) -> Result<()> {
        self.process_verified_transaction_debits(tr)?;
        self.process_verified_transaction_credits(tr);
        Ok(())
    }

    /// Process a batch of verified transactions.
    pub fn process_verified_transactions(&self, trs: Vec<Transaction>) -> Vec<Result<Transaction>> {
        // Run all debits first to filter out any transactions that can't be processed
        // in parallel deterministically.
        info!("processing Transactions {}", trs.len());
        let results: Vec<_> = trs.into_par_iter()
            .map(|tr| self.process_verified_transaction_debits(&tr).map(|_| tr))
            .collect(); // Calling collect() here forces all debits to complete before moving on.

        results
            .into_par_iter()
            .map(|result| {
                result.map(|tr| {
                    self.process_verified_transaction_credits(&tr);
                    tr
                })
            })
            .collect()
    }
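
    // A minimal sketch, not part of the original file, of why the collect()
    // above acts as a barrier: every debit must settle before any credit runs,
    // otherwise a transaction spending freshly credited tokens could pass on
    // one replay and fail on another, breaking determinism across verifiers.
    #[allow(dead_code)]
    fn example_two_phase<T, D, C>(items: Vec<T>, debit: D, credit: C) -> Vec<T>
    where
        T: Send + Sync,
        D: Fn(&T) -> bool + Sync + Send,
        C: Fn(&T) + Sync + Send,
    {
        // Phase 1: apply all debits in parallel; collect() is the barrier.
        let debited: Vec<_> = items.into_par_iter().filter(|x| debit(x)).collect();
        // Phase 2: only after every debit has settled, apply the credits.
        debited.par_iter().for_each(|x| credit(x));
        debited
    }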

    fn partition_events(events: Vec<Event>) -> (Vec<Transaction>, Vec<Event>) {
        (
            events
                .into_iter()
                .map(|Event::Transaction(tr)| tr)
                .collect(),
            vec![],
        )
    }

    pub fn process_verified_events(&self, events: Vec<Event>) -> Vec<Result<Event>> {
        let (trs, rest) = Self::partition_events(events);
        let mut results: Vec<_> = self.process_verified_transactions(trs)
            .into_iter()
            .map(|x| x.map(Event::Transaction))
            .collect();

        for event in rest {
            results.push(self.process_verified_event(event));
        }

        results
    }

    pub fn process_verified_entries(&self, entries: Vec<Entry>) -> Result<()> {
        for entry in entries {
            self.register_entry_id(&entry.id);
            for result in self.process_verified_events(entry.events) {
                result?;
            }
        }
        Ok(())
    }

    /// Process a Witness Signature that has already been verified.
    fn process_verified_sig(&self, from: PublicKey, tx_sig: Signature) -> Result<()> {
        if let Occupied(mut e) = self.pending
            .write()
            .expect("write() in process_verified_sig")
            .entry(tx_sig)
        {
            e.get_mut().apply_witness(&Witness::Signature(from));
            if let Some(payment) = e.get().final_payment() {
                apply_payment(&self.balances, &payment);
                e.remove_entry();
            }
        };

        Ok(())
    }

    /// Process a Witness Timestamp that has already been verified.
    fn process_verified_timestamp(&self, from: PublicKey, dt: DateTime<Utc>) -> Result<()> {
        // If this is the first timestamp we've seen, it probably came from the genesis block,
        // so we'll trust it.
        if *self.last_time
            .read()
            .expect("'last_time' read lock on first timestamp check")
            == Utc.timestamp(0, 0)
        {
            self.time_sources
                .write()
                .expect("'time_sources' write lock on first timestamp")
                .insert(from);
        }

        if self.time_sources
            .read()
            .expect("'time_sources' read lock")
            .contains(&from)
        {
            if dt > *self.last_time.read().expect("'last_time' read lock") {
                *self.last_time.write().expect("'last_time' write lock") = dt;
            }
        } else {
            return Ok(());
        }

        // Check to see if any timelocked transactions can be completed.
        let mut completed = vec![];

        // Hold the 'pending' write lock until the end of this function. Otherwise another
        // thread could double-spend by entering before the modified plan is removed from
        // 'pending'.
        let mut pending = self.pending
            .write()
            .expect("'pending' write lock in process_verified_timestamp");
        for (key, plan) in pending.iter_mut() {
            plan.apply_witness(&Witness::Timestamp(*self.last_time
                .read()
                .expect("'last_time' read lock when creating timestamp")));
            if let Some(ref payment) = plan.final_payment() {
                apply_payment(&self.balances, payment);
                completed.push(key.clone());
            }
        }

        for key in completed {
            pending.remove(&key);
        }

        Ok(())
    }

    /// Process a Transaction or Witness that has already been verified.
    pub fn process_verified_event(&self, event: Event) -> Result<Event> {
        match event {
            Event::Transaction(ref tr) => self.process_verified_transaction(tr),
        }?;
        Ok(event)
    }

    /// Create, sign, and process a Transaction from `keypair` to `to` of
    /// `n` tokens where `last_id` is the last Entry ID observed by the client.
    pub fn transfer(
        &self,
        n: i64,
        keypair: &KeyPair,
        to: PublicKey,
        last_id: Hash,
    ) -> Result<Signature> {
        let tr = Transaction::new(keypair, to, n, last_id);
        let sig = tr.sig;
        self.process_verified_transaction(&tr).map(|_| sig)
    }

    /// Create, sign, and process a postdated Transaction from `keypair`
    /// to `to` of `n` tokens on `dt` where `last_id` is the last Entry ID
    /// observed by the client.
    pub fn transfer_on_date(
        &self,
        n: i64,
        keypair: &KeyPair,
        to: PublicKey,
        dt: DateTime<Utc>,
        last_id: Hash,
    ) -> Result<Signature> {
        let tr = Transaction::new_on_date(keypair, to, dt, n, last_id);
        let sig = tr.sig;
        self.process_verified_transaction(&tr).map(|_| sig)
    }

    pub fn get_balance(&self, pubkey: &PublicKey) -> Option<i64> {
        let bals = self.balances
            .read()
            .expect("'balances' read lock in get_balance");
        bals.get(pubkey).map(|x| x.load(Ordering::Relaxed) as i64)
    }

    pub fn transaction_count(&self) -> usize {
        self.transaction_count.load(Ordering::Relaxed)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use bincode::serialize;
    use hash::hash;
    use signature::KeyPairUtil;

    #[test]
    fn test_bank() {
        let mint = Mint::new(10_000);
        let pubkey = KeyPair::new().pubkey();
        let bank = Bank::new(&mint);
        assert_eq!(bank.last_id(), mint.last_id());

        bank.transfer(1_000, &mint.keypair(), pubkey, mint.last_id())
            .unwrap();
        assert_eq!(bank.get_balance(&pubkey).unwrap(), 1_000);

        bank.transfer(500, &mint.keypair(), pubkey, mint.last_id())
            .unwrap();
        assert_eq!(bank.get_balance(&pubkey).unwrap(), 1_500);
        assert_eq!(bank.transaction_count(), 2);
    }

    #[test]
    fn test_account_not_found() {
        let mint = Mint::new(1);
        let bank = Bank::new(&mint);
        let keypair = KeyPair::new();
        assert_eq!(
            bank.transfer(1, &keypair, mint.pubkey(), mint.last_id()),
            Err(BankError::AccountNotFound(keypair.pubkey()))
        );
        assert_eq!(bank.transaction_count(), 0);
    }

    #[test]
    fn test_invalid_transfer() {
        let mint = Mint::new(11_000);
        let bank = Bank::new(&mint);
        let pubkey = KeyPair::new().pubkey();
        bank.transfer(1_000, &mint.keypair(), pubkey, mint.last_id())
            .unwrap();
        assert_eq!(bank.transaction_count(), 1);
        assert_eq!(
            bank.transfer(10_001, &mint.keypair(), pubkey, mint.last_id()),
            Err(BankError::InsufficientFunds(mint.pubkey()))
        );
        assert_eq!(bank.transaction_count(), 1);

        let mint_pubkey = mint.keypair().pubkey();
        assert_eq!(bank.get_balance(&mint_pubkey).unwrap(), 10_000);
        assert_eq!(bank.get_balance(&pubkey).unwrap(), 1_000);
    }

    #[test]
    fn test_transfer_to_newb() {
        let mint = Mint::new(10_000);
        let bank = Bank::new(&mint);
        let pubkey = KeyPair::new().pubkey();
        bank.transfer(500, &mint.keypair(), pubkey, mint.last_id())
            .unwrap();
        assert_eq!(bank.get_balance(&pubkey).unwrap(), 500);
    }

    #[test]
    fn test_transfer_on_date() {
        let mint = Mint::new(1);
        let bank = Bank::new(&mint);
        let pubkey = KeyPair::new().pubkey();
        let dt = Utc::now();
        bank.transfer_on_date(1, &mint.keypair(), pubkey, dt, mint.last_id())
            .unwrap();

        // Mint's balance will be zero because all funds are locked up.
        assert_eq!(bank.get_balance(&mint.pubkey()), Some(0));

        // The tx count is 1, because debits were applied.
        assert_eq!(bank.transaction_count(), 1);

        // pubkey's balance will be None because the funds have not been
        // sent.
        assert_eq!(bank.get_balance(&pubkey), None);

        // Now, acknowledge that the time in the condition has occurred, so
        // that pubkey's funds become available.
        bank.process_verified_timestamp(mint.pubkey(), dt).unwrap();
        assert_eq!(bank.get_balance(&pubkey), Some(1));

        // The tx count is still 1, because we chose not to count timestamp
        // events in the tx count.
        assert_eq!(bank.transaction_count(), 1);

        bank.process_verified_timestamp(mint.pubkey(), dt).unwrap(); // <-- Attack! Attempt to process a completed transaction.
        assert_ne!(bank.get_balance(&pubkey), Some(2));
    }

    #[test]
    fn test_transfer_after_date() {
        let mint = Mint::new(1);
        let bank = Bank::new(&mint);
        let pubkey = KeyPair::new().pubkey();
        let dt = Utc::now();
        bank.process_verified_timestamp(mint.pubkey(), dt).unwrap();

        // The given date is already in the past, so this transfer should be
        // processed immediately.
        bank.transfer_on_date(1, &mint.keypair(), pubkey, dt, mint.last_id())
            .unwrap();

        assert_eq!(bank.get_balance(&mint.pubkey()), Some(0));
        assert_eq!(bank.get_balance(&pubkey), Some(1));
    }

    #[test]
    fn test_cancel_transfer() {
        let mint = Mint::new(1);
        let bank = Bank::new(&mint);
        let pubkey = KeyPair::new().pubkey();
        let dt = Utc::now();
        let sig = bank.transfer_on_date(1, &mint.keypair(), pubkey, dt, mint.last_id())
            .unwrap();

        // Assert the debit counts as a transaction.
        assert_eq!(bank.transaction_count(), 1);

        // Mint's balance will be zero because all funds are locked up.
        assert_eq!(bank.get_balance(&mint.pubkey()), Some(0));

        // pubkey's balance will be None because the funds have not been
        // sent.
        assert_eq!(bank.get_balance(&pubkey), None);

        // Now, cancel the transaction. Mint gets her funds back, pubkey never sees them.
        bank.process_verified_sig(mint.pubkey(), sig).unwrap();
        assert_eq!(bank.get_balance(&mint.pubkey()), Some(1));
        assert_eq!(bank.get_balance(&pubkey), None);

        // Assert cancel doesn't cause the count to go backward.
        assert_eq!(bank.transaction_count(), 1);

        bank.process_verified_sig(mint.pubkey(), sig).unwrap(); // <-- Attack! Attempt to cancel a completed transaction.
        assert_ne!(bank.get_balance(&mint.pubkey()), Some(2));
    }

    #[test]
    fn test_duplicate_event_signature() {
        let mint = Mint::new(1);
        let bank = Bank::new(&mint);
        let sig = Signature::default();
        assert!(bank.reserve_signature_with_last_id(&sig, &mint.last_id()));
        assert!(!bank.reserve_signature_with_last_id(&sig, &mint.last_id()));
    }

    #[test]
    fn test_forget_signature() {
        let mint = Mint::new(1);
        let bank = Bank::new(&mint);
        let sig = Signature::default();
        bank.reserve_signature_with_last_id(&sig, &mint.last_id());
        assert!(bank.forget_signature_with_last_id(&sig, &mint.last_id()));
        assert!(!bank.forget_signature_with_last_id(&sig, &mint.last_id()));
    }

    #[test]
    fn test_max_entry_ids() {
        let mint = Mint::new(1);
        let bank = Bank::new(&mint);
        let sig = Signature::default();
        for i in 0..MAX_ENTRY_IDS {
            let last_id = hash(&serialize(&i).unwrap()); // Unique hash
            bank.register_entry_id(&last_id);
        }
        // Assert we're no longer able to use the oldest entry ID.
        assert!(!bank.reserve_signature_with_last_id(&sig, &mint.last_id()));
    }

    #[test]
    fn test_debits_before_credits() {
        let mint = Mint::new(2);
        let bank = Bank::new(&mint);
        let keypair = KeyPair::new();
        let tr0 = Transaction::new(&mint.keypair(), keypair.pubkey(), 2, mint.last_id());
        let tr1 = Transaction::new(&keypair, mint.pubkey(), 1, mint.last_id());
        let trs = vec![tr0, tr1];
        let results = bank.process_verified_transactions(trs);
        assert!(results[1].is_err());

        // Assert bad transactions aren't counted.
        assert_eq!(bank.transaction_count(), 1);
    }
}

#[cfg(all(feature = "unstable", test))]
mod bench {
    extern crate test;
    use self::test::Bencher;
    use bank::*;
    use bincode::serialize;
    use hash::hash;
    use signature::KeyPairUtil;

    #[bench]
    fn process_verified_event_bench(bencher: &mut Bencher) {
        let mint = Mint::new(100_000_000);
        let bank = Bank::new(&mint);
        // Create transactions between unrelated parties.
        let transactions: Vec<_> = (0..4096)
            .into_par_iter()
            .map(|i| {
                // Seed the 'from' account.
                let rando0 = KeyPair::new();
                let tr = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, mint.last_id());
                bank.process_verified_transaction(&tr).unwrap();

                // Seed the 'to' account and a cell for its signature.
                let last_id = hash(&serialize(&i).unwrap()); // Unique hash
                bank.register_entry_id(&last_id);

                let rando1 = KeyPair::new();
                let tr = Transaction::new(&rando0, rando1.pubkey(), 1, last_id);
                bank.process_verified_transaction(&tr).unwrap();

                // Finally, return a transaction that's unique.
                Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
            })
            .collect();
        bencher.iter(|| {
            // Since the benchmarker runs this multiple times, we need to clear the signatures.
            for sigs in bank.last_ids.read().unwrap().iter() {
                sigs.1.write().unwrap().clear();
            }

            assert!(
                bank.process_verified_transactions(transactions.clone())
                    .iter()
                    .all(|x| x.is_ok())
            );
        });
    }
}

308 src/banking_stage.rs Normal file
@@ -0,0 +1,308 @@
//! The `banking_stage` processes Event messages.

use bank::Bank;
use bincode::deserialize;
use event::Event;
use packet;
use packet::SharedPackets;
use rayon::prelude::*;
use record_stage::Signal;
use result::Result;
use std::net::SocketAddr;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver, Sender};
use std::thread::{spawn, JoinHandle};
use std::time::Duration;
use std::time::Instant;
use timing;

pub struct BankingStage {
    pub thread_hdl: JoinHandle<()>,
    pub signal_receiver: Receiver<Signal>,
}

impl BankingStage {
    pub fn new(
        bank: Arc<Bank>,
        exit: Arc<AtomicBool>,
        verified_receiver: Receiver<Vec<(SharedPackets, Vec<u8>)>>,
        packet_recycler: packet::PacketRecycler,
    ) -> Self {
        let (signal_sender, signal_receiver) = channel();
        let thread_hdl = spawn(move || loop {
            let e = Self::process_packets(
                bank.clone(),
                &verified_receiver,
                &signal_sender,
                &packet_recycler,
            );
            if e.is_err() {
                if exit.load(Ordering::Relaxed) {
                    break;
                }
            }
        });
        BankingStage {
            thread_hdl,
            signal_receiver,
        }
    }

    fn deserialize_events(p: &packet::Packets) -> Vec<Option<(Event, SocketAddr)>> {
        p.packets
            .par_iter()
            .map(|x| {
                deserialize(&x.data[0..x.meta.size])
                    .map(|req| (req, x.meta.addr()))
                    .ok()
            })
            .collect()
    }

    fn process_packets(
        bank: Arc<Bank>,
        verified_receiver: &Receiver<Vec<(SharedPackets, Vec<u8>)>>,
        signal_sender: &Sender<Signal>,
        packet_recycler: &packet::PacketRecycler,
    ) -> Result<()> {
        let timer = Duration::new(1, 0);
        let recv_start = Instant::now();
        let mms = verified_receiver.recv_timeout(timer)?;
        let mut reqs_len = 0;
        let mms_len = mms.len();
        info!(
            "@{:?} process start stalled for: {:?}ms batches: {}",
            timing::timestamp(),
            timing::duration_as_ms(&recv_start.elapsed()),
            mms.len(),
        );
        let proc_start = Instant::now();
        for (msgs, vers) in mms {
            let events = Self::deserialize_events(&msgs.read().unwrap());
            reqs_len += events.len();
            let events = events
                .into_iter()
                .zip(vers)
                .filter_map(|(event, ver)| match event {
                    None => None,
                    Some((event, _addr)) => if event.verify() && ver != 0 {
                        Some(event)
                    } else {
                        None
                    },
                })
                .collect();

            debug!("process_events");
            let results = bank.process_verified_events(events);
            let events = results.into_iter().filter_map(|x| x.ok()).collect();
            signal_sender.send(Signal::Events(events))?;
            debug!("done process_events");

            packet_recycler.recycle(msgs);
        }
        let total_time_s = timing::duration_as_s(&proc_start.elapsed());
        let total_time_ms = timing::duration_as_ms(&proc_start.elapsed());
        info!(
            "@{:?} done processing event batches: {} time: {:?}ms reqs: {} reqs/s: {}",
            timing::timestamp(),
            mms_len,
            total_time_ms,
            reqs_len,
            (reqs_len as f32) / (total_time_s)
        );
        Ok(())
    }
}
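
// A minimal sketch, not part of the original file, of how this stage is wired
// into a pipeline: an upstream sigverify stage produces (packets, verification
// flags) batches, and a downstream record stage consumes Signal::Events.
#[allow(dead_code)]
fn example_wire_banking_stage(
    bank: Arc<Bank>,
) -> (Sender<Vec<(SharedPackets, Vec<u8>)>>, BankingStage, Arc<AtomicBool>) {
    let exit = Arc::new(AtomicBool::new(false));
    let (verified_sender, verified_receiver) = channel();
    let packet_recycler = packet::PacketRecycler::default();
    // The stage owns the worker thread: feed verified batches into
    // `verified_sender` and drain `stage.signal_receiver` for entries to
    // record. To shut down, set `exit` and drop the sender; the next recv
    // error lets the loop observe the flag and break.
    let stage = BankingStage::new(bank, exit.clone(), verified_receiver, packet_recycler);
    (verified_sender, stage, exit)
}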

// TODO: When banking is pulled out of RequestStage, add this test back in.

//use bank::Bank;
//use entry::Entry;
//use event::Event;
//use hash::Hash;
//use record_stage::RecordStage;
//use record_stage::Signal;
//use result::Result;
//use std::sync::mpsc::{channel, Sender};
//use std::sync::{Arc, Mutex};
//use std::time::Duration;
//
//#[cfg(test)]
//mod tests {
//    use bank::Bank;
//    use event::Event;
//    use event_processor::EventProcessor;
//    use mint::Mint;
//    use signature::{KeyPair, KeyPairUtil};
//    use transaction::Transaction;
//
//    #[test]
//    // TODO: Move this test into banking_stage. Calling process_events() directly
//    // defeats the purpose of this test.
//    fn test_banking_sequential_consistency() {
//        // In this attack we'll demonstrate that a verifier can interpret the ledger
//        // differently if either the server doesn't signal the ledger to add an
//        // Entry OR if the verifier tries to parallelize across multiple Entries.
//        let mint = Mint::new(2);
//        let bank = Bank::new(&mint);
//        let event_processor = EventProcessor::new(bank, &mint.last_id(), None);
//
//        // Process a batch that includes a transaction that receives two tokens.
//        let alice = KeyPair::new();
//        let tr = Transaction::new(&mint.keypair(), alice.pubkey(), 2, mint.last_id());
//        let events = vec![Event::Transaction(tr)];
//        let entry0 = event_processor.process_events(events).unwrap();
//
//        // Process a second batch that spends one of those tokens.
//        let tr = Transaction::new(&alice, mint.pubkey(), 1, mint.last_id());
//        let events = vec![Event::Transaction(tr)];
//        let entry1 = event_processor.process_events(events).unwrap();
//
//        // Collect the ledger and feed it to a new bank.
//        let entries = vec![entry0, entry1];
//
//        // Assert the user holds one token, not two. If the server only output one
//        // entry, then the second transaction would be rejected, because it drives
//        // the account balance below zero before the credit is added.
//        let bank = Bank::new(&mint);
//        for entry in entries {
//            assert!(
//                bank
//                    .process_verified_events(entry.events)
//                    .into_iter()
//                    .all(|x| x.is_ok())
//            );
//        }
//        assert_eq!(bank.get_balance(&alice.pubkey()), Some(1));
//    }
//}
//
//#[cfg(all(feature = "unstable", test))]
//mod bench {
//    extern crate test;
//    use self::test::Bencher;
//    use bank::{Bank, MAX_ENTRY_IDS};
//    use bincode::serialize;
//    use event_processor::*;
//    use hash::hash;
//    use mint::Mint;
//    use rayon::prelude::*;
//    use signature::{KeyPair, KeyPairUtil};
//    use std::collections::HashSet;
//    use std::time::Instant;
//    use transaction::Transaction;
//
//    #[bench]
//    fn process_events_bench(_bencher: &mut Bencher) {
//        let mint = Mint::new(100_000_000);
//        let bank = Bank::new(&mint);
//        // Create transactions between unrelated parties.
//        let txs = 100_000;
//        let last_ids: Mutex<HashSet<Hash>> = Mutex::new(HashSet::new());
//        let transactions: Vec<_> = (0..txs)
//            .into_par_iter()
//            .map(|i| {
//                // Seed the 'to' account and a cell for its signature.
//                let dummy_id = i % (MAX_ENTRY_IDS as i32);
//                let last_id = hash(&serialize(&dummy_id).unwrap()); // Semi-unique hash
//                {
//                    let mut last_ids = last_ids.lock().unwrap();
//                    if !last_ids.contains(&last_id) {
//                        last_ids.insert(last_id);
//                        bank.register_entry_id(&last_id);
//                    }
//                }
//
//                // Seed the 'from' account.
//                let rando0 = KeyPair::new();
//                let tr = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, last_id);
//                bank.process_verified_transaction(&tr).unwrap();
//
//                let rando1 = KeyPair::new();
//                let tr = Transaction::new(&rando0, rando1.pubkey(), 2, last_id);
//                bank.process_verified_transaction(&tr).unwrap();
//
//                // Finally, return a transaction that's unique.
//                Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
//            })
//            .collect();
//
//        let events: Vec<_> = transactions
//            .into_iter()
//            .map(|tr| Event::Transaction(tr))
//            .collect();
//
//        let event_processor = EventProcessor::new(bank, &mint.last_id(), None);
//
//        let now = Instant::now();
//        assert!(event_processor.process_events(events).is_ok());
//        let duration = now.elapsed();
//        let sec = duration.as_secs() as f64 + duration.subsec_nanos() as f64 / 1_000_000_000.0;
//        let tps = txs as f64 / sec;
//
//        // Ensure that all transactions were successfully logged.
//        drop(event_processor.historian_input);
//        let entries: Vec<Entry> = event_processor.output.lock().unwrap().iter().collect();
//        assert_eq!(entries.len(), 1);
//        assert_eq!(entries[0].events.len(), txs as usize);
//
//        println!("{} tps", tps);
//    }
//}

#[cfg(all(feature = "unstable", test))]
mod bench {
    extern crate test;
    use self::test::Bencher;
    use bank::*;
    use banking_stage::BankingStage;
    use event::Event;
    use mint::Mint;
    use packet::{to_packets, PacketRecycler};
    use record_stage::Signal;
    use signature::{KeyPair, KeyPairUtil};
    use std::iter;
    use std::sync::Arc;
    use std::sync::mpsc::channel;

    #[bench]
    fn stage_bench(bencher: &mut Bencher) {
        let tx = 100_usize;
        let mint = Mint::new(1_000_000_000);
        let pubkey = KeyPair::new().pubkey();

        let events: Vec<_> = (0..tx)
            .map(|i| Event::new_transaction(&mint.keypair(), pubkey, i as i64, mint.last_id()))
            .collect();

        let (verified_sender, verified_receiver) = channel();
        let (signal_sender, signal_receiver) = channel();
        let packet_recycler = PacketRecycler::default();
        let verified: Vec<_> = to_packets(&packet_recycler, events)
            .into_iter()
            .map(|x| {
                let len = (*x).read().unwrap().packets.len();
                (x, iter::repeat(1).take(len).collect())
            })
            .collect();

        bencher.iter(move || {
            let bank = Arc::new(Bank::new(&mint));
            verified_sender.send(verified.clone()).unwrap();
            BankingStage::process_packets(
                bank.clone(),
                &verified_receiver,
                &signal_sender,
                &packet_recycler,
            ).unwrap();
            let signal = signal_receiver.recv().unwrap();
            if let Signal::Events(ref events) = signal {
                assert_eq!(events.len(), tx);
            } else {
                assert!(false);
            }
        });
    }
}
@@ -4,22 +4,22 @@ extern crate isatty;
 extern crate rayon;
 extern crate serde_json;
 extern crate solana;
 extern crate untrusted;

 use futures::Future;
 use getopts::Options;
 use isatty::stdin_isatty;
 use rayon::prelude::*;
-use solana::accountant_stub::AccountantStub;
 use solana::mint::MintDemo;
-use solana::signature::{KeyPair, KeyPairUtil};
+use solana::signature::{GenKeys, KeyPairUtil};
+use solana::thin_client::ThinClient;
 use solana::transaction::Transaction;
 use std::env;
 use std::io::{stdin, Read};
-use std::net::UdpSocket;
+use std::net::{SocketAddr, UdpSocket};
 use std::process::exit;
 use std::thread::sleep;
 use std::time::Duration;
 use std::time::Instant;
 use untrusted::Input;

 fn print_usage(program: &str, opts: Options) {
     let mut brief = format!("Usage: cat <mint.json> | {} [options]\n\n", program);
@@ -32,13 +32,13 @@ fn print_usage(program: &str, opts: Options) {

 fn main() {
     let mut threads = 4usize;
-    let mut addr: String = "127.0.0.1:8000".to_string();
-    let mut send_addr: String = "127.0.0.1:8001".to_string();
+    let mut server_addr: String = "127.0.0.1:8000".to_string();
+    let mut requests_addr: String = "127.0.0.1:8010".to_string();

     let mut opts = Options::new();
     opts.optopt("s", "", "server address", "host:port");
     opts.optopt("c", "", "client address", "host:port");
-    opts.optopt("t", "", "number of threads", "4");
+    opts.optopt("t", "", "number of threads", &format!("{}", threads));
     opts.optflag("h", "help", "print help");
     let args: Vec<String> = env::args().collect();
     let matches = match opts.parse(&args[1..]) {
@@ -55,15 +55,19 @@ fn main() {
         return;
     }
     if matches.opt_present("s") {
-        addr = matches.opt_str("s").unwrap();
+        server_addr = matches.opt_str("s").unwrap();
     }
     if matches.opt_present("c") {
-        send_addr = matches.opt_str("c").unwrap();
+        requests_addr = matches.opt_str("c").unwrap();
     }
     if matches.opt_present("t") {
         threads = matches.opt_str("t").unwrap().parse().expect("integer");
     }

+    let mut events_addr: SocketAddr = requests_addr.parse().unwrap();
+    let requests_port = events_addr.port();
+    events_addr.set_port(requests_port + 1);
+
     if stdin_isatty() {
         eprintln!("nothing found on stdin, expected a json file");
         exit(1);
@@ -82,18 +86,32 @@ fn main() {
         exit(1);
     });

-    let socket = UdpSocket::bind(&send_addr).unwrap();
-    let mut acc = AccountantStub::new(&addr, socket);
+    println!("Binding to {}", requests_addr);
+    let requests_socket = UdpSocket::bind(&requests_addr).unwrap();
+    requests_socket
+        .set_read_timeout(Some(Duration::new(5, 0)))
+        .unwrap();
+    let events_socket = UdpSocket::bind(&events_addr).unwrap();
+    let requests_addr: SocketAddr = server_addr.parse().unwrap();
+    let requests_port = requests_addr.port();
+    let mut events_server_addr = requests_addr.clone();
+    events_server_addr.set_port(requests_port + 3);
+    let mut client = ThinClient::new(
+        requests_addr,
+        requests_socket,
+        events_server_addr,
+        events_socket,
+    );

     println!("Get last ID...");
-    let last_id = acc.get_last_id().wait().unwrap();
+    let last_id = client.get_last_id().wait().unwrap();
     println!("Got last ID {:?}", last_id);

+    let rnd = GenKeys::new(demo.mint.keypair().public_key_bytes());
+
     println!("Creating keypairs...");
-    let txs = demo.users.len() / 2;
-    let keypairs: Vec<_> = demo.users
-        .into_par_iter()
-        .map(|(pkcs8, _)| KeyPair::from_pkcs8(Input::from(&pkcs8)).unwrap())
-        .collect();
+    let txs = demo.num_accounts / 2;
+    let keypairs = rnd.gen_n_keypairs(demo.num_accounts);
     let keypair_pairs: Vec<_> = keypairs.chunks(2).collect();

     println!("Signing transactions...");
@@ -102,7 +120,7 @@ fn main() {
         .into_par_iter()
         .map(|chunk| Transaction::new(&chunk[0], chunk[1].pubkey(), 1, last_id))
         .collect();
-    let duration = now.elapsed();
+    let mut duration = now.elapsed();
     let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
     let bsps = txs as f64 / ns as f64;
     let nsps = ns as f64 / txs as f64;
@@ -112,32 +130,46 @@ fn main() {
         nsps / 1_000_f64
     );

-    let initial_tx_count = acc.transaction_count();
+    let initial_tx_count = client.transaction_count();
     println!("initial count {}", initial_tx_count);

     println!("Transfering {} transactions in {} batches", txs, threads);
     let now = Instant::now();
     let sz = transactions.len() / threads;
     let chunks: Vec<_> = transactions.chunks(sz).collect();
     chunks.into_par_iter().for_each(|trs| {
-        println!("Transferring 1 unit {} times...", trs.len());
-        let send_addr = "0.0.0.0:0";
-        let socket = UdpSocket::bind(send_addr).unwrap();
-        let acc = AccountantStub::new(&addr, socket);
+        println!("Transferring 1 unit {} times... to", trs.len());
+        let requests_addr: SocketAddr = server_addr.parse().unwrap();
+        let mut requests_cb_addr = requests_addr.clone();
+        requests_cb_addr.set_port(0);
+        let requests_socket = UdpSocket::bind(requests_cb_addr).unwrap();
+        requests_socket
+            .set_read_timeout(Some(Duration::new(5, 0)))
+            .unwrap();
+        let mut events_addr: SocketAddr = requests_addr.clone();
+        events_addr.set_port(0);
+        let events_socket = UdpSocket::bind(&events_addr).unwrap();
+        let client = ThinClient::new(
+            requests_addr,
+            requests_socket,
+            events_server_addr,
+            events_socket,
+        );
         for tr in trs {
-            acc.transfer_signed(tr.clone()).unwrap();
+            client.transfer_signed(tr.clone()).unwrap();
        }
     });

-    println!("Waiting for half the transactions to complete...",);
-    let mut tx_count = acc.transaction_count();
-    while tx_count < transactions.len() as u64 / 2 {
-        tx_count = acc.transaction_count();
+    println!("Waiting for transactions to complete...",);
+    let mut tx_count;
+    for _ in 0..10 {
+        tx_count = client.transaction_count();
+        duration = now.elapsed();
+        let txs = tx_count - initial_tx_count;
+        println!("Transactions processed {}", txs);
+        let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
+        let tps = (txs * 1_000_000_000) as f64 / ns as f64;
+        println!("{} tps", tps);
+        sleep(Duration::new(1, 0));
     }
     let txs = tx_count - initial_tx_count;
     println!("Transactions processed {}", txs);

     let duration = now.elapsed();
     let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
     let tps = (txs * 1_000_000_000) as f64 / ns as f64;
     println!("Done. {} tps", tps);
 }
@@ -1,21 +1,17 @@
extern crate isatty;
extern crate rayon;
extern crate ring;
extern crate serde_json;
extern crate solana;
extern crate untrusted;

use isatty::stdin_isatty;
use rayon::prelude::*;
use solana::accountant::MAX_ENTRY_IDS;
use solana::entry::{create_entry, next_tick};
use solana::bank::MAX_ENTRY_IDS;
use solana::entry::{next_entry, Entry};
use solana::event::Event;
use solana::mint::MintDemo;
use solana::signature::{KeyPair, KeyPairUtil};
use solana::transaction::Transaction;
use solana::signature::{GenKeys, KeyPairUtil};
use std::io::{stdin, Read};
use std::process::exit;
use untrusted::Input;

// Generate a ledger with lots and lots of accounts.
fn main() {
@@ -36,17 +32,21 @@ fn main() {
exit(1);
});

let num_accounts = demo.users.len();
let last_id = demo.mint.last_id();
let rnd = GenKeys::new(demo.mint.keypair().public_key_bytes());
let num_accounts = demo.num_accounts;
let tokens_per_user = 1_000;

let keypairs = rnd.gen_n_keypairs(num_accounts);

let mint_keypair = demo.mint.keypair();
let last_id = demo.mint.last_id();

eprintln!("Signing {} transactions...", num_accounts);
let events: Vec<_> = demo.users
let events: Vec<_> = keypairs
.into_par_iter()
.map(|(pkcs8, tokens)| {
let rando = KeyPair::from_pkcs8(Input::from(&pkcs8)).unwrap();
let tr = Transaction::new(&mint_keypair, rando.pubkey(), tokens, last_id);
Event::Transaction(tr)
.map(|rando| {
let last_id = demo.mint.last_id();
Event::new_transaction(&mint_keypair, rando.pubkey(), tokens_per_user, last_id)
})
.collect();

@@ -55,14 +55,14 @@ fn main() {
}

eprintln!("Logging the creation of {} accounts...", num_accounts);
let entry = create_entry(&last_id, 0, events);
let entry = Entry::new(&last_id, 0, events);
println!("{}", serde_json::to_string(&entry).unwrap());

eprintln!("Creating {} empty entries...", MAX_ENTRY_IDS);
// Offer client lots of entry IDs to use for each transaction's last_id.
let mut last_id = last_id;
for _ in 0..MAX_ENTRY_IDS {
let entry = next_tick(&last_id, 1);
let entry = next_entry(&last_id, 1, vec![]);
last_id = entry.id;
let serialized = serde_json::to_string(&entry).unwrap_or_else(|e| {
eprintln!("failed to serialize: {}", e);
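Seeding GenKeys with the mint's public key bytes is what lets this generator and the client demo agree on account keypairs without shipping secret key material. A minimal sketch under that assumption; the API names are taken from this diff and are otherwise unverified.

    extern crate solana;

    use solana::mint::Mint;
    use solana::signature::{GenKeys, KeyPairUtil};

    fn main() {
        // Same seed on both sides, so the derived keypairs match.
        let mint = Mint::new(1_000_000);
        let a = GenKeys::new(mint.keypair().public_key_bytes()).gen_n_keypairs(10);
        let b = GenKeys::new(mint.keypair().public_key_bytes()).gen_n_keypairs(10);
        assert_eq!(a[0].pubkey(), b[0].pubkey()); // deterministic derivation
    }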
@@ -1,37 +0,0 @@
extern crate solana;

use solana::entry::Entry;
use solana::event::Event;
use solana::hash::Hash;
use solana::historian::Historian;
use solana::ledger::Block;
use solana::recorder::Signal;
use solana::signature::{KeyPair, KeyPairUtil};
use solana::transaction::Transaction;
use std::sync::mpsc::SendError;
use std::thread::sleep;
use std::time::Duration;

fn create_ledger(hist: &Historian, seed: &Hash) -> Result<(), SendError<Signal>> {
sleep(Duration::from_millis(15));
let keypair = KeyPair::new();
let tr = Transaction::new(&keypair, keypair.pubkey(), 42, *seed);
let signal0 = Signal::Event(Event::Transaction(tr));
hist.sender.send(signal0)?;
sleep(Duration::from_millis(10));
Ok(())
}

fn main() {
let seed = Hash::default();
let hist = Historian::new(&seed, Some(10));
create_ledger(&hist, &seed).expect("send error");
drop(hist.sender);
let entries: Vec<Entry> = hist.receiver.iter().collect();
for entry in &entries {
println!("{:?}", entry);
}
// Proof-of-History: Verify the historian learned about the events
// in the same order they appear in the vector.
assert!(entries[..].verify(&seed));
}
@@ -3,10 +3,7 @@ extern crate ring;
extern crate serde_json;
extern crate solana;

use rayon::prelude::*;
use ring::rand::SystemRandom;
use solana::mint::{Mint, MintDemo};
use solana::signature::KeyPair;
use std::io;

fn main() {
@@ -18,16 +15,7 @@ fn main() {
let mint = Mint::new(tokens);
let tokens_per_user = 1_000;
let num_accounts = tokens / tokens_per_user;
let rnd = SystemRandom::new();

let users: Vec<_> = (0..num_accounts)
.into_par_iter()
.map(|_| {
let pkcs8 = KeyPair::generate_pkcs8(&rnd).unwrap().to_vec();
(pkcs8, tokens_per_user)
})
.collect();

let demo = MintDemo { mint, users };
let demo = MintDemo { mint, num_accounts };
println!("{}", serde_json::to_string(&demo).unwrap());
}
261
src/bin/multinode-demo.rs
Normal file
@@ -0,0 +1,261 @@
extern crate futures;
extern crate getopts;
extern crate isatty;
extern crate rayon;
extern crate serde_json;
extern crate solana;

use futures::Future;
use getopts::Options;
use isatty::stdin_isatty;
use rayon::prelude::*;
use solana::crdt::{Crdt, ReplicatedData};
use solana::mint::MintDemo;
use solana::signature::{GenKeys, KeyPair, KeyPairUtil};
use solana::streamer::default_window;
use solana::thin_client::ThinClient;
use solana::transaction::Transaction;
use std::env;
use std::fs::File;
use std::io::{stdin, Read};
use std::net::{SocketAddr, UdpSocket};
use std::process::exit;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
use std::thread::JoinHandle;
use std::thread::sleep;
use std::time::Duration;
use std::time::Instant;

fn print_usage(program: &str, opts: Options) {
let mut brief = format!("Usage: cat <mint.json> | {} [options]\n\n", program);
brief += " Solana client demo creates a number of transactions and\n";
brief += " sends them to a target node.";
brief += " Takes json formatted mint file to stdin.";

print!("{}", opts.usage(&brief));
}

fn main() {
let mut threads = 4usize;
let mut num_nodes = 10usize;
let mut leader = "leader.json".to_string();

let mut opts = Options::new();
opts.optopt("l", "", "leader", "leader.json");
opts.optopt("c", "", "client address", "host:port");
opts.optopt("t", "", "number of threads", &format!("{}", threads));
opts.optopt(
"n",
"",
"number of nodes to converge to",
&format!("{}", num_nodes),
);
opts.optflag("h", "help", "print help");
let args: Vec<String> = env::args().collect();
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(e) => {
eprintln!("{}", e);
exit(1);
}
};

if matches.opt_present("h") {
let program = args[0].clone();
print_usage(&program, opts);
return;
}
if matches.opt_present("l") {
leader = matches.opt_str("l").unwrap();
}
let client_addr: Arc<RwLock<SocketAddr>> = if matches.opt_present("c") {
let addr = matches.opt_str("c").unwrap().parse().unwrap();
Arc::new(RwLock::new(addr))
} else {
Arc::new(RwLock::new("127.0.0.1:8010".parse().unwrap()))
};
if matches.opt_present("t") {
threads = matches.opt_str("t").unwrap().parse().expect("integer");
}
if matches.opt_present("n") {
num_nodes = matches.opt_str("n").unwrap().parse().expect("integer");
}

let leader: ReplicatedData = read_leader(leader);
let signal = Arc::new(AtomicBool::new(false));
let mut c_threads = vec![];
let validators = converge(
&client_addr,
&leader,
signal.clone(),
num_nodes + 2,
&mut c_threads,
);

if stdin_isatty() {
eprintln!("nothing found on stdin, expected a json file");
exit(1);
}

let mut buffer = String::new();
let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
if num_bytes == 0 {
eprintln!("empty file on stdin, expected a json file");
exit(1);
}

println!("Parsing stdin...");
let demo: MintDemo = serde_json::from_str(&buffer).unwrap_or_else(|e| {
eprintln!("failed to parse json: {}", e);
exit(1);
});
let mut client = mk_client(&client_addr, &leader);

println!("Get last ID...");
let last_id = client.get_last_id().wait().unwrap();
println!("Got last ID {:?}", last_id);

let rnd = GenKeys::new(demo.mint.keypair().public_key_bytes());

println!("Creating keypairs...");
let txs = demo.num_accounts / 2;
let keypairs = rnd.gen_n_keypairs(demo.num_accounts);
let keypair_pairs: Vec<_> = keypairs.chunks(2).collect();

println!("Signing transactions...");
let now = Instant::now();
let transactions: Vec<_> = keypair_pairs
.into_par_iter()
.map(|chunk| Transaction::new(&chunk[0], chunk[1].pubkey(), 1, last_id))
.collect();
let duration = now.elapsed();
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
let bsps = txs as f64 / ns as f64;
let nsps = ns as f64 / txs as f64;
println!(
"Done. {} thousand signatures per second, {}us per signature",
bsps * 1_000_000_f64,
nsps / 1_000_f64
);

let first_count = client.transaction_count();
println!("initial count {}", first_count);

println!("Transferring {} transactions in {} batches", txs, threads);
let sz = transactions.len() / threads;
let chunks: Vec<_> = transactions.chunks(sz).collect();
chunks.into_par_iter().for_each(|trs| {
println!("Transferring 1 unit {} times... to", trs.len());
let client = mk_client(&client_addr, &leader);
for tr in trs {
client.transfer_signed(tr.clone()).unwrap();
}
});

println!("Sampling tps every second...",);
validators.into_par_iter().for_each(|val| {
let mut client = mk_client(&client_addr, &val);
let mut now = Instant::now();
let mut initial_tx_count = client.transaction_count();
for i in 0..100 {
let tx_count = client.transaction_count();
let duration = now.elapsed();
now = Instant::now();
let sample = tx_count - initial_tx_count;
initial_tx_count = tx_count;
println!("{}: Transactions processed {}", val.events_addr, sample);
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
let tps = (sample * 1_000_000_000) as f64 / ns as f64;
println!("{}: {} tps", val.events_addr, tps);
let total = tx_count - first_count;
println!(
"{}: Total Transactions processed {}",
val.events_addr, total
);
if total == transactions.len() as u64 {
break;
}
if i > 20 && sample == 0 {
break;
}
sleep(Duration::new(1, 0));
}
});
signal.store(true, Ordering::Relaxed);
for t in c_threads {
t.join().unwrap();
}
}

fn mk_client(locked_addr: &Arc<RwLock<SocketAddr>>, r: &ReplicatedData) -> ThinClient {
let mut addr = locked_addr.write().unwrap();
let port = addr.port();
let events_socket = UdpSocket::bind(addr.clone()).unwrap();
addr.set_port(port + 1);
let requests_socket = UdpSocket::bind(addr.clone()).unwrap();
addr.set_port(port + 2);
ThinClient::new(
r.requests_addr,
requests_socket,
r.events_addr,
events_socket,
)
}

fn spy_node(client_addr: &Arc<RwLock<SocketAddr>>) -> (ReplicatedData, UdpSocket) {
let mut addr = client_addr.write().unwrap();
let port = addr.port();
let gossip = UdpSocket::bind(addr.clone()).unwrap();
addr.set_port(port + 1);
let daddr = "0.0.0.0:0".parse().unwrap();
let pubkey = KeyPair::new().pubkey();
let node = ReplicatedData::new(pubkey, gossip.local_addr().unwrap(), daddr, daddr, daddr);
(node, gossip)
}

fn converge(
client_addr: &Arc<RwLock<SocketAddr>>,
leader: &ReplicatedData,
exit: Arc<AtomicBool>,
num_nodes: usize,
threads: &mut Vec<JoinHandle<()>>,
) -> Vec<ReplicatedData> {
// let's spy on the network
let daddr = "0.0.0.0:0".parse().unwrap();
let (spy, spy_gossip) = spy_node(client_addr);
let mut spy_crdt = Crdt::new(spy);
spy_crdt.insert(&leader);
spy_crdt.set_leader(leader.id);

let spy_ref = Arc::new(RwLock::new(spy_crdt));
let spy_window = default_window();
let t_spy_listen = Crdt::listen(spy_ref.clone(), spy_window, spy_gossip, exit.clone());
let t_spy_gossip = Crdt::gossip(spy_ref.clone(), exit.clone());
//wait for the network to converge
for _ in 0..30 {
let min = spy_ref.read().unwrap().convergence();
if num_nodes as u64 == min {
println!("converged!");
break;
}
sleep(Duration::new(1, 0));
}
threads.push(t_spy_listen);
threads.push(t_spy_gossip);
let v: Vec<ReplicatedData> = spy_ref
.read()
.unwrap()
.table
.values()
.into_iter()
.filter(|x| x.requests_addr != daddr)
.map(|x| x.clone())
.collect();
v.clone()
}

fn read_leader(path: String) -> ReplicatedData {
let file = File::open(path).expect("file");
serde_json::from_reader(file).expect("parse")
}
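mk_client above carves two UDP sockets per client out of a shared, advancing base address. A stripped-down sketch of that allocation scheme, std only, names ours:

    use std::net::{SocketAddr, UdpSocket};

    // Bind two sockets at consecutive ports starting at `base`, then
    // advance `base` past them so the next caller gets a fresh range.
    fn bind_pair(base: &mut SocketAddr) -> (UdpSocket, UdpSocket) {
        let events = UdpSocket::bind(*base).unwrap();
        base.set_port(base.port() + 1);
        let requests = UdpSocket::bind(*base).unwrap();
        base.set_port(base.port() + 1);
        (events, requests)
    }

    fn main() {
        let mut base: SocketAddr = "127.0.0.1:9100".parse().unwrap();
        let _pair = bind_pair(&mut base);
        assert_eq!(base.port(), 9102); // the next client starts two ports up
    }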
@@ -1,21 +1,28 @@
extern crate env_logger;
extern crate getopts;
extern crate isatty;
extern crate pnet;
extern crate serde_json;
extern crate solana;

use getopts::Options;
use isatty::stdin_isatty;
use solana::accountant::Accountant;
use solana::accountant_skel::AccountantSkel;
use pnet::datalink;
use solana::bank::Bank;
use solana::crdt::ReplicatedData;
use solana::entry::Entry;
use solana::event::Event;
use solana::historian::Historian;
use solana::server::Server;
use solana::signature::{KeyPair, KeyPairUtil};
use solana::transaction::Instruction;
use std::env;
use std::fs::File;
use std::io::{stdin, stdout, Read};
use std::net::{IpAddr, SocketAddr, UdpSocket};
use std::process::exit;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, Mutex};
use std::time::Duration;

fn print_usage(program: &str, opts: Options) {
let mut brief = format!("Usage: cat <transaction.log> | {} [options]\n\n", program);
@@ -28,10 +35,17 @@ fn print_usage(program: &str, opts: Options) {

fn main() {
env_logger::init().unwrap();
let mut port = 8000u16;
let mut opts = Options::new();
opts.optopt("p", "", "port", "port");
opts.optopt("b", "", "bind", "bind to port or address");
opts.optflag("d", "dyn", "detect network address dynamically");
opts.optopt("s", "", "save", "save my identity to path.json");
opts.optflag("h", "help", "print help");
opts.optopt(
"v",
"",
"validator",
"run as replicate with path to leader.json",
);
let args: Vec<String> = env::args().collect();
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
@@ -45,11 +59,14 @@ fn main() {
print_usage(&program, opts);
return;
}
if matches.opt_present("p") {
port = matches.opt_str("p").unwrap().parse().expect("port");
}
let addr = format!("0.0.0.0:{}", port);

let bind_addr: SocketAddr = {
let mut bind_addr = parse_port_or_addr(matches.opt_str("b"));
if matches.opt_present("d") {
let ip = get_ip_addr().unwrap();
bind_addr.set_ip(ip);
}
bind_addr
};
if stdin_isatty() {
eprintln!("nothing found on stdin, expected a log file");
exit(1);
@@ -70,6 +87,8 @@ fn main() {
})
});

eprintln!("done parsing...");

// The first item in the ledger is required to be an entry with zero num_hashes,
// which implies its id can be used as the ledger's seed.
let entry0 = entries.next().unwrap();
@@ -78,34 +97,141 @@ fn main() {
// fields are the same. That entry should be treated as a deposit, not a
// transfer to oneself.
let entry1: Entry = entries.next().unwrap();
let deposit = if let Event::Transaction(ref tr) = entry1.events[0] {
tr.data.plan.final_payment()
let Event::Transaction(ref tr) = entry1.events[0];
let deposit = if let Instruction::NewContract(contract) = &tr.instruction {
contract.plan.final_payment()
} else {
None
};

let acc = Accountant::new_from_deposit(&deposit.unwrap());
acc.register_entry_id(&entry0.id);
acc.register_entry_id(&entry1.id);
eprintln!("creating bank...");

let bank = Bank::new_from_deposit(&deposit.unwrap());
bank.register_entry_id(&entry0.id);
bank.register_entry_id(&entry1.id);

eprintln!("processing entries...");

let mut last_id = entry1.id;
for entry in entries {
last_id = entry.id;
acc.process_verified_events(entry.events).unwrap();
acc.register_entry_id(&last_id);
let results = bank.process_verified_events(entry.events);
for result in results {
if let Err(e) = result {
eprintln!("failed to process event {:?}", e);
exit(1);
}
}
bank.register_entry_id(&last_id);
}

let historian = Historian::new(&last_id, Some(1000));
eprintln!("creating networking stack...");

let exit = Arc::new(AtomicBool::new(false));
let skel = Arc::new(Mutex::new(AccountantSkel::new(
acc,
last_id,
stdout(),
historian,
)));
let threads = AccountantSkel::serve(&skel, &addr, exit.clone()).unwrap();
eprintln!("Ready. Listening on {}", addr);
// we need all the receiving sockets to be bound within the expected
// port range that we open on aws
let mut repl_data = make_repl_data(&bind_addr);
let threads = if matches.opt_present("v") {
eprintln!("starting validator... {}", repl_data.requests_addr);
let path = matches.opt_str("v").unwrap();
let file = File::open(path).expect("file");
let leader = serde_json::from_reader(file).expect("parse");
let s = Server::new_validator(
bank,
repl_data.clone(),
UdpSocket::bind(repl_data.requests_addr).unwrap(),
UdpSocket::bind("0.0.0.0:0").unwrap(),
UdpSocket::bind(repl_data.replicate_addr).unwrap(),
UdpSocket::bind(repl_data.gossip_addr).unwrap(),
leader,
exit.clone(),
);
s.thread_hdls
} else {
eprintln!("starting leader... {}", repl_data.requests_addr);
repl_data.current_leader_id = repl_data.id.clone();
let server = Server::new_leader(
bank,
last_id,
Some(Duration::from_millis(1000)),
repl_data.clone(),
UdpSocket::bind(repl_data.requests_addr).unwrap(),
UdpSocket::bind(repl_data.events_addr).unwrap(),
UdpSocket::bind("0.0.0.0:0").unwrap(),
UdpSocket::bind("0.0.0.0:0").unwrap(),
UdpSocket::bind(repl_data.gossip_addr).unwrap(),
exit.clone(),
stdout(),
);
server.thread_hdls
};
if matches.opt_present("s") {
let path = matches.opt_str("s").unwrap();
let file = File::create(path).expect("file");
serde_json::to_writer(file, &repl_data).expect("serialize");
}
eprintln!("Ready. Listening on {}", bind_addr);

for t in threads {
t.join().expect("join");
}
}

fn next_port(server_addr: &SocketAddr, nxt: u16) -> SocketAddr {
let mut gossip_addr = server_addr.clone();
gossip_addr.set_port(server_addr.port() + nxt);
gossip_addr
}

fn make_repl_data(bind_addr: &SocketAddr) -> ReplicatedData {
let events_addr = bind_addr.clone();
let gossip_addr = next_port(&bind_addr, 1);
let replicate_addr = next_port(&bind_addr, 2);
let requests_addr = next_port(&bind_addr, 3);
let pubkey = KeyPair::new().pubkey();
ReplicatedData::new(
pubkey,
gossip_addr,
replicate_addr,
requests_addr,
events_addr,
)
}

fn parse_port_or_addr(optstr: Option<String>) -> SocketAddr {
let daddr: SocketAddr = "0.0.0.0:8000".parse().expect("default socket address");
if let Some(addrstr) = optstr {
if let Ok(port) = addrstr.parse() {
let mut addr = daddr.clone();
addr.set_port(port);
addr
} else if let Ok(addr) = addrstr.parse() {
addr
} else {
daddr
}
} else {
daddr
}
}

fn get_ip_addr() -> Option<IpAddr> {
for iface in datalink::interfaces() {
for p in iface.ips {
if !p.ip().is_loopback() && !p.ip().is_multicast() {
return Some(p.ip());
}
}
}
None
}

#[test]
fn test_parse_port_or_addr() {
let p1 = parse_port_or_addr(Some("9000".to_string()));
assert_eq!(p1.port(), 9000);
let p2 = parse_port_or_addr(Some("127.0.0.1:7000".to_string()));
assert_eq!(p2.port(), 7000);
let p3 = parse_port_or_addr(None);
assert_eq!(p3.port(), 8000);
}
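make_repl_data above derives every service address from the one bind address chosen on the command line. A small sketch of that port plan, with the offsets assumed from the code above:

    use std::net::SocketAddr;

    // The fullnode port layout relative to the bind address.
    fn port_plan(bind: &SocketAddr) -> [(&'static str, u16); 4] {
        let p = bind.port();
        [
            ("events", p),
            ("gossip", p + 1),
            ("replicate", p + 2),
            ("requests", p + 3),
        ]
    }

    fn main() {
        let bind: SocketAddr = "0.0.0.0:8000".parse().unwrap();
        assert_eq!(port_plan(&bind)[3], ("requests", 8003));
    }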
506
src/crdt.rs
@@ -1,16 +1,27 @@
//! The `crdt` module defines a data structure that is shared by all the nodes in the network over
//! a gossip control plane. The goal is to share small bits of of-chain information and detect and
//! a gossip control plane. The goal is to share small bits of off-chain information and detect and
//! repair partitions.
//!
//! This CRDT only supports a very limited set of types. A map of PublicKey -> Versioned Struct.
//! The last version is always picked durring an update.
//! The last version is always picked during an update.
//!
//! The network is arranged in layers:
//!
//! * layer 0 - Leader.
//! * layer 1 - As many nodes as we can fit
//! * layer 2 - Everyone else, if layer 1 is `2^10`, layer 2 should be able to fit `2^20` number of nodes.
//!
//! Bank needs to provide an interface for us to query the stake weight

use bincode::{deserialize, serialize};
use byteorder::{LittleEndian, ReadBytesExt};
use hash::Hash;
use result::Result;
use packet::SharedBlob;
use rayon::prelude::*;
use result::{Error, Result};
use ring::rand::{SecureRandom, SystemRandom};
use signature::{PublicKey, Signature};
use std;
use std::collections::HashMap;
use std::io::Cursor;
use std::net::{SocketAddr, UdpSocket};
@@ -20,20 +31,22 @@ use std::thread::{sleep, spawn, JoinHandle};
use std::time::Duration;

/// Structure to be replicated by the network
#[derive(Serialize, Deserialize, Clone)]
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct ReplicatedData {
id: PublicKey,
pub id: PublicKey,
sig: Signature,
/// should always be increasing
version: u64,
/// address to connect to for gossip
gossip_addr: SocketAddr,
pub gossip_addr: SocketAddr,
/// address to connect to for replication
replicate_addr: SocketAddr,
pub replicate_addr: SocketAddr,
/// address to connect to when this node is leader
lead_addr: SocketAddr,
pub requests_addr: SocketAddr,
/// events address
pub events_addr: SocketAddr,
/// current leader identity
current_leader_id: PublicKey,
pub current_leader_id: PublicKey,
/// last verified hash that was submitted to the leader
last_verified_hash: Hash,
/// last verified count, always increasing
@@ -41,15 +54,21 @@ pub struct ReplicatedData {
}

impl ReplicatedData {
pub fn new(id: PublicKey, gossip_addr: SocketAddr) -> ReplicatedData {
let daddr = "0.0.0.0:0".parse().unwrap();
pub fn new(
id: PublicKey,
gossip_addr: SocketAddr,
replicate_addr: SocketAddr,
requests_addr: SocketAddr,
events_addr: SocketAddr,
) -> ReplicatedData {
ReplicatedData {
id,
sig: Signature::default(),
version: 0,
gossip_addr,
replicate_addr: daddr,
lead_addr: daddr,
replicate_addr,
requests_addr,
events_addr,
current_leader_id: PublicKey::default(),
last_verified_hash: Hash::default(),
last_verified_count: 0,
@@ -69,17 +88,17 @@ impl ReplicatedData {
/// * `listen` - listen for requests and responses
/// No attempt to keep track of timeouts or dropped requests is made, or should be.
pub struct Crdt {
table: HashMap<PublicKey, ReplicatedData>,
pub table: HashMap<PublicKey, ReplicatedData>,
/// Value of my update index when entry in table was updated.
/// Nodes will ask for updates since `update_index`, and this node
/// should respond with all the identities that are greater than the
/// request's `update_index` in this list
local: HashMap<PublicKey, u64>,
/// The value of the remote update index that i have last seen
/// The value of the remote update index that I have last seen
/// This Node will ask external nodes for updates since the value in this list
remote: HashMap<PublicKey, u64>,
update_index: u64,
me: PublicKey,
pub remote: HashMap<PublicKey, u64>,
pub update_index: u64,
pub me: PublicKey,
timeout: Duration,
}
// TODO These messages should be signed, and go through the gpu pipeline for spam filtering
@@ -92,6 +111,8 @@ enum Protocol {
//TODO might need a since?
/// from id, form's last update index, ReplicatedData
ReceiveUpdates(PublicKey, u64, Vec<ReplicatedData>),
/// ask for a missing index
RequestWindowIndex(ReplicatedData, u64),
}

impl Crdt {
@@ -103,38 +124,198 @@ impl Crdt {
remote: HashMap::new(),
me: me.id,
update_index: 1,
timeout: Duration::new(0, 100_000),
timeout: Duration::from_millis(100),
};
g.local.insert(me.id, g.update_index);
g.table.insert(me.id, me);
g
}
pub fn import(&mut self, v: &ReplicatedData) {
// TODO check that last_verified types are always increasing
// TODO probably an error or attack
if self.me != v.id {
self.insert(v);
}
pub fn my_data(&self) -> &ReplicatedData {
&self.table[&self.me]
}
pub fn leader_data(&self) -> &ReplicatedData {
&self.table[&self.table[&self.me].current_leader_id]
}

pub fn set_leader(&mut self, key: PublicKey) -> () {
let mut me = self.my_data().clone();
me.current_leader_id = key;
me.version += 1;
self.insert(&me);
}

pub fn insert(&mut self, v: &ReplicatedData) {
// TODO check that last_verified types are always increasing
if self.table.get(&v.id).is_none() || (v.version > self.table[&v.id].version) {
//somehow we signed a message for our own identity with a higher version than
// we have stored ourselves
trace!("me: {:?}", self.me[0]);
trace!("v.id: {:?}", v.id[0]);
trace!("insert! {}", v.version);
self.update_index += 1;
let _ = self.table.insert(v.id, v.clone());
let _ = self.table.insert(v.id.clone(), v.clone());
let _ = self.local.insert(v.id, self.update_index);
} else {
trace!("INSERT FAILED {}", v.version);
trace!(
"INSERT FAILED new.version: {} me.version: {}",
v.version,
self.table[&v.id].version
);
}
}

/// broadcast messages from the leader to layer 1 nodes
/// # Remarks
/// We need to avoid having obj locked while doing any io, such as the `send_to`
pub fn broadcast(
obj: &Arc<RwLock<Self>>,
blobs: &Vec<SharedBlob>,
s: &UdpSocket,
transmit_index: &mut u64,
) -> Result<()> {
let (me, table): (ReplicatedData, Vec<ReplicatedData>) = {
// copy to avoid locking during IO
let robj = obj.read().expect("'obj' read lock in pub fn broadcast");
trace!("broadcast table {}", robj.table.len());
let cloned_table: Vec<ReplicatedData> = robj.table.values().cloned().collect();
(robj.table[&robj.me].clone(), cloned_table)
};
let daddr = "0.0.0.0:0".parse().unwrap();
let nodes: Vec<&ReplicatedData> = table
.iter()
.filter(|v| {
if me.id == v.id {
//filter myself
false
} else if v.replicate_addr == daddr {
//filter nodes that are not listening
false
} else {
trace!("broadcast node {}", v.replicate_addr);
true
}
})
.collect();
if nodes.len() < 1 {
warn!("crdt too small");
return Err(Error::CrdtTooSmall);
}
trace!("nodes table {}", nodes.len());
trace!("blobs table {}", blobs.len());
// enumerate all the blobs, those are the indices
// transmit them to nodes, starting from a different node
let orders: Vec<_> = blobs
.iter()
.enumerate()
.zip(
nodes
.iter()
.cycle()
.skip((*transmit_index as usize) % nodes.len()),
)
.collect();
trace!("orders table {}", orders.len());
let errs: Vec<_> = orders
.into_iter()
.map(|((i, b), v)| {
// only leader should be broadcasting
assert!(me.current_leader_id != v.id);
let mut blob = b.write().expect("'b' write lock in pub fn broadcast");
blob.set_id(me.id).expect("set_id in pub fn broadcast");
blob.set_index(*transmit_index + i as u64)
.expect("set_index in pub fn broadcast");
//TODO profile this, may need multiple sockets for par_iter
trace!("broadcast {} to {}", blob.meta.size, v.replicate_addr);
let e = s.send_to(&blob.data[..blob.meta.size], &v.replicate_addr);
trace!("done broadcast {} to {}", blob.meta.size, v.replicate_addr);
e
})
.collect();
trace!("broadcast results {}", errs.len());
for e in errs {
match e {
Err(e) => {
error!("broadcast result {:?}", e);
return Err(Error::IO(e));
}
_ => (),
}
*transmit_index += 1;
}
Ok(())
}

/// retransmit messages from the leader to layer 1 nodes
/// # Remarks
/// We need to avoid having obj locked while doing any io, such as the `send_to`
pub fn retransmit(obj: &Arc<RwLock<Self>>, blob: &SharedBlob, s: &UdpSocket) -> Result<()> {
let (me, table): (ReplicatedData, Vec<ReplicatedData>) = {
// copy to avoid locking during IO
let s = obj.read().expect("'obj' read lock in pub fn retransmit");
(s.table[&s.me].clone(), s.table.values().cloned().collect())
};
blob.write()
.unwrap()
.set_id(me.id)
.expect("set_id in pub fn retransmit");
let rblob = blob.read().unwrap();
let daddr = "0.0.0.0:0".parse().unwrap();
let orders: Vec<_> = table
.iter()
.filter(|v| {
if me.id == v.id {
false
} else if me.current_leader_id == v.id {
trace!("skip retransmit to leader {:?}", v.id);
false
} else if v.replicate_addr == daddr {
trace!("skip nodes that are not listening {:?}", v.id);
false
} else {
true
}
})
.collect();
let errs: Vec<_> = orders
.par_iter()
.map(|v| {
trace!(
"retransmit blob {} to {}",
rblob.get_index().unwrap(),
v.replicate_addr
);
//TODO profile this, may need multiple sockets for par_iter
s.send_to(&rblob.data[..rblob.meta.size], &v.replicate_addr)
})
.collect();
for e in errs {
match e {
Err(e) => {
info!("retransmit error {:?}", e);
return Err(Error::IO(e));
}
_ => (),
}
}
Ok(())
}

// max number of nodes that we could be converged to
pub fn convergence(&self) -> u64 {
let max = self.remote.values().len() as u64 + 1;
self.remote.values().fold(max, |a, b| std::cmp::min(a, *b))
}

fn random() -> u64 {
let rnd = SystemRandom::new();
let mut buf = [0u8; 8];
rnd.fill(&mut buf).unwrap();
rnd.fill(&mut buf).expect("rnd.fill in pub fn random");
let mut rdr = Cursor::new(&buf);
rdr.read_u64::<LittleEndian>().unwrap()
rdr.read_u64::<LittleEndian>()
.expect("rdr.read_u64 in fn random")
}
fn get_updates_since(&self, v: u64) -> (PublicKey, u64, Vec<ReplicatedData>) {
trace!("get updates since {}", v);
//trace!("get updates since {}", v);
let data = self.table
.values()
.filter(|x| self.local[&x.id] > v)
@@ -145,19 +326,36 @@ impl Crdt {
(id, ups, data)
}

pub fn window_index_request(&self, ix: u64) -> Result<(SocketAddr, Vec<u8>)> {
if self.table.len() <= 1 {
return Err(Error::CrdtTooSmall);
}
let mut n = (Self::random() as usize) % self.table.len();
while self.table.values().nth(n).unwrap().id == self.me {
n = (Self::random() as usize) % self.table.len();
}
let addr = self.table.values().nth(n).unwrap().gossip_addr.clone();
let req = Protocol::RequestWindowIndex(self.table[&self.me].clone(), ix);
let out = serialize(&req)?;
Ok((addr, out))
}

/// Create a random gossip request
/// # Returns
/// (A,B,C)
/// * A - Remote gossip address
/// * B - My gossip address
/// * C - Remote update index to request updates since
fn gossip_request(&self) -> (SocketAddr, Protocol) {
let n = (Self::random() as usize) % self.table.len();
trace!("random {:?} {}", &self.me[0..1], n);
let v = self.table.values().nth(n).unwrap().clone();
/// (A,B)
/// * A - Address to send to
/// * B - RequestUpdates protocol message
fn gossip_request(&self) -> Result<(SocketAddr, Protocol)> {
let options: Vec<_> = self.table.values().filter(|v| v.id != self.me).collect();
if options.len() < 1 {
trace!("crdt too small for gossip");
return Err(Error::CrdtTooSmall);
}
let n = (Self::random() as usize) % options.len();
let v = options[n].clone();
let remote_update_index = *self.remote.get(&v.id).unwrap_or(&0);
let req = Protocol::RequestUpdates(remote_update_index, self.table[&self.me].clone());
(v.gossip_addr, req)
Ok((v.gossip_addr, req))
}

/// At random pick a node and try to get updated changes from them
@@ -167,11 +365,14 @@ impl Crdt {

// Lock the object only to do this operation and not for any longer
// especially not when doing the `sock.send_to`
let (remote_gossip_addr, req) = obj.read().unwrap().gossip_request();
let (remote_gossip_addr, req) = obj.read()
.expect("'obj' read lock in fn run_gossip")
.gossip_request()?;
let sock = UdpSocket::bind("0.0.0.0:0")?;
// TODO this will get chatty, so we need to first ask for number of updates since
// then only ask for specific data that we dont have
let r = serialize(&req)?;
trace!("sending gossip request to {}", remote_gossip_addr);
sock.send_to(&r, remote_gossip_addr)?;
Ok(())
}
@@ -186,7 +387,7 @@ impl Crdt {
// TODO we need to punish/spam resist here
// sig verify the whole update and slash anyone who sends a bad update
for v in data {
self.import(&v);
self.insert(&v);
}
*self.remote.entry(from).or_insert(update_index) = update_index;
}
@@ -199,14 +400,50 @@ impl Crdt {
return;
}
//TODO this should be a tuned parameter
sleep(obj.read().unwrap().timeout);
sleep(
obj.read()
.expect("'obj' read lock in pub fn gossip")
.timeout,
);
})
}

fn run_window_request(
window: &Arc<RwLock<Vec<Option<SharedBlob>>>>,
sock: &UdpSocket,
from: &ReplicatedData,
ix: u64,
) -> Result<()> {
let pos = (ix as usize) % window.read().unwrap().len();
let mut outblob = vec![];
if let &Some(ref blob) = &window.read().unwrap()[pos] {
let rblob = blob.read().unwrap();
let blob_ix = rblob.get_index().expect("run_window_request get_index");
if blob_ix == ix {
// copy to avoid doing IO inside the lock
outblob.extend(&rblob.data[..rblob.meta.size]);
}
} else {
assert!(window.read().unwrap()[pos].is_none());
info!("failed RequestWindowIndex {} {}", ix, from.replicate_addr);
}
if outblob.len() > 0 {
info!(
"responding RequestWindowIndex {} {}",
ix, from.replicate_addr
);
sock.send_to(&outblob, from.replicate_addr)?;
}
Ok(())
}
/// Process messages from the network
fn run_listen(obj: &Arc<RwLock<Self>>, sock: &UdpSocket) -> Result<()> {
fn run_listen(
obj: &Arc<RwLock<Self>>,
window: &Arc<RwLock<Vec<Option<SharedBlob>>>>,
sock: &UdpSocket,
) -> Result<()> {
//TODO cache connections
let mut buf = vec![0u8; 1024 * 64];
trace!("recv_from on {}", sock.local_addr().unwrap());
let (amt, src) = sock.recv_from(&mut buf)?;
trace!("got request from {}", src);
buf.resize(amt, 0);
@@ -216,31 +453,59 @@ impl Crdt {
Protocol::RequestUpdates(v, reqdata) => {
trace!("RequestUpdates {}", v);
let addr = reqdata.gossip_addr;
// only lock for this call, dont lock durring IO `sock.send_to` or `sock.recv_from`
let (from, ups, data) = obj.read().unwrap().get_updates_since(v);
// only lock for this call, dont lock during IO `sock.send_to` or `sock.recv_from`
let (from, ups, data) = obj.read()
.expect("'obj' read lock in RequestUpdates")
.get_updates_since(v);
trace!("get updates since response {} {}", v, data.len());
let rsp = serialize(&Protocol::ReceiveUpdates(from, ups, data))?;
trace!("send_to {}", addr);
//TODO verify reqdata belongs to sender
obj.write().unwrap().import(&reqdata);
sock.send_to(&rsp, addr).unwrap();
obj.write()
.expect("'obj' write lock in RequestUpdates")
.insert(&reqdata);
sock.send_to(&rsp, addr)
.expect("'sock.send_to' in RequestUpdates");
trace!("send_to done!");
}
Protocol::ReceiveUpdates(from, ups, data) => {
trace!("ReceivedUpdates");
obj.write().unwrap().apply_updates(from, ups, &data);
obj.write()
.expect("'obj' write lock in ReceiveUpdates")
.apply_updates(from, ups, &data);
}
Protocol::RequestWindowIndex(from, ix) => {
//TODO verify from is signed
obj.write().unwrap().insert(&from);
let me = obj.read().unwrap().my_data().clone();
trace!(
"received RequestWindowIndex {} {} myaddr {}",
ix,
from.replicate_addr,
me.replicate_addr
);
assert_ne!(from.replicate_addr, me.replicate_addr);
let _ = Self::run_window_request(window, sock, &from, ix);
}
}
Ok(())
}
pub fn listen(
obj: Arc<RwLock<Self>>,
window: Arc<RwLock<Vec<Option<SharedBlob>>>>,
sock: UdpSocket,
exit: Arc<AtomicBool>,
) -> JoinHandle<()> {
sock.set_read_timeout(Some(Duration::new(2, 0))).unwrap();
sock.set_read_timeout(Some(Duration::new(2, 0)))
.expect("'sock.set_read_timeout' in crdt.rs");
spawn(move || loop {
let _ = Self::run_listen(&obj, &sock);
let e = Self::run_listen(&obj, &window, &sock);
if e.is_err() {
info!(
"run_listen timeout, table size: {}",
obj.read().unwrap().table.len()
);
}
if exit.load(Ordering::Relaxed) {
return;
}
@@ -249,8 +514,11 @@ impl Crdt {
}

#[cfg(test)]
mod test {
mod tests {
use crdt::{Crdt, ReplicatedData};
use logger;
use packet::Blob;
use rayon::iter::*;
use signature::KeyPair;
use signature::KeyPairUtil;
use std::net::UdpSocket;
@@ -259,6 +527,30 @@ mod test {
use std::thread::{sleep, JoinHandle};
use std::time::Duration;

fn test_node() -> (Crdt, UdpSocket, UdpSocket, UdpSocket) {
let gossip = UdpSocket::bind("0.0.0.0:0").unwrap();
let replicate = UdpSocket::bind("0.0.0.0:0").unwrap();
let serve = UdpSocket::bind("0.0.0.0:0").unwrap();
let events = UdpSocket::bind("0.0.0.0:0").unwrap();
let pubkey = KeyPair::new().pubkey();
let d = ReplicatedData::new(
pubkey,
gossip.local_addr().unwrap(),
replicate.local_addr().unwrap(),
serve.local_addr().unwrap(),
events.local_addr().unwrap(),
);
let crdt = Crdt::new(d);
trace!(
"id: {} gossip: {} replicate: {} serve: {}",
crdt.my_data().id[0],
gossip.local_addr().unwrap(),
replicate.local_addr().unwrap(),
serve.local_addr().unwrap(),
);
(crdt, gossip, replicate, serve)
}

/// Test that the network converges.
/// Run until every node in the network has a full ReplicatedData set.
/// Check that nodes stop sending updates after all the ReplicatedData has been shared.
@@ -271,12 +563,10 @@ mod test {
let exit = Arc::new(AtomicBool::new(false));
let listen: Vec<_> = (0..num)
.map(|_| {
let listener = UdpSocket::bind("0.0.0.0:0").unwrap();
let pubkey = KeyPair::new().pubkey();
let d = ReplicatedData::new(pubkey, listener.local_addr().unwrap());
let crdt = Crdt::new(d);
let (crdt, gossip, _, _) = test_node();
let c = Arc::new(RwLock::new(crdt));
let l = Crdt::listen(c.clone(), listener, exit.clone());
let w = Arc::new(RwLock::new(vec![]));
let l = Crdt::listen(c.clone(), w, gossip, exit.clone());
(c, l)
})
.collect();
@@ -286,21 +576,16 @@ mod test {
.map(|&(ref c, _)| Crdt::gossip(c.clone(), exit.clone()))
.collect();
let mut done = true;
for _ in 0..(num * 16) {
done = true;
for i in 0..(num * 32) {
done = false;
trace!("round {}", i);
for &(ref c, _) in listen.iter() {
trace!(
"done updates {} {}",
c.read().unwrap().table.len(),
c.read().unwrap().update_index
);
//make sure the number of updates doesn't grow unbounded
assert!(c.read().unwrap().update_index <= num as u64);
//make sure we got all the updates
if c.read().unwrap().table.len() != num {
done = false;
if num == c.read().unwrap().convergence() as usize {
done = true;
break;
}
}
//at least 1 node converged
if done == true {
break;
}
@@ -322,7 +607,9 @@ mod test {
}
/// ring a -> b -> c -> d -> e -> a
#[test]
#[ignore]
fn gossip_ring_test() {
logger::setup();
run_gossip_topo(|listen| {
let num = listen.len();
for n in 0..num {
@@ -339,6 +626,7 @@ mod test {

/// star (b,c,d,e) -> a
#[test]
#[ignore]
fn gossip_star_test() {
run_gossip_topo(|listen| {
let num = listen.len();
@@ -357,7 +645,13 @@ mod test {
/// Test that insert drops messages that are older
#[test]
fn insert_test() {
let mut d = ReplicatedData::new(KeyPair::new().pubkey(), "127.0.0.1:1234".parse().unwrap());
let mut d = ReplicatedData::new(
KeyPair::new().pubkey(),
"127.0.0.1:1234".parse().unwrap(),
"127.0.0.1:1235".parse().unwrap(),
"127.0.0.1:1236".parse().unwrap(),
"127.0.0.1:1237".parse().unwrap(),
);
assert_eq!(d.version, 0);
let mut crdt = Crdt::new(d.clone());
assert_eq!(crdt.table[&d.id].version, 0);
@@ -369,4 +663,76 @@ mod test {
assert_eq!(crdt.table[&d.id].version, 2);
}

#[test]
#[ignore]
pub fn test_crdt_retransmit() {
logger::setup();
trace!("c1:");
let (mut c1, s1, r1, e1) = test_node();
trace!("c2:");
let (mut c2, s2, r2, _) = test_node();
trace!("c3:");
let (mut c3, s3, r3, _) = test_node();
let c1_id = c1.my_data().id;
c1.set_leader(c1_id);

c2.insert(&c1.my_data());
c3.insert(&c1.my_data());

c2.set_leader(c1.my_data().id);
c3.set_leader(c1.my_data().id);

let exit = Arc::new(AtomicBool::new(false));

// Create listen threads
let win1 = Arc::new(RwLock::new(vec![]));
let a1 = Arc::new(RwLock::new(c1));
let t1 = Crdt::listen(a1.clone(), win1, s1, exit.clone());

let a2 = Arc::new(RwLock::new(c2));
let win2 = Arc::new(RwLock::new(vec![]));
let t2 = Crdt::listen(a2.clone(), win2, s2, exit.clone());

let a3 = Arc::new(RwLock::new(c3));
let win3 = Arc::new(RwLock::new(vec![]));
let t3 = Crdt::listen(a3.clone(), win3, s3, exit.clone());

// Create gossip threads
let t1_gossip = Crdt::gossip(a1.clone(), exit.clone());
let t2_gossip = Crdt::gossip(a2.clone(), exit.clone());
let t3_gossip = Crdt::gossip(a3.clone(), exit.clone());

//wait to converge
trace!("waiting to converge:");
let mut done = false;
for _ in 0..30 {
done = a1.read().unwrap().table.len() == 3 && a2.read().unwrap().table.len() == 3
&& a3.read().unwrap().table.len() == 3;
if done {
break;
}
sleep(Duration::new(1, 0));
}
assert!(done);
let mut b = Blob::default();
b.meta.size = 10;
Crdt::retransmit(&a1, &Arc::new(RwLock::new(b)), &e1).unwrap();
let res: Vec<_> = [r1, r2, r3]
.into_par_iter()
.map(|s| {
let mut b = Blob::default();
s.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
let res = s.recv_from(&mut b.data);
res.is_err() //true if failed to receive the retransmit packet
})
.collect();
//true if failed to receive the retransmit packet, r2, and r3 should succeed
//r1 was the sender, so it should fail to receive the packet
assert_eq!(res, [true, false, false]);
exit.store(true, Ordering::Relaxed);
let threads = vec![t1, t2, t3, t1_gossip, t2_gossip, t3_gossip];
for t in threads.into_iter() {
t.join().unwrap();
}
}
}
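The version check in Crdt::insert above is the whole conflict-resolution story: an update wins only if its version is strictly newer. A minimal sketch of that rule, std only; NodeInfo is a stand-in for ReplicatedData.

    use std::collections::HashMap;

    #[derive(Clone)]
    struct NodeInfo {
        id: [u8; 32],
        version: u64,
    }

    // Last-writer-wins keyed by version: keep `v` only if it is strictly
    // newer than the copy we already hold (or we hold none).
    fn insert(table: &mut HashMap<[u8; 32], NodeInfo>, v: &NodeInfo) -> bool {
        let newer = table.get(&v.id).map_or(true, |cur| v.version > cur.version);
        if newer {
            table.insert(v.id, v.clone());
        }
        newer
    }

    fn main() {
        let mut table = HashMap::new();
        let a = NodeInfo { id: [1; 32], version: 1 };
        assert!(insert(&mut table, &a));
        assert!(!insert(&mut table, &a)); // same version is dropped, as in insert_test
        assert!(insert(&mut table, &NodeInfo { id: [1; 32], version: 2 }));
    }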
33
src/ecdsa.rs
@@ -51,15 +51,22 @@ fn verify_packet(packet: &Packet) -> u8 {
).is_ok() as u8
}

fn batch_size(batches: &Vec<SharedPackets>) -> usize {
batches
.iter()
.map(|p| p.read().unwrap().packets.len())
.fold(0, |x, y| x + y)
}

#[cfg(not(feature = "cuda"))]
pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
use rayon::prelude::*;

info!("CPU ECDSA for {}", batch_size(batches));
batches
.into_par_iter()
.map(|p| {
p.read()
.unwrap()
.expect("'p' read lock in ed25519_verify")
.packets
.par_iter()
.map(verify_packet)
@@ -72,13 +79,18 @@ pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
use packet::PACKET_DATA_SIZE;

info!("CUDA ECDSA for {}", batch_size(batches));
let mut out = Vec::new();
let mut elems = Vec::new();
let mut locks = Vec::new();
let mut rvs = Vec::new();

for packets in batches {
locks.push(packets.read().unwrap());
locks.push(
packets
.read()
.expect("'packets' read lock in pub fn ed25519_verify"),
);
}
let mut num = 0;
for p in locks {
@@ -130,16 +142,25 @@ pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {

#[cfg(test)]
mod tests {
use accountant_skel::Request;
use bincode::serialize;
use ecdsa;
use event::Event;
use packet::{Packet, Packets, SharedPackets};
use std::sync::RwLock;
use transaction::test_tx;
use transaction::Transaction;
use transaction::{memfind, test_tx};

#[test]
fn test_layout() {
let tr = test_tx();
let tx = serialize(&tr).unwrap();
let packet = serialize(&Event::Transaction(tr)).unwrap();
assert_matches!(memfind(&packet, &tx), Some(ecdsa::TX_OFFSET));
assert_matches!(memfind(&packet, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), None);
}

fn make_packet_from_transaction(tr: Transaction) -> Packet {
let tx = serialize(&Request::Transaction(tr)).unwrap();
let tx = serialize(&Event::Transaction(tr)).unwrap();
let mut packet = Packet::default();
packet.meta.size = tx.len();
packet.data[..packet.meta.size].copy_from_slice(&tx);
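Both ed25519_verify paths above produce one u8 flag per packet, grouped per batch. A hedged sketch of the CPU path's shape with rayon; `verify` stands in for verify_packet and the types are simplified, not the crate's.

    extern crate rayon;

    use rayon::prelude::*;

    // One flag per packet, one Vec per batch, verified in parallel.
    fn verify_all(batches: &[Vec<Vec<u8>>], verify: fn(&[u8]) -> u8) -> Vec<Vec<u8>> {
        batches
            .par_iter()
            .map(|batch| batch.par_iter().map(|p| verify(p)).collect())
            .collect()
    }

    fn main() {
        let batches = vec![vec![vec![0u8; 8]; 4]; 2];
        let flags = verify_all(&batches, |_| 1);
        assert!(flags.iter().all(|b| b.iter().all(|&f| f == 1)));
    }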
80
src/entry.rs
@@ -25,6 +25,25 @@ pub struct Entry {
}

impl Entry {
/// Creates the next Entry `num_hashes` after `start_hash`.
pub fn new(start_hash: &Hash, cur_hashes: u64, events: Vec<Event>) -> Self {
let num_hashes = cur_hashes + if events.is_empty() { 0 } else { 1 };
let id = next_hash(start_hash, 0, &events);
Entry {
num_hashes,
id,
events,
}
}

/// Creates the next Tick Entry `num_hashes` after `start_hash`.
pub fn new_mut(start_hash: &mut Hash, cur_hashes: &mut u64, events: Vec<Event>) -> Self {
let entry = Self::new(start_hash, *cur_hashes, events);
*start_hash = entry.id;
*cur_hashes = 0;
entry
}

/// Creates a Entry from the number of hashes `num_hashes` since the previous event
/// and that resulting `id`.
pub fn new_tick(num_hashes: u64, id: &Hash) -> Self {
@@ -49,14 +68,6 @@ fn add_event_data(hash_data: &mut Vec<u8>, event: &Event) {
hash_data.push(0u8);
hash_data.extend_from_slice(&tr.sig);
}
Event::Signature { ref sig, .. } => {
hash_data.push(1u8);
hash_data.extend_from_slice(sig);
}
Event::Timestamp { ref sig, .. } => {
hash_data.push(2u8);
hash_data.extend_from_slice(sig);
}
}
}

@@ -84,31 +95,12 @@ pub fn next_hash(start_hash: &Hash, num_hashes: u64, events: &[Event]) -> Hash {
}
}

/// Creates the next Entry `num_hashes` after `start_hash`.
pub fn create_entry(start_hash: &Hash, cur_hashes: u64, events: Vec<Event>) -> Entry {
let num_hashes = cur_hashes + if events.is_empty() { 0 } else { 1 };
let id = next_hash(start_hash, 0, &events);
/// Creates the next Tick or Event Entry `num_hashes` after `start_hash`.
pub fn next_entry(start_hash: &Hash, num_hashes: u64, events: Vec<Event>) -> Entry {
Entry {
num_hashes,
id,
events,
}
}

/// Creates the next Tick Entry `num_hashes` after `start_hash`.
pub fn create_entry_mut(start_hash: &mut Hash, cur_hashes: &mut u64, events: Vec<Event>) -> Entry {
let entry = create_entry(start_hash, *cur_hashes, events);
*start_hash = entry.id;
*cur_hashes = 0;
entry
}

/// Creates the next Tick Entry `num_hashes` after `start_hash`.
pub fn next_tick(start_hash: &Hash, num_hashes: u64) -> Entry {
Entry {
num_hashes,
id: next_hash(start_hash, num_hashes, &[]),
events: vec![],
id: next_hash(start_hash, num_hashes, &events),
events: events,
}
}

@@ -116,7 +108,7 @@ pub fn next_tick(start_hash: &Hash, num_hashes: u64) -> Entry {
mod tests {
use super::*;
use chrono::prelude::*;
use entry::create_entry;
use entry::Entry;
use event::Event;
use hash::hash;
use signature::{KeyPair, KeyPairUtil};
@@ -128,8 +120,8 @@ mod tests {
let one = hash(&zero);
assert!(Entry::new_tick(0, &zero).verify(&zero)); // base case
assert!(!Entry::new_tick(0, &zero).verify(&one)); // base case, bad
assert!(next_tick(&zero, 1).verify(&zero)); // inductive step
assert!(!next_tick(&zero, 1).verify(&one)); // inductive step, bad
assert!(next_entry(&zero, 1, vec![]).verify(&zero)); // inductive step
assert!(!next_entry(&zero, 1, vec![]).verify(&one)); // inductive step, bad
}

#[test]
@@ -138,9 +130,9 @@ mod tests {

// First, verify entries
let keypair = KeyPair::new();
let tr0 = Event::Transaction(Transaction::new(&keypair, keypair.pubkey(), 0, zero));
let tr1 = Event::Transaction(Transaction::new(&keypair, keypair.pubkey(), 1, zero));
let mut e0 = create_entry(&zero, 0, vec![tr0.clone(), tr1.clone()]);
let tr0 = Event::new_transaction(&keypair, keypair.pubkey(), 0, zero);
let tr1 = Event::new_transaction(&keypair, keypair.pubkey(), 1, zero);
let mut e0 = Entry::new(&zero, 0, vec![tr0.clone(), tr1.clone()]);
assert!(e0.verify(&zero));

// Next, swap two events and ensure verification fails.
@@ -155,9 +147,13 @@ mod tests {

// First, verify entries
let keypair = KeyPair::new();
let tr0 = Event::new_timestamp(&keypair, Utc::now());
let tr1 = Event::new_signature(&keypair, Default::default());
let mut e0 = create_entry(&zero, 0, vec![tr0.clone(), tr1.clone()]);
let tr0 = Event::Transaction(Transaction::new_timestamp(&keypair, Utc::now(), zero));
let tr1 = Event::Transaction(Transaction::new_signature(
&keypair,
Default::default(),
zero,
));
let mut e0 = Entry::new(&zero, 0, vec![tr0.clone(), tr1.clone()]);
assert!(e0.verify(&zero));

// Next, swap two witness events and ensure verification fails.
@@ -167,9 +163,9 @@ mod tests {
}

#[test]
fn test_next_tick() {
fn test_next_entry() {
let zero = Hash::default();
let tick = next_tick(&zero, 1);
let tick = next_entry(&zero, 1, vec![]);
assert_eq!(tick.num_hashes, 1);
assert_ne!(tick.id, zero);
}
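After this consolidation, the hashing rule is the whole story: an entry's id is the previous id squeezed through the hash function num_hashes times (with events mixed into the final squeeze), so verify() is nothing more than recomputation. A toy illustration; DefaultHasher stands in for the real SHA-256 state and events are left out.

    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    // Toy chain: hash the previous id `num_hashes` times.
    fn next_id(start: u64, num_hashes: u64) -> u64 {
        let mut id = start;
        for _ in 0..num_hashes {
            let mut h = DefaultHasher::new();
            id.hash(&mut h);
            id = h.finish();
        }
        id
    }

    fn main() {
        let genesis = 0u64;
        let tick = next_id(genesis, 1);
        assert_eq!(tick, next_id(genesis, 1)); // verification = recomputation
        assert_ne!(tick, genesis);
    }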
80
src/entry_writer.rs
Normal file
@@ -0,0 +1,80 @@
|
||||
//! The `entry_writer` module helps implement the TPU's write stage.
|
||||
|
||||
use bank::Bank;
|
||||
use entry::Entry;
|
||||
use ledger;
|
||||
use packet;
|
||||
use result::Result;
|
||||
use serde_json;
|
||||
use std::collections::VecDeque;
|
||||
use std::io::Write;
|
||||
use std::io::sink;
|
||||
use std::sync::mpsc::Receiver;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::time::Duration;
|
||||
use streamer;
|
||||
|
||||
pub struct EntryWriter<'a> {
|
||||
bank: &'a Bank,
|
||||
}
|
||||
|
||||
impl<'a> EntryWriter<'a> {
|
||||
/// Create a new Tpu that wraps the given Bank.
|
||||
pub fn new(bank: &'a Bank) -> Self {
|
||||
EntryWriter { bank }
|
||||
}
|
||||
|
||||
fn write_entry<W: Write>(&self, writer: &Mutex<W>, entry: &Entry) {
|
||||
trace!("write_entry entry");
|
||||
self.bank.register_entry_id(&entry.id);
|
||||
writeln!(
|
||||
writer.lock().expect("'writer' lock in fn fn write_entry"),
|
||||
"{}",
|
||||
serde_json::to_string(&entry).expect("'entry' to_strong in fn write_entry")
|
||||
).expect("writeln! in fn write_entry");
|
||||
}
|
||||
|
||||
fn write_entries<W: Write>(
|
||||
&self,
|
||||
writer: &Mutex<W>,
|
||||
entry_receiver: &Receiver<Entry>,
|
||||
) -> Result<Vec<Entry>> {
|
||||
//TODO implement a serialize for channel that does this without allocations
|
||||
let mut l = vec![];
|
||||
let entry = entry_receiver.recv_timeout(Duration::new(1, 0))?;
|
||||
self.write_entry(writer, &entry);
|
||||
l.push(entry);
|
||||
while let Ok(entry) = entry_receiver.try_recv() {
|
||||
self.write_entry(writer, &entry);
|
||||
l.push(entry);
|
||||
}
|
||||
Ok(l)
|
||||
}
|
||||
|
||||
/// Process any Entry items that have been published by the Historian.
|
||||
/// continuosly broadcast blobs of entries out
|
||||
pub fn write_and_send_entries<W: Write>(
|
||||
&self,
|
||||
broadcast: &streamer::BlobSender,
|
||||
blob_recycler: &packet::BlobRecycler,
|
||||
writer: &Mutex<W>,
|
||||
entry_receiver: &Receiver<Entry>,
|
||||
) -> Result<()> {
|
||||
let mut q = VecDeque::new();
|
||||
let list = self.write_entries(writer, entry_receiver)?;
|
||||
trace!("New blobs? {}", list.len());
|
||||
ledger::process_entry_list_into_blobs(&list, blob_recycler, &mut q);
|
||||
if !q.is_empty() {
|
||||
trace!("broadcasting {}", q.len());
|
||||
broadcast.send(q)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Process any Entry items that have been published by the Historian.
|
||||
/// continuosly broadcast blobs of entries out
|
||||
pub fn drain_entries(&self, entry_receiver: &Receiver<Entry>) -> Result<()> {
|
||||
self.write_entries(&Arc::new(Mutex::new(sink())), entry_receiver)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
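A quick sketch of how the write stage above is typically driven. The `bank`, channel, and recycler bindings are assumed to already exist; this mirrors the API shown in the diff, not a function from it:

    // Sketch: pump entries through EntryWriter until the channel closes.
    // Assumes `bank: Bank`, `blob_sender: streamer::BlobSender`,
    // `blob_recycler: packet::BlobRecycler`, and `entry_receiver: Receiver<Entry>`.
    let entry_writer = EntryWriter::new(&bank);
    let writer = std::sync::Mutex::new(std::io::stdout());
    loop {
        // Each call drains pending entries, writes them as JSON lines,
        // then broadcasts them as blobs.
        if entry_writer
            .write_and_send_entries(&blob_sender, &blob_recycler, &writer, &entry_receiver)
            .is_err()
        {
            break; // receiver or broadcast channel closed
        }
    }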
@@ -153,7 +153,7 @@ pub fn decode_blocks(data: &mut [&mut [u8]], coding: &[&[u8]], erasures: &[i32])
// Generate coding blocks in window from consumed to consumed+NUM_DATA
pub fn generate_coding(
re: &BlobRecycler,
window: &mut Vec<Option<SharedBlob>>,
window: &mut Vec<SharedBlob>,
consumed: usize,
) -> Result<()> {
let mut data_blobs = Vec::new();
@@ -164,10 +164,14 @@ pub fn generate_coding(
let mut coding_ptrs: Vec<&mut [u8]> = Vec::new();
for i in consumed..consumed + NUM_DATA {
let n = i % window.len();
data_blobs.push(window[n].clone().unwrap());
data_blobs.push(
window[n]
.clone()
.expect("'data_blobs' arr in pub fn generate_coding"),
);
}
for b in &data_blobs {
data_locks.push(b.write().unwrap());
data_locks.push(b.write().expect("'b' write lock in pub fn generate_coding"));
}
for (i, l) in data_locks.iter_mut().enumerate() {
trace!("i: {} data: {}", i, l.data[0]);
@@ -179,11 +183,18 @@ pub fn generate_coding(
let coding_end = consumed + NUM_CODED;
for i in coding_start..coding_end {
let n = i % window.len();
window[n] = Some(re.allocate());
coding_blobs.push(window[n].clone().unwrap());
window[n] = re.allocate();
coding_blobs.push(
window[n]
.clone()
.expect("'coding_blobs' arr in pub fn generate_coding"),
);
}
for b in &coding_blobs {
coding_locks.push(b.write().unwrap());
coding_locks.push(
b.write()
.expect("'coding_locks' arr in pub fn generate_coding"),
);
}
for (i, l) in coding_locks.iter_mut().enumerate() {
trace!("i: {} data: {}", i, l.data[0]);
@@ -231,7 +242,7 @@ pub fn recover(
let j = i % window.len();
let mut b = &mut window[j];
if b.is_some() {
blobs.push(b.clone().unwrap());
blobs.push(b.clone().expect("'blobs' arr in pub fn recover"));
continue;
}
let n = re.allocate();
@@ -244,7 +255,7 @@ pub fn recover(
trace!("erasures: {:?}", erasures);
//lock everything
for b in &blobs {
locks.push(b.write().unwrap());
locks.push(b.write().expect("'locks' arr in pub fn recover"));
}
for (i, l) in locks.iter_mut().enumerate() {
if i >= NUM_DATA {
@@ -272,7 +283,6 @@ pub fn recover(
mod test {
use erasure;
use packet::{BlobRecycler, SharedBlob, PACKET_DATA_SIZE};
extern crate env_logger;

#[test]
pub fn test_coding() {
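The `i % window.len()` arithmetic above treats the window as a ring buffer. A small self-contained illustration of how absolute blob indices wrap onto window slots (the sizes here are hypothetical, not the crate's constants):

    // Sketch: ring-buffer slot selection as used in generate_coding/recover.
    fn main() {
        let window_len = 8; // hypothetical window size
        let consumed = 6;   // absolute index of the first blob in the batch
        let num_data = 4;   // stand-in for NUM_DATA
        let slots: Vec<usize> = (consumed..consumed + num_data)
            .map(|i| i % window_len) // 6, 7, then wraps to 0, 1
            .collect();
        assert_eq!(slots, vec![6, 7, 0, 1]);
    }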
58 src/event.rs
@@ -1,67 +1,31 @@
//! The `event` module handles events, which may be a `Transaction` or a `Witness` used to process a pending
//! Transaction.

use bincode::serialize;
use chrono::prelude::*;
use signature::{KeyPair, KeyPairUtil, PublicKey, Signature, SignatureUtil};
use hash::Hash;
use signature::{KeyPair, PublicKey};
use transaction::Transaction;

#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Event {
Transaction(Transaction),
Signature {
from: PublicKey,
tx_sig: Signature,
sig: Signature,
},
Timestamp {
from: PublicKey,
dt: DateTime<Utc>,
sig: Signature,
},
}

impl Event {
/// Create and sign a new Witness Timestamp. Used for unit-testing.
pub fn new_timestamp(from: &KeyPair, dt: DateTime<Utc>) -> Self {
let sign_data = serialize(&dt).unwrap();
let sig = Signature::clone_from_slice(from.sign(&sign_data).as_ref());
Event::Timestamp {
from: from.pubkey(),
dt,
sig,
}
}

/// Create and sign a new Witness Signature. Used for unit-testing.
pub fn new_signature(from: &KeyPair, tx_sig: Signature) -> Self {
let sig = Signature::clone_from_slice(from.sign(&tx_sig).as_ref());
Event::Signature {
from: from.pubkey(),
tx_sig,
sig,
}
pub fn new_transaction(
from_keypair: &KeyPair,
to: PublicKey,
tokens: i64,
last_id: Hash,
) -> Self {
let tr = Transaction::new(from_keypair, to, tokens, last_id);
Event::Transaction(tr)
}

/// Verify the Event's signatures are valid and, if a transaction, that its
/// spending plan is valid.
pub fn verify(&self) -> bool {
match *self {
Event::Transaction(ref tr) => tr.verify_sig(),
Event::Signature { from, tx_sig, sig } => sig.verify(&from, &tx_sig),
Event::Timestamp { from, dt, sig } => sig.verify(&from, &serialize(&dt).unwrap()),
Event::Transaction(ref tr) => tr.verify_plan(),
}
}
}

#[cfg(test)]
mod tests {
use super::*;
use signature::{KeyPair, KeyPairUtil};

#[test]
fn test_event_verify() {
assert!(Event::new_timestamp(&KeyPair::new(), Utc::now()).verify());
assert!(Event::new_signature(&KeyPair::new(), Signature::default()).verify());
}
}
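For orientation, a minimal sketch of the new `Event::new_transaction` constructor and `verify` in use, mirroring the test above (the token amount is arbitrary, and the `KeyPairUtil` trait is assumed to be in scope):

    // Sketch: build and verify a transaction Event with the API shown above.
    let keypair = KeyPair::new();
    let to = KeyPair::new().pubkey();
    let last_id = Hash::default();
    let event = Event::new_transaction(&keypair, to, 42, last_id);
    assert!(event.verify()); // Transaction events are checked via verify_plan()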
@@ -1,7 +1,7 @@
//! The `hash` module provides functions for creating SHA-256 hashes.

use generic_array::typenum::U32;
use generic_array::GenericArray;
use generic_array::typenum::U32;
use sha2::{Digest, Sha256};

pub type Hash = GenericArray<u8, U32>;
113 src/historian.rs
@@ -1,113 +0,0 @@
//! The `historian` module provides a microservice for generating a Proof of History.
//! It manages a thread containing a Proof of History Recorder.

use entry::Entry;
use hash::Hash;
use recorder::{ExitReason, Recorder, Signal};
use std::sync::mpsc::{sync_channel, Receiver, SyncSender};
use std::thread::{spawn, JoinHandle};
use std::time::Instant;

pub struct Historian {
pub sender: SyncSender<Signal>,
pub receiver: Receiver<Entry>,
pub thread_hdl: JoinHandle<ExitReason>,
}

impl Historian {
pub fn new(start_hash: &Hash, ms_per_tick: Option<u64>) -> Self {
let (sender, event_receiver) = sync_channel(10_000);
let (entry_sender, receiver) = sync_channel(10_000);
let thread_hdl =
Historian::create_recorder(*start_hash, ms_per_tick, event_receiver, entry_sender);
Historian {
sender,
receiver,
thread_hdl,
}
}

/// A background thread that will continue tagging received Event messages and
/// sending back Entry messages until either the receiver or sender channel is closed.
fn create_recorder(
start_hash: Hash,
ms_per_tick: Option<u64>,
receiver: Receiver<Signal>,
sender: SyncSender<Entry>,
) -> JoinHandle<ExitReason> {
spawn(move || {
let mut recorder = Recorder::new(receiver, sender, start_hash);
let now = Instant::now();
loop {
if let Err(err) = recorder.process_events(now, ms_per_tick) {
return err;
}
if ms_per_tick.is_some() {
recorder.hash();
}
}
})
}
}

#[cfg(test)]
mod tests {
use super::*;
use ledger::Block;
use std::thread::sleep;
use std::time::Duration;

#[test]
fn test_historian() {
let zero = Hash::default();
let hist = Historian::new(&zero, None);

hist.sender.send(Signal::Tick).unwrap();
sleep(Duration::new(0, 1_000_000));
hist.sender.send(Signal::Tick).unwrap();
sleep(Duration::new(0, 1_000_000));
hist.sender.send(Signal::Tick).unwrap();

let entry0 = hist.receiver.recv().unwrap();
let entry1 = hist.receiver.recv().unwrap();
let entry2 = hist.receiver.recv().unwrap();

assert_eq!(entry0.num_hashes, 0);
assert_eq!(entry1.num_hashes, 0);
assert_eq!(entry2.num_hashes, 0);

drop(hist.sender);
assert_eq!(
hist.thread_hdl.join().unwrap(),
ExitReason::RecvDisconnected
);

assert!([entry0, entry1, entry2].verify(&zero));
}

#[test]
fn test_historian_closed_sender() {
let zero = Hash::default();
let hist = Historian::new(&zero, None);
drop(hist.receiver);
hist.sender.send(Signal::Tick).unwrap();
assert_eq!(
hist.thread_hdl.join().unwrap(),
ExitReason::SendDisconnected
);
}

#[test]
fn test_ticking_historian() {
let zero = Hash::default();
let hist = Historian::new(&zero, Some(20));
sleep(Duration::from_millis(300));
hist.sender.send(Signal::Tick).unwrap();
drop(hist.sender);
let entries: Vec<Entry> = hist.receiver.iter().collect();
assert!(entries.len() > 1);

// Ensure the ID is not the seed.
assert_ne!(entries[0].id, zero);
}
}
154 src/ledger.rs
@@ -1,9 +1,17 @@
//! The `ledger` module provides functions for parallel verification of the
//! Proof of History ledger.

use entry::{next_tick, Entry};
use bincode::{deserialize, serialize_into};
use entry::{next_entry, Entry};
use event::Event;
use hash::Hash;
use packet;
use packet::{SharedBlob, BLOB_DATA_SIZE, BLOB_SIZE};
use rayon::prelude::*;
use std::cmp::min;
use std::collections::VecDeque;
use std::io::Cursor;
use std::mem::size_of;

pub trait Block {
/// Verifies the hashes and counts of a slice of events are all consistent.
@@ -18,22 +26,107 @@ impl Block for [Entry] {
}
}

/// Create a vector of Ticks of length `len` from `start_hash` hash and `num_hashes`.
pub fn next_ticks(start_hash: &Hash, num_hashes: u64, len: usize) -> Vec<Entry> {
/// Create a vector of Entries of length `event_set.len()` from `start_hash` hash, `num_hashes`, and `event_set`.
pub fn next_entries(start_hash: &Hash, num_hashes: u64, event_set: Vec<Vec<Event>>) -> Vec<Entry> {
let mut id = *start_hash;
let mut ticks = vec![];
for _ in 0..len {
let entry = next_tick(&id, num_hashes);
let mut entries = vec![];
for event_list in &event_set {
let events = event_list.clone();
let entry = next_entry(&id, num_hashes, events);
id = entry.id;
ticks.push(entry);
entries.push(entry);
}
ticks
entries
}

pub fn process_entry_list_into_blobs(
list: &Vec<Entry>,
blob_recycler: &packet::BlobRecycler,
q: &mut VecDeque<SharedBlob>,
) {
let mut start = 0;
let mut end = 0;
while start < list.len() {
let mut entries: Vec<Vec<Entry>> = Vec::new();
let mut total = 0;
for i in &list[start..] {
total += size_of::<Event>() * i.events.len();
total += size_of::<Entry>();
if total >= BLOB_DATA_SIZE {
break;
}
end += 1;
}
// See if we need to split the events
if end <= start {
let mut event_start = 0;
let num_events_per_blob = BLOB_DATA_SIZE / size_of::<Event>();
let total_entry_chunks =
(list[end].events.len() + num_events_per_blob - 1) / num_events_per_blob;
trace!(
"splitting events end: {} total_chunks: {}",
end,
total_entry_chunks
);
for _ in 0..total_entry_chunks {
let event_end = min(event_start + num_events_per_blob, list[end].events.len());
let mut entry = Entry {
num_hashes: list[end].num_hashes,
id: list[end].id,
events: list[end].events[event_start..event_end].to_vec(),
};
entries.push(vec![entry]);
event_start = event_end;
}
end += 1;
} else {
entries.push(list[start..end].to_vec());
}

for entry in entries {
let b = blob_recycler.allocate();
let pos = {
let mut bd = b.write().unwrap();
let mut out = Cursor::new(bd.data_mut());
serialize_into(&mut out, &entry).expect("failed to serialize output");
out.position() as usize
};
assert!(pos < BLOB_SIZE);
b.write().unwrap().set_size(pos);
q.push_back(b);
}
start = end;
}
}

pub fn reconstruct_entries_from_blobs(blobs: &VecDeque<SharedBlob>) -> Vec<Entry> {
let mut entries_to_apply: Vec<Entry> = Vec::new();
let mut last_id = Hash::default();
for msgs in blobs {
let blob = msgs.read().unwrap();
let entries: Vec<Entry> = deserialize(&blob.data()[..blob.meta.size]).unwrap();
for entry in entries {
if entry.id == last_id {
if let Some(last_entry) = entries_to_apply.last_mut() {
last_entry.events.extend(entry.events);
}
} else {
last_id = entry.id;
entries_to_apply.push(entry);
}
}
//TODO respond back to leader with hash of the state
}
entries_to_apply
}

#[cfg(test)]
mod tests {
use super::*;
use hash::hash;
use packet::BlobRecycler;
use signature::{KeyPair, KeyPairUtil};
use transaction::Transaction;

#[test]
fn test_verify_slice() {
@@ -42,12 +135,51 @@ mod tests {
assert!(vec![][..].verify(&zero)); // base case
assert!(vec![Entry::new_tick(0, &zero)][..].verify(&zero)); // singleton case 1
assert!(!vec![Entry::new_tick(0, &zero)][..].verify(&one)); // singleton case 2, bad
assert!(next_ticks(&zero, 0, 2)[..].verify(&zero)); // inductive step
assert!(next_entries(&zero, 0, vec![vec![]; 2])[..].verify(&zero)); // inductive step

let mut bad_ticks = next_ticks(&zero, 0, 2);
let mut bad_ticks = next_entries(&zero, 0, vec![vec![]; 2]);
bad_ticks[1].id = one;
assert!(!bad_ticks.verify(&zero)); // inductive step, bad
}

#[test]
fn test_entry_to_blobs() {
let zero = Hash::default();
let one = hash(&zero);
let keypair = KeyPair::new();
let tr0 = Event::Transaction(Transaction::new(&keypair, keypair.pubkey(), 1, one));
let events = vec![tr0.clone(); 10000];
let e0 = Entry::new(&zero, 0, events);

let entry_list = vec![e0.clone(); 1];
let blob_recycler = BlobRecycler::default();
let mut blob_q = VecDeque::new();
process_entry_list_into_blobs(&entry_list, &blob_recycler, &mut blob_q);
let entries = reconstruct_entries_from_blobs(&blob_q);

assert_eq!(entry_list, entries);
}

#[test]
fn test_next_entries() {
let mut id = Hash::default();
let next_id = hash(&id);
let keypair = KeyPair::new();
let tr0 = Event::Transaction(Transaction::new(&keypair, keypair.pubkey(), 1, next_id));
let events = vec![tr0.clone(); 5];
let event_set = vec![events.clone(); 5];
let entries0 = next_entries(&id, 0, event_set);

assert_eq!(entries0.len(), 5);

let mut entries1 = vec![];
for _ in 0..5 {
let entry = next_entry(&id, 0, events.clone());
id = entry.id;
entries1.push(entry);
}
assert_eq!(entries0, entries1);
}
}

#[cfg(all(feature = "unstable", test))]
@@ -59,7 +191,7 @@ mod bench {
#[bench]
fn event_bench(bencher: &mut Bencher) {
let start_hash = Hash::default();
let entries = next_ticks(&start_hash, 10_000, 8);
let entries = next_entries(&start_hash, 10_000, vec![vec![]; 8]);
bencher.iter(|| {
assert!(entries.verify(&start_hash));
});
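The splitting branch in `process_entry_list_into_blobs` is plain ceiling division. A worked example with illustrative numbers (the real divisor is `BLOB_DATA_SIZE / size_of::<Event>()`):

    // Sketch: how many blobs an oversized entry is split into.
    fn main() {
        let num_events = 10_000;        // events in one oversized entry
        let num_events_per_blob = 250;  // hypothetical capacity per blob
        let total_entry_chunks =
            (num_events + num_events_per_blob - 1) / num_events_per_blob;
        assert_eq!(total_entry_chunks, 40); // 10_000 / 250, rounded up
    }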
24 src/lib.rs
@@ -1,25 +1,37 @@
#![cfg_attr(feature = "unstable", feature(test))]
pub mod accountant;
pub mod accountant_skel;
pub mod accountant_stub;
pub mod bank;
pub mod banking_stage;
pub mod crdt;
pub mod ecdsa;
pub mod entry;
pub mod entry_writer;
#[cfg(feature = "erasure")]
pub mod erasure;
pub mod event;
pub mod hash;
pub mod historian;
pub mod ledger;
pub mod logger;
pub mod mint;
pub mod packet;
pub mod plan;
pub mod record_stage;
pub mod recorder;
pub mod replicate_stage;
pub mod request;
pub mod request_processor;
pub mod request_stage;
pub mod result;
pub mod rpu;
pub mod server;
pub mod sig_verify_stage;
pub mod signature;
pub mod streamer;
pub mod subscribers;
pub mod thin_client;
pub mod timing;
pub mod tpu;
pub mod transaction;
pub mod tvu;
pub mod write_stage;
extern crate bincode;
extern crate byteorder;
extern crate chrono;
@@ -41,3 +53,5 @@ extern crate futures;
#[cfg(test)]
#[macro_use]
extern crate matches;

extern crate rand;
11 src/logger.rs Normal file
@@ -0,0 +1,11 @@
use std::sync::{Once, ONCE_INIT};
extern crate env_logger;

static INIT: Once = ONCE_INIT;

/// Setup function that is only run once, even if called multiple times.
pub fn setup() {
INIT.call_once(|| {
let _ = env_logger::init();
});
}
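Typical use of the new module: every test calls `setup()`, and the `Once` guard makes repeated calls harmless. A sketch (the test body is illustrative):

    // Sketch: safe to call from every test; env_logger is initialized once.
    #[test]
    fn logs_something() {
        logger::setup();
        info!("logging is now configured");
    }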
22 src/mint.rs
@@ -1,6 +1,5 @@
//! The `mint` module is a library for generating the chain's genesis block.

use entry::create_entry;
use entry::Entry;
use event::Event;
use hash::{hash, Hash};
@@ -19,8 +18,11 @@ pub struct Mint {
impl Mint {
pub fn new(tokens: i64) -> Self {
let rnd = SystemRandom::new();
let pkcs8 = KeyPair::generate_pkcs8(&rnd).unwrap().to_vec();
let keypair = KeyPair::from_pkcs8(Input::from(&pkcs8)).unwrap();
let pkcs8 = KeyPair::generate_pkcs8(&rnd)
.expect("generate_pkcs8 in mint pub fn new")
.to_vec();
let keypair =
KeyPair::from_pkcs8(Input::from(&pkcs8)).expect("from_pkcs8 in mint pub fn new");
let pubkey = keypair.pubkey();
Mint {
pkcs8,
@@ -38,7 +40,7 @@ impl Mint {
}

pub fn keypair(&self) -> KeyPair {
KeyPair::from_pkcs8(Input::from(&self.pkcs8)).unwrap()
KeyPair::from_pkcs8(Input::from(&self.pkcs8)).expect("from_pkcs8 in mint pub fn keypair")
}

pub fn pubkey(&self) -> PublicKey {
@@ -52,8 +54,8 @@ impl Mint {
}

pub fn create_entries(&self) -> Vec<Entry> {
let e0 = create_entry(&self.seed(), 0, vec![]);
let e1 = create_entry(&e0.id, 0, self.create_events());
let e0 = Entry::new(&self.seed(), 0, vec![]);
let e1 = Entry::new(&e0.id, 0, self.create_events());
vec![e0, e1]
}
}
@@ -61,7 +63,7 @@ impl Mint {
#[derive(Serialize, Deserialize, Debug)]
pub struct MintDemo {
pub mint: Mint,
pub users: Vec<(Vec<u8>, i64)>,
pub num_accounts: i64,
}

#[cfg(test)]
@@ -69,12 +71,14 @@ mod tests {
use super::*;
use ledger::Block;
use plan::Plan;
use transaction::Instruction;

#[test]
fn test_create_events() {
let mut events = Mint::new(100).create_events().into_iter();
if let Event::Transaction(tr) = events.next().unwrap() {
if let Plan::Pay(payment) = tr.data.plan {
if let Instruction::NewContract(contract) = tr.instruction {
if let Plan::Pay(payment) = contract.plan {
assert_eq!(tr.from, payment.to);
}
}
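A compact sketch tying the pieces above together: the genesis entries produced by a Mint should verify against its seed (the token count is arbitrary):

    // Sketch: genesis entries self-verify from the mint's seed hash.
    use ledger::Block; // provides verify() on [Entry]
    let mint = Mint::new(100);
    let entries = mint.create_entries();
    assert!(entries.verify(&mint.seed()));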
@@ -1,12 +1,15 @@
//! The `packet` module defines data structures and methods to pull data from the network.
use bincode::{deserialize, serialize};
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use result::{Error, Result};
use serde::Serialize;
use signature::PublicKey;
use std::collections::VecDeque;
use std::fmt;
use std::io;
use std::mem::size_of;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket};
use std::sync::{Arc, Mutex, RwLock};
use std::mem::size_of;

pub type SharedPackets = Arc<RwLock<Packets>>;
pub type SharedBlob = Arc<RwLock<Blob>>;
@@ -14,7 +17,8 @@ pub type PacketRecycler = Recycler<Packets>;
pub type BlobRecycler = Recycler<Blob>;

pub const NUM_PACKETS: usize = 1024 * 8;
const BLOB_SIZE: usize = 64 * 1024;
pub const BLOB_SIZE: usize = 64 * 1024;
pub const BLOB_DATA_SIZE: usize = BLOB_SIZE - BLOB_ID_END;
pub const PACKET_DATA_SIZE: usize = 256;
pub const NUM_BLOBS: usize = (NUM_PACKETS * PACKET_DATA_SIZE) / BLOB_SIZE;

@@ -153,12 +157,12 @@ impl<T: Default> Clone for Recycler<T> {

impl<T: Default> Recycler<T> {
pub fn allocate(&self) -> Arc<RwLock<T>> {
let mut gc = self.gc.lock().expect("recycler lock");
let mut gc = self.gc.lock().expect("recycler lock in pub fn allocate");
gc.pop()
.unwrap_or_else(|| Arc::new(RwLock::new(Default::default())))
}
pub fn recycle(&self, msgs: Arc<RwLock<T>>) {
let mut gc = self.gc.lock().expect("recycler lock");
let mut gc = self.gc.lock().expect("recycler lock in pub fn recycle");
gc.push(msgs);
}
}
@@ -176,13 +180,14 @@ impl Packets {
socket.set_nonblocking(false)?;
for p in &mut self.packets {
p.meta.size = 0;
trace!("receiving");
match socket.recv_from(&mut p.data) {
Err(_) if i > 0 => {
trace!("got {:?} messages", i);
debug!("got {:?} messages", i);
break;
}
Err(e) => {
info!("recv_from err {:?}", e);
trace!("recv_from err {:?}", e);
return Err(Error::IO(e));
}
Ok((nrecv, from)) => {
@@ -200,6 +205,7 @@ impl Packets {
pub fn recv_from(&mut self, socket: &UdpSocket) -> Result<()> {
let sz = self.run_read_from(socket)?;
self.packets.resize(sz, Packet::default());
debug!("recv_from: {}", sz);
Ok(())
}
pub fn send_to(&self, socket: &UdpSocket) -> Result<()> {
@@ -211,28 +217,60 @@ impl Packets {
}
}

const BLOB_INDEX_SIZE: usize = size_of::<u64>();
pub fn to_packets<T: Serialize>(r: &PacketRecycler, xs: Vec<T>) -> Vec<SharedPackets> {
let mut out = vec![];
for x in xs.chunks(NUM_PACKETS) {
let p = r.allocate();
p.write()
.unwrap()
.packets
.resize(x.len(), Default::default());
for (i, o) in x.iter().zip(p.write().unwrap().packets.iter_mut()) {
let v = serialize(&i).expect("serialize request");
let len = v.len();
o.data[..len].copy_from_slice(&v);
o.meta.size = len;
}
out.push(p);
}
return out;
}

const BLOB_INDEX_END: usize = size_of::<u64>();
const BLOB_ID_END: usize = BLOB_INDEX_END + size_of::<usize>() + size_of::<PublicKey>();

impl Blob {
pub fn get_index(&self) -> Result<u64> {
let mut rdr = io::Cursor::new(&self.data[0..BLOB_INDEX_SIZE]);
let mut rdr = io::Cursor::new(&self.data[0..BLOB_INDEX_END]);
let r = rdr.read_u64::<LittleEndian>()?;
Ok(r)
}
pub fn set_index(&mut self, ix: u64) -> Result<()> {
let mut wtr = vec![];
wtr.write_u64::<LittleEndian>(ix)?;
self.data[..BLOB_INDEX_SIZE].clone_from_slice(&wtr);
self.data[..BLOB_INDEX_END].clone_from_slice(&wtr);
Ok(())
}

pub fn get_id(&self) -> Result<PublicKey> {
let e = deserialize(&self.data[BLOB_INDEX_END..BLOB_ID_END])?;
Ok(e)
}

pub fn set_id(&mut self, id: PublicKey) -> Result<()> {
let wtr = serialize(&id)?;
self.data[BLOB_INDEX_END..BLOB_ID_END].clone_from_slice(&wtr);
Ok(())
}

pub fn data(&self) -> &[u8] {
&self.data[BLOB_INDEX_SIZE..]
&self.data[BLOB_ID_END..]
}
pub fn data_mut(&mut self) -> &mut [u8] {
&mut self.data[BLOB_INDEX_SIZE..]
&mut self.data[BLOB_ID_END..]
}
pub fn set_size(&mut self, size: usize) {
self.meta.size = size + BLOB_INDEX_SIZE;
self.meta.size = size + BLOB_ID_END;
}
pub fn recv_from(re: &BlobRecycler, socket: &UdpSocket) -> Result<VecDeque<SharedBlob>> {
let mut v = VecDeque::new();
@@ -246,14 +284,16 @@ impl Blob {
for i in 0..NUM_BLOBS {
let r = re.allocate();
{
let mut p = r.write().unwrap();
let mut p = r.write().expect("'r' write lock in pub fn recv_from");
match socket.recv_from(&mut p.data) {
Err(_) if i > 0 => {
trace!("got {:?} messages", i);
break;
}
Err(e) => {
info!("recv_from err {:?}", e);
if e.kind() != io::ErrorKind::WouldBlock {
info!("recv_from err {:?}", e);
}
return Err(Error::IO(e));
}
Ok((nrecv, from)) => {
@@ -276,7 +316,7 @@ impl Blob {
) -> Result<()> {
while let Some(r) = v.pop_front() {
{
let p = r.read().unwrap();
let p = r.read().expect("'r' read lock in pub fn send_to");
let a = p.meta.addr();
socket.send_to(&p.data[..p.meta.size], &a)?;
}
@@ -288,11 +328,13 @@ impl Blob {

#[cfg(test)]
mod test {
use packet::{Blob, BlobRecycler, Packet, PacketRecycler, Packets};
use packet::{to_packets, Blob, BlobRecycler, Packet, PacketRecycler, Packets, NUM_PACKETS};
use request::Request;
use std::collections::VecDeque;
use std::io;
use std::io::Write;
use std::net::UdpSocket;

#[test]
pub fn packet_recycler_test() {
let r = PacketRecycler::default();
@@ -334,6 +376,24 @@ mod test {
r.recycle(p);
}

#[test]
fn test_to_packets() {
let tr = Request::GetTransactionCount;
let re = PacketRecycler::default();
let rv = to_packets(&re, vec![tr.clone(); 1]);
assert_eq!(rv.len(), 1);
assert_eq!(rv[0].read().unwrap().packets.len(), 1);

let rv = to_packets(&re, vec![tr.clone(); NUM_PACKETS]);
assert_eq!(rv.len(), 1);
assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);

let rv = to_packets(&re, vec![tr.clone(); NUM_PACKETS + 1]);
assert_eq!(rv.len(), 2);
assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);
assert_eq!(rv[1].read().unwrap().packets.len(), 1);
}

#[test]
pub fn blob_send_recv() {
trace!("start");
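The renamed constants fix the blob header layout: an 8-byte index, then the sender id. A worked sketch of the resulting offsets, assuming a 64-bit target where `usize` is 8 bytes and `PublicKey` is 32 bytes:

    // Sketch: blob header offsets implied by the constants above.
    fn main() {
        let blob_index_end = 8;                    // size_of::<u64>()
        let blob_id_end = blob_index_end + 8 + 32; // + size_of::<usize>() + PublicKey
        let blob_size = 64 * 1024;
        assert_eq!(blob_id_end, 48);
        assert_eq!(blob_size - blob_id_end, 65_488); // BLOB_DATA_SIZE payload bytes
    }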
@@ -7,6 +7,7 @@ use chrono::prelude::*;
use signature::PublicKey;
use std::mem;

#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Witness {
Timestamp(DateTime<Utc>),
Signature(PublicKey),
166 src/record_stage.rs Normal file
@@ -0,0 +1,166 @@
//! The `record_stage` module provides an object for generating a Proof of History.
//! It records Event items on behalf of its users. It continuously generates
//! new hashes, only stopping to check if it has been sent an Event item. It
//! tags each Event with an Entry, and sends it back. The Entry includes the
//! Event, the latest hash, and the number of hashes since the last event.
//! The resulting stream of entries represents ordered events in time.

use entry::Entry;
use event::Event;
use hash::Hash;
use recorder::Recorder;
use std::sync::mpsc::{channel, Receiver, Sender, TryRecvError};
use std::thread::{spawn, JoinHandle};
use std::time::{Duration, Instant};

#[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))]
pub enum Signal {
Tick,
Events(Vec<Event>),
}

pub struct RecordStage {
pub entry_receiver: Receiver<Entry>,
pub thread_hdl: JoinHandle<()>,
}

impl RecordStage {
/// A background thread that will continue tagging received Event messages and
/// sending back Entry messages until either the receiver or sender channel is closed.
pub fn new(
event_receiver: Receiver<Signal>,
start_hash: &Hash,
tick_duration: Option<Duration>,
) -> Self {
let (entry_sender, entry_receiver) = channel();
let start_hash = start_hash.clone();

let thread_hdl = spawn(move || {
let mut recorder = Recorder::new(start_hash);
let duration_data = tick_duration.map(|dur| (Instant::now(), dur));
loop {
if let Err(_) = Self::process_events(
&mut recorder,
duration_data,
&event_receiver,
&entry_sender,
) {
return;
}
if duration_data.is_some() {
recorder.hash();
}
}
});

RecordStage {
entry_receiver,
thread_hdl,
}
}

pub fn process_events(
recorder: &mut Recorder,
duration_data: Option<(Instant, Duration)>,
receiver: &Receiver<Signal>,
sender: &Sender<Entry>,
) -> Result<(), ()> {
loop {
if let Some((start_time, tick_duration)) = duration_data {
if let Some(entry) = recorder.tick(start_time, tick_duration) {
sender.send(entry).or(Err(()))?;
}
}
match receiver.try_recv() {
Ok(signal) => match signal {
Signal::Tick => {
let entry = recorder.record(vec![]);
sender.send(entry).or(Err(()))?;
}
Signal::Events(events) => {
let entry = recorder.record(events);
sender.send(entry).or(Err(()))?;
}
},
Err(TryRecvError::Empty) => return Ok(()),
Err(TryRecvError::Disconnected) => return Err(()),
};
}
}
}

#[cfg(test)]
mod tests {
use super::*;
use ledger::Block;
use signature::{KeyPair, KeyPairUtil};
use std::sync::mpsc::channel;
use std::thread::sleep;

#[test]
fn test_historian() {
let (input, event_receiver) = channel();
let zero = Hash::default();
let record_stage = RecordStage::new(event_receiver, &zero, None);

input.send(Signal::Tick).unwrap();
sleep(Duration::new(0, 1_000_000));
input.send(Signal::Tick).unwrap();
sleep(Duration::new(0, 1_000_000));
input.send(Signal::Tick).unwrap();

let entry0 = record_stage.entry_receiver.recv().unwrap();
let entry1 = record_stage.entry_receiver.recv().unwrap();
let entry2 = record_stage.entry_receiver.recv().unwrap();

assert_eq!(entry0.num_hashes, 0);
assert_eq!(entry1.num_hashes, 0);
assert_eq!(entry2.num_hashes, 0);

drop(input);
assert_eq!(record_stage.thread_hdl.join().unwrap(), ());

assert!([entry0, entry1, entry2].verify(&zero));
}

#[test]
fn test_historian_closed_sender() {
let (input, event_receiver) = channel();
let zero = Hash::default();
let record_stage = RecordStage::new(event_receiver, &zero, None);
drop(record_stage.entry_receiver);
input.send(Signal::Tick).unwrap();
assert_eq!(record_stage.thread_hdl.join().unwrap(), ());
}

#[test]
fn test_events() {
let (input, signal_receiver) = channel();
let zero = Hash::default();
let record_stage = RecordStage::new(signal_receiver, &zero, None);
let alice_keypair = KeyPair::new();
let bob_pubkey = KeyPair::new().pubkey();
let event0 = Event::new_transaction(&alice_keypair, bob_pubkey, 1, zero);
let event1 = Event::new_transaction(&alice_keypair, bob_pubkey, 2, zero);
input.send(Signal::Events(vec![event0, event1])).unwrap();
drop(input);
let entries: Vec<_> = record_stage.entry_receiver.iter().collect();
assert_eq!(entries.len(), 1);
}

#[test]
#[ignore]
fn test_ticking_historian() {
let (input, event_receiver) = channel();
let zero = Hash::default();
let record_stage = RecordStage::new(event_receiver, &zero, Some(Duration::from_millis(20)));
sleep(Duration::from_millis(900));
input.send(Signal::Tick).unwrap();
drop(input);
let entries: Vec<Entry> = record_stage.entry_receiver.iter().collect();
assert!(entries.len() > 1);

// Ensure the ID is not the seed.
assert_ne!(entries[0].id, zero);
}
}
@@ -1,45 +1,21 @@
//! The `recorder` module provides an object for generating a Proof of History.
//! It records Event items on behalf of its users. It continuously generates
//! new hashes, only stopping to check if it has been sent an Event item. It
//! tags each Event with an Entry, and sends it back. The Entry includes the
//! Event, the latest hash, and the number of hashes since the last event.
//! The resulting stream of entries represents ordered events in time.
//! It records Event items on behalf of its users.

use entry::{create_entry_mut, Entry};
use entry::Entry;
use event::Event;
use hash::{hash, Hash};
use std::mem;
use std::sync::mpsc::{Receiver, SyncSender, TryRecvError};
use std::time::{Duration, Instant};

#[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))]
pub enum Signal {
Tick,
Event(Event),
}

#[derive(Debug, PartialEq, Eq)]
pub enum ExitReason {
RecvDisconnected,
SendDisconnected,
}

pub struct Recorder {
sender: SyncSender<Entry>,
receiver: Receiver<Signal>,
last_hash: Hash,
events: Vec<Event>,
num_hashes: u64,
num_ticks: u64,
num_ticks: u32,
}

impl Recorder {
pub fn new(receiver: Receiver<Signal>, sender: SyncSender<Entry>, last_hash: Hash) -> Self {
pub fn new(last_hash: Hash) -> Self {
Recorder {
receiver,
sender,
last_hash,
events: vec![],
num_hashes: 0,
num_ticks: 0,
}
@@ -50,40 +26,17 @@ impl Recorder {
self.num_hashes += 1;
}

pub fn record_entry(&mut self) -> Result<(), ExitReason> {
let events = mem::replace(&mut self.events, vec![]);
let entry = create_entry_mut(&mut self.last_hash, &mut self.num_hashes, events);
self.sender
.send(entry)
.or(Err(ExitReason::SendDisconnected))?;
Ok(())
pub fn record(&mut self, events: Vec<Event>) -> Entry {
Entry::new_mut(&mut self.last_hash, &mut self.num_hashes, events)
}

pub fn process_events(
&mut self,
epoch: Instant,
ms_per_tick: Option<u64>,
) -> Result<(), ExitReason> {
loop {
if let Some(ms) = ms_per_tick {
if epoch.elapsed() > Duration::from_millis((self.num_ticks + 1) * ms) {
self.record_entry()?;
self.num_ticks += 1;
}
}

match self.receiver.try_recv() {
Ok(signal) => match signal {
Signal::Tick => {
self.record_entry()?;
}
Signal::Event(event) => {
self.events.push(event);
}
},
Err(TryRecvError::Empty) => return Ok(()),
Err(TryRecvError::Disconnected) => return Err(ExitReason::RecvDisconnected),
};
pub fn tick(&mut self, start_time: Instant, tick_duration: Duration) -> Option<Entry> {
if start_time.elapsed() > tick_duration * (self.num_ticks + 1) {
// TODO: don't let this overflow u32
self.num_ticks += 1;
Some(self.record(vec![]))
} else {
None
}
}
}
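The rewritten `tick` replaces millisecond bookkeeping with `Duration` arithmetic: tick n+1 fires once elapsed time exceeds `tick_duration * (n + 1)`. A small sketch of that schedule (the 20 ms value matches the ticking test):

    // Sketch: the tick due-times implied by recorder::tick above.
    use std::time::Duration;
    fn main() {
        let tick_duration = Duration::from_millis(20);
        for num_ticks in 0u32..3 {
            let due = tick_duration * (num_ticks + 1);
            println!("tick {} due after {:?}", num_ticks + 1, due); // 20ms, 40ms, 60ms
        }
    }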
52 src/replicate_stage.rs Normal file
@@ -0,0 +1,52 @@
//! The `replicate_stage` replicates transactions broadcast by the leader.

use bank::Bank;
use ledger;
use packet;
use result::Result;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::thread::{spawn, JoinHandle};
use std::time::Duration;
use streamer;

pub struct ReplicateStage {
pub thread_hdl: JoinHandle<()>,
}

impl ReplicateStage {
/// Process verified blobs, already in order
fn replicate_requests(
bank: &Arc<Bank>,
verified_receiver: &streamer::BlobReceiver,
blob_recycler: &packet::BlobRecycler,
) -> Result<()> {
let timer = Duration::new(1, 0);
let blobs = verified_receiver.recv_timeout(timer)?;
let entries = ledger::reconstruct_entries_from_blobs(&blobs);
let res = bank.process_verified_entries(entries);
if res.is_err() {
error!("process_verified_entries {} {:?}", blobs.len(), res);
}
res?;
for blob in blobs {
blob_recycler.recycle(blob);
}
Ok(())
}

pub fn new(
bank: Arc<Bank>,
exit: Arc<AtomicBool>,
window_receiver: streamer::BlobReceiver,
blob_recycler: packet::BlobRecycler,
) -> Self {
let thread_hdl = spawn(move || loop {
let e = Self::replicate_requests(&bank, &window_receiver, &blob_recycler);
if e.is_err() && exit.load(Ordering::Relaxed) {
break;
}
});
ReplicateStage { thread_hdl }
}
}
26 src/request.rs Normal file
@@ -0,0 +1,26 @@
//! The `request` module defines the messages for the thin client.

use hash::Hash;
use signature::PublicKey;

#[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum Request {
GetBalance { key: PublicKey },
GetLastId,
GetTransactionCount,
}

impl Request {
/// Verify the request is valid.
pub fn verify(&self) -> bool {
true
}
}

#[derive(Serialize, Deserialize, Debug)]
pub enum Response {
Balance { key: PublicKey, val: Option<i64> },
LastId { id: Hash },
TransactionCount { transaction_count: u64 },
}
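Requests and responses travel as bincode payloads inside packets; a minimal round-trip sketch using the enum above (buffer handling omitted):

    // Sketch: a thin-client request serializes and deserializes losslessly.
    use bincode::{deserialize, serialize};
    let req = Request::GetTransactionCount;
    let bytes = serialize(&req).unwrap();
    let _echoed: Request = deserialize(&bytes).unwrap();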
165 src/request_processor.rs Normal file
@@ -0,0 +1,165 @@
//! The `request_stage` processes thin client Request messages.

use bank::Bank;
use bincode::{deserialize, serialize};
use event::Event;
use packet;
use packet::SharedPackets;
use rayon::prelude::*;
use request::{Request, Response};
use result::Result;
use std::collections::VecDeque;
use std::net::SocketAddr;
use std::sync::Arc;
use std::sync::mpsc::Receiver;
use std::time::Instant;
use streamer;
use timing;

pub struct RequestProcessor {
bank: Arc<Bank>,
}

impl RequestProcessor {
/// Create a new RequestProcessor that wraps the given Bank.
pub fn new(bank: Arc<Bank>) -> Self {
RequestProcessor { bank }
}

/// Process Request items sent by clients.
fn process_request(
&self,
msg: Request,
rsp_addr: SocketAddr,
) -> Option<(Response, SocketAddr)> {
match msg {
Request::GetBalance { key } => {
let val = self.bank.get_balance(&key);
let rsp = (Response::Balance { key, val }, rsp_addr);
info!("Response::Balance {:?}", rsp);
Some(rsp)
}
Request::GetLastId => {
let id = self.bank.last_id();
let rsp = (Response::LastId { id }, rsp_addr);
info!("Response::LastId {:?}", rsp);
Some(rsp)
}
Request::GetTransactionCount => {
let transaction_count = self.bank.transaction_count() as u64;
let rsp = (Response::TransactionCount { transaction_count }, rsp_addr);
info!("Response::TransactionCount {:?}", rsp);
Some(rsp)
}
}
}

pub fn process_requests(
&self,
reqs: Vec<(Request, SocketAddr)>,
) -> Vec<(Response, SocketAddr)> {
reqs.into_iter()
.filter_map(|(req, rsp_addr)| self.process_request(req, rsp_addr))
.collect()
}

fn deserialize_requests(p: &packet::Packets) -> Vec<Option<(Request, SocketAddr)>> {
p.packets
.par_iter()
.map(|x| {
deserialize(&x.data[0..x.meta.size])
.map(|req| (req, x.meta.addr()))
.ok()
})
.collect()
}

// Copy-paste of deserialize_requests() because I can't figure out how to
// route the lifetimes in a generic version.
pub fn deserialize_events(p: &packet::Packets) -> Vec<Option<(Event, SocketAddr)>> {
p.packets
.par_iter()
.map(|x| {
deserialize(&x.data[0..x.meta.size])
.map(|req| (req, x.meta.addr()))
.ok()
})
.collect()
}

/// Serialize a response and its destination address into a blob.
fn serialize_response(
resp: Response,
rsp_addr: SocketAddr,
blob_recycler: &packet::BlobRecycler,
) -> Result<packet::SharedBlob> {
let blob = blob_recycler.allocate();
{
let mut b = blob.write().unwrap();
let v = serialize(&resp)?;
let len = v.len();
b.data[..len].copy_from_slice(&v);
b.meta.size = len;
b.meta.set_addr(&rsp_addr);
}
Ok(blob)
}

fn serialize_responses(
rsps: Vec<(Response, SocketAddr)>,
blob_recycler: &packet::BlobRecycler,
) -> Result<VecDeque<packet::SharedBlob>> {
let mut blobs = VecDeque::new();
for (resp, rsp_addr) in rsps {
blobs.push_back(Self::serialize_response(resp, rsp_addr, blob_recycler)?);
}
Ok(blobs)
}

pub fn process_request_packets(
&self,
packet_receiver: &Receiver<SharedPackets>,
blob_sender: &streamer::BlobSender,
packet_recycler: &packet::PacketRecycler,
blob_recycler: &packet::BlobRecycler,
) -> Result<()> {
let (batch, batch_len) = streamer::recv_batch(packet_receiver)?;

info!(
"@{:?} request_stage: processing: {}",
timing::timestamp(),
batch_len
);

let mut reqs_len = 0;
let proc_start = Instant::now();
for msgs in batch {
let reqs: Vec<_> = Self::deserialize_requests(&msgs.read().unwrap())
.into_iter()
.filter_map(|x| x)
.collect();
reqs_len += reqs.len();

let rsps = self.process_requests(reqs);

let blobs = Self::serialize_responses(rsps, blob_recycler)?;
if !blobs.is_empty() {
info!("process: sending blobs: {}", blobs.len());
//don't wake up the other side if there is nothing
blob_sender.send(blobs)?;
}
packet_recycler.recycle(msgs);
}
let total_time_s = timing::duration_as_s(&proc_start.elapsed());
let total_time_ms = timing::duration_as_ms(&proc_start.elapsed());
info!(
"@{:?} done process batches: {} time: {:?}ms reqs: {} reqs/s: {}",
timing::timestamp(),
batch_len,
total_time_ms,
reqs_len,
(reqs_len as f32) / (total_time_s)
);
Ok(())
}
}
48 src/request_stage.rs Normal file
@@ -0,0 +1,48 @@
//! The `request_stage` processes thin client Request messages.

use packet;
use packet::SharedPackets;
use request_processor::RequestProcessor;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver};
use std::thread::{spawn, JoinHandle};
use streamer;

pub struct RequestStage {
pub thread_hdl: JoinHandle<()>,
pub blob_receiver: streamer::BlobReceiver,
pub request_processor: Arc<RequestProcessor>,
}

impl RequestStage {
pub fn new(
request_processor: RequestProcessor,
exit: Arc<AtomicBool>,
packet_receiver: Receiver<SharedPackets>,
packet_recycler: packet::PacketRecycler,
blob_recycler: packet::BlobRecycler,
) -> Self {
let request_processor = Arc::new(request_processor);
let request_processor_ = request_processor.clone();
let (blob_sender, blob_receiver) = channel();
let thread_hdl = spawn(move || loop {
let e = request_processor_.process_request_packets(
&packet_receiver,
&blob_sender,
&packet_recycler,
&blob_recycler,
);
if e.is_err() {
if exit.load(Ordering::Relaxed) {
break;
}
}
});
RequestStage {
thread_hdl,
blob_receiver,
request_processor,
}
}
}
@@ -1,10 +1,10 @@
//! The `result` module exposes a Result type that propagates one of many different Error types.

use bank;
use bincode;
use serde_json;
use std;
use std::any::Any;
use accountant;

#[derive(Debug)]
pub enum Error {
@@ -15,9 +15,11 @@ pub enum Error {
RecvError(std::sync::mpsc::RecvError),
RecvTimeoutError(std::sync::mpsc::RecvTimeoutError),
Serialize(std::boxed::Box<bincode::ErrorKind>),
AccountingError(accountant::AccountingError),
BankError(bank::BankError),
SendError,
Services,
CrdtTooSmall,
GenericError,
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -32,9 +34,9 @@ impl std::convert::From<std::sync::mpsc::RecvTimeoutError> for Error {
Error::RecvTimeoutError(e)
}
}
impl std::convert::From<accountant::AccountingError> for Error {
fn from(e: accountant::AccountingError) -> Error {
Error::AccountingError(e)
impl std::convert::From<bank::BankError> for Error {
fn from(e: bank::BankError) -> Error {
Error::BankError(e)
}
}
impl<T> std::convert::From<std::sync::mpsc::SendError<T>> for Error {
@@ -77,9 +79,10 @@ mod tests {
use std::io;
use std::io::Write;
use std::net::SocketAddr;
use std::sync::mpsc::channel;
use std::panic;
use std::sync::mpsc::RecvError;
use std::sync::mpsc::RecvTimeoutError;
use std::sync::mpsc::channel;
use std::thread;

fn addr_parse_error() -> Result<SocketAddr> {
@@ -88,6 +91,7 @@ mod tests {
}

fn join_error() -> Result<()> {
panic::set_hook(Box::new(|_info| {}));
let r = thread::spawn(|| panic!("hi")).join()?;
Ok(r)
}
55 src/rpu.rs Normal file
@@ -0,0 +1,55 @@
//! The `rpu` module implements the Request Processing Unit, a
//! 5-stage transaction processing pipeline in software.

use bank::Bank;
use packet;
use request_processor::RequestProcessor;
use request_stage::RequestStage;
use std::net::UdpSocket;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::channel;
use std::thread::JoinHandle;
use streamer;

pub struct Rpu {
pub thread_hdls: Vec<JoinHandle<()>>,
}

impl Rpu {
pub fn new(
bank: Arc<Bank>,
requests_socket: UdpSocket,
respond_socket: UdpSocket,
exit: Arc<AtomicBool>,
) -> Self {
let packet_recycler = packet::PacketRecycler::default();
let (packet_sender, packet_receiver) = channel();
let t_receiver = streamer::receiver(
requests_socket,
exit.clone(),
packet_recycler.clone(),
packet_sender,
);

let blob_recycler = packet::BlobRecycler::default();
let request_processor = RequestProcessor::new(bank.clone());
let request_stage = RequestStage::new(
request_processor,
exit.clone(),
packet_receiver,
packet_recycler.clone(),
blob_recycler.clone(),
);

let t_responder = streamer::responder(
respond_socket,
exit.clone(),
blob_recycler.clone(),
request_stage.blob_receiver,
);

let thread_hdls = vec![t_receiver, t_responder, request_stage.thread_hdl];
Rpu { thread_hdls }
}
}
77 src/server.rs Normal file
@@ -0,0 +1,77 @@
//! The `server` module hosts all the server microservices.

use bank::Bank;
use crdt::ReplicatedData;
use hash::Hash;
use rpu::Rpu;
use std::io::Write;
use std::net::UdpSocket;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use std::thread::JoinHandle;
use std::time::Duration;
use tpu::Tpu;
use tvu::Tvu;

pub struct Server {
pub thread_hdls: Vec<JoinHandle<()>>,
}

impl Server {
pub fn new_leader<W: Write + Send + 'static>(
bank: Bank,
start_hash: Hash,
tick_duration: Option<Duration>,
me: ReplicatedData,
requests_socket: UdpSocket,
events_socket: UdpSocket,
broadcast_socket: UdpSocket,
respond_socket: UdpSocket,
gossip_socket: UdpSocket,
exit: Arc<AtomicBool>,
writer: W,
) -> Self {
let bank = Arc::new(bank);
let mut thread_hdls = vec![];
let rpu = Rpu::new(bank.clone(), requests_socket, respond_socket, exit.clone());
thread_hdls.extend(rpu.thread_hdls);
let tpu = Tpu::new(
bank.clone(),
start_hash,
tick_duration,
me,
events_socket,
broadcast_socket,
gossip_socket,
exit.clone(),
writer,
);
thread_hdls.extend(tpu.thread_hdls);
Server { thread_hdls }
}
pub fn new_validator(
bank: Bank,
me: ReplicatedData,
requests_socket: UdpSocket,
respond_socket: UdpSocket,
replicate_socket: UdpSocket,
gossip_socket: UdpSocket,
leader_repl_data: ReplicatedData,
exit: Arc<AtomicBool>,
) -> Self {
let bank = Arc::new(bank);
let mut thread_hdls = vec![];
let rpu = Rpu::new(bank.clone(), requests_socket, respond_socket, exit.clone());
thread_hdls.extend(rpu.thread_hdls);
let tvu = Tvu::new(
bank.clone(),
me,
gossip_socket,
replicate_socket,
leader_repl_data,
exit.clone(),
);
thread_hdls.extend(tvu.thread_hdls);
Server { thread_hdls }
}
}
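Whichever constructor is used, the caller ends up holding plain thread handles; a sketch of the usual join pattern (`server` is assumed to come from `new_leader` or `new_validator`):

    // Sketch: wait for every pipeline thread the Server spawned.
    for thread_hdl in server.thread_hdls {
        thread_hdl.join().expect("server thread panicked");
    }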
96 src/sig_verify_stage.rs Normal file
@@ -0,0 +1,96 @@
//! The `sig_verify_stage` implements the signature verification stage of the TPU.

use ecdsa;
use packet::SharedPackets;
use rand::{thread_rng, Rng};
use result::Result;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::{Arc, Mutex};
use std::thread::{spawn, JoinHandle};
use std::time::Instant;
use streamer;
use timing;

pub struct SigVerifyStage {
pub verified_receiver: Receiver<Vec<(SharedPackets, Vec<u8>)>>,
pub thread_hdls: Vec<JoinHandle<()>>,
}

impl SigVerifyStage {
pub fn new(exit: Arc<AtomicBool>, packet_receiver: Receiver<SharedPackets>) -> Self {
let (verified_sender, verified_receiver) = channel();
let thread_hdls = Self::verifier_services(exit, packet_receiver, verified_sender);
SigVerifyStage {
thread_hdls,
verified_receiver,
}
}

fn verify_batch(batch: Vec<SharedPackets>) -> Vec<(SharedPackets, Vec<u8>)> {
let r = ecdsa::ed25519_verify(&batch);
batch.into_iter().zip(r).collect()
}

fn verifier(
recvr: &Arc<Mutex<streamer::PacketReceiver>>,
sendr: &Arc<Mutex<Sender<Vec<(SharedPackets, Vec<u8>)>>>>,
) -> Result<()> {
let (batch, len) =
streamer::recv_batch(&recvr.lock().expect("'recvr' lock in fn verifier"))?;

let now = Instant::now();
let batch_len = batch.len();
let rand_id = thread_rng().gen_range(0, 100);
info!(
"@{:?} verifier: verifying: {} id: {}",
timing::timestamp(),
batch.len(),
rand_id
);

let verified_batch = Self::verify_batch(batch);
sendr
.lock()
.expect("'sendr' lock in fn verifier")
.send(verified_batch)?;

let total_time_ms = timing::duration_as_ms(&now.elapsed());
let total_time_s = timing::duration_as_s(&now.elapsed());
info!(
"@{:?} verifier: done. batches: {} total verify time: {:?} id: {} verified: {} v/s {}",
timing::timestamp(),
batch_len,
total_time_ms,
rand_id,
len,
(len as f32 / total_time_s)
);
Ok(())
}

fn verifier_service(
exit: Arc<AtomicBool>,
packet_receiver: Arc<Mutex<streamer::PacketReceiver>>,
verified_sender: Arc<Mutex<Sender<Vec<(SharedPackets, Vec<u8>)>>>>,
) -> JoinHandle<()> {
spawn(move || loop {
let e = Self::verifier(&packet_receiver.clone(), &verified_sender.clone());
if e.is_err() && exit.load(Ordering::Relaxed) {
break;
}
})
}

fn verifier_services(
exit: Arc<AtomicBool>,
packet_receiver: streamer::PacketReceiver,
verified_sender: Sender<Vec<(SharedPackets, Vec<u8>)>>,
) -> Vec<JoinHandle<()>> {
let sender = Arc::new(Mutex::new(verified_sender));
let receiver = Arc::new(Mutex::new(packet_receiver));
(0..4)
.map(|_| Self::verifier_service(exit.clone(), receiver.clone(), sender.clone()))
.collect()
}
}
102 src/signature.rs
@@ -1,9 +1,14 @@
//! The `signature` module provides functionality for public and private keys.

use generic_array::typenum::{U32, U64};
use generic_array::GenericArray;
use generic_array::typenum::{U32, U64};
use rand::{ChaChaRng, Rng, SeedableRng};
use rayon::prelude::*;
use ring::error::Unspecified;
use ring::rand::SecureRandom;
use ring::signature::Ed25519KeyPair;
use ring::{rand, signature};
use std::cell::RefCell;
use untrusted;

pub type KeyPair = Ed25519KeyPair;
@@ -19,8 +24,10 @@ impl KeyPairUtil for Ed25519KeyPair {
/// Return a new ED25519 keypair
fn new() -> Self {
let rng = rand::SystemRandom::new();
let pkcs8_bytes = signature::Ed25519KeyPair::generate_pkcs8(&rng).unwrap();
signature::Ed25519KeyPair::from_pkcs8(untrusted::Input::from(&pkcs8_bytes)).unwrap()
let pkcs8_bytes = signature::Ed25519KeyPair::generate_pkcs8(&rng)
.expect("generate_pkcs8 in signature pub fn new");
signature::Ed25519KeyPair::from_pkcs8(untrusted::Input::from(&pkcs8_bytes))
.expect("from_pkcs8 in signature pub fn new")
}

/// Return the public key for the given keypair
@@ -41,3 +48,92 @@ impl SignatureUtil for GenericArray<u8, U64> {
signature::verify(&signature::ED25519, peer_public_key, msg, sig).is_ok()
}
}

pub struct GenKeys {
// This is necessary because the rng needs to mutate its state to remain
// deterministic, and the fill trait requires an immutable reference to self
generator: RefCell<ChaChaRng>,
}

impl GenKeys {
pub fn new(seed: &[u8]) -> GenKeys {
let seed32: Vec<_> = seed.iter().map(|&x| x as u32).collect();
let rng = ChaChaRng::from_seed(&seed32);
GenKeys {
generator: RefCell::new(rng),
}
}

pub fn new_key(&self) -> Vec<u8> {
KeyPair::generate_pkcs8(self).unwrap().to_vec()
}

pub fn gen_n_seeds(&self, n: i64) -> Vec<[u8; 16]> {
let mut rng = self.generator.borrow_mut();
(0..n).map(|_| rng.gen()).collect()
}

pub fn gen_n_keypairs(&self, n: i64) -> Vec<KeyPair> {
self.gen_n_seeds(n)
.into_par_iter()
.map(|seed| {
let pkcs8 = GenKeys::new(&seed).new_key();
KeyPair::from_pkcs8(untrusted::Input::from(&pkcs8)).unwrap()
})
.collect()
}
}

impl SecureRandom for GenKeys {
fn fill(&self, dest: &mut [u8]) -> Result<(), Unspecified> {
let mut rng = self.generator.borrow_mut();
rng.fill_bytes(dest);
Ok(())
}
}

#[cfg(test)]
mod tests {
use super::*;
use std::collections::HashSet;

#[test]
fn test_new_key_is_deterministic() {
let seed = [1, 2, 3, 4];
let rng0 = GenKeys::new(&seed);
let rng1 = GenKeys::new(&seed);

for _ in 0..100 {
assert_eq!(rng0.new_key(), rng1.new_key());
}
}

fn gen_n_pubkeys(seed: &[u8], n: i64) -> HashSet<PublicKey> {
GenKeys::new(&seed)
.gen_n_keypairs(n)
.into_iter()
.map(|x| x.pubkey())
.collect()
}

#[test]
fn test_gen_n_pubkeys_deterministic() {
let seed = [1, 2, 3, 4];
assert_eq!(gen_n_pubkeys(&seed, 50), gen_n_pubkeys(&seed, 50));
}
}
|
||||
#[cfg(all(feature = "unstable", test))]
|
||||
mod bench {
|
||||
extern crate test;
|
||||
|
||||
use self::test::Bencher;
|
||||
use super::*;
|
||||
|
||||
#[bench]
|
||||
fn bench_gen_keys(b: &mut Bencher) {
|
||||
let seed: &[_] = &[1, 2, 3, 4];
|
||||
let rnd = GenKeys::new(seed);
|
||||
b.iter(|| rnd.gen_n_keypairs(1000));
|
||||
}
|
||||
}
|
||||
|
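A short sketch of the property the tests above pin down: `GenKeys` is a seeded ChaCha stream, so the same seed always yields the same key sequence. This mirrors the diff's own API, nothing new is assumed:

use signature::{GenKeys, KeyPairUtil};

fn deterministic_keys_demo() {
    let seed = [42, 42, 42, 42];
    let a = GenKeys::new(&seed);
    let b = GenKeys::new(&seed);
    // Identically seeded generators produce identical draws.
    assert_eq!(a.new_key(), b.new_key());
    // Bulk generation goes through gen_n_seeds, so it is deterministic too.
    let pk_a: Vec<_> = a.gen_n_keypairs(10).iter().map(|k| k.pubkey()).collect();
    let pk_b: Vec<_> = b.gen_n_keypairs(10).iter().map(|k| k.pubkey()).collect();
    assert_eq!(pk_a, pk_b);
}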
448 src/streamer.rs
@@ -1,15 +1,18 @@
//! The `streamer` module defines a set of services for efficiently pulling data from udp sockets.
use packet::{Blob, BlobRecycler, PacketRecycler, SharedBlob, SharedPackets, NUM_BLOBS};
use result::Result;
use crdt::Crdt;
#[cfg(feature = "erasure")]
use erasure;
use packet::{Blob, BlobRecycler, PacketRecycler, SharedBlob, SharedPackets};
use result::{Error, Result};
use std::collections::VecDeque;
use std::net::UdpSocket;
use std::net::{SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc;
use std::sync::{Arc, RwLock};
use std::thread::{spawn, JoinHandle};
use std::time::Duration;
use subscribers::Subscribers;

pub const WINDOW_SIZE: usize = 2 * 1024;
pub type PacketReceiver = mpsc::Receiver<SharedPackets>;
pub type PacketSender = mpsc::Sender<SharedPackets>;
pub type BlobSender = mpsc::Sender<VecDeque<SharedBlob>>;
@@ -25,7 +28,10 @@ fn recv_loop(
    let msgs = re.allocate();
    let msgs_ = msgs.clone();
    loop {
        match msgs.write().unwrap().recv_from(sock) {
        match msgs.write()
            .expect("write lock in fn recv_loop")
            .recv_from(sock)
        {
            Ok(()) => {
                channel.send(msgs_)?;
                break;
@@ -45,14 +51,16 @@ pub fn receiver(
    sock: UdpSocket,
    exit: Arc<AtomicBool>,
    recycler: PacketRecycler,
    channel: PacketSender,
) -> Result<JoinHandle<()>> {
    let timer = Duration::new(1, 0);
    sock.set_read_timeout(Some(timer))?;
    Ok(spawn(move || {
        let _ = recv_loop(&sock, &exit, &recycler, &channel);
    packet_sender: PacketSender,
) -> JoinHandle<()> {
    let res = sock.set_read_timeout(Some(Duration::new(1, 0)));
    if res.is_err() {
        panic!("streamer::receiver set_read_timeout error");
    }
    spawn(move || {
        let _ = recv_loop(&sock, &exit, &recycler, &packet_sender);
        ()
    }))
    })
}

fn recv_send(sock: &UdpSocket, recycler: &BlobRecycler, r: &BlobReceiver) -> Result<()> {
@@ -62,6 +70,25 @@ fn recv_send(sock: &UdpSocket, recycler: &BlobRecycler, r: &BlobReceiver) -> Res
    Ok(())
}

pub fn recv_batch(recvr: &PacketReceiver) -> Result<(Vec<SharedPackets>, usize)> {
    let timer = Duration::new(1, 0);
    let msgs = recvr.recv_timeout(timer)?;
    trace!("got msgs");
    let mut len = msgs.read().unwrap().packets.len();
    let mut batch = vec![msgs];
    while let Ok(more) = recvr.try_recv() {
        trace!("got more msgs");
        len += more.read().unwrap().packets.len();
        batch.push(more);

        if len > 100_000 {
            break;
        }
    }
    debug!("batch len {}", batch.len());
    Ok((batch, len))
}

pub fn responder(
    sock: UdpSocket,
    exit: Arc<AtomicBool>,
@@ -99,43 +126,99 @@ pub fn blob_receiver(
        if exit.load(Ordering::Relaxed) {
            break;
        }
        let ret = recv_blobs(&recycler, &sock, &s);
        if ret.is_err() {
            break;
        }
        let _ = recv_blobs(&recycler, &sock, &s);
    });
    Ok(t)
}

fn find_next_missing(
    locked_window: &Arc<RwLock<Vec<Option<SharedBlob>>>>,
    crdt: &Arc<RwLock<Crdt>>,
    consumed: &mut usize,
    received: &mut usize,
) -> Result<Vec<(SocketAddr, Vec<u8>)>> {
    if *received <= *consumed {
        return Err(Error::GenericError);
    }
    let window = locked_window.read().unwrap();
    let reqs: Vec<_> = (*consumed..*received)
        .filter_map(|pix| {
            let i = pix % WINDOW_SIZE;
            if let &None = &window[i] {
                let val = crdt.read().unwrap().window_index_request(pix as u64);
                if let Ok((to, req)) = val {
                    return Some((to, req));
                }
            }
            None
        })
        .collect();
    Ok(reqs)
}

fn repair_window(
    locked_window: &Arc<RwLock<Vec<Option<SharedBlob>>>>,
    crdt: &Arc<RwLock<Crdt>>,
    last: &mut usize,
    times: &mut usize,
    consumed: &mut usize,
    received: &mut usize,
) -> Result<()> {
    let reqs = find_next_missing(locked_window, crdt, consumed, received)?;
    //exponential backoff
    if *last != *consumed {
        *times = 0;
    }
    *last = *consumed;
    *times += 1;
    //if times flips from all 1s 7 -> 8, 15 -> 16, we retry otherwise return Ok
    if *times & (*times - 1) != 0 {
        trace!("repair_window counter {} {}", *times, *consumed);
        return Ok(());
    }
    info!("repair_window request {} {}", *consumed, *received);
    let sock = UdpSocket::bind("0.0.0.0:0")?;
    for (to, req) in reqs {
        //todo cache socket
        sock.send_to(&req, to)?;
    }
    Ok(())
}

fn recv_window(
    window: &mut Vec<Option<SharedBlob>>,
    subs: &Arc<RwLock<Subscribers>>,
    locked_window: &Arc<RwLock<Vec<Option<SharedBlob>>>>,
    crdt: &Arc<RwLock<Crdt>>,
    recycler: &BlobRecycler,
    consumed: &mut usize,
    received: &mut usize,
    r: &BlobReceiver,
    s: &BlobSender,
    retransmit: &BlobSender,
) -> Result<()> {
    let timer = Duration::new(1, 0);
    let timer = Duration::from_millis(200);
    let mut dq = r.recv_timeout(timer)?;
    let leader_id = crdt.read()
        .expect("'crdt' read lock in fn recv_window")
        .leader_data()
        .id;
    while let Ok(mut nq) = r.try_recv() {
        dq.append(&mut nq)
    }
    {
        //retransmit all leader blocks
        let mut retransmitq = VecDeque::new();
        let rsubs = subs.read().unwrap();
        for b in &dq {
            let p = b.read().unwrap();
            let p = b.read().expect("'b' read lock in fn recv_window");
            //TODO this check isn't safe against adversarial packets
            //we need to maintain a sequence window
            trace!(
                "idx: {} addr: {:?} leader: {:?}",
                p.get_index().unwrap(),
                "idx: {} addr: {:?} id: {:?} leader: {:?}",
                p.get_index().expect("get_index in fn recv_window"),
                p.get_id().expect("get_id in trace! fn recv_window"),
                p.meta.addr(),
                rsubs.leader.addr
                leader_id
            );
            if p.meta.addr() == rsubs.leader.addr {
            if p.get_id().expect("get_id in fn recv_window") == leader_id {
                //TODO
                //need to copy the retransmitted blob
                //otherwise we get into races with which thread
@@ -145,7 +228,7 @@ fn recv_window(
                //is dropped via a weakref to the recycler
                let nv = recycler.allocate();
                {
                    let mut mnv = nv.write().unwrap();
                    let mut mnv = nv.write().expect("recycler write lock in fn recv_window");
                    let sz = p.meta.size;
                    mnv.meta.size = sz;
                    mnv.data[..sz].copy_from_slice(&p.data[..sz]);
@@ -161,68 +244,187 @@ fn recv_window(
    let mut contq = VecDeque::new();
    while let Some(b) = dq.pop_front() {
        let b_ = b.clone();
        let p = b.write().unwrap();
        let p = b.write().expect("'b' write lock in fn recv_window");
        let pix = p.get_index()? as usize;
        let w = pix % NUM_BLOBS;
        if pix > *received {
            *received = pix;
        }
        let w = pix % WINDOW_SIZE;
        //TODO, after the block are authenticated
        //if we get different blocks at the same index
        //that is a network failure/attack
        trace!("window w: {} size: {}", w, p.meta.size);
        {
            let mut window = locked_window.write().unwrap();
            if window[w].is_none() {
                window[w] = Some(b_);
            } else {
                debug!("duplicate blob at index {:}", w);
            } else if let &Some(ref cblob) = &window[w] {
                if cblob.read().unwrap().get_index().unwrap() != pix as u64 {
                    warn!("overrun blob at index {:}", w);
                } else {
                    debug!("duplicate blob at index {:}", w);
                }
            }
            loop {
                let k = *consumed % NUM_BLOBS;
                let k = *consumed % WINDOW_SIZE;
                trace!("k: {} consumed: {}", k, *consumed);
                if window[k].is_none() {
                    break;
                }
                contq.push_back(window[k].clone().unwrap());
                contq.push_back(window[k].clone().expect("clone in fn recv_window"));
                window[k] = None;
                *consumed += 1;
            }
        }
    }
    {
        let buf: Vec<_> = locked_window
            .read()
            .unwrap()
            .iter()
            .enumerate()
            .map(|(i, v)| {
                if i == (*consumed % WINDOW_SIZE) {
                    assert!(v.is_none());
                    "_"
                } else if v.is_none() {
                    "0"
                } else {
                    "1"
                }
            })
            .collect();
        trace!("WINDOW: {}", buf.join(""));
    }
    trace!("sending contq.len: {}", contq.len());
    if !contq.is_empty() {
        trace!("sending contq.len: {}", contq.len());
        s.send(contq)?;
    }
    Ok(())
}

pub fn default_window() -> Arc<RwLock<Vec<Option<SharedBlob>>>> {
    Arc::new(RwLock::new(vec![None; WINDOW_SIZE]))
}

pub fn window(
    exit: Arc<AtomicBool>,
    subs: Arc<RwLock<Subscribers>>,
    crdt: Arc<RwLock<Crdt>>,
    window: Arc<RwLock<Vec<Option<SharedBlob>>>>,
    recycler: BlobRecycler,
    r: BlobReceiver,
    s: BlobSender,
    retransmit: BlobSender,
) -> JoinHandle<()> {
    spawn(move || {
        let mut window = vec![None; NUM_BLOBS];
        let mut consumed = 0;
        let mut received = 0;
        let mut last = 0;
        let mut times = 0;
        loop {
            if exit.load(Ordering::Relaxed) {
                break;
            }
            let _ = recv_window(
                &mut window,
                &subs,
                &window,
                &crdt,
                &recycler,
                &mut consumed,
                &mut received,
                &r,
                &s,
                &retransmit,
            );
            let _ = repair_window(
                &window,
                &crdt,
                &mut last,
                &mut times,
                &mut consumed,
                &mut received,
            );
        }
    })
}

fn broadcast(
    crdt: &Arc<RwLock<Crdt>>,
    window: &Arc<RwLock<Vec<Option<SharedBlob>>>>,
    recycler: &BlobRecycler,
    r: &BlobReceiver,
    sock: &UdpSocket,
    transmit_index: &mut u64,
) -> Result<()> {
    let timer = Duration::new(1, 0);
    let mut dq = r.recv_timeout(timer)?;
    while let Ok(mut nq) = r.try_recv() {
        dq.append(&mut nq);
    }
    let mut blobs = dq.into_iter().collect();
    /// appends codes to the list of blobs allowing us to reconstruct the stream
    #[cfg(feature = "erasure")]
    erasure::generate_coding(re, blobs, consumed);
    Crdt::broadcast(crdt, &blobs, &sock, transmit_index)?;
    // keep the cache of blobs that are broadcast
    {
        let mut win = window.write().unwrap();
        for b in &blobs {
            let ix = b.read().unwrap().get_index().expect("blob index");
            let pos = (ix as usize) % WINDOW_SIZE;
            if let Some(x) = &win[pos] {
                trace!(
                    "popped {} at {}",
                    x.read().unwrap().get_index().unwrap(),
                    pos
                );
                recycler.recycle(x.clone());
            }
            trace!("null {}", pos);
            win[pos] = None;
            assert!(win[pos].is_none());
        }
        while let Some(b) = blobs.pop() {
            let ix = b.read().unwrap().get_index().expect("blob index");
            let pos = (ix as usize) % WINDOW_SIZE;
            trace!("caching {} at {}", ix, pos);
            assert!(win[pos].is_none());
            win[pos] = Some(b);
        }
    }
    Ok(())
}

/// Service to broadcast messages from the leader to layer 1 nodes.
/// See `crdt` for network layer definitions.
/// # Arguments
/// * `sock` - Socket to send from.
/// * `exit` - Boolean to signal system exit.
/// * `crdt` - CRDT structure
/// * `window` - Cache of blobs that we have broadcast
/// * `recycler` - Blob recycler.
/// * `r` - Receive channel for blobs to be retransmitted to all the layer 1 nodes.
pub fn broadcaster(
    sock: UdpSocket,
    exit: Arc<AtomicBool>,
    crdt: Arc<RwLock<Crdt>>,
    window: Arc<RwLock<Vec<Option<SharedBlob>>>>,
    recycler: BlobRecycler,
    r: BlobReceiver,
) -> JoinHandle<()> {
    spawn(move || {
        let mut transmit_index = 0;
        loop {
            if exit.load(Ordering::Relaxed) {
                break;
            }
            let _ = broadcast(&crdt, &window, &recycler, &r, &sock, &mut transmit_index);
        }
    })
}

fn retransmit(
    subs: &Arc<RwLock<Subscribers>>,
    crdt: &Arc<RwLock<Crdt>>,
    recycler: &BlobRecycler,
    r: &BlobReceiver,
    sock: &UdpSocket,
@@ -233,10 +435,8 @@ fn retransmit(
        dq.append(&mut nq);
    }
    {
        let wsubs = subs.read().unwrap();
        for b in &dq {
            let mut mb = b.write().unwrap();
            wsubs.retransmit(&mut mb, sock)?;
            Crdt::retransmit(&crdt, b, sock)?;
        }
    }
    while let Some(b) = dq.pop_front() {
@@ -246,26 +446,30 @@ fn retransmit(
}

/// Service to retransmit messages from the leader to layer 1 nodes.
/// See `subscribers` for network layer definitions.
/// See `crdt` for network layer definitions.
/// # Arguments
/// * `sock` - Socket to read from. Read timeout is set to 1.
/// * `exit` - Boolean to signal system exit.
/// * `subs` - Shared Subscriber structure. This structure needs to be updated and populated by
/// the accountant.
/// * `crdt` - This structure needs to be updated and populated by the bank and via gossip.
/// * `recycler` - Blob recycler.
/// * `r` - Receive channel for blobs to be retransmitted to all the layer 1 nodes.
pub fn retransmitter(
    sock: UdpSocket,
    exit: Arc<AtomicBool>,
    subs: Arc<RwLock<Subscribers>>,
    crdt: Arc<RwLock<Crdt>>,
    recycler: BlobRecycler,
    r: BlobReceiver,
) -> JoinHandle<()> {
    spawn(move || loop {
        if exit.load(Ordering::Relaxed) {
            break;
    spawn(move || {
        trace!("retransmitter started");
        loop {
            if exit.load(Ordering::Relaxed) {
                break;
            }
            // TODO: handle this error
            let _ = retransmit(&crdt, &recycler, &r, &sock);
        }
        let _ = retransmit(&subs, &recycler, &r, &sock);
        trace!("exiting retransmitter");
    })
}

@@ -335,12 +539,14 @@ mod bench {
    }
    fn run_streamer_bench() -> Result<()> {
        let read = UdpSocket::bind("127.0.0.1:0")?;
        read.set_read_timeout(Some(Duration::new(1, 0)))?;

        let addr = read.local_addr()?;
        let exit = Arc::new(AtomicBool::new(false));
        let pack_recycler = PacketRecycler::default();

        let (s_reader, r_reader) = channel();
        let t_reader = receiver(read, exit.clone(), pack_recycler.clone(), s_reader)?;
        let t_reader = receiver(read, exit.clone(), pack_recycler.clone(), s_reader);
        let t_producer1 = producer(&addr, pack_recycler.clone(), exit.clone());
        let t_producer2 = producer(&addr, pack_recycler.clone(), exit.clone());
        let t_producer3 = producer(&addr, pack_recycler.clone(), exit.clone());
@@ -356,7 +562,7 @@ mod bench {
        let time = elapsed.as_secs() * 10000000000 + elapsed.subsec_nanos() as u64;
        let ftime = (time as f64) / 10000000000f64;
        let fcount = (end_val - start_val) as f64;
        println!("performance: {:?}", fcount / ftime);
        trace!("performance: {:?}", fcount / ftime);
        exit.store(true, Ordering::Relaxed);
        t_reader.join()?;
        t_producer1.join()?;
@@ -373,7 +579,11 @@ mod bench {

#[cfg(test)]
mod test {
    use crdt::{Crdt, ReplicatedData};
    use logger;
    use packet::{Blob, BlobRecycler, Packet, PacketRecycler, Packets, PACKET_DATA_SIZE};
    use signature::KeyPair;
    use signature::KeyPairUtil;
    use std::collections::VecDeque;
    use std::io;
    use std::io::Write;
@@ -381,17 +591,17 @@ mod test {
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::mpsc::channel;
    use std::sync::{Arc, RwLock};
    use std::thread::sleep;
    use std::time::Duration;
    use streamer::{blob_receiver, receiver, responder, retransmitter, window, BlobReceiver,
                   PacketReceiver};
    use subscribers::{Node, Subscribers};
    use streamer::{default_window, BlobReceiver, PacketReceiver};
    use streamer::{blob_receiver, receiver, responder, retransmitter, window};

    fn get_msgs(r: PacketReceiver, num: &mut usize) {
        for _t in 0..5 {
            let timer = Duration::new(1, 0);
            match r.recv_timeout(timer) {
                Ok(m) => *num += m.read().unwrap().packets.len(),
                e => println!("error {:?}", e),
                e => info!("error {:?}", e),
            }
            if *num == 10 {
                break;
@@ -407,13 +617,15 @@ mod test {
    #[test]
    pub fn streamer_send_test() {
        let read = UdpSocket::bind("127.0.0.1:0").expect("bind");
        read.set_read_timeout(Some(Duration::new(1, 0))).unwrap();

        let addr = read.local_addr().unwrap();
        let send = UdpSocket::bind("127.0.0.1:0").expect("bind");
        let exit = Arc::new(AtomicBool::new(false));
        let pack_recycler = PacketRecycler::default();
        let resp_recycler = BlobRecycler::default();
        let (s_reader, r_reader) = channel();
        let t_receiver = receiver(read, exit.clone(), pack_recycler.clone(), s_reader).unwrap();
        let t_receiver = receiver(read, exit.clone(), pack_recycler.clone(), s_reader);
        let (s_responder, r_responder) = channel();
        let t_responder = responder(send, exit.clone(), resp_recycler.clone(), r_responder);
        let mut msgs = VecDeque::new();
@@ -445,7 +657,7 @@ mod test {
                }
                *num += m.len();
            }
            e => println!("error {:?}", e),
            e => info!("error {:?}", e),
        }
        if *num == 10 {
            break;
@@ -455,24 +667,36 @@ mod test {

    #[test]
    pub fn window_send_test() {
        let pubkey_me = KeyPair::new().pubkey();
        let read = UdpSocket::bind("127.0.0.1:0").expect("bind");
        let addr = read.local_addr().unwrap();
        let send = UdpSocket::bind("127.0.0.1:0").expect("bind");
        let serve = UdpSocket::bind("127.0.0.1:0").expect("bind");
        let event = UdpSocket::bind("127.0.0.1:0").expect("bind");
        let exit = Arc::new(AtomicBool::new(false));
        let subs = Arc::new(RwLock::new(Subscribers::new(
            Node::default(),
            Node::new([0; 8], 0, send.local_addr().unwrap()),
            &[],
        )));
        let rep_data = ReplicatedData::new(
            pubkey_me,
            read.local_addr().unwrap(),
            send.local_addr().unwrap(),
            serve.local_addr().unwrap(),
            event.local_addr().unwrap(),
        );
        let mut crdt_me = Crdt::new(rep_data);
        let me_id = crdt_me.my_data().id;
        crdt_me.set_leader(me_id);
        let subs = Arc::new(RwLock::new(crdt_me));

        let resp_recycler = BlobRecycler::default();
        let (s_reader, r_reader) = channel();
        let t_receiver =
            blob_receiver(exit.clone(), resp_recycler.clone(), read, s_reader).unwrap();
        let (s_window, r_window) = channel();
        let (s_retransmit, r_retransmit) = channel();
        let win = default_window();
        let t_window = window(
            exit.clone(),
            subs,
            win,
            resp_recycler.clone(),
            r_reader,
            s_window,
@@ -487,6 +711,7 @@ mod test {
            let b_ = b.clone();
            let mut w = b.write().unwrap();
            w.set_index(i).unwrap();
            w.set_id(me_id).unwrap();
            assert_eq!(i, w.get_index().unwrap());
            w.meta.size = PACKET_DATA_SIZE;
            w.meta.set_addr(&addr);
@@ -507,43 +732,110 @@ mod test {
        t_window.join().expect("join");
    }

    fn test_node() -> (Arc<RwLock<Crdt>>, UdpSocket, UdpSocket, UdpSocket) {
        let gossip = UdpSocket::bind("127.0.0.1:0").unwrap();
        let replicate = UdpSocket::bind("127.0.0.1:0").unwrap();
        let serve = UdpSocket::bind("127.0.0.1:0").unwrap();
        let event = UdpSocket::bind("127.0.0.1:0").unwrap();
        let pubkey = KeyPair::new().pubkey();
        let d = ReplicatedData::new(
            pubkey,
            gossip.local_addr().unwrap(),
            replicate.local_addr().unwrap(),
            serve.local_addr().unwrap(),
            event.local_addr().unwrap(),
        );
        trace!("data: {:?}", d);
        let crdt = Crdt::new(d);
        (Arc::new(RwLock::new(crdt)), gossip, replicate, serve)
    }

    #[test]
    #[ignore]
    //retransmit from leader to replicate target
    pub fn retransmit() {
        let read = UdpSocket::bind("127.0.0.1:0").expect("bind");
        let send = UdpSocket::bind("127.0.0.1:0").expect("bind");
        logger::setup();
        trace!("retransmit test start");
        let exit = Arc::new(AtomicBool::new(false));
        let subs = Arc::new(RwLock::new(Subscribers::new(
            Node::default(),
            Node::default(),
            &[Node::new([0; 8], 1, read.local_addr().unwrap())],
        )));
        let (crdt_leader, sock_gossip_leader, _, sock_leader) = test_node();
        let (crdt_target, sock_gossip_target, sock_replicate_target, _) = test_node();
        let leader_data = crdt_leader.read().unwrap().my_data().clone();
        crdt_leader.write().unwrap().insert(&leader_data);
        crdt_leader.write().unwrap().set_leader(leader_data.id);
        let t_crdt_leader_g = Crdt::gossip(crdt_leader.clone(), exit.clone());
        let window_leader = Arc::new(RwLock::new(vec![]));
        let t_crdt_leader_l = Crdt::listen(
            crdt_leader.clone(),
            window_leader,
            sock_gossip_leader,
            exit.clone(),
        );

        crdt_target.write().unwrap().insert(&leader_data);
        crdt_target.write().unwrap().set_leader(leader_data.id);
        let t_crdt_target_g = Crdt::gossip(crdt_target.clone(), exit.clone());
        let window_target = Arc::new(RwLock::new(vec![]));
        let t_crdt_target_l = Crdt::listen(
            crdt_target.clone(),
            window_target,
            sock_gossip_target,
            exit.clone(),
        );
        //leader retransmitter
        let (s_retransmit, r_retransmit) = channel();
        let blob_recycler = BlobRecycler::default();
        let saddr = send.local_addr().unwrap();
        let saddr = sock_leader.local_addr().unwrap();
        let t_retransmit = retransmitter(
            send,
            sock_leader,
            exit.clone(),
            subs,
            crdt_leader.clone(),
            blob_recycler.clone(),
            r_retransmit,
        );

        //target receiver
        let (s_blob_receiver, r_blob_receiver) = channel();
        let t_receiver = blob_receiver(
            exit.clone(),
            blob_recycler.clone(),
            sock_replicate_target,
            s_blob_receiver,
        ).unwrap();
        for _ in 0..10 {
            let done = crdt_target.read().unwrap().update_index == 2
                && crdt_leader.read().unwrap().update_index == 2;
            if done {
                break;
            }
            let timer = Duration::new(1, 0);
            sleep(timer);
        }

        //send the data through
        let mut bq = VecDeque::new();
        let b = blob_recycler.allocate();
        b.write().unwrap().meta.size = 10;
        bq.push_back(b);
        s_retransmit.send(bq).unwrap();
        let (s_blob_receiver, r_blob_receiver) = channel();
        let t_receiver =
            blob_receiver(exit.clone(), blob_recycler.clone(), read, s_blob_receiver).unwrap();
        let mut oq = r_blob_receiver.recv().unwrap();
        let timer = Duration::new(5, 0);
        trace!("Waiting for timeout");
        let mut oq = r_blob_receiver.recv_timeout(timer).unwrap();
        assert_eq!(oq.len(), 1);
        let o = oq.pop_front().unwrap();
        let ro = o.read().unwrap();
        assert_eq!(ro.meta.size, 10);
        assert_eq!(ro.meta.addr(), saddr);
        exit.store(true, Ordering::Relaxed);
        t_receiver.join().expect("join");
        t_retransmit.join().expect("join");
        let threads = vec![
            t_receiver,
            t_retransmit,
            t_crdt_target_g,
            t_crdt_target_l,
            t_crdt_leader_g,
            t_crdt_leader_l,
        ];
        for t in threads {
            t.join().unwrap();
        }
    }

}
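A note on the backoff test in repair_window above: `times & (times - 1) == 0` is true exactly when `times` is a power of two, so repair requests go out on attempts 1, 2, 4, 8, 16, ... while `consumed` stays stuck. A tiny self-contained check of that arithmetic:

// Sketch of the power-of-two gate used by repair_window's backoff.
fn should_send_repair(times: usize) -> bool {
    times & (times - 1) == 0
}

fn backoff_demo() {
    let fired: Vec<usize> = (1..=20).filter(|&t| should_send_repair(t)).collect();
    assert_eq!(fired, vec![1, 2, 4, 8, 16]);
}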
149 src/subscribers.rs (deleted)
@@ -1,149 +0,0 @@
//! The `subscribers` module defines data structures to keep track of nodes on the network.
//! The network is arranged in layers:
//!
//! * layer 0 - Leader.
//! * layer 1 - As many nodes as we can fit to quickly get reliable `2/3+1` finality
//! * layer 2 - Everyone else, if layer 1 is `2^10`, layer 2 should be able to fit `2^20` number of nodes.
//!
//! It's up to the external state machine to keep this updated.
use packet::Blob;
use rayon::prelude::*;
use result::{Error, Result};
use std::net::{SocketAddr, UdpSocket};

use std::fmt;

#[derive(Clone, PartialEq)]
pub struct Node {
    pub id: [u64; 8],
    pub weight: u64,
    pub addr: SocketAddr,
}

//sockaddr doesn't implement default
impl Default for Node {
    fn default() -> Node {
        Node {
            id: [0; 8],
            weight: 0,
            addr: "0.0.0.0:0".parse().unwrap(),
        }
    }
}

impl Node {
    pub fn new(id: [u64; 8], weight: u64, addr: SocketAddr) -> Node {
        Node { id, weight, addr }
    }
    fn key(&self) -> i64 {
        (self.weight as i64).checked_neg().unwrap()
    }
}

impl fmt::Debug for Node {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "Node {{ weight: {} addr: {} }}", self.weight, self.addr)
    }
}

pub struct Subscribers {
    data: Vec<Node>,
    pub me: Node,
    pub leader: Node,
}

impl Subscribers {
    pub fn new(me: Node, leader: Node, network: &[Node]) -> Subscribers {
        let mut h = Subscribers {
            data: vec![],
            me: me.clone(),
            leader: leader.clone(),
        };
        h.insert(&[me, leader]);
        h.insert(network);
        h
    }

    /// retransmit messages from the leader to layer 1 nodes
    pub fn retransmit(&self, blob: &mut Blob, s: &UdpSocket) -> Result<()> {
        let errs: Vec<_> = self.data
            .par_iter()
            .map(|i| {
                if self.me == *i {
                    return Ok(0);
                }
                if self.leader == *i {
                    return Ok(0);
                }
                trace!("retransmit blob to {}", i.addr);
                s.send_to(&blob.data[..blob.meta.size], &i.addr)
            })
            .collect();
        for e in errs {
            trace!("retransmit result {:?}", e);
            match e {
                Err(e) => return Err(Error::IO(e)),
                _ => (),
            }
        }
        Ok(())
    }
    pub fn insert(&mut self, ns: &[Node]) {
        self.data.extend_from_slice(ns);
        self.data.sort_by_key(Node::key);
    }
}

#[cfg(test)]
mod test {
    use packet::Blob;
    use rayon::prelude::*;
    use std::net::UdpSocket;
    use std::time::Duration;
    use subscribers::{Node, Subscribers};

    #[test]
    pub fn subscriber() {
        let mut me = Node::default();
        me.weight = 10;
        let mut leader = Node::default();
        leader.weight = 11;
        let mut s = Subscribers::new(me, leader, &[]);
        assert_eq!(s.data.len(), 2);
        assert_eq!(s.data[0].weight, 11);
        assert_eq!(s.data[1].weight, 10);
        let mut n = Node::default();
        n.weight = 12;
        s.insert(&[n]);
        assert_eq!(s.data.len(), 3);
        assert_eq!(s.data[0].weight, 12);
    }
    #[test]
    pub fn retransmit() {
        let s1 = UdpSocket::bind("127.0.0.1:0").expect("bind");
        let s2 = UdpSocket::bind("127.0.0.1:0").expect("bind");
        let s3 = UdpSocket::bind("127.0.0.1:0").expect("bind");
        let n1 = Node::new([0; 8], 0, s1.local_addr().unwrap());
        let n2 = Node::new([0; 8], 0, s2.local_addr().unwrap());
        let mut s = Subscribers::new(n1.clone(), n2.clone(), &[]);
        let n3 = Node::new([0; 8], 0, s3.local_addr().unwrap());
        s.insert(&[n3]);
        let mut b = Blob::default();
        b.meta.size = 10;
        let s4 = UdpSocket::bind("127.0.0.1:0").expect("bind");
        s.retransmit(&mut b, &s4).unwrap();
        let res: Vec<_> = [s1, s2, s3]
            .into_par_iter()
            .map(|s| {
                let mut b = Blob::default();
                s.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
                s.recv_from(&mut b.data).is_err()
            })
            .collect();
        assert_eq!(res, [true, true, false]);
        let mut n4 = Node::default();
        n4.addr = "255.255.255.255:1".parse().unwrap();
        s.insert(&[n4]);
        assert!(s.retransmit(&mut b, &s4).is_err());
    }
}
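The ordering trick in the deleted Node::key is worth spelling out: `sort_by_key` sorts ascending, so returning the negated weight yields a descending order by weight, which is why the subscriber test sees 12, 11, 10. A self-contained check of that arithmetic:

// Sketch of sorting descending via a negated sort key, as Node::key did.
fn weight_order_demo() {
    let mut weights: Vec<u64> = vec![10, 12, 11];
    weights.sort_by_key(|w| (*w as i64).checked_neg().unwrap());
    assert_eq!(weights, vec![12, 11, 10]);
}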
454 src/thin_client.rs Normal file
@@ -0,0 +1,454 @@
//! The `thin_client` module is a client-side object that interfaces with
//! a server-side TPU. Client code should use this object instead of writing
//! messages to the network directly. The binary encoding of its messages is
//! unstable and may change in future releases.

use bincode::{deserialize, serialize};
use event::Event;
use futures::future::{ok, FutureResult};
use hash::Hash;
use request::{Request, Response};
use signature::{KeyPair, PublicKey, Signature};
use std::collections::HashMap;
use std::io;
use std::net::{SocketAddr, UdpSocket};
use transaction::Transaction;

pub struct ThinClient {
    requests_addr: SocketAddr,
    requests_socket: UdpSocket,
    events_addr: SocketAddr,
    events_socket: UdpSocket,
    last_id: Option<Hash>,
    transaction_count: u64,
    balances: HashMap<PublicKey, Option<i64>>,
}

impl ThinClient {
    /// Create a new ThinClient that will interface with Rpu
    /// over `requests_socket` and `events_socket`. To receive responses, the caller must bind `socket`
    /// to a public address before invoking ThinClient methods.
    pub fn new(
        requests_addr: SocketAddr,
        requests_socket: UdpSocket,
        events_addr: SocketAddr,
        events_socket: UdpSocket,
    ) -> Self {
        let client = ThinClient {
            requests_addr,
            requests_socket,
            events_addr,
            events_socket,
            last_id: None,
            transaction_count: 0,
            balances: HashMap::new(),
        };
        client
    }

    pub fn recv_response(&self) -> io::Result<Response> {
        let mut buf = vec![0u8; 1024];
        trace!("start recv_from");
        self.requests_socket.recv_from(&mut buf)?;
        trace!("end recv_from");
        let resp = deserialize(&buf).expect("deserialize balance in thin_client");
        Ok(resp)
    }

    pub fn process_response(&mut self, resp: Response) {
        match resp {
            Response::Balance { key, val } => {
                trace!("Response balance {:?} {:?}", key, val);
                self.balances.insert(key, val);
            }
            Response::LastId { id } => {
                info!("Response last_id {:?}", id);
                self.last_id = Some(id);
            }
            Response::TransactionCount { transaction_count } => {
                info!("Response transaction count {:?}", transaction_count);
                self.transaction_count = transaction_count;
            }
        }
    }

    /// Send a signed Transaction to the server for processing. This method
    /// does not wait for a response.
    pub fn transfer_signed(&self, tr: Transaction) -> io::Result<usize> {
        let event = Event::Transaction(tr);
        let data = serialize(&event).expect("serialize Transaction in pub fn transfer_signed");
        self.events_socket.send_to(&data, &self.events_addr)
    }

    /// Creates, signs, and processes a Transaction. Useful for writing unit-tests.
    pub fn transfer(
        &self,
        n: i64,
        keypair: &KeyPair,
        to: PublicKey,
        last_id: &Hash,
    ) -> io::Result<Signature> {
        let tr = Transaction::new(keypair, to, n, *last_id);
        let sig = tr.sig;
        self.transfer_signed(tr).map(|_| sig)
    }

    /// Request the balance of the user holding `pubkey`. This method blocks
    /// until the server sends a response. If the response packet is dropped
    /// by the network, this method will hang indefinitely.
    pub fn get_balance(&mut self, pubkey: &PublicKey) -> io::Result<i64> {
        trace!("get_balance");
        let req = Request::GetBalance { key: *pubkey };
        let data = serialize(&req).expect("serialize GetBalance in pub fn get_balance");
        self.requests_socket
            .send_to(&data, &self.requests_addr)
            .expect("buffer error in pub fn get_balance");
        let mut done = false;
        while !done {
            let resp = self.recv_response()?;
            trace!("recv_response {:?}", resp);
            if let &Response::Balance { ref key, .. } = &resp {
                done = key == pubkey;
            }
            self.process_response(resp);
        }
        self.balances[pubkey].ok_or(io::Error::new(io::ErrorKind::Other, "nokey"))
    }

    /// Request the transaction count. If the response packet is dropped by the network,
    /// this method will hang.
    pub fn transaction_count(&mut self) -> u64 {
        info!("transaction_count");
        let req = Request::GetTransactionCount;
        let data =
            serialize(&req).expect("serialize GetTransactionCount in pub fn transaction_count");
        self.requests_socket
            .send_to(&data, &self.requests_addr)
            .expect("buffer error in pub fn transaction_count");
        let mut done = false;
        while !done {
            let resp = self.recv_response().expect("transaction count dropped");
            info!("recv_response {:?}", resp);
            if let &Response::TransactionCount { .. } = &resp {
                done = true;
            }
            self.process_response(resp);
        }
        self.transaction_count
    }

    /// Request the last Entry ID from the server. This method blocks
    /// until the server sends a response.
    pub fn get_last_id(&mut self) -> FutureResult<Hash, ()> {
        info!("get_last_id");
        let req = Request::GetLastId;
        let data = serialize(&req).expect("serialize GetLastId in pub fn get_last_id");
        self.requests_socket
            .send_to(&data, &self.requests_addr)
            .expect("buffer error in pub fn get_last_id");
        let mut done = false;
        while !done {
            let resp = self.recv_response().expect("get_last_id response");
            if let &Response::LastId { .. } = &resp {
                done = true;
            }
            self.process_response(resp);
        }
        ok(self.last_id.expect("some last_id"))
    }
}

#[cfg(test)]
pub fn poll_get_balance(client: &mut ThinClient, pubkey: &PublicKey) -> io::Result<i64> {
    use std::time::Instant;

    let mut balance;
    let now = Instant::now();
    loop {
        balance = client.get_balance(pubkey);
        if balance.is_ok() || now.elapsed().as_secs() > 1 {
            break;
        }
    }

    balance
}

#[cfg(test)]
mod tests {
    use super::*;
    use bank::Bank;
    use crdt::{Crdt, ReplicatedData};
    use futures::Future;
    use logger;
    use mint::Mint;
    use plan::Plan;
    use server::Server;
    use signature::{KeyPair, KeyPairUtil};
    use std::io::sink;
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::{Arc, RwLock};
    use std::thread::JoinHandle;
    use std::thread::sleep;
    use std::time::Duration;
    use streamer::default_window;
    use transaction::Instruction;
    use tvu::tests::TestNode;

    #[test]
    fn test_thin_client() {
        logger::setup();
        let leader = TestNode::new();

        let alice = Mint::new(10_000);
        let bank = Bank::new(&alice);
        let bob_pubkey = KeyPair::new().pubkey();
        let exit = Arc::new(AtomicBool::new(false));

        let server = Server::new_leader(
            bank,
            alice.last_id(),
            Some(Duration::from_millis(30)),
            leader.data.clone(),
            leader.sockets.requests,
            leader.sockets.event,
            leader.sockets.broadcast,
            leader.sockets.respond,
            leader.sockets.gossip,
            exit.clone(),
            sink(),
        );
        sleep(Duration::from_millis(900));

        let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
        let events_socket = UdpSocket::bind("0.0.0.0:0").unwrap();

        let mut client = ThinClient::new(
            leader.data.requests_addr,
            requests_socket,
            leader.data.events_addr,
            events_socket,
        );
        let last_id = client.get_last_id().wait().unwrap();
        let _sig = client
            .transfer(500, &alice.keypair(), bob_pubkey, &last_id)
            .unwrap();
        let balance = poll_get_balance(&mut client, &bob_pubkey);
        assert_eq!(balance.unwrap(), 500);
        exit.store(true, Ordering::Relaxed);
        for t in server.thread_hdls {
            t.join().unwrap();
        }
    }

    #[test]
    fn test_bad_sig() {
        logger::setup();
        let leader = TestNode::new();
        let alice = Mint::new(10_000);
        let bank = Bank::new(&alice);
        let bob_pubkey = KeyPair::new().pubkey();
        let exit = Arc::new(AtomicBool::new(false));

        let server = Server::new_leader(
            bank,
            alice.last_id(),
            Some(Duration::from_millis(30)),
            leader.data.clone(),
            leader.sockets.requests,
            leader.sockets.event,
            leader.sockets.broadcast,
            leader.sockets.respond,
            leader.sockets.gossip,
            exit.clone(),
            sink(),
        );
        sleep(Duration::from_millis(300));

        let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
        requests_socket
            .set_read_timeout(Some(Duration::new(5, 0)))
            .unwrap();
        let events_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
        let mut client = ThinClient::new(
            leader.data.requests_addr,
            requests_socket,
            leader.data.events_addr,
            events_socket,
        );
        let last_id = client.get_last_id().wait().unwrap();

        let tr = Transaction::new(&alice.keypair(), bob_pubkey, 500, last_id);

        let _sig = client.transfer_signed(tr).unwrap();

        let last_id = client.get_last_id().wait().unwrap();

        let mut tr2 = Transaction::new(&alice.keypair(), bob_pubkey, 501, last_id);
        if let Instruction::NewContract(contract) = &mut tr2.instruction {
            contract.tokens = 502;
            contract.plan = Plan::new_payment(502, bob_pubkey);
        }
        let _sig = client.transfer_signed(tr2).unwrap();

        let balance = poll_get_balance(&mut client, &bob_pubkey);
        assert_eq!(balance.unwrap(), 500);
        exit.store(true, Ordering::Relaxed);
        for t in server.thread_hdls {
            t.join().unwrap();
        }
    }
    fn validator(
        leader: &ReplicatedData,
        exit: Arc<AtomicBool>,
        alice: &Mint,
        threads: &mut Vec<JoinHandle<()>>,
    ) {
        let validator = TestNode::new();
        let replicant_bank = Bank::new(&alice);
        let mut ts = Server::new_validator(
            replicant_bank,
            validator.data.clone(),
            validator.sockets.requests,
            validator.sockets.respond,
            validator.sockets.replicate,
            validator.sockets.gossip,
            leader.clone(),
            exit.clone(),
        );
        threads.append(&mut ts.thread_hdls);
    }

    fn converge(
        leader: &ReplicatedData,
        exit: Arc<AtomicBool>,
        num_nodes: usize,
        threads: &mut Vec<JoinHandle<()>>,
    ) -> Vec<ReplicatedData> {
        //lets spy on the network
        let mut spy = TestNode::new();
        let daddr = "0.0.0.0:0".parse().unwrap();
        let me = spy.data.id.clone();
        spy.data.replicate_addr = daddr;
        spy.data.requests_addr = daddr;
        let mut spy_crdt = Crdt::new(spy.data);
        spy_crdt.insert(&leader);
        spy_crdt.set_leader(leader.id);

        let spy_ref = Arc::new(RwLock::new(spy_crdt));
        let spy_window = default_window();
        let t_spy_listen = Crdt::listen(
            spy_ref.clone(),
            spy_window,
            spy.sockets.gossip,
            exit.clone(),
        );
        let t_spy_gossip = Crdt::gossip(spy_ref.clone(), exit.clone());
        //wait for the network to converge
        let mut converged = false;
        for _ in 0..30 {
            let num = spy_ref.read().unwrap().convergence();
            if num == num_nodes as u64 {
                converged = true;
                break;
            }
            sleep(Duration::new(1, 0));
        }
        assert!(converged);
        threads.push(t_spy_listen);
        threads.push(t_spy_gossip);
        let v: Vec<ReplicatedData> = spy_ref
            .read()
            .unwrap()
            .table
            .values()
            .into_iter()
            .filter(|x| x.id != me)
            .map(|x| x.clone())
            .collect();
        v.clone()
    }
    #[test]
    fn test_multi_node() {
        logger::setup();
        const N: usize = 5;
        trace!("test_multi_accountant_stub");
        let leader = TestNode::new();
        let alice = Mint::new(10_000);
        let bob_pubkey = KeyPair::new().pubkey();
        let exit = Arc::new(AtomicBool::new(false));

        let leader_bank = Bank::new(&alice);
        let server = Server::new_leader(
            leader_bank,
            alice.last_id(),
            None,
            leader.data.clone(),
            leader.sockets.requests,
            leader.sockets.event,
            leader.sockets.broadcast,
            leader.sockets.respond,
            leader.sockets.gossip,
            exit.clone(),
            sink(),
        );

        let mut threads = server.thread_hdls;
        for _ in 0..N {
            validator(&leader.data, exit.clone(), &alice, &mut threads);
        }
        let servers = converge(&leader.data, exit.clone(), N + 2, &mut threads);
        //contains the leader addr as well
        assert_eq!(servers.len(), N + 1);
        //verify leader can do transfer
        let leader_balance = tx_and_retry_get_balance(&leader.data, &alice, &bob_pubkey).unwrap();
        assert_eq!(leader_balance, 500);
        //verify validator has the same balance
        let mut success = 0usize;
        for server in servers.iter() {
            let mut client = mk_client(server);
            if let Ok(bal) = poll_get_balance(&mut client, &bob_pubkey) {
                trace!("validator balance {}", bal);
                if bal == leader_balance {
                    success += 1;
                }
            }
        }
        assert_eq!(success, servers.len());
        exit.store(true, Ordering::Relaxed);
        for t in threads {
            t.join().unwrap();
        }
    }

    fn mk_client(leader: &ReplicatedData) -> ThinClient {
        let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
        requests_socket
            .set_read_timeout(Some(Duration::new(1, 0)))
            .unwrap();
        let events_socket = UdpSocket::bind("0.0.0.0:0").unwrap();

        ThinClient::new(
            leader.requests_addr,
            requests_socket,
            leader.events_addr,
            events_socket,
        )
    }

    fn tx_and_retry_get_balance(
        leader: &ReplicatedData,
        alice: &Mint,
        bob_pubkey: &PublicKey,
    ) -> io::Result<i64> {
        let mut client = mk_client(leader);
        trace!("getting leader last_id");
        let last_id = client.get_last_id().wait().unwrap();
        info!("executing leader transfer");
        let _sig = client
            .transfer(500, &alice.keypair(), *bob_pubkey, &last_id)
            .unwrap();
        poll_get_balance(&mut client, bob_pubkey)
    }

}
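A minimal usage sketch (not from the repo) following the tests above; the leader addresses here are placeholders, since a real caller learns them from gossip via ReplicatedData:

use thin_client::ThinClient;
use std::net::{SocketAddr, UdpSocket};

fn mk_demo_client() -> std::io::Result<ThinClient> {
    let requests_socket = UdpSocket::bind("0.0.0.0:0")?;
    let events_socket = UdpSocket::bind("0.0.0.0:0")?;
    let requests_addr: SocketAddr = "127.0.0.1:8000".parse().unwrap(); // placeholder
    let events_addr: SocketAddr = "127.0.0.1:8001".parse().unwrap(); // placeholder
    // get_last_id()/get_balance() block on a response; transfer() is fire-and-forget.
    Ok(ThinClient::new(requests_addr, requests_socket, events_addr, events_socket))
}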
17 src/timing.rs Normal file
@@ -0,0 +1,17 @@
use std::time::Duration;
use std::time::{SystemTime, UNIX_EPOCH};

pub fn duration_as_ms(d: &Duration) -> u64 {
    (d.as_secs() * 1000) + (d.subsec_nanos() as u64 / 1_000_000)
}

pub fn duration_as_s(d: &Duration) -> f32 {
    d.as_secs() as f32 + (d.subsec_nanos() as f32 / 1_000_000_000.0)
}

pub fn timestamp() -> u64 {
    let now = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("create timestamp in timing");
    duration_as_ms(&now)
}
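A quick arithmetic check of the conversions above: 2 s + 5e8 ns is 2500 ms, or 2.5 s (both values are exactly representable, so the equality asserts are safe):

use timing::{duration_as_ms, duration_as_s};
use std::time::Duration;

fn timing_demo() {
    let d = Duration::new(2, 500_000_000);
    assert_eq!(duration_as_ms(&d), 2_500);
    assert_eq!(duration_as_s(&d), 2.5);
}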
93 src/tpu.rs Normal file
@@ -0,0 +1,93 @@
//! The `tpu` module implements the Transaction Processing Unit, a
//! 5-stage transaction processing pipeline in software.

use bank::Bank;
use banking_stage::BankingStage;
use crdt::{Crdt, ReplicatedData};
use hash::Hash;
use packet;
use record_stage::RecordStage;
use sig_verify_stage::SigVerifyStage;
use std::io::Write;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::channel;
use std::sync::{Arc, Mutex, RwLock};
use std::thread::JoinHandle;
use std::time::Duration;
use streamer;
use write_stage::WriteStage;

pub struct Tpu {
    pub thread_hdls: Vec<JoinHandle<()>>,
}

impl Tpu {
    pub fn new<W: Write + Send + 'static>(
        bank: Arc<Bank>,
        start_hash: Hash,
        tick_duration: Option<Duration>,
        me: ReplicatedData,
        events_socket: UdpSocket,
        broadcast_socket: UdpSocket,
        gossip: UdpSocket,
        exit: Arc<AtomicBool>,
        writer: W,
    ) -> Self {
        let packet_recycler = packet::PacketRecycler::default();
        let (packet_sender, packet_receiver) = channel();
        let t_receiver = streamer::receiver(
            events_socket,
            exit.clone(),
            packet_recycler.clone(),
            packet_sender,
        );

        let sig_verify_stage = SigVerifyStage::new(exit.clone(), packet_receiver);

        let blob_recycler = packet::BlobRecycler::default();
        let banking_stage = BankingStage::new(
            bank.clone(),
            exit.clone(),
            sig_verify_stage.verified_receiver,
            packet_recycler.clone(),
        );

        let record_stage =
            RecordStage::new(banking_stage.signal_receiver, &start_hash, tick_duration);

        let write_stage = WriteStage::new(
            bank.clone(),
            exit.clone(),
            blob_recycler.clone(),
            Mutex::new(writer),
            record_stage.entry_receiver,
        );

        let crdt = Arc::new(RwLock::new(Crdt::new(me)));
        let t_gossip = Crdt::gossip(crdt.clone(), exit.clone());
        let window = streamer::default_window();
        let t_listen = Crdt::listen(crdt.clone(), window.clone(), gossip, exit.clone());

        let t_broadcast = streamer::broadcaster(
            broadcast_socket,
            exit.clone(),
            crdt.clone(),
            window,
            blob_recycler.clone(),
            write_stage.blob_receiver,
        );

        let mut thread_hdls = vec![
            t_receiver,
            banking_stage.thread_hdl,
            record_stage.thread_hdl,
            write_stage.thread_hdl,
            t_gossip,
            t_listen,
            t_broadcast,
        ];
        thread_hdls.extend(sig_verify_stage.thread_hdls.into_iter());
        Tpu { thread_hdls }
    }
}
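Reading Tpu::new top to bottom, each stage owns the receiver of the previous stage and a sender for the next, so the pipeline is just a chain of mpsc hops: streamer -> SigVerifyStage -> BankingStage -> RecordStage -> WriteStage -> broadcaster. A self-contained sketch (not repo code) of that chaining pattern:

use std::sync::mpsc::channel;
use std::thread::spawn;

fn pipeline_demo() {
    let (tx_in, rx_in) = channel::<u64>();
    let (tx_mid, rx_mid) = channel::<u64>();
    // Stage 1 (stand-in for "verify"): consume the previous receiver,
    // publish on the next sender.
    spawn(move || {
        for v in rx_in {
            tx_mid.send(v * 2).unwrap();
        }
    });
    tx_in.send(21).unwrap();
    drop(tx_in); // closing the channel lets the stage thread exit
    // Stage 2 (stand-in for "record"): drain the downstream receiver.
    assert_eq!(rx_mid.iter().collect::<Vec<_>>(), vec![42]);
}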
@@ -12,37 +12,62 @@ pub const SIG_OFFSET: usize = 8;
|
||||
pub const PUB_KEY_OFFSET: usize = 80;
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
|
||||
pub struct TransactionData {
|
||||
pub struct Contract {
|
||||
pub tokens: i64,
|
||||
pub last_id: Hash,
|
||||
pub plan: Plan,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
|
||||
pub enum Instruction {
|
||||
NewContract(Contract),
|
||||
ApplyTimestamp(DateTime<Utc>),
|
||||
ApplySignature(Signature),
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
|
||||
pub struct Transaction {
|
||||
pub sig: Signature,
|
||||
pub from: PublicKey,
|
||||
pub data: TransactionData,
|
||||
pub instruction: Instruction,
|
||||
pub last_id: Hash,
|
||||
}
|
||||
|
||||
impl Transaction {
|
||||
/// Create and sign a new Transaction. Used for unit-testing.
|
||||
pub fn new(from_keypair: &KeyPair, to: PublicKey, tokens: i64, last_id: Hash) -> Self {
|
||||
fn new_from_instruction(
|
||||
from_keypair: &KeyPair,
|
||||
instruction: Instruction,
|
||||
last_id: Hash,
|
||||
) -> Self {
|
||||
let from = from_keypair.pubkey();
|
||||
let plan = Plan::Pay(Payment { tokens, to });
|
||||
let mut tr = Transaction {
|
||||
sig: Signature::default(),
|
||||
data: TransactionData {
|
||||
plan,
|
||||
tokens,
|
||||
last_id,
|
||||
},
|
||||
from: from,
|
||||
instruction,
|
||||
last_id,
|
||||
from,
|
||||
};
|
||||
tr.sign(from_keypair);
|
||||
tr
|
||||
}
|
||||
|
||||
/// Create and sign a new Transaction. Used for unit-testing.
|
||||
pub fn new(from_keypair: &KeyPair, to: PublicKey, tokens: i64, last_id: Hash) -> Self {
|
||||
let plan = Plan::Pay(Payment { tokens, to });
|
||||
let instruction = Instruction::NewContract(Contract { plan, tokens });
|
||||
Self::new_from_instruction(from_keypair, instruction, last_id)
|
||||
}
|
||||
|
||||
/// Create and sign a new Witness Timestamp. Used for unit-testing.
|
||||
pub fn new_timestamp(from_keypair: &KeyPair, dt: DateTime<Utc>, last_id: Hash) -> Self {
|
||||
let instruction = Instruction::ApplyTimestamp(dt);
|
||||
Self::new_from_instruction(from_keypair, instruction, last_id)
|
||||
}
|
||||
|
||||
/// Create and sign a new Witness Signature. Used for unit-testing.
|
||||
pub fn new_signature(from_keypair: &KeyPair, tx_sig: Signature, last_id: Hash) -> Self {
|
||||
let instruction = Instruction::ApplySignature(tx_sig);
|
||||
Self::new_from_instruction(from_keypair, instruction, last_id)
|
||||
}
|
||||
|
||||
/// Create and sign a postdated Transaction. Used for unit-testing.
|
||||
pub fn new_on_date(
|
||||
from_keypair: &KeyPair,
|
||||
@@ -56,13 +81,11 @@ impl Transaction {
|
||||
(Condition::Timestamp(dt), Payment { tokens, to }),
|
||||
(Condition::Signature(from), Payment { tokens, to: from }),
|
||||
);
|
||||
let instruction = Instruction::NewContract(Contract { plan, tokens });
|
||||
let mut tr = Transaction {
|
||||
data: TransactionData {
|
||||
plan,
|
||||
tokens,
|
||||
last_id,
|
||||
},
|
||||
from: from,
|
||||
instruction,
|
||||
from,
|
||||
last_id,
|
||||
sig: Signature::default(),
|
||||
};
|
||||
tr.sign(from_keypair);
|
||||
@@ -70,7 +93,10 @@ impl Transaction {
|
||||
}
|
||||
|
||||
fn get_sign_data(&self) -> Vec<u8> {
|
||||
serialize(&(&self.data)).unwrap()
|
||||
let mut data = serialize(&(&self.instruction)).expect("serialize Contract");
|
||||
let last_id_data = serialize(&(&self.last_id)).expect("serialize last_id");
|
||||
data.extend_from_slice(&last_id_data);
|
||||
data
|
||||
}
|
||||
|
||||
/// Sign this transaction.
|
||||
@@ -84,7 +110,11 @@ impl Transaction {
|
||||
}
|
||||
|
||||
pub fn verify_plan(&self) -> bool {
|
||||
self.data.plan.verify(self.data.tokens)
|
||||
if let Instruction::NewContract(contract) = &self.instruction {
|
||||
contract.plan.verify(contract.tokens)
|
||||
} else {
|
||||
true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -152,13 +182,11 @@ mod tests {
|
||||
tokens: 0,
|
||||
to: Default::default(),
|
||||
});
|
||||
let instruction = Instruction::NewContract(Contract { plan, tokens: 0 });
|
||||
let claim0 = Transaction {
|
||||
data: TransactionData {
|
||||
plan,
|
||||
tokens: 0,
|
||||
last_id: Default::default(),
|
||||
},
|
||||
instruction,
|
||||
from: Default::default(),
|
||||
last_id: Default::default(),
|
||||
sig: Default::default(),
|
||||
};
|
||||
let buf = serialize(&claim0).unwrap();
|
||||
@@ -172,10 +200,12 @@ mod tests {
         let keypair = KeyPair::new();
         let pubkey = keypair.pubkey();
         let mut tr = Transaction::new(&keypair, pubkey, 42, zero);
-        tr.data.tokens = 1_000_000; // <-- attack, part 1!
-        if let Plan::Pay(ref mut payment) = tr.data.plan {
-            payment.tokens = tr.data.tokens; // <-- attack, part 2!
-        };
+        if let Instruction::NewContract(contract) = &mut tr.instruction {
+            contract.tokens = 1_000_000; // <-- attack, part 1!
+            if let Plan::Pay(ref mut payment) = contract.plan {
+                payment.tokens = contract.tokens; // <-- attack, part 2!
+            }
+        }
         assert!(tr.verify_plan());
         assert!(!tr.verify_sig());
     }
@@ -188,9 +218,11 @@ mod tests {
         let pubkey1 = keypair1.pubkey();
         let zero = Hash::default();
         let mut tr = Transaction::new(&keypair0, pubkey1, 42, zero);
-        if let Plan::Pay(ref mut payment) = tr.data.plan {
-            payment.to = thief_keypair.pubkey(); // <-- attack!
-        };
+        if let Instruction::NewContract(contract) = &mut tr.instruction {
+            if let Plan::Pay(ref mut payment) = contract.plan {
+                payment.to = thief_keypair.pubkey(); // <-- attack!
+            }
+        }
         assert!(tr.verify_plan());
         assert!(!tr.verify_sig());
     }
@@ -210,14 +242,18 @@ mod tests {
         let keypair1 = KeyPair::new();
         let zero = Hash::default();
         let mut tr = Transaction::new(&keypair0, keypair1.pubkey(), 1, zero);
-        if let Plan::Pay(ref mut payment) = tr.data.plan {
-            payment.tokens = 2; // <-- attack!
+        if let Instruction::NewContract(contract) = &mut tr.instruction {
+            if let Plan::Pay(ref mut payment) = contract.plan {
+                payment.tokens = 2; // <-- attack!
+            }
         }
         assert!(!tr.verify_plan());

         // Also, ensure all branches of the plan spend all tokens
-        if let Plan::Pay(ref mut payment) = tr.data.plan {
-            payment.tokens = 0; // <-- whoops!
+        if let Instruction::NewContract(contract) = &mut tr.instruction {
+            if let Plan::Pay(ref mut payment) = contract.plan {
+                payment.tokens = 0; // <-- whoops!
+            }
         }
         assert!(!tr.verify_plan());
     }
347 src/tvu.rs Normal file
@@ -0,0 +1,347 @@
//! The `tvu` module implements the Transaction Validation Unit, a
//! 5-stage transaction validation pipeline in software.
//! 1. streamer
//!    - Incoming blobs are picked up from the replicate socket.
//! 2. verifier
//!    - TODO Blobs are sent to the GPU, and while the memory is there the PoH stream is verified
//!      along with the ECDSA signature for the blob and each signature in all the transactions.
//!      Blobs with errors are dropped, or marked for slashing.
//! 3.a retransmit
//!    - Blobs originating from the parent (the leader is currently the only parent) are
//!      retransmitted to all the peers in the crdt. Peers are everyone who is not me or the
//!      leader that has a known replicate address.
//! 3.b window
//!    - Verified blobs are placed into a window, indexed by the counter set by the leader. This
//!      could be the PoH counter if it's monotonically increasing in each blob. Erasure coding
//!      is used to recover any missing packets, and requests are made at random to peers and
//!      parents to retransmit a missing packet.
//! 4. accountant
//!    - Contiguous blobs are sent to the accountant for processing transactions.
//! 5. validator
//!    - TODO Validation messages are sent back to the leader.

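Taken together, the constructor below wires these stages with mpsc channels roughly as follows (a sketch of the data flow using the local names from `Tvu::new`; not part of the file):

    replicate socket --> t_blob_receiver --> t_window
    t_window --> window_sender --> ReplicateStage (applies entries to the Bank)
    t_window --> retransmit_sender --> t_retransmit --> crdt peers
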
use bank::Bank;
use crdt::{Crdt, ReplicatedData};
use packet;
use replicate_stage::ReplicateStage;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::channel;
use std::sync::{Arc, RwLock};
use std::thread::JoinHandle;
use streamer;

pub struct Tvu {
    pub thread_hdls: Vec<JoinHandle<()>>,
}

impl Tvu {
    /// This service receives messages from a leader in the network and processes the transactions
    /// on the bank state.
    /// # Arguments
    /// * `bank` - The bank state.
    /// * `me` - my configuration
    /// * `gossip` - my gossip socket
    /// * `replicate` - my replicate socket
    /// * `leader` - leader configuration
    /// * `exit` - The exit signal.
    pub fn new(
        bank: Arc<Bank>,
        me: ReplicatedData,
        gossip: UdpSocket,
        replicate: UdpSocket,
        leader: ReplicatedData,
        exit: Arc<AtomicBool>,
    ) -> Self {
        // replicate pipeline
        let crdt = Arc::new(RwLock::new(Crdt::new(me)));
        crdt.write()
            .expect("'crdt' write lock in pub fn replicate")
            .set_leader(leader.id);
        crdt.write()
            .expect("'crdt' write lock before insert() in pub fn replicate")
            .insert(&leader);
        let t_gossip = Crdt::gossip(crdt.clone(), exit.clone());
        let window = streamer::default_window();
        let t_listen = Crdt::listen(crdt.clone(), window.clone(), gossip, exit.clone());

        // TODO pull this socket out through the public interface
        // make sure we are on the same interface
        let mut local = replicate.local_addr().expect("tvu: get local address");
        local.set_port(0);
        let write = UdpSocket::bind(local).expect("tvu: bind to local socket");

        let blob_recycler = packet::BlobRecycler::default();
        let (blob_sender, blob_receiver) = channel();
        let t_blob_receiver = streamer::blob_receiver(
            exit.clone(),
            blob_recycler.clone(),
            replicate,
            blob_sender.clone(),
        ).expect("tvu: blob receiver creation");
        let (window_sender, window_receiver) = channel();
        let (retransmit_sender, retransmit_receiver) = channel();

        let t_retransmit = streamer::retransmitter(
            write,
            exit.clone(),
            crdt.clone(),
            blob_recycler.clone(),
            retransmit_receiver,
        );

        // TODO
        // the packets coming out of blob_receiver need to be sent to the GPU and verified,
        // then sent to the window, which does the erasure coding reconstruction
        let t_window = streamer::window(
            exit.clone(),
            crdt.clone(),
            window,
            blob_recycler.clone(),
            blob_receiver,
            window_sender,
            retransmit_sender,
        );

        let replicate_stage = ReplicateStage::new(
            bank.clone(),
            exit.clone(),
            window_receiver,
            blob_recycler.clone(),
        );

        let threads = vec![
            // replicate threads
            t_blob_receiver,
            t_retransmit,
            t_window,
            replicate_stage.thread_hdl,
            t_gossip,
            t_listen,
        ];
        Tvu {
            thread_hdls: threads,
        }
    }
}

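A minimal usage sketch (editor's illustration, reusing `test_node` below and assuming a `Mint`-funded bank and a leader's `ReplicatedData` are in scope):

    let (data, gossip, replicate, _requests, _events) = test_node();
    let bank = Arc::new(Bank::new(&Mint::new(10_000)));
    let exit = Arc::new(AtomicBool::new(false));
    let tvu = Tvu::new(bank, data, gossip, replicate, leader_data, exit.clone());
    // ... later, request shutdown and reap the service threads:
    exit.store(true, Ordering::Relaxed);
    for t in tvu.thread_hdls {
        t.join().expect("join tvu thread");
    }
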
#[cfg(test)]
use std::time::Duration;

#[cfg(test)]
pub fn test_node() -> (ReplicatedData, UdpSocket, UdpSocket, UdpSocket, UdpSocket) {
    use signature::{KeyPair, KeyPairUtil};

    let events_socket = UdpSocket::bind("127.0.0.1:0").unwrap();
    let gossip = UdpSocket::bind("127.0.0.1:0").unwrap();
    let replicate = UdpSocket::bind("127.0.0.1:0").unwrap();
    let requests_socket = UdpSocket::bind("127.0.0.1:0").unwrap();
    requests_socket
        .set_read_timeout(Some(Duration::new(1, 0)))
        .unwrap();
    let pubkey = KeyPair::new().pubkey();
    let d = ReplicatedData::new(
        pubkey,
        gossip.local_addr().unwrap(),
        replicate.local_addr().unwrap(),
        requests_socket.local_addr().unwrap(),
        events_socket.local_addr().unwrap(),
    );
    (d, gossip, replicate, requests_socket, events_socket)
}

#[cfg(test)]
pub mod tests {
    use bank::Bank;
    use bincode::serialize;
    use crdt::Crdt;
    use crdt::ReplicatedData;
    use entry::Entry;
    use event::Event;
    use hash::{hash, Hash};
    use logger;
    use mint::Mint;
    use packet::BlobRecycler;
    use signature::{KeyPair, KeyPairUtil};
    use std::collections::VecDeque;
    use std::net::UdpSocket;
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::mpsc::channel;
    use std::sync::{Arc, RwLock};
    use std::time::Duration;
    use streamer;
    use tvu::Tvu;

    /// Test that a message sent from the leader to target1 is replicated to target2
    #[test]
    fn test_replicate() {
        logger::setup();
        let leader = TestNode::new();
        let target1 = TestNode::new();
        let target2 = TestNode::new();
        let exit = Arc::new(AtomicBool::new(false));

        // start crdt_leader
        let mut crdt_l = Crdt::new(leader.data.clone());
        crdt_l.set_leader(leader.data.id);

        let cref_l = Arc::new(RwLock::new(crdt_l));
        let t_l_gossip = Crdt::gossip(cref_l.clone(), exit.clone());
        let window1 = streamer::default_window();
        let t_l_listen = Crdt::listen(cref_l, window1, leader.sockets.gossip, exit.clone());

        // start crdt2
        let mut crdt2 = Crdt::new(target2.data.clone());
        crdt2.insert(&leader.data);
        crdt2.set_leader(leader.data.id);
        let leader_id = leader.data.id;
        let cref2 = Arc::new(RwLock::new(crdt2));
        let t2_gossip = Crdt::gossip(cref2.clone(), exit.clone());
        let window2 = streamer::default_window();
        let t2_listen = Crdt::listen(cref2, window2, target2.sockets.gossip, exit.clone());

        // setup some blob services to send blobs into the socket
        // to simulate the source peer and get blobs out of the socket to
        // simulate target peer
        let recv_recycler = BlobRecycler::default();
        let resp_recycler = BlobRecycler::default();
        let (s_reader, r_reader) = channel();
        let t_receiver = streamer::blob_receiver(
            exit.clone(),
            recv_recycler.clone(),
            target2.sockets.replicate,
            s_reader,
        ).unwrap();

        // simulate leader sending messages
        let (s_responder, r_responder) = channel();
        let t_responder = streamer::responder(
            leader.sockets.requests,
            exit.clone(),
            resp_recycler.clone(),
            r_responder,
        );

        let starting_balance = 10_000;
        let mint = Mint::new(starting_balance);
        let replicate_addr = target1.data.replicate_addr;
        let bank = Arc::new(Bank::new(&mint));
        let tvu = Tvu::new(
            bank.clone(),
            target1.data,
            target1.sockets.gossip,
            target1.sockets.replicate,
            leader.data,
            exit.clone(),
        );

        let mut alice_ref_balance = starting_balance;
        let mut msgs = VecDeque::new();
        let mut cur_hash = Hash::default();
        let num_blobs = 10;
        let transfer_amount = 501;
        let bob_keypair = KeyPair::new();
        for i in 0..num_blobs {
            let b = resp_recycler.allocate();
            let b_ = b.clone();
            let mut w = b.write().unwrap();
            w.set_index(i).unwrap();
            w.set_id(leader_id).unwrap();

            let entry0 = Entry::new(&cur_hash, i, vec![]);
            bank.register_entry_id(&cur_hash);
            cur_hash = hash(&cur_hash);

            let tr1 = Event::new_transaction(
                &mint.keypair(),
                bob_keypair.pubkey(),
                transfer_amount,
                cur_hash,
            );
            bank.register_entry_id(&cur_hash);
            cur_hash = hash(&cur_hash);
            let entry1 = Entry::new(&cur_hash, i + num_blobs, vec![tr1]);
            bank.register_entry_id(&cur_hash);
            cur_hash = hash(&cur_hash);

            alice_ref_balance -= transfer_amount;

            let serialized_entry = serialize(&vec![entry0, entry1]).unwrap();

            w.data_mut()[..serialized_entry.len()].copy_from_slice(&serialized_entry);
            w.set_size(serialized_entry.len());
            w.meta.set_addr(&replicate_addr);
            drop(w);
            msgs.push_back(b_);
        }

        // send the blobs into the socket
        s_responder.send(msgs).expect("send");

        // receive retransmitted messages
        let timer = Duration::new(1, 0);
        let mut msgs: Vec<_> = Vec::new();
        while let Ok(msg) = r_reader.recv_timeout(timer) {
            trace!("msg: {:?}", msg);
            msgs.push(msg);
        }

        let alice_balance = bank.get_balance(&mint.keypair().pubkey()).unwrap();
        assert_eq!(alice_balance, alice_ref_balance);

        let bob_balance = bank.get_balance(&bob_keypair.pubkey()).unwrap();
        assert_eq!(bob_balance, starting_balance - alice_ref_balance);

        exit.store(true, Ordering::Relaxed);
        for t in tvu.thread_hdls {
            t.join().expect("join");
        }
        t2_gossip.join().expect("join");
        t2_listen.join().expect("join");
        t_receiver.join().expect("join");
        t_responder.join().expect("join");
        t_l_gossip.join().expect("join");
        t_l_listen.join().expect("join");
    }

    pub struct Sockets {
        pub gossip: UdpSocket,
        pub requests: UdpSocket,
        pub replicate: UdpSocket,
        pub event: UdpSocket,
        pub respond: UdpSocket,
        pub broadcast: UdpSocket,
    }
    pub struct TestNode {
        pub data: ReplicatedData,
        pub sockets: Sockets,
    }
    impl TestNode {
        pub fn new() -> TestNode {
            let gossip = UdpSocket::bind("0.0.0.0:0").unwrap();
            let requests = UdpSocket::bind("0.0.0.0:0").unwrap();
            let event = UdpSocket::bind("0.0.0.0:0").unwrap();
            let replicate = UdpSocket::bind("0.0.0.0:0").unwrap();
            let respond = UdpSocket::bind("0.0.0.0:0").unwrap();
            let broadcast = UdpSocket::bind("0.0.0.0:0").unwrap();
            let pubkey = KeyPair::new().pubkey();
            let data = ReplicatedData::new(
                pubkey,
                gossip.local_addr().unwrap(),
                replicate.local_addr().unwrap(),
                requests.local_addr().unwrap(),
                event.local_addr().unwrap(),
            );
            TestNode {
                data: data,
                sockets: Sockets {
                    gossip,
                    requests,
                    replicate,
                    event,
                    respond,
                    broadcast,
                },
            }
        }
    }
}
71 src/write_stage.rs Normal file
@@ -0,0 +1,71 @@
//! The `write_stage` module implements the write stage of the RPU.

use bank::Bank;
use entry::Entry;
use entry_writer::EntryWriter;
use packet;
use std::io::Write;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver};
use std::sync::{Arc, Mutex};
use std::thread::{spawn, JoinHandle};
use streamer;

pub struct WriteStage {
    pub thread_hdl: JoinHandle<()>,
    pub blob_receiver: streamer::BlobReceiver,
}

impl WriteStage {
    /// Create a new WriteStage that wraps the given Bank.
    pub fn new<W: Write + Send + 'static>(
        bank: Arc<Bank>,
        exit: Arc<AtomicBool>,
        blob_recycler: packet::BlobRecycler,
        writer: Mutex<W>,
        entry_receiver: Receiver<Entry>,
    ) -> Self {
        let (blob_sender, blob_receiver) = channel();
        let thread_hdl = spawn(move || loop {
            let entry_writer = EntryWriter::new(&bank);
            let _ = entry_writer.write_and_send_entries(
                &blob_sender,
                &blob_recycler,
                &writer,
                &entry_receiver,
            );
            if exit.load(Ordering::Relaxed) {
                info!("write_stage exiting");
                break;
            }
        });

        WriteStage {
            thread_hdl,
            blob_receiver,
        }
    }

    pub fn new_drain(
        bank: Arc<Bank>,
        exit: Arc<AtomicBool>,
        entry_receiver: Receiver<Entry>,
    ) -> Self {
        let (_blob_sender, blob_receiver) = channel();
        let thread_hdl = spawn(move || {
            let entry_writer = EntryWriter::new(&bank);
            loop {
                let _ = entry_writer.drain_entries(&entry_receiver);
                if exit.load(Ordering::Relaxed) {
                    info!("drain_service exiting");
                    break;
                }
            }
        });

        WriteStage {
            thread_hdl,
            blob_receiver,
        }
    }
}
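
A minimal usage sketch for the stage above (editor's illustration; the bank, recycler, and channel setup mirror the constructor's signature, and `std::io::stdout()` stands in for any `Write + Send` sink):

    let bank = Arc::new(Bank::new(&Mint::new(10_000)));
    let exit = Arc::new(AtomicBool::new(false));
    let blob_recycler = packet::BlobRecycler::default();
    let (entry_sender, entry_receiver) = channel();
    let write_stage = WriteStage::new(
        bank,
        exit.clone(),
        blob_recycler,
        Mutex::new(std::io::stdout()),
        entry_receiver,
    );
    // feed entries in via entry_sender; blobs come out of write_stage.blob_receiver
    exit.store(true, Ordering::Relaxed);
    write_stage.thread_hdl.join().expect("join write stage");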