Compare commits: v0.7.0-alpha...v0.7.0-beta
155 commits

Commits in this comparison (SHA1):
5d8b2f899a  490205ab84  2c0e704c82  253048f72d  e09b8430ce
9ae283dc3a  f95a79d145  0dabdfd48e  d2bb4dc14a  b4dc180592
263577773f  7d708be121  feb1669d39  2cbfe41422  b7653865b1
c72dced8fa  6feed5fd56  b8fe5ae076  7e657d65f3  a166bb816e
2952027d04  430d9d9314  fa247196c0  5d17c2b58f  6ee45d282e
cfc3bd0696  3e0e09555a  1d8bb5144e  67e0100866  f2ab08c65e
04a93050e7  03401041db  6eac744a05  ae29e2085f  7ce0b58af8
ea5663c0da  a61bfae8a4  5716898216  c0f9e452f2  4e3526394e
6806a14a3f  ec7e50b37d  e7b7dfebf5  a9e0b27772  669164bada
4f3a291391  56e37ad2f4  17de79a83a  09e9139855  76fc5822c9
c767a854ed  b60802ddff  1c35d59f26  adcaf715c6  1f9494221b
466d6f76b9  b05e6ce3db  1d812e78d5  fba494343f  0b878eccf8
98772b16d6  bb82ff0c80  71af03dc98  5671da4a0a  d63493a852
c06582ba40  450f271cf7  a31889f129  ba6a6f5227  9a38d61048
903ec27754  0b56d603c2  4ffb5d157a  816246ebee  a9881aee05
7b5b989cfe  c4b62e19f2  79a97ada04  da215d1a21  9ffc50bead
f8352bac2f  27c1410fdc  9a4733bde7  f3df5df52c  517d08c637
90dd794ae5  e0dbbba8a3  705df55a7f  d354e85a9a  e4e1f8ec1e
0112a24179  d680f6b3a5  47e732717f  ec56abfccb  e7cdb402fb
a3fe1965fb  5256e6833e  051cd2e1ff  51929e7df8  a094507bb8
8effa4e3e0  1c9e7dbc45  799b249f02  7b4a378c92  47917d00d1
a4c49af859  1c1d7d1e0e  d28536d76e  63cfbb9497  231040b93e
7c74afc35a  7878a011eb  c05416e27d  ee200d8fa0  2f42658cd4
d95e8030fc  4aedd3f1b6  bb89d6f54d  ed10841e3d  6dac87f2a7
a167d0d331  eed37820b5  124e1fa350  ac40434cdf  39354c06f8
faedb88de0  5cd1fb486f  5b5df49e6c  86f9277e2d  56b09bf0ac
f4c4b9df9c  6e568c69a7  14d624ee40  d5c0557891  1691060a22
a5ce578c72  05edfad13a  136b43f461  ac40c1818f  eb63dbcd2a
4e2f1a519e  55ec7f9fe9  b7ddefdbf9  ce361c2cdc  ed6ba55261
ec333d2bd6  551f639259  da3bb6fb93  08bcb62016  8f4ce1e8d0
4a534d6abb  b48a8c0555  1919ec247b  3966eb5374  c22ef50cae
.buildkite/hooks/post-command (new executable file, 14 lines)
@@ -0,0 +1,14 @@
#!/bin/bash -e

[[ -n "$CARGO_TARGET_CACHE_NAME" ]] || exit 0

#
# Save target/ for the next CI build on this machine
#
(
  d=$HOME/cargo-target-cache/"$CARGO_TARGET_CACHE_NAME"
  mkdir -p $d
  set -x
  rsync -a --delete --link-dest=$PWD target $d
  du -hs $d
)
.buildkite/hooks/pre-command (new executable file, 13 lines)
@@ -0,0 +1,13 @@
#!/bin/bash -e

[[ -n "$CARGO_TARGET_CACHE_NAME" ]] || exit 0

#
# Restore target/ from the previous CI build on this machine
#
(
  d=$HOME/cargo-target-cache/"$CARGO_TARGET_CACHE_NAME"
  mkdir -p $d/target
  set -x
  rsync -a --delete --link-dest=$d $d/target .
)
.gitignore (vendored, 7 changed lines)
@@ -2,3 +2,10 @@ Cargo.lock
 /target/
 **/*.rs.bk
 .cargo
+
+# node configuration files
+/config/
+/config-private/
+/config-drone/
+/config-validator/
+/config-client/
Cargo.toml (21 changed lines)
@@ -1,7 +1,7 @@
 [package]
 name = "solana"
 description = "Blockchain, Rebuilt for Scale"
-version = "0.7.0-alpha"
+version = "0.7.0-beta"
 documentation = "https://docs.rs/solana"
 homepage = "http://solana.com/"
 readme = "README.md"
@@ -17,6 +17,10 @@ license = "Apache-2.0"
 name = "solana-client-demo"
 path = "src/bin/client-demo.rs"
 
+[[bin]]
+name = "solana-wallet"
+path = "src/bin/wallet.rs"
+
 [[bin]]
 name = "solana-fullnode"
 path = "src/bin/fullnode.rs"
@@ -29,18 +33,10 @@ path = "src/bin/fullnode-config.rs"
 name = "solana-genesis"
 path = "src/bin/genesis.rs"
 
-[[bin]]
-name = "solana-genesis-demo"
-path = "src/bin/genesis-demo.rs"
-
 [[bin]]
 name = "solana-mint"
 path = "src/bin/mint.rs"
 
-[[bin]]
-name = "solana-mint-demo"
-path = "src/bin/mint-demo.rs"
-
 [[bin]]
 name = "solana-drone"
 path = "src/bin/drone.rs"
@@ -76,4 +72,11 @@ rand = "0.5.1"
 pnet_datalink = "0.21.0"
 tokio = "0.1"
 tokio-codec = "0.1"
+tokio-core = "0.1.17"
 tokio-io = "0.1"
+itertools = "0.7.8"
+bs58 = "0.2.0"
+p2p = "0.5.2"
+futures = "0.1.21"
+clap = "2.31"
+reqwest = "0.8.6"
README.md (93 changed lines)
@@ -58,7 +58,7 @@ your odds of success if you check out the
 before proceeding:
 
 ```bash
-$ git checkout v0.6.1
+$ git checkout v0.7.0-beta
 ```
 
 Configuration Setup
@@ -84,13 +84,12 @@ Now start the server:
 $ ./multinode-demo/leader.sh
 ```
 
-To run a performance-enhanced fullnode on Linux, download `libcuda_verify_ed25519.a`. Enable
-it by adding `--features=cuda` to the line that runs `solana-fullnode` in
-`leader.sh`. [CUDA 9.2](https://developer.nvidia.com/cuda-downloads) must be installed on your system.
+To run a performance-enhanced fullnode on Linux,
+[CUDA 9.2](https://developer.nvidia.com/cuda-downloads) must be installed on
+your system:
 
 ```bash
 $ ./fetch-perf-libs.sh
-$ cargo run --release --features=cuda --bin solana-fullnode -- -l leader.json < genesis.log
+$ SOLANA_CUDA=1 ./multinode-demo/leader.sh
 ```
 
 Wait a few seconds for the server to initialize. It will print "Ready." when it's ready to
@@ -102,14 +101,15 @@ Multinode Testnet
 To run a multinode testnet, after starting a leader node, spin up some validator nodes:
 
 ```bash
-$ ./multinode-demo/validator.sh ubuntu@10.0.1.51:~/solana #The leader machine
+$ ./multinode-demo/validator.sh ubuntu@10.0.1.51:~/solana 10.0.1.51
 ```
 
-As with the leader node, you can run a performance-enhanced validator fullnode by adding
-`--features=cuda` to the line that runs `solana-fullnode` in `validator.sh`.
+To run a performance-enhanced fullnode on Linux,
+[CUDA 9.2](https://developer.nvidia.com/cuda-downloads) must be installed on
+your system:
 ```bash
-$ cargo run --release --features=cuda --bin solana-fullnode -- -l validator.json -v leader.json < genesis.log
+$ ./fetch-perf-libs.sh
+$ SOLANA_CUDA=1 ./multinode-demo/leader.sh ubuntu@10.0.1.51:~/solana 10.0.1.51
 ```
 
 
@@ -152,6 +152,72 @@ $ snap info solana
 $ sudo snap refresh solana --devmode
 ```
 
+### Daemon support
+The snap supports running a leader, validator or leader+drone node as a system
+daemon.
+
+Run `sudo snap get solana` to view the current daemon configuration, and
+`sudo snap logs -f solana` to view the daemon logs.
+
+Disable the daemon at any time by running:
+```bash
+$ sudo snap set solana mode=
+```
+
+Runtime configuration files for the daemon can be found in
+`/var/snap/solana/current/config`.
+
+#### Leader daemon
+```bash
+$ sudo snap set solana mode=leader
+```
+
+If CUDA is available:
+```bash
+$ sudo snap set solana mode=leader enable-cuda=1
+```
+
+`rsync` must be configured and running on the leader.
+
+1. Ensure rsync is installed with `sudo apt-get -y install rsync`
+2. Edit `/etc/rsyncd.conf` to include the following
+```
+[config]
+path = /var/snap/solana/current/config
+hosts allow = *
+read only = true
+```
+3. Run `sudo systemctl enable rsync; sudo systemctl start rsync`
+4. Test by running `rsync -Pzravv rsync://<ip-address-of-leader>/config
+solana-config` from another machine. **If the leader is running on a cloud
+provider it may be necessary to configure the Firewall rules to permit ingress
+to port tcp:873, tcp:9900 and the port range udp:8000-udp:10000**
+
+
+To run both the Leader and Drone:
+```bash
+$ sudo snap set solana mode=leader+drone
+
+```
+
+#### Validator daemon
+```bash
+$ sudo snap set solana mode=validator
+
+```
+If CUDA is available:
+```bash
+$ sudo snap set solana mode=validator enable-cuda=1
+```
+
+By default the validator will connect to **testnet.solana.com**, override
+the leader IP address by running:
+```bash
+$ sudo snap set solana mode=validator leader-address=127.0.0.1 #<-- change IP address
+```
+It's assumed that the leader will be running `rsync` configured as described in
+the previous **Leader daemon** section.
+
 Developing
 ===
 
@@ -172,6 +238,11 @@ If your rustc version is lower than 1.26.1, please update it:
 $ rustup update
 ```
 
+On Linux systems you may need to install libssl-dev and pkg-config. On Ubuntu:
+```bash
+$ sudo apt-get install libssl-dev pkg-config
+```
+
 Download the source code:
 
 ```bash
ci/buildkite-snap.yml (new file, 4 lines)
@@ -0,0 +1,4 @@
steps:
  - command: "ci/snap.sh"
    timeout_in_minutes: 20
    name: "snap [public]"
@@ -1,16 +1,21 @@
 steps:
   - command: "ci/docker-run.sh rust ci/test-stable.sh"
     name: "stable [public]"
+    env:
+      CARGO_TARGET_CACHE_NAME: "stable"
     timeout_in_minutes: 20
   - command: "ci/shellcheck.sh"
     name: "shellcheck [public]"
     timeout_in_minutes: 20
-  - wait
   - command: "ci/docker-run.sh rustlang/rust:nightly ci/test-nightly.sh"
     name: "nightly [public]"
+    env:
+      CARGO_TARGET_CACHE_NAME: "nightly"
-    timeout_in_minutes: 20
+    timeout_in_minutes: 30
   - command: "ci/test-stable-perf.sh"
     name: "stable-perf [public]"
+    env:
+      CARGO_TARGET_CACHE_NAME: "stable-perf"
     timeout_in_minutes: 20
     retry:
       automatic:
@@ -18,14 +23,16 @@ steps:
         limit: 2
     agents:
       - "queue=cuda"
-  - command: "ci/snap.sh [public]"
+  - command: "ci/pr-snap.sh"
     timeout_in_minutes: 20
     name: "snap [public]"
   - wait
-  - command: "ci/publish-crate.sh [public]"
+  - command: "ci/publish-crate.sh"
     timeout_in_minutes: 20
-    name: "publish crate"
+    name: "publish crate [public]"
-  - command: "ci/hoover.sh [public]"
+  - command: "ci/hoover.sh"
     timeout_in_minutes: 20
-    name: "clean agent"
+    name: "clean agent [public]"
+  - trigger: "solana-snap"
+    branches: "!pull/*"
+    async: true
@@ -22,12 +22,11 @@ shift
 ARGS=(
   --workdir /solana
   --volume "$PWD:/solana"
-  --env "HOME=/solana"
+  --volume "$HOME:/home"
+  --env "CARGO_HOME=/home/.cargo"
   --rm
 )
 
-ARGS+=(--env "CARGO_HOME=/solana/.cargo")
-
 # kcov tries to set the personality of the binary which docker
 # doesn't allow by default.
 ARGS+=(--security-opt "seccomp=unconfined")
ci/is-pr.sh (new executable file, 8 lines)
@@ -0,0 +1,8 @@
#!/bin/bash -e
#
# The standard BUILDKITE_PULL_REQUEST environment variable is always "false" due
# to how solana-ci-gate is used to trigger PR builds rather than using the
# standard Buildkite PR trigger.
#

[[ $BUILDKITE_BRANCH =~ pull/* ]]
ci/pr-snap.sh (new executable file, 18 lines)
@@ -0,0 +1,18 @@
#!/bin/bash -e
#
# Only run snap.sh for pull requests that modify files under /snap
#

cd "$(dirname "$0")"

if ./is-pr.sh; then
  affected_files="$(buildkite-agent meta-data get affected_files)"
  echo "Affected files in this PR: $affected_files"
  if [[ ! ":$affected_files:" =~ :snap/ ]]; then
    echo "Skipping snap build as no files under /snap were modified"
    exit 0
  fi
  exec ./snap.sh
else
  echo "Skipping snap build as this is not a pull request"
fi
@@ -3,7 +3,7 @@
 cd "$(dirname "$0")/.."
 
 DRYRUN=
-if [[ -z $BUILDKITE_BRANCH || $BUILDKITE_BRANCH =~ pull/* ]]; then
+if [[ -z $BUILDKITE_BRANCH ]] || ./ci/is-pr.sh; then
   DRYRUN="echo"
 fi
 
@@ -36,5 +36,8 @@ set -x
 echo --- build
 snapcraft
 
+source ci/upload_ci_artifact.sh
+upload_ci_artifact solana_*.snap
+
 echo --- publish
 $DRYRUN snapcraft push solana_*.snap --release $SNAP_CHANNEL
@@ -16,7 +16,9 @@ _ cargo test --verbose --features unstable
 _ cargo bench --verbose --features unstable
 
-# Coverage ...
+exit 0
+
+# Coverage disabled (see issue #433)
 _ cargo install --force cargo-cov
 _ cargo cov test
 _ cargo cov report
@@ -12,7 +12,7 @@ _() {
 }
 
 _ rustup component add rustfmt-preview
-_ cargo fmt -- --write-mode=diff
+_ cargo fmt -- --write-mode=check
 _ cargo build --verbose
 _ cargo test --verbose
 _ cargo test -- --ignored
ci/upload_ci_artifact.sh (new file, 18 lines)
@@ -0,0 +1,18 @@
# |source| me

upload_ci_artifact() {
  echo "--- artifact: $1"
  if [[ -r "$1" ]]; then
    ls -l "$1"
    if ${BUILDKITE:-false}; then
      (
        set -x
        buildkite-agent artifact upload "$1"
      )
    fi
  else
    echo ^^^ +++
    echo "$1 not found"
  fi
}

@@ -1,17 +1,23 @@
 #!/bin/bash
+#
+# usage: $0 <rsync network path to solana repo on leader machine> <number of nodes in the network>"
+#
 
-if [[ -z $1 ]]; then
-  echo "usage: $0 [network path to solana repo on leader machine] <number of nodes in the network>"
-  exit 1
-fi
+here=$(dirname "$0")
+# shellcheck source=multinode-demo/common.sh
+source "$here"/common.sh
 
-LEADER=$1
-COUNT=${2:-1}
+leader=${1:-${here}/..} # Default to local solana repo
+count=${2:-1}
 
-rsync -vz "$LEADER"/{leader.json,mint-demo.json} . || exit $?
+rsync_leader_url=$(rsync_url "$leader")
 
-# if RUST_LOG is unset, default to info
-export RUST_LOG=${RUST_LOG:-solana=info}
+set -ex
+mkdir -p "$SOLANA_CONFIG_CLIENT_DIR"
+$rsync -vPz "$rsync_leader_url"/config/leader.json "$SOLANA_CONFIG_CLIENT_DIR"/
+$rsync -vPz "$rsync_leader_url"/config-private/mint.json "$SOLANA_CONFIG_CLIENT_DIR"/
 
-cargo run --release --bin solana-client-demo -- \
-  -n "$COUNT" -l leader.json -d < mint-demo.json 2>&1 | tee client.log
+# shellcheck disable=SC2086 # $solana_client_demo should not be quoted
+exec $solana_client_demo \
+  -n "$count" -l "$SOLANA_CONFIG_CLIENT_DIR"/leader.json \
+  < "$SOLANA_CONFIG_CLIENT_DIR"/mint.json
multinode-demo/common.sh (new file, 85 lines)
@@ -0,0 +1,85 @@
# |source| this file
#
# Disable complaints about unused variables in this file:
# shellcheck disable=2034

rsync=rsync
if [[ -d "$SNAP" ]]; then # Running inside a Linux Snap?
  solana_program() {
    declare program="$1"
    if [[ "$program" = wallet ]]; then
      # TODO: Merge wallet.sh functionality into solana-wallet proper and
      # remove this special case
      printf "%s/bin/solana-%s" "$SNAP" "$program"
    else
      printf "%s/command-%s.wrapper" "$SNAP" "$program"
    fi
  }
  rsync="$SNAP"/bin/rsync
  SOLANA_CUDA="$(snapctl get enable-cuda)"

elif [[ -n "$USE_SNAP" ]]; then # Use the Linux Snap binaries
  solana_program() {
    declare program="$1"
    printf "solana.%s" "$program"
  }
elif [[ -n "$USE_INSTALL" ]]; then # Assume |cargo install| was run
  solana_program() {
    declare program="$1"
    printf "solana-%s" "$program"
  }
  # CUDA was/wasn't selected at build time, can't affect CUDA state here
  unset SOLANA_CUDA
else
  solana_program() {
    declare program="$1"
    declare features=""
    if [[ "$program" =~ ^(.*)-cuda$ ]]; then
      program=${BASH_REMATCH[1]}
      features="--features=cuda,erasure"
    fi
    if [[ -z "$DEBUG" ]]; then
      maybe_release=--release
    fi
    printf "cargo run $maybe_release --bin solana-%s %s -- " "$program" "$features"
  }
fi

solana_client_demo=$(solana_program client-demo)
solana_wallet=$(solana_program wallet)
solana_drone=$(solana_program drone)
solana_fullnode=$(solana_program fullnode)
solana_fullnode_config=$(solana_program fullnode-config)
solana_fullnode_cuda=$(solana_program fullnode-cuda)
solana_genesis=$(solana_program genesis)
solana_mint=$(solana_program mint)

export RUST_LOG=${RUST_LOG:-solana=info} # if RUST_LOG is unset, default to info
export RUST_BACKTRACE=1

tune_networking() {
  [[ $(uname) = Linux ]] && (set -x; sudo sysctl -w net.core.rmem_max=26214400 1>/dev/null 2>/dev/null)
}

SOLANA_CONFIG_DIR=${SNAP_DATA:-$PWD}/config
SOLANA_CONFIG_PRIVATE_DIR=${SNAP_DATA:-$PWD}/config-private
SOLANA_CONFIG_CLIENT_DIR=${SNAP_USER_DATA:-$PWD}/config-client-client

rsync_url() { # adds the 'rsync://` prefix to URLs that need it
  declare url="$1"

  if [[ "$url" =~ ^.*:.*$ ]]; then
    # assume remote-shell transport when colon is present, use $url unmodified
    echo "$url"
    return
  fi

  if [[ -d "$url" ]]; then
    # assume local directory if $url is a valid directory, use $url unmodified
    echo "$url"
    return
  fi

  # Default to rsync:// URL
  echo "rsync://$url"
}
multinode-demo/drone.sh (new executable file, 41 lines)
@@ -0,0 +1,41 @@
#!/bin/bash
#
# usage: $0 <rsync network path to solana repo on leader machine>
#

here=$(dirname "$0")
# shellcheck source=multinode-demo/common.sh
source "$here"/common.sh
SOLANA_CONFIG_DIR="$SOLANA_CONFIG_DIR"-drone

if [[ -d "$SNAP" ]]; then
  # Exit if mode is not yet configured
  # (typically the case after the Snap is first installed)
  [[ -n "$(snapctl get mode)" ]] || exit 0

  # Select leader from the Snap configuration
  leader_address="$(snapctl get leader-address)"
  if [[ -z "$leader_address" ]]; then
    # Assume drone is running on the same node as the leader by default
    leader_address="localhost"
  fi
  leader="$leader_address"
else
  leader=${1:-${here}/..} # Default to local solana repo
fi

[[ -f "$SOLANA_CONFIG_PRIVATE_DIR"/mint.json ]] || {
  echo "$SOLANA_CONFIG_PRIVATE_DIR/mint.json not found, create it by running:"
  echo
  echo " ${here}/setup.sh -t leader"
  exit 1
}

rsync_leader_url=$(rsync_url "$leader")
set -ex
mkdir -p "$SOLANA_CONFIG_DIR"
$rsync -vPz "$rsync_leader_url"/config/leader.json "$SOLANA_CONFIG_DIR"/

# shellcheck disable=SC2086 # $solana_drone should not be quoted
exec $solana_drone \
  -l "$SOLANA_CONFIG_DIR"/leader.json < "$SOLANA_CONFIG_PRIVATE_DIR"/mint.json
@@ -1,28 +1,33 @@
 #!/bin/bash
 
 here=$(dirname "$0")
+# shellcheck source=multinode-demo/common.sh
+source "$here"/common.sh
 
-# shellcheck source=/dev/null
-. "${here}"/myip.sh
+if [[ -d "$SNAP" ]]; then
+  # Exit if mode is not yet configured
+  # (typically the case after the Snap is first installed)
+  [[ -n "$(snapctl get mode)" ]] || exit 0
+fi
 
-myip=$(myip) || exit $?
-
-[[ -f leader-"${myip}".json ]] || {
-  echo "I can't find a matching leader config file for \"${myip}\"...
-  Please run ${here}/setup.sh first.
-  "
+[[ -f "$SOLANA_CONFIG_DIR"/leader.json ]] || {
+  echo "$SOLANA_CONFIG_DIR/leader.json not found, create it by running:"
+  echo
+  echo " ${here}/setup.sh -t leader"
   exit 1
 }
 
-# if RUST_LOG is unset, default to info
-export RUST_LOG=${RUST_LOG:-solana=info}
+if [[ -n "$SOLANA_CUDA" ]]; then
+  program="$solana_fullnode_cuda"
+else
+  program="$solana_fullnode"
+fi
 
-[[ $(uname) = Linux ]] && sudo sysctl -w net.core.rmem_max=26214400 1>/dev/null 2>/dev/null
+tune_networking
 
-# this makes a leader.json file available alongside genesis, etc. for
-# validators and clients
-cp leader-"${myip}".json leader.json
-
-cargo run --release --bin solana-fullnode -- \
-  -l leader-"${myip}".json \
-  < genesis.log tx-*.log \
-  > tx-"$(date -u +%Y%m%d%H%M%S%N)".log
+# shellcheck disable=SC2086 # $program should not be quoted
+exec $program \
+  -l "$SOLANA_CONFIG_DIR"/leader.json \
+  < <(shopt -s nullglob && cat "$SOLANA_CONFIG_DIR"/genesis.log \
+        "$SOLANA_CONFIG_DIR"/tx-*.log) \
+  > "$SOLANA_CONFIG_DIR"/tx-"$(date -u +%Y%m%d%H%M%S%N)".log
@@ -2,16 +2,17 @@
 function myip()
 {
-  declare ipaddrs=( )
+  # shellcheck disable=SC2207
+  declare ipaddrs=(
     # query interwebs
-  mapfile -t ipaddrs < <(curl -s ifconfig.co)
+    $(curl -s ifconfig.co)
 
     # machine's interfaces
-  mapfile -t -O "${#ipaddrs[*]}" ipaddrs < \
-    <(ifconfig | awk '/inet(6)? (addr:)?/ {print $2}')
-
-  ipaddrs=( "${extips[@]}" "${ipaddrs[@]}" )
+    $(ifconfig |
+      awk '/inet addr:/ {gsub("addr:","",$2); print $2; next}
+           /inet6 addr:/ {gsub("/.*", "", $3); print $3; next}
+           /inet(6)? / {print $2}'
+    )
+  )
 
   if (( ! ${#ipaddrs[*]} ))
   then
@@ -1,15 +1,105 @@
 #!/bin/bash
 
 here=$(dirname "$0")
+# shellcheck source=multinode-demo/common.sh
+source "$here"/common.sh
 
-# shellcheck source=/dev/null
-. "${here}"/myip.sh
-
-myip=$(myip) || exit $?
-
-num_tokens=${1:-1000000000}
-
-cargo run --release --bin solana-mint-demo <<<"${num_tokens}" > mint-demo.json
-cargo run --release --bin solana-genesis-demo < mint-demo.json > genesis.log
-
-cargo run --release --bin solana-fullnode-config -- -d > leader-"${myip}".json
-cargo run --release --bin solana-fullnode-config -- -b 9000 -d > validator-"${myip}".json
+usage () {
+  exitcode=0
+  if [[ -n "$1" ]]; then
+    exitcode=1
+    echo "Error: $*"
+  fi
+  cat <<EOF
+usage: $0 [-n num_tokens] [-l] [-p] [-t node_type]
+
+Creates a fullnode configuration
+
+  -n num_tokens - Number of tokens to create
+  -l            - Detect network address from local machine configuration, which
+                  may be a private IP address unaccessible on the Intenet (default)
+  -p            - Detect public address using public Internet servers
+  -t node_type  - Create configuration files only for this kind of node. Valid
+                  options are validator or leader. Creates configuration files
+                  for both by default
+
+EOF
+  exit $exitcode
+}
+
+ip_address_arg=-l
+num_tokens=1000000000
+node_type_leader=true
+node_type_validator=true
+while getopts "h?n:lpt:" opt; do
+  case $opt in
+  h|\?)
+    usage
+    exit 0
+    ;;
+  l)
+    ip_address_arg=-l
+    ;;
+  p)
+    ip_address_arg=-p
+    ;;
+  n)
+    num_tokens="$OPTARG"
+    ;;
+  t)
+    node_type="$OPTARG"
+    case $OPTARG in
+    leader)
+      node_type_leader=true
+      node_type_validator=false
+      ;;
+    validator)
+      node_type_leader=false
+      node_type_validator=true
+      ;;
+    *)
+      usage "Error: unknown node type: $node_type"
+      ;;
+    esac
+    ;;
+  *)
+    usage "Error: unhandled option: $opt"
+    ;;
+  esac
+done
+
+
+leader_address_args=("$ip_address_arg")
+validator_address_args=("$ip_address_arg" -b 9000)
+
+set -e
+
+echo "Cleaning $SOLANA_CONFIG_DIR"
+rm -rvf "$SOLANA_CONFIG_DIR"
+mkdir -p "$SOLANA_CONFIG_DIR"
+
+
+if $node_type_leader; then
+  rm -rvf "$SOLANA_CONFIG_PRIVATE_DIR"
+  mkdir -p "$SOLANA_CONFIG_PRIVATE_DIR"
+
+  echo "Creating $SOLANA_CONFIG_DIR/mint.json with $num_tokens tokens"
+  $solana_mint <<<"$num_tokens" > "$SOLANA_CONFIG_PRIVATE_DIR"/mint.json
+
+  echo "Creating $SOLANA_CONFIG_DIR/genesis.log"
+  $solana_genesis < "$SOLANA_CONFIG_PRIVATE_DIR"/mint.json > "$SOLANA_CONFIG_DIR"/genesis.log
+
+  echo "Creating $SOLANA_CONFIG_DIR/leader.json"
+  $solana_fullnode_config "${leader_address_args[@]}" > "$SOLANA_CONFIG_DIR"/leader.json
+fi
+
+
+if $node_type_validator; then
+  echo "Creating $SOLANA_CONFIG_DIR/validator.json"
+  $solana_fullnode_config "${validator_address_args[@]}" > "$SOLANA_CONFIG_DIR"/validator.json
+fi
+
+ls -lh "$SOLANA_CONFIG_DIR"/
+if $node_type_leader; then
+  ls -lh "$SOLANA_CONFIG_PRIVATE_DIR"
+fi
multinode-demo/start_nodes.sh (new executable file, 80 lines)
@@ -0,0 +1,80 @@
#!/bin/bash

ip_addr_file=$1
remote_user=$2
ssh_keys=$3

usage() {
  echo -e "\\tUsage: $0 <IP Address array> <username> [path to ssh keys]\\n"
  echo -e "\\t <IP Address array>: A bash script that exports an array of IP addresses, ip_addr_array. Elements of the array are public IP address of remote nodes."
  echo -e "\\t <username>: The username for logging into remote nodes."
  echo -e "\\t [path to ssh keys]: The public/private key pair that remote nodes can use to perform rsync and ssh among themselves. Must contain pub, priv and authorized_keys.\\n"
}

# Sample IP Address array file contents
# ip_addr_array=(192.168.1.1 192.168.1.5 192.168.2.2)

if [[ -z "$ip_addr_file" ]]; then
  usage
  exit 1
fi

if [[ -z "$remote_user" ]]; then
  usage
  exit 1
fi

# Build and install locally
PATH="$HOME"/.cargo/bin:"$PATH"
cargo install --force

ip_addr_array=()
# Get IP address array
# shellcheck source=/dev/null
source "$ip_addr_file"

# shellcheck disable=SC2089,SC2016
ssh_command_prefix='export PATH="$HOME/.cargo/bin:$PATH"; cd solana; USE_INSTALL=1 ./multinode-demo/'

count=0
leader=
for ip_addr in "${ip_addr_array[@]}"; do
  echo "$ip_addr"

  # Deploy build and scripts to remote node
  rsync -r -av ~/.cargo/bin "$remote_user"@"$ip_addr":~/.cargo
  rsync -r -av ./multinode-demo "$remote_user"@"$ip_addr":~/solana/

  # If provided, deploy SSH keys
  if [[ -z $ssh_keys ]]; then
    echo "skip copying the ssh keys"
  else
    rsync -r -av "$ssh_keys"/* "$remote_user"@"$ip_addr":~/.ssh/
  fi

  # Stop current nodes
  ssh "$remote_user"@"$ip_addr" 'pkill -9 solana-fullnode'
  ssh "$remote_user"@"$ip_addr" 'pkill -9 solana-client-demo'

  # Run setup
  ssh "$remote_user"@"$ip_addr" "$ssh_command_prefix"'setup.sh -p "$ip_addr"'

  if (( !count )); then
    # Start the leader on the first node
    echo "Starting leader node $ip_addr"
    ssh -n -f "$remote_user"@"$ip_addr" "$ssh_command_prefix"'leader.sh > leader.log 2>&1'
    leader=${ip_addr_array[0]}
  else
    # Start validator on all other nodes
    echo "Starting validator node $ip_addr"
    ssh -n -f "$remote_user"@"$ip_addr" "$ssh_command_prefix""validator.sh $remote_user@$leader:~/solana $leader > validator.log 2>&1"
  fi

  (( count++ ))

  if (( count == ${#ip_addr_array[@]} )); then
    # Launch client demo on the last node
    echo "Starting client demo on $ip_addr"
    ssh -n -f "$remote_user"@"$ip_addr" "$ssh_command_prefix""client.sh $remote_user@$leader:~/solana $count > client.log 2>&1"
  fi
done
multinode-demo/test/wallet-sanity.sh (new executable file, 41 lines)
@@ -0,0 +1,41 @@
#!/bin/bash -e
#
# Wallet sanity test
#

here=$(dirname "$0")
cd "$here"

wallet="../wallet.sh $1"

# Tokens transferred to this address are lost forever...
garbage_address=vS3ngn1TfQmpsW1Z4NkLuqNAQFF3dYQw8UZ6TCx9bmq

check_balance_output() {
  declare expected_output="$1"
  exec 42>&1
  output=$($wallet balance | tee >(cat - >&42))
  if [[ ! "$output" =~ $expected_output ]]; then
    echo "Balance is incorrect. Expected: $expected_output"
    exit 1
  fi
}

pay_and_confirm() {
  exec 42>&1
  signature=$($wallet pay "$@" | tee >(cat - >&42))
  $wallet confirm "$signature"
}

$wallet reset
$wallet address
check_balance_output "Your balance is: 0"
$wallet airdrop --tokens 60
check_balance_output "Your balance is: 60"
$wallet airdrop --tokens 40
check_balance_output "Your balance is: 100"
pay_and_confirm --to $garbage_address --tokens 99
check_balance_output "Your balance is: 1"

echo PASS
exit 0
@@ -1,32 +1,80 @@
 #!/bin/bash
 here=$(dirname "$0")
+# shellcheck source=multinode-demo/common.sh
+source "$here"/common.sh
 
-# shellcheck source=/dev/null
-. "${here}"/myip.sh
-
-leader=$1
-
-[[ -z ${leader} ]] && {
-  echo "usage: $0 [network path to solana repo on leader machine]"
-  exit 1
-}
-
-myip=$(myip) || exit $?
-
-[[ -f validator-"$myip".json ]] || {
-  echo "I can't find a matching validator config file for \"${myip}\"...
-  Please run ${here}/setup.sh first.
-  "
-  exit 1
-}
-
-rsync -vz "${leader}"/{mint-demo.json,leader.json,genesis.log,tx-*.log} . || exit $?
-
-[[ $(uname) = Linux ]] && sudo sysctl -w net.core.rmem_max=26214400 1>/dev/null 2>/dev/null
-
-# if RUST_LOG is unset, default to info
-export RUST_LOG=${RUST_LOG:-solana=info}
-
-cargo run --release --bin solana-fullnode -- \
-  -l validator-"${myip}".json -v leader.json \
-  < genesis.log tx-*.log
+usage() {
+  if [[ -n "$1" ]]; then
+    echo "$*"
+    echo
+  fi
+  echo "usage: $0 [rsync network path to solana repo on leader machine] [network ip address of leader]"
+  exit 1
+}
+
+if [[ "$1" = "-h" || -n "$3" ]]; then
+  usage
+fi
+
+if [[ -d "$SNAP" ]]; then
+  # Exit if mode is not yet configured
+  # (typically the case after the Snap is first installed)
+  [[ -n "$(snapctl get mode)" ]] || exit 0
+
+  # Select leader from the Snap configuration
+  leader_address="$(snapctl get leader-address)"
+  if [[ -z "$leader_address" ]]; then
+    # Assume public testnet by default
+    leader_address=35.230.65.68 # testnet.solana.com
+  fi
+  leader="$leader_address"
+else
+  if [[ -n "$3" ]]; then
+    usage
+  fi
+
+  if [[ -z "$1" ]]; then
+    leader=${1:-${here}/..} # Default to local solana repo
+    leader_address=${2:-127.0.0.1} # Default to local leader
+  elif [[ -z "$2" ]]; then
+    leader="$1"
+    leader_address=$(dig +short "$1" | head -n1)
+    if [[ -z "$leader_address" ]]; then
+      usage "Error: unable to resolve IP address for $leader"
+    fi
+  else
+    leader="$1"
+    leader_address="$2"
+  fi
+fi
+leader_port=8001
+
+if [[ -n "$SOLANA_CUDA" ]]; then
+  program="$solana_fullnode_cuda"
+else
+  program="$solana_fullnode"
+fi
+
+
+[[ -f "$SOLANA_CONFIG_DIR"/validator.json ]] || {
+  echo "$SOLANA_CONFIG_DIR/validator.json not found, create it by running:"
+  echo
+  echo " ${here}/setup.sh -t validator"
+  exit 1
+}
+
+rsync_leader_url=$(rsync_url "$leader")
+
+set -ex
+SOLANA_LEADER_CONFIG_DIR="$SOLANA_CONFIG_DIR"/leader-config
+rm -rf "$SOLANA_LEADER_CONFIG_DIR"
+$rsync -vPrz "$rsync_leader_url"/config/ "$SOLANA_LEADER_CONFIG_DIR"
+ls -lh "$SOLANA_LEADER_CONFIG_DIR"
+
+tune_networking
+
+# shellcheck disable=SC2086 # $program should not be quoted
+exec $program \
+  -l "$SOLANA_CONFIG_DIR"/validator.json -t "$leader_address:$leader_port" \
+  < <(shopt -s nullglob && cat "$SOLANA_LEADER_CONFIG_DIR"/genesis.log \
+        "$SOLANA_LEADER_CONFIG_DIR"/tx-*.log)
multinode-demo/wallet.sh (new executable file, 47 lines)
@@ -0,0 +1,47 @@
#!/bin/bash
#
# usage: $0 <rsync network path to solana repo on leader machine>"
#

here=$(dirname "$0")
# shellcheck source=multinode-demo/common.sh
source "$here"/common.sh

# if $1 isn't host:path, something.com, or a valid local path
if [[ ${1%:} != "$1" || "$1" =~ [^.]\.[^.] || -d $1 ]]; then
  leader=$1 # interpret
  shift
else
  if [[ -d "$SNAP" ]]; then
    leader=testnet.solana.com # Default to testnet when running as a Snap
  else
    leader=$here/.. # Default to local solana repo
  fi
fi

if [[ "$1" = "reset" ]]; then
  echo Wallet resetting
  rm -rf "$SOLANA_CONFIG_CLIENT_DIR"
  exit 0
fi

rsync_leader_url=$(rsync_url "$leader")

set -e
mkdir -p "$SOLANA_CONFIG_CLIENT_DIR"
if [[ ! -r "$SOLANA_CONFIG_CLIENT_DIR"/leader.json ]]; then
  (
    set -x
    $rsync -vPz "$rsync_leader_url"/config/leader.json "$SOLANA_CONFIG_CLIENT_DIR"/
  )
fi

client_json="$SOLANA_CONFIG_CLIENT_DIR"/client.json
if [[ ! -r $client_json ]]; then
  $solana_mint <<<0 > "$client_json"
fi

set -x
# shellcheck disable=SC2086 # $solana_wallet should not be quoted
exec $solana_wallet \
  -l "$SOLANA_CONFIG_CLIENT_DIR"/leader.json -m "$client_json" "$@"
@@ -2,6 +2,10 @@
 
 The goal of this RFC is to define a set of constraints for APIs and runtime such that we can execute our smart contracts safely on massively parallel hardware such as a GPU. Our runtime is built around an OS *syscall* primitive. The difference in blockchain is that now the OS does a cryptographic check of memory region ownership before accessing the memory in the Solana kernel.
 
+## Version
+
+version 0.1
+
 ## Toolchain Stack
 
     +---------------------+ +---------------------+
@@ -50,6 +54,10 @@ For 3, every load and store that is relative can be checked to be within the exp
 
 For 4, Fully linked PIC ELF with just a single RX segment. Effectively we are linking a shared object with `-fpic -target bpf` and with a linker script to collect everything into a single RX segment. Writable globals are not supported.
 
+### Address Checks
+
+The interface to the module takes a `&mut Vec<Vec<u8>>` in rust, or a `int sz, void* data[sz], int szs[sz]` in `C`. Given the module's bytecode, for each method, we need to analyze the bounds on load and stores into each buffer the module uses. This check needs to be done `on chain`, and after those bounds are computed we can verify that the user supplied array of buffers will not cause a memory fault. For load and stores that we cannot analyze, we can replace with a `safe_load` and `safe_store` instruction that will check the table for access.
+
 ## Loader
 The loader is our first smart contract. The job of this contract is to load the actual program with its own instance data. The loader will verify the bytecode and that the object implements the expected entry points.
 
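The Address Checks paragraph added above ends with the idea of rewriting unprovable loads and stores as `safe_load`/`safe_store` instructions that consult a bounds check at run time. A minimal Rust sketch of that idea follows; it is illustrative only, and the names (`Entrypoint`, `safe_load`) and signatures are assumptions of this illustration rather than code from the repository.

```rust
/// The module interface described above: a method receives a set of
/// caller-supplied byte buffers.
pub type Entrypoint = fn(data: &mut Vec<Vec<u8>>);

/// Checked load used where static analysis could not bound an access:
/// returns None instead of faulting when the access falls outside the
/// selected buffer.
pub fn safe_load(data: &[Vec<u8>], buf: usize, offset: usize, len: usize) -> Option<&[u8]> {
    let b = data.get(buf)?;
    let end = offset.checked_add(len)?;
    if end <= b.len() {
        Some(&b[offset..end])
    } else {
        None
    }
}
```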
rfcs/rfc-002-consensus.md (new file, 122 lines)
@@ -0,0 +1,122 @@
# Consensus

VERY WIP

The goal of this RFC is to define the consensus algorithm used in solana. This proposal covers a Proof of Stake algorithm that leverages Proof of History. PoH is a permissionless clock for blockchain that is available before consensus. This PoS approach leverages PoH to make strong assumptions about time between partitions.

## Version

version 0.1

## Message Flow

1. Transactions are ingested at the leader.
2. Leader filters for valid transactions
3. Leader executes valid transactions on its state
4. Leader packages transactions into blobs
5. Leader transmits blobs to validator nodes.
   a. The set of supermajority + `M` by stake weight of nodes is rotated in round robin fashion.
6. Validators retransmit blobs to peers in their set and to further downstream nodes.
7. Validators validate the transactions and execute them on their state.
8. Validators compute the hash of the state.
9. Validators transmit votes to the leader.
   a. Votes are signatures of the hash of the computed state.
10. Leader executes the votes as any other transaction and broadcasts them out to the network
11. Validators observe their votes, and all the votes from the network.
12. Validators continue voting if the supermajority of stake is observed in the vote for the same hash.

Supermajority is defined as `2/3rds + 1` vote of the PoS stakes.

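As an aside (not part of the RFC text), the supermajority rule above is simple enough to state as code. A small Rust sketch, where the integer convention is an assumption of this illustration:

```rust
/// True when `observed_stake` is a supermajority (2/3rds + 1) of `total_stake`.
/// For example, with total_stake = 300 this requires observed_stake >= 201.
fn is_supermajority(observed_stake: u64, total_stake: u64) -> bool {
    observed_stake >= (total_stake * 2) / 3 + 1
}
```

Step 12 of the message flow is then: keep voting only while this check holds for the votes observed on the same state hash.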
## Staking

Validators `stake` some of their spendable sol into a staking account. The stakes are not spendable and can only be used for voting.

```
CreateStake(
    PoH count,
    PoH hash,
    source public key,
    amount,
    destination public key,
    proof of ownership of destination public key,
    signature of the message with the source keypair
)
```

Creating the stake has a warmup period of TBD. Unstaking requires the node to miss a certain amount of validation votes.

## Validation Votes

```
Validate(
    PoH count,
    PoH hash,
    stake public key,
    signature of the state,
    signature of the message with the stake keypair
)
```

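The `CreateStake` and `Validate` messages above are given as field lists only. One way to picture them as Rust data, purely as a sketch (the `Hash`, `PublicKey`, and `Signature` aliases are placeholders, not types from this repository):

```rust
// Placeholder aliases standing in for the real cryptographic types.
type Hash = [u8; 32];
type PublicKey = [u8; 32];
type Signature = [u8; 64];

/// Moves spendable sol into a non-spendable staking account used only for voting.
struct CreateStake {
    poh_count: u64,
    poh_hash: Hash,
    source: PublicKey,
    amount: u64,
    destination: PublicKey,
    destination_proof: Signature, // proof of ownership of the destination key
    signature: Signature,         // signed with the source keypair
}

/// A validation vote: a signature over the computed state at a given PoH count.
struct Validate {
    poh_count: u64,
    poh_hash: Hash,
    stake: PublicKey,
    state_signature: Signature,
    signature: Signature, // signed with the stake keypair
}
```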
## Validator Slashing

Validators `stake` some of their spendable sol into a staking account. The stakes are not spendable and can only be used for voting.

```
Slash(Validate(
    PoH count,
    PoH hash,
    stake public key,
    ...
    signature of the message with the stake keypair
))
```

When the `Slash` vote is processed, validators should lookup `PoH hash` at `PoH count` and compare it with the message. If they do not match, the stake at `stake public key` should be set to `0`.

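The slashing rule above is a lookup-and-compare: find the locally recorded PoH hash at the claimed count and zero the stake if the slashed vote disagrees with it. A hedged Rust sketch; the surrounding state type is invented for illustration and is not the repository's data structure:

```rust
use std::collections::HashMap;

struct SlashState {
    poh_hashes: HashMap<u64, [u8; 32]>, // PoH count -> PoH hash recorded locally
    stakes: HashMap<[u8; 32], u64>,     // stake public key -> staked amount
}

impl SlashState {
    /// If the vote's PoH hash does not match the hash recorded at that count,
    /// set the offending stake to 0.
    fn process_slash(&mut self, poh_count: u64, voted_hash: [u8; 32], stake_key: [u8; 32]) {
        if let Some(local_hash) = self.poh_hashes.get(&poh_count) {
            if *local_hash != voted_hash {
                if let Some(stake) = self.stakes.get_mut(&stake_key) {
                    *stake = 0;
                }
            }
        }
    }
}
```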
## Leader Slashing

TBD. The goal of this is to discourage leaders from generating multiple PoH streams.

## Validation Vote Contract

The goal of this contract is to simulate economic cost of mining on a shorter branch.

1. With my signature I am certifying that I computed `state hash` at `PoH count` and `PoH hash`.
2. I will not vote on a branch that doesn't contain this message for at least `N` counts, or until `PoH count` + `N` is reached by the PoH stream.
3. I will not vote for any other branch below `PoH count`.
   a. if there are other votes not present in this PoH history the validator may need to `cancel` them before creating this vote.

## Leader Seed Generation

Leader selection is decided via a random seed. The process is as follows:

1. Periodically at a specific `PoH count` select the first vote signatures that create a supermajority from the previous round.
2. append them together
3. hash the string for `N` counts via a similar process as PoH itself.
4. The resulting hash is the random seed for `M` counts, where M > N

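The four steps above reduce to: concatenate the selected supermajority vote signatures, then hash the result `N` times, PoH-style. A Rust sketch using SHA-256 from the `sha2` crate as a stand-in hash; the hash choice is an assumption of this illustration, not something the RFC specifies:

```rust
use sha2::{Digest, Sha256};

/// Append the selected vote signatures together (step 2) and hash the result
/// `n` times (step 3). The output is the random seed used to rank and shuffle
/// leaders for the next `M` counts (step 4).
fn leader_seed(vote_signatures: &[Vec<u8>], n: u64) -> [u8; 32] {
    let mut seed: Vec<u8> = vote_signatures.concat();
    for _ in 0..n.max(1) {
        seed = Sha256::digest(&seed).to_vec();
    }
    let mut out = [0u8; 32];
    out.copy_from_slice(&seed);
    out
}
```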
## Leader Ranking and Rotation

Leader's transmit for a count of `T`. When `T` is reached all the validators should switch to the next ranked leader. To rank leaders, the supermajority + `M` nodes are shuffled with the using the above calculated random seed.

TBD: define a ranking for critical partitions without a node from supermajority + `M` set.

## Partition selection

Validators should select the first branch to reach finality, or the highest ranking leader.

## Examples

### Small Partition
1. Network partition M occurs for 10% of the nodes
2. The larger partition K, with 90% of the stake weight continues to operate as normal
3. M cycles through the ranks until one of them is leader.
4. M validators observe 10% of the vote pool, finality is not reached
5. M and K re-connect.
6. M validators cancel their votes on K which are below K's `PoH count`

### Leader Timeout
1. Next rank node observes a timeout.
2. Nodes receiving both PoH streams pick the higher rank node.
3. 2, causes a partition, since nodes can only vote for 1 leader.
4. Partition is resolved just like in the [Small Partition](#small-parition)
rfcs/rfc-003-storage.md (new file, 54 lines)
@@ -0,0 +1,54 @@
# Storage

The goal of this RFC is to define a protocol for storing a very large ledger over a p2p network that is verified by solana validators. At full capacity on a 1gbps network solana will generate 4 petabytes of data per year. To prevent the network from centralizing around full nodes that have to store the full data set this protocol proposes a way for mining nodes to provide storage capacity for pieces of the network.

# Version

version 0.1

# Background

The basic idea to Proof of Replication is encrypting a dataset with a public symmetric key using CBC encryption, then hash the encrypted dataset. The main problem with the naive approach is that a dishonest storage node can stream the encryption and delete the data as its hashed. The simple solution is to force the hash to be done on the reverse of the encryption, or perhaps with a random order. This ensures that all the data is present during the generation of the proof and it also requires the validator to have the entirety of the encrypted data present for verification of every proof of every identity. So the space required to validate is `(Number of Proofs)*(data size)`

# Optimization with PoH

Our improvement on this approach is to randomly sample the encrypted blocks faster than it takes to encrypt, and record the hash of those samples into the PoH ledger. Thus the blocks stay in the exact same order for every PoRep and verification can stream the data and verify all the proofs in a single batch. This way we can verify multiple proofs concurrently, each one on its own CUDA core. With the current generation of graphics cards our network can support up to 14k replication identities or symmetric keys. The total space required for verification is `(2 CBC blocks) * (Number of Identities)`, with core count of equal to (Number of Identities). A CBC block is expected to be 1MB in size.

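The optimization above amounts to: derive sample offsets from a seed that is tied to a PoH hash, hash only those samples of the encrypted ledger, and record that hash as the proof. A small Rust sketch of the sampling side; the offset derivation and the use of SHA-256 are illustrative assumptions, not details fixed by this RFC:

```rust
use sha2::{Digest, Sha256};

/// Hash `samples` pseudo-randomly chosen slices of the encrypted ledger.
/// A verifier can replay the same seed to recompute the same offsets.
fn porep_sample_hash(encrypted: &[u8], seed: &[u8; 32], samples: usize, sample_len: usize) -> [u8; 32] {
    let mut hasher = Sha256::new();
    let mut cursor = *seed;
    for _ in 0..samples {
        // Stretch the seed into the next offset.
        cursor = Sha256::digest(&cursor).into();
        let r = u64::from_le_bytes(cursor[..8].try_into().unwrap()) as usize;
        let offset = r % encrypted.len().max(1);
        let end = (offset + sample_len).min(encrypted.len());
        hasher.update(&encrypted[offset..end]);
    }
    hasher.finalize().into()
}
```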
# Network
|
||||||
|
|
||||||
|
Validators for PoRep are the same validators that are verifying transactions. They have some stake that they have put up as collateral that ensures that their work is honest. If you can prove that a validator verified a fake PoRep, then the validators stake can be slashed.
|
||||||
|
|
||||||
|
Replicators are specialized thin clients. They download a part of the ledger and store it, and provide PoReps of storing the ledger. For each verified PoRep replicators earn a reward of sol from the mining pool.
|
||||||
|
|
||||||
|
# Constraints
|
||||||
|
|
||||||
|
We have the following constraints:
|
||||||
|
* At most 14k replication identities can be used, because thats how many CUDA cores we can fit in a $5k box at the moment.
|
||||||
|
* Verification requires generating the CBC blocks. That requires space of 2 blocks per identity, and 1 CUDA core per identity for the same dataset. So as many identities at once should be batched with as many proofs for those identities verified concurrently for the same dataset.
|
||||||
|
|
||||||
|
# Validation and Replication Protocol
|
||||||
|
|
||||||
|
1. Network sets the replication target number, lets say 1k. 1k PoRep identities are created from signatures of a PoH hash. So they are tied to a specific PoH hash. It doesn't matter who creates them, or simply the last 1k validation signatures we saw for the ledger at that count. This maybe just the initial batch of identities, because we want to stagger identity rotation.
|
||||||
|
2. Any client can use any of these identities to create PoRep proofs. Replicator identities are the CBC encryption keys.
|
||||||
|
3. Periodically at a specific PoH count, replicator that want to create PoRep proofs sign the PoH hash at that count. That signature is the seed used to pick the block and identity to replicate. A block is 1TB of ledger.
|
||||||
|
4. Periodically at a specific PoH count, replicator submits PoRep proofs for their selected block. A signature of the PoH hash at that count is the seed used to sample the 1TB encrypted block, and hash it. This is done faster than it takes to encrypt the 1TB block with the original identity.
|
||||||
|
5. Replicators must submit some number of fake proofs, which they can prove to be fake by providing the seed for the hash result.
|
||||||
|
6. Periodically at a specific PoH count, validators sign the hash and use the signature to select the 1TB block that they need to validate. They batch all the identities and proofs and submit approval for all the verified ones.
|
||||||
|
7. After #6, replicator client submit the proofs of fake proofs.
|
||||||
|
|
||||||
|
For any random seed, we force everyone to use a signature that is derived from a PoH hash. Everyone must use the same count, so the same PoH hash is signed by every participant. The signatures are then each cryptographically tied to the keypair, which prevents a leader from grinding on the resulting value for more than 1 identity.
|
||||||
|
|
||||||
|
We need to stagger the rotation of the identity keys. Once this gets going, the next identity could be generated by hashing itself with a PoH hash, or via some other process based on the validation signatures.
|
||||||
|
|
||||||
|
Since there are many more client identities then encryption identities, we need to split the reward for multiple clients, and prevent Sybil attacks from generating many clients to acquire the same block of data. To remain BFT we want to avoid a single human entity from storing all the replications of a single chunk of the ledger.
|
||||||
|
|
||||||
|
Our solution to this is to force the clients to continue using the same identity. If the first round is used to acquire the same block for many client identities, the second round for the same client identities will force a redistribution of the signatures, and therefore PoRep identities and blocks. Thus to get a reward for storage clients need to store the first block for free and the network can reward long lived client identities more than new ones.
|
||||||
|
|
||||||
|
# Notes
|
||||||
|
|
||||||
|
* We can reduce the costs of verification of PoRep by using PoH, and actually make it feasible to verify a large number of proofs for a global dataset.
|
||||||
|
* We can eliminate grinding by forcing everyone to sign the same PoH hash and use the signatures as the seed
* The game between validators and replicators is played over random blocks, random encryption identities, and random data samples. The goal of the randomization is to prevent colluding groups from overlapping on data or validation.
* Replicator clients fish for lazy validators by submitting fake proofs that they can prove are fake (see the sketch at the end of these notes).
* Replication identities are just symmetric encryption keys; the number of them on the network is our storage replication target. Many more client identities can exist than replicator identities, so an unlimited number of clients can provide proofs for the same replicator identity.
* To defend against Sybil client identities that try to store the same block, we force the clients to store for multiple rounds before receiving a reward.
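
A sketch of the fake-proof mechanism referenced in the fishing note above, assuming a fake proof is produced by hashing a private seed instead of sampled ledger data; the struct layout and the use of the `sha2` crate are illustrative:

```
use sha2::{Digest, Sha256};

struct SubmittedProof {
    sample_hash: [u8; 32],
}

enum ProofReveal {
    // A real proof is backed by actually stored, encrypted ledger data.
    Real,
    // A fake proof is admitted by revealing the seed whose hash was
    // submitted in place of a genuine sample hash.
    Fake { seed: [u8; 32] },
}

// A validator that approved this proof and is later shown a valid Fake
// reveal is caught having approved without verifying.
fn is_admitted_fake(proof: &SubmittedProof, reveal: &ProofReveal) -> bool {
    match reveal {
        ProofReveal::Real => false,
        ProofReveal::Fake { seed } => {
            let mut hasher = Sha256::new();
            hasher.update(seed);
            let digest: [u8; 32] = hasher.finalize().into();
            digest == proof.sample_hash
        }
    }
}
```
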
17
snap/README.md
Normal file
@@ -0,0 +1,17 @@
## Development

If you're running Ubuntu 16.04 and already have `snapcraft` installed, simply
run:
```
$ snapcraft
```

For other systems we provide a docker image that can be used for snap
development:
```
$ ./ci/docker-run.sh solanalabs/snapcraft snapcraft -d
```

## Reference
* https://docs.snapcraft.io/
34
snap/hooks/configure
vendored
Executable file
@@ -0,0 +1,34 @@
#!/bin/bash -e

echo Stopping daemons
snapctl stop --disable solana.daemon-drone
snapctl stop --disable solana.daemon-leader
snapctl stop --disable solana.daemon-validator

mode="$(snapctl get mode)"
if [[ -z "$mode" ]]; then
  exit 0
fi

ip_address_arg=-p # Use public IP address (TODO: make this configurable?)
num_tokens="$(snapctl get num-tokens)"

case $mode in
leader+drone)
  $SNAP/bin/setup.sh ${num_tokens:+-n $num_tokens} ${ip_address_arg} -t leader
  snapctl start --enable solana.daemon-leader
  snapctl start --enable solana.daemon-drone
  ;;
leader)
  $SNAP/bin/setup.sh ${num_tokens:+-n $num_tokens} ${ip_address_arg} -t leader
  snapctl start --enable solana.daemon-leader
  ;;
validator)
  $SNAP/bin/setup.sh ${ip_address_arg} -t validator
  snapctl start --enable solana.daemon-validator
  ;;
*)
  echo "Error: Unknown mode: $mode"
  exit 1
  ;;
esac
@@ -10,6 +10,10 @@ grade: devel
|
|||||||
# CUDA dependency, so use 'devmode' confinement for now
|
# CUDA dependency, so use 'devmode' confinement for now
|
||||||
confinement: devmode
|
confinement: devmode
|
||||||
|
|
||||||
|
hooks:
|
||||||
|
configure:
|
||||||
|
plugs: [network]
|
||||||
|
|
||||||
apps:
|
apps:
|
||||||
drone:
|
drone:
|
||||||
command: solana-drone
|
command: solana-drone
|
||||||
@@ -35,35 +39,54 @@ apps:
|
|||||||
- network-bind
|
- network-bind
|
||||||
genesis:
|
genesis:
|
||||||
command: solana-genesis
|
command: solana-genesis
|
||||||
genesis-demo:
|
|
||||||
command: solana-genesis-demo
|
|
||||||
mint:
|
mint:
|
||||||
command: solana-mint
|
command: solana-mint
|
||||||
mint-demo:
|
|
||||||
command: solana-mint-demo
|
|
||||||
client-demo:
|
client-demo:
|
||||||
command: solana-client-demo
|
command: solana-client-demo
|
||||||
|
wallet:
|
||||||
|
# TODO: Merge wallet.sh functionality into solana-wallet proper
|
||||||
|
command: wallet.sh
|
||||||
|
#command: solana-wallet
|
||||||
|
|
||||||
|
daemon-validator:
|
||||||
|
daemon: simple
|
||||||
|
command: validator.sh
|
||||||
|
|
||||||
|
daemon-leader:
|
||||||
|
daemon: simple
|
||||||
|
command: leader.sh
|
||||||
|
|
||||||
|
daemon-drone:
|
||||||
|
daemon: simple
|
||||||
|
command: drone.sh
|
||||||
|
|
||||||
parts:
|
parts:
|
||||||
solana-cuda:
|
solana:
|
||||||
plugin: rust
|
plugin: nil
|
||||||
rust-channel: stable
|
|
||||||
rust-features:
|
|
||||||
- erasure
|
|
||||||
- cuda
|
|
||||||
prime:
|
prime:
|
||||||
- bin/solana-fullnode-cuda
|
- bin
|
||||||
- usr/lib/libgf_complete.so.1
|
- usr/lib/libgf_complete.so.1
|
||||||
- usr/lib/libJerasure.so.2
|
- usr/lib/libJerasure.so.2
|
||||||
override-build: |
|
override-build: |
|
||||||
|
# Build/install solana-fullnode-cuda
|
||||||
./fetch-perf-libs.sh
|
./fetch-perf-libs.sh
|
||||||
snapcraftctl build
|
cargo install --features=cuda,erasure --root $SNAPCRAFT_PART_INSTALL --bin solana-fullnode
|
||||||
mv $SNAPCRAFT_PART_INSTALL/bin/solana-fullnode $SNAPCRAFT_PART_INSTALL
|
mv $SNAPCRAFT_PART_INSTALL/bin/solana-fullnode $SNAPCRAFT_PART_INSTALL
|
||||||
rm -rf $SNAPCRAFT_PART_INSTALL/bin/*
|
rm -rf $SNAPCRAFT_PART_INSTALL/bin/*
|
||||||
mv $SNAPCRAFT_PART_INSTALL/solana-fullnode $SNAPCRAFT_PART_INSTALL/bin/solana-fullnode-cuda
|
mv $SNAPCRAFT_PART_INSTALL/solana-fullnode $SNAPCRAFT_PART_INSTALL/bin/solana-fullnode-cuda
|
||||||
mkdir -p $SNAPCRAFT_PART_INSTALL/usr/lib/
|
mkdir -p $SNAPCRAFT_PART_INSTALL/usr/lib/
|
||||||
cp -f libJerasure.so $SNAPCRAFT_PART_INSTALL/usr/lib/libJerasure.so.2
|
cp -f libJerasure.so $SNAPCRAFT_PART_INSTALL/usr/lib/libJerasure.so.2
|
||||||
cp -f libgf_complete.so $SNAPCRAFT_PART_INSTALL/usr/lib/libgf_complete.so.1
|
cp -f libgf_complete.so $SNAPCRAFT_PART_INSTALL/usr/lib/libgf_complete.so.1
|
||||||
solana:
|
|
||||||
plugin: rust
|
# Build/install all other programs
|
||||||
rust-channel: stable
|
cargo install --root $SNAPCRAFT_PART_INSTALL --bins
|
||||||
|
|
||||||
|
# Install multinode scripts
|
||||||
|
mkdir -p $SNAPCRAFT_PART_INSTALL/bin
|
||||||
|
cp -av multinode-demo/* $SNAPCRAFT_PART_INSTALL/bin/
|
||||||
|
|
||||||
|
# TODO: build rsync from source instead of sneaking it in from the host
|
||||||
|
# system...
|
||||||
|
set -x
|
||||||
|
mkdir -p $SNAPCRAFT_PART_INSTALL/bin
|
||||||
|
cp -av /usr/bin/rsync $SNAPCRAFT_PART_INSTALL/bin/
|
||||||
|
261
src/bank.rs
261
src/bank.rs
@@ -8,6 +8,8 @@ extern crate libc;
|
|||||||
use chrono::prelude::*;
|
use chrono::prelude::*;
|
||||||
use entry::Entry;
|
use entry::Entry;
|
||||||
use hash::Hash;
|
use hash::Hash;
|
||||||
|
use itertools::Itertools;
|
||||||
|
use ledger::Block;
|
||||||
use mint::Mint;
|
use mint::Mint;
|
||||||
use payment_plan::{Payment, PaymentPlan, Witness};
|
use payment_plan::{Payment, PaymentPlan, Witness};
|
||||||
use signature::{KeyPair, PublicKey, Signature};
|
use signature::{KeyPair, PublicKey, Signature};
|
||||||
@@ -28,6 +30,8 @@ use transaction::{Instruction, Plan, Transaction};
|
|||||||
/// not be processed by the network.
|
/// not be processed by the network.
|
||||||
pub const MAX_ENTRY_IDS: usize = 1024 * 16;
|
pub const MAX_ENTRY_IDS: usize = 1024 * 16;
|
||||||
|
|
||||||
|
pub const VERIFY_BLOCK_SIZE: usize = 16;
|
||||||
|
|
||||||
/// Reasons a transaction might be rejected.
|
/// Reasons a transaction might be rejected.
|
||||||
#[derive(Debug, PartialEq, Eq)]
|
#[derive(Debug, PartialEq, Eq)]
|
||||||
pub enum BankError {
|
pub enum BankError {
|
||||||
@@ -42,7 +46,7 @@ pub enum BankError {
|
|||||||
/// The bank has seen `Signature` before. This can occur under normal operation
|
/// The bank has seen `Signature` before. This can occur under normal operation
|
||||||
/// when a UDP packet is duplicated, as a user error from a client not updating
|
/// when a UDP packet is duplicated, as a user error from a client not updating
|
||||||
/// its `last_id`, or as a double-spend attack.
|
/// its `last_id`, or as a double-spend attack.
|
||||||
DuplicateSiganture(Signature),
|
DuplicateSignature(Signature),
|
||||||
|
|
||||||
/// The bank has not seen the given `last_id` or the transaction is too old and
|
/// The bank has not seen the given `last_id` or the transaction is too old and
|
||||||
/// the `last_id` has been discarded.
|
/// the `last_id` has been discarded.
|
||||||
@@ -51,6 +55,9 @@ pub enum BankError {
|
|||||||
/// The transaction is invalid and has requested a debit or credit of negative
|
/// The transaction is invalid and has requested a debit or credit of negative
|
||||||
/// tokens.
|
/// tokens.
|
||||||
NegativeTokens,
|
NegativeTokens,
|
||||||
|
|
||||||
|
/// Proof of History verification failed.
|
||||||
|
LedgerVerificationFailed,
|
||||||
}
|
}
|
||||||
|
|
||||||
pub type Result<T> = result::Result<T, BankError>;
|
pub type Result<T> = result::Result<T, BankError>;
|
||||||
@@ -89,10 +96,9 @@ pub struct Bank {
|
|||||||
transaction_count: AtomicUsize,
|
transaction_count: AtomicUsize,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Bank {
|
impl Default for Bank {
|
||||||
/// Create an Bank using a deposit.
|
fn default() -> Self {
|
||||||
pub fn new_from_deposit(deposit: &Payment) -> Self {
|
Bank {
|
||||||
let bank = Bank {
|
|
||||||
balances: RwLock::new(HashMap::new()),
|
balances: RwLock::new(HashMap::new()),
|
||||||
pending: RwLock::new(HashMap::new()),
|
pending: RwLock::new(HashMap::new()),
|
||||||
last_ids: RwLock::new(VecDeque::new()),
|
last_ids: RwLock::new(VecDeque::new()),
|
||||||
@@ -100,8 +106,15 @@ impl Bank {
|
|||||||
time_sources: RwLock::new(HashSet::new()),
|
time_sources: RwLock::new(HashSet::new()),
|
||||||
last_time: RwLock::new(Utc.timestamp(0, 0)),
|
last_time: RwLock::new(Utc.timestamp(0, 0)),
|
||||||
transaction_count: AtomicUsize::new(0),
|
transaction_count: AtomicUsize::new(0),
|
||||||
};
|
}
|
||||||
bank.apply_payment(deposit, &mut bank.balances.write().unwrap());
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Bank {
|
||||||
|
/// Create an Bank using a deposit.
|
||||||
|
pub fn new_from_deposit(deposit: &Payment) -> Self {
|
||||||
|
let bank = Self::default();
|
||||||
|
bank.apply_payment(deposit);
|
||||||
bank
|
bank
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -117,7 +130,8 @@ impl Bank {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Commit funds to the `payment.to` party.
|
/// Commit funds to the `payment.to` party.
|
||||||
fn apply_payment(&self, payment: &Payment, balances: &mut HashMap<PublicKey, i64>) {
|
fn apply_payment(&self, payment: &Payment) {
|
||||||
|
let mut balances = self.balances.write().unwrap();
|
||||||
if balances.contains_key(&payment.to) {
|
if balances.contains_key(&payment.to) {
|
||||||
*balances.get_mut(&payment.to).unwrap() += payment.tokens;
|
*balances.get_mut(&payment.to).unwrap() += payment.tokens;
|
||||||
} else {
|
} else {
|
||||||
@@ -128,14 +142,17 @@ impl Bank {
|
|||||||
/// Return the last entry ID registered.
|
/// Return the last entry ID registered.
|
||||||
pub fn last_id(&self) -> Hash {
|
pub fn last_id(&self) -> Hash {
|
||||||
let last_ids = self.last_ids.read().expect("'last_ids' read lock");
|
let last_ids = self.last_ids.read().expect("'last_ids' read lock");
|
||||||
let last_item = last_ids.iter().last().expect("empty 'last_ids' list");
|
let last_item = last_ids
|
||||||
|
.iter()
|
||||||
|
.last()
|
||||||
|
.expect("get last item from 'last_ids' list");
|
||||||
*last_item
|
*last_item
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Store the given signature. The bank will reject any transaction with the same signature.
|
/// Store the given signature. The bank will reject any transaction with the same signature.
|
||||||
fn reserve_signature(signatures: &mut HashSet<Signature>, sig: &Signature) -> Result<()> {
|
fn reserve_signature(signatures: &mut HashSet<Signature>, sig: &Signature) -> Result<()> {
|
||||||
if let Some(sig) = signatures.get(sig) {
|
if let Some(sig) = signatures.get(sig) {
|
||||||
return Err(BankError::DuplicateSiganture(*sig));
|
return Err(BankError::DuplicateSignature(*sig));
|
||||||
}
|
}
|
||||||
signatures.insert(*sig);
|
signatures.insert(*sig);
|
||||||
Ok(())
|
Ok(())
|
||||||
@@ -189,7 +206,10 @@ impl Bank {
|
|||||||
|
|
||||||
/// Deduct tokens from the 'from' address the account has sufficient
|
/// Deduct tokens from the 'from' address the account has sufficient
|
||||||
/// funds and isn't a duplicate.
|
/// funds and isn't a duplicate.
|
||||||
fn apply_debits(&self, tx: &Transaction, bals: &mut HashMap<PublicKey, i64>) -> Result<()> {
|
fn apply_debits(&self, tx: &Transaction) -> Result<()> {
|
||||||
|
let mut bals = self.balances.write().unwrap();
|
||||||
|
let mut purge = false;
|
||||||
|
{
|
||||||
let option = bals.get_mut(&tx.from);
|
let option = bals.get_mut(&tx.from);
|
||||||
if option.is_none() {
|
if option.is_none() {
|
||||||
return Err(BankError::AccountNotFound(tx.from));
|
return Err(BankError::AccountNotFound(tx.from));
|
||||||
@@ -206,16 +226,24 @@ impl Bank {
|
|||||||
if *bal < contract.tokens {
|
if *bal < contract.tokens {
|
||||||
self.forget_signature_with_last_id(&tx.sig, &tx.last_id);
|
self.forget_signature_with_last_id(&tx.sig, &tx.last_id);
|
||||||
return Err(BankError::InsufficientFunds(tx.from));
|
return Err(BankError::InsufficientFunds(tx.from));
|
||||||
|
} else if *bal == contract.tokens {
|
||||||
|
purge = true;
|
||||||
|
} else {
|
||||||
|
*bal -= contract.tokens;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
if purge {
|
||||||
|
bals.remove(&tx.from);
|
||||||
}
|
}
|
||||||
|
|
||||||
*bal -= contract.tokens;
|
|
||||||
};
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Apply only a transaction's credits. Credits from multiple transactions
|
/// Apply only a transaction's credits. Credits from multiple transactions
|
||||||
/// may safely be applied in parallel.
|
/// may safely be applied in parallel.
|
||||||
fn apply_credits(&self, tx: &Transaction, balances: &mut HashMap<PublicKey, i64>) {
|
fn apply_credits(&self, tx: &Transaction) {
|
||||||
match &tx.instruction {
|
match &tx.instruction {
|
||||||
Instruction::NewContract(contract) => {
|
Instruction::NewContract(contract) => {
|
||||||
let mut plan = contract.plan.clone();
|
let mut plan = contract.plan.clone();
|
||||||
@@ -224,7 +252,7 @@ impl Bank {
|
|||||||
.expect("timestamp creation in apply_credits")));
|
.expect("timestamp creation in apply_credits")));
|
||||||
|
|
||||||
if let Some(payment) = plan.final_payment() {
|
if let Some(payment) = plan.final_payment() {
|
||||||
self.apply_payment(&payment, balances);
|
self.apply_payment(&payment);
|
||||||
} else {
|
} else {
|
||||||
let mut pending = self.pending
|
let mut pending = self.pending
|
||||||
.write()
|
.write()
|
||||||
@@ -244,9 +272,8 @@ impl Bank {
|
|||||||
/// Process a Transaction. If it contains a payment plan that requires a witness
|
/// Process a Transaction. If it contains a payment plan that requires a witness
|
||||||
/// to progress, the payment plan will be stored in the bank.
|
/// to progress, the payment plan will be stored in the bank.
|
||||||
fn process_transaction(&self, tx: &Transaction) -> Result<()> {
|
fn process_transaction(&self, tx: &Transaction) -> Result<()> {
|
||||||
let bals = &mut self.balances.write().unwrap();
|
self.apply_debits(tx)?;
|
||||||
self.apply_debits(tx, bals)?;
|
self.apply_credits(tx);
|
||||||
self.apply_credits(tx, bals);
|
|
||||||
self.transaction_count.fetch_add(1, Ordering::Relaxed);
|
self.transaction_count.fetch_add(1, Ordering::Relaxed);
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
@@ -254,12 +281,11 @@ impl Bank {
|
|||||||
/// Process a batch of transactions.
|
/// Process a batch of transactions.
|
||||||
#[must_use]
|
#[must_use]
|
||||||
pub fn process_transactions(&self, txs: Vec<Transaction>) -> Vec<Result<Transaction>> {
|
pub fn process_transactions(&self, txs: Vec<Transaction>) -> Vec<Result<Transaction>> {
|
||||||
let bals = &mut self.balances.write().unwrap();
|
|
||||||
debug!("processing Transactions {}", txs.len());
|
debug!("processing Transactions {}", txs.len());
|
||||||
let txs_len = txs.len();
|
let txs_len = txs.len();
|
||||||
let now = Instant::now();
|
let now = Instant::now();
|
||||||
let results: Vec<_> = txs.into_iter()
|
let results: Vec<_> = txs.into_iter()
|
||||||
.map(|tx| self.apply_debits(&tx, bals).map(|_| tx))
|
.map(|tx| self.apply_debits(&tx).map(|_| tx))
|
||||||
.collect(); // Calling collect() here forces all debits to complete before moving on.
|
.collect(); // Calling collect() here forces all debits to complete before moving on.
|
||||||
|
|
||||||
let debits = now.elapsed();
|
let debits = now.elapsed();
|
||||||
@@ -269,7 +295,7 @@ impl Bank {
|
|||||||
.into_iter()
|
.into_iter()
|
||||||
.map(|result| {
|
.map(|result| {
|
||||||
result.map(|tx| {
|
result.map(|tx| {
|
||||||
self.apply_credits(&tx, bals);
|
self.apply_credits(&tx);
|
||||||
tx
|
tx
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
@@ -296,19 +322,78 @@ impl Bank {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Process an ordered list of entries.
|
/// Process an ordered list of entries.
|
||||||
pub fn process_entries<I>(&self, entries: I) -> Result<()>
|
pub fn process_entries<I>(&self, entries: I) -> Result<u64>
|
||||||
where
|
where
|
||||||
I: IntoIterator<Item = Entry>,
|
I: IntoIterator<Item = Entry>,
|
||||||
{
|
{
|
||||||
|
let mut entry_count = 0;
|
||||||
for entry in entries {
|
for entry in entries {
|
||||||
|
entry_count += 1;
|
||||||
|
|
||||||
if !entry.transactions.is_empty() {
|
if !entry.transactions.is_empty() {
|
||||||
for result in self.process_transactions(entry.transactions) {
|
for result in self.process_transactions(entry.transactions) {
|
||||||
result?;
|
result?;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
// TODO: verify this is ok in cases like:
|
||||||
|
// 1. an untrusted genesis or tx-<DATE>.log
|
||||||
|
// 2. a crazy leader..
|
||||||
|
if !entry.has_more {
|
||||||
self.register_entry_id(&entry.id);
|
self.register_entry_id(&entry.id);
|
||||||
}
|
}
|
||||||
Ok(())
|
}
|
||||||
|
Ok(entry_count)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Append entry blocks to the ledger, verifying them along the way.
|
||||||
|
pub fn process_blocks<I>(&self, entries: I) -> Result<u64>
|
||||||
|
where
|
||||||
|
I: IntoIterator<Item = Entry>,
|
||||||
|
{
|
||||||
|
// Ledger verification needs to be parallelized, but we can't pull the whole
|
||||||
|
// thing into memory. We therefore chunk it.
|
||||||
|
let mut entry_count = 0;
|
||||||
|
for block in &entries.into_iter().chunks(VERIFY_BLOCK_SIZE) {
|
||||||
|
let block: Vec<_> = block.collect();
|
||||||
|
if !block.verify(&self.last_id()) {
|
||||||
|
return Err(BankError::LedgerVerificationFailed);
|
||||||
|
}
|
||||||
|
entry_count += self.process_entries(block)?;
|
||||||
|
}
|
||||||
|
Ok(entry_count)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Process a full ledger.
|
||||||
|
pub fn process_ledger<I>(&self, entries: I) -> Result<u64>
|
||||||
|
where
|
||||||
|
I: IntoIterator<Item = Entry>,
|
||||||
|
{
|
||||||
|
let mut entries = entries.into_iter();
|
||||||
|
|
||||||
|
// The first item in the ledger is required to be an entry with zero num_hashes,
|
||||||
|
// which implies its id can be used as the ledger's seed.
|
||||||
|
let entry0 = entries.next().expect("invalid ledger: empty");
|
||||||
|
|
||||||
|
// The second item in the ledger is a special transaction where the to and from
|
||||||
|
// fields are the same. That entry should be treated as a deposit, not a
|
||||||
|
// transfer to oneself.
|
||||||
|
let entry1 = entries
|
||||||
|
.next()
|
||||||
|
.expect("invalid ledger: need at least 2 entries");
|
||||||
|
let tx = &entry1.transactions[0];
|
||||||
|
let deposit = if let Instruction::NewContract(contract) = &tx.instruction {
|
||||||
|
contract.plan.final_payment()
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
}.expect("invalid ledger, needs to start with a contract");
|
||||||
|
|
||||||
|
self.apply_payment(&deposit);
|
||||||
|
self.register_entry_id(&entry0.id);
|
||||||
|
self.register_entry_id(&entry1.id);
|
||||||
|
|
||||||
|
let mut entry_count = 2;
|
||||||
|
entry_count += self.process_blocks(entries)?;
|
||||||
|
Ok(entry_count)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Process a Witness Signature. Any payment plans waiting on this signature
|
/// Process a Witness Signature. Any payment plans waiting on this signature
|
||||||
@@ -321,7 +406,7 @@ impl Bank {
|
|||||||
{
|
{
|
||||||
e.get_mut().apply_witness(&Witness::Signature(from));
|
e.get_mut().apply_witness(&Witness::Signature(from));
|
||||||
if let Some(payment) = e.get().final_payment() {
|
if let Some(payment) = e.get().final_payment() {
|
||||||
self.apply_payment(&payment, &mut self.balances.write().unwrap());
|
self.apply_payment(&payment);
|
||||||
e.remove_entry();
|
e.remove_entry();
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
@@ -370,7 +455,7 @@ impl Bank {
|
|||||||
.read()
|
.read()
|
||||||
.expect("'last_time' read lock when creating timestamp")));
|
.expect("'last_time' read lock when creating timestamp")));
|
||||||
if let Some(payment) = plan.final_payment() {
|
if let Some(payment) = plan.final_payment() {
|
||||||
self.apply_payment(&payment, &mut self.balances.write().unwrap());
|
self.apply_payment(&payment);
|
||||||
completed.push(key.clone());
|
completed.push(key.clone());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -412,16 +497,28 @@ impl Bank {
|
|||||||
self.process_transaction(&tx).map(|_| sig)
|
self.process_transaction(&tx).map(|_| sig)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn get_balance(&self, pubkey: &PublicKey) -> Option<i64> {
|
pub fn get_balance(&self, pubkey: &PublicKey) -> i64 {
|
||||||
let bals = self.balances
|
let bals = self.balances
|
||||||
.read()
|
.read()
|
||||||
.expect("'balances' read lock in get_balance");
|
.expect("'balances' read lock in get_balance");
|
||||||
bals.get(pubkey).map(|x| *x)
|
bals.get(pubkey).map(|x| *x).unwrap_or(0)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn transaction_count(&self) -> usize {
|
pub fn transaction_count(&self) -> usize {
|
||||||
self.transaction_count.load(Ordering::Relaxed)
|
self.transaction_count.load(Ordering::Relaxed)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn has_signature(&self, signature: &Signature) -> bool {
|
||||||
|
let last_ids_sigs = self.last_ids_sigs
|
||||||
|
.read()
|
||||||
|
.expect("'last_ids_sigs' read lock");
|
||||||
|
for (_hash, signatures) in last_ids_sigs.iter() {
|
||||||
|
if signatures.contains(signature) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
false
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
@@ -429,8 +526,11 @@ mod tests {
|
|||||||
use super::*;
|
use super::*;
|
||||||
use bincode::serialize;
|
use bincode::serialize;
|
||||||
use entry::next_entry;
|
use entry::next_entry;
|
||||||
|
use entry_writer::{self, EntryWriter};
|
||||||
use hash::hash;
|
use hash::hash;
|
||||||
|
use ledger::next_entries;
|
||||||
use signature::KeyPairUtil;
|
use signature::KeyPairUtil;
|
||||||
|
use std::io::{BufRead, BufReader, Cursor, Seek, SeekFrom};
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_two_payments_to_one_party() {
|
fn test_two_payments_to_one_party() {
|
||||||
@@ -441,11 +541,11 @@ mod tests {
|
|||||||
|
|
||||||
bank.transfer(1_000, &mint.keypair(), pubkey, mint.last_id())
|
bank.transfer(1_000, &mint.keypair(), pubkey, mint.last_id())
|
||||||
.unwrap();
|
.unwrap();
|
||||||
assert_eq!(bank.get_balance(&pubkey).unwrap(), 1_000);
|
assert_eq!(bank.get_balance(&pubkey), 1_000);
|
||||||
|
|
||||||
bank.transfer(500, &mint.keypair(), pubkey, mint.last_id())
|
bank.transfer(500, &mint.keypair(), pubkey, mint.last_id())
|
||||||
.unwrap();
|
.unwrap();
|
||||||
assert_eq!(bank.get_balance(&pubkey).unwrap(), 1_500);
|
assert_eq!(bank.get_balance(&pubkey), 1_500);
|
||||||
assert_eq!(bank.transaction_count(), 2);
|
assert_eq!(bank.transaction_count(), 2);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -488,8 +588,8 @@ mod tests {
|
|||||||
assert_eq!(bank.transaction_count(), 1);
|
assert_eq!(bank.transaction_count(), 1);
|
||||||
|
|
||||||
let mint_pubkey = mint.keypair().pubkey();
|
let mint_pubkey = mint.keypair().pubkey();
|
||||||
assert_eq!(bank.get_balance(&mint_pubkey).unwrap(), 10_000);
|
assert_eq!(bank.get_balance(&mint_pubkey), 10_000);
|
||||||
assert_eq!(bank.get_balance(&pubkey).unwrap(), 1_000);
|
assert_eq!(bank.get_balance(&pubkey), 1_000);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@@ -499,7 +599,7 @@ mod tests {
|
|||||||
let pubkey = KeyPair::new().pubkey();
|
let pubkey = KeyPair::new().pubkey();
|
||||||
bank.transfer(500, &mint.keypair(), pubkey, mint.last_id())
|
bank.transfer(500, &mint.keypair(), pubkey, mint.last_id())
|
||||||
.unwrap();
|
.unwrap();
|
||||||
assert_eq!(bank.get_balance(&pubkey).unwrap(), 500);
|
assert_eq!(bank.get_balance(&pubkey), 500);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@@ -512,26 +612,26 @@ mod tests {
|
|||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
// Mint's balance will be zero because all funds are locked up.
|
// Mint's balance will be zero because all funds are locked up.
|
||||||
assert_eq!(bank.get_balance(&mint.pubkey()), Some(0));
|
assert_eq!(bank.get_balance(&mint.pubkey()), 0);
|
||||||
|
|
||||||
// tx count is 1, because debits were applied.
|
// tx count is 1, because debits were applied.
|
||||||
assert_eq!(bank.transaction_count(), 1);
|
assert_eq!(bank.transaction_count(), 1);
|
||||||
|
|
||||||
// pubkey's balance will be None because the funds have not been
|
// pubkey's balance will be None because the funds have not been
|
||||||
// sent.
|
// sent.
|
||||||
assert_eq!(bank.get_balance(&pubkey), None);
|
assert_eq!(bank.get_balance(&pubkey), 0);
|
||||||
|
|
||||||
// Now, acknowledge the time in the condition occurred and
|
// Now, acknowledge the time in the condition occurred and
|
||||||
// that pubkey's funds are now available.
|
// that pubkey's funds are now available.
|
||||||
bank.apply_timestamp(mint.pubkey(), dt).unwrap();
|
bank.apply_timestamp(mint.pubkey(), dt).unwrap();
|
||||||
assert_eq!(bank.get_balance(&pubkey), Some(1));
|
assert_eq!(bank.get_balance(&pubkey), 1);
|
||||||
|
|
||||||
// tx count is still 1, because we chose not to count timestamp transactions
|
// tx count is still 1, because we chose not to count timestamp transactions
|
||||||
// tx count.
|
// tx count.
|
||||||
assert_eq!(bank.transaction_count(), 1);
|
assert_eq!(bank.transaction_count(), 1);
|
||||||
|
|
||||||
bank.apply_timestamp(mint.pubkey(), dt).unwrap(); // <-- Attack! Attempt to process completed transaction.
|
bank.apply_timestamp(mint.pubkey(), dt).unwrap(); // <-- Attack! Attempt to process completed transaction.
|
||||||
assert_ne!(bank.get_balance(&pubkey), Some(2));
|
assert_ne!(bank.get_balance(&pubkey), 2);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@@ -546,8 +646,8 @@ mod tests {
|
|||||||
bank.transfer_on_date(1, &mint.keypair(), pubkey, dt, mint.last_id())
|
bank.transfer_on_date(1, &mint.keypair(), pubkey, dt, mint.last_id())
|
||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
assert_eq!(bank.get_balance(&mint.pubkey()), Some(0));
|
assert_eq!(bank.get_balance(&mint.pubkey()), 0);
|
||||||
assert_eq!(bank.get_balance(&pubkey), Some(1));
|
assert_eq!(bank.get_balance(&pubkey), 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@@ -563,22 +663,22 @@ mod tests {
|
|||||||
assert_eq!(bank.transaction_count(), 1);
|
assert_eq!(bank.transaction_count(), 1);
|
||||||
|
|
||||||
// Mint's balance will be zero because all funds are locked up.
|
// Mint's balance will be zero because all funds are locked up.
|
||||||
assert_eq!(bank.get_balance(&mint.pubkey()), Some(0));
|
assert_eq!(bank.get_balance(&mint.pubkey()), 0);
|
||||||
|
|
||||||
// pubkey's balance will be None because the funds have not been
|
// pubkey's balance will be None because the funds have not been
|
||||||
// sent.
|
// sent.
|
||||||
assert_eq!(bank.get_balance(&pubkey), None);
|
assert_eq!(bank.get_balance(&pubkey), 0);
|
||||||
|
|
||||||
// Now, cancel the trancaction. Mint gets her funds back, pubkey never sees them.
|
// Now, cancel the trancaction. Mint gets her funds back, pubkey never sees them.
|
||||||
bank.apply_signature(mint.pubkey(), sig).unwrap();
|
bank.apply_signature(mint.pubkey(), sig).unwrap();
|
||||||
assert_eq!(bank.get_balance(&mint.pubkey()), Some(1));
|
assert_eq!(bank.get_balance(&mint.pubkey()), 1);
|
||||||
assert_eq!(bank.get_balance(&pubkey), None);
|
assert_eq!(bank.get_balance(&pubkey), 0);
|
||||||
|
|
||||||
// Assert cancel doesn't cause count to go backward.
|
// Assert cancel doesn't cause count to go backward.
|
||||||
assert_eq!(bank.transaction_count(), 1);
|
assert_eq!(bank.transaction_count(), 1);
|
||||||
|
|
||||||
bank.apply_signature(mint.pubkey(), sig).unwrap(); // <-- Attack! Attempt to cancel completed transaction.
|
bank.apply_signature(mint.pubkey(), sig).unwrap(); // <-- Attack! Attempt to cancel completed transaction.
|
||||||
assert_ne!(bank.get_balance(&mint.pubkey()), Some(2));
|
assert_ne!(bank.get_balance(&mint.pubkey()), 2);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@@ -592,7 +692,7 @@ mod tests {
|
|||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
bank.reserve_signature_with_last_id(&sig, &mint.last_id()),
|
bank.reserve_signature_with_last_id(&sig, &mint.last_id()),
|
||||||
Err(BankError::DuplicateSiganture(sig))
|
Err(BankError::DuplicateSignature(sig))
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -610,6 +710,16 @@ mod tests {
|
|||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_has_signature() {
|
||||||
|
let mint = Mint::new(1);
|
||||||
|
let bank = Bank::new(&mint);
|
||||||
|
let sig = Signature::default();
|
||||||
|
bank.reserve_signature_with_last_id(&sig, &mint.last_id())
|
||||||
|
.expect("reserve signature");
|
||||||
|
assert!(bank.has_signature(&sig));
|
||||||
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_reject_old_last_id() {
|
fn test_reject_old_last_id() {
|
||||||
let mint = Mint::new(1);
|
let mint = Mint::new(1);
|
||||||
@@ -659,6 +769,69 @@ mod tests {
|
|||||||
bank.process_entries(vec![entry]).unwrap();
|
bank.process_entries(vec![entry]).unwrap();
|
||||||
assert!(bank.process_transaction(&tx).is_ok());
|
assert!(bank.process_transaction(&tx).is_ok());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_process_genesis() {
|
||||||
|
let mint = Mint::new(1);
|
||||||
|
let genesis = mint.create_entries();
|
||||||
|
let bank = Bank::default();
|
||||||
|
bank.process_ledger(genesis).unwrap();
|
||||||
|
assert_eq!(bank.get_balance(&mint.pubkey()), 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
fn create_sample_block(mint: &Mint) -> impl Iterator<Item = Entry> {
|
||||||
|
let keypair = KeyPair::new();
|
||||||
|
let tx = Transaction::new(&mint.keypair(), keypair.pubkey(), 1, mint.last_id());
|
||||||
|
next_entries(&mint.last_id(), 0, vec![tx]).into_iter()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn create_sample_ledger() -> (impl Iterator<Item = Entry>, PublicKey) {
|
||||||
|
let mint = Mint::new(2);
|
||||||
|
let genesis = mint.create_entries();
|
||||||
|
let block = create_sample_block(&mint);
|
||||||
|
(genesis.into_iter().chain(block), mint.pubkey())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_process_ledger() {
|
||||||
|
let (ledger, pubkey) = create_sample_ledger();
|
||||||
|
let bank = Bank::default();
|
||||||
|
bank.process_ledger(ledger).unwrap();
|
||||||
|
assert_eq!(bank.get_balance(&pubkey), 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write the given entries to a file and then return a file iterator to them.
|
||||||
|
fn to_file_iter(entries: impl Iterator<Item = Entry>) -> impl Iterator<Item = Entry> {
|
||||||
|
let mut file = Cursor::new(vec![]);
|
||||||
|
EntryWriter::write_entries(&mut file, entries).unwrap();
|
||||||
|
file.seek(SeekFrom::Start(0)).unwrap();
|
||||||
|
|
||||||
|
let reader = BufReader::new(file);
|
||||||
|
reader
|
||||||
|
.lines()
|
||||||
|
.map(|line| entry_writer::read_entry(line.unwrap()).unwrap())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_process_ledger_from_file() {
|
||||||
|
let (ledger, pubkey) = create_sample_ledger();
|
||||||
|
let ledger = to_file_iter(ledger);
|
||||||
|
|
||||||
|
let bank = Bank::default();
|
||||||
|
bank.process_ledger(ledger).unwrap();
|
||||||
|
assert_eq!(bank.get_balance(&pubkey), 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_process_ledger_from_files() {
|
||||||
|
let mint = Mint::new(2);
|
||||||
|
let genesis = to_file_iter(mint.create_entries().into_iter());
|
||||||
|
let block = to_file_iter(create_sample_block(&mint));
|
||||||
|
|
||||||
|
let bank = Bank::default();
|
||||||
|
bank.process_ledger(genesis.chain(block)).unwrap();
|
||||||
|
assert_eq!(bank.get_balance(&mint.pubkey()), 1);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(all(feature = "unstable", test))]
|
#[cfg(all(feature = "unstable", test))]
|
||||||
|
@@ -5,8 +5,7 @@
|
|||||||
use bank::Bank;
|
use bank::Bank;
|
||||||
use bincode::deserialize;
|
use bincode::deserialize;
|
||||||
use counter::Counter;
|
use counter::Counter;
|
||||||
use packet;
|
use packet::{PacketRecycler, Packets, SharedPackets};
|
||||||
use packet::SharedPackets;
|
|
||||||
use rayon::prelude::*;
|
use rayon::prelude::*;
|
||||||
use record_stage::Signal;
|
use record_stage::Signal;
|
||||||
use result::Result;
|
use result::Result;
|
||||||
@@ -24,9 +23,6 @@ use transaction::Transaction;
|
|||||||
pub struct BankingStage {
|
pub struct BankingStage {
|
||||||
/// Handle to the stage's thread.
|
/// Handle to the stage's thread.
|
||||||
pub thread_hdl: JoinHandle<()>,
|
pub thread_hdl: JoinHandle<()>,
|
||||||
|
|
||||||
/// Output receiver for the following stage.
|
|
||||||
pub signal_receiver: Receiver<Signal>,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl BankingStage {
|
impl BankingStage {
|
||||||
@@ -38,8 +34,8 @@ impl BankingStage {
|
|||||||
bank: Arc<Bank>,
|
bank: Arc<Bank>,
|
||||||
exit: Arc<AtomicBool>,
|
exit: Arc<AtomicBool>,
|
||||||
verified_receiver: Receiver<Vec<(SharedPackets, Vec<u8>)>>,
|
verified_receiver: Receiver<Vec<(SharedPackets, Vec<u8>)>>,
|
||||||
packet_recycler: packet::PacketRecycler,
|
packet_recycler: PacketRecycler,
|
||||||
) -> Self {
|
) -> (Self, Receiver<Signal>) {
|
||||||
let (signal_sender, signal_receiver) = channel();
|
let (signal_sender, signal_receiver) = channel();
|
||||||
let thread_hdl = Builder::new()
|
let thread_hdl = Builder::new()
|
||||||
.name("solana-banking-stage".to_string())
|
.name("solana-banking-stage".to_string())
|
||||||
@@ -57,15 +53,12 @@ impl BankingStage {
|
|||||||
}
|
}
|
||||||
})
|
})
|
||||||
.unwrap();
|
.unwrap();
|
||||||
BankingStage {
|
(BankingStage { thread_hdl }, signal_receiver)
|
||||||
thread_hdl,
|
|
||||||
signal_receiver,
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Convert the transactions from a blob of binary data to a vector of transactions and
|
/// Convert the transactions from a blob of binary data to a vector of transactions and
|
||||||
/// an unused `SocketAddr` that could be used to send a response.
|
/// an unused `SocketAddr` that could be used to send a response.
|
||||||
fn deserialize_transactions(p: &packet::Packets) -> Vec<Option<(Transaction, SocketAddr)>> {
|
fn deserialize_transactions(p: &Packets) -> Vec<Option<(Transaction, SocketAddr)>> {
|
||||||
p.packets
|
p.packets
|
||||||
.par_iter()
|
.par_iter()
|
||||||
.map(|x| {
|
.map(|x| {
|
||||||
@@ -82,7 +75,7 @@ impl BankingStage {
|
|||||||
bank: Arc<Bank>,
|
bank: Arc<Bank>,
|
||||||
verified_receiver: &Receiver<Vec<(SharedPackets, Vec<u8>)>>,
|
verified_receiver: &Receiver<Vec<(SharedPackets, Vec<u8>)>>,
|
||||||
signal_sender: &Sender<Signal>,
|
signal_sender: &Sender<Signal>,
|
||||||
packet_recycler: &packet::PacketRecycler,
|
packet_recycler: &PacketRecycler,
|
||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
let timer = Duration::new(1, 0);
|
let timer = Duration::new(1, 0);
|
||||||
let recv_start = Instant::now();
|
let recv_start = Instant::now();
|
||||||
@@ -298,7 +291,7 @@ mod bench {
|
|||||||
#[bench]
|
#[bench]
|
||||||
fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
|
fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
|
||||||
logger::setup();
|
logger::setup();
|
||||||
let tx = 30_000_usize;
|
let tx = 10_000_usize;
|
||||||
let mint_total = 1_000_000_000_000;
|
let mint_total = 1_000_000_000_000;
|
||||||
let mint = Mint::new(mint_total);
|
let mint = Mint::new(mint_total);
|
||||||
let num_dst_accounts = 8 * 1024;
|
let num_dst_accounts = 8 * 1024;
|
||||||
@@ -327,13 +320,6 @@ mod bench {
|
|||||||
let (verified_sender, verified_receiver) = channel();
|
let (verified_sender, verified_receiver) = channel();
|
||||||
let (signal_sender, signal_receiver) = channel();
|
let (signal_sender, signal_receiver) = channel();
|
||||||
let packet_recycler = PacketRecycler::default();
|
let packet_recycler = PacketRecycler::default();
|
||||||
let verified: Vec<_> = to_packets_chunked(&packet_recycler, transactions, 192)
|
|
||||||
.into_iter()
|
|
||||||
.map(|x| {
|
|
||||||
let len = (*x).read().unwrap().packets.len();
|
|
||||||
(x, iter::repeat(1).take(len).collect())
|
|
||||||
})
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
let setup_transactions: Vec<_> = (0..num_src_accounts)
|
let setup_transactions: Vec<_> = (0..num_src_accounts)
|
||||||
.map(|i| {
|
.map(|i| {
|
||||||
@@ -346,7 +332,11 @@ mod bench {
|
|||||||
})
|
})
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
let verified_setup: Vec<_> = to_packets_chunked(&packet_recycler, setup_transactions, tx)
|
bencher.iter(move || {
|
||||||
|
let bank = Arc::new(Bank::new(&mint));
|
||||||
|
|
||||||
|
let verified_setup: Vec<_> =
|
||||||
|
to_packets_chunked(&packet_recycler, setup_transactions.clone(), tx)
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.map(|x| {
|
.map(|x| {
|
||||||
let len = (*x).read().unwrap().packets.len();
|
let len = (*x).read().unwrap().packets.len();
|
||||||
@@ -354,10 +344,8 @@ mod bench {
|
|||||||
})
|
})
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
bencher.iter(move || {
|
let verified_setup_len = verified_setup.len();
|
||||||
let bank = Arc::new(Bank::new(&mint));
|
verified_sender.send(verified_setup).unwrap();
|
||||||
|
|
||||||
verified_sender.send(verified_setup.clone()).unwrap();
|
|
||||||
BankingStage::process_packets(
|
BankingStage::process_packets(
|
||||||
bank.clone(),
|
bank.clone(),
|
||||||
&verified_receiver,
|
&verified_receiver,
|
||||||
@@ -365,9 +353,18 @@ mod bench {
|
|||||||
&packet_recycler,
|
&packet_recycler,
|
||||||
).unwrap();
|
).unwrap();
|
||||||
|
|
||||||
check_txs(verified_setup.len(), &signal_receiver, num_src_accounts);
|
check_txs(verified_setup_len, &signal_receiver, num_src_accounts);
|
||||||
|
|
||||||
verified_sender.send(verified.clone()).unwrap();
|
let verified: Vec<_> = to_packets_chunked(&packet_recycler, transactions.clone(), 192)
|
||||||
|
.into_iter()
|
||||||
|
.map(|x| {
|
||||||
|
let len = (*x).read().unwrap().packets.len();
|
||||||
|
(x, iter::repeat(1).take(len).collect())
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
let verified_len = verified.len();
|
||||||
|
verified_sender.send(verified).unwrap();
|
||||||
BankingStage::process_packets(
|
BankingStage::process_packets(
|
||||||
bank.clone(),
|
bank.clone(),
|
||||||
&verified_receiver,
|
&verified_receiver,
|
||||||
@@ -375,14 +372,14 @@ mod bench {
|
|||||||
&packet_recycler,
|
&packet_recycler,
|
||||||
).unwrap();
|
).unwrap();
|
||||||
|
|
||||||
check_txs(verified.len(), &signal_receiver, tx);
|
check_txs(verified_len, &signal_receiver, tx);
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
#[bench]
|
#[bench]
|
||||||
fn bench_banking_stage_single_from(bencher: &mut Bencher) {
|
fn bench_banking_stage_single_from(bencher: &mut Bencher) {
|
||||||
logger::setup();
|
logger::setup();
|
||||||
let tx = 20_000_usize;
|
let tx = 10_000_usize;
|
||||||
let mint = Mint::new(1_000_000_000_000);
|
let mint = Mint::new(1_000_000_000_000);
|
||||||
let mut pubkeys = Vec::new();
|
let mut pubkeys = Vec::new();
|
||||||
let num_keys = 8;
|
let num_keys = 8;
|
||||||
@@ -405,17 +402,18 @@ mod bench {
|
|||||||
let (verified_sender, verified_receiver) = channel();
|
let (verified_sender, verified_receiver) = channel();
|
||||||
let (signal_sender, signal_receiver) = channel();
|
let (signal_sender, signal_receiver) = channel();
|
||||||
let packet_recycler = PacketRecycler::default();
|
let packet_recycler = PacketRecycler::default();
|
||||||
let verified: Vec<_> = to_packets_chunked(&packet_recycler, transactions, tx)
|
|
||||||
|
bencher.iter(move || {
|
||||||
|
let bank = Arc::new(Bank::new(&mint));
|
||||||
|
let verified: Vec<_> = to_packets_chunked(&packet_recycler, transactions.clone(), tx)
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.map(|x| {
|
.map(|x| {
|
||||||
let len = (*x).read().unwrap().packets.len();
|
let len = (*x).read().unwrap().packets.len();
|
||||||
(x, iter::repeat(1).take(len).collect())
|
(x, iter::repeat(1).take(len).collect())
|
||||||
})
|
})
|
||||||
.collect();
|
.collect();
|
||||||
|
let verified_len = verified.len();
|
||||||
bencher.iter(move || {
|
verified_sender.send(verified).unwrap();
|
||||||
let bank = Arc::new(Bank::new(&mint));
|
|
||||||
verified_sender.send(verified.clone()).unwrap();
|
|
||||||
BankingStage::process_packets(
|
BankingStage::process_packets(
|
||||||
bank.clone(),
|
bank.clone(),
|
||||||
&verified_receiver,
|
&verified_receiver,
|
||||||
@@ -423,7 +421,7 @@ mod bench {
|
|||||||
&packet_recycler,
|
&packet_recycler,
|
||||||
).unwrap();
|
).unwrap();
|
||||||
|
|
||||||
check_txs(verified.len(), &signal_receiver, tx);
|
check_txs(verified_len, &signal_receiver, tx);
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -8,9 +8,10 @@ extern crate solana;
|
|||||||
use atty::{is, Stream};
|
use atty::{is, Stream};
|
||||||
use getopts::Options;
|
use getopts::Options;
|
||||||
use rayon::prelude::*;
|
use rayon::prelude::*;
|
||||||
use solana::crdt::{get_ip_addr, Crdt, ReplicatedData};
|
use solana::crdt::{Crdt, ReplicatedData};
|
||||||
use solana::hash::Hash;
|
use solana::hash::Hash;
|
||||||
use solana::mint::MintDemo;
|
use solana::mint::Mint;
|
||||||
|
use solana::nat::udp_public_bind;
|
||||||
use solana::ncp::Ncp;
|
use solana::ncp::Ncp;
|
||||||
use solana::signature::{GenKeys, KeyPair, KeyPairUtil};
|
use solana::signature::{GenKeys, KeyPair, KeyPairUtil};
|
||||||
use solana::streamer::default_window;
|
use solana::streamer::default_window;
|
||||||
@@ -40,14 +41,13 @@ fn print_usage(program: &str, opts: Options) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn sample_tx_count(
|
fn sample_tx_count(
|
||||||
thread_addr: Arc<RwLock<SocketAddr>>,
|
|
||||||
exit: Arc<AtomicBool>,
|
exit: Arc<AtomicBool>,
|
||||||
maxes: Arc<RwLock<Vec<(f64, u64)>>>,
|
maxes: Arc<RwLock<Vec<(f64, u64)>>>,
|
||||||
first_count: u64,
|
first_count: u64,
|
||||||
v: ReplicatedData,
|
v: ReplicatedData,
|
||||||
sample_period: u64,
|
sample_period: u64,
|
||||||
) {
|
) {
|
||||||
let mut client = mk_client(&thread_addr, &v);
|
let mut client = mk_client(&v);
|
||||||
let mut now = Instant::now();
|
let mut now = Instant::now();
|
||||||
let mut initial_tx_count = client.transaction_count();
|
let mut initial_tx_count = client.transaction_count();
|
||||||
let mut max_tps = 0.0;
|
let mut max_tps = 0.0;
|
||||||
@@ -82,22 +82,19 @@ fn sample_tx_count(
|
|||||||
|
|
||||||
fn generate_and_send_txs(
|
fn generate_and_send_txs(
|
||||||
client: &mut ThinClient,
|
client: &mut ThinClient,
|
||||||
keypair_pairs: &Vec<&[KeyPair]>,
|
tx_clients: &Vec<ThinClient>,
|
||||||
|
mint: &Mint,
|
||||||
|
keypairs: &Vec<KeyPair>,
|
||||||
leader: &ReplicatedData,
|
leader: &ReplicatedData,
|
||||||
txs: i64,
|
txs: i64,
|
||||||
last_id: &mut Hash,
|
last_id: &mut Hash,
|
||||||
threads: usize,
|
threads: usize,
|
||||||
client_addr: Arc<RwLock<SocketAddr>>,
|
|
||||||
) {
|
) {
|
||||||
println!(
|
println!("Signing transactions... {}", keypairs.len(),);
|
||||||
"Signing transactions... {} {}",
|
|
||||||
keypair_pairs.len(),
|
|
||||||
keypair_pairs[0].len()
|
|
||||||
);
|
|
||||||
let signing_start = Instant::now();
|
let signing_start = Instant::now();
|
||||||
let transactions: Vec<_> = keypair_pairs
|
let transactions: Vec<_> = keypairs
|
||||||
.par_iter()
|
.par_iter()
|
||||||
.map(|chunk| Transaction::new(&chunk[0], chunk[1].pubkey(), 1, *last_id))
|
.map(|keypair| Transaction::new(&mint.keypair(), keypair.pubkey(), 1, *last_id))
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
let duration = signing_start.elapsed();
|
let duration = signing_start.elapsed();
|
||||||
@@ -115,13 +112,15 @@ fn generate_and_send_txs(
|
|||||||
let transfer_start = Instant::now();
|
let transfer_start = Instant::now();
|
||||||
let sz = transactions.len() / threads;
|
let sz = transactions.len() / threads;
|
||||||
let chunks: Vec<_> = transactions.chunks(sz).collect();
|
let chunks: Vec<_> = transactions.chunks(sz).collect();
|
||||||
chunks.into_par_iter().for_each(|txs| {
|
chunks
|
||||||
|
.into_par_iter()
|
||||||
|
.zip(tx_clients)
|
||||||
|
.for_each(|(txs, client)| {
|
||||||
println!(
|
println!(
|
||||||
"Transferring 1 unit {} times... to {:?}",
|
"Transferring 1 unit {} times... to {:?}",
|
||||||
txs.len(),
|
txs.len(),
|
||||||
leader.transactions_addr
|
leader.transactions_addr
|
||||||
);
|
);
|
||||||
let client = mk_client(&client_addr, &leader);
|
|
||||||
for tx in txs {
|
for tx in txs {
|
||||||
client.transfer_signed(tx.clone()).unwrap();
|
client.transfer_signed(tx.clone()).unwrap();
|
||||||
}
|
}
|
||||||
@@ -132,7 +131,14 @@ fn generate_and_send_txs(
|
|||||||
txs as f32 / (duration_as_s(&transfer_start.elapsed()))
|
txs as f32 / (duration_as_s(&transfer_start.elapsed()))
|
||||||
);
|
);
|
||||||
|
|
||||||
*last_id = client.get_last_id();
|
loop {
|
||||||
|
let new_id = client.get_last_id();
|
||||||
|
if *last_id != new_id {
|
||||||
|
*last_id = new_id;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
sleep(Duration::from_millis(100));
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn main() {
|
fn main() {
|
||||||
@@ -143,9 +149,7 @@ fn main() {
|
|||||||
|
|
||||||
let mut opts = Options::new();
|
let mut opts = Options::new();
|
||||||
opts.optopt("l", "", "leader", "leader.json");
|
opts.optopt("l", "", "leader", "leader.json");
|
||||||
opts.optopt("c", "", "client port", "port");
|
|
||||||
opts.optopt("t", "", "number of threads", &format!("{}", threads));
|
opts.optopt("t", "", "number of threads", &format!("{}", threads));
|
||||||
opts.optflag("d", "dyn", "detect network address dynamically");
|
|
||||||
opts.optopt(
|
opts.optopt(
|
||||||
"s",
|
"s",
|
||||||
"",
|
"",
|
||||||
@@ -173,15 +177,6 @@ fn main() {
|
|||||||
print_usage(&program, opts);
|
print_usage(&program, opts);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
let mut addr: SocketAddr = "0.0.0.0:8100".parse().unwrap();
|
|
||||||
if matches.opt_present("c") {
|
|
||||||
let port = matches.opt_str("c").unwrap().parse().unwrap();
|
|
||||||
addr.set_port(port);
|
|
||||||
}
|
|
||||||
if matches.opt_present("d") {
|
|
||||||
addr.set_ip(get_ip_addr().unwrap());
|
|
||||||
}
|
|
||||||
let client_addr: Arc<RwLock<SocketAddr>> = Arc::new(RwLock::new(addr));
|
|
||||||
if matches.opt_present("t") {
|
if matches.opt_present("t") {
|
||||||
threads = matches.opt_str("t").unwrap().parse().expect("integer");
|
threads = matches.opt_str("t").unwrap().parse().expect("integer");
|
||||||
}
|
}
|
||||||
@@ -201,13 +196,7 @@ fn main() {
|
|||||||
|
|
||||||
let signal = Arc::new(AtomicBool::new(false));
|
let signal = Arc::new(AtomicBool::new(false));
|
||||||
let mut c_threads = vec![];
|
let mut c_threads = vec![];
|
||||||
let validators = converge(
|
let validators = converge(&leader, signal.clone(), num_nodes, &mut c_threads);
|
||||||
&client_addr,
|
|
||||||
&leader,
|
|
||||||
signal.clone(),
|
|
||||||
num_nodes,
|
|
||||||
&mut c_threads,
|
|
||||||
);
|
|
||||||
assert_eq!(validators.len(), num_nodes);
|
assert_eq!(validators.len(), num_nodes);
|
||||||
|
|
||||||
if is(Stream::Stdin) {
|
if is(Stream::Stdin) {
|
||||||
@@ -223,24 +212,23 @@ fn main() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
println!("Parsing stdin...");
|
println!("Parsing stdin...");
|
||||||
let demo: MintDemo = serde_json::from_str(&buffer).unwrap_or_else(|e| {
|
let mint: Mint = serde_json::from_str(&buffer).unwrap_or_else(|e| {
|
||||||
eprintln!("failed to parse json: {}", e);
|
eprintln!("failed to parse json: {}", e);
|
||||||
exit(1);
|
exit(1);
|
||||||
});
|
});
|
||||||
let mut client = mk_client(&client_addr, &leader);
|
let mut client = mk_client(&leader);
|
||||||
|
|
||||||
println!("Get last ID...");
|
println!("Get last ID...");
|
||||||
let mut last_id = client.get_last_id();
|
let mut last_id = client.get_last_id();
|
||||||
println!("Got last ID {:?}", last_id);
|
println!("Got last ID {:?}", last_id);
|
||||||
|
|
||||||
let mut seed = [0u8; 32];
|
let mut seed = [0u8; 32];
|
||||||
seed.copy_from_slice(&demo.mint.keypair().public_key_bytes()[..32]);
|
seed.copy_from_slice(&mint.keypair().public_key_bytes()[..32]);
|
||||||
let rnd = GenKeys::new(seed);
|
let rnd = GenKeys::new(seed);
|
||||||
|
|
||||||
println!("Creating keypairs...");
|
println!("Creating keypairs...");
|
||||||
let txs = demo.num_accounts / 2;
|
let txs = 500_000;
|
||||||
let keypairs = rnd.gen_n_keypairs(demo.num_accounts);
|
let keypairs = rnd.gen_n_keypairs(txs);
|
||||||
let keypair_pairs: Vec<_> = keypairs.chunks(2).collect();
|
|
||||||
|
|
||||||
let first_count = client.transaction_count();
|
let first_count = client.transaction_count();
|
||||||
println!("initial count {}", first_count);
|
println!("initial count {}", first_count);
|
||||||
@@ -255,29 +243,31 @@ fn main() {
|
|||||||
.into_iter()
|
.into_iter()
|
||||||
.map(|v| {
|
.map(|v| {
|
||||||
let exit = signal.clone();
|
let exit = signal.clone();
|
||||||
let thread_addr = client_addr.clone();
|
|
||||||
let maxes = maxes.clone();
|
let maxes = maxes.clone();
|
||||||
Builder::new()
|
Builder::new()
|
||||||
.name("solana-client-sample".to_string())
|
.name("solana-client-sample".to_string())
|
||||||
.spawn(move || {
|
.spawn(move || {
|
||||||
sample_tx_count(thread_addr, exit, maxes, first_count, v, sample_period);
|
sample_tx_count(exit, maxes, first_count, v, sample_period);
|
||||||
})
|
})
|
||||||
.unwrap()
|
.unwrap()
|
||||||
})
|
})
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
|
let clients = (0..threads).map(|_| mk_client(&leader)).collect();
|
||||||
|
|
||||||
// generate and send transactions for the specified duration
|
// generate and send transactions for the specified duration
|
||||||
let time = Duration::new(time_sec, 0);
|
let time = Duration::new(time_sec, 0);
|
||||||
let now = Instant::now();
|
let now = Instant::now();
|
||||||
while now.elapsed() < time {
|
while now.elapsed() < time {
|
||||||
generate_and_send_txs(
|
generate_and_send_txs(
|
||||||
&mut client,
|
&mut client,
|
||||||
&keypair_pairs,
|
&clients,
|
||||||
|
&mint,
|
||||||
|
&keypairs,
|
||||||
&leader,
|
&leader,
|
||||||
txs,
|
txs,
|
||||||
&mut last_id,
|
&mut last_id,
|
||||||
threads,
|
threads,
|
||||||
client_addr.clone(),
|
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -310,17 +300,14 @@ fn main() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn mk_client(locked_addr: &Arc<RwLock<SocketAddr>>, r: &ReplicatedData) -> ThinClient {
|
fn mk_client(r: &ReplicatedData) -> ThinClient {
|
||||||
let mut addr = locked_addr.write().unwrap();
|
let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||||
let port = addr.port();
|
let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||||
let transactions_socket = UdpSocket::bind(addr.clone()).unwrap();
|
|
||||||
addr.set_port(port + 1);
|
|
||||||
let requests_socket = UdpSocket::bind(addr.clone()).unwrap();
|
|
||||||
requests_socket
|
requests_socket
|
||||||
.set_read_timeout(Some(Duration::new(1, 0)))
|
.set_read_timeout(Some(Duration::new(1, 0)))
|
||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
addr.set_port(port + 2);
|
|
||||||
ThinClient::new(
|
ThinClient::new(
|
||||||
r.requests_addr,
|
r.requests_addr,
|
||||||
requests_socket,
|
requests_socket,
|
||||||
@@ -329,26 +316,23 @@ fn mk_client(locked_addr: &Arc<RwLock<SocketAddr>>, r: &ReplicatedData) -> ThinC
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn spy_node(client_addr: &Arc<RwLock<SocketAddr>>) -> (ReplicatedData, UdpSocket) {
|
fn spy_node() -> (ReplicatedData, UdpSocket) {
|
||||||
let mut addr = client_addr.write().unwrap();
|
let gossip_socket_pair = udp_public_bind("gossip");
|
||||||
let port = addr.port();
|
|
||||||
let gossip = UdpSocket::bind(addr.clone()).unwrap();
|
|
||||||
addr.set_port(port + 1);
|
|
||||||
let daddr = "0.0.0.0:0".parse().unwrap();
|
|
||||||
let pubkey = KeyPair::new().pubkey();
|
let pubkey = KeyPair::new().pubkey();
|
||||||
|
let daddr = "0.0.0.0:0".parse().unwrap();
|
||||||
let node = ReplicatedData::new(
|
let node = ReplicatedData::new(
|
||||||
pubkey,
|
pubkey,
|
||||||
gossip.local_addr().unwrap(),
|
//gossip.local_addr().unwrap(),
|
||||||
|
gossip_socket_pair.addr,
|
||||||
daddr,
|
daddr,
|
||||||
daddr,
|
daddr,
|
||||||
daddr,
|
daddr,
|
||||||
daddr,
|
daddr,
|
||||||
);
|
);
|
||||||
(node, gossip)
|
(node, gossip_socket_pair.receiver)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn converge(
|
fn converge(
|
||||||
client_addr: &Arc<RwLock<SocketAddr>>,
|
|
||||||
leader: &ReplicatedData,
|
leader: &ReplicatedData,
|
||||||
exit: Arc<AtomicBool>,
|
exit: Arc<AtomicBool>,
|
||||||
num_nodes: usize,
|
num_nodes: usize,
|
||||||
@@ -356,7 +340,7 @@ fn converge(
|
|||||||
) -> Vec<ReplicatedData> {
|
) -> Vec<ReplicatedData> {
|
||||||
//lets spy on the network
|
//lets spy on the network
|
||||||
let daddr = "0.0.0.0:0".parse().unwrap();
|
let daddr = "0.0.0.0:0".parse().unwrap();
|
||||||
let (spy, spy_gossip) = spy_node(client_addr);
|
let (spy, spy_gossip) = spy_node();
|
||||||
let mut spy_crdt = Crdt::new(spy);
|
let mut spy_crdt = Crdt::new(spy);
|
||||||
spy_crdt.insert(&leader);
|
spy_crdt.insert(&leader);
|
||||||
spy_crdt.set_leader(leader.id);
|
spy_crdt.set_leader(leader.id);
|
||||||
|
@@ -11,9 +11,9 @@ extern crate tokio_io;
|
|||||||
use atty::{is, Stream as atty_stream};
|
use atty::{is, Stream as atty_stream};
|
||||||
use bincode::deserialize;
|
use bincode::deserialize;
|
||||||
use getopts::Options;
|
use getopts::Options;
|
||||||
use solana::crdt::{get_ip_addr, ReplicatedData};
|
use solana::crdt::ReplicatedData;
|
||||||
use solana::drone::{Drone, DroneRequest};
|
use solana::drone::{Drone, DroneRequest};
|
||||||
use solana::mint::MintDemo;
|
use solana::mint::Mint;
|
||||||
use std::env;
|
use std::env;
|
||||||
use std::fs::File;
|
use std::fs::File;
|
||||||
use std::io::{stdin, Read};
|
use std::io::{stdin, Read};
|
||||||
@@ -26,7 +26,7 @@ use tokio::prelude::*;
|
|||||||
use tokio_codec::{BytesCodec, Decoder};
|
use tokio_codec::{BytesCodec, Decoder};
|
||||||
|
|
||||||
fn print_usage(program: &str, opts: Options) {
|
fn print_usage(program: &str, opts: Options) {
|
||||||
let mut brief = format!("Usage: cat <mint-demo.json> | {} [options]\n\n", program);
|
let mut brief = format!("Usage: cat <mint.json> | {} [options]\n\n", program);
|
||||||
brief += " Run a Solana Drone to act as the custodian of the mint's remaining tokens\n";
|
brief += " Run a Solana Drone to act as the custodian of the mint's remaining tokens\n";
|
||||||
|
|
||||||
print!("{}", opts.usage(&brief));
|
print!("{}", opts.usage(&brief));
|
||||||
@@ -96,15 +96,14 @@ fn main() {
|
|||||||
exit(1);
|
exit(1);
|
||||||
}
|
}
|
||||||
|
|
||||||
let demo: MintDemo = serde_json::from_str(&buffer).unwrap_or_else(|e| {
|
let mint: Mint = serde_json::from_str(&buffer).unwrap_or_else(|e| {
|
||||||
eprintln!("failed to parse json: {}", e);
|
eprintln!("failed to parse json: {}", e);
|
||||||
exit(1);
|
exit(1);
|
||||||
});
|
});
|
||||||
|
|
||||||
let mint_keypair = demo.mint.keypair();
|
let mint_keypair = mint.keypair();
|
||||||
|
|
||||||
let mut drone_addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
|
let drone_addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
|
||||||
drone_addr.set_ip(get_ip_addr().unwrap());
|
|
||||||
|
|
||||||
let drone = Arc::new(Mutex::new(Drone::new(
|
let drone = Arc::new(Mutex::new(Drone::new(
|
||||||
mint_keypair,
|
mint_keypair,
|
||||||
@@ -4,6 +4,7 @@ extern crate solana;

 use getopts::Options;
 use solana::crdt::{get_ip_addr, parse_port_or_addr, ReplicatedData};
+use solana::nat::get_public_ip_addr;
 use std::env;
 use std::io;
 use std::net::SocketAddr;
@@ -19,7 +20,16 @@ fn print_usage(program: &str, opts: Options) {
 fn main() {
     let mut opts = Options::new();
     opts.optopt("b", "", "bind", "bind to port or address");
-    opts.optflag("d", "dyn", "detect network address dynamically");
+    opts.optflag(
+        "p",
+        "",
+        "detect public network address using public servers",
+    );
+    opts.optflag(
+        "l",
+        "",
+        "detect network address from local machine configuration",
+    );
     opts.optflag("h", "help", "print help");
     let args: Vec<String> = env::args().collect();
     let matches = match opts.parse(&args[1..]) {
@@ -37,10 +47,14 @@ fn main() {

     let bind_addr: SocketAddr = {
         let mut bind_addr = parse_port_or_addr(matches.opt_str("b"));
-        if matches.opt_present("d") {
+        if matches.opt_present("l") {
             let ip = get_ip_addr().unwrap();
             bind_addr.set_ip(ip);
         }
+        if matches.opt_present("p") {
+            let ip = get_public_ip_addr().unwrap();
+            bind_addr.set_ip(ip);
+        }
         bind_addr
     };

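The new "-p" and "-l" flags above split address detection into two explicit modes: "-l" keeps the old behaviour of reading the address from the local machine configuration, while "-p" asks public servers via solana::nat::get_public_ip_addr. The following is a hedged sketch, not part of the diff, of the same selection logic, reusing only the calls visible in the hunks above:

    extern crate solana;

    use solana::crdt::get_ip_addr;
    use solana::nat::get_public_ip_addr;
    use std::net::SocketAddr;

    // Hypothetical helper mirroring the "-l" / "-p" branches in the diff.
    fn resolve_bind_addr(mut bind_addr: SocketAddr, local: bool, public: bool) -> SocketAddr {
        if local {
            // "-l": read the address from the local machine configuration.
            bind_addr.set_ip(get_ip_addr().unwrap());
        }
        if public {
            // "-p": detect the public network address using public servers.
            bind_addr.set_ip(get_public_ip_addr().unwrap());
        }
        bind_addr
    }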
@@ -7,16 +7,11 @@ extern crate solana;

 use atty::{is, Stream};
 use getopts::Options;
-use solana::bank::Bank;
-use solana::crdt::ReplicatedData;
-use solana::entry::Entry;
-use solana::payment_plan::PaymentPlan;
-use solana::server::Server;
-use solana::transaction::Instruction;
+use solana::crdt::{ReplicatedData, TestNode};
+use solana::fullnode::FullNode;
 use std::env;
 use std::fs::File;
-use std::io::{stdin, stdout, BufRead, Write};
-use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
+use std::net::{IpAddr, Ipv4Addr, SocketAddr};
 use std::process::exit;
 use std::sync::atomic::AtomicBool;
 use std::sync::Arc;
@@ -31,7 +26,7 @@ fn print_usage(program: &str, opts: Options) {
     print!("{}", opts.usage(&brief));
 }

-fn main() {
+fn main() -> () {
     env_logger::init();
     let mut opts = Options::new();
     opts.optflag("h", "help", "print help");
@@ -67,45 +62,6 @@ fn main() {
         exit(1);
     }

-    eprintln!("Initializing...");
-    let stdin = stdin();
-    let mut entries = stdin.lock().lines().map(|line| {
-        let entry: Entry = serde_json::from_str(&line.unwrap()).unwrap_or_else(|e| {
-            eprintln!("failed to parse json: {}", e);
-            exit(1);
-        });
-        entry
-    });
-    eprintln!("done parsing...");
-
-    // The first item in the ledger is required to be an entry with zero num_hashes,
-    // which implies its id can be used as the ledger's seed.
-    let entry0 = entries.next().expect("invalid ledger: empty");
-
-    // The second item in the ledger is a special transaction where the to and from
-    // fields are the same. That entry should be treated as a deposit, not a
-    // transfer to oneself.
-    let entry1 = entries
-        .next()
-        .expect("invalid ledger: need at least 2 entries");
-    let tx = &entry1.transactions[0];
-    let deposit = if let Instruction::NewContract(contract) = &tx.instruction {
-        contract.plan.final_payment()
-    } else {
-        None
-    }.expect("invalid ledger, needs to start with a contract");
-
-    eprintln!("creating bank...");
-
-    let bank = Bank::new_from_deposit(&deposit);
-    bank.register_entry_id(&entry0.id);
-    bank.register_entry_id(&entry1.id);
-
-    eprintln!("processing entries...");
-    bank.process_entries(entries).expect("process_entries");
-
-    eprintln!("creating networking stack...");
-
     let bind_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
     let mut repl_data = ReplicatedData::new_leader(&bind_addr);
     if matches.opt_present("l") {
@@ -122,59 +78,19 @@ fn main() {
             exit(1);
         }
     }
+    let mut node = TestNode::new_with_bind_addr(repl_data, bind_addr);
     let exit = Arc::new(AtomicBool::new(false));
-    let threads = if matches.opt_present("t") {
+    let fullnode = if matches.opt_present("t") {
         let testnet_address_string = matches.opt_str("t").unwrap();
-        eprintln!(
-            "starting validator... {} connecting to {}",
-            repl_data.requests_addr, testnet_address_string
-        );
         let testnet_addr = testnet_address_string.parse().unwrap();
-        let newtwork_entry_point = ReplicatedData::new_entry_point(testnet_addr);
-        let s = Server::new_validator(
-            bank,
-            repl_data.clone(),
-            UdpSocket::bind(repl_data.requests_addr).unwrap(),
-            UdpSocket::bind("0.0.0.0:0").unwrap(),
-            UdpSocket::bind(repl_data.replicate_addr).unwrap(),
-            UdpSocket::bind(repl_data.gossip_addr).unwrap(),
-            UdpSocket::bind(repl_data.repair_addr).unwrap(),
-            newtwork_entry_point,
-            exit.clone(),
-        );
-        s.thread_hdls
+        FullNode::new(node, false, None, Some(testnet_addr), None, exit)
     } else {
-        eprintln!("starting leader... {}", repl_data.requests_addr);
-        repl_data.current_leader_id = repl_data.id.clone();
-
-        let outfile: Box<Write + Send + 'static> = if matches.opt_present("o") {
-            let path = matches.opt_str("o").unwrap();
-            Box::new(
-                File::create(&path).expect(&format!("unable to open output file \"{}\"", path)),
-            )
-        } else {
-            Box::new(stdout())
-        };
+        node.data.current_leader_id = node.data.id.clone();
+        let outfile = matches.opt_str("o");
+        FullNode::new(node, true, None, None, outfile, exit)
     };
+    for t in fullnode.thread_hdls {
-        let server = Server::new_leader(
-            bank,
-            //Some(Duration::from_millis(1000)),
-            None,
-            repl_data.clone(),
-            UdpSocket::bind(repl_data.requests_addr).unwrap(),
-            UdpSocket::bind(repl_data.transactions_addr).unwrap(),
-            UdpSocket::bind("0.0.0.0:0").unwrap(),
-            UdpSocket::bind("0.0.0.0:0").unwrap(),
-            UdpSocket::bind(repl_data.gossip_addr).unwrap(),
-            exit.clone(),
-            outfile,
-        );
-        server.thread_hdls
-    };
-    eprintln!("Ready. Listening on {}", repl_data.transactions_addr);
-
-    for t in threads {
         t.join().expect("join");
     }
 }
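The fullnode binary above now delegates the socket and thread plumbing that Server::new_validator / Server::new_leader used to do to FullNode::new. The sketch below is not part of the diff; the meaning of the positional arguments (leader flag, ledger path, network entry point, output file, exit signal) is inferred from the two call sites shown in the hunk and should be treated as an assumption, not the documented API.

    use std::net::SocketAddr;
    use std::sync::atomic::AtomicBool;
    use std::sync::Arc;

    use solana::crdt::TestNode;
    use solana::fullnode::FullNode;

    // Hypothetical helpers mirroring the two call sites in the diff above.
    fn start_validator(node: TestNode, entry_point: SocketAddr, exit: Arc<AtomicBool>) -> FullNode {
        // `false` = not the leader; `Some(entry_point)` = testnet address to join.
        FullNode::new(node, false, None, Some(entry_point), None, exit)
    }

    fn start_leader(node: TestNode, outfile: Option<String>, exit: Arc<AtomicBool>) -> FullNode {
        // `true` = leader; no entry point; optionally write the ledger to `outfile`.
        FullNode::new(node, true, None, None, outfile, exit)
    }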
@@ -1,82 +0,0 @@
-extern crate atty;
-extern crate rayon;
-extern crate serde_json;
-extern crate solana;
-
-use atty::{is, Stream};
-use rayon::prelude::*;
-use solana::bank::MAX_ENTRY_IDS;
-use solana::entry::next_entry;
-use solana::ledger::next_entries;
-use solana::mint::MintDemo;
-use solana::signature::{GenKeys, KeyPairUtil};
-use solana::transaction::Transaction;
-use std::io::{stdin, Read};
-use std::process::exit;
-
-// Generate a ledger with lots and lots of accounts.
-fn main() {
-    if is(Stream::Stdin) {
-        eprintln!("nothing found on stdin, expected a json file");
-        exit(1);
-    }
-
-    let mut buffer = String::new();
-    let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
-    if num_bytes == 0 {
-        eprintln!("empty file on stdin, expected a json file");
-        exit(1);
-    }
-
-    let demo: MintDemo = serde_json::from_str(&buffer).unwrap_or_else(|e| {
-        eprintln!("failed to parse json: {}", e);
-        exit(1);
-    });
-
-    let mut seed = [0u8; 32];
-    seed.copy_from_slice(&demo.mint.keypair().public_key_bytes()[..32]);
-    let rnd = GenKeys::new(seed);
-    let num_accounts = demo.num_accounts;
-    let tokens_per_user = 500;
-
-    let keypairs = rnd.gen_n_keypairs(num_accounts);
-
-    let mint_keypair = demo.mint.keypair();
-    let last_id = demo.mint.last_id();
-
-    for entry in demo.mint.create_entries() {
-        println!("{}", serde_json::to_string(&entry).unwrap());
-    }
-
-    eprintln!("Creating {} empty entries...", MAX_ENTRY_IDS);
-
-    // Offer client lots of entry IDs to use for each transaction's last_id.
-    let mut last_id = last_id;
-    let mut last_ids = vec![];
-    for _ in 0..MAX_ENTRY_IDS {
-        let entry = next_entry(&last_id, 1, vec![]);
-        last_id = entry.id;
-        last_ids.push(last_id);
-        let serialized = serde_json::to_string(&entry).unwrap_or_else(|e| {
-            eprintln!("failed to serialize: {}", e);
-            exit(1);
-        });
-        println!("{}", serialized);
-    }
-
-    eprintln!("Creating {} transactions...", num_accounts);
-    let transactions: Vec<_> = keypairs
-        .into_par_iter()
-        .enumerate()
-        .map(|(i, rando)| {
-            let last_id = last_ids[i % MAX_ENTRY_IDS];
-            Transaction::new(&mint_keypair, rando.pubkey(), tokens_per_user, last_id)
-        })
-        .collect();
-
-    eprintln!("Logging the creation of {} accounts...", num_accounts);
-    let entries = next_entries(&last_id, 0, transactions);
-    for entry in entries {
-        println!("{}", serde_json::to_string(&entry).unwrap());
-    }
-}
@@ -5,32 +5,27 @@ extern crate serde_json;
 extern crate solana;

 use atty::{is, Stream};
+use solana::entry_writer::EntryWriter;
 use solana::mint::Mint;
-use std::io::{stdin, Read};
+use std::error;
+use std::io::{stdin, stdout, Read};
 use std::process::exit;

-fn main() {
+fn main() -> Result<(), Box<error::Error>> {
     if is(Stream::Stdin) {
         eprintln!("nothing found on stdin, expected a json file");
         exit(1);
     }

     let mut buffer = String::new();
-    let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
+    let num_bytes = stdin().read_to_string(&mut buffer)?;
     if num_bytes == 0 {
         eprintln!("empty file on stdin, expected a json file");
         exit(1);
     }

-    let mint: Mint = serde_json::from_str(&buffer).unwrap_or_else(|e| {
-        eprintln!("failed to parse json: {}", e);
-        exit(1);
-    });
-    for x in mint.create_entries() {
-        let serialized = serde_json::to_string(&x).unwrap_or_else(|e| {
-            eprintln!("failed to serialize: {}", e);
-            exit(1);
-        });
-        println!("{}", serialized);
-    }
+    let mint: Mint = serde_json::from_str(&buffer)?;
+    let mut writer = stdout();
+    EntryWriter::write_entries(&mut writer, mint.create_entries())?;
+    Ok(())
 }
@@ -1,29 +0,0 @@
-extern crate atty;
-extern crate rayon;
-extern crate ring;
-extern crate serde_json;
-extern crate solana;
-
-use atty::{is, Stream};
-use solana::mint::{Mint, MintDemo};
-use std::io;
-use std::process::exit;
-
-fn main() {
-    let mut input_text = String::new();
-    if is(Stream::Stdin) {
-        eprintln!("nothing found on stdin, expected a token number");
-        exit(1);
-    }
-
-    io::stdin().read_line(&mut input_text).unwrap();
-    let trimmed = input_text.trim();
-    let tokens = trimmed.parse::<i64>().unwrap();
-
-    let mint = Mint::new(tokens);
-    let tokens_per_user = 1_000;
-    let num_accounts = tokens / tokens_per_user;
-
-    let demo = MintDemo { mint, num_accounts };
-    println!("{}", serde_json::to_string(&demo).unwrap());
-}
337 src/bin/wallet.rs Normal file
@@ -0,0 +1,337 @@
+extern crate atty;
+extern crate bincode;
+extern crate bs58;
+extern crate clap;
+extern crate env_logger;
+extern crate getopts;
+extern crate serde_json;
+extern crate solana;
+
+use bincode::serialize;
+use clap::{App, Arg, SubCommand};
+use solana::crdt::ReplicatedData;
+use solana::drone::DroneRequest;
+use solana::mint::Mint;
+use solana::signature::{PublicKey, Signature};
+use solana::thin_client::ThinClient;
+use std::error;
+use std::fmt;
+use std::fs::File;
+use std::io;
+use std::io::prelude::*;
+use std::net::{IpAddr, Ipv4Addr, SocketAddr, TcpStream, UdpSocket};
+use std::process::exit;
+use std::thread::sleep;
+use std::time::Duration;
+
+enum WalletCommand {
+    Address,
+    Balance,
+    AirDrop(i64),
+    Pay(i64, PublicKey),
+    Confirm(Signature),
+}
+
+#[derive(Debug, Clone)]
+enum WalletError {
+    CommandNotRecognized(String),
+    BadParameter(String),
+}
+
+impl fmt::Display for WalletError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "invalid")
+    }
+}
+
+impl error::Error for WalletError {
+    fn description(&self) -> &str {
+        "invalid"
+    }
+
+    fn cause(&self) -> Option<&error::Error> {
+        // Generic error, underlying cause isn't tracked.
+        None
+    }
+}
+
+struct WalletConfig {
+    leader: ReplicatedData,
+    id: Mint,
+    drone_addr: SocketAddr,
+    command: WalletCommand,
+}
+
+impl Default for WalletConfig {
+    fn default() -> WalletConfig {
+        let default_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
+        WalletConfig {
+            leader: ReplicatedData::new_leader(&default_addr.clone()),
+            id: Mint::new(0),
+            drone_addr: default_addr.clone(),
+            command: WalletCommand::Balance,
+        }
+    }
+}
+
+fn parse_args() -> Result<WalletConfig, Box<error::Error>> {
+    let matches = App::new("solana-wallet")
+        .arg(
+            Arg::with_name("leader")
+                .short("l")
+                .long("leader")
+                .value_name("PATH")
+                .takes_value(true)
+                .help("/path/to/leader.json"),
+        )
+        .arg(
+            Arg::with_name("mint")
+                .short("m")
+                .long("mint")
+                .value_name("PATH")
+                .takes_value(true)
+                .help("/path/to/mint.json"),
+        )
+        .subcommand(
+            SubCommand::with_name("airdrop")
+                .about("Request a batch of tokens")
+                .arg(
+                    Arg::with_name("tokens")
+                        // .index(1)
+                        .long("tokens")
+                        .value_name("NUMBER")
+                        .takes_value(true)
+                        .help("The number of tokens to request"),
+                ),
+        )
+        .subcommand(
+            SubCommand::with_name("pay")
+                .about("Send a payment")
+                .arg(
+                    Arg::with_name("tokens")
+                        // .index(2)
+                        .long("tokens")
+                        .value_name("NUMBER")
+                        .takes_value(true)
+                        .required(true)
+                        .help("the number of tokens to send"),
+                )
+                .arg(
+                    Arg::with_name("to")
+                        // .index(1)
+                        .long("to")
+                        .value_name("PUBKEY")
+                        .takes_value(true)
+                        .required(true)
+                        .help("The pubkey of recipient"),
+                ),
+        )
+        .subcommand(
+            SubCommand::with_name("confirm")
+                .about("Confirm your payment by signature")
+                .arg(
+                    Arg::with_name("signature")
+                        .index(1)
+                        .value_name("SIGNATURE")
+                        .required(true)
+                        .help("The transaction signature to confirm"),
+                ),
+        )
+        .subcommand(SubCommand::with_name("balance").about("Get your balance"))
+        .subcommand(SubCommand::with_name("address").about("Get your public key"))
+        .get_matches();
+
+    let leader: ReplicatedData;
+    if let Some(l) = matches.value_of("leader") {
+        leader = read_leader(l.to_string());
+    } else {
+        let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
+        leader = ReplicatedData::new_leader(&server_addr);
+    };
+
+    let id: Mint;
+    if let Some(m) = matches.value_of("mint") {
+        id = read_mint(m.to_string())?;
+    } else {
+        eprintln!("No mint found!");
+        exit(1);
+    };
+
+    let mut drone_addr = leader.transactions_addr.clone();
+    drone_addr.set_port(9900);
+
+    let command = match matches.subcommand() {
+        ("airdrop", Some(airdrop_matches)) => {
+            let mut tokens: i64 = id.tokens;
+            if airdrop_matches.is_present("tokens") {
+                tokens = airdrop_matches.value_of("tokens").unwrap().parse()?;
+            }
+            Ok(WalletCommand::AirDrop(tokens))
+        }
+        ("pay", Some(pay_matches)) => {
+            let to: PublicKey;
+            if pay_matches.is_present("to") {
+                let pubkey_vec = bs58::decode(pay_matches.value_of("to").unwrap())
+                    .into_vec()
+                    .expect("base58-encoded public key");
+
+                if pubkey_vec.len() != std::mem::size_of::<PublicKey>() {
+                    display_actions();
+                    Err(WalletError::BadParameter("Invalid public key".to_string()))?;
+                }
+                to = PublicKey::clone_from_slice(&pubkey_vec);
+            } else {
+                to = id.pubkey();
+            }
+            let mut tokens: i64 = id.tokens;
+            if pay_matches.is_present("tokens") {
+                tokens = pay_matches.value_of("tokens").unwrap().parse()?;
+            }
+            Ok(WalletCommand::Pay(tokens, to))
+        }
+        ("confirm", Some(confirm_matches)) => {
+            let sig_vec = bs58::decode(confirm_matches.value_of("signature").unwrap())
+                .into_vec()
+                .expect("base58-encoded signature");
+
+            if sig_vec.len() == std::mem::size_of::<Signature>() {
+                let sig = Signature::clone_from_slice(&sig_vec);
+                Ok(WalletCommand::Confirm(sig))
+            } else {
+                display_actions();
+                Err(WalletError::BadParameter("Invalid signature".to_string()))
+            }
+        }
+        ("balance", Some(_balance_matches)) => Ok(WalletCommand::Balance),
+        ("address", Some(_address_matches)) => Ok(WalletCommand::Address),
+        ("", None) => {
+            display_actions();
+            Err(WalletError::CommandNotRecognized(
+                "no subcommand given".to_string(),
+            ))
+        }
+        _ => unreachable!(),
+    }?;
+
+    Ok(WalletConfig {
+        leader,
+        id,
+        drone_addr, // TODO: Add an option for this.
+        command,
+    })
+}
+
+fn process_command(
+    config: &WalletConfig,
+    client: &mut ThinClient,
+) -> Result<(), Box<error::Error>> {
+    match config.command {
+        // Check client balance
+        WalletCommand::Address => {
+            println!("{}", bs58::encode(config.id.pubkey()).into_string());
+        }
+        WalletCommand::Balance => {
+            println!("Balance requested...");
+            let balance = client.poll_get_balance(&config.id.pubkey());
+            match balance {
+                Ok(balance) => {
+                    println!("Your balance is: {:?}", balance);
+                }
+                Err(ref e) if e.kind() == std::io::ErrorKind::Other => {
+                    println!("No account found! Request an airdrop to get started.");
+                }
+                Err(error) => {
+                    println!("An error occurred: {:?}", error);
+                }
+            }
+        }
+        // Request an airdrop from Solana Drone;
+        // Request amount is set in request_airdrop function
+        WalletCommand::AirDrop(tokens) => {
+            println!("Airdrop requested...");
+            println!("Airdropping {:?} tokens", tokens);
+            let _airdrop = request_airdrop(&config.drone_addr, &config.id, tokens as u64)?;
+            // TODO: return airdrop Result from Drone
+            sleep(Duration::from_millis(100));
+            println!(
+                "Your balance is: {:?}",
+                client.poll_get_balance(&config.id.pubkey()).unwrap()
+            );
+        }
+        // If client has positive balance, spend tokens in {balance} number of transactions
+        WalletCommand::Pay(tokens, to) => {
+            let last_id = client.get_last_id();
+            let sig = client.transfer(tokens, &config.id.keypair(), to, &last_id)?;
+            println!("{}", bs58::encode(sig).into_string());
+        }
+        // Confirm the last client transaction by signature
+        WalletCommand::Confirm(sig) => {
+            if client.check_signature(&sig) {
+                println!("Confirmed");
+            } else {
+                println!("Not found");
+            }
+        }
+    }
+    Ok(())
+}
+
+fn display_actions() {
+    println!("");
+    println!("Commands:");
+    println!(" address Get your public key");
+    println!(" balance Get your account balance");
+    println!(" airdrop Request a batch of tokens");
+    println!(" pay Send tokens to a public key");
+    println!(" confirm Confirm your last payment by signature");
+    println!("");
+}
+
+fn read_leader(path: String) -> ReplicatedData {
+    let file = File::open(path.clone()).expect(&format!("file not found: {}", path));
+    serde_json::from_reader(file).expect(&format!("failed to parse {}", path))
+}
+
+fn read_mint(path: String) -> Result<Mint, Box<error::Error>> {
+    let file = File::open(path.clone())?;
+    let mint = serde_json::from_reader(file)?;
+    Ok(mint)
+}
+
+fn mk_client(r: &ReplicatedData) -> io::Result<ThinClient> {
+    let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
+    let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
+    requests_socket
+        .set_read_timeout(Some(Duration::new(1, 0)))
+        .unwrap();
+
+    Ok(ThinClient::new(
+        r.requests_addr,
+        requests_socket,
+        r.transactions_addr,
+        transactions_socket,
+    ))
+}
+
+fn request_airdrop(
+    drone_addr: &SocketAddr,
+    id: &Mint,
+    tokens: u64,
+) -> Result<(), Box<error::Error>> {
+    let mut stream = TcpStream::connect(drone_addr)?;
+    let req = DroneRequest::GetAirdrop {
+        airdrop_request_amount: tokens,
+        client_public_key: id.pubkey(),
+    };
+    let tx = serialize(&req).expect("serialize drone request");
+    stream.write_all(&tx).unwrap();
+    // TODO: add timeout to this function, in case of unresponsive drone
+    Ok(())
+}
+
+fn main() -> Result<(), Box<error::Error>> {
+    env_logger::init();
+    let config = parse_args()?;
+    let mut client = mk_client(&config.leader)?;
+    process_command(&config, &mut client)
+}
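The `pay` and `confirm` subcommands in the new wallet above accept base58-encoded arguments and validate them purely by decoded length (size_of::<PublicKey>() and size_of::<Signature>()). The standalone snippet below is not part of the diff; it illustrates that round trip using only the bs58 crate, with a dummy 32-byte value standing in for a public key:

    extern crate bs58;

    fn main() {
        // Encode a dummy 32-byte "public key" and decode it again.
        let encoded = bs58::encode([7u8; 32]).into_string();
        let decoded = bs58::decode(&encoded).into_vec().expect("valid base58");

        // A well-formed public key decodes to exactly 32 bytes; anything else is
        // rejected by the wallet with WalletError::BadParameter.
        assert_eq!(decoded.len(), 32);
        println!("{} -> {} bytes", encoded, decoded.len());
    }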
@@ -1,15 +1,14 @@
 //! The `blob_fetch_stage` pulls blobs from UDP sockets and sends it to a channel.

-use packet;
+use packet::BlobRecycler;
 use std::net::UdpSocket;
 use std::sync::atomic::AtomicBool;
 use std::sync::mpsc::channel;
 use std::sync::Arc;
 use std::thread::JoinHandle;
-use streamer;
+use streamer::{self, BlobReceiver};

 pub struct BlobFetchStage {
-    pub blob_receiver: streamer::BlobReceiver,
     pub thread_hdls: Vec<JoinHandle<()>>,
 }

@@ -17,15 +16,15 @@ impl BlobFetchStage {
     pub fn new(
         socket: UdpSocket,
         exit: Arc<AtomicBool>,
-        blob_recycler: packet::BlobRecycler,
-    ) -> Self {
+        blob_recycler: BlobRecycler,
+    ) -> (Self, BlobReceiver) {
         Self::new_multi_socket(vec![socket], exit, blob_recycler)
     }
     pub fn new_multi_socket(
         sockets: Vec<UdpSocket>,
         exit: Arc<AtomicBool>,
-        blob_recycler: packet::BlobRecycler,
-    ) -> Self {
+        blob_recycler: BlobRecycler,
+    ) -> (Self, BlobReceiver) {
         let (blob_sender, blob_receiver) = channel();
         let thread_hdls: Vec<_> = sockets
             .into_iter()
@@ -39,9 +38,6 @@ impl BlobFetchStage {
             })
             .collect();

-        BlobFetchStage {
-            blob_receiver,
-            thread_hdls,
-        }
+        (BlobFetchStage { thread_hdls }, blob_receiver)
     }
 }
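The BlobFetchStage change above drops the stored `blob_receiver` field and returns the receiver alongside the stage instead. A hedged sketch, not part of the diff, of how a caller consumes the new `(Self, BlobReceiver)` tuple; the crate-internal module paths are taken from the imports in this file:

    use std::net::UdpSocket;
    use std::sync::atomic::AtomicBool;
    use std::sync::Arc;

    use blob_fetch_stage::BlobFetchStage;
    use packet::BlobRecycler;
    use streamer::BlobReceiver;

    // Hypothetical wrapper showing the tuple-returning constructor in use.
    fn start_blob_fetch(
        socket: UdpSocket,
        exit: Arc<AtomicBool>,
        blob_recycler: BlobRecycler,
    ) -> (BlobFetchStage, BlobReceiver) {
        // The stage keeps only its thread handles; the receiver is handed back
        // to the caller, which wires it into the next stage of the pipeline.
        BlobFetchStage::new(socket, exit, blob_recycler)
    }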
333 src/choose_gossip_peer_strategy.rs Normal file
@@ -0,0 +1,333 @@
+use crdt::ReplicatedData;
+use rand::distributions::{Distribution, Weighted, WeightedChoice};
+use rand::thread_rng;
+use result::{Error, Result};
+use signature::PublicKey;
+use std;
+use std::collections::HashMap;
+
+pub const DEFAULT_WEIGHT: u32 = 1;
+
+pub trait ChooseGossipPeerStrategy {
+    fn choose_peer<'a>(&self, options: Vec<&'a ReplicatedData>) -> Result<&'a ReplicatedData>;
+}
+
+pub struct ChooseRandomPeerStrategy<'a> {
+    random: &'a Fn() -> u64,
+}
+
+// Given a source of randomness "random", this strategy will randomly pick a validator
+// from the input options. This strategy works in isolation, but doesn't leverage any
+// rumors from the rest of the gossip network to make more informed decisions about
+// which validators have more/less updates
+impl<'a, 'b> ChooseRandomPeerStrategy<'a> {
+    pub fn new(random: &'a Fn() -> u64) -> Self {
+        ChooseRandomPeerStrategy { random }
+    }
+}
+
+impl<'a> ChooseGossipPeerStrategy for ChooseRandomPeerStrategy<'a> {
+    fn choose_peer<'b>(&self, options: Vec<&'b ReplicatedData>) -> Result<&'b ReplicatedData> {
+        if options.is_empty() {
+            return Err(Error::CrdtTooSmall);
+        }
+
+        let n = ((self.random)() as usize) % options.len();
+        Ok(options[n])
+    }
+}
+
+// This strategy uses rumors accumulated from the rest of the network to weight
+// the importance of communicating with a particular validator based on cumulative network
+// perceiption of the number of updates the validator has to offer. A validator is randomly
+// picked based on a weighted sample from the pool of viable choices. The "weight", w, of a
+// particular validator "v" is calculated as follows:
+//
+// w = [Sum for all i in I_v: (rumor_v(i) - observed(v)) * stake(i)] /
+//     [Sum for all i in I_v: Sum(stake(i))]
+//
+// where I_v is the set of all validators that returned a rumor about the update_index of
+// validator "v", stake(i) is the size of the stake of validator "i", observed(v) is the
+// observed update_index from the last direct communication validator "v", and
+// rumor_v(i) is the rumored update_index of validator "v" propagated by fellow validator "i".
+
+// This could be a problem if there are validators with large stakes lying about their
+// observed updates. There could also be a problem in network partitions, or even just
+// when certain validators are disproportionately active, where we hear more rumors about
+// certain clusters of nodes that then propagate more rumros about each other. Hopefully
+// this can be resolved with a good baseline DEFAULT_WEIGHT, or by implementing lockout
+// periods for very active validators in the future.
+
+pub struct ChooseWeightedPeerStrategy<'a> {
+    // The map of last directly observed update_index for each active validator.
+    // This is how we get observed(v) from the formula above.
+    remote: &'a HashMap<PublicKey, u64>,
+    // The map of rumored update_index for each active validator. Using the formula above,
+    // to find rumor_v(i), we would first look up "v" in the outer map, then look up
+    // "i" in the inner map, i.e. look up external_liveness[v][i]
+    external_liveness: &'a HashMap<PublicKey, HashMap<PublicKey, u64>>,
+    // A function returning the size of the stake for a particular validator, corresponds
+    // to stake(i) in the formula above.
+    get_stake: &'a Fn(PublicKey) -> f64,
+}
+
+impl<'a> ChooseWeightedPeerStrategy<'a> {
+    pub fn new(
+        remote: &'a HashMap<PublicKey, u64>,
+        external_liveness: &'a HashMap<PublicKey, HashMap<PublicKey, u64>>,
+        get_stake: &'a Fn(PublicKey) -> f64,
+    ) -> Self {
+        ChooseWeightedPeerStrategy {
+            remote,
+            external_liveness,
+            get_stake,
+        }
+    }
+
+    fn calculate_weighted_remote_index(&self, peer_id: PublicKey) -> u32 {
+        let mut last_seen_index = 0;
+        // If the peer is not in our remote table, then we leave last_seen_index as zero.
+        // Only happens when a peer appears in our crdt.table but not in our crdt.remote,
+        // which means a validator was directly injected into our crdt.table
+        if let Some(index) = self.remote.get(&peer_id) {
+            last_seen_index = *index;
+        }
+
+        let liveness_entry = self.external_liveness.get(&peer_id);
+        if liveness_entry.is_none() {
+            return DEFAULT_WEIGHT;
+        }
+
+        let votes = liveness_entry.unwrap();
+
+        if votes.is_empty() {
+            return DEFAULT_WEIGHT;
+        }
+
+        // Calculate the weighted average of the rumors
+        let mut relevant_votes = vec![];
+
+        let total_stake = votes.iter().fold(0.0, |total_stake, (&id, &vote)| {
+            let stake = (self.get_stake)(id);
+            // If the total stake is going to overflow u64, pick
+            // the larger of either the current total_stake, or the
+            // new stake, this way we are guaranteed to get at least u64/2
+            // sample of stake in our weighted calculation
+            if std::f64::MAX - total_stake < stake {
+                if stake > total_stake {
+                    relevant_votes = vec![(stake, vote)];
+                    stake
+                } else {
+                    total_stake
+                }
+            } else {
+                relevant_votes.push((stake, vote));
+                total_stake + stake
+            }
+        });
+
+        let weighted_vote = relevant_votes.iter().fold(0.0, |sum, &(stake, vote)| {
+            if vote < last_seen_index {
+                // This should never happen because we maintain the invariant that the indexes
+                // in the external_liveness table are always greater than the corresponding
+                // indexes in the remote table, if the index exists in the remote table at all.
+
+                // Case 1: Attempt to insert bigger index into the "external_liveness" table
+                // happens after an insertion into the "remote" table. In this case,
+                // (see apply_updates()) function, we prevent the insertion if the entry
+                // in the remote table >= the atempted insertion into the "external" liveness
+                // table.
+
+                // Case 2: Bigger index in the "external_liveness" table inserted before
+                // a smaller insertion into the "remote" table. We clear the corresponding
+                // "external_liveness" table entry on all insertions into the "remote" table
+                // See apply_updates() function.
+
+                warn!("weighted peer index was smaller than local entry in remote table");
+                return sum;
+            }
+
+            let vote_difference = (vote - last_seen_index) as f64;
+            let new_weight = vote_difference * (stake / total_stake);
+
+            if std::f64::MAX - sum < new_weight {
+                return f64::max(new_weight, sum);
+            }
+
+            sum + new_weight
+        });
+
+        // Return u32 b/c the weighted sampling API from rand::distributions
+        // only takes u32 for weights
+        if weighted_vote >= std::u32::MAX as f64 {
+            return std::u32::MAX;
+        }
+
+        // If the weighted rumors we've heard about aren't any greater than
+        // what we've directly learned from the last time we communicated with the
+        // peer (i.e. weighted_vote == 0), then return a weight of 1.
+        // Otherwise, return the calculated weight.
+        weighted_vote as u32 + DEFAULT_WEIGHT
+    }
+}
+
+impl<'a> ChooseGossipPeerStrategy for ChooseWeightedPeerStrategy<'a> {
+    fn choose_peer<'b>(&self, options: Vec<&'b ReplicatedData>) -> Result<&'b ReplicatedData> {
+        if options.len() < 1 {
+            return Err(Error::CrdtTooSmall);
+        }
+
+        let mut weighted_peers = vec![];
+        for peer in options {
+            let weight = self.calculate_weighted_remote_index(peer.id);
+            weighted_peers.push(Weighted { weight, item: peer });
+        }
+
+        let mut rng = thread_rng();
+        Ok(WeightedChoice::new(&mut weighted_peers).sample(&mut rng))
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use choose_gossip_peer_strategy::{ChooseWeightedPeerStrategy, DEFAULT_WEIGHT};
+    use logger;
+    use signature::{KeyPair, KeyPairUtil, PublicKey};
+    use std;
+    use std::collections::HashMap;
+
+    fn get_stake(_id: PublicKey) -> f64 {
+        1.0
+    }
+
+    #[test]
+    fn test_default() {
+        logger::setup();
+
+        // Initialize the filler keys
+        let key1 = KeyPair::new().pubkey();
+
+        let remote: HashMap<PublicKey, u64> = HashMap::new();
+        let external_liveness: HashMap<PublicKey, HashMap<PublicKey, u64>> = HashMap::new();
+
+        let weighted_strategy =
+            ChooseWeightedPeerStrategy::new(&remote, &external_liveness, &get_stake);
+
+        // If external_liveness table doesn't contain this entry,
+        // return the default weight
+        let result = weighted_strategy.calculate_weighted_remote_index(key1);
+        assert_eq!(result, DEFAULT_WEIGHT);
+    }
+
+    #[test]
+    fn test_only_external_liveness() {
+        logger::setup();
+
+        // Initialize the filler keys
+        let key1 = KeyPair::new().pubkey();
+        let key2 = KeyPair::new().pubkey();
+
+        let remote: HashMap<PublicKey, u64> = HashMap::new();
+        let mut external_liveness: HashMap<PublicKey, HashMap<PublicKey, u64>> = HashMap::new();
+
+        // If only the liveness table contains the entry, should return the
+        // weighted liveness entries
+        let test_value: u32 = 5;
+        let mut rumors: HashMap<PublicKey, u64> = HashMap::new();
+        rumors.insert(key2, test_value as u64);
+        external_liveness.insert(key1, rumors);
+
+        let weighted_strategy =
+            ChooseWeightedPeerStrategy::new(&remote, &external_liveness, &get_stake);
+
+        let result = weighted_strategy.calculate_weighted_remote_index(key1);
+        assert_eq!(result, test_value + DEFAULT_WEIGHT);
+    }
+
+    #[test]
+    fn test_overflow_votes() {
+        logger::setup();
+
+        // Initialize the filler keys
+        let key1 = KeyPair::new().pubkey();
+        let key2 = KeyPair::new().pubkey();
+
+        let remote: HashMap<PublicKey, u64> = HashMap::new();
+        let mut external_liveness: HashMap<PublicKey, HashMap<PublicKey, u64>> = HashMap::new();
+
+        // If the vote index is greater than u32::MAX, default to u32::MAX
+        let test_value = (std::u32::MAX as u64) + 10;
+        let mut rumors: HashMap<PublicKey, u64> = HashMap::new();
+        rumors.insert(key2, test_value);
+        external_liveness.insert(key1, rumors);
+
+        let weighted_strategy =
+            ChooseWeightedPeerStrategy::new(&remote, &external_liveness, &get_stake);
+
+        let result = weighted_strategy.calculate_weighted_remote_index(key1);
+        assert_eq!(result, std::u32::MAX);
+    }
+
+    #[test]
+    fn test_many_validators() {
+        logger::setup();
+
+        // Initialize the filler keys
+        let key1 = KeyPair::new().pubkey();
+
+        let mut remote: HashMap<PublicKey, u64> = HashMap::new();
+        let mut external_liveness: HashMap<PublicKey, HashMap<PublicKey, u64>> = HashMap::new();
+
+        // Test many validators' rumors in external_liveness
+        let num_peers = 10;
+        let mut rumors: HashMap<PublicKey, u64> = HashMap::new();
+
+        remote.insert(key1, 0);
+
+        for i in 0..num_peers {
+            let pk = KeyPair::new().pubkey();
+            rumors.insert(pk, i);
+        }
+
+        external_liveness.insert(key1, rumors);
+
+        let weighted_strategy =
+            ChooseWeightedPeerStrategy::new(&remote, &external_liveness, &get_stake);
+
+        let result = weighted_strategy.calculate_weighted_remote_index(key1);
+        assert_eq!(result, (num_peers / 2) as u32);
+    }
+
+    #[test]
+    fn test_many_validators2() {
+        logger::setup();
+
+        // Initialize the filler keys
+        let key1 = KeyPair::new().pubkey();
+
+        let mut remote: HashMap<PublicKey, u64> = HashMap::new();
+        let mut external_liveness: HashMap<PublicKey, HashMap<PublicKey, u64>> = HashMap::new();
+
+        // Test many validators' rumors in external_liveness
+        let num_peers = 10;
+        let old_index = 20;
+        let mut rumors: HashMap<PublicKey, u64> = HashMap::new();
+
+        remote.insert(key1, old_index);
+
+        for _i in 0..num_peers {
+            let pk = KeyPair::new().pubkey();
+            rumors.insert(pk, old_index);
+        }
+
+        external_liveness.insert(key1, rumors);
+
+        let weighted_strategy =
+            ChooseWeightedPeerStrategy::new(&remote, &external_liveness, &get_stake);
+
+        let result = weighted_strategy.calculate_weighted_remote_index(key1);
+
+        // If nobody has seen a newer update then revert to default
+        assert_eq!(result, DEFAULT_WEIGHT);
+    }
+}
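Restated as a formula, the weight that ChooseWeightedPeerStrategy assigns to a peer v, as described in the comment block near the top of the new file above, is (this restatement is editorial, not part of the source):

    w_v = \frac{\sum_{i \in I_v} \bigl(\mathrm{rumor}_v(i) - \mathrm{observed}(v)\bigr) \cdot \mathrm{stake}(i)}{\sum_{i \in I_v} \mathrm{stake}(i)}

With get_stake stubbed to 1.0 (as it is in crdt.rs below until staking lands), every rumor contributes equally and the weight reduces to the average rumored lead over the last directly observed update_index, plus DEFAULT_WEIGHT. The draw in choose_peer then feeds those weights into rand's WeightedChoice distribution. The standalone snippet below is not part of the diff; it shows the same sampling pattern with plain string items, using only the rand 0.5-era API already imported by this file:

    extern crate rand;

    use rand::distributions::{Distribution, Weighted, WeightedChoice};
    use rand::thread_rng;

    fn main() {
        // Peers "a", "b" and "c" with weights 1, 3 and 6: "c" is sampled ~60% of the time.
        let mut peers = vec![
            Weighted { weight: 1u32, item: "a" },
            Weighted { weight: 3u32, item: "b" },
            Weighted { weight: 6u32, item: "c" },
        ];
        let weighted_choice = WeightedChoice::new(&mut peers);
        let mut rng = thread_rng();
        println!("picked {}", weighted_choice.sample(&mut rng));
    }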
153
src/crdt.rs
153
src/crdt.rs
@@ -15,6 +15,7 @@
|
|||||||
|
|
||||||
use bincode::{deserialize, serialize};
|
use bincode::{deserialize, serialize};
|
||||||
use byteorder::{LittleEndian, ReadBytesExt};
|
use byteorder::{LittleEndian, ReadBytesExt};
|
||||||
|
use choose_gossip_peer_strategy::{ChooseGossipPeerStrategy, ChooseWeightedPeerStrategy};
|
||||||
use hash::Hash;
|
use hash::Hash;
|
||||||
use packet::{to_blob, Blob, BlobRecycler, SharedBlob, BLOB_SIZE};
|
use packet::{to_blob, Blob, BlobRecycler, SharedBlob, BLOB_SIZE};
|
||||||
use pnet_datalink as datalink;
|
use pnet_datalink as datalink;
|
||||||
@@ -36,6 +37,7 @@ use timing::timestamp;
|
|||||||
|
|
||||||
/// milliseconds we sleep for between gossip requests
|
/// milliseconds we sleep for between gossip requests
|
||||||
const GOSSIP_SLEEP_MILLIS: u64 = 100;
|
const GOSSIP_SLEEP_MILLIS: u64 = 100;
|
||||||
|
//const GOSSIP_MIN_PURGE_MILLIS: u64 = 15000;
|
||||||
|
|
||||||
/// minimum membership table size before we start purging dead nodes
|
/// minimum membership table size before we start purging dead nodes
|
||||||
const MIN_TABLE_SIZE: usize = 2;
|
const MIN_TABLE_SIZE: usize = 2;
|
||||||
@@ -190,6 +192,7 @@ pub struct Crdt {
|
|||||||
pub alive: HashMap<PublicKey, u64>,
|
pub alive: HashMap<PublicKey, u64>,
|
||||||
pub update_index: u64,
|
pub update_index: u64,
|
||||||
pub me: PublicKey,
|
pub me: PublicKey,
|
||||||
|
external_liveness: HashMap<PublicKey, HashMap<PublicKey, u64>>,
|
||||||
}
|
}
|
||||||
// TODO These messages should be signed, and go through the gpu pipeline for spam filtering
|
// TODO These messages should be signed, and go through the gpu pipeline for spam filtering
|
||||||
#[derive(Serialize, Deserialize, Debug)]
|
#[derive(Serialize, Deserialize, Debug)]
|
||||||
@@ -200,7 +203,7 @@ enum Protocol {
|
|||||||
RequestUpdates(u64, ReplicatedData),
|
RequestUpdates(u64, ReplicatedData),
|
||||||
//TODO might need a since?
|
//TODO might need a since?
|
||||||
/// from id, form's last update index, ReplicatedData
|
/// from id, form's last update index, ReplicatedData
|
||||||
ReceiveUpdates(PublicKey, u64, Vec<ReplicatedData>),
|
ReceiveUpdates(PublicKey, u64, Vec<ReplicatedData>, Vec<(PublicKey, u64)>),
|
||||||
/// ask for a missing index
|
/// ask for a missing index
|
||||||
RequestWindowIndex(ReplicatedData, u64),
|
RequestWindowIndex(ReplicatedData, u64),
|
||||||
}
|
}
|
||||||
@@ -213,6 +216,7 @@ impl Crdt {
|
|||||||
local: HashMap::new(),
|
local: HashMap::new(),
|
||||||
remote: HashMap::new(),
|
remote: HashMap::new(),
|
||||||
alive: HashMap::new(),
|
alive: HashMap::new(),
|
||||||
|
external_liveness: HashMap::new(),
|
||||||
me: me.id,
|
me: me.id,
|
||||||
update_index: 1,
|
update_index: 1,
|
||||||
};
|
};
|
||||||
@@ -223,17 +227,22 @@ impl Crdt {
|
|||||||
pub fn my_data(&self) -> &ReplicatedData {
|
pub fn my_data(&self) -> &ReplicatedData {
|
||||||
&self.table[&self.me]
|
&self.table[&self.me]
|
||||||
}
|
}
|
||||||
pub fn leader_data(&self) -> &ReplicatedData {
|
pub fn leader_data(&self) -> Option<&ReplicatedData> {
|
||||||
&self.table[&self.table[&self.me].current_leader_id]
|
self.table.get(&(self.table[&self.me].current_leader_id))
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn set_leader(&mut self, key: PublicKey) -> () {
|
pub fn set_leader(&mut self, key: PublicKey) -> () {
|
||||||
let mut me = self.my_data().clone();
|
let mut me = self.my_data().clone();
|
||||||
me.current_leader_id = key;
|
me.current_leader_id = key;
|
||||||
me.version += 1;
|
me.version += 1;
|
||||||
|
info!("setting leader to {:?}", &key[..4]);
|
||||||
self.insert(&me);
|
self.insert(&me);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn get_external_liveness_entry(&self, key: &PublicKey) -> Option<&HashMap<PublicKey, u64>> {
|
||||||
|
self.external_liveness.get(key)
|
||||||
|
}
|
||||||
|
|
||||||
pub fn insert(&mut self, v: &ReplicatedData) {
|
pub fn insert(&mut self, v: &ReplicatedData) {
|
||||||
// TODO check that last_verified types are always increasing
|
// TODO check that last_verified types are always increasing
|
||||||
if self.table.get(&v.id).is_none() || (v.version > self.table[&v.id].version) {
|
if self.table.get(&v.id).is_none() || (v.version > self.table[&v.id].version) {
|
||||||
@@ -270,9 +279,12 @@ impl Crdt {
|
|||||||
if self.table.len() <= MIN_TABLE_SIZE {
|
if self.table.len() <= MIN_TABLE_SIZE {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
//wait for 4x as long as it would randomly take to reach our node
|
//wait for 4x as long as it would randomly take to reach our node
|
||||||
//assuming everyone is waiting the same amount of time as this node
|
//assuming everyone is waiting the same amount of time as this node
|
||||||
let limit = self.table.len() as u64 * GOSSIP_SLEEP_MILLIS * 4;
|
let limit = self.table.len() as u64 * GOSSIP_SLEEP_MILLIS * 4;
|
||||||
|
//let limit = std::cmp::max(limit, GOSSIP_MIN_PURGE_MILLIS);
|
||||||
|
|
||||||
let dead_ids: Vec<PublicKey> = self.alive
|
let dead_ids: Vec<PublicKey> = self.alive
|
||||||
.iter()
|
.iter()
|
||||||
.filter_map(|(&k, v)| {
|
.filter_map(|(&k, v)| {
|
||||||
@@ -285,11 +297,16 @@ impl Crdt {
|
|||||||
}
|
}
|
||||||
})
|
})
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
for id in dead_ids.iter() {
|
for id in dead_ids.iter() {
|
||||||
self.alive.remove(id);
|
self.alive.remove(id);
|
||||||
self.table.remove(id);
|
self.table.remove(id);
|
||||||
self.remote.remove(id);
|
self.remote.remove(id);
|
||||||
self.local.remove(id);
|
self.local.remove(id);
|
||||||
|
self.external_liveness.remove(id);
|
||||||
|
for map in self.external_liveness.values_mut() {
|
||||||
|
map.remove(id);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -473,6 +490,12 @@ impl Crdt {
|
|||||||
rdr.read_u64::<LittleEndian>()
|
rdr.read_u64::<LittleEndian>()
|
||||||
.expect("rdr.read_u64 in fn random")
|
.expect("rdr.read_u64 in fn random")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TODO: fill in with real implmentation once staking is implemented
|
||||||
|
fn get_stake(_id: PublicKey) -> f64 {
|
||||||
|
1.0
|
||||||
|
}
|
||||||
|
|
||||||
fn get_updates_since(&self, v: u64) -> (PublicKey, u64, Vec<ReplicatedData>) {
|
fn get_updates_since(&self, v: u64) -> (PublicKey, u64, Vec<ReplicatedData>) {
|
||||||
//trace!("get updates since {}", v);
|
//trace!("get updates since {}", v);
|
||||||
let data = self.table
|
let data = self.table
|
||||||
@@ -508,7 +531,18 @@ impl Crdt {
|
|||||||
/// * B - RequestUpdates protocol message
|
/// * B - RequestUpdates protocol message
|
||||||
fn gossip_request(&self) -> Result<(SocketAddr, Protocol)> {
|
fn gossip_request(&self) -> Result<(SocketAddr, Protocol)> {
|
||||||
let options: Vec<_> = self.table.values().filter(|v| v.id != self.me).collect();
|
let options: Vec<_> = self.table.values().filter(|v| v.id != self.me).collect();
|
||||||
if options.len() < 1 {
|
|
||||||
|
let choose_peer_strategy = ChooseWeightedPeerStrategy::new(
|
||||||
|
&self.remote,
|
||||||
|
&self.external_liveness,
|
||||||
|
&Self::get_stake,
|
||||||
|
);
|
||||||
|
|
||||||
|
let choose_peer_result = choose_peer_strategy.choose_peer(options);
|
||||||
|
|
||||||
|
let v = match choose_peer_result {
|
||||||
|
Ok(peer) => peer,
|
||||||
|
Err(Error::CrdtTooSmall) => {
|
||||||
trace!(
|
trace!(
|
||||||
"crdt too small for gossip {:?} {}",
|
"crdt too small for gossip {:?} {}",
|
||||||
&self.me[..4],
|
&self.me[..4],
|
||||||
@@ -516,8 +550,9 @@ impl Crdt {
|
|||||||
);
|
);
|
||||||
return Err(Error::CrdtTooSmall);
|
return Err(Error::CrdtTooSmall);
|
||||||
}
|
}
|
||||||
let n = (Self::random() as usize) % options.len();
|
Err(e) => return Err(e),
|
||||||
let v = options[n].clone();
|
};
|
||||||
|
|
||||||
let remote_update_index = *self.remote.get(&v.id).unwrap_or(&0);
|
let remote_update_index = *self.remote.get(&v.id).unwrap_or(&0);
|
||||||
let req = Protocol::RequestUpdates(remote_update_index, self.table[&self.me].clone());
|
let req = Protocol::RequestUpdates(remote_update_index, self.table[&self.me].clone());
|
||||||
trace!(
|
trace!(
|
||||||
@@ -526,6 +561,7 @@ impl Crdt {
|
|||||||
&v.id[..4],
|
&v.id[..4],
|
||||||
v.gossip_addr
|
v.gossip_addr
|
||||||
);
|
);
|
||||||
|
|
||||||
Ok((v.gossip_addr, req))
|
Ok((v.gossip_addr, req))
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -543,6 +579,7 @@ impl Crdt {
|
|||||||
let (remote_gossip_addr, req) = obj.read()
|
let (remote_gossip_addr, req) = obj.read()
|
||||||
.expect("'obj' read lock in fn run_gossip")
|
.expect("'obj' read lock in fn run_gossip")
|
||||||
.gossip_request()?;
|
.gossip_request()?;
|
||||||
|
|
||||||
// TODO this will get chatty, so we need to first ask for number of updates since
|
// TODO this will get chatty, so we need to first ask for number of updates since
|
||||||
// then only ask for specific data that we dont have
|
// then only ask for specific data that we dont have
|
||||||
let blob = to_blob(req, remote_gossip_addr, blob_recycler)?;
|
let blob = to_blob(req, remote_gossip_addr, blob_recycler)?;
|
||||||
@@ -562,6 +599,9 @@ impl Crdt {
|
|||||||
trace!("leader {:?} {}", &v.current_leader_id[..4], *cnt);
|
trace!("leader {:?} {}", &v.current_leader_id[..4], *cnt);
|
||||||
}
|
}
|
||||||
let mut sorted: Vec<(&PublicKey, usize)> = table.into_iter().collect();
|
let mut sorted: Vec<(&PublicKey, usize)> = table.into_iter().collect();
|
||||||
|
if sorted.len() > 0 {
|
||||||
|
info!("sorted leaders {:?}", sorted);
|
||||||
|
}
|
||||||
sorted.sort_by_key(|a| a.1);
|
sorted.sort_by_key(|a| a.1);
|
||||||
sorted.last().map(|a| *a.0)
|
sorted.last().map(|a| *a.0)
|
||||||
}
|
}
|
||||||
@@ -583,14 +623,43 @@ impl Crdt {
|
|||||||
/// * `from` - identity of the sender of the updates
|
/// * `from` - identity of the sender of the updates
|
||||||
/// * `update_index` - the number of updates that `from` has completed and this set of `data` represents
|
/// * `update_index` - the number of updates that `from` has completed and this set of `data` represents
|
||||||
/// * `data` - the update data
|
/// * `data` - the update data
|
||||||
fn apply_updates(&mut self, from: PublicKey, update_index: u64, data: &[ReplicatedData]) {
|
fn apply_updates(
|
||||||
|
&mut self,
|
||||||
|
from: PublicKey,
|
||||||
|
update_index: u64,
|
||||||
|
data: &[ReplicatedData],
|
||||||
|
external_liveness: &[(PublicKey, u64)],
|
||||||
|
) {
|
||||||
trace!("got updates {}", data.len());
|
trace!("got updates {}", data.len());
|
||||||
// TODO we need to punish/spam resist here
|
// TODO we need to punish/spam resist here
|
||||||
// sig verify the whole update and slash anyone who sends a bad update
|
// sig verify the whole update and slash anyone who sends a bad update
|
||||||
for v in data {
|
for v in data {
|
||||||
self.insert(&v);
|
self.insert(&v);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
for (pk, external_remote_index) in external_liveness {
|
||||||
|
let remote_entry = if let Some(v) = self.remote.get(pk) {
|
||||||
|
*v
|
||||||
|
} else {
|
||||||
|
0
|
||||||
|
};
|
||||||
|
|
||||||
|
if remote_entry >= *external_remote_index {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
let liveness_entry = self.external_liveness.entry(*pk).or_insert(HashMap::new());
|
||||||
|
let peer_index = *liveness_entry.entry(from).or_insert(*external_remote_index);
|
||||||
|
if *external_remote_index > peer_index {
|
||||||
|
liveness_entry.insert(from, *external_remote_index);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
*self.remote.entry(from).or_insert(update_index) = update_index;
|
*self.remote.entry(from).or_insert(update_index) = update_index;
|
||||||
|
|
||||||
|
// Clear the remote liveness table for this node, b/c we've heard directly from them
|
||||||
|
// so we don't need to rely on rumors
|
||||||
|
self.external_liveness.remove(&from);
|
||||||
}
|
}
|
||||||
|
|
||||||
/// randomly pick a node and ask them for updates asynchronously
|
/// randomly pick a node and ask them for updates asynchronously
|
||||||
@@ -664,7 +733,10 @@ impl Crdt {
|
|||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
assert!(window.read().unwrap()[pos].is_none());
|
assert!(window.read().unwrap()[pos].is_none());
|
||||||
info!("failed RequestWindowIndex {} {}", ix, from.repair_addr);
|
info!(
|
||||||
|
"failed RequestWindowIndex {} {} {}",
|
||||||
|
ix, pos, from.repair_addr
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
None
|
None
|
||||||
@@ -682,13 +754,17 @@ impl Crdt {
|
|||||||
Ok(Protocol::RequestUpdates(v, from_rd)) => {
|
Ok(Protocol::RequestUpdates(v, from_rd)) => {
|
||||||
trace!("RequestUpdates {}", v);
|
trace!("RequestUpdates {}", v);
|
||||||
let addr = from_rd.gossip_addr;
|
let addr = from_rd.gossip_addr;
|
||||||
// only lock for this call, dont lock during IO `sock.send_to` or `sock.recv_from`
|
let me = obj.read().unwrap();
|
||||||
let (from, ups, data) = obj.read()
|
// only lock for these two calls, dont lock during IO `sock.send_to` or `sock.recv_from`
|
||||||
.expect("'obj' read lock in RequestUpdates")
|
let (from, ups, data) = me.get_updates_since(v);
|
||||||
.get_updates_since(v);
|
let external_liveness = me.remote
|
||||||
|
.iter()
|
||||||
|
.map(|(k, v)| (k.clone(), v.clone()))
|
||||||
|
.collect();
|
||||||
|
drop(me);
|
||||||
trace!("get updates since response {} {}", v, data.len());
|
trace!("get updates since response {} {}", v, data.len());
|
||||||
let len = data.len();
|
let len = data.len();
|
||||||
let rsp = Protocol::ReceiveUpdates(from, ups, data);
|
let rsp = Protocol::ReceiveUpdates(from, ups, data, external_liveness);
|
||||||
obj.write().unwrap().insert(&from_rd);
|
obj.write().unwrap().insert(&from_rd);
|
||||||
if len < 1 {
|
if len < 1 {
|
||||||
let me = obj.read().unwrap();
|
let me = obj.read().unwrap();
|
||||||
@@ -713,11 +789,11 @@ impl Crdt {
|
|||||||
None
|
None
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
Ok(Protocol::ReceiveUpdates(from, ups, data)) => {
|
Ok(Protocol::ReceiveUpdates(from, ups, data, external_liveness)) => {
|
||||||
trace!("ReceivedUpdates {:?} {} {}", &from[0..4], ups, data.len());
|
trace!("ReceivedUpdates {:?} {} {}", &from[0..4], ups, data.len());
|
||||||
obj.write()
|
obj.write()
|
||||||
.expect("'obj' write lock in ReceiveUpdates")
|
.expect("'obj' write lock in ReceiveUpdates")
|
||||||
.apply_updates(from, ups, &data);
|
.apply_updates(from, ups, &data, &external_liveness);
|
||||||
None
|
None
|
||||||
}
|
}
|
||||||
Ok(Protocol::RequestWindowIndex(from, ix)) => {
|
Ok(Protocol::RequestWindowIndex(from, ix)) => {
|
||||||
@@ -849,6 +925,51 @@ impl TestNode {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
pub fn new_with_bind_addr(data: ReplicatedData, bind_addr: SocketAddr) -> TestNode {
|
||||||
|
let mut local_gossip_addr = bind_addr.clone();
|
||||||
|
local_gossip_addr.set_port(data.gossip_addr.port());
|
||||||
|
|
||||||
|
let mut local_replicate_addr = bind_addr.clone();
|
||||||
|
local_replicate_addr.set_port(data.replicate_addr.port());
|
||||||
|
|
||||||
|
let mut local_requests_addr = bind_addr.clone();
|
||||||
|
local_requests_addr.set_port(data.requests_addr.port());
|
||||||
|
|
||||||
|
let mut local_transactions_addr = bind_addr.clone();
|
||||||
|
local_transactions_addr.set_port(data.transactions_addr.port());
|
||||||
|
|
||||||
|
let mut local_repair_addr = bind_addr.clone();
|
||||||
|
local_repair_addr.set_port(data.repair_addr.port());
|
||||||
|
|
||||||
|
let transaction = UdpSocket::bind(local_transactions_addr).unwrap();
|
||||||
|
let gossip = UdpSocket::bind(local_gossip_addr).unwrap();
|
||||||
|
let replicate = UdpSocket::bind(local_replicate_addr).unwrap();
|
||||||
|
let repair = UdpSocket::bind(local_repair_addr).unwrap();
|
||||||
|
let requests = UdpSocket::bind(local_requests_addr).unwrap();
|
||||||
|
|
||||||
|
// Responses are sent from the same Udp port as requests are received
|
||||||
|
// from, in hopes that a NAT sitting in the middle will route the
|
||||||
|
// response Udp packet correctly back to the requester.
|
||||||
|
let respond = requests.try_clone().unwrap();
|
||||||
|
|
||||||
|
let gossip_send = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||||
|
let broadcast = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||||
|
let retransmit = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||||
|
TestNode {
|
||||||
|
data: data,
|
||||||
|
sockets: Sockets {
|
||||||
|
gossip,
|
||||||
|
gossip_send,
|
||||||
|
requests,
|
||||||
|
replicate,
|
||||||
|
transaction,
|
||||||
|
respond,
|
||||||
|
broadcast,
|
||||||
|
repair,
|
||||||
|
retransmit,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
@@ -956,7 +1077,7 @@ mod tests {
|
|||||||
sorted(&vec![d1.clone(), d2.clone(), d3.clone()])
|
sorted(&vec![d1.clone(), d2.clone(), d3.clone()])
|
||||||
);
|
);
|
||||||
let mut crdt2 = Crdt::new(d2.clone());
|
let mut crdt2 = Crdt::new(d2.clone());
|
||||||
crdt2.apply_updates(key, ix, &ups);
|
crdt2.apply_updates(key, ix, &ups, &vec![]);
|
||||||
assert_eq!(crdt2.table.values().len(), 3);
|
assert_eq!(crdt2.table.values().len(), 3);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
sorted(&crdt2.table.values().map(|x| x.clone()).collect()),
|
sorted(&crdt2.table.values().map(|x| x.clone()).collect()),
|
||||||
|
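The external_liveness merge added to apply_updates above keeps, for each node, the highest update index that a gossiping peer claims to have seen, and drops any rumor that first-hand knowledge already supersedes. A minimal standalone sketch of that bookkeeping, with a stand-in PubKey type and plain HashMaps in place of the Crdt fields:

use std::collections::HashMap;

type PubKey = [u8; 8]; // stand-in for the crate's real public-key type

// Mirror of the loop added to Crdt::apply_updates: skip anything we already
// know first-hand, otherwise record the highest index this reporter has claimed.
fn merge_external_liveness(
    remote: &HashMap<PubKey, u64>, // update indices we heard directly
    external_liveness: &mut HashMap<PubKey, HashMap<PubKey, u64>>, // rumors: subject -> reporter -> index
    from: PubKey, // the peer that sent this update
    reported: &HashMap<PubKey, u64>, // that peer's view of everyone else
) {
    for (pk, external_remote_index) in reported {
        let remote_entry = remote.get(pk).cloned().unwrap_or(0);
        if remote_entry >= *external_remote_index {
            continue; // first-hand knowledge is at least as fresh
        }
        let liveness_entry = external_liveness.entry(*pk).or_insert_with(HashMap::new);
        let peer_index = *liveness_entry.entry(from).or_insert(*external_remote_index);
        if *external_remote_index > peer_index {
            liveness_entry.insert(from, *external_remote_index);
        }
    }
}

fn main() {
    let a: PubKey = [1; 8];
    let b: PubKey = [2; 8];
    let reporter: PubKey = [9; 8];

    let mut remote = HashMap::new();
    remote.insert(a, 5); // we heard index 5 from `a` directly

    let mut reported = HashMap::new();
    reported.insert(a, 3); // stale rumor, already superseded
    reported.insert(b, 7); // new rumor about `b`

    let mut rumors = HashMap::new();
    merge_external_liveness(&remote, &mut rumors, reporter, &reported);

    assert!(!rumors.contains_key(&a));
    assert_eq!(rumors[&b][&reporter], 7);
    println!("rumor table: {:?}", rumors);
}

Clearing the table for a node once it is heard from directly, as the diff does with self.external_liveness.remove(&from), keeps the rumor table from growing past what gossip actually needs.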
@@ -133,9 +133,9 @@ mod tests {
     use bank::Bank;
     use crdt::{get_ip_addr, TestNode};
     use drone::{Drone, DroneRequest, REQUEST_CAP, TIME_SLICE};
+    use fullnode::FullNode;
     use logger;
     use mint::Mint;
-    use server::Server;
     use signature::{KeyPair, KeyPairUtil};
     use std::io::sink;
     use std::net::{SocketAddr, UdpSocket};
@@ -246,8 +246,9 @@ mod tests {
         let carlos_pubkey = KeyPair::new().pubkey();
         let exit = Arc::new(AtomicBool::new(false));

-        let server = Server::new_leader(
+        let server = FullNode::new_leader(
             bank,
+            0,
             Some(Duration::from_millis(30)),
             leader.data.clone(),
             leader.sockets.requests,
src/entry.rs (40 changed lines)
@@ -35,29 +35,57 @@ pub struct Entry {
     /// generated. The may have been observed before a previous Entry ID but were
     /// pushed back into this list to ensure deterministic interpretation of the ledger.
     pub transactions: Vec<Transaction>,
+
+    /// Indication that:
+    ///  1. the next Entry in the ledger has transactions that can potentially
+    ///     be verified in parallel with these transactions
+    ///  2. this Entry can be left out of the bank's entry_id cache for
+    ///     purposes of duplicate rejection
+    pub has_more: bool,
+
+    /// Erasure requires that Entry be a multiple of 4 bytes in size
+    pad: [u8; 3],
 }

 impl Entry {
     /// Creates the next Entry `num_hashes` after `start_hash`.
-    pub fn new(start_hash: &Hash, cur_hashes: u64, transactions: Vec<Transaction>) -> Self {
+    pub fn new(
+        start_hash: &Hash,
+        cur_hashes: u64,
+        transactions: Vec<Transaction>,
+        has_more: bool,
+    ) -> Self {
         let num_hashes = cur_hashes + if transactions.is_empty() { 0 } else { 1 };
         let id = next_hash(start_hash, 0, &transactions);
         let entry = Entry {
             num_hashes,
             id,
             transactions,
+            has_more,
+            pad: [0, 0, 0],
         };
         assert!(serialized_size(&entry).unwrap() <= BLOB_DATA_SIZE as u64);
         entry
     }
+
+    pub fn will_fit(transactions: Vec<Transaction>) -> bool {
+        serialized_size(&Entry {
+            num_hashes: 0,
+            id: Hash::default(),
+            transactions,
+            has_more: false,
+            pad: [0, 0, 0],
+        }).unwrap() <= BLOB_DATA_SIZE as u64
+    }

     /// Creates the next Tick Entry `num_hashes` after `start_hash`.
     pub fn new_mut(
         start_hash: &mut Hash,
         cur_hashes: &mut u64,
         transactions: Vec<Transaction>,
+        has_more: bool,
     ) -> Self {
-        let entry = Self::new(start_hash, *cur_hashes, transactions);
+        let entry = Self::new(start_hash, *cur_hashes, transactions, has_more);
         *start_hash = entry.id;
         *cur_hashes = 0;
         assert!(serialized_size(&entry).unwrap() <= BLOB_DATA_SIZE as u64);
@@ -71,6 +99,8 @@ impl Entry {
             num_hashes,
             id: *id,
             transactions: vec![],
+            has_more: false,
+            pad: [0, 0, 0],
         }
     }
@@ -119,6 +149,8 @@ pub fn next_entry(start_hash: &Hash, num_hashes: u64, transactions: Vec<Transact
         num_hashes,
         id: next_hash(start_hash, num_hashes, &transactions),
         transactions,
+        has_more: false,
+        pad: [0, 0, 0],
     }
 }
@@ -149,7 +181,7 @@ mod tests {
         let keypair = KeyPair::new();
         let tx0 = Transaction::new(&keypair, keypair.pubkey(), 0, zero);
         let tx1 = Transaction::new(&keypair, keypair.pubkey(), 1, zero);
-        let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()]);
+        let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()], false);
         assert!(e0.verify(&zero));

         // Next, swap two transactions and ensure verification fails.
@@ -166,7 +198,7 @@ mod tests {
         let keypair = KeyPair::new();
         let tx0 = Transaction::new_timestamp(&keypair, Utc::now(), zero);
         let tx1 = Transaction::new_signature(&keypair, Default::default(), zero);
-        let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()]);
+        let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()], false);
         assert!(e0.verify(&zero));

         // Next, swap two witness transactions and ensure verification fails.
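The new pad field exists only so that adding the one-byte has_more flag keeps the serialized Entry a multiple of 4 bytes, as the erasure code requires. A quick check of that arithmetic, assuming the bincode 1.x API this diff already uses (serialized_size returning a Result):

extern crate bincode;

use bincode::serialized_size;

fn main() {
    // bincode encodes a bool as one byte and a [u8; 3] as three raw bytes,
    // so has_more plus pad add exactly four bytes to a serialized Entry.
    let has_more_bytes = serialized_size(&false).unwrap();
    let pad_bytes = serialized_size(&[0u8; 3]).unwrap();
    assert_eq!(has_more_bytes, 1);
    assert_eq!(pad_bytes, 3);
    assert_eq!((has_more_bytes + pad_bytes) % 4, 0);
    println!("has_more + pad add {} bytes", has_more_bytes + pad_bytes);
}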
@@ -4,79 +4,98 @@

 use bank::Bank;
 use entry::Entry;
-use ledger::Block;
-use packet;
-use result::Result;
 use serde_json;
-use std::collections::VecDeque;
-use std::io::sink;
-use std::io::Write;
-use std::sync::mpsc::Receiver;
-use std::sync::{Arc, Mutex};
-use std::time::Duration;
-use streamer;
+use std::io::{self, BufRead, Error, ErrorKind, Write};

-pub struct EntryWriter<'a> {
+pub struct EntryWriter<'a, W> {
     bank: &'a Bank,
+    writer: W,
 }

-impl<'a> EntryWriter<'a> {
+impl<'a, W: Write> EntryWriter<'a, W> {
     /// Create a new Tpu that wraps the given Bank.
-    pub fn new(bank: &'a Bank) -> Self {
-        EntryWriter { bank }
+    pub fn new(bank: &'a Bank, writer: W) -> Self {
+        EntryWriter { bank, writer }
     }

-    fn write_entry<W: Write>(&self, writer: &Mutex<W>, entry: &Entry) {
-        trace!("write_entry entry");
-        self.bank.register_entry_id(&entry.id);
-        writeln!(
-            writer.lock().expect("'writer' lock in fn fn write_entry"),
-            "{}",
-            serde_json::to_string(&entry).expect("'entry' to_strong in fn write_entry")
-        ).expect("writeln! in fn write_entry");
-    }
+    fn write_entry(writer: &mut W, entry: &Entry) -> io::Result<()> {
+        let serialized = serde_json::to_string(entry).unwrap();
+        writeln!(writer, "{}", serialized)
+    }

-    fn write_entries<W: Write>(
-        &self,
-        writer: &Mutex<W>,
-        entry_receiver: &Receiver<Entry>,
-    ) -> Result<Vec<Entry>> {
-        //TODO implement a serialize for channel that does this without allocations
-        let mut l = vec![];
-        let entry = entry_receiver.recv_timeout(Duration::new(1, 0))?;
-        self.write_entry(writer, &entry);
-        l.push(entry);
-        while let Ok(entry) = entry_receiver.try_recv() {
-            self.write_entry(writer, &entry);
-            l.push(entry);
-        }
-        Ok(l)
-    }
+    pub fn write_entries<I>(writer: &mut W, entries: I) -> io::Result<()>
+    where
+        I: IntoIterator<Item = Entry>,
+    {
+        for entry in entries {
+            Self::write_entry(writer, &entry)?;
+        }
+        Ok(())
+    }

-    /// Process any Entry items that have been published by the Historian.
-    /// continuosly broadcast blobs of entries out
-    pub fn write_and_send_entries<W: Write>(
-        &self,
-        broadcast: &streamer::BlobSender,
-        blob_recycler: &packet::BlobRecycler,
-        writer: &Mutex<W>,
-        entry_receiver: &Receiver<Entry>,
-    ) -> Result<()> {
-        let mut q = VecDeque::new();
-        let list = self.write_entries(writer, entry_receiver)?;
-        trace!("New blobs? {}", list.len());
-        list.to_blobs(blob_recycler, &mut q);
-        if !q.is_empty() {
-            trace!("broadcasting {}", q.len());
-            broadcast.send(q)?;
-        }
-        Ok(())
-    }
+    fn write_and_register_entry(&mut self, entry: &Entry) -> io::Result<()> {
+        trace!("write_and_register_entry entry");
+        if !entry.has_more {
+            self.bank.register_entry_id(&entry.id);
+        }
+        Self::write_entry(&mut self.writer, entry)
+    }

-    /// Process any Entry items that have been published by the Historian.
-    /// continuosly broadcast blobs of entries out
-    pub fn drain_entries(&self, entry_receiver: &Receiver<Entry>) -> Result<()> {
-        self.write_entries(&Arc::new(Mutex::new(sink())), entry_receiver)?;
+    pub fn write_and_register_entries(&mut self, entries: &[Entry]) -> io::Result<()> {
+        for entry in entries {
+            self.write_and_register_entry(&entry)?;
+        }
         Ok(())
     }
 }
+
+pub fn read_entry(s: String) -> io::Result<Entry> {
+    serde_json::from_str(&s).map_err(|e| Error::new(ErrorKind::Other, e.to_string()))
+}
+
+// TODO: How to implement this without attaching the input's lifetime to the output?
+pub fn read_entries<'a, R: BufRead>(
+    reader: &'a mut R,
+) -> impl Iterator<Item = io::Result<Entry>> + 'a {
+    reader.lines().map(|s| read_entry(s?))
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use ledger;
+    use mint::Mint;
+    use packet::BLOB_DATA_SIZE;
+    use signature::{KeyPair, KeyPairUtil};
+    use transaction::Transaction;
+
+    #[test]
+    fn test_dont_register_partial_entries() {
+        let mint = Mint::new(1);
+        let bank = Bank::new(&mint);
+
+        let writer = io::sink();
+        let mut entry_writer = EntryWriter::new(&bank, writer);
+        let keypair = KeyPair::new();
+        let tx = Transaction::new(&mint.keypair(), keypair.pubkey(), 1, mint.last_id());
+
+        // NOTE: if Entry grows to larger than a transaction, the code below falls over
+        let threshold = (BLOB_DATA_SIZE / 256) - 1; // 256 is transaction size
+
+        // Verify large entries are split up and the first sets has_more.
+        let txs = vec![tx.clone(); threshold * 2];
+        let entries = ledger::next_entries(&mint.last_id(), 0, txs);
+        assert_eq!(entries.len(), 2);
+        assert!(entries[0].has_more);
+        assert!(!entries[1].has_more);
+
+        // Verify that write_and_register_entry doesn't register the first entries after a split.
+        assert_eq!(bank.last_id(), mint.last_id());
+        entry_writer.write_and_register_entry(&entries[0]).unwrap();
+        assert_eq!(bank.last_id(), mint.last_id());
+
+        // Verify that write_and_register_entry registers the final entry after a split.
+        entry_writer.write_and_register_entry(&entries[1]).unwrap();
+        assert_eq!(bank.last_id(), entries[1].id);
+    }
 }
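EntryWriter now frames the ledger as newline-delimited JSON: write_entries prints one serialized Entry per line and read_entries walks the lines back with BufRead::lines. A standalone sketch of that round trip, using a plain Vec<u64> in place of Entry so the example needs only serde_json:

extern crate serde_json;

use std::io::{self, BufRead, Cursor, Write};

// Write one JSON value per line, the framing write_entries settles on.
fn write_values<W: Write>(writer: &mut W, values: &[Vec<u64>]) -> io::Result<()> {
    for v in values {
        writeln!(writer, "{}", serde_json::to_string(v).unwrap())?;
    }
    Ok(())
}

// Read the values back with BufRead::lines, mirroring read_entries/read_entry.
fn read_values<R: BufRead>(reader: R) -> io::Result<Vec<Vec<u64>>> {
    reader
        .lines()
        .map(|line| {
            let line = line?;
            serde_json::from_str(&line).map_err(|e| io::Error::new(io::ErrorKind::Other, e))
        })
        .collect()
}

fn main() {
    let values = vec![vec![1, 2, 3], vec![], vec![42]];
    let mut buf = Vec::new();
    write_values(&mut buf, &values).unwrap();
    let round_tripped = read_values(Cursor::new(buf)).unwrap();
    assert_eq!(round_tripped, values);
    println!("round-tripped {} records", round_tripped.len());
}

One value per line keeps the reader streaming: a ledger can be replayed without loading the whole file, which is exactly how FullNode consumes it below.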
@@ -1,15 +1,14 @@
 //! The `fetch_stage` batches input from a UDP socket and sends it to a channel.

-use packet;
+use packet::PacketRecycler;
 use std::net::UdpSocket;
 use std::sync::atomic::AtomicBool;
 use std::sync::mpsc::channel;
 use std::sync::Arc;
 use std::thread::JoinHandle;
-use streamer;
+use streamer::{self, PacketReceiver};

 pub struct FetchStage {
-    pub packet_receiver: streamer::PacketReceiver,
     pub thread_hdls: Vec<JoinHandle<()>>,
 }

@@ -17,15 +16,15 @@ impl FetchStage {
     pub fn new(
         socket: UdpSocket,
         exit: Arc<AtomicBool>,
-        packet_recycler: packet::PacketRecycler,
-    ) -> Self {
+        packet_recycler: PacketRecycler,
+    ) -> (Self, PacketReceiver) {
         Self::new_multi_socket(vec![socket], exit, packet_recycler)
     }
     pub fn new_multi_socket(
         sockets: Vec<UdpSocket>,
         exit: Arc<AtomicBool>,
-        packet_recycler: packet::PacketRecycler,
-    ) -> Self {
+        packet_recycler: PacketRecycler,
+    ) -> (Self, PacketReceiver) {
         let (packet_sender, packet_receiver) = channel();
         let thread_hdls: Vec<_> = sockets
             .into_iter()
@@ -39,9 +38,6 @@ impl FetchStage {
             })
             .collect();

-        FetchStage {
-            packet_receiver,
-            thread_hdls,
-        }
+        (FetchStage { thread_hdls }, packet_receiver)
     }
 }
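Returning (FetchStage, PacketReceiver) instead of storing the receiver on the struct is a pattern this release applies to several stages (RecordStage gets the same treatment further down). A minimal sketch of the shape with a toy stage that emits a few numbers:

use std::sync::mpsc::{channel, Receiver};
use std::thread::{self, JoinHandle};

// The stage owns only its thread handle; the caller owns the output channel.
struct Stage {
    thread_hdl: JoinHandle<()>,
}

impl Stage {
    fn new() -> (Stage, Receiver<u64>) {
        let (sender, receiver) = channel();
        let thread_hdl = thread::spawn(move || {
            for i in 0..3u64 {
                if sender.send(i).is_err() {
                    break; // the receiver hung up, so shut the stage down
                }
            }
        });
        (Stage { thread_hdl }, receiver)
    }
}

fn main() {
    let (stage, receiver) = Stage::new();
    let got: Vec<u64> = receiver.iter().take(3).collect();
    assert_eq!(got, vec![0, 1, 2]);
    stage.thread_hdl.join().unwrap();
}

Handing the receiver back lets the caller wire stages together without reaching into their fields, which is why Tpu::new can now return its blob_receiver to FullNode below.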
@@ -1,11 +1,15 @@
-//! The `server` module hosts all the server microservices.
+//! The `fullnode` module hosts all the fullnode microservices.

 use bank::Bank;
-use crdt::{Crdt, ReplicatedData};
+use crdt::{Crdt, ReplicatedData, TestNode};
+use entry_writer;
 use ncp::Ncp;
-use packet;
+use packet::BlobRecycler;
 use rpu::Rpu;
+use std::fs::File;
 use std::io::Write;
+use std::io::{stdin, stdout, BufReader};
+use std::net::SocketAddr;
 use std::net::UdpSocket;
 use std::sync::atomic::AtomicBool;
 use std::sync::{Arc, RwLock};
@@ -15,11 +19,110 @@ use streamer;
 use tpu::Tpu;
 use tvu::Tvu;

-pub struct Server {
+//use std::time::Duration;
+pub struct FullNode {
     pub thread_hdls: Vec<JoinHandle<()>>,
 }

-impl Server {
+impl FullNode {
+    pub fn new(
+        mut node: TestNode,
+        leader: bool,
+        infile: Option<String>,
+        network_entry_for_validator: Option<SocketAddr>,
+        outfile_for_leader: Option<String>,
+        exit: Arc<AtomicBool>,
+    ) -> FullNode {
+        info!("creating bank...");
+        let bank = Bank::default();
+        let entry_height = if let Some(path) = infile {
+            let f = File::open(path).unwrap();
+            let mut r = BufReader::new(f);
+            let entries =
+                entry_writer::read_entries(&mut r).map(|e| e.expect("failed to parse entry"));
+            info!("processing ledger...");
+            bank.process_ledger(entries).expect("process_ledger")
+        } else {
+            let mut r = BufReader::new(stdin());
+            let entries =
+                entry_writer::read_entries(&mut r).map(|e| e.expect("failed to parse entry"));
+            info!("processing ledger...");
+            bank.process_ledger(entries).expect("process_ledger")
+        };
+
+        // entry_height is the network-wide agreed height of the ledger.
+        // initialize it from the input ledger
+        info!("processed {} ledger...", entry_height);
+
+        info!("creating networking stack...");
+
+        let local_gossip_addr = node.sockets.gossip.local_addr().unwrap();
+        let local_requests_addr = node.sockets.requests.local_addr().unwrap();
+        info!(
+            "starting... local gossip address: {} (advertising {})",
+            local_gossip_addr, node.data.gossip_addr
+        );
+        if !leader {
+            let testnet_addr = network_entry_for_validator.expect("validator requires entry");
+            let network_entry_point = ReplicatedData::new_entry_point(testnet_addr);
+            let server = FullNode::new_validator(
+                bank,
+                entry_height,
+                node.data.clone(),
+                node.sockets.requests,
+                node.sockets.respond,
+                node.sockets.replicate,
+                node.sockets.gossip,
+                node.sockets.repair,
+                network_entry_point,
+                exit.clone(),
+            );
+            info!(
+                "validator ready... local request address: {} (advertising {}) connected to: {}",
+                local_requests_addr, node.data.requests_addr, testnet_addr
+            );
+            server
+        } else {
+            node.data.current_leader_id = node.data.id.clone();
+            let server = if let Some(file) = outfile_for_leader {
+                FullNode::new_leader(
+                    bank,
+                    entry_height,
+                    //Some(Duration::from_millis(1000)),
+                    None,
+                    node.data.clone(),
+                    node.sockets.requests,
+                    node.sockets.transaction,
+                    node.sockets.broadcast,
+                    node.sockets.respond,
+                    node.sockets.gossip,
+                    exit.clone(),
+                    File::create(file).expect("opening ledger file"),
+                )
+            } else {
+                FullNode::new_leader(
+                    bank,
+                    entry_height,
+                    //Some(Duration::from_millis(1000)),
+                    None,
+                    node.data.clone(),
+                    node.sockets.requests,
+                    node.sockets.transaction,
+                    node.sockets.broadcast,
+                    node.sockets.respond,
+                    node.sockets.gossip,
+                    exit.clone(),
+                    stdout(),
+                )
+            };
+            info!(
+                "leader ready... local request address: {} (advertising {})",
+                local_requests_addr, node.data.requests_addr
+            );
+            server
+        }
+    }
     /// Create a server instance acting as a leader.
     ///
     /// ```text
@@ -46,6 +149,7 @@ impl Server {
     /// ```
     pub fn new_leader<W: Write + Send + 'static>(
         bank: Bank,
+        entry_height: u64,
         tick_duration: Option<Duration>,
         me: ReplicatedData,
         requests_socket: UdpSocket,
@@ -61,8 +165,8 @@ impl Server {
         let rpu = Rpu::new(bank.clone(), requests_socket, respond_socket, exit.clone());
         thread_hdls.extend(rpu.thread_hdls);

-        let blob_recycler = packet::BlobRecycler::default();
-        let tpu = Tpu::new(
+        let blob_recycler = BlobRecycler::default();
+        let (tpu, blob_receiver) = Tpu::new(
             bank.clone(),
             tick_duration,
             transactions_socket,
@@ -89,12 +193,13 @@ impl Server {
             exit.clone(),
             crdt,
             window,
+            entry_height,
             blob_recycler.clone(),
-            tpu.blob_receiver,
+            blob_receiver,
         );
         thread_hdls.extend(vec![t_broadcast]);

-        Server { thread_hdls }
+        FullNode { thread_hdls }
     }

     /// Create a server instance acting as a validator.
@@ -128,6 +233,7 @@ impl Server {
     /// ```
     pub fn new_validator(
         bank: Bank,
+        entry_height: u64,
         me: ReplicatedData,
         requests_socket: UdpSocket,
         respond_socket: UdpSocket,
@@ -159,6 +265,7 @@ impl Server {

         let tvu = Tvu::new(
             bank.clone(),
+            entry_height,
             crdt.clone(),
             window.clone(),
             replicate_socket,
@@ -168,15 +275,15 @@ impl Server {
         );
         thread_hdls.extend(tvu.thread_hdls);
         thread_hdls.extend(ncp.thread_hdls);
-        Server { thread_hdls }
+        FullNode { thread_hdls }
     }
 }
 #[cfg(test)]
 mod tests {
     use bank::Bank;
     use crdt::TestNode;
+    use fullnode::FullNode;
     use mint::Mint;
-    use server::Server;
     use std::sync::atomic::{AtomicBool, Ordering};
     use std::sync::Arc;
     #[test]
@@ -185,8 +292,9 @@ mod tests {
         let alice = Mint::new(10_000);
         let bank = Bank::new(&alice);
         let exit = Arc::new(AtomicBool::new(false));
-        let v = Server::new_validator(
+        let v = FullNode::new_validator(
             bank,
+            0,
             tn.data.clone(),
             tn.sockets.requests,
             tn.sockets.respond,
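FullNode::new above repeats the new_leader call because the ledger writer W is a generic type parameter, so the File and Stdout branches instantiate different concrete functions. A sketch of the alternative shape using a boxed trait object; this is an illustration of the trade-off, not what the tree does:

use std::fs::File;
use std::io::{stdout, Write};

// Pick the ledger writer at runtime behind a trait object instead of a
// generic parameter, trading a little dynamic dispatch for one call site.
fn open_writer(outfile: Option<String>) -> Box<dyn Write + Send> {
    match outfile {
        Some(path) => Box::new(File::create(path).expect("opening ledger file")),
        None => Box::new(stdout()),
    }
}

fn main() {
    let mut writer = open_writer(None); // no path given, so write to stdout
    writeln!(writer, "ledger entry placeholder").unwrap();
}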
src/ledger.rs (114 changed lines)
@@ -1,10 +1,10 @@
 //! The `ledger` module provides functions for parallel verification of the
 //! Proof of History ledger.

-use bincode::{self, deserialize, serialize_into, serialized_size};
+use bincode::{self, deserialize, serialize_into};
 use entry::Entry;
 use hash::Hash;
-use packet::{self, SharedBlob, BLOB_DATA_SIZE, BLOB_SIZE};
+use packet::{self, SharedBlob, BLOB_SIZE};
 use rayon::prelude::*;
 use std::collections::VecDeque;
 use std::io::Cursor;
@@ -41,10 +41,7 @@ impl Block for [Entry] {
     }
 }

-pub fn reconstruct_entries_from_blobs(
-    blobs: VecDeque<SharedBlob>,
-    blob_recycler: &packet::BlobRecycler,
-) -> bincode::Result<Vec<Entry>> {
+pub fn reconstruct_entries_from_blobs(blobs: VecDeque<SharedBlob>) -> bincode::Result<Vec<Entry>> {
     let mut entries: Vec<Entry> = Vec::with_capacity(blobs.len());

     for blob in blobs {
@@ -52,7 +49,6 @@ pub fn reconstruct_entries_from_blobs(
             let msg = blob.read().unwrap();
             deserialize(&msg.data()[..msg.meta.size])
         };
-        blob_recycler.recycle(blob);

         match entry {
             Ok(entry) => entries.push(entry),
@@ -73,24 +69,31 @@ pub fn next_entries_mut(
     transactions: Vec<Transaction>,
 ) -> Vec<Entry> {
     if transactions.is_empty() {
-        vec![Entry::new_mut(start_hash, cur_hashes, transactions)]
+        vec![Entry::new_mut(start_hash, cur_hashes, transactions, false)]
     } else {
         let mut chunk_len = transactions.len();

         // check for fit, make sure they can be serialized
-        while serialized_size(&Entry {
-            num_hashes: 0,
-            id: Hash::default(),
-            transactions: transactions[0..chunk_len].to_vec(),
-        }).unwrap() > BLOB_DATA_SIZE as u64
-        {
+        while !Entry::will_fit(transactions[0..chunk_len].to_vec()) {
             chunk_len /= 2;
         }

-        let mut entries = Vec::with_capacity(transactions.len() / chunk_len + 1);
+        let mut num_chunks = if transactions.len() % chunk_len == 0 {
+            transactions.len() / chunk_len
+        } else {
+            transactions.len() / chunk_len + 1
+        };
+
+        let mut entries = Vec::with_capacity(num_chunks);

         for chunk in transactions.chunks(chunk_len) {
-            entries.push(Entry::new_mut(start_hash, cur_hashes, chunk.to_vec()));
+            num_chunks -= 1;
+            entries.push(Entry::new_mut(
+                start_hash,
+                cur_hashes,
+                chunk.to_vec(),
+                num_chunks > 0,
+            ));
         }
         entries
     }
@@ -112,29 +115,11 @@ mod tests {
     use super::*;
     use entry::{next_entry, Entry};
     use hash::hash;
-    use packet::BlobRecycler;
+    use packet::{BlobRecycler, BLOB_DATA_SIZE};
     use signature::{KeyPair, KeyPairUtil};
     use std::net::{IpAddr, Ipv4Addr, SocketAddr};
     use transaction::Transaction;

-    /// Create a vector of Entries of length `transaction_batches.len()`
-    /// from `start_hash` hash, `num_hashes`, and `transaction_batches`.
-    fn next_entries_batched(
-        start_hash: &Hash,
-        cur_hashes: u64,
-        transaction_batches: Vec<Vec<Transaction>>,
-    ) -> Vec<Entry> {
-        let mut id = *start_hash;
-        let mut entries = vec![];
-        let mut num_hashes = cur_hashes;
-
-        for transactions in transaction_batches {
-            let mut entry_batch = next_entries_mut(&mut id, &mut num_hashes, transactions);
-            entries.append(&mut entry_batch);
-        }
-        entries
-    }
-
     #[test]
     fn test_verify_slice() {
         let zero = Hash::default();
@@ -142,9 +127,9 @@ mod tests {
         assert!(vec![][..].verify(&zero)); // base case
         assert!(vec![Entry::new_tick(0, &zero)][..].verify(&zero)); // singleton case 1
         assert!(!vec![Entry::new_tick(0, &zero)][..].verify(&one)); // singleton case 2, bad
-        assert!(next_entries_batched(&zero, 0, vec![vec![]; 2])[..].verify(&zero)); // inductive step
+        assert!(vec![next_entry(&zero, 0, vec![]); 2][..].verify(&zero)); // inductive step

-        let mut bad_ticks = next_entries_batched(&zero, 0, vec![vec![]; 2]);
+        let mut bad_ticks = vec![next_entry(&zero, 0, vec![]); 2];
         bad_ticks[1].id = one;
         assert!(!bad_ticks.verify(&zero)); // inductive step, bad
     }
@@ -162,10 +147,7 @@ mod tests {
         let mut blob_q = VecDeque::new();
         entries.to_blobs(&blob_recycler, &mut blob_q);

-        assert_eq!(
-            reconstruct_entries_from_blobs(blob_q, &blob_recycler).unwrap(),
-            entries
-        );
+        assert_eq!(reconstruct_entries_from_blobs(blob_q).unwrap(), entries);
     }

     #[test]
@@ -173,30 +155,45 @@ mod tests {
         let blob_recycler = BlobRecycler::default();
         let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
         let blobs_q = packet::to_blobs(vec![(0, addr)], &blob_recycler).unwrap(); // <-- attack!
-        assert!(reconstruct_entries_from_blobs(blobs_q, &blob_recycler).is_err());
+        assert!(reconstruct_entries_from_blobs(blobs_q).is_err());
     }

     #[test]
-    fn test_next_entries_batched() {
-        // this also tests next_entries, ugly, but is an easy way to do vec of vec (batch)
-        let mut id = Hash::default();
+    fn test_next_entries() {
+        let id = Hash::default();
         let next_id = hash(&id);
         let keypair = KeyPair::new();
         let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, next_id);

-        let transactions = vec![tx0; 5];
-        let transaction_batches = vec![transactions.clone(); 5];
-        let entries0 = next_entries_batched(&id, 0, transaction_batches);
+        // NOTE: if Entry grows to larger than a transaction, the code below falls over
+        let threshold = (BLOB_DATA_SIZE / 256) - 1; // 256 is transaction size

-        assert_eq!(entries0.len(), 5);
+        // verify no split
+        let transactions = vec![tx0.clone(); threshold];
+        let entries0 = next_entries(&id, 0, transactions.clone());
+        assert_eq!(entries0.len(), 1);
+        assert!(entries0.verify(&id));

-        let mut entries1 = vec![];
-        for _ in 0..5 {
-            let entry = next_entry(&id, 1, transactions.clone());
-            id = entry.id;
-            entries1.push(entry);
-        }
-        assert_eq!(entries0, entries1);
+        // verify the split
+        let transactions = vec![tx0.clone(); threshold * 2];
+        let entries0 = next_entries(&id, 0, transactions.clone());
+        assert_eq!(entries0.len(), 2);
+        assert!(entries0[0].has_more);
+        assert!(!entries0[entries0.len() - 1].has_more);
+        assert!(entries0.verify(&id));
+        // test hand-construction... brittle, changes if split method changes... ?
+        // let mut entries1 = vec![];
+        // entries1.push(Entry::new(&id, 1, transactions[..threshold].to_vec(), true));
+        // id = entries1[0].id;
+        // entries1.push(Entry::new(
+        //     &id,
+        //     1,
+        //     transactions[threshold..].to_vec(),
+        //     false,
+        // ));
+        //
+        // assert_eq!(entries0, entries1);
     }
 }
@@ -223,10 +220,7 @@ mod bench {
         bencher.iter(|| {
             let mut blob_q = VecDeque::new();
             entries.to_blobs(&blob_recycler, &mut blob_q);
-            assert_eq!(
-                reconstruct_entries_from_blobs(blob_q, &blob_recycler).unwrap(),
-                entries
-            );
+            assert_eq!(reconstruct_entries_from_blobs(blob_q).unwrap(), entries);
         });
     }
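The splitting policy in next_entries_mut is: halve the chunk length until Entry::will_fit says a chunk serializes within a blob, then mark every chunk except the last with has_more. A standalone sketch of that policy, with made-up sizes standing in for BLOB_DATA_SIZE and the serialized transaction size:

// Made-up constants for illustration only; the real code measures with bincode.
const FAKE_BLOB_DATA_SIZE: usize = 1024;
const FAKE_TX_SIZE: usize = 256;

fn will_fit(num_txs: usize) -> bool {
    num_txs * FAKE_TX_SIZE <= FAKE_BLOB_DATA_SIZE
}

// Emit (chunk size, has_more) pairs; has_more is set on all but the last chunk.
fn split_with_has_more(num_txs: usize) -> Vec<(usize, bool)> {
    let mut chunk_len = num_txs.max(1);
    while !will_fit(chunk_len) {
        chunk_len /= 2;
    }
    let mut num_chunks = (num_txs + chunk_len - 1) / chunk_len; // ceiling division
    let mut chunks = Vec::with_capacity(num_chunks);
    let mut remaining = num_txs;
    while remaining > 0 {
        let this_chunk = remaining.min(chunk_len);
        num_chunks -= 1;
        chunks.push((this_chunk, num_chunks > 0));
        remaining -= this_chunk;
    }
    chunks
}

fn main() {
    assert_eq!(split_with_has_more(3), vec![(3, false)]);
    assert_eq!(split_with_has_more(6), vec![(3, true), (3, false)]);
    println!("{:?}", split_with_has_more(6));
}

Because only the final chunk of a split clears has_more, the bank registers exactly one entry id per logical batch, which is what the test_dont_register_partial_entries test above checks.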
@@ -13,6 +13,7 @@ pub mod bank;
 pub mod banking_stage;
 pub mod blob_fetch_stage;
 pub mod budget;
+pub mod choose_gossip_peer_strategy;
 pub mod crdt;
 pub mod drone;
 pub mod entry;
@@ -20,10 +21,12 @@ pub mod entry_writer;
 #[cfg(feature = "erasure")]
 pub mod erasure;
 pub mod fetch_stage;
+pub mod fullnode;
 pub mod hash;
 pub mod ledger;
 pub mod logger;
 pub mod mint;
+pub mod nat;
 pub mod ncp;
 pub mod packet;
 pub mod payment_plan;
@@ -35,7 +38,6 @@ pub mod request_processor;
 pub mod request_stage;
 pub mod result;
 pub mod rpu;
-pub mod server;
 pub mod signature;
 pub mod sigverify;
 pub mod sigverify_stage;
@@ -51,6 +53,7 @@ extern crate bincode;
 extern crate byteorder;
 extern crate chrono;
 extern crate generic_array;
+extern crate itertools;
 extern crate libc;
 #[macro_use]
 extern crate log;
src/mint.rs (10 changed lines)
@@ -53,18 +53,12 @@ impl Mint {
     }

     pub fn create_entries(&self) -> Vec<Entry> {
-        let e0 = Entry::new(&self.seed(), 0, vec![]);
-        let e1 = Entry::new(&e0.id, 0, self.create_transactions());
+        let e0 = Entry::new(&self.seed(), 0, vec![], false);
+        let e1 = Entry::new(&e0.id, 0, self.create_transactions(), false);
         vec![e0, e1]
     }
 }

-#[derive(Serialize, Deserialize, Debug)]
-pub struct MintDemo {
-    pub mint: Mint,
-    pub num_accounts: i64,
-}
-
 #[cfg(test)]
 mod tests {
     use super::*;
src/nat.rs (new file, 97 lines)
@@ -0,0 +1,97 @@
+//! The `nat` module assists with NAT traversal
+
+extern crate futures;
+extern crate p2p;
+extern crate reqwest;
+extern crate tokio_core;
+
+use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
+
+use self::futures::Future;
+use self::p2p::UdpSocketExt;
+use std::env;
+use std::str;
+
+/// A data type representing a public Udp socket
+pub struct UdpSocketPair {
+    pub addr: SocketAddr,    // Public address of the socket
+    pub receiver: UdpSocket, // Locally bound socket that can receive from the public address
+    pub sender: UdpSocket,   // Locally bound socket to send via public address
+}
+
+/// Tries to determine the public IP address of this machine
+pub fn get_public_ip_addr() -> Result<IpAddr, String> {
+    let body = reqwest::get("http://ifconfig.co/ip")
+        .map_err(|err| err.to_string())?
+        .text()
+        .map_err(|err| err.to_string())?;
+
+    match body.lines().next() {
+        Some(ip) => Result::Ok(ip.parse().unwrap()),
+        None => Result::Err("Empty response body".to_string()),
+    }
+}
+
+/// Binds a private Udp address to a public address using UPnP if possible
+pub fn udp_public_bind(label: &str) -> UdpSocketPair {
+    let private_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0);
+
+    let mut core = tokio_core::reactor::Core::new().unwrap();
+    let handle = core.handle();
+    let mc = p2p::P2p::default();
+    let res = core.run({
+        tokio_core::net::UdpSocket::bind_public(&private_addr, &handle, &mc)
+            .map_err(|e| {
+                info!("Failed to bind public socket for {}: {}", label, e);
+            })
+            .and_then(|(socket, public_addr)| Ok((public_addr, socket.local_addr().unwrap())))
+    });
+
+    match res {
+        Ok((public_addr, local_addr)) => {
+            info!(
+                "Using local address {} mapped to UPnP public address {} for {}",
+                local_addr, public_addr, label
+            );
+
+            // NAT should now be forwarding inbound packets directed at
+            // |public_addr| to the local |receiver| socket...
+            let receiver = UdpSocket::bind(local_addr).unwrap();
+
+            // TODO: try to autodetect a broken NAT (issue #496)
+            let sender = if env::var("BROKEN_NAT").is_err() {
+                receiver.try_clone().unwrap()
+            } else {
+                // ... however for outbound packets, some NATs *will not* rewrite the
+                // source port from |receiver.local_addr().port()| to |public_addr.port()|.
+                // This is currently a problem when talking with a fullnode as it
+                // assumes it can send UDP packets back at the source. This hits the
+                // NAT as a datagram for |receiver.local_addr().port()| on the NAT's public
+                // IP, which the NAT promptly discards. As a short term hack, create a
+                // local UDP socket, |sender|, with the same port as |public_addr.port()|.
+                //
+                // TODO: Remove the |sender| socket and deal with the downstream changes to
+                //       the UDP signalling
+                let mut local_addr_sender = local_addr.clone();
+                local_addr_sender.set_port(public_addr.port());
+                UdpSocket::bind(local_addr_sender).unwrap()
+            };
+
+            UdpSocketPair {
+                addr: public_addr,
+                receiver,
+                sender,
+            }
+        }
+        Err(_) => {
+            let sender = UdpSocket::bind(private_addr).unwrap();
+            let local_addr = sender.local_addr().unwrap();
+            info!("Using local address {} for {}", local_addr, label);
+            UdpSocketPair {
+                addr: private_addr,
+                receiver: sender.try_clone().unwrap(),
+                sender,
+            }
+        }
+    }
+}
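How a binary might consume the new nat module, as a sketch only: get_public_ip_addr performs an HTTP lookup and udp_public_bind negotiates with the local UPnP gateway (falling back to a plain local bind), so neither call is deterministic, and the crate name below is an assumption.

extern crate solana; // assumption: this repository's crate name

use solana::nat::{get_public_ip_addr, udp_public_bind};

fn main() {
    match get_public_ip_addr() {
        Ok(ip) => println!("public ip: {}", ip),
        Err(err) => println!("could not determine public ip: {}", err),
    }

    // Returns a UdpSocketPair: the address to advertise plus the locally
    // bound receive and send sockets described in the module above.
    let gossip = udp_public_bind("gossip");
    println!(
        "gossip advertised at {}, receiving on local port {}",
        gossip.addr,
        gossip.receiver.local_addr().unwrap().port()
    );
}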
src/ncp.rs (17 changed lines)
@@ -1,7 +1,7 @@
 //! The `ncp` module implements the network control plane.

-use crdt;
-use packet;
+use crdt::Crdt;
+use packet::{BlobRecycler, SharedBlob};
 use result::Result;
 use std::net::UdpSocket;
 use std::sync::atomic::AtomicBool;
@@ -16,13 +16,13 @@ pub struct Ncp {

 impl Ncp {
     pub fn new(
-        crdt: Arc<RwLock<crdt::Crdt>>,
-        window: Arc<RwLock<Vec<Option<packet::SharedBlob>>>>,
+        crdt: Arc<RwLock<Crdt>>,
+        window: Arc<RwLock<Vec<Option<SharedBlob>>>>,
         gossip_listen_socket: UdpSocket,
         gossip_send_socket: UdpSocket,
         exit: Arc<AtomicBool>,
     ) -> Result<Ncp> {
-        let blob_recycler = packet::BlobRecycler::default();
+        let blob_recycler = BlobRecycler::default();
         let (request_sender, request_receiver) = channel();
         trace!(
             "Ncp: id: {:?}, listening on: {:?}",
@@ -42,7 +42,7 @@ impl Ncp {
             blob_recycler.clone(),
             response_receiver,
         );
-        let t_listen = crdt::Crdt::listen(
+        let t_listen = Crdt::listen(
             crdt.clone(),
             window,
             blob_recycler.clone(),
@@ -50,7 +50,7 @@ impl Ncp {
             response_sender.clone(),
             exit.clone(),
         );
-        let t_gossip = crdt::Crdt::gossip(crdt.clone(), blob_recycler, response_sender, exit);
+        let t_gossip = Crdt::gossip(crdt.clone(), blob_recycler, response_sender, exit);
         let thread_hdls = vec![t_receiver, t_responder, t_listen, t_gossip];
         Ok(Ncp { thread_hdls })
     }
@@ -64,10 +64,7 @@ mod tests {
     use std::sync::{Arc, RwLock};

     #[test]
-    #[ignore]
     // test that stage will exit when flag is set
-    // TODO: Troubleshoot Docker-based coverage build and re-enabled
-    // this test. It is probably failing due to too many threads.
     fn test_exit() {
         let exit = Arc::new(AtomicBool::new(false));
         let tn = TestNode::new();
@@ -16,6 +16,7 @@ use std::time::Instant;

 pub type SharedPackets = Arc<RwLock<Packets>>;
 pub type SharedBlob = Arc<RwLock<Blob>>;
+pub type SharedBlobs = VecDeque<SharedBlob>;
 pub type PacketRecycler = Recycler<Packets>;
 pub type BlobRecycler = Recycler<Blob>;

@@ -162,12 +163,26 @@ impl<T: Default> Clone for Recycler<T> {
 impl<T: Default> Recycler<T> {
     pub fn allocate(&self) -> Arc<RwLock<T>> {
         let mut gc = self.gc.lock().expect("recycler lock in pb fn allocate");
-        gc.pop()
-            .unwrap_or_else(|| Arc::new(RwLock::new(Default::default())))
+        let x = gc.pop()
+            .unwrap_or_else(|| Arc::new(RwLock::new(Default::default())));
+
+        // Only return the item if this recycler is the last reference to it.
+        // Remove this check once `T` holds a Weak reference back to this
+        // recycler and implements `Drop`. At the time of this writing, Weak can't
+        // be passed across threads ('alloc' is a nightly-only API), and so our
+        // reference-counted recyclables are awkwardly being recycled by hand,
+        // which allows this race condition to exist.
+        if Arc::strong_count(&x) > 1 {
+            warn!("Recycled item still in use. Booting it.");
+            drop(gc);
+            self.allocate()
+        } else {
+            x
+        }
     }
-    pub fn recycle(&self, msgs: Arc<RwLock<T>>) {
+
+    pub fn recycle(&self, x: Arc<RwLock<T>>) {
         let mut gc = self.gc.lock().expect("recycler lock in pub fn recycle");
-        gc.push(msgs);
+        gc.push(x);
     }
 }

@@ -273,7 +288,7 @@ pub fn to_blob<T: Serialize>(
 pub fn to_blobs<T: Serialize>(
     rsps: Vec<(T, SocketAddr)>,
     blob_recycler: &BlobRecycler,
-) -> Result<VecDeque<SharedBlob>> {
+) -> Result<SharedBlobs> {
     let mut blobs = VecDeque::new();
     for (resp, rsp_addr) in rsps {
         blobs.push_back(to_blob(resp, rsp_addr, blob_recycler)?);
@@ -366,7 +381,7 @@ impl Blob {
         self.meta.size = new_size;
         self.set_data_size(new_size as u64).unwrap();
     }
-    pub fn recv_from(re: &BlobRecycler, socket: &UdpSocket) -> Result<VecDeque<SharedBlob>> {
+    pub fn recv_from(re: &BlobRecycler, socket: &UdpSocket) -> Result<SharedBlobs> {
         let mut v = VecDeque::new();
         //DOCUMENTED SIDE-EFFECT
         //Performance out of the IO without poll
@@ -404,11 +419,7 @@ impl Blob {
         }
         Ok(v)
     }
-    pub fn send_to(
-        re: &BlobRecycler,
-        socket: &UdpSocket,
-        v: &mut VecDeque<SharedBlob>,
-    ) -> Result<()> {
+    pub fn send_to(re: &BlobRecycler, socket: &UdpSocket, v: &mut SharedBlobs) -> Result<()> {
         while let Some(r) = v.pop_front() {
             {
                 let p = r.read().expect("'r' read lock in pub fn send_to");
@@ -422,13 +433,16 @@ impl Blob {
 }

 #[cfg(test)]
-mod test {
-    use packet::{to_packets, Blob, BlobRecycler, Packet, PacketRecycler, Packets, NUM_PACKETS};
+mod tests {
+    use packet::{
+        to_packets, Blob, BlobRecycler, Packet, PacketRecycler, Packets, Recycler, NUM_PACKETS,
+    };
     use request::Request;
     use std::collections::VecDeque;
     use std::io;
     use std::io::Write;
     use std::net::UdpSocket;
+    use std::sync::Arc;

     #[test]
     pub fn packet_recycler_test() {
@@ -439,6 +453,37 @@ mod test {
         let _ = r.allocate();
         assert_eq!(r.gc.lock().unwrap().len(), 0);
     }
+
+    #[test]
+    pub fn test_leaked_recyclable() {
+        // Ensure that the recycler won't return an item
+        // that is still referenced outside the recycler.
+        let r = Recycler::<u8>::default();
+        let x0 = r.allocate();
+        r.recycle(x0.clone());
+        assert_eq!(Arc::strong_count(&x0), 2);
+        assert_eq!(r.gc.lock().unwrap().len(), 1);
+
+        let x1 = r.allocate();
+        assert_eq!(Arc::strong_count(&x1), 1);
+        assert_eq!(r.gc.lock().unwrap().len(), 0);
+    }
+
+    #[test]
+    pub fn test_leaked_recyclable_recursion() {
+        // In the case of a leaked recyclable, ensure the recycler drops its lock before recursing.
+        let r = Recycler::<u8>::default();
+        let x0 = r.allocate();
+        let x1 = r.allocate();
+        r.recycle(x0); // <-- allocate() of this will require locking the recycler's stack.
+        r.recycle(x1.clone()); // <-- allocate() of this will cause it to be dropped and recurse.
+        assert_eq!(Arc::strong_count(&x1), 2);
+        assert_eq!(r.gc.lock().unwrap().len(), 2);
+
+        r.allocate(); // Ensure lock is released before recursing.
+        assert_eq!(r.gc.lock().unwrap().len(), 0);
+    }
+
     #[test]
     pub fn blob_recycler_test() {
         let r = BlobRecycler::default();
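The drop(gc) before the recursive self.allocate() call matters because std::sync::Mutex is not re-entrant: re-locking it on the same thread while the guard is still alive blocks forever, which is exactly the situation test_leaked_recyclable_recursion exercises. The hazard in isolation:

use std::sync::Mutex;

fn main() {
    let stack = Mutex::new(vec![1u8, 2, 3]);

    let popped = {
        let mut guard = stack.lock().unwrap();
        let x = guard.pop();
        // let _again = stack.lock().unwrap(); // would deadlock: `guard` is still held
        drop(guard); // release the lock before touching the Mutex again
        x
    };

    let len = stack.lock().unwrap().len(); // fine: the previous guard was dropped
    assert_eq!(popped, Some(3));
    assert_eq!(len, 2);
    println!("popped {:?}, {} left", popped, len);
}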
@@ -20,14 +20,16 @@ pub enum Signal {
 }

 pub struct RecordStage {
-    pub entry_receiver: Receiver<Entry>,
     pub thread_hdl: JoinHandle<()>,
 }

 impl RecordStage {
     /// A background thread that will continue tagging received Transaction messages and
     /// sending back Entry messages until either the receiver or sender channel is closed.
-    pub fn new(signal_receiver: Receiver<Signal>, start_hash: &Hash) -> Self {
+    pub fn new(
+        signal_receiver: Receiver<Signal>,
+        start_hash: &Hash,
+    ) -> (Self, Receiver<Vec<Entry>>) {
         let (entry_sender, entry_receiver) = channel();
         let start_hash = start_hash.clone();
@@ -39,10 +41,7 @@ impl RecordStage {
             })
             .unwrap();

-        RecordStage {
-            entry_receiver,
-            thread_hdl,
-        }
+        (RecordStage { thread_hdl }, entry_receiver)
     }

     /// Same as `RecordStage::new`, but will automatically produce entries every `tick_duration`.
@@ -50,7 +49,7 @@ impl RecordStage {
         signal_receiver: Receiver<Signal>,
         start_hash: &Hash,
         tick_duration: Duration,
-    ) -> Self {
+    ) -> (Self, Receiver<Vec<Entry>>) {
         let (entry_sender, entry_receiver) = channel();
         let start_hash = start_hash.clone();
@@ -74,16 +73,13 @@ impl RecordStage {
             })
             .unwrap();

-        RecordStage {
-            entry_receiver,
-            thread_hdl,
-        }
+        (RecordStage { thread_hdl }, entry_receiver)
     }

     fn process_signal(
         signal: Signal,
         recorder: &mut Recorder,
-        sender: &Sender<Entry>,
+        sender: &Sender<Vec<Entry>>,
     ) -> Result<(), ()> {
         let txs = if let Signal::Transactions(txs) = signal {
             txs
@@ -91,20 +87,14 @@ impl RecordStage {
             vec![]
         };
         let entries = recorder.record(txs);
-        let mut result = Ok(());
-        for entry in entries {
-            result = sender.send(entry).map_err(|_| ());
-            if result.is_err() {
-                break;
-            }
-        }
-        result
+        sender.send(entries).or(Err(()))?;
+        Ok(())
     }

     fn process_signals(
         recorder: &mut Recorder,
         receiver: &Receiver<Signal>,
-        sender: &Sender<Entry>,
+        sender: &Sender<Vec<Entry>>,
     ) -> Result<(), ()> {
         loop {
             match receiver.recv() {
@@ -119,11 +109,11 @@ impl RecordStage {
         start_time: Instant,
         tick_duration: Duration,
         receiver: &Receiver<Signal>,
-        sender: &Sender<Entry>,
+        sender: &Sender<Vec<Entry>>,
     ) -> Result<(), ()> {
         loop {
             if let Some(entry) = recorder.tick(start_time, tick_duration) {
-                sender.send(entry).or(Err(()))?;
+                sender.send(vec![entry]).or(Err(()))?;
             }
             match receiver.try_recv() {
                 Ok(signal) => Self::process_signal(signal, recorder, sender)?,
@@ -146,7 +136,7 @@ mod tests {
     fn test_historian() {
         let (tx_sender, tx_receiver) = channel();
         let zero = Hash::default();
-        let record_stage = RecordStage::new(tx_receiver, &zero);
+        let (record_stage, entry_receiver) = RecordStage::new(tx_receiver, &zero);

         tx_sender.send(Signal::Tick).unwrap();
         sleep(Duration::new(0, 1_000_000));
@@ -154,9 +144,9 @@ mod tests {
         sleep(Duration::new(0, 1_000_000));
         tx_sender.send(Signal::Tick).unwrap();

-        let entry0 = record_stage.entry_receiver.recv().unwrap();
-        let entry1 = record_stage.entry_receiver.recv().unwrap();
-        let entry2 = record_stage.entry_receiver.recv().unwrap();
+        let entry0 = entry_receiver.recv().unwrap()[0].clone();
+        let entry1 = entry_receiver.recv().unwrap()[0].clone();
+        let entry2 = entry_receiver.recv().unwrap()[0].clone();

         assert_eq!(entry0.num_hashes, 0);
         assert_eq!(entry1.num_hashes, 0);
@@ -172,8 +162,8 @@ mod tests {
     fn test_historian_closed_sender() {
         let (tx_sender, tx_receiver) = channel();
         let zero = Hash::default();
-        let record_stage = RecordStage::new(tx_receiver, &zero);
-        drop(record_stage.entry_receiver);
+        let (record_stage, entry_receiver) = RecordStage::new(tx_receiver, &zero);
+        drop(entry_receiver);
         tx_sender.send(Signal::Tick).unwrap();
         assert_eq!(record_stage.thread_hdl.join().unwrap(), ());
     }
@@ -182,7 +172,7 @@ mod tests {
     fn test_transactions() {
         let (tx_sender, signal_receiver) = channel();
         let zero = Hash::default();
-        let record_stage = RecordStage::new(signal_receiver, &zero);
+        let (_record_stage, entry_receiver) = RecordStage::new(signal_receiver, &zero);
         let alice_keypair = KeyPair::new();
         let bob_pubkey = KeyPair::new().pubkey();
         let tx0 = Transaction::new(&alice_keypair, bob_pubkey, 1, zero);
@@ -191,7 +181,7 @@ mod tests {
|
|||||||
.send(Signal::Transactions(vec![tx0, tx1]))
|
.send(Signal::Transactions(vec![tx0, tx1]))
|
||||||
.unwrap();
|
.unwrap();
|
||||||
drop(tx_sender);
|
drop(tx_sender);
|
||||||
let entries: Vec<_> = record_stage.entry_receiver.iter().collect();
|
let entries: Vec<_> = entry_receiver.iter().collect();
|
||||||
assert_eq!(entries.len(), 1);
|
assert_eq!(entries.len(), 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -199,12 +189,12 @@ mod tests {
|
|||||||
fn test_clock() {
|
fn test_clock() {
|
||||||
let (tx_sender, tx_receiver) = channel();
|
let (tx_sender, tx_receiver) = channel();
|
||||||
let zero = Hash::default();
|
let zero = Hash::default();
|
||||||
let record_stage =
|
let (_record_stage, entry_receiver) =
|
||||||
RecordStage::new_with_clock(tx_receiver, &zero, Duration::from_millis(20));
|
RecordStage::new_with_clock(tx_receiver, &zero, Duration::from_millis(20));
|
||||||
sleep(Duration::from_millis(900));
|
sleep(Duration::from_millis(900));
|
||||||
tx_sender.send(Signal::Tick).unwrap();
|
tx_sender.send(Signal::Tick).unwrap();
|
||||||
drop(tx_sender);
|
drop(tx_sender);
|
||||||
let entries: Vec<Entry> = record_stage.entry_receiver.iter().collect();
|
let entries: Vec<_> = entry_receiver.iter().flat_map(|x| x).collect();
|
||||||
assert!(entries.len() > 1);
|
assert!(entries.len() > 1);
|
||||||
|
|
||||||
// Ensure the ID is not the seed.
|
// Ensure the ID is not the seed.
|
||||||
|
@@ -3,7 +3,7 @@
|
|||||||
|
|
||||||
use entry::Entry;
|
use entry::Entry;
|
||||||
use hash::{hash, Hash};
|
use hash::{hash, Hash};
|
||||||
use ledger::next_entries_mut;
|
use ledger;
|
||||||
use std::time::{Duration, Instant};
|
use std::time::{Duration, Instant};
|
||||||
use transaction::Transaction;
|
use transaction::Transaction;
|
||||||
|
|
||||||
@@ -28,7 +28,7 @@ impl Recorder {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub fn record(&mut self, transactions: Vec<Transaction>) -> Vec<Entry> {
|
pub fn record(&mut self, transactions: Vec<Transaction>) -> Vec<Entry> {
|
||||||
next_entries_mut(&mut self.last_hash, &mut self.num_hashes, transactions)
|
ledger::next_entries_mut(&mut self.last_hash, &mut self.num_hashes, transactions)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn tick(&mut self, start_time: Instant, tick_duration: Duration) -> Option<Entry> {
|
pub fn tick(&mut self, start_time: Instant, tick_duration: Duration) -> Option<Entry> {
|
||||||
@@ -39,6 +39,7 @@ impl Recorder {
|
|||||||
&mut self.last_hash,
|
&mut self.last_hash,
|
||||||
&mut self.num_hashes,
|
&mut self.num_hashes,
|
||||||
vec![],
|
vec![],
|
||||||
|
false,
|
||||||
))
|
))
|
||||||
} else {
|
} else {
|
||||||
None
|
None
|
||||||
|
@@ -2,13 +2,12 @@
|
|||||||
|
|
||||||
use bank::Bank;
|
use bank::Bank;
|
||||||
use ledger;
|
use ledger;
|
||||||
use packet;
|
|
||||||
use result::Result;
|
use result::Result;
|
||||||
use std::sync::atomic::{AtomicBool, Ordering};
|
use std::sync::atomic::{AtomicBool, Ordering};
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::thread::{Builder, JoinHandle};
|
use std::thread::{Builder, JoinHandle};
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
use streamer;
|
use streamer::BlobReceiver;
|
||||||
|
|
||||||
pub struct ReplicateStage {
|
pub struct ReplicateStage {
|
||||||
pub thread_hdl: JoinHandle<()>,
|
pub thread_hdl: JoinHandle<()>,
|
||||||
@@ -16,15 +15,11 @@ pub struct ReplicateStage {
|
|||||||
|
|
||||||
impl ReplicateStage {
|
impl ReplicateStage {
|
||||||
/// Process entry blobs, already in order
|
/// Process entry blobs, already in order
|
||||||
fn replicate_requests(
|
fn replicate_requests(bank: &Arc<Bank>, blob_receiver: &BlobReceiver) -> Result<()> {
|
||||||
bank: &Arc<Bank>,
|
|
||||||
blob_receiver: &streamer::BlobReceiver,
|
|
||||||
blob_recycler: &packet::BlobRecycler,
|
|
||||||
) -> Result<()> {
|
|
||||||
let timer = Duration::new(1, 0);
|
let timer = Duration::new(1, 0);
|
||||||
let blobs = blob_receiver.recv_timeout(timer)?;
|
let blobs = blob_receiver.recv_timeout(timer)?;
|
||||||
let blobs_len = blobs.len();
|
let blobs_len = blobs.len();
|
||||||
let entries = ledger::reconstruct_entries_from_blobs(blobs, &blob_recycler)?;
|
let entries = ledger::reconstruct_entries_from_blobs(blobs)?;
|
||||||
let res = bank.process_entries(entries);
|
let res = bank.process_entries(entries);
|
||||||
if res.is_err() {
|
if res.is_err() {
|
||||||
error!("process_entries {} {:?}", blobs_len, res);
|
error!("process_entries {} {:?}", blobs_len, res);
|
||||||
@@ -33,16 +28,11 @@ impl ReplicateStage {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn new(
|
pub fn new(bank: Arc<Bank>, exit: Arc<AtomicBool>, window_receiver: BlobReceiver) -> Self {
|
||||||
bank: Arc<Bank>,
|
|
||||||
exit: Arc<AtomicBool>,
|
|
||||||
window_receiver: streamer::BlobReceiver,
|
|
||||||
blob_recycler: packet::BlobRecycler,
|
|
||||||
) -> Self {
|
|
||||||
let thread_hdl = Builder::new()
|
let thread_hdl = Builder::new()
|
||||||
.name("solana-replicate-stage".to_string())
|
.name("solana-replicate-stage".to_string())
|
||||||
.spawn(move || loop {
|
.spawn(move || loop {
|
||||||
let e = Self::replicate_requests(&bank, &window_receiver, &blob_recycler);
|
let e = Self::replicate_requests(&bank, &window_receiver);
|
||||||
if e.is_err() && exit.load(Ordering::Relaxed) {
|
if e.is_err() && exit.load(Ordering::Relaxed) {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
@@ -1,7 +1,7 @@
|
|||||||
//! The `request` module defines the messages for the thin client.
|
//! The `request` module defines the messages for the thin client.
|
||||||
|
|
||||||
use hash::Hash;
|
use hash::Hash;
|
||||||
use signature::PublicKey;
|
use signature::{PublicKey, Signature};
|
||||||
|
|
||||||
#[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))]
|
#[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))]
|
||||||
#[derive(Serialize, Deserialize, Debug, Clone)]
|
#[derive(Serialize, Deserialize, Debug, Clone)]
|
||||||
@@ -9,6 +9,7 @@ pub enum Request {
|
|||||||
GetBalance { key: PublicKey },
|
GetBalance { key: PublicKey },
|
||||||
GetLastId,
|
GetLastId,
|
||||||
GetTransactionCount,
|
GetTransactionCount,
|
||||||
|
GetSignature { signature: Signature },
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Request {
|
impl Request {
|
||||||
@@ -20,7 +21,8 @@ impl Request {
|
|||||||
|
|
||||||
#[derive(Serialize, Deserialize, Debug)]
|
#[derive(Serialize, Deserialize, Debug)]
|
||||||
pub enum Response {
|
pub enum Response {
|
||||||
Balance { key: PublicKey, val: Option<i64> },
|
Balance { key: PublicKey, val: i64 },
|
||||||
LastId { id: Hash },
|
LastId { id: Hash },
|
||||||
TransactionCount { transaction_count: u64 },
|
TransactionCount { transaction_count: u64 },
|
||||||
|
SignatureStatus { signature_status: bool },
|
||||||
}
|
}
|
||||||
|
@@ -40,6 +40,12 @@ impl RequestProcessor {
|
|||||||
info!("Response::TransactionCount {:?}", rsp);
|
info!("Response::TransactionCount {:?}", rsp);
|
||||||
Some(rsp)
|
Some(rsp)
|
||||||
}
|
}
|
||||||
|
Request::GetSignature { signature } => {
|
||||||
|
let signature_status = self.bank.has_signature(&signature);
|
||||||
|
let rsp = (Response::SignatureStatus { signature_status }, rsp_addr);
|
||||||
|
info!("Response::Signature {:?}", rsp);
|
||||||
|
Some(rsp)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -1,8 +1,7 @@
|
|||||||
//! The `request_stage` processes thin client Request messages.
|
//! The `request_stage` processes thin client Request messages.
|
||||||
|
|
||||||
use bincode::deserialize;
|
use bincode::deserialize;
|
||||||
use packet;
|
use packet::{to_blobs, BlobRecycler, PacketRecycler, Packets, SharedPackets};
|
||||||
use packet::SharedPackets;
|
|
||||||
use rayon::prelude::*;
|
use rayon::prelude::*;
|
||||||
use request::Request;
|
use request::Request;
|
||||||
use request_processor::RequestProcessor;
|
use request_processor::RequestProcessor;
|
||||||
@@ -13,17 +12,16 @@ use std::sync::mpsc::{channel, Receiver};
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::thread::{Builder, JoinHandle};
|
use std::thread::{Builder, JoinHandle};
|
||||||
use std::time::Instant;
|
use std::time::Instant;
|
||||||
use streamer;
|
use streamer::{self, BlobReceiver, BlobSender};
|
||||||
use timing;
|
use timing;
|
||||||
|
|
||||||
pub struct RequestStage {
|
pub struct RequestStage {
|
||||||
pub thread_hdl: JoinHandle<()>,
|
pub thread_hdl: JoinHandle<()>,
|
||||||
pub blob_receiver: streamer::BlobReceiver,
|
|
||||||
pub request_processor: Arc<RequestProcessor>,
|
pub request_processor: Arc<RequestProcessor>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl RequestStage {
|
impl RequestStage {
|
||||||
pub fn deserialize_requests(p: &packet::Packets) -> Vec<Option<(Request, SocketAddr)>> {
|
pub fn deserialize_requests(p: &Packets) -> Vec<Option<(Request, SocketAddr)>> {
|
||||||
p.packets
|
p.packets
|
||||||
.par_iter()
|
.par_iter()
|
||||||
.map(|x| {
|
.map(|x| {
|
||||||
@@ -37,9 +35,9 @@ impl RequestStage {
|
|||||||
pub fn process_request_packets(
|
pub fn process_request_packets(
|
||||||
request_processor: &RequestProcessor,
|
request_processor: &RequestProcessor,
|
||||||
packet_receiver: &Receiver<SharedPackets>,
|
packet_receiver: &Receiver<SharedPackets>,
|
||||||
blob_sender: &streamer::BlobSender,
|
blob_sender: &BlobSender,
|
||||||
packet_recycler: &packet::PacketRecycler,
|
packet_recycler: &PacketRecycler,
|
||||||
blob_recycler: &packet::BlobRecycler,
|
blob_recycler: &BlobRecycler,
|
||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
let (batch, batch_len) = streamer::recv_batch(packet_receiver)?;
|
let (batch, batch_len) = streamer::recv_batch(packet_receiver)?;
|
||||||
|
|
||||||
@@ -60,7 +58,7 @@ impl RequestStage {
|
|||||||
|
|
||||||
let rsps = request_processor.process_requests(reqs);
|
let rsps = request_processor.process_requests(reqs);
|
||||||
|
|
||||||
let blobs = packet::to_blobs(rsps, blob_recycler)?;
|
let blobs = to_blobs(rsps, blob_recycler)?;
|
||||||
if !blobs.is_empty() {
|
if !blobs.is_empty() {
|
||||||
info!("process: sending blobs: {}", blobs.len());
|
info!("process: sending blobs: {}", blobs.len());
|
||||||
//don't wake up the other side if there is nothing
|
//don't wake up the other side if there is nothing
|
||||||
@@ -84,9 +82,9 @@ impl RequestStage {
|
|||||||
request_processor: RequestProcessor,
|
request_processor: RequestProcessor,
|
||||||
exit: Arc<AtomicBool>,
|
exit: Arc<AtomicBool>,
|
||||||
packet_receiver: Receiver<SharedPackets>,
|
packet_receiver: Receiver<SharedPackets>,
|
||||||
packet_recycler: packet::PacketRecycler,
|
packet_recycler: PacketRecycler,
|
||||||
blob_recycler: packet::BlobRecycler,
|
blob_recycler: BlobRecycler,
|
||||||
) -> Self {
|
) -> (Self, BlobReceiver) {
|
||||||
let request_processor = Arc::new(request_processor);
|
let request_processor = Arc::new(request_processor);
|
||||||
let request_processor_ = request_processor.clone();
|
let request_processor_ = request_processor.clone();
|
||||||
let (blob_sender, blob_receiver) = channel();
|
let (blob_sender, blob_receiver) = channel();
|
||||||
@@ -107,10 +105,12 @@ impl RequestStage {
|
|||||||
}
|
}
|
||||||
})
|
})
|
||||||
.unwrap();
|
.unwrap();
|
||||||
|
(
|
||||||
RequestStage {
|
RequestStage {
|
||||||
thread_hdl,
|
thread_hdl,
|
||||||
blob_receiver,
|
|
||||||
request_processor,
|
request_processor,
|
||||||
}
|
},
|
||||||
|
blob_receiver,
|
||||||
|
)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
10
src/rpu.rs
10
src/rpu.rs
@@ -24,7 +24,7 @@
|
|||||||
//! ```
|
//! ```
|
||||||
|
|
||||||
use bank::Bank;
|
use bank::Bank;
|
||||||
use packet;
|
use packet::{BlobRecycler, PacketRecycler};
|
||||||
use request_processor::RequestProcessor;
|
use request_processor::RequestProcessor;
|
||||||
use request_stage::RequestStage;
|
use request_stage::RequestStage;
|
||||||
use std::net::UdpSocket;
|
use std::net::UdpSocket;
|
||||||
@@ -45,7 +45,7 @@ impl Rpu {
|
|||||||
respond_socket: UdpSocket,
|
respond_socket: UdpSocket,
|
||||||
exit: Arc<AtomicBool>,
|
exit: Arc<AtomicBool>,
|
||||||
) -> Self {
|
) -> Self {
|
||||||
let packet_recycler = packet::PacketRecycler::default();
|
let packet_recycler = PacketRecycler::default();
|
||||||
let (packet_sender, packet_receiver) = channel();
|
let (packet_sender, packet_receiver) = channel();
|
||||||
let t_receiver = streamer::receiver(
|
let t_receiver = streamer::receiver(
|
||||||
requests_socket,
|
requests_socket,
|
||||||
@@ -54,9 +54,9 @@ impl Rpu {
|
|||||||
packet_sender,
|
packet_sender,
|
||||||
);
|
);
|
||||||
|
|
||||||
let blob_recycler = packet::BlobRecycler::default();
|
let blob_recycler = BlobRecycler::default();
|
||||||
let request_processor = RequestProcessor::new(bank.clone());
|
let request_processor = RequestProcessor::new(bank.clone());
|
||||||
let request_stage = RequestStage::new(
|
let (request_stage, blob_receiver) = RequestStage::new(
|
||||||
request_processor,
|
request_processor,
|
||||||
exit.clone(),
|
exit.clone(),
|
||||||
packet_receiver,
|
packet_receiver,
|
||||||
@@ -68,7 +68,7 @@ impl Rpu {
|
|||||||
respond_socket,
|
respond_socket,
|
||||||
exit.clone(),
|
exit.clone(),
|
||||||
blob_recycler.clone(),
|
blob_recycler.clone(),
|
||||||
request_stage.blob_receiver,
|
blob_receiver,
|
||||||
);
|
);
|
||||||
|
|
||||||
let thread_hdls = vec![t_receiver, t_responder, request_stage.thread_hdl];
|
let thread_hdls = vec![t_receiver, t_responder, request_stage.thread_hdl];
|
||||||
|
@@ -14,22 +14,21 @@ use std::sync::mpsc::{channel, Receiver, Sender};
|
|||||||
use std::sync::{Arc, Mutex};
|
use std::sync::{Arc, Mutex};
|
||||||
use std::thread::{spawn, JoinHandle};
|
use std::thread::{spawn, JoinHandle};
|
||||||
use std::time::Instant;
|
use std::time::Instant;
|
||||||
use streamer;
|
use streamer::{self, PacketReceiver};
|
||||||
use timing;
|
use timing;
|
||||||
|
|
||||||
pub struct SigVerifyStage {
|
pub struct SigVerifyStage {
|
||||||
pub verified_receiver: Receiver<Vec<(SharedPackets, Vec<u8>)>>,
|
|
||||||
pub thread_hdls: Vec<JoinHandle<()>>,
|
pub thread_hdls: Vec<JoinHandle<()>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl SigVerifyStage {
|
impl SigVerifyStage {
|
||||||
pub fn new(exit: Arc<AtomicBool>, packet_receiver: Receiver<SharedPackets>) -> Self {
|
pub fn new(
|
||||||
|
exit: Arc<AtomicBool>,
|
||||||
|
packet_receiver: Receiver<SharedPackets>,
|
||||||
|
) -> (Self, Receiver<Vec<(SharedPackets, Vec<u8>)>>) {
|
||||||
let (verified_sender, verified_receiver) = channel();
|
let (verified_sender, verified_receiver) = channel();
|
||||||
let thread_hdls = Self::verifier_services(exit, packet_receiver, verified_sender);
|
let thread_hdls = Self::verifier_services(exit, packet_receiver, verified_sender);
|
||||||
SigVerifyStage {
|
(SigVerifyStage { thread_hdls }, verified_receiver)
|
||||||
thread_hdls,
|
|
||||||
verified_receiver,
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn verify_batch(batch: Vec<SharedPackets>) -> Vec<(SharedPackets, Vec<u8>)> {
|
fn verify_batch(batch: Vec<SharedPackets>) -> Vec<(SharedPackets, Vec<u8>)> {
|
||||||
@@ -38,7 +37,7 @@ impl SigVerifyStage {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn verifier(
|
fn verifier(
|
||||||
recvr: &Arc<Mutex<streamer::PacketReceiver>>,
|
recvr: &Arc<Mutex<PacketReceiver>>,
|
||||||
sendr: &Arc<Mutex<Sender<Vec<(SharedPackets, Vec<u8>)>>>>,
|
sendr: &Arc<Mutex<Sender<Vec<(SharedPackets, Vec<u8>)>>>>,
|
||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
let (batch, len) =
|
let (batch, len) =
|
||||||
@@ -76,7 +75,7 @@ impl SigVerifyStage {
|
|||||||
|
|
||||||
fn verifier_service(
|
fn verifier_service(
|
||||||
exit: Arc<AtomicBool>,
|
exit: Arc<AtomicBool>,
|
||||||
packet_receiver: Arc<Mutex<streamer::PacketReceiver>>,
|
packet_receiver: Arc<Mutex<PacketReceiver>>,
|
||||||
verified_sender: Arc<Mutex<Sender<Vec<(SharedPackets, Vec<u8>)>>>>,
|
verified_sender: Arc<Mutex<Sender<Vec<(SharedPackets, Vec<u8>)>>>>,
|
||||||
) -> JoinHandle<()> {
|
) -> JoinHandle<()> {
|
||||||
spawn(move || loop {
|
spawn(move || loop {
|
||||||
@@ -89,7 +88,7 @@ impl SigVerifyStage {
|
|||||||
|
|
||||||
fn verifier_services(
|
fn verifier_services(
|
||||||
exit: Arc<AtomicBool>,
|
exit: Arc<AtomicBool>,
|
||||||
packet_receiver: streamer::PacketReceiver,
|
packet_receiver: PacketReceiver,
|
||||||
verified_sender: Sender<Vec<(SharedPackets, Vec<u8>)>>,
|
verified_sender: Sender<Vec<(SharedPackets, Vec<u8>)>>,
|
||||||
) -> Vec<JoinHandle<()>> {
|
) -> Vec<JoinHandle<()>> {
|
||||||
let sender = Arc::new(Mutex::new(verified_sender));
|
let sender = Arc::new(Mutex::new(verified_sender));
|
||||||
|
177
src/streamer.rs
177
src/streamer.rs
@@ -3,21 +3,24 @@
|
|||||||
use crdt::Crdt;
|
use crdt::Crdt;
|
||||||
#[cfg(feature = "erasure")]
|
#[cfg(feature = "erasure")]
|
||||||
use erasure;
|
use erasure;
|
||||||
use packet::{Blob, BlobRecycler, PacketRecycler, SharedBlob, SharedPackets, BLOB_SIZE};
|
use packet::{
|
||||||
|
Blob, BlobRecycler, PacketRecycler, SharedBlob, SharedBlobs, SharedPackets, BLOB_SIZE,
|
||||||
|
};
|
||||||
use result::{Error, Result};
|
use result::{Error, Result};
|
||||||
use std::collections::VecDeque;
|
use std::collections::VecDeque;
|
||||||
|
use std::mem;
|
||||||
use std::net::{SocketAddr, UdpSocket};
|
use std::net::{SocketAddr, UdpSocket};
|
||||||
use std::sync::atomic::{AtomicBool, Ordering};
|
use std::sync::atomic::{AtomicBool, Ordering};
|
||||||
use std::sync::mpsc;
|
use std::sync::mpsc::{Receiver, Sender};
|
||||||
use std::sync::{Arc, RwLock};
|
use std::sync::{Arc, RwLock};
|
||||||
use std::thread::{Builder, JoinHandle};
|
use std::thread::{Builder, JoinHandle};
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
|
|
||||||
pub const WINDOW_SIZE: usize = 2 * 1024;
|
pub const WINDOW_SIZE: u64 = 2 * 1024;
|
||||||
pub type PacketReceiver = mpsc::Receiver<SharedPackets>;
|
pub type PacketReceiver = Receiver<SharedPackets>;
|
||||||
pub type PacketSender = mpsc::Sender<SharedPackets>;
|
pub type PacketSender = Sender<SharedPackets>;
|
||||||
pub type BlobSender = mpsc::Sender<VecDeque<SharedBlob>>;
|
pub type BlobSender = Sender<SharedBlobs>;
|
||||||
pub type BlobReceiver = mpsc::Receiver<VecDeque<SharedBlob>>;
|
pub type BlobReceiver = Receiver<SharedBlobs>;
|
||||||
pub type Window = Arc<RwLock<Vec<Option<SharedBlob>>>>;
|
pub type Window = Arc<RwLock<Vec<Option<SharedBlob>>>>;
|
||||||
|
|
||||||
fn recv_loop(
|
fn recv_loop(
|
||||||
@@ -28,19 +31,18 @@ fn recv_loop(
|
|||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
loop {
|
loop {
|
||||||
let msgs = re.allocate();
|
let msgs = re.allocate();
|
||||||
let msgs_ = msgs.clone();
|
|
||||||
loop {
|
loop {
|
||||||
match msgs.write()
|
let result = msgs.write()
|
||||||
.expect("write lock in fn recv_loop")
|
.expect("write lock in fn recv_loop")
|
||||||
.recv_from(sock)
|
.recv_from(sock);
|
||||||
{
|
match result {
|
||||||
Ok(()) => {
|
Ok(()) => {
|
||||||
channel.send(msgs_)?;
|
channel.send(msgs)?;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
Err(_) => {
|
Err(_) => {
|
||||||
if exit.load(Ordering::Relaxed) {
|
if exit.load(Ordering::Relaxed) {
|
||||||
re.recycle(msgs_);
|
re.recycle(msgs);
|
||||||
return Ok(());
|
return Ok(());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -146,8 +148,8 @@ pub fn blob_receiver(
|
|||||||
fn find_next_missing(
|
fn find_next_missing(
|
||||||
locked_window: &Window,
|
locked_window: &Window,
|
||||||
crdt: &Arc<RwLock<Crdt>>,
|
crdt: &Arc<RwLock<Crdt>>,
|
||||||
consumed: &mut usize,
|
consumed: &mut u64,
|
||||||
received: &mut usize,
|
received: &mut u64,
|
||||||
) -> Result<Vec<(SocketAddr, Vec<u8>)>> {
|
) -> Result<Vec<(SocketAddr, Vec<u8>)>> {
|
||||||
if *received <= *consumed {
|
if *received <= *consumed {
|
||||||
return Err(Error::GenericError);
|
return Err(Error::GenericError);
|
||||||
@@ -155,7 +157,7 @@ fn find_next_missing(
|
|||||||
let window = locked_window.read().unwrap();
|
let window = locked_window.read().unwrap();
|
||||||
let reqs: Vec<_> = (*consumed..*received)
|
let reqs: Vec<_> = (*consumed..*received)
|
||||||
.filter_map(|pix| {
|
.filter_map(|pix| {
|
||||||
let i = pix % WINDOW_SIZE;
|
let i = (pix % WINDOW_SIZE) as usize;
|
||||||
if let &None = &window[i] {
|
if let &None = &window[i] {
|
||||||
let val = crdt.read().unwrap().window_index_request(pix as u64);
|
let val = crdt.read().unwrap().window_index_request(pix as u64);
|
||||||
if let Ok((to, req)) = val {
|
if let Ok((to, req)) = val {
|
||||||
@@ -172,18 +174,18 @@ fn repair_window(
|
|||||||
locked_window: &Window,
|
locked_window: &Window,
|
||||||
crdt: &Arc<RwLock<Crdt>>,
|
crdt: &Arc<RwLock<Crdt>>,
|
||||||
_recycler: &BlobRecycler,
|
_recycler: &BlobRecycler,
|
||||||
last: &mut usize,
|
last: &mut u64,
|
||||||
times: &mut usize,
|
times: &mut usize,
|
||||||
consumed: &mut usize,
|
consumed: &mut u64,
|
||||||
received: &mut usize,
|
received: &mut u64,
|
||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
#[cfg(feature = "erasure")]
|
#[cfg(feature = "erasure")]
|
||||||
{
|
{
|
||||||
if erasure::recover(
|
if erasure::recover(
|
||||||
_recycler,
|
_recycler,
|
||||||
&mut locked_window.write().unwrap(),
|
&mut locked_window.write().unwrap(),
|
||||||
*consumed,
|
*consumed as usize,
|
||||||
*received,
|
*received as usize,
|
||||||
).is_err()
|
).is_err()
|
||||||
{
|
{
|
||||||
trace!("erasure::recover failed");
|
trace!("erasure::recover failed");
|
||||||
@@ -215,8 +217,8 @@ fn recv_window(
|
|||||||
locked_window: &Window,
|
locked_window: &Window,
|
||||||
crdt: &Arc<RwLock<Crdt>>,
|
crdt: &Arc<RwLock<Crdt>>,
|
||||||
recycler: &BlobRecycler,
|
recycler: &BlobRecycler,
|
||||||
consumed: &mut usize,
|
consumed: &mut u64,
|
||||||
received: &mut usize,
|
received: &mut u64,
|
||||||
r: &BlobReceiver,
|
r: &BlobReceiver,
|
||||||
s: &BlobSender,
|
s: &BlobSender,
|
||||||
retransmit: &BlobSender,
|
retransmit: &BlobSender,
|
||||||
@@ -226,6 +228,7 @@ fn recv_window(
|
|||||||
let leader_id = crdt.read()
|
let leader_id = crdt.read()
|
||||||
.expect("'crdt' read lock in fn recv_window")
|
.expect("'crdt' read lock in fn recv_window")
|
||||||
.leader_data()
|
.leader_data()
|
||||||
|
.expect("leader not ready")
|
||||||
.id;
|
.id;
|
||||||
while let Ok(mut nq) = r.try_recv() {
|
while let Ok(mut nq) = r.try_recv() {
|
||||||
dq.append(&mut nq)
|
dq.append(&mut nq)
|
||||||
@@ -269,9 +272,10 @@ fn recv_window(
|
|||||||
//send a contiguous set of blocks
|
//send a contiguous set of blocks
|
||||||
let mut contq = VecDeque::new();
|
let mut contq = VecDeque::new();
|
||||||
while let Some(b) = dq.pop_front() {
|
while let Some(b) = dq.pop_front() {
|
||||||
let b_ = b.clone();
|
let (pix, meta_size) = {
|
||||||
let p = b.write().expect("'b' write lock in fn recv_window");
|
let p = b.write().expect("'b' write lock in fn recv_window");
|
||||||
let pix = p.get_index()? as usize;
|
(p.get_index()?, p.meta.size)
|
||||||
|
};
|
||||||
if pix > *received {
|
if pix > *received {
|
||||||
*received = pix;
|
*received = pix;
|
||||||
}
|
}
|
||||||
@@ -284,16 +288,32 @@ fn recv_window(
|
|||||||
);
|
);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
let w = pix % WINDOW_SIZE;
|
let w = (pix % WINDOW_SIZE) as usize;
|
||||||
//TODO, after the block are authenticated
|
//TODO, after the block are authenticated
|
||||||
//if we get different blocks at the same index
|
//if we get different blocks at the same index
|
||||||
//that is a network failure/attack
|
//that is a network failure/attack
|
||||||
trace!("window w: {} size: {}", w, p.meta.size);
|
trace!("window w: {} size: {}", w, meta_size);
|
||||||
drop(p);
|
|
||||||
{
|
{
|
||||||
let mut window = locked_window.write().unwrap();
|
let mut window = locked_window.write().unwrap();
|
||||||
|
|
||||||
|
// Search the window for old blobs in the window
|
||||||
|
// of consumed to received and clear any old ones
|
||||||
|
for ix in *consumed..(pix + 1) {
|
||||||
|
let k = (ix % WINDOW_SIZE) as usize;
|
||||||
|
if let Some(b) = &mut window[k] {
|
||||||
|
if b.read().unwrap().get_index().unwrap() >= *consumed as u64 {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if let Some(b) = mem::replace(&mut window[k], None) {
|
||||||
|
recycler.recycle(b);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Insert the new blob into the window
|
||||||
|
// spot should be free because we cleared it above
|
||||||
if window[w].is_none() {
|
if window[w].is_none() {
|
||||||
window[w] = Some(b_);
|
window[w] = Some(b);
|
||||||
} else if let Some(cblob) = &window[w] {
|
} else if let Some(cblob) = &window[w] {
|
||||||
if cblob.read().unwrap().get_index().unwrap() != pix as u64 {
|
if cblob.read().unwrap().get_index().unwrap() != pix as u64 {
|
||||||
warn!("overrun blob at index {:}", w);
|
warn!("overrun blob at index {:}", w);
|
||||||
@@ -302,40 +322,45 @@ fn recv_window(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
loop {
|
loop {
|
||||||
let k = *consumed % WINDOW_SIZE;
|
let k = (*consumed % WINDOW_SIZE) as usize;
|
||||||
trace!("k: {} consumed: {}", k, *consumed);
|
trace!("k: {} consumed: {}", k, *consumed);
|
||||||
|
|
||||||
if window[k].is_none() {
|
if window[k].is_none() {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
let mut is_coding = false;
|
let mut is_coding = false;
|
||||||
if let &Some(ref cblob) = &window[k] {
|
if let &Some(ref cblob) = &window[k] {
|
||||||
if cblob
|
let cblob_r = cblob
|
||||||
.read()
|
.read()
|
||||||
.expect("blob read lock for flags streamer::window")
|
.expect("blob read lock for flogs streamer::window");
|
||||||
.is_coding()
|
if cblob_r.get_index().unwrap() < *consumed {
|
||||||
{
|
break;
|
||||||
|
}
|
||||||
|
if cblob_r.is_coding() {
|
||||||
is_coding = true;
|
is_coding = true;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if !is_coding {
|
if !is_coding {
|
||||||
contq.push_back(window[k].clone().expect("clone in fn recv_window"));
|
contq.push_back(window[k].clone().expect("clone in fn recv_window"));
|
||||||
*consumed += 1;
|
*consumed += 1;
|
||||||
|
|
||||||
#[cfg(not(feature = "erasure"))]
|
|
||||||
{
|
|
||||||
window[k] = None;
|
|
||||||
}
|
|
||||||
} else {
|
} else {
|
||||||
#[cfg(feature = "erasure")]
|
#[cfg(feature = "erasure")]
|
||||||
{
|
{
|
||||||
let block_start = *consumed - (*consumed % erasure::NUM_CODED);
|
let block_start = *consumed - (*consumed % erasure::NUM_CODED as u64);
|
||||||
let coding_end = block_start + erasure::NUM_CODED;
|
let coding_end = block_start + erasure::NUM_CODED as u64;
|
||||||
// We've received all this block's data blobs, go and null out the window now
|
// We've received all this block's data blobs, go and null out the window now
|
||||||
for j in block_start..coding_end {
|
for j in block_start..*consumed {
|
||||||
window[j % WINDOW_SIZE] = None;
|
if let Some(b) =
|
||||||
|
mem::replace(&mut window[(j % WINDOW_SIZE) as usize], None)
|
||||||
|
{
|
||||||
|
recycler.recycle(b);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for j in *consumed..coding_end {
|
||||||
|
window[(j % WINDOW_SIZE) as usize] = None;
|
||||||
}
|
}
|
||||||
|
|
||||||
*consumed += erasure::MAX_MISSING;
|
*consumed += erasure::MAX_MISSING as u64;
|
||||||
debug!(
|
debug!(
|
||||||
"skipping processing coding blob k: {} consumed: {}",
|
"skipping processing coding blob k: {} consumed: {}",
|
||||||
k, *consumed
|
k, *consumed
|
||||||
@@ -354,7 +379,7 @@ fn recv_window(
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn print_window(locked_window: &Window, consumed: usize) {
|
fn print_window(locked_window: &Window, consumed: u64) {
|
||||||
{
|
{
|
||||||
let buf: Vec<_> = locked_window
|
let buf: Vec<_> = locked_window
|
||||||
.read()
|
.read()
|
||||||
@@ -362,7 +387,7 @@ fn print_window(locked_window: &Window, consumed: usize) {
|
|||||||
.iter()
|
.iter()
|
||||||
.enumerate()
|
.enumerate()
|
||||||
.map(|(i, v)| {
|
.map(|(i, v)| {
|
||||||
if i == (consumed % WINDOW_SIZE) {
|
if i == (consumed % WINDOW_SIZE) as usize {
|
||||||
"_"
|
"_"
|
||||||
} else if v.is_none() {
|
} else if v.is_none() {
|
||||||
"0"
|
"0"
|
||||||
@@ -384,13 +409,14 @@ fn print_window(locked_window: &Window, consumed: usize) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub fn default_window() -> Window {
|
pub fn default_window() -> Window {
|
||||||
Arc::new(RwLock::new(vec![None; WINDOW_SIZE]))
|
Arc::new(RwLock::new(vec![None; WINDOW_SIZE as usize]))
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn window(
|
pub fn window(
|
||||||
exit: Arc<AtomicBool>,
|
exit: Arc<AtomicBool>,
|
||||||
crdt: Arc<RwLock<Crdt>>,
|
crdt: Arc<RwLock<Crdt>>,
|
||||||
window: Window,
|
window: Window,
|
||||||
|
entry_height: u64,
|
||||||
recycler: BlobRecycler,
|
recycler: BlobRecycler,
|
||||||
r: BlobReceiver,
|
r: BlobReceiver,
|
||||||
s: BlobSender,
|
s: BlobSender,
|
||||||
@@ -399,9 +425,9 @@ pub fn window(
|
|||||||
Builder::new()
|
Builder::new()
|
||||||
.name("solana-window".to_string())
|
.name("solana-window".to_string())
|
||||||
.spawn(move || {
|
.spawn(move || {
|
||||||
let mut consumed = 0;
|
let mut consumed = entry_height;
|
||||||
let mut received = 0;
|
let mut received = entry_height;
|
||||||
let mut last = 0;
|
let mut last = entry_height;
|
||||||
let mut times = 0;
|
let mut times = 0;
|
||||||
loop {
|
loop {
|
||||||
if exit.load(Ordering::Relaxed) {
|
if exit.load(Ordering::Relaxed) {
|
||||||
@@ -445,40 +471,46 @@ fn broadcast(
|
|||||||
while let Ok(mut nq) = r.try_recv() {
|
while let Ok(mut nq) = r.try_recv() {
|
||||||
dq.append(&mut nq);
|
dq.append(&mut nq);
|
||||||
}
|
}
|
||||||
let mut blobs: Vec<_> = dq.into_iter().collect();
|
|
||||||
|
|
||||||
print_window(window, *receive_index as usize);
|
// flatten deque to vec
|
||||||
|
let blobs_vec: Vec<_> = dq.into_iter().collect();
|
||||||
|
|
||||||
|
// We could receive more blobs than window slots so
|
||||||
|
// break them up into window-sized chunks to process
|
||||||
|
let blobs_chunked = blobs_vec.chunks(WINDOW_SIZE as usize).map(|x| x.to_vec());
|
||||||
|
|
||||||
|
print_window(window, *receive_index);
|
||||||
|
|
||||||
|
for mut blobs in blobs_chunked {
|
||||||
// Insert the coding blobs into the blob stream
|
// Insert the coding blobs into the blob stream
|
||||||
#[cfg(feature = "erasure")]
|
#[cfg(feature = "erasure")]
|
||||||
erasure::add_coding_blobs(recycler, &mut blobs, *receive_index);
|
erasure::add_coding_blobs(recycler, &mut blobs, *receive_index);
|
||||||
|
|
||||||
let blobs_len = blobs.len();
|
let blobs_len = blobs.len();
|
||||||
info!("broadcast blobs.len: {}", blobs_len);
|
debug!("broadcast blobs.len: {}", blobs_len);
|
||||||
|
|
||||||
// Index the blobs
|
// Index the blobs
|
||||||
Crdt::index_blobs(crdt, &blobs, receive_index)?;
|
Crdt::index_blobs(crdt, &blobs, receive_index)?;
|
||||||
// keep the cache of blobs that are broadcast
|
// keep the cache of blobs that are broadcast
|
||||||
{
|
{
|
||||||
let mut win = window.write().unwrap();
|
let mut win = window.write().unwrap();
|
||||||
|
assert!(blobs.len() <= win.len());
|
||||||
for b in &blobs {
|
for b in &blobs {
|
||||||
let ix = b.read().unwrap().get_index().expect("blob index");
|
let ix = b.read().unwrap().get_index().expect("blob index");
|
||||||
let pos = (ix as usize) % WINDOW_SIZE;
|
let pos = (ix % WINDOW_SIZE) as usize;
|
||||||
if let Some(x) = &win[pos] {
|
if let Some(x) = mem::replace(&mut win[pos], None) {
|
||||||
trace!(
|
trace!(
|
||||||
"popped {} at {}",
|
"popped {} at {}",
|
||||||
x.read().unwrap().get_index().unwrap(),
|
x.read().unwrap().get_index().unwrap(),
|
||||||
pos
|
pos
|
||||||
);
|
);
|
||||||
recycler.recycle(x.clone());
|
recycler.recycle(x);
|
||||||
}
|
}
|
||||||
trace!("null {}", pos);
|
trace!("null {}", pos);
|
||||||
win[pos] = None;
|
|
||||||
assert!(win[pos].is_none());
|
|
||||||
}
|
}
|
||||||
while let Some(b) = blobs.pop() {
|
while let Some(b) = blobs.pop() {
|
||||||
let ix = b.read().unwrap().get_index().expect("blob index");
|
let ix = b.read().unwrap().get_index().expect("blob index");
|
||||||
let pos = (ix as usize) % WINDOW_SIZE;
|
let pos = (ix % WINDOW_SIZE) as usize;
|
||||||
trace!("caching {} at {}", ix, pos);
|
trace!("caching {} at {}", ix, pos);
|
||||||
assert!(win[pos].is_none());
|
assert!(win[pos].is_none());
|
||||||
win[pos] = Some(b);
|
win[pos] = Some(b);
|
||||||
@@ -488,20 +520,18 @@ fn broadcast(
|
|||||||
// Fill in the coding blob data from the window data blobs
|
// Fill in the coding blob data from the window data blobs
|
||||||
#[cfg(feature = "erasure")]
|
#[cfg(feature = "erasure")]
|
||||||
{
|
{
|
||||||
if erasure::generate_coding(
|
erasure::generate_coding(
|
||||||
&mut window.write().unwrap(),
|
&mut window.write().unwrap(),
|
||||||
*receive_index as usize,
|
*receive_index as usize,
|
||||||
blobs_len,
|
blobs_len,
|
||||||
).is_err()
|
).map_err(|_| Error::GenericError)?;
|
||||||
{
|
|
||||||
return Err(Error::GenericError);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
*receive_index += blobs_len as u64;
|
*receive_index += blobs_len as u64;
|
||||||
|
|
||||||
// Send blobs out from the window
|
// Send blobs out from the window
|
||||||
Crdt::broadcast(crdt, &window, &sock, transmit_index, *receive_index)?;
|
Crdt::broadcast(crdt, &window, &sock, transmit_index, *receive_index)?;
|
||||||
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -519,14 +549,15 @@ pub fn broadcaster(
|
|||||||
exit: Arc<AtomicBool>,
|
exit: Arc<AtomicBool>,
|
||||||
crdt: Arc<RwLock<Crdt>>,
|
crdt: Arc<RwLock<Crdt>>,
|
||||||
window: Window,
|
window: Window,
|
||||||
|
entry_height: u64,
|
||||||
recycler: BlobRecycler,
|
recycler: BlobRecycler,
|
||||||
r: BlobReceiver,
|
r: BlobReceiver,
|
||||||
) -> JoinHandle<()> {
|
) -> JoinHandle<()> {
|
||||||
Builder::new()
|
Builder::new()
|
||||||
.name("solana-broadcaster".to_string())
|
.name("solana-broadcaster".to_string())
|
||||||
.spawn(move || {
|
.spawn(move || {
|
||||||
let mut transmit_index = 0;
|
let mut transmit_index = entry_height;
|
||||||
let mut receive_index = 0;
|
let mut receive_index = entry_height;
|
||||||
loop {
|
loop {
|
||||||
if exit.load(Ordering::Relaxed) {
|
if exit.load(Ordering::Relaxed) {
|
||||||
break;
|
break;
|
||||||
@@ -655,9 +686,8 @@ mod bench {
|
|||||||
let timer = Duration::new(1, 0);
|
let timer = Duration::new(1, 0);
|
||||||
match r.recv_timeout(timer) {
|
match r.recv_timeout(timer) {
|
||||||
Ok(msgs) => {
|
Ok(msgs) => {
|
||||||
let msgs_ = msgs.clone();
|
|
||||||
*rvs.lock().unwrap() += msgs.read().unwrap().packets.len();
|
*rvs.lock().unwrap() += msgs.read().unwrap().packets.len();
|
||||||
recycler.recycle(msgs_);
|
recycler.recycle(msgs);
|
||||||
}
|
}
|
||||||
_ => (),
|
_ => (),
|
||||||
}
|
}
|
||||||
@@ -754,12 +784,13 @@ mod test {
|
|||||||
let mut msgs = VecDeque::new();
|
let mut msgs = VecDeque::new();
|
||||||
for i in 0..10 {
|
for i in 0..10 {
|
||||||
let b = resp_recycler.allocate();
|
let b = resp_recycler.allocate();
|
||||||
let b_ = b.clone();
|
{
|
||||||
let mut w = b.write().unwrap();
|
let mut w = b.write().unwrap();
|
||||||
w.data[0] = i as u8;
|
w.data[0] = i as u8;
|
||||||
w.meta.size = PACKET_DATA_SIZE;
|
w.meta.size = PACKET_DATA_SIZE;
|
||||||
w.meta.set_addr(&addr);
|
w.meta.set_addr(&addr);
|
||||||
msgs.push_back(b_);
|
}
|
||||||
|
msgs.push_back(b);
|
||||||
}
|
}
|
||||||
s_responder.send(msgs).expect("send");
|
s_responder.send(msgs).expect("send");
|
||||||
let mut num = 0;
|
let mut num = 0;
|
||||||
@@ -812,6 +843,7 @@ mod test {
|
|||||||
exit.clone(),
|
exit.clone(),
|
||||||
subs,
|
subs,
|
||||||
win,
|
win,
|
||||||
|
0,
|
||||||
resp_recycler.clone(),
|
resp_recycler.clone(),
|
||||||
r_reader,
|
r_reader,
|
||||||
s_window,
|
s_window,
|
||||||
@@ -828,14 +860,15 @@ mod test {
|
|||||||
for v in 0..10 {
|
for v in 0..10 {
|
||||||
let i = 9 - v;
|
let i = 9 - v;
|
||||||
let b = resp_recycler.allocate();
|
let b = resp_recycler.allocate();
|
||||||
let b_ = b.clone();
|
{
|
||||||
let mut w = b.write().unwrap();
|
let mut w = b.write().unwrap();
|
||||||
w.set_index(i).unwrap();
|
w.set_index(i).unwrap();
|
||||||
w.set_id(me_id).unwrap();
|
w.set_id(me_id).unwrap();
|
||||||
assert_eq!(i, w.get_index().unwrap());
|
assert_eq!(i, w.get_index().unwrap());
|
||||||
w.meta.size = PACKET_DATA_SIZE;
|
w.meta.size = PACKET_DATA_SIZE;
|
||||||
w.meta.set_addr(&tn.data.gossip_addr);
|
w.meta.set_addr(&tn.data.gossip_addr);
|
||||||
msgs.push_back(b_);
|
}
|
||||||
|
msgs.push_back(b);
|
||||||
}
|
}
|
||||||
s_responder.send(msgs).expect("send");
|
s_responder.send(msgs).expect("send");
|
||||||
let mut num = 0;
|
let mut num = 0;
|
||||||
|
@@ -20,7 +20,8 @@ pub struct ThinClient {
|
|||||||
transactions_socket: UdpSocket,
|
transactions_socket: UdpSocket,
|
||||||
last_id: Option<Hash>,
|
last_id: Option<Hash>,
|
||||||
transaction_count: u64,
|
transaction_count: u64,
|
||||||
balances: HashMap<PublicKey, Option<i64>>,
|
balances: HashMap<PublicKey, i64>,
|
||||||
|
signature_status: bool,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl ThinClient {
|
impl ThinClient {
|
||||||
@@ -41,6 +42,7 @@ impl ThinClient {
|
|||||||
last_id: None,
|
last_id: None,
|
||||||
transaction_count: 0,
|
transaction_count: 0,
|
||||||
balances: HashMap::new(),
|
balances: HashMap::new(),
|
||||||
|
signature_status: false,
|
||||||
};
|
};
|
||||||
client
|
client
|
||||||
}
|
}
|
||||||
@@ -61,13 +63,24 @@ impl ThinClient {
|
|||||||
self.balances.insert(key, val);
|
self.balances.insert(key, val);
|
||||||
}
|
}
|
||||||
Response::LastId { id } => {
|
Response::LastId { id } => {
|
||||||
info!("Response last_id {:?}", id);
|
trace!("Response last_id {:?}", id);
|
||||||
self.last_id = Some(id);
|
self.last_id = Some(id);
|
||||||
}
|
}
|
||||||
Response::TransactionCount { transaction_count } => {
|
Response::TransactionCount { transaction_count } => {
|
||||||
info!("Response transaction count {:?}", transaction_count);
|
trace!("Response transaction count {:?}", transaction_count);
|
||||||
self.transaction_count = transaction_count;
|
self.transaction_count = transaction_count;
|
||||||
}
|
}
|
||||||
|
Response::SignatureStatus { signature_status } => {
|
||||||
|
self.signature_status = signature_status;
|
||||||
|
match signature_status {
|
||||||
|
true => {
|
||||||
|
trace!("Response found signature");
|
||||||
|
}
|
||||||
|
false => {
|
||||||
|
trace!("Response signature not found");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -111,7 +124,10 @@ impl ThinClient {
|
|||||||
}
|
}
|
||||||
self.process_response(resp);
|
self.process_response(resp);
|
||||||
}
|
}
|
||||||
self.balances[pubkey].ok_or(io::Error::new(io::ErrorKind::Other, "nokey"))
|
self.balances
|
||||||
|
.get(pubkey)
|
||||||
|
.map(|x| *x)
|
||||||
|
.ok_or(io::Error::new(io::ErrorKind::Other, "nokey"))
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Request the transaction count. If the response packet is dropped by the network,
|
/// Request the transaction count. If the response packet is dropped by the network,
|
||||||
@@ -141,21 +157,27 @@ impl ThinClient {
|
|||||||
/// Request the last Entry ID from the server. This method blocks
|
/// Request the last Entry ID from the server. This method blocks
|
||||||
/// until the server sends a response.
|
/// until the server sends a response.
|
||||||
pub fn get_last_id(&mut self) -> Hash {
|
pub fn get_last_id(&mut self) -> Hash {
|
||||||
info!("get_last_id");
|
trace!("get_last_id");
|
||||||
let req = Request::GetLastId;
|
let req = Request::GetLastId;
|
||||||
let data = serialize(&req).expect("serialize GetLastId in pub fn get_last_id");
|
let data = serialize(&req).expect("serialize GetLastId in pub fn get_last_id");
|
||||||
let mut done = false;
|
let mut done = false;
|
||||||
while !done {
|
while !done {
|
||||||
|
debug!("get_last_id send_to {}", &self.requests_addr);
|
||||||
self.requests_socket
|
self.requests_socket
|
||||||
.send_to(&data, &self.requests_addr)
|
.send_to(&data, &self.requests_addr)
|
||||||
.expect("buffer error in pub fn get_last_id");
|
.expect("buffer error in pub fn get_last_id");
|
||||||
|
|
||||||
if let Ok(resp) = self.recv_response() {
|
match self.recv_response() {
|
||||||
|
Ok(resp) => {
|
||||||
if let &Response::LastId { .. } = &resp {
|
if let &Response::LastId { .. } = &resp {
|
||||||
done = true;
|
done = true;
|
||||||
}
|
}
|
||||||
self.process_response(resp);
|
self.process_response(resp);
|
||||||
}
|
}
|
||||||
|
Err(e) => {
|
||||||
|
debug!("thin_client get_last_id error: {}", e);
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
self.last_id.expect("some last_id")
|
self.last_id.expect("some last_id")
|
||||||
}
|
}
|
||||||
@@ -167,13 +189,35 @@ impl ThinClient {
|
|||||||
let now = Instant::now();
|
let now = Instant::now();
|
||||||
loop {
|
loop {
|
||||||
balance = self.get_balance(pubkey);
|
balance = self.get_balance(pubkey);
|
||||||
if balance.is_ok() || now.elapsed().as_secs() > 1 {
|
if balance.is_ok() && *balance.as_ref().unwrap() != 0 || now.elapsed().as_secs() > 1 {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
balance
|
balance
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Check a signature in the bank. This method blocks
|
||||||
|
/// until the server sends a response.
|
||||||
|
pub fn check_signature(&mut self, sig: &Signature) -> bool {
|
||||||
|
trace!("check_signature");
|
||||||
|
let req = Request::GetSignature { signature: *sig };
|
||||||
|
let data = serialize(&req).expect("serialize GetSignature in pub fn check_signature");
|
||||||
|
let mut done = false;
|
||||||
|
while !done {
|
||||||
|
self.requests_socket
|
||||||
|
.send_to(&data, &self.requests_addr)
|
||||||
|
.expect("buffer error in pub fn get_last_id");
|
||||||
|
|
||||||
|
if let Ok(resp) = self.recv_response() {
|
||||||
|
if let &Response::SignatureStatus { .. } = &resp {
|
||||||
|
done = true;
|
||||||
|
}
|
||||||
|
self.process_response(resp);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
self.signature_status
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
@@ -182,9 +226,9 @@ mod tests {
|
|||||||
use bank::Bank;
|
use bank::Bank;
|
||||||
use budget::Budget;
|
use budget::Budget;
|
||||||
use crdt::TestNode;
|
use crdt::TestNode;
|
||||||
|
use fullnode::FullNode;
|
||||||
use logger;
|
use logger;
|
||||||
use mint::Mint;
|
use mint::Mint;
|
||||||
use server::Server;
|
|
||||||
use signature::{KeyPair, KeyPairUtil};
|
use signature::{KeyPair, KeyPairUtil};
|
||||||
use std::io::sink;
|
use std::io::sink;
|
||||||
use std::sync::atomic::{AtomicBool, Ordering};
|
use std::sync::atomic::{AtomicBool, Ordering};
|
||||||
@@ -203,8 +247,9 @@ mod tests {
|
|||||||
let bob_pubkey = KeyPair::new().pubkey();
|
let bob_pubkey = KeyPair::new().pubkey();
|
||||||
let exit = Arc::new(AtomicBool::new(false));
|
let exit = Arc::new(AtomicBool::new(false));
|
||||||
|
|
||||||
let server = Server::new_leader(
|
let server = FullNode::new_leader(
|
||||||
bank,
|
bank,
|
||||||
|
0,
|
||||||
Some(Duration::from_millis(30)),
|
Some(Duration::from_millis(30)),
|
||||||
leader.data.clone(),
|
leader.data.clone(),
|
||||||
leader.sockets.requests,
|
leader.sockets.requests,
|
||||||
@@ -247,8 +292,9 @@ mod tests {
|
|||||||
let bob_pubkey = KeyPair::new().pubkey();
|
let bob_pubkey = KeyPair::new().pubkey();
|
||||||
let exit = Arc::new(AtomicBool::new(false));
|
let exit = Arc::new(AtomicBool::new(false));
|
||||||
|
|
||||||
let server = Server::new_leader(
|
let server = FullNode::new_leader(
|
||||||
bank,
|
bank,
|
||||||
|
0,
|
||||||
Some(Duration::from_millis(30)),
|
Some(Duration::from_millis(30)),
|
||||||
leader.data.clone(),
|
leader.data.clone(),
|
||||||
leader.sockets.requests,
|
leader.sockets.requests,
|
||||||
@@ -294,4 +340,53 @@ mod tests {
|
|||||||
t.join().unwrap();
|
t.join().unwrap();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_client_check_signature() {
|
||||||
|
logger::setup();
|
||||||
|
let leader = TestNode::new();
|
||||||
|
let alice = Mint::new(10_000);
|
||||||
|
let bank = Bank::new(&alice);
|
||||||
|
let bob_pubkey = KeyPair::new().pubkey();
|
||||||
|
let exit = Arc::new(AtomicBool::new(false));
|
||||||
|
|
||||||
|
let server = FullNode::new_leader(
|
||||||
|
bank,
|
||||||
|
0,
|
||||||
|
Some(Duration::from_millis(30)),
|
||||||
|
leader.data.clone(),
|
||||||
|
leader.sockets.requests,
|
||||||
|
leader.sockets.transaction,
|
||||||
|
leader.sockets.broadcast,
|
||||||
|
leader.sockets.respond,
|
||||||
|
leader.sockets.gossip,
|
||||||
|
exit.clone(),
|
||||||
|
sink(),
|
||||||
|
);
|
||||||
|
sleep(Duration::from_millis(300));
|
||||||
|
|
||||||
|
let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||||
|
requests_socket
|
||||||
|
.set_read_timeout(Some(Duration::new(5, 0)))
|
||||||
|
.unwrap();
|
||||||
|
let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||||
|
let mut client = ThinClient::new(
|
||||||
|
leader.data.requests_addr,
|
||||||
|
requests_socket,
|
||||||
|
leader.data.transactions_addr,
|
||||||
|
transactions_socket,
|
||||||
|
);
|
||||||
|
let last_id = client.get_last_id();
|
||||||
|
let sig = client
|
||||||
|
.transfer(500, &alice.keypair(), bob_pubkey, &last_id)
|
||||||
|
.unwrap();
|
||||||
|
sleep(Duration::from_millis(100));
|
||||||
|
|
||||||
|
assert!(client.check_signature(&sig));
|
||||||
|
|
||||||
|
exit.store(true, Ordering::Relaxed);
|
||||||
|
for t in server.thread_hdls {
|
||||||
|
t.join().unwrap();
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
37
src/tpu.rs
37
src/tpu.rs
@@ -34,14 +34,13 @@ use sigverify_stage::SigVerifyStage;
|
|||||||
use std::io::Write;
|
use std::io::Write;
|
||||||
use std::net::UdpSocket;
|
use std::net::UdpSocket;
|
||||||
use std::sync::atomic::AtomicBool;
|
use std::sync::atomic::AtomicBool;
|
||||||
use std::sync::{Arc, Mutex};
|
use std::sync::Arc;
|
||||||
use std::thread::JoinHandle;
|
use std::thread::JoinHandle;
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
use streamer::BlobReceiver;
|
use streamer::BlobReceiver;
|
||||||
use write_stage::WriteStage;
|
use write_stage::WriteStage;
|
||||||
|
|
||||||
pub struct Tpu {
|
pub struct Tpu {
|
||||||
pub blob_receiver: BlobReceiver,
|
|
||||||
pub thread_hdls: Vec<JoinHandle<()>>,
|
pub thread_hdls: Vec<JoinHandle<()>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -53,36 +52,35 @@ impl Tpu {
|
|||||||
blob_recycler: BlobRecycler,
|
blob_recycler: BlobRecycler,
|
||||||
exit: Arc<AtomicBool>,
|
exit: Arc<AtomicBool>,
|
||||||
writer: W,
|
writer: W,
|
||||||
) -> Self {
|
) -> (Self, BlobReceiver) {
|
||||||
let packet_recycler = PacketRecycler::default();
|
let packet_recycler = PacketRecycler::default();
|
||||||
|
|
||||||
let fetch_stage =
|
let (fetch_stage, packet_receiver) =
|
||||||
FetchStage::new(transactions_socket, exit.clone(), packet_recycler.clone());
|
FetchStage::new(transactions_socket, exit.clone(), packet_recycler.clone());
|
||||||
|
|
||||||
let sigverify_stage = SigVerifyStage::new(exit.clone(), fetch_stage.packet_receiver);
|
let (sigverify_stage, verified_receiver) =
|
||||||
|
SigVerifyStage::new(exit.clone(), packet_receiver);
|
||||||
|
|
||||||
let banking_stage = BankingStage::new(
|
let (banking_stage, signal_receiver) = BankingStage::new(
|
||||||
bank.clone(),
|
bank.clone(),
|
||||||
exit.clone(),
|
exit.clone(),
|
||||||
sigverify_stage.verified_receiver,
|
verified_receiver,
|
||||||
packet_recycler.clone(),
|
packet_recycler.clone(),
|
||||||
);
|
);
|
||||||
|
|
||||||
let record_stage = match tick_duration {
|
let (record_stage, entry_receiver) = match tick_duration {
|
||||||
Some(tick_duration) => RecordStage::new_with_clock(
|
Some(tick_duration) => {
|
||||||
banking_stage.signal_receiver,
|
RecordStage::new_with_clock(signal_receiver, &bank.last_id(), tick_duration)
|
||||||
&bank.last_id(),
|
}
|
||||||
tick_duration,
|
None => RecordStage::new(signal_receiver, &bank.last_id()),
|
||||||
),
|
|
||||||
None => RecordStage::new(banking_stage.signal_receiver, &bank.last_id()),
|
|
||||||
};
|
};
|
||||||
|
|
||||||
let write_stage = WriteStage::new(
|
let (write_stage, blob_receiver) = WriteStage::new(
|
||||||
bank.clone(),
|
bank.clone(),
|
||||||
exit.clone(),
|
exit.clone(),
|
||||||
blob_recycler.clone(),
|
blob_recycler.clone(),
|
||||||
Mutex::new(writer),
|
writer,
|
||||||
record_stage.entry_receiver,
|
entry_receiver,
|
||||||
);
|
);
|
||||||
let mut thread_hdls = vec![
|
let mut thread_hdls = vec![
|
||||||
banking_stage.thread_hdl,
|
banking_stage.thread_hdl,
|
||||||
@@ -91,9 +89,6 @@ impl Tpu {
|
|||||||
];
|
];
|
||||||
thread_hdls.extend(fetch_stage.thread_hdls.into_iter());
|
thread_hdls.extend(fetch_stage.thread_hdls.into_iter());
|
||||||
thread_hdls.extend(sigverify_stage.thread_hdls.into_iter());
|
thread_hdls.extend(sigverify_stage.thread_hdls.into_iter());
|
||||||
Tpu {
|
(Tpu { thread_hdls }, blob_receiver)
|
||||||
blob_receiver: write_stage.blob_receiver,
|
|
||||||
thread_hdls,
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
39
src/tvu.rs
39
src/tvu.rs
@@ -37,13 +37,13 @@
|
|||||||
use bank::Bank;
|
use bank::Bank;
|
||||||
use blob_fetch_stage::BlobFetchStage;
|
use blob_fetch_stage::BlobFetchStage;
|
||||||
use crdt::Crdt;
|
use crdt::Crdt;
|
||||||
use packet;
|
use packet::BlobRecycler;
|
||||||
use replicate_stage::ReplicateStage;
|
use replicate_stage::ReplicateStage;
|
||||||
use std::net::UdpSocket;
|
use std::net::UdpSocket;
|
||||||
use std::sync::atomic::AtomicBool;
|
use std::sync::atomic::AtomicBool;
|
||||||
 use std::sync::{Arc, RwLock};
 use std::thread::JoinHandle;
-use streamer;
+use streamer::Window;
 use window_stage::WindowStage;

 pub struct Tvu {
@@ -55,6 +55,7 @@ impl Tvu {
     /// on the bank state.
     /// # Arguments
     /// * `bank` - The bank state.
+    /// * `entry_height` - Initial ledger height, passed to replicate stage
     /// * `crdt` - The crdt state.
     /// * `window` - The window state.
     /// * `replicate_socket` - my replicate socket
@@ -63,15 +64,16 @@ impl Tvu {
     /// * `exit` - The exit signal.
     pub fn new(
         bank: Arc<Bank>,
+        entry_height: u64,
         crdt: Arc<RwLock<Crdt>>,
-        window: streamer::Window,
+        window: Window,
         replicate_socket: UdpSocket,
         repair_socket: UdpSocket,
         retransmit_socket: UdpSocket,
         exit: Arc<AtomicBool>,
     ) -> Self {
-        let blob_recycler = packet::BlobRecycler::default();
-        let fetch_stage = BlobFetchStage::new_multi_socket(
+        let blob_recycler = BlobRecycler::default();
+        let (fetch_stage, blob_receiver) = BlobFetchStage::new_multi_socket(
             vec![replicate_socket, repair_socket],
             exit.clone(),
             blob_recycler.clone(),
@@ -79,17 +81,17 @@ impl Tvu {
         //TODO
         //the packets coming out of blob_receiver need to be sent to the GPU and verified
         //then sent to the window, which does the erasure coding reconstruction
-        let window_stage = WindowStage::new(
+        let (window_stage, blob_receiver) = WindowStage::new(
             crdt,
             window,
+            entry_height,
             retransmit_socket,
             exit.clone(),
             blob_recycler.clone(),
-            fetch_stage.blob_receiver,
+            blob_receiver,
         );

-        let replicate_stage =
-            ReplicateStage::new(bank, exit, window_stage.blob_receiver, blob_recycler);
+        let replicate_stage = ReplicateStage::new(bank, exit, blob_receiver);

         let mut threads = vec![replicate_stage.thread_hdl];
         threads.extend(fetch_stage.thread_hdls.into_iter());
@@ -119,7 +121,7 @@ pub mod tests {
     use std::sync::mpsc::channel;
     use std::sync::{Arc, RwLock};
     use std::time::Duration;
-    use streamer;
+    use streamer::{self, Window};
     use transaction::Transaction;
     use tvu::Tvu;

@@ -127,7 +129,7 @@ pub mod tests {
         crdt: Arc<RwLock<Crdt>>,
         listen: UdpSocket,
         exit: Arc<AtomicBool>,
-    ) -> Result<(Ncp, streamer::Window)> {
+    ) -> Result<(Ncp, Window)> {
         let window = streamer::default_window();
         let send_sock = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
         let ncp = Ncp::new(crdt, window.clone(), listen, send_sock, exit)?;
@@ -193,6 +195,7 @@ pub mod tests {

         let tvu = Tvu::new(
             bank.clone(),
+            0,
             cref1,
             dr_1.1,
             target1.sockets.replicate,
@@ -209,7 +212,7 @@ pub mod tests {
         let transfer_amount = 501;
         let bob_keypair = KeyPair::new();
         for i in 0..num_transfers {
-            let entry0 = Entry::new(&cur_hash, i, vec![]);
+            let entry0 = Entry::new(&cur_hash, i, vec![], false);
             bank.register_entry_id(&cur_hash);
             cur_hash = hash(&cur_hash);

@@ -221,7 +224,7 @@ pub mod tests {
             );
             bank.register_entry_id(&cur_hash);
             cur_hash = hash(&cur_hash);
-            let entry1 = Entry::new(&cur_hash, i + num_transfers, vec![tx0]);
+            let entry1 = Entry::new(&cur_hash, i + num_transfers, vec![tx0], false);
             bank.register_entry_id(&cur_hash);
             cur_hash = hash(&cur_hash);

@@ -229,7 +232,7 @@ pub mod tests {

             for entry in vec![entry0, entry1] {
                 let b = resp_recycler.allocate();
-                let b_ = b.clone();
+                {
                     let mut w = b.write().unwrap();
                     w.set_index(blob_id).unwrap();
                     blob_id += 1;
@@ -240,8 +243,8 @@ pub mod tests {
                     w.data_mut()[..serialized_entry.len()].copy_from_slice(&serialized_entry);
                     w.set_size(serialized_entry.len());
                     w.meta.set_addr(&replicate_addr);
-                    drop(w);
-                msgs.push_back(b_);
+                }
+                msgs.push_back(b);
             }
         }

@@ -254,10 +257,10 @@ pub mod tests {
             trace!("msg: {:?}", msg);
         }

-        let alice_balance = bank.get_balance(&mint.keypair().pubkey()).unwrap();
+        let alice_balance = bank.get_balance(&mint.keypair().pubkey());
         assert_eq!(alice_balance, alice_ref_balance);

-        let bob_balance = bank.get_balance(&bob_keypair.pubkey()).unwrap();
+        let bob_balance = bank.get_balance(&bob_keypair.pubkey());
         assert_eq!(bob_balance, starting_balance - alice_ref_balance);

         exit.store(true, Ordering::Relaxed);
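Taken together, the hunks above give Tvu::new an initial entry_height parameter and move each stage's outgoing blob channel from a public field to the constructor's return value. A minimal sketch of the new call shape, based only on the signature shown in this diff; bank, crdt, window, the sockets and exit are hypothetical placeholders for values the caller already owns:

    // Hypothetical wiring; every argument stands in for a value the caller
    // already has, matching the parameter list shown in the diff above.
    let tvu = Tvu::new(
        bank.clone(),       // Arc<Bank>
        0,                  // entry_height: u64, new in this change
        crdt.clone(),       // Arc<RwLock<Crdt>>
        window,             // streamer::Window
        replicate_socket,   // UdpSocket
        repair_socket,      // UdpSocket
        retransmit_socket,  // UdpSocket
        exit.clone(),       // Arc<AtomicBool>
    );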
@@ -1,28 +1,28 @@
 //! The `window_stage` maintains the blob window

 use crdt::Crdt;
-use packet;
+use packet::BlobRecycler;
 use std::net::UdpSocket;
 use std::sync::atomic::AtomicBool;
 use std::sync::mpsc::channel;
 use std::sync::{Arc, RwLock};
 use std::thread::JoinHandle;
-use streamer;
+use streamer::{self, BlobReceiver, Window};

 pub struct WindowStage {
-    pub blob_receiver: streamer::BlobReceiver,
     pub thread_hdls: Vec<JoinHandle<()>>,
 }

 impl WindowStage {
     pub fn new(
         crdt: Arc<RwLock<Crdt>>,
-        window: streamer::Window,
+        window: Window,
+        entry_height: u64,
         retransmit_socket: UdpSocket,
         exit: Arc<AtomicBool>,
-        blob_recycler: packet::BlobRecycler,
-        fetch_stage_receiver: streamer::BlobReceiver,
-    ) -> Self {
+        blob_recycler: BlobRecycler,
+        fetch_stage_receiver: BlobReceiver,
+    ) -> (Self, BlobReceiver) {
         let (retransmit_sender, retransmit_receiver) = channel();

         let t_retransmit = streamer::retransmitter(
@@ -37,6 +37,7 @@ impl WindowStage {
             exit.clone(),
             crdt.clone(),
             window,
+            entry_height,
             blob_recycler.clone(),
             fetch_stage_receiver,
             blob_sender,
@@ -44,9 +45,6 @@ impl WindowStage {
         );
         let thread_hdls = vec![t_retransmit, t_window];

-        WindowStage {
-            blob_receiver,
-            thread_hdls,
-        }
+        (WindowStage { thread_hdls }, blob_receiver)
     }
 }
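The change to WindowStage is the same shape as the Tvu change above: instead of exposing the downstream channel as a public blob_receiver field, the constructor now returns it next to the stage as a (Self, BlobReceiver) tuple, so ownership of the channel passes straight to the caller. A self-contained sketch of that pattern using only std::sync::mpsc; Stage and the u64 payload are illustrative stand-ins, not types from this crate:

    use std::sync::mpsc::{channel, Receiver};
    use std::thread::{self, JoinHandle};

    // Illustrative stand-in for a pipeline stage such as WindowStage.
    pub struct Stage {
        pub thread_hdls: Vec<JoinHandle<()>>,
    }

    impl Stage {
        // Hand the output channel back with the stage instead of exposing a field.
        pub fn new(input: Receiver<u64>) -> (Self, Receiver<u64>) {
            let (sender, receiver) = channel();
            let hdl = thread::spawn(move || {
                for item in input.iter() {
                    if sender.send(item + 1).is_err() {
                        break;
                    }
                }
            });
            (Stage { thread_hdls: vec![hdl] }, receiver)
        }
    }

    fn main() {
        let (tx, rx) = channel();
        let (stage, out) = Stage::new(rx);
        tx.send(41).unwrap();
        drop(tx); // close the input so the worker thread exits
        assert_eq!(out.recv().unwrap(), 42);
        for t in stage.thread_hdls {
            t.join().unwrap();
        }
    }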
@@ -5,75 +5,71 @@
 use bank::Bank;
 use entry::Entry;
 use entry_writer::EntryWriter;
-use packet;
+use ledger::Block;
+use packet::BlobRecycler;
+use result::Result;
+use std::collections::VecDeque;
 use std::io::Write;
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::mpsc::{channel, Receiver};
-use std::sync::{Arc, Mutex};
+use std::sync::Arc;
 use std::thread::{Builder, JoinHandle};
-use streamer;
+use std::time::Duration;
+use streamer::{BlobReceiver, BlobSender};

 pub struct WriteStage {
     pub thread_hdl: JoinHandle<()>,
-    pub blob_receiver: streamer::BlobReceiver,
 }

 impl WriteStage {
+    /// Process any Entry items that have been published by the Historian.
+    /// continuosly broadcast blobs of entries out
+    pub fn write_and_send_entries<W: Write>(
+        entry_writer: &mut EntryWriter<W>,
+        blob_sender: &BlobSender,
+        blob_recycler: &BlobRecycler,
+        entry_receiver: &Receiver<Vec<Entry>>,
+    ) -> Result<()> {
+        let entries = entry_receiver.recv_timeout(Duration::new(1, 0))?;
+        entry_writer.write_and_register_entries(&entries)?;
+        trace!("New blobs? {}", entries.len());
+        let mut blobs = VecDeque::new();
+        entries.to_blobs(blob_recycler, &mut blobs);
+        if !blobs.is_empty() {
+            trace!("broadcasting {}", blobs.len());
+            blob_sender.send(blobs)?;
+        }
+        Ok(())
+    }
+
     /// Create a new Rpu that wraps the given Bank.
     pub fn new<W: Write + Send + 'static>(
         bank: Arc<Bank>,
         exit: Arc<AtomicBool>,
-        blob_recycler: packet::BlobRecycler,
-        writer: Mutex<W>,
-        entry_receiver: Receiver<Entry>,
-    ) -> Self {
+        blob_recycler: BlobRecycler,
+        writer: W,
+        entry_receiver: Receiver<Vec<Entry>>,
+    ) -> (Self, BlobReceiver) {
        let (blob_sender, blob_receiver) = channel();
        let thread_hdl = Builder::new()
            .name("solana-writer".to_string())
-            .spawn(move || loop {
-                let entry_writer = EntryWriter::new(&bank);
-                let _ = entry_writer.write_and_send_entries(
+            .spawn(move || {
+                let mut entry_writer = EntryWriter::new(&bank, writer);
+                loop {
+                    let _ = Self::write_and_send_entries(
+                        &mut entry_writer,
                         &blob_sender,
                         &blob_recycler,
-                        &writer,
                         &entry_receiver,
                     );
                     if exit.load(Ordering::Relaxed) {
                         info!("broadcat_service exiting");
                         break;
                     }
-            })
-            .unwrap();

-        WriteStage {
-            thread_hdl,
-            blob_receiver,
-        }
-    }

-    pub fn new_drain(
-        bank: Arc<Bank>,
-        exit: Arc<AtomicBool>,
-        entry_receiver: Receiver<Entry>,
-    ) -> Self {
-        let (_blob_sender, blob_receiver) = channel();
-        let thread_hdl = Builder::new()
-            .name("solana-drain".to_string())
-            .spawn(move || {
-                let entry_writer = EntryWriter::new(&bank);
-                loop {
-                    let _ = entry_writer.drain_entries(&entry_receiver);
-                    if exit.load(Ordering::Relaxed) {
-                        info!("drain_service exiting");
-                        break;
-                    }
                 }
             })
             .unwrap();

-        WriteStage {
-            thread_hdl,
-            blob_receiver,
-        }
+        (WriteStage { thread_hdl }, blob_receiver)
     }
 }
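The new write_and_send_entries helper receives one batch of entries with a one-second timeout, writes and registers it, and forwards the resulting blobs downstream only when the batch produced any. A stripped-down, runnable sketch of that receive-with-timeout-then-forward shape, using plain std::sync::mpsc with Vec<u64> batches in place of the crate's Entry and blob types:

    use std::sync::mpsc::{channel, Receiver, RecvTimeoutError, Sender};
    use std::time::Duration;

    // Pull one batch with a one-second timeout, then forward it downstream
    // if it is non-empty, mirroring the control flow shown in the diff.
    fn forward_batch(
        entry_receiver: &Receiver<Vec<u64>>,
        blob_sender: &Sender<Vec<u64>>,
    ) -> Result<(), RecvTimeoutError> {
        let entries = entry_receiver.recv_timeout(Duration::new(1, 0))?;
        if !entries.is_empty() {
            blob_sender.send(entries).expect("downstream receiver hung up");
        }
        Ok(())
    }

    fn main() {
        let (entry_tx, entry_rx) = channel();
        let (blob_tx, blob_rx) = channel();
        entry_tx.send(vec![1, 2, 3]).unwrap();
        forward_batch(&entry_rx, &blob_tx).unwrap();
        assert_eq!(blob_rx.recv().unwrap(), vec![1, 2, 3]);
    }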
@@ -183,3 +183,109 @@ pub fn crdt_retransmit() {
         t.join().unwrap();
     }
 }
+
+#[test]
+fn test_external_liveness_table() {
+    logger::setup();
+    let c1_c4_exit = Arc::new(AtomicBool::new(false));
+    let c2_c3_exit = Arc::new(AtomicBool::new(false));
+
+    trace!("c1:");
+    let (c1, dr1, _) = test_node(c1_c4_exit.clone());
+    trace!("c2:");
+    let (c2, dr2, _) = test_node(c2_c3_exit.clone());
+    trace!("c3:");
+    let (c3, dr3, _) = test_node(c2_c3_exit.clone());
+    trace!("c4:");
+    let (c4, dr4, _) = test_node(c1_c4_exit.clone());
+
+    let c1_data = c1.read().unwrap().my_data().clone();
+    c1.write().unwrap().set_leader(c1_data.id);
+
+    let c2_id = c2.read().unwrap().me;
+    let c3_id = c3.read().unwrap().me;
+    let c4_id = c4.read().unwrap().me;
+
+    // Insert the remote data about c4
+    let c2_index_for_c4 = 10;
+    c2.write().unwrap().remote.insert(c4_id, c2_index_for_c4);
+    let c3_index_for_c4 = 20;
+    c3.write().unwrap().remote.insert(c4_id, c3_index_for_c4);
+
+    // Set up the initial network topology
+    c2.write().unwrap().insert(&c1_data);
+    c3.write().unwrap().insert(&c1_data);
+
+    c2.write().unwrap().set_leader(c1_data.id);
+    c3.write().unwrap().set_leader(c1_data.id);
+
+    // Wait to converge
+    trace!("waiting to converge:");
+    let mut done = false;
+    for _ in 0..30 {
+        done = c1.read().unwrap().table.len() == 3
+            && c2.read().unwrap().table.len() == 3
+            && c3.read().unwrap().table.len() == 3;
+        if done {
+            break;
+        }
+        sleep(Duration::new(1, 0));
+    }
+    assert!(done);
+
+    // Validate c1's external liveness table, then release lock rc1
+    {
+        let rc1 = c1.read().unwrap();
+        let el = rc1.get_external_liveness_entry(&c4.read().unwrap().me);
+
+        // Make sure liveness table entry for c4 exists on node c1
+        assert!(el.is_some());
+        let liveness_map = el.unwrap();
+
+        // Make sure liveness table entry contains correct result for c2
+        let c2_index_result_for_c4 = liveness_map.get(&c2_id);
+        assert!(c2_index_result_for_c4.is_some());
+        assert_eq!(*(c2_index_result_for_c4.unwrap()), c2_index_for_c4);
+
+        // Make sure liveness table entry contains correct result for c3
+        let c3_index_result_for_c4 = liveness_map.get(&c3_id);
+        assert!(c3_index_result_for_c4.is_some());
+        assert_eq!(*(c3_index_result_for_c4.unwrap()), c3_index_for_c4);
+    }
+
+    // Shutdown validators c2 and c3
+    c2_c3_exit.store(true, Ordering::Relaxed);
+    let mut threads = vec![];
+    threads.extend(dr2.thread_hdls.into_iter());
+    threads.extend(dr3.thread_hdls.into_iter());
+
+    for t in threads {
+        t.join().unwrap();
+    }
+
+    // Allow communication between c1 and c4, make sure that c1's external_liveness table
+    // entry for c4 gets cleared
+    c4.write().unwrap().insert(&c1_data);
+    c4.write().unwrap().set_leader(c1_data.id);
+    for _ in 0..30 {
+        done = c1.read()
+            .unwrap()
+            .get_external_liveness_entry(&c4_id)
+            .is_none();
+        if done {
+            break;
+        }
+        sleep(Duration::new(1, 0));
+    }
+    assert!(done);
+
+    // Shutdown validators c1 and c4
+    c1_c4_exit.store(true, Ordering::Relaxed);
+    let mut threads = vec![];
+    threads.extend(dr1.thread_hdls.into_iter());
+    threads.extend(dr4.thread_hdls.into_iter());
+
+    for t in threads {
+        t.join().unwrap();
+    }
+}
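Both convergence waits in the test above use the same bounded-retry shape: poll a condition up to thirty times, sleep between attempts, and assert that it eventually held. A generic, self-contained version of that loop; the function name and the one-millisecond interval are illustrative only:

    use std::thread::sleep;
    use std::time::Duration;

    // Try `cond` up to `tries` times, sleeping `interval` between attempts.
    fn poll_until<F: FnMut() -> bool>(tries: usize, interval: Duration, mut cond: F) -> bool {
        for _ in 0..tries {
            if cond() {
                return true;
            }
            sleep(interval);
        }
        false
    }

    fn main() {
        let mut count = 0;
        let done = poll_until(30, Duration::from_millis(1), || {
            count += 1;
            count >= 3
        });
        assert!(done);
    }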
@@ -6,10 +6,10 @@ extern crate solana;
 use solana::bank::Bank;
 use solana::crdt::TestNode;
 use solana::crdt::{Crdt, ReplicatedData};
+use solana::fullnode::FullNode;
 use solana::logger;
 use solana::mint::Mint;
 use solana::ncp::Ncp;
-use solana::server::Server;
 use solana::signature::{KeyPair, KeyPairUtil, PublicKey};
 use solana::streamer::default_window;
 use solana::thin_client::ThinClient;
@@ -30,8 +30,9 @@ fn validator(
 ) {
     let validator = TestNode::new();
     let replicant_bank = Bank::new(&alice);
-    let mut ts = Server::new_validator(
+    let mut ts = FullNode::new_validator(
         replicant_bank,
+        0,
         validator.data.clone(),
         validator.sockets.requests,
         validator.sockets.respond,
@@ -49,7 +50,7 @@ fn converge(
     exit: Arc<AtomicBool>,
     num_nodes: usize,
     threads: &mut Vec<JoinHandle<()>>,
-) -> Vec<ReplicatedData> {
+) -> (Vec<ReplicatedData>, PublicKey) {
     //lets spy on the network
     let mut spy = TestNode::new();
     let daddr = "0.0.0.0:0".parse().unwrap();
@@ -89,22 +90,23 @@ fn converge(
         .filter(|x| x.id != me)
         .map(|x| x.clone())
         .collect();
-    v.clone()
+    (v.clone(), me)
 }

 #[test]
-fn test_multi_node() {
+fn test_multi_node_validator_catchup_from_zero() {
     logger::setup();
     const N: usize = 5;
-    trace!("test_multi_accountant_stub");
+    trace!("test_multi_node_validator_catchup_from_zero");
     let leader = TestNode::new();
     let alice = Mint::new(10_000);
     let bob_pubkey = KeyPair::new().pubkey();
     let exit = Arc::new(AtomicBool::new(false));

     let leader_bank = Bank::new(&alice);
-    let server = Server::new_leader(
+    let server = FullNode::new_leader(
         leader_bank,
+        0,
         None,
         leader.data.clone(),
         leader.sockets.requests,
@@ -120,7 +122,97 @@ fn test_multi_node() {
     for _ in 0..N {
         validator(&leader.data, exit.clone(), &alice, &mut threads);
     }
-    let servers = converge(&leader.data, exit.clone(), N + 2, &mut threads);
+    let (servers, spy_id0) = converge(&leader.data, exit.clone(), N + 2, &mut threads);
+    //contains the leader addr as well
+    assert_eq!(servers.len(), N + 1);
+    //verify leader can do transfer
+    let leader_balance = tx_and_retry_get_balance(&leader.data, &alice, &bob_pubkey).unwrap();
+    assert_eq!(leader_balance, 500);
+    //verify validator has the same balance
+    let mut success = 0usize;
+    for server in servers.iter() {
+        info!("0server: {:?}", server.id[0]);
+        let mut client = mk_client(server);
+        if let Ok(bal) = client.poll_get_balance(&bob_pubkey) {
+            info!("validator balance {}", bal);
+            if bal == leader_balance {
+                success += 1;
+            }
+        }
+    }
+    assert_eq!(success, servers.len());
+
+    success = 0;
+    // start up another validator, converge and then check everyone's balances
+    validator(&leader.data, exit.clone(), &alice, &mut threads);
+    let (servers, _) = converge(&leader.data, exit.clone(), N + 4, &mut threads);
+
+    let mut leader_balance = tx_and_retry_get_balance(&leader.data, &alice, &bob_pubkey).unwrap();
+    info!("leader balance {}", leader_balance);
+    loop {
+        let mut client = mk_client(&leader.data);
+        leader_balance = client.poll_get_balance(&bob_pubkey).unwrap();
+        if leader_balance == 1000 {
+            break;
+        }
+        sleep(Duration::from_millis(300));
+    }
+    assert_eq!(leader_balance, 1000);
+
+    for server in servers.iter() {
+        if server.id != spy_id0 {
+            let mut client = mk_client(server);
+            info!("1server: {:?}", server.id[0]);
+            for _ in 0..10 {
+                if let Ok(bal) = client.poll_get_balance(&bob_pubkey) {
+                    info!("validator balance {}", bal);
+                    if bal == leader_balance {
+                        success += 1;
+                        break;
+                    }
+                }
+                sleep(Duration::from_millis(500));
+            }
+        }
+    }
+    assert_eq!(success, (servers.len() - 1));
+
+    exit.store(true, Ordering::Relaxed);
+    for t in threads {
+        t.join().unwrap();
+    }
+}
+
+#[test]
+fn test_multi_node_basic() {
+    logger::setup();
+    const N: usize = 5;
+    trace!("test_multi_node_basic");
+    let leader = TestNode::new();
+    let alice = Mint::new(10_000);
+    let bob_pubkey = KeyPair::new().pubkey();
+    let exit = Arc::new(AtomicBool::new(false));
+
+    let leader_bank = Bank::new(&alice);
+    let server = FullNode::new_leader(
+        leader_bank,
+        0,
+        None,
+        leader.data.clone(),
+        leader.sockets.requests,
+        leader.sockets.transaction,
+        leader.sockets.broadcast,
+        leader.sockets.respond,
+        leader.sockets.gossip,
+        exit.clone(),
+        sink(),
+    );
+
+    let mut threads = server.thread_hdls;
+    for _ in 0..N {
+        validator(&leader.data, exit.clone(), &alice, &mut threads);
+    }
+    let (servers, _) = converge(&leader.data, exit.clone(), N + 2, &mut threads);
     //contains the leader addr as well
     assert_eq!(servers.len(), N + 1);
     //verify leader can do transfer
@@ -138,6 +230,7 @@ fn test_multi_node() {
         }
     }
     assert_eq!(success, servers.len());
+
     exit.store(true, Ordering::Relaxed);
     for t in threads {
         t.join().unwrap();
@@ -167,7 +260,7 @@ fn tx_and_retry_get_balance(
     let mut client = mk_client(leader);
     trace!("getting leader last_id");
     let last_id = client.get_last_id();
-    info!("executing leader transer");
+    info!("executing leader transfer");
     let _sig = client
         .transfer(500, &alice.keypair(), *bob_pubkey, &last_id)
         .unwrap();