Compare commits
v0.7.0-rc. ... v0.7.0-rc.
85 commits

Commits (SHA1):
88646bf27d
0696f9f497
b2ea2455e2
3f659a69fd
2c62be951f
2348733d6c
cc229b535d
7f810a29ff
fc1dfd86d2
5deb34e5bd
39df087902
6ff46540b6
dbab8792e4
4eb676afaa
a6cb2f1bcf
28af9a39b4
8cf5620b87
85d6627ee6
611a005ec9
90b3b90391
fd4f294fd3
145274c001
df5d6693f6
05c5603879
c2c48a5c3c
4af556f70e
8bad411962
5b0418793e
4423ee6902
f0c39cc84d
3d45b04da8
9e2f26a5d2
a016f6e82e
eb3e5fd204
72282dc493
47a22c66b4
fb11d8a909
7d872f52f4
d882bfe65c
103584ef27
1fb537deb9
2bd48b4207
f5a6db3dc0
dd0c1ac5b2
d8c9655128
09f2d273c5
f6eb85e7a3
0d85b43901
fdf94a77b4
af40ab0c04
015b7a1ddb
ab3e460e64
194a84c8dd
51d932dad1
561d31cc13
d6a8e437bb
4631af5011
5d28729b2a
8c08e614b7
e76bf1438b
4e177877c9
60848b9d95
79b3564a26
1e8c36c555
94d015b089
cfb3736372
2b77f62233
e8d23c17ca
a7ed2a304a
0025b42c26
3f7f492cc0
490d7875dd
4240edf710
30e50d0f70
751c1eba32
d349d6aa98
1f9152dc72
1b9d50172b
084dbd7f58
58c0508f94
dcf82c024f
b253ed0c46
61db53fc19
b0ead086a1
a3b22d0d33
@@ -1,7 +1,7 @@
 [package]
 name = "solana"
 description = "Blockchain, Rebuilt for Scale"
-version = "0.7.0-beta"
+version = "0.7.0-rc.6"
 documentation = "https://docs.rs/solana"
 homepage = "http://solana.com/"
 readme = "README.md"
README.md — 30 changed lines
@@ -84,17 +84,24 @@ Now start the server:
 $ ./multinode-demo/leader.sh
 ```

-To run a performance-enhanced fullnode on Linux,
-[CUDA 9.2](https://developer.nvidia.com/cuda-downloads) must be installed on
-your system:
-```bash
-$ ./fetch-perf-libs.sh
-$ SOLANA_CUDA=1 ./multinode-demo/leader.sh
-```
-
 Wait a few seconds for the server to initialize. It will print "Ready." when it's ready to
 receive transactions.

+Drone
+---
+
+In order for the below test client and validators to work, we'll also need to
+spin up a drone to give out some test tokens. The drone delivers Milton
+Friedman-style "air drops" (free tokens to requesting clients) to be used in
+test transactions.
+
+Start the drone on the leader node with:
+
+```bash
+$ ./multinode-demo/drone.sh
+```
+
+
 Multinode Testnet
 ---

@@ -104,15 +111,18 @@ To run a multinode testnet, after starting a leader node, spin up some validator
 $ ./multinode-demo/validator.sh ubuntu@10.0.1.51:~/solana 10.0.1.51
 ```

-To run a performance-enhanced fullnode on Linux,
+To run a performance-enhanced leader or validator (on Linux),
 [CUDA 9.2](https://developer.nvidia.com/cuda-downloads) must be installed on
 your system:
 ```bash
 $ ./fetch-perf-libs.sh
-$ SOLANA_CUDA=1 ./multinode-demo/leader.sh ubuntu@10.0.1.51:~/solana 10.0.1.51
+$ SOLANA_CUDA=1 ./multinode-demo/leader.sh
+$ SOLANA_CUDA=1 ./multinode-demo/validator.sh ubuntu@10.0.1.51:~/solana 10.0.1.51
+
 ```


+
 Testnet Client Demo
 ---

@@ -32,7 +32,7 @@ fn bench_process_transaction(bencher: &mut Bencher) {

 let rando1 = KeyPair::new();
 let tx = Transaction::new(&rando0, rando1.pubkey(), 1, last_id);
-assert!(bank.process_transaction(&tx.clone()).is_ok());
+assert!(bank.process_transaction(&tx).is_ok());

 // Finally, return the transaction to the benchmark.
 tx
@@ -58,5 +58,9 @@ fn bench(criterion: &mut Criterion) {
 });
 }

-criterion_group!(benches, bench);
+criterion_group!(
+name = benches;
+config = Criterion::default().sample_size(2);
+targets = bench
+);
 criterion_main!(benches);
@@ -32,5 +32,9 @@ fn bench(criterion: &mut Criterion) {
 });
 }

-criterion_group!(benches, bench);
+criterion_group!(
+name = benches;
+config = Criterion::default().sample_size(2);
+targets = bench
+);
 criterion_main!(benches);
@@ -16,5 +16,9 @@ fn bench(criterion: &mut Criterion) {
 });
 }

-criterion_group!(benches, bench);
+criterion_group!(
+name = benches;
+config = Criterion::default().sample_size(2);
+targets = bench
+);
 criterion_main!(benches);
@@ -1,4 +1,4 @@
 steps:
 - command: "ci/snap.sh"
-timeout_in_minutes: 20
+timeout_in_minutes: 40
 name: "snap [public]"
@@ -1,3 +1,6 @@
 FROM rustlang/rust:nightly

-RUN cargo install --force clippy cargo-cov
+RUN cargo install --force clippy cargo-cov && \
+echo deb http://ftp.debian.org/debian stretch-backports main >> /etc/apt/sources.list && \
+apt update && \
+apt install -y llvm-6.0
@@ -2,6 +2,6 @@ FROM snapcraft/xenial-amd64

 # Update snapcraft to latest version
 RUN apt-get update -qq \
-&& apt-get install -y snapcraft \
+&& apt-get install -y snapcraft daemontools \
 && rm -rf /var/lib/apt/lists/* \
 && snapcraft --version
Deleted file:
@@ -1,80 +0,0 @@
-#!/bin/bash
-#
-# Refreshes the Solana software running on the Testnet full nodes
-#
-# This script must be run by a user/machine that has successfully authenticated
-# with GCP and has sufficient permission.
-#
-
-if [[ -z $SOLANA_METRICS_CONFIG ]]; then
-echo Error: SOLANA_METRICS_CONFIG environment variable is unset
-exit 1
-fi
-
-# Default to --edge channel. To select the beta channel:
-# export SOLANA_METRICS_CONFIG=--beta
-if [[ -z $SOLANA_SNAP_CHANNEL ]]; then
-SOLANA_SNAP_CHANNEL=--edge
-fi
-
-vmlist=(testnet-solana-com:us-west1-b) # Leader is hard coded as the first entry
-
-echo "--- Available validators"
-gcloud compute instances list --filter="labels.testnet-mode=validator"
-while read -r vmName vmZone status; do
-if [[ $status != RUNNING ]]; then
-echo "Warning: $vmName is not RUNNING, ignoring it."
-continue
-fi
-vmlist+=("$vmName:$vmZone")
-done < <(gcloud compute instances list --filter="labels.testnet-mode=validator" --format 'value(name,zone,status)')
-
-
-echo "--- Refreshing"
-leader=true
-for info in "${vmlist[@]}"; do
-vmName=${info%:*}
-vmZone=${info#*:}
-echo "Starting refresh for $vmName"
-
-(
-echo "--- Processing $vmName in zone $vmZone"
-if $leader; then
-nodeConfig="mode=leader+drone enable-cuda=1 metrics-config=$SOLANA_METRICS_CONFIG"
-else
-nodeConfig="mode=validator metrics-config=$SOLANA_METRICS_CONFIG"
-fi
-cat > "autogen-refresh-$vmName.sh" <<EOF
-set -x
-sudo snap remove solana
-sudo snap install solana $SOLANA_SNAP_CHANNEL --devmode
-sudo snap set solana $nodeConfig
-snap info solana
-sudo snap logs solana -n200
-EOF
-set -x
-gcloud compute scp --zone "$vmZone" "autogen-refresh-$vmName.sh" "$vmName":
-gcloud compute ssh "$vmName" --zone "$vmZone" \
---ssh-flag="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -t" \
---command="bash ./autogen-refresh-$vmName.sh"
-) > "log-$vmName.txt" 2>&1 &
-
-if $leader; then
-echo Waiting for leader...
-# Wait for the leader to initialize before starting the validators
-# TODO: Remove this limitation eventually.
-wait
-fi
-leader=false
-done
-
-echo Waiting for validators...
-wait
-
-for info in "${vmlist[@]}"; do
-vmName=${info%:*}
-cat "log-$vmName.txt"
-done
-
-echo "--- done"
-exit 0
@@ -37,6 +37,12 @@ fi

 set -x

+echo --- checking for multilog
+if [[ ! -x /usr/bin/multilog ]]; then
+echo "multilog not found, install with: sudo apt-get install -y daemontools"
+exit 1
+fi
+
 echo --- build
 snapcraft

@@ -27,6 +27,6 @@ ls -l target/cov/report/index.html
 if [[ -z "$CODECOV_TOKEN" ]]; then
 echo CODECOV_TOKEN undefined
 else
-bash <(curl -s https://codecov.io/bash) -x 'llvm-cov gcov'
+bash <(curl -s https://codecov.io/bash) -x 'llvm-cov-6.0 gcov'
 fi

ci/testnet-deploy.sh — new executable file, 165 lines
@@ -0,0 +1,165 @@
+#!/bin/bash -e
+#
+# Deploys the Solana software running on the testnet full nodes
+#
+# This script must be run by a user/machine that has successfully authenticated
+# with GCP and has sufficient permission.
+#
+cd "$(dirname "$0")/.."
+
+# TODO: Switch over to rolling updates
+ROLLING_UPDATE=false
+#ROLLING_UPDATE=true
+
+if [[ -z $SOLANA_METRICS_CONFIG ]]; then
+echo Error: SOLANA_METRICS_CONFIG environment variable is unset
+exit 1
+fi
+
+# Default to edge channel. To select the beta channel:
+# export SOLANA_SNAP_CHANNEL=beta
+if [[ -z $SOLANA_SNAP_CHANNEL ]]; then
+SOLANA_SNAP_CHANNEL=edge
+fi
+
+case $SOLANA_SNAP_CHANNEL in
+edge)
+publicUrl=master.testnet.solana.com
+publicIp=$(dig +short $publicUrl | head -n1)
+;;
+beta)
+publicUrl=testnet.solana.com
+publicIp="" # Use default value
+;;
+*)
+echo Error: Unknown SOLANA_SNAP_CHANNEL=$SOLANA_SNAP_CHANNEL
+exit 1
+;;
+esac
+
+resourcePrefix=${publicUrl//./-}
+vmlist=("$resourcePrefix":us-west1-b) # Leader is hard coded as the first entry
+validatorNamePrefix=$resourcePrefix-validator-
+
+echo "--- Available validators for $publicUrl"
+filter="name~^$validatorNamePrefix"
+gcloud compute instances list --filter="$filter"
+while read -r vmName vmZone status; do
+if [[ $status != RUNNING ]]; then
+echo "Warning: $vmName is not RUNNING, ignoring it."
+continue
+fi
+vmlist+=("$vmName:$vmZone")
+done < <(gcloud compute instances list --filter="$filter" --format 'value(name,zone,status)')
+
+wait_for_node() {
+declare pid=$1
+
+declare ok=true
+wait "$pid" || ok=false
+cat "log-$pid.txt"
+if ! $ok; then
+echo ^^^ +++
+exit 1
+fi
+}
+
+if ! $ROLLING_UPDATE; then
+count=1
+for info in "${vmlist[@]}"; do
+nodePosition="($count/${#vmlist[*]})"
+vmName=${info%:*}
+vmZone=${info#*:}
+echo "--- Shutting down $vmName in zone $vmZone $nodePosition"
+gcloud compute ssh "$vmName" --zone "$vmZone" \
+--ssh-flag="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" \
+--command="echo sudo snap remove solana" &
+
+if [[ $((count % 10)) = 0 ]]; then
+# Slow down deployment to avoid triggering GCP login
+# quota limits (each |ssh| counts as a login)
+sleep 3
+fi
+
+count=$((count + 1))
+done
+
+wait
+fi
+
+echo "--- Refreshing leader for $publicUrl"
+leader=true
+pids=()
+count=1
+for info in "${vmlist[@]}"; do
+nodePosition="($count/${#vmlist[*]})"
+
+vmName=${info%:*}
+vmZone=${info#*:}
+echo "Starting refresh for $vmName $nodePosition"
+
+(
+SECONDS=0
+echo "--- $vmName in zone $vmZone $nodePosition"
+commonNodeConfig="\
+rust-log=$RUST_LOG \
+default-metrics-rate=$SOLANA_DEFAULT_METRICS_RATE \
+metrics-config=$SOLANA_METRICS_CONFIG \
+"
+if $leader; then
+nodeConfig="mode=leader+drone $commonNodeConfig"
+if [[ -n $SOLANA_CUDA ]]; then
+nodeConfig="$nodeConfig enable-cuda=1"
+fi
+else
+nodeConfig="mode=validator leader-address=$publicIp $commonNodeConfig"
+fi
+
+set -x
+gcloud compute ssh "$vmName" --zone "$vmZone" \
+--ssh-flag="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -t" \
+--command="\
+set -ex; \
+logmarker='solana deploy $(date)/$RANDOM'; \
+sudo snap remove solana; \
+logger \$logmarker; \
+sudo snap install solana --$SOLANA_SNAP_CHANNEL --devmode; \
+sudo snap set solana $nodeConfig; \
+snap info solana; \
+echo Slight delay to get more syslog output; \
+sleep 2; \
+sudo grep -Pzo \"\$logmarker(.|\\n)*\" /var/log/syslog \
+"
+echo "Succeeded in ${SECONDS} seconds"
+) > "log-$vmName.txt" 2>&1 &
+pid=$!
+# Rename log file so it can be discovered later by $pid
+mv "log-$vmName.txt" "log-$pid.txt"
+
+if $leader; then
+echo Waiting for leader...
+# Wait for the leader to initialize before starting the validators
+# TODO: Remove this limitation eventually.
+wait_for_node "$pid"
+
+echo "--- Refreshing validators"
+else
+# Slow down deployment to ~20 machines a minute to avoid triggering GCP login
+# quota limits (each |ssh| counts as a login)
+sleep 3
+
+pids+=("$pid")
+fi
+leader=false
+count=$((count + 1))
+done
+
+echo --- Waiting for validators
+for pid in "${pids[@]}"; do
+wait_for_node "$pid"
+done
+
+echo "--- $publicUrl sanity test"
+USE_SNAP=1 ci/testnet-sanity.sh $publicUrl
+
+exit 0
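The new deploy script is driven entirely by environment variables rather than command-line arguments. A minimal invocation might look like the sketch below; the metrics string is a placeholder built from the parameter names (host, db, u, p) that configure_metrics in multinode-demo/common.sh understands, and the exact value format is an assumption, not something shown in this diff.

```bash
# Hypothetical invocation of ci/testnet-deploy.sh; the metrics value is illustrative only.
export SOLANA_METRICS_CONFIG="host=https://metrics.example.com,db=testnet,u=scratch_writer,p=secret"
export SOLANA_SNAP_CHANNEL=edge   # deploys to master.testnet.solana.com; use "beta" for testnet.solana.com
ci/testnet-deploy.sh
```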
ci/testnet-sanity.sh — new executable file, 17 lines
@@ -0,0 +1,17 @@
+#!/bin/bash -e
+#
+# Perform a quick sanity test on the specific testnet
+#
+
+cd "$(dirname "$0")/.."
+
+TESTNET=$1
+if [[ -z $TESTNET ]]; then
+TESTNET=testnet.solana.com
+fi
+
+echo "--- $TESTNET: wallet sanity"
+multinode-demo/test/wallet-sanity.sh $TESTNET
+
+echo --- fin
+exit 0
@@ -7,25 +7,27 @@ here=$(dirname "$0")
 # shellcheck source=multinode-demo/common.sh
 source "$here"/common.sh

-leader=${1:-${here}/..} # Default to local solana repo
+leader=$1
+if [[ -z $leader ]]; then
+if [[ -d "$SNAP" ]]; then
+leader=testnet.solana.com # Default to testnet when running as a Snap
+else
+leader=$here/.. # Default to local solana repo
+fi
+fi
 count=${2:-1}

 rsync_leader_url=$(rsync_url "$leader")

 set -ex
 mkdir -p "$SOLANA_CONFIG_CLIENT_DIR"
-if [[ ! -r "$SOLANA_CONFIG_CLIENT_DIR"/leader.json ]]; then
-(
-set -x
-$rsync -vPz "$rsync_leader_url"/config/leader.json "$SOLANA_CONFIG_CLIENT_DIR"/
-)
-fi
+$rsync -vPz "$rsync_leader_url"/config/leader.json "$SOLANA_CONFIG_CLIENT_DIR"/

 client_json="$SOLANA_CONFIG_CLIENT_DIR"/client.json
-if [[ ! -r $client_json ]]; then
-$solana_keygen -o "$client_json"
-fi
+[[ -r $client_json ]] || $solana_keygen -o "$client_json"
+$solana_client_demo \
+-n "$count" \
+-l "$SOLANA_CONFIG_CLIENT_DIR"/leader.json \
+-k "$SOLANA_CONFIG_CLIENT_DIR"/client.json \

-# shellcheck disable=SC2086 # $solana_client_demo should not be quoted
-exec $solana_client_demo \
--n "$count" -l "$SOLANA_CONFIG_CLIENT_DIR"/leader.json -k "$SOLANA_CONFIG_CLIENT_DIR"/client.json
@@ -3,21 +3,41 @@
 # Disable complaints about unused variables in this file:
 # shellcheck disable=2034

+# shellcheck disable=2154 # 'here' is referenced but not assigned
+if [[ -z $here ]]; then
+echo "|here| is not defined"
+exit 1
+fi
+
 rsync=rsync
+leader_logger="cat"
+validator_logger="cat"
+drone_logger="cat"

 if [[ -d "$SNAP" ]]; then # Running inside a Linux Snap?
 solana_program() {
 declare program="$1"
-if [[ "$program" = wallet ]]; then
-# TODO: Merge wallet.sh functionality into solana-wallet proper and
-# remove this special case
+if [[ "$program" = wallet || "$program" = client-demo ]]; then
+# TODO: Merge wallet.sh/client.sh functionality into
+# solana-wallet/solana-demo-client proper and remove this special case
 printf "%s/bin/solana-%s" "$SNAP" "$program"
 else
 printf "%s/command-%s.wrapper" "$SNAP" "$program"
 fi
 }
 rsync="$SNAP"/bin/rsync
+multilog="$SNAP/bin/multilog t s16777215"
+leader_logger="$multilog $SNAP_DATA/leader"
+validator_logger="$multilog t $SNAP_DATA/validator"
+drone_logger="$multilog $SNAP_DATA/drone"
+# Create log directories manually to prevent multilog from creating them as
+# 0700
+mkdir -p "$SNAP_DATA"/{drone,leader,validator}
+
 SOLANA_METRICS_CONFIG="$(snapctl get metrics-config)"
+SOLANA_DEFAULT_METRICS_RATE="$(snapctl get default-metrics-rate)"
 SOLANA_CUDA="$(snapctl get enable-cuda)"
+RUST_LOG="$(snapctl get rust-log)"

 elif [[ -n "$USE_SNAP" ]]; then # Use the Linux Snap binaries
 solana_program() {
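For reference, daemontools' multilog reads log lines on stdin and writes them to an automatically rotated `current` file inside the named directory; the `t` selector prepends a timestamp and `s16777215` caps each log file at roughly 16 MB before rotation. A minimal stand-alone sketch, with illustrative paths and a placeholder command:

```bash
# Feed a long-running process into multilog: timestamped, size-capped, auto-rotated.
mkdir -p /tmp/solana-logs/leader
some_long_running_command 2>&1 | multilog t s16777215 /tmp/solana-logs/leader
# The most recent output lands in /tmp/solana-logs/leader/current
```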
@@ -37,13 +57,18 @@ else
 declare features=""
 if [[ "$program" =~ ^(.*)-cuda$ ]]; then
 program=${BASH_REMATCH[1]}
-features="--features=cuda,erasure"
+features="--features=cuda"
 fi
 if [[ -z "$DEBUG" ]]; then
 maybe_release=--release
 fi
 printf "cargo run $maybe_release --bin solana-%s %s -- " "$program" "$features"
 }
+if [[ -n $SOLANA_CUDA ]]; then
+# Locate perf libs downloaded by |./fetch-perf-libs.sh|
+LD_LIBRARY_PATH=$(cd "$here" && dirname "$PWD"):$LD_LIBRARY_PATH
+export LD_LIBRARY_PATH
+fi
 fi

 solana_client_demo=$(solana_program client-demo)
@@ -73,29 +98,29 @@ configure_metrics() {
 for param in "${metrics_params[@]}"; do
 IFS='=' read -r -a pair <<< "$param"
 if [[ "${#pair[@]}" != 2 ]]; then
-echo Error: invalid metrics parameter: "$param"
+echo Error: invalid metrics parameter: "$param" >&2
 else
 declare name="${pair[0]}"
 declare value="${pair[1]}"
 case "$name" in
 host)
 export INFLUX_HOST="$value"
-echo INFLUX_HOST="$INFLUX_HOST"
+echo INFLUX_HOST="$INFLUX_HOST" >&2
 ;;
 db)
 export INFLUX_DATABASE="$value"
-echo INFLUX_DATABASE="$INFLUX_DATABASE"
+echo INFLUX_DATABASE="$INFLUX_DATABASE" >&2
 ;;
 u)
 export INFLUX_USERNAME="$value"
-echo INFLUX_USERNAME="$INFLUX_USERNAME"
+echo INFLUX_USERNAME="$INFLUX_USERNAME" >&2
 ;;
 p)
 export INFLUX_PASSWORD="$value"
-echo INFLUX_PASSWORD="********"
+echo INFLUX_PASSWORD="********" >&2
 ;;
 *)
-echo Error: Unknown metrics parameter name: "$name"
+echo Error: Unknown metrics parameter name: "$name" >&2
 ;;
 esac
 fi
@@ -105,16 +130,18 @@ configure_metrics

 tune_networking() {
 # Reference: https://medium.com/@CameronSparr/increase-os-udp-buffers-to-improve-performance-51d167bb1360
-[[ $(uname) = Linux ]] && (
-set -x
-# test the existence of the sysctls before trying to set them
-sysctl net.core.rmem_max 2>/dev/null 1>/dev/null &&
-sudo sysctl -w net.core.rmem_max=26214400 1>/dev/null 2>/dev/null
+if [[ $(uname) = Linux ]]; then
+(
+set -x +e
+# test the existence of the sysctls before trying to set them
+# go ahead and return true and don't exit if these calls fail
+sysctl net.core.rmem_max 2>/dev/null 1>/dev/null &&
+sudo sysctl -w net.core.rmem_max=26214400 1>/dev/null 2>/dev/null
+
 sysctl net.core.rmem_default 2>/dev/null 1>/dev/null &&
 sudo sysctl -w net.core.rmem_default=26214400 1>/dev/null 2>/dev/null
-)
-return 0
+) || true
+fi
 }

 SOLANA_CONFIG_DIR=${SNAP_DATA:-$PWD}/config
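After tune_networking runs (and assuming the sudo calls succeeded), the effect can be checked directly from the shell:

```bash
# Both values should report 26214400 if the sysctl writes went through.
sysctl net.core.rmem_max net.core.rmem_default
```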
@@ -36,6 +36,7 @@ set -ex
 mkdir -p "$SOLANA_CONFIG_DIR"
 $rsync -vPz "$rsync_leader_url"/config/leader.json "$SOLANA_CONFIG_DIR"/

-# shellcheck disable=SC2086 # $solana_drone should not be quoted
-exec $solana_drone \
--l "$SOLANA_CONFIG_DIR"/leader.json -k "$SOLANA_CONFIG_PRIVATE_DIR"/mint.json
+set -o pipefail
+$solana_drone \
+-l "$SOLANA_CONFIG_DIR"/leader.json -k "$SOLANA_CONFIG_PRIVATE_DIR"/mint.json \
+2>&1 | $drone_logger
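The switch from `exec $solana_drone ...` to a pipeline is why `set -o pipefail` is added alongside it: without pipefail, the exit status of the pipeline is that of the logger, not the drone, so a crashed daemon would look successful. A minimal illustration using `false` as a stand-in for a failing process:

```bash
# Without pipefail the pipeline "succeeds" because `cat` (the logger stand-in) exits 0.
set +o pipefail
false 2>&1 | cat; echo "status without pipefail: $?"   # prints 0

# With pipefail the failure on the left side of the pipe propagates to the script.
set -o pipefail
false 2>&1 | cat; echo "status with pipefail: $?"      # prints 1
```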
@@ -25,15 +25,8 @@ fi

 tune_networking

-# migrate from old ledger format? why not...
-if [[ ! -f "$SOLANA_CONFIG_DIR"/ledger.log &&
--f "$SOLANA_CONFIG_DIR"/genesis.log ]]; then
-(shopt -s nullglob &&
-cat "$SOLANA_CONFIG_DIR"/genesis.log \
-"$SOLANA_CONFIG_DIR"/tx-*.log) > "$SOLANA_CONFIG_DIR"/ledger.log
-fi
-
-# shellcheck disable=SC2086 # $program should not be quoted
-exec $program \
+set -xo pipefail
+$program \
 --identity "$SOLANA_CONFIG_DIR"/leader.json \
---ledger "$SOLANA_CONFIG_DIR"/ledger.log
+--ledger "$SOLANA_CONFIG_DIR"/ledger.log \
+2>&1 | $leader_logger
multinode-demo/remote_leader.sh — new executable file, 14 lines
@@ -0,0 +1,14 @@
+#!/bin/bash -e
+
+[[ -n $FORCE ]] || exit
+
+chmod 600 ~/.ssh/authorized_keys ~/.ssh/id_rsa
+
+PATH="$HOME"/.cargo/bin:"$PATH"
+
+./fetch-perf-libs.sh
+
+# Run setup
+USE_INSTALL=1 ./multinode-demo/setup.sh -p
+USE_INSTALL=1 SOLANA_CUDA=1 ./multinode-demo/leader.sh >leader.log 2>&1 &
+USE_INSTALL=1 ./multinode-demo/drone.sh >drone.log 2>&1 &
multinode-demo/remote_nodes.sh — new executable file, 185 lines
@@ -0,0 +1,185 @@
+#!/bin/bash
+
+command=$1
+ip_addr_file=
+remote_user=
+ssh_keys=
+
+shift
+
+usage() {
+exitcode=0
+if [[ -n "$1" ]]; then
+exitcode=1
+echo "Error: $*"
+fi
+cat <<EOF
+usage: $0 <start|stop> <-f IP Addr Array file> <-u username> [-k ssh-keys]
+
+Manage a GCE multinode network
+
+start|stop - Create or delete the network
+-f file - A bash script that exports an array of IP addresses, ip_addr_array.
+Elements of the array are public IP address of remote nodes.
+-u username - The username for logging into remote nodes.
+-k ssh-keys - Path to public/private key pair that remote nodes can use to perform
+rsync and ssh among themselves. Must contain pub, and priv keys.
+
+EOF
+exit $exitcode
+}
+
+while getopts "h?f:u:k:" opt; do
+case $opt in
+h | \?)
+usage
+;;
+f)
+ip_addr_file=$OPTARG
+;;
+u)
+remote_user=$OPTARG
+;;
+k)
+ssh_keys=$OPTARG
+;;
+*)
+usage "Error: unhandled option: $opt"
+;;
+esac
+done
+
+set -e
+
+# Sample IP Address array file contents
+# ip_addr_array=(192.168.1.1 192.168.1.5 192.168.2.2)
+
+[[ -n $command ]] || usage "Need a command (start|stop)"
+[[ -n $ip_addr_file ]] || usage "Need a file with IP address array"
+[[ -n $remote_user ]] || usage "Need the username for remote nodes"
+
+ip_addr_array=()
+# Get IP address array
+# shellcheck source=/dev/null
+source "$ip_addr_file"
+
+build_project() {
+echo "Build started at $(date)"
+SECONDS=0
+
+# Build and install locally
+PATH="$HOME"/.cargo/bin:"$PATH"
+cargo install --force
+
+echo "Build took $SECONDS seconds"
+}
+
+common_start_setup() {
+ip_addr=$1
+
+# Killing sshguard for now. TODO: Find a better solution
+# sshguard is blacklisting IP address after ssh-keyscan and ssh login attempts
+ssh "$remote_user@$ip_addr" " \
+set -ex; \
+sudo service sshguard stop; \
+sudo apt-get --assume-yes install rsync libssl-dev; \
+mkdir -p ~/.ssh ~/solana ~/.cargo/bin; \
+" >log/"$ip_addr".log
+
+# If provided, deploy SSH keys
+if [[ -n $ssh_keys ]]; then
+{
+rsync -vPrz "$ssh_keys"/id_rsa "$remote_user@$ip_addr":~/.ssh/
+rsync -vPrz "$ssh_keys"/id_rsa.pub "$remote_user@$ip_addr":~/.ssh/
+rsync -vPrz "$ssh_keys"/id_rsa.pub "$remote_user@$ip_addr":~/.ssh/authorized_keys
+rsync -vPrz ./multinode-demo "$remote_user@$ip_addr":~/solana/
+} >>log/"$ip_addr".log
+fi
+}
+
+start_leader() {
+common_start_setup "$1"
+
+{
+rsync -vPrz ~/.cargo/bin/solana* "$remote_user@$ip_addr":~/.cargo/bin/
+rsync -vPrz ./fetch-perf-libs.sh "$remote_user@$ip_addr":~/solana/
+ssh -n -f "$remote_user@$ip_addr" 'cd solana; FORCE=1 ./multinode-demo/remote_leader.sh'
+} >>log/"$1".log
+
+leader_ip=$1
+leader_time=$SECONDS
+SECONDS=0
+}
+
+start_validator() {
+common_start_setup "$1"
+
+ssh -n -f "$remote_user@$ip_addr" "cd solana; FORCE=1 ./multinode-demo/remote_validator.sh $leader_ip" >>log/"$1".log
+}
+
+start_all_nodes() {
+echo "Deployment started at $(date)"
+SECONDS=0
+count=0
+leader_ip=
+leader_time=
+
+mkdir -p log
+
+for ip_addr in "${ip_addr_array[@]}"; do
+if ((!count)); then
+# Start the leader on the first node
+echo "Leader node $ip_addr, killing previous instance and restarting"
+start_leader "$ip_addr"
+else
+# Start validator on all other nodes
+echo "Validator[$count] node $ip_addr, killing previous instance and restarting"
+start_validator "$ip_addr" &
+# TBD: Remove the sleep or reduce time once GCP login quota is increased
+sleep 2
+fi
+
+((count = count + 1))
+done
+
+wait
+
+((validator_count = count - 1))
+
+echo "Deployment finished at $(date)"
+echo "Leader deployment too $leader_time seconds"
+echo "$validator_count Validator deployment took $SECONDS seconds"
+}
+
+stop_all_nodes() {
+SECONDS=0
+local count=0
+for ip_addr in "${ip_addr_array[@]}"; do
+ssh-keygen -R "$ip_addr" >log/local.log
+ssh-keyscan "$ip_addr" >>~/.ssh/known_hosts 2>/dev/null
+
+echo "Stopping node[$count] $ip_addr. Remote user $remote_user"
+
+ssh -n -f "$remote_user@$ip_addr" " \
+set -ex; \
+sudo service sshguard stop; \
+pkill -9 solana-; \
+pkill -9 validator; \
+pkill -9 leader; \
+"
+sleep 2
+((count = count + 1))
+echo "Stopped node[$count] $ip_addr"
+done
+echo "Stopping $count nodes took $SECONDS seconds"
+}
+
+if [[ $command == "start" ]]; then
+build_project
+stop_all_nodes
+start_all_nodes
+elif [[ $command == "stop" ]]; then
+stop_all_nodes
+else
+usage "Unknown command: $command"
+fi
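Going by the script's own usage text, a start/stop cycle would look roughly like the sketch below; the IP addresses, username, and key directory are placeholders, not values taken from this change.

```bash
# Hypothetical IP address file (ip_list.sh), sourced by remote_nodes.sh:
#   ip_addr_array=(192.168.1.1 192.168.1.5 192.168.2.2)

# Bring the network up, then tear it down again.
multinode-demo/remote_nodes.sh start -f ip_list.sh -u ubuntu -k ~/testnet-keys
multinode-demo/remote_nodes.sh stop -f ip_list.sh -u ubuntu
```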
multinode-demo/remote_validator.sh — new executable file, 17 lines
@@ -0,0 +1,17 @@
+#!/bin/bash -e
+
+[[ -n $FORCE ]] || exit
+
+chmod 600 ~/.ssh/authorized_keys ~/.ssh/id_rsa
+
+PATH="$HOME"/.cargo/bin:"$PATH"
+
+touch ~/.ssh/known_hosts
+ssh-keygen -R "$1" 2>/dev/null
+ssh-keyscan "$1" >>~/.ssh/known_hosts 2>/dev/null
+
+rsync -vPrz "$1":~/.cargo/bin/solana* ~/.cargo/bin/
+
+# Run setup
+USE_INSTALL=1 ./multinode-demo/setup.sh -p
+USE_INSTALL=1 ./multinode-demo/validator.sh "$1":~/solana "$1" >validator.log 2>&1
@@ -71,7 +71,8 @@ done

 leader_address_args=("$ip_address_arg")
 validator_address_args=("$ip_address_arg" -b 9000)
-id_path="$SOLANA_CONFIG_PRIVATE_DIR"/id.json
+leader_id_path="$SOLANA_CONFIG_PRIVATE_DIR"/leader-id.json
+validator_id_path="$SOLANA_CONFIG_PRIVATE_DIR"/validator-id.json
 mint_path="$SOLANA_CONFIG_PRIVATE_DIR"/mint.json

 set -e
@@ -83,23 +84,24 @@ mkdir -p "$SOLANA_CONFIG_DIR"
 rm -rvf "$SOLANA_CONFIG_PRIVATE_DIR"
 mkdir -p "$SOLANA_CONFIG_PRIVATE_DIR"

-$solana_keygen -o "$id_path"
+$solana_keygen -o "$leader_id_path"
+$solana_keygen -o "$validator_id_path"

 if $node_type_leader; then
-echo "Creating $SOLANA_CONFIG_DIR/mint.json with $num_tokens tokens"
+echo "Creating $mint_path with $num_tokens tokens"
 $solana_keygen -o "$mint_path"

 echo "Creating $SOLANA_CONFIG_DIR/ledger.log"
 $solana_genesis --tokens="$num_tokens" < "$mint_path" > "$SOLANA_CONFIG_DIR"/ledger.log

 echo "Creating $SOLANA_CONFIG_DIR/leader.json"
-$solana_fullnode_config --keypair="$id_path" "${leader_address_args[@]}" > "$SOLANA_CONFIG_DIR"/leader.json
+$solana_fullnode_config --keypair="$leader_id_path" "${leader_address_args[@]}" > "$SOLANA_CONFIG_DIR"/leader.json
 fi


 if $node_type_validator; then
 echo "Creating $SOLANA_CONFIG_DIR/validator.json"
-$solana_fullnode_config --keypair="$id_path" "${validator_address_args[@]}" > "$SOLANA_CONFIG_DIR"/validator.json
+$solana_fullnode_config --keypair="$validator_id_path" "${validator_address_args[@]}" > "$SOLANA_CONFIG_DIR"/validator.json
 fi

 ls -lh "$SOLANA_CONFIG_DIR"/
Deleted file:
@@ -1,107 +0,0 @@
-#!/bin/bash
-
-ip_addr_file=$1
-remote_user=$2
-ssh_keys=$3
-
-usage() {
-echo -e "\\tUsage: $0 <IP Address array> <username> [path to ssh keys]\\n"
-echo -e "\\t <IP Address array>: A bash script that exports an array of IP addresses, ip_addr_array. Elements of the array are public IP address of remote nodes."
-echo -e "\\t <username>: The username for logging into remote nodes."
-echo -e "\\t [path to ssh keys]: The public/private key pair that remote nodes can use to perform rsync and ssh among themselves. Must contain pub, priv and authorized_keys.\\n"
-exit 1
-}
-
-# Sample IP Address array file contents
-# ip_addr_array=(192.168.1.1 192.168.1.5 192.168.2.2)
-
-if [[ -z "$ip_addr_file" ]]; then
-usage
-fi
-
-if [[ -z "$remote_user" ]]; then
-usage
-fi
-
-echo "Build started at $(date)"
-SECONDS=0
-# Build and install locally
-PATH="$HOME"/.cargo/bin:"$PATH"
-cargo install --force
-
-echo "Build took $SECONDS seconds"
-
-ip_addr_array=()
-# Get IP address array
-# shellcheck source=/dev/null
-source "$ip_addr_file"
-
-# shellcheck disable=SC2089,SC2016
-ssh_command_prefix='export PATH="$HOME/.cargo/bin:$PATH"; cd solana; USE_INSTALL=1'
-
-echo "Deployment started at $(date)"
-SECONDS=0
-count=0
-leader=
-for ip_addr in "${ip_addr_array[@]}"; do
-echo "$ip_addr"
-
-ssh-keygen -R "$ip_addr"
-ssh-keyscan "$ip_addr" >>~/.ssh/known_hosts
-
-ssh -n -f "$remote_user@$ip_addr" 'mkdir -p ~/.ssh ~/solana ~/.cargo/bin'
-
-# Killing sshguard for now. TODO: Find a better solution
-# sshguard is blacklisting IP address after ssh-keyscan and ssh login attempts
-ssh -n -f "$remote_user@$ip_addr" "sudo service sshguard stop"
-ssh -n -f "$remote_user@$ip_addr" 'sudo apt-get --assume-yes install rsync libssl-dev'
-
-# If provided, deploy SSH keys
-if [[ -z $ssh_keys ]]; then
-echo "skip copying the ssh keys"
-else
-rsync -vPrz "$ssh_keys"/id_rsa "$remote_user@$ip_addr":~/.ssh/
-rsync -vPrz "$ssh_keys"/id_rsa.pub "$remote_user@$ip_addr":~/.ssh/
-rsync -vPrz "$ssh_keys"/id_rsa.pub "$remote_user@$ip_addr":~/.ssh/authorized_keys
-ssh -n -f "$remote_user@$ip_addr" 'chmod 600 ~/.ssh/authorized_keys ~/.ssh/id_rsa'
-fi
-
-# Stop current nodes
-ssh "$remote_user@$ip_addr" 'pkill -9 solana-'
-
-if [[ -n $leader ]]; then
-echo "Adding known hosts for $ip_addr"
-ssh -n -f "$remote_user@$ip_addr" "ssh-keygen -R $leader"
-ssh -n -f "$remote_user@$ip_addr" "ssh-keyscan $leader >> ~/.ssh/known_hosts"
-
-ssh -n -f "$remote_user@$ip_addr" "rsync -vPrz ""$remote_user@$leader"":~/.cargo/bin/solana* ~/.cargo/bin/"
-ssh -n -f "$remote_user@$ip_addr" "rsync -vPrz ""$remote_user@$leader"":~/solana/multinode-demo ~/solana/"
-ssh -n -f "$remote_user@$ip_addr" "rsync -vPrz ""$remote_user@$leader"":~/solana/fetch-perf-libs.sh ~/solana/"
-else
-# Deploy build and scripts to remote node
-rsync -vPrz ~/.cargo/bin/solana* "$remote_user@$ip_addr":~/.cargo/bin/
-rsync -vPrz ./multinode-demo "$remote_user@$ip_addr":~/solana/
-rsync -vPrz ./fetch-perf-libs.sh "$remote_user@$ip_addr":~/solana/
-fi
-
-# Run setup
-ssh "$remote_user@$ip_addr" "$ssh_command_prefix"' ./multinode-demo/setup.sh -p "$ip_addr"'
-
-if ((!count)); then
-# Start the leader on the first node
-echo "Starting leader node $ip_addr"
-ssh -n -f "$remote_user@$ip_addr" 'cd solana; ./fetch-perf-libs.sh'
-ssh -n -f "$remote_user@$ip_addr" "$ssh_command_prefix"' SOLANA_CUDA=1 ./multinode-demo/leader.sh > leader.log 2>&1'
-ssh -n -f "$remote_user@$ip_addr" "$ssh_command_prefix"' ./multinode-demo/drone.sh > drone.log 2>&1'
-leader=${ip_addr_array[0]}
-else
-# Start validator on all other nodes
-echo "Starting validator node $ip_addr"
-ssh -n -f "$remote_user@$ip_addr" "$ssh_command_prefix"" ./multinode-demo/validator.sh $remote_user@$leader:~/solana $leader > validator.log 2>&1"
-fi
-
-((count++))
-done
-
-echo "Deployment finished at $(date)"
-echo "Deployment took $SECONDS seconds"
@@ -6,7 +6,13 @@
 here=$(dirname "$0")
 cd "$here"

-wallet="../wallet.sh $1"
+if [[ -n "$USE_SNAP" ]]; then
+# TODO: Merge wallet.sh functionality into solana-wallet proper and
+# remove this USE_SNAP case
+wallet="solana.wallet $1"
+else
+wallet="../wallet.sh $1"
+fi

 # Tokens transferred to this address are lost forever...
 garbage_address=vS3ngn1TfQmpsW1Z4NkLuqNAQFF3dYQw8UZ6TCx9bmq
@@ -65,13 +65,12 @@ fi

 rsync_leader_url=$(rsync_url "$leader")

-set -ex
+tune_networking
+
 SOLANA_LEADER_CONFIG_DIR="$SOLANA_CONFIG_DIR"/leader-config
 rm -rf "$SOLANA_LEADER_CONFIG_DIR"
+set -ex
 $rsync -vPrz "$rsync_leader_url"/config/ "$SOLANA_LEADER_CONFIG_DIR"
-ls -lh "$SOLANA_LEADER_CONFIG_DIR"
-
-tune_networking

 # migrate from old ledger format? why not...
 if [[ ! -f "$SOLANA_LEADER_CONFIG_DIR"/ledger.log &&
@@ -85,12 +84,13 @@ fi
 # TODO: Remove this workaround
 while ! $solana_wallet \
 -l "$SOLANA_LEADER_CONFIG_DIR"/leader.json \
--k "$SOLANA_CONFIG_PRIVATE_DIR"/id.json airdrop --tokens 1; do
+-k "$SOLANA_CONFIG_PRIVATE_DIR"/validator-id.json airdrop --tokens 1; do
 sleep 1
 done

-# shellcheck disable=SC2086 # $program should not be quoted
-exec $program \
+set -o pipefail
+$program \
 --identity "$SOLANA_CONFIG_DIR"/validator.json \
 --testnet "$leader_address:$leader_port" \
---ledger "$SOLANA_LEADER_CONFIG_DIR"/ledger.log
+--ledger "$SOLANA_LEADER_CONFIG_DIR"/ledger.log \
+2>&1 | $validator_logger
@@ -30,18 +30,16 @@ rsync_leader_url=$(rsync_url "$leader")
 set -e
 mkdir -p "$SOLANA_CONFIG_CLIENT_DIR"
 if [[ ! -r "$SOLANA_CONFIG_CLIENT_DIR"/leader.json ]]; then
-(
-set -x
-$rsync -vPz "$rsync_leader_url"/config/leader.json "$SOLANA_CONFIG_CLIENT_DIR"/
-)
+echo "Fetching leader configuration from $rsync_leader_url"
+$rsync -Pz "$rsync_leader_url"/config/leader.json "$SOLANA_CONFIG_CLIENT_DIR"/
 fi

 client_id_path="$SOLANA_CONFIG_CLIENT_DIR"/id.json
 if [[ ! -r $client_id_path ]]; then
+echo "Generating client identity: $client_id_path"
 $solana_keygen -o "$client_id_path"
 fi

-set -x
 # shellcheck disable=SC2086 # $solana_wallet should not be quoted
 exec $solana_wallet \
 -l "$SOLANA_CONFIG_CLIENT_DIR"/leader.json -k "$client_id_path" "$@"
@@ -45,7 +45,9 @@ apps:
 plugs:
 - home
 client-demo:
-command: solana-client-demo
+# TODO: Merge client.sh functionality into solana-client-demo proper
+command: client.sh
+#command: solana-client-demo
 plugs:
 - network
 - network-bind
@@ -57,30 +59,43 @@ apps:
 plugs:
 - network
 - home

 daemon-validator:
 daemon: simple
 command: validator.sh
+plugs:
+- network
+- network-bind
 daemon-leader:
 daemon: simple
 command: leader.sh
+plugs:
+- network
+- network-bind
 daemon-drone:
 daemon: simple
 command: drone.sh
+plugs:
+- network
+- network-bind

 parts:
 solana:
 plugin: nil
 prime:
 - bin
-- usr/lib/libgf_complete.so.1
-- usr/lib/libJerasure.so.2
+- usr/lib
 override-build: |
+# Install CUDA 9.2 runtime
+mkdir -p $SNAPCRAFT_PART_INSTALL/usr/
+cp -rav /usr/local/cuda-9.2/targets/x86_64-linux/lib/ $SNAPCRAFT_PART_INSTALL/usr/lib
+mkdir -p $SNAPCRAFT_PART_INSTALL/usr/lib/x86_64-linux-gnu/
+cp -rav /usr/lib/x86_64-linux-gnu/libcuda.* $SNAPCRAFT_PART_INSTALL/usr/lib/x86_64-linux-gnu/
+mkdir -p $SNAPCRAFT_PART_INSTALL/usr/lib/nvidia-396/
+cp -v /usr/lib/nvidia-396/libnvidia-fatbinaryloader.so* $SNAPCRAFT_PART_INSTALL/usr/lib/nvidia-396/
+
 # Build/install solana-fullnode-cuda
 ./fetch-perf-libs.sh
-cargo install --features=cuda,erasure --root $SNAPCRAFT_PART_INSTALL --bin solana-fullnode
+cargo install --features=cuda --root $SNAPCRAFT_PART_INSTALL --bin solana-fullnode
 mv $SNAPCRAFT_PART_INSTALL/bin/solana-fullnode $SNAPCRAFT_PART_INSTALL
 rm -rf $SNAPCRAFT_PART_INSTALL/bin/*
 mv $SNAPCRAFT_PART_INSTALL/solana-fullnode $SNAPCRAFT_PART_INSTALL/bin/solana-fullnode-cuda
@@ -95,8 +110,9 @@ parts:
 mkdir -p $SNAPCRAFT_PART_INSTALL/bin
 cp -av multinode-demo/* $SNAPCRAFT_PART_INSTALL/bin/

-# TODO: build rsync from source instead of sneaking it in from the host
+# TODO: build rsync/multilog from source instead of sneaking it in from the host
 # system...
 set -x
 mkdir -p $SNAPCRAFT_PART_INSTALL/bin
 cp -av /usr/bin/rsync $SNAPCRAFT_PART_INSTALL/bin/
+cp -av /usr/bin/multilog $SNAPCRAFT_PART_INSTALL/bin/
@@ -6,6 +6,7 @@
 extern crate libc;

 use chrono::prelude::*;
+use counter::Counter;
 use entry::Entry;
 use hash::Hash;
 use itertools::Itertools;
@@ -202,6 +203,11 @@ impl Bank {
 {
 let option = bals.get_mut(&tx.from);
 if option.is_none() {
+if let Instruction::NewVote(_) = &tx.instruction {
+inc_new_counter!("bank-appy_debits-vote_account_not_found", 1);
+} else {
+inc_new_counter!("bank-appy_debits-generic_account_not_found", 1);
+}
 return Err(BankError::AccountNotFound(tx.from));
 }
 let bal = option.unwrap();
@@ -40,7 +40,7 @@ impl BankingStage {
 .name("solana-banking-stage".to_string())
 .spawn(move || loop {
 if let Err(e) = Self::process_packets(
-&bank.clone(),
+&bank,
 &verified_receiver,
 &signal_sender,
 &packet_recycler,
@@ -89,7 +89,6 @@ impl BankingStage {
 mms.len(),
 );
 let count = mms.iter().map(|x| x.1.len()).sum();
-static mut COUNTER: Counter = create_counter!("banking_stage_process_packets", 1);
 let proc_start = Instant::now();
 for (msgs, vers) in mms {
 let transactions = Self::deserialize_transactions(&msgs.read().unwrap());
@@ -125,7 +124,7 @@ impl BankingStage {
 reqs_len,
 (reqs_len as f32) / (total_time_s)
 );
-inc_counter!(COUNTER, count);
+inc_new_counter!("banking_stage-process_packets", count);
 Ok(())
 }
 }
src/bin/client-demo.rs — 23 changed lines (mode changed: normal file → executable file)
@@ -9,7 +9,7 @@ use bincode::serialize;
 use clap::{App, Arg};
 use rayon::prelude::*;
 use solana::crdt::{Crdt, NodeInfo};
-use solana::drone::DroneRequest;
+use solana::drone::{DroneRequest, DRONE_PORT};
 use solana::fullnode::Config;
 use solana::hash::Hash;
 use solana::nat::{udp_public_bind, udp_random_bind};
@@ -211,7 +211,7 @@ fn main() {
 threads = t.to_string().parse().expect("integer");
 }

-if let Some(n) = matches.value_of("nodes") {
+if let Some(n) = matches.value_of("num_nodes") {
 num_nodes = n.to_string().parse().expect("integer");
 }

@@ -220,12 +220,13 @@
 }

 let mut drone_addr = leader.contact_info.tpu;
-drone_addr.set_port(9900);
+drone_addr.set_port(DRONE_PORT);

 let signal = Arc::new(AtomicBool::new(false));
 let mut c_threads = vec![];
-let validators = converge(&leader, &signal.clone(), num_nodes, &mut c_threads);
-assert_eq!(validators.len(), num_nodes);
+let validators = converge(&leader, &signal, num_nodes, &mut c_threads);
+println!("Network has {} node(s)", validators.len());
+assert!(validators.len() >= num_nodes);

 let mut client = mk_client(&leader);

@@ -365,6 +366,8 @@ fn spy_node() -> (NodeInfo, UdpSocket) {
 let gossip_socket_pair = udp_public_bind("gossip", 8000, 10000);
 let pubkey = KeyPair::new().pubkey();
 let daddr = "0.0.0.0:0".parse().unwrap();
+assert!(!gossip_socket_pair.addr.ip().is_unspecified());
+assert!(!gossip_socket_pair.addr.ip().is_multicast());
 let node = NodeInfo::new(
 pubkey,
 //gossip.local_addr().unwrap(),
@@ -386,14 +389,14 @@ fn converge(
 //lets spy on the network
 let daddr = "0.0.0.0:0".parse().unwrap();
 let (spy, spy_gossip) = spy_node();
-let mut spy_crdt = Crdt::new(spy);
+let mut spy_crdt = Crdt::new(spy).expect("Crdt::new");
 spy_crdt.insert(&leader);
 spy_crdt.set_leader(leader.id);
 let spy_ref = Arc::new(RwLock::new(spy_crdt));
 let window = default_window();
 let gossip_send_socket = udp_random_bind(8000, 10000, 5).unwrap();
 let ncp = Ncp::new(
-&spy_ref.clone(),
+&spy_ref,
 window.clone(),
 spy_gossip,
 gossip_send_socket,
@@ -415,6 +418,12 @@
 println!("CONVERGED!");
 rv.extend(v.into_iter());
 break;
+} else {
+println!(
+"{} node(s) discovered (looking for {} or more)",
+v.len(),
+num_nodes
+);
 }
 sleep(Duration::new(1, 0));
 }
@@ -10,8 +10,9 @@ extern crate tokio_io;
 use bincode::deserialize;
 use clap::{App, Arg};
 use solana::crdt::NodeInfo;
-use solana::drone::{Drone, DroneRequest};
+use solana::drone::{Drone, DroneRequest, DRONE_PORT};
 use solana::fullnode::Config;
+use solana::metrics::set_panic_hook;
 use solana::signature::read_keypair;
 use std::fs::File;
 use std::net::{IpAddr, Ipv4Addr, SocketAddr};
@@ -23,7 +24,8 @@ use tokio_codec::{BytesCodec, Decoder};

 fn main() {
 env_logger::init();
-let matches = App::new("solana-client-demo")
+set_panic_hook("drone");
+let matches = App::new("drone")
 .arg(
 Arg::with_name("leader")
 .short("l")
@@ -83,7 +85,7 @@ fn main() {
|
|||||||
request_cap = None;
|
request_cap = None;
|
||||||
}
|
}
|
||||||
|
|
||||||
let drone_addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
|
let drone_addr: SocketAddr = format!("0.0.0.0:{}", DRONE_PORT).parse().unwrap();
|
||||||
|
|
||||||
let drone = Arc::new(Mutex::new(Drone::new(
|
let drone = Arc::new(Mutex::new(Drone::new(
|
||||||
mint_keypair,
|
mint_keypair,
|
||||||
|
@@ -8,6 +8,7 @@ extern crate solana;
 use clap::{App, Arg};
 use solana::crdt::{NodeInfo, TestNode};
 use solana::fullnode::{Config, FullNode, LedgerFile};
+use solana::metrics::set_panic_hook;
 use solana::service::Service;
 use solana::signature::{KeyPair, KeyPairUtil};
 use std::fs::File;
@@ -17,6 +18,7 @@ use std::process::exit;

 fn main() -> () {
     env_logger::init();
+    set_panic_hook("fullnode");
     let matches = App::new("fullnode")
         .arg(
             Arg::with_name("identity")
@@ -10,7 +10,7 @@ extern crate solana;
 use bincode::serialize;
 use clap::{App, Arg, SubCommand};
 use solana::crdt::NodeInfo;
-use solana::drone::DroneRequest;
+use solana::drone::{DroneRequest, DRONE_PORT};
 use solana::fullnode::Config;
 use solana::signature::{read_keypair, KeyPair, KeyPairUtil, PublicKey, Signature};
 use solana::thin_client::ThinClient;
@@ -142,7 +142,7 @@ fn parse_args() -> Result<WalletConfig, Box<error::Error>> {

     let leader: NodeInfo;
     if let Some(l) = matches.value_of("leader") {
-        leader = read_leader(l).node_info;
+        leader = read_leader(l)?.node_info;
     } else {
         let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
         leader = NodeInfo::new_leader(&server_addr);
@@ -164,7 +164,7 @@ fn parse_args() -> Result<WalletConfig, Box<error::Error>> {
     })?;

     let mut drone_addr = leader.contact_info.tpu;
-    drone_addr.set_port(9900);
+    drone_addr.set_port(DRONE_PORT);

     let command = match matches.subcommand() {
         ("airdrop", Some(airdrop_matches)) => {
@@ -249,15 +249,28 @@ fn process_command(
         // Request an airdrop from Solana Drone;
         // Request amount is set in request_airdrop function
         WalletCommand::AirDrop(tokens) => {
-            println!("Airdrop requested...");
-            println!("Airdropping {:?} tokens", tokens);
-            request_airdrop(&config.drone_addr, &config.id, tokens as u64)?;
-            // TODO: return airdrop Result from Drone
-            sleep(Duration::from_millis(100));
             println!(
-                "Your balance is: {:?}",
-                client.poll_get_balance(&config.id.pubkey()).unwrap()
+                "Requesting airdrop of {:?} tokens from {}",
+                tokens, config.drone_addr
             );
+            let previous_balance = client.poll_get_balance(&config.id.pubkey())?;
+            request_airdrop(&config.drone_addr, &config.id, tokens as u64)?;
+
+            // TODO: return airdrop Result from Drone instead of polling the
+            // network
+            let mut current_balance = previous_balance;
+            for _ in 0..20 {
+                sleep(Duration::from_millis(500));
+                current_balance = client.poll_get_balance(&config.id.pubkey())?;
+                if previous_balance != current_balance {
+                    break;
+                }
+                println!(".");
+            }
+            println!("Your balance is: {:?}", current_balance);
+            if current_balance - previous_balance != tokens {
+                Err("Airdrop failed!")?;
+            }
         }
         // If client has positive balance, spend tokens in {balance} number of transactions
         WalletCommand::Pay(tokens, to) => {
@@ -288,9 +301,20 @@ fn display_actions() {
     println!();
 }

-fn read_leader(path: &str) -> Config {
-    let file = File::open(path.to_string()).unwrap_or_else(|_| panic!("file not found: {}", path));
-    serde_json::from_reader(file).unwrap_or_else(|_| panic!("failed to parse {}", path))
+fn read_leader(path: &str) -> Result<Config, WalletError> {
+    let file = File::open(path.to_string()).or_else(|err| {
+        Err(WalletError::BadParameter(format!(
+            "{}: Unable to open leader file: {}",
+            err, path
+        )))
+    })?;
+
+    serde_json::from_reader(file).or_else(|err| {
+        Err(WalletError::BadParameter(format!(
+            "{}: Failed to parse leader file: {}",
+            err, path
+        )))
+    })
 }

 fn mk_client(r: &NodeInfo) -> io::Result<ThinClient> {
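For orientation, here is a minimal, self-contained sketch of the retry pattern the new `AirDrop` branch uses: poll the balance until it differs from the starting value, giving up after a fixed number of attempts. The function name and parameters below are illustrative only and are not part of the wallet code.

```rust
use std::thread::sleep;
use std::time::Duration;

// Illustrative sketch only: poll a balance source until it changes from
// `previous`, or give up after `retries` attempts (the wallet uses 20
// attempts of 500 ms each before reporting failure).
fn poll_until_changed<F>(previous: i64, retries: usize, mut get_balance: F) -> i64
where
    F: FnMut() -> i64,
{
    let mut current = previous;
    for _ in 0..retries {
        sleep(Duration::from_millis(500));
        current = get_balance();
        if current != previous {
            break;
        }
    }
    current
}
```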
@@ -29,7 +29,7 @@ impl<'a, 'b> ChooseRandomPeerStrategy<'a> {
 impl<'a> ChooseGossipPeerStrategy for ChooseRandomPeerStrategy<'a> {
     fn choose_peer<'b>(&self, options: Vec<&'b NodeInfo>) -> Result<&'b NodeInfo> {
         if options.is_empty() {
-            Err(CrdtError::TooSmall)?;
+            Err(CrdtError::NoPeers)?;
         }

         let n = ((self.random)() as usize) % options.len();
@@ -174,7 +174,7 @@ impl<'a> ChooseWeightedPeerStrategy<'a> {
 impl<'a> ChooseGossipPeerStrategy for ChooseWeightedPeerStrategy<'a> {
     fn choose_peer<'b>(&self, options: Vec<&'b NodeInfo>) -> Result<&'b NodeInfo> {
         if options.is_empty() {
-            Err(CrdtError::TooSmall)?;
+            Err(CrdtError::NoPeers)?;
         }

         let mut weighted_peers = vec![];
@@ -1,9 +1,10 @@
 use influx_db_client as influxdb;
 use metrics;
+use std::env;
 use std::sync::atomic::{AtomicUsize, Ordering};
 use timing;

-const INFLUX_RATE: usize = 100;
+const DEFAULT_METRICS_RATE: usize = 100;

 pub struct Counter {
     pub name: &'static str,
@@ -12,7 +13,7 @@ pub struct Counter {
     pub times: AtomicUsize,
     /// last accumulated value logged
     pub lastlog: AtomicUsize,
-    pub lograte: usize,
+    pub lograte: AtomicUsize,
 }

 macro_rules! create_counter {
@@ -22,7 +23,7 @@ macro_rules! create_counter {
             counts: AtomicUsize::new(0),
             times: AtomicUsize::new(0),
             lastlog: AtomicUsize::new(0),
-            lograte: $lograte,
+            lograte: AtomicUsize::new($lograte),
         }
     };
 }
@@ -33,12 +34,38 @@ macro_rules! inc_counter {
     };
 }

+macro_rules! inc_new_counter {
+    ($name:expr, $count:expr) => {{
+        static mut INC_NEW_COUNTER: Counter = create_counter!($name, 0);
+        inc_counter!(INC_NEW_COUNTER, $count);
+    }};
+    ($name:expr, $count:expr, $lograte:expr) => {{
+        static mut INC_NEW_COUNTER: Counter = create_counter!($name, $lograte);
+        inc_counter!(INC_NEW_COUNTER, $count);
+    }};
+}
+
 impl Counter {
+    fn default_log_rate() -> usize {
+        let v = env::var("SOLANA_DEFAULT_METRICS_RATE")
+            .map(|x| x.parse().unwrap_or(DEFAULT_METRICS_RATE))
+            .unwrap_or(DEFAULT_METRICS_RATE);
+        if v == 0 {
+            DEFAULT_METRICS_RATE
+        } else {
+            v
+        }
+    }
     pub fn inc(&mut self, events: usize) {
         let counts = self.counts.fetch_add(events, Ordering::Relaxed);
         let times = self.times.fetch_add(1, Ordering::Relaxed);
-        let lastlog = self.lastlog.load(Ordering::Relaxed);
-        if times % self.lograte == 0 && times > 0 {
+        let mut lograte = self.lograte.load(Ordering::Relaxed);
+        if lograte == 0 {
+            lograte = Counter::default_log_rate();
+            self.lograte.store(lograte, Ordering::Relaxed);
+        }
+        if times % lograte == 0 && times > 0 {
+            let lastlog = self.lastlog.load(Ordering::Relaxed);
             info!(
                 "COUNTER:{{\"name\": \"{}\", \"counts\": {}, \"samples\": {}, \"now\": {}}}",
                 self.name,
@@ -46,10 +73,8 @@ impl Counter {
                 times,
                 timing::timestamp(),
             );
-        }
-        if times % INFLUX_RATE == 0 && times > 0 {
             metrics::submit(
-                influxdb::Point::new(&format!("counter_{}", self.name))
+                influxdb::Point::new(&format!("counter-{}", self.name))
                     .add_field(
                         "count",
                         influxdb::Value::Integer(counts as i64 - lastlog as i64),
@@ -63,7 +88,8 @@ impl Counter {
 }
 #[cfg(test)]
 mod tests {
-    use counter::Counter;
+    use counter::{Counter, DEFAULT_METRICS_RATE};
+    use std::env;
     use std::sync::atomic::{AtomicUsize, Ordering};
     #[test]
     fn test_counter() {
@@ -73,7 +99,7 @@ mod tests {
         unsafe {
             assert_eq!(COUNTER.counts.load(Ordering::Relaxed), 1);
             assert_eq!(COUNTER.times.load(Ordering::Relaxed), 1);
-            assert_eq!(COUNTER.lograte, 100);
+            assert_eq!(COUNTER.lograte.load(Ordering::Relaxed), 100);
             assert_eq!(COUNTER.lastlog.load(Ordering::Relaxed), 0);
             assert_eq!(COUNTER.name, "test");
         }
@@ -88,4 +114,42 @@ mod tests {
             assert_eq!(COUNTER.lastlog.load(Ordering::Relaxed), 399);
         }
     }
+    #[test]
+    fn test_inc_new_counter() {
+        //make sure that macros are syntactically correct
+        //the variable is internal to the macro scope so there is no way to introspect it
+        inc_new_counter!("counter-1", 1);
+        inc_new_counter!("counter-2", 1, 2);
+    }
+    #[test]
+    fn test_lograte() {
+        static mut COUNTER: Counter = create_counter!("test_lograte", 0);
+        inc_counter!(COUNTER, 2);
+        unsafe {
+            assert_eq!(
+                COUNTER.lograte.load(Ordering::Relaxed),
+                DEFAULT_METRICS_RATE
+            );
+        }
+    }
+    #[test]
+    fn test_lograte_env() {
+        assert_ne!(DEFAULT_METRICS_RATE, 0);
+        static mut COUNTER: Counter = create_counter!("test_lograte_env", 0);
+        env::set_var("SOLANA_DEFAULT_METRICS_RATE", "50");
+        inc_counter!(COUNTER, 2);
+        unsafe {
+            assert_eq!(COUNTER.lograte.load(Ordering::Relaxed), 50);
+        }
+
+        static mut COUNTER2: Counter = create_counter!("test_lograte_env", 0);
+        env::set_var("SOLANA_DEFAULT_METRICS_RATE", "0");
+        inc_counter!(COUNTER2, 2);
+        unsafe {
+            assert_eq!(
+                COUNTER2.lograte.load(Ordering::Relaxed),
+                DEFAULT_METRICS_RATE
+            );
+        }
+    }
 }
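A short orientation note on the change above: a counter created with a log rate of zero now resolves its rate lazily from the environment on first increment. The sketch below isolates that lookup; it restates the logic of `Counter::default_log_rate` from the diff and is not additional library API.

```rust
use std::env;

const DEFAULT_METRICS_RATE: usize = 100;

// Illustrative sketch of the lookup Counter::default_log_rate performs:
// read SOLANA_DEFAULT_METRICS_RATE, fall back to the default on a missing
// or unparsable value, and never return zero.
fn resolve_log_rate() -> usize {
    let v = env::var("SOLANA_DEFAULT_METRICS_RATE")
        .map(|x| x.parse().unwrap_or(DEFAULT_METRICS_RATE))
        .unwrap_or(DEFAULT_METRICS_RATE);
    if v == 0 {
        DEFAULT_METRICS_RATE
    } else {
        v
    }
}
```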
src/crdt.rs (470 changed lines)
@@ -37,8 +37,6 @@ use streamer::{BlobReceiver, BlobSender, Window};
 use timing::timestamp;
 use transaction::Vote;

-const LOG_RATE: usize = 10;
-
 /// milliseconds we sleep for between gossip requests
 const GOSSIP_SLEEP_MILLIS: u64 = 100;
 const GOSSIP_PURGE_MILLIS: u64 = 15000;
@@ -48,8 +46,11 @@ const MIN_TABLE_SIZE: usize = 2;

 #[derive(Debug, PartialEq, Eq)]
 pub enum CrdtError {
-    TooSmall,
+    NoPeers,
     NoLeader,
+    BadContactInfo,
+    BadNodeInfo,
+    BadGossipAddress,
 }

 pub fn parse_port_or_addr(optstr: Option<String>) -> SocketAddr {
@@ -119,8 +120,6 @@ pub struct ContactInfo {
 pub struct LedgerState {
     /// last verified hash that was submitted to the leader
     pub last_id: Hash,
-    /// last verified entry count, always increasing
-    pub entry_height: u64,
 }

 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
@@ -150,7 +149,7 @@ impl NodeInfo {
         rpu: SocketAddr,
         tpu: SocketAddr,
         tvu_window: SocketAddr,
-    ) -> NodeInfo {
+    ) -> Self {
         NodeInfo {
             id,
             version: 0,
@@ -165,10 +164,38 @@ impl NodeInfo {
             leader_id: PublicKey::default(),
             ledger_state: LedgerState {
                 last_id: Hash::default(),
-                entry_height: 0,
             },
         }
     }
+    #[cfg(test)]
+    /// NodeInfo with unspecified addresses for adversarial testing.
+    pub fn new_unspecified() -> Self {
+        let addr: SocketAddr = "0.0.0.0:0".parse().unwrap();
+        assert!(addr.ip().is_unspecified());
+        Self::new(
+            KeyPair::new().pubkey(),
+            addr.clone(),
+            addr.clone(),
+            addr.clone(),
+            addr.clone(),
+            addr.clone(),
+        )
+    }
+    #[cfg(test)]
+    /// NodeInfo with multicast addresses for adversarial testing.
+    pub fn new_multicast() -> Self {
+        let addr: SocketAddr = "224.0.1.255:1000".parse().unwrap();
+        assert!(addr.ip().is_multicast());
+        Self::new(
+            KeyPair::new().pubkey(),
+            addr.clone(),
+            addr.clone(),
+            addr.clone(),
+            addr.clone(),
+            addr.clone(),
+        )
+    }
+
     pub fn debug_id(&self) -> u64 {
         make_debug_id(&self.id)
     }
@@ -256,8 +283,31 @@ enum Protocol {
 }

 impl Crdt {
-    pub fn new(me: NodeInfo) -> Crdt {
-        assert_eq!(me.version, 0);
+    pub fn new(me: NodeInfo) -> Result<Crdt> {
+        if me.version != 0 {
+            return Err(Error::CrdtError(CrdtError::BadNodeInfo));
+        }
+        if me.contact_info.ncp.ip().is_unspecified()
+            || me.contact_info.ncp.port() == 0
+            || me.contact_info.ncp.ip().is_multicast()
+        {
+            return Err(Error::CrdtError(CrdtError::BadGossipAddress));
+        }
+        for addr in &[
+            me.contact_info.tvu,
+            me.contact_info.rpu,
+            me.contact_info.tpu,
+            me.contact_info.tvu_window,
+        ] {
+            //dummy address is allowed, services will filter them
+            if addr.ip().is_unspecified() && addr.port() == 0 {
+                continue;
+            }
+            //if addr is not a dummy address, than it must be valid
+            if addr.ip().is_unspecified() || addr.port() == 0 || addr.ip().is_multicast() {
+                return Err(Error::CrdtError(CrdtError::BadContactInfo));
+            }
+        }
         let mut g = Crdt {
             table: HashMap::new(),
             local: HashMap::new(),
@@ -269,7 +319,7 @@ impl Crdt {
         };
         g.local.insert(me.id, g.update_index);
         g.table.insert(me.id, me);
-        g
+        Ok(g)
     }
     pub fn debug_id(&self) -> u64 {
         make_debug_id(&self.me)
@@ -348,8 +398,7 @@ impl Crdt {
         }
     }
     pub fn insert_votes(&mut self, votes: &[(PublicKey, Vote, Hash)]) {
-        static mut COUNTER_VOTE: Counter = create_counter!("crdt-vote-count", LOG_RATE);
-        inc_counter!(COUNTER_VOTE, votes.len());
+        inc_new_counter!("crdt-vote-count", votes.len());
         if !votes.is_empty() {
             info!("{:x}: INSERTING VOTES {}", self.debug_id(), votes.len());
         }
@@ -369,12 +418,15 @@ impl Crdt {
                 v.debug_id(),
                 v.version
             );
+            if self.table.get(&v.id).is_none() {
+                inc_new_counter!("crdt-insert-new_entry", 1, 1);
+            }
+
             self.update_index += 1;
             let _ = self.table.insert(v.id, v.clone());
             let _ = self.local.insert(v.id, self.update_index);
-            static mut COUNTER_UPDATE: Counter = create_counter!("crdt-update-count", LOG_RATE);
-            inc_counter!(COUNTER_UPDATE, 1);
+            inc_new_counter!("crdt-update-count", 1);
+            self.update_liveness(v.id);
         } else {
             trace!(
                 "{:x}: INSERT FAILED data: {:x} new.version: {} me.version: {}",
@@ -384,7 +436,6 @@ impl Crdt {
                 self.table[&v.id].version
             );
         }
-        self.update_liveness(v.id);
     }

     fn update_liveness(&mut self, id: PublicKey) {
@@ -404,35 +455,20 @@ impl Crdt {
     /// challenging part is that we are on a permissionless network
     pub fn purge(&mut self, now: u64) {
         if self.table.len() <= MIN_TABLE_SIZE {
+            trace!("purge: skipped: table too small: {}", self.table.len());
             return;
         }
         if self.leader_data().is_none() {
+            trace!("purge: skipped: no leader_data");
             return;
         }
         let leader_id = self.leader_data().unwrap().id;

         let limit = GOSSIP_PURGE_MILLIS;
         let dead_ids: Vec<PublicKey> = self.alive
             .iter()
             .filter_map(|(&k, v)| {
                 if k != self.me && (now - v) > limit {
-                    if leader_id == k {
-                        info!(
-                            "{:x}: PURGE LEADER {:x} {}",
-                            self.debug_id(),
-                            make_debug_id(&k),
-                            now - v
-                        );
-                        Some(k)
-                    } else {
-                        info!(
-                            "{:x}: PURGE {:x} {}",
-                            self.debug_id(),
-                            make_debug_id(&k),
-                            now - v
-                        );
-                        Some(k)
-                    }
+                    Some(k)
                 } else {
                     trace!(
                         "{:x} purge skipped {:x} {} {}",
@@ -446,8 +482,7 @@ impl Crdt {
             })
             .collect();

-        static mut COUNTER_PURGE: Counter = create_counter!("crdt-purge-count", LOG_RATE);
-        inc_counter!(COUNTER_PURGE, dead_ids.len());
+        inc_new_counter!("crdt-purge-count", dead_ids.len());

         for id in &dead_ids {
             self.alive.remove(id);
@@ -455,9 +490,19 @@ impl Crdt {
             self.remote.remove(id);
             self.local.remove(id);
             self.external_liveness.remove(id);
+            info!("{:x}: PURGE {:x}", self.debug_id(), make_debug_id(id));
             for map in self.external_liveness.values_mut() {
                 map.remove(id);
             }
+            if *id == leader_id {
+                info!(
+                    "{:x}: PURGE LEADER {:x}",
+                    self.debug_id(),
+                    make_debug_id(id),
+                );
+                inc_new_counter!("crdt-purge-purged_leader", 1, 1);
+                self.set_leader(PublicKey::default());
+            }
         }
     }

@@ -522,7 +567,8 @@ impl Crdt {
     ) -> Result<()> {
         if broadcast_table.is_empty() {
             warn!("{:x}:not enough peers in crdt table", me.debug_id());
-            Err(CrdtError::TooSmall)?;
+            inc_new_counter!("crdt-broadcast-not_enough_peers_error", 1, 1);
+            Err(CrdtError::NoPeers)?;
         }
         trace!("broadcast nodes {}", broadcast_table.len());

@@ -628,7 +674,8 @@ impl Crdt {
             .collect();
         for e in errs {
             if let Err(e) = &e {
-                error!("broadcast result {:?}", e);
+                inc_new_counter!("crdt-retransmit-send_to_error", 1, 1);
+                error!("retransmit result {:?}", e);
             }
             e?;
         }
@@ -669,7 +716,7 @@ impl Crdt {
             .filter(|r| r.id != self.me && r.contact_info.tvu_window != daddr)
             .collect();
         if valid.is_empty() {
-            Err(CrdtError::TooSmall)?;
+            Err(CrdtError::NoPeers)?;
         }
         let n = (Self::random() as usize) % valid.len();
         let addr = valid[n].contact_info.ncp;
@@ -684,7 +731,14 @@ impl Crdt {
     /// * A - Address to send to
     /// * B - RequestUpdates protocol message
     fn gossip_request(&self) -> Result<(SocketAddr, Protocol)> {
-        let options: Vec<_> = self.table.values().filter(|v| v.id != self.me).collect();
+        let options: Vec<_> = self.table
+            .values()
+            .filter(|v| {
+                v.id != self.me
+                    && !v.contact_info.ncp.ip().is_unspecified()
+                    && !v.contact_info.ncp.ip().is_multicast()
+            })
+            .collect();

         let choose_peer_strategy = ChooseWeightedPeerStrategy::new(
             &self.remote,
@@ -694,7 +748,7 @@ impl Crdt {

         let choose_peer_result = choose_peer_strategy.choose_peer(options);

-        if let Err(Error::CrdtError(CrdtError::TooSmall)) = &choose_peer_result {
+        if let Err(Error::CrdtError(CrdtError::NoPeers)) = &choose_peer_result {
             trace!(
                 "crdt too small for gossip {:x} {}",
                 self.debug_id(),
@@ -715,12 +769,11 @@ impl Crdt {
         Ok((v.contact_info.ncp, req))
     }

-    pub fn new_vote(&mut self, height: u64, last_id: Hash) -> Result<(Vote, SocketAddr)> {
+    pub fn new_vote(&mut self, last_id: Hash) -> Result<(Vote, SocketAddr)> {
         let mut me = self.my_data().clone();
         let leader = self.leader_data().ok_or(CrdtError::NoLeader)?.clone();
         me.version += 1;
         me.ledger_state.last_id = last_id;
-        me.ledger_state.entry_height = height;
         let vote = Vote {
             version: me.version,
             contact_info_version: me.contact_info.version,
@@ -898,24 +951,18 @@ impl Crdt {
                 outblob.meta.set_addr(&from.contact_info.tvu_window);
                 outblob.set_id(sender_id).expect("blob set_id");
             }
-            static mut COUNTER_REQ_WINDOW_PASS: Counter =
-                create_counter!("crdt-window-request-pass", LOG_RATE);
-            inc_counter!(COUNTER_REQ_WINDOW_PASS, 1);
+            inc_new_counter!("crdt-window-request-pass", 1);

             return Some(out);
         } else {
-            static mut COUNTER_REQ_WINDOW_OUTSIDE: Counter =
-                create_counter!("crdt-window-request-outside", LOG_RATE);
-            inc_counter!(COUNTER_REQ_WINDOW_OUTSIDE, 1);
+            inc_new_counter!("crdt-window-request-outside", 1);
             info!(
                 "requested ix {} != blob_ix {}, outside window!",
                 ix, blob_ix
             );
         }
     } else {
-        static mut COUNTER_REQ_WINDOW_FAIL: Counter =
-            create_counter!("crdt-window-request-fail", LOG_RATE);
-        inc_counter!(COUNTER_REQ_WINDOW_FAIL, 1);
+        inc_new_counter!("crdt-window-request-fail", 1);
         assert!(window.read().unwrap()[pos].is_none());
         info!(
             "{:x}: failed RequestWindowIndex {:x} {} {}",
@@ -937,11 +984,35 @@ impl Crdt {
         blob: &Blob,
     ) -> Option<SharedBlob> {
         match deserialize(&blob.data[..blob.meta.size]) {
+            Ok(request) => Crdt::handle_protocol(request, obj, window, blob_recycler),
+            Err(_) => {
+                warn!("deserialize crdt packet failed");
+                None
+            }
+        }
+    }
+
+    fn handle_protocol(
+        request: Protocol,
+        obj: &Arc<RwLock<Self>>,
+        window: &Window,
+        blob_recycler: &BlobRecycler,
+    ) -> Option<SharedBlob> {
+        match request {
             // TODO sigverify these
-            Ok(Protocol::RequestUpdates(v, from_rd)) => {
-                trace!("RequestUpdates {}", v);
+            Protocol::RequestUpdates(v, from_rd) => {
                 let addr = from_rd.contact_info.ncp;
+                trace!("RequestUpdates {} from {}", v, addr);
                 let me = obj.read().unwrap();
+                if addr == me.table[&me.me].contact_info.ncp {
+                    warn!(
+                        "RequestUpdates ignored, I'm talking to myself: me={:x} remoteme={:x}",
+                        me.debug_id(),
+                        make_debug_id(&from_rd.id)
+                    );
+                    inc_new_counter!("crdt-window-request-loopback", 1);
+                    return None;
+                }
                 // only lock for these two calls, dont lock during IO `sock.send_to` or `sock.recv_from`
                 let (from, ups, data) = me.get_updates_since(v);
                 let external_liveness = me.remote.iter().map(|(k, v)| (*k, *v)).collect();
@@ -949,7 +1020,11 @@ impl Crdt {
                 trace!("get updates since response {} {}", v, data.len());
                 let len = data.len();
                 let rsp = Protocol::ReceiveUpdates(from, ups, data, external_liveness);
-                obj.write().unwrap().insert(&from_rd);
+                {
+                    let mut me = obj.write().unwrap();
+                    me.insert(&from_rd);
+                    me.update_liveness(from_rd.id);
+                }
                 if len < 1 {
                     let me = obj.read().unwrap();
                     trace!(
@@ -973,40 +1048,43 @@ impl Crdt {
                     None
                 }
             }
-            Ok(Protocol::ReceiveUpdates(from, ups, data, external_liveness)) => {
+            Protocol::ReceiveUpdates(from, update_index, data, external_liveness) => {
                 trace!(
-                    "ReceivedUpdates {:x} {} {}",
+                    "ReceivedUpdates from={:x} update_index={} len={}",
                     make_debug_id(&from),
-                    ups,
+                    update_index,
                     data.len()
                 );
                 obj.write()
                     .expect("'obj' write lock in ReceiveUpdates")
-                    .apply_updates(from, ups, &data, &external_liveness);
+                    .apply_updates(from, update_index, &data, &external_liveness);
                 None
             }
-            Ok(Protocol::RequestWindowIndex(from, ix)) => {
+            Protocol::RequestWindowIndex(from, ix) => {
                 //TODO this doesn't depend on CRDT module, can be moved
                 //but we are using the listen thread to service these request
                 //TODO verify from is signed
                 obj.write().unwrap().insert(&from);
                 let me = obj.read().unwrap().my_data().clone();
-                static mut COUNTER_REQ_WINDOW: Counter =
-                    create_counter!("crdt-window-request-recv", LOG_RATE);
-                inc_counter!(COUNTER_REQ_WINDOW, 1);
+                inc_new_counter!("crdt-window-request-recv", 1);
                 trace!(
                     "{:x}:received RequestWindowIndex {:x} {} ",
                     me.debug_id(),
                     from.debug_id(),
                     ix,
                 );
-                assert_ne!(from.contact_info.tvu_window, me.contact_info.tvu_window);
+                if from.contact_info.tvu_window == me.contact_info.tvu_window {
+                    warn!(
+                        "Ignored {:x}:received RequestWindowIndex from ME {:x} {} ",
+                        me.debug_id(),
+                        from.debug_id(),
+                        ix,
+                    );
+                    inc_new_counter!("crdt-window-request-address-eq", 1);
+                    return None;
+                }
                 Self::run_window_request(&window, &me, &from, ix, blob_recycler)
             }
-            Err(_) => {
-                warn!("deserialize crdt packet failed");
-                None
-            }
         }
     }

@@ -1084,23 +1162,18 @@ pub struct TestNode {
     pub sockets: Sockets,
 }

-impl Default for TestNode {
-    fn default() -> Self {
-        Self::new()
-    }
-}
-
 impl TestNode {
-    pub fn new() -> Self {
+    pub fn new_localhost() -> Self {
         let pubkey = KeyPair::new().pubkey();
-        Self::new_with_pubkey(pubkey)
+        Self::new_localhost_with_pubkey(pubkey)
     }
-    pub fn new_with_pubkey(pubkey: PublicKey) -> Self {
-        let transaction = UdpSocket::bind("0.0.0.0:0").unwrap();
-        let gossip = UdpSocket::bind("0.0.0.0:0").unwrap();
-        let replicate = UdpSocket::bind("0.0.0.0:0").unwrap();
-        let requests = UdpSocket::bind("0.0.0.0:0").unwrap();
-        let repair = UdpSocket::bind("0.0.0.0:0").unwrap();
+    pub fn new_localhost_with_pubkey(pubkey: PublicKey) -> Self {
+        let transaction = UdpSocket::bind("127.0.0.1:0").unwrap();
+        let gossip = UdpSocket::bind("127.0.0.1:0").unwrap();
+        let replicate = UdpSocket::bind("127.0.0.1:0").unwrap();
+        let requests = UdpSocket::bind("127.0.0.1:0").unwrap();
+        let repair = UdpSocket::bind("127.0.0.1:0").unwrap();

         let gossip_send = UdpSocket::bind("0.0.0.0:0").unwrap();
         let respond = UdpSocket::bind("0.0.0.0:0").unwrap();
         let broadcast = UdpSocket::bind("0.0.0.0:0").unwrap();
@@ -1178,14 +1251,14 @@ impl TestNode {
 #[cfg(test)]
 mod tests {
     use crdt::{
-        parse_port_or_addr, Crdt, CrdtError, NodeInfo, GOSSIP_PURGE_MILLIS, GOSSIP_SLEEP_MILLIS,
-        MIN_TABLE_SIZE,
+        parse_port_or_addr, Crdt, CrdtError, NodeInfo, Protocol, GOSSIP_PURGE_MILLIS,
+        GOSSIP_SLEEP_MILLIS, MIN_TABLE_SIZE,
     };
     use hash::Hash;
     use logger;
     use packet::BlobRecycler;
     use result::Error;
-    use signature::{KeyPair, KeyPairUtil};
+    use signature::{KeyPair, KeyPairUtil, PublicKey};
     use std::sync::atomic::{AtomicBool, Ordering};
     use std::sync::mpsc::channel;
     use std::sync::{Arc, RwLock};
@@ -1203,6 +1276,107 @@ mod tests {
         let p3 = parse_port_or_addr(None);
         assert_eq!(p3.port(), 8000);
     }
+    #[test]
+    fn test_bad_address() {
+        let d1 = NodeInfo::new(
+            KeyPair::new().pubkey(),
+            "0.0.0.0:1234".parse().unwrap(),
+            "0.0.0.0:1235".parse().unwrap(),
+            "0.0.0.0:1236".parse().unwrap(),
+            "0.0.0.0:1237".parse().unwrap(),
+            "0.0.0.0:1238".parse().unwrap(),
+        );
+        assert_matches!(
+            Crdt::new(d1).err(),
+            Some(Error::CrdtError(CrdtError::BadGossipAddress))
+        );
+        let d1_1 = NodeInfo::new(
+            KeyPair::new().pubkey(),
+            "0.0.0.1:1234".parse().unwrap(),
+            "0.0.0.0:1235".parse().unwrap(),
+            "0.0.0.0:1236".parse().unwrap(),
+            "0.0.0.0:1237".parse().unwrap(),
+            "0.0.0.0:1238".parse().unwrap(),
+        );
+        assert_matches!(
+            Crdt::new(d1_1).err(),
+            Some(Error::CrdtError(CrdtError::BadContactInfo))
+        );
+        let d2 = NodeInfo::new(
+            KeyPair::new().pubkey(),
+            "0.0.0.1:0".parse().unwrap(),
+            "0.0.0.1:0".parse().unwrap(),
+            "0.0.0.1:0".parse().unwrap(),
+            "0.0.0.1:0".parse().unwrap(),
+            "0.0.0.1:0".parse().unwrap(),
+        );
+        assert_matches!(
+            Crdt::new(d2).err(),
+            Some(Error::CrdtError(CrdtError::BadGossipAddress))
+        );
+        let d2_1 = NodeInfo::new(
+            KeyPair::new().pubkey(),
+            "0.0.0.1:1234".parse().unwrap(),
+            "0.0.0.1:0".parse().unwrap(),
+            "0.0.0.1:0".parse().unwrap(),
+            "0.0.0.1:0".parse().unwrap(),
+            "0.0.0.1:0".parse().unwrap(),
+        );
+        assert_matches!(
+            Crdt::new(d2_1).err(),
+            Some(Error::CrdtError(CrdtError::BadContactInfo))
+        );
+        let d3 = NodeInfo::new_unspecified();
+        assert_matches!(
+            Crdt::new(d3).err(),
+            Some(Error::CrdtError(CrdtError::BadGossipAddress))
+        );
+        let d4 = NodeInfo::new_multicast();
+        assert_matches!(
+            Crdt::new(d4).err(),
+            Some(Error::CrdtError(CrdtError::BadGossipAddress))
+        );
+        let mut d5 = NodeInfo::new_multicast();
+        d5.version = 1;
+        assert_matches!(
+            Crdt::new(d5).err(),
+            Some(Error::CrdtError(CrdtError::BadNodeInfo))
+        );
+        let d6 = NodeInfo::new(
+            KeyPair::new().pubkey(),
+            "0.0.0.0:1234".parse().unwrap(),
+            "0.0.0.0:0".parse().unwrap(),
+            "0.0.0.0:0".parse().unwrap(),
+            "0.0.0.0:0".parse().unwrap(),
+            "0.0.0.0:0".parse().unwrap(),
+        );
+        assert_matches!(
+            Crdt::new(d6).err(),
+            Some(Error::CrdtError(CrdtError::BadGossipAddress))
+        );
+        let d7 = NodeInfo::new(
+            KeyPair::new().pubkey(),
+            "0.0.0.1:0".parse().unwrap(),
+            "0.0.0.0:0".parse().unwrap(),
+            "0.0.0.0:0".parse().unwrap(),
+            "0.0.0.0:0".parse().unwrap(),
+            "0.0.0.0:0".parse().unwrap(),
+        );
+        assert_matches!(
+            Crdt::new(d7).err(),
+            Some(Error::CrdtError(CrdtError::BadGossipAddress))
+        );
+        let d8 = NodeInfo::new(
+            KeyPair::new().pubkey(),
+            "0.0.0.1:1234".parse().unwrap(),
+            "0.0.0.0:0".parse().unwrap(),
+            "0.0.0.0:0".parse().unwrap(),
+            "0.0.0.0:0".parse().unwrap(),
+            "0.0.0.0:0".parse().unwrap(),
+        );
+        assert_eq!(Crdt::new(d8).is_ok(), true);
+    }
+
     #[test]
     fn insert_test() {
         let mut d = NodeInfo::new(
@@ -1214,30 +1388,43 @@ mod tests {
             "127.0.0.1:1238".parse().unwrap(),
         );
         assert_eq!(d.version, 0);
-        let mut crdt = Crdt::new(d.clone());
+        let mut crdt = Crdt::new(d.clone()).unwrap();
         assert_eq!(crdt.table[&d.id].version, 0);
+        assert!(!crdt.alive.contains_key(&d.id));
+
         d.version = 2;
         crdt.insert(&d);
+        let liveness = crdt.alive[&d.id];
         assert_eq!(crdt.table[&d.id].version, 2);

         d.version = 1;
         crdt.insert(&d);
         assert_eq!(crdt.table[&d.id].version, 2);
+        assert_eq!(liveness, crdt.alive[&d.id]);
+
+        // Ensure liveness will be updated for version 3
+        sleep(Duration::from_millis(1));
+
+        d.version = 3;
+        crdt.insert(&d);
+        assert_eq!(crdt.table[&d.id].version, 3);
+        assert!(liveness < crdt.alive[&d.id]);
     }
     #[test]
     fn test_new_vote() {
         let d = NodeInfo::new_leader(&"127.0.0.1:1234".parse().unwrap());
         assert_eq!(d.version, 0);
-        let mut crdt = Crdt::new(d.clone());
+        let mut crdt = Crdt::new(d.clone()).unwrap();
         assert_eq!(crdt.table[&d.id].version, 0);
         let leader = NodeInfo::new_leader(&"127.0.0.2:1235".parse().unwrap());
         assert_ne!(d.id, leader.id);
         assert_matches!(
-            crdt.new_vote(0, Hash::default()).err(),
+            crdt.new_vote(Hash::default()).err(),
             Some(Error::CrdtError(CrdtError::NoLeader))
         );
         crdt.insert(&leader);
         assert_matches!(
-            crdt.new_vote(0, Hash::default()).err(),
+            crdt.new_vote(Hash::default()).err(),
             Some(Error::CrdtError(CrdtError::NoLeader))
         );
         crdt.set_leader(leader.id);
@@ -1247,14 +1434,14 @@ mod tests {
             contact_info_version: 0,
         };
         let expected = (v, crdt.table[&leader.id].contact_info.tpu);
-        assert_eq!(crdt.new_vote(0, Hash::default()).unwrap(), expected);
+        assert_eq!(crdt.new_vote(Hash::default()).unwrap(), expected);
     }

     #[test]
     fn test_insert_vote() {
         let d = NodeInfo::new_leader(&"127.0.0.1:1234".parse().unwrap());
         assert_eq!(d.version, 0);
-        let mut crdt = Crdt::new(d.clone());
+        let mut crdt = Crdt::new(d.clone()).unwrap();
         assert_eq!(crdt.table[&d.id].version, 0);
         let vote_same_version = Vote {
             version: d.version,
@@ -1286,7 +1473,7 @@ mod tests {
         // TODO: remove this test once leaders vote
         let d = NodeInfo::new_leader(&"127.0.0.1:1234".parse().unwrap());
         assert_eq!(d.version, 0);
-        let mut crdt = Crdt::new(d.clone());
+        let mut crdt = Crdt::new(d.clone()).unwrap();
         let leader = NodeInfo::new_leader(&"127.0.0.2:1235".parse().unwrap());
         assert_ne!(d.id, leader.id);
         crdt.insert(&leader);
@@ -1355,7 +1542,7 @@ mod tests {
             "127.0.0.1:1237".parse().unwrap(),
             "127.0.0.1:1238".parse().unwrap(),
         );
-        let mut crdt = Crdt::new(d1.clone());
+        let mut crdt = Crdt::new(d1.clone()).expect("Crdt::new");
         let (key, ix, ups) = crdt.get_updates_since(0);
         assert_eq!(key, d1.id);
         assert_eq!(ix, 1);
@@ -1376,7 +1563,7 @@ mod tests {
             sorted(&ups),
             sorted(&vec![d1.clone(), d2.clone(), d3.clone()])
         );
-        let mut crdt2 = Crdt::new(d2.clone());
+        let mut crdt2 = Crdt::new(d2.clone()).expect("Crdt::new");
         crdt2.apply_updates(key, ix, &ups, &vec![]);
         assert_eq!(crdt2.table.values().len(), 3);
         assert_eq!(
@@ -1398,9 +1585,9 @@ mod tests {
             "127.0.0.1:1237".parse().unwrap(),
             "127.0.0.1:1238".parse().unwrap(),
         );
-        let mut crdt = Crdt::new(me.clone());
+        let mut crdt = Crdt::new(me.clone()).expect("Crdt::new");
         let rv = crdt.window_index_request(0);
-        assert_matches!(rv, Err(Error::CrdtError(CrdtError::TooSmall)));
+        assert_matches!(rv, Err(Error::CrdtError(CrdtError::NoPeers)));
         let nxt = NodeInfo::new(
             KeyPair::new().pubkey(),
             "127.0.0.1:1234".parse().unwrap(),
@@ -1411,7 +1598,7 @@ mod tests {
         );
         crdt.insert(&nxt);
         let rv = crdt.window_index_request(0);
-        assert_matches!(rv, Err(Error::CrdtError(CrdtError::TooSmall)));
+        assert_matches!(rv, Err(Error::CrdtError(CrdtError::NoPeers)));
         let nxt = NodeInfo::new(
             KeyPair::new().pubkey(),
             "127.0.0.2:1234".parse().unwrap(),
@@ -1449,6 +1636,30 @@ mod tests {
         assert!(one && two);
     }

+    #[test]
+    fn gossip_request_bad_addr() {
+        let me = NodeInfo::new(
+            KeyPair::new().pubkey(),
+            "127.0.0.1:127".parse().unwrap(),
+            "127.0.0.1:127".parse().unwrap(),
+            "127.0.0.1:127".parse().unwrap(),
+            "127.0.0.1:127".parse().unwrap(),
+            "127.0.0.1:127".parse().unwrap(),
+        );
+
+        let mut crdt = Crdt::new(me).expect("Crdt::new");
+        let nxt1 = NodeInfo::new_unspecified();
+        // Filter out unspecified addresses
+        crdt.insert(&nxt1); //<--- attack!
+        let rv = crdt.gossip_request();
+        assert_matches!(rv, Err(Error::CrdtError(CrdtError::NoPeers)));
+        let nxt2 = NodeInfo::new_multicast();
+        // Filter out multicast addresses
+        crdt.insert(&nxt2); //<--- attack!
+        let rv = crdt.gossip_request();
+        assert_matches!(rv, Err(Error::CrdtError(CrdtError::NoPeers)));
+    }
+
     /// test that gossip requests are eventually generated for all nodes
     #[test]
     fn gossip_request() {
@@ -1460,9 +1671,9 @@ mod tests {
             "127.0.0.1:1237".parse().unwrap(),
             "127.0.0.1:1238".parse().unwrap(),
         );
-        let mut crdt = Crdt::new(me.clone());
+        let mut crdt = Crdt::new(me.clone()).expect("Crdt::new");
         let rv = crdt.gossip_request();
-        assert_matches!(rv, Err(Error::CrdtError(CrdtError::TooSmall)));
+        assert_matches!(rv, Err(Error::CrdtError(CrdtError::NoPeers)));
         let nxt1 = NodeInfo::new(
             KeyPair::new().pubkey(),
             "127.0.0.2:1234".parse().unwrap(),
@@ -1519,7 +1730,7 @@ mod tests {
     fn purge_test() {
         logger::setup();
         let me = NodeInfo::new_leader(&"127.0.0.1:1234".parse().unwrap());
-        let mut crdt = Crdt::new(me.clone());
+        let mut crdt = Crdt::new(me.clone()).expect("Crdt::new");
         let nxt = NodeInfo::new_leader(&"127.0.0.2:1234".parse().unwrap());
         assert_ne!(me.id, nxt.id);
         crdt.set_leader(me.id);
@@ -1557,6 +1768,28 @@ mod tests {
         let rv = crdt.gossip_request().unwrap();
         assert_eq!(rv.0, nxt.contact_info.ncp);
     }
+    #[test]
+    fn purge_leader_test() {
+        logger::setup();
+        let me = NodeInfo::new_leader(&"127.0.0.1:1234".parse().unwrap());
+        let mut crdt = Crdt::new(me.clone()).expect("Crdt::new");
+        let nxt = NodeInfo::new_leader(&"127.0.0.2:1234".parse().unwrap());
+        assert_ne!(me.id, nxt.id);
+        crdt.insert(&nxt);
+        crdt.set_leader(nxt.id);
+        let now = crdt.alive[&nxt.id];
+        let mut nxt2 = NodeInfo::new_leader(&"127.0.0.2:1234".parse().unwrap());
+        crdt.insert(&nxt2);
+        while now == crdt.alive[&nxt2.id] {
+            sleep(Duration::from_millis(GOSSIP_SLEEP_MILLIS));
+            nxt2.version = nxt2.version + 1;
+            crdt.insert(&nxt2);
+        }
+        let len = crdt.table.len() as u64;
+        crdt.purge(now + GOSSIP_PURGE_MILLIS + 1);
+        assert_eq!(len as usize - 1, crdt.table.len());
+        assert_eq!(crdt.my_data().leader_id, PublicKey::default());
+    }

     /// test window requests respond with the right blob, and do not overrun
     #[test]
@@ -1630,7 +1863,7 @@ mod tests {
         let me = NodeInfo::new_leader(&"127.0.0.1:1234".parse().unwrap());
         let leader0 = NodeInfo::new_leader(&"127.0.0.1:1234".parse().unwrap());
         let leader1 = NodeInfo::new_leader(&"127.0.0.1:1234".parse().unwrap());
-        let mut crdt = Crdt::new(me.clone());
+        let mut crdt = Crdt::new(me.clone()).expect("Crdt::new");
         assert_eq!(crdt.top_leader(), None);
         crdt.set_leader(leader0.id);
         assert_eq!(crdt.top_leader().unwrap(), leader0.id);
@@ -1648,4 +1881,41 @@ mod tests {
         crdt.update_leader();
         assert_eq!(crdt.my_data().leader_id, leader1.id);
     }
+
+    /// Validates the node that sent Protocol::ReceiveUpdates gets its
+    /// liveness updated, but not if the node sends Protocol::ReceiveUpdates
+    /// to itself.
+    #[test]
+    fn protocol_requestupdate_alive() {
+        logger::setup();
+        let window = default_window();
+        let recycler = BlobRecycler::default();
+
+        let node = NodeInfo::new_leader(&"127.0.0.1:1234".parse().unwrap());
+        let node_with_same_addr = NodeInfo::new_leader(&"127.0.0.1:1234".parse().unwrap());
+        assert_ne!(node.id, node_with_same_addr.id);
+        let node_with_diff_addr = NodeInfo::new_leader(&"127.0.0.1:4321".parse().unwrap());
+
+        let crdt = Crdt::new(node.clone()).expect("Crdt::new");
+        assert_eq!(crdt.alive.len(), 0);
+
+        let obj = Arc::new(RwLock::new(crdt));
+
+        let request = Protocol::RequestUpdates(1, node.clone());
+        assert!(Crdt::handle_protocol(request, &obj, &window, &recycler).is_none());
+
+        let request = Protocol::RequestUpdates(1, node_with_same_addr.clone());
+        assert!(Crdt::handle_protocol(request, &obj, &window, &recycler).is_none());
+
+        let request = Protocol::RequestUpdates(1, node_with_diff_addr.clone());
+        Crdt::handle_protocol(request, &obj, &window, &recycler);
+
+        let me = obj.write().unwrap();
+
+        // |node| and |node_with_same_addr| should not be in me.alive, but
+        // |node_with_diff_addr| should now be.
+        assert!(!me.alive.contains_key(&node.id));
+        assert!(!me.alive.contains_key(&node_with_same_addr.id));
+        assert!(me.alive[&node_with_diff_addr.id] > 0);
+    }
 }
@@ -16,6 +16,7 @@ use transaction::Transaction;

 pub const TIME_SLICE: u64 = 60;
 pub const REQUEST_CAP: u64 = 1_000_000;
+pub const DRONE_PORT: u16 = 9900;

 #[derive(Serialize, Deserialize, Debug, Clone, Copy)]
 pub enum DroneRequest {
@@ -112,6 +113,10 @@ impl Drone {
                 airdrop_request_amount,
                 client_public_key,
             } => {
+                info!(
+                    "Requesting airdrop of {} to {:?}",
+                    airdrop_request_amount, client_public_key
+                );
                 request_amount = airdrop_request_amount;
                 tx = Transaction::new(
                     &self.mint_keypair,
@@ -261,7 +266,7 @@ mod tests {
         const TPS_BATCH: i64 = 5_000_000;

         logger::setup();
-        let leader = TestNode::new();
+        let leader = TestNode::new_localhost();

         let alice = Mint::new(10_000_000);
         let bank = Bank::new(&alice);
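With `DRONE_PORT` exported from `src/drone.rs`, both the drone binary and the wallet derive the drone's address from the same constant instead of a hard-coded `9900`. The sketch below is illustrative only; the function name is hypothetical and not part of the crate.

```rust
use std::net::SocketAddr;

// Mirrors the DRONE_PORT constant added in the diff above.
const DRONE_PORT: u16 = 9900;

// Illustrative sketch: derive the drone address from the leader's TPU
// address by swapping in the shared port, as the wallet now does.
fn drone_addr_for(leader_tpu: SocketAddr) -> SocketAddr {
    let mut addr = leader_tpu;
    addr.set_port(DRONE_PORT);
    addr
}
```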
@@ -20,7 +20,8 @@ impl<'a, W: Write> EntryWriter<'a, W> {

     fn write_entry(writer: &mut W, entry: &Entry) -> io::Result<()> {
         let serialized = serde_json::to_string(entry).unwrap();
-        writeln!(writer, "{}", serialized)
+        writeln!(writer, "{}", serialized)?;
+        writer.flush()
     }

     pub fn write_entries<I>(writer: &mut W, entries: I) -> io::Result<()>
@@ -195,7 +195,7 @@ impl FullNode {
         let bank = Arc::new(bank);
         let mut thread_hdls = vec![];
         let rpu = Rpu::new(
-            &bank.clone(),
+            &bank,
             node.sockets.requests,
             node.sockets.respond,
             exit.clone(),
@@ -203,20 +203,20 @@ impl FullNode {
         thread_hdls.extend(rpu.thread_hdls());

         let blob_recycler = BlobRecycler::default();
-        let crdt = Arc::new(RwLock::new(Crdt::new(node.data)));
+        let crdt = Arc::new(RwLock::new(Crdt::new(node.data).expect("Crdt::new")));
         let (tpu, blob_receiver) = Tpu::new(
-            &bank.clone(),
-            &crdt.clone(),
+            &bank,
+            &crdt,
             tick_duration,
             node.sockets.transaction,
-            &blob_recycler.clone(),
+            &blob_recycler,
             exit.clone(),
             writer,
         );
         thread_hdls.extend(tpu.thread_hdls());
         let window = FullNode::new_window(ledger_tail, entry_height, &crdt, &blob_recycler);
         let ncp = Ncp::new(
-            &crdt.clone(),
+            &crdt,
             window.clone(),
             node.sockets.gossip,
             node.sockets.gossip_send,
@@ -278,14 +278,14 @@ impl FullNode {
         let bank = Arc::new(bank);
         let mut thread_hdls = vec![];
         let rpu = Rpu::new(
-            &bank.clone(),
+            &bank,
             node.sockets.requests,
             node.sockets.respond,
             exit.clone(),
         );
         thread_hdls.extend(rpu.thread_hdls());

-        let crdt = Arc::new(RwLock::new(Crdt::new(node.data)));
+        let crdt = Arc::new(RwLock::new(Crdt::new(node.data).expect("Crdt::new")));
         crdt.write()
             .expect("'crdt' write lock before insert() in pub fn replicate")
             .insert(&entry_point);
@@ -295,7 +295,7 @@ impl FullNode {
         let window = FullNode::new_window(ledger_tail, entry_height, &crdt, &blob_recycler);

         let ncp = Ncp::new(
-            &crdt.clone(),
+            &crdt,
             window.clone(),
             node.sockets.gossip,
             node.sockets.gossip_send,
@@ -304,7 +304,7 @@ impl FullNode {

         let tvu = Tvu::new(
             keypair,
-            bank.clone(),
+            &bank,
             entry_height,
             crdt.clone(),
             window.clone(),
@@ -318,8 +318,12 @@ impl FullNode {
         FullNode { exit, thread_hdls }
     }

-    pub fn close(self) -> Result<()> {
+    //used for notifying many nodes in parallel to exit
+    pub fn exit(&self) {
         self.exit.store(true, Ordering::Relaxed);
|
||||||
|
}
|
||||||
|
pub fn close(self) -> Result<()> {
|
||||||
|
self.exit();
|
||||||
self.join()
|
self.join()
|
||||||
}
|
}
|
||||||
}
|
}
|
@@ -343,18 +347,39 @@ mod tests {
     use crdt::TestNode;
     use fullnode::FullNode;
     use mint::Mint;
+    use service::Service;
     use signature::{KeyPair, KeyPairUtil};
     use std::sync::atomic::AtomicBool;
     use std::sync::Arc;
     #[test]
     fn validator_exit() {
         let kp = KeyPair::new();
-        let tn = TestNode::new_with_pubkey(kp.pubkey());
+        let tn = TestNode::new_localhost_with_pubkey(kp.pubkey());
         let alice = Mint::new(10_000);
         let bank = Bank::new(&alice);
         let exit = Arc::new(AtomicBool::new(false));
         let entry = tn.data.clone();
         let v = FullNode::new_validator(kp, bank, 0, None, tn, &entry, exit);
-        v.close().unwrap();
+        v.exit();
+        v.join().unwrap();
+    }
+    #[test]
+    fn validator_parallel_exit() {
+        let vals: Vec<FullNode> = (0..2)
+            .map(|_| {
+                let kp = KeyPair::new();
+                let tn = TestNode::new_localhost_with_pubkey(kp.pubkey());
+                let alice = Mint::new(10_000);
+                let bank = Bank::new(&alice);
+                let exit = Arc::new(AtomicBool::new(false));
+                let entry = tn.data.clone();
+                FullNode::new_validator(kp, bank, 0, None, tn, &entry, exit)
+            })
+            .collect();
+        //each validator can exit in parallel to speed many sequential calls to `join`
+        vals.iter().for_each(|v| v.exit());
+        //while join is called sequentially, the above exit call notified all the
+        //validators to exit from all their threads
+        vals.into_iter().for_each(|v| v.join().unwrap());
     }
 }
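A note on the shutdown pattern exercised by `validator_parallel_exit` above: `FullNode::exit()` only flips the shared `AtomicBool`, so a caller can signal every node first and pay the sequential `join` cost afterwards. A minimal sketch of that usage, assuming a `nodes: Vec<FullNode>` built elsewhere (the helper name is illustrative, not part of this diff):

```rust
use solana::fullnode::FullNode;
use solana::service::Service; // join() comes from the Service trait

// Hypothetical helper, not part of this diff.
fn shutdown_all(nodes: Vec<FullNode>) {
    // exit() is non-blocking, so every node starts winding down at once.
    nodes.iter().for_each(|node| node.exit());
    // join() then only waits on threads that are already exiting.
    for node in nodes {
        node.join().expect("fullnode join");
    }
}
```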
@@ -57,7 +57,7 @@ impl InfluxDbMetricsWriter {
 impl MetricsWriter for InfluxDbMetricsWriter {
     fn write(&self, points: Vec<influxdb::Point>) {
         if let Some(ref client) = self.client {
-            info!("submitting {} points", points.len());
+            debug!("submitting {} points", points.len());
             if let Err(err) = client.write_points(
                 influxdb::Points { point: points },
                 Some(influxdb::Precision::Milliseconds),
@@ -184,6 +184,50 @@ pub fn flush() {
     agent.flush();
 }
 
+/// Hook the panic handler to generate a data point on each panic
+pub fn set_panic_hook(program: &'static str) {
+    use std::panic;
+    use std::sync::{Once, ONCE_INIT};
+    static SET_HOOK: Once = ONCE_INIT;
+    SET_HOOK.call_once(|| {
+        let default_hook = panic::take_hook();
+        panic::set_hook(Box::new(move |ono| {
+            default_hook(ono);
+            submit(
+                influxdb::Point::new("panic")
+                    .add_tag("program", influxdb::Value::String(program.to_string()))
+                    .add_tag(
+                        "thread",
+                        influxdb::Value::String(
+                            thread::current().name().unwrap_or("?").to_string(),
+                        ),
+                    )
+                    // The 'one' field exists to give Kapacitor Alerts a numerical value
+                    // to filter on
+                    .add_field("one", influxdb::Value::Integer(1))
+                    .add_field(
+                        "message",
+                        influxdb::Value::String(
+                            // TODO: use ono.message() when it becomes stable
+                            ono.to_string(),
+                        ),
+                    )
+                    .add_field(
+                        "location",
+                        influxdb::Value::String(match ono.location() {
+                            Some(location) => location.to_string(),
+                            None => "?".to_string(),
+                        }),
+                    )
+                    .to_owned(),
+            );
+            // Flush metrics immediately in case the process exits immediately
+            // upon return
+            flush();
+        }));
+    });
+}
+
 #[cfg(test)]
 mod test {
     use super::*;
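The `set_panic_hook` added above chains the previous panic handler and then submits a `panic` data point; the constant `one` field exists only so Kapacitor alerts have a number to filter on. A sketch of the intended call site, assuming the `metrics` module is public like its `submit`/`flush` helpers (the program name is illustrative):

```rust
fn main() {
    // Install once at startup; any later panic also produces an InfluxDB point
    // and flushes metrics before the process unwinds.
    solana::metrics::set_panic_hook("fullnode");
    // ... start the node as usual ...
}
```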
src/nat.rs (8 changes, Normal file → Executable file)
@@ -106,9 +106,13 @@ pub fn udp_public_bind(label: &str, startport: u16, endport: u16) -> UdpSocketPa
         Err(_) => {
             let sender = udp_random_bind(startport, endport, 5).unwrap();
             let local_addr = sender.local_addr().unwrap();
-            info!("Using local address {} for {}", local_addr, label);
+            let pub_ip = get_public_ip_addr().unwrap();
+            let pub_addr = SocketAddr::new(pub_ip, local_addr.port());
+
+            info!("Using source address {} for {}", pub_addr, label);
             UdpSocketPair {
-                addr: private_addr,
+                addr: pub_addr,
                 receiver: sender.try_clone().unwrap(),
                 sender,
             }
@@ -88,12 +88,12 @@ mod tests {
     // test that stage will exit when flag is set
     fn test_exit() {
         let exit = Arc::new(AtomicBool::new(false));
-        let tn = TestNode::new();
-        let crdt = Crdt::new(tn.data.clone());
+        let tn = TestNode::new_localhost();
+        let crdt = Crdt::new(tn.data.clone()).expect("Crdt::new");
         let c = Arc::new(RwLock::new(crdt));
         let w = Arc::new(RwLock::new(vec![]));
         let d = Ncp::new(
-            &c.clone(),
+            &c,
             w,
             tn.sockets.gossip,
             tn.sockets.gossip_send,
@@ -19,7 +19,6 @@ pub type SharedBlobs = VecDeque<SharedBlob>;
 pub type PacketRecycler = Recycler<Packets>;
 pub type BlobRecycler = Recycler<Blob>;
 
-const LOG_RATE: usize = 10;
 pub const NUM_PACKETS: usize = 1024 * 8;
 pub const BLOB_SIZE: usize = 64 * 1024;
 pub const BLOB_DATA_SIZE: usize = BLOB_SIZE - BLOB_HEADER_SIZE;
@@ -188,7 +187,6 @@ impl<T: Default> Recycler<T> {
 
 impl Packets {
     fn run_read_from(&mut self, socket: &UdpSocket) -> Result<usize> {
-        static mut COUNTER: Counter = create_counter!("packets", LOG_RATE);
         self.packets.resize(NUM_PACKETS, Packet::default());
         let mut i = 0;
         //DOCUMENTED SIDE-EFFECT
@@ -203,7 +201,7 @@ impl Packets {
         trace!("receiving on {}", socket.local_addr().unwrap());
         match socket.recv_from(&mut p.data) {
             Err(_) if i > 0 => {
-                inc_counter!(COUNTER, i);
+                inc_new_counter!("packets-recv_count", 1);
                 debug!("got {:?} messages on {}", i, socket.local_addr().unwrap());
                 break;
             }
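The counter changes in the hunks above and below all follow one pattern: the per-site `static mut Counter` declarations created with `create_counter!` (and the `LOG_RATE` constants they used) are dropped, and each call site instead bumps a named metric directly with `inc_new_counter!("name", count)`.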
@@ -214,6 +212,7 @@ impl Packets {
             Ok((nrecv, from)) => {
                 p.meta.size = nrecv;
                 p.meta.set_addr(&from);
+                trace!("got {} bytes from {}", nrecv, from);
                 if i == 0 {
                     socket.set_nonblocking(true)?;
                 }
@@ -407,6 +406,7 @@ impl Blob {
             Ok((nrecv, from)) => {
                 p.meta.size = nrecv;
                 p.meta.set_addr(&from);
+                trace!("got {} bytes from {}", nrecv, from);
                 if i == 0 {
                     socket.set_nonblocking(true)?;
                 }
@@ -423,7 +423,7 @@ impl Blob {
         let p = r.read().expect("'r' read lock in pub fn send_to");
         let a = p.meta.addr();
         if let Err(e) = socket.send_to(&p.data[..p.meta.size], &a) {
-            info!(
+            warn!(
                 "error sending {} byte packet to {:?}: {:?}",
                 p.meta.size, a, e
             );
@@ -27,7 +27,6 @@ pub struct ReplicateStage {
 }
 
 const VOTE_TIMEOUT_MS: u64 = 1000;
-const LOG_RATE: usize = 10;
 
 impl ReplicateStage {
     /// Process entry blobs, already in order
@@ -48,28 +47,29 @@ impl ReplicateStage {
         }
         let blobs_len = blobs.len();
        let entries = ledger::reconstruct_entries_from_blobs(blobs.clone())?;
-        let votes = entries_to_votes(&entries);
-        static mut COUNTER_REPLICATE: Counter = create_counter!("replicate-transactions", LOG_RATE);
-        inc_counter!(
-            COUNTER_REPLICATE,
+        {
+            let votes = entries_to_votes(&entries);
+            let mut wcrdt = crdt.write().unwrap();
+            wcrdt.insert_votes(&votes);
+        };
+        inc_new_counter!(
+            "replicate-transactions",
             entries.iter().map(|x| x.transactions.len()).sum()
         );
         let res = bank.process_entries(entries);
         if res.is_err() {
             error!("process_entries {} {:?}", blobs_len, res);
         }
+        let _ = res?;
         let now = timing::timestamp();
         if now - *last_vote > VOTE_TIMEOUT_MS {
-            let height = res?;
             let last_id = bank.last_id();
             let shared_blob = blob_recycler.allocate();
             let (vote, addr) = {
                 let mut wcrdt = crdt.write().unwrap();
-                wcrdt.insert_votes(&votes);
                 //TODO: doesn't seem like there is a synchronous call to get height and id
-                info!("replicate_stage {} {:?}", height, &last_id[..8]);
-                wcrdt.new_vote(height, last_id)
+                info!("replicate_stage {:?}", &last_id[..8]);
+                wcrdt.new_vote(last_id)
             }?;
             {
                 let mut blob = shared_blob.write().unwrap();
@@ -80,7 +80,9 @@ impl ReplicateStage {
                 blob.meta.set_addr(&addr);
                 blob.meta.size = len;
             }
+            inc_new_counter!("replicate-vote_sent", 1);
             *last_vote = now;
+
             vote_blob_sender.send(VecDeque::from(vec![shared_blob]))?;
         }
         while let Some(blob) = blobs.pop_front() {
@@ -22,6 +22,8 @@ struct Elems {
 #[cfg(feature = "cuda")]
 #[link(name = "cuda_verify_ed25519")]
 extern "C" {
+    fn ed25519_init() -> bool;
+    fn ed25519_set_verbose(val: bool);
     fn ed25519_verify_many(
         vecs: *const Elems,
         num: u32, //number of vecs
@@ -34,6 +36,11 @@ extern "C" {
     ) -> u32;
 }
 
+#[cfg(not(feature = "cuda"))]
+pub fn init() {
+    // stub
+}
+
 #[cfg(not(feature = "cuda"))]
 fn verify_packet(packet: &Packet) -> u8 {
     use ring::signature;
@@ -70,7 +77,6 @@ fn batch_size(batches: &[SharedPackets]) -> usize {
 #[cfg(not(feature = "cuda"))]
 pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
     use rayon::prelude::*;
-    static mut COUNTER: Counter = create_counter!("ed25519_verify", 1);
     let count = batch_size(batches);
     info!("CPU ECDSA for {}", batch_size(batches));
     let rv = batches
@@ -84,14 +90,24 @@ pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
             .collect()
         })
         .collect();
-    inc_counter!(COUNTER, count);
+    inc_new_counter!("ed25519_verify", count);
     rv
 }
 
+#[cfg(feature = "cuda")]
+pub fn init() {
+    unsafe {
+        ed25519_set_verbose(true);
+        if !ed25519_init() {
+            panic!("ed25519_init() failed");
+        }
+        ed25519_set_verbose(false);
+    }
+}
+
 #[cfg(feature = "cuda")]
 pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
     use packet::PACKET_DATA_SIZE;
-    static mut COUNTER: Counter = create_counter!("ed25519_verify_cuda", 1);
     let count = batch_size(batches);
     info!("CUDA ECDSA for {}", batch_size(batches));
     let mut out = Vec::new();
@@ -151,7 +167,7 @@ pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
             num += 1;
         }
     }
-    inc_counter!(COUNTER, count);
+    inc_new_counter!("ed25519_verify", count);
     rvs
 }
 
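Both `init` variants above exist so that callers can invoke `sigverify::init()` unconditionally: the CUDA build runs `ed25519_init()` once through the FFI, while the CPU build gets a no-op stub, which is what `SigVerifyStage::new` relies on in the next hunk.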
@@ -25,6 +25,7 @@ pub struct SigVerifyStage {
 
 impl SigVerifyStage {
     pub fn new(packet_receiver: Receiver<SharedPackets>) -> (Self, Receiver<VerifiedPackets>) {
+        sigverify::init();
         let (verified_sender, verified_receiver) = channel();
         let thread_hdls = Self::verifier_services(packet_receiver, verified_sender);
         (SigVerifyStage { thread_hdls }, verified_receiver)
@@ -77,7 +78,7 @@ impl SigVerifyStage {
         verified_sender: Arc<Mutex<Sender<VerifiedPackets>>>,
     ) -> JoinHandle<()> {
         spawn(move || loop {
-            if let Err(e) = Self::verifier(&packet_receiver.clone(), &verified_sender.clone()) {
+            if let Err(e) = Self::verifier(&packet_receiver, &verified_sender) {
                 match e {
                     Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
                     Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
@@ -18,7 +18,6 @@ use std::sync::{Arc, RwLock};
 use std::thread::{Builder, JoinHandle};
 use std::time::Duration;
 
-const LOG_RATE: usize = 10;
 pub const WINDOW_SIZE: u64 = 2 * 1024;
 pub type PacketReceiver = Receiver<SharedPackets>;
 pub type PacketSender = Sender<SharedPackets>;
@@ -117,7 +116,7 @@ pub fn responder(
                 match e {
                     Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
                     Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
-                    _ => error!("{} responder error: {:?}", name, e),
+                    _ => warn!("{} responder error: {:?}", name, e),
                 }
             }
         })
@@ -127,7 +126,7 @@ pub fn responder(
 //TODO, we would need to stick block authentication before we create the
 //window.
 fn recv_blobs(recycler: &BlobRecycler, sock: &UdpSocket, s: &BlobSender) -> Result<()> {
-    trace!("receiving on {}", sock.local_addr().unwrap());
+    trace!("recv_blobs: receiving on {}", sock.local_addr().unwrap());
     let dq = Blob::recv_from(recycler, sock)?;
     if !dq.is_empty() {
         s.send(dq)?;
@@ -224,9 +223,7 @@ fn repair_window(
     let reqs = find_next_missing(locked_window, crdt, consumed, received)?;
     trace!("{:x}: repair_window missing: {}", debug_id, reqs.len());
     if !reqs.is_empty() {
-        static mut COUNTER_REPAIR: Counter =
-            create_counter!("streamer-repair_window-repair", LOG_RATE);
-        inc_counter!(COUNTER_REPAIR, reqs.len());
+        inc_new_counter!("streamer-repair_window-repair", reqs.len());
         debug!(
             "{:x}: repair_window counter times: {} consumed: {} received: {} missing: {}",
             debug_id,
@@ -301,9 +298,7 @@ fn retransmit_all_leader_blocks(
             *received,
             retransmit_queue.len(),
         );
-        static mut COUNTER_RETRANSMIT: Counter =
-            create_counter!("streamer-recv_window-retransmit", LOG_RATE);
-        inc_counter!(COUNTER_RETRANSMIT, retransmit_queue.len());
+        inc_new_counter!("streamer-recv_window-retransmit", retransmit_queue.len());
         retransmit.send(retransmit_queue)?;
     }
     Ok(())
@@ -413,8 +408,7 @@ fn recv_window(
     while let Ok(mut nq) = r.try_recv() {
         dq.append(&mut nq)
     }
-    static mut COUNTER_RECV: Counter = create_counter!("streamer-recv_window-recv", LOG_RATE);
-    inc_counter!(COUNTER_RECV, dq.len());
+    inc_new_counter!("streamer-recv_window-recv", dq.len());
     debug!(
         "{:x}: RECV_WINDOW {} {}: got packets {}",
         debug_id,
@@ -480,9 +474,7 @@ fn recv_window(
             consume_queue.len(),
         );
         trace!("sending consume_queue.len: {}", consume_queue.len());
-        static mut COUNTER_CONSUME: Counter =
-            create_counter!("streamer-recv_window-consume", LOG_RATE);
-        inc_counter!(COUNTER_CONSUME, consume_queue.len());
+        inc_new_counter!("streamer-recv_window-consume", consume_queue.len());
         s.send(consume_queue)?;
     }
     Ok(())
@@ -591,7 +583,10 @@ pub fn window(
                 match e {
                     Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
                     Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
-                    _ => error!("window error: {:?}", e),
+                    _ => {
+                        inc_new_counter!("streamer-window-error", 1, 1);
+                        error!("window error: {:?}", e);
+                    }
                 }
             }
             let _ = repair_window(
@@ -647,9 +642,7 @@ fn broadcast(
     // Index the blobs
     Crdt::index_blobs(&me, &blobs, receive_index)?;
     // keep the cache of blobs that are broadcast
-    static mut COUNTER_BROADCAST: Counter =
-        create_counter!("streamer-broadcast-sent", LOG_RATE);
-    inc_counter!(COUNTER_BROADCAST, blobs.len());
+    inc_new_counter!("streamer-broadcast-sent", blobs.len());
     {
         let mut win = window.write().unwrap();
         assert!(blobs.len() <= win.len());
@@ -738,8 +731,11 @@ pub fn broadcaster(
                 match e {
                     Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
                     Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
-                    Error::CrdtError(CrdtError::TooSmall) => (), // TODO: Why are the unit-tests throwing hundreds of these?
-                    _ => error!("broadcaster error: {:?}", e),
+                    Error::CrdtError(CrdtError::NoPeers) => (), // TODO: Why are the unit-tests throwing hundreds of these?
+                    _ => {
+                        inc_new_counter!("streamer-broadcaster-error", 1, 1);
+                        error!("broadcaster error: {:?}", e);
+                    }
                 }
             }
         }
@@ -792,7 +788,10 @@ pub fn retransmitter(
                 match e {
                     Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
                     Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
-                    _ => error!("retransmitter error: {:?}", e),
+                    _ => {
+                        inc_new_counter!("streamer-retransmit-error", 1, 1);
+                        error!("retransmitter error: {:?}", e);
+                    }
                 }
             }
         }
@@ -899,9 +898,9 @@ mod test {
     #[test]
     pub fn window_send_test() {
         logger::setup();
-        let tn = TestNode::new();
+        let tn = TestNode::new_localhost();
         let exit = Arc::new(AtomicBool::new(false));
-        let mut crdt_me = Crdt::new(tn.data.clone());
+        let mut crdt_me = Crdt::new(tn.data.clone()).expect("Crdt::new");
         let me_id = crdt_me.my_data().id;
         crdt_me.set_leader(me_id);
         let subs = Arc::new(RwLock::new(crdt_me));
@@ -274,7 +274,7 @@ mod tests {
     #[test]
     fn test_thin_client() {
         logger::setup();
-        let leader = TestNode::new();
+        let leader = TestNode::new_localhost();
         let leader_data = leader.data.clone();
 
         let alice = Mint::new(10_000);
@@ -317,7 +317,7 @@ mod tests {
     #[ignore]
     fn test_bad_sig() {
         logger::setup();
-        let leader = TestNode::new();
+        let leader = TestNode::new_localhost();
         let alice = Mint::new(10_000);
         let bank = Bank::new(&alice);
         let bob_pubkey = KeyPair::new().pubkey();
@@ -371,7 +371,7 @@ mod tests {
     #[test]
     fn test_client_check_signature() {
         logger::setup();
-        let leader = TestNode::new();
+        let leader = TestNode::new_localhost();
         let alice = Mint::new(10_000);
         let bank = Bank::new(&alice);
         let bob_pubkey = KeyPair::new().pubkey();
@@ -63,7 +63,7 @@ impl Tpu {
         let packet_recycler = PacketRecycler::default();
 
         let (fetch_stage, packet_receiver) =
-            FetchStage::new(transactions_socket, exit, &packet_recycler.clone());
+            FetchStage::new(transactions_socket, exit, &packet_recycler);
 
         let (sigverify_stage, verified_receiver) = SigVerifyStage::new(packet_receiver);
 
src/tvu.rs (31 changes)
@@ -70,7 +70,7 @@ impl Tvu {
     /// * `exit` - The exit signal.
     pub fn new(
         keypair: KeyPair,
-        bank: Arc<Bank>,
+        bank: &Arc<Bank>,
         entry_height: u64,
         crdt: Arc<RwLock<Crdt>>,
         window: Window,
@@ -83,22 +83,27 @@ impl Tvu {
         let (fetch_stage, blob_fetch_receiver) = BlobFetchStage::new_multi_socket(
             vec![replicate_socket, repair_socket],
             exit,
-            &blob_recycler.clone(),
+            &blob_recycler,
         );
         //TODO
         //the packets coming out of blob_receiver need to be sent to the GPU and verified
         //then sent to the window, which does the erasure coding reconstruction
         let (window_stage, blob_window_receiver) = WindowStage::new(
-            &crdt.clone(),
+            &crdt,
             window,
             entry_height,
             retransmit_socket,
-            &blob_recycler.clone(),
+            &blob_recycler,
             blob_fetch_receiver,
         );
 
-        let replicate_stage =
-            ReplicateStage::new(keypair, bank, crdt, blob_recycler, blob_window_receiver);
+        let replicate_stage = ReplicateStage::new(
+            keypair,
+            bank.clone(),
+            crdt,
+            blob_recycler,
+            blob_window_receiver,
+        );
 
         Tvu {
             replicate_stage,
@@ -168,21 +173,21 @@ pub mod tests {
     #[test]
     fn test_replicate() {
         logger::setup();
-        let leader = TestNode::new();
+        let leader = TestNode::new_localhost();
         let target1_kp = KeyPair::new();
-        let target1 = TestNode::new_with_pubkey(target1_kp.pubkey());
-        let target2 = TestNode::new();
+        let target1 = TestNode::new_localhost_with_pubkey(target1_kp.pubkey());
+        let target2 = TestNode::new_localhost();
         let exit = Arc::new(AtomicBool::new(false));
 
         //start crdt_leader
-        let mut crdt_l = Crdt::new(leader.data.clone());
+        let mut crdt_l = Crdt::new(leader.data.clone()).expect("Crdt::new");
         crdt_l.set_leader(leader.data.id);
 
         let cref_l = Arc::new(RwLock::new(crdt_l));
         let dr_l = new_ncp(cref_l, leader.sockets.gossip, exit.clone()).unwrap();
 
         //start crdt2
-        let mut crdt2 = Crdt::new(target2.data.clone());
+        let mut crdt2 = Crdt::new(target2.data.clone()).expect("Crdt::new");
         crdt2.insert(&leader.data);
         crdt2.set_leader(leader.data.id);
         let leader_id = leader.data.id;
@@ -217,7 +222,7 @@ pub mod tests {
         let bank = Arc::new(Bank::new(&mint));
 
         //start crdt1
-        let mut crdt1 = Crdt::new(target1.data.clone());
+        let mut crdt1 = Crdt::new(target1.data.clone()).expect("Crdt::new");
         crdt1.insert(&leader.data);
         crdt1.set_leader(leader.data.id);
         let cref1 = Arc::new(RwLock::new(crdt1));
@@ -225,7 +230,7 @@ pub mod tests {
 
         let tvu = Tvu::new(
             target1_kp,
-            bank.clone(),
+            &bank,
             0,
             cref1,
             dr_1.1,
@@ -3,6 +3,7 @@
 //! stdout, and then sends the Entry to its output channel.
 
 use bank::Bank;
+use counter::Counter;
 use crdt::Crdt;
 use entry::Entry;
 use entry_writer::EntryWriter;
@@ -12,6 +13,7 @@ use result::{Error, Result};
 use service::Service;
 use std::collections::VecDeque;
 use std::io::Write;
+use std::sync::atomic::AtomicUsize;
 use std::sync::mpsc::{channel, Receiver, RecvTimeoutError};
 use std::sync::{Arc, RwLock};
 use std::thread::{self, Builder, JoinHandle};
@@ -41,6 +43,8 @@ impl WriteStage {
         let mut blobs = VecDeque::new();
         entries.to_blobs(blob_recycler, &mut blobs);
         if !blobs.is_empty() {
+            inc_new_counter!("write_stage-broadcast_vote-count", votes.len());
+            inc_new_counter!("write_stage-broadcast_blobs-count", blobs.len());
             trace!("broadcasting {}", blobs.len());
             blob_sender.send(blobs)?;
         }
@@ -71,7 +75,10 @@ impl WriteStage {
                 match e {
                     Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
                     Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
-                    _ => error!("{:?}", e),
+                    _ => {
+                        inc_new_counter!("write_stage-error", 1);
+                        error!("{:?}", e);
+                    }
                 }
             };
         }
@@ -16,8 +16,8 @@ use std::thread::sleep;
 use std::time::Duration;
 
 fn test_node(exit: Arc<AtomicBool>) -> (Arc<RwLock<Crdt>>, Ncp, UdpSocket) {
-    let tn = TestNode::new();
-    let crdt = Crdt::new(tn.data.clone());
+    let tn = TestNode::new_localhost();
+    let crdt = Crdt::new(tn.data.clone()).expect("Crdt::new");
     let c = Arc::new(RwLock::new(crdt));
     let w = Arc::new(RwLock::new(vec![]));
     let d = Ncp::new(
@@ -11,6 +11,7 @@ use solana::fullnode::{FullNode, LedgerFile};
 use solana::logger;
 use solana::mint::Mint;
 use solana::ncp::Ncp;
+use solana::service::Service;
 use solana::signature::{KeyPair, KeyPairUtil, PublicKey};
 use solana::streamer::default_window;
 use solana::thin_client::ThinClient;
@@ -24,18 +25,18 @@ use std::time::Duration;
 fn converge(leader: &NodeInfo, num_nodes: usize) -> Vec<NodeInfo> {
     //lets spy on the network
     let exit = Arc::new(AtomicBool::new(false));
-    let mut spy = TestNode::new();
+    let mut spy = TestNode::new_localhost();
     let daddr = "0.0.0.0:0".parse().unwrap();
     let me = spy.data.id.clone();
     spy.data.contact_info.tvu = daddr;
     spy.data.contact_info.rpu = daddr;
-    let mut spy_crdt = Crdt::new(spy.data);
+    let mut spy_crdt = Crdt::new(spy.data).expect("Crdt::new");
     spy_crdt.insert(&leader);
     spy_crdt.set_leader(leader.id);
     let spy_ref = Arc::new(RwLock::new(spy_crdt));
     let spy_window = default_window();
     let ncp = Ncp::new(
-        &spy_ref.clone(),
+        &spy_ref,
         spy_window,
         spy.sockets.gossip,
         spy.sockets.gossip_send,
@@ -86,7 +87,7 @@ fn test_multi_node_validator_catchup_from_zero() {
     logger::setup();
     const N: usize = 5;
     trace!("test_multi_node_validator_catchup_from_zero");
-    let leader = TestNode::new();
+    let leader = TestNode::new_localhost();
     let leader_data = leader.data.clone();
     let bob_pubkey = KeyPair::new().pubkey();
 
@@ -101,7 +102,7 @@ fn test_multi_node_validator_catchup_from_zero() {
     let mut nodes = vec![server];
     for _ in 0..N {
         let keypair = KeyPair::new();
-        let validator = TestNode::new_with_pubkey(keypair.pubkey());
+        let validator = TestNode::new_localhost_with_pubkey(keypair.pubkey());
         let mut val = FullNode::new(
             validator,
             false,
@@ -135,7 +136,7 @@ fn test_multi_node_validator_catchup_from_zero() {
     success = 0;
     // start up another validator, converge and then check everyone's balances
     let keypair = KeyPair::new();
-    let validator = TestNode::new_with_pubkey(keypair.pubkey());
+    let validator = TestNode::new_localhost_with_pubkey(keypair.pubkey());
     let val = FullNode::new(
         validator,
         false,
@@ -186,7 +187,7 @@ fn test_multi_node_basic() {
     logger::setup();
     const N: usize = 5;
     trace!("test_multi_node_basic");
-    let leader = TestNode::new();
+    let leader = TestNode::new_localhost();
     let leader_data = leader.data.clone();
     let bob_pubkey = KeyPair::new().pubkey();
     let (alice, ledger_path) = genesis(10_000);
@@ -200,7 +201,7 @@ fn test_multi_node_basic() {
     let mut nodes = vec![server];
     for _ in 0..N {
         let keypair = KeyPair::new();
-        let validator = TestNode::new_with_pubkey(keypair.pubkey());
+        let validator = TestNode::new_localhost_with_pubkey(keypair.pubkey());
        let val = FullNode::new(
            validator,
            false,
@@ -239,7 +240,7 @@ fn test_multi_node_basic() {
 #[test]
 fn test_boot_validator_from_file() {
     logger::setup();
-    let leader = TestNode::new();
+    let leader = TestNode::new_localhost();
     let bob_pubkey = KeyPair::new().pubkey();
     let (alice, ledger_path) = genesis(100_000);
     let leader_data = leader.data.clone();
@@ -258,7 +259,7 @@ fn test_boot_validator_from_file() {
     assert_eq!(leader_balance, 1000);
 
     let keypair = KeyPair::new();
-    let validator = TestNode::new_with_pubkey(keypair.pubkey());
+    let validator = TestNode::new_localhost_with_pubkey(keypair.pubkey());
     let validator_data = validator.data.clone();
     let val_fullnode = FullNode::new(
         validator,
@@ -277,7 +278,7 @@ fn test_boot_validator_from_file() {
 }
 
 fn create_leader(ledger_path: &str) -> (NodeInfo, FullNode) {
-    let leader = TestNode::new();
+    let leader = TestNode::new_localhost();
     let leader_data = leader.data.clone();
     let leader_fullnode = FullNode::new(
         leader,
@@ -328,7 +329,7 @@ fn test_leader_restart_validator_start_from_old_ledger() {
 
     // start validator from old ledger
     let keypair = KeyPair::new();
-    let validator = TestNode::new_with_pubkey(keypair.pubkey());
+    let validator = TestNode::new_localhost_with_pubkey(keypair.pubkey());
     let validator_data = validator.data.clone();
     let val_fullnode = FullNode::new(
         validator,
@@ -369,7 +370,7 @@ fn test_leader_restart_validator_start_from_old_ledger() {
 fn test_multi_node_dynamic_network() {
     logger::setup();
     const N: usize = 60;
-    let leader = TestNode::new();
+    let leader = TestNode::new_localhost();
     let bob_pubkey = KeyPair::new().pubkey();
     let (alice, ledger_path) = genesis(100_000);
     let leader_data = leader.data.clone();
@@ -392,7 +393,7 @@ fn test_multi_node_dynamic_network() {
         .into_iter()
         .map(|n| {
             let keypair = KeyPair::new();
-            let validator = TestNode::new_with_pubkey(keypair.pubkey());
+            let validator = TestNode::new_localhost_with_pubkey(keypair.pubkey());
             let rd = validator.data.clone();
             //send some tokens to the new validator
             let bal =
@@ -410,6 +411,7 @@ fn test_multi_node_dynamic_network() {
         })
         .collect();
 
+    let mut consecutive_success = 0;
     for i in 0..N {
         //verify leader can do transfer
         let expected = ((i + 3) * 500) as i64;
@@ -452,13 +454,25 @@ fn test_multi_node_dynamic_network() {
             validators.len(),
             distance
         );
-        //assert_eq!(success, validators.len());
+        if success == validators.len() && distance == 0 {
+            consecutive_success += 1;
+        } else {
+            consecutive_success = 0;
+        }
+        if consecutive_success == 10 {
+            break;
+        }
        }
    }
-    for (_, node) in validators {
-        node.close().unwrap();
+    assert_eq!(consecutive_success, 10);
+    for (_, node) in &validators {
+        node.exit();
    }
-    server.close().unwrap();
+    server.exit();
+    for (_, node) in validators {
+        node.join().unwrap();
+    }
+    server.join().unwrap();
 
     std::fs::remove_file(ledger_path).unwrap();
 }