Compare commits
257 Commits
v0.6.0-bet
...
v0.7.0-alp
Author | SHA1 | Date | |
---|---|---|---|
|
be5f2ef9b9 | ||
|
adfcb79387 | ||
|
73c4c0ac5f | ||
|
6fc601f696 | ||
|
07111fb7bb | ||
|
a06d2170b0 | ||
|
7f53ea3bf3 | ||
|
b2accd1c2a | ||
|
8ef8a8dea7 | ||
|
e929404676 | ||
|
c2258bedae | ||
|
215fdbb7ed | ||
|
ee998f6882 | ||
|
826e95afca | ||
|
47583d48e7 | ||
|
e759cdf061 | ||
|
88503c2a09 | ||
|
d5be23dffe | ||
|
80c01dc085 | ||
|
45b2549fa9 | ||
|
c7ce454188 | ||
|
7059ea42d6 | ||
|
8ea1c29c9b | ||
|
33bbfdbc9b | ||
|
5de54f8853 | ||
|
a1ac41218a | ||
|
55fc647568 | ||
|
e83e898eed | ||
|
eb07e4588b | ||
|
563f834c96 | ||
|
183178681d | ||
|
8dba53e494 | ||
|
e4782b19a3 | ||
|
ec86b1dffa | ||
|
6cb8266c7b | ||
|
9c50302a39 | ||
|
3313c69898 | ||
|
530c6ca7ec | ||
|
07ed2fb523 | ||
|
d9ec380a15 | ||
|
b60eb3a899 | ||
|
b4df69791b | ||
|
c21b8a22b9 | ||
|
475a76e656 | ||
|
7ba5d5ef86 | ||
|
737dc1ddde | ||
|
164bf19b36 | ||
|
25976771d9 | ||
|
f2198c2e9a | ||
|
eec19c6d2c | ||
|
30e03feb5f | ||
|
58cd3bde9f | ||
|
662bfb7b88 | ||
|
5f3e3a17d3 | ||
|
feba2d9975 | ||
|
e3e3a1c457 | ||
|
90628f3c8d | ||
|
f6bcadb79d | ||
|
d4ac16773c | ||
|
96f044d2bf | ||
|
f31868b913 | ||
|
73b0ff5b55 | ||
|
64cf69045a | ||
|
e57dae0f31 | ||
|
6386e7d5cf | ||
|
4bad103da9 | ||
|
30a26adb7c | ||
|
8be4adfc0a | ||
|
fed4cc3965 | ||
|
7d1e074683 | ||
|
00516e50a1 | ||
|
e83d76fbd9 | ||
|
304f152315 | ||
|
3a82ebf7fd | ||
|
0253d34467 | ||
|
9209f9acde | ||
|
3dbbb398df | ||
|
17e8ad110f | ||
|
5e91d31ed3 | ||
|
fad9d20820 | ||
|
fe9a1c8580 | ||
|
cd6d7d5198 | ||
|
771478bc68 | ||
|
c4a59896f8 | ||
|
3eb1608403 | ||
|
8fde70d4dc | ||
|
5a047833ed | ||
|
f6c28e6be1 | ||
|
0ebf10d19d | ||
|
d3005d3ef3 | ||
|
effcef2184 | ||
|
89fc0ad7a9 | ||
|
410272ee1d | ||
|
1c97bf50b6 | ||
|
4ecd2c9d0b | ||
|
e592243a09 | ||
|
2f4a92e352 | ||
|
ceafc29040 | ||
|
b20efabfd2 | ||
|
85b6e7293c | ||
|
6aced927ad | ||
|
75997e6c08 | ||
|
9040d00110 | ||
|
8ebc5c6b07 | ||
|
d4807790ff | ||
|
0de5e7a285 | ||
|
c40000aeda | ||
|
31198bc105 | ||
|
92599acfca | ||
|
f6e70779fe | ||
|
3017bde686 | ||
|
9d84ec4bb3 | ||
|
586141adb2 | ||
|
3f763f99e2 | ||
|
15c7f36ea3 | ||
|
04d1a083fa | ||
|
327ee1dae8 | ||
|
22885c3e64 | ||
|
94ededb54c | ||
|
af6a07697a | ||
|
5f1d8c95eb | ||
|
7d9e032407 | ||
|
bc918a5ad5 | ||
|
ee54ce4727 | ||
|
e85bf2f2d5 | ||
|
a7460ffbd1 | ||
|
7fe1fd2f95 | ||
|
d30670e92e | ||
|
9b202c6e1e | ||
|
87946eafd5 | ||
|
7575d3c726 | ||
|
8b9713a934 | ||
|
ec713c18c4 | ||
|
c24b0a1a3f | ||
|
34e0cb0092 | ||
|
7b7c7cba21 | ||
|
c45343dd30 | ||
|
b7f6603c1f | ||
|
2d3b052dea | ||
|
dcb6234771 | ||
|
e44d423e83 | ||
|
5435bb734c | ||
|
13f59adf61 | ||
|
0fce3368d3 | ||
|
1ee5c81267 | ||
|
3bb9d5eb50 | ||
|
efb23f7cf9 | ||
|
013f4674de | ||
|
6966b25d9c | ||
|
d513f56c8c | ||
|
7aa05618a3 | ||
|
cdfbbe5e60 | ||
|
fe7d1cb81c | ||
|
c2a9395a4b | ||
|
586279bcfc | ||
|
8bd10e7c4c | ||
|
928e6165bc | ||
|
77c9e801aa | ||
|
c78132417f | ||
|
849928887e | ||
|
ba1163d49f | ||
|
6f9c89af39 | ||
|
246b8b1242 | ||
|
f0db68cb75 | ||
|
f0d1fdfb46 | ||
|
3b8b2e030a | ||
|
b4fee677a5 | ||
|
fe706583f9 | ||
|
d0e0c17ece | ||
|
5aaa38bcaf | ||
|
6ff9b27f8e | ||
|
3f4e035506 | ||
|
57d9fbb927 | ||
|
ee44e51b30 | ||
|
5011f24123 | ||
|
d1eda334f3 | ||
|
2ae5ce9f2c | ||
|
4f5ac78b7e | ||
|
074c9af020 | ||
|
2da2d4e365 | ||
|
8eb76ab2a5 | ||
|
a710d95243 | ||
|
a06535d7ed | ||
|
f511ac9be7 | ||
|
e28ad2177e | ||
|
cb16fe84cd | ||
|
ec3569aa39 | ||
|
246edecf53 | ||
|
34834c5af9 | ||
|
b845245614 | ||
|
5711fb9969 | ||
|
d1eaecde9a | ||
|
00c8505d1e | ||
|
33f01efe69 | ||
|
377d312c81 | ||
|
badf5d5412 | ||
|
0339f90b40 | ||
|
5455e8e6a9 | ||
|
6843b71a0d | ||
|
634408b5e8 | ||
|
d053f78b74 | ||
|
93b6fceb2f | ||
|
ac7860c35d | ||
|
b0eab8729f | ||
|
cb81f80b31 | ||
|
ea97529185 | ||
|
f1075191fe | ||
|
74c479fbc9 | ||
|
7e788d3a17 | ||
|
69b3c75f0d | ||
|
b2c2fa40a2 | ||
|
50458d9524 | ||
|
9679e3e356 | ||
|
6db9f92b8a | ||
|
4a44498d45 | ||
|
216510c573 | ||
|
fd338c3097 | ||
|
b66ebf5dec | ||
|
5da99de579 | ||
|
3aa2907bd6 | ||
|
05d1618659 | ||
|
86113811f2 | ||
|
53ecaa03f1 | ||
|
205c1aa505 | ||
|
9b54c1542b | ||
|
93d5d1b2ad | ||
|
4c0f3ed6f3 | ||
|
2580155bf2 | ||
|
6ab0dd4df9 | ||
|
4b8c36b6b9 | ||
|
359a8397c0 | ||
|
c9fd5d74b5 | ||
|
391744af97 | ||
|
587ab29e09 | ||
|
80f07dadc5 | ||
|
60609a44ba | ||
|
30c8fa46b4 | ||
|
7aab7d2f82 | ||
|
a8e1c44663 | ||
|
a2b92c35e1 | ||
|
9f2086c772 | ||
|
3eb005d492 | ||
|
68955bfcf4 | ||
|
9ac7070e08 | ||
|
e44e81bd17 | ||
|
f5eedd2d19 | ||
|
46059a37eb | ||
|
adc655a3a2 | ||
|
3058f80489 | ||
|
df98cae4b6 | ||
|
d327e0aabd | ||
|
17d3a6763c | ||
|
02c5b0343b | ||
|
2888e45fea | ||
|
f1311075d9 | ||
|
6c380e04a3 | ||
|
cef1c208a5 |
@@ -1,2 +1,5 @@
|
||||
ignore:
|
||||
- "src/bin"
|
||||
coverage:
|
||||
status:
|
||||
patch: off
|
||||
|
37
Cargo.toml
37
Cargo.toml
@@ -1,9 +1,10 @@
|
||||
[package]
|
||||
name = "solana"
|
||||
description = "The World's Fastest Blockchain"
|
||||
version = "0.6.0-beta"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "0.7.0-alpha"
|
||||
documentation = "https://docs.rs/solana"
|
||||
homepage = "http://solana.com/"
|
||||
readme = "README.md"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
authors = [
|
||||
"Anatoly Yakovenko <anatoly@solana.com>",
|
||||
@@ -20,6 +21,10 @@ path = "src/bin/client-demo.rs"
|
||||
name = "solana-fullnode"
|
||||
path = "src/bin/fullnode.rs"
|
||||
|
||||
[[bin]]
|
||||
name = "solana-fullnode-config"
|
||||
path = "src/bin/fullnode-config.rs"
|
||||
|
||||
[[bin]]
|
||||
name = "solana-genesis"
|
||||
path = "src/bin/genesis.rs"
|
||||
@@ -36,6 +41,10 @@ path = "src/bin/mint.rs"
|
||||
name = "solana-mint-demo"
|
||||
path = "src/bin/mint-demo.rs"
|
||||
|
||||
[[bin]]
|
||||
name = "solana-drone"
|
||||
path = "src/bin/drone.rs"
|
||||
|
||||
[badges]
|
||||
codecov = { repository = "solana-labs/solana", branch = "master", service = "github" }
|
||||
|
||||
@@ -48,7 +57,7 @@ erasure = []
|
||||
[dependencies]
|
||||
rayon = "1.0.0"
|
||||
sha2 = "0.7.0"
|
||||
generic-array = { version = "0.9.0", default-features = false, features = ["serde"] }
|
||||
generic-array = { version = "0.11.1", default-features = false, features = ["serde"] }
|
||||
serde = "1.0.27"
|
||||
serde_derive = "1.0.27"
|
||||
serde_json = "1.0.10"
|
||||
@@ -56,13 +65,15 @@ ring = "0.12.1"
|
||||
untrusted = "0.5.1"
|
||||
bincode = "1.0.0"
|
||||
chrono = { version = "0.4.0", features = ["serde"] }
|
||||
log = "^0.4.1"
|
||||
env_logger = "^0.4.1"
|
||||
matches = "^0.1.6"
|
||||
byteorder = "^1.2.1"
|
||||
libc = "^0.2.1"
|
||||
getopts = "^0.2"
|
||||
isatty = "0.1"
|
||||
futures = "0.1"
|
||||
rand = "0.4.2"
|
||||
pnet = "^0.21.0"
|
||||
log = "0.4.2"
|
||||
env_logger = "0.5.10"
|
||||
matches = "0.1.6"
|
||||
byteorder = "1.2.1"
|
||||
libc = "0.2.1"
|
||||
getopts = "0.2"
|
||||
atty = "0.2"
|
||||
rand = "0.5.1"
|
||||
pnet_datalink = "0.21.0"
|
||||
tokio = "0.1"
|
||||
tokio-codec = "0.1"
|
||||
tokio-io = "0.1"
|
||||
|
2
LICENSE
2
LICENSE
@@ -1,4 +1,4 @@
|
||||
Copyright 2018 Anatoly Yakovenko, Greg Fitzgerald and Stephen Akridge
|
||||
Copyright 2018 Solana Labs, Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
186
README.md
186
README.md
@@ -1,27 +1,42 @@
|
||||
[](https://crates.io/crates/solana)
|
||||
[](https://docs.rs/solana)
|
||||
[](https://buildkite.com/solana-labs/solana)
|
||||
[](https://solana-ci-gate.herokuapp.com/buildkite_public_log?https://buildkite.com/solana-labs/solana/builds/latest/master)
|
||||
[](https://codecov.io/gh/solana-labs/solana)
|
||||
|
||||
Blockchain, Rebuilt for Scale
|
||||
===
|
||||
|
||||
Solana™ is a new blockchain architecture built from the ground up for scale. The architecture supports
|
||||
up to 710 thousand transactions per second on a gigabit network.
|
||||
|
||||
Disclaimer
|
||||
===
|
||||
|
||||
All claims, content, designs, algorithms, estimates, roadmaps, specifications, and performance measurements described in this project are done with the author's best effort. It is up to the reader to check and validate their accuracy and truthfulness. Furthermore nothing in this project constitutes a solicitation for investment.
|
||||
|
||||
Solana: High Performance Blockchain
|
||||
===
|
||||
|
||||
Solana™ is a new architecture for a high performance blockchain. It aims to support
|
||||
over 700 thousand transactions per second on a gigabit network.
|
||||
|
||||
Introduction
|
||||
===
|
||||
|
||||
It's possible for a centralized database to process 710,000 transactions per second on a standard gigabit network if the transactions are, on average, no more than 178 bytes. A centralized database can also replicate itself and maintain high availability without significantly compromising that transaction rate using the distributed system technique known as Optimistic Concurrency Control [H.T.Kung, J.T.Robinson (1981)]. At Solana, we're demonstrating that these same theoretical limits apply just as well to blockchain on an adversarial network. The key ingredient? Finding a way to share time when nodes can't trust one-another. Once nodes can trust time, suddenly ~40 years of distributed systems research becomes applicable to blockchain! Furthermore, and much to our surprise, it can implemented using a mechanism that has existed in Bitcoin since day one. The Bitcoin feature is called nLocktime and it can be used to postdate transactions using block height instead of a timestamp. As a Bitcoin client, you'd use block height instead of a timestamp if you don't trust the network. Block height turns out to be an instance of what's being called a Verifiable Delay Function in cryptography circles. It's a cryptographically secure way to say time has passed. In Solana, we use a far more granular verifiable delay function, a SHA 256 hash chain, to checkpoint the ledger and coordinate consensus. With it, we implement Optimistic Concurrency Control and are now well in route towards that theoretical limit of 710,000 transactions per second.
|
||||
It's possible for a centralized database to process 710,000 transactions per second on a standard gigabit network if the transactions are, on average, no more than 176 bytes. A centralized database can also replicate itself and maintain high availability without significantly compromising that transaction rate using the distributed system technique known as Optimistic Concurrency Control [H.T.Kung, J.T.Robinson (1981)]. At Solana, we're demonstrating that these same theoretical limits apply just as well to blockchain on an adversarial network. The key ingredient? Finding a way to share time when nodes can't trust one-another. Once nodes can trust time, suddenly ~40 years of distributed systems research becomes applicable to blockchain! Furthermore, and much to our surprise, it can implemented using a mechanism that has existed in Bitcoin since day one. The Bitcoin feature is called nLocktime and it can be used to postdate transactions using block height instead of a timestamp. As a Bitcoin client, you'd use block height instead of a timestamp if you don't trust the network. Block height turns out to be an instance of what's being called a Verifiable Delay Function in cryptography circles. It's a cryptographically secure way to say time has passed. In Solana, we use a far more granular verifiable delay function, a SHA 256 hash chain, to checkpoint the ledger and coordinate consensus. With it, we implement Optimistic Concurrency Control and are now well in route towards that theoretical limit of 710,000 transactions per second.
|
||||
|
||||
Running the demo
|
||||
|
||||
Testnet Demos
|
||||
===
|
||||
|
||||
The Solana repo contains all the scripts you might need to spin up your own
|
||||
local testnet. Depending on what you're looking to achieve, you may want to
|
||||
run a different variation, as the full-fledged, performance-enhanced
|
||||
multinode testnet is considerably more complex to set up than a Rust-only,
|
||||
singlenode testnode. If you are looking to develop high-level features, such
|
||||
as experimenting with smart contracts, save yourself some setup headaches and
|
||||
stick to the Rust-only singlenode demo. If you're doing performance optimization
|
||||
of the transaction pipeline, consider the enhanced singlenode demo. If you're
|
||||
doing consensus work, you'll need at least a Rust-only multinode demo. If you want
|
||||
to reproduce our TPS metrics, run the enhanced multinode demo.
|
||||
|
||||
For all four variations, you'd need the latest Rust toolchain and the Solana
|
||||
source code:
|
||||
|
||||
First, install Rust's package manager Cargo.
|
||||
|
||||
```bash
|
||||
@@ -36,62 +51,106 @@ $ git clone https://github.com/solana-labs/solana.git
|
||||
$ cd solana
|
||||
```
|
||||
|
||||
The fullnode server is initialized with a ledger from stdin and
|
||||
generates new ledger entries on stdout. To create the input ledger, we'll need
|
||||
to create *the mint* and use it to generate a *genesis ledger*. It's done in
|
||||
two steps because the mint-demo.json file contains private keys that will be
|
||||
used later in this demo.
|
||||
The demo code is sometimes broken between releases as we add new low-level
|
||||
features, so if this is your first time running the demo, you'll improve
|
||||
your odds of success if you check out the
|
||||
[latest release](https://github.com/solana-labs/solana/releases)
|
||||
before proceeding:
|
||||
|
||||
```bash
|
||||
$ echo 1000000000 | cargo run --release --bin solana-mint-demo > mint-demo.json
|
||||
$ cat mint-demo.json | cargo run --release --bin solana-genesis-demo > genesis.log
|
||||
$ git checkout v0.6.1
|
||||
```
|
||||
|
||||
Before you start the server, make sure you know the IP address of the machine ou want to be the leader for the demo, and make sure that udp ports 8000-10000 are open on all the machines you wan to test with. Now you can start the server:
|
||||
Configuration Setup
|
||||
---
|
||||
|
||||
The network is initialized with a genesis ledger and leader/validator configuration files.
|
||||
These files can be generated by running the following script.
|
||||
|
||||
```bash
|
||||
$ ./multinode-demo/setup.sh
|
||||
```
|
||||
|
||||
Singlenode Testnet
|
||||
---
|
||||
|
||||
Before you start a fullnode, make sure you know the IP address of the machine you
|
||||
want to be the leader for the demo, and make sure that udp ports 8000-10000 are
|
||||
open on all the machines you want to test with.
|
||||
|
||||
Now start the server:
|
||||
|
||||
```bash
|
||||
$ cat ./multinode-demo/leader.sh
|
||||
#!/bin/bash
|
||||
export RUST_LOG=solana=info
|
||||
sudo sysctl -w net.core.rmem_max=26214400
|
||||
cat genesis.log leader.log | cargo run --release --features cuda --bin solana-fullnode -- -s leader.json -l leader.json -b 8000 -d 2>&1 | tee leader-tee.log
|
||||
$ ./multinode-demo/leader.sh
|
||||
```
|
||||
|
||||
Wait a few seconds for the server to initialize. It will print "Ready." when it's safe
|
||||
to start sending it transactions.
|
||||
|
||||
Now you can start some validators:
|
||||
To run a performance-enhanced fullnode on Linux, download `libcuda_verify_ed25519.a`. Enable
|
||||
it by adding `--features=cuda` to the line that runs `solana-fullnode` in
|
||||
`leader.sh`. [CUDA 9.2](https://developer.nvidia.com/cuda-downloads) must be installed on your system.
|
||||
|
||||
```bash
|
||||
$ cat ./multinode-demo/validator.sh
|
||||
#!/bin/bash
|
||||
rsync -v -e ssh $1:~/solana/mint-demo.json .
|
||||
rsync -v -e ssh $1:~/solana/leader.json .
|
||||
rsync -v -e ssh $1:~/solana/genesis.log .
|
||||
rsync -v -e ssh $1:~/solana/leader.log .
|
||||
rsync -v -e ssh $1:~/solana/libcuda_verify_ed25519.a .
|
||||
export RUST_LOG=solana=info
|
||||
sudo sysctl -w net.core.rmem_max=26214400
|
||||
cat genesis.log leader.log | cargo run --release --features cuda --bin solana-fullnode -- -l validator.json -s validator.json -v leader.json -b 9000 -d 2>&1 | tee validator-tee.log
|
||||
$ ./multinode-demo/validator.sh ubuntu@10.0.1.51 #The leader machine
|
||||
$ ./fetch-perf-libs.sh
|
||||
$ cargo run --release --features=cuda --bin solana-fullnode -- -l leader.json < genesis.log
|
||||
```
|
||||
|
||||
Wait a few seconds for the server to initialize. It will print "Ready." when it's ready to
|
||||
receive transactions.
|
||||
|
||||
Multinode Testnet
|
||||
---
|
||||
|
||||
To run a multinode testnet, after starting a leader node, spin up some validator nodes:
|
||||
|
||||
```bash
|
||||
$ ./multinode-demo/validator.sh ubuntu@10.0.1.51:~/solana #The leader machine
|
||||
```
|
||||
|
||||
As with the leader node, you can run a performance-enhanced validator fullnode by adding
|
||||
`--features=cuda` to the line that runs `solana-fullnode` in `validator.sh`.
|
||||
|
||||
```bash
|
||||
$ cargo run --release --features=cuda --bin solana-fullnode -- -l validator.json -v leader.json < genesis.log
|
||||
```
|
||||
|
||||
|
||||
Then, in a separate shell, let's execute some transactions. Note we pass in
|
||||
Testnet Client Demo
|
||||
---
|
||||
|
||||
Now that your singlenode or multinode testnet is up and running, in a separate shell, let's send it some transactions! Note we pass in
|
||||
the JSON configuration file here, not the genesis ledger.
|
||||
|
||||
```bash
|
||||
$ cat ./multinode-demo/client.sh
|
||||
#!/bin/bash
|
||||
export RUST_LOG=solana=info
|
||||
rsync -v -e ssh $1:~/solana/leader.json .
|
||||
rsync -v -e ssh $1:~/solana/mint-demo.json .
|
||||
cat mint-demo.json | cargo run --release --bin solana-client-demo -- -l leader.json -c 8100 -n 1
|
||||
$ ./multinode-demo/client.sh ubuntu@10.0.1.51 #The leader machine
|
||||
$ ./multinode-demo/client.sh ubuntu@10.0.1.51:~/solana 2 #The leader machine and the total number of nodes in the network
|
||||
```
|
||||
|
||||
Try starting a more validators and reruning the client demo!
|
||||
What just happened? The client demo spins up several threads to send 500,000 transactions
|
||||
to the testnet as quickly as it can. The client then pings the testnet periodically to see
|
||||
how many transactions it processed in that time. Take note that the demo intentionally
|
||||
floods the network with UDP packets, such that the network will almost certainly drop a
|
||||
bunch of them. This ensures the testnet has an opportunity to reach 710k TPS. The client
|
||||
demo completes after it has convinced itself the testnet won't process any additional
|
||||
transactions. You should see several TPS measurements printed to the screen. In the
|
||||
multinode variation, you'll see TPS measurements for each validator node as well.
|
||||
|
||||
Linux Snap
|
||||
---
|
||||
A Linux [Snap](https://snapcraft.io/) is available, which can be used to
|
||||
easily get Solana running on supported Linux systems without building anything
|
||||
from source. The `edge` Snap channel is updated daily with the latest
|
||||
development from the `master` branch. To install:
|
||||
```bash
|
||||
$ sudo snap install solana --edge --devmode
|
||||
```
|
||||
(`--devmode` flag is required only for `solana.fullnode-cuda`)
|
||||
|
||||
Once installed the usual Solana programs will be available as `solona.*` instead
|
||||
of `solana-*`. For example, `solana.fullnode` instead of `solana-fullnode`.
|
||||
|
||||
Update to the latest version at any time with
|
||||
```bash
|
||||
$ snap info solana
|
||||
$ sudo snap refresh solana --devmode
|
||||
```
|
||||
|
||||
Developing
|
||||
===
|
||||
@@ -107,7 +166,7 @@ $ source $HOME/.cargo/env
|
||||
$ rustup component add rustfmt-preview
|
||||
```
|
||||
|
||||
If your rustc version is lower than 1.25.0, please update it:
|
||||
If your rustc version is lower than 1.26.1, please update it:
|
||||
|
||||
```bash
|
||||
$ rustup update
|
||||
@@ -146,6 +205,17 @@ to see the debug and info sections for streamer and server respectively. General
|
||||
we are using debug for infrequent debug messages, trace for potentially frequent messages and
|
||||
info for performance-related logging.
|
||||
|
||||
Attaching to a running process with gdb
|
||||
|
||||
```
|
||||
$ sudo gdb
|
||||
attach <PID>
|
||||
set logging on
|
||||
thread apply all bt
|
||||
```
|
||||
|
||||
This will dump all the threads stack traces into gdb.txt
|
||||
|
||||
Benchmarking
|
||||
---
|
||||
|
||||
@@ -161,22 +231,24 @@ Run the benchmarks:
|
||||
$ cargo +nightly bench --features="unstable"
|
||||
```
|
||||
|
||||
To run the benchmarks on Linux with GPU optimizations enabled:
|
||||
|
||||
```bash
|
||||
$ wget https://solana-build-artifacts.s3.amazonaws.com/v0.5.0/libcuda_verify_ed25519.a
|
||||
$ cargo +nightly bench --features="unstable,cuda"
|
||||
```
|
||||
|
||||
Code coverage
|
||||
---
|
||||
|
||||
To generate code coverage statistics, run kcov via Docker:
|
||||
To generate code coverage statistics, install cargo-cov. Note: the tool currently only works
|
||||
in Rust nightly.
|
||||
|
||||
```bash
|
||||
$ ./ci/coverage.sh
|
||||
$ cargo +nightly install cargo-cov
|
||||
```
|
||||
The coverage report will be written to `./target/cov/index.html`
|
||||
|
||||
Run cargo-cov and generate a report:
|
||||
|
||||
```bash
|
||||
$ cargo +nightly cov test
|
||||
$ cargo +nightly cov report --open
|
||||
```
|
||||
|
||||
The coverage report will be written to `./target/cov/report/index.html`
|
||||
|
||||
|
||||
Why coverage? While most see coverage as a code quality metric, we see it primarily as a developer
|
||||
|
1
build.rs
1
build.rs
@@ -11,5 +11,6 @@ fn main() {
|
||||
}
|
||||
if !env::var("CARGO_FEATURE_ERASURE").is_err() {
|
||||
println!("cargo:rustc-link-lib=dylib=Jerasure");
|
||||
println!("cargo:rustc-link-lib=dylib=gf_complete");
|
||||
}
|
||||
}
|
||||
|
1
ci/.gitignore
vendored
1
ci/.gitignore
vendored
@@ -1,2 +1,3 @@
|
||||
/node_modules/
|
||||
/package-lock.json
|
||||
/snapcraft.credentials
|
||||
|
88
ci/README.md
Normal file
88
ci/README.md
Normal file
@@ -0,0 +1,88 @@
|
||||
|
||||
Our CI infrastructure is built around [BuildKite](https://buildkite.com) with some
|
||||
additional GitHub integration provided by https://github.com/mvines/ci-gate
|
||||
|
||||
## Agent Queues
|
||||
|
||||
We define two [Agent Queues](https://buildkite.com/docs/agent/v3/queues):
|
||||
`queue=default` and `queue=cuda`. The `default` queue should be favored and
|
||||
runs on lower-cost CPU instances. The `cuda` queue is only necessary for
|
||||
running **tests** that depend on GPU (via CUDA) access -- CUDA builds may still
|
||||
be run on the `default` queue, and the [buildkite artifact
|
||||
system](https://buildkite.com/docs/builds/artifacts) used to transfer build
|
||||
products over to a GPU instance for testing.
|
||||
|
||||
## Buildkite Agent Management
|
||||
|
||||
### Buildkite GCP Setup
|
||||
|
||||
CI runs on Google Cloud Platform via two Compute Engine Instance groups:
|
||||
`ci-default` and `ci-cuda`. Autoscaling is currently disabled and the number of
|
||||
VM Instances in each group is manually adjusted.
|
||||
|
||||
#### Updating a CI Disk Image
|
||||
|
||||
Each Instance group has its own disk image, `ci-default-vX` and
|
||||
`ci-cuda-vY`, where *X* and *Y* are incremented each time the image is changed.
|
||||
|
||||
The process to update a disk image is as follows (TODO: make this less manual):
|
||||
|
||||
1. Create a new VM Instance using the disk image to modify.
|
||||
2. Once the VM boots, ssh to it and modify the disk as desired.
|
||||
3. Stop the VM Instance running the modified disk. Remember the name of the VM disk
|
||||
4. From another machine, `gcloud auth login`, then create a new Disk Image based
|
||||
off the modified VM Instance:
|
||||
```
|
||||
$ gcloud compute images create ci-default-v5 --source-disk xxx --source-disk-zone us-east1-b
|
||||
```
|
||||
or
|
||||
```
|
||||
$ gcloud compute images create ci-cuda-v5 --source-disk xxx --source-disk-zone us-east1-b
|
||||
```
|
||||
5. Delete the new VM instance.
|
||||
6. Go to the Instance templates tab, find the existing template named
|
||||
`ci-default-vX` or `ci-cuda-vY` and select it. Use the "Copy" button to create
|
||||
a new Instance template called `ci-default-vX+1` or `ci-cuda-vY+1` with the
|
||||
newly created Disk image.
|
||||
7. Go to the Instance Groups tag and find the applicable group, `ci-default` or
|
||||
`ci-cuda`. Edit the Instance Group in two steps: (a) Set the number of
|
||||
instances to 0 and wait for them all to terminate, (b) Update the Instance
|
||||
template and restore the number of instances to the original value.
|
||||
8. Clean up the previous version by deleting it from Instance Templates and
|
||||
Images.
|
||||
|
||||
|
||||
## Reference
|
||||
|
||||
### Buildkite AWS CloudFormation Setup
|
||||
|
||||
**AWS CloudFormation is currently inactive, although it may be restored in the
|
||||
future**
|
||||
|
||||
AWS CloudFormation can be used to scale machines up and down based on the
|
||||
current CI load. If no machine is currently running it can take up to 60
|
||||
seconds to spin up a new instance, please remain calm during this time.
|
||||
|
||||
#### AMI
|
||||
We use a custom AWS AMI built via https://github.com/solana-labs/elastic-ci-stack-for-aws/tree/solana/cuda.
|
||||
|
||||
Use the following process to update this AMI as dependencies change:
|
||||
```bash
|
||||
$ export AWS_ACCESS_KEY_ID=my_access_key
|
||||
$ export AWS_SECRET_ACCESS_KEY=my_secret_access_key
|
||||
$ git clone https://github.com/solana-labs/elastic-ci-stack-for-aws.git -b solana/cuda
|
||||
$ cd elastic-ci-stack-for-aws/
|
||||
$ make build
|
||||
$ make build-ami
|
||||
```
|
||||
|
||||
Watch for the *"amazon-ebs: AMI:"* log message to extract the name of the new
|
||||
AMI. For example:
|
||||
```
|
||||
amazon-ebs: AMI: ami-07118545e8b4ce6dc
|
||||
```
|
||||
The new AMI should also now be visible in your EC2 Dashboard. Go to the desired
|
||||
AWS CloudFormation stack, update the **ImageId** field to the new AMI id, and
|
||||
*apply* the stack changes.
|
||||
|
||||
|
@@ -1,16 +1,31 @@
|
||||
steps:
|
||||
- command: "ci/coverage.sh"
|
||||
name: "coverage [public]"
|
||||
- command: "ci/docker-run.sh rust ci/test-stable.sh"
|
||||
name: "stable [public]"
|
||||
- command: "ci/docker-run.sh rustlang/rust:nightly ci/test-nightly.sh || true"
|
||||
name: "nightly - FAILURES IGNORED [public]"
|
||||
- command: "ci/docker-run.sh rust ci/test-ignored.sh"
|
||||
name: "ignored [public]"
|
||||
- command: "ci/test-cuda.sh"
|
||||
name: "cuda"
|
||||
timeout_in_minutes: 20
|
||||
- command: "ci/shellcheck.sh"
|
||||
name: "shellcheck [public]"
|
||||
timeout_in_minutes: 20
|
||||
- wait
|
||||
- command: "ci/publish.sh"
|
||||
name: "publish release artifacts"
|
||||
- command: "ci/docker-run.sh rustlang/rust:nightly ci/test-nightly.sh"
|
||||
name: "nightly [public]"
|
||||
timeout_in_minutes: 20
|
||||
- command: "ci/test-stable-perf.sh"
|
||||
name: "stable-perf [public]"
|
||||
timeout_in_minutes: 20
|
||||
retry:
|
||||
automatic:
|
||||
- exit_status: "*"
|
||||
limit: 2
|
||||
agents:
|
||||
- "queue=cuda"
|
||||
- command: "ci/snap.sh [public]"
|
||||
timeout_in_minutes: 20
|
||||
name: "snap [public]"
|
||||
- wait
|
||||
- command: "ci/publish-crate.sh [public]"
|
||||
timeout_in_minutes: 20
|
||||
name: "publish crate"
|
||||
- command: "ci/hoover.sh [public]"
|
||||
timeout_in_minutes: 20
|
||||
name: "clean agent"
|
||||
|
||||
|
@@ -1,21 +0,0 @@
|
||||
#!/bin/bash -e
|
||||
|
||||
cd "$(dirname "$0")/.."
|
||||
|
||||
ci/docker-run.sh evilmachines/rust-cargo-kcov \
|
||||
bash -exc "\
|
||||
export RUST_BACKTRACE=1; \
|
||||
cargo build --verbose; \
|
||||
cargo kcov --lib --verbose; \
|
||||
"
|
||||
|
||||
echo Coverage report:
|
||||
ls -l target/cov/index.html
|
||||
|
||||
if [[ -z "$CODECOV_TOKEN" ]]; then
|
||||
echo CODECOV_TOKEN undefined
|
||||
else
|
||||
bash <(curl -s https://codecov.io/bash)
|
||||
fi
|
||||
|
||||
exit 0
|
@@ -19,7 +19,12 @@ fi
|
||||
docker pull "$IMAGE"
|
||||
shift
|
||||
|
||||
ARGS=(--workdir /solana --volume "$PWD:/solana" --rm)
|
||||
ARGS=(
|
||||
--workdir /solana
|
||||
--volume "$PWD:/solana"
|
||||
--env "HOME=/solana"
|
||||
--rm
|
||||
)
|
||||
|
||||
ARGS+=(--env "CARGO_HOME=/solana/.cargo")
|
||||
|
||||
@@ -28,14 +33,18 @@ ARGS+=(--env "CARGO_HOME=/solana/.cargo")
|
||||
ARGS+=(--security-opt "seccomp=unconfined")
|
||||
|
||||
# Ensure files are created with the current host uid/gid
|
||||
ARGS+=(--user "$(id -u):$(id -g)")
|
||||
if [[ -z "$SOLANA_DOCKER_RUN_NOSETUID" ]]; then
|
||||
ARGS+=(--user "$(id -u):$(id -g)")
|
||||
fi
|
||||
|
||||
# Environment variables to propagate into the container
|
||||
ARGS+=(
|
||||
--env BUILDKITE_BRANCH
|
||||
--env BUILDKITE_TAG
|
||||
--env CODECOV_TOKEN
|
||||
--env CRATES_IO_TOKEN
|
||||
--env SNAPCRAFT_CREDENTIALS_KEY
|
||||
)
|
||||
|
||||
set -x
|
||||
docker run "${ARGS[@]}" "$IMAGE" "$@"
|
||||
exec docker run "${ARGS[@]}" "$IMAGE" "$@"
|
||||
|
7
ci/docker-snapcraft/Dockerfile
Normal file
7
ci/docker-snapcraft/Dockerfile
Normal file
@@ -0,0 +1,7 @@
|
||||
FROM snapcraft/xenial-amd64
|
||||
|
||||
# Update snapcraft to latest version
|
||||
RUN apt-get update -qq \
|
||||
&& apt-get install -y snapcraft \
|
||||
&& rm -rf /var/lib/apt/lists/* \
|
||||
&& snapcraft --version
|
6
ci/docker-snapcraft/build.sh
Executable file
6
ci/docker-snapcraft/build.sh
Executable file
@@ -0,0 +1,6 @@
|
||||
#!/bin/bash -ex
|
||||
|
||||
cd "$(dirname "$0")"
|
||||
|
||||
docker build -t solanalabs/snapcraft .
|
||||
docker push solanalabs/snapcraft
|
57
ci/hoover.sh
Executable file
57
ci/hoover.sh
Executable file
@@ -0,0 +1,57 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Regular maintenance performed on a buildkite agent to control disk usage
|
||||
#
|
||||
|
||||
echo --- Delete all exited containers first
|
||||
(
|
||||
set -x
|
||||
exited=$(docker ps -aq --no-trunc --filter "status=exited")
|
||||
if [[ -n "$exited" ]]; then
|
||||
# shellcheck disable=SC2086 # Don't want to double quote "$exited"
|
||||
docker rm $exited
|
||||
fi
|
||||
)
|
||||
|
||||
echo --- Delete untagged images
|
||||
(
|
||||
set -x
|
||||
untagged=$(docker images | grep '<none>'| awk '{ print $3 }')
|
||||
if [[ -n "$untagged" ]]; then
|
||||
# shellcheck disable=SC2086 # Don't want to double quote "$untagged"
|
||||
docker rmi $untagged
|
||||
fi
|
||||
)
|
||||
|
||||
echo --- Delete all dangling images
|
||||
(
|
||||
set -x
|
||||
dangling=$(docker images --filter 'dangling=true' -q --no-trunc | sort | uniq)
|
||||
if [[ -n "$dangling" ]]; then
|
||||
# shellcheck disable=SC2086 # Don't want to double quote "$dangling"
|
||||
docker rmi $dangling
|
||||
fi
|
||||
)
|
||||
|
||||
echo --- Remove unused docker networks
|
||||
(
|
||||
set -x
|
||||
docker network prune -f
|
||||
)
|
||||
|
||||
echo "--- Delete /tmp files older than 1 day owned by $(whoami)"
|
||||
(
|
||||
set -x
|
||||
find /tmp -maxdepth 1 -user "$(whoami)" -mtime +1 -print0 | xargs -0 rm -rf
|
||||
)
|
||||
|
||||
echo --- System Status
|
||||
(
|
||||
set -x
|
||||
docker images
|
||||
docker ps
|
||||
docker network ls
|
||||
df -h
|
||||
)
|
||||
|
||||
exit 0
|
@@ -16,4 +16,4 @@ if [[ ! -x $BKRUN ]]; then
|
||||
fi
|
||||
|
||||
set -x
|
||||
./ci/node_modules/.bin/bkrun ci/buildkite.yml
|
||||
exec ./ci/node_modules/.bin/bkrun ci/buildkite.yml
|
||||
|
40
ci/snap.sh
Executable file
40
ci/snap.sh
Executable file
@@ -0,0 +1,40 @@
|
||||
#!/bin/bash -e
|
||||
|
||||
cd "$(dirname "$0")/.."
|
||||
|
||||
DRYRUN=
|
||||
if [[ -z $BUILDKITE_BRANCH || $BUILDKITE_BRANCH =~ pull/* ]]; then
|
||||
DRYRUN="echo"
|
||||
fi
|
||||
|
||||
if [[ -z "$BUILDKITE_TAG" ]]; then
|
||||
SNAP_CHANNEL=edge
|
||||
else
|
||||
SNAP_CHANNEL=beta
|
||||
fi
|
||||
|
||||
if [[ -z $DRYRUN ]]; then
|
||||
[[ -n $SNAPCRAFT_CREDENTIALS_KEY ]] || {
|
||||
echo SNAPCRAFT_CREDENTIALS_KEY not defined
|
||||
exit 1;
|
||||
}
|
||||
(
|
||||
openssl aes-256-cbc -d \
|
||||
-in ci/snapcraft.credentials.enc \
|
||||
-out ci/snapcraft.credentials \
|
||||
-k "$SNAPCRAFT_CREDENTIALS_KEY"
|
||||
|
||||
snapcraft login --with ci/snapcraft.credentials
|
||||
) || {
|
||||
rm -f ci/snapcraft.credentials;
|
||||
exit 1
|
||||
}
|
||||
fi
|
||||
|
||||
set -x
|
||||
|
||||
echo --- build
|
||||
snapcraft
|
||||
|
||||
echo --- publish
|
||||
$DRYRUN snapcraft push solana_*.snap --release $SNAP_CHANNEL
|
BIN
ci/snapcraft.credentials.enc
Normal file
BIN
ci/snapcraft.credentials.enc
Normal file
Binary file not shown.
@@ -1,22 +0,0 @@
|
||||
#!/bin/bash -e
|
||||
|
||||
cd "$(dirname "$0")/.."
|
||||
|
||||
LIB=libcuda_verify_ed25519.a
|
||||
if [[ ! -r $LIB ]]; then
|
||||
if [[ -z "${libcuda_verify_ed25519_URL:-}" ]]; then
|
||||
echo "$0 skipped. Unable to locate $LIB"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
export LD_LIBRARY_PATH=/usr/local/cuda/lib64
|
||||
export PATH=$PATH:/usr/local/cuda/bin
|
||||
curl -X GET -o $LIB "$libcuda_verify_ed25519_URL"
|
||||
fi
|
||||
|
||||
# shellcheck disable=SC1090 # <-- shellcheck can't follow ~
|
||||
source ~/.cargo/env
|
||||
export RUST_BACKTRACE=1
|
||||
cargo test --features=cuda
|
||||
|
||||
exit 0
|
@@ -1,9 +0,0 @@
|
||||
#!/bin/bash -e
|
||||
|
||||
cd "$(dirname "$0")/.."
|
||||
|
||||
rustc --version
|
||||
cargo --version
|
||||
|
||||
export RUST_BACKTRACE=1
|
||||
cargo test -- --ignored
|
@@ -2,13 +2,31 @@
|
||||
|
||||
cd "$(dirname "$0")/.."
|
||||
|
||||
export RUST_BACKTRACE=1
|
||||
rustc --version
|
||||
cargo --version
|
||||
|
||||
export RUST_BACKTRACE=1
|
||||
rustup component add rustfmt-preview
|
||||
cargo build --verbose --features unstable
|
||||
cargo test --verbose --features unstable
|
||||
cargo bench --verbose --features unstable
|
||||
_() {
|
||||
echo "--- $*"
|
||||
"$@"
|
||||
}
|
||||
|
||||
_ cargo build --verbose --features unstable
|
||||
_ cargo test --verbose --features unstable
|
||||
_ cargo bench --verbose --features unstable
|
||||
|
||||
|
||||
# Coverage ...
|
||||
_ cargo install --force cargo-cov
|
||||
_ cargo cov test
|
||||
_ cargo cov report
|
||||
|
||||
echo --- Coverage report:
|
||||
ls -l target/cov/report/index.html
|
||||
|
||||
if [[ -z "$CODECOV_TOKEN" ]]; then
|
||||
echo CODECOV_TOKEN undefined
|
||||
else
|
||||
bash <(curl -s https://codecov.io/bash) -x 'llvm-cov gcov'
|
||||
fi
|
||||
|
||||
exit 0
|
||||
|
12
ci/test-stable-perf.sh
Executable file
12
ci/test-stable-perf.sh
Executable file
@@ -0,0 +1,12 @@
|
||||
#!/bin/bash -e
|
||||
|
||||
cd "$(dirname "$0")/.."
|
||||
|
||||
./fetch-perf-libs.sh
|
||||
|
||||
export LD_LIBRARY_PATH=$PWD:/usr/local/cuda/lib64
|
||||
export PATH=$PATH:/usr/local/cuda/bin
|
||||
export RUST_BACKTRACE=1
|
||||
|
||||
set -x
|
||||
exec cargo test --features=cuda,erasure
|
@@ -2,13 +2,17 @@
|
||||
|
||||
cd "$(dirname "$0")/.."
|
||||
|
||||
export RUST_BACKTRACE=1
|
||||
rustc --version
|
||||
cargo --version
|
||||
|
||||
export RUST_BACKTRACE=1
|
||||
rustup component add rustfmt-preview
|
||||
cargo fmt -- --write-mode=diff
|
||||
cargo build --verbose
|
||||
cargo test --verbose
|
||||
_() {
|
||||
echo "--- $*"
|
||||
"$@"
|
||||
}
|
||||
|
||||
exit 0
|
||||
_ rustup component add rustfmt-preview
|
||||
_ cargo fmt -- --write-mode=diff
|
||||
_ cargo build --verbose
|
||||
_ cargo test --verbose
|
||||
_ cargo test -- --ignored
|
||||
|
@@ -1,65 +0,0 @@
|
||||
The Historian
|
||||
===
|
||||
|
||||
Create a *Historian* and send it *events* to generate an *event log*, where each *entry*
|
||||
is tagged with the historian's latest *hash*. Then ensure the order of events was not tampered
|
||||
with by verifying each entry's hash can be generated from the hash in the previous entry:
|
||||
|
||||

|
||||
|
||||
```rust
|
||||
extern crate solana;
|
||||
|
||||
use solana::historian::Historian;
|
||||
use solana::ledger::{Block, Entry, Hash};
|
||||
use solana::event::{generate_keypair, get_pubkey, sign_claim_data, Event};
|
||||
use std::thread::sleep;
|
||||
use std::time::Duration;
|
||||
use std::sync::mpsc::SendError;
|
||||
|
||||
fn create_ledger(hist: &Historian<Hash>) -> Result<(), SendError<Event<Hash>>> {
|
||||
sleep(Duration::from_millis(15));
|
||||
let tokens = 42;
|
||||
let keypair = generate_keypair();
|
||||
let event0 = Event::new_claim(get_pubkey(&keypair), tokens, sign_claim_data(&tokens, &keypair));
|
||||
hist.sender.send(event0)?;
|
||||
sleep(Duration::from_millis(10));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn main() {
|
||||
let seed = Hash::default();
|
||||
let hist = Historian::new(&seed, Some(10));
|
||||
create_ledger(&hist).expect("send error");
|
||||
drop(hist.sender);
|
||||
let entries: Vec<Entry<Hash>> = hist.receiver.iter().collect();
|
||||
for entry in &entries {
|
||||
println!("{:?}", entry);
|
||||
}
|
||||
// Proof-of-History: Verify the historian learned about the events
|
||||
// in the same order they appear in the vector.
|
||||
assert!(entries[..].verify(&seed));
|
||||
}
|
||||
```
|
||||
|
||||
Running the program should produce a ledger similar to:
|
||||
|
||||
```rust
|
||||
Entry { num_hashes: 0, id: [0, ...], event: Tick }
|
||||
Entry { num_hashes: 3, id: [67, ...], event: Transaction { tokens: 42 } }
|
||||
Entry { num_hashes: 3, id: [123, ...], event: Tick }
|
||||
```
|
||||
|
||||
Proof-of-History
|
||||
---
|
||||
|
||||
Take note of the last line:
|
||||
|
||||
```rust
|
||||
assert!(entries[..].verify(&seed));
|
||||
```
|
||||
|
||||
[It's a proof!](https://en.wikipedia.org/wiki/Curry–Howard_correspondence) For each entry returned by the
|
||||
historian, we can verify that `id` is the result of applying a sha256 hash to the previous `id`
|
||||
exactly `num_hashes` times, and then hashing then event data on top of that. Because the event data is
|
||||
included in the hash, the events cannot be reordered without regenerating all the hashes.
|
@@ -1,18 +0,0 @@
|
||||
msc {
|
||||
client,historian,recorder;
|
||||
|
||||
recorder=>historian [ label = "e0 = Entry{id: h0, n: 0, event: Tick}" ] ;
|
||||
recorder=>recorder [ label = "h1 = hash(h0)" ] ;
|
||||
recorder=>recorder [ label = "h2 = hash(h1)" ] ;
|
||||
client=>historian [ label = "Transaction(d0)" ] ;
|
||||
historian=>recorder [ label = "Transaction(d0)" ] ;
|
||||
recorder=>recorder [ label = "h3 = hash(h2 + d0)" ] ;
|
||||
recorder=>historian [ label = "e1 = Entry{id: hash(h3), n: 3, event: Transaction(d0)}" ] ;
|
||||
recorder=>recorder [ label = "h4 = hash(h3)" ] ;
|
||||
recorder=>recorder [ label = "h5 = hash(h4)" ] ;
|
||||
recorder=>recorder [ label = "h6 = hash(h5)" ] ;
|
||||
recorder=>historian [ label = "e2 = Entry{id: h6, n: 3, event: Tick}" ] ;
|
||||
client=>historian [ label = "collect()" ] ;
|
||||
historian=>client [ label = "entries = [e0, e1, e2]" ] ;
|
||||
client=>client [ label = "entries.verify(h0)" ] ;
|
||||
}
|
37
fetch-perf-libs.sh
Executable file
37
fetch-perf-libs.sh
Executable file
@@ -0,0 +1,37 @@
|
||||
#!/bin/bash -e
|
||||
|
||||
if [[ $(uname) != Linux ]]; then
|
||||
echo Performance libraries are only available for Linux
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ $(uname -m) != x86_64 ]]; then
|
||||
echo Performance libraries are only available for x86_64 architecture
|
||||
exit 1
|
||||
fi
|
||||
|
||||
(
|
||||
set -x
|
||||
curl -o solana-perf.tgz \
|
||||
https://solana-perf.s3.amazonaws.com/master/x86_64-unknown-linux-gnu/solana-perf.tgz
|
||||
tar zxvf solana-perf.tgz
|
||||
)
|
||||
|
||||
if [[ -r /usr/local/cuda/version.txt && -r cuda-version.txt ]]; then
|
||||
if ! diff /usr/local/cuda/version.txt cuda-version.txt > /dev/null; then
|
||||
echo ==============================================
|
||||
echo Warning: possible CUDA version mismatch
|
||||
echo
|
||||
echo "Expected version: $(cat cuda-version.txt)"
|
||||
echo "Detected version: $(cat /usr/local/cuda/version.txt)"
|
||||
echo ==============================================
|
||||
fi
|
||||
else
|
||||
echo ==============================================
|
||||
echo Warning: unable to validate CUDA version
|
||||
echo ==============================================
|
||||
fi
|
||||
|
||||
echo "Downloaded solana-perf version: $(cat solana-perf-HEAD.txt)"
|
||||
|
||||
exit 0
|
@@ -1,16 +1,17 @@
|
||||
#!/bin/bash -e
|
||||
#!/bin/bash
|
||||
|
||||
if [[ -z "$1" ]]; then
|
||||
echo "usage: $0 [leader machine]"
|
||||
if [[ -z $1 ]]; then
|
||||
echo "usage: $0 [network path to solana repo on leader machine] <number of nodes in the network>"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
LEADER="$1"
|
||||
LEADER=$1
|
||||
COUNT=${2:-1}
|
||||
|
||||
set -x
|
||||
export RUST_LOG=solana=info
|
||||
rsync -v -e ssh "$LEADER:~/solana/leader.json" .
|
||||
rsync -v -e ssh "$LEADER:~/solana/mint-demo.json" .
|
||||
rsync -vz "$LEADER"/{leader.json,mint-demo.json} . || exit $?
|
||||
|
||||
# if RUST_LOG is unset, default to info
|
||||
export RUST_LOG=${RUST_LOG:-solana=info}
|
||||
|
||||
cargo run --release --bin solana-client-demo -- \
|
||||
-l leader.json -c 8100 -n 1 < mint-demo.json
|
||||
-n "$COUNT" -l leader.json -d < mint-demo.json 2>&1 | tee client.log
|
||||
|
@@ -1,4 +1,28 @@
|
||||
#!/bin/bash
|
||||
export RUST_LOG=solana=info
|
||||
sudo sysctl -w net.core.rmem_max=26214400
|
||||
cat genesis.log leader.log | cargo run --release --features cuda --bin solana-fullnode -- -s leader.json -l leader.json -b 8000 -d 2>&1 | tee leader-tee.log
|
||||
here=$(dirname "$0")
|
||||
|
||||
# shellcheck source=/dev/null
|
||||
. "${here}"/myip.sh
|
||||
|
||||
myip=$(myip) || exit $?
|
||||
|
||||
[[ -f leader-"${myip}".json ]] || {
|
||||
echo "I can't find a matching leader config file for \"${myip}\"...
|
||||
Please run ${here}/setup.sh first.
|
||||
"
|
||||
exit 1
|
||||
}
|
||||
|
||||
# if RUST_LOG is unset, default to info
|
||||
export RUST_LOG=${RUST_LOG:-solana=info}
|
||||
|
||||
[[ $(uname) = Linux ]] && sudo sysctl -w net.core.rmem_max=26214400 1>/dev/null 2>/dev/null
|
||||
|
||||
# this makes a leader.json file available alongside genesis, etc. for
|
||||
# validators and clients
|
||||
cp leader-"${myip}".json leader.json
|
||||
|
||||
cargo run --release --bin solana-fullnode -- \
|
||||
-l leader-"${myip}".json \
|
||||
< genesis.log tx-*.log \
|
||||
> tx-"$(date -u +%Y%m%d%H%M%S%N)".log
|
||||
|
58
multinode-demo/myip.sh
Executable file
58
multinode-demo/myip.sh
Executable file
@@ -0,0 +1,58 @@
|
||||
#!/bin/bash
|
||||
|
||||
function myip()
|
||||
{
|
||||
declare ipaddrs=( )
|
||||
|
||||
# query interwebs
|
||||
mapfile -t ipaddrs < <(curl -s ifconfig.co)
|
||||
|
||||
# machine's interfaces
|
||||
mapfile -t -O "${#ipaddrs[*]}" ipaddrs < \
|
||||
<(ifconfig | awk '/inet(6)? (addr:)?/ {print $2}')
|
||||
|
||||
ipaddrs=( "${extips[@]}" "${ipaddrs[@]}" )
|
||||
|
||||
if (( ! ${#ipaddrs[*]} ))
|
||||
then
|
||||
echo "
|
||||
myip: error: I'm having trouble determining what our IP address is...
|
||||
Are we connected to a network?
|
||||
|
||||
"
|
||||
return 1
|
||||
fi
|
||||
|
||||
|
||||
declare prompt="
|
||||
Please choose the IP address you want to advertise to the network:
|
||||
|
||||
0) ${ipaddrs[0]} <====== this one was returned by the interwebs...
|
||||
"
|
||||
|
||||
for ((i=1; i < ${#ipaddrs[*]}; i++))
|
||||
do
|
||||
prompt+=" $i) ${ipaddrs[i]}
|
||||
"
|
||||
done
|
||||
|
||||
while read -r -p "${prompt}
|
||||
please enter a number [0 for default]: " which
|
||||
do
|
||||
[[ -z ${which} ]] && break;
|
||||
[[ ${which} =~ [0-9]+ ]] && (( which < ${#ipaddrs[*]} )) && break;
|
||||
echo "Ug. invalid entry \"${which}\"...
|
||||
"
|
||||
sleep 1
|
||||
done
|
||||
|
||||
which=${which:-0}
|
||||
|
||||
echo "${ipaddrs[which]}"
|
||||
|
||||
}
|
||||
|
||||
if [[ ${0} == "${BASH_SOURCE[0]}" ]]
|
||||
then
|
||||
myip "$@"
|
||||
fi
|
15
multinode-demo/setup.sh
Executable file
15
multinode-demo/setup.sh
Executable file
@@ -0,0 +1,15 @@
|
||||
#!/bin/bash
|
||||
here=$(dirname "$0")
|
||||
|
||||
# shellcheck source=/dev/null
|
||||
. "${here}"/myip.sh
|
||||
|
||||
myip=$(myip) || exit $?
|
||||
|
||||
num_tokens=${1:-1000000000}
|
||||
|
||||
cargo run --release --bin solana-mint-demo <<<"${num_tokens}" > mint-demo.json
|
||||
cargo run --release --bin solana-genesis-demo < mint-demo.json > genesis.log
|
||||
|
||||
cargo run --release --bin solana-fullnode-config -- -d > leader-"${myip}".json
|
||||
cargo run --release --bin solana-fullnode-config -- -b 9000 -d > validator-"${myip}".json
|
@@ -1,24 +1,32 @@
|
||||
#!/bin/bash -e
|
||||
#!/bin/bash
|
||||
here=$(dirname "$0")
|
||||
|
||||
if [[ -z "$1" ]]; then
|
||||
echo "usage: $0 [leader machine]"
|
||||
# shellcheck source=/dev/null
|
||||
. "${here}"/myip.sh
|
||||
|
||||
leader=$1
|
||||
|
||||
[[ -z ${leader} ]] && {
|
||||
echo "usage: $0 [network path to solana repo on leader machine]"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
LEADER="$1"
|
||||
myip=$(myip) || exit $?
|
||||
|
||||
set -x
|
||||
[[ -f validator-"$myip".json ]] || {
|
||||
echo "I can't find a matching validator config file for \"${myip}\"...
|
||||
Please run ${here}/setup.sh first.
|
||||
"
|
||||
exit 1
|
||||
}
|
||||
|
||||
rsync -v -e ssh "$LEADER:~/solana/mint-demo.json" .
|
||||
rsync -v -e ssh "$LEADER:~/solana/leader.json" .
|
||||
rsync -v -e ssh "$LEADER:~/solana/genesis.log" .
|
||||
rsync -v -e ssh "$LEADER:~/solana/leader.log" .
|
||||
rsync -v -e ssh "$LEADER:~/solana/libcuda_verify_ed25519.a" .
|
||||
rsync -vz "${leader}"/{mint-demo.json,leader.json,genesis.log,tx-*.log} . || exit $?
|
||||
|
||||
export RUST_LOG=solana=info
|
||||
[[ $(uname) = Linux ]] && sudo sysctl -w net.core.rmem_max=26214400 1>/dev/null 2>/dev/null
|
||||
|
||||
sudo sysctl -w net.core.rmem_max=26214400
|
||||
# if RUST_LOG is unset, default to info
|
||||
export RUST_LOG=${RUST_LOG:-solana=info}
|
||||
|
||||
cat genesis.log leader.log | \
|
||||
cargo run --release --features cuda --bin solana-fullnode -- \
|
||||
-l validator.json -s validator.json -v leader.json -b 9000 -d 2>&1 | tee validator-tee.log
|
||||
cargo run --release --bin solana-fullnode -- \
|
||||
-l validator-"${myip}".json -v leader.json \
|
||||
< genesis.log tx-*.log
|
||||
|
182
rfcs/rfc-001-smart-contracts-engine.md
Normal file
182
rfcs/rfc-001-smart-contracts-engine.md
Normal file
@@ -0,0 +1,182 @@
|
||||
# Smart Contracts Engine
|
||||
|
||||
The goal of this RFC is to define a set of constraints for APIs and runtime such that we can execute our smart contracts safely on massively parallel hardware such as a GPU. Our runtime is built around an OS *syscall* primitive. The difference in blockchain is that now the OS does a cryptographic check of memory region ownership before accessing the memory in the Solana kernel.
|
||||
|
||||
## Toolchain Stack
|
||||
|
||||
+---------------------+ +---------------------+
|
||||
| | | |
|
||||
| +------------+ | | +------------+ |
|
||||
| | | | | | | |
|
||||
| | frontend | | | | verifier | |
|
||||
| | | | | | | |
|
||||
| +-----+------+ | | +-----+------+ |
|
||||
| | | | | |
|
||||
| | | | | |
|
||||
| +-----+------+ | | +-----+------+ |
|
||||
| | | | | | | |
|
||||
| | llvm | | | | loader | |
|
||||
| | | +------>+ | | |
|
||||
| +-----+------+ | | +-----+------+ |
|
||||
| | | | | |
|
||||
| | | | | |
|
||||
| +-----+------+ | | +-----+------+ |
|
||||
| | | | | | | |
|
||||
| | ELF | | | | runtime | |
|
||||
| | | | | | | |
|
||||
| +------------+ | | +------------+ |
|
||||
| | | |
|
||||
| client | | solana |
|
||||
+---------------------+ +---------------------+
|
||||
|
||||
[Figure 1. Smart Contracts Stack]
|
||||
|
||||
In Figure 1 an untrusted client, creates a program in the front-end language of her choice, (like C/C++/Rust/Lua), and compiles it with LLVM to a position independent shared object ELF, targeting BPF bytecode. Solana will safely load and execute the ELF.
|
||||
|
||||
## Bytecode
|
||||
|
||||
Our bytecode is based on Berkley Packet Filter. The requirements for BPF overlap almost exactly with the requirements we have:
|
||||
|
||||
1. Deterministic amount of time to execute the code
|
||||
2. Bytecode that is portable between machine instruction sets
|
||||
3. Verified memory accesses
|
||||
4. Fast to load the object, verify the bytecode and JIT to local machine instruction set
|
||||
|
||||
For 1, that means that loops are unrolled, and for any jumps back we can guard them with a check against the number of instruction that have been executed at this point. If the limit is reached, the program yields its execution. This involves saving the stack and current instruction index.
|
||||
|
||||
For 2, the BPF bytecode already easily maps to x86–64, arm64 and other instruction sets.
|
||||
|
||||
For 3, every load and store that is relative can be checked to be within the expected memory that is passed into the ELF. Dynamic load and stores can do a runtime check against available memory, these will be slow and should be avoided.
|
||||
|
||||
For 4, Fully linked PIC ELF with just a single RX segment. Effectively we are linking a shared object with `-fpic -target bpf` and with a linker script to collect everything into a single RX segment. Writable globals are not supported.
|
||||
|
||||
## Loader
|
||||
The loader is our first smart contract. The job of this contract is to load the actual program with its own instance data. The loader will verify the bytecode and that the object implements the expected entry points.
|
||||
|
||||
Since there is only one RX segment, the context for the contract instance is passed into each entry point as well as the event data for that entry point.
|
||||
|
||||
A client will create a transaction to create a new loader instance:
|
||||
|
||||
`Solana_NewLoader(Loader Instance PubKey, proof of key ownership, space I need for my elf)`
|
||||
|
||||
A client will then do a bunch of transactions to load its elf into the loader instance they created:
|
||||
|
||||
`Loader_UploadElf(Loader Instance PubKey, proof of key ownership, pos start, pos end, data)`
|
||||
|
||||
At this point the client can create a new instance of the module with its own instance address:
|
||||
|
||||
`Loader_NewInstance(Loader Instance PubKey, proof of key ownership, Instance PubKey, proof of key ownership)`
|
||||
|
||||
Once the instance has been created, the client may need to upload more user data to solana to configure this instance:
|
||||
|
||||
`Instance_UploadModuleData(Instance PubKey, proof of key ownership, pos start, pos end, data)`
|
||||
|
||||
Now clients can `start` the instance:
|
||||
|
||||
`Instance_Start(Instance PubKey, proof of key ownership)`
|
||||
|
||||
## Runtime
|
||||
|
||||
Our goal with the runtime is to have a general purpose execution environment that is highly parallelizable and doesn't require dynamic resource management. We want to execute as many contracts as we can in parallel, and have them pass or fail without a destructive state change.
|
||||
|
||||
### State and Entry Point
|
||||
|
||||
State is addressed by an account which is at the moment simply the PubKey. Our goal is to eliminate dynamic memory allocation in the smart contract itself, so the contract is a function that takes a mapping of [(PubKey,State)] and returns [(PubKey, State')]. The output of keys is a subset of the input. Three basic kinds of state exist:
|
||||
|
||||
* Instance State
|
||||
* Participant State
|
||||
* Caller State
|
||||
|
||||
There isn't any difference in how each is implemented, but conceptually Participant State is memory that is allocated for each participant in the contract. Instance State is memory that is allocated for the contract itself, and Caller State is memory that the transactions caller has allocated.
|
||||
|
||||
|
||||
### Call
|
||||
|
||||
```
|
||||
void call(
|
||||
const struct instance_data *data,
|
||||
const uint8_t kind[], //instance|participant|caller|read|write
|
||||
const uint8_t *keys[],
|
||||
uint8_t *data[],
|
||||
int num,
|
||||
uint8_t dirty[], //dirty memory bits
|
||||
uint8_t *userdata, //current transaction data
|
||||
);
|
||||
```
|
||||
|
||||
To call this operation, the transaction that is destined to the contract instance specifies what keyed state it should present to the `call` function. To allocate the state memory or a call context, the client has to first call a function on the contract with the designed address that will own the state.
|
||||
|
||||
At its core, this is a system call that requires cryptographic proof of ownership of memory regions instead of an OS that checks page tables for access rights.
|
||||
|
||||
* `Instance_AllocateContext(Instance PubKey, My PubKey, Proof of key ownership)`
|
||||
|
||||
Any transaction can then call `call` on the contract with a set of keys. It's up to the contract itself to manage ownership:
|
||||
|
||||
* `Instance_Call(Instance PubKey, [Context PubKeys], proofs of ownership, userdata...)`
|
||||
|
||||
Contracts should be able to read any state that is part of solana, but only write to state that the contract allocated.
|
||||
|
||||
#### Caller State
|
||||
|
||||
Caller `state` is memory allocated for the `call` that belongs to the public key that is issuing the `call`. This is the caller's context.
|
||||
|
||||
#### Instance State
|
||||
|
||||
Instance `state` is memory that belongs to this contract instance. We may also need module-wide `state` as well.
|
||||
|
||||
#### Participant State
|
||||
|
||||
Participant `state` is any other memory. In some cases it may make sense to have these allocated as part of the call by the caller.
|
||||
|
||||
### Reduce
|
||||
|
||||
Some operations on the contract will require iteration over all the keys. To make this parallelizable the iteration is broken up into reduce calls which are combined.
|
||||
|
||||
```
|
||||
void reduce_m(
|
||||
const struct instance_data *data,
|
||||
const uint8_t *keys[],
|
||||
const uint8_t *data[],
|
||||
int num,
|
||||
uint8_t *reduce_data,
|
||||
);
|
||||
|
||||
void reduce_r(
|
||||
const struct instance_data *data,
|
||||
const uint8_t *reduce_data[],
|
||||
int num,
|
||||
uint8_t *reduce_data,
|
||||
);
|
||||
```
|
||||
|
||||
### Execution
|
||||
|
||||
Transactions are batched and processed in parallel at each stage.
|
||||
```
|
||||
+-----------+ +--------------+ +-----------+ +---------------+
|
||||
| sigverify |-+->| debit commit |---+->| execution |-+->| memory commit |
|
||||
+-----------+ | +--------------+ | +-----------+ | +---------------+
|
||||
| | |
|
||||
| +---------------+ | | +--------------+
|
||||
|->| memory verify |->+ +->| debit undo |
|
||||
+---------------+ | +--------------+
|
||||
|
|
||||
| +---------------+
|
||||
+->| credit commit |
|
||||
+---------------+
|
||||
|
||||
|
||||
```
|
||||
The `debit verify` stage is very similar to `memory verify`. Proof of key ownership is used to check if the callers key has some state allocated with the contract, then the memory is loaded and executed. After execution stage, the dirty pages are written back by the contract. Because know all the memory accesses during execution, we can batch transactions that do not interfere with each other. We can also apply the `debit undo` and `credit commit` stages of the transaction. `debit undo` is run in case of an exception during contract execution, only transfers may be reversed, fees are commited to solana.
|
||||
|
||||
### GPU execution
|
||||
|
||||
A single contract can read and write to separate key pairs without interference. These separate calls to the same contract can execute on the same GPU thread over different memory using different SIMD lanes.
|
||||
|
||||
## Notes
|
||||
|
||||
1. There is no dynamic memory allocation.
|
||||
2. Persistant Memory is allocated to a Key with ownership
|
||||
3. Contracts can `call` to update key owned state
|
||||
4. Contracts can `reduce` over the memory to aggregate state
|
||||
5. `call` is just a *syscall* that does a cryptographic check of memory owndershp
|
43
scripts/perf-plot.py
Executable file
43
scripts/perf-plot.py
Executable file
@@ -0,0 +1,43 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
import matplotlib
|
||||
matplotlib.use('Agg')
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
import json
|
||||
import sys
|
||||
|
||||
stages_to_counters = {}
|
||||
stages_to_time = {}
|
||||
|
||||
if len(sys.argv) != 2:
|
||||
print("USAGE: {} <input file>".format(sys.argv[0]))
|
||||
sys.exit(1)
|
||||
|
||||
with open(sys.argv[1]) as fh:
|
||||
for line in fh.readlines():
|
||||
if "COUNTER" in line:
|
||||
json_part = line[line.find("{"):]
|
||||
x = json.loads(json_part)
|
||||
counter = x['name']
|
||||
if not (counter in stages_to_counters):
|
||||
stages_to_counters[counter] = []
|
||||
stages_to_time[counter] = []
|
||||
stages_to_counters[counter].append(x['counts'])
|
||||
stages_to_time[counter].append(x['now'])
|
||||
|
||||
fig, ax = plt.subplots()
|
||||
|
||||
for stage in stages_to_counters.keys():
|
||||
plt.plot(stages_to_time[stage], stages_to_counters[stage], label=stage)
|
||||
|
||||
plt.xlabel('ms')
|
||||
plt.ylabel('count')
|
||||
|
||||
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
|
||||
ncol=2, mode="expand", borderaxespad=0.)
|
||||
|
||||
plt.locator_params(axis='x', nbins=10)
|
||||
plt.grid(True)
|
||||
|
||||
plt.savefig("perf.pdf")
|
69
snap/snapcraft.yaml
Normal file
69
snap/snapcraft.yaml
Normal file
@@ -0,0 +1,69 @@
|
||||
name: solana
|
||||
version: git
|
||||
summary: Blockchain, Rebuilt for Scale
|
||||
description: |
|
||||
710,000 tx/s with off-the-shelf hardware and no sharding.
|
||||
Scales with Moore's Law.
|
||||
grade: devel
|
||||
|
||||
# TODO: solana-perf-fullnode does not yet run with 'strict' confinement due to the
|
||||
# CUDA dependency, so use 'devmode' confinement for now
|
||||
confinement: devmode
|
||||
|
||||
apps:
|
||||
drone:
|
||||
command: solana-drone
|
||||
plugs:
|
||||
- network
|
||||
- network-bind
|
||||
fullnode:
|
||||
command: solana-fullnode
|
||||
plugs:
|
||||
- network
|
||||
- network-bind
|
||||
- home
|
||||
fullnode-cuda:
|
||||
command: solana-fullnode-cuda
|
||||
plugs:
|
||||
- network
|
||||
- network-bind
|
||||
- home
|
||||
fullnode-config:
|
||||
command: solana-fullnode-config
|
||||
plugs:
|
||||
- network
|
||||
- network-bind
|
||||
genesis:
|
||||
command: solana-genesis
|
||||
genesis-demo:
|
||||
command: solana-genesis-demo
|
||||
mint:
|
||||
command: solana-mint
|
||||
mint-demo:
|
||||
command: solana-mint-demo
|
||||
client-demo:
|
||||
command: solana-client-demo
|
||||
|
||||
parts:
|
||||
solana-cuda:
|
||||
plugin: rust
|
||||
rust-channel: stable
|
||||
rust-features:
|
||||
- erasure
|
||||
- cuda
|
||||
prime:
|
||||
- bin/solana-fullnode-cuda
|
||||
- usr/lib/libgf_complete.so.1
|
||||
- usr/lib/libJerasure.so.2
|
||||
override-build: |
|
||||
./fetch-perf-libs.sh
|
||||
snapcraftctl build
|
||||
mv $SNAPCRAFT_PART_INSTALL/bin/solana-fullnode $SNAPCRAFT_PART_INSTALL
|
||||
rm -rf $SNAPCRAFT_PART_INSTALL/bin/*
|
||||
mv $SNAPCRAFT_PART_INSTALL/solana-fullnode $SNAPCRAFT_PART_INSTALL/bin/solana-fullnode-cuda
|
||||
mkdir -p $SNAPCRAFT_PART_INSTALL/usr/lib/
|
||||
cp -f libJerasure.so $SNAPCRAFT_PART_INSTALL/usr/lib/libJerasure.so.2
|
||||
cp -f libgf_complete.so $SNAPCRAFT_PART_INSTALL/usr/lib/libgf_complete.so.1
|
||||
solana:
|
||||
plugin: rust
|
||||
rust-channel: stable
|
312
src/bank.rs
312
src/bank.rs
@@ -1,6 +1,6 @@
|
||||
//! The `bank` module tracks client balances, and the progress of pending
|
||||
//! transactions. It offers a high-level public API that signs transactions
|
||||
//! on behalf of the caller, and a private low-level API for when they have
|
||||
//! The `bank` module tracks client balances and the progress of smart
|
||||
//! contracts. It offers a high-level API that signs transactions
|
||||
//! on behalf of the caller, and a low-level API for when they have
|
||||
//! already been signed and verified.
|
||||
|
||||
extern crate libc;
|
||||
@@ -10,34 +10,82 @@ use entry::Entry;
|
||||
use hash::Hash;
|
||||
use mint::Mint;
|
||||
use payment_plan::{Payment, PaymentPlan, Witness};
|
||||
use rayon::prelude::*;
|
||||
use signature::{KeyPair, PublicKey, Signature};
|
||||
use std::collections::hash_map::Entry::Occupied;
|
||||
use std::collections::{HashMap, HashSet, VecDeque};
|
||||
use std::result;
|
||||
use std::sync::atomic::{AtomicIsize, AtomicUsize, Ordering};
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::sync::RwLock;
|
||||
use std::time::Instant;
|
||||
use timing::duration_as_us;
|
||||
use transaction::{Instruction, Plan, Transaction};
|
||||
|
||||
pub const MAX_ENTRY_IDS: usize = 1024 * 4;
|
||||
/// The number of most recent `last_id` values that the bank will track the signatures
|
||||
/// of. Once the bank discards a `last_id`, it will reject any transactions that use
|
||||
/// that `last_id` in a transaction. Lowering this value reduces memory consumption,
|
||||
/// but requires clients to update its `last_id` more frequently. Raising the value
|
||||
/// lengthens the time a client must wait to be certain a missing transaction will
|
||||
/// not be processed by the network.
|
||||
pub const MAX_ENTRY_IDS: usize = 1024 * 16;
|
||||
|
||||
/// Reasons a transaction might be rejected.
|
||||
#[derive(Debug, PartialEq, Eq)]
|
||||
pub enum BankError {
|
||||
/// Attempt to debit from `PublicKey`, but no found no record of a prior credit.
|
||||
AccountNotFound(PublicKey),
|
||||
|
||||
/// The requested debit from `PublicKey` has the potential to draw the balance
|
||||
/// below zero. This can occur when a debit and credit are processed in parallel.
|
||||
/// The bank may reject the debit or push it to a future entry.
|
||||
InsufficientFunds(PublicKey),
|
||||
|
||||
/// The bank has seen `Signature` before. This can occur under normal operation
|
||||
/// when a UDP packet is duplicated, as a user error from a client not updating
|
||||
/// its `last_id`, or as a double-spend attack.
|
||||
DuplicateSiganture(Signature),
|
||||
|
||||
/// The bank has not seen the given `last_id` or the transaction is too old and
|
||||
/// the `last_id` has been discarded.
|
||||
LastIdNotFound(Hash),
|
||||
|
||||
/// The transaction is invalid and has requested a debit or credit of negative
|
||||
/// tokens.
|
||||
NegativeTokens,
|
||||
}
|
||||
|
||||
pub type Result<T> = result::Result<T, BankError>;
|
||||
|
||||
/// The state of all accounts and contracts after processing its entries.
|
||||
pub struct Bank {
|
||||
balances: RwLock<HashMap<PublicKey, AtomicIsize>>,
|
||||
/// A map of account public keys to the balance in that account.
|
||||
balances: RwLock<HashMap<PublicKey, i64>>,
|
||||
|
||||
/// A map of smart contract transaction signatures to what remains of its payment
|
||||
/// plan. Each transaction that targets the plan should cause it to be reduced.
|
||||
/// Once it cannot be reduced, final payments are made and it is discarded.
|
||||
pending: RwLock<HashMap<Signature, Plan>>,
|
||||
last_ids: RwLock<VecDeque<(Hash, RwLock<HashSet<Signature>>)>>,
|
||||
|
||||
/// A FIFO queue of `last_id` items, where each item is a set of signatures
|
||||
/// that have been processed using that `last_id`. Rejected `last_id`
|
||||
/// values are so old that the `last_id` has been pulled out of the queue.
|
||||
last_ids: RwLock<VecDeque<Hash>>,
|
||||
|
||||
// Mapping of hashes to signature sets. The bank uses this data to
|
||||
/// reject transactions with signatures its seen before
|
||||
last_ids_sigs: RwLock<HashMap<Hash, HashSet<Signature>>>,
|
||||
|
||||
/// The set of trusted timekeepers. A Timestamp transaction from a `PublicKey`
|
||||
/// outside this set will be discarded. Note that if validators do not have the
|
||||
/// same set as leaders, they may interpret the ledger differently.
|
||||
time_sources: RwLock<HashSet<PublicKey>>,
|
||||
|
||||
/// The most recent timestamp from a trusted timekeeper. This timestamp is applied
|
||||
/// to every smart contract when it enters the system. If it is waiting on a
|
||||
/// timestamp witness before that timestamp, the bank will execute it immediately.
|
||||
last_time: RwLock<DateTime<Utc>>,
|
||||
|
||||
/// The number of transactions the bank has processed without error since the
|
||||
/// start of the ledger.
|
||||
transaction_count: AtomicUsize,
|
||||
}
|
||||
|
||||
@@ -48,11 +96,12 @@ impl Bank {
|
||||
balances: RwLock::new(HashMap::new()),
|
||||
pending: RwLock::new(HashMap::new()),
|
||||
last_ids: RwLock::new(VecDeque::new()),
|
||||
last_ids_sigs: RwLock::new(HashMap::new()),
|
||||
time_sources: RwLock::new(HashSet::new()),
|
||||
last_time: RwLock::new(Utc.timestamp(0, 0)),
|
||||
transaction_count: AtomicUsize::new(0),
|
||||
};
|
||||
bank.apply_payment(deposit);
|
||||
bank.apply_payment(deposit, &mut bank.balances.write().unwrap());
|
||||
bank
|
||||
}
|
||||
|
||||
@@ -67,78 +116,54 @@ impl Bank {
|
||||
bank
|
||||
}
|
||||
|
||||
/// Commit funds to the 'to' party.
|
||||
fn apply_payment(&self, payment: &Payment) {
|
||||
// First we check balances with a read lock to maximize potential parallelization.
|
||||
if self.balances
|
||||
.read()
|
||||
.expect("'balances' read lock in apply_payment")
|
||||
.contains_key(&payment.to)
|
||||
{
|
||||
let bals = self.balances.read().expect("'balances' read lock");
|
||||
bals[&payment.to].fetch_add(payment.tokens as isize, Ordering::Relaxed);
|
||||
/// Commit funds to the `payment.to` party.
|
||||
fn apply_payment(&self, payment: &Payment, balances: &mut HashMap<PublicKey, i64>) {
|
||||
if balances.contains_key(&payment.to) {
|
||||
*balances.get_mut(&payment.to).unwrap() += payment.tokens;
|
||||
} else {
|
||||
// Now we know the key wasn't present a nanosecond ago, but it might be there
|
||||
// by the time we aquire a write lock, so we'll have to check again.
|
||||
let mut bals = self.balances.write().expect("'balances' write lock");
|
||||
if bals.contains_key(&payment.to) {
|
||||
bals[&payment.to].fetch_add(payment.tokens as isize, Ordering::Relaxed);
|
||||
} else {
|
||||
bals.insert(payment.to, AtomicIsize::new(payment.tokens as isize));
|
||||
}
|
||||
balances.insert(payment.to, payment.tokens);
|
||||
}
|
||||
}
|
||||
|
||||
/// Return the last entry ID registered
|
||||
/// Return the last entry ID registered.
|
||||
pub fn last_id(&self) -> Hash {
|
||||
let last_ids = self.last_ids.read().expect("'last_ids' read lock");
|
||||
let last_item = last_ids.iter().last().expect("empty 'last_ids' list");
|
||||
last_item.0
|
||||
*last_item
|
||||
}
|
||||
|
||||
fn reserve_signature(signatures: &RwLock<HashSet<Signature>>, sig: &Signature) -> Result<()> {
|
||||
if signatures
|
||||
.read()
|
||||
.expect("'signatures' read lock")
|
||||
.contains(sig)
|
||||
{
|
||||
/// Store the given signature. The bank will reject any transaction with the same signature.
|
||||
fn reserve_signature(signatures: &mut HashSet<Signature>, sig: &Signature) -> Result<()> {
|
||||
if let Some(sig) = signatures.get(sig) {
|
||||
return Err(BankError::DuplicateSiganture(*sig));
|
||||
}
|
||||
signatures
|
||||
.write()
|
||||
.expect("'signatures' write lock")
|
||||
.insert(*sig);
|
||||
signatures.insert(*sig);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn forget_signature(signatures: &RwLock<HashSet<Signature>>, sig: &Signature) {
|
||||
signatures
|
||||
/// Forget the given `signature` because its transaction was rejected.
|
||||
fn forget_signature(signatures: &mut HashSet<Signature>, signature: &Signature) {
|
||||
signatures.remove(signature);
|
||||
}
|
||||
|
||||
/// Forget the given `signature` with `last_id` because the transaction was rejected.
|
||||
fn forget_signature_with_last_id(&self, signature: &Signature, last_id: &Hash) {
|
||||
if let Some(entry) = self.last_ids_sigs
|
||||
.write()
|
||||
.expect("'signatures' write lock in forget_signature")
|
||||
.remove(sig);
|
||||
}
|
||||
|
||||
fn forget_signature_with_last_id(&self, sig: &Signature, last_id: &Hash) {
|
||||
if let Some(entry) = self.last_ids
|
||||
.read()
|
||||
.expect("'last_ids' read lock in forget_signature_with_last_id")
|
||||
.iter()
|
||||
.rev()
|
||||
.find(|x| x.0 == *last_id)
|
||||
.get_mut(last_id)
|
||||
{
|
||||
Self::forget_signature(&entry.1, sig);
|
||||
Self::forget_signature(entry, signature);
|
||||
}
|
||||
}
|
||||
|
||||
fn reserve_signature_with_last_id(&self, sig: &Signature, last_id: &Hash) -> Result<()> {
|
||||
if let Some(entry) = self.last_ids
|
||||
.read()
|
||||
fn reserve_signature_with_last_id(&self, signature: &Signature, last_id: &Hash) -> Result<()> {
|
||||
if let Some(entry) = self.last_ids_sigs
|
||||
.write()
|
||||
.expect("'last_ids' read lock in reserve_signature_with_last_id")
|
||||
.iter()
|
||||
.rev()
|
||||
.find(|x| x.0 == *last_id)
|
||||
.get_mut(last_id)
|
||||
{
|
||||
return Self::reserve_signature(&entry.1, sig);
|
||||
return Self::reserve_signature(entry, signature);
|
||||
}
|
||||
Err(BankError::LastIdNotFound(*last_id))
|
||||
}
|
||||
@@ -151,63 +176,46 @@ impl Bank {
|
||||
let mut last_ids = self.last_ids
|
||||
.write()
|
||||
.expect("'last_ids' write lock in register_entry_id");
|
||||
let mut last_ids_sigs = self.last_ids_sigs
|
||||
.write()
|
||||
.expect("last_ids_sigs write lock");
|
||||
if last_ids.len() >= MAX_ENTRY_IDS {
|
||||
last_ids.pop_front();
|
||||
let id = last_ids.pop_front().unwrap();
|
||||
last_ids_sigs.remove(&id);
|
||||
}
|
||||
last_ids.push_back((*last_id, RwLock::new(HashSet::new())));
|
||||
last_ids_sigs.insert(*last_id, HashSet::new());
|
||||
last_ids.push_back(*last_id);
|
||||
}
|
||||
|
||||
/// Deduct tokens from the 'from' address the account has sufficient
|
||||
/// funds and isn't a duplicate.
|
||||
fn apply_debits(&self, tx: &Transaction) -> Result<()> {
|
||||
if let Instruction::NewContract(contract) = &tx.instruction {
|
||||
trace!("Transaction {}", contract.tokens);
|
||||
if contract.tokens < 0 {
|
||||
return Err(BankError::NegativeTokens);
|
||||
}
|
||||
}
|
||||
let bals = self.balances
|
||||
.read()
|
||||
.expect("'balances' read lock in apply_debits");
|
||||
let option = bals.get(&tx.from);
|
||||
|
||||
fn apply_debits(&self, tx: &Transaction, bals: &mut HashMap<PublicKey, i64>) -> Result<()> {
|
||||
let option = bals.get_mut(&tx.from);
|
||||
if option.is_none() {
|
||||
return Err(BankError::AccountNotFound(tx.from));
|
||||
}
|
||||
let bal = option.unwrap();
|
||||
|
||||
self.reserve_signature_with_last_id(&tx.sig, &tx.last_id)?;
|
||||
|
||||
loop {
|
||||
let result = if let Instruction::NewContract(contract) = &tx.instruction {
|
||||
let bal = option.expect("assignment of option to bal");
|
||||
let current = bal.load(Ordering::Relaxed) as i64;
|
||||
if let Instruction::NewContract(contract) = &tx.instruction {
|
||||
if contract.tokens < 0 {
|
||||
return Err(BankError::NegativeTokens);
|
||||
}
|
||||
|
||||
if current < contract.tokens {
|
||||
if *bal < contract.tokens {
|
||||
self.forget_signature_with_last_id(&tx.sig, &tx.last_id);
|
||||
return Err(BankError::InsufficientFunds(tx.from));
|
||||
}
|
||||
|
||||
bal.compare_exchange(
|
||||
current as isize,
|
||||
(current - contract.tokens) as isize,
|
||||
Ordering::Relaxed,
|
||||
Ordering::Relaxed,
|
||||
)
|
||||
} else {
|
||||
Ok(0)
|
||||
*bal -= contract.tokens;
|
||||
};
|
||||
|
||||
match result {
|
||||
Ok(_) => {
|
||||
self.transaction_count.fetch_add(1, Ordering::Relaxed);
|
||||
return Ok(());
|
||||
}
|
||||
Err(_) => continue,
|
||||
};
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn apply_credits(&self, tx: &Transaction) {
|
||||
/// Apply only a transaction's credits. Credits from multiple transactions
|
||||
/// may safely be applied in parallel.
|
||||
fn apply_credits(&self, tx: &Transaction, balances: &mut HashMap<PublicKey, i64>) {
|
||||
match &tx.instruction {
|
||||
Instruction::NewContract(contract) => {
|
||||
let mut plan = contract.plan.clone();
|
||||
@@ -215,8 +223,8 @@ impl Bank {
|
||||
.read()
|
||||
.expect("timestamp creation in apply_credits")));
|
||||
|
||||
if let Some(ref payment) = plan.final_payment() {
|
||||
self.apply_payment(payment);
|
||||
if let Some(payment) = plan.final_payment() {
|
||||
self.apply_payment(&payment, balances);
|
||||
} else {
|
||||
let mut pending = self.pending
|
||||
.write()
|
||||
@@ -233,44 +241,78 @@ impl Bank {
|
||||
}
|
||||
}
|
||||
|
||||
/// Process a Transaction.
|
||||
/// Process a Transaction. If it contains a payment plan that requires a witness
|
||||
/// to progress, the payment plan will be stored in the bank.
|
||||
fn process_transaction(&self, tx: &Transaction) -> Result<()> {
|
||||
self.apply_debits(tx)?;
|
||||
self.apply_credits(tx);
|
||||
let bals = &mut self.balances.write().unwrap();
|
||||
self.apply_debits(tx, bals)?;
|
||||
self.apply_credits(tx, bals);
|
||||
self.transaction_count.fetch_add(1, Ordering::Relaxed);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Process a batch of transactions.
|
||||
#[must_use]
|
||||
pub fn process_transactions(&self, txs: Vec<Transaction>) -> Vec<Result<Transaction>> {
|
||||
// Run all debits first to filter out any transactions that can't be processed
|
||||
// in parallel deterministically.
|
||||
info!("processing Transactions {}", txs.len());
|
||||
let results: Vec<_> = txs.into_par_iter()
|
||||
.map(|tx| self.apply_debits(&tx).map(|_| tx))
|
||||
let bals = &mut self.balances.write().unwrap();
|
||||
debug!("processing Transactions {}", txs.len());
|
||||
let txs_len = txs.len();
|
||||
let now = Instant::now();
|
||||
let results: Vec<_> = txs.into_iter()
|
||||
.map(|tx| self.apply_debits(&tx, bals).map(|_| tx))
|
||||
.collect(); // Calling collect() here forces all debits to complete before moving on.
|
||||
|
||||
results
|
||||
.into_par_iter()
|
||||
let debits = now.elapsed();
|
||||
let now = Instant::now();
|
||||
|
||||
let res: Vec<_> = results
|
||||
.into_iter()
|
||||
.map(|result| {
|
||||
result.map(|tx| {
|
||||
self.apply_credits(&tx);
|
||||
self.apply_credits(&tx, bals);
|
||||
tx
|
||||
})
|
||||
})
|
||||
.collect()
|
||||
.collect();
|
||||
|
||||
debug!(
|
||||
"debits: {} us credits: {:?} us tx: {}",
|
||||
duration_as_us(&debits),
|
||||
duration_as_us(&now.elapsed()),
|
||||
txs_len
|
||||
);
|
||||
|
||||
let mut tx_count = 0;
|
||||
for r in &res {
|
||||
if r.is_ok() {
|
||||
tx_count += 1;
|
||||
} else {
|
||||
info!("tx error: {:?}", r);
|
||||
}
|
||||
}
|
||||
self.transaction_count
|
||||
.fetch_add(tx_count, Ordering::Relaxed);
|
||||
res
|
||||
}
|
||||
|
||||
pub fn process_entries(&self, entries: Vec<Entry>) -> Result<()> {
|
||||
/// Process an ordered list of entries.
|
||||
pub fn process_entries<I>(&self, entries: I) -> Result<()>
|
||||
where
|
||||
I: IntoIterator<Item = Entry>,
|
||||
{
|
||||
for entry in entries {
|
||||
self.register_entry_id(&entry.id);
|
||||
if !entry.transactions.is_empty() {
|
||||
for result in self.process_transactions(entry.transactions) {
|
||||
result?;
|
||||
}
|
||||
}
|
||||
self.register_entry_id(&entry.id);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Process a Witness Signature.
|
||||
/// Process a Witness Signature. Any payment plans waiting on this signature
|
||||
/// will progress one step.
|
||||
fn apply_signature(&self, from: PublicKey, tx_sig: Signature) -> Result<()> {
|
||||
if let Occupied(mut e) = self.pending
|
||||
.write()
|
||||
@@ -279,7 +321,7 @@ impl Bank {
|
||||
{
|
||||
e.get_mut().apply_witness(&Witness::Signature(from));
|
||||
if let Some(payment) = e.get().final_payment() {
|
||||
self.apply_payment(&payment);
|
||||
self.apply_payment(&payment, &mut self.balances.write().unwrap());
|
||||
e.remove_entry();
|
||||
}
|
||||
};
|
||||
@@ -287,7 +329,8 @@ impl Bank {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Process a Witness Timestamp.
|
||||
/// Process a Witness Timestamp. Any payment plans waiting on this timestamp
|
||||
/// will progress one step.
|
||||
fn apply_timestamp(&self, from: PublicKey, dt: DateTime<Utc>) -> Result<()> {
|
||||
// If this is the first timestamp we've seen, it probably came from the genesis block,
|
||||
// so we'll trust it.
|
||||
@@ -326,8 +369,8 @@ impl Bank {
|
||||
plan.apply_witness(&Witness::Timestamp(*self.last_time
|
||||
.read()
|
||||
.expect("'last_time' read lock when creating timestamp")));
|
||||
if let Some(ref payment) = plan.final_payment() {
|
||||
self.apply_payment(payment);
|
||||
if let Some(payment) = plan.final_payment() {
|
||||
self.apply_payment(&payment, &mut self.balances.write().unwrap());
|
||||
completed.push(key.clone());
|
||||
}
|
||||
}
|
||||
@@ -373,7 +416,7 @@ impl Bank {
|
||||
let bals = self.balances
|
||||
.read()
|
||||
.expect("'balances' read lock in get_balance");
|
||||
bals.get(pubkey).map(|x| x.load(Ordering::Relaxed) as i64)
|
||||
bals.get(pubkey).map(|x| *x)
|
||||
}
|
||||
|
||||
pub fn transaction_count(&self) -> usize {
|
||||
@@ -385,11 +428,12 @@ impl Bank {
|
||||
mod tests {
|
||||
use super::*;
|
||||
use bincode::serialize;
|
||||
use entry::next_entry;
|
||||
use hash::hash;
|
||||
use signature::KeyPairUtil;
|
||||
|
||||
#[test]
|
||||
fn test_bank() {
|
||||
fn test_two_payments_to_one_party() {
|
||||
let mint = Mint::new(10_000);
|
||||
let pubkey = KeyPair::new().pubkey();
|
||||
let bank = Bank::new(&mint);
|
||||
@@ -406,7 +450,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_invalid_tokens() {
|
||||
fn test_negative_tokens() {
|
||||
let mint = Mint::new(1);
|
||||
let pubkey = KeyPair::new().pubkey();
|
||||
let bank = Bank::new(&mint);
|
||||
@@ -430,7 +474,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_invalid_transfer() {
|
||||
fn test_insufficient_funds() {
|
||||
let mint = Mint::new(11_000);
|
||||
let bank = Bank::new(&mint);
|
||||
let pubkey = KeyPair::new().pubkey();
|
||||
@@ -567,7 +611,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_max_entry_ids() {
|
||||
fn test_reject_old_last_id() {
|
||||
let mint = Mint::new(1);
|
||||
let bank = Bank::new(&mint);
|
||||
let sig = Signature::default();
|
||||
@@ -596,6 +640,25 @@ mod tests {
|
||||
// Assert bad transactions aren't counted.
|
||||
assert_eq!(bank.transaction_count(), 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_process_empty_entry_is_registered() {
|
||||
let mint = Mint::new(1);
|
||||
let bank = Bank::new(&mint);
|
||||
let keypair = KeyPair::new();
|
||||
let entry = next_entry(&mint.last_id(), 1, vec![]);
|
||||
let tx = Transaction::new(&mint.keypair(), keypair.pubkey(), 1, entry.id);
|
||||
|
||||
// First, ensure the TX is rejected because of the unregistered last ID
|
||||
assert_eq!(
|
||||
bank.process_transaction(&tx),
|
||||
Err(BankError::LastIdNotFound(entry.id))
|
||||
);
|
||||
|
||||
// Now ensure the TX is accepted despite pointing to the ID of an empty entry.
|
||||
bank.process_entries(vec![entry]).unwrap();
|
||||
assert!(bank.process_transaction(&tx).is_ok());
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(all(feature = "unstable", test))]
|
||||
@@ -605,6 +668,7 @@ mod bench {
|
||||
use bank::*;
|
||||
use bincode::serialize;
|
||||
use hash::hash;
|
||||
use rayon::prelude::*;
|
||||
use signature::KeyPairUtil;
|
||||
|
||||
#[bench]
|
||||
@@ -634,8 +698,8 @@ mod bench {
|
||||
.collect();
|
||||
bencher.iter(|| {
|
||||
// Since benchmarker runs this multiple times, we need to clear the signatures.
|
||||
for sigs in bank.last_ids.read().unwrap().iter() {
|
||||
sigs.1.write().unwrap().clear();
|
||||
for (_, sigs) in bank.last_ids_sigs.write().unwrap().iter_mut() {
|
||||
sigs.clear();
|
||||
}
|
||||
|
||||
assert!(
|
||||
|
@@ -1,28 +1,39 @@
|
||||
//! The `banking_stage` processes Transaction messages.
|
||||
//! The `banking_stage` processes Transaction messages. It is intended to be used
|
||||
//! to contruct a software pipeline. The stage uses all available CPU cores and
|
||||
//! can do its processing in parallel with signature verification on the GPU.
|
||||
|
||||
use bank::Bank;
|
||||
use bincode::deserialize;
|
||||
use counter::Counter;
|
||||
use packet;
|
||||
use packet::SharedPackets;
|
||||
use rayon::prelude::*;
|
||||
use record_stage::Signal;
|
||||
use result::Result;
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
|
||||
use std::sync::mpsc::{channel, Receiver, Sender};
|
||||
use std::sync::Arc;
|
||||
use std::thread::{spawn, JoinHandle};
|
||||
use std::thread::{Builder, JoinHandle};
|
||||
use std::time::Duration;
|
||||
use std::time::Instant;
|
||||
use timing;
|
||||
use transaction::Transaction;
|
||||
|
||||
/// Stores the stage's thread handle and output receiver.
|
||||
pub struct BankingStage {
|
||||
/// Handle to the stage's thread.
|
||||
pub thread_hdl: JoinHandle<()>,
|
||||
|
||||
/// Output receiver for the following stage.
|
||||
pub signal_receiver: Receiver<Signal>,
|
||||
}
|
||||
|
||||
impl BankingStage {
|
||||
/// Create the stage using `bank`. Exit when either `exit` is set or
|
||||
/// when `verified_receiver` or the stage's output receiver is dropped.
|
||||
/// Discard input packets using `packet_recycler` to minimize memory
|
||||
/// allocations in a previous stage such as the `fetch_stage`.
|
||||
pub fn new(
|
||||
bank: Arc<Bank>,
|
||||
exit: Arc<AtomicBool>,
|
||||
@@ -30,7 +41,9 @@ impl BankingStage {
|
||||
packet_recycler: packet::PacketRecycler,
|
||||
) -> Self {
|
||||
let (signal_sender, signal_receiver) = channel();
|
||||
let thread_hdl = spawn(move || loop {
|
||||
let thread_hdl = Builder::new()
|
||||
.name("solana-banking-stage".to_string())
|
||||
.spawn(move || loop {
|
||||
let e = Self::process_packets(
|
||||
bank.clone(),
|
||||
&verified_receiver,
|
||||
@@ -42,13 +55,16 @@ impl BankingStage {
|
||||
break;
|
||||
}
|
||||
}
|
||||
});
|
||||
})
|
||||
.unwrap();
|
||||
BankingStage {
|
||||
thread_hdl,
|
||||
signal_receiver,
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert the transactions from a blob of binary data to a vector of transactions and
|
||||
/// an unused `SocketAddr` that could be used to send a response.
|
||||
fn deserialize_transactions(p: &packet::Packets) -> Vec<Option<(Transaction, SocketAddr)>> {
|
||||
p.packets
|
||||
.par_iter()
|
||||
@@ -60,6 +76,8 @@ impl BankingStage {
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Process the incoming packets and send output `Signal` messages to `signal_sender`.
|
||||
/// Discard packets via `packet_recycler`.
|
||||
fn process_packets(
|
||||
bank: Arc<Bank>,
|
||||
verified_receiver: &Receiver<Vec<(SharedPackets, Vec<u8>)>>,
|
||||
@@ -77,6 +95,8 @@ impl BankingStage {
|
||||
timing::duration_as_ms(&recv_start.elapsed()),
|
||||
mms.len(),
|
||||
);
|
||||
let count = mms.iter().map(|x| x.1.len()).sum();
|
||||
static mut COUNTER: Counter = create_counter!("banking_stage_process_packets", 1);
|
||||
let proc_start = Instant::now();
|
||||
for (msgs, vers) in mms {
|
||||
let transactions = Self::deserialize_transactions(&msgs.read().unwrap());
|
||||
@@ -97,7 +117,7 @@ impl BankingStage {
|
||||
debug!("process_transactions");
|
||||
let results = bank.process_transactions(transactions);
|
||||
let transactions = results.into_iter().filter_map(|x| x.ok()).collect();
|
||||
signal_sender.send(Signal::Events(transactions))?;
|
||||
signal_sender.send(Signal::Transactions(transactions))?;
|
||||
debug!("done process_transactions");
|
||||
|
||||
packet_recycler.recycle(msgs);
|
||||
@@ -112,6 +132,7 @@ impl BankingStage {
|
||||
reqs_len,
|
||||
(reqs_len as f32) / (total_time_s)
|
||||
);
|
||||
inc_counter!(COUNTER, count, proc_start);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -250,29 +271,141 @@ mod bench {
|
||||
use self::test::Bencher;
|
||||
use bank::*;
|
||||
use banking_stage::BankingStage;
|
||||
use logger;
|
||||
use mint::Mint;
|
||||
use packet::{to_packets, PacketRecycler};
|
||||
use packet::{to_packets_chunked, PacketRecycler};
|
||||
use rayon::prelude::*;
|
||||
use record_stage::Signal;
|
||||
use signature::{KeyPair, KeyPairUtil};
|
||||
use std::iter;
|
||||
use std::sync::mpsc::channel;
|
||||
use std::sync::mpsc::{channel, Receiver};
|
||||
use std::sync::Arc;
|
||||
use transaction::Transaction;
|
||||
|
||||
fn check_txs(batches: usize, receiver: &Receiver<Signal>, ref_tx_count: usize) {
|
||||
let mut total = 0;
|
||||
for _ in 0..batches {
|
||||
let signal = receiver.recv().unwrap();
|
||||
if let Signal::Transactions(transactions) = signal {
|
||||
total += transactions.len();
|
||||
} else {
|
||||
assert!(false);
|
||||
}
|
||||
}
|
||||
assert_eq!(total, ref_tx_count);
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_stage(bencher: &mut Bencher) {
|
||||
let tx = 100_usize;
|
||||
let mint = Mint::new(1_000_000_000);
|
||||
let pubkey = KeyPair::new().pubkey();
|
||||
fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
|
||||
logger::setup();
|
||||
let tx = 30_000_usize;
|
||||
let mint_total = 1_000_000_000_000;
|
||||
let mint = Mint::new(mint_total);
|
||||
let num_dst_accounts = 8 * 1024;
|
||||
let num_src_accounts = 8 * 1024;
|
||||
|
||||
let srckeys: Vec<_> = (0..num_src_accounts).map(|_| KeyPair::new()).collect();
|
||||
let dstkeys: Vec<_> = (0..num_dst_accounts)
|
||||
.map(|_| KeyPair::new().pubkey())
|
||||
.collect();
|
||||
|
||||
info!("created keys src: {} dst: {}", srckeys.len(), dstkeys.len());
|
||||
|
||||
let transactions: Vec<_> = (0..tx)
|
||||
.map(|i| Transaction::new(&mint.keypair(), pubkey, i as i64, mint.last_id()))
|
||||
.map(|i| {
|
||||
Transaction::new(
|
||||
&srckeys[i % num_src_accounts],
|
||||
dstkeys[i % num_dst_accounts],
|
||||
i as i64,
|
||||
mint.last_id(),
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
|
||||
info!("created transactions");
|
||||
|
||||
let (verified_sender, verified_receiver) = channel();
|
||||
let (signal_sender, signal_receiver) = channel();
|
||||
let packet_recycler = PacketRecycler::default();
|
||||
let verified: Vec<_> = to_packets_chunked(&packet_recycler, transactions, 192)
|
||||
.into_iter()
|
||||
.map(|x| {
|
||||
let len = (*x).read().unwrap().packets.len();
|
||||
(x, iter::repeat(1).take(len).collect())
|
||||
})
|
||||
.collect();
|
||||
|
||||
let setup_transactions: Vec<_> = (0..num_src_accounts)
|
||||
.map(|i| {
|
||||
Transaction::new(
|
||||
&mint.keypair(),
|
||||
srckeys[i].pubkey(),
|
||||
mint_total / num_src_accounts as i64,
|
||||
mint.last_id(),
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
|
||||
let verified_setup: Vec<_> = to_packets_chunked(&packet_recycler, setup_transactions, tx)
|
||||
.into_iter()
|
||||
.map(|x| {
|
||||
let len = (*x).read().unwrap().packets.len();
|
||||
(x, iter::repeat(1).take(len).collect())
|
||||
})
|
||||
.collect();
|
||||
|
||||
bencher.iter(move || {
|
||||
let bank = Arc::new(Bank::new(&mint));
|
||||
|
||||
verified_sender.send(verified_setup.clone()).unwrap();
|
||||
BankingStage::process_packets(
|
||||
bank.clone(),
|
||||
&verified_receiver,
|
||||
&signal_sender,
|
||||
&packet_recycler,
|
||||
).unwrap();
|
||||
|
||||
check_txs(verified_setup.len(), &signal_receiver, num_src_accounts);
|
||||
|
||||
verified_sender.send(verified.clone()).unwrap();
|
||||
BankingStage::process_packets(
|
||||
bank.clone(),
|
||||
&verified_receiver,
|
||||
&signal_sender,
|
||||
&packet_recycler,
|
||||
).unwrap();
|
||||
|
||||
check_txs(verified.len(), &signal_receiver, tx);
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_banking_stage_single_from(bencher: &mut Bencher) {
|
||||
logger::setup();
|
||||
let tx = 20_000_usize;
|
||||
let mint = Mint::new(1_000_000_000_000);
|
||||
let mut pubkeys = Vec::new();
|
||||
let num_keys = 8;
|
||||
for _ in 0..num_keys {
|
||||
pubkeys.push(KeyPair::new().pubkey());
|
||||
}
|
||||
|
||||
let transactions: Vec<_> = (0..tx)
|
||||
.into_par_iter()
|
||||
.map(|i| {
|
||||
Transaction::new(
|
||||
&mint.keypair(),
|
||||
pubkeys[i % num_keys],
|
||||
i as i64,
|
||||
mint.last_id(),
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
|
||||
let (verified_sender, verified_receiver) = channel();
|
||||
let (signal_sender, signal_receiver) = channel();
|
||||
let packet_recycler = PacketRecycler::default();
|
||||
let verified: Vec<_> = to_packets(&packet_recycler, transactions)
|
||||
let verified: Vec<_> = to_packets_chunked(&packet_recycler, transactions, tx)
|
||||
.into_iter()
|
||||
.map(|x| {
|
||||
let len = (*x).read().unwrap().packets.len();
|
||||
@@ -289,12 +422,9 @@ mod bench {
|
||||
&signal_sender,
|
||||
&packet_recycler,
|
||||
).unwrap();
|
||||
let signal = signal_receiver.recv().unwrap();
|
||||
if let Signal::Events(ref transactions) = signal {
|
||||
assert_eq!(transactions.len(), tx);
|
||||
} else {
|
||||
assert!(false);
|
||||
}
|
||||
|
||||
check_txs(verified.len(), &signal_receiver, tx);
|
||||
});
|
||||
}
|
||||
|
||||
}
|
||||
|
@@ -1,30 +1,31 @@
|
||||
extern crate futures;
|
||||
extern crate atty;
|
||||
extern crate env_logger;
|
||||
extern crate getopts;
|
||||
extern crate isatty;
|
||||
extern crate pnet;
|
||||
extern crate rayon;
|
||||
extern crate serde_json;
|
||||
extern crate solana;
|
||||
|
||||
use futures::Future;
|
||||
use atty::{is, Stream};
|
||||
use getopts::Options;
|
||||
use isatty::stdin_isatty;
|
||||
use pnet::datalink;
|
||||
use rayon::prelude::*;
|
||||
use solana::crdt::{Crdt, ReplicatedData};
|
||||
use solana::crdt::{get_ip_addr, Crdt, ReplicatedData};
|
||||
use solana::hash::Hash;
|
||||
use solana::mint::MintDemo;
|
||||
use solana::ncp::Ncp;
|
||||
use solana::signature::{GenKeys, KeyPair, KeyPairUtil};
|
||||
use solana::streamer::default_window;
|
||||
use solana::thin_client::ThinClient;
|
||||
use solana::timing::{duration_as_ms, duration_as_s};
|
||||
use solana::transaction::Transaction;
|
||||
use std::env;
|
||||
use std::fs::File;
|
||||
use std::io::{stdin, Read};
|
||||
use std::net::{IpAddr, SocketAddr, UdpSocket};
|
||||
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
|
||||
use std::process::exit;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::thread::sleep;
|
||||
use std::thread::Builder;
|
||||
use std::thread::JoinHandle;
|
||||
use std::time::Duration;
|
||||
use std::time::Instant;
|
||||
@@ -38,26 +39,119 @@ fn print_usage(program: &str, opts: Options) {
|
||||
print!("{}", opts.usage(&brief));
|
||||
}
|
||||
|
||||
fn get_ip_addr() -> Option<IpAddr> {
|
||||
for iface in datalink::interfaces() {
|
||||
for p in iface.ips {
|
||||
if !p.ip().is_loopback() && !p.ip().is_multicast() {
|
||||
return Some(p.ip());
|
||||
fn sample_tx_count(
|
||||
thread_addr: Arc<RwLock<SocketAddr>>,
|
||||
exit: Arc<AtomicBool>,
|
||||
maxes: Arc<RwLock<Vec<(f64, u64)>>>,
|
||||
first_count: u64,
|
||||
v: ReplicatedData,
|
||||
sample_period: u64,
|
||||
) {
|
||||
let mut client = mk_client(&thread_addr, &v);
|
||||
let mut now = Instant::now();
|
||||
let mut initial_tx_count = client.transaction_count();
|
||||
let mut max_tps = 0.0;
|
||||
let mut total;
|
||||
loop {
|
||||
let tx_count = client.transaction_count();
|
||||
let duration = now.elapsed();
|
||||
now = Instant::now();
|
||||
let sample = tx_count - initial_tx_count;
|
||||
initial_tx_count = tx_count;
|
||||
println!("{}: Transactions processed {}", v.transactions_addr, sample);
|
||||
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
|
||||
let tps = (sample * 1_000_000_000) as f64 / ns as f64;
|
||||
if tps > max_tps {
|
||||
max_tps = tps;
|
||||
}
|
||||
println!("{}: {:.2} tps", v.transactions_addr, tps);
|
||||
total = tx_count - first_count;
|
||||
println!(
|
||||
"{}: Total Transactions processed {}",
|
||||
v.transactions_addr, total
|
||||
);
|
||||
sleep(Duration::new(sample_period, 0));
|
||||
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
println!("exiting validator thread");
|
||||
maxes.write().unwrap().push((max_tps, total));
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn generate_and_send_txs(
|
||||
client: &mut ThinClient,
|
||||
keypair_pairs: &Vec<&[KeyPair]>,
|
||||
leader: &ReplicatedData,
|
||||
txs: i64,
|
||||
last_id: &mut Hash,
|
||||
threads: usize,
|
||||
client_addr: Arc<RwLock<SocketAddr>>,
|
||||
) {
|
||||
println!(
|
||||
"Signing transactions... {} {}",
|
||||
keypair_pairs.len(),
|
||||
keypair_pairs[0].len()
|
||||
);
|
||||
let signing_start = Instant::now();
|
||||
let transactions: Vec<_> = keypair_pairs
|
||||
.par_iter()
|
||||
.map(|chunk| Transaction::new(&chunk[0], chunk[1].pubkey(), 1, *last_id))
|
||||
.collect();
|
||||
|
||||
let duration = signing_start.elapsed();
|
||||
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
|
||||
let bsps = txs as f64 / ns as f64;
|
||||
let nsps = ns as f64 / txs as f64;
|
||||
println!(
|
||||
"Done. {:.2} thousand signatures per second, {:.2} us per signature, {} ms total time",
|
||||
bsps * 1_000_000_f64,
|
||||
nsps / 1_000_f64,
|
||||
duration_as_ms(&duration),
|
||||
);
|
||||
|
||||
println!("Transfering {} transactions in {} batches", txs, threads);
|
||||
let transfer_start = Instant::now();
|
||||
let sz = transactions.len() / threads;
|
||||
let chunks: Vec<_> = transactions.chunks(sz).collect();
|
||||
chunks.into_par_iter().for_each(|txs| {
|
||||
println!(
|
||||
"Transferring 1 unit {} times... to {:?}",
|
||||
txs.len(),
|
||||
leader.transactions_addr
|
||||
);
|
||||
let client = mk_client(&client_addr, &leader);
|
||||
for tx in txs {
|
||||
client.transfer_signed(tx.clone()).unwrap();
|
||||
}
|
||||
None
|
||||
});
|
||||
println!(
|
||||
"Transfer done. {:?} ms {} tps",
|
||||
duration_as_ms(&transfer_start.elapsed()),
|
||||
txs as f32 / (duration_as_s(&transfer_start.elapsed()))
|
||||
);
|
||||
|
||||
*last_id = client.get_last_id();
|
||||
}
|
||||
|
||||
fn main() {
|
||||
env_logger::init();
|
||||
let mut threads = 4usize;
|
||||
let mut num_nodes = 10usize;
|
||||
let mut leader = "leader.json".to_string();
|
||||
let mut num_nodes = 1usize;
|
||||
let mut time_sec = 60;
|
||||
|
||||
let mut opts = Options::new();
|
||||
opts.optopt("l", "", "leader", "leader.json");
|
||||
opts.optopt("c", "", "client port", "port");
|
||||
opts.optopt("t", "", "number of threads", &format!("{}", threads));
|
||||
opts.optflag("d", "dyn", "detect network address dynamically");
|
||||
opts.optopt(
|
||||
"s",
|
||||
"",
|
||||
"send transactions for this many seconds",
|
||||
&format!("{}", time_sec),
|
||||
);
|
||||
opts.optopt(
|
||||
"n",
|
||||
"",
|
||||
@@ -79,15 +173,14 @@ fn main() {
|
||||
print_usage(&program, opts);
|
||||
return;
|
||||
}
|
||||
if matches.opt_present("l") {
|
||||
leader = matches.opt_str("l").unwrap();
|
||||
}
|
||||
let mut addr: SocketAddr = "127.0.0.1:8010".parse().unwrap();
|
||||
let mut addr: SocketAddr = "0.0.0.0:8100".parse().unwrap();
|
||||
if matches.opt_present("c") {
|
||||
let port = matches.opt_str("c").unwrap().parse().unwrap();
|
||||
addr.set_port(port);
|
||||
}
|
||||
if matches.opt_present("d") {
|
||||
addr.set_ip(get_ip_addr().unwrap());
|
||||
}
|
||||
let client_addr: Arc<RwLock<SocketAddr>> = Arc::new(RwLock::new(addr));
|
||||
if matches.opt_present("t") {
|
||||
threads = matches.opt_str("t").unwrap().parse().expect("integer");
|
||||
@@ -95,19 +188,29 @@ fn main() {
|
||||
if matches.opt_present("n") {
|
||||
num_nodes = matches.opt_str("n").unwrap().parse().expect("integer");
|
||||
}
|
||||
if matches.opt_present("s") {
|
||||
time_sec = matches.opt_str("s").unwrap().parse().expect("integer");
|
||||
}
|
||||
|
||||
let leader = if matches.opt_present("l") {
|
||||
read_leader(matches.opt_str("l").unwrap())
|
||||
} else {
|
||||
let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
|
||||
ReplicatedData::new_leader(&server_addr)
|
||||
};
|
||||
|
||||
let leader: ReplicatedData = read_leader(leader);
|
||||
let signal = Arc::new(AtomicBool::new(false));
|
||||
let mut c_threads = vec![];
|
||||
let validators = converge(
|
||||
&client_addr,
|
||||
&leader,
|
||||
signal.clone(),
|
||||
num_nodes + 2,
|
||||
num_nodes,
|
||||
&mut c_threads,
|
||||
);
|
||||
assert_eq!(validators.len(), num_nodes);
|
||||
|
||||
if stdin_isatty() {
|
||||
if is(Stream::Stdin) {
|
||||
eprintln!("nothing found on stdin, expected a json file");
|
||||
exit(1);
|
||||
}
|
||||
@@ -127,79 +230,81 @@ fn main() {
|
||||
let mut client = mk_client(&client_addr, &leader);
|
||||
|
||||
println!("Get last ID...");
|
||||
let last_id = client.get_last_id().wait().unwrap();
|
||||
let mut last_id = client.get_last_id();
|
||||
println!("Got last ID {:?}", last_id);
|
||||
|
||||
let rnd = GenKeys::new(demo.mint.keypair().public_key_bytes());
|
||||
let mut seed = [0u8; 32];
|
||||
seed.copy_from_slice(&demo.mint.keypair().public_key_bytes()[..32]);
|
||||
let rnd = GenKeys::new(seed);
|
||||
|
||||
println!("Creating keypairs...");
|
||||
let txs = demo.num_accounts / 2;
|
||||
let keypairs = rnd.gen_n_keypairs(demo.num_accounts);
|
||||
let keypair_pairs: Vec<_> = keypairs.chunks(2).collect();
|
||||
|
||||
println!("Signing transactions...");
|
||||
let now = Instant::now();
|
||||
let transactions: Vec<_> = keypair_pairs
|
||||
.into_par_iter()
|
||||
.map(|chunk| Transaction::new(&chunk[0], chunk[1].pubkey(), 1, last_id))
|
||||
.collect();
|
||||
let duration = now.elapsed();
|
||||
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
|
||||
let bsps = txs as f64 / ns as f64;
|
||||
let nsps = ns as f64 / txs as f64;
|
||||
println!(
|
||||
"Done. {} thousand signatures per second, {}us per signature",
|
||||
bsps * 1_000_000_f64,
|
||||
nsps / 1_000_f64
|
||||
);
|
||||
|
||||
let first_count = client.transaction_count();
|
||||
println!("initial count {}", first_count);
|
||||
|
||||
println!("Transfering {} transactions in {} batches", txs, threads);
|
||||
let sz = transactions.len() / threads;
|
||||
let chunks: Vec<_> = transactions.chunks(sz).collect();
|
||||
chunks.into_par_iter().for_each(|txs| {
|
||||
println!("Transferring 1 unit {} times... to", txs.len());
|
||||
let client = mk_client(&client_addr, &leader);
|
||||
for tx in txs {
|
||||
client.transfer_signed(tx.clone()).unwrap();
|
||||
}
|
||||
});
|
||||
|
||||
println!("Sampling tps every second...",);
|
||||
validators.into_par_iter().for_each(|val| {
|
||||
let mut client = mk_client(&client_addr, &val);
|
||||
let mut now = Instant::now();
|
||||
let mut initial_tx_count = client.transaction_count();
|
||||
for i in 0..100 {
|
||||
let tx_count = client.transaction_count();
|
||||
let duration = now.elapsed();
|
||||
now = Instant::now();
|
||||
let sample = tx_count - initial_tx_count;
|
||||
initial_tx_count = tx_count;
|
||||
println!(
|
||||
"{}: Transactions processed {}",
|
||||
val.transactions_addr, sample
|
||||
|
||||
// Setup a thread per validator to sample every period
|
||||
// collect the max transaction rate and total tx count seen
|
||||
let maxes = Arc::new(RwLock::new(Vec::new()));
|
||||
let sample_period = 1; // in seconds
|
||||
let v_threads: Vec<_> = validators
|
||||
.into_iter()
|
||||
.map(|v| {
|
||||
let exit = signal.clone();
|
||||
let thread_addr = client_addr.clone();
|
||||
let maxes = maxes.clone();
|
||||
Builder::new()
|
||||
.name("solana-client-sample".to_string())
|
||||
.spawn(move || {
|
||||
sample_tx_count(thread_addr, exit, maxes, first_count, v, sample_period);
|
||||
})
|
||||
.unwrap()
|
||||
})
|
||||
.collect();
|
||||
|
||||
// generate and send transactions for the specified duration
|
||||
let time = Duration::new(time_sec, 0);
|
||||
let now = Instant::now();
|
||||
while now.elapsed() < time {
|
||||
generate_and_send_txs(
|
||||
&mut client,
|
||||
&keypair_pairs,
|
||||
&leader,
|
||||
txs,
|
||||
&mut last_id,
|
||||
threads,
|
||||
client_addr.clone(),
|
||||
);
|
||||
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
|
||||
let tps = (sample * 1_000_000_000) as f64 / ns as f64;
|
||||
println!("{}: {} tps", val.transactions_addr, tps);
|
||||
let total = tx_count - first_count;
|
||||
println!(
|
||||
"{}: Total Transactions processed {}",
|
||||
val.transactions_addr, total
|
||||
);
|
||||
if total == transactions.len() as u64 {
|
||||
break;
|
||||
}
|
||||
if i > 20 && sample == 0 {
|
||||
break;
|
||||
}
|
||||
sleep(Duration::new(1, 0));
|
||||
}
|
||||
});
|
||||
|
||||
// Stop the sampling threads so it will collect the stats
|
||||
signal.store(true, Ordering::Relaxed);
|
||||
for t in v_threads {
|
||||
t.join().unwrap();
|
||||
}
|
||||
|
||||
// Compute/report stats
|
||||
let mut max_of_maxes = 0.0;
|
||||
let mut total_txs = 0;
|
||||
for (max, txs) in maxes.read().unwrap().iter() {
|
||||
if *max > max_of_maxes {
|
||||
max_of_maxes = *max;
|
||||
}
|
||||
total_txs += *txs;
|
||||
}
|
||||
println!(
|
||||
"\nHighest TPS: {:.2} sampling period {}s total transactions: {} clients: {}",
|
||||
max_of_maxes,
|
||||
sample_period,
|
||||
total_txs,
|
||||
maxes.read().unwrap().len()
|
||||
);
|
||||
|
||||
// join the crdt client threads
|
||||
for t in c_threads {
|
||||
t.join().unwrap();
|
||||
}
|
||||
@@ -211,6 +316,10 @@ fn mk_client(locked_addr: &Arc<RwLock<SocketAddr>>, r: &ReplicatedData) -> ThinC
|
||||
let transactions_socket = UdpSocket::bind(addr.clone()).unwrap();
|
||||
addr.set_port(port + 1);
|
||||
let requests_socket = UdpSocket::bind(addr.clone()).unwrap();
|
||||
requests_socket
|
||||
.set_read_timeout(Some(Duration::new(1, 0)))
|
||||
.unwrap();
|
||||
|
||||
addr.set_port(port + 2);
|
||||
ThinClient::new(
|
||||
r.requests_addr,
|
||||
@@ -227,7 +336,14 @@ fn spy_node(client_addr: &Arc<RwLock<SocketAddr>>) -> (ReplicatedData, UdpSocket
|
||||
addr.set_port(port + 1);
|
||||
let daddr = "0.0.0.0:0".parse().unwrap();
|
||||
let pubkey = KeyPair::new().pubkey();
|
||||
let node = ReplicatedData::new(pubkey, gossip.local_addr().unwrap(), daddr, daddr, daddr);
|
||||
let node = ReplicatedData::new(
|
||||
pubkey,
|
||||
gossip.local_addr().unwrap(),
|
||||
daddr,
|
||||
daddr,
|
||||
daddr,
|
||||
daddr,
|
||||
);
|
||||
(node, gossip)
|
||||
}
|
||||
|
||||
@@ -245,20 +361,18 @@ fn converge(
|
||||
spy_crdt.insert(&leader);
|
||||
spy_crdt.set_leader(leader.id);
|
||||
let spy_ref = Arc::new(RwLock::new(spy_crdt));
|
||||
let spy_window = default_window();
|
||||
let t_spy_listen = Crdt::listen(spy_ref.clone(), spy_window, spy_gossip, exit.clone());
|
||||
let t_spy_gossip = Crdt::gossip(spy_ref.clone(), exit.clone());
|
||||
//wait for the network to converge
|
||||
let window = default_window();
|
||||
let gossip_send_socket = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
|
||||
let ncp = Ncp::new(
|
||||
spy_ref.clone(),
|
||||
window.clone(),
|
||||
spy_gossip,
|
||||
gossip_send_socket,
|
||||
exit.clone(),
|
||||
).expect("DataReplicator::new");
|
||||
let mut rv = vec![];
|
||||
//wait for the network to converge, 30 seconds should be plenty
|
||||
for _ in 0..30 {
|
||||
let min = spy_ref.read().unwrap().convergence();
|
||||
if num_nodes as u64 == min {
|
||||
println!("converged!");
|
||||
break;
|
||||
}
|
||||
sleep(Duration::new(1, 0));
|
||||
}
|
||||
threads.push(t_spy_listen);
|
||||
threads.push(t_spy_gossip);
|
||||
let v: Vec<ReplicatedData> = spy_ref
|
||||
.read()
|
||||
.unwrap()
|
||||
@@ -266,12 +380,20 @@ fn converge(
|
||||
.values()
|
||||
.into_iter()
|
||||
.filter(|x| x.requests_addr != daddr)
|
||||
.map(|x| x.clone())
|
||||
.cloned()
|
||||
.collect();
|
||||
v.clone()
|
||||
if v.len() >= num_nodes {
|
||||
println!("CONVERGED!");
|
||||
rv.extend(v.into_iter());
|
||||
break;
|
||||
}
|
||||
sleep(Duration::new(1, 0));
|
||||
}
|
||||
threads.extend(ncp.thread_hdls.into_iter());
|
||||
rv
|
||||
}
|
||||
|
||||
fn read_leader(path: String) -> ReplicatedData {
|
||||
let file = File::open(path).expect("file");
|
||||
serde_json::from_reader(file).expect("parse")
|
||||
let file = File::open(path.clone()).expect(&format!("file not found: {}", path));
|
||||
serde_json::from_reader(file).expect(&format!("failed to parse {}", path))
|
||||
}
|
||||
|
168
src/bin/drone.rs
Normal file
168
src/bin/drone.rs
Normal file
@@ -0,0 +1,168 @@
|
||||
extern crate atty;
|
||||
extern crate bincode;
|
||||
extern crate env_logger;
|
||||
extern crate getopts;
|
||||
extern crate serde_json;
|
||||
extern crate solana;
|
||||
extern crate tokio;
|
||||
extern crate tokio_codec;
|
||||
extern crate tokio_io;
|
||||
|
||||
use atty::{is, Stream as atty_stream};
|
||||
use bincode::deserialize;
|
||||
use getopts::Options;
|
||||
use solana::crdt::{get_ip_addr, ReplicatedData};
|
||||
use solana::drone::{Drone, DroneRequest};
|
||||
use solana::mint::MintDemo;
|
||||
use std::env;
|
||||
use std::fs::File;
|
||||
use std::io::{stdin, Read};
|
||||
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
|
||||
use std::process::exit;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::thread;
|
||||
use tokio::net::TcpListener;
|
||||
use tokio::prelude::*;
|
||||
use tokio_codec::{BytesCodec, Decoder};
|
||||
|
||||
fn print_usage(program: &str, opts: Options) {
|
||||
let mut brief = format!("Usage: cat <mint-demo.json> | {} [options]\n\n", program);
|
||||
brief += " Run a Solana Drone to act as the custodian of the mint's remaining tokens\n";
|
||||
|
||||
print!("{}", opts.usage(&brief));
|
||||
}
|
||||
|
||||
fn main() {
|
||||
env_logger::init();
|
||||
let mut opts = Options::new();
|
||||
opts.optopt(
|
||||
"t",
|
||||
"",
|
||||
"time",
|
||||
"time slice over which to limit token requests to drone",
|
||||
);
|
||||
opts.optopt("c", "", "cap", "request limit for time slice");
|
||||
opts.optopt("l", "", "leader", "leader.json");
|
||||
opts.optflag("h", "help", "print help");
|
||||
let args: Vec<String> = env::args().collect();
|
||||
let matches = match opts.parse(&args[1..]) {
|
||||
Ok(m) => m,
|
||||
Err(e) => {
|
||||
eprintln!("{}", e);
|
||||
exit(1);
|
||||
}
|
||||
};
|
||||
if matches.opt_present("h") {
|
||||
let program = args[0].clone();
|
||||
print_usage(&program, opts);
|
||||
return;
|
||||
}
|
||||
let time_slice: Option<u64>;
|
||||
if matches.opt_present("t") {
|
||||
time_slice = matches
|
||||
.opt_str("t")
|
||||
.expect("unexpected string from input")
|
||||
.parse()
|
||||
.ok();
|
||||
} else {
|
||||
time_slice = None;
|
||||
}
|
||||
let request_cap: Option<u64>;
|
||||
if matches.opt_present("c") {
|
||||
request_cap = matches
|
||||
.opt_str("c")
|
||||
.expect("unexpected string from input")
|
||||
.parse()
|
||||
.ok();
|
||||
} else {
|
||||
request_cap = None;
|
||||
}
|
||||
let leader = if matches.opt_present("l") {
|
||||
read_leader(matches.opt_str("l").unwrap())
|
||||
} else {
|
||||
let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
|
||||
ReplicatedData::new_leader(&server_addr)
|
||||
};
|
||||
|
||||
if is(atty_stream::Stdin) {
|
||||
eprintln!("nothing found on stdin, expected a json file");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
let mut buffer = String::new();
|
||||
let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
|
||||
if num_bytes == 0 {
|
||||
eprintln!("empty file on stdin, expected a json file");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
let demo: MintDemo = serde_json::from_str(&buffer).unwrap_or_else(|e| {
|
||||
eprintln!("failed to parse json: {}", e);
|
||||
exit(1);
|
||||
});
|
||||
|
||||
let mint_keypair = demo.mint.keypair();
|
||||
|
||||
let mut drone_addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
|
||||
drone_addr.set_ip(get_ip_addr().unwrap());
|
||||
|
||||
let drone = Arc::new(Mutex::new(Drone::new(
|
||||
mint_keypair,
|
||||
drone_addr,
|
||||
leader.transactions_addr,
|
||||
leader.requests_addr,
|
||||
time_slice,
|
||||
request_cap,
|
||||
)));
|
||||
|
||||
let drone1 = drone.clone();
|
||||
thread::spawn(move || loop {
|
||||
let time = drone1.lock().unwrap().time_slice;
|
||||
thread::sleep(time);
|
||||
drone1.lock().unwrap().clear_request_count();
|
||||
});
|
||||
|
||||
let socket = TcpListener::bind(&drone_addr).unwrap();
|
||||
println!("Drone started. Listening on: {}", drone_addr);
|
||||
let done = socket
|
||||
.incoming()
|
||||
.map_err(|e| println!("failed to accept socket; error = {:?}", e))
|
||||
.for_each(move |socket| {
|
||||
let drone2 = drone.clone();
|
||||
// let client_ip = socket.peer_addr().expect("drone peer_addr").ip();
|
||||
let framed = BytesCodec::new().framed(socket);
|
||||
let (_writer, reader) = framed.split();
|
||||
|
||||
let processor = reader
|
||||
.for_each(move |bytes| {
|
||||
let req: DroneRequest =
|
||||
deserialize(&bytes).expect("deserialize packet in drone");
|
||||
println!("Airdrop requested...");
|
||||
// let res = drone2.lock().unwrap().check_rate_limit(client_ip);
|
||||
let res1 = drone2.lock().unwrap().send_airdrop(req);
|
||||
match res1 {
|
||||
Ok(_) => println!("Airdrop sent!"),
|
||||
Err(_) => println!("Request limit reached for this time slice"),
|
||||
}
|
||||
Ok(())
|
||||
})
|
||||
.and_then(|()| {
|
||||
println!("Socket received FIN packet and closed connection");
|
||||
Ok(())
|
||||
})
|
||||
.or_else(|err| {
|
||||
println!("Socket closed with error: {:?}", err);
|
||||
Err(err)
|
||||
})
|
||||
.then(|result| {
|
||||
println!("Socket closed with result: {:?}", result);
|
||||
Ok(())
|
||||
});
|
||||
tokio::spawn(processor)
|
||||
});
|
||||
tokio::run(done);
|
||||
}
|
||||
fn read_leader(path: String) -> ReplicatedData {
|
||||
let file = File::open(path.clone()).expect(&format!("file not found: {}", path));
|
||||
serde_json::from_reader(file).expect(&format!("failed to parse {}", path))
|
||||
}
|
52
src/bin/fullnode-config.rs
Normal file
52
src/bin/fullnode-config.rs
Normal file
@@ -0,0 +1,52 @@
|
||||
extern crate getopts;
|
||||
extern crate serde_json;
|
||||
extern crate solana;
|
||||
|
||||
use getopts::Options;
|
||||
use solana::crdt::{get_ip_addr, parse_port_or_addr, ReplicatedData};
|
||||
use std::env;
|
||||
use std::io;
|
||||
use std::net::SocketAddr;
|
||||
use std::process::exit;
|
||||
|
||||
fn print_usage(program: &str, opts: Options) {
|
||||
let mut brief = format!("Usage: {} [options]\n\n", program);
|
||||
brief += " Create a solana fullnode config file\n";
|
||||
|
||||
print!("{}", opts.usage(&brief));
|
||||
}
|
||||
|
||||
fn main() {
|
||||
let mut opts = Options::new();
|
||||
opts.optopt("b", "", "bind", "bind to port or address");
|
||||
opts.optflag("d", "dyn", "detect network address dynamically");
|
||||
opts.optflag("h", "help", "print help");
|
||||
let args: Vec<String> = env::args().collect();
|
||||
let matches = match opts.parse(&args[1..]) {
|
||||
Ok(m) => m,
|
||||
Err(e) => {
|
||||
eprintln!("{}", e);
|
||||
exit(1);
|
||||
}
|
||||
};
|
||||
if matches.opt_present("h") {
|
||||
let program = args[0].clone();
|
||||
print_usage(&program, opts);
|
||||
return;
|
||||
}
|
||||
|
||||
let bind_addr: SocketAddr = {
|
||||
let mut bind_addr = parse_port_or_addr(matches.opt_str("b"));
|
||||
if matches.opt_present("d") {
|
||||
let ip = get_ip_addr().unwrap();
|
||||
bind_addr.set_ip(ip);
|
||||
}
|
||||
bind_addr
|
||||
};
|
||||
|
||||
// we need all the receiving sockets to be bound within the expected
|
||||
// port range that we open on aws
|
||||
let repl_data = ReplicatedData::new_leader(&bind_addr);
|
||||
let stdout = io::stdout();
|
||||
serde_json::to_writer(stdout, &repl_data).expect("serialize");
|
||||
}
|
@@ -1,24 +1,22 @@
|
||||
extern crate atty;
|
||||
extern crate env_logger;
|
||||
extern crate getopts;
|
||||
extern crate isatty;
|
||||
extern crate pnet;
|
||||
extern crate log;
|
||||
extern crate serde_json;
|
||||
extern crate solana;
|
||||
|
||||
use atty::{is, Stream};
|
||||
use getopts::Options;
|
||||
use isatty::stdin_isatty;
|
||||
use pnet::datalink;
|
||||
use solana::bank::Bank;
|
||||
use solana::crdt::ReplicatedData;
|
||||
use solana::entry::Entry;
|
||||
use solana::payment_plan::PaymentPlan;
|
||||
use solana::server::Server;
|
||||
use solana::signature::{KeyPair, KeyPairUtil};
|
||||
use solana::transaction::Instruction;
|
||||
use std::env;
|
||||
use std::fs::File;
|
||||
use std::io::{stdin, Read};
|
||||
use std::net::{IpAddr, SocketAddr, UdpSocket};
|
||||
use std::io::{stdin, stdout, BufRead, Write};
|
||||
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
|
||||
use std::process::exit;
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::sync::Arc;
|
||||
@@ -34,19 +32,23 @@ fn print_usage(program: &str, opts: Options) {
|
||||
}
|
||||
|
||||
fn main() {
|
||||
env_logger::init().unwrap();
|
||||
env_logger::init();
|
||||
let mut opts = Options::new();
|
||||
opts.optopt("b", "", "bind", "bind to port or address");
|
||||
opts.optflag("d", "dyn", "detect network address dynamically");
|
||||
opts.optopt("s", "", "save", "save my identity to path.json");
|
||||
opts.optopt("l", "", "load", "load my identity to path.json");
|
||||
opts.optflag("h", "help", "print help");
|
||||
opts.optopt("l", "", "run with the identity found in FILE", "FILE");
|
||||
opts.optopt(
|
||||
"v",
|
||||
"t",
|
||||
"",
|
||||
"validator",
|
||||
"run as replicate with path to leader.json",
|
||||
"testnet; connect to the network at this gossip entry point",
|
||||
"HOST:PORT",
|
||||
);
|
||||
opts.optopt(
|
||||
"o",
|
||||
"",
|
||||
"output log to FILE, defaults to stdout (ignored by validators)",
|
||||
"FILE",
|
||||
);
|
||||
|
||||
let args: Vec<String> = env::args().collect();
|
||||
let matches = match opts.parse(&args[1..]) {
|
||||
Ok(m) => m,
|
||||
@@ -60,89 +62,76 @@ fn main() {
|
||||
print_usage(&program, opts);
|
||||
return;
|
||||
}
|
||||
let bind_addr: SocketAddr = {
|
||||
let mut bind_addr = parse_port_or_addr(matches.opt_str("b"));
|
||||
if matches.opt_present("d") {
|
||||
let ip = get_ip_addr().unwrap();
|
||||
bind_addr.set_ip(ip);
|
||||
}
|
||||
bind_addr
|
||||
};
|
||||
if stdin_isatty() {
|
||||
if is(Stream::Stdin) {
|
||||
eprintln!("nothing found on stdin, expected a log file");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
let mut buffer = String::new();
|
||||
let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
|
||||
if num_bytes == 0 {
|
||||
eprintln!("empty file on stdin, expected a log file");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
eprintln!("Initializing...");
|
||||
let mut entries = buffer.lines().map(|line| {
|
||||
serde_json::from_str(&line).unwrap_or_else(|e| {
|
||||
let stdin = stdin();
|
||||
let mut entries = stdin.lock().lines().map(|line| {
|
||||
let entry: Entry = serde_json::from_str(&line.unwrap()).unwrap_or_else(|e| {
|
||||
eprintln!("failed to parse json: {}", e);
|
||||
exit(1);
|
||||
})
|
||||
});
|
||||
|
||||
entry
|
||||
});
|
||||
eprintln!("done parsing...");
|
||||
|
||||
// The first item in the ledger is required to be an entry with zero num_hashes,
|
||||
// which implies its id can be used as the ledger's seed.
|
||||
let entry0 = entries.next().unwrap();
|
||||
let entry0 = entries.next().expect("invalid ledger: empty");
|
||||
|
||||
// The second item in the ledger is a special transaction where the to and from
|
||||
// fields are the same. That entry should be treated as a deposit, not a
|
||||
// transfer to oneself.
|
||||
let entry1: Entry = entries.next().unwrap();
|
||||
let entry1 = entries
|
||||
.next()
|
||||
.expect("invalid ledger: need at least 2 entries");
|
||||
let tx = &entry1.transactions[0];
|
||||
let deposit = if let Instruction::NewContract(contract) = &tx.instruction {
|
||||
contract.plan.final_payment()
|
||||
} else {
|
||||
None
|
||||
};
|
||||
}.expect("invalid ledger, needs to start with a contract");
|
||||
|
||||
eprintln!("creating bank...");
|
||||
|
||||
let bank = Bank::new_from_deposit(&deposit.unwrap());
|
||||
let bank = Bank::new_from_deposit(&deposit);
|
||||
bank.register_entry_id(&entry0.id);
|
||||
bank.register_entry_id(&entry1.id);
|
||||
|
||||
eprintln!("processing entries...");
|
||||
|
||||
let mut last_id = entry1.id;
|
||||
for entry in entries {
|
||||
last_id = entry.id;
|
||||
let results = bank.process_transactions(entry.transactions);
|
||||
for result in results {
|
||||
if let Err(e) = result {
|
||||
eprintln!("failed to process transaction {:?}", e);
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
bank.register_entry_id(&last_id);
|
||||
}
|
||||
bank.process_entries(entries).expect("process_entries");
|
||||
|
||||
eprintln!("creating networking stack...");
|
||||
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
// we need all the receiving sockets to be bound within the expected
|
||||
// port range that we open on aws
|
||||
let mut repl_data = make_repl_data(&bind_addr);
|
||||
let bind_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
|
||||
let mut repl_data = ReplicatedData::new_leader(&bind_addr);
|
||||
if matches.opt_present("l") {
|
||||
let path = matches.opt_str("l").unwrap();
|
||||
if let Ok(file) = File::open(path) {
|
||||
repl_data = serde_json::from_reader(file).expect("parse");
|
||||
if let Ok(file) = File::open(path.clone()) {
|
||||
if let Ok(data) = serde_json::from_reader(file) {
|
||||
repl_data = data;
|
||||
} else {
|
||||
eprintln!("failed to parse {}", path);
|
||||
exit(1);
|
||||
}
|
||||
} else {
|
||||
eprintln!("failed to read {}", path);
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
let threads = if matches.opt_present("v") {
|
||||
eprintln!("starting validator... {}", repl_data.requests_addr);
|
||||
let path = matches.opt_str("v").unwrap();
|
||||
let file = File::open(path).expect("file");
|
||||
let leader = serde_json::from_reader(file).expect("parse");
|
||||
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let threads = if matches.opt_present("t") {
|
||||
let testnet_address_string = matches.opt_str("t").unwrap();
|
||||
eprintln!(
|
||||
"starting validator... {} connecting to {}",
|
||||
repl_data.requests_addr, testnet_address_string
|
||||
);
|
||||
let testnet_addr = testnet_address_string.parse().unwrap();
|
||||
let newtwork_entry_point = ReplicatedData::new_entry_point(testnet_addr);
|
||||
let s = Server::new_validator(
|
||||
bank,
|
||||
repl_data.clone(),
|
||||
@@ -150,17 +139,26 @@ fn main() {
|
||||
UdpSocket::bind("0.0.0.0:0").unwrap(),
|
||||
UdpSocket::bind(repl_data.replicate_addr).unwrap(),
|
||||
UdpSocket::bind(repl_data.gossip_addr).unwrap(),
|
||||
leader,
|
||||
UdpSocket::bind(repl_data.repair_addr).unwrap(),
|
||||
newtwork_entry_point,
|
||||
exit.clone(),
|
||||
);
|
||||
s.thread_hdls
|
||||
} else {
|
||||
eprintln!("starting leader... {}", repl_data.requests_addr);
|
||||
repl_data.current_leader_id = repl_data.id.clone();
|
||||
let file = File::create("leader.log").expect("leader.log create");
|
||||
|
||||
let outfile: Box<Write + Send + 'static> = if matches.opt_present("o") {
|
||||
let path = matches.opt_str("o").unwrap();
|
||||
Box::new(
|
||||
File::create(&path).expect(&format!("unable to open output file \"{}\"", path)),
|
||||
)
|
||||
} else {
|
||||
Box::new(stdout())
|
||||
};
|
||||
|
||||
let server = Server::new_leader(
|
||||
bank,
|
||||
last_id,
|
||||
//Some(Duration::from_millis(1000)),
|
||||
None,
|
||||
repl_data.clone(),
|
||||
@@ -170,77 +168,13 @@ fn main() {
|
||||
UdpSocket::bind("0.0.0.0:0").unwrap(),
|
||||
UdpSocket::bind(repl_data.gossip_addr).unwrap(),
|
||||
exit.clone(),
|
||||
file,
|
||||
outfile,
|
||||
);
|
||||
server.thread_hdls
|
||||
};
|
||||
if matches.opt_present("s") {
|
||||
let path = matches.opt_str("s").unwrap();
|
||||
let file = File::create(path).expect("file");
|
||||
serde_json::to_writer(file, &repl_data).expect("serialize");
|
||||
}
|
||||
eprintln!("Ready. Listening on {}", repl_data.transactions_addr);
|
||||
|
||||
for t in threads {
|
||||
t.join().expect("join");
|
||||
}
|
||||
}
|
||||
|
||||
fn next_port(server_addr: &SocketAddr, nxt: u16) -> SocketAddr {
|
||||
let mut gossip_addr = server_addr.clone();
|
||||
gossip_addr.set_port(server_addr.port() + nxt);
|
||||
gossip_addr
|
||||
}
|
||||
|
||||
fn make_repl_data(bind_addr: &SocketAddr) -> ReplicatedData {
|
||||
let transactions_addr = bind_addr.clone();
|
||||
let gossip_addr = next_port(&bind_addr, 1);
|
||||
let replicate_addr = next_port(&bind_addr, 2);
|
||||
let requests_addr = next_port(&bind_addr, 3);
|
||||
let pubkey = KeyPair::new().pubkey();
|
||||
ReplicatedData::new(
|
||||
pubkey,
|
||||
gossip_addr,
|
||||
replicate_addr,
|
||||
requests_addr,
|
||||
transactions_addr,
|
||||
)
|
||||
}
|
||||
|
||||
fn parse_port_or_addr(optstr: Option<String>) -> SocketAddr {
|
||||
let daddr: SocketAddr = "0.0.0.0:8000".parse().expect("default socket address");
|
||||
if let Some(addrstr) = optstr {
|
||||
if let Ok(port) = addrstr.parse() {
|
||||
let mut addr = daddr.clone();
|
||||
addr.set_port(port);
|
||||
addr
|
||||
} else if let Ok(addr) = addrstr.parse() {
|
||||
addr
|
||||
} else {
|
||||
daddr
|
||||
}
|
||||
} else {
|
||||
daddr
|
||||
}
|
||||
}
|
||||
|
||||
fn get_ip_addr() -> Option<IpAddr> {
|
||||
for iface in datalink::interfaces() {
|
||||
for p in iface.ips {
|
||||
if !p.ip().is_loopback() && !p.ip().is_multicast() {
|
||||
return Some(p.ip());
|
||||
}
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_port_or_addr() {
|
||||
let p1 = parse_port_or_addr(Some("9000".to_string()));
|
||||
assert_eq!(p1.port(), 9000);
|
||||
let p2 = parse_port_or_addr(Some("127.0.0.1:7000".to_string()));
|
||||
assert_eq!(p2.port(), 7000);
|
||||
let p3 = parse_port_or_addr(None);
|
||||
assert_eq!(p3.port(), 8000);
|
||||
}
|
||||
|
@@ -1,12 +1,13 @@
|
||||
extern crate isatty;
|
||||
extern crate atty;
|
||||
extern crate rayon;
|
||||
extern crate serde_json;
|
||||
extern crate solana;
|
||||
|
||||
use isatty::stdin_isatty;
|
||||
use atty::{is, Stream};
|
||||
use rayon::prelude::*;
|
||||
use solana::bank::MAX_ENTRY_IDS;
|
||||
use solana::entry::{next_entry, Entry};
|
||||
use solana::entry::next_entry;
|
||||
use solana::ledger::next_entries;
|
||||
use solana::mint::MintDemo;
|
||||
use solana::signature::{GenKeys, KeyPairUtil};
|
||||
use solana::transaction::Transaction;
|
||||
@@ -15,7 +16,7 @@ use std::process::exit;
|
||||
|
||||
// Generate a ledger with lots and lots of accounts.
|
||||
fn main() {
|
||||
if stdin_isatty() {
|
||||
if is(Stream::Stdin) {
|
||||
eprintln!("nothing found on stdin, expected a json file");
|
||||
exit(1);
|
||||
}
|
||||
@@ -32,42 +33,50 @@ fn main() {
|
||||
exit(1);
|
||||
});
|
||||
|
||||
let rnd = GenKeys::new(demo.mint.keypair().public_key_bytes());
|
||||
let mut seed = [0u8; 32];
|
||||
seed.copy_from_slice(&demo.mint.keypair().public_key_bytes()[..32]);
|
||||
let rnd = GenKeys::new(seed);
|
||||
let num_accounts = demo.num_accounts;
|
||||
let tokens_per_user = 1_000;
|
||||
let tokens_per_user = 500;
|
||||
|
||||
let keypairs = rnd.gen_n_keypairs(num_accounts);
|
||||
|
||||
let mint_keypair = demo.mint.keypair();
|
||||
let last_id = demo.mint.last_id();
|
||||
|
||||
eprintln!("Signing {} transactions...", num_accounts);
|
||||
let transactions: Vec<_> = keypairs
|
||||
.into_par_iter()
|
||||
.map(|rando| {
|
||||
let last_id = demo.mint.last_id();
|
||||
Transaction::new(&mint_keypair, rando.pubkey(), tokens_per_user, last_id)
|
||||
})
|
||||
.collect();
|
||||
|
||||
for entry in demo.mint.create_entries() {
|
||||
println!("{}", serde_json::to_string(&entry).unwrap());
|
||||
}
|
||||
|
||||
eprintln!("Logging the creation of {} accounts...", num_accounts);
|
||||
let entry = Entry::new(&last_id, 0, transactions);
|
||||
println!("{}", serde_json::to_string(&entry).unwrap());
|
||||
|
||||
eprintln!("Creating {} empty entries...", MAX_ENTRY_IDS);
|
||||
|
||||
// Offer client lots of entry IDs to use for each transaction's last_id.
|
||||
let mut last_id = last_id;
|
||||
let mut last_ids = vec![];
|
||||
for _ in 0..MAX_ENTRY_IDS {
|
||||
let entry = next_entry(&last_id, 1, vec![]);
|
||||
last_id = entry.id;
|
||||
last_ids.push(last_id);
|
||||
let serialized = serde_json::to_string(&entry).unwrap_or_else(|e| {
|
||||
eprintln!("failed to serialize: {}", e);
|
||||
exit(1);
|
||||
});
|
||||
println!("{}", serialized);
|
||||
}
|
||||
|
||||
eprintln!("Creating {} transactions...", num_accounts);
|
||||
let transactions: Vec<_> = keypairs
|
||||
.into_par_iter()
|
||||
.enumerate()
|
||||
.map(|(i, rando)| {
|
||||
let last_id = last_ids[i % MAX_ENTRY_IDS];
|
||||
Transaction::new(&mint_keypair, rando.pubkey(), tokens_per_user, last_id)
|
||||
})
|
||||
.collect();
|
||||
|
||||
eprintln!("Logging the creation of {} accounts...", num_accounts);
|
||||
let entries = next_entries(&last_id, 0, transactions);
|
||||
for entry in entries {
|
||||
println!("{}", serde_json::to_string(&entry).unwrap());
|
||||
}
|
||||
}
|
||||
|
@@ -1,16 +1,16 @@
|
||||
//! A command-line executable for generating the chain's genesis block.
|
||||
|
||||
extern crate isatty;
|
||||
extern crate atty;
|
||||
extern crate serde_json;
|
||||
extern crate solana;
|
||||
|
||||
use isatty::stdin_isatty;
|
||||
use atty::{is, Stream};
|
||||
use solana::mint::Mint;
|
||||
use std::io::{stdin, Read};
|
||||
use std::process::exit;
|
||||
|
||||
fn main() {
|
||||
if stdin_isatty() {
|
||||
if is(Stream::Stdin) {
|
||||
eprintln!("nothing found on stdin, expected a json file");
|
||||
exit(1);
|
||||
}
|
||||
|
@@ -1,13 +1,21 @@
|
||||
extern crate atty;
|
||||
extern crate rayon;
|
||||
extern crate ring;
|
||||
extern crate serde_json;
|
||||
extern crate solana;
|
||||
|
||||
use atty::{is, Stream};
|
||||
use solana::mint::{Mint, MintDemo};
|
||||
use std::io;
|
||||
use std::process::exit;
|
||||
|
||||
fn main() {
|
||||
let mut input_text = String::new();
|
||||
if is(Stream::Stdin) {
|
||||
eprintln!("nothing found on stdin, expected a token number");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
io::stdin().read_line(&mut input_text).unwrap();
|
||||
let trimmed = input_text.trim();
|
||||
let tokens = trimmed.parse::<i64>().unwrap();
|
||||
|
@@ -1,15 +1,15 @@
|
||||
extern crate isatty;
|
||||
extern crate atty;
|
||||
extern crate serde_json;
|
||||
extern crate solana;
|
||||
|
||||
use isatty::stdin_isatty;
|
||||
use atty::{is, Stream};
|
||||
use solana::mint::Mint;
|
||||
use std::io;
|
||||
use std::process::exit;
|
||||
|
||||
fn main() {
|
||||
let mut input_text = String::new();
|
||||
if stdin_isatty() {
|
||||
if is(Stream::Stdin) {
|
||||
eprintln!("nothing found on stdin, expected a token number");
|
||||
exit(1);
|
||||
}
|
||||
|
47
src/blob_fetch_stage.rs
Normal file
47
src/blob_fetch_stage.rs
Normal file
@@ -0,0 +1,47 @@
|
||||
//! The `blob_fetch_stage` pulls blobs from UDP sockets and sends it to a channel.
|
||||
|
||||
use packet;
|
||||
use std::net::UdpSocket;
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::sync::mpsc::channel;
|
||||
use std::sync::Arc;
|
||||
use std::thread::JoinHandle;
|
||||
use streamer;
|
||||
|
||||
pub struct BlobFetchStage {
|
||||
pub blob_receiver: streamer::BlobReceiver,
|
||||
pub thread_hdls: Vec<JoinHandle<()>>,
|
||||
}
|
||||
|
||||
impl BlobFetchStage {
|
||||
pub fn new(
|
||||
socket: UdpSocket,
|
||||
exit: Arc<AtomicBool>,
|
||||
blob_recycler: packet::BlobRecycler,
|
||||
) -> Self {
|
||||
Self::new_multi_socket(vec![socket], exit, blob_recycler)
|
||||
}
|
||||
pub fn new_multi_socket(
|
||||
sockets: Vec<UdpSocket>,
|
||||
exit: Arc<AtomicBool>,
|
||||
blob_recycler: packet::BlobRecycler,
|
||||
) -> Self {
|
||||
let (blob_sender, blob_receiver) = channel();
|
||||
let thread_hdls: Vec<_> = sockets
|
||||
.into_iter()
|
||||
.map(|socket| {
|
||||
streamer::blob_receiver(
|
||||
exit.clone(),
|
||||
blob_recycler.clone(),
|
||||
socket,
|
||||
blob_sender.clone(),
|
||||
).expect("blob receiver init")
|
||||
})
|
||||
.collect();
|
||||
|
||||
BlobFetchStage {
|
||||
blob_receiver,
|
||||
thread_hdls,
|
||||
}
|
||||
}
|
||||
}
|
@@ -8,9 +8,13 @@ use payment_plan::{Payment, PaymentPlan, Witness};
|
||||
use signature::PublicKey;
|
||||
use std::mem;
|
||||
|
||||
/// A data type representing a `Witness` that the payment plan is waiting on.
|
||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
|
||||
pub enum Condition {
|
||||
/// Wait for a `Timestamp` `Witness` at or after the given `DateTime`.
|
||||
Timestamp(DateTime<Utc>),
|
||||
|
||||
/// Wait for a `Signature` `Witness` from `PublicKey`.
|
||||
Signature(PublicKey),
|
||||
}
|
||||
|
||||
@@ -18,19 +22,26 @@ impl Condition {
|
||||
/// Return true if the given Witness satisfies this Condition.
|
||||
pub fn is_satisfied(&self, witness: &Witness) -> bool {
|
||||
match (self, witness) {
|
||||
(&Condition::Signature(ref pubkey), &Witness::Signature(ref from)) => pubkey == from,
|
||||
(&Condition::Timestamp(ref dt), &Witness::Timestamp(ref last_time)) => dt <= last_time,
|
||||
(Condition::Signature(pubkey), Witness::Signature(from)) => pubkey == from,
|
||||
(Condition::Timestamp(dt), Witness::Timestamp(last_time)) => dt <= last_time,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A data type reprsenting a payment plan.
|
||||
#[repr(C)]
|
||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
|
||||
pub enum Budget {
|
||||
/// Make a payment.
|
||||
Pay(Payment),
|
||||
|
||||
/// Make a payment after some condition.
|
||||
After(Condition, Payment),
|
||||
Race((Condition, Payment), (Condition, Payment)),
|
||||
|
||||
/// Either make a payment after one condition or a different payment after another
|
||||
/// condition, which ever condition is satisfied first.
|
||||
Or((Condition, Payment), (Condition, Payment)),
|
||||
}
|
||||
|
||||
impl Budget {
|
||||
@@ -57,7 +68,7 @@ impl Budget {
|
||||
tokens: i64,
|
||||
to: PublicKey,
|
||||
) -> Self {
|
||||
Budget::Race(
|
||||
Budget::Or(
|
||||
(Condition::Timestamp(dt), Payment { tokens, to }),
|
||||
(Condition::Signature(from), Payment { tokens, to: from }),
|
||||
)
|
||||
@@ -67,31 +78,27 @@ impl Budget {
|
||||
impl PaymentPlan for Budget {
|
||||
/// Return Payment if the budget requires no additional Witnesses.
|
||||
fn final_payment(&self) -> Option<Payment> {
|
||||
match *self {
|
||||
Budget::Pay(ref payment) => Some(payment.clone()),
|
||||
match self {
|
||||
Budget::Pay(payment) => Some(payment.clone()),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Return true if the budget spends exactly `spendable_tokens`.
|
||||
fn verify(&self, spendable_tokens: i64) -> bool {
|
||||
match *self {
|
||||
Budget::Pay(ref payment) | Budget::After(_, ref payment) => {
|
||||
payment.tokens == spendable_tokens
|
||||
}
|
||||
Budget::Race(ref a, ref b) => {
|
||||
a.1.tokens == spendable_tokens && b.1.tokens == spendable_tokens
|
||||
}
|
||||
match self {
|
||||
Budget::Pay(payment) | Budget::After(_, payment) => payment.tokens == spendable_tokens,
|
||||
Budget::Or(a, b) => a.1.tokens == spendable_tokens && b.1.tokens == spendable_tokens,
|
||||
}
|
||||
}
|
||||
|
||||
/// Apply a witness to the budget to see if the budget can be reduced.
|
||||
/// If so, modify the budget in-place.
|
||||
fn apply_witness(&mut self, witness: &Witness) {
|
||||
let new_payment = match *self {
|
||||
Budget::After(ref cond, ref payment) if cond.is_satisfied(witness) => Some(payment),
|
||||
Budget::Race((ref cond, ref payment), _) if cond.is_satisfied(witness) => Some(payment),
|
||||
Budget::Race(_, (ref cond, ref payment)) if cond.is_satisfied(witness) => Some(payment),
|
||||
let new_payment = match self {
|
||||
Budget::After(cond, payment) if cond.is_satisfied(witness) => Some(payment),
|
||||
Budget::Or((cond, payment), _) if cond.is_satisfied(witness) => Some(payment),
|
||||
Budget::Or(_, (cond, payment)) if cond.is_satisfied(witness) => Some(payment),
|
||||
_ => None,
|
||||
}.cloned();
|
||||
|
||||
|
69
src/counter.rs
Normal file
69
src/counter.rs
Normal file
@@ -0,0 +1,69 @@
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::time::Duration;
|
||||
use timing;
|
||||
|
||||
/// A lock-free event counter that accumulates totals with atomics and is
/// meant to be stored in a `static` (see the `create_counter!` macro).
pub struct Counter {
    /// Label emitted with every log line.
    pub name: &'static str,
    /// Total number of events recorded so far.
    pub counts: AtomicUsize,
    /// Total elapsed time recorded so far, in nanoseconds.
    pub nanos: AtomicUsize,
    /// Number of samples, i.e. how many times `inc` has been called.
    pub times: AtomicUsize,
    /// Emit a log line once every `lograte` samples.
    pub lograte: usize,
}
|
||||
|
||||
/// Builds a `Counter` literal with zeroed totals.
/// This is a macro (rather than a function) so it can initialize a `static`
/// item at compile time.
macro_rules! create_counter {
    ($name:expr, $lograte:expr) => {
        Counter {
            name: $name,
            counts: AtomicUsize::new(0),
            nanos: AtomicUsize::new(0),
            times: AtomicUsize::new(0),
            lograte: $lograte,
        }
    };
}
|
||||
|
||||
/// Records `$count` events and the time elapsed since `$start` on counter
/// `$name`. Wrapped in `unsafe` because counters are typically `static mut`;
/// callers must ensure the static is not accessed concurrently through
/// `&mut` — the interior atomics only protect the individual fields.
macro_rules! inc_counter {
    ($name:expr, $count:expr, $start:expr) => {
        unsafe { $name.inc($count, $start.elapsed()) };
    };
}
|
||||
|
||||
impl Counter {
|
||||
pub fn inc(&mut self, events: usize, dur: Duration) {
|
||||
let total = dur.as_secs() * 1_000_000_000 + dur.subsec_nanos() as u64;
|
||||
let counts = self.counts.fetch_add(events, Ordering::Relaxed);
|
||||
let nanos = self.nanos.fetch_add(total as usize, Ordering::Relaxed);
|
||||
let times = self.times.fetch_add(1, Ordering::Relaxed);
|
||||
if times % self.lograte == 0 && times > 0 {
|
||||
info!(
|
||||
"COUNTER:{{\"name\": \"{}\", \"counts\": {}, \"nanos\": {}, \"samples\": {}, \"rate\": {}, \"now\": {}}}",
|
||||
self.name,
|
||||
counts,
|
||||
nanos,
|
||||
times,
|
||||
counts as f64 * 1e9 / nanos as f64,
|
||||
timing::timestamp(),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
#[cfg(test)]
mod tests {
    use counter::Counter;
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::time::Instant;
    #[test]
    fn test_counter() {
        // Counters are designed to live in statics; a `static mut` requires
        // `unsafe` for every access below.
        static mut COUNTER: Counter = create_counter!("test", 100);
        let start = Instant::now();
        let count = 1;
        inc_counter!(COUNTER, count, start);
        unsafe {
            // One call recorded one event, some elapsed time, one sample.
            assert_eq!(COUNTER.counts.load(Ordering::Relaxed), 1);
            assert_ne!(COUNTER.nanos.load(Ordering::Relaxed), 0);
            assert_eq!(COUNTER.times.load(Ordering::Relaxed), 1);
            // Configuration set by `create_counter!` is preserved.
            assert_eq!(COUNTER.lograte, 100);
            assert_eq!(COUNTER.name, "test");
        }
    }
}
|
1060
src/crdt.rs
1060
src/crdt.rs
File diff suppressed because it is too large
Load Diff
312
src/drone.rs
Normal file
312
src/drone.rs
Normal file
@@ -0,0 +1,312 @@
|
||||
//! The `drone` module provides an object for launching a Solana Drone,
|
||||
//! which is the custodian of any remaining tokens in a mint.
|
||||
//! The Solana Drone builds and send airdrop transactions,
|
||||
//! checking requests against a request cap for a given time time_slice
|
||||
//! and (to come) an IP rate limit.
|
||||
|
||||
use signature::{KeyPair, PublicKey};
|
||||
use std::io;
|
||||
use std::io::{Error, ErrorKind};
|
||||
use std::net::{IpAddr, SocketAddr, UdpSocket};
|
||||
use std::time::Duration;
|
||||
use thin_client::ThinClient;
|
||||
use transaction::Transaction;
|
||||
|
||||
pub const TIME_SLICE: u64 = 60;
|
||||
pub const REQUEST_CAP: u64 = 150_000;
|
||||
|
||||
/// Wire requests understood by the drone.
#[derive(Serialize, Deserialize, Debug)]
pub enum DroneRequest {
    /// Ask the drone to transfer tokens from the mint to a client key.
    GetAirdrop {
        /// Number of tokens requested.
        airdrop_request_amount: u64,
        /// Recipient of the airdrop.
        client_public_key: PublicKey,
    },
}
|
||||
|
||||
/// Custodian of the mint's remaining tokens. Builds and submits airdrop
/// transactions, enforcing a token cap per time slice and a placeholder
/// per-IP rate limit.
pub struct Drone {
    /// Keypair controlling the mint; signs every airdrop transaction.
    mint_keypair: KeyPair,
    /// IPs already served (state for the placeholder rate limiter).
    ip_cache: Vec<IpAddr>,
    /// Drone's own address — currently unused (note the underscore); kept
    /// in the config presumably for a future listener. TODO confirm.
    _airdrop_addr: SocketAddr,
    /// Leader's transactions port, where airdrop transactions are sent.
    transactions_addr: SocketAddr,
    /// Leader's requests port, used to fetch the last entry ID.
    requests_addr: SocketAddr,
    /// Window over which `request_cap` applies.
    pub time_slice: Duration,
    /// Maximum tokens that may be airdropped per time slice.
    request_cap: u64,
    /// Tokens airdropped so far in the current time slice.
    pub request_current: u64,
}
|
||||
|
||||
impl Drone {
|
||||
pub fn new(
|
||||
mint_keypair: KeyPair,
|
||||
_airdrop_addr: SocketAddr,
|
||||
transactions_addr: SocketAddr,
|
||||
requests_addr: SocketAddr,
|
||||
time_input: Option<u64>,
|
||||
request_cap_input: Option<u64>,
|
||||
) -> Drone {
|
||||
let time_slice = match time_input {
|
||||
Some(time) => Duration::new(time, 0),
|
||||
None => Duration::new(TIME_SLICE, 0),
|
||||
};
|
||||
let request_cap = match request_cap_input {
|
||||
Some(cap) => cap,
|
||||
None => REQUEST_CAP,
|
||||
};
|
||||
Drone {
|
||||
mint_keypair,
|
||||
ip_cache: Vec::new(),
|
||||
_airdrop_addr,
|
||||
transactions_addr,
|
||||
requests_addr,
|
||||
time_slice,
|
||||
request_cap,
|
||||
request_current: 0,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn check_request_limit(&mut self, request_amount: u64) -> bool {
|
||||
(self.request_current + request_amount) <= self.request_cap
|
||||
}
|
||||
|
||||
pub fn clear_request_count(&mut self) {
|
||||
self.request_current = 0;
|
||||
}
|
||||
|
||||
pub fn add_ip_to_cache(&mut self, ip: IpAddr) {
|
||||
self.ip_cache.push(ip);
|
||||
}
|
||||
|
||||
pub fn clear_ip_cache(&mut self) {
|
||||
self.ip_cache.clear();
|
||||
}
|
||||
|
||||
pub fn check_rate_limit(&mut self, ip: IpAddr) -> Result<IpAddr, IpAddr> {
|
||||
// [WIP] This is placeholder code for a proper rate limiter.
|
||||
// Right now it will only allow one total drone request per IP
|
||||
if self.ip_cache.contains(&ip) {
|
||||
// Add proper error handling here
|
||||
Err(ip)
|
||||
} else {
|
||||
self.add_ip_to_cache(ip);
|
||||
Ok(ip)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn send_airdrop(&mut self, req: DroneRequest) -> Result<usize, io::Error> {
|
||||
let tx: Transaction;
|
||||
let request_amount: u64;
|
||||
let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
|
||||
let mut client = ThinClient::new(
|
||||
self.requests_addr,
|
||||
requests_socket,
|
||||
self.transactions_addr,
|
||||
transactions_socket,
|
||||
);
|
||||
let last_id = client.get_last_id();
|
||||
|
||||
match req {
|
||||
DroneRequest::GetAirdrop {
|
||||
airdrop_request_amount,
|
||||
client_public_key,
|
||||
} => {
|
||||
request_amount = airdrop_request_amount.clone();
|
||||
tx = Transaction::new(
|
||||
&self.mint_keypair,
|
||||
client_public_key,
|
||||
airdrop_request_amount as i64,
|
||||
last_id,
|
||||
);
|
||||
}
|
||||
}
|
||||
if self.check_request_limit(request_amount) {
|
||||
self.request_current += request_amount;
|
||||
client.transfer_signed(tx)
|
||||
} else {
|
||||
Err(Error::new(ErrorKind::Other, "token limit reached"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use bank::Bank;
    use crdt::{get_ip_addr, TestNode};
    use drone::{Drone, DroneRequest, REQUEST_CAP, TIME_SLICE};
    use logger;
    use mint::Mint;
    use server::Server;
    use signature::{KeyPair, KeyPairUtil};
    use std::io::sink;
    use std::net::{SocketAddr, UdpSocket};
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::Arc;
    use std::thread::sleep;
    use std::time::Duration;
    use thin_client::ThinClient;

    // Cap accounting: below the cap passes, at the cap a further request fails.
    #[test]
    fn test_check_request_limit() {
        let keypair = KeyPair::new();
        let mut addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
        addr.set_ip(get_ip_addr().unwrap());
        let transactions_addr = "0.0.0.0:0".parse().unwrap();
        let requests_addr = "0.0.0.0:0".parse().unwrap();
        let mut drone = Drone::new(
            keypair,
            addr,
            transactions_addr,
            requests_addr,
            None,
            Some(3),
        );
        assert!(drone.check_request_limit(1));
        drone.request_current = 3;
        assert!(!drone.check_request_limit(1));
    }

    // clear_request_count resets the per-slice token tally to zero.
    #[test]
    fn test_clear_request_count() {
        let keypair = KeyPair::new();
        let mut addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
        addr.set_ip(get_ip_addr().unwrap());
        let transactions_addr = "0.0.0.0:0".parse().unwrap();
        let requests_addr = "0.0.0.0:0".parse().unwrap();
        let mut drone = Drone::new(keypair, addr, transactions_addr, requests_addr, None, None);
        drone.request_current = drone.request_current + 256;
        assert_eq!(drone.request_current, 256);
        drone.clear_request_count();
        assert_eq!(drone.request_current, 0);
    }

    // Serving an IP records it in the (placeholder) rate-limit cache.
    #[test]
    fn test_add_ip_to_cache() {
        let keypair = KeyPair::new();
        let mut addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
        addr.set_ip(get_ip_addr().unwrap());
        let transactions_addr = "0.0.0.0:0".parse().unwrap();
        let requests_addr = "0.0.0.0:0".parse().unwrap();
        let mut drone = Drone::new(keypair, addr, transactions_addr, requests_addr, None, None);
        let ip = "127.0.0.1".parse().expect("create IpAddr from string");
        assert_eq!(drone.ip_cache.len(), 0);
        drone.add_ip_to_cache(ip);
        assert_eq!(drone.ip_cache.len(), 1);
        assert!(drone.ip_cache.contains(&ip));
    }

    // clear_ip_cache empties the cache so previously-served IPs may retry.
    #[test]
    fn test_clear_ip_cache() {
        let keypair = KeyPair::new();
        let mut addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
        addr.set_ip(get_ip_addr().unwrap());
        let transactions_addr = "0.0.0.0:0".parse().unwrap();
        let requests_addr = "0.0.0.0:0".parse().unwrap();
        let mut drone = Drone::new(keypair, addr, transactions_addr, requests_addr, None, None);
        let ip = "127.0.0.1".parse().expect("create IpAddr from string");
        assert_eq!(drone.ip_cache.len(), 0);
        drone.add_ip_to_cache(ip);
        assert_eq!(drone.ip_cache.len(), 1);
        drone.clear_ip_cache();
        assert_eq!(drone.ip_cache.len(), 0);
        assert!(drone.ip_cache.is_empty());
    }

    // Passing None for both knobs yields the module defaults.
    #[test]
    fn test_drone_default_init() {
        let keypair = KeyPair::new();
        let mut addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
        addr.set_ip(get_ip_addr().unwrap());
        let transactions_addr = "0.0.0.0:0".parse().unwrap();
        let requests_addr = "0.0.0.0:0".parse().unwrap();
        let time_slice: Option<u64> = None;
        let request_cap: Option<u64> = None;
        let drone = Drone::new(
            keypair,
            addr,
            transactions_addr,
            requests_addr,
            time_slice,
            request_cap,
        );
        assert_eq!(drone.time_slice, Duration::new(TIME_SLICE, 0));
        assert_eq!(drone.request_cap, REQUEST_CAP);
    }

    // End-to-end: spin up a leader, airdrop to two keys, then confirm their
    // balances through a ThinClient. Requires working local networking.
    #[test]
    fn test_send_airdrop() {
        const SMALL_BATCH: i64 = 50;
        const TPS_BATCH: i64 = 5_000_000;

        logger::setup();
        let leader = TestNode::new();

        let alice = Mint::new(10_000_000);
        let bank = Bank::new(&alice);
        let bob_pubkey = KeyPair::new().pubkey();
        let carlos_pubkey = KeyPair::new().pubkey();
        let exit = Arc::new(AtomicBool::new(false));

        let server = Server::new_leader(
            bank,
            Some(Duration::from_millis(30)),
            leader.data.clone(),
            leader.sockets.requests,
            leader.sockets.transaction,
            leader.sockets.broadcast,
            leader.sockets.respond,
            leader.sockets.gossip,
            exit.clone(),
            sink(),
        );
        // Give the leader threads time to come up before airdropping.
        sleep(Duration::from_millis(900));

        let mut addr: SocketAddr = "0.0.0.0:9900".parse().expect("bind to drone socket");
        addr.set_ip(get_ip_addr().expect("drone get_ip_addr"));
        let mut drone = Drone::new(
            alice.keypair(),
            addr,
            leader.data.transactions_addr,
            leader.data.requests_addr,
            None,
            Some(5_000_050),
        );

        let bob_req = DroneRequest::GetAirdrop {
            airdrop_request_amount: 50,
            client_public_key: bob_pubkey,
        };
        let bob_result = drone.send_airdrop(bob_req).expect("send airdrop test");
        assert!(bob_result > 0);

        let carlos_req = DroneRequest::GetAirdrop {
            airdrop_request_amount: 5_000_000,
            client_public_key: carlos_pubkey,
        };
        let carlos_result = drone.send_airdrop(carlos_req).expect("send airdrop test");
        assert!(carlos_result > 0);

        let requests_socket = UdpSocket::bind("0.0.0.0:0").expect("drone bind to requests socket");
        let transactions_socket =
            UdpSocket::bind("0.0.0.0:0").expect("drone bind to transactions socket");

        let mut client = ThinClient::new(
            leader.data.requests_addr,
            requests_socket,
            leader.data.transactions_addr,
            transactions_socket,
        );

        // Both airdrops should be reflected in the on-chain balances.
        let bob_balance = client.poll_get_balance(&bob_pubkey);
        info!("Small request balance: {:?}", bob_balance);
        assert_eq!(bob_balance.unwrap(), SMALL_BATCH);

        let carlos_balance = client.poll_get_balance(&carlos_pubkey);
        info!("TPS request balance: {:?}", carlos_balance);
        assert_eq!(carlos_balance.unwrap(), TPS_BATCH);

        // Shut the leader down and wait for its threads to finish.
        exit.store(true, Ordering::Relaxed);
        for t in server.thread_hdls {
            t.join().unwrap();
        }
    }
}
|
55
src/entry.rs
55
src/entry.rs
@@ -2,25 +2,38 @@
|
||||
//! unique ID that is the hash of the Entry before it, plus the hash of the
|
||||
//! transactions within it. Entries cannot be reordered, and its field `num_hashes`
|
||||
//! represents an approximate amount of time since the last Entry was created.
|
||||
use bincode::serialized_size;
|
||||
use hash::{extend_and_hash, hash, Hash};
|
||||
use packet::BLOB_DATA_SIZE;
|
||||
use rayon::prelude::*;
|
||||
use transaction::Transaction;
|
||||
|
||||
/// Each Entry contains three pieces of data. The `num_hashes` field is the number
|
||||
/// of hashes performed since the previous entry. The `id` field is the result
|
||||
/// of hashing `id` from the previous entry `num_hashes` times. The `transactions`
|
||||
/// field points to Events that took place shortly after `id` was generated.
|
||||
/// field points to Transactions that took place shortly before `id` was generated.
|
||||
///
|
||||
/// If you divide `num_hashes` by the amount of time it takes to generate a new hash, you
|
||||
/// get a duration estimate since the last Entry. Since processing power increases
|
||||
/// over time, one should expect the duration `num_hashes` represents to decrease proportionally.
|
||||
/// Though processing power varies across nodes, the network gives priority to the
|
||||
/// fastest processor. Duration should therefore be estimated by assuming that the hash
|
||||
/// was generated by the fastest processor at the time the entry was recorded.
|
||||
/// An upper bound on Duration can be estimated by assuming each hash was generated by the
|
||||
/// world's fastest processor at the time the entry was recorded. Or said another way, it
|
||||
/// is physically not possible for a shorter duration to have occurred if one assumes the
|
||||
/// hash was computed by the world's fastest processor at that time. The hash chain is both
|
||||
/// a Verifiable Delay Function (VDF) and a Proof of Work (not to be confused with Proof or
|
||||
/// Work consensus!)
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
|
||||
pub struct Entry {
|
||||
/// The number of hashes since the previous Entry ID.
|
||||
pub num_hashes: u64,
|
||||
|
||||
/// The SHA-256 hash `num_hashes` after the previous Entry ID.
|
||||
pub id: Hash,
|
||||
|
||||
/// An unordered list of transactions that were observed before the Entry ID was
|
||||
/// generated. The may have been observed before a previous Entry ID but were
|
||||
/// pushed back into this list to ensure deterministic interpretation of the ledger.
|
||||
pub transactions: Vec<Transaction>,
|
||||
}
|
||||
|
||||
@@ -29,11 +42,13 @@ impl Entry {
|
||||
pub fn new(start_hash: &Hash, cur_hashes: u64, transactions: Vec<Transaction>) -> Self {
|
||||
let num_hashes = cur_hashes + if transactions.is_empty() { 0 } else { 1 };
|
||||
let id = next_hash(start_hash, 0, &transactions);
|
||||
Entry {
|
||||
let entry = Entry {
|
||||
num_hashes,
|
||||
id,
|
||||
transactions,
|
||||
}
|
||||
};
|
||||
assert!(serialized_size(&entry).unwrap() <= BLOB_DATA_SIZE as u64);
|
||||
entry
|
||||
}
|
||||
|
||||
/// Creates the next Tick Entry `num_hashes` after `start_hash`.
|
||||
@@ -45,6 +60,7 @@ impl Entry {
|
||||
let entry = Self::new(start_hash, *cur_hashes, transactions);
|
||||
*start_hash = entry.id;
|
||||
*cur_hashes = 0;
|
||||
assert!(serialized_size(&entry).unwrap() <= BLOB_DATA_SIZE as u64);
|
||||
entry
|
||||
}
|
||||
|
||||
@@ -73,8 +89,9 @@ fn add_transaction_data(hash_data: &mut Vec<u8>, tx: &Transaction) {
|
||||
|
||||
/// Creates the hash `num_hashes` after `start_hash`. If the transaction contains
|
||||
/// a signature, the final hash will be a hash of both the previous ID and
|
||||
/// the signature.
|
||||
pub fn next_hash(start_hash: &Hash, num_hashes: u64, transactions: &[Transaction]) -> Hash {
|
||||
/// the signature. If num_hashes is zero and there's no transaction data,
|
||||
/// start_hash is returned.
|
||||
fn next_hash(start_hash: &Hash, num_hashes: u64, transactions: &[Transaction]) -> Hash {
|
||||
let mut id = *start_hash;
|
||||
for _ in 1..num_hashes {
|
||||
id = hash(&id);
|
||||
@@ -95,8 +112,9 @@ pub fn next_hash(start_hash: &Hash, num_hashes: u64, transactions: &[Transaction
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates the next Tick or Event Entry `num_hashes` after `start_hash`.
|
||||
/// Creates the next Tick or Transaction Entry `num_hashes` after `start_hash`.
|
||||
pub fn next_entry(start_hash: &Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Entry {
|
||||
assert!(num_hashes > 0 || transactions.len() == 0);
|
||||
Entry {
|
||||
num_hashes,
|
||||
id: next_hash(start_hash, num_hashes, &transactions),
|
||||
@@ -163,5 +181,24 @@ mod tests {
|
||||
let tick = next_entry(&zero, 1, vec![]);
|
||||
assert_eq!(tick.num_hashes, 1);
|
||||
assert_ne!(tick.id, zero);
|
||||
|
||||
let tick = next_entry(&zero, 0, vec![]);
|
||||
assert_eq!(tick.num_hashes, 0);
|
||||
assert_eq!(tick.id, zero);
|
||||
|
||||
let keypair = KeyPair::new();
|
||||
let tx0 = Transaction::new_timestamp(&keypair, Utc::now(), zero);
|
||||
let entry0 = next_entry(&zero, 1, vec![tx0.clone()]);
|
||||
assert_eq!(entry0.num_hashes, 1);
|
||||
assert_eq!(entry0.id, next_hash(&zero, 1, &vec![tx0]));
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn test_next_entry_panic() {
|
||||
let zero = Hash::default();
|
||||
let keypair = KeyPair::new();
|
||||
let tx = Transaction::new(&keypair, keypair.pubkey(), 0, zero);
|
||||
next_entry(&zero, 0, vec![tx]);
|
||||
}
|
||||
}
|
||||
|
@@ -1,4 +1,6 @@
|
||||
//! The `entry_writer` module helps implement the TPU's write stage.
|
||||
//! The `entry_writer` module helps implement the TPU's write stage. It
|
||||
//! writes entries to the given writer, which is typically a file or
|
||||
//! stdout, and then sends the Entry to its output channel.
|
||||
|
||||
use bank::Bank;
|
||||
use entry::Entry;
|
||||
|
356
src/erasure.rs
356
src/erasure.rs
@@ -1,17 +1,18 @@
|
||||
// Support erasure coding
|
||||
|
||||
use packet::{BlobRecycler, SharedBlob};
|
||||
use packet::{BlobRecycler, SharedBlob, BLOB_HEADER_SIZE};
|
||||
use std::result;
|
||||
|
||||
//TODO(sakridge) pick these values
|
||||
const NUM_CODED: usize = 10;
|
||||
const MAX_MISSING: usize = 2;
|
||||
pub const NUM_CODED: usize = 20;
|
||||
pub const MAX_MISSING: usize = 4;
|
||||
const NUM_DATA: usize = NUM_CODED - MAX_MISSING;
|
||||
|
||||
#[derive(Debug, PartialEq, Eq)]
|
||||
pub enum ErasureError {
|
||||
NotEnoughBlocksToDecode,
|
||||
DecodeError,
|
||||
EncodeError,
|
||||
InvalidBlockSize,
|
||||
}
|
||||
|
||||
@@ -73,12 +74,22 @@ pub fn generate_coding_blocks(coding: &mut [&mut [u8]], data: &[&[u8]]) -> Resul
|
||||
let mut data_arg = Vec::new();
|
||||
for block in data {
|
||||
if block_len != block.len() {
|
||||
trace!(
|
||||
"data block size incorrect {} expected {}",
|
||||
block.len(),
|
||||
block_len
|
||||
);
|
||||
return Err(ErasureError::InvalidBlockSize);
|
||||
}
|
||||
data_arg.push(block.as_ptr());
|
||||
}
|
||||
for mut block in coding {
|
||||
if block_len != block.len() {
|
||||
trace!(
|
||||
"coding block size incorrect {} expected {}",
|
||||
block.len(),
|
||||
block_len
|
||||
);
|
||||
return Err(ErasureError::InvalidBlockSize);
|
||||
}
|
||||
coding_arg.push(block.as_mut_ptr());
|
||||
@@ -150,40 +161,99 @@ pub fn decode_blocks(data: &mut [&mut [u8]], coding: &[&[u8]], erasures: &[i32])
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Generate coding blocks in window from consumed to consumed+NUM_DATA
|
||||
// Allocate some coding blobs and insert into the blobs array
|
||||
pub fn add_coding_blobs(recycler: &BlobRecycler, blobs: &mut Vec<SharedBlob>, consumed: u64) {
|
||||
let mut added = 0;
|
||||
let blobs_len = blobs.len() as u64;
|
||||
for i in consumed..consumed + blobs_len {
|
||||
let is = i as usize;
|
||||
if is != 0 && ((is + MAX_MISSING) % NUM_CODED) == 0 {
|
||||
for _ in 0..MAX_MISSING {
|
||||
trace!("putting coding at {}", (i - consumed));
|
||||
let new_blob = recycler.allocate();
|
||||
let new_blob_clone = new_blob.clone();
|
||||
let mut new_blob_l = new_blob_clone.write().unwrap();
|
||||
new_blob_l.set_size(0);
|
||||
new_blob_l.set_coding().unwrap();
|
||||
drop(new_blob_l);
|
||||
blobs.insert((i - consumed) as usize, new_blob);
|
||||
added += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
info!(
|
||||
"add_coding consumed: {} blobs.len(): {} added: {}",
|
||||
consumed,
|
||||
blobs.len(),
|
||||
added
|
||||
);
|
||||
}
|
||||
|
||||
// Generate coding blocks in window starting from consumed
|
||||
pub fn generate_coding(
|
||||
re: &BlobRecycler,
|
||||
window: &mut Vec<SharedBlob>,
|
||||
window: &mut Vec<Option<SharedBlob>>,
|
||||
consumed: usize,
|
||||
num_blobs: usize,
|
||||
) -> Result<()> {
|
||||
let mut block_start = consumed - (consumed % NUM_CODED);
|
||||
|
||||
for i in consumed..consumed + num_blobs {
|
||||
if (i % NUM_CODED) == (NUM_CODED - 1) {
|
||||
let mut data_blobs = Vec::new();
|
||||
let mut coding_blobs = Vec::new();
|
||||
let mut data_locks = Vec::new();
|
||||
let mut data_ptrs: Vec<&[u8]> = Vec::new();
|
||||
let mut coding_locks = Vec::new();
|
||||
let mut coding_ptrs: Vec<&mut [u8]> = Vec::new();
|
||||
for i in consumed..consumed + NUM_DATA {
|
||||
|
||||
info!(
|
||||
"generate_coding start: {} end: {} consumed: {} num_blobs: {}",
|
||||
block_start,
|
||||
block_start + NUM_DATA,
|
||||
consumed,
|
||||
num_blobs
|
||||
);
|
||||
for i in block_start..block_start + NUM_DATA {
|
||||
let n = i % window.len();
|
||||
trace!("window[{}] = {:?}", n, window[n]);
|
||||
if window[n].is_none() {
|
||||
trace!("data block is null @ {}", n);
|
||||
return Ok(());
|
||||
}
|
||||
data_blobs.push(
|
||||
window[n]
|
||||
.clone()
|
||||
.expect("'data_blobs' arr in pub fn generate_coding"),
|
||||
);
|
||||
}
|
||||
let mut max_data_size = 0;
|
||||
for b in &data_blobs {
|
||||
data_locks.push(b.write().expect("'b' write lock in pub fn generate_coding"));
|
||||
let lck = b.write().expect("'b' write lock in pub fn generate_coding");
|
||||
if lck.meta.size > max_data_size {
|
||||
max_data_size = lck.meta.size;
|
||||
}
|
||||
data_locks.push(lck);
|
||||
}
|
||||
trace!("max_data_size: {}", max_data_size);
|
||||
for (i, l) in data_locks.iter_mut().enumerate() {
|
||||
trace!("i: {} data: {}", i, l.data[0]);
|
||||
data_ptrs.push(&l.data);
|
||||
data_ptrs.push(&l.data[..max_data_size]);
|
||||
}
|
||||
|
||||
// generate coding ptr array
|
||||
let coding_start = consumed + NUM_DATA;
|
||||
let coding_end = consumed + NUM_CODED;
|
||||
let coding_start = block_start + NUM_DATA;
|
||||
let coding_end = block_start + NUM_CODED;
|
||||
for i in coding_start..coding_end {
|
||||
let n = i % window.len();
|
||||
window[n] = re.allocate();
|
||||
if window[n].is_none() {
|
||||
trace!("coding block is null @ {}", n);
|
||||
return Ok(());
|
||||
}
|
||||
let w_l = window[n].clone().unwrap();
|
||||
w_l.write().unwrap().set_size(max_data_size);
|
||||
if w_l.write().unwrap().set_coding().is_err() {
|
||||
return Err(ErasureError::EncodeError);
|
||||
}
|
||||
coding_blobs.push(
|
||||
window[n]
|
||||
.clone()
|
||||
@@ -197,12 +267,22 @@ pub fn generate_coding(
|
||||
);
|
||||
}
|
||||
for (i, l) in coding_locks.iter_mut().enumerate() {
|
||||
trace!("i: {} data: {}", i, l.data[0]);
|
||||
coding_ptrs.push(&mut l.data);
|
||||
trace!("i: {} coding: {} size: {}", i, l.data[0], max_data_size);
|
||||
coding_ptrs.push(&mut l.data_mut()[..max_data_size]);
|
||||
}
|
||||
|
||||
generate_coding_blocks(coding_ptrs.as_mut_slice(), &data_ptrs)?;
|
||||
trace!("consumed: {}", consumed);
|
||||
debug!(
|
||||
"consumed: {} data: {}:{} coding: {}:{}",
|
||||
consumed,
|
||||
block_start,
|
||||
block_start + NUM_DATA,
|
||||
coding_start,
|
||||
coding_end
|
||||
);
|
||||
block_start += NUM_CODED;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -214,13 +294,37 @@ pub fn recover(
|
||||
re: &BlobRecycler,
|
||||
window: &mut Vec<Option<SharedBlob>>,
|
||||
consumed: usize,
|
||||
received: usize,
|
||||
) -> Result<()> {
|
||||
//recover with erasure coding
|
||||
if received <= consumed {
|
||||
return Ok(());
|
||||
}
|
||||
let num_blocks = (received - consumed) / NUM_CODED;
|
||||
let mut block_start = consumed - (consumed % NUM_CODED);
|
||||
|
||||
if num_blocks > 0 {
|
||||
debug!(
|
||||
"num_blocks: {} received: {} consumed: {}",
|
||||
num_blocks, received, consumed
|
||||
);
|
||||
}
|
||||
|
||||
for i in 0..num_blocks {
|
||||
if i > 100 {
|
||||
break;
|
||||
}
|
||||
let mut data_missing = 0;
|
||||
let mut coded_missing = 0;
|
||||
let coding_start = consumed + NUM_DATA;
|
||||
let coding_end = consumed + NUM_CODED;
|
||||
for i in consumed..coding_end {
|
||||
let coding_start = block_start + NUM_DATA;
|
||||
let coding_end = block_start + NUM_CODED;
|
||||
trace!(
|
||||
"recover: block_start: {} coding_start: {} coding_end: {}",
|
||||
block_start,
|
||||
coding_start,
|
||||
coding_end
|
||||
);
|
||||
for i in block_start..coding_end {
|
||||
let n = i % window.len();
|
||||
if window[n].is_none() {
|
||||
if i >= coding_start {
|
||||
@@ -230,18 +334,35 @@ pub fn recover(
|
||||
}
|
||||
}
|
||||
}
|
||||
trace!("missing: data: {} coding: {}", data_missing, coded_missing);
|
||||
if (data_missing + coded_missing) != NUM_CODED && (data_missing + coded_missing) != 0 {
|
||||
debug!(
|
||||
"1: start: {} recovering: data: {} coding: {}",
|
||||
block_start, data_missing, coded_missing
|
||||
);
|
||||
}
|
||||
if data_missing > 0 {
|
||||
if (data_missing + coded_missing) <= MAX_MISSING {
|
||||
debug!(
|
||||
"2: recovering: data: {} coding: {}",
|
||||
data_missing, coded_missing
|
||||
);
|
||||
let mut blobs: Vec<SharedBlob> = Vec::new();
|
||||
let mut locks = Vec::new();
|
||||
let mut data_ptrs: Vec<&mut [u8]> = Vec::new();
|
||||
let mut coding_ptrs: Vec<&[u8]> = Vec::new();
|
||||
let mut erasures: Vec<i32> = Vec::new();
|
||||
for i in consumed..coding_end {
|
||||
let mut meta = None;
|
||||
let mut size = None;
|
||||
for i in block_start..coding_end {
|
||||
let j = i % window.len();
|
||||
let mut b = &mut window[j];
|
||||
if b.is_some() {
|
||||
if i >= NUM_DATA && size.is_none() {
|
||||
let bl = b.clone().unwrap();
|
||||
size = Some(bl.read().unwrap().meta.size - BLOB_HEADER_SIZE);
|
||||
}
|
||||
if meta.is_none() {
|
||||
let bl = b.clone().unwrap();
|
||||
meta = Some(bl.read().unwrap().meta.clone());
|
||||
}
|
||||
blobs.push(b.clone().expect("'blobs' arr in pb fn recover"));
|
||||
continue;
|
||||
}
|
||||
@@ -249,21 +370,29 @@ pub fn recover(
|
||||
*b = Some(n.clone());
|
||||
//mark the missing memory
|
||||
blobs.push(n);
|
||||
erasures.push((i - consumed) as i32);
|
||||
erasures.push((i - block_start) as i32);
|
||||
}
|
||||
erasures.push(-1);
|
||||
trace!("erasures: {:?}", erasures);
|
||||
trace!(
|
||||
"erasures: {:?} data_size: {} header_size: {}",
|
||||
erasures,
|
||||
size.unwrap(),
|
||||
BLOB_HEADER_SIZE
|
||||
);
|
||||
//lock everything
|
||||
for b in &blobs {
|
||||
locks.push(b.write().expect("'locks' arr in pb fn recover"));
|
||||
}
|
||||
{
|
||||
let mut coding_ptrs: Vec<&[u8]> = Vec::new();
|
||||
let mut data_ptrs: Vec<&mut [u8]> = Vec::new();
|
||||
for (i, l) in locks.iter_mut().enumerate() {
|
||||
if i >= NUM_DATA {
|
||||
trace!("pushing coding: {}", i);
|
||||
coding_ptrs.push(&l.data);
|
||||
coding_ptrs.push(&l.data()[..size.unwrap()]);
|
||||
} else {
|
||||
trace!("pushing data: {}", i);
|
||||
data_ptrs.push(&mut l.data);
|
||||
data_ptrs.push(&mut l.data[..size.unwrap()]);
|
||||
}
|
||||
}
|
||||
trace!(
|
||||
@@ -272,17 +401,35 @@ pub fn recover(
|
||||
data_ptrs.len()
|
||||
);
|
||||
decode_blocks(data_ptrs.as_mut_slice(), &coding_ptrs, &erasures)?;
|
||||
} else {
|
||||
return Err(ErasureError::NotEnoughBlocksToDecode);
|
||||
}
|
||||
for i in &erasures[..erasures.len() - 1] {
|
||||
let idx = *i as usize;
|
||||
let data_size = locks[idx].get_data_size().unwrap() - BLOB_HEADER_SIZE as u64;
|
||||
locks[idx].meta = meta.clone().unwrap();
|
||||
locks[idx].set_size(data_size as usize);
|
||||
trace!(
|
||||
"erasures[{}] size: {} data[0]: {}",
|
||||
*i,
|
||||
data_size,
|
||||
locks[idx].data()[0]
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
block_start += NUM_CODED;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use crdt;
|
||||
use erasure;
|
||||
use packet::{BlobRecycler, SharedBlob, PACKET_DATA_SIZE};
|
||||
use logger;
|
||||
use packet::{BlobRecycler, SharedBlob, BLOB_HEADER_SIZE};
|
||||
use signature::KeyPair;
|
||||
use signature::KeyPairUtil;
|
||||
use std::sync::{Arc, RwLock};
|
||||
|
||||
#[test]
|
||||
pub fn test_coding() {
|
||||
@@ -338,10 +485,15 @@ mod test {
|
||||
for (i, w) in window.iter().enumerate() {
|
||||
print!("window({}): ", i);
|
||||
if w.is_some() {
|
||||
let window_lock = w.clone().unwrap();
|
||||
let window_data = window_lock.read().unwrap().data;
|
||||
let window_l1 = w.clone().unwrap();
|
||||
let window_l2 = window_l1.read().unwrap();
|
||||
print!(
|
||||
"index: {:?} meta.size: {} data: ",
|
||||
window_l2.get_index(),
|
||||
window_l2.meta.size
|
||||
);
|
||||
for i in 0..8 {
|
||||
print!("{} ", window_data[i]);
|
||||
print!("{} ", window_l2.data()[i]);
|
||||
}
|
||||
} else {
|
||||
print!("null");
|
||||
@@ -350,45 +502,102 @@ mod test {
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_window_recover() {
|
||||
let mut window = Vec::new();
|
||||
let blob_recycler = BlobRecycler::default();
|
||||
let offset = 4;
|
||||
for i in 0..(4 * erasure::NUM_CODED + 1) {
|
||||
fn generate_window(
|
||||
data_len: usize,
|
||||
blob_recycler: &BlobRecycler,
|
||||
offset: usize,
|
||||
num_blobs: usize,
|
||||
) -> (Vec<Option<SharedBlob>>, usize) {
|
||||
let mut window = vec![None; 32];
|
||||
let mut blobs = Vec::new();
|
||||
for i in 0..num_blobs {
|
||||
let b = blob_recycler.allocate();
|
||||
let b_ = b.clone();
|
||||
let data_len = b.read().unwrap().data.len();
|
||||
let mut w = b.write().unwrap();
|
||||
w.set_index(i as u64).unwrap();
|
||||
assert_eq!(i as u64, w.get_index().unwrap());
|
||||
w.meta.size = PACKET_DATA_SIZE;
|
||||
w.set_size(data_len);
|
||||
for k in 0..data_len {
|
||||
w.data[k] = (k + i) as u8;
|
||||
w.data_mut()[k] = (k + i) as u8;
|
||||
}
|
||||
window.push(Some(b_));
|
||||
blobs.push(b_);
|
||||
}
|
||||
erasure::add_coding_blobs(blob_recycler, &mut blobs, offset as u64);
|
||||
let blobs_len = blobs.len();
|
||||
|
||||
let d = crdt::ReplicatedData::new(
|
||||
KeyPair::new().pubkey(),
|
||||
"127.0.0.1:1234".parse().unwrap(),
|
||||
"127.0.0.1:1235".parse().unwrap(),
|
||||
"127.0.0.1:1236".parse().unwrap(),
|
||||
"127.0.0.1:1237".parse().unwrap(),
|
||||
"127.0.0.1:1238".parse().unwrap(),
|
||||
);
|
||||
let crdt = Arc::new(RwLock::new(crdt::Crdt::new(d.clone())));
|
||||
|
||||
assert!(crdt::Crdt::index_blobs(&crdt, &blobs, &mut (offset as u64)).is_ok());
|
||||
for b in blobs {
|
||||
let idx = b.read().unwrap().get_index().unwrap() as usize;
|
||||
window[idx] = Some(b);
|
||||
}
|
||||
(window, blobs_len)
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_window_recover_basic() {
|
||||
logger::setup();
|
||||
let data_len = 16;
|
||||
let blob_recycler = BlobRecycler::default();
|
||||
|
||||
// Generate a window
|
||||
let offset = 1;
|
||||
let num_blobs = erasure::NUM_DATA + 2;
|
||||
let (mut window, blobs_len) = generate_window(data_len, &blob_recycler, 0, num_blobs);
|
||||
println!("** after-gen-window:");
|
||||
print_window(&window);
|
||||
|
||||
// Generate the coding blocks
|
||||
assert!(erasure::generate_coding(&mut window, offset, blobs_len).is_ok());
|
||||
println!("** after-gen-coding:");
|
||||
print_window(&window);
|
||||
|
||||
let erase_offset = offset;
|
||||
// Create a hole in the window
|
||||
let refwindow = window[erase_offset].clone();
|
||||
window[erase_offset] = None;
|
||||
|
||||
// Recover it from coding
|
||||
assert!(erasure::recover(&blob_recycler, &mut window, offset, offset + blobs_len).is_ok());
|
||||
println!("** after-recover:");
|
||||
print_window(&window);
|
||||
|
||||
// Check the result
|
||||
let window_l = window[erase_offset].clone().unwrap();
|
||||
let window_l2 = window_l.read().unwrap();
|
||||
let ref_l = refwindow.clone().unwrap();
|
||||
let ref_l2 = ref_l.read().unwrap();
|
||||
assert_eq!(
|
||||
window_l2.data[..(data_len + BLOB_HEADER_SIZE)],
|
||||
ref_l2.data[..(data_len + BLOB_HEADER_SIZE)]
|
||||
);
|
||||
assert_eq!(window_l2.meta.size, ref_l2.meta.size);
|
||||
assert_eq!(window_l2.meta.addr, ref_l2.meta.addr);
|
||||
assert_eq!(window_l2.meta.port, ref_l2.meta.port);
|
||||
assert_eq!(window_l2.meta.v6, ref_l2.meta.v6);
|
||||
assert_eq!(window_l2.get_index().unwrap(), erase_offset as u64);
|
||||
}
|
||||
|
||||
//TODO This needs to be reworked
|
||||
#[test]
|
||||
#[ignore]
|
||||
pub fn test_window_recover() {
|
||||
logger::setup();
|
||||
let blob_recycler = BlobRecycler::default();
|
||||
let offset = 4;
|
||||
let data_len = 16;
|
||||
let num_blobs = erasure::NUM_DATA + 2;
|
||||
let (mut window, blobs_len) = generate_window(data_len, &blob_recycler, offset, num_blobs);
|
||||
println!("** after-gen:");
|
||||
print_window(&window);
|
||||
assert!(erasure::generate_coding(&blob_recycler, &mut window, offset).is_ok());
|
||||
assert!(
|
||||
erasure::generate_coding(&blob_recycler, &mut window, offset + erasure::NUM_CODED)
|
||||
.is_ok()
|
||||
);
|
||||
assert!(
|
||||
erasure::generate_coding(
|
||||
&blob_recycler,
|
||||
&mut window,
|
||||
offset + (2 * erasure::NUM_CODED)
|
||||
).is_ok()
|
||||
);
|
||||
assert!(
|
||||
erasure::generate_coding(
|
||||
&blob_recycler,
|
||||
&mut window,
|
||||
offset + (3 * erasure::NUM_CODED)
|
||||
).is_ok()
|
||||
);
|
||||
assert!(erasure::generate_coding(&mut window, offset, blobs_len).is_ok());
|
||||
println!("** after-coding:");
|
||||
print_window(&window);
|
||||
let refwindow = window[offset + 1].clone();
|
||||
@@ -402,29 +611,14 @@ mod test {
|
||||
window_l0.write().unwrap().data[0] = 55;
|
||||
println!("** after-nulling:");
|
||||
print_window(&window);
|
||||
assert!(erasure::recover(&blob_recycler, &mut window, offset).is_ok());
|
||||
assert!(erasure::recover(&blob_recycler, &mut window, offset + erasure::NUM_CODED).is_ok());
|
||||
assert!(
|
||||
erasure::recover(
|
||||
&blob_recycler,
|
||||
&mut window,
|
||||
offset + (2 * erasure::NUM_CODED)
|
||||
).is_err()
|
||||
);
|
||||
assert!(
|
||||
erasure::recover(
|
||||
&blob_recycler,
|
||||
&mut window,
|
||||
offset + (3 * erasure::NUM_CODED)
|
||||
).is_ok()
|
||||
);
|
||||
assert!(erasure::recover(&blob_recycler, &mut window, offset, offset + blobs_len).is_ok());
|
||||
println!("** after-restore:");
|
||||
print_window(&window);
|
||||
let window_l = window[offset + 1].clone().unwrap();
|
||||
let ref_l = refwindow.clone().unwrap();
|
||||
assert_eq!(
|
||||
window_l.read().unwrap().data.to_vec(),
|
||||
ref_l.read().unwrap().data.to_vec()
|
||||
window_l.read().unwrap().data()[..data_len],
|
||||
ref_l.read().unwrap().data()[..data_len]
|
||||
);
|
||||
}
|
||||
}
|
||||
|
@@ -10,7 +10,7 @@ use streamer;
|
||||
|
||||
pub struct FetchStage {
|
||||
pub packet_receiver: streamer::PacketReceiver,
|
||||
pub thread_hdl: JoinHandle<()>,
|
||||
pub thread_hdls: Vec<JoinHandle<()>>,
|
||||
}
|
||||
|
||||
impl FetchStage {
|
||||
@@ -18,14 +18,30 @@ impl FetchStage {
|
||||
socket: UdpSocket,
|
||||
exit: Arc<AtomicBool>,
|
||||
packet_recycler: packet::PacketRecycler,
|
||||
) -> Self {
|
||||
Self::new_multi_socket(vec![socket], exit, packet_recycler)
|
||||
}
|
||||
pub fn new_multi_socket(
|
||||
sockets: Vec<UdpSocket>,
|
||||
exit: Arc<AtomicBool>,
|
||||
packet_recycler: packet::PacketRecycler,
|
||||
) -> Self {
|
||||
let (packet_sender, packet_receiver) = channel();
|
||||
let thread_hdl =
|
||||
streamer::receiver(socket, exit.clone(), packet_recycler.clone(), packet_sender);
|
||||
let thread_hdls: Vec<_> = sockets
|
||||
.into_iter()
|
||||
.map(|socket| {
|
||||
streamer::receiver(
|
||||
socket,
|
||||
exit.clone(),
|
||||
packet_recycler.clone(),
|
||||
packet_sender.clone(),
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
|
||||
FetchStage {
|
||||
packet_receiver,
|
||||
thread_hdl,
|
||||
thread_hdls,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -10,7 +10,10 @@ pub type Hash = GenericArray<u8, U32>;
|
||||
pub fn hash(val: &[u8]) -> Hash {
|
||||
let mut hasher = Sha256::default();
|
||||
hasher.input(val);
|
||||
hasher.result()
|
||||
|
||||
// At the time of this writing, the sha2 library is stuck on an old version
|
||||
// of generic_array (0.9.0). Decouple ourselves with a clone to our version.
|
||||
GenericArray::clone_from_slice(hasher.result().as_slice())
|
||||
}
|
||||
|
||||
/// Return the hash of the given hash extended with the given value.
|
||||
|
234
src/ledger.rs
234
src/ledger.rs
@@ -1,18 +1,17 @@
|
||||
//! The `ledger` module provides functions for parallel verification of the
|
||||
//! Proof of History ledger.
|
||||
|
||||
use bincode::{deserialize, serialize_into};
|
||||
use entry::{next_entry, Entry};
|
||||
use bincode::{self, deserialize, serialize_into, serialized_size};
|
||||
use entry::Entry;
|
||||
use hash::Hash;
|
||||
use packet;
|
||||
use packet::{SharedBlob, BLOB_DATA_SIZE, BLOB_SIZE};
|
||||
use packet::{self, SharedBlob, BLOB_DATA_SIZE, BLOB_SIZE};
|
||||
use rayon::prelude::*;
|
||||
use std::cmp::min;
|
||||
use std::collections::VecDeque;
|
||||
use std::io::Cursor;
|
||||
use std::mem::size_of;
|
||||
use transaction::Transaction;
|
||||
|
||||
// a Block is a slice of Entries
|
||||
|
||||
pub trait Block {
|
||||
/// Verifies the hashes and counts of a slice of transactions are all consistent.
|
||||
fn verify(&self, start_hash: &Hash) -> bool;
|
||||
@@ -27,112 +26,115 @@ impl Block for [Entry] {
|
||||
}
|
||||
|
||||
fn to_blobs(&self, blob_recycler: &packet::BlobRecycler, q: &mut VecDeque<SharedBlob>) {
|
||||
let mut start = 0;
|
||||
let mut end = 0;
|
||||
while start < self.len() {
|
||||
let mut entries: Vec<Vec<Entry>> = Vec::new();
|
||||
let mut total = 0;
|
||||
for i in &self[start..] {
|
||||
total += size_of::<Transaction>() * i.transactions.len();
|
||||
total += size_of::<Entry>();
|
||||
if total >= BLOB_DATA_SIZE {
|
||||
break;
|
||||
}
|
||||
end += 1;
|
||||
}
|
||||
// See if we need to split the transactions
|
||||
if end <= start {
|
||||
let mut transaction_start = 0;
|
||||
let num_transactions_per_blob = BLOB_DATA_SIZE / size_of::<Transaction>();
|
||||
let total_entry_chunks = (self[end].transactions.len() + num_transactions_per_blob
|
||||
- 1) / num_transactions_per_blob;
|
||||
trace!(
|
||||
"splitting transactions end: {} total_chunks: {}",
|
||||
end,
|
||||
total_entry_chunks
|
||||
);
|
||||
for _ in 0..total_entry_chunks {
|
||||
let transaction_end = min(
|
||||
transaction_start + num_transactions_per_blob,
|
||||
self[end].transactions.len(),
|
||||
);
|
||||
let mut entry = Entry {
|
||||
num_hashes: self[end].num_hashes,
|
||||
id: self[end].id,
|
||||
transactions: self[end].transactions[transaction_start..transaction_end]
|
||||
.to_vec(),
|
||||
};
|
||||
entries.push(vec![entry]);
|
||||
transaction_start = transaction_end;
|
||||
}
|
||||
end += 1;
|
||||
} else {
|
||||
entries.push(self[start..end].to_vec());
|
||||
}
|
||||
|
||||
for entry in entries {
|
||||
let b = blob_recycler.allocate();
|
||||
for entry in self {
|
||||
let blob = blob_recycler.allocate();
|
||||
let pos = {
|
||||
let mut bd = b.write().unwrap();
|
||||
let mut bd = blob.write().unwrap();
|
||||
let mut out = Cursor::new(bd.data_mut());
|
||||
serialize_into(&mut out, &entry).expect("failed to serialize output");
|
||||
out.position() as usize
|
||||
};
|
||||
assert!(pos < BLOB_SIZE);
|
||||
b.write().unwrap().set_size(pos);
|
||||
q.push_back(b);
|
||||
}
|
||||
start = end;
|
||||
blob.write().unwrap().set_size(pos);
|
||||
q.push_back(blob);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a vector of Entries of length `transaction_batches.len()` from `start_hash` hash, `num_hashes`, and `transaction_batches`.
|
||||
pub fn next_entries(
|
||||
start_hash: &Hash,
|
||||
num_hashes: u64,
|
||||
transaction_batches: Vec<Vec<Transaction>>,
|
||||
pub fn reconstruct_entries_from_blobs(
|
||||
blobs: VecDeque<SharedBlob>,
|
||||
blob_recycler: &packet::BlobRecycler,
|
||||
) -> bincode::Result<Vec<Entry>> {
|
||||
let mut entries: Vec<Entry> = Vec::with_capacity(blobs.len());
|
||||
|
||||
for blob in blobs {
|
||||
let entry = {
|
||||
let msg = blob.read().unwrap();
|
||||
deserialize(&msg.data()[..msg.meta.size])
|
||||
};
|
||||
blob_recycler.recycle(blob);
|
||||
|
||||
match entry {
|
||||
Ok(entry) => entries.push(entry),
|
||||
Err(err) => {
|
||||
trace!("reconstruct_entry_from_blobs: {}", err);
|
||||
return Err(err);
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(entries)
|
||||
}
|
||||
|
||||
/// Creates the next entries for given transactions, outputs
|
||||
/// updates start_hash to id of last Entry, sets cur_hashes to 0
|
||||
pub fn next_entries_mut(
|
||||
start_hash: &mut Hash,
|
||||
cur_hashes: &mut u64,
|
||||
transactions: Vec<Transaction>,
|
||||
) -> Vec<Entry> {
|
||||
let mut id = *start_hash;
|
||||
let mut entries = vec![];
|
||||
for transactions in &transaction_batches {
|
||||
let transactions = transactions.clone();
|
||||
let entry = next_entry(&id, num_hashes, transactions);
|
||||
id = entry.id;
|
||||
entries.push(entry);
|
||||
if transactions.is_empty() {
|
||||
vec![Entry::new_mut(start_hash, cur_hashes, transactions)]
|
||||
} else {
|
||||
let mut chunk_len = transactions.len();
|
||||
|
||||
// check for fit, make sure they can be serialized
|
||||
while serialized_size(&Entry {
|
||||
num_hashes: 0,
|
||||
id: Hash::default(),
|
||||
transactions: transactions[0..chunk_len].to_vec(),
|
||||
}).unwrap() > BLOB_DATA_SIZE as u64
|
||||
{
|
||||
chunk_len /= 2;
|
||||
}
|
||||
|
||||
let mut entries = Vec::with_capacity(transactions.len() / chunk_len + 1);
|
||||
|
||||
for chunk in transactions.chunks(chunk_len) {
|
||||
entries.push(Entry::new_mut(start_hash, cur_hashes, chunk.to_vec()));
|
||||
}
|
||||
entries
|
||||
}
|
||||
}
|
||||
|
||||
pub fn reconstruct_entries_from_blobs(blobs: &VecDeque<SharedBlob>) -> Vec<Entry> {
|
||||
let mut entries_to_apply: Vec<Entry> = Vec::new();
|
||||
let mut last_id = Hash::default();
|
||||
for msgs in blobs {
|
||||
let blob = msgs.read().unwrap();
|
||||
let entries: Vec<Entry> = deserialize(&blob.data()[..blob.meta.size]).unwrap();
|
||||
for entry in entries {
|
||||
if entry.id == last_id {
|
||||
if let Some(last_entry) = entries_to_apply.last_mut() {
|
||||
last_entry.transactions.extend(entry.transactions);
|
||||
}
|
||||
} else {
|
||||
last_id = entry.id;
|
||||
entries_to_apply.push(entry);
|
||||
}
|
||||
}
|
||||
//TODO respond back to leader with hash of the state
|
||||
}
|
||||
entries_to_apply
|
||||
/// Creates the next Entries for given transactions
|
||||
pub fn next_entries(
|
||||
start_hash: &Hash,
|
||||
cur_hashes: u64,
|
||||
transactions: Vec<Transaction>,
|
||||
) -> Vec<Entry> {
|
||||
let mut id = *start_hash;
|
||||
let mut num_hashes = cur_hashes;
|
||||
next_entries_mut(&mut id, &mut num_hashes, transactions)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use entry::{next_entry, Entry};
|
||||
use hash::hash;
|
||||
use packet::BlobRecycler;
|
||||
use signature::{KeyPair, KeyPairUtil};
|
||||
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
|
||||
use transaction::Transaction;
|
||||
|
||||
/// Create a vector of Entries of length `transaction_batches.len()`
|
||||
/// from `start_hash` hash, `num_hashes`, and `transaction_batches`.
|
||||
fn next_entries_batched(
|
||||
start_hash: &Hash,
|
||||
cur_hashes: u64,
|
||||
transaction_batches: Vec<Vec<Transaction>>,
|
||||
) -> Vec<Entry> {
|
||||
let mut id = *start_hash;
|
||||
let mut entries = vec![];
|
||||
let mut num_hashes = cur_hashes;
|
||||
|
||||
for transactions in transaction_batches {
|
||||
let mut entry_batch = next_entries_mut(&mut id, &mut num_hashes, transactions);
|
||||
entries.append(&mut entry_batch);
|
||||
}
|
||||
entries
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_verify_slice() {
|
||||
let zero = Hash::default();
|
||||
@@ -140,45 +142,57 @@ mod tests {
|
||||
assert!(vec![][..].verify(&zero)); // base case
|
||||
assert!(vec![Entry::new_tick(0, &zero)][..].verify(&zero)); // singleton case 1
|
||||
assert!(!vec![Entry::new_tick(0, &zero)][..].verify(&one)); // singleton case 2, bad
|
||||
assert!(next_entries(&zero, 0, vec![vec![]; 2])[..].verify(&zero)); // inductive step
|
||||
assert!(next_entries_batched(&zero, 0, vec![vec![]; 2])[..].verify(&zero)); // inductive step
|
||||
|
||||
let mut bad_ticks = next_entries(&zero, 0, vec![vec![]; 2]);
|
||||
let mut bad_ticks = next_entries_batched(&zero, 0, vec![vec![]; 2]);
|
||||
bad_ticks[1].id = one;
|
||||
assert!(!bad_ticks.verify(&zero)); // inductive step, bad
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_entry_to_blobs() {
|
||||
fn test_entries_to_blobs() {
|
||||
let zero = Hash::default();
|
||||
let one = hash(&zero);
|
||||
let keypair = KeyPair::new();
|
||||
let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, one);
|
||||
let transactions = vec![tx0.clone(); 10000];
|
||||
let e0 = Entry::new(&zero, 0, transactions);
|
||||
let transactions = vec![tx0; 10_000];
|
||||
let entries = next_entries(&zero, 0, transactions);
|
||||
|
||||
let entries = vec![e0.clone(); 1];
|
||||
let blob_recycler = BlobRecycler::default();
|
||||
let mut blob_q = VecDeque::new();
|
||||
entries.to_blobs(&blob_recycler, &mut blob_q);
|
||||
|
||||
assert_eq!(reconstruct_entries_from_blobs(&blob_q), entries);
|
||||
assert_eq!(
|
||||
reconstruct_entries_from_blobs(blob_q, &blob_recycler).unwrap(),
|
||||
entries
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_next_entries() {
|
||||
fn test_bad_blobs_attack() {
|
||||
let blob_recycler = BlobRecycler::default();
|
||||
let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
|
||||
let blobs_q = packet::to_blobs(vec![(0, addr)], &blob_recycler).unwrap(); // <-- attack!
|
||||
assert!(reconstruct_entries_from_blobs(blobs_q, &blob_recycler).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_next_entries_batched() {
|
||||
// this also tests next_entries, ugly, but is an easy way to do vec of vec (batch)
|
||||
let mut id = Hash::default();
|
||||
let next_id = hash(&id);
|
||||
let keypair = KeyPair::new();
|
||||
let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, next_id);
|
||||
let transactions = vec![tx0.clone(); 5];
|
||||
|
||||
let transactions = vec![tx0; 5];
|
||||
let transaction_batches = vec![transactions.clone(); 5];
|
||||
let entries0 = next_entries(&id, 0, transaction_batches);
|
||||
let entries0 = next_entries_batched(&id, 0, transaction_batches);
|
||||
|
||||
assert_eq!(entries0.len(), 5);
|
||||
|
||||
let mut entries1 = vec![];
|
||||
for _ in 0..5 {
|
||||
let entry = next_entry(&id, 0, transactions.clone());
|
||||
let entry = next_entry(&id, 1, transactions.clone());
|
||||
id = entry.id;
|
||||
entries1.push(entry);
|
||||
}
|
||||
@@ -190,14 +204,30 @@ mod tests {
|
||||
mod bench {
|
||||
extern crate test;
|
||||
use self::test::Bencher;
|
||||
use hash::hash;
|
||||
use ledger::*;
|
||||
use packet::BlobRecycler;
|
||||
use signature::{KeyPair, KeyPairUtil};
|
||||
use transaction::Transaction;
|
||||
|
||||
#[bench]
|
||||
fn bench_next_entries(bencher: &mut Bencher) {
|
||||
let start_hash = Hash::default();
|
||||
let entries = next_entries(&start_hash, 10_000, vec![vec![]; 8]);
|
||||
fn bench_block_to_blobs_to_block(bencher: &mut Bencher) {
|
||||
let zero = Hash::default();
|
||||
let one = hash(&zero);
|
||||
let keypair = KeyPair::new();
|
||||
let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, one);
|
||||
let transactions = vec![tx0; 10];
|
||||
let entries = next_entries(&zero, 1, transactions);
|
||||
|
||||
let blob_recycler = BlobRecycler::default();
|
||||
bencher.iter(|| {
|
||||
assert!(entries.verify(&start_hash));
|
||||
let mut blob_q = VecDeque::new();
|
||||
entries.to_blobs(&blob_recycler, &mut blob_q);
|
||||
assert_eq!(
|
||||
reconstruct_entries_from_blobs(blob_q, &blob_recycler).unwrap(),
|
||||
entries
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
}
|
||||
|
17
src/lib.rs
17
src/lib.rs
@@ -1,8 +1,20 @@
|
||||
//! The `solana` library implements the Solana high-performance blockchain architecture.
|
||||
//! It includes a full Rust implementation of the architecture (see
|
||||
//! [Server](server/struct.Server.html)) as well as hooks to GPU implementations of its most
|
||||
//! paralellizable components (i.e. [SigVerify](sigverify/index.html)). It also includes
|
||||
//! command-line tools to spin up fullnodes and a Rust library
|
||||
//! (see [ThinClient](thin_client/struct.ThinClient.html)) to interact with them.
|
||||
//!
|
||||
|
||||
#![cfg_attr(feature = "unstable", feature(test))]
|
||||
#[macro_use]
|
||||
pub mod counter;
|
||||
pub mod bank;
|
||||
pub mod banking_stage;
|
||||
pub mod blob_fetch_stage;
|
||||
pub mod budget;
|
||||
pub mod crdt;
|
||||
pub mod drone;
|
||||
pub mod entry;
|
||||
pub mod entry_writer;
|
||||
#[cfg(feature = "erasure")]
|
||||
@@ -12,6 +24,7 @@ pub mod hash;
|
||||
pub mod ledger;
|
||||
pub mod logger;
|
||||
pub mod mint;
|
||||
pub mod ncp;
|
||||
pub mod packet;
|
||||
pub mod payment_plan;
|
||||
pub mod record_stage;
|
||||
@@ -32,6 +45,7 @@ pub mod timing;
|
||||
pub mod tpu;
|
||||
pub mod transaction;
|
||||
pub mod tvu;
|
||||
pub mod window_stage;
|
||||
pub mod write_stage;
|
||||
extern crate bincode;
|
||||
extern crate byteorder;
|
||||
@@ -45,12 +59,11 @@ extern crate ring;
|
||||
extern crate serde;
|
||||
#[macro_use]
|
||||
extern crate serde_derive;
|
||||
extern crate pnet_datalink;
|
||||
extern crate serde_json;
|
||||
extern crate sha2;
|
||||
extern crate untrusted;
|
||||
|
||||
extern crate futures;
|
||||
|
||||
#[cfg(test)]
|
||||
#[macro_use]
|
||||
extern crate matches;
|
||||
|
@@ -1,3 +1,6 @@
|
||||
//! The `logger` module provides a setup function for `env_logger`. Its only function,
|
||||
//! `setup()` may be called multiple times.
|
||||
|
||||
use std::sync::{Once, ONCE_INIT};
|
||||
extern crate env_logger;
|
||||
|
||||
|
89
src/ncp.rs
Normal file
89
src/ncp.rs
Normal file
@@ -0,0 +1,89 @@
|
||||
//! The `ncp` module implements the network control plane.
|
||||
|
||||
use crdt;
|
||||
use packet;
|
||||
use result::Result;
|
||||
use std::net::UdpSocket;
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::sync::mpsc::channel;
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::thread::JoinHandle;
|
||||
use streamer;
|
||||
|
||||
pub struct Ncp {
|
||||
pub thread_hdls: Vec<JoinHandle<()>>,
|
||||
}
|
||||
|
||||
impl Ncp {
|
||||
pub fn new(
|
||||
crdt: Arc<RwLock<crdt::Crdt>>,
|
||||
window: Arc<RwLock<Vec<Option<packet::SharedBlob>>>>,
|
||||
gossip_listen_socket: UdpSocket,
|
||||
gossip_send_socket: UdpSocket,
|
||||
exit: Arc<AtomicBool>,
|
||||
) -> Result<Ncp> {
|
||||
let blob_recycler = packet::BlobRecycler::default();
|
||||
let (request_sender, request_receiver) = channel();
|
||||
trace!(
|
||||
"Ncp: id: {:?}, listening on: {:?}",
|
||||
&crdt.read().unwrap().me[..4],
|
||||
gossip_listen_socket.local_addr().unwrap()
|
||||
);
|
||||
let t_receiver = streamer::blob_receiver(
|
||||
exit.clone(),
|
||||
blob_recycler.clone(),
|
||||
gossip_listen_socket,
|
||||
request_sender,
|
||||
)?;
|
||||
let (response_sender, response_receiver) = channel();
|
||||
let t_responder = streamer::responder(
|
||||
gossip_send_socket,
|
||||
exit.clone(),
|
||||
blob_recycler.clone(),
|
||||
response_receiver,
|
||||
);
|
||||
let t_listen = crdt::Crdt::listen(
|
||||
crdt.clone(),
|
||||
window,
|
||||
blob_recycler.clone(),
|
||||
request_receiver,
|
||||
response_sender.clone(),
|
||||
exit.clone(),
|
||||
);
|
||||
let t_gossip = crdt::Crdt::gossip(crdt.clone(), blob_recycler, response_sender, exit);
|
||||
let thread_hdls = vec![t_receiver, t_responder, t_listen, t_gossip];
|
||||
Ok(Ncp { thread_hdls })
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crdt::{Crdt, TestNode};
|
||||
use ncp::Ncp;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::{Arc, RwLock};
|
||||
|
||||
#[test]
|
||||
#[ignore]
|
||||
// test that stage will exit when flag is set
|
||||
// TODO: Troubleshoot Docker-based coverage build and re-enabled
|
||||
// this test. It is probably failing due to too many threads.
|
||||
fn test_exit() {
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let tn = TestNode::new();
|
||||
let crdt = Crdt::new(tn.data.clone());
|
||||
let c = Arc::new(RwLock::new(crdt));
|
||||
let w = Arc::new(RwLock::new(vec![]));
|
||||
let d = Ncp::new(
|
||||
c.clone(),
|
||||
w,
|
||||
tn.sockets.gossip,
|
||||
tn.sockets.gossip_send,
|
||||
exit.clone(),
|
||||
).unwrap();
|
||||
exit.store(true, Ordering::Relaxed);
|
||||
for t in d.thread_hdls {
|
||||
t.join().expect("thread join");
|
||||
}
|
||||
}
|
||||
}
|
@@ -1,6 +1,7 @@
|
||||
//! The `packet` module defines data structures and methods to pull data from the network.
|
||||
use bincode::{deserialize, serialize};
|
||||
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
|
||||
use counter::Counter;
|
||||
use result::{Error, Result};
|
||||
use serde::Serialize;
|
||||
use signature::PublicKey;
|
||||
@@ -9,7 +10,9 @@ use std::fmt;
|
||||
use std::io;
|
||||
use std::mem::size_of;
|
||||
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket};
|
||||
use std::sync::atomic::AtomicUsize;
|
||||
use std::sync::{Arc, Mutex, RwLock};
|
||||
use std::time::Instant;
|
||||
|
||||
pub type SharedPackets = Arc<RwLock<Packets>>;
|
||||
pub type SharedBlob = Arc<RwLock<Blob>>;
|
||||
@@ -18,7 +21,7 @@ pub type BlobRecycler = Recycler<Blob>;
|
||||
|
||||
pub const NUM_PACKETS: usize = 1024 * 8;
|
||||
pub const BLOB_SIZE: usize = 64 * 1024;
|
||||
pub const BLOB_DATA_SIZE: usize = BLOB_SIZE - BLOB_ID_END;
|
||||
pub const BLOB_DATA_SIZE: usize = BLOB_SIZE - BLOB_HEADER_SIZE;
|
||||
pub const PACKET_DATA_SIZE: usize = 256;
|
||||
pub const NUM_BLOBS: usize = (NUM_PACKETS * PACKET_DATA_SIZE) / BLOB_SIZE;
|
||||
|
||||
@@ -26,6 +29,7 @@ pub const NUM_BLOBS: usize = (NUM_PACKETS * PACKET_DATA_SIZE) / BLOB_SIZE;
|
||||
#[repr(C)]
|
||||
pub struct Meta {
|
||||
pub size: usize,
|
||||
pub num_retransmits: u64,
|
||||
pub addr: [u16; 8],
|
||||
pub port: u16,
|
||||
pub v6: bool,
|
||||
@@ -169,6 +173,7 @@ impl<T: Default> Recycler<T> {
|
||||
|
||||
impl Packets {
|
||||
fn run_read_from(&mut self, socket: &UdpSocket) -> Result<usize> {
|
||||
static mut COUNTER: Counter = create_counter!("packets", 10);
|
||||
self.packets.resize(NUM_PACKETS, Packet::default());
|
||||
let mut i = 0;
|
||||
//DOCUMENTED SIDE-EFFECT
|
||||
@@ -178,12 +183,14 @@ impl Packets {
|
||||
// * read until it fails
|
||||
// * set it back to blocking before returning
|
||||
socket.set_nonblocking(false)?;
|
||||
let mut start = Instant::now();
|
||||
for p in &mut self.packets {
|
||||
p.meta.size = 0;
|
||||
trace!("receiving");
|
||||
trace!("receiving on {}", socket.local_addr().unwrap());
|
||||
match socket.recv_from(&mut p.data) {
|
||||
Err(_) if i > 0 => {
|
||||
debug!("got {:?} messages", i);
|
||||
inc_counter!(COUNTER, i, start);
|
||||
debug!("got {:?} messages on {}", i, socket.local_addr().unwrap());
|
||||
break;
|
||||
}
|
||||
Err(e) => {
|
||||
@@ -194,6 +201,7 @@ impl Packets {
|
||||
p.meta.size = nrecv;
|
||||
p.meta.set_addr(&from);
|
||||
if i == 0 {
|
||||
start = Instant::now();
|
||||
socket.set_nonblocking(true)?;
|
||||
}
|
||||
}
|
||||
@@ -217,9 +225,13 @@ impl Packets {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn to_packets<T: Serialize>(r: &PacketRecycler, xs: Vec<T>) -> Vec<SharedPackets> {
|
||||
pub fn to_packets_chunked<T: Serialize>(
|
||||
r: &PacketRecycler,
|
||||
xs: Vec<T>,
|
||||
chunks: usize,
|
||||
) -> Vec<SharedPackets> {
|
||||
let mut out = vec![];
|
||||
for x in xs.chunks(NUM_PACKETS) {
|
||||
for x in xs.chunks(chunks) {
|
||||
let p = r.allocate();
|
||||
p.write()
|
||||
.unwrap()
|
||||
@@ -236,6 +248,10 @@ pub fn to_packets<T: Serialize>(r: &PacketRecycler, xs: Vec<T>) -> Vec<SharedPac
|
||||
return out;
|
||||
}
|
||||
|
||||
pub fn to_packets<T: Serialize>(r: &PacketRecycler, xs: Vec<T>) -> Vec<SharedPackets> {
|
||||
to_packets_chunked(r, xs, NUM_PACKETS)
|
||||
}
|
||||
|
||||
pub fn to_blob<T: Serialize>(
|
||||
resp: T,
|
||||
rsp_addr: SocketAddr,
|
||||
@@ -246,10 +262,7 @@ pub fn to_blob<T: Serialize>(
|
||||
let mut b = blob.write().unwrap();
|
||||
let v = serialize(&resp)?;
|
||||
let len = v.len();
|
||||
// TODO: we are not using .data_mut() method here because
|
||||
// the raw bytes are being serialized and sent, this isn't the
|
||||
// right interface, and we should create a separate path for
|
||||
// sending request responses in the RPU
|
||||
assert!(len < BLOB_SIZE);
|
||||
b.data[..len].copy_from_slice(&v);
|
||||
b.meta.size = len;
|
||||
b.meta.set_addr(&rsp_addr);
|
||||
@@ -270,6 +283,17 @@ pub fn to_blobs<T: Serialize>(
|
||||
|
||||
const BLOB_INDEX_END: usize = size_of::<u64>();
|
||||
const BLOB_ID_END: usize = BLOB_INDEX_END + size_of::<usize>() + size_of::<PublicKey>();
|
||||
const BLOB_FLAGS_END: usize = BLOB_ID_END + size_of::<u32>();
|
||||
const BLOB_SIZE_END: usize = BLOB_FLAGS_END + size_of::<u64>();
|
||||
|
||||
macro_rules! align {
|
||||
($x:expr, $align:expr) => {
|
||||
$x + ($align - 1) & !($align - 1)
|
||||
};
|
||||
}
|
||||
|
||||
pub const BLOB_FLAG_IS_CODING: u32 = 0x1;
|
||||
pub const BLOB_HEADER_SIZE: usize = align!(BLOB_SIZE_END, 64);
|
||||
|
||||
impl Blob {
|
||||
pub fn get_index(&self) -> Result<u64> {
|
||||
@@ -283,7 +307,8 @@ impl Blob {
|
||||
self.data[..BLOB_INDEX_END].clone_from_slice(&wtr);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// sender id, we use this for identifying if its a blob from the leader that we should
|
||||
/// retransmit. eventually blobs should have a signature that we can use ffor spam filtering
|
||||
pub fn get_id(&self) -> Result<PublicKey> {
|
||||
let e = deserialize(&self.data[BLOB_INDEX_END..BLOB_ID_END])?;
|
||||
Ok(e)
|
||||
@@ -295,14 +320,51 @@ impl Blob {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn get_flags(&self) -> Result<u32> {
|
||||
let mut rdr = io::Cursor::new(&self.data[BLOB_ID_END..BLOB_FLAGS_END]);
|
||||
let r = rdr.read_u32::<LittleEndian>()?;
|
||||
Ok(r)
|
||||
}
|
||||
|
||||
pub fn set_flags(&mut self, ix: u32) -> Result<()> {
|
||||
let mut wtr = vec![];
|
||||
wtr.write_u32::<LittleEndian>(ix)?;
|
||||
self.data[BLOB_ID_END..BLOB_FLAGS_END].clone_from_slice(&wtr);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn is_coding(&self) -> bool {
|
||||
return (self.get_flags().unwrap() & BLOB_FLAG_IS_CODING) != 0;
|
||||
}
|
||||
|
||||
pub fn set_coding(&mut self) -> Result<()> {
|
||||
let flags = self.get_flags().unwrap();
|
||||
self.set_flags(flags | BLOB_FLAG_IS_CODING)
|
||||
}
|
||||
|
||||
pub fn get_data_size(&self) -> Result<u64> {
|
||||
let mut rdr = io::Cursor::new(&self.data[BLOB_FLAGS_END..BLOB_SIZE_END]);
|
||||
let r = rdr.read_u64::<LittleEndian>()?;
|
||||
Ok(r)
|
||||
}
|
||||
|
||||
pub fn set_data_size(&mut self, ix: u64) -> Result<()> {
|
||||
let mut wtr = vec![];
|
||||
wtr.write_u64::<LittleEndian>(ix)?;
|
||||
self.data[BLOB_FLAGS_END..BLOB_SIZE_END].clone_from_slice(&wtr);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn data(&self) -> &[u8] {
|
||||
&self.data[BLOB_ID_END..]
|
||||
&self.data[BLOB_HEADER_SIZE..]
|
||||
}
|
||||
pub fn data_mut(&mut self) -> &mut [u8] {
|
||||
&mut self.data[BLOB_ID_END..]
|
||||
&mut self.data[BLOB_HEADER_SIZE..]
|
||||
}
|
||||
pub fn set_size(&mut self, size: usize) {
|
||||
self.meta.size = size + BLOB_ID_END;
|
||||
let new_size = size + BLOB_HEADER_SIZE;
|
||||
self.meta.size = new_size;
|
||||
self.set_data_size(new_size as u64).unwrap();
|
||||
}
|
||||
pub fn recv_from(re: &BlobRecycler, socket: &UdpSocket) -> Result<VecDeque<SharedBlob>> {
|
||||
let mut v = VecDeque::new();
|
||||
@@ -317,9 +379,10 @@ impl Blob {
|
||||
let r = re.allocate();
|
||||
{
|
||||
let mut p = r.write().expect("'r' write lock in pub fn recv_from");
|
||||
trace!("receiving on {}", socket.local_addr().unwrap());
|
||||
match socket.recv_from(&mut p.data) {
|
||||
Err(_) if i > 0 => {
|
||||
trace!("got {:?} messages", i);
|
||||
trace!("got {:?} messages on {}", i, socket.local_addr().unwrap());
|
||||
break;
|
||||
}
|
||||
Err(e) => {
|
||||
|
@@ -6,18 +6,27 @@
|
||||
use chrono::prelude::*;
|
||||
use signature::PublicKey;
|
||||
|
||||
/// The types of events a payment plan can process.
|
||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
|
||||
pub enum Witness {
|
||||
/// The current time.
|
||||
Timestamp(DateTime<Utc>),
|
||||
|
||||
/// A siganture from PublicKey.
|
||||
Signature(PublicKey),
|
||||
}
|
||||
|
||||
/// Some amount of tokens that should be sent to the `to` `PublicKey`.
|
||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
|
||||
pub struct Payment {
|
||||
/// Amount to be paid.
|
||||
pub tokens: i64,
|
||||
|
||||
/// The `PublicKey` that `tokens` should be paid to.
|
||||
pub to: PublicKey,
|
||||
}
|
||||
|
||||
/// Interface to smart contracts.
|
||||
pub trait PaymentPlan {
|
||||
/// Return Payment if the payment plan requires no additional Witnesses.
|
||||
fn final_payment(&self) -> Option<Payment>;
|
||||
|
@@ -1,22 +1,22 @@
|
||||
//! The `record_stage` module provides an object for generating a Proof of History.
|
||||
//! It records Event items on behalf of its users. It continuously generates
|
||||
//! new hashes, only stopping to check if it has been sent an Event item. It
|
||||
//! tags each Event with an Entry, and sends it back. The Entry includes the
|
||||
//! Event, the latest hash, and the number of hashes since the last transaction.
|
||||
//! It records Transaction items on behalf of its users. It continuously generates
|
||||
//! new hashes, only stopping to check if it has been sent an Transaction item. It
|
||||
//! tags each Transaction with an Entry, and sends it back. The Entry includes the
|
||||
//! Transaction, the latest hash, and the number of hashes since the last transaction.
|
||||
//! The resulting stream of entries represents ordered transactions in time.
|
||||
|
||||
use entry::Entry;
|
||||
use hash::Hash;
|
||||
use recorder::Recorder;
|
||||
use std::sync::mpsc::{channel, Receiver, Sender, TryRecvError};
|
||||
use std::thread::{spawn, JoinHandle};
|
||||
use std::sync::mpsc::{channel, Receiver, RecvError, Sender, TryRecvError};
|
||||
use std::thread::{Builder, JoinHandle};
|
||||
use std::time::{Duration, Instant};
|
||||
use transaction::Transaction;
|
||||
|
||||
#[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))]
|
||||
pub enum Signal {
|
||||
Tick,
|
||||
Events(Vec<Transaction>),
|
||||
Transactions(Vec<Transaction>),
|
||||
}
|
||||
|
||||
pub struct RecordStage {
|
||||
@@ -25,33 +25,19 @@ pub struct RecordStage {
|
||||
}
|
||||
|
||||
impl RecordStage {
|
||||
/// A background thread that will continue tagging received Event messages and
|
||||
/// A background thread that will continue tagging received Transaction messages and
|
||||
/// sending back Entry messages until either the receiver or sender channel is closed.
|
||||
pub fn new(
|
||||
transaction_receiver: Receiver<Signal>,
|
||||
start_hash: &Hash,
|
||||
tick_duration: Option<Duration>,
|
||||
) -> Self {
|
||||
pub fn new(signal_receiver: Receiver<Signal>, start_hash: &Hash) -> Self {
|
||||
let (entry_sender, entry_receiver) = channel();
|
||||
let start_hash = start_hash.clone();
|
||||
|
||||
let thread_hdl = spawn(move || {
|
||||
let thread_hdl = Builder::new()
|
||||
.name("solana-record-stage".to_string())
|
||||
.spawn(move || {
|
||||
let mut recorder = Recorder::new(start_hash);
|
||||
let duration_data = tick_duration.map(|dur| (Instant::now(), dur));
|
||||
loop {
|
||||
if let Err(_) = Self::process_transactions(
|
||||
&mut recorder,
|
||||
duration_data,
|
||||
&transaction_receiver,
|
||||
&entry_sender,
|
||||
) {
|
||||
return;
|
||||
}
|
||||
if duration_data.is_some() {
|
||||
recorder.hash();
|
||||
}
|
||||
}
|
||||
});
|
||||
let _ = Self::process_signals(&mut recorder, &signal_receiver, &entry_sender);
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
RecordStage {
|
||||
entry_receiver,
|
||||
@@ -59,29 +45,88 @@ impl RecordStage {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn process_transactions(
|
||||
/// Same as `RecordStage::new`, but will automatically produce entries every `tick_duration`.
|
||||
pub fn new_with_clock(
|
||||
signal_receiver: Receiver<Signal>,
|
||||
start_hash: &Hash,
|
||||
tick_duration: Duration,
|
||||
) -> Self {
|
||||
let (entry_sender, entry_receiver) = channel();
|
||||
let start_hash = start_hash.clone();
|
||||
|
||||
let thread_hdl = Builder::new()
|
||||
.name("solana-record-stage".to_string())
|
||||
.spawn(move || {
|
||||
let mut recorder = Recorder::new(start_hash);
|
||||
let start_time = Instant::now();
|
||||
loop {
|
||||
if let Err(_) = Self::try_process_signals(
|
||||
&mut recorder,
|
||||
start_time,
|
||||
tick_duration,
|
||||
&signal_receiver,
|
||||
&entry_sender,
|
||||
) {
|
||||
return;
|
||||
}
|
||||
recorder.hash();
|
||||
}
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
RecordStage {
|
||||
entry_receiver,
|
||||
thread_hdl,
|
||||
}
|
||||
}
|
||||
|
||||
fn process_signal(
|
||||
signal: Signal,
|
||||
recorder: &mut Recorder,
|
||||
sender: &Sender<Entry>,
|
||||
) -> Result<(), ()> {
|
||||
let txs = if let Signal::Transactions(txs) = signal {
|
||||
txs
|
||||
} else {
|
||||
vec![]
|
||||
};
|
||||
let entries = recorder.record(txs);
|
||||
let mut result = Ok(());
|
||||
for entry in entries {
|
||||
result = sender.send(entry).map_err(|_| ());
|
||||
if result.is_err() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
result
|
||||
}
|
||||
|
||||
fn process_signals(
|
||||
recorder: &mut Recorder,
|
||||
duration_data: Option<(Instant, Duration)>,
|
||||
receiver: &Receiver<Signal>,
|
||||
sender: &Sender<Entry>,
|
||||
) -> Result<(), ()> {
|
||||
loop {
|
||||
if let Some((start_time, tick_duration)) = duration_data {
|
||||
match receiver.recv() {
|
||||
Ok(signal) => Self::process_signal(signal, recorder, sender)?,
|
||||
Err(RecvError) => return Err(()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn try_process_signals(
|
||||
recorder: &mut Recorder,
|
||||
start_time: Instant,
|
||||
tick_duration: Duration,
|
||||
receiver: &Receiver<Signal>,
|
||||
sender: &Sender<Entry>,
|
||||
) -> Result<(), ()> {
|
||||
loop {
|
||||
if let Some(entry) = recorder.tick(start_time, tick_duration) {
|
||||
sender.send(entry).or(Err(()))?;
|
||||
}
|
||||
}
|
||||
match receiver.try_recv() {
|
||||
Ok(signal) => match signal {
|
||||
Signal::Tick => {
|
||||
let entry = recorder.record(vec![]);
|
||||
sender.send(entry).or(Err(()))?;
|
||||
}
|
||||
Signal::Events(transactions) => {
|
||||
let entry = recorder.record(transactions);
|
||||
sender.send(entry).or(Err(()))?;
|
||||
}
|
||||
},
|
||||
Ok(signal) => Self::process_signal(signal, recorder, sender)?,
|
||||
Err(TryRecvError::Empty) => return Ok(()),
|
||||
Err(TryRecvError::Disconnected) => return Err(()),
|
||||
};
|
||||
@@ -101,7 +146,7 @@ mod tests {
|
||||
fn test_historian() {
|
||||
let (tx_sender, tx_receiver) = channel();
|
||||
let zero = Hash::default();
|
||||
let record_stage = RecordStage::new(tx_receiver, &zero, None);
|
||||
let record_stage = RecordStage::new(tx_receiver, &zero);
|
||||
|
||||
tx_sender.send(Signal::Tick).unwrap();
|
||||
sleep(Duration::new(0, 1_000_000));
|
||||
@@ -127,7 +172,7 @@ mod tests {
|
||||
fn test_historian_closed_sender() {
|
||||
let (tx_sender, tx_receiver) = channel();
|
||||
let zero = Hash::default();
|
||||
let record_stage = RecordStage::new(tx_receiver, &zero, None);
|
||||
let record_stage = RecordStage::new(tx_receiver, &zero);
|
||||
drop(record_stage.entry_receiver);
|
||||
tx_sender.send(Signal::Tick).unwrap();
|
||||
assert_eq!(record_stage.thread_hdl.join().unwrap(), ());
|
||||
@@ -137,23 +182,25 @@ mod tests {
|
||||
fn test_transactions() {
|
||||
let (tx_sender, signal_receiver) = channel();
|
||||
let zero = Hash::default();
|
||||
let record_stage = RecordStage::new(signal_receiver, &zero, None);
|
||||
let record_stage = RecordStage::new(signal_receiver, &zero);
|
||||
let alice_keypair = KeyPair::new();
|
||||
let bob_pubkey = KeyPair::new().pubkey();
|
||||
let tx0 = Transaction::new(&alice_keypair, bob_pubkey, 1, zero);
|
||||
let tx1 = Transaction::new(&alice_keypair, bob_pubkey, 2, zero);
|
||||
tx_sender.send(Signal::Events(vec![tx0, tx1])).unwrap();
|
||||
tx_sender
|
||||
.send(Signal::Transactions(vec![tx0, tx1]))
|
||||
.unwrap();
|
||||
drop(tx_sender);
|
||||
let entries: Vec<_> = record_stage.entry_receiver.iter().collect();
|
||||
assert_eq!(entries.len(), 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[ignore]
|
||||
fn test_ticking_historian() {
|
||||
fn test_clock() {
|
||||
let (tx_sender, tx_receiver) = channel();
|
||||
let zero = Hash::default();
|
||||
let record_stage = RecordStage::new(tx_receiver, &zero, Some(Duration::from_millis(20)));
|
||||
let record_stage =
|
||||
RecordStage::new_with_clock(tx_receiver, &zero, Duration::from_millis(20));
|
||||
sleep(Duration::from_millis(900));
|
||||
tx_sender.send(Signal::Tick).unwrap();
|
||||
drop(tx_sender);
|
||||
|
@@ -1,8 +1,9 @@
|
||||
//! The `recorder` module provides an object for generating a Proof of History.
|
||||
//! It records Event items on behalf of its users.
|
||||
//! It records Transaction items on behalf of its users.
|
||||
|
||||
use entry::Entry;
|
||||
use hash::{hash, Hash};
|
||||
use ledger::next_entries_mut;
|
||||
use std::time::{Duration, Instant};
|
||||
use transaction::Transaction;
|
||||
|
||||
@@ -26,15 +27,19 @@ impl Recorder {
|
||||
self.num_hashes += 1;
|
||||
}
|
||||
|
||||
pub fn record(&mut self, transactions: Vec<Transaction>) -> Entry {
|
||||
Entry::new_mut(&mut self.last_hash, &mut self.num_hashes, transactions)
|
||||
pub fn record(&mut self, transactions: Vec<Transaction>) -> Vec<Entry> {
|
||||
next_entries_mut(&mut self.last_hash, &mut self.num_hashes, transactions)
|
||||
}
|
||||
|
||||
pub fn tick(&mut self, start_time: Instant, tick_duration: Duration) -> Option<Entry> {
|
||||
if start_time.elapsed() > tick_duration * (self.num_ticks + 1) {
|
||||
// TODO: don't let this overflow u32
|
||||
self.num_ticks += 1;
|
||||
Some(self.record(vec![]))
|
||||
Some(Entry::new_mut(
|
||||
&mut self.last_hash,
|
||||
&mut self.num_hashes,
|
||||
vec![],
|
||||
))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
|
@@ -6,7 +6,7 @@ use packet;
|
||||
use result::Result;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::Arc;
|
||||
use std::thread::{spawn, JoinHandle};
|
||||
use std::thread::{Builder, JoinHandle};
|
||||
use std::time::Duration;
|
||||
use streamer;
|
||||
|
||||
@@ -15,23 +15,21 @@ pub struct ReplicateStage {
|
||||
}
|
||||
|
||||
impl ReplicateStage {
|
||||
/// Process verified blobs, already in order
|
||||
/// Process entry blobs, already in order
|
||||
fn replicate_requests(
|
||||
bank: &Arc<Bank>,
|
||||
verified_receiver: &streamer::BlobReceiver,
|
||||
blob_receiver: &streamer::BlobReceiver,
|
||||
blob_recycler: &packet::BlobRecycler,
|
||||
) -> Result<()> {
|
||||
let timer = Duration::new(1, 0);
|
||||
let blobs = verified_receiver.recv_timeout(timer)?;
|
||||
let entries = ledger::reconstruct_entries_from_blobs(&blobs);
|
||||
let blobs = blob_receiver.recv_timeout(timer)?;
|
||||
let blobs_len = blobs.len();
|
||||
let entries = ledger::reconstruct_entries_from_blobs(blobs, &blob_recycler)?;
|
||||
let res = bank.process_entries(entries);
|
||||
if res.is_err() {
|
||||
error!("process_entries {} {:?}", blobs.len(), res);
|
||||
error!("process_entries {} {:?}", blobs_len, res);
|
||||
}
|
||||
res?;
|
||||
for blob in blobs {
|
||||
blob_recycler.recycle(blob);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -41,12 +39,15 @@ impl ReplicateStage {
|
||||
window_receiver: streamer::BlobReceiver,
|
||||
blob_recycler: packet::BlobRecycler,
|
||||
) -> Self {
|
||||
let thread_hdl = spawn(move || loop {
|
||||
let thread_hdl = Builder::new()
|
||||
.name("solana-replicate-stage".to_string())
|
||||
.spawn(move || loop {
|
||||
let e = Self::replicate_requests(&bank, &window_receiver, &blob_recycler);
|
||||
if e.is_err() && exit.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
});
|
||||
})
|
||||
.unwrap();
|
||||
ReplicateStage { thread_hdl }
|
||||
}
|
||||
}
|
||||
|
@@ -11,7 +11,7 @@ use std::net::SocketAddr;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::mpsc::{channel, Receiver};
|
||||
use std::sync::Arc;
|
||||
use std::thread::{spawn, JoinHandle};
|
||||
use std::thread::{Builder, JoinHandle};
|
||||
use std::time::Instant;
|
||||
use streamer;
|
||||
use timing;
|
||||
@@ -43,7 +43,7 @@ impl RequestStage {
|
||||
) -> Result<()> {
|
||||
let (batch, batch_len) = streamer::recv_batch(packet_receiver)?;
|
||||
|
||||
info!(
|
||||
debug!(
|
||||
"@{:?} request_stage: processing: {}",
|
||||
timing::timestamp(),
|
||||
batch_len
|
||||
@@ -70,7 +70,7 @@ impl RequestStage {
|
||||
}
|
||||
let total_time_s = timing::duration_as_s(&proc_start.elapsed());
|
||||
let total_time_ms = timing::duration_as_ms(&proc_start.elapsed());
|
||||
info!(
|
||||
debug!(
|
||||
"@{:?} done process batches: {} time: {:?}ms reqs: {} reqs/s: {}",
|
||||
timing::timestamp(),
|
||||
batch_len,
|
||||
@@ -90,7 +90,9 @@ impl RequestStage {
|
||||
let request_processor = Arc::new(request_processor);
|
||||
let request_processor_ = request_processor.clone();
|
||||
let (blob_sender, blob_receiver) = channel();
|
||||
let thread_hdl = spawn(move || loop {
|
||||
let thread_hdl = Builder::new()
|
||||
.name("solana-request-stage".to_string())
|
||||
.spawn(move || loop {
|
||||
let e = Self::process_request_packets(
|
||||
&request_processor_,
|
||||
&packet_receiver,
|
||||
@@ -103,7 +105,8 @@ impl RequestStage {
|
||||
break;
|
||||
}
|
||||
}
|
||||
});
|
||||
})
|
||||
.unwrap();
|
||||
RequestStage {
|
||||
thread_hdl,
|
||||
blob_receiver,
|
||||
|
24
src/rpu.rs
24
src/rpu.rs
@@ -1,5 +1,27 @@
|
||||
//! The `rpu` module implements the Request Processing Unit, a
|
||||
//! 5-stage transaction processing pipeline in software.
|
||||
//! 3-stage transaction processing pipeline in software. It listens
|
||||
//! for `Request` messages from clients and replies with `Response`
|
||||
//! messages.
|
||||
//!
|
||||
//! ```text
|
||||
//! .------.
|
||||
//! | Bank |
|
||||
//! `---+--`
|
||||
//! |
|
||||
//! .------------------|-------------------.
|
||||
//! | RPU | |
|
||||
//! | v |
|
||||
//! .---------. | .-------. .---------. .---------. | .---------.
|
||||
//! | Alice |--->| | | | | +---->| Alice |
|
||||
//! `---------` | | Fetch | | Request | | Respond | | `---------`
|
||||
//! | | Stage |->| Stage |->| Stage | |
|
||||
//! .---------. | | | | | | | | .---------.
|
||||
//! | Bob |--->| | | | | +---->| Bob |
|
||||
//! `---------` | `-------` `---------` `---------` | `---------`
|
||||
//! | |
|
||||
//! | |
|
||||
//! `--------------------------------------`
|
||||
//! ```
|
||||
|
||||
use bank::Bank;
|
||||
use packet;
|
||||
|
133
src/server.rs
133
src/server.rs
@@ -2,7 +2,7 @@
|
||||
|
||||
use bank::Bank;
|
||||
use crdt::{Crdt, ReplicatedData};
|
||||
use hash::Hash;
|
||||
use ncp::Ncp;
|
||||
use packet;
|
||||
use rpu::Rpu;
|
||||
use std::io::Write;
|
||||
@@ -20,9 +20,32 @@ pub struct Server {
|
||||
}
|
||||
|
||||
impl Server {
|
||||
/// Create a server instance acting as a leader.
|
||||
///
|
||||
/// ```text
|
||||
/// .---------------------.
|
||||
/// | Leader |
|
||||
/// | |
|
||||
/// .--------. | .-----. |
|
||||
/// | |---->| | |
|
||||
/// | Client | | | RPU | |
|
||||
/// | |<----| | |
|
||||
/// `----+---` | `-----` |
|
||||
/// | | ^ |
|
||||
/// | | | |
|
||||
/// | | .--+---. |
|
||||
/// | | | Bank | |
|
||||
/// | | `------` |
|
||||
/// | | ^ |
|
||||
/// | | | | .------------.
|
||||
/// | | .--+--. .-----. | | |
|
||||
/// `-------->| TPU +-->| NCP +------>| Validators |
|
||||
/// | `-----` `-----` | | |
|
||||
/// | | `------------`
|
||||
/// `---------------------`
|
||||
/// ```
|
||||
pub fn new_leader<W: Write + Send + 'static>(
|
||||
bank: Bank,
|
||||
start_hash: Hash,
|
||||
tick_duration: Option<Duration>,
|
||||
me: ReplicatedData,
|
||||
requests_socket: UdpSocket,
|
||||
@@ -41,7 +64,6 @@ impl Server {
|
||||
let blob_recycler = packet::BlobRecycler::default();
|
||||
let tpu = Tpu::new(
|
||||
bank.clone(),
|
||||
start_hash,
|
||||
tick_duration,
|
||||
transactions_socket,
|
||||
blob_recycler.clone(),
|
||||
@@ -51,45 +73,132 @@ impl Server {
|
||||
thread_hdls.extend(tpu.thread_hdls);
|
||||
|
||||
let crdt = Arc::new(RwLock::new(Crdt::new(me)));
|
||||
let t_gossip = Crdt::gossip(crdt.clone(), exit.clone());
|
||||
let window = streamer::default_window();
|
||||
let t_listen = Crdt::listen(crdt.clone(), window.clone(), gossip_socket, exit.clone());
|
||||
let gossip_send_socket = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
|
||||
let ncp = Ncp::new(
|
||||
crdt.clone(),
|
||||
window.clone(),
|
||||
gossip_socket,
|
||||
gossip_send_socket,
|
||||
exit.clone(),
|
||||
).expect("Ncp::new");
|
||||
thread_hdls.extend(ncp.thread_hdls);
|
||||
|
||||
let t_broadcast = streamer::broadcaster(
|
||||
broadcast_socket,
|
||||
exit.clone(),
|
||||
crdt.clone(),
|
||||
crdt,
|
||||
window,
|
||||
blob_recycler.clone(),
|
||||
tpu.blob_receiver,
|
||||
);
|
||||
thread_hdls.extend(vec![t_gossip, t_listen, t_broadcast]);
|
||||
thread_hdls.extend(vec![t_broadcast]);
|
||||
|
||||
Server { thread_hdls }
|
||||
}
|
||||
|
||||
/// Create a server instance acting as a validator.
|
||||
///
|
||||
/// ```text
|
||||
/// .-------------------------------.
|
||||
/// | Validator |
|
||||
/// | |
|
||||
/// .--------. | .-----. |
|
||||
/// | |-------------->| | |
|
||||
/// | Client | | | RPU | |
|
||||
/// | |<--------------| | |
|
||||
/// `--------` | `-----` |
|
||||
/// | ^ |
|
||||
/// | | |
|
||||
/// | .--+---. |
|
||||
/// | | Bank | |
|
||||
/// | `------` |
|
||||
/// | ^ |
|
||||
/// .--------. | | | .------------.
|
||||
/// | | | .--+--. | | |
|
||||
/// | Leader |<------------->| TVU +<--------------->| |
|
||||
/// | | | `-----` | | Validators |
|
||||
/// | | | ^ | | |
|
||||
/// | | | | | | |
|
||||
/// | | | .--+--. | | |
|
||||
/// | |<------------->| NCP +<--------------->| |
|
||||
/// | | | `-----` | | |
|
||||
/// `--------` | | `------------`
|
||||
/// `-------------------------------`
|
||||
/// ```
|
||||
pub fn new_validator(
|
||||
bank: Bank,
|
||||
me: ReplicatedData,
|
||||
requests_socket: UdpSocket,
|
||||
respond_socket: UdpSocket,
|
||||
replicate_socket: UdpSocket,
|
||||
gossip_socket: UdpSocket,
|
||||
leader_repl_data: ReplicatedData,
|
||||
gossip_listen_socket: UdpSocket,
|
||||
repair_socket: UdpSocket,
|
||||
entry_point: ReplicatedData,
|
||||
exit: Arc<AtomicBool>,
|
||||
) -> Self {
|
||||
let bank = Arc::new(bank);
|
||||
let mut thread_hdls = vec![];
|
||||
let rpu = Rpu::new(bank.clone(), requests_socket, respond_socket, exit.clone());
|
||||
thread_hdls.extend(rpu.thread_hdls);
|
||||
|
||||
let crdt = Arc::new(RwLock::new(Crdt::new(me)));
|
||||
crdt.write()
|
||||
.expect("'crdt' write lock before insert() in pub fn replicate")
|
||||
.insert(&entry_point);
|
||||
let window = streamer::default_window();
|
||||
let gossip_send_socket = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
|
||||
let retransmit_socket = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
|
||||
let ncp = Ncp::new(
|
||||
crdt.clone(),
|
||||
window.clone(),
|
||||
gossip_listen_socket,
|
||||
gossip_send_socket,
|
||||
exit.clone(),
|
||||
).expect("Ncp::new");
|
||||
|
||||
let tvu = Tvu::new(
|
||||
bank.clone(),
|
||||
me,
|
||||
gossip_socket,
|
||||
crdt.clone(),
|
||||
window.clone(),
|
||||
replicate_socket,
|
||||
leader_repl_data,
|
||||
repair_socket,
|
||||
retransmit_socket,
|
||||
exit.clone(),
|
||||
);
|
||||
thread_hdls.extend(tvu.thread_hdls);
|
||||
thread_hdls.extend(ncp.thread_hdls);
|
||||
Server { thread_hdls }
|
||||
}
|
||||
}
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use bank::Bank;
|
||||
use crdt::TestNode;
|
||||
use mint::Mint;
|
||||
use server::Server;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::Arc;
|
||||
#[test]
|
||||
fn validator_exit() {
|
||||
let tn = TestNode::new();
|
||||
let alice = Mint::new(10_000);
|
||||
let bank = Bank::new(&alice);
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let v = Server::new_validator(
|
||||
bank,
|
||||
tn.data.clone(),
|
||||
tn.sockets.requests,
|
||||
tn.sockets.respond,
|
||||
tn.sockets.replicate,
|
||||
tn.sockets.gossip,
|
||||
tn.sockets.repair,
|
||||
tn.data,
|
||||
exit.clone(),
|
||||
);
|
||||
exit.store(true, Ordering::Relaxed);
|
||||
for t in v.thread_hdls {
|
||||
t.join().unwrap();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -56,9 +56,8 @@ pub struct GenKeys {
|
||||
}
|
||||
|
||||
impl GenKeys {
|
||||
pub fn new(seed: &[u8]) -> GenKeys {
|
||||
let seed32: Vec<_> = seed.iter().map(|&x| x as u32).collect();
|
||||
let rng = ChaChaRng::from_seed(&seed32);
|
||||
pub fn new(seed: [u8; 32]) -> GenKeys {
|
||||
let rng = ChaChaRng::from_seed(seed);
|
||||
GenKeys {
|
||||
generator: RefCell::new(rng),
|
||||
}
|
||||
@@ -68,7 +67,7 @@ impl GenKeys {
|
||||
KeyPair::generate_pkcs8(self).unwrap().to_vec()
|
||||
}
|
||||
|
||||
pub fn gen_n_seeds(&self, n: i64) -> Vec<[u8; 16]> {
|
||||
pub fn gen_n_seeds(&self, n: i64) -> Vec<[u8; 32]> {
|
||||
let mut rng = self.generator.borrow_mut();
|
||||
(0..n).map(|_| rng.gen()).collect()
|
||||
}
|
||||
@@ -77,7 +76,7 @@ impl GenKeys {
|
||||
self.gen_n_seeds(n)
|
||||
.into_par_iter()
|
||||
.map(|seed| {
|
||||
let pkcs8 = GenKeys::new(&seed).new_key();
|
||||
let pkcs8 = GenKeys::new(seed).new_key();
|
||||
KeyPair::from_pkcs8(untrusted::Input::from(&pkcs8)).unwrap()
|
||||
})
|
||||
.collect()
|
||||
@@ -87,7 +86,7 @@ impl GenKeys {
|
||||
impl SecureRandom for GenKeys {
|
||||
fn fill(&self, dest: &mut [u8]) -> Result<(), Unspecified> {
|
||||
let mut rng = self.generator.borrow_mut();
|
||||
rng.fill_bytes(dest);
|
||||
rng.fill(dest);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -99,17 +98,17 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_new_key_is_deterministic() {
|
||||
let seed = [1, 2, 3, 4];
|
||||
let rng0 = GenKeys::new(&seed);
|
||||
let rng1 = GenKeys::new(&seed);
|
||||
let seed = [0u8; 32];
|
||||
let rng0 = GenKeys::new(seed);
|
||||
let rng1 = GenKeys::new(seed);
|
||||
|
||||
for _ in 0..100 {
|
||||
assert_eq!(rng0.new_key(), rng1.new_key());
|
||||
}
|
||||
}
|
||||
|
||||
fn gen_n_pubkeys(seed: &[u8], n: i64) -> HashSet<PublicKey> {
|
||||
GenKeys::new(&seed)
|
||||
fn gen_n_pubkeys(seed: [u8; 32], n: i64) -> HashSet<PublicKey> {
|
||||
GenKeys::new(seed)
|
||||
.gen_n_keypairs(n)
|
||||
.into_iter()
|
||||
.map(|x| x.pubkey())
|
||||
@@ -118,8 +117,8 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_gen_n_pubkeys_deterministic() {
|
||||
let seed = [1, 2, 3, 4];
|
||||
assert_eq!(gen_n_pubkeys(&seed, 50), gen_n_pubkeys(&seed, 50));
|
||||
let seed = [0u8; 32];
|
||||
assert_eq!(gen_n_pubkeys(seed, 50), gen_n_pubkeys(seed, 50));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -132,8 +131,7 @@ mod bench {
|
||||
|
||||
#[bench]
|
||||
fn bench_gen_keys(b: &mut Bencher) {
|
||||
let seed: &[_] = &[1, 2, 3, 4];
|
||||
let rnd = GenKeys::new(seed);
|
||||
let rnd = GenKeys::new([0u8; 32]);
|
||||
b.iter(|| rnd.gen_n_keypairs(1000));
|
||||
}
|
||||
}
|
||||
|
@@ -1,5 +1,14 @@
|
||||
//! The `sigverify` module provides digital signature verification functions.
|
||||
//! By default, signatures are verified in parallel using all available CPU
|
||||
//! cores. When `--features=cuda` is enabled, signature verification is
|
||||
//! offloaded to the GPU.
|
||||
//!
|
||||
|
||||
use counter::Counter;
|
||||
use packet::{Packet, SharedPackets};
|
||||
use std::mem::size_of;
|
||||
use std::sync::atomic::AtomicUsize;
|
||||
use std::time::Instant;
|
||||
use transaction::{PUB_KEY_OFFSET, SIGNED_DATA_OFFSET, SIG_OFFSET};
|
||||
|
||||
pub const TX_OFFSET: usize = 0;
|
||||
@@ -61,8 +70,11 @@ fn batch_size(batches: &Vec<SharedPackets>) -> usize {
|
||||
#[cfg(not(feature = "cuda"))]
|
||||
pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
|
||||
use rayon::prelude::*;
|
||||
static mut COUNTER: Counter = create_counter!("ed25519_verify", 1);
|
||||
let start = Instant::now();
|
||||
let count = batch_size(batches);
|
||||
info!("CPU ECDSA for {}", batch_size(batches));
|
||||
batches
|
||||
let rv = batches
|
||||
.into_par_iter()
|
||||
.map(|p| {
|
||||
p.read()
|
||||
@@ -72,13 +84,17 @@ pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
|
||||
.map(verify_packet)
|
||||
.collect()
|
||||
})
|
||||
.collect()
|
||||
.collect();
|
||||
inc_counter!(COUNTER, count, start);
|
||||
rv
|
||||
}
|
||||
|
||||
#[cfg(feature = "cuda")]
|
||||
pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
|
||||
use packet::PACKET_DATA_SIZE;
|
||||
|
||||
static mut COUNTER: Counter = create_counter!("ed25519_verify_cuda", 1);
|
||||
let start = Instant::now();
|
||||
let count = batch_size(batches);
|
||||
info!("CUDA ECDSA for {}", batch_size(batches));
|
||||
let mut out = Vec::new();
|
||||
let mut elems = Vec::new();
|
||||
@@ -137,6 +153,7 @@ pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
|
||||
num += 1;
|
||||
}
|
||||
}
|
||||
inc_counter!(COUNTER, count, start);
|
||||
rvs
|
||||
}
|
||||
|
||||
|
@@ -1,4 +1,9 @@
|
||||
//! The `sigverify_stage` implements the signature verification stage of the TPU.
|
||||
//! The `sigverify_stage` implements the signature verification stage of the TPU. It
|
||||
//! receives a list of lists of packets and outputs the same list, but tags each
|
||||
//! top-level list with a list of booleans, telling the next stage whether the
|
||||
//! signature in that packet is valid. It assumes each packet contains one
|
||||
//! transaction. All processing is done on the CPU by default and on a GPU
|
||||
//! if the `cuda` feature is enabled with `--features=cuda`.
|
||||
|
||||
use packet::SharedPackets;
|
||||
use rand::{thread_rng, Rng};
|
||||
|
357
src/streamer.rs
357
src/streamer.rs
@@ -10,7 +10,7 @@ use std::net::{SocketAddr, UdpSocket};
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::mpsc;
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::thread::{spawn, JoinHandle};
|
||||
use std::thread::{Builder, JoinHandle};
|
||||
use std::time::Duration;
|
||||
|
||||
pub const WINDOW_SIZE: usize = 2 * 1024;
|
||||
@@ -18,6 +18,7 @@ pub type PacketReceiver = mpsc::Receiver<SharedPackets>;
|
||||
pub type PacketSender = mpsc::Sender<SharedPackets>;
|
||||
pub type BlobSender = mpsc::Sender<VecDeque<SharedBlob>>;
|
||||
pub type BlobReceiver = mpsc::Receiver<VecDeque<SharedBlob>>;
|
||||
pub type Window = Arc<RwLock<Vec<Option<SharedBlob>>>>;
|
||||
|
||||
fn recv_loop(
|
||||
sock: &UdpSocket,
|
||||
@@ -58,10 +59,13 @@ pub fn receiver(
|
||||
if res.is_err() {
|
||||
panic!("streamer::receiver set_read_timeout error");
|
||||
}
|
||||
spawn(move || {
|
||||
Builder::new()
|
||||
.name("solana-receiver".to_string())
|
||||
.spawn(move || {
|
||||
let _ = recv_loop(&sock, &exit, &recycler, &packet_sender);
|
||||
()
|
||||
})
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
fn recv_send(sock: &UdpSocket, recycler: &BlobRecycler, r: &BlobReceiver) -> Result<()> {
|
||||
@@ -96,16 +100,20 @@ pub fn responder(
|
||||
recycler: BlobRecycler,
|
||||
r: BlobReceiver,
|
||||
) -> JoinHandle<()> {
|
||||
spawn(move || loop {
|
||||
Builder::new()
|
||||
.name("solana-responder".to_string())
|
||||
.spawn(move || loop {
|
||||
if recv_send(&sock, &recycler, &r).is_err() && exit.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
})
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
//TODO, we would need to stick block authentication before we create the
|
||||
//window.
|
||||
fn recv_blobs(recycler: &BlobRecycler, sock: &UdpSocket, s: &BlobSender) -> Result<()> {
|
||||
trace!("receiving on {}", sock.local_addr().unwrap());
|
||||
let dq = Blob::recv_from(recycler, sock)?;
|
||||
if !dq.is_empty() {
|
||||
s.send(dq)?;
|
||||
@@ -123,17 +131,20 @@ pub fn blob_receiver(
|
||||
//1 second timeout on socket read
|
||||
let timer = Duration::new(1, 0);
|
||||
sock.set_read_timeout(Some(timer))?;
|
||||
let t = spawn(move || loop {
|
||||
let t = Builder::new()
|
||||
.name("solana-blob_receiver".to_string())
|
||||
.spawn(move || loop {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
let _ = recv_blobs(&recycler, &sock, &s);
|
||||
});
|
||||
})
|
||||
.unwrap();
|
||||
Ok(t)
|
||||
}
|
||||
|
||||
fn find_next_missing(
|
||||
locked_window: &Arc<RwLock<Vec<Option<SharedBlob>>>>,
|
||||
locked_window: &Window,
|
||||
crdt: &Arc<RwLock<Crdt>>,
|
||||
consumed: &mut usize,
|
||||
received: &mut usize,
|
||||
@@ -158,14 +169,26 @@ fn find_next_missing(
|
||||
}
|
||||
|
||||
fn repair_window(
|
||||
locked_window: &Arc<RwLock<Vec<Option<SharedBlob>>>>,
|
||||
locked_window: &Window,
|
||||
crdt: &Arc<RwLock<Crdt>>,
|
||||
_recycler: &BlobRecycler,
|
||||
last: &mut usize,
|
||||
times: &mut usize,
|
||||
consumed: &mut usize,
|
||||
received: &mut usize,
|
||||
) -> Result<()> {
|
||||
let reqs = find_next_missing(locked_window, crdt, consumed, received)?;
|
||||
#[cfg(feature = "erasure")]
|
||||
{
|
||||
if erasure::recover(
|
||||
_recycler,
|
||||
&mut locked_window.write().unwrap(),
|
||||
*consumed,
|
||||
*received,
|
||||
).is_err()
|
||||
{
|
||||
trace!("erasure::recover failed");
|
||||
}
|
||||
}
|
||||
//exponential backoff
|
||||
if *last != *consumed {
|
||||
*times = 0;
|
||||
@@ -177,6 +200,7 @@ fn repair_window(
|
||||
trace!("repair_window counter {} {}", *times, *consumed);
|
||||
return Ok(());
|
||||
}
|
||||
let reqs = find_next_missing(locked_window, crdt, consumed, received)?;
|
||||
let sock = UdpSocket::bind("0.0.0.0:0")?;
|
||||
for (to, req) in reqs {
|
||||
//todo cache socket
|
||||
@@ -188,7 +212,7 @@ fn repair_window(
|
||||
}
|
||||
|
||||
fn recv_window(
|
||||
locked_window: &Arc<RwLock<Vec<Option<SharedBlob>>>>,
|
||||
locked_window: &Window,
|
||||
crdt: &Arc<RwLock<Crdt>>,
|
||||
recycler: &BlobRecycler,
|
||||
consumed: &mut usize,
|
||||
@@ -251,16 +275,26 @@ fn recv_window(
|
||||
if pix > *received {
|
||||
*received = pix;
|
||||
}
|
||||
// Got a blob which has already been consumed, skip it
|
||||
// probably from a repair window request
|
||||
if pix < *consumed {
|
||||
debug!(
|
||||
"received: {} but older than consumed: {} skipping..",
|
||||
pix, *consumed
|
||||
);
|
||||
continue;
|
||||
}
|
||||
let w = pix % WINDOW_SIZE;
|
||||
//TODO, after the block are authenticated
|
||||
//if we get different blocks at the same index
|
||||
//that is a network failure/attack
|
||||
trace!("window w: {} size: {}", w, p.meta.size);
|
||||
drop(p);
|
||||
{
|
||||
let mut window = locked_window.write().unwrap();
|
||||
if window[w].is_none() {
|
||||
window[w] = Some(b_);
|
||||
} else if let &Some(ref cblob) = &window[w] {
|
||||
} else if let Some(cblob) = &window[w] {
|
||||
if cblob.read().unwrap().get_index().unwrap() != pix as u64 {
|
||||
warn!("overrun blob at index {:}", w);
|
||||
} else {
|
||||
@@ -273,31 +307,45 @@ fn recv_window(
|
||||
if window[k].is_none() {
|
||||
break;
|
||||
}
|
||||
contq.push_back(window[k].clone().expect("clone in fn recv_window"));
|
||||
window[k] = None;
|
||||
*consumed += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
{
|
||||
let buf: Vec<_> = locked_window
|
||||
let mut is_coding = false;
|
||||
if let &Some(ref cblob) = &window[k] {
|
||||
if cblob
|
||||
.read()
|
||||
.unwrap()
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(i, v)| {
|
||||
if i == (*consumed % WINDOW_SIZE) {
|
||||
assert!(v.is_none());
|
||||
"_"
|
||||
} else if v.is_none() {
|
||||
"0"
|
||||
.expect("blob read lock for flags streamer::window")
|
||||
.is_coding()
|
||||
{
|
||||
is_coding = true;
|
||||
}
|
||||
}
|
||||
if !is_coding {
|
||||
contq.push_back(window[k].clone().expect("clone in fn recv_window"));
|
||||
*consumed += 1;
|
||||
|
||||
#[cfg(not(feature = "erasure"))]
|
||||
{
|
||||
window[k] = None;
|
||||
}
|
||||
} else {
|
||||
"1"
|
||||
#[cfg(feature = "erasure")]
|
||||
{
|
||||
let block_start = *consumed - (*consumed % erasure::NUM_CODED);
|
||||
let coding_end = block_start + erasure::NUM_CODED;
|
||||
// We've received all this block's data blobs, go and null out the window now
|
||||
for j in block_start..coding_end {
|
||||
window[j % WINDOW_SIZE] = None;
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
trace!("WINDOW: {}", buf.join(""));
|
||||
|
||||
*consumed += erasure::MAX_MISSING;
|
||||
debug!(
|
||||
"skipping processing coding blob k: {} consumed: {}",
|
||||
k, *consumed
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
print_window(locked_window, *consumed);
|
||||
trace!("sending contq.len: {}", contq.len());
|
||||
if !contq.is_empty() {
|
||||
trace!("sending contq.len: {}", contq.len());
|
||||
@@ -306,20 +354,51 @@ fn recv_window(
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn default_window() -> Arc<RwLock<Vec<Option<SharedBlob>>>> {
|
||||
fn print_window(locked_window: &Window, consumed: usize) {
|
||||
{
|
||||
let buf: Vec<_> = locked_window
|
||||
.read()
|
||||
.unwrap()
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(i, v)| {
|
||||
if i == (consumed % WINDOW_SIZE) {
|
||||
"_"
|
||||
} else if v.is_none() {
|
||||
"0"
|
||||
} else {
|
||||
if let &Some(ref cblob) = &v {
|
||||
if cblob.read().unwrap().is_coding() {
|
||||
"C"
|
||||
} else {
|
||||
"1"
|
||||
}
|
||||
} else {
|
||||
"0"
|
||||
}
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
debug!("WINDOW ({}): {}", consumed, buf.join(""));
|
||||
}
|
||||
}
|
||||
|
||||
pub fn default_window() -> Window {
|
||||
Arc::new(RwLock::new(vec![None; WINDOW_SIZE]))
|
||||
}
|
||||
|
||||
pub fn window(
|
||||
exit: Arc<AtomicBool>,
|
||||
crdt: Arc<RwLock<Crdt>>,
|
||||
window: Arc<RwLock<Vec<Option<SharedBlob>>>>,
|
||||
window: Window,
|
||||
recycler: BlobRecycler,
|
||||
r: BlobReceiver,
|
||||
s: BlobSender,
|
||||
retransmit: BlobSender,
|
||||
) -> JoinHandle<()> {
|
||||
spawn(move || {
|
||||
Builder::new()
|
||||
.name("solana-window".to_string())
|
||||
.spawn(move || {
|
||||
let mut consumed = 0;
|
||||
let mut received = 0;
|
||||
let mut last = 0;
|
||||
@@ -341,6 +420,7 @@ pub fn window(
|
||||
let _ = repair_window(
|
||||
&window,
|
||||
&crdt,
|
||||
&recycler,
|
||||
&mut last,
|
||||
&mut times,
|
||||
&mut consumed,
|
||||
@@ -348,26 +428,36 @@ pub fn window(
|
||||
);
|
||||
}
|
||||
})
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
fn broadcast(
|
||||
crdt: &Arc<RwLock<Crdt>>,
|
||||
window: &Arc<RwLock<Vec<Option<SharedBlob>>>>,
|
||||
window: &Window,
|
||||
recycler: &BlobRecycler,
|
||||
r: &BlobReceiver,
|
||||
sock: &UdpSocket,
|
||||
transmit_index: &mut u64,
|
||||
receive_index: &mut u64,
|
||||
) -> Result<()> {
|
||||
let timer = Duration::new(1, 0);
|
||||
let mut dq = r.recv_timeout(timer)?;
|
||||
while let Ok(mut nq) = r.try_recv() {
|
||||
dq.append(&mut nq);
|
||||
}
|
||||
let mut blobs = dq.into_iter().collect();
|
||||
/// appends codes to the list of blobs allowing us to reconstruct the stream
|
||||
let mut blobs: Vec<_> = dq.into_iter().collect();
|
||||
|
||||
print_window(window, *receive_index as usize);
|
||||
|
||||
// Insert the coding blobs into the blob stream
|
||||
#[cfg(feature = "erasure")]
|
||||
erasure::generate_coding(re, blobs, consumed);
|
||||
Crdt::broadcast(crdt, &blobs, &sock, transmit_index)?;
|
||||
erasure::add_coding_blobs(recycler, &mut blobs, *receive_index);
|
||||
|
||||
let blobs_len = blobs.len();
|
||||
info!("broadcast blobs.len: {}", blobs_len);
|
||||
|
||||
// Index the blobs
|
||||
Crdt::index_blobs(crdt, &blobs, receive_index)?;
|
||||
// keep the cache of blobs that are broadcast
|
||||
{
|
||||
let mut win = window.write().unwrap();
|
||||
@@ -394,6 +484,24 @@ fn broadcast(
|
||||
win[pos] = Some(b);
|
||||
}
|
||||
}
|
||||
|
||||
// Fill in the coding blob data from the window data blobs
|
||||
#[cfg(feature = "erasure")]
|
||||
{
|
||||
if erasure::generate_coding(
|
||||
&mut window.write().unwrap(),
|
||||
*receive_index as usize,
|
||||
blobs_len,
|
||||
).is_err()
|
||||
{
|
||||
return Err(Error::GenericError);
|
||||
}
|
||||
}
|
||||
|
||||
*receive_index += blobs_len as u64;
|
||||
|
||||
// Send blobs out from the window
|
||||
Crdt::broadcast(crdt, &window, &sock, transmit_index, *receive_index)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -410,19 +518,31 @@ pub fn broadcaster(
|
||||
sock: UdpSocket,
|
||||
exit: Arc<AtomicBool>,
|
||||
crdt: Arc<RwLock<Crdt>>,
|
||||
window: Arc<RwLock<Vec<Option<SharedBlob>>>>,
|
||||
window: Window,
|
||||
recycler: BlobRecycler,
|
||||
r: BlobReceiver,
|
||||
) -> JoinHandle<()> {
|
||||
spawn(move || {
|
||||
Builder::new()
|
||||
.name("solana-broadcaster".to_string())
|
||||
.spawn(move || {
|
||||
let mut transmit_index = 0;
|
||||
let mut receive_index = 0;
|
||||
loop {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
let _ = broadcast(&crdt, &window, &recycler, &r, &sock, &mut transmit_index);
|
||||
let _ = broadcast(
|
||||
&crdt,
|
||||
&window,
|
||||
&recycler,
|
||||
&r,
|
||||
&sock,
|
||||
&mut transmit_index,
|
||||
&mut receive_index,
|
||||
);
|
||||
}
|
||||
})
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
fn retransmit(
|
||||
@@ -462,7 +582,9 @@ pub fn retransmitter(
|
||||
recycler: BlobRecycler,
|
||||
r: BlobReceiver,
|
||||
) -> JoinHandle<()> {
|
||||
spawn(move || {
|
||||
Builder::new()
|
||||
.name("solana-retransmitter".to_string())
|
||||
.spawn(move || {
|
||||
trace!("retransmitter started");
|
||||
loop {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
@@ -473,6 +595,7 @@ pub fn retransmitter(
|
||||
}
|
||||
trace!("exiting retransmitter");
|
||||
})
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
#[cfg(all(feature = "unstable", test))]
|
||||
@@ -583,11 +706,8 @@ mod bench {
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use crdt::{Crdt, ReplicatedData};
|
||||
use logger;
|
||||
use crdt::{Crdt, TestNode};
|
||||
use packet::{Blob, BlobRecycler, Packet, PacketRecycler, Packets, PACKET_DATA_SIZE};
|
||||
use signature::KeyPair;
|
||||
use signature::KeyPairUtil;
|
||||
use std::collections::VecDeque;
|
||||
use std::io;
|
||||
use std::io::Write;
|
||||
@@ -595,9 +715,8 @@ mod test {
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::mpsc::channel;
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::thread::sleep;
|
||||
use std::time::Duration;
|
||||
use streamer::{blob_receiver, receiver, responder, retransmitter, window};
|
||||
use streamer::{blob_receiver, receiver, responder, window};
|
||||
use streamer::{default_window, BlobReceiver, PacketReceiver};
|
||||
|
||||
fn get_msgs(r: PacketReceiver, num: &mut usize) {
|
||||
@@ -671,29 +790,21 @@ mod test {
|
||||
|
||||
#[test]
|
||||
pub fn window_send_test() {
|
||||
let pubkey_me = KeyPair::new().pubkey();
|
||||
let read = UdpSocket::bind("127.0.0.1:0").expect("bind");
|
||||
let addr = read.local_addr().unwrap();
|
||||
let send = UdpSocket::bind("127.0.0.1:0").expect("bind");
|
||||
let serve = UdpSocket::bind("127.0.0.1:0").expect("bind");
|
||||
let transaction = UdpSocket::bind("127.0.0.1:0").expect("bind");
|
||||
let tn = TestNode::new();
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let rep_data = ReplicatedData::new(
|
||||
pubkey_me,
|
||||
read.local_addr().unwrap(),
|
||||
send.local_addr().unwrap(),
|
||||
serve.local_addr().unwrap(),
|
||||
transaction.local_addr().unwrap(),
|
||||
);
|
||||
let mut crdt_me = Crdt::new(rep_data);
|
||||
let mut crdt_me = Crdt::new(tn.data.clone());
|
||||
let me_id = crdt_me.my_data().id;
|
||||
crdt_me.set_leader(me_id);
|
||||
let subs = Arc::new(RwLock::new(crdt_me));
|
||||
|
||||
let resp_recycler = BlobRecycler::default();
|
||||
let (s_reader, r_reader) = channel();
|
||||
let t_receiver =
|
||||
blob_receiver(exit.clone(), resp_recycler.clone(), read, s_reader).unwrap();
|
||||
let t_receiver = blob_receiver(
|
||||
exit.clone(),
|
||||
resp_recycler.clone(),
|
||||
tn.sockets.gossip,
|
||||
s_reader,
|
||||
).unwrap();
|
||||
let (s_window, r_window) = channel();
|
||||
let (s_retransmit, r_retransmit) = channel();
|
||||
let win = default_window();
|
||||
@@ -707,7 +818,12 @@ mod test {
|
||||
s_retransmit,
|
||||
);
|
||||
let (s_responder, r_responder) = channel();
|
||||
let t_responder = responder(send, exit.clone(), resp_recycler.clone(), r_responder);
|
||||
let t_responder = responder(
|
||||
tn.sockets.replicate,
|
||||
exit.clone(),
|
||||
resp_recycler.clone(),
|
||||
r_responder,
|
||||
);
|
||||
let mut msgs = VecDeque::new();
|
||||
for v in 0..10 {
|
||||
let i = 9 - v;
|
||||
@@ -718,7 +834,7 @@ mod test {
|
||||
w.set_id(me_id).unwrap();
|
||||
assert_eq!(i, w.get_index().unwrap());
|
||||
w.meta.size = PACKET_DATA_SIZE;
|
||||
w.meta.set_addr(&addr);
|
||||
w.meta.set_addr(&tn.data.gossip_addr);
|
||||
msgs.push_back(b_);
|
||||
}
|
||||
s_responder.send(msgs).expect("send");
|
||||
@@ -735,111 +851,4 @@ mod test {
|
||||
t_responder.join().expect("join");
|
||||
t_window.join().expect("join");
|
||||
}
|
||||
|
||||
fn test_node() -> (Arc<RwLock<Crdt>>, UdpSocket, UdpSocket, UdpSocket) {
|
||||
let gossip = UdpSocket::bind("127.0.0.1:0").unwrap();
|
||||
let replicate = UdpSocket::bind("127.0.0.1:0").unwrap();
|
||||
let serve = UdpSocket::bind("127.0.0.1:0").unwrap();
|
||||
let transaction = UdpSocket::bind("127.0.0.1:0").unwrap();
|
||||
let pubkey = KeyPair::new().pubkey();
|
||||
let d = ReplicatedData::new(
|
||||
pubkey,
|
||||
gossip.local_addr().unwrap(),
|
||||
replicate.local_addr().unwrap(),
|
||||
serve.local_addr().unwrap(),
|
||||
transaction.local_addr().unwrap(),
|
||||
);
|
||||
trace!("data: {:?}", d);
|
||||
let crdt = Crdt::new(d);
|
||||
(Arc::new(RwLock::new(crdt)), gossip, replicate, serve)
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[ignore]
|
||||
//retransmit from leader to replicate target
|
||||
pub fn retransmit() {
|
||||
logger::setup();
|
||||
trace!("retransmit test start");
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let (crdt_leader, sock_gossip_leader, _, sock_leader) = test_node();
|
||||
let (crdt_target, sock_gossip_target, sock_replicate_target, _) = test_node();
|
||||
let leader_data = crdt_leader.read().unwrap().my_data().clone();
|
||||
crdt_leader.write().unwrap().insert(&leader_data);
|
||||
crdt_leader.write().unwrap().set_leader(leader_data.id);
|
||||
let t_crdt_leader_g = Crdt::gossip(crdt_leader.clone(), exit.clone());
|
||||
let window_leader = Arc::new(RwLock::new(vec![]));
|
||||
let t_crdt_leader_l = Crdt::listen(
|
||||
crdt_leader.clone(),
|
||||
window_leader,
|
||||
sock_gossip_leader,
|
||||
exit.clone(),
|
||||
);
|
||||
|
||||
crdt_target.write().unwrap().insert(&leader_data);
|
||||
crdt_target.write().unwrap().set_leader(leader_data.id);
|
||||
let t_crdt_target_g = Crdt::gossip(crdt_target.clone(), exit.clone());
|
||||
let window_target = Arc::new(RwLock::new(vec![]));
|
||||
let t_crdt_target_l = Crdt::listen(
|
||||
crdt_target.clone(),
|
||||
window_target,
|
||||
sock_gossip_target,
|
||||
exit.clone(),
|
||||
);
|
||||
//leader retransmitter
|
||||
let (s_retransmit, r_retransmit) = channel();
|
||||
let blob_recycler = BlobRecycler::default();
|
||||
let saddr = sock_leader.local_addr().unwrap();
|
||||
let t_retransmit = retransmitter(
|
||||
sock_leader,
|
||||
exit.clone(),
|
||||
crdt_leader.clone(),
|
||||
blob_recycler.clone(),
|
||||
r_retransmit,
|
||||
);
|
||||
|
||||
//target receiver
|
||||
let (s_blob_receiver, r_blob_receiver) = channel();
|
||||
let t_receiver = blob_receiver(
|
||||
exit.clone(),
|
||||
blob_recycler.clone(),
|
||||
sock_replicate_target,
|
||||
s_blob_receiver,
|
||||
).unwrap();
|
||||
for _ in 0..10 {
|
||||
let done = crdt_target.read().unwrap().update_index == 2
|
||||
&& crdt_leader.read().unwrap().update_index == 2;
|
||||
if done {
|
||||
break;
|
||||
}
|
||||
let timer = Duration::new(1, 0);
|
||||
sleep(timer);
|
||||
}
|
||||
|
||||
//send the data through
|
||||
let mut bq = VecDeque::new();
|
||||
let b = blob_recycler.allocate();
|
||||
b.write().unwrap().meta.size = 10;
|
||||
bq.push_back(b);
|
||||
s_retransmit.send(bq).unwrap();
|
||||
let timer = Duration::new(5, 0);
|
||||
trace!("Waiting for timeout");
|
||||
let mut oq = r_blob_receiver.recv_timeout(timer).unwrap();
|
||||
assert_eq!(oq.len(), 1);
|
||||
let o = oq.pop_front().unwrap();
|
||||
let ro = o.read().unwrap();
|
||||
assert_eq!(ro.meta.size, 10);
|
||||
assert_eq!(ro.meta.addr(), saddr);
|
||||
exit.store(true, Ordering::Relaxed);
|
||||
let threads = vec![
|
||||
t_receiver,
|
||||
t_retransmit,
|
||||
t_crdt_target_g,
|
||||
t_crdt_target_l,
|
||||
t_crdt_leader_g,
|
||||
t_crdt_leader_l,
|
||||
];
|
||||
for t in threads {
|
||||
t.join().unwrap();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -4,7 +4,6 @@
|
||||
//! unstable and may change in future releases.
|
||||
|
||||
use bincode::{deserialize, serialize};
|
||||
use futures::future::{ok, FutureResult};
|
||||
use hash::Hash;
|
||||
use request::{Request, Response};
|
||||
use signature::{KeyPair, PublicKey, Signature};
|
||||
@@ -13,6 +12,7 @@ use std::io;
|
||||
use std::net::{SocketAddr, UdpSocket};
|
||||
use transaction::Transaction;
|
||||
|
||||
/// An object for querying and sending transactions to the network.
|
||||
pub struct ThinClient {
|
||||
requests_addr: SocketAddr,
|
||||
requests_socket: UdpSocket,
|
||||
@@ -106,7 +106,7 @@ impl ThinClient {
|
||||
while !done {
|
||||
let resp = self.recv_response()?;
|
||||
trace!("recv_response {:?}", resp);
|
||||
if let &Response::Balance { ref key, .. } = &resp {
|
||||
if let Response::Balance { key, .. } = &resp {
|
||||
done = key == pubkey;
|
||||
}
|
||||
self.process_response(resp);
|
||||
@@ -121,39 +121,43 @@ impl ThinClient {
|
||||
let req = Request::GetTransactionCount;
|
||||
let data =
|
||||
serialize(&req).expect("serialize GetTransactionCount in pub fn transaction_count");
|
||||
let mut done = false;
|
||||
while !done {
|
||||
self.requests_socket
|
||||
.send_to(&data, &self.requests_addr)
|
||||
.expect("buffer error in pub fn transaction_count");
|
||||
let mut done = false;
|
||||
while !done {
|
||||
let resp = self.recv_response().expect("transaction count dropped");
|
||||
|
||||
if let Ok(resp) = self.recv_response() {
|
||||
info!("recv_response {:?}", resp);
|
||||
if let &Response::TransactionCount { .. } = &resp {
|
||||
done = true;
|
||||
}
|
||||
self.process_response(resp);
|
||||
}
|
||||
}
|
||||
self.transaction_count
|
||||
}
|
||||
|
||||
/// Request the last Entry ID from the server. This method blocks
|
||||
/// until the server sends a response.
|
||||
pub fn get_last_id(&mut self) -> FutureResult<Hash, ()> {
|
||||
pub fn get_last_id(&mut self) -> Hash {
|
||||
info!("get_last_id");
|
||||
let req = Request::GetLastId;
|
||||
let data = serialize(&req).expect("serialize GetLastId in pub fn get_last_id");
|
||||
let mut done = false;
|
||||
while !done {
|
||||
self.requests_socket
|
||||
.send_to(&data, &self.requests_addr)
|
||||
.expect("buffer error in pub fn get_last_id");
|
||||
let mut done = false;
|
||||
while !done {
|
||||
let resp = self.recv_response().expect("get_last_id response");
|
||||
|
||||
if let Ok(resp) = self.recv_response() {
|
||||
if let &Response::LastId { .. } = &resp {
|
||||
done = true;
|
||||
}
|
||||
self.process_response(resp);
|
||||
}
|
||||
ok(self.last_id.expect("some last_id"))
|
||||
}
|
||||
self.last_id.expect("some last_id")
|
||||
}
|
||||
|
||||
pub fn poll_get_balance(&mut self, pubkey: &PublicKey) -> io::Result<i64> {
|
||||
@@ -177,7 +181,7 @@ mod tests {
|
||||
use super::*;
|
||||
use bank::Bank;
|
||||
use budget::Budget;
|
||||
use futures::Future;
|
||||
use crdt::TestNode;
|
||||
use logger;
|
||||
use mint::Mint;
|
||||
use server::Server;
|
||||
@@ -188,7 +192,6 @@ mod tests {
|
||||
use std::thread::sleep;
|
||||
use std::time::Duration;
|
||||
use transaction::{Instruction, Plan};
|
||||
use tvu::TestNode;
|
||||
|
||||
#[test]
|
||||
fn test_thin_client() {
|
||||
@@ -202,7 +205,6 @@ mod tests {
|
||||
|
||||
let server = Server::new_leader(
|
||||
bank,
|
||||
alice.last_id(),
|
||||
Some(Duration::from_millis(30)),
|
||||
leader.data.clone(),
|
||||
leader.sockets.requests,
|
||||
@@ -224,7 +226,7 @@ mod tests {
|
||||
leader.data.transactions_addr,
|
||||
transactions_socket,
|
||||
);
|
||||
let last_id = client.get_last_id().wait().unwrap();
|
||||
let last_id = client.get_last_id();
|
||||
let _sig = client
|
||||
.transfer(500, &alice.keypair(), bob_pubkey, &last_id)
|
||||
.unwrap();
|
||||
@@ -247,7 +249,6 @@ mod tests {
|
||||
|
||||
let server = Server::new_leader(
|
||||
bank,
|
||||
alice.last_id(),
|
||||
Some(Duration::from_millis(30)),
|
||||
leader.data.clone(),
|
||||
leader.sockets.requests,
|
||||
@@ -271,13 +272,13 @@ mod tests {
|
||||
leader.data.transactions_addr,
|
||||
transactions_socket,
|
||||
);
|
||||
let last_id = client.get_last_id().wait().unwrap();
|
||||
let last_id = client.get_last_id();
|
||||
|
||||
let tx = Transaction::new(&alice.keypair(), bob_pubkey, 500, last_id);
|
||||
|
||||
let _sig = client.transfer_signed(tx).unwrap();
|
||||
|
||||
let last_id = client.get_last_id().wait().unwrap();
|
||||
let last_id = client.get_last_id();
|
||||
|
||||
let mut tr2 = Transaction::new(&alice.keypair(), bob_pubkey, 501, last_id);
|
||||
if let Instruction::NewContract(contract) = &mut tr2.instruction {
|
||||
|
@@ -1,6 +1,11 @@
|
||||
//! The `timing` module provides std::time utility functions.
|
||||
use std::time::Duration;
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
pub fn duration_as_us(d: &Duration) -> u64 {
|
||||
return (d.as_secs() * 1000 * 1000) + (d.subsec_nanos() as u64 / 1_000);
|
||||
}
|
||||
|
||||
pub fn duration_as_ms(d: &Duration) -> u64 {
|
||||
return (d.as_secs() * 1000) + (d.subsec_nanos() as u64 / 1_000_000);
|
||||
}
|
||||
|
39
src/tpu.rs
39
src/tpu.rs
@@ -1,10 +1,33 @@
|
||||
//! The `tpu` module implements the Transaction Processing Unit, a
|
||||
//! 5-stage transaction processing pipeline in software.
|
||||
//!
|
||||
//! ```text
|
||||
//! .---------------------------------------------------------------.
|
||||
//! | TPU .-----. |
|
||||
//! | | PoH | |
|
||||
//! | `--+--` |
|
||||
//! | | |
|
||||
//! | v |
|
||||
//! | .-------. .-----------. .---------. .--------. .-------. |
|
||||
//! .---------. | | Fetch | | SigVerify | | Banking | | Record | | Write | | .------------.
|
||||
//! | Clients |--->| Stage |->| Stage |->| Stage |->| Stage |->| Stage +--->| Validators |
|
||||
//! `---------` | | | | | | | | | | | | `------------`
|
||||
//! | `-------` `-----------` `----+----` `--------` `---+---` |
|
||||
//! | | | |
|
||||
//! | | | |
|
||||
//! | | | |
|
||||
//! | | | |
|
||||
//! `---------------------------------|-----------------------|-----`
|
||||
//! | |
|
||||
//! v v
|
||||
//! .------. .--------.
|
||||
//! | Bank | | Ledger |
|
||||
//! `------` `--------`
|
||||
//! ```
|
||||
|
||||
use bank::Bank;
|
||||
use banking_stage::BankingStage;
|
||||
use fetch_stage::FetchStage;
|
||||
use hash::Hash;
|
||||
use packet::{BlobRecycler, PacketRecycler};
|
||||
use record_stage::RecordStage;
|
||||
use sigverify_stage::SigVerifyStage;
|
||||
@@ -25,7 +48,6 @@ pub struct Tpu {
|
||||
impl Tpu {
|
||||
pub fn new<W: Write + Send + 'static>(
|
||||
bank: Arc<Bank>,
|
||||
start_hash: Hash,
|
||||
tick_duration: Option<Duration>,
|
||||
transactions_socket: UdpSocket,
|
||||
blob_recycler: BlobRecycler,
|
||||
@@ -46,8 +68,14 @@ impl Tpu {
|
||||
packet_recycler.clone(),
|
||||
);
|
||||
|
||||
let record_stage =
|
||||
RecordStage::new(banking_stage.signal_receiver, &start_hash, tick_duration);
|
||||
let record_stage = match tick_duration {
|
||||
Some(tick_duration) => RecordStage::new_with_clock(
|
||||
banking_stage.signal_receiver,
|
||||
&bank.last_id(),
|
||||
tick_duration,
|
||||
),
|
||||
None => RecordStage::new(banking_stage.signal_receiver, &bank.last_id()),
|
||||
};
|
||||
|
||||
let write_stage = WriteStage::new(
|
||||
bank.clone(),
|
||||
@@ -56,13 +84,12 @@ impl Tpu {
|
||||
Mutex::new(writer),
|
||||
record_stage.entry_receiver,
|
||||
);
|
||||
|
||||
let mut thread_hdls = vec![
|
||||
fetch_stage.thread_hdl,
|
||||
banking_stage.thread_hdl,
|
||||
record_stage.thread_hdl,
|
||||
write_stage.thread_hdl,
|
||||
];
|
||||
thread_hdls.extend(fetch_stage.thread_hdls.into_iter());
|
||||
thread_hdls.extend(sigverify_stage.thread_hdls.into_iter());
|
||||
Tpu {
|
||||
blob_receiver: write_stage.blob_receiver,
|
||||
|
@@ -11,8 +11,10 @@ pub const SIGNED_DATA_OFFSET: usize = 112;
|
||||
pub const SIG_OFFSET: usize = 8;
|
||||
pub const PUB_KEY_OFFSET: usize = 80;
|
||||
|
||||
/// The type of payment plan. Each item must implement the PaymentPlan trait.
|
||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
|
||||
pub enum Plan {
|
||||
/// The builtin contract language Budget.
|
||||
Budget(Budget),
|
||||
}
|
||||
|
||||
@@ -37,29 +39,49 @@ impl PaymentPlan for Plan {
|
||||
}
|
||||
}
|
||||
|
||||
/// A smart contract.
|
||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
|
||||
pub struct Contract {
|
||||
/// The number of tokens allocated to the `Plan` and any transaction fees.
|
||||
pub tokens: i64,
|
||||
pub plan: Plan,
|
||||
}
|
||||
|
||||
/// An instruction to progress the smart contract.
|
||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
|
||||
pub enum Instruction {
|
||||
/// Declare and instanstansiate `Contract`.
|
||||
NewContract(Contract),
|
||||
|
||||
/// Tell a payment plan acknowledge the given `DateTime` has past.
|
||||
ApplyTimestamp(DateTime<Utc>),
|
||||
|
||||
/// Tell the payment plan that the `NewContract` with `Signature` has been
|
||||
/// signed by the containing transaction's `PublicKey`.
|
||||
ApplySignature(Signature),
|
||||
}
|
||||
|
||||
/// An instruction signed by a client with `PublicKey`.
|
||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
|
||||
pub struct Transaction {
|
||||
/// A digital signature of `instruction`, `last_id` and `fee`, signed by `PublicKey`.
|
||||
pub sig: Signature,
|
||||
|
||||
/// The `PublicKey` of the entity that signed the transaction data.
|
||||
pub from: PublicKey,
|
||||
|
||||
/// The action the server should take.
|
||||
pub instruction: Instruction,
|
||||
|
||||
/// The ID of a recent ledger entry.
|
||||
pub last_id: Hash,
|
||||
|
||||
/// The number of tokens paid for processing and storage of this transaction.
|
||||
pub fee: i64,
|
||||
}
|
||||
|
||||
impl Transaction {
|
||||
/// Create a signed transaction from the given `Instruction`.
|
||||
fn new_from_instruction(
|
||||
from_keypair: &KeyPair,
|
||||
instruction: Instruction,
|
||||
@@ -122,7 +144,7 @@ impl Transaction {
|
||||
last_id: Hash,
|
||||
) -> Self {
|
||||
let from = from_keypair.pubkey();
|
||||
let budget = Budget::Race(
|
||||
let budget = Budget::Or(
|
||||
(Condition::Timestamp(dt), Payment { tokens, to }),
|
||||
(Condition::Signature(from), Payment { tokens, to: from }),
|
||||
);
|
||||
@@ -131,6 +153,7 @@ impl Transaction {
|
||||
Self::new_from_instruction(from_keypair, instruction, last_id, 0)
|
||||
}
|
||||
|
||||
/// Get the transaction data to sign.
|
||||
fn get_sign_data(&self) -> Vec<u8> {
|
||||
let mut data = serialize(&(&self.instruction)).expect("serialize Contract");
|
||||
let last_id_data = serialize(&(&self.last_id)).expect("serialize last_id");
|
||||
@@ -148,13 +171,17 @@ impl Transaction {
|
||||
self.sig = Signature::clone_from_slice(keypair.sign(&sign_data).as_ref());
|
||||
}
|
||||
|
||||
/// Verify only the transaction signature.
|
||||
pub fn verify_sig(&self) -> bool {
|
||||
warn!("transaction signature verification called");
|
||||
self.sig.verify(&self.from, &self.get_sign_data())
|
||||
}
|
||||
|
||||
/// Verify only the payment plan.
|
||||
pub fn verify_plan(&self) -> bool {
|
||||
if let Instruction::NewContract(contract) = &self.instruction {
|
||||
self.fee >= 0 && self.fee <= contract.tokens
|
||||
self.fee >= 0
|
||||
&& self.fee <= contract.tokens
|
||||
&& contract.plan.verify(contract.tokens - self.fee)
|
||||
} else {
|
||||
true
|
||||
|
290
src/tvu.rs
290
src/tvu.rs
@@ -1,36 +1,50 @@
|
||||
//! The `tvu` module implements the Transaction Validation Unit, a
|
||||
//! 5-stage transaction validation pipeline in software.
|
||||
//! 1. streamer
|
||||
//! - Incoming blobs are picked up from the replicate socket.
|
||||
//! 2. verifier
|
||||
//! - TODO Blobs are sent to the GPU, and while the memory is there the PoH stream is verified
|
||||
//! along with the ecdsa signature for the blob and each signature in all the transactions. Blobs
|
||||
//! with errors are dropped, or marked for slashing.
|
||||
//! 3.a retransmit
|
||||
//! - Blobs originating from the parent (leader, at the moment, is the only parent), are retransmit to all the
|
||||
//! peers in the crdt. Peers is everyone who is not me or the leader that has a known replicate
|
||||
//! address.
|
||||
//! 3.b window
|
||||
//! - Verified blobs are placed into a window, indexed by the counter set by the leader.sockets. This could
|
||||
//! be the PoH counter if its monotonically increasing in each blob. Erasure coding is used to
|
||||
//! recover any missing packets, and requests are made at random to peers and parents to retransmit
|
||||
//! a missing packet.
|
||||
//! 4. accountant
|
||||
//! - Contigous blobs are sent to the accountant for processing transactions
|
||||
//! 5. validator
|
||||
//! - TODO Validation messages are sent back to the leader
|
||||
//! 3-stage transaction validation pipeline in software.
|
||||
//!
|
||||
//! ```text
|
||||
//! .------------------------------------------.
|
||||
//! | TVU |
|
||||
//! | |
|
||||
//! | | .------------.
|
||||
//! | .------------------------>| Validators |
|
||||
//! | .-------. | | `------------`
|
||||
//! .--------. | | | .----+---. .-----------. |
|
||||
//! | Leader |--------->| Blob | | Window | | Replicate | |
|
||||
//! `--------` | | Fetch |-->| Stage |-->| Stage | |
|
||||
//! .------------. | | Stage | | | | | |
|
||||
//! | Validators |----->| | `--------` `----+------` |
|
||||
//! `------------` | `-------` | |
|
||||
//! | | |
|
||||
//! | | |
|
||||
//! | | |
|
||||
//! `--------------------------------|---------`
|
||||
//! |
|
||||
//! v
|
||||
//! .------.
|
||||
//! | Bank |
|
||||
//! `------`
|
||||
//! ```
|
||||
//!
|
||||
//! 1. Fetch Stage
|
||||
//! - Incoming blobs are picked up from the replicate socket and repair socket.
|
||||
//! 2. Window Stage
|
||||
//! - Blobs are windowed until a contiguous chunk is available. This stage also repairs and
|
||||
//! retransmits blobs that are in the queue.
|
||||
//! 3. Replicate Stage
|
||||
//! - Transactions in blobs are processed and applied to the bank.
|
||||
//! - TODO We need to verify the signatures in the blobs.
|
||||
|
||||
use bank::Bank;
|
||||
use crdt::{Crdt, ReplicatedData};
|
||||
use blob_fetch_stage::BlobFetchStage;
|
||||
use crdt::Crdt;
|
||||
use packet;
|
||||
use replicate_stage::ReplicateStage;
|
||||
use signature::{KeyPair, KeyPairUtil};
|
||||
use std::net::UdpSocket;
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::sync::mpsc::channel;
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::thread::JoinHandle;
|
||||
use streamer;
|
||||
use window_stage::WindowStage;
|
||||
|
||||
pub struct Tvu {
|
||||
pub thread_hdls: Vec<JoinHandle<()>>,
|
||||
@@ -41,178 +55,84 @@ impl Tvu {
|
||||
/// on the bank state.
|
||||
/// # Arguments
|
||||
/// * `bank` - The bank state.
|
||||
/// * `me` - my configuration
|
||||
/// * `gossip` - my gossisp socket
|
||||
/// * `replicate` - my replicate socket
|
||||
/// * `leader` - leader configuration
|
||||
/// * `crdt` - The crdt state.
|
||||
/// * `window` - The window state.
|
||||
/// * `replicate_socket` - my replicate socket
|
||||
/// * `repair_socket` - my repair socket
|
||||
/// * `retransmit_socket` - my retransmit socket
|
||||
/// * `exit` - The exit signal.
|
||||
pub fn new(
|
||||
bank: Arc<Bank>,
|
||||
me: ReplicatedData,
|
||||
gossip: UdpSocket,
|
||||
replicate: UdpSocket,
|
||||
leader: ReplicatedData,
|
||||
crdt: Arc<RwLock<Crdt>>,
|
||||
window: streamer::Window,
|
||||
replicate_socket: UdpSocket,
|
||||
repair_socket: UdpSocket,
|
||||
retransmit_socket: UdpSocket,
|
||||
exit: Arc<AtomicBool>,
|
||||
) -> Self {
|
||||
//replicate pipeline
|
||||
let crdt = Arc::new(RwLock::new(Crdt::new(me)));
|
||||
crdt.write()
|
||||
.expect("'crdt' write lock in pub fn replicate")
|
||||
.set_leader(leader.id);
|
||||
crdt.write()
|
||||
.expect("'crdt' write lock before insert() in pub fn replicate")
|
||||
.insert(&leader);
|
||||
let t_gossip = Crdt::gossip(crdt.clone(), exit.clone());
|
||||
let window = streamer::default_window();
|
||||
let t_listen = Crdt::listen(crdt.clone(), window.clone(), gossip, exit.clone());
|
||||
|
||||
// TODO pull this socket out through the public interface
|
||||
// make sure we are on the same interface
|
||||
let mut local = replicate.local_addr().expect("tvu: get local address");
|
||||
local.set_port(0);
|
||||
let write = UdpSocket::bind(local).expect("tvu: bind to local socket");
|
||||
|
||||
let blob_recycler = packet::BlobRecycler::default();
|
||||
let (blob_sender, blob_receiver) = channel();
|
||||
let t_blob_receiver = streamer::blob_receiver(
|
||||
let fetch_stage = BlobFetchStage::new_multi_socket(
|
||||
vec![replicate_socket, repair_socket],
|
||||
exit.clone(),
|
||||
blob_recycler.clone(),
|
||||
replicate,
|
||||
blob_sender.clone(),
|
||||
).expect("tvu: blob receiver creation");
|
||||
let (window_sender, window_receiver) = channel();
|
||||
let (retransmit_sender, retransmit_receiver) = channel();
|
||||
|
||||
let t_retransmit = streamer::retransmitter(
|
||||
write,
|
||||
exit.clone(),
|
||||
crdt.clone(),
|
||||
blob_recycler.clone(),
|
||||
retransmit_receiver,
|
||||
);
|
||||
|
||||
//TODO
|
||||
//the packets coming out of blob_receiver need to be sent to the GPU and verified
|
||||
//then sent to the window, which does the erasure coding reconstruction
|
||||
let t_window = streamer::window(
|
||||
exit.clone(),
|
||||
crdt.clone(),
|
||||
let window_stage = WindowStage::new(
|
||||
crdt,
|
||||
window,
|
||||
blob_recycler.clone(),
|
||||
blob_receiver,
|
||||
window_sender,
|
||||
retransmit_sender,
|
||||
);
|
||||
|
||||
let replicate_stage = ReplicateStage::new(
|
||||
bank.clone(),
|
||||
retransmit_socket,
|
||||
exit.clone(),
|
||||
window_receiver,
|
||||
blob_recycler.clone(),
|
||||
fetch_stage.blob_receiver,
|
||||
);
|
||||
|
||||
let threads = vec![
|
||||
//replicate threads
|
||||
t_blob_receiver,
|
||||
t_retransmit,
|
||||
t_window,
|
||||
replicate_stage.thread_hdl,
|
||||
t_gossip,
|
||||
t_listen,
|
||||
];
|
||||
let replicate_stage =
|
||||
ReplicateStage::new(bank, exit, window_stage.blob_receiver, blob_recycler);
|
||||
|
||||
let mut threads = vec![replicate_stage.thread_hdl];
|
||||
threads.extend(fetch_stage.thread_hdls.into_iter());
|
||||
threads.extend(window_stage.thread_hdls.into_iter());
|
||||
Tvu {
|
||||
thread_hdls: threads,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct Sockets {
|
||||
pub gossip: UdpSocket,
|
||||
pub requests: UdpSocket,
|
||||
pub replicate: UdpSocket,
|
||||
pub transaction: UdpSocket,
|
||||
pub respond: UdpSocket,
|
||||
pub broadcast: UdpSocket,
|
||||
}
|
||||
|
||||
pub struct TestNode {
|
||||
pub data: ReplicatedData,
|
||||
pub sockets: Sockets,
|
||||
}
|
||||
|
||||
impl TestNode {
|
||||
pub fn new() -> TestNode {
|
||||
let gossip = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
let requests = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
let transaction = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
let replicate = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
let respond = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
let broadcast = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
let pubkey = KeyPair::new().pubkey();
|
||||
let data = ReplicatedData::new(
|
||||
pubkey,
|
||||
gossip.local_addr().unwrap(),
|
||||
replicate.local_addr().unwrap(),
|
||||
requests.local_addr().unwrap(),
|
||||
transaction.local_addr().unwrap(),
|
||||
);
|
||||
TestNode {
|
||||
data: data,
|
||||
sockets: Sockets {
|
||||
gossip,
|
||||
requests,
|
||||
replicate,
|
||||
transaction,
|
||||
respond,
|
||||
broadcast,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub fn test_node() -> (ReplicatedData, UdpSocket, UdpSocket, UdpSocket, UdpSocket) {
|
||||
use signature::{KeyPair, KeyPairUtil};
|
||||
use std::time::Duration;
|
||||
|
||||
let transactions_socket = UdpSocket::bind("127.0.0.1:0").unwrap();
|
||||
let gossip = UdpSocket::bind("127.0.0.1:0").unwrap();
|
||||
let replicate = UdpSocket::bind("127.0.0.1:0").unwrap();
|
||||
let requests_socket = UdpSocket::bind("127.0.0.1:0").unwrap();
|
||||
requests_socket
|
||||
.set_read_timeout(Some(Duration::new(1, 0)))
|
||||
.unwrap();
|
||||
let pubkey = KeyPair::new().pubkey();
|
||||
let d = ReplicatedData::new(
|
||||
pubkey,
|
||||
gossip.local_addr().unwrap(),
|
||||
replicate.local_addr().unwrap(),
|
||||
requests_socket.local_addr().unwrap(),
|
||||
transactions_socket.local_addr().unwrap(),
|
||||
);
|
||||
(d, gossip, replicate, requests_socket, transactions_socket)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
use bank::Bank;
|
||||
use bincode::serialize;
|
||||
use crdt::Crdt;
|
||||
use crdt::{Crdt, TestNode};
|
||||
use entry::Entry;
|
||||
use hash::{hash, Hash};
|
||||
use logger;
|
||||
use mint::Mint;
|
||||
use ncp::Ncp;
|
||||
use packet::BlobRecycler;
|
||||
use result::Result;
|
||||
use signature::{KeyPair, KeyPairUtil};
|
||||
use std::collections::VecDeque;
|
||||
use std::net::UdpSocket;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::mpsc::channel;
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::time::Duration;
|
||||
use streamer;
|
||||
use transaction::Transaction;
|
||||
use tvu::{TestNode, Tvu};
|
||||
use tvu::Tvu;
|
||||
|
||||
fn new_ncp(
|
||||
crdt: Arc<RwLock<Crdt>>,
|
||||
listen: UdpSocket,
|
||||
exit: Arc<AtomicBool>,
|
||||
) -> Result<(Ncp, streamer::Window)> {
|
||||
let window = streamer::default_window();
|
||||
let send_sock = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
|
||||
let ncp = Ncp::new(crdt, window.clone(), listen, send_sock, exit)?;
|
||||
Ok((ncp, window))
|
||||
}
|
||||
/// Test that message sent from leader to target1 and replicated to target2
|
||||
#[test]
|
||||
fn test_replicate() {
|
||||
@@ -227,9 +147,7 @@ pub mod tests {
|
||||
crdt_l.set_leader(leader.data.id);
|
||||
|
||||
let cref_l = Arc::new(RwLock::new(crdt_l));
|
||||
let t_l_gossip = Crdt::gossip(cref_l.clone(), exit.clone());
|
||||
let window1 = streamer::default_window();
|
||||
let t_l_listen = Crdt::listen(cref_l, window1, leader.sockets.gossip, exit.clone());
|
||||
let dr_l = new_ncp(cref_l, leader.sockets.gossip, exit.clone()).unwrap();
|
||||
|
||||
//start crdt2
|
||||
let mut crdt2 = Crdt::new(target2.data.clone());
|
||||
@@ -237,9 +155,7 @@ pub mod tests {
|
||||
crdt2.set_leader(leader.data.id);
|
||||
let leader_id = leader.data.id;
|
||||
let cref2 = Arc::new(RwLock::new(crdt2));
|
||||
let t2_gossip = Crdt::gossip(cref2.clone(), exit.clone());
|
||||
let window2 = streamer::default_window();
|
||||
let t2_listen = Crdt::listen(cref2, window2, target2.sockets.gossip, exit.clone());
|
||||
let dr_2 = new_ncp(cref2, target2.sockets.gossip, exit.clone()).unwrap();
|
||||
|
||||
// setup some blob services to send blobs into the socket
|
||||
// to simulate the source peer and get blobs out of the socket to
|
||||
@@ -267,28 +183,32 @@ pub mod tests {
|
||||
let mint = Mint::new(starting_balance);
|
||||
let replicate_addr = target1.data.replicate_addr;
|
||||
let bank = Arc::new(Bank::new(&mint));
|
||||
|
||||
//start crdt1
|
||||
let mut crdt1 = Crdt::new(target1.data.clone());
|
||||
crdt1.insert(&leader.data);
|
||||
crdt1.set_leader(leader.data.id);
|
||||
let cref1 = Arc::new(RwLock::new(crdt1));
|
||||
let dr_1 = new_ncp(cref1.clone(), target1.sockets.gossip, exit.clone()).unwrap();
|
||||
|
||||
let tvu = Tvu::new(
|
||||
bank.clone(),
|
||||
target1.data,
|
||||
target1.sockets.gossip,
|
||||
cref1,
|
||||
dr_1.1,
|
||||
target1.sockets.replicate,
|
||||
leader.data,
|
||||
target1.sockets.repair,
|
||||
target1.sockets.retransmit,
|
||||
exit.clone(),
|
||||
);
|
||||
|
||||
let mut alice_ref_balance = starting_balance;
|
||||
let mut msgs = VecDeque::new();
|
||||
let mut cur_hash = Hash::default();
|
||||
let num_blobs = 10;
|
||||
let mut blob_id = 0;
|
||||
let num_transfers = 10;
|
||||
let transfer_amount = 501;
|
||||
let bob_keypair = KeyPair::new();
|
||||
for i in 0..num_blobs {
|
||||
let b = resp_recycler.allocate();
|
||||
let b_ = b.clone();
|
||||
let mut w = b.write().unwrap();
|
||||
w.set_index(i).unwrap();
|
||||
w.set_id(leader_id).unwrap();
|
||||
|
||||
for i in 0..num_transfers {
|
||||
let entry0 = Entry::new(&cur_hash, i, vec![]);
|
||||
bank.register_entry_id(&cur_hash);
|
||||
cur_hash = hash(&cur_hash);
|
||||
@@ -301,13 +221,21 @@ pub mod tests {
|
||||
);
|
||||
bank.register_entry_id(&cur_hash);
|
||||
cur_hash = hash(&cur_hash);
|
||||
let entry1 = Entry::new(&cur_hash, i + num_blobs, vec![tx0]);
|
||||
let entry1 = Entry::new(&cur_hash, i + num_transfers, vec![tx0]);
|
||||
bank.register_entry_id(&cur_hash);
|
||||
cur_hash = hash(&cur_hash);
|
||||
|
||||
alice_ref_balance -= transfer_amount;
|
||||
|
||||
let serialized_entry = serialize(&vec![entry0, entry1]).unwrap();
|
||||
for entry in vec![entry0, entry1] {
|
||||
let b = resp_recycler.allocate();
|
||||
let b_ = b.clone();
|
||||
let mut w = b.write().unwrap();
|
||||
w.set_index(blob_id).unwrap();
|
||||
blob_id += 1;
|
||||
w.set_id(leader_id).unwrap();
|
||||
|
||||
let serialized_entry = serialize(&entry).unwrap();
|
||||
|
||||
w.data_mut()[..serialized_entry.len()].copy_from_slice(&serialized_entry);
|
||||
w.set_size(serialized_entry.len());
|
||||
@@ -315,16 +243,15 @@ pub mod tests {
|
||||
drop(w);
|
||||
msgs.push_back(b_);
|
||||
}
|
||||
}
|
||||
|
||||
// send the blobs into the socket
|
||||
s_responder.send(msgs).expect("send");
|
||||
|
||||
// receive retransmitted messages
|
||||
let timer = Duration::new(1, 0);
|
||||
let mut msgs: Vec<_> = Vec::new();
|
||||
while let Ok(msg) = r_reader.recv_timeout(timer) {
|
||||
trace!("msg: {:?}", msg);
|
||||
msgs.push(msg);
|
||||
}
|
||||
|
||||
let alice_balance = bank.get_balance(&mint.keypair().pubkey()).unwrap();
|
||||
@@ -337,11 +264,16 @@ pub mod tests {
|
||||
for t in tvu.thread_hdls {
|
||||
t.join().expect("join");
|
||||
}
|
||||
t2_gossip.join().expect("join");
|
||||
t2_listen.join().expect("join");
|
||||
for t in dr_l.0.thread_hdls {
|
||||
t.join().expect("join");
|
||||
}
|
||||
for t in dr_2.0.thread_hdls {
|
||||
t.join().expect("join");
|
||||
}
|
||||
for t in dr_1.0.thread_hdls {
|
||||
t.join().expect("join");
|
||||
}
|
||||
t_receiver.join().expect("join");
|
||||
t_responder.join().expect("join");
|
||||
t_l_gossip.join().expect("join");
|
||||
t_l_listen.join().expect("join");
|
||||
}
|
||||
}
|
||||
|
52
src/window_stage.rs
Normal file
52
src/window_stage.rs
Normal file
@@ -0,0 +1,52 @@
|
||||
//! The `window_stage` maintains the blob window
|
||||
|
||||
use crdt::Crdt;
|
||||
use packet;
|
||||
use std::net::UdpSocket;
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::sync::mpsc::channel;
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::thread::JoinHandle;
|
||||
use streamer;
|
||||
|
||||
pub struct WindowStage {
|
||||
pub blob_receiver: streamer::BlobReceiver,
|
||||
pub thread_hdls: Vec<JoinHandle<()>>,
|
||||
}
|
||||
|
||||
impl WindowStage {
|
||||
pub fn new(
|
||||
crdt: Arc<RwLock<Crdt>>,
|
||||
window: streamer::Window,
|
||||
retransmit_socket: UdpSocket,
|
||||
exit: Arc<AtomicBool>,
|
||||
blob_recycler: packet::BlobRecycler,
|
||||
fetch_stage_receiver: streamer::BlobReceiver,
|
||||
) -> Self {
|
||||
let (retransmit_sender, retransmit_receiver) = channel();
|
||||
|
||||
let t_retransmit = streamer::retransmitter(
|
||||
retransmit_socket,
|
||||
exit.clone(),
|
||||
crdt.clone(),
|
||||
blob_recycler.clone(),
|
||||
retransmit_receiver,
|
||||
);
|
||||
let (blob_sender, blob_receiver) = channel();
|
||||
let t_window = streamer::window(
|
||||
exit.clone(),
|
||||
crdt.clone(),
|
||||
window,
|
||||
blob_recycler.clone(),
|
||||
fetch_stage_receiver,
|
||||
blob_sender,
|
||||
retransmit_sender,
|
||||
);
|
||||
let thread_hdls = vec![t_retransmit, t_window];
|
||||
|
||||
WindowStage {
|
||||
blob_receiver,
|
||||
thread_hdls,
|
||||
}
|
||||
}
|
||||
}
|
@@ -1,4 +1,6 @@
|
||||
//! The `write_stage` module implements write stage of the RPU.
|
||||
//! The `write_stage` module implements the TPU's write stage. It
|
||||
//! writes entries to the given writer, which is typically a file or
|
||||
//! stdout, and then sends the Entry to its output channel.
|
||||
|
||||
use bank::Bank;
|
||||
use entry::Entry;
|
||||
@@ -8,7 +10,7 @@ use std::io::Write;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::mpsc::{channel, Receiver};
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::thread::{spawn, JoinHandle};
|
||||
use std::thread::{Builder, JoinHandle};
|
||||
use streamer;
|
||||
|
||||
pub struct WriteStage {
|
||||
@@ -26,7 +28,9 @@ impl WriteStage {
|
||||
entry_receiver: Receiver<Entry>,
|
||||
) -> Self {
|
||||
let (blob_sender, blob_receiver) = channel();
|
||||
let thread_hdl = spawn(move || loop {
|
||||
let thread_hdl = Builder::new()
|
||||
.name("solana-writer".to_string())
|
||||
.spawn(move || loop {
|
||||
let entry_writer = EntryWriter::new(&bank);
|
||||
let _ = entry_writer.write_and_send_entries(
|
||||
&blob_sender,
|
||||
@@ -38,7 +42,8 @@ impl WriteStage {
|
||||
info!("broadcat_service exiting");
|
||||
break;
|
||||
}
|
||||
});
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
WriteStage {
|
||||
thread_hdl,
|
||||
@@ -52,7 +57,9 @@ impl WriteStage {
|
||||
entry_receiver: Receiver<Entry>,
|
||||
) -> Self {
|
||||
let (_blob_sender, blob_receiver) = channel();
|
||||
let thread_hdl = spawn(move || {
|
||||
let thread_hdl = Builder::new()
|
||||
.name("solana-drain".to_string())
|
||||
.spawn(move || {
|
||||
let entry_writer = EntryWriter::new(&bank);
|
||||
loop {
|
||||
let _ = entry_writer.drain_entries(&entry_receiver);
|
||||
@@ -61,7 +68,8 @@ impl WriteStage {
|
||||
break;
|
||||
}
|
||||
}
|
||||
});
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
WriteStage {
|
||||
thread_hdl,
|
||||
|
185
tests/data_replicator.rs
Normal file
185
tests/data_replicator.rs
Normal file
@@ -0,0 +1,185 @@
|
||||
#[macro_use]
|
||||
extern crate log;
|
||||
extern crate rayon;
|
||||
extern crate solana;
|
||||
|
||||
use rayon::iter::*;
|
||||
use solana::crdt::{Crdt, TestNode};
|
||||
use solana::logger;
|
||||
use solana::ncp::Ncp;
|
||||
use solana::packet::Blob;
|
||||
use std::net::UdpSocket;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::thread::sleep;
|
||||
use std::time::Duration;
|
||||
|
||||
fn test_node(exit: Arc<AtomicBool>) -> (Arc<RwLock<Crdt>>, Ncp, UdpSocket) {
|
||||
let tn = TestNode::new();
|
||||
let crdt = Crdt::new(tn.data.clone());
|
||||
let c = Arc::new(RwLock::new(crdt));
|
||||
let w = Arc::new(RwLock::new(vec![]));
|
||||
let d = Ncp::new(
|
||||
c.clone(),
|
||||
w,
|
||||
tn.sockets.gossip,
|
||||
tn.sockets.gossip_send,
|
||||
exit,
|
||||
).unwrap();
|
||||
(c, d, tn.sockets.replicate)
|
||||
}
|
||||
|
||||
/// Test that the network converges.
|
||||
/// Run until every node in the network has a full ReplicatedData set.
|
||||
/// Check that nodes stop sending updates after all the ReplicatedData has been shared.
|
||||
/// tests that actually use this function are below
|
||||
fn run_gossip_topo<F>(topo: F)
|
||||
where
|
||||
F: Fn(&Vec<(Arc<RwLock<Crdt>>, Ncp, UdpSocket)>) -> (),
|
||||
{
|
||||
let num: usize = 5;
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let listen: Vec<_> = (0..num).map(|_| test_node(exit.clone())).collect();
|
||||
topo(&listen);
|
||||
let mut done = true;
|
||||
for i in 0..(num * 32) {
|
||||
done = false;
|
||||
trace!("round {}", i);
|
||||
for (c, _, _) in &listen {
|
||||
if num == c.read().unwrap().convergence() as usize {
|
||||
done = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
//at least 1 node converged
|
||||
if done == true {
|
||||
break;
|
||||
}
|
||||
sleep(Duration::new(1, 0));
|
||||
}
|
||||
exit.store(true, Ordering::Relaxed);
|
||||
for (c, dr, _) in listen.into_iter() {
|
||||
for j in dr.thread_hdls.into_iter() {
|
||||
j.join().unwrap();
|
||||
}
|
||||
// make it clear what failed
|
||||
// protocol is to chatty, updates should stop after everyone receives `num`
|
||||
assert!(c.read().unwrap().update_index <= num as u64);
|
||||
// protocol is not chatty enough, everyone should get `num` entries
|
||||
assert_eq!(c.read().unwrap().table.len(), num);
|
||||
}
|
||||
assert!(done);
|
||||
}
|
||||
/// ring a -> b -> c -> d -> e -> a
|
||||
#[test]
|
||||
fn gossip_ring() {
|
||||
logger::setup();
|
||||
run_gossip_topo(|listen| {
|
||||
let num = listen.len();
|
||||
for n in 0..num {
|
||||
let y = n % listen.len();
|
||||
let x = (n + 1) % listen.len();
|
||||
let mut xv = listen[x].0.write().unwrap();
|
||||
let yv = listen[y].0.read().unwrap();
|
||||
let mut d = yv.table[&yv.me].clone();
|
||||
d.version = 0;
|
||||
xv.insert(&d);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/// star a -> (b,c,d,e)
|
||||
#[test]
|
||||
fn gossip_star() {
|
||||
logger::setup();
|
||||
run_gossip_topo(|listen| {
|
||||
let num = listen.len();
|
||||
for n in 0..(num - 1) {
|
||||
let x = 0;
|
||||
let y = (n + 1) % listen.len();
|
||||
let mut xv = listen[x].0.write().unwrap();
|
||||
let yv = listen[y].0.read().unwrap();
|
||||
let mut yd = yv.table[&yv.me].clone();
|
||||
yd.version = 0;
|
||||
xv.insert(&yd);
|
||||
trace!("star leader {:?}", &xv.me[..4]);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/// rstar a <- (b,c,d,e)
|
||||
#[test]
|
||||
fn gossip_rstar() {
|
||||
logger::setup();
|
||||
run_gossip_topo(|listen| {
|
||||
let num = listen.len();
|
||||
let xd = {
|
||||
let xv = listen[0].0.read().unwrap();
|
||||
xv.table[&xv.me].clone()
|
||||
};
|
||||
trace!("rstar leader {:?}", &xd.id[..4]);
|
||||
for n in 0..(num - 1) {
|
||||
let y = (n + 1) % listen.len();
|
||||
let mut yv = listen[y].0.write().unwrap();
|
||||
yv.insert(&xd);
|
||||
trace!("rstar insert {:?} into {:?}", &xd.id[..4], &yv.me[..4]);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn crdt_retransmit() {
|
||||
logger::setup();
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
trace!("c1:");
|
||||
let (c1, dr1, tn1) = test_node(exit.clone());
|
||||
trace!("c2:");
|
||||
let (c2, dr2, tn2) = test_node(exit.clone());
|
||||
trace!("c3:");
|
||||
let (c3, dr3, tn3) = test_node(exit.clone());
|
||||
let c1_data = c1.read().unwrap().my_data().clone();
|
||||
c1.write().unwrap().set_leader(c1_data.id);
|
||||
|
||||
c2.write().unwrap().insert(&c1_data);
|
||||
c3.write().unwrap().insert(&c1_data);
|
||||
|
||||
c2.write().unwrap().set_leader(c1_data.id);
|
||||
c3.write().unwrap().set_leader(c1_data.id);
|
||||
|
||||
//wait to converge
|
||||
trace!("waiting to converge:");
|
||||
let mut done = false;
|
||||
for _ in 0..30 {
|
||||
done = c1.read().unwrap().table.len() == 3
|
||||
&& c2.read().unwrap().table.len() == 3
|
||||
&& c3.read().unwrap().table.len() == 3;
|
||||
if done {
|
||||
break;
|
||||
}
|
||||
sleep(Duration::new(1, 0));
|
||||
}
|
||||
assert!(done);
|
||||
let mut b = Blob::default();
|
||||
b.meta.size = 10;
|
||||
Crdt::retransmit(&c1, &Arc::new(RwLock::new(b)), &tn1).unwrap();
|
||||
let res: Vec<_> = [tn1, tn2, tn3]
|
||||
.into_par_iter()
|
||||
.map(|s| {
|
||||
let mut b = Blob::default();
|
||||
s.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
|
||||
let res = s.recv_from(&mut b.data);
|
||||
res.is_err() //true if failed to receive the retransmit packet
|
||||
})
|
||||
.collect();
|
||||
//true if failed receive the retransmit packet, r2, and r3 should succeed
|
||||
//r1 was the sender, so it should fail to receive the packet
|
||||
assert_eq!(res, [true, false, false]);
|
||||
exit.store(true, Ordering::Relaxed);
|
||||
let mut threads = vec![];
|
||||
threads.extend(dr1.thread_hdls.into_iter());
|
||||
threads.extend(dr2.thread_hdls.into_iter());
|
||||
threads.extend(dr3.thread_hdls.into_iter());
|
||||
for t in threads.into_iter() {
|
||||
t.join().unwrap();
|
||||
}
|
||||
}
|
@@ -1,19 +1,18 @@
|
||||
#[macro_use]
|
||||
extern crate log;
|
||||
extern crate bincode;
|
||||
extern crate futures;
|
||||
extern crate solana;
|
||||
|
||||
use futures::Future;
|
||||
use solana::bank::Bank;
|
||||
use solana::crdt::TestNode;
|
||||
use solana::crdt::{Crdt, ReplicatedData};
|
||||
use solana::logger;
|
||||
use solana::mint::Mint;
|
||||
use solana::ncp::Ncp;
|
||||
use solana::server::Server;
|
||||
use solana::signature::{KeyPair, KeyPairUtil, PublicKey};
|
||||
use solana::streamer::default_window;
|
||||
use solana::thin_client::ThinClient;
|
||||
use solana::tvu::TestNode;
|
||||
use std::io;
|
||||
use std::io::sink;
|
||||
use std::net::UdpSocket;
|
||||
@@ -38,6 +37,7 @@ fn validator(
|
||||
validator.sockets.respond,
|
||||
validator.sockets.replicate,
|
||||
validator.sockets.gossip,
|
||||
validator.sockets.repair,
|
||||
leader.clone(),
|
||||
exit.clone(),
|
||||
);
|
||||
@@ -59,16 +59,15 @@ fn converge(
|
||||
let mut spy_crdt = Crdt::new(spy.data);
|
||||
spy_crdt.insert(&leader);
|
||||
spy_crdt.set_leader(leader.id);
|
||||
|
||||
let spy_ref = Arc::new(RwLock::new(spy_crdt));
|
||||
let spy_window = default_window();
|
||||
let t_spy_listen = Crdt::listen(
|
||||
let dr = Ncp::new(
|
||||
spy_ref.clone(),
|
||||
spy_window,
|
||||
spy.sockets.gossip,
|
||||
exit.clone(),
|
||||
);
|
||||
let t_spy_gossip = Crdt::gossip(spy_ref.clone(), exit.clone());
|
||||
spy.sockets.gossip_send,
|
||||
exit,
|
||||
).unwrap();
|
||||
//wait for the network to converge
|
||||
let mut converged = false;
|
||||
for _ in 0..30 {
|
||||
@@ -80,8 +79,7 @@ fn converge(
|
||||
sleep(Duration::new(1, 0));
|
||||
}
|
||||
assert!(converged);
|
||||
threads.push(t_spy_listen);
|
||||
threads.push(t_spy_gossip);
|
||||
threads.extend(dr.thread_hdls.into_iter());
|
||||
let v: Vec<ReplicatedData> = spy_ref
|
||||
.read()
|
||||
.unwrap()
|
||||
@@ -107,7 +105,6 @@ fn test_multi_node() {
|
||||
let leader_bank = Bank::new(&alice);
|
||||
let server = Server::new_leader(
|
||||
leader_bank,
|
||||
alice.last_id(),
|
||||
None,
|
||||
leader.data.clone(),
|
||||
leader.sockets.requests,
|
||||
@@ -169,7 +166,7 @@ fn tx_and_retry_get_balance(
|
||||
) -> io::Result<i64> {
|
||||
let mut client = mk_client(leader);
|
||||
trace!("getting leader last_id");
|
||||
let last_id = client.get_last_id().wait().unwrap();
|
||||
let last_id = client.get_last_id();
|
||||
info!("executing leader transer");
|
||||
let _sig = client
|
||||
.transfer(500, &alice.keypair(), *bob_pubkey, &last_id)
|
||||
|
Reference in New Issue
Block a user