Compare commits

...

351 Commits

Author SHA1 Message Date
Greg Fitzgerald
a6cb2f1bcf Version bump 2018-07-18 09:07:25 -04:00
Greg Fitzgerald
28af9a39b4 Don't clone before borrowing
Clippy told us to change function parameters to references, but
wasn't able to then tell us that the clone() before borrowing
was superfluous. This patch removes those by hand.

No expectation of a performance improvement here, since we were
just cloning reference counts. Just removes a bunch of noise.
2018-07-18 08:04:31 -04:00
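A minimal Rust sketch of the pattern this commit removes (names are illustrative): the clone only bumped an Arc's reference count before the borrow.

```rust
use std::sync::Arc;

fn total(items: &Arc<Vec<u64>>) -> u64 {
    items.iter().sum()
}

fn main() {
    let items = Arc::new(vec![1, 2, 3]);
    // Before: clone() bumped the reference count just to borrow the result.
    let _ = total(&items.clone());
    // After: borrow directly; no reference-count churn.
    let _ = total(&items);
}
```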
anatoly yakovenko
8cf5620b87 crdt_insert_new_entry_counter (#680) 2018-07-17 22:55:53 -07:00
Michael Vines
85d6627ee6 Deploy in one ssh login in a further attempt to avoid hitting GCP login quota 2018-07-17 20:45:52 -07:00
Michael Vines
611a005ec9 Avoid |wait| as it masks failures 2018-07-17 19:52:39 -07:00
Michael Vines
90b3b90391 -p 2018-07-17 19:42:00 -07:00
Michael Vines
fd4f294fd3 Rotate logs at 16MB 2018-07-17 19:42:00 -07:00
Michael Vines
145274c001 Ensure log directories are go+r 2018-07-17 18:16:40 -07:00
Michael Vines
df5d6693f6 Don't cache leader.json to make it easier to switch between nets 2018-07-17 18:16:40 -07:00
Anatoly Yakovenko
05c5603879 error counter 2018-07-17 17:28:23 -07:00
Anatoly Yakovenko
c2c48a5c3c write stage broadcast counters 2018-07-17 17:28:23 -07:00
pgarg66
4af556f70e Added tests for bad gossip address (#672) 2018-07-17 16:27:46 -07:00
anatoly yakovenko
8bad411962 env variable for default metrics rate that gets set for counters (#670)
* env variable for default metrics rate that gets set for counters

* ignore if env rate is set to 0

* use a slow rate by default

* fixed test
2018-07-17 15:26:10 -07:00
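A hedged sketch of the behavior those bullets describe; the variable name `SOLANA_DEFAULT_METRICS_RATE` and the fallback value are assumptions, not confirmed by this log.

```rust
use std::env;

// Assumed variable name and default; a rate of 0 disables submission.
fn default_metrics_rate() -> Option<usize> {
    match env::var("SOLANA_DEFAULT_METRICS_RATE")
        .ok()
        .and_then(|v| v.parse::<usize>().ok())
    {
        Some(0) => None,          // ignore if env rate is set to 0
        Some(rate) => Some(rate),
        None => Some(1000),       // use a slow rate by default (assumed value)
    }
}

fn main() {
    println!("{:?}", default_metrics_rate());
}
```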
Michael Vines
5b0418793e Keep Snap fullnode/drone logs out of syslog, we're too spammy 2018-07-17 15:08:35 -07:00
pgarg66
4423ee6902 Renamed start_nodes.sh to remote_nodes.sh (#669) 2018-07-17 15:01:53 -07:00
pgarg66
f0c39cc84d Remote multinode scripts cleanup (#666)
- Also added support for stop nodes
2018-07-17 13:48:25 -07:00
Anatoly Yakovenko
3d45b04da8 review comments 2018-07-17 15:51:32 -04:00
Anatoly Yakovenko
9e2f26a5d2 review comments 2018-07-17 15:51:32 -04:00
Anatoly Yakovenko
a016f6e82e builds 2018-07-17 15:51:32 -04:00
Anatoly Yakovenko
eb3e5fd204 server too 2018-07-17 15:51:32 -04:00
Anatoly Yakovenko
72282dc493 fast exit dynamic test 2018-07-17 15:51:32 -04:00
Michael Vines
47a22c66b4 Include program name in panic metric 2018-07-17 12:13:22 -07:00
Michael Vines
fb11d8a909 Install panic hook 2018-07-17 12:13:22 -07:00
Michael Vines
7d872f52f4 Add set_panic_hook 2018-07-17 12:13:22 -07:00
Michael Vines
d882bfe65c Ignore/log RequestWindowIndex from self 2018-07-17 12:12:54 -07:00
pgarg66
103584ef27 Use public IP for client gossip, if UPnP fails (#665) 2018-07-17 11:23:32 -07:00
anatoly yakovenko
1fb537deb9 Do not generate gossip requests to unspecified addresses (#657)
* Do not generate gossip requests to unspecified addresses

* review comments
2018-07-17 09:44:48 -07:00
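A minimal sketch of the guard described here; the port check is an added assumption beyond what the message states.

```rust
use std::net::{IpAddr, Ipv4Addr, SocketAddr};

// A gossip request to an unspecified address (0.0.0.0) can never reach a
// peer, so skip such targets when generating requests.
fn valid_gossip_target(addr: &SocketAddr) -> bool {
    !addr.ip().is_unspecified() && addr.port() != 0
}

fn main() {
    let bad = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
    assert!(!valid_gossip_target(&bad));
}
```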
Michael Vines
2bd48b4207 Display better deploy logs 2018-07-17 09:10:55 -07:00
Michael Vines
f5a6db3dc0 Add daemon plugs 2018-07-17 08:24:37 -07:00
anatoly yakovenko
dd0c1ac5b2 Error counters for streamer (#658)
* error counters for streamer

* more counters
2018-07-17 08:20:35 -07:00
anatoly yakovenko
d8c9655128 Dynamic test assert (#643)
* log responder error to warn

* log responder error to warn

* fixup!

* fixed assert

* fixed bad ports issue

* comments

* test for dummy address in Crdt::new instead of NodeInfo::new

* return error if ContactInfo supplied to Crdt::new cannot be used to connect to network

* comments
2018-07-16 19:31:52 -07:00
anatoly yakovenko
09f2d273c5 less intrusive counters (#655)
* less intrusive counters

* fixed arg

* tests

* comments
2018-07-16 18:33:50 -07:00
Michael Vines
f6eb85e7a3 Permit Snap RUST_LOG to be overridden 2018-07-16 17:44:54 -07:00
pgarg66
0d85b43901 Fix input parameter processing for client num nodes (#653) 2018-07-16 17:23:35 -07:00
Michael Vines
fdf94a77b4 CUDA is now configurable 2018-07-16 16:23:45 -07:00
pgarg66
af40ab0c04 Split start_nodes script ssh commands to individual scripts (#642) 2018-07-16 16:21:32 -07:00
anatoly yakovenko
015b7a1ddb dash for namespaces (#649) 2018-07-16 15:55:54 -07:00
Anatoly Yakovenko
ab3e460e64 insert votes as they are observed 2018-07-16 13:39:20 -07:00
Michael Vines
194a84c8dd Add testnet-sanity.sh 2018-07-16 12:17:39 -07:00
Michael Vines
51d932dad1 Connect validators to the right leader 2018-07-16 11:05:01 -07:00
Michael Vines
561d31cc13 Add support for master.testnet.s.c 2018-07-16 10:08:58 -07:00
Michael Vines
d6a8e437bb Temporarily disable erasure 2018-07-16 08:15:47 -07:00
Anatoly Yakovenko
4631af5011 counters for vote not found 2018-07-15 20:31:23 -06:00
Michael Vines
5d28729b2a Use ed25519_init() for faster failures 2018-07-15 20:30:32 -06:00
Pankaj Garg
8c08e614b7 Start validator nodes in parallel
- This speeds up overall network startup time
2018-07-15 19:11:52 -06:00
Michael Vines
e76bf1438b A validator and leader running from the same workspace no longer share an identity 2018-07-15 13:34:48 -07:00
Michael Vines
4e177877c9 Add more error checking, better logging, avoid hitting GCP login quota 2018-07-15 09:27:25 -07:00
Michael Vines
60848b9d95 Testnet sanity test failures will now turn the build red 2018-07-14 21:27:27 -07:00
Michael Vines
79b3564a26 Log metrics params to stderr
Keep stdout clean for the actual program.  This is a specific concern for the
wallet command, where there exist tests that capture stdout from the wallet to
confirm transactions.
2018-07-14 21:24:22 -07:00
Michael Vines
1e8c36c555 Be less noisy 2018-07-14 20:42:00 -07:00
Michael Vines
94d015b089 Demote log level 2018-07-14 20:42:00 -07:00
Michael Vines
cfb3736372 Update buildkite-snap.yml 2018-07-14 17:55:03 -07:00
Michael Vines
2b77f62233 Poll longer while waiting for an airdrop 2018-07-14 17:10:44 -07:00
Michael Vines
e8d23c17ca timeout++ 2018-07-14 15:51:32 -07:00
Michael Vines
a7ed2a304a Add CUDA libraries 2018-07-14 15:27:24 -07:00
Michael Vines
0025b42c26 Locate perf libs 2018-07-14 10:24:20 -07:00
Michael Vines
3f7f492cc0 Fix snap client-demo fixes 2018-07-14 00:18:54 -07:00
Michael Vines
490d7875dd Snap client-demo fixes 2018-07-13 23:51:33 -07:00
Michael Vines
4240edf710 solana.client-demo now runs client.sh for the bash extras 2018-07-13 22:57:38 -07:00
Michael Vines
30e50d0f70 Log airdrop amount and client public key 2018-07-13 22:41:52 -07:00
Michael Vines
751c1eba32 Run wallet-sanity against the new testnet 2018-07-13 22:21:41 -07:00
Michael Vines
d349d6aa98 USE_SNAP=1 is now supported 2018-07-13 22:21:41 -07:00
Michael Vines
1f9152dc72 Detect and report airdrop failures 2018-07-13 18:08:28 -07:00
Michael Vines
1b9d50172b Correct log message 2018-07-13 18:08:28 -07:00
Michael Vines
084dbd7f58 Fail gracefully when leader.json is missing 2018-07-13 17:24:25 -07:00
Rob Walker
58c0508f94 add drone information to multinode demo instructions 2018-07-13 17:16:55 -07:00
Michael Vines
dcf82c024f Surface hidden call to solana-keygen 2018-07-13 16:16:46 -07:00
Greg Fitzgerald
b253ed0c46 Version bump 2018-07-13 15:10:45 -06:00
Greg Fitzgerald
61db53fc19 Version bump 2018-07-13 15:04:10 -06:00
Michael Vines
b0ead086a1 Fix bad copy'n'paste 2018-07-13 13:04:38 -07:00
Greg Fitzgerald
a3b22d0d33 Faster benchmarking for CI
Increase that sample size manually when you're doing performance
work.
2018-07-13 14:03:50 -06:00
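For context, the benches added in this range (see the benches/ diffs below) pin Criterion to a tiny sample size so CI finishes quickly; a sketch of that configuration:

```rust
use criterion::Criterion;

// CI uses minimal sampling for speed; raise sample_size (e.g. to 100)
// locally when doing real performance work.
fn ci_config() -> Criterion {
    Criterion::default().sample_size(2)
}
```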
Michael Vines
28d24497a3 Wait for the leader to initialize before starting the validators 2018-07-13 12:32:24 -07:00
Michael Vines
05cea4c1da dedup 2018-07-13 11:48:17 -07:00
Michael Vines
260f5edfd6 Use correct leader.json 2018-07-13 11:48:17 -07:00
Michael Vines
7105136595 Enable CUDA for the leader node 2018-07-13 11:36:12 -07:00
Michael Vines
54db379bf2 Refresh in parallel 2018-07-13 11:19:31 -07:00
Michael Vines
effbf0b978 Add script to refresh testnet nodes 2018-07-13 11:19:31 -07:00
Michael Vines
8e7a2a9587 Validators now request an airdrop of 1 token before starting up 2018-07-13 10:02:19 -07:00
Michael Vines
18e6ff4167 Fail gracefully when keypair file is unreadable 2018-07-13 10:00:55 -07:00
Michael Vines
fa1cdaa91a Add home plugs to enable Snap access to ~/.config/solana/id.json 2018-07-13 09:32:37 -07:00
Michael Vines
b538b67524 Bump timeout for stable build
When the CI build machine caches are empty, stable occasionally needs more than 20m
2018-07-13 09:17:45 -07:00
Michael Vines
2b0f6355af Tagged snap builds now correctly publish to the beta channel 2018-07-12 23:43:59 -07:00
Rob Walker
11b9a0323d fixups 2018-07-12 22:51:55 -07:00
Rob Walker
710fa822a0 fixups 2018-07-12 22:51:55 -07:00
Rob Walker
aaf6ce5aea fixups 2018-07-12 22:51:55 -07:00
Rob Walker
34ea483736 step two: supply a ledger file argument to fullnode in the demo
(also whack unused "myip.sh", even though it was pretty)
2018-07-12 22:51:55 -07:00
Rob Walker
a3ff40476e Banish stdin/stdout for ledger
step one: accept a "ledger file" argument instead of "outfile"
2018-07-12 22:51:55 -07:00
Greg Fitzgerald
4cca3ff454 Fix keypair option in scripts
Thanks @CriesofCarrots!
2018-07-12 21:50:28 -06:00
Greg Fitzgerald
3d9acdd970 Fix nightly 2018-07-12 21:50:28 -06:00
Greg Fitzgerald
428f220b88 Battle shellcheck 2018-07-12 21:50:28 -06:00
Greg Fitzgerald
10add6a8ac Cleanup setup.sh 2018-07-12 21:50:28 -06:00
Greg Fitzgerald
f06a8dceda Fix keygen docs
Thanks @rob-solana
2018-07-12 21:50:28 -06:00
Greg Fitzgerald
545f4f1c87 Pass the owner's keypair to fullnode-config 2018-07-12 21:50:28 -06:00
Greg Fitzgerald
77543d83ff Fix default keypair paths 2018-07-12 21:50:28 -06:00
Greg Fitzgerald
eb6a30cb7c In Wallet, make --tokens required and --to optional 2018-07-12 21:50:28 -06:00
Greg Fitzgerald
97372b8e63 Add --outfile option to solana-keygen 2018-07-12 21:50:28 -06:00
Greg Fitzgerald
cea29ed772 More keygen 2018-07-12 21:50:28 -06:00
Greg Fitzgerald
b5006b8f2b Migrate to solana-keygen
Most of #593
2018-07-12 21:50:28 -06:00
Greg Fitzgerald
81c44c605b Add solana-keygen
Same as solana-mint, but without a tokens field.
2018-07-12 14:06:43 -06:00
Greg Fitzgerald
0b66a6626a Use docker image's clippy 2018-07-12 09:40:40 -06:00
Greg Fitzgerald
e8be4d7eae Add clippy to CI 2018-07-12 09:40:40 -06:00
Greg Fitzgerald
30f0c25b65 Fix all remaining clippy warnings
Fixes #586
2018-07-12 09:40:40 -06:00
Greg Fitzgerald
73ae3c3301 Apply most of clippy's feedback 2018-07-12 09:40:40 -06:00
Greg Fitzgerald
f98e9aba48 Apply clippy feedback to CLI apps 2018-07-12 09:40:40 -06:00
Michael Vines
84c28a077a Use custom rust nightly image with cargo-cov and clippy pre-installed 2018-07-12 07:25:56 -06:00
Tyera Eulberg
350cf62b90 Sequence client outgoing and incoming txs 2018-07-12 07:24:15 -06:00
Tyera Eulberg
aa4f30c491 Repay transactions from test accounts to client 2018-07-12 07:24:15 -06:00
Tyera Eulberg
3de979aa7c Check client balance and only airdrop if less than TPS quota 2018-07-12 07:24:15 -06:00
Pankaj Garg
5bc133985b Start drone on remote leader node
- Also, enables CUDA for leader node
2018-07-11 20:08:18 -06:00
Greg Fitzgerald
87156e1364 Fix flaky test
The test would fail any time the original value was coincidentally
the same as the new bogus value.
2018-07-11 14:16:21 -07:00
Pankaj Garg
45ff142871 Optimized start_nodes script to speed up GCE nodes start time
- Reduced dependency on local network
- Validators get binaries (solana bins and scripts) from leader node
2018-07-11 11:42:25 -06:00
Greg Fitzgerald
2710ff271e cargo fmt 2018-07-11 11:38:41 -06:00
OEM Configuration (temporary user)
468ac9facd Refactor the "ReplicatedData" struct
Rename the "ReplicatedData" struct to the "NodeInfo" struct.
Also refactors and renames the members in this struct.
2018-07-11 11:38:41 -06:00
Rob Walker
705720f086 fixups 2018-07-11 10:37:47 -07:00
Rob Walker
a219e78f00 fixups 2018-07-11 10:37:47 -07:00
Rob Walker
7a41868173 fixups 2018-07-11 10:37:47 -07:00
Rob Walker
e16acec901 fixups 2018-07-11 10:37:47 -07:00
Rob Walker
de44d7475e fixups 2018-07-11 10:37:47 -07:00
Rob Walker
c2dd009e0b fixups 2018-07-11 10:37:47 -07:00
Rob Walker
5a8da75d06 optimize process_ledger() 2018-07-11 10:37:47 -07:00
Greg Fitzgerald
848c6e2371 Reduce sample size to restore number of transactions 2018-07-11 11:18:18 -06:00
Greg Fitzgerald
e3882950cf Run benchmarks from Rust stable CI 2018-07-11 11:18:18 -06:00
Greg Fitzgerald
28f6fbee23 Port all benchmarks to Criterion 2018-07-11 11:18:18 -06:00
Greg Fitzgerald
3144a70b18 Move all benchmarks to benches/ 2018-07-11 11:18:18 -06:00
Stephen Akridge
bed5438831 Improved streamer debug messages
distinguish between threads
2018-07-11 18:26:16 +02:00
Anatoly Yakovenko
6f991b3c11 Send keypair args for validators instead of leaders 2018-07-11 07:54:38 -06:00
Anatoly Yakovenko
03a8a5ed55 only submit to influx when we log
test accumulated value logging

lots of counters

higher influx rate

fix counter name

replicate-transactions
2018-07-11 07:53:39 -06:00
Greg Fitzgerald
0c6d2ef1f4 Fix typo 2018-07-10 19:38:29 -06:00
Greg Fitzgerald
d2be79f38c Use iter_with_setup() to improve precision 2018-07-10 19:38:29 -06:00
Greg Fitzgerald
cc89801b12 Port bank benchmark to Criterion 2018-07-10 19:38:29 -06:00
Greg Fitzgerald
dfa05a8742 Move bank benchmark outside src
This will make it available to third party benchmarking tools.
2018-07-10 19:38:29 -06:00
Pankaj Garg
d7d985365b Add script to create/delete multiple GCE instances
- This script outputs the IP address array that can be used
  with start_nodes script to launch multinode demo
- Changes to start_nodes to compress files for rsync
2018-07-10 18:16:05 -06:00
anatoly yakovenko
0d4e4b18c2 Quiet counter (#574)
* only submit to influx when we log

* test accumulated value logging
2018-07-10 15:14:59 -07:00
Anatoly Yakovenko
7687436bef some cleanup on messages 2018-07-10 13:32:31 -06:00
Anatoly Yakovenko
d531b9645d review comments 2018-07-10 13:32:31 -06:00
Anatoly Yakovenko
6a1b5a222a rebase builds 2018-07-10 13:32:31 -06:00
Anatoly Yakovenko
be2bf69c93 initial vote stage
wip

voting

wip

move voting into the replicate stage

update

fixup!

fixup!

fixup!

fixup!

fixup!

fixup!

fixup!

fixup!

fixup!

fixup!

update

fixup!

fixup!

fixup!

tpu processing votes in entries before write stage

fixup!

fixup!

txs

make sure validators have an account

fixup!

fixup!

fixup!

exit fullnode correctly

exit on exit not err

try 50

add delay for voting

300

300

startup logs

par start

100

no rayon

retry longer

log leader drop

fix distance

50 nodes

100

handle deserialize error

update

fix broadcast

new table every time

tweaks

table

update

try shuffle table

skip kill

skip add

purge test

fixed tests

rebase 2

fixed tests

fixed rebase

cleanup

ok for blobs to be longer than window

fix init window

60 nodes
2018-07-10 13:32:31 -06:00
Greg Fitzgerald
0672794692 Cleanup leader restarts
Try to avoid adding a runtime codepath when all paths are
statically known.
2018-07-10 11:11:36 -06:00
Greg Fitzgerald
c65c0d9b23 Expose fewer exit variables 2018-07-10 11:11:36 -06:00
Michael Vines
0ee86ff313 Map counters to metrics 2018-07-10 11:11:21 -06:00
Pankaj Garg
3b1aa846b5 Fixed issues with configuring new GCE instances
- New nodes cloned from a working node can be used with the script
- Script takes care of installing SSH keys, and package dependencies correctly
2018-07-10 10:31:03 -06:00
Michael Vines
0a34cb8023 Include hh:mm in image name 2018-07-09 23:07:07 -06:00
Michael Vines
227aa38c8a Add image --family arg 2018-07-09 23:02:46 -06:00
Rob Walker
1dd467ed7d fix issue #568 2018-07-09 22:27:11 -06:00
Anatoly Yakovenko
922dffb122 fix erasure 2018-07-09 20:40:14 -06:00
Anatoly Yakovenko
63985d4595 renamed to contact_info 2018-07-09 20:40:14 -06:00
Anatoly Yakovenko
97dd1834d7 fix tests, more logs 2018-07-09 20:40:14 -06:00
Anatoly Yakovenko
2ea030be48 stick all the addrs into one struct 2018-07-09 20:40:14 -06:00
Tyera Eulberg
606cfbfe1e Migrate fullnode and fullnode-config to clap for CLI arguments 2018-07-09 20:38:32 -06:00
Rob Walker
90a4ab7e57 fixes issue #299 2018-07-09 14:50:14 -07:00
Rob Walker
412e15fbdc add test for populated window 2018-07-09 14:50:14 -07:00
Rob Walker
ed0a590549 support an initial window filled with last up-to-WINDOW_SIZE blobs 2018-07-09 14:50:14 -07:00
Greg Fitzgerald
71f05cb23e Vet timestamp source from contract, not leader
Per @aeyakovenko, contracts shouldn't trust the network for
timestamps. Instead, pass the verified public key to the
contract and let it decide if that's a public key it wants
to trust the timestamp from.

Fixes #405
2018-07-09 08:40:07 -06:00
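A hedged sketch of the idea, with illustrative types rather than the repo's actual contract API: the contract itself decides whether the verified key supplying a timestamp is one it trusts.

```rust
type PublicKey = [u8; 32];

struct Payment {
    trusted_timekeeper: PublicKey, // key this contract trusts for timestamps
    unlock_at: u64,                // timestamp at which funds unlock
}

impl Payment {
    fn apply_timestamp(&self, from: &PublicKey, now: u64) -> bool {
        // Ignore timestamps from keys this contract does not trust.
        *from == self.trusted_timekeeper && now >= self.unlock_at
    }
}

fn main() {
    let key = [1u8; 32];
    let pay = Payment { trusted_timekeeper: key, unlock_at: 100 };
    assert!(pay.apply_timestamp(&key, 150));
    assert!(!pay.apply_timestamp(&[2u8; 32], 150)); // untrusted source
}
```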
Greg Fitzgerald
5f99657523 Remove last_time from bank
We had a test for this, but without `Bank::time_sources` (removed in the last
commit), there's no last_time that can be trusted.
2018-07-09 08:40:07 -06:00
Greg Fitzgerald
587ae1bf3c Remove time_sources from bank
I wrote this, but per
https://github.com/solana-labs/solana#code-coverage, if it doesn't
break a test, it's fair game to delete.
2018-07-09 08:40:07 -06:00
Michael Vines
461dea69d9 Add SOLANA_METRICS_CONFIG environment variable 2018-07-07 19:40:09 -07:00
Michael Vines
22c0e3cd54 Metrics v0.1 2018-07-07 19:40:09 -07:00
Greg Fitzgerald
3ed9567f96 Remove exit variable from RequestStage 2018-07-05 17:32:41 -06:00
Greg Fitzgerald
c4fa841aa9 Remove exit variable from respond [stage]
And drop the sender that feeds input to the responder.
2018-07-05 17:32:41 -06:00
Greg Fitzgerald
f284af1c3d Remove exit variable from WindowStage and retransmit [stage] 2018-07-05 17:32:41 -06:00
Greg Fitzgerald
46602ba9c3 Remove exit variable from ReplicateStage 2018-07-05 17:32:41 -06:00
Greg Fitzgerald
81477246be Remove exit variable from VerifyStage 2018-07-05 17:32:41 -06:00
Greg Fitzgerald
9bd63867aa No longer need to ignore downstream send errors
By removing the exit variables, the downstream stages wait for
upstream stages to drop their senders before exiting.
2018-07-05 17:32:41 -06:00
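A minimal sketch of the shutdown pattern this message describes: no shared exit flag; the downstream stage loops until every upstream Sender is dropped, at which point recv() errors and the thread unwinds on its own.

```rust
use std::sync::mpsc::channel;
use std::thread;

fn main() {
    let (tx, rx) = channel();
    let downstream = thread::spawn(move || {
        // Runs until all Senders are gone; no exit variable needed.
        while let Ok(item) = rx.recv() {
            println!("processed {}", item);
        }
    });
    for i in 0..3 {
        tx.send(i).unwrap();
    }
    drop(tx); // upstream done; downstream exits cleanly
    downstream.join().unwrap();
}
```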
Greg Fitzgerald
d1c317fd5f Remove exit variable from broadcast [stage] 2018-07-05 17:32:41 -06:00
Greg Fitzgerald
cbd664ba4b Remove exit variable from BankingStage 2018-07-05 17:32:41 -06:00
Greg Fitzgerald
4bb7cefa15 Remove exit variable from WriteStage 2018-07-05 17:32:41 -06:00
Greg Fitzgerald
82c86daa78 Exit write_stage on channel errors 2018-07-05 17:32:41 -06:00
Greg Fitzgerald
b95db62be3 Handle errors consistently
Error handling is still clumsy. We should switch to something like
`error-chain` or `Result<T, Box<Error>>`, but until then, we can
at least be consistent across modules.
2018-07-05 17:32:41 -06:00
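A sketch of one of the options floated here, a crate-wide alias over a boxed error (illustrative; not necessarily the shape the repo settled on):

```rust
use std::error::Error;
use std::fs;

// Every module returns the same Result; concrete errors convert via From.
type Result<T> = std::result::Result<T, Box<dyn Error>>;

fn read_config(path: &str) -> Result<String> {
    Ok(fs::read_to_string(path)?) // io::Error converts automatically
}

fn main() {
    match read_config("leader.json") {
        Ok(text) => println!("{} bytes", text.len()),
        Err(e) => eprintln!("error: {}", e),
    }
}
```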
Greg Fitzgerald
0f7fdd71cc Remove executable bit from nat.rs 2018-07-05 17:32:41 -06:00
Greg Fitzgerald
af1a7da0d5 Fix code comments 2018-07-05 17:32:41 -06:00
Tyera Eulberg
d698b3da3a Revert tps_demo marker 2018-07-05 15:15:23 -06:00
Tyera Eulberg
6d275d571c Clean up commented code 2018-07-05 15:15:23 -06:00
Tyera Eulberg
63acb82c87 Update drone airdrop test for tps_demo functionality 2018-07-05 15:15:23 -06:00
Tyera Eulberg
4d05b74314 Port solana-client-demo to clap crate for CLI arguments 2018-07-05 15:15:23 -06:00
Tyera Eulberg
37dd511356 Pass client.json location as argument 2018-07-05 15:15:23 -06:00
Tyera Eulberg
96c321da76 Update drone to allow TPS-sized airdrops 2018-07-05 15:15:23 -06:00
Tyera Eulberg
4701540cc9 Migrate solana-client-demo to use drone 2018-07-05 15:15:23 -06:00
Pankaj Garg
f54615b4e3 UDP port for client demo in range
* This change will allow clients to run behind a firewall
  with only a certain port range opened for access
2018-07-05 10:17:35 -06:00
Greg Fitzgerald
9c456b2fb0 Fixup the integration tests 2018-07-04 16:40:34 -06:00
Greg Fitzgerald
77bf17064a Add Service trait
Added a consistent interface to all the microservices.
2018-07-04 16:40:34 -06:00
Tyera Eulberg
44150b2e85 Remove unused crate from wallet CLI 2018-07-04 16:39:26 -06:00
Tyera Eulberg
8ec2fe15f3 Port solana-drone to clap crate for CLI arguments 2018-07-04 16:39:26 -06:00
Michael Vines
687af3e3a4 Document source of magic net.core.rmem_max value 2018-07-04 14:24:01 -07:00
Michael Vines
72ab83cd45 Collect timing metrics for CI jobs 2018-07-04 11:00:56 -07:00
Greg Fitzgerald
4b07772e22 Add helper functions for reading entries
```rust
let entries = entry_writer::read_entries_from_str(entries_str).unwrap();
let entries_len = entries.len();
assert_eq!(entries_len, 7);
let bank = Bank::default();
bank.process_ledger(entries).unwrap();
assert_eq!(bank.transaction_count(), entries_len - 2);
```
2018-07-03 19:32:01 -06:00
Anatoly Yakovenko
22d2c962b2 ignore 2018-07-03 18:10:16 -06:00
Anatoly Yakovenko
e771d36278 Better logs 2018-07-03 18:10:16 -06:00
Rob Walker
800c2dd370 make the leader append to the ledger file 2018-07-03 17:17:52 -06:00
Greg Fitzgerald
f38842822f Cleanup code duplication 2018-07-03 16:33:36 -06:00
Greg Fitzgerald
88a6fb86bf Clean up read_entries() and its usage 2018-07-03 16:33:36 -06:00
Greg Fitzgerald
f6fe998ed4 Revert 1dd8c5ed36
Per @sakridge, this might cause a performance degradation. Need
to benchmark it.
2018-07-03 14:00:53 -06:00
Anatoly Yakovenko
16337d7c1e unstable test 2018-07-03 14:00:39 -06:00
Anatoly Yakovenko
ae309f80f7 boot from file test 2018-07-03 14:00:39 -06:00
Anatoly Yakovenko
fa70b3bf70 split out files, fixed a bug @garious! 2018-07-03 14:00:39 -06:00
Anatoly Yakovenko
3a90f138b2 dynamic network test
* cleaned up fullnode api
* added debug_id to ReplicatedData and crdt for debugging
2018-07-03 14:00:39 -06:00
Michael Vines
033f6dcbcb Demote 'sorted leader' log 2018-07-03 08:24:28 -07:00
Greg Fitzgerald
5d8b2f899a Fix wallet doc 2018-07-02 19:21:03 -07:00
Greg Fitzgerald
490205ab84 Fix sanity check
...that my last PR broke
2018-07-02 19:21:03 -07:00
Michael Vines
2c0e704c82 Confirm the payment 2018-07-02 17:59:50 -07:00
Michael Vines
253048f72d Only tune networking for leader/validator 2018-07-02 17:59:50 -07:00
Michael Vines
e09b8430ce Add |wallet reset| command 2018-07-02 17:59:50 -07:00
Michael Vines
9ae283dc3a Expose wallet.sh as a Snap program temporarily 2018-07-02 17:59:50 -07:00
Michael Vines
f95a79d145 Default to using testnet.s.c when running as a Snap 2018-07-02 17:59:50 -07:00
Greg Fitzgerald
0dabdfd48e Use zero to represent a nonexistent account
This also fixes a bug in the thin client where a nonexistent account
would have triggered a panic because we were using `balances[k]` instead
of `balances.get(key)`.

Fixes #534
2018-07-02 18:48:40 -06:00
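A minimal sketch of the bug class fixed alongside this change: indexing a map panics on a missing key, while get() lets a nonexistent account simply read as zero.

```rust
use std::collections::HashMap;

fn balance_of(balances: &HashMap<String, i64>, key: &str) -> i64 {
    // balances[key] would panic for a nonexistent account;
    // get() returns None, which we read as a zero balance.
    *balances.get(key).unwrap_or(&0)
}

fn main() {
    let balances: HashMap<String, i64> = HashMap::new();
    assert_eq!(balance_of(&balances, "nonexistent"), 0);
}
```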
Greg Fitzgerald
d2bb4dc14a Purge empty accounts 2018-07-02 18:48:40 -06:00
Michael Vines
b4dc180592 More quotes to pacify shellcheck 2018-07-02 16:41:22 -07:00
Michael Vines
263577773f Set client config directory correctly in a Snap 2018-07-02 16:41:22 -07:00
Michael Vines
7d708be121 Drone now grabs mint.json locally 2018-07-02 16:41:22 -07:00
Michael Vines
feb1669d39 Correct locate rsync when running as a Snap 2018-07-02 15:57:30 -07:00
Michael Vines
2cbfe41422 Abort nicer on drone connection failure 2018-07-02 15:57:30 -07:00
Michael Vines
b7653865b1 Support testnet.solana.com as first argument 2018-07-02 15:57:30 -07:00
Michael Vines
c72dced8fa Report error when an invalid confirmation signature or public key is provided 2018-07-02 15:57:30 -07:00
Anatoly Yakovenko
6feed5fd56 rebased 2018-07-02 16:34:49 -06:00
Anatoly Yakovenko
b8fe5ae076 rename server to fullnode 2018-07-02 16:34:49 -06:00
Anatoly Yakovenko
7e657d65f3 merged f2ab08c65e 2018-07-02 16:34:49 -06:00
Anatoly Yakovenko
a166bb816e wtfr 2018-07-02 16:34:49 -06:00
Anatoly Yakovenko
2952027d04 wtfr 2018-07-02 16:34:49 -06:00
Anatoly Yakovenko
430d9d9314 fixup! 2018-07-02 16:34:49 -06:00
Anatoly Yakovenko
fa247196c0 fullnode lib 2018-07-02 16:34:49 -06:00
Greg Fitzgerald
5d17c2b58f Return output receivers from each stage
Reaching into the stages' structs for their receivers is, in hindsight,
more awkward than returning multiple values from constructors. By
returning the receiver, the caller can name the receiver whatever it
wants (as you would with any return value), and doesn't need to
reach into the struct for the field (which is super awkward in
combination with move semantics).
2018-07-02 16:18:32 -06:00
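A sketch of the constructor shape described above (names are illustrative): new() hands the output Receiver back to the caller instead of storing it in a struct field.

```rust
use std::sync::mpsc::{channel, Receiver};
use std::thread::{self, JoinHandle};

struct Stage {
    thread_hdl: JoinHandle<()>,
}

impl Stage {
    // Return the output receiver so the caller can name and move it freely.
    fn new(input: Receiver<u64>) -> (Self, Receiver<u64>) {
        let (sender, receiver) = channel();
        let thread_hdl = thread::spawn(move || {
            while let Ok(v) = input.recv() {
                if sender.send(v + 1).is_err() {
                    break; // downstream hung up
                }
            }
        });
        (Stage { thread_hdl }, receiver)
    }

    fn join(self) -> thread::Result<()> {
        self.thread_hdl.join()
    }
}

fn main() {
    let (tx, rx) = channel();
    let (stage, output) = Stage::new(rx);
    tx.send(1).unwrap();
    drop(tx);
    assert_eq!(output.recv().unwrap(), 2);
    stage.join().unwrap();
}
```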
Rob Walker
6ee45d282e some auto-detect of wallet commands 2018-07-02 15:51:12 -06:00
Michael Vines
cfc3bd0696 Add manual wallet sanity test 2018-07-02 14:38:01 -07:00
Michael Vines
3e0e09555a Undo UPnP UDP port binding 2018-07-02 14:38:01 -07:00
Michael Vines
1d8bb5144e Drop -demo suffix 2018-07-02 14:38:01 -07:00
Michael Vines
67e0100866 Bind to 0.0.0.0 2018-07-02 14:38:01 -07:00
Michael Vines
f2ab08c65e Reuse request UDP port for responses 2018-07-02 14:38:01 -07:00
Greg Fitzgerald
04a93050e7 No need to share a write lock across single-threaded methods 2018-07-02 15:25:16 -06:00
Michael Vines
03401041db Correct signature argument name 2018-07-02 11:24:13 -07:00
Michael Vines
6eac744a05 Only rsync leader.json once 2018-07-02 10:59:09 -07:00
Michael Vines
ae29e2085f Init env_logger 2018-07-02 10:59:09 -07:00
Michael Vines
7ce0b58af8 Document pkg-config dependency 2018-07-02 10:42:56 -07:00
Michael Vines
ea5663c0da Demote log 2018-07-02 10:28:43 -07:00
Michael Vines
a61bfae8a4 Document libssl-dev dependency 2018-07-02 10:28:43 -07:00
Michael Vines
5716898216 setup.sh can now be more picky about the kind of config it creates 2018-07-02 09:22:26 -07:00
Michael Vines
c0f9e452f2 mint.json is now private 2018-07-02 09:22:26 -07:00
Greg Fitzgerald
4e3526394e Use IntoInterator to simplify write_entries() usage 2018-07-02 09:51:39 -06:00
Greg Fitzgerald
6806a14a3f Use Cursor instead of tempfile.
Faster and one less dependency.
2018-07-02 09:51:39 -06:00
Greg Fitzgerald
ec7e50b37d Consolidate ledger serialization code
The new read_entries() works, but is overly constrained. Not
using that function yet, but adding it here in the hopes some
Rust guru will tell us how to get that lifetime constraint out
of there.

Fixes #517
2018-07-02 09:51:39 -06:00
Greg Fitzgerald
e7b7dfebf5 Add tests for process_ledger() 2018-07-02 09:51:39 -06:00
Michael Vines
a9e0b27772 Speed up snap build
1. Use pre-installed host rust toolchain
2. Build reference/performance fullnode in same part to avoid rebuilding libraries
3. Merge scripts into same part
2018-07-01 17:47:51 -07:00
Greg Fitzgerald
669164bada Boot EntryWriter's Mutex
Finally!
2018-07-01 17:29:24 -06:00
Greg Fitzgerald
4f3a291391 Move the writer into EntryWriter 2018-07-01 17:29:24 -06:00
Greg Fitzgerald
56e37ad2f4 Limit sticky mutex to WriteStage 2018-07-01 17:29:24 -06:00
Greg Fitzgerald
17de79a83a Remove dead code 2018-07-01 17:29:24 -06:00
Greg Fitzgerald
09e9139855 Move channel code to write stage 2018-07-01 17:29:24 -06:00
Greg Fitzgerald
76fc5822c9 Send Vec<Entry> between stages instead of Entry
Might see a performance boost here.
2018-07-01 17:29:24 -06:00
Greg Fitzgerald
c767a854ed Remove useless Arc 2018-07-01 11:35:32 -07:00
Greg Fitzgerald
b60802ddff Refactor such that genesis can use entry_writer 2018-07-01 11:35:32 -07:00
Greg Fitzgerald
1c35d59f26 Receive entries first, then write 2018-07-01 11:35:32 -07:00
Greg Fitzgerald
adcaf715c6 Cleanup write_entries 2018-07-01 11:35:32 -07:00
Greg Fitzgerald
1f9494221b Make space for a write_entry() that only writes entries 2018-07-01 11:35:32 -07:00
Greg Fitzgerald
466d6f76b9 Don't hide error in write_entry() 2018-07-01 11:35:32 -07:00
Greg Fitzgerald
b05e6ce3db Cleanup solana-genesis 2018-07-01 11:35:32 -07:00
Michael Vines
1d812e78d5 Use hard linking to speed up target cache save/restore 2018-07-01 08:59:42 -07:00
Michael Vines
fba494343f Save/restore target/ directory between builds 2018-06-30 22:30:57 -07:00
Michael Vines
0b878eccf8 Map HOME to grant docker containers access to the ~/.cargo registry cache 2018-06-30 21:50:15 -07:00
Michael Vines
98772b16d6 Generalize solana-snap build trigger 2018-06-30 21:50:15 -07:00
Michael Vines
bb82ff0c80 Don't wanna wait 2018-06-30 20:05:27 -07:00
Michael Vines
71af03dc98 Skip snap build for PRs if nothing under snap/ is modified
Additionally relegate push snap build to a secondary CI pipeline
2018-06-30 20:05:27 -07:00
Michael Vines
5671da4a0a Generate a client-specific mint.json 2018-06-30 15:28:17 -07:00
Michael Vines
d63493a852 Grant the snap build more time 2018-06-30 12:20:22 -07:00
Greg Fitzgerald
c06582ba40 Wallet no longer uses global mint.json 2018-06-29 22:26:42 -07:00
Michael Vines
450f271cf7 Move public IP address detection out of bash 2018-06-29 21:12:05 -07:00
Greg Fitzgerald
a31889f129 Readme version bump 2018-06-29 21:39:41 -06:00
Tyera Eulberg
ba6a6f5227 Use clap crate for wallet CLI subcommands and arguments 2018-06-29 21:30:20 -06:00
Greg Fitzgerald
9a38d61048 Version bump 2018-06-29 21:23:50 -06:00
Michael Vines
903ec27754 Add BROKEN_NAT env variable to select Udp sender port workaround 2018-06-29 20:02:28 -07:00
Michael Vines
0b56d603c2 Client NAT traversal 0.1
UPnP is now used to request a port on the NAT be forwarded to the local machine.
This obviously only works for NATs that support UPnP, and thus is not a panacea
for all NAT-related connectivity issues.

Notable hacks in this patch include a transmit/receive UDP socket pair to work
around current protocol limitations whereby the full node assumes its peer can
receive on the same UDP port it transmitted from.
2018-06-29 17:36:26 -07:00
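A std-only sketch of the transmit/receive socket pair mentioned in the last paragraph; addresses and ports are illustrative.

```rust
use std::net::UdpSocket;

fn main() -> std::io::Result<()> {
    // Receive on the port the NAT was asked to forward...
    let receive = UdpSocket::bind("0.0.0.0:8000")?;
    // ...but transmit from a separate ephemeral port, working around peers
    // that assume we can receive on the same port we transmitted from.
    let transmit = UdpSocket::bind("0.0.0.0:0")?;
    transmit.send_to(b"ping", "127.0.0.1:9000")?;
    let mut buf = [0u8; 1024];
    let (n, from) = receive.recv_from(&mut buf)?;
    println!("{} bytes from {}", n, from);
    Ok(())
}
```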
Michael Vines
4ffb5d157a Disable coverage until issue #433 is resolved 2018-06-29 17:36:26 -07:00
Greg Fitzgerald
816246ebee Add doc 2018-06-29 17:28:12 -06:00
Greg Fitzgerald
a9881aee05 Add base58-encoded addresses 2018-06-29 17:28:12 -06:00
Greg Fitzgerald
7b5b989cfe Print usage if a command is not provided 2018-06-29 17:28:12 -06:00
Greg Fitzgerald
c4b62e19f2 Do Proof of History verification before appending entries to the bank
Note: replicate_stage is still using `process_entries()` because
changing it to `process_blocks()` causes the `test_replicate` test to
fail.
2018-06-29 15:35:39 -06:00
Pankaj Garg
79a97ada04 Fix more shellchecks
Also, stops current nodes before pushing updates
2018-06-29 15:19:28 -06:00
Pankaj Garg
da215d1a21 Fix failed shellchecks 2018-06-29 15:19:28 -06:00
Pankaj Garg
9ffc50bead Address review comments 2018-06-29 15:19:28 -06:00
Pankaj Garg
f8352bac2f Address review comments
* Only public IP address in the list
* formatting and other comments
2018-06-29 15:19:28 -06:00
Pankaj Garg
27c1410fdc Script to deploy multiple nodes (one as leader, others as validators)
* The built code is loaded to the nodes
* ssh_keys can be copied to the nodes for internode comm
* The nodes are started with their respective roles
* The client demo is started on the last node
2018-06-29 15:19:28 -06:00
Greg Fitzgerald
9a4733bde7 Remove interactive behavior from wallet 2018-06-29 13:22:20 -06:00
Stephen Akridge
f3df5df52c add validator catchup to multi-node test 2018-06-29 10:39:41 -07:00
Greg Fitzgerald
517d08c637 Cleanup 2018-06-29 09:51:13 -07:00
Greg Fitzgerald
90dd794ae5 cargo fmt
rustfmt 0.6.1-stable (49279d71 2018-05-08)
2018-06-29 09:51:13 -07:00
Tyera Eulberg
e0dbbba8a3 fmt 2018-06-29 09:51:13 -07:00
Tyera Eulberg
705df55a7f Fix program name 2018-06-29 09:51:13 -07:00
Tyera Eulberg
d354e85a9a Return bool on signature check 2018-06-29 09:51:13 -07:00
Tyera Eulberg
e4e1f8ec1e Missing -m parameter handling 2018-06-29 09:51:13 -07:00
Tyera Eulberg
0112a24179 Add confirm command to wallet, and update RPU to check bank for a signature 2018-06-29 09:51:13 -07:00
Tyera Eulberg
d680f6b3a5 Fix bash scripts:
* Use wallet name everywhere
* Update drone to use mint.json
2018-06-29 09:51:13 -07:00
Anatoly Yakovenko
47e732717f more notes 2018-06-29 10:18:36 -06:00
Michael Vines
ec56abfccb Correct setup.sh args 2018-06-29 07:59:16 -07:00
Anatoly Yakovenko
e7cdb402fb highlight 2018-06-29 07:16:03 -06:00
Anatoly Yakovenko
a3fe1965fb spelling 2018-06-29 07:16:03 -06:00
Anatoly Yakovenko
5256e6833e update 2018-06-29 07:16:03 -06:00
Anatoly Yakovenko
051cd2e1ff more examples 2018-06-29 07:16:03 -06:00
Anatoly Yakovenko
51929e7df8 rfcs 2018-06-29 07:16:03 -06:00
Stephen Akridge
a094507bb8 Lower default benchmarking numbers to avoid CI timeout 2018-06-29 07:14:47 -06:00
Stephen Akridge
8effa4e3e0 Clear old blobs before putting in the new one
Otherwise we will just warn about overrun and not insert the new blob.
Also, break if the index we find is less than consumed; otherwise
we can loop infinitely.
2018-06-29 07:14:47 -06:00
Stephen Akridge
1c9e7dbc45 Don't recycle in the replicate stage
Windowing stage owns all the blobs now
2018-06-29 07:14:47 -06:00
Stephen Akridge
799b249f02 Don't null blob window until we have to 2018-06-29 07:14:47 -06:00
Greg Fitzgerald
7b4a378c92 Add public-ip option to snap validator with cuda 2018-06-28 21:14:29 -06:00
Michael Vines
47917d00d1 Always bind to 0.0.0.0 regardless of what's being advertised to other nodes 2018-06-28 19:13:36 -07:00
Michael Vines
a4c49af859 Add public-ip argument to setup.sh 2018-06-28 19:13:36 -07:00
Michael Vines
1c1d7d1e0e Log get_last_id errors 2018-06-28 19:13:36 -07:00
Stephen Akridge
d28536d76e Fix spelling of signature 2018-06-28 16:31:33 -07:00
Greg Fitzgerald
63cfbb9497 Only register last entry after a split 2018-06-28 16:54:06 -06:00
Greg Fitzgerald
231040b93e Add tests 2018-06-28 12:28:43 -07:00
Greg Fitzgerald
7c74afc35a Relax recycler
Instead of asserting ref count is 1 before recycling, allow users
to recycle items early. If it turns out that was too early, and
allocate() wants to return it, then boot it and take a memory
allocation performance hit instead.
2018-06-28 12:28:43 -07:00
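A hedged sketch of the relaxed policy (illustrative types, not the repo's actual Recycler):

```rust
use std::sync::{Arc, Mutex};

struct Recycler {
    gc: Mutex<Vec<Arc<Vec<u8>>>>,
}

impl Recycler {
    fn allocate(&self) -> Arc<Vec<u8>> {
        let mut gc = self.gc.lock().unwrap();
        while let Some(item) = gc.pop() {
            // Items recycled too early may still be referenced elsewhere.
            // Hand out only sole references; boot the rest and take an
            // allocation hit instead of corrupting a buffer still in use.
            if Arc::strong_count(&item) == 1 {
                return item;
            }
        }
        Arc::new(Vec::new())
    }

    fn recycle(&self, item: Arc<Vec<u8>>) {
        // No assertion here: early recycling is allowed.
        self.gc.lock().unwrap().push(item);
    }
}

fn main() {
    let recycler = Recycler { gc: Mutex::new(Vec::new()) };
    let a = recycler.allocate();
    recycler.recycle(a);
    let _b = recycler.allocate(); // reuses the recycled buffer
}
```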
Greg Fitzgerald
7878a011eb Use a Mint to configure the wallet
* Send transactions from the mint's private key
* By default, send full balance to oneself
* By default, request the mint's number of tokens for airdrops
2018-06-27 17:35:50 -06:00
Greg Fitzgerald
c05416e27d Turn simple-client-demo into a simpler wallet 2018-06-27 17:35:50 -06:00
Michael Vines
ee200d8fa0 Add DEBUG= flag to select debug binaries 2018-06-27 15:34:31 -07:00
Rob Walker
2f42658cd4 ... 2018-06-27 14:51:18 -07:00
Rob Walker
d95e8030fc ... 2018-06-27 14:51:18 -07:00
Greg Fitzgerald
4aedd3f1b6 Cleanup type aliases and imports 2018-06-27 15:06:18 -06:00
Greg Fitzgerald
bb89d6f54d Get back to 500k transactions 2018-06-27 13:50:27 -07:00
Greg Fitzgerald
ed10841e3d No longer spin up accounts for client-demo
Now that the Bank is single-threaded again, we can spin up new
accounts on the fly without concern of thread contention. Likewise,
we can send all transactions from a single account, which was also
problematic in the multi-threaded bank. Sending from one account will
also make client-demo straightforward to port to solana-drone.
2018-06-27 13:50:27 -07:00
Tyera Eulberg
6dac87f2a7 Add entry to snapcraft yaml; cleanup bash header 2018-06-27 13:01:29 -06:00
Tyera Eulberg
a167d0d331 CI cleanup 2018-06-27 13:01:29 -06:00
Tyera Eulberg
eed37820b5 Comments 2018-06-27 13:01:29 -06:00
Tyera Eulberg
124e1fa350 Bash scripts to go with simple-client-demo 2018-06-27 13:01:29 -06:00
Tyera Eulberg
ac40434cdf Initial simple client demo commit 2018-06-27 13:01:29 -06:00
Rob Walker
39354c06f8 take multiple log files, allow restart of leader, validator 2018-06-27 11:41:25 -07:00
Michael Vines
faedb88de0 s/local/declare/g 2018-06-26 19:11:31 -07:00
Michael Vines
5cd1fb486f Automatically add rsync:// prefix to URLs that need it 2018-06-26 17:45:53 -07:00
Rob Walker
5b5df49e6c make client.sh behave like the others, i.e. no tee to a log 2018-06-26 17:02:24 -07:00
Michael Vines
86f9277e2d Add USE_SNAP flag 2018-06-26 16:32:55 -07:00
Greg Fitzgerald
56b09bf0ac cargo fmt 2018-06-26 16:51:07 -06:00
Stephen Akridge
f4c4b9df9c Only free in replicate if we did not hold the reference in window stage
And then free when we are consuming blobs
2018-06-26 16:51:07 -06:00
Greg Fitzgerald
6e568c69a7 Preemptive strike
Should that blob have been passed to a recycler, it would have
had too high a reference count.
2018-06-26 16:51:07 -06:00
Greg Fitzgerald
14d624ee40 Fix benchmarks too
This change will make these benchmarks way slower, because it's now
cloning the transaction vector each iteration instead of the ref
counts. We need to rethink these.
2018-06-26 16:51:07 -06:00
Greg Fitzgerald
d5c0557891 Fix test_replicate too 2018-06-26 16:51:07 -06:00
Greg Fitzgerald
1691060a22 Assert recycler is given last reference to data
This patch likely fixes the sporadic failures in the following tests:

```
test server::tests::validator_exit ... FAILED
test streamer::test::streamer_send_test ... FAILED
test thin_client::tests::test_bad_sig ... FAILED
test drone::tests::test_send_airdrop ... FAILED
test thin_client::tests::test_thin_client ... FAILED
```
2018-06-26 16:51:07 -06:00
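The check this commit adds, in minimal form (illustrative signature). The later "Relax recycler" commit above (7c74afc35a) replaces this assertion with the boot-on-reuse policy.

```rust
use std::sync::Arc;

// Refuse to recycle a buffer while any other clone of the Arc is alive;
// reusing it would let two owners scribble on the same data.
fn recycle_checked(item: Arc<Vec<u8>>, gc: &mut Vec<Arc<Vec<u8>>>) {
    assert_eq!(Arc::strong_count(&item), 1, "recycle() called on shared data");
    gc.push(item);
}

fn main() {
    let mut gc = Vec::new();
    recycle_checked(Arc::new(vec![0u8; 8]), &mut gc); // sole reference: ok
}
```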
Rob Walker
a5ce578c72 ... 2018-06-26 16:23:41 -06:00
Greg Fitzgerald
05edfad13a Fix compiler warnings 2018-06-26 15:03:15 -07:00
Greg Fitzgerald
136b43f461 Fix whitespace
TODO: Why didn't "cargo fmt" fail the build?
2018-06-26 15:03:15 -07:00
Rob Walker
ac40c1818f .. 2018-06-26 13:57:10 -07:00
Rob Walker
eb63dbcd2a an Entry needs to be multiple of 4 bytes long 2018-06-26 13:57:10 -07:00
Rob Walker
4e2f1a519e whack next_entries_batched 2018-06-26 13:57:10 -07:00
Rob Walker
55ec7f9fe9 add entry.has_more
* quick fix for really big genesis
* longer-term fix for possible parallel verification over multiple Blobs/Entries
2018-06-26 13:57:10 -07:00
Michael Vines
b7ddefdbf9 Empty plug array is not accepted by the snap store 2018-06-26 12:49:40 -07:00
Michael Vines
ce361c2cdc Add Snap fullnode daemon 2018-06-26 12:32:33 -07:00
Michael Vines
ed6ba55261 Add snap/ README 2018-06-26 12:32:33 -07:00
Michael Vines
ec333d2bd6 Revert "-v was renamed to -t"
This reverts commit 8f4ce1e8d0.
2018-06-26 12:32:33 -07:00
OEM Configuration (temporary user)
551f639259 Some pull request fixes(linting + documentation) 2018-06-26 12:31:04 -06:00
OEM Configuration (temporary user)
da3bb6fb93 ran linter 2018-06-26 12:31:04 -06:00
OEM Configuration (temporary user)
08bcb62016 added remote table to update responses 2018-06-26 12:31:04 -06:00
Michael Vines
8f4ce1e8d0 -v was renamed to -t 2018-06-25 20:48:26 -07:00
Greg Fitzgerald
4a534d6abb Don't clone() Arc before recycling
This might fix an awful bug where the streamer reuses a Blob
before the current user is done with it. Recycler should probably
assert ref count is one?

* Also don't collect() an iterator before iterating over it.
2018-06-25 17:33:07 -06:00
Stephen Akridge
b48a8c0555 Chunk blobs into window size to avoid window overrun
Fixes #447
2018-06-25 17:33:07 -06:00
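A minimal sketch of the chunking fix; WINDOW_SIZE and the blob type are stand-ins for the real ones.

```rust
const WINDOW_SIZE: usize = 32; // stand-in for the actual window size

// Send blobs in window-sized chunks so a long run can never overrun
// the receiver's window.
fn send_in_chunks<F: FnMut(&[Vec<u8>])>(blobs: &[Vec<u8>], mut send: F) {
    for chunk in blobs.chunks(WINDOW_SIZE) {
        send(chunk);
    }
}

fn main() {
    let blobs = vec![vec![0u8; 8]; 100];
    let mut batches = 0;
    send_in_chunks(&blobs, |_| batches += 1);
    assert_eq!(batches, 4); // ceil(100 / 32) = 4 batches
}
```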
Rob Walker
1919ec247b add a clock to validator windows (part 3 of #309) (#448)
* count entries processed by Bank
 * initialize windows with initial height of Entries
2018-06-25 15:07:48 -07:00
Rob Walker
3966eb5374 support MacOS bash and ifconfig properly 2018-06-25 13:14:36 -06:00
Stephen Akridge
c22ef50cae Client fixes, poll for unique last id and cache clients
So we don't keep running up the port range
2018-06-25 10:02:29 -06:00
106 changed files with 7809 additions and 2887 deletions


@@ -0,0 +1,2 @@
CI_BUILD_START=$(date +%s)
export CI_BUILD_START


@@ -0,0 +1 @@
post-checkout


@@ -0,0 +1,50 @@
#!/bin/bash -e
#
# Save target/ for the next CI build on this machine
#
if [[ -n $CARGO_TARGET_CACHE_NAME ]]; then
(
d=$HOME/cargo-target-cache/"$CARGO_TARGET_CACHE_NAME"
mkdir -p "$d"
set -x
rsync -a --delete --link-dest="$PWD" target "$d"
du -hs "$d"
)
fi
#
# Add job_stats data point
#
if [[ -z $CI_BUILD_START ]]; then
echo Error: CI_BUILD_START empty
else
CI_BUILD_DURATION=$(( $(date +%s) - CI_BUILD_START + 1 ))
CI_LABEL=${BUILDKITE_LABEL:-build label missing}
PR=false
if [[ $BUILDKITE_BRANCH =~ pull/* ]]; then
PR=true
fi
SUCCESS=true
if [[ $BUILDKITE_COMMAND_EXIT_STATUS != 0 ]]; then
SUCCESS=false
fi
point_tags="pipeline=$BUILDKITE_PIPELINE_SLUG,job=$CI_LABEL,pr=$PR,success=$SUCCESS"
point_tags="${point_tags// /\\ }" # Escape spaces
point_fields="duration=$CI_BUILD_DURATION"
point_fields="${point_fields// /\\ }" # Escape spaces
point="job_stats,$point_tags $point_fields"
echo "Influx data point: $point"
if [[ -n $INFLUX_USERNAME && -n $INFLUX_PASSWORD ]]; then
echo "https://metrics.solana.com:8086/write?db=ci&u=${INFLUX_USERNAME}&p=${INFLUX_PASSWORD}" \
| xargs curl -XPOST --data-binary "$point"
else
echo Influx user credentials not found
fi
fi


@@ -0,0 +1 @@
post-command


@@ -0,0 +1,13 @@
#!/bin/bash -e
[[ -n "$CARGO_TARGET_CACHE_NAME" ]] || exit 0
#
# Restore target/ from the previous CI build on this machine
#
(
d=$HOME/cargo-target-cache/"$CARGO_TARGET_CACHE_NAME"
mkdir -p "$d"/target
set -x
rsync -a --delete --link-dest="$d" "$d"/target .
)


@@ -0,0 +1 @@
pre-command

1
.clippy.toml Normal file

@@ -0,0 +1 @@
too-many-arguments-threshold = 9

7
.gitignore vendored

@@ -2,3 +2,10 @@ Cargo.lock
/target/
**/*.rs.bk
.cargo
# node configuration files
/config/
/config-private/
/config-drone/
/config-validator/
/config-client/


@@ -1,7 +1,7 @@
[package]
name = "solana"
description = "Blockchain, Rebuilt for Scale"
version = "0.7.0-alpha"
version = "0.7.0-rc.5"
documentation = "https://docs.rs/solana"
homepage = "http://solana.com/"
readme = "README.md"
@@ -17,10 +17,18 @@ license = "Apache-2.0"
name = "solana-client-demo"
path = "src/bin/client-demo.rs"
[[bin]]
name = "solana-wallet"
path = "src/bin/wallet.rs"
[[bin]]
name = "solana-fullnode"
path = "src/bin/fullnode.rs"
[[bin]]
name = "solana-keygen"
path = "src/bin/keygen.rs"
[[bin]]
name = "solana-fullnode-config"
path = "src/bin/fullnode-config.rs"
@@ -29,18 +37,6 @@ path = "src/bin/fullnode-config.rs"
name = "solana-genesis"
path = "src/bin/genesis.rs"
[[bin]]
name = "solana-genesis-demo"
path = "src/bin/genesis-demo.rs"
[[bin]]
name = "solana-mint"
path = "src/bin/mint.rs"
[[bin]]
name = "solana-mint-demo"
path = "src/bin/mint-demo.rs"
[[bin]]
name = "solana-drone"
path = "src/bin/drone.rs"
@@ -76,4 +72,36 @@ rand = "0.5.1"
pnet_datalink = "0.21.0"
tokio = "0.1"
tokio-codec = "0.1"
tokio-core = "0.1.17"
tokio-io = "0.1"
itertools = "0.7.8"
bs58 = "0.2.0"
p2p = "0.5.2"
futures = "0.1.21"
clap = "2.31"
reqwest = "0.8.6"
influx_db_client = "0.3.4"
dirs = "1.0.2"
[dev-dependencies]
criterion = "0.2"
[[bench]]
name = "bank"
harness = false
[[bench]]
name = "banking_stage"
harness = false
[[bench]]
name = "ledger"
harness = false
[[bench]]
name = "signature"
harness = false
[[bench]]
name = "streamer"
harness = false

113
README.md

@@ -47,7 +47,7 @@ $ source $HOME/.cargo/env
Now checkout the code from github:
```bash
$ git clone https://github.com/solana-labs/solana.git
$ cd solana
```
@@ -58,7 +58,7 @@ your odds of success if you check out the
before proceeding:
```bash
$ git checkout v0.6.1
$ git checkout v0.7.0-beta
```
Configuration Setup
@@ -84,35 +84,45 @@ Now start the server:
$ ./multinode-demo/leader.sh
```
To run a performance-enhanced fullnode on Linux, download `libcuda_verify_ed25519.a`. Enable
it by adding `--features=cuda` to the line that runs `solana-fullnode` in
`leader.sh`. [CUDA 9.2](https://developer.nvidia.com/cuda-downloads) must be installed on your system.
```bash
$ ./fetch-perf-libs.sh
$ cargo run --release --features=cuda --bin solana-fullnode -- -l leader.json < genesis.log
```
Wait a few seconds for the server to initialize. It will print "Ready." when it's ready to
receive transactions.
Drone
---
In order for the below test client and validators to work, we'll also need to
spin up a drone to give out some test tokens. The drone delivers Milton
Friedman-style "air drops" (free tokens to requesting clients) to be used in
test transactions.
Start the drone on the leader node with:
```bash
$ ./multinode-demo/drone.sh
```
Multinode Testnet
---
To run a multinode testnet, after starting a leader node, spin up some validator nodes:
```bash
$ ./multinode-demo/validator.sh ubuntu@10.0.1.51:~/solana #The leader machine
$ ./multinode-demo/validator.sh ubuntu@10.0.1.51:~/solana 10.0.1.51
```
As with the leader node, you can run a performance-enhanced validator fullnode by adding
`--features=cuda` to the line that runs `solana-fullnode` in `validator.sh`.
To run a performance-enhanced leader or validator (on Linux),
[CUDA 9.2](https://developer.nvidia.com/cuda-downloads) must be installed on
your system:
```bash
$ cargo run --release --features=cuda --bin solana-fullnode -- -l validator.json -v leader.json < genesis.log
$ ./fetch-perf-libs.sh
$ SOLANA_CUDA=1 ./multinode-demo/leader.sh
$ SOLANA_CUDA=1 ./multinode-demo/validator.sh ubuntu@10.0.1.51:~/solana 10.0.1.51
```
Testnet Client Demo
---
@@ -152,6 +162,72 @@ $ snap info solana
$ sudo snap refresh solana --devmode
```
### Daemon support
The snap supports running a leader, validator or leader+drone node as a system
daemon.
Run `sudo snap get solana` to view the current daemon configuration, and
`sudo snap logs -f solana` to view the daemon logs.
Disable the daemon at any time by running:
```bash
$ sudo snap set solana mode=
```
Runtime configuration files for the daemon can be found in
`/var/snap/solana/current/config`.
#### Leader daemon
```bash
$ sudo snap set solana mode=leader
```
If CUDA is available:
```bash
$ sudo snap set solana mode=leader enable-cuda=1
```
`rsync` must be configured and running on the leader.
1. Ensure rsync is installed with `sudo apt-get -y install rsync`
2. Edit `/etc/rsyncd.conf` to include the following
```
[config]
path = /var/snap/solana/current/config
hosts allow = *
read only = true
```
3. Run `sudo systemctl enable rsync; sudo systemctl start rsync`
4. Test by running `rsync -Pzravv rsync://<ip-address-of-leader>/config
solana-config` from another machine. **If the leader is running on a cloud
provider it may be necessary to configure the Firewall rules to permit ingress
to port tcp:873, tcp:9900 and the port range udp:8000-udp:10000**
To run both the Leader and Drone:
```bash
$ sudo snap set solana mode=leader+drone
```
#### Validator daemon
```bash
$ sudo snap set solana mode=validator
```
If CUDA is available:
```bash
$ sudo snap set solana mode=validator enable-cuda=1
```
By default the validator will connect to **testnet.solana.com**; override
the leader IP address by running:
```bash
$ sudo snap set solana mode=validator leader-address=127.0.0.1 #<-- change IP address
```
It's assumed that the leader will be running `rsync` configured as described in
the previous **Leader daemon** section.
Developing
===
@@ -172,6 +248,11 @@ If your rustc version is lower than 1.26.1, please update it:
$ rustup update
```
On Linux systems you may need to install libssl-dev and pkg-config. On Ubuntu:
```bash
$ sudo apt-get install libssl-dev pkg-config
```
Download the source code:
```bash

66
benches/bank.rs Normal file

@@ -0,0 +1,66 @@
#[macro_use]
extern crate criterion;
extern crate bincode;
extern crate rayon;
extern crate solana;
use bincode::serialize;
use criterion::{Bencher, Criterion};
use rayon::prelude::*;
use solana::bank::*;
use solana::hash::hash;
use solana::mint::Mint;
use solana::signature::{KeyPair, KeyPairUtil};
use solana::transaction::Transaction;
fn bench_process_transaction(bencher: &mut Bencher) {
let mint = Mint::new(100_000_000);
let bank = Bank::new(&mint);
// Create transactions between unrelated parties.
let transactions: Vec<_> = (0..4096)
.into_par_iter()
.map(|i| {
// Seed the 'from' account.
let rando0 = KeyPair::new();
let tx = Transaction::new(&mint.keypair(), rando0.pubkey(), 10_000, mint.last_id());
assert!(bank.process_transaction(&tx).is_ok());
// Seed the 'to' account and a cell for its signature.
let last_id = hash(&serialize(&i).unwrap()); // Unique hash
bank.register_entry_id(&last_id);
let rando1 = KeyPair::new();
let tx = Transaction::new(&rando0, rando1.pubkey(), 1, last_id);
assert!(bank.process_transaction(&tx).is_ok());
// Finally, return the transaction to the benchmark.
tx
})
.collect();
bencher.iter_with_setup(
|| {
// Since benchmarker runs this multiple times, we need to clear the signatures.
bank.clear_signatures();
transactions.clone()
},
|transactions| {
let results = bank.process_transactions(transactions);
assert!(results.iter().all(Result::is_ok));
},
)
}
fn bench(criterion: &mut Criterion) {
criterion.bench_function("bench_process_transaction", |bencher| {
bench_process_transaction(bencher);
});
}
criterion_group!(
name = benches;
config = Criterion::default().sample_size(2);
targets = bench
);
criterion_main!(benches);

226
benches/banking_stage.rs Normal file

@@ -0,0 +1,226 @@
extern crate bincode;
#[macro_use]
extern crate criterion;
extern crate rayon;
extern crate solana;
use criterion::{Bencher, Criterion};
use rayon::prelude::*;
use solana::bank::Bank;
use solana::banking_stage::BankingStage;
use solana::mint::Mint;
use solana::packet::{to_packets_chunked, PacketRecycler};
use solana::record_stage::Signal;
use solana::signature::{KeyPair, KeyPairUtil};
use solana::transaction::Transaction;
use std::iter;
use std::sync::mpsc::{channel, Receiver};
use std::sync::Arc;
// use self::test::Bencher;
// use bank::{Bank, MAX_ENTRY_IDS};
// use bincode::serialize;
// use hash::hash;
// use mint::Mint;
// use rayon::prelude::*;
// use signature::{KeyPair, KeyPairUtil};
// use std::collections::HashSet;
// use std::time::Instant;
// use transaction::Transaction;
//
// fn bench_process_transactions(_bencher: &mut Bencher) {
// let mint = Mint::new(100_000_000);
// let bank = Bank::new(&mint);
// // Create transactions between unrelated parties.
// let txs = 100_000;
// let last_ids: Mutex<HashSet<Hash>> = Mutex::new(HashSet::new());
// let transactions: Vec<_> = (0..txs)
// .into_par_iter()
// .map(|i| {
// // Seed the 'to' account and a cell for its signature.
// let dummy_id = i % (MAX_ENTRY_IDS as i32);
// let last_id = hash(&serialize(&dummy_id).unwrap()); // Semi-unique hash
// {
// let mut last_ids = last_ids.lock().unwrap();
// if !last_ids.contains(&last_id) {
// last_ids.insert(last_id);
// bank.register_entry_id(&last_id);
// }
// }
//
// // Seed the 'from' account.
// let rando0 = KeyPair::new();
// let tx = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, last_id);
// bank.process_transaction(&tx).unwrap();
//
// let rando1 = KeyPair::new();
// let tx = Transaction::new(&rando0, rando1.pubkey(), 2, last_id);
// bank.process_transaction(&tx).unwrap();
//
// // Finally, return a transaction that's unique
// Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
// })
// .collect();
//
// let banking_stage = EventProcessor::new(bank, &mint.last_id(), None);
//
// let now = Instant::now();
// assert!(banking_stage.process_transactions(transactions).is_ok());
// let duration = now.elapsed();
// let sec = duration.as_secs() as f64 + duration.subsec_nanos() as f64 / 1_000_000_000.0;
// let tps = txs as f64 / sec;
//
// // Ensure that all transactions were successfully logged.
// drop(banking_stage.historian_input);
// let entries: Vec<Entry> = banking_stage.output.lock().unwrap().iter().collect();
// assert_eq!(entries.len(), 1);
// assert_eq!(entries[0].transactions.len(), txs as usize);
//
// println!("{} tps", tps);
// }
fn check_txs(batches: usize, receiver: &Receiver<Signal>, ref_tx_count: usize) {
let mut total = 0;
for _ in 0..batches {
let signal = receiver.recv().unwrap();
if let Signal::Transactions(transactions) = signal {
total += transactions.len();
} else {
assert!(false);
}
}
assert_eq!(total, ref_tx_count);
}
fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
let tx = 10_000_usize;
let mint_total = 1_000_000_000_000;
let mint = Mint::new(mint_total);
let num_dst_accounts = 8 * 1024;
let num_src_accounts = 8 * 1024;
let srckeys: Vec<_> = (0..num_src_accounts).map(|_| KeyPair::new()).collect();
let dstkeys: Vec<_> = (0..num_dst_accounts)
.map(|_| KeyPair::new().pubkey())
.collect();
let transactions: Vec<_> = (0..tx)
.map(|i| {
Transaction::new(
&srckeys[i % num_src_accounts],
dstkeys[i % num_dst_accounts],
i as i64,
mint.last_id(),
)
})
.collect();
let (verified_sender, verified_receiver) = channel();
let (signal_sender, signal_receiver) = channel();
let packet_recycler = PacketRecycler::default();
let setup_transactions: Vec<_> = (0..num_src_accounts)
.map(|i| {
Transaction::new(
&mint.keypair(),
srckeys[i].pubkey(),
mint_total / num_src_accounts as i64,
mint.last_id(),
)
})
.collect();
bencher.iter(move || {
let bank = Arc::new(Bank::new(&mint));
let verified_setup: Vec<_> =
to_packets_chunked(&packet_recycler, &setup_transactions.clone(), tx)
.into_iter()
.map(|x| {
let len = (*x).read().unwrap().packets.len();
(x, iter::repeat(1).take(len).collect())
})
.collect();
let verified_setup_len = verified_setup.len();
verified_sender.send(verified_setup).unwrap();
BankingStage::process_packets(&bank, &verified_receiver, &signal_sender, &packet_recycler)
.unwrap();
check_txs(verified_setup_len, &signal_receiver, num_src_accounts);
let verified: Vec<_> = to_packets_chunked(&packet_recycler, &transactions.clone(), 192)
.into_iter()
.map(|x| {
let len = (*x).read().unwrap().packets.len();
(x, iter::repeat(1).take(len).collect())
})
.collect();
let verified_len = verified.len();
verified_sender.send(verified).unwrap();
BankingStage::process_packets(&bank, &verified_receiver, &signal_sender, &packet_recycler)
.unwrap();
check_txs(verified_len, &signal_receiver, tx);
});
}
fn bench_banking_stage_single_from(bencher: &mut Bencher) {
let tx = 10_000_usize;
let mint = Mint::new(1_000_000_000_000);
let mut pubkeys = Vec::new();
let num_keys = 8;
for _ in 0..num_keys {
pubkeys.push(KeyPair::new().pubkey());
}
let transactions: Vec<_> = (0..tx)
.into_par_iter()
.map(|i| {
Transaction::new(
&mint.keypair(),
pubkeys[i % num_keys],
i as i64,
mint.last_id(),
)
})
.collect();
let (verified_sender, verified_receiver) = channel();
let (signal_sender, signal_receiver) = channel();
let packet_recycler = PacketRecycler::default();
bencher.iter(move || {
let bank = Arc::new(Bank::new(&mint));
let verified: Vec<_> = to_packets_chunked(&packet_recycler, &transactions.clone(), tx)
.into_iter()
.map(|x| {
let len = (*x).read().unwrap().packets.len();
(x, iter::repeat(1).take(len).collect())
})
.collect();
let verified_len = verified.len();
verified_sender.send(verified).unwrap();
BankingStage::process_packets(&bank, &verified_receiver, &signal_sender, &packet_recycler)
.unwrap();
check_txs(verified_len, &signal_receiver, tx);
});
}
fn bench(criterion: &mut Criterion) {
criterion.bench_function("bench_banking_stage_multi_accounts", |bencher| {
bench_banking_stage_multi_accounts(bencher);
});
criterion.bench_function("bench_process_stage_single_from", |bencher| {
bench_banking_stage_single_from(bencher);
});
}
criterion_group!(
name = benches;
config = Criterion::default().sample_size(2);
targets = bench
);
criterion_main!(benches);

40
benches/ledger.rs Normal file

@@ -0,0 +1,40 @@
#[macro_use]
extern crate criterion;
extern crate solana;
use criterion::{Bencher, Criterion};
use solana::hash::{hash, Hash};
use solana::ledger::{next_entries, reconstruct_entries_from_blobs, Block};
use solana::packet::BlobRecycler;
use solana::signature::{KeyPair, KeyPairUtil};
use solana::transaction::Transaction;
use std::collections::VecDeque;
fn bench_block_to_blobs_to_block(bencher: &mut Bencher) {
let zero = Hash::default();
let one = hash(&zero);
let keypair = KeyPair::new();
let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, one);
let transactions = vec![tx0; 10];
let entries = next_entries(&zero, 1, transactions);
let blob_recycler = BlobRecycler::default();
bencher.iter(|| {
let mut blob_q = VecDeque::new();
entries.to_blobs(&blob_recycler, &mut blob_q);
assert_eq!(reconstruct_entries_from_blobs(blob_q).unwrap(), entries);
});
}
fn bench(criterion: &mut Criterion) {
criterion.bench_function("bench_block_to_blobs_to_block", |bencher| {
bench_block_to_blobs_to_block(bencher);
});
}
criterion_group!(
name = benches;
config = Criterion::default().sample_size(2);
targets = bench
);
criterion_main!(benches);

24
benches/signature.rs Normal file

@@ -0,0 +1,24 @@
#[macro_use]
extern crate criterion;
extern crate solana;
use criterion::{Bencher, Criterion};
use solana::signature::GenKeys;
fn bench_gen_keys(b: &mut Bencher) {
let rnd = GenKeys::new([0u8; 32]);
b.iter(|| rnd.gen_n_keypairs(1000));
}
fn bench(criterion: &mut Criterion) {
criterion.bench_function("bench_gen_keys", |bencher| {
bench_gen_keys(bencher);
});
}
criterion_group!(
name = benches;
config = Criterion::default().sample_size(2);
targets = bench
);
criterion_main!(benches);

117
benches/streamer.rs Normal file

@@ -0,0 +1,117 @@
#[macro_use]
extern crate log;
extern crate solana;
#[macro_use]
extern crate criterion;
use criterion::{Bencher, Criterion};
use solana::packet::{Packet, PacketRecycler, BLOB_SIZE, PACKET_DATA_SIZE};
use solana::result::Result;
use solana::streamer::{receiver, PacketReceiver};
use std::net::{SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::channel;
use std::sync::{Arc, Mutex};
use std::thread::sleep;
use std::thread::{spawn, JoinHandle};
use std::time::Duration;
use std::time::SystemTime;
fn producer(addr: &SocketAddr, recycler: PacketRecycler, exit: Arc<AtomicBool>) -> JoinHandle<()> {
let send = UdpSocket::bind("0.0.0.0:0").unwrap();
let msgs = recycler.allocate();
let msgs_ = msgs.clone();
msgs.write().unwrap().packets.resize(10, Packet::default());
for w in msgs.write().unwrap().packets.iter_mut() {
w.meta.size = PACKET_DATA_SIZE;
w.meta.set_addr(&addr);
}
spawn(move || loop {
if exit.load(Ordering::Relaxed) {
return;
}
let mut num = 0;
for p in msgs_.read().unwrap().packets.iter() {
let a = p.meta.addr();
assert!(p.meta.size < BLOB_SIZE);
send.send_to(&p.data[..p.meta.size], &a).unwrap();
num += 1;
}
assert_eq!(num, 10);
})
}
fn sink(
recycler: PacketRecycler,
exit: Arc<AtomicBool>,
rvs: Arc<Mutex<usize>>,
r: PacketReceiver,
) -> JoinHandle<()> {
spawn(move || loop {
if exit.load(Ordering::Relaxed) {
return;
}
let timer = Duration::new(1, 0);
match r.recv_timeout(timer) {
Ok(msgs) => {
*rvs.lock().unwrap() += msgs.read().unwrap().packets.len();
recycler.recycle(msgs);
}
_ => (),
}
})
}
fn bench_streamer_with_result() -> Result<()> {
let read = UdpSocket::bind("127.0.0.1:0")?;
read.set_read_timeout(Some(Duration::new(1, 0)))?;
let addr = read.local_addr()?;
let exit = Arc::new(AtomicBool::new(false));
let pack_recycler = PacketRecycler::default();
let (s_reader, r_reader) = channel();
let t_reader = receiver(read, exit.clone(), pack_recycler.clone(), s_reader);
let t_producer1 = producer(&addr, pack_recycler.clone(), exit.clone());
let t_producer2 = producer(&addr, pack_recycler.clone(), exit.clone());
let t_producer3 = producer(&addr, pack_recycler.clone(), exit.clone());
let rvs = Arc::new(Mutex::new(0));
let t_sink = sink(pack_recycler.clone(), exit.clone(), rvs.clone(), r_reader);
let start = SystemTime::now();
let start_val = *rvs.lock().unwrap();
sleep(Duration::new(5, 0));
let elapsed = start.elapsed().unwrap();
let end_val = *rvs.lock().unwrap();
let time = elapsed.as_secs() * 1_000_000_000 + elapsed.subsec_nanos() as u64; // elapsed nanoseconds
let ftime = (time as f64) / 1_000_000_000f64; // elapsed seconds
let fcount = (end_val - start_val) as f64;
trace!("performance: {:?}", fcount / ftime);
exit.store(true, Ordering::Relaxed);
t_reader.join()?;
t_producer1.join()?;
t_producer2.join()?;
t_producer3.join()?;
t_sink.join()?;
Ok(())
}
fn bench_streamer(bencher: &mut Bencher) {
bencher.iter(|| {
bench_streamer_with_result().unwrap();
});
}
fn bench(criterion: &mut Criterion) {
criterion.bench_function("bench_streamer", |bencher| {
bench_streamer(bencher);
});
}
criterion_group!(
name = benches;
config = Criterion::default().sample_size(2);
targets = bench
);
criterion_main!(benches);


@@ -33,11 +33,12 @@ The process to update a disk image is as follows (TODO: make this less manual):
4. From another machine, `gcloud auth login`, then create a new Disk Image based
off the modified VM Instance:
```
$ gcloud compute images create ci-default-v5 --source-disk xxx --source-disk-zone us-east1-b
$ gcloud compute images create ci-default-$(date +%Y%m%d%H%M) --source-disk xxx --source-disk-zone us-east1-b --family ci-default
```
or
```
$ gcloud compute images create ci-cuda-v5 --source-disk xxx --source-disk-zone us-east1-b
$ gcloud compute images create ci-cuda-$(date +%Y%m%d%H%M) --source-disk xxx --source-disk-zone us-east1-b --family ci-cuda
```
5. Delete the new VM instance.
6. Go to the Instance templates tab, find the existing template named

4
ci/buildkite-snap.yml Normal file

@@ -0,0 +1,4 @@
steps:
- command: "ci/snap.sh"
timeout_in_minutes: 40
name: "snap [public]"


@@ -1,16 +1,21 @@
steps:
- command: "ci/docker-run.sh rust ci/test-stable.sh"
name: "stable [public]"
timeout_in_minutes: 20
env:
CARGO_TARGET_CACHE_NAME: "stable"
timeout_in_minutes: 30
- command: "ci/shellcheck.sh"
name: "shellcheck [public]"
timeout_in_minutes: 20
- wait
- command: "ci/docker-run.sh rustlang/rust:nightly ci/test-nightly.sh"
- command: "ci/docker-run.sh solanalabs/rust-nightly ci/test-nightly.sh"
name: "nightly [public]"
timeout_in_minutes: 20
env:
CARGO_TARGET_CACHE_NAME: "nightly"
timeout_in_minutes: 30
- command: "ci/test-stable-perf.sh"
name: "stable-perf [public]"
env:
CARGO_TARGET_CACHE_NAME: "stable-perf"
timeout_in_minutes: 20
retry:
automatic:
@@ -18,14 +23,22 @@ steps:
limit: 2
agents:
- "queue=cuda"
- command: "ci/snap.sh [public]"
- command: "ci/pr-snap.sh"
timeout_in_minutes: 20
name: "snap [public]"
- wait
- command: "ci/publish-crate.sh [public]"
- command: "ci/publish-crate.sh"
timeout_in_minutes: 20
name: "publish crate"
- command: "ci/hoover.sh [public]"
name: "publish crate [public]"
- command: "ci/hoover.sh"
timeout_in_minutes: 20
name: "clean agent"
name: "clean agent [public]"
- trigger: "solana-snap"
branches: "!pull/*"
async: true
build:
message: "${BUILDKITE_MESSAGE}"
commit: "${BUILDKITE_COMMIT}"
branch: "${BUILDKITE_BRANCH}"
env:
TRIGGERED_BUILDKITE_TAG: "${BUILDKITE_TAG}"


@@ -22,12 +22,11 @@ shift
ARGS=(
--workdir /solana
--volume "$PWD:/solana"
--env "HOME=/solana"
--volume "$HOME:/home"
--env "CARGO_HOME=/home/.cargo"
--rm
)
ARGS+=(--env "CARGO_HOME=/solana/.cargo")
# kcov tries to set the personality of the binary which docker
# doesn't allow by default.
ARGS+=(--security-opt "seccomp=unconfined")


@@ -0,0 +1,3 @@
FROM rustlang/rust:nightly
RUN cargo install --force clippy cargo-cov


@@ -0,0 +1,6 @@
Docker image containing rust nightly and some preinstalled crates used in CI.
This image may be manually updated by running `./build.sh` if you are a member
of the [Solana Labs](https://hub.docker.com/u/solanalabs/) Docker Hub
organization, but it is also automatically updated periodically by
[this automation](https://buildkite.com/solana-labs/solana-ci-docker-rust-nightly).


@@ -0,0 +1,6 @@
#!/bin/bash -ex
cd "$(dirname "$0")"
docker build -t solanalabs/rust-nightly .
docker push solanalabs/rust-nightly


@@ -2,6 +2,6 @@ FROM snapcraft/xenial-amd64
# Update snapcraft to latest version
RUN apt-get update -qq \
&& apt-get install -y snapcraft \
&& apt-get install -y snapcraft daemontools \
&& rm -rf /var/lib/apt/lists/* \
&& snapcraft --version

8
ci/is-pr.sh Executable file

@@ -0,0 +1,8 @@
#!/bin/bash -e
#
# The standard BUILDKITE_PULL_REQUEST environment variable is always "false" due
# to how solana-ci-gate is used to trigger PR builds rather than using the
# standard Buildkite PR trigger.
#
[[ $BUILDKITE_BRANCH =~ pull/* ]]

18
ci/pr-snap.sh Executable file

@@ -0,0 +1,18 @@
#!/bin/bash -e
#
# Only run snap.sh for pull requests that modify files under /snap
#
cd "$(dirname "$0")"
if ./is-pr.sh; then
affected_files="$(buildkite-agent meta-data get affected_files)"
echo "Affected files in this PR: $affected_files"
if [[ ! ":$affected_files:" =~ :snap/ ]]; then
echo "Skipping snap build as no files under /snap were modified"
exit 0
fi
exec ./snap.sh
else
echo "Skipping snap build as this is not a pull request"
fi


@@ -3,11 +3,15 @@
cd "$(dirname "$0")/.."
DRYRUN=
if [[ -z $BUILDKITE_BRANCH || $BUILDKITE_BRANCH =~ pull/* ]]; then
if [[ -z $BUILDKITE_BRANCH ]] || ./ci/is-pr.sh; then
DRYRUN="echo"
fi
if [[ -z "$BUILDKITE_TAG" ]]; then
# BUILDKITE_TAG is the normal environment variable set by Buildkite. However
# when this script is run from a triggered pipeline, TRIGGERED_BUILDKITE_TAG is
# used instead of BUILDKITE_TAG (due to a Buildkite limitation that prevents
# BUILDKITE_TAG from propagating through to triggered pipelines)
if [[ -z "$BUILDKITE_TAG" && -z "$TRIGGERED_BUILDKITE_TAG" ]]; then
SNAP_CHANNEL=edge
else
SNAP_CHANNEL=beta
@@ -33,8 +37,17 @@ fi
set -x
echo --- checking for multilog
if [[ ! -x /usr/bin/multilog ]]; then
echo "multilog not found, install with: sudo apt-get install -y daemontools"
exit 1
fi
echo --- build
snapcraft
source ci/upload_ci_artifact.sh
upload_ci_artifact solana_*.snap
echo --- publish
$DRYRUN snapcraft push solana_*.snap --release $SNAP_CHANNEL


@@ -13,11 +13,11 @@ _() {
_ cargo build --verbose --features unstable
_ cargo test --verbose --features unstable
_ cargo bench --verbose --features unstable
_ cargo clippy -- --deny=warnings
exit 0
# Coverage ...
_ cargo install --force cargo-cov
# Coverage disabled (see issue #433)
_ cargo cov test
_ cargo cov report


@@ -12,7 +12,7 @@ _() {
}
_ rustup component add rustfmt-preview
_ cargo fmt -- --write-mode=diff
_ cargo fmt -- --write-mode=check
_ cargo build --verbose
_ cargo test --verbose
_ cargo test -- --ignored
_ cargo bench --verbose

135
ci/testnet-deploy.sh Executable file

@@ -0,0 +1,135 @@
#!/bin/bash -e
#
# Deploys the Solana software running on the testnet full nodes
#
# This script must be run by a user/machine that has successfully authenticated
# with GCP and has sufficient permission.
#
cd "$(dirname "$0")/.."
if [[ -z $SOLANA_METRICS_CONFIG ]]; then
echo Error: SOLANA_METRICS_CONFIG environment variable is unset
exit 1
fi
# Default to edge channel. To select the beta channel:
# export SOLANA_SNAP_CHANNEL=beta
if [[ -z $SOLANA_SNAP_CHANNEL ]]; then
SOLANA_SNAP_CHANNEL=edge
fi
case $SOLANA_SNAP_CHANNEL in
edge)
publicUrl=master.testnet.solana.com
publicIp=$(dig +short $publicUrl | head -n1)
;;
beta)
publicUrl=testnet.solana.com
publicIp= # Use default value
;;
*)
echo Error: Unknown SOLANA_SNAP_CHANNEL=$SOLANA_SNAP_CHANNEL
exit 1
;;
esac
resourcePrefix=${publicUrl//./-}
vmlist=("$resourcePrefix":us-west1-b) # Leader is hard coded as the first entry
validatorNamePrefix=$resourcePrefix-validator-
echo "--- Available validators for $publicUrl"
filter="name~^$validatorNamePrefix"
gcloud compute instances list --filter="$filter"
while read -r vmName vmZone status; do
if [[ $status != RUNNING ]]; then
echo "Warning: $vmName is not RUNNING, ignoring it."
continue
fi
vmlist+=("$vmName:$vmZone")
done < <(gcloud compute instances list --filter="$filter" --format 'value(name,zone,status)')
wait_for_node() {
declare pid=$1
declare ok=true
wait "$pid" || ok=false
cat "log-$pid.txt"
if ! $ok; then
echo ^^^ +++
exit 1
fi
}
echo "--- Refreshing leader for $publicUrl"
leader=true
pids=()
count=1
for info in "${vmlist[@]}"; do
nodePosition="($count/${#vmlist[*]})"
vmName=${info%:*}
vmZone=${info#*:}
echo "Starting refresh for $vmName $nodePosition"
(
SECONDS=0
echo "--- $vmName in zone $vmZone $nodePosition"
if $leader; then
nodeConfig="mode=leader+drone metrics-config=$SOLANA_METRICS_CONFIG"
if [[ -n $SOLANA_CUDA ]]; then
nodeConfig="$nodeConfig enable-cuda=1"
fi
else
nodeConfig="mode=validator metrics-config=$SOLANA_METRICS_CONFIG leader-address=$publicIp"
fi
set -x
gcloud compute ssh "$vmName" --zone "$vmZone" \
--ssh-flag="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -t" \
--command="\
set -ex; \
logmarker='solana deploy $(date)/$RANDOM'; \
sudo snap remove solana; \
logger \$logmarker; \
sudo snap install solana --$SOLANA_SNAP_CHANNEL --devmode; \
sudo snap set solana $nodeConfig; \
snap info solana; \
echo Slight delay to get more syslog output; \
sleep 2; \
sudo grep -Pzo \"\$logmarker(.|\\n)*\" /var/log/syslog \
"
echo "Succeeded in ${SECONDS} seconds"
) > "log-$vmName.txt" 2>&1 &
pid=$!
# Rename log file so it can be discovered later by $pid
mv "log-$vmName.txt" "log-$pid.txt"
if $leader; then
echo Waiting for leader...
# Wait for the leader to initialize before starting the validators
# TODO: Remove this limitation eventually.
wait_for_node "$pid"
echo "--- Refreshing validators"
else
# Slow down deployment to ~20 machines a minute to avoid triggering GCP login
# quota limits (each |ssh| counts as a login)
sleep 3
pids+=("$pid")
fi
leader=false
count=$((count + 1))
done
echo --- Waiting for validators
for pid in "${pids[@]}"; do
wait_for_node "$pid"
done
echo "--- $publicUrl sanity test"
USE_SNAP=1 ci/testnet-sanity.sh $publicUrl
exit 0

17
ci/testnet-sanity.sh Executable file

@@ -0,0 +1,17 @@
#!/bin/bash -e
#
# Perform a quick sanity test on the specified testnet
#
cd "$(dirname "$0")/.."
TESTNET=$1
if [[ -z $TESTNET ]]; then
TESTNET=testnet.solana.com
fi
echo "--- $TESTNET: wallet sanity"
multinode-demo/test/wallet-sanity.sh $TESTNET
echo --- fin
exit 0

18
ci/upload_ci_artifact.sh Normal file

@@ -0,0 +1,18 @@
# |source| me
upload_ci_artifact() {
echo "--- artifact: $1"
if [[ -r "$1" ]]; then
ls -l "$1"
if ${BUILDKITE:-false}; then
(
set -x
buildkite-agent artifact upload "$1"
)
fi
else
echo ^^^ +++
echo "$1 not found"
fi
}


@@ -1,17 +1,33 @@
#!/bin/bash
#
# usage: $0 <rsync network path to solana repo on leader machine> <number of nodes in the network>
#
if [[ -z $1 ]]; then
echo "usage: $0 [network path to solana repo on leader machine] <number of nodes in the network>"
exit 1
here=$(dirname "$0")
# shellcheck source=multinode-demo/common.sh
source "$here"/common.sh
leader=$1
if [[ -z $leader ]]; then
if [[ -d "$SNAP" ]]; then
leader=testnet.solana.com # Default to testnet when running as a Snap
else
leader=$here/.. # Default to local solana repo
fi
fi
count=${2:-1}
LEADER=$1
COUNT=${2:-1}
rsync_leader_url=$(rsync_url "$leader")
rsync -vz "$LEADER"/{leader.json,mint-demo.json} . || exit $?
set -ex
mkdir -p "$SOLANA_CONFIG_CLIENT_DIR"
$rsync -vPz "$rsync_leader_url"/config/leader.json "$SOLANA_CONFIG_CLIENT_DIR"/
# if RUST_LOG is unset, default to info
export RUST_LOG=${RUST_LOG:-solana=info}
client_json="$SOLANA_CONFIG_CLIENT_DIR"/client.json
[[ -r $client_json ]] || $solana_keygen -o "$client_json"
$solana_client_demo \
-n "$count" \
-l "$SOLANA_CONFIG_CLIENT_DIR"/leader.json \
-k "$SOLANA_CONFIG_CLIENT_DIR"/client.json \
cargo run --release --bin solana-client-demo -- \
-n "$COUNT" -l leader.json -d < mint-demo.json 2>&1 | tee client.log

167
multinode-demo/common.sh Normal file

@@ -0,0 +1,167 @@
# |source| this file
#
# Disable complaints about unused variables in this file:
# shellcheck disable=2034
# shellcheck disable=2154 # 'here' is referenced but not assigned
if [[ -z $here ]]; then
echo "|here| is not defined"
exit 1
fi
rsync=rsync
leader_logger="cat"
validator_logger="cat"
drone_logger="cat"
if [[ -d "$SNAP" ]]; then # Running inside a Linux Snap?
solana_program() {
declare program="$1"
if [[ "$program" = wallet || "$program" = client-demo ]]; then
# TODO: Merge wallet.sh/client.sh functionality into
# solana-wallet/solana-demo-client proper and remove this special case
printf "%s/bin/solana-%s" "$SNAP" "$program"
else
printf "%s/command-%s.wrapper" "$SNAP" "$program"
fi
}
rsync="$SNAP"/bin/rsync
multilog="$SNAP/bin/multilog t s16777215"
leader_logger="$multilog $SNAP_DATA/leader"
validator_logger="$multilog t $SNAP_DATA/validator"
drone_logger="$multilog $SNAP_DATA/drone"
# Create log directories manually to prevent multilog from creating them as
# 0700
mkdir -p "$SNAP_DATA"/{drone,leader,validator}
SOLANA_METRICS_CONFIG="$(snapctl get metrics-config)"
SOLANA_CUDA="$(snapctl get enable-cuda)"
RUST_LOG="$(snapctl get rust-log)"
elif [[ -n "$USE_SNAP" ]]; then # Use the Linux Snap binaries
solana_program() {
declare program="$1"
printf "solana.%s" "$program"
}
elif [[ -n "$USE_INSTALL" ]]; then # Assume |cargo install| was run
solana_program() {
declare program="$1"
printf "solana-%s" "$program"
}
# CUDA was/wasn't selected at build time, can't affect CUDA state here
unset SOLANA_CUDA
else
solana_program() {
declare program="$1"
declare features=""
if [[ "$program" =~ ^(.*)-cuda$ ]]; then
program=${BASH_REMATCH[1]}
features="--features=cuda"
fi
if [[ -z "$DEBUG" ]]; then
maybe_release=--release
fi
printf "cargo run $maybe_release --bin solana-%s %s -- " "$program" "$features"
}
if [[ -n $SOLANA_CUDA ]]; then
# Locate perf libs downloaded by |./fetch-perf-libs.sh|
LD_LIBRARY_PATH=$(cd "$here" && dirname "$PWD"):$LD_LIBRARY_PATH
export LD_LIBRARY_PATH
fi
fi
solana_client_demo=$(solana_program client-demo)
solana_wallet=$(solana_program wallet)
solana_drone=$(solana_program drone)
solana_fullnode=$(solana_program fullnode)
solana_fullnode_config=$(solana_program fullnode-config)
solana_fullnode_cuda=$(solana_program fullnode-cuda)
solana_genesis=$(solana_program genesis)
solana_keygen=$(solana_program keygen)
export RUST_LOG=${RUST_LOG:-solana=info} # if RUST_LOG is unset, default to info
export RUST_BACKTRACE=1
# The SOLANA_METRICS_CONFIG environment variable is formatted as a
# comma-delimited list of parameters. All parameters are optional.
#
# Example:
# export SOLANA_METRICS_CONFIG="host=<metrics host>,db=<database name>,u=<username>,p=<password>"
#
configure_metrics() {
[[ -n $SOLANA_METRICS_CONFIG ]] || return
declare metrics_params
IFS=',' read -r -a metrics_params <<< "$SOLANA_METRICS_CONFIG"
for param in "${metrics_params[@]}"; do
IFS='=' read -r -a pair <<< "$param"
if [[ "${#pair[@]}" != 2 ]]; then
echo Error: invalid metrics parameter: "$param" >&2
else
declare name="${pair[0]}"
declare value="${pair[1]}"
case "$name" in
host)
export INFLUX_HOST="$value"
echo INFLUX_HOST="$INFLUX_HOST" >&2
;;
db)
export INFLUX_DATABASE="$value"
echo INFLUX_DATABASE="$INFLUX_DATABASE" >&2
;;
u)
export INFLUX_USERNAME="$value"
echo INFLUX_USERNAME="$INFLUX_USERNAME" >&2
;;
p)
export INFLUX_PASSWORD="$value"
echo INFLUX_PASSWORD="********" >&2
;;
*)
echo Error: Unknown metrics parameter name: "$name" >&2
;;
esac
fi
done
}
configure_metrics
tune_networking() {
# Reference: https://medium.com/@CameronSparr/increase-os-udp-buffers-to-improve-performance-51d167bb1360
if [[ $(uname) = Linux ]]; then
(
set -x +e
# test the existence of the sysctls before trying to set them
# go ahead and return true and don't exit if these calls fail
sysctl net.core.rmem_max 2>/dev/null 1>/dev/null &&
sudo sysctl -w net.core.rmem_max=26214400 1>/dev/null 2>/dev/null
sysctl net.core.rmem_default 2>/dev/null 1>/dev/null &&
sudo sysctl -w net.core.rmem_default=26214400 1>/dev/null 2>/dev/null
) || true
fi
}
SOLANA_CONFIG_DIR=${SNAP_DATA:-$PWD}/config
SOLANA_CONFIG_PRIVATE_DIR=${SNAP_DATA:-$PWD}/config-private
SOLANA_CONFIG_CLIENT_DIR=${SNAP_USER_DATA:-$PWD}/config-client
rsync_url() { # adds the `rsync://` prefix to URLs that need it
declare url="$1"
if [[ "$url" =~ ^.*:.*$ ]]; then
# assume remote-shell transport when colon is present, use $url unmodified
echo "$url"
return
fi
if [[ -d "$url" ]]; then
# assume local directory if $url is a valid directory, use $url unmodified
echo "$url"
return
fi
# Default to rsync:// URL
echo "rsync://$url"
}

42
multinode-demo/drone.sh Executable file

@@ -0,0 +1,42 @@
#!/bin/bash
#
# usage: $0 <rsync network path to solana repo on leader machine>
#
here=$(dirname "$0")
# shellcheck source=multinode-demo/common.sh
source "$here"/common.sh
SOLANA_CONFIG_DIR="$SOLANA_CONFIG_DIR"-drone
if [[ -d "$SNAP" ]]; then
# Exit if mode is not yet configured
# (typically the case after the Snap is first installed)
[[ -n "$(snapctl get mode)" ]] || exit 0
# Select leader from the Snap configuration
leader_address="$(snapctl get leader-address)"
if [[ -z "$leader_address" ]]; then
# Assume drone is running on the same node as the leader by default
leader_address="localhost"
fi
leader="$leader_address"
else
leader=${1:-${here}/..} # Default to local solana repo
fi
[[ -f "$SOLANA_CONFIG_PRIVATE_DIR"/mint.json ]] || {
echo "$SOLANA_CONFIG_PRIVATE_DIR/mint.json not found, create it by running:"
echo
echo " ${here}/setup.sh -t leader"
exit 1
}
rsync_leader_url=$(rsync_url "$leader")
set -ex
mkdir -p "$SOLANA_CONFIG_DIR"
$rsync -vPz "$rsync_leader_url"/config/leader.json "$SOLANA_CONFIG_DIR"/
set -o pipefail
$solana_drone \
-l "$SOLANA_CONFIG_DIR"/leader.json -k "$SOLANA_CONFIG_PRIVATE_DIR"/mint.json \
2>&1 | $drone_logger

80
multinode-demo/gce_multinode.sh Executable file

@@ -0,0 +1,80 @@
#!/bin/bash
command=$1
prefix=
num_nodes=
out_file=
image_name="ubuntu-16-04-cuda-9-2-new"
shift
usage() {
exitcode=0
if [[ -n "$1" ]]; then
exitcode=1
echo "Error: $*"
fi
cat <<EOF
usage: $0 <create|delete> <-p prefix> <-n num_nodes> <-o file> [-i image-name]
Manage a GCE multinode network
create|delete - Create or delete the network
-p prefix - A common prefix for node names, to avoid collision
-n num_nodes - Number of nodes
-o out_file - Used for create option. Outputs an array of IP addresses
of new nodes to the file
-i image_name - Existing image on GCE (default $image_name)
EOF
exit $exitcode
}
while getopts "h?p:i:n:o:" opt; do
case $opt in
h | \?)
usage
;;
p)
prefix=$OPTARG
;;
i)
image_name=$OPTARG
;;
o)
out_file=$OPTARG
;;
n)
num_nodes=$OPTARG
;;
*)
usage "Error: unhandled option: $opt"
;;
esac
done
set -e
[[ -n $command ]] || usage "Need a command (create|delete)"
[[ -n $prefix ]] || usage "Need a prefix for GCE instance names"
[[ -n $num_nodes ]] || usage "Need number of nodes"
nodes=()
for i in $(seq 1 "$num_nodes"); do
nodes+=("$prefix$i")
done
if [[ $command == "create" ]]; then
[[ -n $out_file ]] || usage "Need an outfile to store IP Addresses"
ip_addr_list=$(gcloud beta compute instances create "${nodes[@]}" --zone=us-west1-b --tags=testnet \
--image="$image_name" | awk '/RUNNING/ {print $5}')
echo "ip_addr_array=($ip_addr_list)" >"$out_file"
elif [[ $command == "delete" ]]; then
gcloud beta compute instances delete "${nodes[@]}"
else
usage "Unknown command: $command"
fi


@@ -1,28 +1,32 @@
#!/bin/bash
here=$(dirname "$0")
# shellcheck source=multinode-demo/common.sh
source "$here"/common.sh
# shellcheck source=/dev/null
. "${here}"/myip.sh
if [[ -d "$SNAP" ]]; then
# Exit if mode is not yet configured
# (typically the case after the Snap is first installed)
[[ -n "$(snapctl get mode)" ]] || exit 0
fi
myip=$(myip) || exit $?
[[ -f leader-"${myip}".json ]] || {
echo "I can't find a matching leader config file for \"${myip}\"...
Please run ${here}/setup.sh first.
"
[[ -f "$SOLANA_CONFIG_DIR"/leader.json ]] || {
echo "$SOLANA_CONFIG_DIR/leader.json not found, create it by running:"
echo
echo " ${here}/setup.sh -t leader"
exit 1
}
# if RUST_LOG is unset, default to info
export RUST_LOG=${RUST_LOG:-solana=info}
if [[ -n "$SOLANA_CUDA" ]]; then
program="$solana_fullnode_cuda"
else
program="$solana_fullnode"
fi
[[ $(uname) = Linux ]] && sudo sysctl -w net.core.rmem_max=26214400 1>/dev/null 2>/dev/null
tune_networking
# this makes a leader.json file available alongside genesis, etc. for
# validators and clients
cp leader-"${myip}".json leader.json
cargo run --release --bin solana-fullnode -- \
-l leader-"${myip}".json \
< genesis.log tx-*.log \
> tx-"$(date -u +%Y%m%d%H%M%S%N)".log
set -xo pipefail
$program \
--identity "$SOLANA_CONFIG_DIR"/leader.json \
--ledger "$SOLANA_CONFIG_DIR"/ledger.log \
2>&1 | $leader_logger


@@ -1,58 +0,0 @@
#!/bin/bash
function myip()
{
declare ipaddrs=( )
# query interwebs
mapfile -t ipaddrs < <(curl -s ifconfig.co)
# machine's interfaces
mapfile -t -O "${#ipaddrs[*]}" ipaddrs < \
<(ifconfig | awk '/inet(6)? (addr:)?/ {print $2}')
ipaddrs=( "${extips[@]}" "${ipaddrs[@]}" )
if (( ! ${#ipaddrs[*]} ))
then
echo "
myip: error: I'm having trouble determining what our IP address is...
Are we connected to a network?
"
return 1
fi
declare prompt="
Please choose the IP address you want to advertise to the network:
0) ${ipaddrs[0]} <====== this one was returned by the interwebs...
"
for ((i=1; i < ${#ipaddrs[*]}; i++))
do
prompt+=" $i) ${ipaddrs[i]}
"
done
while read -r -p "${prompt}
please enter a number [0 for default]: " which
do
[[ -z ${which} ]] && break;
[[ ${which} =~ [0-9]+ ]] && (( which < ${#ipaddrs[*]} )) && break;
echo "Ug. invalid entry \"${which}\"...
"
sleep 1
done
which=${which:-0}
echo "${ipaddrs[which]}"
}
if [[ ${0} == "${BASH_SOURCE[0]}" ]]
then
myip "$@"
fi

14
multinode-demo/remote_leader.sh Executable file

@@ -0,0 +1,14 @@
#!/bin/bash -e
[[ -n $FORCE ]] || exit
chmod 600 ~/.ssh/authorized_keys ~/.ssh/id_rsa
PATH="$HOME"/.cargo/bin:"$PATH"
./fetch-perf-libs.sh
# Run setup
USE_INSTALL=1 ./multinode-demo/setup.sh -p
USE_INSTALL=1 SOLANA_CUDA=1 ./multinode-demo/leader.sh >leader.log 2>&1 &
USE_INSTALL=1 ./multinode-demo/drone.sh >drone.log 2>&1 &

186
multinode-demo/remote_nodes.sh Executable file

@@ -0,0 +1,186 @@
#!/bin/bash
command=$1
ip_addr_file=
remote_user=
ssh_keys=
shift
usage() {
exitcode=0
if [[ -n "$1" ]]; then
exitcode=1
echo "Error: $*"
fi
cat <<EOF
usage: $0 <start|stop> <-f IP Addr Array file> <-u username> [-k ssh-keys]
Manage a remote multinode network
start|stop - Start or stop the network
-f file - A bash script that exports an array of IP addresses, ip_addr_array.
Elements of the array are the public IP addresses of the remote nodes.
-u username - The username for logging into remote nodes.
-k ssh-keys - Path to a public/private key pair that remote nodes can use to perform
rsync and ssh among themselves. Must contain both the public and private keys.
EOF
exit $exitcode
}
while getopts "h?f:u:k:" opt; do
case $opt in
h | \?)
usage
;;
f)
ip_addr_file=$OPTARG
;;
u)
remote_user=$OPTARG
;;
k)
ssh_keys=$OPTARG
;;
*)
usage "Error: unhandled option: $opt"
;;
esac
done
set -e
# Sample IP Address array file contents
# ip_addr_array=(192.168.1.1 192.168.1.5 192.168.2.2)
[[ -n $command ]] || usage "Need a command (start|stop)"
[[ -n $ip_addr_file ]] || usage "Need a file with IP address array"
[[ -n $remote_user ]] || usage "Need the username for remote nodes"
ip_addr_array=()
# Get IP address array
# shellcheck source=/dev/null
source "$ip_addr_file"
build_project() {
echo "Build started at $(date)"
SECONDS=0
# Build and install locally
PATH="$HOME"/.cargo/bin:"$PATH"
cargo install --force
echo "Build took $SECONDS seconds"
}
common_start_setup() {
ip_addr=$1
# Killing sshguard for now. TODO: Find a better solution
# sshguard is blacklisting IP addresses after ssh-keyscan and ssh login attempts
ssh -n -f "$remote_user@$ip_addr" " \
set -ex; \
sudo service sshguard stop; \
sudo apt-get --assume-yes install rsync libssl-dev; \
mkdir -p ~/.ssh ~/solana ~/.cargo/bin; \
" >log/"$ip_addr".log
# If provided, deploy SSH keys
if [[ -n $ssh_keys ]]; then
{
rsync -vPrz "$ssh_keys"/id_rsa "$remote_user@$ip_addr":~/.ssh/
rsync -vPrz "$ssh_keys"/id_rsa.pub "$remote_user@$ip_addr":~/.ssh/
rsync -vPrz "$ssh_keys"/id_rsa.pub "$remote_user@$ip_addr":~/.ssh/authorized_keys
} >>log/"$ip_addr".log
fi
}
start_leader() {
common_start_setup "$1"
{
rsync -vPrz ~/.cargo/bin/solana* "$remote_user@$ip_addr":~/.cargo/bin/
rsync -vPrz ./multinode-demo "$remote_user@$ip_addr":~/solana/
rsync -vPrz ./fetch-perf-libs.sh "$remote_user@$ip_addr":~/solana/
ssh -n -f "$remote_user@$ip_addr" 'cd solana; FORCE=1 ./multinode-demo/remote_leader.sh'
} >>log/"$1".log
leader_ip=$1
leader_time=$SECONDS
SECONDS=0
}
start_validator() {
common_start_setup "$1"
ssh "$remote_user@$ip_addr" "rsync -vPrz ""$remote_user@$leader_ip"":~/solana/multinode-demo ~/solana/" >>log/"$1".log
ssh -n -f "$remote_user@$ip_addr" "cd solana; FORCE=1 ./multinode-demo/remote_validator.sh $leader_ip" >>log/"$1".log
}
start_all_nodes() {
echo "Deployment started at $(date)"
SECONDS=0
count=0
leader_ip=
leader_time=
mkdir -p log
for ip_addr in "${ip_addr_array[@]}"; do
ssh-keygen -R "$ip_addr" >log/local.log
ssh-keyscan "$ip_addr" >>~/.ssh/known_hosts 2>/dev/null
if ((!count)); then
# Start the leader on the first node
echo "Leader node $ip_addr, killing previous instance and restarting"
start_leader "$ip_addr"
else
# Start validator on all other nodes
echo "Validator[$count] node $ip_addr, killing previous instance and restarting"
start_validator "$ip_addr" &
# TBD: Remove the sleep or reduce time once GCP login quota is increased
sleep 2
fi
((count = count + 1))
done
wait
((validator_count = count - 1))
echo "Deployment finished at $(date)"
echo "Leader deployment too $leader_time seconds"
echo "$validator_count Validator deployment took $SECONDS seconds"
}
stop_all_nodes() {
SECONDS=0
local count=0
for ip_addr in "${ip_addr_array[@]}"; do
echo "Stopping node[$count] $ip_addr. Remote user $remote_user"
ssh -n -f "$remote_user@$ip_addr" " \
set -ex; \
sudo service sshguard stop; \
pkill -9 solana-; \
pkill -9 validator; \
pkill -9 leader; \
"
sleep 2
((count = count + 1))
echo "Stopped node[$count] $ip_addr"
done
echo "Stopping $count nodes took $SECONDS seconds"
}
if [[ $command == "start" ]]; then
#build_project
stop_all_nodes
start_all_nodes
elif [[ $command == "stop" ]]; then
stop_all_nodes
else
usage "Unknown command: $command"
fi


@@ -0,0 +1,16 @@
#!/bin/bash -e
[[ -n $FORCE ]] || exit
chmod 600 ~/.ssh/authorized_keys ~/.ssh/id_rsa
PATH="$HOME"/.cargo/bin:"$PATH"
ssh-keygen -R "$1"
ssh-keyscan "$1" >>~/.ssh/known_hosts 2>/dev/null
rsync -vPrz "$1":~/.cargo/bin/solana* ~/.cargo/bin/
# Run setup
USE_INSTALL=1 ./multinode-demo/setup.sh -p
USE_INSTALL=1 ./multinode-demo/validator.sh "$1":~/solana "$1" >validator.log 2>&1


@@ -1,15 +1,110 @@
#!/bin/bash
here=$(dirname "$0")
# shellcheck source=multinode-demo/common.sh
source "$here"/common.sh
# shellcheck source=/dev/null
. "${here}"/myip.sh
usage () {
exitcode=0
if [[ -n "$1" ]]; then
exitcode=1
echo "Error: $*"
fi
cat <<EOF
usage: $0 [-n num_tokens] [-l] [-p] [-t node_type]
myip=$(myip) || exit $?
Creates a fullnode configuration
num_tokens=${1:-1000000000}
-n num_tokens - Number of tokens to create
-l - Detect network address from local machine configuration, which
may be a private IP address inaccessible on the Internet (default)
-p - Detect public address using public Internet servers
-t node_type - Create configuration files only for this kind of node. Valid
options are validator or leader. Creates configuration files
for both by default
cargo run --release --bin solana-mint-demo <<<"${num_tokens}" > mint-demo.json
cargo run --release --bin solana-genesis-demo < mint-demo.json > genesis.log
EOF
exit $exitcode
}
cargo run --release --bin solana-fullnode-config -- -d > leader-"${myip}".json
cargo run --release --bin solana-fullnode-config -- -b 9000 -d > validator-"${myip}".json
ip_address_arg=-l
num_tokens=1000000000
node_type_leader=true
node_type_validator=true
while getopts "h?n:lpt:" opt; do
case $opt in
h|\?)
usage
exit 0
;;
l)
ip_address_arg=-l
;;
p)
ip_address_arg=-p
;;
n)
num_tokens="$OPTARG"
;;
t)
node_type="$OPTARG"
case $OPTARG in
leader)
node_type_leader=true
node_type_validator=false
;;
validator)
node_type_leader=false
node_type_validator=true
;;
*)
usage "Error: unknown node type: $node_type"
;;
esac
;;
*)
usage "Error: unhandled option: $opt"
;;
esac
done
leader_address_args=("$ip_address_arg")
validator_address_args=("$ip_address_arg" -b 9000)
leader_id_path="$SOLANA_CONFIG_PRIVATE_DIR"/leader-id.json
validator_id_path="$SOLANA_CONFIG_PRIVATE_DIR"/validator-id.json
mint_path="$SOLANA_CONFIG_PRIVATE_DIR"/mint.json
set -e
echo "Cleaning $SOLANA_CONFIG_DIR"
rm -rvf "$SOLANA_CONFIG_DIR"
mkdir -p "$SOLANA_CONFIG_DIR"
rm -rvf "$SOLANA_CONFIG_PRIVATE_DIR"
mkdir -p "$SOLANA_CONFIG_PRIVATE_DIR"
$solana_keygen -o "$leader_id_path"
$solana_keygen -o "$validator_id_path"
if $node_type_leader; then
echo "Creating $mint_path with $num_tokens tokens"
$solana_keygen -o "$mint_path"
echo "Creating $SOLANA_CONFIG_DIR/ledger.log"
$solana_genesis --tokens="$num_tokens" < "$mint_path" > "$SOLANA_CONFIG_DIR"/ledger.log
echo "Creating $SOLANA_CONFIG_DIR/leader.json"
$solana_fullnode_config --keypair="$leader_id_path" "${leader_address_args[@]}" > "$SOLANA_CONFIG_DIR"/leader.json
fi
if $node_type_validator; then
echo "Creating $SOLANA_CONFIG_DIR/validator.json"
$solana_fullnode_config --keypair="$validator_id_path" "${validator_address_args[@]}" > "$SOLANA_CONFIG_DIR"/validator.json
fi
ls -lh "$SOLANA_CONFIG_DIR"/
if $node_type_leader; then
ls -lh "$SOLANA_CONFIG_PRIVATE_DIR"
fi


@@ -0,0 +1,47 @@
#!/bin/bash -e
#
# Wallet sanity test
#
here=$(dirname "$0")
cd "$here"
if [[ -n "$USE_SNAP" ]]; then
# TODO: Merge wallet.sh functionality into solana-wallet proper and
# remove this USE_SNAP case
wallet="solana.wallet $1"
else
wallet="../wallet.sh $1"
fi
# Tokens transferred to this address are lost forever...
garbage_address=vS3ngn1TfQmpsW1Z4NkLuqNAQFF3dYQw8UZ6TCx9bmq
check_balance_output() {
declare expected_output="$1"
exec 42>&1
output=$($wallet balance | tee >(cat - >&42))
if [[ ! "$output" =~ $expected_output ]]; then
echo "Balance is incorrect. Expected: $expected_output"
exit 1
fi
}
pay_and_confirm() {
exec 42>&1
signature=$($wallet pay "$@" | tee >(cat - >&42))
$wallet confirm "$signature"
}
$wallet reset
$wallet address
check_balance_output "Your balance is: 0"
$wallet airdrop --tokens 60
check_balance_output "Your balance is: 60"
$wallet airdrop --tokens 40
check_balance_output "Your balance is: 100"
pay_and_confirm --to $garbage_address --tokens 99
check_balance_output "Your balance is: 1"
echo PASS
exit 0


@@ -1,32 +1,96 @@
#!/bin/bash
here=$(dirname "$0")
# shellcheck source=multinode-demo/common.sh
source "$here"/common.sh
# shellcheck source=/dev/null
. "${here}"/myip.sh
leader=$1
[[ -z ${leader} ]] && {
echo "usage: $0 [network path to solana repo on leader machine]"
usage() {
if [[ -n "$1" ]]; then
echo "$*"
echo
fi
echo "usage: $0 [rsync network path to solana repo on leader machine] [network ip address of leader]"
exit 1
}
myip=$(myip) || exit $?
if [[ "$1" = "-h" || -n "$3" ]]; then
usage
fi
[[ -f validator-"$myip".json ]] || {
echo "I can't find a matching validator config file for \"${myip}\"...
Please run ${here}/setup.sh first.
"
if [[ -d "$SNAP" ]]; then
# Exit if mode is not yet configured
# (typically the case after the Snap is first installed)
[[ -n "$(snapctl get mode)" ]] || exit 0
# Select leader from the Snap configuration
leader_address="$(snapctl get leader-address)"
if [[ -z "$leader_address" ]]; then
# Assume public testnet by default
leader_address=35.230.65.68 # testnet.solana.com
fi
leader="$leader_address"
else
if [[ -n "$3" ]]; then
usage
fi
if [[ -z "$1" ]]; then
leader=${1:-${here}/..} # Default to local solana repo
leader_address=${2:-127.0.0.1} # Default to local leader
elif [[ -z "$2" ]]; then
leader="$1"
leader_address=$(dig +short "$1" | head -n1)
if [[ -z "$leader_address" ]]; then
usage "Error: unable to resolve IP address for $leader"
fi
else
leader="$1"
leader_address="$2"
fi
fi
leader_port=8001
if [[ -n "$SOLANA_CUDA" ]]; then
program="$solana_fullnode_cuda"
else
program="$solana_fullnode"
fi
[[ -f "$SOLANA_CONFIG_DIR"/validator.json ]] || {
echo "$SOLANA_CONFIG_DIR/validator.json not found, create it by running:"
echo
echo " ${here}/setup.sh -t validator"
exit 1
}
rsync -vz "${leader}"/{mint-demo.json,leader.json,genesis.log,tx-*.log} . || exit $?
rsync_leader_url=$(rsync_url "$leader")
[[ $(uname) = Linux ]] && sudo sysctl -w net.core.rmem_max=26214400 1>/dev/null 2>/dev/null
tune_networking
# if RUST_LOG is unset, default to info
export RUST_LOG=${RUST_LOG:-solana=info}
SOLANA_LEADER_CONFIG_DIR="$SOLANA_CONFIG_DIR"/leader-config
rm -rf "$SOLANA_LEADER_CONFIG_DIR"
set -ex
$rsync -vPrz "$rsync_leader_url"/config/ "$SOLANA_LEADER_CONFIG_DIR"
cargo run --release --bin solana-fullnode -- \
-l validator-"${myip}".json -v leader.json \
< genesis.log tx-*.log
# migrate from old ledger format? why not...
if [[ ! -f "$SOLANA_LEADER_CONFIG_DIR"/ledger.log &&
-f "$SOLANA_LEADER_CONFIG_DIR"/genesis.log ]]; then
(shopt -s nullglob &&
cat "$SOLANA_LEADER_CONFIG_DIR"/genesis.log \
"$SOLANA_LEADER_CONFIG_DIR"/tx-*.log) > "$SOLANA_LEADER_CONFIG_DIR"/ledger.log
fi
# Ensure the validator has at least 1 token before connecting to the network
# TODO: Remove this workaround
while ! $solana_wallet \
-l "$SOLANA_LEADER_CONFIG_DIR"/leader.json \
-k "$SOLANA_CONFIG_PRIVATE_DIR"/validator-id.json airdrop --tokens 1; do
sleep 1
done
set -o pipefail
$program \
--identity "$SOLANA_CONFIG_DIR"/validator.json \
--testnet "$leader_address:$leader_port" \
--ledger "$SOLANA_LEADER_CONFIG_DIR"/ledger.log \
2>&1 | $validator_logger

45
multinode-demo/wallet.sh Executable file

@@ -0,0 +1,45 @@
#!/bin/bash
#
# usage: $0 <rsync network path to solana repo on leader machine>
#
here=$(dirname "$0")
# shellcheck source=multinode-demo/common.sh
source "$here"/common.sh
# if $1 isn't host:path, something.com, or a valid local path
if [[ ${1%:} != "$1" || "$1" =~ [^.]\.[^.] || -d $1 ]]; then
leader=$1 # interpret
shift
else
if [[ -d "$SNAP" ]]; then
leader=testnet.solana.com # Default to testnet when running as a Snap
else
leader=$here/.. # Default to local solana repo
fi
fi
if [[ "$1" = "reset" ]]; then
echo Wallet resetting
rm -rf "$SOLANA_CONFIG_CLIENT_DIR"
exit 0
fi
rsync_leader_url=$(rsync_url "$leader")
set -e
mkdir -p "$SOLANA_CONFIG_CLIENT_DIR"
if [[ ! -r "$SOLANA_CONFIG_CLIENT_DIR"/leader.json ]]; then
echo "Fetching leader configuration from $rsync_leader_url"
$rsync -Pz "$rsync_leader_url"/config/leader.json "$SOLANA_CONFIG_CLIENT_DIR"/
fi
client_id_path="$SOLANA_CONFIG_CLIENT_DIR"/id.json
if [[ ! -r $client_id_path ]]; then
echo "Generating client identity: $client_id_path"
$solana_keygen -o "$client_id_path"
fi
# shellcheck disable=SC2086 # $solana_wallet should not be quoted
exec $solana_wallet \
-l "$SOLANA_CONFIG_CLIENT_DIR"/leader.json -k "$client_id_path" "$@"


@@ -1,7 +1,11 @@
# Smart Contracts Engine
The goal of this RFC is to define a set of constraints for APIs and runtime such that we can execute our smart contracts safely on massively parallel hardware such as a GPU. Our runtime is built around an OS *syscall* primitive. The difference in a blockchain is that the OS now does a cryptographic check of memory region ownership before accessing the memory in the Solana kernel.
## Version
version 0.1
## Toolchain Stack
+---------------------+ +---------------------+
@@ -50,6 +54,10 @@ For 3, every load and store that is relative can be checked to be within the exp
For 4, Fully linked PIC ELF with just a single RX segment. Effectively we are linking a shared object with `-fpic -target bpf` and with a linker script to collect everything into a single RX segment. Writable globals are not supported.
### Address Checks
The interface to the module takes a `&mut Vec<Vec<u8>>` in rust, or a `int sz, void* data[sz], int szs[sz]` in `C`. Given the module's bytecode, for each method, we need to analyze the bounds on load and stores into each buffer the module uses. This check needs to be done `on chain`, and after those bounds are computed we can verify that the user supplied array of buffers will not cause a memory fault. For load and stores that we cannot analyze, we can replace with a `safe_load` and `safe_store` instruction that will check the table for access.
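As a rough illustration of the idea only (the function name, buffer layout, and return type here are assumptions, not the actual implementation), a `safe_load` could consult the caller-supplied buffers and refuse any out-of-bounds access:
```
/// Hypothetical sketch: load `len` bytes at `offset` from buffer `buf`,
/// returning None instead of faulting when the access is out of bounds.
fn safe_load(data: &[Vec<u8>], buf: usize, offset: usize, len: usize) -> Option<&[u8]> {
    let b = data.get(buf)?;              // the buffer index must be valid
    let end = offset.checked_add(len)?;  // reject ranges that overflow
    b.get(offset..end)                   // None if the range exceeds the buffer
}
```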
## Loader
The loader is our first smart contract. The job of this contract is to load the actual program with its own instance data. The loader will verify the bytecode and that the object implements the expected entry points.

122
rfcs/rfc-002-consensus.md Normal file

@@ -0,0 +1,122 @@
# Consensus
VERY WIP
The goal of this RFC is to define the consensus algorithm used in solana. This proposal covers a Proof of Stake algorithm that leverages Proof of History. PoH is a permissionless clock for blockchain that is available before consensus. This PoS approach leverages PoH to make strong assumptions about time between partitions.
## Version
version 0.1
## Message Flow
1. Transactions are ingested at the leader.
2. Leader filters for valid transactions.
3. Leader executes valid transactions on its state.
4. Leader packages transactions into blobs.
5. Leader transmits blobs to validator nodes.
a. The set of supermajority + `M` nodes by stake weight is rotated in round-robin fashion.
6. Validators retransmit blobs to peers in their set and to further downstream nodes.
7. Validators validate the transactions and execute them on their state.
8. Validators compute the hash of the state.
9. Validators transmit votes to the leader.
a. Votes are signatures of the hash of the computed state.
10. Leader executes the votes as any other transaction and broadcasts them out to the network.
11. Validators observe their votes, and all the votes from the network.
12. Validators continue voting if the supermajority of stake is observed in the vote for the same hash.
Supermajority is defined as `2/3rds + 1` vote of the PoS stakes.
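For illustration only, that threshold can be checked with integer stake weights; `voted_stake` and `total_stake` are assumed names, not identifiers from the codebase:
```
/// Hypothetical: true when the voted stake reaches 2/3rds + 1 of the total stake.
fn is_supermajority(voted_stake: u64, total_stake: u64) -> bool {
    voted_stake >= total_stake * 2 / 3 + 1
}
```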
## Staking
Validators `stake` some of their spendable sol into a staking account. The stakes are not spendable and can only be used for voting.
```
CreateStake(
PoH count,
PoH hash,
source public key,
amount,
destination public key,
proof of ownership of destination public key,
signature of the message with the source keypair
)
```
Creating the stake has a warmup period of TBD. Unstaking requires the node to miss a certain number of validation votes.
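Purely as an illustration, the message above might be encoded as a struct like the following; every field type is an assumption, since the RFC only names the fields:
```
/// Hypothetical wire format for the CreateStake message described above.
struct CreateStake {
    poh_count: u64,
    poh_hash: [u8; 32],
    source: [u8; 32],            // source public key
    amount: u64,
    destination: [u8; 32],       // destination public key
    destination_proof: [u8; 64], // proof of ownership of the destination key
    signature: [u8; 64],         // signature of the message with the source keypair
}
```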
## Validation Votes
```
Validate(
PoH count,
PoH hash,
stake public key,
signature of the state,
signature of the message with the stake keypair
)
```
## Validator Slashing
Validators `stake` some of their spendable sol into a staking account. The stakes are not spendable and can only be used for voting.
```
Slash(Validate(
PoH count,
PoH hash,
stake public key,
...
signature of the message with the stake keypair
))
```
When the `Slash` vote is processed, validators should look up the `PoH hash` at `PoH count` and compare it with the message. If they do not match, the stake at `stake public key` should be set to `0`.
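A minimal sketch of that check, assuming a `ledger_hash_at` lookup and these types (none of which are defined by the RFC):
```
/// Hypothetical: zero the stake when the slashing evidence contains a vote
/// for a hash that does not match the local ledger at that PoH count.
fn process_slash<F>(ledger_hash_at: F, poh_count: u64, voted_hash: [u8; 32], stake: &mut u64)
where
    F: Fn(u64) -> [u8; 32],
{
    if ledger_hash_at(poh_count) != voted_hash {
        *stake = 0;
    }
}
```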
## Leader Slashing
TBD. The goal of this is to discourage leaders from generating multiple PoH streams.
## Validation Vote Contract
The goal of this contract is to simulate the economic cost of mining on a shorter branch.
1. With my signature I am certifying that I computed `state hash` at `PoH count` and `PoH hash`.
2. I will not vote on a branch that doesn't contain this message for at least `N` counts, or until `PoH count` + `N` is reached by the PoH stream.
3. I will not vote for any other branch below `PoH count`.
a. If there are other votes not present in this PoH history, the validator may need to `cancel` them before creating this vote.
## Leader Seed Generation
Leader selection is decided via a random seed. The process is as follows:
1. Periodically at a specific `PoH count`, select the first vote signatures that create a supermajority from the previous round.
2. Append them together.
3. Hash the string for `N` counts via a similar process as PoH itself.
4. The resulting hash is the random seed for `M` counts, where `M > N`, as in the sketch below.
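A minimal sketch of the four steps above, reusing `solana::hash::hash`; the signature encoding and the choice of `N` are assumptions:
```
use solana::hash::{hash, Hash};

/// Hypothetical: derive the leader-selection seed from the first vote
/// signatures that formed a supermajority in the previous round.
fn leader_seed(supermajority_sigs: &[Vec<u8>], n: usize) -> Hash {
    // 2. append the selected signatures together
    let bytes: Vec<u8> = supermajority_sigs.concat();
    // 3. hash the result for `n` counts, via a similar process as PoH itself
    let mut seed = hash(&bytes);
    for _ in 1..n {
        seed = hash(&seed);
    }
    seed // 4. the random seed for the next `M` counts, M > N
}
```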
## Leader Ranking and Rotation
Leaders transmit for a count of `T`. When `T` is reached all the validators should switch to the next ranked leader. To rank leaders, the supermajority + `M` nodes are shuffled using the random seed calculated above.
TBD: define a ranking for critical partitions without a node from supermajority + `M` set.
## Partition selection
Validators should select the first branch to reach finality, or the highest ranking leader.
## Examples
### Small Partition
1. Network partition M occurs for 10% of the nodes
2. The larger partition K, with 90% of the stake weight continues to operate as normal
3. M cycles through the ranks until one of them is leader.
4. M validators observe 10% of the vote pool, finality is not reached
5. M and K re-connect.
6. M validators cancel their votes on K which are below K's `PoH count`
### Leader Timeout
1. Next rank node observes a timeout.
2. Nodes receiving both PoH streams pick the higher rank node.
3. Step 2 causes a partition, since nodes can only vote for one leader.
4. Partition is resolved just like in the [Small Partition](#small-partition)

54
rfcs/rfc-003-storage.md Normal file

@@ -0,0 +1,54 @@
# Storage
The goal of this RFC is to define a protocol for storing a very large ledger over a p2p network that is verified by solana validators. At full capacity on a 1 Gbps network solana will generate 4 petabytes of data per year. To prevent the network from centralizing around full nodes that have to store the full data set, this protocol proposes a way for mining nodes to provide storage capacity for pieces of the network.
# Version
version 0.1
# Background
The basic idea behind Proof of Replication is encrypting a dataset with a public symmetric key using CBC encryption, then hashing the encrypted dataset. The main problem with the naive approach is that a dishonest storage node can stream the encryption and delete the data as it's hashed. The simple solution is to force the hash to be computed on the reverse of the encryption, or perhaps with a random order. This ensures that all the data is present during the generation of the proof, and it also requires the validator to have the entirety of the encrypted data present for verification of every proof of every identity. So the space required to validate is `(Number of Proofs) * (data size)`.
# Optimization with PoH
Our improvement on this approach is to randomly sample the encrypted blocks faster than it takes to encrypt, and record the hash of those samples into the PoH ledger. Thus the blocks stay in the exact same order for every PoRep, and verification can stream the data and verify all the proofs in a single batch. This way we can verify multiple proofs concurrently, each one on its own CUDA core. With the current generation of graphics cards our network can support up to 14k replication identities or symmetric keys. The total space required for verification is `(2 CBC blocks) * (Number of Identities)`, with a core count equal to `(Number of Identities)`. A CBC block is expected to be 1MB in size.
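To make the sampling idea concrete, here is a rough sketch under stated assumptions: the index-selection rule, the block representation, and the reuse of `solana::hash::hash` are placeholders, not the real protocol:
```
use solana::hash::{hash, Hash};

/// Hypothetical: fold a pseudo-random sample of encrypted CBC blocks into a
/// running hash, seeded by a signature of a PoH hash, so that a verifier
/// replaying the same seed samples the blocks in the same order.
fn sample_proof(encrypted_blocks: &[Vec<u8>], seed_sig: &[u8], samples: usize) -> Hash {
    let mut state = hash(seed_sig);
    for _ in 0..samples {
        // placeholder rule: derive the next block index from the running state
        let idx = state[0] as usize % encrypted_blocks.len();
        // fold the sampled block into the state; in the real protocol this is
        // what gets recorded into the PoH ledger
        let mut buf = state.to_vec();
        buf.extend_from_slice(&encrypted_blocks[idx]);
        state = hash(&buf);
    }
    state
}
```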
# Network
Validators for PoRep are the same validators that are verifying transactions. They have some stake that they have put up as collateral that ensures that their work is honest. If you can prove that a validator verified a fake PoRep, then the validator's stake can be slashed.
Replicators are specialized thin clients. They download a part of the ledger and store it, and provide PoReps of storing the ledger. For each verified PoRep replicators earn a reward of sol from the mining pool.
# Constraints
We have the following constraints:
* At most 14k replication identities can be used, because that's how many CUDA cores we can fit in a $5k box at the moment.
* Verification requires generating the CBC blocks. That requires space of 2 blocks per identity, and 1 CUDA core per identity for the same dataset. So as many identities as possible should be batched at once, with the proofs for those identities verified concurrently against the same dataset.
# Validation and Replication Protocol
1. The network sets the replication target number, let's say 1k. 1k PoRep identities are created from signatures of a PoH hash, so they are tied to a specific PoH hash. It doesn't matter who creates them; they could simply be the last 1k validation signatures we saw for the ledger at that count. This may be just the initial batch of identities, because we want to stagger identity rotation.
2. Any client can use any of these identities to create PoRep proofs. Replicator identities are the CBC encryption keys.
3. Periodically at a specific PoH count, replicators that want to create PoRep proofs sign the PoH hash at that count. That signature is the seed used to pick the block and identity to replicate. A block is 1TB of ledger.
4. Periodically at a specific PoH count, replicators submit PoRep proofs for their selected block. A signature of the PoH hash at that count is the seed used to sample the 1TB encrypted block, and hash it. This is done faster than it takes to encrypt the 1TB block with the original identity.
5. Replicators must submit some number of fake proofs, which they can prove to be fake by providing the seed for the hash result.
6. Periodically at a specific PoH count, validators sign the hash and use the signature to select the 1TB block that they need to validate. They batch all the identities and proofs and submit approval for all the verified ones.
7. After #6, replicator clients submit the proofs of fake proofs.
For any random seed, we force everyone to use a signature that is derived from a PoH hash. Everyone must use the same count, so the same PoH hash is signed by every participant. The signatures are then each cryptographically tied to the keypair, which prevents a leader from grinding on the resulting value for more than 1 identity.
We need to stagger the rotation of the identity keys. Once this gets going, the next identity could be generated by hashing itself with a PoH hash, or via some other process based on the validation signatures.
Since there are many more client identities than encryption identities, we need to split the reward across multiple clients, and prevent Sybil attacks from generating many clients to acquire the same block of data. To remain BFT we want to prevent a single human entity from storing all the replications of a single chunk of the ledger.
Our solution to this is to force the clients to continue using the same identity. If the first round is used to acquire the same block for many client identities, the second round for the same client identities will force a redistribution of the signatures, and therefore of the PoRep identities and blocks. Thus, to get a reward for storage, clients need to store the first block for free, and the network can reward long-lived client identities more than new ones.
# Notes
* We can reduce the costs of verification of PoRep by using PoH, and actually make it feasible to verify a large number of proofs for a global dataset.
* We can eliminate grinding by forcing everyone to sign the same PoH hash and use the signatures as the seed
* The game between validators and replicators is over random blocks and random encryption identities and random data samples. The goal of randomization is to prevent colluding groups from having overlap on data or validation.
* Replicator clients fish for lazy validators by submitting fake proofs that they can prove are fake.
* Replication identities are just symmetric encryption keys; the number of them on the network is our storage replication target. Many more client identities can exist than replicator identities, so an unlimited number of clients can provide proofs of the same replicator identity.
* To defend against Sybil client identities that try to store the same block we force the clients to store for multiple rounds before receiving a reward.

17
snap/README.md Normal file

@@ -0,0 +1,17 @@
## Development
If you're running Ubuntu 16.04 and already have `snapcraft` installed, simply
run:
```
$ snapcraft
```
For other systems we provide a docker image that can be used for snap
development:
```
$ ./ci/docker-run.sh solanalabs/snapcraft snapcraft -d
```
## Reference
* https://docs.snapcraft.io/

34
snap/hooks/configure vendored Executable file

@@ -0,0 +1,34 @@
#!/bin/bash -e
echo Stopping daemons
snapctl stop --disable solana.daemon-drone
snapctl stop --disable solana.daemon-leader
snapctl stop --disable solana.daemon-validator
mode="$(snapctl get mode)"
if [[ -z "$mode" ]]; then
exit 0
fi
ip_address_arg=-p # Use public IP address (TODO: make this configurable?)
num_tokens="$(snapctl get num-tokens)"
case $mode in
leader+drone)
$SNAP/bin/setup.sh ${num_tokens:+-n $num_tokens} ${ip_address_arg} -t leader
snapctl start --enable solana.daemon-leader
snapctl start --enable solana.daemon-drone
;;
leader)
$SNAP/bin/setup.sh ${num_tokens:+-n $num_tokens} ${ip_address_arg} -t leader
snapctl start --enable solana.daemon-leader
;;
validator)
$SNAP/bin/setup.sh ${ip_address_arg} -t validator
snapctl start --enable solana.daemon-validator
;;
*)
echo "Error: Unknown mode: $mode"
exit 1
;;
esac


@@ -10,6 +10,10 @@ grade: devel
# CUDA dependency, so use 'devmode' confinement for now
confinement: devmode
hooks:
configure:
plugs: [network]
apps:
drone:
command: solana-drone
@@ -33,37 +37,82 @@ apps:
plugs:
- network
- network-bind
- home
genesis:
command: solana-genesis
genesis-demo:
command: solana-genesis-demo
mint:
command: solana-mint
mint-demo:
command: solana-mint-demo
keygen:
command: solana-keygen
plugs:
- home
client-demo:
command: solana-client-demo
# TODO: Merge client.sh functionality into solana-client-demo proper
command: client.sh
#command: solana-client-demo
plugs:
- network
- network-bind
- home
wallet:
# TODO: Merge wallet.sh functionality into solana-wallet proper
command: wallet.sh
#command: solana-wallet
plugs:
- network
- home
daemon-validator:
daemon: simple
command: validator.sh
plugs:
- network
- network-bind
daemon-leader:
daemon: simple
command: leader.sh
plugs:
- network
- network-bind
daemon-drone:
daemon: simple
command: drone.sh
plugs:
- network
- network-bind
parts:
solana-cuda:
plugin: rust
rust-channel: stable
rust-features:
- erasure
- cuda
solana:
plugin: nil
prime:
- bin/solana-fullnode-cuda
- usr/lib/libgf_complete.so.1
- usr/lib/libJerasure.so.2
- bin
- usr/lib
override-build: |
# Install CUDA 9.2 runtime
mkdir -p $SNAPCRAFT_PART_INSTALL/usr/
cp -rav /usr/local/cuda-9.2/targets/x86_64-linux/lib/ $SNAPCRAFT_PART_INSTALL/usr/lib
mkdir -p $SNAPCRAFT_PART_INSTALL/usr/lib/x86_64-linux-gnu/
cp -rav /usr/lib/x86_64-linux-gnu/libcuda.* $SNAPCRAFT_PART_INSTALL/usr/lib/x86_64-linux-gnu/
mkdir -p $SNAPCRAFT_PART_INSTALL/usr/lib/nvidia-396/
cp -v /usr/lib/nvidia-396/libnvidia-fatbinaryloader.so* $SNAPCRAFT_PART_INSTALL/usr/lib/nvidia-396/
# Build/install solana-fullnode-cuda
./fetch-perf-libs.sh
snapcraftctl build
cargo install --features=cuda --root $SNAPCRAFT_PART_INSTALL --bin solana-fullnode
mv $SNAPCRAFT_PART_INSTALL/bin/solana-fullnode $SNAPCRAFT_PART_INSTALL
rm -rf $SNAPCRAFT_PART_INSTALL/bin/*
mv $SNAPCRAFT_PART_INSTALL/solana-fullnode $SNAPCRAFT_PART_INSTALL/bin/solana-fullnode-cuda
mkdir -p $SNAPCRAFT_PART_INSTALL/usr/lib/
cp -f libJerasure.so $SNAPCRAFT_PART_INSTALL/usr/lib/libJerasure.so.2
cp -f libgf_complete.so $SNAPCRAFT_PART_INSTALL/usr/lib/libgf_complete.so.1
solana:
plugin: rust
rust-channel: stable
# Build/install all other programs
cargo install --root $SNAPCRAFT_PART_INSTALL --bins
# Install multinode scripts
mkdir -p $SNAPCRAFT_PART_INSTALL/bin
cp -av multinode-demo/* $SNAPCRAFT_PART_INSTALL/bin/
# TODO: build rsync/multilog from source instead of sneaking it in from the host
# system...
set -x
mkdir -p $SNAPCRAFT_PART_INSTALL/bin
cp -av /usr/bin/rsync $SNAPCRAFT_PART_INSTALL/bin/
cp -av /usr/bin/multilog $SNAPCRAFT_PART_INSTALL/bin/


@@ -6,8 +6,11 @@
extern crate libc;
use chrono::prelude::*;
use counter::Counter;
use entry::Entry;
use hash::Hash;
use itertools::Itertools;
use ledger::Block;
use mint::Mint;
use payment_plan::{Payment, PaymentPlan, Witness};
use signature::{KeyPair, PublicKey, Signature};
@@ -17,6 +20,7 @@ use std::result;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::RwLock;
use std::time::Instant;
use streamer::WINDOW_SIZE;
use timing::duration_as_us;
use transaction::{Instruction, Plan, Transaction};
@@ -28,6 +32,8 @@ use transaction::{Instruction, Plan, Transaction};
/// not be processed by the network.
pub const MAX_ENTRY_IDS: usize = 1024 * 16;
pub const VERIFY_BLOCK_SIZE: usize = 16;
/// Reasons a transaction might be rejected.
#[derive(Debug, PartialEq, Eq)]
pub enum BankError {
@@ -42,7 +48,7 @@ pub enum BankError {
/// The bank has seen `Signature` before. This can occur under normal operation
/// when a UDP packet is duplicated, as a user error from a client not updating
/// its `last_id`, or as a double-spend attack.
DuplicateSiganture(Signature),
DuplicateSignature(Signature),
/// The bank has not seen the given `last_id` or the transaction is too old and
/// the `last_id` has been discarded.
@@ -51,6 +57,9 @@ pub enum BankError {
/// The transaction is invalid and has requested a debit or credit of negative
/// tokens.
NegativeTokens,
/// Proof of History verification failed.
LedgerVerificationFailed,
}
pub type Result<T> = result::Result<T, BankError>;
@@ -74,33 +83,27 @@ pub struct Bank {
/// reject transactions with signatures it's seen before
last_ids_sigs: RwLock<HashMap<Hash, HashSet<Signature>>>,
/// The set of trusted timekeepers. A Timestamp transaction from a `PublicKey`
/// outside this set will be discarded. Note that if validators do not have the
/// same set as leaders, they may interpret the ledger differently.
time_sources: RwLock<HashSet<PublicKey>>,
/// The most recent timestamp from a trusted timekeeper. This timestamp is applied
/// to every smart contract when it enters the system. If it is waiting on a
/// timestamp witness before that timestamp, the bank will execute it immediately.
last_time: RwLock<DateTime<Utc>>,
/// The number of transactions the bank has processed without error since the
/// start of the ledger.
transaction_count: AtomicUsize,
}
impl Bank {
/// Create a Bank using a deposit.
pub fn new_from_deposit(deposit: &Payment) -> Self {
let bank = Bank {
impl Default for Bank {
fn default() -> Self {
Bank {
balances: RwLock::new(HashMap::new()),
pending: RwLock::new(HashMap::new()),
last_ids: RwLock::new(VecDeque::new()),
last_ids_sigs: RwLock::new(HashMap::new()),
time_sources: RwLock::new(HashSet::new()),
last_time: RwLock::new(Utc.timestamp(0, 0)),
transaction_count: AtomicUsize::new(0),
};
}
}
}
impl Bank {
/// Create a Bank using a deposit.
pub fn new_from_deposit(deposit: &Payment) -> Self {
let bank = Self::default();
bank.apply_payment(deposit, &mut bank.balances.write().unwrap());
bank
}
@@ -118,24 +121,23 @@ impl Bank {
/// Commit funds to the `payment.to` party.
fn apply_payment(&self, payment: &Payment, balances: &mut HashMap<PublicKey, i64>) {
if balances.contains_key(&payment.to) {
*balances.get_mut(&payment.to).unwrap() += payment.tokens;
} else {
balances.insert(payment.to, payment.tokens);
}
*balances.entry(payment.to).or_insert(0) += payment.tokens;
}
/// Return the last entry ID registered.
pub fn last_id(&self) -> Hash {
let last_ids = self.last_ids.read().expect("'last_ids' read lock");
let last_item = last_ids.iter().last().expect("empty 'last_ids' list");
let last_item = last_ids
.iter()
.last()
.expect("get last item from 'last_ids' list");
*last_item
}
/// Store the given signature. The bank will reject any transaction with the same signature.
fn reserve_signature(signatures: &mut HashSet<Signature>, sig: &Signature) -> Result<()> {
if let Some(sig) = signatures.get(sig) {
return Err(BankError::DuplicateSiganture(*sig));
return Err(BankError::DuplicateSignature(*sig));
}
signatures.insert(*sig);
Ok(())
@@ -157,6 +159,13 @@ impl Bank {
}
}
/// Forget all signatures. Useful for benchmarking.
pub fn clear_signatures(&self) {
for (_, sigs) in self.last_ids_sigs.write().unwrap().iter_mut() {
sigs.clear();
}
}
fn reserve_signature_with_last_id(&self, signature: &Signature, last_id: &Hash) -> Result<()> {
if let Some(entry) = self.last_ids_sigs
.write()
@@ -190,26 +199,41 @@ impl Bank {
/// Deduct tokens from the 'from' address if the account has sufficient
/// funds and the transaction isn't a duplicate.
fn apply_debits(&self, tx: &Transaction, bals: &mut HashMap<PublicKey, i64>) -> Result<()> {
let option = bals.get_mut(&tx.from);
if option.is_none() {
return Err(BankError::AccountNotFound(tx.from));
let mut purge = false;
{
let option = bals.get_mut(&tx.from);
if option.is_none() {
if let Instruction::NewVote(_) = &tx.instruction {
inc_new_counter!("bank-appy_debits-vote_account_not_found", 1);
} else {
inc_new_counter!("bank-appy_debits-generic_account_not_found", 1);
}
return Err(BankError::AccountNotFound(tx.from));
}
let bal = option.unwrap();
self.reserve_signature_with_last_id(&tx.sig, &tx.last_id)?;
if let Instruction::NewContract(contract) = &tx.instruction {
if contract.tokens < 0 {
return Err(BankError::NegativeTokens);
}
if *bal < contract.tokens {
self.forget_signature_with_last_id(&tx.sig, &tx.last_id);
return Err(BankError::InsufficientFunds(tx.from));
} else if *bal == contract.tokens {
purge = true;
} else {
*bal -= contract.tokens;
}
};
}
let bal = option.unwrap();
self.reserve_signature_with_last_id(&tx.sig, &tx.last_id)?;
if purge {
bals.remove(&tx.from);
}
if let Instruction::NewContract(contract) = &tx.instruction {
if contract.tokens < 0 {
return Err(BankError::NegativeTokens);
}
if *bal < contract.tokens {
self.forget_signature_with_last_id(&tx.sig, &tx.last_id);
return Err(BankError::InsufficientFunds(tx.from));
}
*bal -= contract.tokens;
};
Ok(())
}
@@ -218,11 +242,7 @@ impl Bank {
fn apply_credits(&self, tx: &Transaction, balances: &mut HashMap<PublicKey, i64>) {
match &tx.instruction {
Instruction::NewContract(contract) => {
let mut plan = contract.plan.clone();
plan.apply_witness(&Witness::Timestamp(*self.last_time
.read()
.expect("timestamp creation in apply_credits")));
let plan = contract.plan.clone();
if let Some(payment) = plan.final_payment() {
self.apply_payment(&payment, balances);
} else {
@@ -238,12 +258,16 @@ impl Bank {
Instruction::ApplySignature(tx_sig) => {
let _ = self.apply_signature(tx.from, *tx_sig);
}
Instruction::NewVote(_vote) => {
info!("GOT VOTE!");
// TODO: record the vote in the stake table...
}
}
}
/// Process a Transaction. If it contains a payment plan that requires a witness
/// to progress, the payment plan will be stored in the bank.
fn process_transaction(&self, tx: &Transaction) -> Result<()> {
pub fn process_transaction(&self, tx: &Transaction) -> Result<()> {
let bals = &mut self.balances.write().unwrap();
self.apply_debits(tx, bals)?;
self.apply_credits(tx, bals);
@@ -295,22 +319,122 @@ impl Bank {
res
}
/// Process an ordered list of entries.
pub fn process_entries<I>(&self, entries: I) -> Result<()>
where
I: IntoIterator<Item = Entry>,
{
for entry in entries {
if !entry.transactions.is_empty() {
for result in self.process_transactions(entry.transactions) {
result?;
}
fn process_entry(&self, entry: Entry) -> Result<()> {
if !entry.transactions.is_empty() {
for result in self.process_transactions(entry.transactions) {
result?;
}
}
if !entry.has_more {
self.register_entry_id(&entry.id);
}
Ok(())
}
/// Process an ordered list of entries, populating a circular buffer "tail"
/// as we go.
fn process_entries_tail(
&self,
entries: Vec<Entry>,
tail: &mut Vec<Entry>,
tail_idx: &mut usize,
) -> Result<u64> {
let mut entry_count = 0;
for entry in entries {
if tail.len() > *tail_idx {
tail[*tail_idx] = entry.clone();
} else {
tail.push(entry.clone());
}
*tail_idx = (*tail_idx + 1) % WINDOW_SIZE as usize;
entry_count += 1;
self.process_entry(entry)?;
}
Ok(entry_count)
}
/// Process an ordered list of entries.
pub fn process_entries(&self, entries: Vec<Entry>) -> Result<u64> {
let mut entry_count = 0;
for entry in entries {
entry_count += 1;
self.process_entry(entry)?;
}
Ok(entry_count)
}
/// Append entry blocks to the ledger, verifying them along the way.
fn process_blocks<I>(
&self,
entries: I,
tail: &mut Vec<Entry>,
tail_idx: &mut usize,
) -> Result<u64>
where
I: IntoIterator<Item = Entry>,
{
// Ledger verification needs to be parallelized, but we can't pull the whole
// thing into memory. We therefore chunk it.
let mut entry_count = 0;
for block in &entries.into_iter().chunks(VERIFY_BLOCK_SIZE) {
let block: Vec<_> = block.collect();
if !block.verify(&self.last_id()) {
return Err(BankError::LedgerVerificationFailed);
}
entry_count += self.process_entries_tail(block, tail, tail_idx)?;
}
Ok(entry_count)
}
/// Process a full ledger.
pub fn process_ledger<I>(&self, entries: I) -> Result<(u64, Vec<Entry>)>
where
I: IntoIterator<Item = Entry>,
{
let mut entries = entries.into_iter();
// The first item in the ledger is required to be an entry with zero num_hashes,
// which implies its id can be used as the ledger's seed.
let entry0 = entries.next().expect("invalid ledger: empty");
// The second item in the ledger is a special transaction where the to and from
// fields are the same. That entry should be treated as a deposit, not a
// transfer to oneself.
let entry1 = entries
.next()
.expect("invalid ledger: need at least 2 entries");
{
let tx = &entry1.transactions[0];
let deposit = if let Instruction::NewContract(contract) = &tx.instruction {
contract.plan.final_payment()
} else {
None
}.expect("invalid ledger, needs to start with a contract");
self.apply_payment(&deposit, &mut self.balances.write().unwrap());
}
self.register_entry_id(&entry0.id);
self.register_entry_id(&entry1.id);
let mut tail = Vec::with_capacity(WINDOW_SIZE as usize);
tail.push(entry0);
tail.push(entry1);
let mut tail_idx = 2;
let entry_count = 2 + self.process_blocks(entries, &mut tail, &mut tail_idx)?;
// check if we need to rotate the tail
let tail = if tail.len() == WINDOW_SIZE as usize {
rotate_vector(tail, tail_idx)
} else {
tail
};
Ok((entry_count, tail))
}
/// Process a Witness Signature. Any payment plans waiting on this signature
/// will progress one step.
fn apply_signature(&self, from: PublicKey, tx_sig: Signature) -> Result<()> {
@@ -319,7 +443,7 @@ impl Bank {
.expect("write() in apply_signature")
.entry(tx_sig)
{
e.get_mut().apply_witness(&Witness::Signature(from));
e.get_mut().apply_witness(&Witness::Signature, &from);
if let Some(payment) = e.get().final_payment() {
self.apply_payment(&payment, &mut self.balances.write().unwrap());
e.remove_entry();
@@ -332,31 +456,6 @@ impl Bank {
/// Process a Witness Timestamp. Any payment plans waiting on this timestamp
/// will progress one step.
fn apply_timestamp(&self, from: PublicKey, dt: DateTime<Utc>) -> Result<()> {
// If this is the first timestamp we've seen, it probably came from the genesis block,
// so we'll trust it.
if *self.last_time
.read()
.expect("'last_time' read lock on first timestamp check")
== Utc.timestamp(0, 0)
{
self.time_sources
.write()
.expect("'time_sources' write lock on first timestamp")
.insert(from);
}
if self.time_sources
.read()
.expect("'time_sources' read lock")
.contains(&from)
{
if dt > *self.last_time.read().expect("'last_time' read lock") {
*self.last_time.write().expect("'last_time' write lock") = dt;
}
} else {
return Ok(());
}
// Check to see if any timelocked transactions can be completed.
let mut completed = vec![];
@@ -366,9 +465,7 @@ impl Bank {
.write()
.expect("'pending' write lock in apply_timestamp");
for (key, plan) in pending.iter_mut() {
plan.apply_witness(&Witness::Timestamp(*self.last_time
.read()
.expect("'last_time' read lock when creating timestamp")));
plan.apply_witness(&Witness::Timestamp(dt), &from);
if let Some(payment) = plan.final_payment() {
self.apply_payment(&payment, &mut self.balances.write().unwrap());
completed.push(key.clone());
@@ -412,16 +509,39 @@ impl Bank {
self.process_transaction(&tx).map(|_| sig)
}
pub fn get_balance(&self, pubkey: &PublicKey) -> Option<i64> {
pub fn get_balance(&self, pubkey: &PublicKey) -> i64 {
let bals = self.balances
.read()
.expect("'balances' read lock in get_balance");
bals.get(pubkey).map(|x| *x)
bals.get(pubkey).cloned().unwrap_or(0)
}
pub fn transaction_count(&self) -> usize {
self.transaction_count.load(Ordering::Relaxed)
}
pub fn has_signature(&self, signature: &Signature) -> bool {
let last_ids_sigs = self.last_ids_sigs
.read()
.expect("'last_ids_sigs' read lock");
for (_hash, signatures) in last_ids_sigs.iter() {
if signatures.contains(signature) {
return true;
}
}
false
}
}
fn rotate_vector<T: Clone>(v: Vec<T>, at: usize) -> Vec<T> {
if at != 0 {
let mut ret = Vec::with_capacity(v.len());
ret.extend_from_slice(&v[at..]);
ret.extend_from_slice(&v[0..at]);
ret
} else {
v
}
}
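Taken together, process_entries_tail and rotate_vector keep a fixed-size ring of the most recent WINDOW_SIZE entries and then unroll it back into chronological order. A toy run of that mechanism, using integers in place of Entry values and a window of 4 for brevity (rotate_vector is copied from above so the sketch is self-contained):

fn rotate_vector<T: Clone>(v: Vec<T>, at: usize) -> Vec<T> {
    if at != 0 {
        let mut ret = Vec::with_capacity(v.len());
        ret.extend_from_slice(&v[at..]);
        ret.extend_from_slice(&v[0..at]);
        ret
    } else {
        v
    }
}

fn main() {
    const WINDOW_SIZE: usize = 4; // stand-in for streamer::WINDOW_SIZE
    let mut tail: Vec<u64> = Vec::with_capacity(WINDOW_SIZE);
    let mut tail_idx = 0;
    for entry in 1..=6u64 {
        // Same overwrite-or-push logic as process_entries_tail.
        if tail.len() > tail_idx {
            tail[tail_idx] = entry;
        } else {
            tail.push(entry);
        }
        tail_idx = (tail_idx + 1) % WINDOW_SIZE;
    }
    // The ring now holds [5, 6, 3, 4]; rotating at tail_idx restores order.
    assert_eq!(rotate_vector(tail, tail_idx), vec![3, 4, 5, 6]);
}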
#[cfg(test)]
@@ -429,8 +549,11 @@ mod tests {
use super::*;
use bincode::serialize;
use entry::next_entry;
use entry::Entry;
use entry_writer::{self, EntryWriter};
use hash::hash;
use signature::KeyPairUtil;
use std::io::{BufReader, Cursor, Seek, SeekFrom};
#[test]
fn test_two_payments_to_one_party() {
@@ -441,11 +564,11 @@ mod tests {
bank.transfer(1_000, &mint.keypair(), pubkey, mint.last_id())
.unwrap();
assert_eq!(bank.get_balance(&pubkey).unwrap(), 1_000);
assert_eq!(bank.get_balance(&pubkey), 1_000);
bank.transfer(500, &mint.keypair(), pubkey, mint.last_id())
.unwrap();
assert_eq!(bank.get_balance(&pubkey).unwrap(), 1_500);
assert_eq!(bank.get_balance(&pubkey), 1_500);
assert_eq!(bank.transaction_count(), 2);
}
@@ -488,8 +611,8 @@ mod tests {
assert_eq!(bank.transaction_count(), 1);
let mint_pubkey = mint.keypair().pubkey();
assert_eq!(bank.get_balance(&mint_pubkey).unwrap(), 10_000);
assert_eq!(bank.get_balance(&pubkey).unwrap(), 1_000);
assert_eq!(bank.get_balance(&mint_pubkey), 10_000);
assert_eq!(bank.get_balance(&pubkey), 1_000);
}
#[test]
@@ -499,7 +622,7 @@ mod tests {
let pubkey = KeyPair::new().pubkey();
bank.transfer(500, &mint.keypair(), pubkey, mint.last_id())
.unwrap();
assert_eq!(bank.get_balance(&pubkey).unwrap(), 500);
assert_eq!(bank.get_balance(&pubkey), 500);
}
#[test]
@@ -512,42 +635,26 @@ mod tests {
.unwrap();
// Mint's balance will be zero because all funds are locked up.
assert_eq!(bank.get_balance(&mint.pubkey()), Some(0));
assert_eq!(bank.get_balance(&mint.pubkey()), 0);
// tx count is 1, because debits were applied.
assert_eq!(bank.transaction_count(), 1);
// pubkey's balance will be 0 because the funds have not been
// sent.
assert_eq!(bank.get_balance(&pubkey), None);
assert_eq!(bank.get_balance(&pubkey), 0);
// Now, acknowledge that the time in the condition has occurred and
// that pubkey's funds are now available.
bank.apply_timestamp(mint.pubkey(), dt).unwrap();
assert_eq!(bank.get_balance(&pubkey), Some(1));
assert_eq!(bank.get_balance(&pubkey), 1);
// tx count is still 1, because we chose not to count timestamp transactions
// in the tx count.
assert_eq!(bank.transaction_count(), 1);
bank.apply_timestamp(mint.pubkey(), dt).unwrap(); // <-- Attack! Attempt to process completed transaction.
assert_ne!(bank.get_balance(&pubkey), Some(2));
}
#[test]
fn test_transfer_after_date() {
let mint = Mint::new(1);
let bank = Bank::new(&mint);
let pubkey = KeyPair::new().pubkey();
let dt = Utc::now();
bank.apply_timestamp(mint.pubkey(), dt).unwrap();
// It's now past now, so this transfer should be processed immediately.
bank.transfer_on_date(1, &mint.keypair(), pubkey, dt, mint.last_id())
.unwrap();
assert_eq!(bank.get_balance(&mint.pubkey()), Some(0));
assert_eq!(bank.get_balance(&pubkey), Some(1));
assert_ne!(bank.get_balance(&pubkey), 2);
}
#[test]
@@ -563,22 +670,22 @@ mod tests {
assert_eq!(bank.transaction_count(), 1);
// Mint's balance will be zero because all funds are locked up.
assert_eq!(bank.get_balance(&mint.pubkey()), Some(0));
assert_eq!(bank.get_balance(&mint.pubkey()), 0);
// pubkey's balance will be 0 because the funds have not been
// sent.
assert_eq!(bank.get_balance(&pubkey), None);
assert_eq!(bank.get_balance(&pubkey), 0);
// Now, cancel the transaction. Mint gets her funds back, pubkey never sees them.
bank.apply_signature(mint.pubkey(), sig).unwrap();
assert_eq!(bank.get_balance(&mint.pubkey()), Some(1));
assert_eq!(bank.get_balance(&pubkey), None);
assert_eq!(bank.get_balance(&mint.pubkey()), 1);
assert_eq!(bank.get_balance(&pubkey), 0);
// Assert cancel doesn't cause count to go backward.
assert_eq!(bank.transaction_count(), 1);
bank.apply_signature(mint.pubkey(), sig).unwrap(); // <-- Attack! Attempt to cancel completed transaction.
assert_ne!(bank.get_balance(&mint.pubkey()), Some(2));
assert_ne!(bank.get_balance(&mint.pubkey()), 2);
}
#[test]
@@ -592,7 +699,7 @@ mod tests {
);
assert_eq!(
bank.reserve_signature_with_last_id(&sig, &mint.last_id()),
Err(BankError::DuplicateSiganture(sig))
Err(BankError::DuplicateSignature(sig))
);
}
@@ -610,6 +717,16 @@ mod tests {
);
}
#[test]
fn test_has_signature() {
let mint = Mint::new(1);
let bank = Bank::new(&mint);
let sig = Signature::default();
bank.reserve_signature_with_last_id(&sig, &mint.last_id())
.expect("reserve signature");
assert!(bank.has_signature(&sig));
}
#[test]
fn test_reject_old_last_id() {
let mint = Mint::new(1);
@@ -659,54 +776,110 @@ mod tests {
bank.process_entries(vec![entry]).unwrap();
assert!(bank.process_transaction(&tx).is_ok());
}
}
#[cfg(all(feature = "unstable", test))]
mod bench {
extern crate test;
use self::test::Bencher;
use bank::*;
use bincode::serialize;
use hash::hash;
use rayon::prelude::*;
use signature::KeyPairUtil;
#[bench]
fn bench_process_transaction(bencher: &mut Bencher) {
let mint = Mint::new(100_000_000);
let bank = Bank::new(&mint);
// Create transactions between unrelated parties.
let transactions: Vec<_> = (0..4096)
.into_par_iter()
.map(|i| {
// Seed the 'from' account.
let rando0 = KeyPair::new();
let tx = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, mint.last_id());
bank.process_transaction(&tx).unwrap();
// Seed the 'to' account and a cell for its signature.
let last_id = hash(&serialize(&i).unwrap()); // Unique hash
bank.register_entry_id(&last_id);
let rando1 = KeyPair::new();
let tx = Transaction::new(&rando0, rando1.pubkey(), 1, last_id);
bank.process_transaction(&tx).unwrap();
// Finally, return a transaction that's unique
Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
})
.collect();
bencher.iter(|| {
// Since the benchmarker runs this multiple times, we need to clear the signatures.
for (_, sigs) in bank.last_ids_sigs.write().unwrap().iter_mut() {
sigs.clear();
}
assert!(
bank.process_transactions(transactions.clone())
.iter()
.all(|x| x.is_ok())
);
});
#[test]
fn test_process_genesis() {
let mint = Mint::new(1);
let genesis = mint.create_entries();
let bank = Bank::default();
bank.process_ledger(genesis).unwrap();
assert_eq!(bank.get_balance(&mint.pubkey()), 1);
}
fn create_sample_block(mint: &Mint, length: usize) -> impl Iterator<Item = Entry> {
let mut entries = Vec::with_capacity(length);
let mut hash = mint.last_id();
let mut cur_hashes = 0;
for _ in 0..length {
let keypair = KeyPair::new();
let tx = Transaction::new(&mint.keypair(), keypair.pubkey(), 1, hash);
let entry = Entry::new_mut(&mut hash, &mut cur_hashes, vec![tx], false);
entries.push(entry);
}
entries.into_iter()
}
fn create_sample_ledger(length: usize) -> (impl Iterator<Item = Entry>, PublicKey) {
let mint = Mint::new(1 + length as i64);
let genesis = mint.create_entries();
let block = create_sample_block(&mint, length);
(genesis.into_iter().chain(block), mint.pubkey())
}
#[test]
fn test_process_ledger() {
let (ledger, pubkey) = create_sample_ledger(1);
let (ledger, dup) = ledger.tee();
let bank = Bank::default();
let (ledger_height, tail) = bank.process_ledger(ledger).unwrap();
assert_eq!(bank.get_balance(&pubkey), 1);
assert_eq!(ledger_height, 3);
assert_eq!(tail.len(), 3);
assert_eq!(tail, dup.collect_vec());
let last_entry = &tail[tail.len() - 1];
assert_eq!(bank.last_id(), last_entry.id);
}
#[test]
fn test_process_ledger_around_window_size() {
// TODO: put me back in when Criterion is up
// for _ in 0..10 {
// let (ledger, _) = create_sample_ledger(WINDOW_SIZE as usize);
// let bank = Bank::default();
// let (_, _) = bank.process_ledger(ledger).unwrap();
// }
let window_size = WINDOW_SIZE as usize;
for entry_count in window_size - 3..window_size + 2 {
let (ledger, pubkey) = create_sample_ledger(entry_count);
let bank = Bank::default();
let (ledger_height, tail) = bank.process_ledger(ledger).unwrap();
assert_eq!(bank.get_balance(&pubkey), 1);
assert_eq!(ledger_height, entry_count as u64 + 2);
assert!(tail.len() <= window_size);
let last_entry = &tail[tail.len() - 1];
assert_eq!(bank.last_id(), last_entry.id);
}
}
// Write the given entries to a file and then return a file iterator to them.
fn to_file_iter(entries: impl Iterator<Item = Entry>) -> impl Iterator<Item = Entry> {
let mut file = Cursor::new(vec![]);
EntryWriter::write_entries(&mut file, entries).unwrap();
file.seek(SeekFrom::Start(0)).unwrap();
let reader = BufReader::new(file);
entry_writer::read_entries(reader).map(|x| x.unwrap())
}
#[test]
fn test_process_ledger_from_file() {
let (ledger, pubkey) = create_sample_ledger(1);
let ledger = to_file_iter(ledger);
let bank = Bank::default();
bank.process_ledger(ledger).unwrap();
assert_eq!(bank.get_balance(&pubkey), 1);
}
#[test]
fn test_process_ledger_from_files() {
let mint = Mint::new(2);
let genesis = to_file_iter(mint.create_entries().into_iter());
let block = to_file_iter(create_sample_block(&mint, 1));
let bank = Bank::default();
bank.process_ledger(genesis.chain(block)).unwrap();
assert_eq!(bank.get_balance(&mint.pubkey()), 1);
}
#[test]
fn test_rotate_vector() {
let expect = vec![1, 2, 3, 4];
assert_eq!(rotate_vector(vec![4, 1, 2, 3], 1), expect);
assert_eq!(rotate_vector(vec![1, 2, 3, 4], 0), expect);
assert_eq!(rotate_vector(vec![2, 3, 4, 1], 3), expect);
assert_eq!(rotate_vector(vec![3, 4, 1, 2], 2), expect);
}
}

View File

@@ -5,16 +5,16 @@
use bank::Bank;
use bincode::deserialize;
use counter::Counter;
use packet;
use packet::SharedPackets;
use packet::{PacketRecycler, Packets, SharedPackets};
use rayon::prelude::*;
use record_stage::Signal;
use result::Result;
use result::{Error, Result};
use service::Service;
use std::net::SocketAddr;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::atomic::AtomicUsize;
use std::sync::mpsc::{channel, Receiver, RecvTimeoutError, Sender};
use std::sync::Arc;
use std::thread::{Builder, JoinHandle};
use std::thread::{self, Builder, JoinHandle};
use std::time::Duration;
use std::time::Instant;
use timing;
@@ -23,49 +23,42 @@ use transaction::Transaction;
/// Stores the stage's thread handle and output receiver.
pub struct BankingStage {
/// Handle to the stage's thread.
pub thread_hdl: JoinHandle<()>,
/// Output receiver for the following stage.
pub signal_receiver: Receiver<Signal>,
thread_hdl: JoinHandle<()>,
}
impl BankingStage {
/// Create the stage using `bank`. Exit when either `exit` is set or
/// when `verified_receiver` or the stage's output receiver is dropped.
/// Create the stage using `bank`. Exit when `verified_receiver` is dropped.
/// Discard input packets using `packet_recycler` to minimize memory
/// allocations in a previous stage such as the `fetch_stage`.
pub fn new(
bank: Arc<Bank>,
exit: Arc<AtomicBool>,
verified_receiver: Receiver<Vec<(SharedPackets, Vec<u8>)>>,
packet_recycler: packet::PacketRecycler,
) -> Self {
packet_recycler: PacketRecycler,
) -> (Self, Receiver<Signal>) {
let (signal_sender, signal_receiver) = channel();
let thread_hdl = Builder::new()
.name("solana-banking-stage".to_string())
.spawn(move || loop {
let e = Self::process_packets(
bank.clone(),
if let Err(e) = Self::process_packets(
&bank,
&verified_receiver,
&signal_sender,
&packet_recycler,
);
if e.is_err() {
if exit.load(Ordering::Relaxed) {
break;
) {
match e {
Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
_ => error!("{:?}", e),
}
}
})
.unwrap();
BankingStage {
thread_hdl,
signal_receiver,
}
(BankingStage { thread_hdl }, signal_receiver)
}
/// Convert the transactions from a blob of binary data to a vector of transactions and
/// an unused `SocketAddr` that could be used to send a response.
fn deserialize_transactions(p: &packet::Packets) -> Vec<Option<(Transaction, SocketAddr)>> {
fn deserialize_transactions(p: &Packets) -> Vec<Option<(Transaction, SocketAddr)>> {
p.packets
.par_iter()
.map(|x| {
@@ -78,11 +71,11 @@ impl BankingStage {
/// Process the incoming packets and send output `Signal` messages to `signal_sender`.
/// Discard packets via `packet_recycler`.
fn process_packets(
bank: Arc<Bank>,
pub fn process_packets(
bank: &Arc<Bank>,
verified_receiver: &Receiver<Vec<(SharedPackets, Vec<u8>)>>,
signal_sender: &Sender<Signal>,
packet_recycler: &packet::PacketRecycler,
packet_recycler: &PacketRecycler,
) -> Result<()> {
let timer = Duration::new(1, 0);
let recv_start = Instant::now();
@@ -96,7 +89,6 @@ impl BankingStage {
mms.len(),
);
let count = mms.iter().map(|x| x.1.len()).sum();
static mut COUNTER: Counter = create_counter!("banking_stage_process_packets", 1);
let proc_start = Instant::now();
for (msgs, vers) in mms {
let transactions = Self::deserialize_transactions(&msgs.read().unwrap());
@@ -132,11 +124,21 @@ impl BankingStage {
reqs_len,
(reqs_len as f32) / (total_time_s)
);
inc_counter!(COUNTER, count, proc_start);
inc_new_counter!("banking_stage-process_packets", count);
Ok(())
}
}
impl Service for BankingStage {
fn thread_hdls(self) -> Vec<JoinHandle<()>> {
vec![self.thread_hdl]
}
fn join(self) -> thread::Result<()> {
self.thread_hdl.join()
}
}
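For reference, the Service trait implemented here presumably has roughly this shape; this is inferred from the two methods above, not quoted from src/service.rs:

use std::thread::{self, JoinHandle};

pub trait Service {
    /// Hand back every thread handle the stage owns.
    fn thread_hdls(self) -> Vec<JoinHandle<()>>;
    /// Block until all of the stage's threads have exited.
    fn join(self) -> thread::Result<()>;
}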
// TODO: When banking is pulled out of RequestStage, add this test back in.
//use bank::Bank;
@@ -197,234 +199,3 @@ impl BankingStage {
// assert_eq!(bank.get_balance(&alice.pubkey()), Some(1));
// }
//}
//
//#[cfg(all(feature = "unstable", test))]
//mod bench {
// extern crate test;
// use self::test::Bencher;
// use bank::{Bank, MAX_ENTRY_IDS};
// use bincode::serialize;
// use hash::hash;
// use mint::Mint;
// use rayon::prelude::*;
// use signature::{KeyPair, KeyPairUtil};
// use std::collections::HashSet;
// use std::time::Instant;
// use transaction::Transaction;
//
// #[bench]
// fn bench_process_transactions(_bencher: &mut Bencher) {
// let mint = Mint::new(100_000_000);
// let bank = Bank::new(&mint);
// // Create transactions between unrelated parties.
// let txs = 100_000;
// let last_ids: Mutex<HashSet<Hash>> = Mutex::new(HashSet::new());
// let transactions: Vec<_> = (0..txs)
// .into_par_iter()
// .map(|i| {
// // Seed the 'to' account and a cell for its signature.
// let dummy_id = i % (MAX_ENTRY_IDS as i32);
// let last_id = hash(&serialize(&dummy_id).unwrap()); // Semi-unique hash
// {
// let mut last_ids = last_ids.lock().unwrap();
// if !last_ids.contains(&last_id) {
// last_ids.insert(last_id);
// bank.register_entry_id(&last_id);
// }
// }
//
// // Seed the 'from' account.
// let rando0 = KeyPair::new();
// let tx = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, last_id);
// bank.process_transaction(&tx).unwrap();
//
// let rando1 = KeyPair::new();
// let tx = Transaction::new(&rando0, rando1.pubkey(), 2, last_id);
// bank.process_transaction(&tx).unwrap();
//
// // Finally, return a transaction that's unique
// Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
// })
// .collect();
//
// let banking_stage = EventProcessor::new(bank, &mint.last_id(), None);
//
// let now = Instant::now();
// assert!(banking_stage.process_transactions(transactions).is_ok());
// let duration = now.elapsed();
// let sec = duration.as_secs() as f64 + duration.subsec_nanos() as f64 / 1_000_000_000.0;
// let tps = txs as f64 / sec;
//
// // Ensure that all transactions were successfully logged.
// drop(banking_stage.historian_input);
// let entries: Vec<Entry> = banking_stage.output.lock().unwrap().iter().collect();
// assert_eq!(entries.len(), 1);
// assert_eq!(entries[0].transactions.len(), txs as usize);
//
// println!("{} tps", tps);
// }
//}
#[cfg(all(feature = "unstable", test))]
mod bench {
extern crate test;
use self::test::Bencher;
use bank::*;
use banking_stage::BankingStage;
use logger;
use mint::Mint;
use packet::{to_packets_chunked, PacketRecycler};
use rayon::prelude::*;
use record_stage::Signal;
use signature::{KeyPair, KeyPairUtil};
use std::iter;
use std::sync::mpsc::{channel, Receiver};
use std::sync::Arc;
use transaction::Transaction;
fn check_txs(batches: usize, receiver: &Receiver<Signal>, ref_tx_count: usize) {
let mut total = 0;
for _ in 0..batches {
let signal = receiver.recv().unwrap();
if let Signal::Transactions(transactions) = signal {
total += transactions.len();
} else {
assert!(false);
}
}
assert_eq!(total, ref_tx_count);
}
#[bench]
fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
logger::setup();
let tx = 30_000_usize;
let mint_total = 1_000_000_000_000;
let mint = Mint::new(mint_total);
let num_dst_accounts = 8 * 1024;
let num_src_accounts = 8 * 1024;
let srckeys: Vec<_> = (0..num_src_accounts).map(|_| KeyPair::new()).collect();
let dstkeys: Vec<_> = (0..num_dst_accounts)
.map(|_| KeyPair::new().pubkey())
.collect();
info!("created keys src: {} dst: {}", srckeys.len(), dstkeys.len());
let transactions: Vec<_> = (0..tx)
.map(|i| {
Transaction::new(
&srckeys[i % num_src_accounts],
dstkeys[i % num_dst_accounts],
i as i64,
mint.last_id(),
)
})
.collect();
info!("created transactions");
let (verified_sender, verified_receiver) = channel();
let (signal_sender, signal_receiver) = channel();
let packet_recycler = PacketRecycler::default();
let verified: Vec<_> = to_packets_chunked(&packet_recycler, transactions, 192)
.into_iter()
.map(|x| {
let len = (*x).read().unwrap().packets.len();
(x, iter::repeat(1).take(len).collect())
})
.collect();
let setup_transactions: Vec<_> = (0..num_src_accounts)
.map(|i| {
Transaction::new(
&mint.keypair(),
srckeys[i].pubkey(),
mint_total / num_src_accounts as i64,
mint.last_id(),
)
})
.collect();
let verified_setup: Vec<_> = to_packets_chunked(&packet_recycler, setup_transactions, tx)
.into_iter()
.map(|x| {
let len = (*x).read().unwrap().packets.len();
(x, iter::repeat(1).take(len).collect())
})
.collect();
bencher.iter(move || {
let bank = Arc::new(Bank::new(&mint));
verified_sender.send(verified_setup.clone()).unwrap();
BankingStage::process_packets(
bank.clone(),
&verified_receiver,
&signal_sender,
&packet_recycler,
).unwrap();
check_txs(verified_setup.len(), &signal_receiver, num_src_accounts);
verified_sender.send(verified.clone()).unwrap();
BankingStage::process_packets(
bank.clone(),
&verified_receiver,
&signal_sender,
&packet_recycler,
).unwrap();
check_txs(verified.len(), &signal_receiver, tx);
});
}
#[bench]
fn bench_banking_stage_single_from(bencher: &mut Bencher) {
logger::setup();
let tx = 20_000_usize;
let mint = Mint::new(1_000_000_000_000);
let mut pubkeys = Vec::new();
let num_keys = 8;
for _ in 0..num_keys {
pubkeys.push(KeyPair::new().pubkey());
}
let transactions: Vec<_> = (0..tx)
.into_par_iter()
.map(|i| {
Transaction::new(
&mint.keypair(),
pubkeys[i % num_keys],
i as i64,
mint.last_id(),
)
})
.collect();
let (verified_sender, verified_receiver) = channel();
let (signal_sender, signal_receiver) = channel();
let packet_recycler = PacketRecycler::default();
let verified: Vec<_> = to_packets_chunked(&packet_recycler, transactions, tx)
.into_iter()
.map(|x| {
let len = (*x).read().unwrap().packets.len();
(x, iter::repeat(1).take(len).collect())
})
.collect();
bencher.iter(move || {
let bank = Arc::new(Bank::new(&mint));
verified_sender.send(verified.clone()).unwrap();
BankingStage::process_packets(
bank.clone(),
&verified_receiver,
&signal_sender,
&packet_recycler,
).unwrap();
check_txs(verified.len(), &signal_receiver, tx);
});
}
}

389
src/bin/client-demo.rs Normal file → Executable file
View File

@@ -1,26 +1,29 @@
extern crate atty;
extern crate bincode;
extern crate clap;
extern crate env_logger;
extern crate getopts;
extern crate rayon;
extern crate serde_json;
extern crate solana;
use atty::{is, Stream};
use getopts::Options;
use bincode::serialize;
use clap::{App, Arg};
use rayon::prelude::*;
use solana::crdt::{get_ip_addr, Crdt, ReplicatedData};
use solana::crdt::{Crdt, NodeInfo};
use solana::drone::DroneRequest;
use solana::fullnode::Config;
use solana::hash::Hash;
use solana::mint::MintDemo;
use solana::nat::{udp_public_bind, udp_random_bind};
use solana::ncp::Ncp;
use solana::signature::{GenKeys, KeyPair, KeyPairUtil};
use solana::service::Service;
use solana::signature::{read_keypair, GenKeys, KeyPair, KeyPairUtil};
use solana::streamer::default_window;
use solana::thin_client::ThinClient;
use solana::timing::{duration_as_ms, duration_as_s};
use solana::transaction::Transaction;
use std::env;
use std::error;
use std::fs::File;
use std::io::{stdin, Read};
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
use std::io::Write;
use std::net::{IpAddr, Ipv4Addr, SocketAddr, TcpStream, UdpSocket};
use std::process::exit;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
@@ -30,24 +33,14 @@ use std::thread::JoinHandle;
use std::time::Duration;
use std::time::Instant;
fn print_usage(program: &str, opts: Options) {
let mut brief = format!("Usage: cat <mint.json> | {} [options]\n\n", program);
brief += " Solana client demo creates a number of transactions and\n";
brief += " sends them to a target node.";
brief += " Takes json formatted mint file to stdin.";
print!("{}", opts.usage(&brief));
}
fn sample_tx_count(
thread_addr: Arc<RwLock<SocketAddr>>,
exit: Arc<AtomicBool>,
maxes: Arc<RwLock<Vec<(f64, u64)>>>,
exit: &Arc<AtomicBool>,
maxes: &Arc<RwLock<Vec<(f64, u64)>>>,
first_count: u64,
v: ReplicatedData,
v: &NodeInfo,
sample_period: u64,
) {
let mut client = mk_client(&thread_addr, &v);
let mut client = mk_client(&v);
let mut now = Instant::now();
let mut initial_tx_count = client.transaction_count();
let mut max_tps = 0.0;
@@ -58,17 +51,17 @@ fn sample_tx_count(
now = Instant::now();
let sample = tx_count - initial_tx_count;
initial_tx_count = tx_count;
println!("{}: Transactions processed {}", v.transactions_addr, sample);
println!("{}: Transactions processed {}", v.contact_info.tpu, sample);
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
let tps = (sample * 1_000_000_000) as f64 / ns as f64;
if tps > max_tps {
max_tps = tps;
}
println!("{}: {:.2} tps", v.transactions_addr, tps);
println!("{}: {:.2} tps", v.contact_info.tpu, tps);
total = tx_count - first_count;
println!(
"{}: Total Transactions processed {}",
v.transactions_addr, total
v.contact_info.tpu, total
);
sleep(Duration::new(sample_period, 0));
@@ -82,23 +75,29 @@ fn sample_tx_count(
fn generate_and_send_txs(
client: &mut ThinClient,
keypair_pairs: &Vec<&[KeyPair]>,
leader: &ReplicatedData,
tx_clients: &[ThinClient],
id: &KeyPair,
keypairs: &[KeyPair],
leader: &NodeInfo,
txs: i64,
last_id: &mut Hash,
threads: usize,
client_addr: Arc<RwLock<SocketAddr>>,
reclaim: bool,
) {
println!(
"Signing transactions... {} {}",
keypair_pairs.len(),
keypair_pairs[0].len()
);
println!("Signing transactions... {}", txs / 2,);
let signing_start = Instant::now();
let transactions: Vec<_> = keypair_pairs
.par_iter()
.map(|chunk| Transaction::new(&chunk[0], chunk[1].pubkey(), 1, *last_id))
.collect();
let transactions: Vec<_> = if !reclaim {
keypairs
.par_iter()
.map(|keypair| Transaction::new(&id, keypair.pubkey(), 1, *last_id))
.collect()
} else {
keypairs
.par_iter()
.map(|keypair| Transaction::new(keypair, id.pubkey(), 1, *last_id))
.collect()
};
let duration = signing_start.elapsed();
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
@@ -111,136 +110,154 @@ fn generate_and_send_txs(
duration_as_ms(&duration),
);
println!("Transfering {} transactions in {} batches", txs, threads);
println!(
"Transfering {} transactions in {} batches",
txs / 2,
threads
);
let transfer_start = Instant::now();
let sz = transactions.len() / threads;
let chunks: Vec<_> = transactions.chunks(sz).collect();
chunks.into_par_iter().for_each(|txs| {
println!(
"Transferring 1 unit {} times... to {:?}",
txs.len(),
leader.transactions_addr
);
let client = mk_client(&client_addr, &leader);
for tx in txs {
client.transfer_signed(tx.clone()).unwrap();
}
});
chunks
.into_par_iter()
.zip(tx_clients)
.for_each(|(txs, client)| {
println!(
"Transferring 1 unit {} times... to {:?}",
txs.len(),
leader.contact_info.tpu
);
for tx in txs {
client.transfer_signed(tx).unwrap();
}
});
println!(
"Transfer done. {:?} ms {} tps",
duration_as_ms(&transfer_start.elapsed()),
txs as f32 / (duration_as_s(&transfer_start.elapsed()))
);
*last_id = client.get_last_id();
loop {
let new_id = client.get_last_id();
if *last_id != new_id {
*last_id = new_id;
break;
}
sleep(Duration::from_millis(100));
}
}
fn main() {
env_logger::init();
let mut threads = 4usize;
let mut num_nodes = 1usize;
let mut time_sec = 60;
let mut time_sec = 90;
let mut opts = Options::new();
opts.optopt("l", "", "leader", "leader.json");
opts.optopt("c", "", "client port", "port");
opts.optopt("t", "", "number of threads", &format!("{}", threads));
opts.optflag("d", "dyn", "detect network address dynamically");
opts.optopt(
"s",
"",
"send transactions for this many seconds",
&format!("{}", time_sec),
);
opts.optopt(
"n",
"",
"number of nodes to converge to",
&format!("{}", num_nodes),
);
opts.optflag("h", "help", "print help");
let args: Vec<String> = env::args().collect();
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(e) => {
eprintln!("{}", e);
exit(1);
}
};
let matches = App::new("solana-client-demo")
.arg(
Arg::with_name("leader")
.short("l")
.long("leader")
.value_name("PATH")
.takes_value(true)
.help("/path/to/leader.json"),
)
.arg(
Arg::with_name("keypair")
.short("k")
.long("keypair")
.value_name("PATH")
.takes_value(true)
.default_value("~/.config/solana/id.json")
.help("/path/to/id.json"),
)
.arg(
Arg::with_name("num_nodes")
.short("n")
.long("nodes")
.value_name("NUMBER")
.takes_value(true)
.help("number of nodes to converge to"),
)
.arg(
Arg::with_name("threads")
.short("t")
.long("threads")
.value_name("NUMBER")
.takes_value(true)
.help("number of threads"),
)
.arg(
Arg::with_name("seconds")
.short("s")
.long("sec")
.value_name("NUMBER")
.takes_value(true)
.help("send transactions for this many seconds"),
)
.get_matches();
if matches.opt_present("h") {
let program = args[0].clone();
print_usage(&program, opts);
return;
}
let mut addr: SocketAddr = "0.0.0.0:8100".parse().unwrap();
if matches.opt_present("c") {
let port = matches.opt_str("c").unwrap().parse().unwrap();
addr.set_port(port);
}
if matches.opt_present("d") {
addr.set_ip(get_ip_addr().unwrap());
}
let client_addr: Arc<RwLock<SocketAddr>> = Arc::new(RwLock::new(addr));
if matches.opt_present("t") {
threads = matches.opt_str("t").unwrap().parse().expect("integer");
}
if matches.opt_present("n") {
num_nodes = matches.opt_str("n").unwrap().parse().expect("integer");
}
if matches.opt_present("s") {
time_sec = matches.opt_str("s").unwrap().parse().expect("integer");
}
let leader = if matches.opt_present("l") {
read_leader(matches.opt_str("l").unwrap())
let leader: NodeInfo;
if let Some(l) = matches.value_of("leader") {
leader = read_leader(l).node_info;
} else {
let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
ReplicatedData::new_leader(&server_addr)
leader = NodeInfo::new_leader(&server_addr);
};
let id = read_keypair(matches.value_of("keypair").unwrap()).expect("client keypair");
if let Some(t) = matches.value_of("threads") {
threads = t.to_string().parse().expect("integer");
}
if let Some(n) = matches.value_of("num_nodes") {
num_nodes = n.to_string().parse().expect("integer");
}
if let Some(s) = matches.value_of("seconds") {
time_sec = s.to_string().parse().expect("integer");
}
let mut drone_addr = leader.contact_info.tpu;
drone_addr.set_port(9900);
let signal = Arc::new(AtomicBool::new(false));
let mut c_threads = vec![];
let validators = converge(
&client_addr,
&leader,
signal.clone(),
num_nodes,
&mut c_threads,
);
let validators = converge(&leader, &signal, num_nodes, &mut c_threads);
assert_eq!(validators.len(), num_nodes);
if is(Stream::Stdin) {
eprintln!("nothing found on stdin, expected a json file");
exit(1);
}
let mut client = mk_client(&leader);
let mut buffer = String::new();
let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
if num_bytes == 0 {
eprintln!("empty file on stdin, expected a json file");
exit(1);
}
let starting_balance = client.poll_get_balance(&id.pubkey()).unwrap();
let txs: i64 = 500_000;
println!("Parsing stdin...");
let demo: MintDemo = serde_json::from_str(&buffer).unwrap_or_else(|e| {
eprintln!("failed to parse json: {}", e);
exit(1);
});
let mut client = mk_client(&client_addr, &leader);
if starting_balance < txs {
let airdrop_amount = txs - starting_balance;
println!("Airdropping {:?} tokens", airdrop_amount);
request_airdrop(&drone_addr, &id, airdrop_amount as u64).unwrap();
// TODO: return airdrop Result from Drone
sleep(Duration::from_millis(100));
let balance = client.poll_get_balance(&id.pubkey()).unwrap();
println!("Your balance is: {:?}", balance);
if balance < txs || (starting_balance == balance) {
println!("TPS airdrop limit reached; wait 60sec to retry");
exit(1);
}
}
println!("Get last ID...");
let mut last_id = client.get_last_id();
println!("Got last ID {:?}", last_id);
let mut seed = [0u8; 32];
seed.copy_from_slice(&demo.mint.keypair().public_key_bytes()[..32]);
seed.copy_from_slice(&id.public_key_bytes()[..32]);
let rnd = GenKeys::new(seed);
println!("Creating keypairs...");
let txs = demo.num_accounts / 2;
let keypairs = rnd.gen_n_keypairs(demo.num_accounts);
let keypair_pairs: Vec<_> = keypairs.chunks(2).collect();
let keypairs = rnd.gen_n_keypairs(txs / 2);
let first_count = client.transaction_count();
println!("initial count {}", first_count);
@@ -255,29 +272,47 @@ fn main() {
.into_iter()
.map(|v| {
let exit = signal.clone();
let thread_addr = client_addr.clone();
let maxes = maxes.clone();
Builder::new()
.name("solana-client-sample".to_string())
.spawn(move || {
sample_tx_count(thread_addr, exit, maxes, first_count, v, sample_period);
sample_tx_count(&exit, &maxes, first_count, &v, sample_period);
})
.unwrap()
})
.collect();
let clients: Vec<_> = (0..threads).map(|_| mk_client(&leader)).collect();
// generate and send transactions for the specified duration
let time = Duration::new(time_sec, 0);
let now = Instant::now();
let time = Duration::new(time_sec / 2, 0);
let mut now = Instant::now();
while now.elapsed() < time {
generate_and_send_txs(
&mut client,
&keypair_pairs,
&clients,
&id,
&keypairs,
&leader,
txs,
&mut last_id,
threads,
client_addr.clone(),
false,
);
}
last_id = client.get_last_id();
now = Instant::now();
while now.elapsed() < time {
generate_and_send_txs(
&mut client,
&clients,
&id,
&keypairs,
&leader,
txs,
&mut last_id,
threads,
true,
);
}
@@ -310,61 +345,57 @@ fn main() {
}
}
fn mk_client(locked_addr: &Arc<RwLock<SocketAddr>>, r: &ReplicatedData) -> ThinClient {
let mut addr = locked_addr.write().unwrap();
let port = addr.port();
let transactions_socket = UdpSocket::bind(addr.clone()).unwrap();
addr.set_port(port + 1);
let requests_socket = UdpSocket::bind(addr.clone()).unwrap();
fn mk_client(r: &NodeInfo) -> ThinClient {
let requests_socket = udp_random_bind(8000, 10000, 5).unwrap();
let transactions_socket = udp_random_bind(8000, 10000, 5).unwrap();
requests_socket
.set_read_timeout(Some(Duration::new(1, 0)))
.unwrap();
addr.set_port(port + 2);
ThinClient::new(
r.requests_addr,
r.contact_info.rpu,
requests_socket,
r.transactions_addr,
r.contact_info.tpu,
transactions_socket,
)
}
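From its call sites, udp_random_bind(start, end, tries) evidently binds a UDP socket to a random port in the given range, retrying on collisions; the range matters because the test networks only open ports 8000-10000 (see the fullnode-config comment below about the ports opened on AWS). The real implementation lives in solana::nat and may differ; a hedged sketch of the assumed behavior, using the rand 0.4/0.5-era gen_range(low, high) API:

use rand::{thread_rng, Rng};
use std::net::UdpSocket;

fn udp_random_bind(start: u16, end: u16, tries: usize) -> std::io::Result<UdpSocket> {
    let mut rng = thread_rng();
    for _ in 0..tries.saturating_sub(1) {
        let port = rng.gen_range(start, end);
        if let Ok(socket) = UdpSocket::bind(("0.0.0.0", port)) {
            return Ok(socket);
        }
    }
    // Final attempt: surface the bind error instead of swallowing it.
    UdpSocket::bind(("0.0.0.0", rng.gen_range(start, end)))
}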
fn spy_node(client_addr: &Arc<RwLock<SocketAddr>>) -> (ReplicatedData, UdpSocket) {
let mut addr = client_addr.write().unwrap();
let port = addr.port();
let gossip = UdpSocket::bind(addr.clone()).unwrap();
addr.set_port(port + 1);
let daddr = "0.0.0.0:0".parse().unwrap();
fn spy_node() -> (NodeInfo, UdpSocket) {
let gossip_socket_pair = udp_public_bind("gossip", 8000, 10000);
let pubkey = KeyPair::new().pubkey();
let node = ReplicatedData::new(
let daddr = "0.0.0.0:0".parse().unwrap();
assert!(!gossip_socket_pair.addr.ip().is_unspecified());
assert!(!gossip_socket_pair.addr.ip().is_multicast());
let node = NodeInfo::new(
pubkey,
gossip.local_addr().unwrap(),
//gossip.local_addr().unwrap(),
gossip_socket_pair.addr,
daddr,
daddr,
daddr,
daddr,
);
(node, gossip)
(node, gossip_socket_pair.receiver)
}
fn converge(
client_addr: &Arc<RwLock<SocketAddr>>,
leader: &ReplicatedData,
exit: Arc<AtomicBool>,
leader: &NodeInfo,
exit: &Arc<AtomicBool>,
num_nodes: usize,
threads: &mut Vec<JoinHandle<()>>,
) -> Vec<ReplicatedData> {
) -> Vec<NodeInfo> {
// let's spy on the network
let daddr = "0.0.0.0:0".parse().unwrap();
let (spy, spy_gossip) = spy_node(client_addr);
let mut spy_crdt = Crdt::new(spy);
let (spy, spy_gossip) = spy_node();
let mut spy_crdt = Crdt::new(spy).expect("Crdt::new");
spy_crdt.insert(&leader);
spy_crdt.set_leader(leader.id);
let spy_ref = Arc::new(RwLock::new(spy_crdt));
let window = default_window();
let gossip_send_socket = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
let gossip_send_socket = udp_random_bind(8000, 10000, 5).unwrap();
let ncp = Ncp::new(
spy_ref.clone(),
&spy_ref,
window.clone(),
spy_gossip,
gossip_send_socket,
@@ -373,13 +404,13 @@ fn converge(
let mut rv = vec![];
// wait for the network to converge; 30 seconds should be plenty
for _ in 0..30 {
let v: Vec<ReplicatedData> = spy_ref
let v: Vec<NodeInfo> = spy_ref
.read()
.unwrap()
.table
.values()
.into_iter()
.filter(|x| x.requests_addr != daddr)
.filter(|x| x.contact_info.rpu != daddr)
.cloned()
.collect();
if v.len() >= num_nodes {
@@ -389,11 +420,27 @@ fn converge(
}
sleep(Duration::new(1, 0));
}
threads.extend(ncp.thread_hdls.into_iter());
threads.extend(ncp.thread_hdls().into_iter());
rv
}
fn read_leader(path: String) -> ReplicatedData {
let file = File::open(path.clone()).expect(&format!("file not found: {}", path));
serde_json::from_reader(file).expect(&format!("failed to parse {}", path))
fn read_leader(path: &str) -> Config {
let file = File::open(path).unwrap_or_else(|_| panic!("file not found: {}", path));
serde_json::from_reader(file).unwrap_or_else(|_| panic!("failed to parse {}", path))
}
fn request_airdrop(
drone_addr: &SocketAddr,
id: &KeyPair,
tokens: u64,
) -> Result<(), Box<error::Error>> {
let mut stream = TcpStream::connect(drone_addr)?;
let req = DroneRequest::GetAirdrop {
airdrop_request_amount: tokens,
client_public_key: id.pubkey(),
};
let tx = serialize(&req).expect("serialize drone request");
stream.write_all(&tx).unwrap();
// TODO: add timeout to this function, in case of unresponsive drone
Ok(())
}
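One way to discharge the TODO above using only standard-library calls (a sketch; the five-second durations are arbitrary):

use std::net::{SocketAddr, TcpStream};
use std::time::Duration;

fn connect_with_timeout(drone_addr: &SocketAddr) -> std::io::Result<TcpStream> {
    // Bound both the connect and the subsequent write so an unresponsive
    // drone cannot hang the client indefinitely.
    let stream = TcpStream::connect_timeout(drone_addr, Duration::from_secs(5))?;
    stream.set_write_timeout(Some(Duration::from_secs(5)))?;
    Ok(stream)
}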

View File

@@ -1,116 +1,97 @@
extern crate atty;
extern crate bincode;
extern crate clap;
extern crate env_logger;
extern crate getopts;
extern crate serde_json;
extern crate solana;
extern crate tokio;
extern crate tokio_codec;
extern crate tokio_io;
use atty::{is, Stream as atty_stream};
use bincode::deserialize;
use getopts::Options;
use solana::crdt::{get_ip_addr, ReplicatedData};
use clap::{App, Arg};
use solana::crdt::NodeInfo;
use solana::drone::{Drone, DroneRequest};
use solana::mint::MintDemo;
use std::env;
use solana::fullnode::Config;
use solana::metrics::set_panic_hook;
use solana::signature::read_keypair;
use std::fs::File;
use std::io::{stdin, Read};
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::process::exit;
use std::sync::{Arc, Mutex};
use std::thread;
use tokio::net::TcpListener;
use tokio::prelude::*;
use tokio_codec::{BytesCodec, Decoder};
fn print_usage(program: &str, opts: Options) {
let mut brief = format!("Usage: cat <mint-demo.json> | {} [options]\n\n", program);
brief += " Run a Solana Drone to act as the custodian of the mint's remaining tokens\n";
print!("{}", opts.usage(&brief));
}
fn main() {
env_logger::init();
let mut opts = Options::new();
opts.optopt(
"t",
"",
"time",
"time slice over which to limit token requests to drone",
);
opts.optopt("c", "", "cap", "request limit for time slice");
opts.optopt("l", "", "leader", "leader.json");
opts.optflag("h", "help", "print help");
let args: Vec<String> = env::args().collect();
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(e) => {
eprintln!("{}", e);
exit(1);
}
set_panic_hook("drone");
let matches = App::new("drone")
.arg(
Arg::with_name("leader")
.short("l")
.long("leader")
.value_name("PATH")
.takes_value(true)
.help("/path/to/leader.json"),
)
.arg(
Arg::with_name("keypair")
.short("k")
.long("keypair")
.value_name("PATH")
.takes_value(true)
.required(true)
.help("/path/to/mint.json"),
)
.arg(
Arg::with_name("time")
.short("t")
.long("time")
.value_name("SECONDS")
.takes_value(true)
.help("time slice over which to limit requests to drone"),
)
.arg(
Arg::with_name("cap")
.short("c")
.long("cap")
.value_name("NUMBER")
.takes_value(true)
.help("request limit for time slice"),
)
.get_matches();
let leader: NodeInfo;
if let Some(l) = matches.value_of("leader") {
leader = read_leader(l).node_info;
} else {
let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
leader = NodeInfo::new_leader(&server_addr);
};
if matches.opt_present("h") {
let program = args[0].clone();
print_usage(&program, opts);
return;
}
let mint_keypair =
read_keypair(matches.value_of("keypair").expect("keypair")).expect("client keypair");
let time_slice: Option<u64>;
if matches.opt_present("t") {
time_slice = matches
.opt_str("t")
.expect("unexpected string from input")
.parse()
.ok();
if let Some(t) = matches.value_of("time") {
time_slice = Some(t.to_string().parse().expect("integer"));
} else {
time_slice = None;
}
let request_cap: Option<u64>;
if matches.opt_present("c") {
request_cap = matches
.opt_str("c")
.expect("unexpected string from input")
.parse()
.ok();
if let Some(c) = matches.value_of("cap") {
request_cap = Some(c.to_string().parse().expect("integer"));
} else {
request_cap = None;
}
let leader = if matches.opt_present("l") {
read_leader(matches.opt_str("l").unwrap())
} else {
let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
ReplicatedData::new_leader(&server_addr)
};
if is(atty_stream::Stdin) {
eprintln!("nothing found on stdin, expected a json file");
exit(1);
}
let mut buffer = String::new();
let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
if num_bytes == 0 {
eprintln!("empty file on stdin, expected a json file");
exit(1);
}
let demo: MintDemo = serde_json::from_str(&buffer).unwrap_or_else(|e| {
eprintln!("failed to parse json: {}", e);
exit(1);
});
let mint_keypair = demo.mint.keypair();
let mut drone_addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
drone_addr.set_ip(get_ip_addr().unwrap());
let drone_addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
let drone = Arc::new(Mutex::new(Drone::new(
mint_keypair,
drone_addr,
leader.transactions_addr,
leader.requests_addr,
leader.contact_info.tpu,
leader.contact_info.rpu,
time_slice,
request_cap,
)));
@@ -162,7 +143,7 @@ fn main() {
});
tokio::run(done);
}
fn read_leader(path: String) -> ReplicatedData {
let file = File::open(path.clone()).expect(&format!("file not found: {}", path));
serde_json::from_reader(file).expect(&format!("failed to parse {}", path))
fn read_leader(path: &str) -> Config {
let file = File::open(path).unwrap_or_else(|_| panic!("file not found: {}", path));
serde_json::from_reader(file).unwrap_or_else(|_| panic!("failed to parse {}", path))
}

View File

@@ -1,52 +1,81 @@
extern crate getopts;
extern crate clap;
extern crate dirs;
extern crate serde_json;
extern crate solana;
use getopts::Options;
use solana::crdt::{get_ip_addr, parse_port_or_addr, ReplicatedData};
use std::env;
use clap::{App, Arg};
use solana::crdt::{get_ip_addr, parse_port_or_addr};
use solana::fullnode::Config;
use solana::nat::get_public_ip_addr;
use solana::signature::read_pkcs8;
use std::io;
use std::net::SocketAddr;
use std::process::exit;
fn print_usage(program: &str, opts: Options) {
let mut brief = format!("Usage: {} [options]\n\n", program);
brief += " Create a solana fullnode config file\n";
print!("{}", opts.usage(&brief));
}
fn main() {
let mut opts = Options::new();
opts.optopt("b", "", "bind", "bind to port or address");
opts.optflag("d", "dyn", "detect network address dynamically");
opts.optflag("h", "help", "print help");
let args: Vec<String> = env::args().collect();
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(e) => {
eprintln!("{}", e);
exit(1);
}
};
if matches.opt_present("h") {
let program = args[0].clone();
print_usage(&program, opts);
return;
}
let matches = App::new("fullnode-config")
.arg(
Arg::with_name("local")
.short("l")
.long("local")
.takes_value(false)
.help("detect network address from local machine configuration"),
)
.arg(
Arg::with_name("keypair")
.short("k")
.long("keypair")
.value_name("PATH")
.takes_value(true)
.help("/path/to/id.json"),
)
.arg(
Arg::with_name("public")
.short("p")
.long("public")
.takes_value(false)
.help("detect public network address using public servers"),
)
.arg(
Arg::with_name("bind")
.short("b")
.long("bind")
.value_name("PORT")
.takes_value(true)
.help("bind to port or address"),
)
.get_matches();
let bind_addr: SocketAddr = {
let mut bind_addr = parse_port_or_addr(matches.opt_str("b"));
if matches.opt_present("d") {
let mut bind_addr = parse_port_or_addr({
if let Some(b) = matches.value_of("bind") {
Some(b.to_string())
} else {
None
}
});
if matches.is_present("local") {
let ip = get_ip_addr().unwrap();
bind_addr.set_ip(ip);
}
if matches.is_present("public") {
let ip = get_public_ip_addr().unwrap();
bind_addr.set_ip(ip);
}
bind_addr
};
let mut path = dirs::home_dir().expect("home directory");
let id_path = if matches.is_present("keypair") {
matches.value_of("keypair").unwrap()
} else {
path.extend(&[".config", "solana", "id.json"]);
path.to_str().unwrap()
};
let pkcs8 = read_pkcs8(id_path).expect("client keypair");
// we need all the receiving sockets to be bound within the expected
// port range that we open on aws
let repl_data = ReplicatedData::new_leader(&bind_addr);
let config = Config::new(&bind_addr, pkcs8);
let stdout = io::stdout();
serde_json::to_writer(stdout, &repl_data).expect("serialize");
serde_json::to_writer(stdout, &config).expect("serialize");
}

View File

@@ -1,118 +1,61 @@
extern crate atty;
extern crate clap;
extern crate env_logger;
extern crate getopts;
extern crate log;
extern crate serde_json;
extern crate solana;
use atty::{is, Stream};
use getopts::Options;
use solana::bank::Bank;
use solana::crdt::ReplicatedData;
use solana::entry::Entry;
use solana::payment_plan::PaymentPlan;
use solana::server::Server;
use solana::transaction::Instruction;
use std::env;
use clap::{App, Arg};
use solana::crdt::{NodeInfo, TestNode};
use solana::fullnode::{Config, FullNode, LedgerFile};
use solana::metrics::set_panic_hook;
use solana::service::Service;
use solana::signature::{KeyPair, KeyPairUtil};
use std::fs::File;
use std::io::{stdin, stdout, BufRead, Write};
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::process::exit;
use std::sync::atomic::AtomicBool;
use std::sync::Arc;
//use std::time::Duration;
fn print_usage(program: &str, opts: Options) {
let mut brief = format!("Usage: cat <transaction.log> | {} [options]\n\n", program);
brief += " Run a Solana node to handle transactions and\n";
brief += " write a new transaction log to stdout.\n";
brief += " Takes existing transaction log from stdin.";
print!("{}", opts.usage(&brief));
}
fn main() {
fn main() -> () {
env_logger::init();
let mut opts = Options::new();
opts.optflag("h", "help", "print help");
opts.optopt("l", "", "run with the identity found in FILE", "FILE");
opts.optopt(
"t",
"",
"testnet; connect to the network at this gossip entry point",
"HOST:PORT",
);
opts.optopt(
"o",
"",
"output log to FILE, defaults to stdout (ignored by validators)",
"FILE",
);
let args: Vec<String> = env::args().collect();
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(e) => {
eprintln!("{}", e);
exit(1);
}
};
if matches.opt_present("h") {
let program = args[0].clone();
print_usage(&program, opts);
return;
}
if is(Stream::Stdin) {
eprintln!("nothing found on stdin, expected a log file");
exit(1);
}
eprintln!("Initializing...");
let stdin = stdin();
let mut entries = stdin.lock().lines().map(|line| {
let entry: Entry = serde_json::from_str(&line.unwrap()).unwrap_or_else(|e| {
eprintln!("failed to parse json: {}", e);
exit(1);
});
entry
});
eprintln!("done parsing...");
// The first item in the ledger is required to be an entry with zero num_hashes,
// which implies its id can be used as the ledger's seed.
let entry0 = entries.next().expect("invalid ledger: empty");
// The second item in the ledger is a special transaction where the to and from
// fields are the same. That entry should be treated as a deposit, not a
// transfer to oneself.
let entry1 = entries
.next()
.expect("invalid ledger: need at least 2 entries");
let tx = &entry1.transactions[0];
let deposit = if let Instruction::NewContract(contract) = &tx.instruction {
contract.plan.final_payment()
} else {
None
}.expect("invalid ledger, needs to start with a contract");
eprintln!("creating bank...");
let bank = Bank::new_from_deposit(&deposit);
bank.register_entry_id(&entry0.id);
bank.register_entry_id(&entry1.id);
eprintln!("processing entries...");
bank.process_entries(entries).expect("process_entries");
eprintln!("creating networking stack...");
set_panic_hook("fullnode");
let matches = App::new("fullnode")
.arg(
Arg::with_name("identity")
.short("i")
.long("identity")
.value_name("FILE")
.takes_value(true)
.help("run with the identity found in FILE"),
)
.arg(
Arg::with_name("testnet")
.short("t")
.long("testnet")
.value_name("HOST:PORT")
.takes_value(true)
.help("connect to the network at this gossip entry point"),
)
.arg(
Arg::with_name("ledger")
.short("L")
.long("ledger")
.value_name("FILE")
.takes_value(true)
.help("use FILE as persistent ledger (defaults to stdin/stdout)"),
)
.get_matches();
let bind_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
let mut repl_data = ReplicatedData::new_leader(&bind_addr);
if matches.opt_present("l") {
let path = matches.opt_str("l").unwrap();
let mut keypair = KeyPair::new();
let mut repl_data = NodeInfo::new_leader_with_pubkey(keypair.pubkey(), &bind_addr);
if let Some(i) = matches.value_of("identity") {
let path = i.to_string();
if let Ok(file) = File::open(path.clone()) {
if let Ok(data) = serde_json::from_reader(file) {
repl_data = data;
let parse: serde_json::Result<Config> = serde_json::from_reader(file);
if let Ok(data) = parse {
keypair = data.keypair();
repl_data = data.node_info;
} else {
eprintln!("failed to parse {}", path);
exit(1);
@@ -122,59 +65,22 @@ fn main() {
exit(1);
}
}
let exit = Arc::new(AtomicBool::new(false));
let threads = if matches.opt_present("t") {
let testnet_address_string = matches.opt_str("t").unwrap();
eprintln!(
"starting validator... {} connecting to {}",
repl_data.requests_addr, testnet_address_string
);
let testnet_addr = testnet_address_string.parse().unwrap();
let network_entry_point = ReplicatedData::new_entry_point(testnet_addr);
let s = Server::new_validator(
bank,
repl_data.clone(),
UdpSocket::bind(repl_data.requests_addr).unwrap(),
UdpSocket::bind("0.0.0.0:0").unwrap(),
UdpSocket::bind(repl_data.replicate_addr).unwrap(),
UdpSocket::bind(repl_data.gossip_addr).unwrap(),
UdpSocket::bind(repl_data.repair_addr).unwrap(),
network_entry_point,
exit.clone(),
);
s.thread_hdls
let ledger = if let Some(l) = matches.value_of("ledger") {
LedgerFile::Path(l.to_string())
} else {
eprintln!("starting leader... {}", repl_data.requests_addr);
repl_data.current_leader_id = repl_data.id.clone();
let outfile: Box<Write + Send + 'static> = if matches.opt_present("o") {
let path = matches.opt_str("o").unwrap();
Box::new(
File::create(&path).expect(&format!("unable to open output file \"{}\"", path)),
)
} else {
Box::new(stdout())
};
let server = Server::new_leader(
bank,
//Some(Duration::from_millis(1000)),
None,
repl_data.clone(),
UdpSocket::bind(repl_data.requests_addr).unwrap(),
UdpSocket::bind(repl_data.transactions_addr).unwrap(),
UdpSocket::bind("0.0.0.0:0").unwrap(),
UdpSocket::bind("0.0.0.0:0").unwrap(),
UdpSocket::bind(repl_data.gossip_addr).unwrap(),
exit.clone(),
outfile,
);
server.thread_hdls
LedgerFile::StdInOut
};
eprintln!("Ready. Listening on {}", repl_data.transactions_addr);
for t in threads {
t.join().expect("join");
}
let mut node = TestNode::new_with_bind_addr(repl_data, bind_addr);
let fullnode = if let Some(t) = matches.value_of("testnet") {
let testnet_address_string = t.to_string();
let testnet_addr = testnet_address_string.parse().unwrap();
FullNode::new(node, false, ledger, Some(keypair), Some(testnet_addr))
} else {
node.data.leader_id = node.data.id;
FullNode::new(node, true, ledger, None, None)
};
fullnode.join().expect("join");
}


@@ -1,82 +0,0 @@
extern crate atty;
extern crate rayon;
extern crate serde_json;
extern crate solana;
use atty::{is, Stream};
use rayon::prelude::*;
use solana::bank::MAX_ENTRY_IDS;
use solana::entry::next_entry;
use solana::ledger::next_entries;
use solana::mint::MintDemo;
use solana::signature::{GenKeys, KeyPairUtil};
use solana::transaction::Transaction;
use std::io::{stdin, Read};
use std::process::exit;
// Generate a ledger with lots and lots of accounts.
fn main() {
if is(Stream::Stdin) {
eprintln!("nothing found on stdin, expected a json file");
exit(1);
}
let mut buffer = String::new();
let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
if num_bytes == 0 {
eprintln!("empty file on stdin, expected a json file");
exit(1);
}
let demo: MintDemo = serde_json::from_str(&buffer).unwrap_or_else(|e| {
eprintln!("failed to parse json: {}", e);
exit(1);
});
let mut seed = [0u8; 32];
seed.copy_from_slice(&demo.mint.keypair().public_key_bytes()[..32]);
let rnd = GenKeys::new(seed);
let num_accounts = demo.num_accounts;
let tokens_per_user = 500;
let keypairs = rnd.gen_n_keypairs(num_accounts);
let mint_keypair = demo.mint.keypair();
let last_id = demo.mint.last_id();
for entry in demo.mint.create_entries() {
println!("{}", serde_json::to_string(&entry).unwrap());
}
eprintln!("Creating {} empty entries...", MAX_ENTRY_IDS);
// Offer client lots of entry IDs to use for each transaction's last_id.
let mut last_id = last_id;
let mut last_ids = vec![];
for _ in 0..MAX_ENTRY_IDS {
let entry = next_entry(&last_id, 1, vec![]);
last_id = entry.id;
last_ids.push(last_id);
let serialized = serde_json::to_string(&entry).unwrap_or_else(|e| {
eprintln!("failed to serialize: {}", e);
exit(1);
});
println!("{}", serialized);
}
eprintln!("Creating {} transactions...", num_accounts);
let transactions: Vec<_> = keypairs
.into_par_iter()
.enumerate()
.map(|(i, rando)| {
let last_id = last_ids[i % MAX_ENTRY_IDS];
Transaction::new(&mint_keypair, rando.pubkey(), tokens_per_user, last_id)
})
.collect();
eprintln!("Logging the creation of {} accounts...", num_accounts);
let entries = next_entries(&last_id, 0, transactions);
for entry in entries {
println!("{}", serde_json::to_string(&entry).unwrap());
}
}


@@ -1,36 +1,50 @@
//! A command-line executable for generating the chain's genesis block.
extern crate atty;
#[macro_use]
extern crate clap;
extern crate serde_json;
extern crate solana;
use atty::{is, Stream};
use clap::{App, Arg};
use solana::entry_writer::EntryWriter;
use solana::mint::Mint;
use std::io::{stdin, Read};
use std::error;
use std::io::{stdin, stdout, Read};
use std::process::exit;
fn main() {
fn main() -> Result<(), Box<error::Error>> {
let matches = App::new("solana-genesis")
.arg(
Arg::with_name("tokens")
.short("t")
.long("tokens")
.value_name("NUMBER")
.takes_value(true)
.required(true)
.help("Number of tokens with which to initialize mint"),
)
.get_matches();
let tokens = value_t_or_exit!(matches, "tokens", i64);
if is(Stream::Stdin) {
eprintln!("nothing found on stdin, expected a json file");
exit(1);
}
let mut buffer = String::new();
let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
let num_bytes = stdin().read_to_string(&mut buffer)?;
if num_bytes == 0 {
eprintln!("empty file on stdin, expected a json file");
exit(1);
}
let mint: Mint = serde_json::from_str(&buffer).unwrap_or_else(|e| {
eprintln!("failed to parse json: {}", e);
exit(1);
});
for x in mint.create_entries() {
let serialized = serde_json::to_string(&x).unwrap_or_else(|e| {
eprintln!("failed to serialize: {}", e);
exit(1);
});
println!("{}", serialized);
}
let pkcs8: Vec<u8> = serde_json::from_str(&buffer)?;
let mint = Mint::new_with_pkcs8(tokens, pkcs8);
let mut writer = stdout();
EntryWriter::write_entries(&mut writer, mint.create_entries())?;
Ok(())
}

src/bin/keygen.rs (new file)

@@ -0,0 +1,49 @@
extern crate clap;
extern crate dirs;
extern crate ring;
extern crate serde_json;
use clap::{App, Arg};
use ring::rand::SystemRandom;
use ring::signature::Ed25519KeyPair;
use std::error;
use std::fs::{self, File};
use std::io::Write;
use std::path::Path;
fn main() -> Result<(), Box<error::Error>> {
let matches = App::new("solana-keygen")
.arg(
Arg::with_name("outfile")
.short("o")
.long("outfile")
.value_name("PATH")
.takes_value(true)
.help("path to generated file"),
)
.get_matches();
let rnd = SystemRandom::new();
let pkcs8_bytes = Ed25519KeyPair::generate_pkcs8(&rnd)?;
let serialized = serde_json::to_string(&pkcs8_bytes.to_vec())?;
let mut path = dirs::home_dir().expect("home directory");
let outfile = if matches.is_present("outfile") {
matches.value_of("outfile").unwrap()
} else {
path.extend(&[".config", "solana", "id.json"]);
path.to_str().unwrap()
};
if outfile == "-" {
println!("{}", serialized);
} else {
if let Some(outdir) = Path::new(outfile).parent() {
fs::create_dir_all(outdir)?;
}
let mut f = File::create(outfile)?;
f.write_all(&serialized.into_bytes())?;
}
Ok(())
}
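For reference, the file written above is nothing more than the PKCS#8 byte vector serialized as JSON. A minimal sketch of loading it back, assuming the `solana` and `untrusted` crates from this tree and mirroring the `KeyPair::from_pkcs8` call that src/fullnode.rs below uses (the path here is hypothetical):
extern crate serde_json;
extern crate solana;
extern crate untrusted;
use solana::signature::{KeyPair, KeyPairUtil};
use std::fs::File;
use untrusted::Input;
fn main() {
    // Hypothetical path; solana-keygen defaults to ~/.config/solana/id.json.
    let file = File::open("id.json").expect("open keypair file");
    let pkcs8: Vec<u8> = serde_json::from_reader(file).expect("parse pkcs8 json");
    let keypair = KeyPair::from_pkcs8(Input::from(&pkcs8)).expect("from_pkcs8");
    println!("pubkey: {:?}", keypair.pubkey());
}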


@@ -1,29 +0,0 @@
extern crate atty;
extern crate rayon;
extern crate ring;
extern crate serde_json;
extern crate solana;
use atty::{is, Stream};
use solana::mint::{Mint, MintDemo};
use std::io;
use std::process::exit;
fn main() {
let mut input_text = String::new();
if is(Stream::Stdin) {
eprintln!("nothing found on stdin, expected a token number");
exit(1);
}
io::stdin().read_line(&mut input_text).unwrap();
let trimmed = input_text.trim();
let tokens = trimmed.parse::<i64>().unwrap();
let mint = Mint::new(tokens);
let tokens_per_user = 1_000;
let num_accounts = tokens / tokens_per_user;
let demo = MintDemo { mint, num_accounts };
println!("{}", serde_json::to_string(&demo).unwrap());
}


@@ -1,29 +0,0 @@
extern crate atty;
extern crate serde_json;
extern crate solana;
use atty::{is, Stream};
use solana::mint::Mint;
use std::io;
use std::process::exit;
fn main() {
let mut input_text = String::new();
if is(Stream::Stdin) {
eprintln!("nothing found on stdin, expected a token number");
exit(1);
}
io::stdin().read_line(&mut input_text).unwrap();
let trimmed = input_text.trim();
let tokens = trimmed.parse::<i64>().unwrap_or_else(|e| {
eprintln!("{}", e);
exit(1);
});
let mint = Mint::new(tokens);
let serialized = serde_json::to_string(&mint).unwrap_or_else(|e| {
eprintln!("failed to serialize: {}", e);
exit(1);
});
println!("{}", serialized);
}

src/bin/wallet.rs (new file)

@@ -0,0 +1,356 @@
extern crate atty;
extern crate bincode;
extern crate bs58;
extern crate clap;
extern crate dirs;
extern crate env_logger;
extern crate serde_json;
extern crate solana;
use bincode::serialize;
use clap::{App, Arg, SubCommand};
use solana::crdt::NodeInfo;
use solana::drone::DroneRequest;
use solana::fullnode::Config;
use solana::signature::{read_keypair, KeyPair, KeyPairUtil, PublicKey, Signature};
use solana::thin_client::ThinClient;
use std::error;
use std::fmt;
use std::fs::File;
use std::io;
use std::io::prelude::*;
use std::net::{IpAddr, Ipv4Addr, SocketAddr, TcpStream, UdpSocket};
use std::thread::sleep;
use std::time::Duration;
enum WalletCommand {
Address,
Balance,
AirDrop(i64),
Pay(i64, PublicKey),
Confirm(Signature),
}
#[derive(Debug, Clone)]
enum WalletError {
CommandNotRecognized(String),
BadParameter(String),
}
impl fmt::Display for WalletError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "invalid")
}
}
impl error::Error for WalletError {
fn description(&self) -> &str {
"invalid"
}
fn cause(&self) -> Option<&error::Error> {
// Generic error, underlying cause isn't tracked.
None
}
}
struct WalletConfig {
leader: NodeInfo,
id: KeyPair,
drone_addr: SocketAddr,
command: WalletCommand,
}
impl Default for WalletConfig {
fn default() -> WalletConfig {
let default_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
WalletConfig {
leader: NodeInfo::new_leader(&default_addr),
id: KeyPair::new(),
drone_addr: default_addr,
command: WalletCommand::Balance,
}
}
}
fn parse_args() -> Result<WalletConfig, Box<error::Error>> {
let matches = App::new("solana-wallet")
.arg(
Arg::with_name("leader")
.short("l")
.long("leader")
.value_name("PATH")
.takes_value(true)
.help("/path/to/leader.json"),
)
.arg(
Arg::with_name("keypair")
.short("k")
.long("keypair")
.value_name("PATH")
.takes_value(true)
.help("/path/to/id.json"),
)
.subcommand(
SubCommand::with_name("airdrop")
.about("Request a batch of tokens")
.arg(
Arg::with_name("tokens")
// .index(1)
.long("tokens")
.value_name("NUMBER")
.takes_value(true)
.required(true)
.help("The number of tokens to request"),
),
)
.subcommand(
SubCommand::with_name("pay")
.about("Send a payment")
.arg(
Arg::with_name("tokens")
// .index(2)
.long("tokens")
.value_name("NUMBER")
.takes_value(true)
.required(true)
.help("the number of tokens to send"),
)
.arg(
Arg::with_name("to")
// .index(1)
.long("to")
.value_name("PUBKEY")
.takes_value(true)
.help("The pubkey of recipient"),
),
)
.subcommand(
SubCommand::with_name("confirm")
.about("Confirm your payment by signature")
.arg(
Arg::with_name("signature")
.index(1)
.value_name("SIGNATURE")
.required(true)
.help("The transaction signature to confirm"),
),
)
.subcommand(SubCommand::with_name("balance").about("Get your balance"))
.subcommand(SubCommand::with_name("address").about("Get your public key"))
.get_matches();
let leader: NodeInfo;
if let Some(l) = matches.value_of("leader") {
leader = read_leader(l)?.node_info;
} else {
let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
leader = NodeInfo::new_leader(&server_addr);
};
let mut path = dirs::home_dir().expect("home directory");
let id_path = if matches.is_present("keypair") {
matches.value_of("keypair").unwrap()
} else {
path.extend(&[".config", "solana", "id.json"]);
path.to_str().unwrap()
};
let id = read_keypair(id_path).or_else(|err| {
display_actions();
Err(WalletError::BadParameter(format!(
"{}: Unable to open keypair file: {}",
err, id_path
)))
})?;
let mut drone_addr = leader.contact_info.tpu;
drone_addr.set_port(9900);
let command = match matches.subcommand() {
("airdrop", Some(airdrop_matches)) => {
let tokens = airdrop_matches.value_of("tokens").unwrap().parse()?;
Ok(WalletCommand::AirDrop(tokens))
}
("pay", Some(pay_matches)) => {
let to = if pay_matches.is_present("to") {
let pubkey_vec = bs58::decode(pay_matches.value_of("to").unwrap())
.into_vec()
.expect("base58-encoded public key");
if pubkey_vec.len() != std::mem::size_of::<PublicKey>() {
display_actions();
Err(WalletError::BadParameter("Invalid public key".to_string()))?;
}
PublicKey::clone_from_slice(&pubkey_vec)
} else {
id.pubkey()
};
let tokens = pay_matches.value_of("tokens").unwrap().parse()?;
Ok(WalletCommand::Pay(tokens, to))
}
("confirm", Some(confirm_matches)) => {
let sig_vec = bs58::decode(confirm_matches.value_of("signature").unwrap())
.into_vec()
.expect("base58-encoded signature");
if sig_vec.len() == std::mem::size_of::<Signature>() {
let sig = Signature::clone_from_slice(&sig_vec);
Ok(WalletCommand::Confirm(sig))
} else {
display_actions();
Err(WalletError::BadParameter("Invalid signature".to_string()))
}
}
("balance", Some(_balance_matches)) => Ok(WalletCommand::Balance),
("address", Some(_address_matches)) => Ok(WalletCommand::Address),
("", None) => {
display_actions();
Err(WalletError::CommandNotRecognized(
"no subcommand given".to_string(),
))
}
_ => unreachable!(),
}?;
Ok(WalletConfig {
leader,
id,
drone_addr, // TODO: Add an option for this.
command,
})
}
fn process_command(
config: &WalletConfig,
client: &mut ThinClient,
) -> Result<(), Box<error::Error>> {
match config.command {
// Check client balance
WalletCommand::Address => {
println!("{}", bs58::encode(config.id.pubkey()).into_string());
}
WalletCommand::Balance => {
println!("Balance requested...");
let balance = client.poll_get_balance(&config.id.pubkey());
match balance {
Ok(balance) => {
println!("Your balance is: {:?}", balance);
}
Err(ref e) if e.kind() == std::io::ErrorKind::Other => {
println!("No account found! Request an airdrop to get started.");
}
Err(error) => {
println!("An error occurred: {:?}", error);
}
}
}
// Request an airdrop from Solana Drone;
// Request amount is set in request_airdrop function
WalletCommand::AirDrop(tokens) => {
println!(
"Requesting airdrop of {:?} tokens from {}",
tokens, config.drone_addr
);
let previous_balance = client.poll_get_balance(&config.id.pubkey())?;
request_airdrop(&config.drone_addr, &config.id, tokens as u64)?;
// TODO: return airdrop Result from Drone instead of polling the
// network
let mut current_balance = previous_balance;
for _ in 0..20 {
sleep(Duration::from_millis(500));
current_balance = client.poll_get_balance(&config.id.pubkey())?;
if previous_balance != current_balance {
break;
}
println!(".");
}
println!("Your balance is: {:?}", current_balance);
if current_balance - previous_balance != tokens {
Err("Airdrop failed!")?;
}
}
// If client has positive balance, spend tokens in {balance} number of transactions
WalletCommand::Pay(tokens, to) => {
let last_id = client.get_last_id();
let sig = client.transfer(tokens, &config.id, to, &last_id)?;
println!("{}", bs58::encode(sig).into_string());
}
// Confirm the last client transaction by signature
WalletCommand::Confirm(sig) => {
if client.check_signature(&sig) {
println!("Confirmed");
} else {
println!("Not found");
}
}
}
Ok(())
}
fn display_actions() {
println!();
println!("Commands:");
println!(" address Get your public key");
println!(" balance Get your account balance");
println!(" airdrop Request a batch of tokens");
println!(" pay Send tokens to a public key");
println!(" confirm Confirm your last payment by signature");
println!();
}
fn read_leader(path: &str) -> Result<Config, WalletError> {
let file = File::open(path.to_string()).or_else(|err| {
Err(WalletError::BadParameter(format!(
"{}: Unable to open leader file: {}",
err, path
)))
})?;
serde_json::from_reader(file).or_else(|err| {
Err(WalletError::BadParameter(format!(
"{}: Failed to parse leader file: {}",
err, path
)))
})
}
fn mk_client(r: &NodeInfo) -> io::Result<ThinClient> {
let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
requests_socket
.set_read_timeout(Some(Duration::new(1, 0)))
.unwrap();
Ok(ThinClient::new(
r.contact_info.rpu,
requests_socket,
r.contact_info.tpu,
transactions_socket,
))
}
fn request_airdrop(
drone_addr: &SocketAddr,
id: &KeyPair,
tokens: u64,
) -> Result<(), Box<error::Error>> {
let mut stream = TcpStream::connect(drone_addr)?;
let req = DroneRequest::GetAirdrop {
airdrop_request_amount: tokens,
client_public_key: id.pubkey(),
};
let tx = serialize(&req).expect("serialize drone request");
stream.write_all(&tx).unwrap();
// TODO: add timeout to this function, in case of unresponsive drone
Ok(())
}
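// Illustrative wire exchange (derived from the code above, not a normative
// spec): the client opens a TCP connection to the drone (port 9900 by
// default, per parse_args) and writes a bincode-serialized
// DroneRequest::GetAirdrop; confirmation is currently obtained by polling
// the bank balance in process_command rather than by reading a reply.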
fn main() -> Result<(), Box<error::Error>> {
env_logger::init();
let config = parse_args()?;
let mut client = mk_client(&config.leader)?;
process_command(&config, &mut client)
}


@@ -1,31 +1,32 @@
//! The `blob_fetch_stage` pulls blobs from UDP sockets and sends them to a channel.
use packet;
use packet::BlobRecycler;
use service::Service;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread::JoinHandle;
use streamer;
use std::thread::{self, JoinHandle};
use streamer::{self, BlobReceiver};
pub struct BlobFetchStage {
pub blob_receiver: streamer::BlobReceiver,
pub thread_hdls: Vec<JoinHandle<()>>,
exit: Arc<AtomicBool>,
thread_hdls: Vec<JoinHandle<()>>,
}
impl BlobFetchStage {
pub fn new(
socket: UdpSocket,
exit: Arc<AtomicBool>,
blob_recycler: packet::BlobRecycler,
) -> Self {
blob_recycler: &BlobRecycler,
) -> (Self, BlobReceiver) {
Self::new_multi_socket(vec![socket], exit, blob_recycler)
}
pub fn new_multi_socket(
sockets: Vec<UdpSocket>,
exit: Arc<AtomicBool>,
blob_recycler: packet::BlobRecycler,
) -> Self {
blob_recycler: &BlobRecycler,
) -> (Self, BlobReceiver) {
let (blob_sender, blob_receiver) = channel();
let thread_hdls: Vec<_> = sockets
.into_iter()
@@ -39,9 +40,23 @@ impl BlobFetchStage {
})
.collect();
BlobFetchStage {
blob_receiver,
thread_hdls,
}
(BlobFetchStage { exit, thread_hdls }, blob_receiver)
}
pub fn close(&self) {
self.exit.store(true, Ordering::Relaxed);
}
}
impl Service for BlobFetchStage {
fn thread_hdls(self) -> Vec<JoinHandle<()>> {
self.thread_hdls
}
fn join(self) -> thread::Result<()> {
for thread_hdl in self.thread_hdls() {
thread_hdl.join()?;
}
Ok(())
}
}


@@ -12,7 +12,7 @@ use std::mem;
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Condition {
/// Wait for a `Timestamp` `Witness` at or after the given `DateTime`.
Timestamp(DateTime<Utc>),
Timestamp(DateTime<Utc>, PublicKey),
/// Wait for a `Signature` `Witness` from `PublicKey`.
Signature(PublicKey),
@@ -20,10 +20,12 @@ pub enum Condition {
impl Condition {
/// Return true if the given Witness satisfies this Condition.
pub fn is_satisfied(&self, witness: &Witness) -> bool {
pub fn is_satisfied(&self, witness: &Witness, from: &PublicKey) -> bool {
match (self, witness) {
(Condition::Signature(pubkey), Witness::Signature(from)) => pubkey == from,
(Condition::Timestamp(dt), Witness::Timestamp(last_time)) => dt <= last_time,
(Condition::Signature(pubkey), Witness::Signature) => pubkey == from,
(Condition::Timestamp(dt, pubkey), Witness::Timestamp(last_time)) => {
pubkey == from && dt <= last_time
}
_ => false,
}
}
@@ -56,8 +58,13 @@ impl Budget {
}
/// Create a budget that pays `tokens` to `to` after the given DateTime.
pub fn new_future_payment(dt: DateTime<Utc>, tokens: i64, to: PublicKey) -> Self {
Budget::After(Condition::Timestamp(dt), Payment { tokens, to })
pub fn new_future_payment(
dt: DateTime<Utc>,
from: PublicKey,
tokens: i64,
to: PublicKey,
) -> Self {
Budget::After(Condition::Timestamp(dt, from), Payment { tokens, to })
}
/// Create a budget that pays `tokens` to `to` after the given DateTime
@@ -69,7 +76,7 @@ impl Budget {
to: PublicKey,
) -> Self {
Budget::Or(
(Condition::Timestamp(dt), Payment { tokens, to }),
(Condition::Timestamp(dt, from), Payment { tokens, to }),
(Condition::Signature(from), Payment { tokens, to: from }),
)
}
@@ -94,11 +101,11 @@ impl PaymentPlan for Budget {
/// Apply a witness to the budget to see if the budget can be reduced.
/// If so, modify the budget in-place.
fn apply_witness(&mut self, witness: &Witness) {
fn apply_witness(&mut self, witness: &Witness, from: &PublicKey) {
let new_payment = match self {
Budget::After(cond, payment) if cond.is_satisfied(witness) => Some(payment),
Budget::Or((cond, payment), _) if cond.is_satisfied(witness) => Some(payment),
Budget::Or(_, (cond, payment)) if cond.is_satisfied(witness) => Some(payment),
Budget::After(cond, payment) if cond.is_satisfied(witness, from) => Some(payment),
Budget::Or((cond, payment), _) if cond.is_satisfied(witness, from) => Some(payment),
Budget::Or(_, (cond, payment)) if cond.is_satisfied(witness, from) => Some(payment),
_ => None,
}.cloned();
@@ -111,20 +118,22 @@ impl PaymentPlan for Budget {
#[cfg(test)]
mod tests {
use super::*;
use signature::{KeyPair, KeyPairUtil};
#[test]
fn test_signature_satisfied() {
let sig = PublicKey::default();
assert!(Condition::Signature(sig).is_satisfied(&Witness::Signature(sig)));
let from = PublicKey::default();
assert!(Condition::Signature(from).is_satisfied(&Witness::Signature, &from));
}
#[test]
fn test_timestamp_satisfied() {
let dt1 = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
let dt2 = Utc.ymd(2014, 11, 14).and_hms(10, 9, 8);
assert!(Condition::Timestamp(dt1).is_satisfied(&Witness::Timestamp(dt1)));
assert!(Condition::Timestamp(dt1).is_satisfied(&Witness::Timestamp(dt2)));
assert!(!Condition::Timestamp(dt2).is_satisfied(&Witness::Timestamp(dt1)));
let from = PublicKey::default();
assert!(Condition::Timestamp(dt1, from).is_satisfied(&Witness::Timestamp(dt1), &from));
assert!(Condition::Timestamp(dt1, from).is_satisfied(&Witness::Timestamp(dt2), &from));
assert!(!Condition::Timestamp(dt2, from).is_satisfied(&Witness::Timestamp(dt1), &from));
}
#[test]
@@ -134,7 +143,7 @@ mod tests {
let to = PublicKey::default();
assert!(Budget::new_payment(42, to).verify(42));
assert!(Budget::new_authorized_payment(from, 42, to).verify(42));
assert!(Budget::new_future_payment(dt, 42, to).verify(42));
assert!(Budget::new_future_payment(dt, from, 42, to).verify(42));
assert!(Budget::new_cancelable_future_payment(dt, from, 42, to).verify(42));
}
@@ -144,20 +153,35 @@ mod tests {
let to = PublicKey::default();
let mut budget = Budget::new_authorized_payment(from, 42, to);
budget.apply_witness(&Witness::Signature(from));
budget.apply_witness(&Witness::Signature, &from);
assert_eq!(budget, Budget::new_payment(42, to));
}
#[test]
fn test_future_payment() {
let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
let to = PublicKey::default();
let from = KeyPair::new().pubkey();
let to = KeyPair::new().pubkey();
let mut budget = Budget::new_future_payment(dt, 42, to);
budget.apply_witness(&Witness::Timestamp(dt));
let mut budget = Budget::new_future_payment(dt, from, 42, to);
budget.apply_witness(&Witness::Timestamp(dt), &from);
assert_eq!(budget, Budget::new_payment(42, to));
}
#[test]
fn test_unauthorized_future_payment() {
// Ensure timestamp will only be acknowledged if it came from the
// whitelisted public key.
let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
let from = KeyPair::new().pubkey();
let to = KeyPair::new().pubkey();
let mut budget = Budget::new_future_payment(dt, from, 42, to);
let orig_budget = budget.clone();
budget.apply_witness(&Witness::Timestamp(dt), &to); // <-- Attack!
assert_eq!(budget, orig_budget);
}
#[test]
fn test_cancelable_future_payment() {
let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
@@ -165,11 +189,11 @@ mod tests {
let to = PublicKey::default();
let mut budget = Budget::new_cancelable_future_payment(dt, from, 42, to);
budget.apply_witness(&Witness::Timestamp(dt));
budget.apply_witness(&Witness::Timestamp(dt), &from);
assert_eq!(budget, Budget::new_payment(42, to));
let mut budget = Budget::new_cancelable_future_payment(dt, from, 42, to);
budget.apply_witness(&Witness::Signature(from));
budget.apply_witness(&Witness::Signature, &from);
assert_eq!(budget, Budget::new_payment(42, from));
}
}


@@ -0,0 +1,333 @@
use crdt::{CrdtError, NodeInfo};
use rand::distributions::{Distribution, Weighted, WeightedChoice};
use rand::thread_rng;
use result::Result;
use signature::PublicKey;
use std;
use std::collections::HashMap;
pub const DEFAULT_WEIGHT: u32 = 1;
pub trait ChooseGossipPeerStrategy {
fn choose_peer<'a>(&self, options: Vec<&'a NodeInfo>) -> Result<&'a NodeInfo>;
}
pub struct ChooseRandomPeerStrategy<'a> {
random: &'a Fn() -> u64,
}
// Given a source of randomness "random", this strategy will randomly pick a validator
// from the input options. This strategy works in isolation, but doesn't leverage any
// rumors from the rest of the gossip network to make more informed decisions about
// which validators have more/less updates
impl<'a, 'b> ChooseRandomPeerStrategy<'a> {
pub fn new(random: &'a Fn() -> u64) -> Self {
ChooseRandomPeerStrategy { random }
}
}
impl<'a> ChooseGossipPeerStrategy for ChooseRandomPeerStrategy<'a> {
fn choose_peer<'b>(&self, options: Vec<&'b NodeInfo>) -> Result<&'b NodeInfo> {
if options.is_empty() {
Err(CrdtError::NoPeers)?;
}
let n = ((self.random)() as usize) % options.len();
Ok(options[n])
}
}
// This strategy uses rumors accumulated from the rest of the network to weight
// the importance of communicating with a particular validator based on cumulative network
// perception of the number of updates the validator has to offer. A validator is randomly
// picked based on a weighted sample from the pool of viable choices. The "weight", w, of a
// particular validator "v" is calculated as follows:
//
// w = [Sum for all i in I_v: (rumor_v(i) - observed(v)) * stake(i)] /
// [Sum for all i in I_v: Sum(stake(i))]
//
// where I_v is the set of all validators that returned a rumor about the update_index of
// validator "v", stake(i) is the size of the stake of validator "i", observed(v) is the
// observed update_index from the last direct communication with validator "v", and
// rumor_v(i) is the rumored update_index of validator "v" propagated by fellow validator "i".
// This could be a problem if there are validators with large stakes lying about their
// observed updates. There could also be a problem in network partitions, or even just
// when certain validators are disproportionately active, where we hear more rumors about
// certain clusters of nodes that then propagate more rumors about each other. Hopefully
// this can be resolved with a good baseline DEFAULT_WEIGHT, or by implementing lockout
// periods for very active validators in the future.
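// A worked instance of the formula above (illustrative numbers, not part of
// this diff): suppose validators i1 and i2 both returned rumors about "v",
// with rumor_v(i1) = 12, rumor_v(i2) = 8, stake(i1) = 3.0, stake(i2) = 1.0,
// and observed(v) = 6. Then
//   w = ((12 - 6) * 3.0 + (8 - 6) * 1.0) / (3.0 + 1.0) = 20 / 4 = 5
// and calculate_weighted_remote_index() below would report 5 + DEFAULT_WEIGHT
// after truncating the weighted vote to u32.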
pub struct ChooseWeightedPeerStrategy<'a> {
// The map of last directly observed update_index for each active validator.
// This is how we get observed(v) from the formula above.
remote: &'a HashMap<PublicKey, u64>,
// The map of rumored update_index for each active validator. Using the formula above,
// to find rumor_v(i), we would first look up "v" in the outer map, then look up
// "i" in the inner map, i.e. look up external_liveness[v][i]
external_liveness: &'a HashMap<PublicKey, HashMap<PublicKey, u64>>,
// A function returning the size of the stake for a particular validator, corresponds
// to stake(i) in the formula above.
get_stake: &'a Fn(PublicKey) -> f64,
}
impl<'a> ChooseWeightedPeerStrategy<'a> {
pub fn new(
remote: &'a HashMap<PublicKey, u64>,
external_liveness: &'a HashMap<PublicKey, HashMap<PublicKey, u64>>,
get_stake: &'a Fn(PublicKey) -> f64,
) -> Self {
ChooseWeightedPeerStrategy {
remote,
external_liveness,
get_stake,
}
}
fn calculate_weighted_remote_index(&self, peer_id: PublicKey) -> u32 {
let mut last_seen_index = 0;
// If the peer is not in our remote table, then we leave last_seen_index as zero.
// Only happens when a peer appears in our crdt.table but not in our crdt.remote,
// which means a validator was directly injected into our crdt.table
if let Some(index) = self.remote.get(&peer_id) {
last_seen_index = *index;
}
let liveness_entry = self.external_liveness.get(&peer_id);
if liveness_entry.is_none() {
return DEFAULT_WEIGHT;
}
let votes = liveness_entry.unwrap();
if votes.is_empty() {
return DEFAULT_WEIGHT;
}
// Calculate the weighted average of the rumors
let mut relevant_votes = vec![];
let total_stake = votes.iter().fold(0.0, |total_stake, (&id, &vote)| {
let stake = (self.get_stake)(id);
// If the total stake is going to overflow f64, pick
// the larger of either the current total_stake or the
// new stake; this way we are guaranteed to keep at least
// half of the stake seen so far in our weighted calculation
if std::f64::MAX - total_stake < stake {
if stake > total_stake {
relevant_votes = vec![(stake, vote)];
stake
} else {
total_stake
}
} else {
relevant_votes.push((stake, vote));
total_stake + stake
}
});
let weighted_vote = relevant_votes.iter().fold(0.0, |sum, &(stake, vote)| {
if vote < last_seen_index {
// This should never happen because we maintain the invariant that the indexes
// in the external_liveness table are always greater than the corresponding
// indexes in the remote table, if the index exists in the remote table at all.
// Case 1: Attempt to insert bigger index into the "external_liveness" table
// happens after an insertion into the "remote" table. In this case,
// (see apply_updates()) function, we prevent the insertion if the entry
// in the remote table >= the attempted insertion into the "external" liveness
// table.
// Case 2: Bigger index in the "external_liveness" table inserted before
// a smaller insertion into the "remote" table. We clear the corresponding
// "external_liveness" table entry on all insertions into the "remote" table
// See apply_updates() function.
warn!("weighted peer index was smaller than local entry in remote table");
return sum;
}
let vote_difference = (vote - last_seen_index) as f64;
let new_weight = vote_difference * (stake / total_stake);
if std::f64::MAX - sum < new_weight {
return f64::max(new_weight, sum);
}
sum + new_weight
});
// Return u32 b/c the weighted sampling API from rand::distributions
// only takes u32 for weights
if weighted_vote >= f64::from(std::u32::MAX) {
return std::u32::MAX;
}
// If the weighted rumors we've heard about aren't any greater than
// what we've directly learned from the last time we communicated with the
// peer (i.e. weighted_vote == 0), then return a weight of 1.
// Otherwise, return the calculated weight.
weighted_vote as u32 + DEFAULT_WEIGHT
}
}
impl<'a> ChooseGossipPeerStrategy for ChooseWeightedPeerStrategy<'a> {
fn choose_peer<'b>(&self, options: Vec<&'b NodeInfo>) -> Result<&'b NodeInfo> {
if options.is_empty() {
Err(CrdtError::NoPeers)?;
}
let mut weighted_peers = vec![];
for peer in options {
let weight = self.calculate_weighted_remote_index(peer.id);
weighted_peers.push(Weighted { weight, item: peer });
}
let mut rng = thread_rng();
Ok(WeightedChoice::new(&mut weighted_peers).sample(&mut rng))
}
}
#[cfg(test)]
mod tests {
use choose_gossip_peer_strategy::{ChooseWeightedPeerStrategy, DEFAULT_WEIGHT};
use logger;
use signature::{KeyPair, KeyPairUtil, PublicKey};
use std;
use std::collections::HashMap;
fn get_stake(_id: PublicKey) -> f64 {
1.0
}
#[test]
fn test_default() {
logger::setup();
// Initialize the filler keys
let key1 = KeyPair::new().pubkey();
let remote: HashMap<PublicKey, u64> = HashMap::new();
let external_liveness: HashMap<PublicKey, HashMap<PublicKey, u64>> = HashMap::new();
let weighted_strategy =
ChooseWeightedPeerStrategy::new(&remote, &external_liveness, &get_stake);
// If external_liveness table doesn't contain this entry,
// return the default weight
let result = weighted_strategy.calculate_weighted_remote_index(key1);
assert_eq!(result, DEFAULT_WEIGHT);
}
#[test]
fn test_only_external_liveness() {
logger::setup();
// Initialize the filler keys
let key1 = KeyPair::new().pubkey();
let key2 = KeyPair::new().pubkey();
let remote: HashMap<PublicKey, u64> = HashMap::new();
let mut external_liveness: HashMap<PublicKey, HashMap<PublicKey, u64>> = HashMap::new();
// If only the liveness table contains the entry, should return the
// weighted liveness entries
let test_value: u32 = 5;
let mut rumors: HashMap<PublicKey, u64> = HashMap::new();
rumors.insert(key2, test_value as u64);
external_liveness.insert(key1, rumors);
let weighted_strategy =
ChooseWeightedPeerStrategy::new(&remote, &external_liveness, &get_stake);
let result = weighted_strategy.calculate_weighted_remote_index(key1);
assert_eq!(result, test_value + DEFAULT_WEIGHT);
}
#[test]
fn test_overflow_votes() {
logger::setup();
// Initialize the filler keys
let key1 = KeyPair::new().pubkey();
let key2 = KeyPair::new().pubkey();
let remote: HashMap<PublicKey, u64> = HashMap::new();
let mut external_liveness: HashMap<PublicKey, HashMap<PublicKey, u64>> = HashMap::new();
// If the vote index is greater than u32::MAX, default to u32::MAX
let test_value = (std::u32::MAX as u64) + 10;
let mut rumors: HashMap<PublicKey, u64> = HashMap::new();
rumors.insert(key2, test_value);
external_liveness.insert(key1, rumors);
let weighted_strategy =
ChooseWeightedPeerStrategy::new(&remote, &external_liveness, &get_stake);
let result = weighted_strategy.calculate_weighted_remote_index(key1);
assert_eq!(result, std::u32::MAX);
}
#[test]
fn test_many_validators() {
logger::setup();
// Initialize the filler keys
let key1 = KeyPair::new().pubkey();
let mut remote: HashMap<PublicKey, u64> = HashMap::new();
let mut external_liveness: HashMap<PublicKey, HashMap<PublicKey, u64>> = HashMap::new();
// Test many validators' rumors in external_liveness
let num_peers = 10;
let mut rumors: HashMap<PublicKey, u64> = HashMap::new();
remote.insert(key1, 0);
for i in 0..num_peers {
let pk = KeyPair::new().pubkey();
rumors.insert(pk, i);
}
external_liveness.insert(key1, rumors);
let weighted_strategy =
ChooseWeightedPeerStrategy::new(&remote, &external_liveness, &get_stake);
let result = weighted_strategy.calculate_weighted_remote_index(key1);
assert_eq!(result, (num_peers / 2) as u32);
}
#[test]
fn test_many_validators2() {
logger::setup();
// Initialize the filler keys
let key1 = KeyPair::new().pubkey();
let mut remote: HashMap<PublicKey, u64> = HashMap::new();
let mut external_liveness: HashMap<PublicKey, HashMap<PublicKey, u64>> = HashMap::new();
// Test many validators' rumors in external_liveness
let num_peers = 10;
let old_index = 20;
let mut rumors: HashMap<PublicKey, u64> = HashMap::new();
remote.insert(key1, old_index);
for _i in 0..num_peers {
let pk = KeyPair::new().pubkey();
rumors.insert(pk, old_index);
}
external_liveness.insert(key1, rumors);
let weighted_strategy =
ChooseWeightedPeerStrategy::new(&remote, &external_liveness, &get_stake);
let result = weighted_strategy.calculate_weighted_remote_index(key1);
// If nobody has seen a newer update then revert to default
assert_eq!(result, DEFAULT_WEIGHT);
}
}


@@ -1,13 +1,19 @@
use influx_db_client as influxdb;
use metrics;
use std::env;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::time::Duration;
use timing;
const DEFAULT_METRICS_RATE: usize = 100;
pub struct Counter {
pub name: &'static str,
/// total accumulated value
pub counts: AtomicUsize,
pub nanos: AtomicUsize,
pub times: AtomicUsize,
pub lograte: usize,
/// last accumulated value logged
pub lastlog: AtomicUsize,
pub lograte: AtomicUsize,
}
macro_rules! create_counter {
@@ -15,55 +21,135 @@ macro_rules! create_counter {
Counter {
name: $name,
counts: AtomicUsize::new(0),
nanos: AtomicUsize::new(0),
times: AtomicUsize::new(0),
lograte: $lograte,
lastlog: AtomicUsize::new(0),
lograte: AtomicUsize::new($lograte),
}
};
}
macro_rules! inc_counter {
($name:expr, $count:expr, $start:expr) => {
unsafe { $name.inc($count, $start.elapsed()) };
($name:expr, $count:expr) => {
unsafe { $name.inc($count) };
};
}
macro_rules! inc_new_counter {
($name:expr, $count:expr) => {{
static mut INC_NEW_COUNTER: Counter = create_counter!($name, 0);
inc_counter!(INC_NEW_COUNTER, $count);
}};
($name:expr, $count:expr, $lograte:expr) => {{
static mut INC_NEW_COUNTER: Counter = create_counter!($name, $lograte);
inc_counter!(INC_NEW_COUNTER, $count);
}};
}
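// Illustrative usage of the macros above (the counter names and counts are
// hypothetical, not part of this diff): count events with the default log
// rate, or pass an explicit rate as the third argument.
//
//   inc_new_counter!("streamer-recv-packets", num_packets);
//   inc_new_counter!("streamer-recv-errors", 1, 1000);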
impl Counter {
pub fn inc(&mut self, events: usize, dur: Duration) {
let total = dur.as_secs() * 1_000_000_000 + dur.subsec_nanos() as u64;
fn default_log_rate() -> usize {
let v = env::var("SOLANA_DEFAULT_METRICS_RATE")
.map(|x| x.parse().unwrap_or(DEFAULT_METRICS_RATE))
.unwrap_or(DEFAULT_METRICS_RATE);
if v == 0 {
DEFAULT_METRICS_RATE
} else {
v
}
}
pub fn inc(&mut self, events: usize) {
let counts = self.counts.fetch_add(events, Ordering::Relaxed);
let nanos = self.nanos.fetch_add(total as usize, Ordering::Relaxed);
let times = self.times.fetch_add(1, Ordering::Relaxed);
if times % self.lograte == 0 && times > 0 {
let mut lograte = self.lograte.load(Ordering::Relaxed);
if lograte == 0 {
lograte = Counter::default_log_rate();
self.lograte.store(lograte, Ordering::Relaxed);
}
if times % lograte == 0 && times > 0 {
let lastlog = self.lastlog.load(Ordering::Relaxed);
info!(
"COUNTER:{{\"name\": \"{}\", \"counts\": {}, \"nanos\": {}, \"samples\": {}, \"rate\": {}, \"now\": {}}}",
"COUNTER:{{\"name\": \"{}\", \"counts\": {}, \"samples\": {}, \"now\": {}}}",
self.name,
counts,
nanos,
times,
counts as f64 * 1e9 / nanos as f64,
timing::timestamp(),
);
metrics::submit(
influxdb::Point::new(&format!("counter-{}", self.name))
.add_field(
"count",
influxdb::Value::Integer(counts as i64 - lastlog as i64),
)
.to_owned(),
);
self.lastlog
.compare_and_swap(lastlog, counts, Ordering::Relaxed);
}
}
}
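// Illustrative log line emitted by inc() above once `times` reaches a
// multiple of `lograte` (values hypothetical):
//
//   COUNTER:{"name": "test", "counts": 400, "samples": 200, "now": 1531872000000}
//
// alongside an InfluxDB point named "counter-test" whose "count" field is
// the delta in counts since `lastlog`.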
#[cfg(test)]
mod tests {
use counter::Counter;
use counter::{Counter, DEFAULT_METRICS_RATE};
use std::env;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::time::Instant;
#[test]
fn test_counter() {
static mut COUNTER: Counter = create_counter!("test", 100);
let start = Instant::now();
let count = 1;
inc_counter!(COUNTER, count, start);
inc_counter!(COUNTER, count);
unsafe {
assert_eq!(COUNTER.counts.load(Ordering::Relaxed), 1);
assert_ne!(COUNTER.nanos.load(Ordering::Relaxed), 0);
assert_eq!(COUNTER.times.load(Ordering::Relaxed), 1);
assert_eq!(COUNTER.lograte, 100);
assert_eq!(COUNTER.lograte.load(Ordering::Relaxed), 100);
assert_eq!(COUNTER.lastlog.load(Ordering::Relaxed), 0);
assert_eq!(COUNTER.name, "test");
}
for _ in 0..199 {
inc_counter!(COUNTER, 2);
}
unsafe {
assert_eq!(COUNTER.lastlog.load(Ordering::Relaxed), 199);
}
inc_counter!(COUNTER, 2);
unsafe {
assert_eq!(COUNTER.lastlog.load(Ordering::Relaxed), 399);
}
}
#[test]
fn test_inc_new_counter() {
//make sure that macros are syntactically correct
//the variable is internal to the macro scope so there is no way to introspect it
inc_new_counter!("counter-1", 1);
inc_new_counter!("counter-2", 1, 2);
}
#[test]
fn test_lograte() {
static mut COUNTER: Counter = create_counter!("test_lograte", 0);
inc_counter!(COUNTER, 2);
unsafe {
assert_eq!(
COUNTER.lograte.load(Ordering::Relaxed),
DEFAULT_METRICS_RATE
);
}
}
#[test]
fn test_lograte_env() {
assert_ne!(DEFAULT_METRICS_RATE, 0);
static mut COUNTER: Counter = create_counter!("test_lograte_env", 0);
env::set_var("SOLANA_DEFAULT_METRICS_RATE", "50");
inc_counter!(COUNTER, 2);
unsafe {
assert_eq!(COUNTER.lograte.load(Ordering::Relaxed), 50);
}
static mut COUNTER2: Counter = create_counter!("test_lograte_env", 0);
env::set_var("SOLANA_DEFAULT_METRICS_RATE", "0");
inc_counter!(COUNTER2, 2);
unsafe {
assert_eq!(
COUNTER2.lograte.load(Ordering::Relaxed),
DEFAULT_METRICS_RATE
);
}
}
}

(File diff suppressed because it is too large.)


@@ -4,6 +4,8 @@
//! checking requests against a request cap for a given time slice
//! and (to come) an IP rate limit.
use influx_db_client as influxdb;
use metrics;
use signature::{KeyPair, PublicKey};
use std::io;
use std::io::{Error, ErrorKind};
@@ -13,9 +15,9 @@ use thin_client::ThinClient;
use transaction::Transaction;
pub const TIME_SLICE: u64 = 60;
pub const REQUEST_CAP: u64 = 150_000;
pub const REQUEST_CAP: u64 = 1_000_000;
#[derive(Serialize, Deserialize, Debug)]
#[derive(Serialize, Deserialize, Debug, Clone, Copy)]
pub enum DroneRequest {
GetAirdrop {
airdrop_request_amount: u64,
@@ -110,7 +112,11 @@ impl Drone {
airdrop_request_amount,
client_public_key,
} => {
request_amount = airdrop_request_amount.clone();
info!(
"Requesting airdrop of {} to {:?}",
airdrop_request_amount, client_public_key
);
request_amount = airdrop_request_amount;
tx = Transaction::new(
&self.mint_keypair,
client_public_key,
@@ -121,21 +127,41 @@ impl Drone {
}
if self.check_request_limit(request_amount) {
self.request_current += request_amount;
client.transfer_signed(tx)
metrics::submit(
influxdb::Point::new("drone")
.add_tag("op", influxdb::Value::String("airdrop".to_string()))
.add_field(
"request_amount",
influxdb::Value::Integer(request_amount as i64),
)
.add_field(
"request_current",
influxdb::Value::Integer(self.request_current as i64),
)
.to_owned(),
);
client.transfer_signed(&tx)
} else {
Err(Error::new(ErrorKind::Other, "token limit reached"))
}
}
}
impl Drop for Drone {
fn drop(&mut self) {
metrics::flush();
}
}
#[cfg(test)]
mod tests {
use bank::Bank;
use crdt::{get_ip_addr, TestNode};
use drone::{Drone, DroneRequest, REQUEST_CAP, TIME_SLICE};
use fullnode::FullNode;
use logger;
use mint::Mint;
use server::Server;
use service::Service;
use signature::{KeyPair, KeyPairUtil};
use std::io::sink;
use std::net::{SocketAddr, UdpSocket};
@@ -233,31 +259,31 @@ mod tests {
}
#[test]
#[ignore]
fn test_send_airdrop() {
const SMALL_BATCH: i64 = 50;
const TPS_BATCH: i64 = 5_000_000;
logger::setup();
let leader = TestNode::new();
let leader = TestNode::new_localhost();
let alice = Mint::new(10_000_000);
let bank = Bank::new(&alice);
let bob_pubkey = KeyPair::new().pubkey();
let carlos_pubkey = KeyPair::new().pubkey();
let exit = Arc::new(AtomicBool::new(false));
let leader_data = leader.data.clone();
let server = Server::new_leader(
let server = FullNode::new_leader(
bank,
0,
None,
Some(Duration::from_millis(30)),
leader.data.clone(),
leader.sockets.requests,
leader.sockets.transaction,
leader.sockets.broadcast,
leader.sockets.respond,
leader.sockets.gossip,
leader,
exit.clone(),
sink(),
);
//TODO: this seems unstable
sleep(Duration::from_millis(900));
let mut addr: SocketAddr = "0.0.0.0:9900".parse().expect("bind to drone socket");
@@ -265,10 +291,10 @@ mod tests {
let mut drone = Drone::new(
alice.keypair(),
addr,
leader.data.transactions_addr,
leader.data.requests_addr,
leader_data.contact_info.tpu,
leader_data.contact_info.rpu,
None,
Some(5_000_050),
Some(150_000),
);
let bob_req = DroneRequest::GetAirdrop {
@@ -290,9 +316,9 @@ mod tests {
UdpSocket::bind("0.0.0.0:0").expect("drone bind to transactions socket");
let mut client = ThinClient::new(
leader.data.requests_addr,
leader_data.contact_info.rpu,
requests_socket,
leader.data.transactions_addr,
leader_data.contact_info.tpu,
transactions_socket,
);
@@ -305,8 +331,6 @@ mod tests {
assert_eq!(carlos_balance.unwrap(), TPS_BATCH);
exit.store(true, Ordering::Relaxed);
for t in server.thread_hdls {
t.join().unwrap();
}
server.join().unwrap();
}
}


@@ -35,29 +35,57 @@ pub struct Entry {
/// generated. They may have been observed before a previous Entry ID but were
/// pushed back into this list to ensure deterministic interpretation of the ledger.
pub transactions: Vec<Transaction>,
/// Indication that:
/// 1. the next Entry in the ledger has transactions that can potentially
/// be verified in parallel with these transactions
/// 2. this Entry can be left out of the bank's entry_id cache for
/// purposes of duplicate rejection
pub has_more: bool,
/// Erasure requires that Entry be a multiple of 4 bytes in size
pad: [u8; 3],
}
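// Illustrative consequence of `has_more`, as exercised by
// test_dont_register_partial_entries in entry_writer.rs below: a transaction
// batch too large for one blob is split into [e0, e1] where e0.has_more is
// true, and only e1.id is registered with the bank for duplicate rejection.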
impl Entry {
/// Creates the next Entry `num_hashes` after `start_hash`.
pub fn new(start_hash: &Hash, cur_hashes: u64, transactions: Vec<Transaction>) -> Self {
pub fn new(
start_hash: &Hash,
cur_hashes: u64,
transactions: Vec<Transaction>,
has_more: bool,
) -> Self {
let num_hashes = cur_hashes + if transactions.is_empty() { 0 } else { 1 };
let id = next_hash(start_hash, 0, &transactions);
let entry = Entry {
num_hashes,
id,
transactions,
has_more,
pad: [0, 0, 0],
};
assert!(serialized_size(&entry).unwrap() <= BLOB_DATA_SIZE as u64);
entry
}
pub fn will_fit(transactions: Vec<Transaction>) -> bool {
serialized_size(&Entry {
num_hashes: 0,
id: Hash::default(),
transactions,
has_more: false,
pad: [0, 0, 0],
}).unwrap() <= BLOB_DATA_SIZE as u64
}
/// Creates the next Tick Entry `num_hashes` after `start_hash`.
pub fn new_mut(
start_hash: &mut Hash,
cur_hashes: &mut u64,
transactions: Vec<Transaction>,
has_more: bool,
) -> Self {
let entry = Self::new(start_hash, *cur_hashes, transactions);
let entry = Self::new(start_hash, *cur_hashes, transactions, has_more);
*start_hash = entry.id;
*cur_hashes = 0;
assert!(serialized_size(&entry).unwrap() <= BLOB_DATA_SIZE as u64);
@@ -71,6 +99,8 @@ impl Entry {
num_hashes,
id: *id,
transactions: vec![],
has_more: false,
pad: [0, 0, 0],
}
}
@@ -114,11 +144,13 @@ fn next_hash(start_hash: &Hash, num_hashes: u64, transactions: &[Transaction]) -
/// Creates the next Tick or Transaction Entry `num_hashes` after `start_hash`.
pub fn next_entry(start_hash: &Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Entry {
assert!(num_hashes > 0 || transactions.len() == 0);
assert!(num_hashes > 0 || transactions.is_empty());
Entry {
num_hashes,
id: next_hash(start_hash, num_hashes, &transactions),
transactions,
has_more: false,
pad: [0, 0, 0],
}
}
@@ -149,7 +181,7 @@ mod tests {
let keypair = KeyPair::new();
let tx0 = Transaction::new(&keypair, keypair.pubkey(), 0, zero);
let tx1 = Transaction::new(&keypair, keypair.pubkey(), 1, zero);
let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()]);
let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()], false);
assert!(e0.verify(&zero));
// Next, swap two transactions and ensure verification fails.
@@ -166,7 +198,7 @@ mod tests {
let keypair = KeyPair::new();
let tx0 = Transaction::new_timestamp(&keypair, Utc::now(), zero);
let tx1 = Transaction::new_signature(&keypair, Default::default(), zero);
let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()]);
let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()], false);
assert!(e0.verify(&zero));
// Next, swap two witness transactions and ensure verification fails.


@@ -4,79 +4,121 @@
use bank::Bank;
use entry::Entry;
use ledger::Block;
use packet;
use result::Result;
use serde_json;
use std::collections::VecDeque;
use std::io::sink;
use std::io::Write;
use std::sync::mpsc::Receiver;
use std::sync::{Arc, Mutex};
use std::time::Duration;
use streamer;
use std::io::{self, BufRead, Cursor, Error, ErrorKind, Write};
pub struct EntryWriter<'a> {
pub struct EntryWriter<'a, W> {
bank: &'a Bank,
writer: W,
}
impl<'a> EntryWriter<'a> {
impl<'a, W: Write> EntryWriter<'a, W> {
/// Create a new Tpu that wraps the given Bank.
pub fn new(bank: &'a Bank) -> Self {
EntryWriter { bank }
pub fn new(bank: &'a Bank, writer: W) -> Self {
EntryWriter { bank, writer }
}
fn write_entry<W: Write>(&self, writer: &Mutex<W>, entry: &Entry) {
trace!("write_entry entry");
self.bank.register_entry_id(&entry.id);
writeln!(
writer.lock().expect("'writer' lock in fn write_entry"),
"{}",
serde_json::to_string(&entry).expect("'entry' to_string in fn write_entry")
).expect("writeln! in fn write_entry");
fn write_entry(writer: &mut W, entry: &Entry) -> io::Result<()> {
let serialized = serde_json::to_string(entry).unwrap();
writeln!(writer, "{}", serialized)
}
fn write_entries<W: Write>(
&self,
writer: &Mutex<W>,
entry_receiver: &Receiver<Entry>,
) -> Result<Vec<Entry>> {
//TODO implement a serialize for channel that does this without allocations
let mut l = vec![];
let entry = entry_receiver.recv_timeout(Duration::new(1, 0))?;
self.write_entry(writer, &entry);
l.push(entry);
while let Ok(entry) = entry_receiver.try_recv() {
self.write_entry(writer, &entry);
l.push(entry);
}
Ok(l)
}
/// Process any Entry items that have been published by the Historian.
/// continuously broadcast blobs of entries out
pub fn write_and_send_entries<W: Write>(
&self,
broadcast: &streamer::BlobSender,
blob_recycler: &packet::BlobRecycler,
writer: &Mutex<W>,
entry_receiver: &Receiver<Entry>,
) -> Result<()> {
let mut q = VecDeque::new();
let list = self.write_entries(writer, entry_receiver)?;
trace!("New blobs? {}", list.len());
list.to_blobs(blob_recycler, &mut q);
if !q.is_empty() {
trace!("broadcasting {}", q.len());
broadcast.send(q)?;
pub fn write_entries<I>(writer: &mut W, entries: I) -> io::Result<()>
where
I: IntoIterator<Item = Entry>,
{
for entry in entries {
Self::write_entry(writer, &entry)?;
}
Ok(())
}
/// Process any Entry items that have been published by the Historian.
/// continuously broadcast blobs of entries out
pub fn drain_entries(&self, entry_receiver: &Receiver<Entry>) -> Result<()> {
self.write_entries(&Arc::new(Mutex::new(sink())), entry_receiver)?;
fn write_and_register_entry(&mut self, entry: &Entry) -> io::Result<()> {
trace!("write_and_register_entry entry");
if !entry.has_more {
self.bank.register_entry_id(&entry.id);
}
Self::write_entry(&mut self.writer, entry)
}
pub fn write_and_register_entries(&mut self, entries: &[Entry]) -> io::Result<()> {
for entry in entries {
self.write_and_register_entry(&entry)?;
}
Ok(())
}
}
/// Parse a string containing an Entry.
pub fn read_entry(s: &str) -> io::Result<Entry> {
serde_json::from_str(s).map_err(|e| Error::new(ErrorKind::Other, e.to_string()))
}
/// Return an iterator for all the entries in the given file.
pub fn read_entries<R: BufRead>(reader: R) -> impl Iterator<Item = io::Result<Entry>> {
reader.lines().map(|s| read_entry(&s?))
}
/// Same as read_entries() but returning a vector. Handy for debugging short logs.
pub fn read_entries_to_vec<R: BufRead>(reader: R) -> io::Result<Vec<Entry>> {
let mut result = vec![];
for x in read_entries(reader) {
result.push(x?);
}
Ok(result)
}
/// Same as read_entries() but parsing a string and returning a vector.
pub fn read_entries_from_str(s: &str) -> io::Result<Vec<Entry>> {
read_entries_to_vec(Cursor::new(s))
}
#[cfg(test)]
mod tests {
use super::*;
use ledger;
use mint::Mint;
use packet::BLOB_DATA_SIZE;
use signature::{KeyPair, KeyPairUtil};
use std::str;
use transaction::Transaction;
#[test]
fn test_dont_register_partial_entries() {
let mint = Mint::new(1);
let bank = Bank::new(&mint);
let writer = io::sink();
let mut entry_writer = EntryWriter::new(&bank, writer);
let keypair = KeyPair::new();
let tx = Transaction::new(&mint.keypair(), keypair.pubkey(), 1, mint.last_id());
// NOTE: if Entry grows to be larger than a transaction, the code below falls over
let threshold = (BLOB_DATA_SIZE / 256) - 1; // 256 is transaction size
// Verify large entries are split up and the first sets has_more.
let txs = vec![tx.clone(); threshold * 2];
let entries = ledger::next_entries(&mint.last_id(), 0, txs);
assert_eq!(entries.len(), 2);
assert!(entries[0].has_more);
assert!(!entries[1].has_more);
// Verify that write_and_register_entry doesn't register the first entries after a split.
assert_eq!(bank.last_id(), mint.last_id());
entry_writer.write_and_register_entry(&entries[0]).unwrap();
assert_eq!(bank.last_id(), mint.last_id());
// Verify that write_and_register_entry registers the final entry after a split.
entry_writer.write_and_register_entry(&entries[1]).unwrap();
assert_eq!(bank.last_id(), entries[1].id);
}
#[test]
fn test_read_entries_from_str() {
let mint = Mint::new(1);
let mut buf = vec![];
EntryWriter::write_entries(&mut buf, mint.create_entries()).unwrap();
let entries = read_entries_from_str(str::from_utf8(&buf).unwrap()).unwrap();
assert_eq!(entries, mint.create_entries());
}
}


@@ -523,7 +523,7 @@ mod test {
erasure::add_coding_blobs(blob_recycler, &mut blobs, offset as u64);
let blobs_len = blobs.len();
let d = crdt::ReplicatedData::new(
let d = crdt::NodeInfo::new(
KeyPair::new().pubkey(),
"127.0.0.1:1234".parse().unwrap(),
"127.0.0.1:1235".parse().unwrap(),
@@ -531,9 +531,7 @@ mod test {
"127.0.0.1:1237".parse().unwrap(),
"127.0.0.1:1238".parse().unwrap(),
);
let crdt = Arc::new(RwLock::new(crdt::Crdt::new(d.clone())));
assert!(crdt::Crdt::index_blobs(&crdt, &blobs, &mut (offset as u64)).is_ok());
assert!(crdt::Crdt::index_blobs(&d, &blobs, &mut (offset as u64)).is_ok());
for b in blobs {
let idx = b.read().unwrap().get_index().unwrap() as usize;
window[idx] = Some(b);


@@ -1,31 +1,32 @@
//! The `fetch_stage` batches input from a UDP socket and sends it to a channel.
use packet;
use packet::PacketRecycler;
use service::Service;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread::JoinHandle;
use streamer;
use std::thread::{self, JoinHandle};
use streamer::{self, PacketReceiver};
pub struct FetchStage {
pub packet_receiver: streamer::PacketReceiver,
pub thread_hdls: Vec<JoinHandle<()>>,
exit: Arc<AtomicBool>,
thread_hdls: Vec<JoinHandle<()>>,
}
impl FetchStage {
pub fn new(
socket: UdpSocket,
exit: Arc<AtomicBool>,
packet_recycler: packet::PacketRecycler,
) -> Self {
packet_recycler: &PacketRecycler,
) -> (Self, PacketReceiver) {
Self::new_multi_socket(vec![socket], exit, packet_recycler)
}
pub fn new_multi_socket(
sockets: Vec<UdpSocket>,
exit: Arc<AtomicBool>,
packet_recycler: packet::PacketRecycler,
) -> Self {
packet_recycler: &PacketRecycler,
) -> (Self, PacketReceiver) {
let (packet_sender, packet_receiver) = channel();
let thread_hdls: Vec<_> = sockets
.into_iter()
@@ -39,9 +40,23 @@ impl FetchStage {
})
.collect();
FetchStage {
packet_receiver,
thread_hdls,
}
(FetchStage { exit, thread_hdls }, packet_receiver)
}
pub fn close(&self) {
self.exit.store(true, Ordering::Relaxed);
}
}
impl Service for FetchStage {
fn thread_hdls(self) -> Vec<JoinHandle<()>> {
self.thread_hdls
}
fn join(self) -> thread::Result<()> {
for thread_hdl in self.thread_hdls() {
thread_hdl.join()?;
}
Ok(())
}
}

src/fullnode.rs (new file)

@@ -0,0 +1,385 @@
//! The `fullnode` module hosts all the fullnode microservices.
use bank::Bank;
use crdt::{Crdt, NodeInfo, TestNode};
use entry::Entry;
use entry_writer;
use ledger::Block;
use ncp::Ncp;
use packet::BlobRecycler;
use rpu::Rpu;
use service::Service;
use signature::{KeyPair, KeyPairUtil};
use std::collections::VecDeque;
use std::fs::{File, OpenOptions};
use std::io::{stdin, stdout, BufReader};
use std::io::{Read, Write};
use std::net::SocketAddr;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
use std::thread::{JoinHandle, Result};
use std::time::Duration;
use streamer;
use tpu::Tpu;
use tvu::Tvu;
use untrusted::Input;
//use std::time::Duration;
pub struct FullNode {
exit: Arc<AtomicBool>,
thread_hdls: Vec<JoinHandle<()>>,
}
pub enum LedgerFile {
StdInOut,
Path(String),
}
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
/// Fullnode configuration to be stored in file
pub struct Config {
pub node_info: NodeInfo,
pkcs8: Vec<u8>,
}
/// Structure to be replicated by the network
impl Config {
pub fn new(bind_addr: &SocketAddr, pkcs8: Vec<u8>) -> Self {
let keypair =
KeyPair::from_pkcs8(Input::from(&pkcs8)).expect("from_pkcs8 in fullnode::Config new");
let pubkey = keypair.pubkey();
let node_info = NodeInfo::new_leader_with_pubkey(pubkey, bind_addr);
Config { node_info, pkcs8 }
}
pub fn keypair(&self) -> KeyPair {
KeyPair::from_pkcs8(Input::from(&self.pkcs8))
.expect("from_pkcs8 in fullnode::Config keypair")
}
}
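// Illustrative on-disk shape of this Config (values hypothetical): serde_json
// writes {"node_info": { ... }, "pkcs8": [48, 83, ...]}, the same format that
// `fullnode --identity` and the wallet's read_leader() parse above.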
impl FullNode {
pub fn new(
mut node: TestNode,
leader: bool,
ledger: LedgerFile,
keypair_for_validator: Option<KeyPair>,
network_entry_for_validator: Option<SocketAddr>,
) -> FullNode {
info!("creating bank...");
let bank = Bank::default();
let (infile, outfile): (Box<Read>, Box<Write + Send>) = match ledger {
LedgerFile::Path(path) => (
Box::new(File::open(path.clone()).expect("opening ledger file")),
Box::new(
OpenOptions::new()
.create(true)
.append(true)
.open(path)
.expect("opening ledger file"),
),
),
LedgerFile::StdInOut => (Box::new(stdin()), Box::new(stdout())),
};
let reader = BufReader::new(infile);
let entries = entry_writer::read_entries(reader).map(|e| e.expect("failed to parse entry"));
info!("processing ledger...");
let (entry_height, ledger_tail) = bank.process_ledger(entries).expect("process_ledger");
// entry_height is the network-wide agreed height of the ledger.
// initialize it from the input ledger
info!("processed {} ledger...", entry_height);
info!("creating networking stack...");
let local_gossip_addr = node.sockets.gossip.local_addr().unwrap();
let local_requests_addr = node.sockets.requests.local_addr().unwrap();
info!(
"starting... local gossip address: {} (advertising {})",
local_gossip_addr, node.data.contact_info.ncp
);
let requests_addr = node.data.contact_info.rpu;
let exit = Arc::new(AtomicBool::new(false));
if !leader {
let testnet_addr = network_entry_for_validator.expect("validator requires entry");
let network_entry_point = NodeInfo::new_entry_point(testnet_addr);
let keypair = keypair_for_validator.expect("validator requires keypair");
let server = FullNode::new_validator(
keypair,
bank,
entry_height,
Some(ledger_tail),
node,
&network_entry_point,
exit.clone(),
);
info!(
"validator ready... local request address: {} (advertising {}) connected to: {}",
local_requests_addr, requests_addr, testnet_addr
);
server
} else {
node.data.leader_id = node.data.id;
let server = FullNode::new_leader(
bank,
entry_height,
Some(ledger_tail),
//Some(Duration::from_millis(1000)),
None,
node,
exit.clone(),
outfile,
);
info!(
"leader ready... local request address: {} (advertising {})",
local_requests_addr, requests_addr
);
server
}
}
fn new_window(
ledger_tail: Option<Vec<Entry>>,
entry_height: u64,
crdt: &Arc<RwLock<Crdt>>,
blob_recycler: &BlobRecycler,
) -> streamer::Window {
match ledger_tail {
Some(ledger_tail) => {
// convert to blobs
let mut blobs = VecDeque::new();
ledger_tail.to_blobs(&blob_recycler, &mut blobs);
// flatten deque to vec
let blobs: Vec<_> = blobs.into_iter().collect();
streamer::initialized_window(&crdt, blobs, entry_height)
}
None => streamer::default_window(),
}
}
/// Create a server instance acting as a leader.
///
/// ```text
/// .---------------------.
/// | Leader |
/// | |
/// .--------. | .-----. |
/// | |---->| | |
/// | Client | | | RPU | |
/// | |<----| | |
/// `----+---` | `-----` |
/// | | ^ |
/// | | | |
/// | | .--+---. |
/// | | | Bank | |
/// | | `------` |
/// | | ^ |
/// | | | | .------------.
/// | | .--+--. .-----. | | |
/// `-------->| TPU +-->| NCP +------>| Validators |
/// | `-----` `-----` | | |
/// | | `------------`
/// `---------------------`
/// ```
pub fn new_leader<W: Write + Send + 'static>(
bank: Bank,
entry_height: u64,
ledger_tail: Option<Vec<Entry>>,
tick_duration: Option<Duration>,
node: TestNode,
exit: Arc<AtomicBool>,
writer: W,
) -> Self {
let bank = Arc::new(bank);
let mut thread_hdls = vec![];
let rpu = Rpu::new(
&bank,
node.sockets.requests,
node.sockets.respond,
exit.clone(),
);
thread_hdls.extend(rpu.thread_hdls());
let blob_recycler = BlobRecycler::default();
let crdt = Arc::new(RwLock::new(Crdt::new(node.data).expect("Crdt::new")));
let (tpu, blob_receiver) = Tpu::new(
&bank,
&crdt,
tick_duration,
node.sockets.transaction,
&blob_recycler,
exit.clone(),
writer,
);
thread_hdls.extend(tpu.thread_hdls());
let window = FullNode::new_window(ledger_tail, entry_height, &crdt, &blob_recycler);
let ncp = Ncp::new(
&crdt,
window.clone(),
node.sockets.gossip,
node.sockets.gossip_send,
exit.clone(),
).expect("Ncp::new");
thread_hdls.extend(ncp.thread_hdls());
let t_broadcast = streamer::broadcaster(
node.sockets.broadcast,
crdt,
window,
entry_height,
blob_recycler.clone(),
blob_receiver,
);
thread_hdls.extend(vec![t_broadcast]);
FullNode { exit, thread_hdls }
}
/// Create a server instance acting as a validator.
///
/// ```text
/// .-------------------------------.
/// | Validator |
/// | |
/// .--------. | .-----. |
/// | |-------------->| | |
/// | Client | | | RPU | |
/// | |<--------------| | |
/// `--------` | `-----` |
/// | ^ |
/// | | |
/// | .--+---. |
/// | | Bank | |
/// | `------` |
/// | ^ |
/// .--------. | | | .------------.
/// | | | .--+--. | | |
/// | Leader |<------------->| TVU +<--------------->| |
/// | | | `-----` | | Validators |
/// | | | ^ | | |
/// | | | | | | |
/// | | | .--+--. | | |
/// | |<------------->| NCP +<--------------->| |
/// | | | `-----` | | |
/// `--------` | | `------------`
/// `-------------------------------`
/// ```
pub fn new_validator(
keypair: KeyPair,
bank: Bank,
entry_height: u64,
ledger_tail: Option<Vec<Entry>>,
node: TestNode,
entry_point: &NodeInfo,
exit: Arc<AtomicBool>,
) -> Self {
let bank = Arc::new(bank);
let mut thread_hdls = vec![];
let rpu = Rpu::new(
&bank,
node.sockets.requests,
node.sockets.respond,
exit.clone(),
);
thread_hdls.extend(rpu.thread_hdls());
let crdt = Arc::new(RwLock::new(Crdt::new(node.data).expect("Crdt::new")));
crdt.write()
.expect("'crdt' write lock before insert() in pub fn replicate")
.insert(&entry_point);
let blob_recycler = BlobRecycler::default();
let window = FullNode::new_window(ledger_tail, entry_height, &crdt, &blob_recycler);
let ncp = Ncp::new(
&crdt,
window.clone(),
node.sockets.gossip,
node.sockets.gossip_send,
exit.clone(),
).expect("Ncp::new");
let tvu = Tvu::new(
keypair,
&bank,
entry_height,
crdt.clone(),
window.clone(),
node.sockets.replicate,
node.sockets.repair,
node.sockets.retransmit,
exit.clone(),
);
thread_hdls.extend(tvu.thread_hdls());
thread_hdls.extend(ncp.thread_hdls());
FullNode { exit, thread_hdls }
}
//used for notifying many nodes in parallel to exit
pub fn exit(&self) {
self.exit.store(true, Ordering::Relaxed);
}
pub fn close(self) -> Result<()> {
self.exit();
self.join()
}
}
impl Service for FullNode {
fn thread_hdls(self) -> Vec<JoinHandle<()>> {
self.thread_hdls
}
fn join(self) -> Result<()> {
for thread_hdl in self.thread_hdls() {
thread_hdl.join()?;
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use bank::Bank;
use crdt::TestNode;
use fullnode::FullNode;
use mint::Mint;
use service::Service;
use signature::{KeyPair, KeyPairUtil};
use std::sync::atomic::AtomicBool;
use std::sync::Arc;
#[test]
fn validator_exit() {
let kp = KeyPair::new();
let tn = TestNode::new_localhost_with_pubkey(kp.pubkey());
let alice = Mint::new(10_000);
let bank = Bank::new(&alice);
let exit = Arc::new(AtomicBool::new(false));
let entry = tn.data.clone();
let v = FullNode::new_validator(kp, bank, 0, None, tn, &entry, exit);
v.exit();
v.join().unwrap();
}
#[test]
fn validator_parallel_exit() {
let vals: Vec<FullNode> = (0..2)
.map(|_| {
let kp = KeyPair::new();
let tn = TestNode::new_localhost_with_pubkey(kp.pubkey());
let alice = Mint::new(10_000);
let bank = Bank::new(&alice);
let exit = Arc::new(AtomicBool::new(false));
let entry = tn.data.clone();
FullNode::new_validator(kp, bank, 0, None, tn, &entry, exit)
})
.collect();
//each validator can exit in parallel to speed many sequential calls to `join`
vals.iter().for_each(|v| v.exit());
//while join is called sequentially, the above exit call notified all the
//validators to exit from all their threads
vals.into_iter().for_each(|v| v.join().unwrap());
}
}
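
For completeness, a hedged sketch of launching a leader through the new `FullNode::new` entry point (paths and flags are illustrative; note that `LedgerFile::Path` calls `File::open` for reading first, so the ledger file must already exist):

extern crate solana;

use solana::crdt::TestNode;
use solana::fullnode::{FullNode, LedgerFile};

fn main() {
    let node = TestNode::new_localhost();
    let leader = FullNode::new(
        node,
        true,                                       // run as leader
        LedgerFile::Path("ledger.log".to_string()), // must already exist
        None,                                       // validator-only keypair
        None,                                       // validator-only entry point
    );
    leader.close().expect("join fullnode threads");
}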

src/ledger.rs

@@ -1,10 +1,10 @@
//! The `ledger` module provides functions for parallel verification of the
//! Proof of History ledger.
use bincode::{self, deserialize, serialize_into, serialized_size};
use bincode::{self, deserialize, serialize_into};
use entry::Entry;
use hash::Hash;
use packet::{self, SharedBlob, BLOB_DATA_SIZE, BLOB_SIZE};
use packet::{self, SharedBlob, BLOB_SIZE};
use rayon::prelude::*;
use std::collections::VecDeque;
use std::io::Cursor;
@@ -41,10 +41,7 @@ impl Block for [Entry] {
}
}
pub fn reconstruct_entries_from_blobs(
blobs: VecDeque<SharedBlob>,
blob_recycler: &packet::BlobRecycler,
) -> bincode::Result<Vec<Entry>> {
pub fn reconstruct_entries_from_blobs(blobs: VecDeque<SharedBlob>) -> bincode::Result<Vec<Entry>> {
let mut entries: Vec<Entry> = Vec::with_capacity(blobs.len());
for blob in blobs {
@@ -52,7 +49,6 @@ pub fn reconstruct_entries_from_blobs(
let msg = blob.read().unwrap();
deserialize(&msg.data()[..msg.meta.size])
};
blob_recycler.recycle(blob);
match entry {
Ok(entry) => entries.push(entry),
@@ -73,24 +69,31 @@ pub fn next_entries_mut(
transactions: Vec<Transaction>,
) -> Vec<Entry> {
if transactions.is_empty() {
vec![Entry::new_mut(start_hash, cur_hashes, transactions)]
vec![Entry::new_mut(start_hash, cur_hashes, transactions, false)]
} else {
let mut chunk_len = transactions.len();
// check for fit, make sure they can be serialized
while serialized_size(&Entry {
num_hashes: 0,
id: Hash::default(),
transactions: transactions[0..chunk_len].to_vec(),
}).unwrap() > BLOB_DATA_SIZE as u64
{
while !Entry::will_fit(transactions[0..chunk_len].to_vec()) {
chunk_len /= 2;
}
let mut entries = Vec::with_capacity(transactions.len() / chunk_len + 1);
let mut num_chunks = if transactions.len() % chunk_len == 0 {
transactions.len() / chunk_len
} else {
transactions.len() / chunk_len + 1
};
let mut entries = Vec::with_capacity(num_chunks);
for chunk in transactions.chunks(chunk_len) {
entries.push(Entry::new_mut(start_hash, cur_hashes, chunk.to_vec()));
num_chunks -= 1;
entries.push(Entry::new_mut(
start_hash,
cur_hashes,
chunk.to_vec(),
num_chunks > 0,
));
}
entries
}
@@ -112,29 +115,11 @@ mod tests {
use super::*;
use entry::{next_entry, Entry};
use hash::hash;
use packet::BlobRecycler;
use packet::{BlobRecycler, BLOB_DATA_SIZE};
use signature::{KeyPair, KeyPairUtil};
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use transaction::Transaction;
/// Create a vector of Entries of length `transaction_batches.len()`
/// from `start_hash` hash, `num_hashes`, and `transaction_batches`.
fn next_entries_batched(
start_hash: &Hash,
cur_hashes: u64,
transaction_batches: Vec<Vec<Transaction>>,
) -> Vec<Entry> {
let mut id = *start_hash;
let mut entries = vec![];
let mut num_hashes = cur_hashes;
for transactions in transaction_batches {
let mut entry_batch = next_entries_mut(&mut id, &mut num_hashes, transactions);
entries.append(&mut entry_batch);
}
entries
}
#[test]
fn test_verify_slice() {
let zero = Hash::default();
@@ -142,9 +127,9 @@ mod tests {
assert!(vec![][..].verify(&zero)); // base case
assert!(vec![Entry::new_tick(0, &zero)][..].verify(&zero)); // singleton case 1
assert!(!vec![Entry::new_tick(0, &zero)][..].verify(&one)); // singleton case 2, bad
assert!(next_entries_batched(&zero, 0, vec![vec![]; 2])[..].verify(&zero)); // inductive step
assert!(vec![next_entry(&zero, 0, vec![]); 2][..].verify(&zero)); // inductive step
let mut bad_ticks = next_entries_batched(&zero, 0, vec![vec![]; 2]);
let mut bad_ticks = vec![next_entry(&zero, 0, vec![]); 2];
bad_ticks[1].id = one;
assert!(!bad_ticks.verify(&zero)); // inductive step, bad
}
@@ -162,10 +147,7 @@ mod tests {
let mut blob_q = VecDeque::new();
entries.to_blobs(&blob_recycler, &mut blob_q);
assert_eq!(
reconstruct_entries_from_blobs(blob_q, &blob_recycler).unwrap(),
entries
);
assert_eq!(reconstruct_entries_from_blobs(blob_q).unwrap(), entries);
}
#[test]
@@ -173,61 +155,44 @@ mod tests {
let blob_recycler = BlobRecycler::default();
let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
let blobs_q = packet::to_blobs(vec![(0, addr)], &blob_recycler).unwrap(); // <-- attack!
assert!(reconstruct_entries_from_blobs(blobs_q, &blob_recycler).is_err());
assert!(reconstruct_entries_from_blobs(blobs_q).is_err());
}
#[test]
fn test_next_entries_batched() {
// this also tests next_entries, ugly, but is an easy way to do vec of vec (batch)
let mut id = Hash::default();
fn test_next_entries() {
let id = Hash::default();
let next_id = hash(&id);
let keypair = KeyPair::new();
let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, next_id);
let transactions = vec![tx0; 5];
let transaction_batches = vec![transactions.clone(); 5];
let entries0 = next_entries_batched(&id, 0, transaction_batches);
// NOTE: if Entry grows to larger than a transaction, the code below falls over
let threshold = (BLOB_DATA_SIZE / 256) - 1; // 256 is transaction size
assert_eq!(entries0.len(), 5);
// verify no split
let transactions = vec![tx0.clone(); threshold];
let entries0 = next_entries(&id, 0, transactions.clone());
assert_eq!(entries0.len(), 1);
assert!(entries0.verify(&id));
let mut entries1 = vec![];
for _ in 0..5 {
let entry = next_entry(&id, 1, transactions.clone());
id = entry.id;
entries1.push(entry);
}
assert_eq!(entries0, entries1);
// verify the split
let transactions = vec![tx0.clone(); threshold * 2];
let entries0 = next_entries(&id, 0, transactions.clone());
assert_eq!(entries0.len(), 2);
assert!(entries0[0].has_more);
assert!(!entries0[entries0.len() - 1].has_more);
assert!(entries0.verify(&id));
// test hand-construction... brittle, changes if split method changes... ?
// let mut entries1 = vec![];
// entries1.push(Entry::new(&id, 1, transactions[..threshold].to_vec(), true));
// id = entries1[0].id;
// entries1.push(Entry::new(
// &id,
// 1,
// transactions[threshold..].to_vec(),
// false,
// ));
//
// assert_eq!(entries0, entries1);
}
}
#[cfg(all(feature = "unstable", test))]
mod bench {
extern crate test;
use self::test::Bencher;
use hash::hash;
use ledger::*;
use packet::BlobRecycler;
use signature::{KeyPair, KeyPairUtil};
use transaction::Transaction;
#[bench]
fn bench_block_to_blobs_to_block(bencher: &mut Bencher) {
let zero = Hash::default();
let one = hash(&zero);
let keypair = KeyPair::new();
let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, one);
let transactions = vec![tx0; 10];
let entries = next_entries(&zero, 1, transactions);
let blob_recycler = BlobRecycler::default();
bencher.iter(|| {
let mut blob_q = VecDeque::new();
entries.to_blobs(&blob_recycler, &mut blob_q);
assert_eq!(
reconstruct_entries_from_blobs(blob_q, &blob_recycler).unwrap(),
entries
);
});
}
}
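
The chunking arithmetic in `next_entries_mut` above is easy to check by hand; a standalone sketch, with `chunk_len` standing in for whatever chunk size `Entry::will_fit` finally accepts:

fn main() {
    let total: usize = 10;    // transactions to record
    let chunk_len: usize = 4; // stand-in for the largest fitting chunk
    let mut num_chunks = if total % chunk_len == 0 {
        total / chunk_len
    } else {
        total / chunk_len + 1
    }; // ceil(10 / 4) = 3 entries
    let txs: Vec<usize> = (0..total).collect();
    for chunk in txs.chunks(chunk_len) {
        num_chunks -= 1;
        // has_more is set on every entry except the last
        println!("entry: {} txs, has_more = {}", chunk.len(), num_chunks > 0);
    }
}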

src/lib.rs

@@ -13,6 +13,7 @@ pub mod bank;
pub mod banking_stage;
pub mod blob_fetch_stage;
pub mod budget;
pub mod choose_gossip_peer_strategy;
pub mod crdt;
pub mod drone;
pub mod entry;
@@ -20,10 +21,13 @@ pub mod entry_writer;
#[cfg(feature = "erasure")]
pub mod erasure;
pub mod fetch_stage;
pub mod fullnode;
pub mod hash;
pub mod ledger;
pub mod logger;
pub mod metrics;
pub mod mint;
pub mod nat;
pub mod ncp;
pub mod packet;
pub mod payment_plan;
@@ -35,7 +39,7 @@ pub mod request_processor;
pub mod request_stage;
pub mod result;
pub mod rpu;
pub mod server;
pub mod service;
pub mod signature;
pub mod sigverify;
pub mod sigverify_stage;
@@ -45,12 +49,14 @@ pub mod timing;
pub mod tpu;
pub mod transaction;
pub mod tvu;
pub mod voting;
pub mod window_stage;
pub mod write_stage;
extern crate bincode;
extern crate byteorder;
extern crate chrono;
extern crate generic_array;
extern crate itertools;
extern crate libc;
#[macro_use]
extern crate log;
@@ -68,4 +74,5 @@ extern crate untrusted;
#[macro_use]
extern crate matches;
extern crate influx_db_client;
extern crate rand;

src/logger.rs

@@ -9,6 +9,6 @@ static INIT: Once = ONCE_INIT;
/// Setup function that is only run once, even if called multiple times.
pub fn setup() {
INIT.call_once(|| {
let _ = env_logger::init();
env_logger::init();
});
}

src/metrics.rs (new file, 347 lines)

@@ -0,0 +1,347 @@
//! The `metrics` module enables sending measurements to an InfluxDB instance
use influx_db_client as influxdb;
use std::env;
use std::sync::mpsc::{channel, Receiver, RecvTimeoutError, Sender};
use std::sync::{Arc, Barrier, Mutex, Once, ONCE_INIT};
use std::thread;
use std::time::{Duration, Instant};
use timing;
#[derive(Debug)]
enum MetricsCommand {
Submit(influxdb::Point),
Flush(Arc<Barrier>),
}
struct MetricsAgent {
sender: Sender<MetricsCommand>,
}
trait MetricsWriter {
// Write the points and empty the vector. Called on the internal
// MetricsAgent worker thread.
fn write(&self, points: Vec<influxdb::Point>);
}
struct InfluxDbMetricsWriter {
client: Option<influxdb::Client>,
}
impl InfluxDbMetricsWriter {
fn new() -> Self {
InfluxDbMetricsWriter {
client: Self::build_client(),
}
}
fn build_client() -> Option<influxdb::Client> {
let host = env::var("INFLUX_HOST")
.unwrap_or_else(|_| "https://metrics.solana.com:8086".to_string());
let db = env::var("INFLUX_DATABASE").unwrap_or_else(|_| "scratch".to_string());
let username = env::var("INFLUX_USERNAME").unwrap_or_else(|_| "scratch_writer".to_string());
let password = env::var("INFLUX_PASSWORD").unwrap_or_else(|_| "topsecret".to_string());
debug!("InfluxDB host={} db={} username={}", host, db, username);
let mut client = influxdb::Client::new_with_option(host, db, None)
.set_authentication(username, password);
client.set_read_timeout(1 /*second*/);
client.set_write_timeout(1 /*second*/);
debug!("InfluxDB version: {:?}", client.get_version());
Some(client)
}
}
impl MetricsWriter for InfluxDbMetricsWriter {
fn write(&self, points: Vec<influxdb::Point>) {
if let Some(ref client) = self.client {
debug!("submitting {} points", points.len());
if let Err(err) = client.write_points(
influxdb::Points { point: points },
Some(influxdb::Precision::Milliseconds),
None,
) {
debug!("InfluxDbMetricsWriter write error: {:?}", err);
}
}
}
}
impl Default for MetricsAgent {
fn default() -> Self {
Self::new(
Arc::new(InfluxDbMetricsWriter::new()),
Duration::from_secs(10),
)
}
}
impl MetricsAgent {
fn new(writer: Arc<MetricsWriter + Send + Sync>, write_frequency: Duration) -> Self {
let (sender, receiver) = channel::<MetricsCommand>();
thread::spawn(move || Self::run(&receiver, &writer, write_frequency));
MetricsAgent { sender }
}
fn run(
receiver: &Receiver<MetricsCommand>,
writer: &Arc<MetricsWriter + Send + Sync>,
write_frequency: Duration,
) {
trace!("run: enter");
let mut last_write_time = Instant::now();
let mut points = Vec::new();
loop {
match receiver.recv_timeout(write_frequency / 2) {
Ok(cmd) => match cmd {
MetricsCommand::Flush(barrier) => {
debug!("metrics_thread: flush");
if !points.is_empty() {
writer.write(points);
points = Vec::new();
last_write_time = Instant::now();
}
barrier.wait();
}
MetricsCommand::Submit(point) => {
debug!("run: submit {:?}", point);
points.push(point);
}
},
Err(RecvTimeoutError::Timeout) => {
trace!("run: receive timeout");
}
Err(RecvTimeoutError::Disconnected) => {
debug!("run: sender disconnected");
break;
}
}
let now = Instant::now();
if now.duration_since(last_write_time) >= write_frequency && !points.is_empty() {
debug!("run: writing {} points", points.len());
writer.write(points);
points = Vec::new();
last_write_time = now;
}
}
trace!("run: exit");
}
pub fn submit(&self, mut point: influxdb::Point) {
if point.timestamp.is_none() {
point.timestamp = Some(timing::timestamp() as i64);
}
debug!("Submitting point: {:?}", point);
self.sender.send(MetricsCommand::Submit(point)).unwrap();
}
pub fn flush(&self) {
debug!("Flush");
let barrier = Arc::new(Barrier::new(2));
self.sender
.send(MetricsCommand::Flush(Arc::clone(&barrier)))
.unwrap();
barrier.wait();
}
}
impl Drop for MetricsAgent {
fn drop(&mut self) {
self.flush();
}
}
fn get_singleton_agent() -> Arc<Mutex<MetricsAgent>> {
static INIT: Once = ONCE_INIT;
static mut AGENT: Option<Arc<Mutex<MetricsAgent>>> = None;
unsafe {
INIT.call_once(|| AGENT = Some(Arc::new(Mutex::new(MetricsAgent::default()))));
match AGENT {
Some(ref agent) => agent.clone(),
None => panic!("Failed to initialize metrics agent"),
}
}
}
/// Submits a new point from any thread. Note that points are internally queued
/// and transmitted periodically in batches.
pub fn submit(point: influxdb::Point) {
let agent_mutex = get_singleton_agent();
let agent = agent_mutex.lock().unwrap();
agent.submit(point);
}
/// Blocks until all pending points from previous calls to `submit` have been
/// transmitted.
pub fn flush() {
let agent_mutex = get_singleton_agent();
let agent = agent_mutex.lock().unwrap();
agent.flush();
}
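
A minimal usage sketch of this queue-and-batch API (the measurement and field names are made up for illustration, and the crate name is assumed):

extern crate influx_db_client as influxdb;
extern crate solana;

fn main() {
    // Queued internally; written to InfluxDB in periodic batches.
    solana::metrics::submit(
        influxdb::Point::new("example-measurement")
            .add_field("count", influxdb::Value::Integer(1))
            .to_owned(),
    );
    // Block until everything queued above has actually been transmitted.
    solana::metrics::flush();
}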
/// Hook the panic handler to generate a data point on each panic
pub fn set_panic_hook(program: &'static str) {
use std::panic;
use std::sync::{Once, ONCE_INIT};
static SET_HOOK: Once = ONCE_INIT;
SET_HOOK.call_once(|| {
let default_hook = panic::take_hook();
panic::set_hook(Box::new(move |ono| {
default_hook(ono);
submit(
influxdb::Point::new("panic")
.add_tag("program", influxdb::Value::String(program.to_string()))
.add_tag(
"thread",
influxdb::Value::String(
thread::current().name().unwrap_or("?").to_string(),
),
)
// The 'one' field exists to give Kapacitor Alerts a numerical value
// to filter on
.add_field("one", influxdb::Value::Integer(1))
.add_field(
"message",
influxdb::Value::String(
// TODO: use ono.message() when it becomes stable
ono.to_string(),
),
)
.add_field(
"location",
influxdb::Value::String(match ono.location() {
Some(location) => location.to_string(),
None => "?".to_string(),
}),
)
.to_owned(),
);
// Flush metrics immediately in case the process exits immediately
// upon return
flush();
}));
});
}
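
Usage is a single call at process start (the program name here is illustrative); any later panic on any thread then emits a "panic" point and flushes before the process dies:

extern crate solana;

fn main() {
    solana::metrics::set_panic_hook("fullnode"); // tag panic points with this name
    // ... from here on, panics are reported to the metrics backend
}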
#[cfg(test)]
mod test {
use super::*;
use rand::random;
use std::sync::atomic::{AtomicUsize, Ordering};
struct MockMetricsWriter {
points_written: AtomicUsize,
}
impl MockMetricsWriter {
fn new() -> Self {
MockMetricsWriter {
points_written: AtomicUsize::new(0),
}
}
fn points_written(&self) -> usize {
return self.points_written.load(Ordering::SeqCst);
}
}
impl MetricsWriter for MockMetricsWriter {
fn write(&self, points: Vec<influxdb::Point>) {
assert!(!points.is_empty());
self.points_written
.fetch_add(points.len(), Ordering::SeqCst);
println!(
"Writing {} points ({} total)",
points.len(),
self.points_written.load(Ordering::SeqCst)
);
}
}
#[test]
fn test_submit() {
let writer = Arc::new(MockMetricsWriter::new());
let agent = MetricsAgent::new(writer.clone(), Duration::from_secs(10));
for i in 0..42 {
agent.submit(influxdb::Point::new(&format!("measurement {}", i)));
}
agent.flush();
assert_eq!(writer.points_written(), 42);
}
#[test]
fn test_submit_with_delay() {
let writer = Arc::new(MockMetricsWriter::new());
let agent = MetricsAgent::new(writer.clone(), Duration::from_millis(100));
agent.submit(influxdb::Point::new("point 1"));
thread::sleep(Duration::from_secs(2));
assert_eq!(writer.points_written(), 1);
}
#[test]
fn test_multithread_submit() {
let writer = Arc::new(MockMetricsWriter::new());
let agent = Arc::new(Mutex::new(MetricsAgent::new(
writer.clone(),
Duration::from_secs(10),
)));
//
// Submit measurements from different threads
//
let mut threads = Vec::new();
for i in 0..42 {
let point = influxdb::Point::new(&format!("measurement {}", i));
let agent = Arc::clone(&agent);
threads.push(thread::spawn(move || {
agent.lock().unwrap().submit(point);
}));
}
for thread in threads {
thread.join().unwrap();
}
agent.lock().unwrap().flush();
assert_eq!(writer.points_written(), 42);
}
#[test]
fn test_flush_before_drop() {
let writer = Arc::new(MockMetricsWriter::new());
{
let agent = MetricsAgent::new(writer.clone(), Duration::from_secs(9999999));
agent.submit(influxdb::Point::new("point 1"));
}
assert_eq!(writer.points_written(), 1);
}
#[test]
fn test_live_submit() {
let agent = MetricsAgent::default();
let point = influxdb::Point::new("live_submit_test")
.add_tag("test", influxdb::Value::Boolean(true))
.add_field(
"random_bool",
influxdb::Value::Boolean(random::<u8>() < 128),
)
.add_field(
"random_int",
influxdb::Value::Integer(random::<u8>() as i64),
)
.to_owned();
agent.submit(point);
}
}

src/mint.rs

@@ -15,11 +15,7 @@ pub struct Mint {
}
impl Mint {
pub fn new(tokens: i64) -> Self {
let rnd = SystemRandom::new();
let pkcs8 = KeyPair::generate_pkcs8(&rnd)
.expect("generate_pkcs8 in mint pub fn new")
.to_vec();
pub fn new_with_pkcs8(tokens: i64, pkcs8: Vec<u8>) -> Self {
let keypair =
KeyPair::from_pkcs8(Input::from(&pkcs8)).expect("from_pkcs8 in mint pub fn new");
let pubkey = keypair.pubkey();
@@ -30,6 +26,14 @@ impl Mint {
}
}
pub fn new(tokens: i64) -> Self {
let rnd = SystemRandom::new();
let pkcs8 = KeyPair::generate_pkcs8(&rnd)
.expect("generate_pkcs8 in mint pub fn new")
.to_vec();
Self::new_with_pkcs8(tokens, pkcs8)
}
pub fn seed(&self) -> Hash {
hash(&self.pkcs8)
}
@@ -53,18 +57,12 @@ impl Mint {
}
pub fn create_entries(&self) -> Vec<Entry> {
let e0 = Entry::new(&self.seed(), 0, vec![]);
let e1 = Entry::new(&e0.id, 0, self.create_transactions());
let e0 = Entry::new(&self.seed(), 0, vec![], false);
let e1 = Entry::new(&e0.id, 0, self.create_transactions(), false);
vec![e0, e1]
}
}
#[derive(Serialize, Deserialize, Debug)]
pub struct MintDemo {
pub mint: Mint,
pub num_accounts: i64,
}
#[cfg(test)]
mod tests {
use super::*;

src/nat.rs (new executable file, 121 lines)

@@ -0,0 +1,121 @@
//! The `nat` module assists with NAT traversal
extern crate futures;
extern crate p2p;
extern crate rand;
extern crate reqwest;
extern crate tokio_core;
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
use self::futures::Future;
use self::p2p::UdpSocketExt;
use rand::{thread_rng, Rng};
use std::env;
use std::io;
use std::str;
/// A data type representing a public Udp socket
pub struct UdpSocketPair {
pub addr: SocketAddr, // Public address of the socket
pub receiver: UdpSocket, // Locally bound socket that can receive from the public address
pub sender: UdpSocket, // Locally bound socket to send via public address
}
/// Tries to determine the public IP address of this machine
pub fn get_public_ip_addr() -> Result<IpAddr, String> {
let body = reqwest::get("http://ifconfig.co/ip")
.map_err(|err| err.to_string())?
.text()
.map_err(|err| err.to_string())?;
match body.lines().next() {
Some(ip) => Result::Ok(ip.parse().unwrap()),
None => Result::Err("Empty response body".to_string()),
}
}
pub fn udp_random_bind(start: u16, end: u16, tries: u32) -> io::Result<UdpSocket> {
let mut count = 0;
loop {
count += 1;
let rand_port = thread_rng().gen_range(start, end);
let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), rand_port);
match UdpSocket::bind(addr) {
Result::Ok(val) => break Result::Ok(val),
Result::Err(err) => if err.kind() != io::ErrorKind::AddrInUse || count >= tries {
return Err(err);
},
}
}
}
/// Binds a private Udp address to a public address using UPnP if possible
pub fn udp_public_bind(label: &str, startport: u16, endport: u16) -> UdpSocketPair {
let private_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0);
let mut core = tokio_core::reactor::Core::new().unwrap();
let handle = core.handle();
let mc = p2p::P2p::default();
let res = core.run({
tokio_core::net::UdpSocket::bind_public(&private_addr, &handle, &mc)
.map_err(|e| {
info!("Failed to bind public socket for {}: {}", label, e);
})
.and_then(|(socket, public_addr)| Ok((public_addr, socket.local_addr().unwrap())))
});
match res {
Ok((public_addr, local_addr)) => {
info!(
"Using local address {} mapped to UPnP public address {} for {}",
local_addr, public_addr, label
);
// NAT should now be forwarding inbound packets directed at
// |public_addr| to the local |receiver| socket...
let receiver = UdpSocket::bind(local_addr).unwrap();
// TODO: try to autodetect a broken NAT (issue #496)
let sender = if env::var("BROKEN_NAT").is_err() {
receiver.try_clone().unwrap()
} else {
// ... however for outbound packets, some NATs *will not* rewrite the
// source port from |receiver.local_addr().port()| to |public_addr.port()|.
// This is currently a problem when talking with a fullnode as it
// assumes it can send UDP packets back at the source. This hits the
// NAT as a datagram for |receiver.local_addr().port()| on the NAT's public
// IP, which the NAT promptly discards. As a short term hack, create a
// local UDP socket, |sender|, with the same port as |public_addr.port()|.
//
// TODO: Remove the |sender| socket and deal with the downstream changes to
// the UDP signalling
let mut local_addr_sender = local_addr;
local_addr_sender.set_port(public_addr.port());
UdpSocket::bind(local_addr_sender).unwrap()
};
UdpSocketPair {
addr: public_addr,
receiver,
sender,
}
}
Err(_) => {
let sender = udp_random_bind(startport, endport, 5).unwrap();
let local_addr = sender.local_addr().unwrap();
let pub_ip = get_public_ip_addr().unwrap();
let pub_addr = SocketAddr::new(pub_ip, local_addr.port());
info!("Using source address {} for {}", pub_addr, label);
UdpSocketPair {
addr: pub_addr,
receiver: sender.try_clone().unwrap(),
sender,
}
}
}
}
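
A usage sketch (the port range is illustrative): try UPnP first, and fall back to a randomly bound UDP port advertised at the machine's public IP.

extern crate solana;

fn main() {
    let pair = solana::nat::udp_public_bind("gossip", 8000, 10_000);
    println!("advertise {}, receive on {:?}", pair.addr, pair.receiver);
}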

src/ncp.rs

@@ -1,28 +1,30 @@
//! The `ncp` module implements the network control plane.
use crdt;
use packet;
use crdt::Crdt;
use packet::{BlobRecycler, SharedBlob};
use result::Result;
use service::Service;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::channel;
use std::sync::{Arc, RwLock};
use std::thread::JoinHandle;
use std::thread::{self, JoinHandle};
use streamer;
pub struct Ncp {
pub thread_hdls: Vec<JoinHandle<()>>,
exit: Arc<AtomicBool>,
thread_hdls: Vec<JoinHandle<()>>,
}
impl Ncp {
pub fn new(
crdt: Arc<RwLock<crdt::Crdt>>,
window: Arc<RwLock<Vec<Option<packet::SharedBlob>>>>,
crdt: &Arc<RwLock<Crdt>>,
window: Arc<RwLock<Vec<Option<SharedBlob>>>>,
gossip_listen_socket: UdpSocket,
gossip_send_socket: UdpSocket,
exit: Arc<AtomicBool>,
) -> Result<Ncp> {
let blob_recycler = packet::BlobRecycler::default();
let blob_recycler = BlobRecycler::default();
let (request_sender, request_receiver) = channel();
trace!(
"Ncp: id: {:?}, listening on: {:?}",
@@ -37,12 +39,12 @@ impl Ncp {
)?;
let (response_sender, response_receiver) = channel();
let t_responder = streamer::responder(
"ncp",
gossip_send_socket,
exit.clone(),
blob_recycler.clone(),
response_receiver,
);
let t_listen = crdt::Crdt::listen(
let t_listen = Crdt::listen(
crdt.clone(),
window,
blob_recycler.clone(),
@@ -50,9 +52,27 @@ impl Ncp {
response_sender.clone(),
exit.clone(),
);
let t_gossip = crdt::Crdt::gossip(crdt.clone(), blob_recycler, response_sender, exit);
let t_gossip = Crdt::gossip(crdt.clone(), blob_recycler, response_sender, exit.clone());
let thread_hdls = vec![t_receiver, t_responder, t_listen, t_gossip];
Ok(Ncp { thread_hdls })
Ok(Ncp { exit, thread_hdls })
}
pub fn close(self) -> thread::Result<()> {
self.exit.store(true, Ordering::Relaxed);
self.join()
}
}
impl Service for Ncp {
fn thread_hdls(self) -> Vec<JoinHandle<()>> {
self.thread_hdls
}
fn join(self) -> thread::Result<()> {
for thread_hdl in self.thread_hdls() {
thread_hdl.join()?;
}
Ok(())
}
}
@@ -60,30 +80,25 @@ impl Ncp {
mod tests {
use crdt::{Crdt, TestNode};
use ncp::Ncp;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, RwLock};
#[test]
#[ignore]
// test that stage will exit when flag is set
// TODO: Troubleshoot Docker-based coverage build and re-enable
// this test. It is probably failing due to too many threads.
fn test_exit() {
let exit = Arc::new(AtomicBool::new(false));
let tn = TestNode::new();
let crdt = Crdt::new(tn.data.clone());
let tn = TestNode::new_localhost();
let crdt = Crdt::new(tn.data.clone()).expect("Crdt::new");
let c = Arc::new(RwLock::new(crdt));
let w = Arc::new(RwLock::new(vec![]));
let d = Ncp::new(
c.clone(),
&c,
w,
tn.sockets.gossip,
tn.sockets.gossip_send,
exit.clone(),
).unwrap();
exit.store(true, Ordering::Relaxed);
for t in d.thread_hdls {
t.join().expect("thread join");
}
d.close().expect("thread join");
}
}

src/packet.rs

@@ -12,10 +12,10 @@ use std::mem::size_of;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket};
use std::sync::atomic::AtomicUsize;
use std::sync::{Arc, Mutex, RwLock};
use std::time::Instant;
pub type SharedPackets = Arc<RwLock<Packets>>;
pub type SharedBlob = Arc<RwLock<Blob>>;
pub type SharedBlobs = VecDeque<SharedBlob>;
pub type PacketRecycler = Recycler<Packets>;
pub type BlobRecycler = Recycler<Blob>;
@@ -162,18 +162,31 @@ impl<T: Default> Clone for Recycler<T> {
impl<T: Default> Recycler<T> {
pub fn allocate(&self) -> Arc<RwLock<T>> {
let mut gc = self.gc.lock().expect("recycler lock in pub fn allocate");
gc.pop()
.unwrap_or_else(|| Arc::new(RwLock::new(Default::default())))
let x = gc.pop()
.unwrap_or_else(|| Arc::new(RwLock::new(Default::default())));
// Only return the item if this recycler is the last reference to it.
// Remove this check once `T` holds a Weak reference back to this
// recycler and implements `Drop`. At the time of this writing, Weak can't
// be passed across threads ('alloc' is a nightly-only API), and so our
// reference-counted recyclables are awkwardly being recycled by hand,
// which allows this race condition to exist.
if Arc::strong_count(&x) > 1 {
warn!("Recycled item still in use. Booting it.");
drop(gc);
self.allocate()
} else {
x
}
}
pub fn recycle(&self, msgs: Arc<RwLock<T>>) {
pub fn recycle(&self, x: Arc<RwLock<T>>) {
let mut gc = self.gc.lock().expect("recycler lock in pub fn recycle");
gc.push(msgs);
gc.push(x);
}
}
impl Packets {
fn run_read_from(&mut self, socket: &UdpSocket) -> Result<usize> {
static mut COUNTER: Counter = create_counter!("packets", 10);
self.packets.resize(NUM_PACKETS, Packet::default());
let mut i = 0;
//DOCUMENTED SIDE-EFFECT
@@ -183,13 +196,12 @@ impl Packets {
// * read until it fails
// * set it back to blocking before returning
socket.set_nonblocking(false)?;
let mut start = Instant::now();
for p in &mut self.packets {
p.meta.size = 0;
trace!("receiving on {}", socket.local_addr().unwrap());
match socket.recv_from(&mut p.data) {
Err(_) if i > 0 => {
inc_counter!(COUNTER, i, start);
inc_new_counter!("packets-recv_count", 1);
debug!("got {:?} messages on {}", i, socket.local_addr().unwrap());
break;
}
@@ -201,7 +213,6 @@ impl Packets {
p.meta.size = nrecv;
p.meta.set_addr(&from);
if i == 0 {
start = Instant::now();
socket.set_nonblocking(true)?;
}
}
@@ -227,7 +238,7 @@ impl Packets {
pub fn to_packets_chunked<T: Serialize>(
r: &PacketRecycler,
xs: Vec<T>,
xs: &[T],
chunks: usize,
) -> Vec<SharedPackets> {
let mut out = vec![];
@@ -245,10 +256,10 @@ pub fn to_packets_chunked<T: Serialize>(
}
out.push(p);
}
return out;
out
}
pub fn to_packets<T: Serialize>(r: &PacketRecycler, xs: Vec<T>) -> Vec<SharedPackets> {
pub fn to_packets<T: Serialize>(r: &PacketRecycler, xs: &[T]) -> Vec<SharedPackets> {
to_packets_chunked(r, xs, NUM_PACKETS)
}
@@ -273,7 +284,7 @@ pub fn to_blob<T: Serialize>(
pub fn to_blobs<T: Serialize>(
rsps: Vec<(T, SocketAddr)>,
blob_recycler: &BlobRecycler,
) -> Result<VecDeque<SharedBlob>> {
) -> Result<SharedBlobs> {
let mut blobs = VecDeque::new();
for (resp, rsp_addr) in rsps {
blobs.push_back(to_blob(resp, rsp_addr, blob_recycler)?);
@@ -334,7 +345,7 @@ impl Blob {
}
pub fn is_coding(&self) -> bool {
return (self.get_flags().unwrap() & BLOB_FLAG_IS_CODING) != 0;
(self.get_flags().unwrap() & BLOB_FLAG_IS_CODING) != 0
}
pub fn set_coding(&mut self) -> Result<()> {
@@ -366,7 +377,7 @@ impl Blob {
self.meta.size = new_size;
self.set_data_size(new_size as u64).unwrap();
}
pub fn recv_from(re: &BlobRecycler, socket: &UdpSocket) -> Result<VecDeque<SharedBlob>> {
pub fn recv_from(re: &BlobRecycler, socket: &UdpSocket) -> Result<SharedBlobs> {
let mut v = VecDeque::new();
//DOCUMENTED SIDE-EFFECT
//Performance out of the IO without poll
@@ -404,16 +415,18 @@ impl Blob {
}
Ok(v)
}
pub fn send_to(
re: &BlobRecycler,
socket: &UdpSocket,
v: &mut VecDeque<SharedBlob>,
) -> Result<()> {
pub fn send_to(re: &BlobRecycler, socket: &UdpSocket, v: &mut SharedBlobs) -> Result<()> {
while let Some(r) = v.pop_front() {
{
let p = r.read().expect("'r' read lock in pub fn send_to");
let a = p.meta.addr();
socket.send_to(&p.data[..p.meta.size], &a)?;
if let Err(e) = socket.send_to(&p.data[..p.meta.size], &a) {
warn!(
"error sending {} byte packet to {:?}: {:?}",
p.meta.size, a, e
);
Err(e)?;
}
}
re.recycle(r);
}
@@ -422,13 +435,16 @@ impl Blob {
}
#[cfg(test)]
mod test {
use packet::{to_packets, Blob, BlobRecycler, Packet, PacketRecycler, Packets, NUM_PACKETS};
mod tests {
use packet::{
to_packets, Blob, BlobRecycler, Packet, PacketRecycler, Packets, Recycler, NUM_PACKETS,
};
use request::Request;
use std::collections::VecDeque;
use std::io;
use std::io::Write;
use std::net::UdpSocket;
use std::sync::Arc;
#[test]
pub fn packet_recycler_test() {
@@ -439,6 +455,37 @@ mod test {
let _ = r.allocate();
assert_eq!(r.gc.lock().unwrap().len(), 0);
}
#[test]
pub fn test_leaked_recyclable() {
// Ensure that the recycler won't return an item
// that is still referenced outside the recycler.
let r = Recycler::<u8>::default();
let x0 = r.allocate();
r.recycle(x0.clone());
assert_eq!(Arc::strong_count(&x0), 2);
assert_eq!(r.gc.lock().unwrap().len(), 1);
let x1 = r.allocate();
assert_eq!(Arc::strong_count(&x1), 1);
assert_eq!(r.gc.lock().unwrap().len(), 0);
}
#[test]
pub fn test_leaked_recyclable_recursion() {
// In the case of a leaked recyclable, ensure the recycler drops its lock before recursing.
let r = Recycler::<u8>::default();
let x0 = r.allocate();
let x1 = r.allocate();
r.recycle(x0); // <-- allocate() of this will require locking the recycler's stack.
r.recycle(x1.clone()); // <-- allocate() of this will cause it to be dropped and recurse.
assert_eq!(Arc::strong_count(&x1), 2);
assert_eq!(r.gc.lock().unwrap().len(), 2);
r.allocate(); // Ensure lock is released before recursing.
assert_eq!(r.gc.lock().unwrap().len(), 0);
}
#[test]
pub fn blob_recycler_test() {
let r = BlobRecycler::default();
@@ -475,15 +522,15 @@ mod test {
fn test_to_packets() {
let tx = Request::GetTransactionCount;
let re = PacketRecycler::default();
let rv = to_packets(&re, vec![tx.clone(); 1]);
let rv = to_packets(&re, &vec![tx.clone(); 1]);
assert_eq!(rv.len(), 1);
assert_eq!(rv[0].read().unwrap().packets.len(), 1);
let rv = to_packets(&re, vec![tx.clone(); NUM_PACKETS]);
let rv = to_packets(&re, &vec![tx.clone(); NUM_PACKETS]);
assert_eq!(rv.len(), 1);
assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);
let rv = to_packets(&re, vec![tx.clone(); NUM_PACKETS + 1]);
let rv = to_packets(&re, &vec![tx.clone(); NUM_PACKETS + 1]);
assert_eq!(rv.len(), 2);
assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);
assert_eq!(rv[1].read().unwrap().packets.len(), 1);

src/payment_plan.rs

@@ -13,7 +13,7 @@ pub enum Witness {
Timestamp(DateTime<Utc>),
/// A signature from PublicKey.
Signature(PublicKey),
Signature,
}
/// Some amount of tokens that should be sent to the `to` `PublicKey`.
@@ -36,5 +36,5 @@ pub trait PaymentPlan {
/// Apply a witness to the payment plan to see if the plan can be reduced.
/// If so, modify the plan in-place.
fn apply_witness(&mut self, witness: &Witness);
fn apply_witness(&mut self, witness: &Witness, from: &PublicKey);
}

src/record_stage.rs

@@ -8,8 +8,9 @@
use entry::Entry;
use hash::Hash;
use recorder::Recorder;
use service::Service;
use std::sync::mpsc::{channel, Receiver, RecvError, Sender, TryRecvError};
use std::thread::{Builder, JoinHandle};
use std::thread::{self, Builder, JoinHandle};
use std::time::{Duration, Instant};
use transaction::Transaction;
@@ -20,16 +21,18 @@ pub enum Signal {
}
pub struct RecordStage {
pub entry_receiver: Receiver<Entry>,
pub thread_hdl: JoinHandle<()>,
thread_hdl: JoinHandle<()>,
}
impl RecordStage {
/// A background thread that will continue tagging received Transaction messages and
/// sending back Entry messages until either the receiver or sender channel is closed.
pub fn new(signal_receiver: Receiver<Signal>, start_hash: &Hash) -> Self {
pub fn new(
signal_receiver: Receiver<Signal>,
start_hash: &Hash,
) -> (Self, Receiver<Vec<Entry>>) {
let (entry_sender, entry_receiver) = channel();
let start_hash = start_hash.clone();
let start_hash = *start_hash;
let thread_hdl = Builder::new()
.name("solana-record-stage".to_string())
@@ -39,10 +42,7 @@ impl RecordStage {
})
.unwrap();
RecordStage {
entry_receiver,
thread_hdl,
}
(RecordStage { thread_hdl }, entry_receiver)
}
/// Same as `RecordStage::new`, but will automatically produce entries every `tick_duration`.
@@ -50,9 +50,9 @@ impl RecordStage {
signal_receiver: Receiver<Signal>,
start_hash: &Hash,
tick_duration: Duration,
) -> Self {
) -> (Self, Receiver<Vec<Entry>>) {
let (entry_sender, entry_receiver) = channel();
let start_hash = start_hash.clone();
let start_hash = *start_hash;
let thread_hdl = Builder::new()
.name("solana-record-stage".to_string())
@@ -60,13 +60,14 @@ impl RecordStage {
let mut recorder = Recorder::new(start_hash);
let start_time = Instant::now();
loop {
if let Err(_) = Self::try_process_signals(
if Self::try_process_signals(
&mut recorder,
start_time,
tick_duration,
&signal_receiver,
&entry_sender,
) {
).is_err()
{
return;
}
recorder.hash();
@@ -74,16 +75,13 @@ impl RecordStage {
})
.unwrap();
RecordStage {
entry_receiver,
thread_hdl,
}
(RecordStage { thread_hdl }, entry_receiver)
}
fn process_signal(
signal: Signal,
recorder: &mut Recorder,
sender: &Sender<Entry>,
sender: &Sender<Vec<Entry>>,
) -> Result<(), ()> {
let txs = if let Signal::Transactions(txs) = signal {
txs
@@ -91,20 +89,14 @@ impl RecordStage {
vec![]
};
let entries = recorder.record(txs);
let mut result = Ok(());
for entry in entries {
result = sender.send(entry).map_err(|_| ());
if result.is_err() {
break;
}
}
result
sender.send(entries).or(Err(()))?;
Ok(())
}
fn process_signals(
recorder: &mut Recorder,
receiver: &Receiver<Signal>,
sender: &Sender<Entry>,
sender: &Sender<Vec<Entry>>,
) -> Result<(), ()> {
loop {
match receiver.recv() {
@@ -119,11 +111,11 @@ impl RecordStage {
start_time: Instant,
tick_duration: Duration,
receiver: &Receiver<Signal>,
sender: &Sender<Entry>,
sender: &Sender<Vec<Entry>>,
) -> Result<(), ()> {
loop {
if let Some(entry) = recorder.tick(start_time, tick_duration) {
sender.send(entry).or(Err(()))?;
sender.send(vec![entry]).or(Err(()))?;
}
match receiver.try_recv() {
Ok(signal) => Self::process_signal(signal, recorder, sender)?,
@@ -134,6 +126,16 @@ impl RecordStage {
}
}
impl Service for RecordStage {
fn thread_hdls(self) -> Vec<JoinHandle<()>> {
vec![self.thread_hdl]
}
fn join(self) -> thread::Result<()> {
self.thread_hdl.join()
}
}
#[cfg(test)]
mod tests {
use super::*;
@@ -146,7 +148,7 @@ mod tests {
fn test_historian() {
let (tx_sender, tx_receiver) = channel();
let zero = Hash::default();
let record_stage = RecordStage::new(tx_receiver, &zero);
let (record_stage, entry_receiver) = RecordStage::new(tx_receiver, &zero);
tx_sender.send(Signal::Tick).unwrap();
sleep(Duration::new(0, 1_000_000));
@@ -154,9 +156,9 @@ mod tests {
sleep(Duration::new(0, 1_000_000));
tx_sender.send(Signal::Tick).unwrap();
let entry0 = record_stage.entry_receiver.recv().unwrap();
let entry1 = record_stage.entry_receiver.recv().unwrap();
let entry2 = record_stage.entry_receiver.recv().unwrap();
let entry0 = entry_receiver.recv().unwrap()[0].clone();
let entry1 = entry_receiver.recv().unwrap()[0].clone();
let entry2 = entry_receiver.recv().unwrap()[0].clone();
assert_eq!(entry0.num_hashes, 0);
assert_eq!(entry1.num_hashes, 0);
@@ -172,8 +174,8 @@ mod tests {
fn test_historian_closed_sender() {
let (tx_sender, tx_receiver) = channel();
let zero = Hash::default();
let record_stage = RecordStage::new(tx_receiver, &zero);
drop(record_stage.entry_receiver);
let (record_stage, entry_receiver) = RecordStage::new(tx_receiver, &zero);
drop(entry_receiver);
tx_sender.send(Signal::Tick).unwrap();
assert_eq!(record_stage.thread_hdl.join().unwrap(), ());
}
@@ -182,7 +184,7 @@ mod tests {
fn test_transactions() {
let (tx_sender, signal_receiver) = channel();
let zero = Hash::default();
let record_stage = RecordStage::new(signal_receiver, &zero);
let (_record_stage, entry_receiver) = RecordStage::new(signal_receiver, &zero);
let alice_keypair = KeyPair::new();
let bob_pubkey = KeyPair::new().pubkey();
let tx0 = Transaction::new(&alice_keypair, bob_pubkey, 1, zero);
@@ -191,7 +193,7 @@ mod tests {
.send(Signal::Transactions(vec![tx0, tx1]))
.unwrap();
drop(tx_sender);
let entries: Vec<_> = record_stage.entry_receiver.iter().collect();
let entries: Vec<_> = entry_receiver.iter().collect();
assert_eq!(entries.len(), 1);
}
@@ -199,12 +201,12 @@ mod tests {
fn test_clock() {
let (tx_sender, tx_receiver) = channel();
let zero = Hash::default();
let record_stage =
let (_record_stage, entry_receiver) =
RecordStage::new_with_clock(tx_receiver, &zero, Duration::from_millis(20));
sleep(Duration::from_millis(900));
tx_sender.send(Signal::Tick).unwrap();
drop(tx_sender);
let entries: Vec<Entry> = record_stage.entry_receiver.iter().collect();
let entries: Vec<_> = entry_receiver.iter().flat_map(|x| x).collect();
assert!(entries.len() > 1);
// Ensure the ID is not the seed.

src/recorder.rs

@@ -3,7 +3,7 @@
use entry::Entry;
use hash::{hash, Hash};
use ledger::next_entries_mut;
use ledger;
use std::time::{Duration, Instant};
use transaction::Transaction;
@@ -28,7 +28,7 @@ impl Recorder {
}
pub fn record(&mut self, transactions: Vec<Transaction>) -> Vec<Entry> {
next_entries_mut(&mut self.last_hash, &mut self.num_hashes, transactions)
ledger::next_entries_mut(&mut self.last_hash, &mut self.num_hashes, transactions)
}
pub fn tick(&mut self, start_time: Instant, tick_duration: Duration) -> Option<Entry> {
@@ -39,6 +39,7 @@ impl Recorder {
&mut self.last_hash,
&mut self.num_hashes,
vec![],
false,
))
} else {
None

src/replicate_stage.rs

@@ -1,53 +1,149 @@
//! The `replicate_stage` replicates transactions broadcast by the leader.
use bank::Bank;
use bincode::serialize;
use counter::Counter;
use crdt::Crdt;
use ledger;
use packet;
use result::Result;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread::{Builder, JoinHandle};
use packet::BlobRecycler;
use result::{Error, Result};
use service::Service;
use signature::KeyPair;
use std::collections::VecDeque;
use std::net::UdpSocket;
use std::sync::atomic::AtomicUsize;
use std::sync::mpsc::channel;
use std::sync::mpsc::RecvTimeoutError;
use std::sync::{Arc, RwLock};
use std::thread::{self, Builder, JoinHandle};
use std::time::Duration;
use streamer;
use streamer::{responder, BlobReceiver, BlobSender};
use timing;
use transaction::Transaction;
use voting::entries_to_votes;
pub struct ReplicateStage {
pub thread_hdl: JoinHandle<()>,
thread_hdls: Vec<JoinHandle<()>>,
}
const VOTE_TIMEOUT_MS: u64 = 1000;
impl ReplicateStage {
/// Process entry blobs, already in order
fn replicate_requests(
keypair: &Arc<KeyPair>,
bank: &Arc<Bank>,
blob_receiver: &streamer::BlobReceiver,
blob_recycler: &packet::BlobRecycler,
crdt: &Arc<RwLock<Crdt>>,
blob_recycler: &BlobRecycler,
window_receiver: &BlobReceiver,
vote_blob_sender: &BlobSender,
last_vote: &mut u64,
) -> Result<()> {
let timer = Duration::new(1, 0);
let blobs = blob_receiver.recv_timeout(timer)?;
//coalesce all the available blobs into a single vote
let mut blobs = window_receiver.recv_timeout(timer)?;
while let Ok(mut more) = window_receiver.try_recv() {
blobs.append(&mut more);
}
let blobs_len = blobs.len();
let entries = ledger::reconstruct_entries_from_blobs(blobs, &blob_recycler)?;
let entries = ledger::reconstruct_entries_from_blobs(blobs.clone())?;
{
let votes = entries_to_votes(&entries);
let mut wcrdt = crdt.write().unwrap();
wcrdt.insert_votes(&votes);
};
inc_new_counter!(
"replicate-transactions",
entries.iter().map(|x| x.transactions.len()).sum()
);
let res = bank.process_entries(entries);
if res.is_err() {
error!("process_entries {} {:?}", blobs_len, res);
}
res?;
let now = timing::timestamp();
if now - *last_vote > VOTE_TIMEOUT_MS {
let height = res?;
let last_id = bank.last_id();
let shared_blob = blob_recycler.allocate();
let (vote, addr) = {
let mut wcrdt = crdt.write().unwrap();
//TODO: doesn't seem like there is a synchronous call to get height and id
info!("replicate_stage {} {:?}", height, &last_id[..8]);
wcrdt.new_vote(height, last_id)
}?;
{
let mut blob = shared_blob.write().unwrap();
let tx = Transaction::new_vote(&keypair, vote, last_id, 0);
let bytes = serialize(&tx)?;
let len = bytes.len();
blob.data[..len].copy_from_slice(&bytes);
blob.meta.set_addr(&addr);
blob.meta.size = len;
}
inc_new_counter!("replicate-vote_sent", 1);
*last_vote = now;
vote_blob_sender.send(VecDeque::from(vec![shared_blob]))?;
}
while let Some(blob) = blobs.pop_front() {
blob_recycler.recycle(blob);
}
Ok(())
}
pub fn new(
keypair: KeyPair,
bank: Arc<Bank>,
exit: Arc<AtomicBool>,
window_receiver: streamer::BlobReceiver,
blob_recycler: packet::BlobRecycler,
crdt: Arc<RwLock<Crdt>>,
blob_recycler: BlobRecycler,
window_receiver: BlobReceiver,
) -> Self {
let thread_hdl = Builder::new()
let (vote_blob_sender, vote_blob_receiver) = channel();
let send = UdpSocket::bind("0.0.0.0:0").expect("bind");
let t_responder = responder(
"replicate_stage",
send,
blob_recycler.clone(),
vote_blob_receiver,
);
let skeypair = Arc::new(keypair);
let t_replicate = Builder::new()
.name("solana-replicate-stage".to_string())
.spawn(move || loop {
let e = Self::replicate_requests(&bank, &window_receiver, &blob_recycler);
if e.is_err() && exit.load(Ordering::Relaxed) {
break;
.spawn(move || {
let mut timestamp: u64 = 0;
loop {
if let Err(e) = Self::replicate_requests(
&skeypair,
&bank,
&crdt,
&blob_recycler,
&window_receiver,
&vote_blob_sender,
&mut timestamp,
) {
match e {
Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
_ => error!("{:?}", e),
}
}
}
})
.unwrap();
ReplicateStage { thread_hdl }
ReplicateStage {
thread_hdls: vec![t_responder, t_replicate],
}
}
}
impl Service for ReplicateStage {
fn thread_hdls(self) -> Vec<JoinHandle<()>> {
self.thread_hdls
}
fn join(self) -> thread::Result<()> {
for thread_hdl in self.thread_hdls() {
thread_hdl.join()?;
}
Ok(())
}
}

src/request.rs

@@ -1,14 +1,15 @@
//! The `request` module defines the messages for the thin client.
use hash::Hash;
use signature::PublicKey;
use signature::{PublicKey, Signature};
#[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))]
#[derive(Serialize, Deserialize, Debug, Clone)]
#[derive(Serialize, Deserialize, Debug, Clone, Copy)]
pub enum Request {
GetBalance { key: PublicKey },
GetLastId,
GetTransactionCount,
GetSignature { signature: Signature },
}
impl Request {
@@ -20,7 +21,8 @@ impl Request {
#[derive(Serialize, Deserialize, Debug)]
pub enum Response {
Balance { key: PublicKey, val: Option<i64> },
Balance { key: PublicKey, val: i64 },
LastId { id: Hash },
TransactionCount { transaction_count: u64 },
SignatureStatus { signature_status: bool },
}

src/request_processor.rs

@@ -40,6 +40,12 @@ impl RequestProcessor {
info!("Response::TransactionCount {:?}", rsp);
Some(rsp)
}
Request::GetSignature { signature } => {
let signature_status = self.bank.has_signature(&signature);
let rsp = (Response::SignatureStatus { signature_status }, rsp_addr);
info!("Response::Signature {:?}", rsp);
Some(rsp)
}
}
}

src/request_stage.rs

@@ -1,29 +1,27 @@
//! The `request_stage` processes thin client Request messages.
use bincode::deserialize;
use packet;
use packet::SharedPackets;
use packet::{to_blobs, BlobRecycler, PacketRecycler, Packets, SharedPackets};
use rayon::prelude::*;
use request::Request;
use request_processor::RequestProcessor;
use result::Result;
use result::{Error, Result};
use service::Service;
use std::net::SocketAddr;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver};
use std::sync::mpsc::{channel, Receiver, RecvTimeoutError};
use std::sync::Arc;
use std::thread::{Builder, JoinHandle};
use std::thread::{self, Builder, JoinHandle};
use std::time::Instant;
use streamer;
use streamer::{self, BlobReceiver, BlobSender};
use timing;
pub struct RequestStage {
pub thread_hdl: JoinHandle<()>,
pub blob_receiver: streamer::BlobReceiver,
thread_hdl: JoinHandle<()>,
pub request_processor: Arc<RequestProcessor>,
}
impl RequestStage {
pub fn deserialize_requests(p: &packet::Packets) -> Vec<Option<(Request, SocketAddr)>> {
pub fn deserialize_requests(p: &Packets) -> Vec<Option<(Request, SocketAddr)>> {
p.packets
.par_iter()
.map(|x| {
@@ -37,9 +35,9 @@ impl RequestStage {
pub fn process_request_packets(
request_processor: &RequestProcessor,
packet_receiver: &Receiver<SharedPackets>,
blob_sender: &streamer::BlobSender,
packet_recycler: &packet::PacketRecycler,
blob_recycler: &packet::BlobRecycler,
blob_sender: &BlobSender,
packet_recycler: &PacketRecycler,
blob_recycler: &BlobRecycler,
) -> Result<()> {
let (batch, batch_len) = streamer::recv_batch(packet_receiver)?;
@@ -60,7 +58,7 @@ impl RequestStage {
let rsps = request_processor.process_requests(reqs);
let blobs = packet::to_blobs(rsps, blob_recycler)?;
let blobs = to_blobs(rsps, blob_recycler)?;
if !blobs.is_empty() {
info!("process: sending blobs: {}", blobs.len());
//don't wake up the other side if there is nothing
@@ -82,35 +80,47 @@ impl RequestStage {
}
pub fn new(
request_processor: RequestProcessor,
exit: Arc<AtomicBool>,
packet_receiver: Receiver<SharedPackets>,
packet_recycler: packet::PacketRecycler,
blob_recycler: packet::BlobRecycler,
) -> Self {
packet_recycler: PacketRecycler,
blob_recycler: BlobRecycler,
) -> (Self, BlobReceiver) {
let request_processor = Arc::new(request_processor);
let request_processor_ = request_processor.clone();
let (blob_sender, blob_receiver) = channel();
let thread_hdl = Builder::new()
.name("solana-request-stage".to_string())
.spawn(move || loop {
let e = Self::process_request_packets(
if let Err(e) = Self::process_request_packets(
&request_processor_,
&packet_receiver,
&blob_sender,
&packet_recycler,
&blob_recycler,
);
if e.is_err() {
if exit.load(Ordering::Relaxed) {
break;
) {
match e {
Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
_ => error!("{:?}", e),
}
}
})
.unwrap();
RequestStage {
thread_hdl,
(
RequestStage {
thread_hdl,
request_processor,
},
blob_receiver,
request_processor,
}
)
}
}
impl Service for RequestStage {
fn thread_hdls(self) -> Vec<JoinHandle<()>> {
vec![self.thread_hdl]
}
fn join(self) -> thread::Result<()> {
self.thread_hdl.join()
}
}

src/result.rs

@@ -2,9 +2,13 @@
use bank;
use bincode;
use crdt;
#[cfg(feature = "erasure")]
use erasure;
use serde_json;
use std;
use std::any::Any;
use streamer;
#[derive(Debug)]
pub enum Error {
@@ -16,10 +20,11 @@ pub enum Error {
RecvTimeoutError(std::sync::mpsc::RecvTimeoutError),
Serialize(std::boxed::Box<bincode::ErrorKind>),
BankError(bank::BankError),
CrdtError(crdt::CrdtError),
WindowError(streamer::WindowError),
#[cfg(feature = "erasure")]
ErasureError(erasure::ErasureError),
SendError,
Services,
CrdtTooSmall,
GenericError,
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -39,6 +44,22 @@ impl std::convert::From<bank::BankError> for Error {
Error::BankError(e)
}
}
impl std::convert::From<crdt::CrdtError> for Error {
fn from(e: crdt::CrdtError) -> Error {
Error::CrdtError(e)
}
}
impl std::convert::From<streamer::WindowError> for Error {
fn from(e: streamer::WindowError) -> Error {
Error::WindowError(e)
}
}
#[cfg(feature = "erasure")]
impl std::convert::From<erasure::ErasureError> for Error {
fn from(e: erasure::ErasureError) -> Error {
Error::ErasureError(e)
}
}
impl<T> std::convert::From<std::sync::mpsc::SendError<T>> for Error {
fn from(_e: std::sync::mpsc::SendError<T>) -> Error {
Error::SendError
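
These `From` impls are what let the stage loops in this changeset write `?` against a single crate-wide `Result<T>`. A self-contained sketch of the same pattern, using only std's io::Error rather than the crate's own error types:

use std::{fs, io};

#[derive(Debug)]
enum Error {
    Io(io::Error),
}

impl From<io::Error> for Error {
    fn from(e: io::Error) -> Error {
        Error::Io(e)
    }
}

type Result<T> = std::result::Result<T, Error>;

fn read_config(path: &str) -> Result<String> {
    // `?` applies the From impl above, converting io::Error into Error::Io
    Ok(fs::read_to_string(path)?)
}

fn main() {
    match read_config("does-not-exist.json") {
        Ok(s) => println!("{}", s),
        Err(e) => println!("unified error: {:?}", e),
    }
}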

src/rpu.rs

@@ -24,54 +24,64 @@
//! ```
use bank::Bank;
use packet;
use packet::{BlobRecycler, PacketRecycler};
use request_processor::RequestProcessor;
use request_stage::RequestStage;
use service::Service;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread::JoinHandle;
use std::thread::{self, JoinHandle};
use streamer;
pub struct Rpu {
pub thread_hdls: Vec<JoinHandle<()>>,
thread_hdls: Vec<JoinHandle<()>>,
}
impl Rpu {
pub fn new(
bank: Arc<Bank>,
bank: &Arc<Bank>,
requests_socket: UdpSocket,
respond_socket: UdpSocket,
exit: Arc<AtomicBool>,
) -> Self {
let packet_recycler = packet::PacketRecycler::default();
let packet_recycler = PacketRecycler::default();
let (packet_sender, packet_receiver) = channel();
let t_receiver = streamer::receiver(
requests_socket,
exit.clone(),
exit,
packet_recycler.clone(),
packet_sender,
);
let blob_recycler = packet::BlobRecycler::default();
let blob_recycler = BlobRecycler::default();
let request_processor = RequestProcessor::new(bank.clone());
let request_stage = RequestStage::new(
let (request_stage, blob_receiver) = RequestStage::new(
request_processor,
exit.clone(),
packet_receiver,
packet_recycler.clone(),
blob_recycler.clone(),
);
let t_responder = streamer::responder(
respond_socket,
exit.clone(),
blob_recycler.clone(),
request_stage.blob_receiver,
);
let t_responder =
streamer::responder("rpu", respond_socket, blob_recycler.clone(), blob_receiver);
let thread_hdls = vec![t_receiver, t_responder, request_stage.thread_hdl];
let mut thread_hdls = vec![t_receiver, t_responder];
thread_hdls.extend(request_stage.thread_hdls().into_iter());
Rpu { thread_hdls }
}
}
impl Service for Rpu {
fn thread_hdls(self) -> Vec<JoinHandle<()>> {
self.thread_hdls
}
fn join(self) -> thread::Result<()> {
for thread_hdl in self.thread_hdls() {
thread_hdl.join()?;
}
Ok(())
}
}

src/server.rs (deleted, 204 lines)

@@ -1,204 +0,0 @@
//! The `server` module hosts all the server microservices.
use bank::Bank;
use crdt::{Crdt, ReplicatedData};
use ncp::Ncp;
use packet;
use rpu::Rpu;
use std::io::Write;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, RwLock};
use std::thread::JoinHandle;
use std::time::Duration;
use streamer;
use tpu::Tpu;
use tvu::Tvu;
pub struct Server {
pub thread_hdls: Vec<JoinHandle<()>>,
}
impl Server {
/// Create a server instance acting as a leader.
///
/// ```text
/// .---------------------.
/// | Leader |
/// | |
/// .--------. | .-----. |
/// | |---->| | |
/// | Client | | | RPU | |
/// | |<----| | |
/// `----+---` | `-----` |
/// | | ^ |
/// | | | |
/// | | .--+---. |
/// | | | Bank | |
/// | | `------` |
/// | | ^ |
/// | | | | .------------.
/// | | .--+--. .-----. | | |
/// `-------->| TPU +-->| NCP +------>| Validators |
/// | `-----` `-----` | | |
/// | | `------------`
/// `---------------------`
/// ```
pub fn new_leader<W: Write + Send + 'static>(
bank: Bank,
tick_duration: Option<Duration>,
me: ReplicatedData,
requests_socket: UdpSocket,
transactions_socket: UdpSocket,
broadcast_socket: UdpSocket,
respond_socket: UdpSocket,
gossip_socket: UdpSocket,
exit: Arc<AtomicBool>,
writer: W,
) -> Self {
let bank = Arc::new(bank);
let mut thread_hdls = vec![];
let rpu = Rpu::new(bank.clone(), requests_socket, respond_socket, exit.clone());
thread_hdls.extend(rpu.thread_hdls);
let blob_recycler = packet::BlobRecycler::default();
let tpu = Tpu::new(
bank.clone(),
tick_duration,
transactions_socket,
blob_recycler.clone(),
exit.clone(),
writer,
);
thread_hdls.extend(tpu.thread_hdls);
let crdt = Arc::new(RwLock::new(Crdt::new(me)));
let window = streamer::default_window();
let gossip_send_socket = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
let ncp = Ncp::new(
crdt.clone(),
window.clone(),
gossip_socket,
gossip_send_socket,
exit.clone(),
).expect("Ncp::new");
thread_hdls.extend(ncp.thread_hdls);
let t_broadcast = streamer::broadcaster(
broadcast_socket,
exit.clone(),
crdt,
window,
blob_recycler.clone(),
tpu.blob_receiver,
);
thread_hdls.extend(vec![t_broadcast]);
Server { thread_hdls }
}
/// Create a server instance acting as a validator.
///
/// ```text
/// .-------------------------------.
/// | Validator |
/// | |
/// .--------. | .-----. |
/// | |-------------->| | |
/// | Client | | | RPU | |
/// | |<--------------| | |
/// `--------` | `-----` |
/// | ^ |
/// | | |
/// | .--+---. |
/// | | Bank | |
/// | `------` |
/// | ^ |
/// .--------. | | | .------------.
/// | | | .--+--. | | |
/// | Leader |<------------->| TVU +<--------------->| |
/// | | | `-----` | | Validators |
/// | | | ^ | | |
/// | | | | | | |
/// | | | .--+--. | | |
/// | |<------------->| NCP +<--------------->| |
/// | | | `-----` | | |
/// `--------` | | `------------`
/// `-------------------------------`
/// ```
pub fn new_validator(
bank: Bank,
me: ReplicatedData,
requests_socket: UdpSocket,
respond_socket: UdpSocket,
replicate_socket: UdpSocket,
gossip_listen_socket: UdpSocket,
repair_socket: UdpSocket,
entry_point: ReplicatedData,
exit: Arc<AtomicBool>,
) -> Self {
let bank = Arc::new(bank);
let mut thread_hdls = vec![];
let rpu = Rpu::new(bank.clone(), requests_socket, respond_socket, exit.clone());
thread_hdls.extend(rpu.thread_hdls);
let crdt = Arc::new(RwLock::new(Crdt::new(me)));
crdt.write()
.expect("'crdt' write lock before insert() in pub fn replicate")
.insert(&entry_point);
let window = streamer::default_window();
let gossip_send_socket = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
let retransmit_socket = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
let ncp = Ncp::new(
crdt.clone(),
window.clone(),
gossip_listen_socket,
gossip_send_socket,
exit.clone(),
).expect("Ncp::new");
let tvu = Tvu::new(
bank.clone(),
crdt.clone(),
window.clone(),
replicate_socket,
repair_socket,
retransmit_socket,
exit.clone(),
);
thread_hdls.extend(tvu.thread_hdls);
thread_hdls.extend(ncp.thread_hdls);
Server { thread_hdls }
}
}
#[cfg(test)]
mod tests {
use bank::Bank;
use crdt::TestNode;
use mint::Mint;
use server::Server;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
#[test]
fn validator_exit() {
let tn = TestNode::new();
let alice = Mint::new(10_000);
let bank = Bank::new(&alice);
let exit = Arc::new(AtomicBool::new(false));
let v = Server::new_validator(
bank,
tn.data.clone(),
tn.sockets.requests,
tn.sockets.respond,
tn.sockets.replicate,
tn.sockets.gossip,
tn.sockets.repair,
tn.data,
exit.clone(),
);
exit.store(true, Ordering::Relaxed);
for t in v.thread_hdls {
t.join().unwrap();
}
}
}

src/service.rs (new file)

@@ -0,0 +1,6 @@
use std::thread::{JoinHandle, Result};
pub trait Service {
fn thread_hdls(self) -> Vec<JoinHandle<()>>;
fn join(self) -> Result<()>;
}
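The new Service trait standardizes how a stage surrenders its JoinHandles and joins them. A hedged sketch of the pattern, using a hypothetical single-thread EchoStage (not in the codebase):

use service::Service;
use std::thread::{spawn, JoinHandle, Result};

struct EchoStage {
    thread_hdl: JoinHandle<()>,
}

impl EchoStage {
    fn new() -> Self {
        EchoStage {
            // a stand-in worker thread; real stages spawn their service loops here
            thread_hdl: spawn(|| ()),
        }
    }
}

impl Service for EchoStage {
    fn thread_hdls(self) -> Vec<JoinHandle<()>> {
        vec![self.thread_hdl]
    }
    fn join(self) -> Result<()> {
        for thread_hdl in self.thread_hdls() {
            thread_hdl.join()?;
        }
        Ok(())
    }
}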


@@ -8,8 +8,11 @@ use ring::error::Unspecified;
use ring::rand::SecureRandom;
use ring::signature::Ed25519KeyPair;
use ring::{rand, signature};
use serde_json;
use std::cell::RefCell;
use untrusted;
use std::error;
use std::fs::File;
use untrusted::Input;
pub type KeyPair = Ed25519KeyPair;
pub type PublicKey = GenericArray<u8, U32>;
@@ -24,10 +27,8 @@ impl KeyPairUtil for Ed25519KeyPair {
/// Return a new ED25519 keypair
fn new() -> Self {
let rng = rand::SystemRandom::new();
let pkcs8_bytes = signature::Ed25519KeyPair::generate_pkcs8(&rng)
.expect("generate_pkcs8 in signature pb fn new");
signature::Ed25519KeyPair::from_pkcs8(untrusted::Input::from(&pkcs8_bytes))
.expect("from_pcks8 in signature pb fn new")
let pkcs8_bytes = Ed25519KeyPair::generate_pkcs8(&rng).expect("generate_pkcs8");
Ed25519KeyPair::from_pkcs8(Input::from(&pkcs8_bytes)).expect("from_pcks8")
}
/// Return the public key for the given keypair
@@ -42,9 +43,9 @@ pub trait SignatureUtil {
impl SignatureUtil for GenericArray<u8, U64> {
fn verify(&self, peer_public_key_bytes: &[u8], msg_bytes: &[u8]) -> bool {
let peer_public_key = untrusted::Input::from(peer_public_key_bytes);
let msg = untrusted::Input::from(msg_bytes);
let sig = untrusted::Input::from(self);
let peer_public_key = Input::from(peer_public_key_bytes);
let msg = Input::from(msg_bytes);
let sig = Input::from(self);
signature::verify(&signature::ED25519, peer_public_key, msg, sig).is_ok()
}
}
@@ -77,7 +78,7 @@ impl GenKeys {
.into_par_iter()
.map(|seed| {
let pkcs8 = GenKeys::new(seed).new_key();
KeyPair::from_pkcs8(untrusted::Input::from(&pkcs8)).unwrap()
KeyPair::from_pkcs8(Input::from(&pkcs8)).unwrap()
})
.collect()
}
@@ -91,6 +92,18 @@ impl SecureRandom for GenKeys {
}
}
pub fn read_pkcs8(path: &str) -> Result<Vec<u8>, Box<error::Error>> {
let file = File::open(path.to_string())?;
let pkcs8: Vec<u8> = serde_json::from_reader(file)?;
Ok(pkcs8)
}
pub fn read_keypair(path: &str) -> Result<KeyPair, Box<error::Error>> {
let pkcs8 = read_pkcs8(path)?;
let keypair = Ed25519KeyPair::from_pkcs8(Input::from(&pkcs8))?;
Ok(keypair)
}
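A usage sketch for the new helpers, in this module's context, assuming id.json holds pkcs8 bytes serialized as a JSON array (the path and load_identity are hypothetical):

fn load_identity() -> Result<PublicKey, Box<error::Error>> {
    // read_pkcs8 deserializes the JSON byte array; read_keypair rebuilds the keypair
    let keypair = read_keypair("id.json")?;
    Ok(keypair.pubkey())
}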
#[cfg(test)]
mod tests {
use super::*;
@@ -121,17 +134,3 @@ mod tests {
assert_eq!(gen_n_pubkeys(seed, 50), gen_n_pubkeys(seed, 50));
}
}
#[cfg(all(feature = "unstable", test))]
mod bench {
extern crate test;
use self::test::Bencher;
use super::*;
#[bench]
fn bench_gen_keys(b: &mut Bencher) {
let rnd = GenKeys::new([0u8; 32]);
b.iter(|| rnd.gen_n_keypairs(1000));
}
}


@@ -8,7 +8,6 @@ use counter::Counter;
use packet::{Packet, SharedPackets};
use std::mem::size_of;
use std::sync::atomic::AtomicUsize;
use std::time::Instant;
use transaction::{PUB_KEY_OFFSET, SIGNED_DATA_OFFSET, SIG_OFFSET};
pub const TX_OFFSET: usize = 0;
@@ -23,6 +22,8 @@ struct Elems {
#[cfg(feature = "cuda")]
#[link(name = "cuda_verify_ed25519")]
extern "C" {
fn ed25519_init() -> bool;
fn ed25519_set_verbose(val: bool);
fn ed25519_verify_many(
vecs: *const Elems,
num: u32, //number of vecs
@@ -35,6 +36,11 @@ extern "C" {
) -> u32;
}
#[cfg(not(feature = "cuda"))]
pub fn init() {
// stub
}
#[cfg(not(feature = "cuda"))]
fn verify_packet(packet: &Packet) -> u8 {
use ring::signature;
@@ -60,18 +66,17 @@ fn verify_packet(packet: &Packet) -> u8 {
).is_ok() as u8
}
fn batch_size(batches: &Vec<SharedPackets>) -> usize {
fn batch_size(batches: &[SharedPackets]) -> usize {
batches
.iter()
.map(|p| p.read().unwrap().packets.len())
.sum()
}
#[cfg_attr(feature = "cargo-clippy", allow(ptr_arg))]
#[cfg(not(feature = "cuda"))]
pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
use rayon::prelude::*;
static mut COUNTER: Counter = create_counter!("ed25519_verify", 1);
let start = Instant::now();
let count = batch_size(batches);
info!("CPU ECDSA for {}", batch_size(batches));
let rv = batches
@@ -85,15 +90,24 @@ pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
.collect()
})
.collect();
inc_counter!(COUNTER, count, start);
inc_new_counter!("ed25519_verify", count);
rv
}
#[cfg(feature = "cuda")]
pub fn init() {
unsafe {
ed25519_set_verbose(true);
if !ed25519_init() {
panic!("ed25519_init() failed");
}
ed25519_set_verbose(false);
}
}
#[cfg(feature = "cuda")]
pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
use packet::PACKET_DATA_SIZE;
static mut COUNTER: Counter = create_counter!("ed25519_verify_cuda", 1);
let start = Instant::now();
let count = batch_size(batches);
info!("CUDA ECDSA for {}", batch_size(batches));
let mut out = Vec::new();
@@ -153,7 +167,7 @@ pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
num += 1;
}
}
inc_counter!(COUNTER, count, start);
inc_new_counter!("ed25519_verify", count);
rvs
}
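Both the CPU and CUDA paths return one Vec<u8> per batch, holding a 0/1 validity flag per packet in that batch. A sketch of how a caller might fold those flags (count_valid is hypothetical, not part of this diff):

fn count_valid(batches: &Vec<SharedPackets>) -> usize {
    ed25519_verify(batches)
        .iter()
        // each inner Vec<u8> is parallel to the packets of one batch
        .flat_map(|flags| flags.iter())
        .filter(|&&flag| flag == 1)
        .count()
}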
@@ -189,7 +203,7 @@ mod tests {
// jumble some data to test failure
if modify_data {
packet.data[20] = 10;
packet.data[20] = packet.data[20].wrapping_add(10);
}
// generate packet vector


@@ -7,39 +7,38 @@
use packet::SharedPackets;
use rand::{thread_rng, Rng};
use result::Result;
use result::{Error, Result};
use service::Service;
use sigverify;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::mpsc::{channel, Receiver, RecvTimeoutError, Sender};
use std::sync::{Arc, Mutex};
use std::thread::{spawn, JoinHandle};
use std::thread::{self, spawn, JoinHandle};
use std::time::Instant;
use streamer;
use streamer::{self, PacketReceiver};
use timing;
pub type VerifiedPackets = Vec<(SharedPackets, Vec<u8>)>;
pub struct SigVerifyStage {
pub verified_receiver: Receiver<Vec<(SharedPackets, Vec<u8>)>>,
pub thread_hdls: Vec<JoinHandle<()>>,
thread_hdls: Vec<JoinHandle<()>>,
}
impl SigVerifyStage {
pub fn new(exit: Arc<AtomicBool>, packet_receiver: Receiver<SharedPackets>) -> Self {
pub fn new(packet_receiver: Receiver<SharedPackets>) -> (Self, Receiver<VerifiedPackets>) {
sigverify::init();
let (verified_sender, verified_receiver) = channel();
let thread_hdls = Self::verifier_services(exit, packet_receiver, verified_sender);
SigVerifyStage {
thread_hdls,
verified_receiver,
}
let thread_hdls = Self::verifier_services(packet_receiver, verified_sender);
(SigVerifyStage { thread_hdls }, verified_receiver)
}
fn verify_batch(batch: Vec<SharedPackets>) -> Vec<(SharedPackets, Vec<u8>)> {
fn verify_batch(batch: Vec<SharedPackets>) -> VerifiedPackets {
let r = sigverify::ed25519_verify(&batch);
batch.into_iter().zip(r).collect()
}
fn verifier(
recvr: &Arc<Mutex<streamer::PacketReceiver>>,
sendr: &Arc<Mutex<Sender<Vec<(SharedPackets, Vec<u8>)>>>>,
recvr: &Arc<Mutex<PacketReceiver>>,
sendr: &Arc<Mutex<Sender<VerifiedPackets>>>,
) -> Result<()> {
let (batch, len) =
streamer::recv_batch(&recvr.lock().expect("'recvr' lock in fn verifier"))?;
@@ -75,27 +74,41 @@ impl SigVerifyStage {
}
fn verifier_service(
exit: Arc<AtomicBool>,
packet_receiver: Arc<Mutex<streamer::PacketReceiver>>,
verified_sender: Arc<Mutex<Sender<Vec<(SharedPackets, Vec<u8>)>>>>,
packet_receiver: Arc<Mutex<PacketReceiver>>,
verified_sender: Arc<Mutex<Sender<VerifiedPackets>>>,
) -> JoinHandle<()> {
spawn(move || loop {
let e = Self::verifier(&packet_receiver.clone(), &verified_sender.clone());
if e.is_err() && exit.load(Ordering::Relaxed) {
break;
if let Err(e) = Self::verifier(&packet_receiver, &verified_sender) {
match e {
Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
_ => error!("{:?}", e),
}
}
})
}
fn verifier_services(
exit: Arc<AtomicBool>,
packet_receiver: streamer::PacketReceiver,
verified_sender: Sender<Vec<(SharedPackets, Vec<u8>)>>,
packet_receiver: PacketReceiver,
verified_sender: Sender<VerifiedPackets>,
) -> Vec<JoinHandle<()>> {
let sender = Arc::new(Mutex::new(verified_sender));
let receiver = Arc::new(Mutex::new(packet_receiver));
(0..4)
.map(|_| Self::verifier_service(exit.clone(), receiver.clone(), sender.clone()))
.map(|_| Self::verifier_service(receiver.clone(), sender.clone()))
.collect()
}
}
impl Service for SigVerifyStage {
fn thread_hdls(self) -> Vec<JoinHandle<()>> {
self.thread_hdls
}
fn join(self) -> thread::Result<()> {
for thread_hdl in self.thread_hdls() {
thread_hdl.join()?;
}
Ok(())
}
}
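With the exit flag gone, shutdown is driven by channel disconnection: the verifier loop above breaks on RecvTimeoutError::Disconnected. A minimal lifecycle sketch (all bindings hypothetical):

fn shutdown_demo() {
    let (packet_sender, packet_receiver) = channel::<SharedPackets>();
    let (stage, _verified_receiver) = SigVerifyStage::new(packet_receiver);
    // Dropping the only sender disconnects the channel; each verifier thread
    // observes RecvTimeoutError::Disconnected and breaks out of its loop.
    drop(packet_sender);
    stage.join().unwrap();
}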

File diff suppressed because it is too large.


@@ -10,8 +10,13 @@ use signature::{KeyPair, PublicKey, Signature};
use std::collections::HashMap;
use std::io;
use std::net::{SocketAddr, UdpSocket};
use std::time::Instant;
use timing;
use transaction::Transaction;
use influx_db_client as influxdb;
use metrics;
/// An object for querying and sending transactions to the network.
pub struct ThinClient {
requests_addr: SocketAddr,
@@ -20,7 +25,8 @@ pub struct ThinClient {
transactions_socket: UdpSocket,
last_id: Option<Hash>,
transaction_count: u64,
balances: HashMap<PublicKey, Option<i64>>,
balances: HashMap<PublicKey, i64>,
signature_status: bool,
}
impl ThinClient {
@@ -33,7 +39,7 @@ impl ThinClient {
transactions_addr: SocketAddr,
transactions_socket: UdpSocket,
) -> Self {
let client = ThinClient {
ThinClient {
requests_addr,
requests_socket,
transactions_addr,
@@ -41,8 +47,8 @@ impl ThinClient {
last_id: None,
transaction_count: 0,
balances: HashMap::new(),
};
client
signature_status: false,
}
}
pub fn recv_response(&self) -> io::Result<Response> {
@@ -50,30 +56,37 @@ impl ThinClient {
trace!("start recv_from");
self.requests_socket.recv_from(&mut buf)?;
trace!("end recv_from");
let resp = deserialize(&buf).expect("deserialize balance in thin_client");
Ok(resp)
deserialize(&buf).or_else(|_| Err(io::Error::new(io::ErrorKind::Other, "deserialize")))
}
pub fn process_response(&mut self, resp: Response) {
match resp {
pub fn process_response(&mut self, resp: &Response) {
match *resp {
Response::Balance { key, val } => {
trace!("Response balance {:?} {:?}", key, val);
self.balances.insert(key, val);
}
Response::LastId { id } => {
info!("Response last_id {:?}", id);
trace!("Response last_id {:?}", id);
self.last_id = Some(id);
}
Response::TransactionCount { transaction_count } => {
info!("Response transaction count {:?}", transaction_count);
trace!("Response transaction count {:?}", transaction_count);
self.transaction_count = transaction_count;
}
Response::SignatureStatus { signature_status } => {
self.signature_status = signature_status;
if signature_status {
trace!("Response found signature");
} else {
trace!("Response signature not found");
}
}
}
}
/// Send a signed Transaction to the server for processing. This method
/// does not wait for a response.
pub fn transfer_signed(&self, tx: Transaction) -> io::Result<usize> {
pub fn transfer_signed(&self, tx: &Transaction) -> io::Result<usize> {
let data = serialize(&tx).expect("serialize Transaction in pub fn transfer_signed");
self.transactions_socket
.send_to(&data, &self.transactions_addr)
@@ -87,9 +100,20 @@ impl ThinClient {
to: PublicKey,
last_id: &Hash,
) -> io::Result<Signature> {
let now = Instant::now();
let tx = Transaction::new(keypair, to, n, *last_id);
let sig = tx.sig;
self.transfer_signed(tx).map(|_| sig)
let result = self.transfer_signed(&tx).map(|_| sig);
metrics::submit(
influxdb::Point::new("thinclient")
.add_tag("op", influxdb::Value::String("transfer".to_string()))
.add_field(
"duration_ms",
influxdb::Value::Integer(timing::duration_as_ms(&now.elapsed()) as i64),
)
.to_owned(),
);
result
}
/// Request the balance of the user holding `pubkey`. This method blocks
@@ -109,9 +133,12 @@ impl ThinClient {
if let Response::Balance { key, .. } = &resp {
done = key == pubkey;
}
self.process_response(resp);
self.process_response(&resp);
}
self.balances[pubkey].ok_or(io::Error::new(io::ErrorKind::Other, "nokey"))
self.balances
.get(pubkey)
.cloned()
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "nokey"))
}
/// Request the transaction count. If the response packet is dropped by the network,
@@ -129,10 +156,10 @@ impl ThinClient {
if let Ok(resp) = self.recv_response() {
info!("recv_response {:?}", resp);
if let &Response::TransactionCount { .. } = &resp {
if let Response::TransactionCount { .. } = resp {
done = true;
}
self.process_response(resp);
self.process_response(&resp);
}
}
self.transaction_count
@@ -141,38 +168,88 @@ impl ThinClient {
/// Request the last Entry ID from the server. This method blocks
/// until the server sends a response.
pub fn get_last_id(&mut self) -> Hash {
info!("get_last_id");
trace!("get_last_id");
let req = Request::GetLastId;
let data = serialize(&req).expect("serialize GetLastId in pub fn get_last_id");
let mut done = false;
while !done {
debug!("get_last_id send_to {}", &self.requests_addr);
self.requests_socket
.send_to(&data, &self.requests_addr)
.expect("buffer error in pub fn get_last_id");
match self.recv_response() {
Ok(resp) => {
if let Response::LastId { .. } = resp {
done = true;
}
self.process_response(&resp);
}
Err(e) => {
debug!("thin_client get_last_id error: {}", e);
}
}
}
self.last_id.expect("some last_id")
}
pub fn poll_get_balance(&mut self, pubkey: &PublicKey) -> io::Result<i64> {
let mut balance;
let now = Instant::now();
loop {
balance = self.get_balance(pubkey);
if balance.is_ok() && *balance.as_ref().unwrap() != 0 || now.elapsed().as_secs() > 1 {
break;
}
}
metrics::submit(
influxdb::Point::new("thinclient")
.add_tag("op", influxdb::Value::String("get_balance".to_string()))
.add_field(
"duration_ms",
influxdb::Value::Integer(timing::duration_as_ms(&now.elapsed()) as i64),
)
.to_owned(),
);
balance
}
/// Check a signature in the bank. This method blocks
/// until the server sends a response.
pub fn check_signature(&mut self, sig: &Signature) -> bool {
trace!("check_signature");
let req = Request::GetSignature { signature: *sig };
let data = serialize(&req).expect("serialize GetSignature in pub fn check_signature");
let now = Instant::now();
let mut done = false;
while !done {
self.requests_socket
.send_to(&data, &self.requests_addr)
.expect("buffer error in pub fn get_last_id");
if let Ok(resp) = self.recv_response() {
if let &Response::LastId { .. } = &resp {
if let Response::SignatureStatus { .. } = resp {
done = true;
}
self.process_response(resp);
self.process_response(&resp);
}
}
self.last_id.expect("some last_id")
metrics::submit(
influxdb::Point::new("thinclient")
.add_tag("op", influxdb::Value::String("check_signature".to_string()))
.add_field(
"duration_ms",
influxdb::Value::Integer(timing::duration_as_ms(&now.elapsed()) as i64),
)
.to_owned(),
);
self.signature_status
}
}
pub fn poll_get_balance(&mut self, pubkey: &PublicKey) -> io::Result<i64> {
use std::time::Instant;
let mut balance;
let now = Instant::now();
loop {
balance = self.get_balance(pubkey);
if balance.is_ok() || now.elapsed().as_secs() > 1 {
break;
}
}
balance
impl Drop for ThinClient {
fn drop(&mut self) {
metrics::flush();
}
}
@@ -182,9 +259,10 @@ mod tests {
use bank::Bank;
use budget::Budget;
use crdt::TestNode;
use fullnode::FullNode;
use logger;
use mint::Mint;
use server::Server;
use service::Service;
use signature::{KeyPair, KeyPairUtil};
use std::io::sink;
use std::sync::atomic::{AtomicBool, Ordering};
@@ -196,22 +274,20 @@ mod tests {
#[test]
fn test_thin_client() {
logger::setup();
let leader = TestNode::new();
let leader = TestNode::new_localhost();
let leader_data = leader.data.clone();
let alice = Mint::new(10_000);
let bank = Bank::new(&alice);
let bob_pubkey = KeyPair::new().pubkey();
let exit = Arc::new(AtomicBool::new(false));
let server = Server::new_leader(
let server = FullNode::new_leader(
bank,
0,
None,
Some(Duration::from_millis(30)),
leader.data.clone(),
leader.sockets.requests,
leader.sockets.transaction,
leader.sockets.broadcast,
leader.sockets.respond,
leader.sockets.gossip,
leader,
exit.clone(),
sink(),
);
@@ -221,9 +297,9 @@ mod tests {
let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut client = ThinClient::new(
leader.data.requests_addr,
leader_data.contact_info.rpu,
requests_socket,
leader.data.transactions_addr,
leader_data.contact_info.tpu,
transactions_socket,
);
let last_id = client.get_last_id();
@@ -233,29 +309,80 @@ mod tests {
let balance = client.poll_get_balance(&bob_pubkey);
assert_eq!(balance.unwrap(), 500);
exit.store(true, Ordering::Relaxed);
for t in server.thread_hdls {
t.join().unwrap();
}
server.join().unwrap();
}
// sleep(Duration::from_millis(300)); is unstable
#[test]
#[ignore]
fn test_bad_sig() {
logger::setup();
let leader = TestNode::new();
let leader = TestNode::new_localhost();
let alice = Mint::new(10_000);
let bank = Bank::new(&alice);
let bob_pubkey = KeyPair::new().pubkey();
let exit = Arc::new(AtomicBool::new(false));
let leader_data = leader.data.clone();
let server = Server::new_leader(
let server = FullNode::new_leader(
bank,
0,
None,
Some(Duration::from_millis(30)),
leader.data.clone(),
leader.sockets.requests,
leader.sockets.transaction,
leader.sockets.broadcast,
leader.sockets.respond,
leader.sockets.gossip,
leader,
exit.clone(),
sink(),
);
//TODO: remove this sleep, or add a retry so CI is stable
sleep(Duration::from_millis(300));
let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
requests_socket
.set_read_timeout(Some(Duration::new(5, 0)))
.unwrap();
let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut client = ThinClient::new(
leader_data.contact_info.rpu,
requests_socket,
leader_data.contact_info.tpu,
transactions_socket,
);
let last_id = client.get_last_id();
let tx = Transaction::new(&alice.keypair(), bob_pubkey, 500, last_id);
let _sig = client.transfer_signed(&tx).unwrap();
let last_id = client.get_last_id();
let mut tr2 = Transaction::new(&alice.keypair(), bob_pubkey, 501, last_id);
if let Instruction::NewContract(contract) = &mut tr2.instruction {
contract.tokens = 502;
contract.plan = Plan::Budget(Budget::new_payment(502, bob_pubkey));
}
let _sig = client.transfer_signed(&tr2).unwrap();
let balance = client.poll_get_balance(&bob_pubkey);
assert_eq!(balance.unwrap(), 500);
exit.store(true, Ordering::Relaxed);
server.join().unwrap();
}
#[test]
fn test_client_check_signature() {
logger::setup();
let leader = TestNode::new_localhost();
let alice = Mint::new(10_000);
let bank = Bank::new(&alice);
let bob_pubkey = KeyPair::new().pubkey();
let exit = Arc::new(AtomicBool::new(false));
let leader_data = leader.data.clone();
let server = FullNode::new_leader(
bank,
0,
None,
Some(Duration::from_millis(30)),
leader,
exit.clone(),
sink(),
);
@@ -267,31 +394,20 @@ mod tests {
.unwrap();
let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut client = ThinClient::new(
leader.data.requests_addr,
leader_data.contact_info.rpu,
requests_socket,
leader.data.transactions_addr,
leader_data.contact_info.tpu,
transactions_socket,
);
let last_id = client.get_last_id();
let sig = client
.transfer(500, &alice.keypair(), bob_pubkey, &last_id)
.unwrap();
sleep(Duration::from_millis(100));
let tx = Transaction::new(&alice.keypair(), bob_pubkey, 500, last_id);
assert!(client.check_signature(&sig));
let _sig = client.transfer_signed(tx).unwrap();
let last_id = client.get_last_id();
let mut tr2 = Transaction::new(&alice.keypair(), bob_pubkey, 501, last_id);
if let Instruction::NewContract(contract) = &mut tr2.instruction {
contract.tokens = 502;
contract.plan = Plan::Budget(Budget::new_payment(502, bob_pubkey));
}
let _sig = client.transfer_signed(tr2).unwrap();
let balance = client.poll_get_balance(&bob_pubkey);
assert_eq!(balance.unwrap(), 500);
exit.store(true, Ordering::Relaxed);
for t in server.thread_hdls {
t.join().unwrap();
}
server.join().unwrap();
}
}


@@ -3,20 +3,20 @@ use std::time::Duration;
use std::time::{SystemTime, UNIX_EPOCH};
pub fn duration_as_us(d: &Duration) -> u64 {
return (d.as_secs() * 1000 * 1000) + (d.subsec_nanos() as u64 / 1_000);
(d.as_secs() * 1000 * 1000) + (u64::from(d.subsec_nanos()) / 1_000)
}
pub fn duration_as_ms(d: &Duration) -> u64 {
return (d.as_secs() * 1000) + (d.subsec_nanos() as u64 / 1_000_000);
(d.as_secs() * 1000) + (u64::from(d.subsec_nanos()) / 1_000_000)
}
pub fn duration_as_s(d: &Duration) -> f32 {
return d.as_secs() as f32 + (d.subsec_nanos() as f32 / 1_000_000_000.0);
d.as_secs() as f32 + (d.subsec_nanos() as f32 / 1_000_000_000.0)
}
pub fn timestamp() -> u64 {
let now = SystemTime::now()
.duration_since(UNIX_EPOCH)
.expect("create timestamp in timing");
return duration_as_ms(&now);
duration_as_ms(&now)
}
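A quick sanity check of the conversions above, as a sketch: 1.5 seconds is 1_500 ms and 1_500_000 us.

fn timing_demo() {
    // 1 second plus 500_000_000 nanoseconds = 1.5 s
    let d = Duration::new(1, 500_000_000);
    assert_eq!(duration_as_ms(&d), 1_500);
    assert_eq!(duration_as_us(&d), 1_500_000);
}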


@@ -27,73 +27,95 @@
use bank::Bank;
use banking_stage::BankingStage;
use crdt::Crdt;
use fetch_stage::FetchStage;
use packet::{BlobRecycler, PacketRecycler};
use record_stage::RecordStage;
use service::Service;
use sigverify_stage::SigVerifyStage;
use std::io::Write;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, Mutex};
use std::thread::JoinHandle;
use std::sync::{Arc, RwLock};
use std::thread::{self, JoinHandle};
use std::time::Duration;
use streamer::BlobReceiver;
use write_stage::WriteStage;
pub struct Tpu {
pub blob_receiver: BlobReceiver,
pub thread_hdls: Vec<JoinHandle<()>>,
fetch_stage: FetchStage,
sigverify_stage: SigVerifyStage,
banking_stage: BankingStage,
record_stage: RecordStage,
write_stage: WriteStage,
}
impl Tpu {
pub fn new<W: Write + Send + 'static>(
bank: Arc<Bank>,
bank: &Arc<Bank>,
crdt: &Arc<RwLock<Crdt>>,
tick_duration: Option<Duration>,
transactions_socket: UdpSocket,
blob_recycler: BlobRecycler,
blob_recycler: &BlobRecycler,
exit: Arc<AtomicBool>,
writer: W,
) -> Self {
) -> (Self, BlobReceiver) {
let packet_recycler = PacketRecycler::default();
let fetch_stage =
FetchStage::new(transactions_socket, exit.clone(), packet_recycler.clone());
let (fetch_stage, packet_receiver) =
FetchStage::new(transactions_socket, exit, &packet_recycler);
let sigverify_stage = SigVerifyStage::new(exit.clone(), fetch_stage.packet_receiver);
let (sigverify_stage, verified_receiver) = SigVerifyStage::new(packet_receiver);
let banking_stage = BankingStage::new(
bank.clone(),
exit.clone(),
sigverify_stage.verified_receiver,
packet_recycler.clone(),
);
let (banking_stage, signal_receiver) =
BankingStage::new(bank.clone(), verified_receiver, packet_recycler.clone());
let record_stage = match tick_duration {
Some(tick_duration) => RecordStage::new_with_clock(
banking_stage.signal_receiver,
&bank.last_id(),
tick_duration,
),
None => RecordStage::new(banking_stage.signal_receiver, &bank.last_id()),
let (record_stage, entry_receiver) = match tick_duration {
Some(tick_duration) => {
RecordStage::new_with_clock(signal_receiver, &bank.last_id(), tick_duration)
}
None => RecordStage::new(signal_receiver, &bank.last_id()),
};
let write_stage = WriteStage::new(
let (write_stage, blob_receiver) = WriteStage::new(
bank.clone(),
exit.clone(),
crdt.clone(),
blob_recycler.clone(),
Mutex::new(writer),
record_stage.entry_receiver,
writer,
entry_receiver,
);
let mut thread_hdls = vec![
banking_stage.thread_hdl,
record_stage.thread_hdl,
write_stage.thread_hdl,
];
thread_hdls.extend(fetch_stage.thread_hdls.into_iter());
thread_hdls.extend(sigverify_stage.thread_hdls.into_iter());
Tpu {
blob_receiver: write_stage.blob_receiver,
thread_hdls,
}
let tpu = Tpu {
fetch_stage,
sigverify_stage,
banking_stage,
record_stage,
write_stage,
};
(tpu, blob_receiver)
}
pub fn close(self) -> thread::Result<()> {
self.fetch_stage.close();
self.join()
}
}
impl Service for Tpu {
fn thread_hdls(self) -> Vec<JoinHandle<()>> {
let mut thread_hdls = vec![];
thread_hdls.extend(self.fetch_stage.thread_hdls().into_iter());
thread_hdls.extend(self.sigverify_stage.thread_hdls().into_iter());
thread_hdls.extend(self.banking_stage.thread_hdls().into_iter());
thread_hdls.extend(self.record_stage.thread_hdls().into_iter());
thread_hdls.extend(self.write_stage.thread_hdls().into_iter());
thread_hdls
}
fn join(self) -> thread::Result<()> {
for thread_hdl in self.thread_hdls() {
thread_hdl.join()?;
}
Ok(())
}
}


@@ -32,9 +32,9 @@ impl PaymentPlan for Plan {
}
}
fn apply_witness(&mut self, witness: &Witness) {
fn apply_witness(&mut self, witness: &Witness, from: &PublicKey) {
match self {
Plan::Budget(budget) => budget.apply_witness(witness),
Plan::Budget(budget) => budget.apply_witness(witness, from),
}
}
}
@@ -47,6 +47,17 @@ pub struct Contract {
pub plan: Plan,
}
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Vote {
/// We send some gossip-specific membership information through the vote to shortcut
/// liveness voting.
/// The version of the CRDT struct that the last_id of this network voted with
pub version: u64,
/// The version of the CRDT struct that has the same network configuration as this one
pub contact_info_version: u64,
// TODO: add signature of the state here as well
}
/// An instruction to progress the smart contract.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Instruction {
@@ -59,6 +70,9 @@ pub enum Instruction {
/// Tell the payment plan that the `NewContract` with `Signature` has been
/// signed by the containing transaction's `PublicKey`.
ApplySignature(Signature),
/// Vote for a PoH that is equal to the last_id of this transaction
NewVote(Vote),
}
/// An instruction signed by a client with `PublicKey`.
@@ -135,6 +149,10 @@ impl Transaction {
Self::new_from_instruction(from_keypair, instruction, last_id, 0)
}
pub fn new_vote(from_keypair: &KeyPair, vote: Vote, last_id: Hash, fee: i64) -> Self {
Transaction::new_from_instruction(&from_keypair, Instruction::NewVote(vote), last_id, fee)
}
/// Create and sign a postdated Transaction. Used for unit-testing.
pub fn new_on_date(
from_keypair: &KeyPair,
@@ -145,7 +163,7 @@ impl Transaction {
) -> Self {
let from = from_keypair.pubkey();
let budget = Budget::Or(
(Condition::Timestamp(dt), Payment { tokens, to }),
(Condition::Timestamp(dt, from), Payment { tokens, to }),
(Condition::Signature(from), Payment { tokens, to: from }),
);
let plan = Plan::Budget(budget);
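A hedged sketch of the new vote path; the version values, keypair, and last_id below are placeholders, not values this diff prescribes:

fn vote_example(keypair: &KeyPair, last_id: Hash) -> Transaction {
    let vote = Vote {
        version: 1,              // placeholder CRDT version
        contact_info_version: 1, // placeholder contact-info version
    };
    // fee of 0, matching the shape of Transaction::new_vote above
    Transaction::new_vote(keypair, vote, last_id, 0)
}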

Some files were not shown because too many files have changed in this diff.