Compare commits

...

200 Commits
v0.8.1 ... v0.9

Author SHA1 Message Date
68ec814bf3 Rename buildkite-snap to buildkite-secondary 2018-11-05 08:48:58 -08:00
d7f283a00a Add version for path dependencies 2018-10-24 20:29:20 -07:00
609889cc58 Wildcard early OOM deb package revision (#1554) (#1568) 2018-10-22 10:59:22 -07:00
83fa02ee44 Add ERC20-like Token program 2018-10-09 11:44:46 -07:00
d8fcb22000 Purge BudgetTransaction from banking_stage 2018-10-09 11:44:46 -07:00
6b1c90a8b5 Demote log messages 2018-10-09 11:44:46 -07:00
67d72e709f Demote 'not enough peers in crdt table' log message 2018-10-02 21:55:28 -07:00
f0d2870e0f Run a fullnode+drone automatically when the container starts up 2018-10-02 18:09:56 -07:00
02c47b48da Publish minimal Solana docker images to dockerhub 2018-10-02 16:28:54 -07:00
d30a39cd38 Remove SNAP_ prefix 2018-10-02 16:28:54 -07:00
9e57d0467e Return all instances 2018-10-01 07:51:12 -07:00
fb57d13c03 Correctly deserialize large userdata 2018-09-29 19:26:26 -07:00
41f6e27bba Ignore keep alive for completed games 2018-09-29 19:26:26 -07:00
aff5649b39 Add tic-tac-toe dashboard program 2018-09-28 19:13:09 -07:00
9fd6ffe83f s/grid/board/g 2018-09-28 19:13:09 -07:00
ff805361a9 Simplify game setup messaging 2018-09-28 19:13:09 -07:00
30d1b0b4bf Add KeepAlive message so players can detect abandoned games 2018-09-28 19:13:09 -07:00
5233cf1ca6 Update for new solana-jsonrpc 2018-09-28 18:10:15 -07:00
d2754fd702 Create new wallet on each run of wallet-sanity 2018-09-28 07:40:17 -07:00
ce9a0ae215 Specify zone 2018-09-28 07:35:33 -07:00
067adcdfa8 Include -z when deleting network 2018-09-27 21:27:38 -07:00
675ff64094 Fail CI on clippy warnings 2018-09-27 16:21:12 -06:00
423e7ebc3f Pacify clippy 2018-09-27 16:21:12 -06:00
f9fe6a0f72 Move clippy to Rust stable 2018-09-27 16:21:12 -06:00
8d007bd7f7 Upgrade rustc and add clippy to stable 2018-09-27 16:21:12 -06:00
6cdbdfbbcb Enable bench and fix upload-perf 2018-09-27 14:16:56 -07:00
35e6343d61 Update testnet-deploy script to configure GPUs for leader node (#1379) 2018-09-27 13:42:24 -07:00
7fb7839c8f Configure GPU type/count from command line in GCE scripts (#1376)
* Configure GPU type/count from command line in GCE scripts

* Change CLI to input full leader machine type information with GPU
2018-09-27 11:55:56 -07:00
dbc1ffc75e Use jsonrpc fork 2018-09-27 12:50:38 -06:00
1fdbe893c5 Improve game setup experience: X now shares game key and accepts O 2018-09-27 10:44:13 -07:00
55a542bff0 Fix erasure and cuda related compilation errors 2018-09-27 10:42:37 -06:00
e10574c64d Remove recycler and its usage
- The memory usage due to the recycler was high, and increasing over
  time.
2018-09-27 10:42:37 -06:00
2e00be262e Remove data from BankError.
This reduces how much memory is written to the last_id_sigs table on every TX, and has a 40% impact on
`cargo +nightly watch -x 'bench bench_banking_stage'`
2018-09-27 09:07:56 -06:00
4172bde081 Only send a vote once a second 2018-09-27 09:06:41 -06:00
9c47e022dc break dependency of programs on solana core (#1371)
* break dependency of programs on Solana core
2018-09-27 07:49:26 -07:00
874addc51a Move KeyedAccount into Account
Now programs don't need to depend on dynamic_program and its
dependencies.
2018-09-26 20:40:40 -06:00
b7ae5b712a Move Pubkey into its own module 2018-09-26 20:40:40 -06:00
c6d7cd2d33 Move Account into its own module
Also use default Default generator, since system program ID is
[0; 32]. Bank should probably be the one to set this anyway.
2018-09-26 20:40:40 -06:00
386a96b7e0 capture multinode logs by default (#1367) 2018-09-26 19:30:40 -07:00
b238c57179 Add trace! when an error is mapped to GenericFailure 2018-09-26 19:30:20 -07:00
1821e72812 Add getSignatureStatus 2018-09-26 19:00:34 -07:00
a23c230603 fix reverse loop in write_stage, simplify banking_stage, add tooling to help find this (#1366) 2018-09-26 18:37:24 -07:00
4e01fd5458 Update test to show when we should collect tx fees
See #1157 for details. The `from` account should be cloned
before execute_transaction(), and that's the only one that should
be stored if there's an error executing the program.
2018-09-26 19:30:27 -06:00
e416cf7adf Let clients know when transactions failed 2018-09-26 19:30:27 -06:00
25edb9e447 fix benches 2018-09-26 19:29:46 -06:00
93c4f6c9b8 Synchronize PoH, bank last_id queue and ledger entry channel.
PoH, bank's last_id queue and the Entry channel need to have a synchronized order of ids.
2018-09-26 16:19:03 -07:00
718031ec35 Ignore the test_leader_to_validator_transition until it can handle PoH entries 2018-09-26 16:59:57 -06:00
d546614936 Handle deserialize failure with error 2018-09-26 15:17:07 -07:00
ac8d738045 Don't call unwrap() in StorageProgram::process_tx 2018-09-26 15:17:07 -07:00
ca962371b8 Fix build
Two PRs crossed in flight.
2018-09-26 14:40:48 -06:00
e6f8922e35 fix issue #1347 (#1355) 2018-09-26 13:31:39 -07:00
7292ece7ad Free up term instruction for new multi-instruction feature 2018-09-26 14:17:15 -06:00
df3b78c18c Move BudgetTransaction into its own module 2018-09-26 14:17:15 -06:00
c83dcea87d Move SystemTransaction into its own module 2018-09-26 14:17:15 -06:00
be20c99758 Promote the one true transaction constructor 2018-09-26 14:17:15 -06:00
694add9919 Move budget-specific and system-specific tx constructors into traits
These functions pull in budget-specific and system-specific
dependencies that aren't needed by the runtime.
2018-09-26 14:17:15 -06:00
afc764752c Permit testnets without a GPU 2018-09-26 10:37:41 -07:00
113c8b5880 Rollback jsonrpc SendTransaction pool for signature; ignore flaky tests 2018-09-26 10:25:29 -07:00
a5b28349ed Add max entry height to download for replicator 2018-09-26 09:57:22 -07:00
bb7ecc7cd9 Migrate to solana-labs fork of jsonrpc
This change aims to be a no-op. Future changes to the rev should be
along the new solana-0.1 branch.
2018-09-26 10:08:37 -06:00
14bc160674 Clean up test and add signature return to rpc send tx 2018-09-25 16:38:51 -07:00
d438c22618 Update RFC 2018-09-25 16:38:51 -07:00
bcbae0a64f Fix witness functionality 2018-09-25 16:38:51 -07:00
f636408647 Fix timestamp and cancel functionality
- Also serialize and send helper fn
2018-09-25 16:38:51 -07:00
3ffc7aa5bc Add helper fn to get last id 2018-09-25 16:38:51 -07:00
7b7e8c0d3f Clippy 2018-09-25 16:38:51 -07:00
11ea9e7c4b Add cancelable handling 2018-09-25 16:38:51 -07:00
2b82121325 Fix wallet-sanity to reflect new wallet arg syntax 2018-09-25 16:38:51 -07:00
5038e5ccd7 Preliminary Wallet-Budget functionality 2018-09-25 16:38:51 -07:00
e943ed8caf Expand parse_command and add tests 2018-09-25 16:38:51 -07:00
c196952afd Flesh out Wallet CLI & add placeholder WalletCommands 2018-09-25 16:38:51 -07:00
e7383a7e66 Validator to leader (#1303)
* Add check in window_service to exit in checks for leader rotation, and propagate that service exit up to fullnode

* Added logic to shutdown Tvu once ReplicateStage finishes

* Added test for successfully shutting down validator and starting up leader

* Add test for leader validator interaction

* fix streamer to check for exit signal before checking socket again to prevent busy leaders from never returning

* PR comments - Rewrite make_consecutive_blobs() function, revert genesis function change
2018-09-25 15:41:29 -07:00
8a7545197f move tick generation back to banking_stage, add unit tests (#1332)
* move tick generation back to banking_stage, add unit tests

fixes #1217

* remove channel() stuff for synchronous comm; use a mutex
2018-09-25 15:01:51 -07:00
680072e5e2 No need to special case vote failures 2018-09-25 13:43:35 -06:00
4ca377a655 Delete dead code 2018-09-25 13:43:35 -06:00
751dd7eebb Move vote into ReplicateStage after process_entries 2018-09-25 13:43:35 -06:00
8f0e0c4440 Add tic-tac-toe program 2018-09-25 12:07:41 -07:00
50cf73500e Remove rfc 004 2018-09-25 12:07:41 -07:00
db310a044c Add Budget::And element, and supporting functions (#1329) 2018-09-25 12:38:13 -06:00
88a609ade5 groom write_stage 2018-09-25 00:18:35 -07:00
304d63623f give replication some time to happen
fixes #1307
2018-09-24 23:57:09 -07:00
407b2682e8 remove dead code 2018-09-24 23:12:09 -07:00
0f4fd8367d Add counters for channel pressure and time spent in TPU pipeline (#1324)
* Add counters for channel pressure and time spent in TPU pipeline

* Fixed failing tests

* Fix rust format issue
2018-09-24 17:13:49 -07:00
747ba6a8d3 Boot BudgetState::last_error 2018-09-24 17:14:23 -06:00
bb99fd40de Update transaction status in the bank
This will allow jsonrpc to query the system to find out if a
recent transaction failed.
2018-09-24 17:14:23 -06:00
e972d6639d Return errors from BudgetProgram::process_transaction 2018-09-24 17:14:23 -06:00
22e77c9485 Add a way of getting transaction errors out of the bank 2018-09-24 17:14:23 -06:00
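A minimal caller-side sketch of what this group of changes enables (illustrative only, not project code; `bank` and `tx` stand in for real values, and the exact error variants are defined by `BankError`):

```rust
// process_transaction() returns a Result, so jsonrpc (and tests) can
// now surface the reason a transaction failed instead of swallowing it.
match bank.process_transaction(&tx) {
    Ok(_) => println!("transaction applied"),
    Err(e) => println!("transaction failed: {:?}", e), // e.g. a BankError
}
```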
bc88473030 Increase wmem for kernel network memory usage (#1323)
- Validators were running out of kernel buffer space while retransmitting
  blobs
2018-09-24 13:02:56 -07:00
95677a81c5 Pacify clippy 2018-09-24 13:36:31 -06:00
ea37d29d3a Pass Bank::process_transactions() a reference to the txs instead of moving them 2018-09-24 13:36:31 -06:00
e030673c9d Do a recv on join to prevent channel destruction (#1320)
before window thread join
2018-09-24 11:50:37 -07:00
3e76efe97e Fix bench compilation (#1311) 2018-09-24 10:40:42 -07:00
f5a30615c1 Ignore replicator startup for now 2018-09-24 09:43:58 -06:00
e5e325154b Add --shell argument 2018-09-24 08:05:47 -07:00
9e3d2956d8 remove last recycle? 2018-09-24 08:09:41 -06:00
26b1466ef6 Initial integration of dynamic contracts and native module loading (#1256)
* Integration of native dynamic programs
2018-09-23 22:13:44 -07:00
a1f01fb8f8 revert is_some to not is_none, causes test failure 2018-09-23 17:09:18 -06:00
b2be0e2e5e fix clippy warning 2018-09-23 17:09:18 -06:00
1a45587c08 fix clippy warnings 2018-09-23 17:09:18 -06:00
3199f174a3 Add option to pass boot disk type to gce create (#1308) 2018-09-22 16:43:47 -07:00
a51c2f193e fix Rob and Carl crossing wires 2018-09-21 21:37:25 -07:00
be31da3dce lastidnotfound step 2: (#1300)
lastidnotfound step 2:
  * move "record stage", aka poh_service, into banking stage
  * remove Entry.has_more; it is incompatible with leader rotation
  * rewrite entry_next_hash in terms of Poh
  * simplify and unify transaction hashing (no embedded nulls)
  * register_last_entry from banking stage, fixes #1171 (w00t!)
  * new PoH doesn't generate empty ledger entries, so some fixes were necessary in
    multinode tests that rely on that (e.g. giving validators airdrops)
  * make window repair less patient: if we've been waiting for an answer,
    don't be shy about requesting the most recent blobs
  * delete recorder and record stage
  * make thin_client error reporting more verbose
  * more tracing in window (sigh)
2018-09-21 21:01:13 -07:00
54b407b4ca Wait on blob fetch before window; seems to fix instability (#1304)
Also clean up ledger.
2018-09-21 18:56:20 -07:00
e87cac06da Request/reqwest improvements
- Use json macro to simplify request builds
- Add proxy option for reqwest to use TLS
- Add rpc port options for configured nodes
2018-09-21 18:06:20 -06:00
ad4fef4f09 Doc for rpc_port configuration 2018-09-21 18:06:20 -06:00
e3b3701e13 Add RPC port option to fullnode 2018-09-21 18:06:20 -06:00
9228fe11c9 Port Wallet to jsonrpc and fix tests 2018-09-21 18:06:20 -06:00
5ab38afa51 Changed the window_service in Replicator to send entries instead of blobs (#1302) 2018-09-21 16:50:58 -07:00
e49b8f0ce7 Update poh_service.rs 2018-09-21 16:03:54 -07:00
c50ac96f75 Moved deserialization of blobs to entries from replicate_stage to window_service (#1287) 2018-09-21 16:01:24 -07:00
a9355c33b2 Placeholder storage contract and replicator client (#1286)
* Add hooks for executing the storage contract

* Add store_ledger stage
  Similar to replicate_stage but no voting/banking stuff, just convert
  blobs to entries and write the ledger out

* Add storage_addr to tests and add new NodeInfo constructor
  to reduce duplication...
2018-09-21 15:32:15 -07:00
3dcee9f79e Update poh_service.rs 2018-09-21 08:01:24 -07:00
2614189157 cargo fmt 2018-09-20 19:46:20 -07:00
beeb09646a suppress warning: unused variable: recycler 2018-09-20 19:46:20 -07:00
67f1fbab5f Treat rustc warnings as errors in CI 2018-09-20 19:46:20 -07:00
c0e7e43e96 fixup! s/contract/program 2018-09-20 19:33:54 -07:00
9bfead2e01 s/contract/program 2018-09-20 19:33:54 -07:00
6073cd57fa Boot Recycler::recycle() 2018-09-20 17:08:51 -06:00
5174be5fe7 Rename getAccount to getAccountInfo 2018-09-20 15:18:56 -07:00
62a18d4c02 step one of lastidnotfound: record_stage->record_service, trim recorder to hashes (#1281)
step one of lastidnotfound

* record_stage->record_service, trim recorder to hashes
* doc updates, hash multiple without alloc()

cc #1171
2018-09-20 15:02:24 -07:00
a6c15684c9 Avoid panicking invalid instructions 2018-09-20 14:08:39 -07:00
5691bf557c Handle bad account userdata better 2018-09-20 14:08:39 -07:00
8f01f7cf21 Trace syscalls for more helpful logs 2018-09-20 14:08:39 -07:00
bb8c94ad2c Add getAccount JSON RPC request 2018-09-20 13:58:15 -07:00
d98e35e095 Delete no longer used PaymentPlan trait 2018-09-20 14:22:45 -06:00
3163fbad0e Remove 'Plan' indirection since it's implied by BUDGET_CONTRACT_ID 2018-09-20 14:22:45 -06:00
0172422961 Require a self-assigned account ID 2018-09-20 14:16:14 -06:00
8ccfb26923 tests for my IP picker 2018-09-20 09:21:09 -07:00
12a474b6ee sort local interfaces before selecting one 2018-09-20 09:21:09 -07:00
270fd6d61c Fix compiler warnings 2018-09-20 09:47:36 -06:00
7b9c7d4150 Cleaned up find_leader_rotation function. Added testing for WriteStage find_leader_rotation_index() function (#1276) 2018-09-19 18:16:00 -07:00
55126f5fb6 Marked Tvu functionality in Fullnode as unused for now 2018-09-19 16:05:31 -07:00
431692d9d0 Use a Drop trait to keep track of lifetimes for recycled objects.
* Move recycler instances to the point of allocation
* sinks no longer need to call `recycle`
* Remove the recycler arguments from all the apis that no longer need them
2018-09-19 16:59:42 -06:00
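The pattern is worth sketching: the pooled object carries a handle back to its pool and returns itself on Drop, so call sites never invoke `recycle` explicitly. An illustrative stand-in (not the project's actual recycler):

```rust
use std::sync::{Arc, Mutex};

type Pool = Arc<Mutex<Vec<Vec<u8>>>>;

// A buffer checked out of the pool; recycles itself when dropped.
struct Loaned {
    buf: Option<Vec<u8>>,
    pool: Pool,
}

impl Drop for Loaned {
    fn drop(&mut self) {
        if let Some(buf) = self.buf.take() {
            // Return the allocation to the pool instead of freeing it.
            self.pool.lock().unwrap().push(buf);
        }
    }
}

fn allocate(pool: &Pool) -> Loaned {
    let buf = pool.lock().unwrap().pop().unwrap_or_default();
    Loaned { buf: Some(buf), pool: Arc::clone(pool) }
}
```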
6732a9078d Clarify AfterTimestamp wire format 2018-09-19 13:28:35 -07:00
2981076a14 Add solana-upload-perf to parse json from bench and upload to influx (#1166) 2018-09-19 13:16:55 -07:00
5740ea3807 RFC 006: Wallet CLI 2018-09-19 12:10:53 -06:00
cd2d50e06c Changed transition to restart Rpu rather than modify bank to prevent lock contention 2018-09-19 10:48:05 -06:00
8c8a4ba705 debugging commit 2018-09-19 10:48:05 -06:00
b10de40506 Made LEADER_ROTATION_INTERVAL settable so that integration tests don't time out 2018-09-19 10:48:05 -06:00
2030dfa435 Implement PR comments, tidy up 2018-09-19 10:48:05 -06:00
bfe64f5f6e Added integration test for transitioning leader to validator to see that tpu pipeline can exit and restart a tvu. Fixed Tpu and broadcast stage so that exiting later stages in the pipeline also causes earlier stages to exit. 2018-09-19 10:48:05 -06:00
6d27751365 give fullnode ownership of state needed to dynamically start up a tpu or tvu for role transition 2018-09-19 10:48:05 -06:00
1fb1c0a681 added jointypes to the stages in the tpu involved in leader rotation 2018-09-19 10:48:05 -06:00
062f654fe0 formatted code 2018-09-19 10:48:05 -06:00
d3cb161c36 Added broadcast stage test for leader rotation exit 2018-09-19 10:48:05 -06:00
98b47d2540 Added check in broadcast stage to exit after transmitting last blob before leader rotation. Also added tests 2018-09-19 10:48:05 -06:00
f28ba3937b Added check in write stage to exit when scheduled entry_height for leader rotation is detected 2018-09-19 10:48:05 -06:00
91cf14e641 Rewrote service trait join() method to allow thread join handles to return values other than () 2018-09-19 10:48:05 -06:00
7601a8001c Update reqwest requirement from 0.8.6 to 0.9.0
Updates the requirements on [reqwest](https://github.com/seanmonstar/reqwest) to permit the latest version.
- [Release notes](https://github.com/seanmonstar/reqwest/releases)
- [Changelog](https://github.com/seanmonstar/reqwest/blob/master/CHANGELOG.md)
- [Commits](https://github.com/seanmonstar/reqwest/commits/v0.9.0)

Signed-off-by: dependabot[bot] <support@dependabot.com>
2018-09-19 10:47:02 -06:00
0ee6c5bf9d Read multiple entries in write stage (#1259)
- Also use rayon to parallelize to_blobs() to maximize CPU usage
2018-09-18 21:45:49 -07:00
6dee632d67 Remove Signature from ApplySignature 2018-09-18 20:00:42 -07:00
51e5de4d97 Log specific send_transaction error messages 2018-09-18 16:17:08 -07:00
1f08b22c8e Tweak log messages 2018-09-18 16:17:08 -07:00
83ae5bcee2 Detect binary changes in serialized contract userdata 2018-09-18 16:17:08 -07:00
339a570b26 Update comment 2018-09-18 16:17:08 -07:00
5310b6e5a2 Move entry->blob creation out of write stage (#1257)
- The write stage will output vector of entries
- Broadcast stage will create blobs out of the entries
- Helps reduce MIPS requirements for write stage
2018-09-18 13:49:10 -07:00
7d14f44a7c Move register_entry_id() call out of write stage (#1253)
* Move register_entry_id() call out of write stage

- Write stage is MIPS intensive and has become a bottleneck for
  TPU pipeline
- This will reduce the MIPS requirements for the stage

* Fix rust format issues
2018-09-18 11:42:25 -07:00
c830eeeae4 Update RELEASE.md 2018-09-18 10:31:26 -07:00
157fcf1de5 initial RELEASE.md (#1244)
initial RELEASE.md and RELEASE_TEMPLATE.md
2018-09-18 10:23:15 -07:00
e050160ce5 Use tagged perf-libs to enable controlled updates 2018-09-18 09:21:44 -07:00
f273351789 Add missing port number 2018-09-18 09:36:54 -06:00
aebf7f88e5 Various spelling fixes 2018-09-17 19:37:59 -07:00
aac1571670 mint now uses the SystemContract instead of Budget 2018-09-17 18:02:40 -07:00
8bae75a8a6 system contract tests 2018-09-17 14:34:55 -07:00
c2f7ca9d8f Change process_command return type and improve test 2018-09-17 13:45:47 -07:00
6ec0e42220 budget as separate contract and system call contract (#1189)
* budget and system contracts and verification

* contract check_id methods
* system call contract
* verify contract execution rules
* move system into its own file
* allocate before transfer for budget
* store error in budget context
* budget contract and tests without bank
* moved budget out of bank
2018-09-17 13:36:31 -07:00
072b244575 Add perf counters for record/write stages (#1240) 2018-09-17 11:07:04 -07:00
7ac9d6c604 Create keygen helper function for use in Wallet CLI, print keypair statement 2018-09-17 11:53:33 -06:00
0125163190 Remove wallet.sh, update entrypoint syntax for wallet network argument 2018-09-17 11:53:33 -06:00
a06f4b1d44 Update wallet to trigger keygen if no keypair provided and no keypair found in default location 2018-09-17 11:53:33 -06:00
10daa015c4 Simplify timeout arg 2018-09-17 11:53:33 -06:00
0babee39a4 Update wallet to take network arg 2018-09-17 11:53:33 -06:00
7c08b397eb Update testnet documentation 2018-09-17 09:26:25 -07:00
155ee8792f Add GPU support to ec2-provider 2018-09-17 09:26:25 -07:00
f89f121d2b Add AWS EC2 support 2018-09-17 09:26:25 -07:00
27986d7abb Standardize CLI help text 2018-09-16 15:17:10 -06:00
8b7edc6d64 Alphabetize 2018-09-16 15:17:10 -06:00
7dfab867fe Mark --outfile parameter as required 2018-09-16 10:49:02 -07:00
fd36954477 clippy 2018-09-15 05:12:53 -06:00
fd51599fa8 Replace replace(..., None) with take()
This is strictly for simplicity, since Option::take() is implemented with replace().
2018-09-15 05:12:09 -06:00
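The two forms are equivalent; a standalone illustration (not project code):

```rust
use std::mem;

fn main() {
    let mut a = Some("blob");
    let mut b = Some("blob");

    // Before: move the value out by swapping in None explicitly.
    let old = mem::replace(&mut a, None);
    // After: Option::take() does exactly the same thing.
    let new = b.take();

    assert_eq!(old, new);
    assert_eq!(a, None);
    assert_eq!(b, None);
}
```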
3ca80c676c Disable large-network until it's fixed 2018-09-14 20:13:17 -07:00
be7cce1fd2 Tweak GCE scripts for higher node count (#1229)
* Tweak GCE scripts for higher node count

- Some validators were unable to rsync config from leader when
  the node count was high (e.g. 25). It looks like the leader node was
  getting more rsync requests in parallel than it could handle.
- This change staggers the validators' bootup and rsync times

* Address review comments
2018-09-14 17:17:08 -07:00
e142aafca9 Use multiple sockets for receiving blobs on validators (#1228)
* Use multiple sockets for receiving blobs on validators

- The blobs that are broadcasted by leader or retransmitted by peer
  validators are received on replicate_port
- Using reuse_addr/reuse_port, multiple sockets can be opened for
  the same port
- This allows the kernel to queue data to user space app on multiple
  socket queues, preventing over-running one queue
- This helps with reducing packets dropped due to queue over-runs

Fixes #1224

* Fixed failing tests
2018-09-14 16:56:06 -07:00
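One way to open several sockets on the same UDP port is SO_REUSEADDR/SO_REUSEPORT. Here is a sketch using the `socket2` crate (which this release depends on); it is illustrative, not the project's actual code:

```rust
extern crate socket2;

use socket2::{Domain, Protocol, Socket, Type};
use std::net::{SocketAddr, UdpSocket};

// Bind a UDP socket with SO_REUSEADDR/SO_REUSEPORT set so multiple
// sockets can share one port, letting the kernel spread incoming
// packets across their queues. (SO_REUSEPORT is Unix-only.)
fn bind_shared(addr: SocketAddr) -> std::io::Result<UdpSocket> {
    let sock = Socket::new(Domain::ipv4(), Type::dgram(), Some(Protocol::udp()))?;
    sock.set_reuse_address(true)?;
    sock.set_reuse_port(true)?;
    sock.bind(&addr.into())?;
    Ok(sock.into_udp_socket())
}
```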
4196cf43e8 cargo fmt 2018-09-14 16:37:49 -07:00
a344eb7dd0 Upgrade rust stable to 1.29 2018-09-14 16:37:49 -07:00
d12537bdb7 Include UDP sent statistics in net stats (#1225) 2018-09-14 13:32:13 -07:00
bcb3b3c21f Add integration tests to wallet module 2018-09-14 08:21:33 -06:00
d8c9a1aae9 Add method to run local drone for tests 2018-09-14 08:21:33 -06:00
9ca2f5b3f7 Move all handling except network/gossip from /bin to wallet module 2018-09-14 08:21:33 -06:00
9e24775051 update README with v0.8 and update demo scripts to match 2018-09-13 18:37:37 -07:00
4dc30ea104 Add recycler stats (#1187) 2018-09-13 14:49:48 -07:00
90df6237c6 Implements recvmmsg() for UDP packets (#1161)
* Implemented recvmmsg() for UDP packets

- This change implements binding between libc API for recvmmsg()
- The function can receive multiple packets using one system call

Fixes #1141

* Added unit tests for recvmmsg()

* Added recv_mmsg() wrapper for non Linux OS

* Address review comments for recvmmsg()

* Remove unnecessary imports

* Moved target specific dependencies to the function
2018-09-13 14:41:28 -07:00
80caa8fdce add back some defaults for client.sh 2018-09-13 14:05:53 -07:00
8706774ea7 Rewrote service trait join() method to allow thread join handles to return values other than () (#1213) 2018-09-13 14:00:17 -07:00
1d7e87d430 Increase number of sockets for transaction processing 2018-09-13 14:22:07 -06:00
1a4cd763f8 Fix missing recycle in recv_from (#1205)
In the error case that i>0 (we have blobs to send)
we break out of the loop and do not push the allocated r
to the v array. We should recycle this blob, otherwise it
will be dropped.
2018-09-13 08:29:18 -07:00
ee74b367ce Add docker install script 2018-09-12 17:09:37 -07:00
f06113500d bench-tps/net sanity: add ability to check for unexpected extra nodes 2018-09-12 15:38:57 -07:00
9ab5692acf fix "leak" in Blob::recv_from (#1198)
* fix "leak" in Blob::recv_from

fixes #1199
2018-09-12 14:45:43 -07:00
e7a910b664 v0.9 2018-09-12 10:27:33 -07:00
133 changed files with 10919 additions and 4201 deletions

.github/RELEASE_TEMPLATE.md (new file)

@ -0,0 +1,28 @@
# Release v0.X.Y <milestone name>
fun blurb about the name, what's in the release
## Major Features And Improvements
* bulleted
* list of features and improvements
## Breaking Changes
* bulleted
* list
* of
* protocol changes/breaks
* API breaks
* CLI changes
* etc.
## Bug Fixes and Other Changes
* can be pulled from commit log, or synthesized
## Thanks to our Contributors
This release contains contributions from many people at Solana, as well as:
pull from commit log

Cargo.toml

@ -1,7 +1,7 @@
[package]
name = "solana"
description = "Blockchain, Rebuilt for Scale"
version = "0.8.0"
version = "0.9.0"
documentation = "https://docs.rs/solana"
homepage = "http://solana.com/"
readme = "README.md"
@ -18,17 +18,25 @@ authors = [
license = "Apache-2.0"
[[bin]]
name = "solana-bench-tps"
path = "src/bin/bench-tps.rs"
name = "solana-upload-perf"
path = "src/bin/upload-perf.rs"
[[bin]]
name = "solana-bench-streamer"
path = "src/bin/bench-streamer.rs"
[[bin]]
name = "solana-bench-tps"
path = "src/bin/bench-tps.rs"
[[bin]]
name = "solana-drone"
path = "src/bin/drone.rs"
[[bin]]
name = "solana-replicator"
path = "src/bin/replicator.rs"
[[bin]]
name = "solana-fullnode"
path = "src/bin/fullnode.rs"
@ -76,28 +84,38 @@ env_logger = "0.5.12"
generic-array = { version = "0.12.0", default-features = false, features = ["serde"] }
getopts = "0.2"
influx_db_client = "0.3.4"
jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc", rev = "4b6060b" }
jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc", rev = "4b6060b" }
jsonrpc-macros = { git = "https://github.com/paritytech/jsonrpc", rev = "4b6060b" }
solana-jsonrpc-core = "0.1"
solana-jsonrpc-http-server = "0.1"
solana-jsonrpc-macros = "0.1"
ipnetwork = "0.12.7"
itertools = "0.7.8"
libc = "0.2.43"
libloading = "0.5.0"
log = "0.4.2"
matches = "0.1.6"
nix = "0.11.0"
pnet_datalink = "0.21.0"
rand = "0.5.1"
rayon = "1.0.0"
reqwest = "0.8.6"
reqwest = "0.9.0"
ring = "0.13.2"
sha2 = "0.7.0"
serde = "1.0.27"
serde_cbor = "0.9.0"
serde_derive = "1.0.27"
serde_json = "1.0.10"
socket2 = "0.3.8"
solana_program_interface = { path = "common", version="0.1.0" }
sys-info = "0.5.6"
tokio = "0.1"
tokio-codec = "0.1"
untrusted = "0.6.2"
[dev-dependencies]
noop = { path = "programs/noop", version="0.1.0" }
print = { path = "programs/print", version="0.1.0" }
move_funds = { path = "programs/move_funds", version="0.1.0" }
[[bench]]
name = "bank"
@ -112,3 +130,19 @@ name = "signature"
[[bench]]
name = "sigverify"
[workspace]
members = [
".",
"common",
"programs/noop",
"programs/print",
"programs/move_funds",
]
default-members = [
".",
"common",
"programs/noop",
"programs/print",
"programs/move_funds",
]

README.md

@ -62,7 +62,7 @@ your odds of success if you check out the
before proceeding:
```bash
$ git checkout v0.7.2
$ git checkout v0.8.0
```
Configuration Setup
@ -113,7 +113,7 @@ To run a multinode testnet, after starting a leader node, spin up some validator
separate shells:
```bash
$ ./multinode-demo/validator.sh ubuntu@10.0.1.51:~/solana 10.0.1.51
$ ./multinode-demo/validator.sh
```
To run a performance-enhanced leader or validator (on Linux),
@ -123,22 +123,20 @@ your system:
```bash
$ ./fetch-perf-libs.sh
$ SOLANA_CUDA=1 ./multinode-demo/leader.sh
$ SOLANA_CUDA=1 ./multinode-demo/validator.sh ubuntu@10.0.1.51:~/solana 10.0.1.51
$ SOLANA_CUDA=1 ./multinode-demo/validator.sh
```
Testnet Client Demo
---
Now that your singlenode or multinode testnet is up and running let's send it some transactions! Note that we pass in
the expected number of nodes in the network. If running singlenode, pass 1; if multinode, pass the number
of validators you started.
Now that your singlenode or multinode testnet is up and running let's send it
some transactions!
In a separate shell start the client:
```bash
$ ./multinode-demo/client.sh ubuntu@10.0.1.51:~/solana 1
$ ./multinode-demo/client.sh # runs against localhost by default
```
What just happened? The client demo spins up several threads to send 500,000 transactions
@ -155,7 +153,7 @@ Public Testnet
In this example the client connects to our public testnet. To run validators on the testnet you would need to open udp ports `8000-10000`.
```bash
$ ./multinode-demo/client.sh testnet.solana.com 1 # The minimum number of nodes to discover on the network
$ ./multinode-demo/client.sh --network $(dig +short testnet.solana.com):8001 --identity config-private/client-id.json --duration 60
```
You can observe the effects of your client's transactions on our [dashboard](https://metrics.solana.com:3000/d/testnet/testnet-hud?orgId=2&from=now-30m&to=now&refresh=5s&var-testnet=testnet)

RELEASE.md (new file)

@ -0,0 +1,32 @@
# Solana Release process
## Introduction
Solana uses a channel-oriented, date-based branching process described [here](https://github.com/solana-labs/solana/blob/master/rfcs/rfc-005-branches-tags-and-channels.md).
## Release Steps
### Changing channels
When cutting a new channel branch these pre-steps are required:
1. Pick your branch point for release on master.
2. Create the branch. The name should be "v" + the first 2 "version" fields from Cargo.toml. For example, a Cargo.toml with version = "0.9.0" implies the next branch name is "v0.9".
3. Update Cargo.toml to the next semantic version (e.g. 0.9.0 -> 0.10.0).
4. Push your new branch to solana.git
5. Land your Cargo.toml change as a master PR.
At this point, ci/channel-info.sh should show your freshly cut release branch as "BETA_CHANNEL" and the previous release branch as "STABLE_CHANNEL".
### Updating channels (i.e. "making a release")
We use [github's Releases UI](https://github.com/solana-labs/solana/releases) for tagging a release.
1. Go [there ;)](https://github.com/solana-labs/solana/releases).
2. Click "Draft new release".
3. If the first major release on the branch (e.g. v0.8.0), paste in [this template](https://raw.githubusercontent.com/solana-labs/solana/master/.github/RELEASE_TEMPLATE.md) and fill it in.
4. Test the release by generating a tag using semver's rules. First try at a release should be <branchname>.X-rc.0.
5. Verify release automation:
1. [Crates.io](https://crates.io/crates/solana) should have an updated Solana version.
2. ...
6. After testnet deployment, verify that testnets are running correct software. http://metrics.solana.com should show testnet running on a hash from your newly created branch.

benches/bank.rs

@ -10,6 +10,7 @@ use solana::bank::*;
use solana::hash::hash;
use solana::mint::Mint;
use solana::signature::{Keypair, KeypairUtil};
use solana::system_transaction::SystemTransaction;
use solana::transaction::Transaction;
use test::Bencher;
@ -24,7 +25,13 @@ fn bench_process_transaction(bencher: &mut Bencher) {
.map(|i| {
// Seed the 'from' account.
let rando0 = Keypair::new();
let tx = Transaction::new(&mint.keypair(), rando0.pubkey(), 10_000, mint.last_id());
let tx = Transaction::system_move(
&mint.keypair(),
rando0.pubkey(),
10_000,
mint.last_id(),
0,
);
assert!(bank.process_transaction(&tx).is_ok());
// Seed the 'to' account and a cell for its signature.
@ -32,18 +39,17 @@ fn bench_process_transaction(bencher: &mut Bencher) {
bank.register_entry_id(&last_id);
let rando1 = Keypair::new();
let tx = Transaction::new(&rando0, rando1.pubkey(), 1, last_id);
let tx = Transaction::system_move(&rando0, rando1.pubkey(), 1, last_id, 0);
assert!(bank.process_transaction(&tx).is_ok());
// Finally, return the transaction to the benchmark.
tx
})
.collect();
}).collect();
bencher.iter(|| {
// Since benchmarker runs this multiple times, we need to clear the signatures.
bank.clear_signatures();
let results = bank.process_transactions(transactions.clone());
let results = bank.process_transactions(&transactions);
assert!(results.iter().all(Result::is_ok));
})
}

benches/banking_stage.rs

@ -1,95 +1,41 @@
#![feature(test)]
extern crate bincode;
extern crate rand;
extern crate rayon;
extern crate solana;
extern crate solana_program_interface;
extern crate test;
use rand::{thread_rng, Rng};
use rayon::prelude::*;
use solana::bank::Bank;
use solana::banking_stage::BankingStage;
use solana::banking_stage::{BankingStage, NUM_THREADS};
use solana::entry::Entry;
use solana::mint::Mint;
use solana::packet::{to_packets_chunked, PacketRecycler};
use solana::record_stage::Signal;
use solana::signature::{Keypair, KeypairUtil};
use solana::packet::to_packets_chunked;
use solana::signature::{KeypairUtil, Signature};
use solana::system_transaction::SystemTransaction;
use solana::transaction::Transaction;
use solana_program_interface::pubkey::Pubkey;
use std::iter;
use std::sync::mpsc::{channel, Receiver};
use std::sync::Arc;
use std::time::Duration;
use test::Bencher;
// use self::test::Bencher;
// use bank::{Bank, MAX_ENTRY_IDS};
// use bincode::serialize;
// use hash::hash;
// use mint::Mint;
// use rayon::prelude::*;
// use signature::{Keypair, KeypairUtil};
// use std::collections::HashSet;
// use std::time::Instant;
// use transaction::Transaction;
//
// fn bench_process_transactions(_bencher: &mut Bencher) {
// let mint = Mint::new(100_000_000);
// let bank = Bank::new(&mint);
// // Create transactions between unrelated parties.
// let txs = 100_000;
// let last_ids: Mutex<HashSet<Hash>> = Mutex::new(HashSet::new());
// let transactions: Vec<_> = (0..txs)
// .into_par_iter()
// .map(|i| {
// // Seed the 'to' account and a cell for its signature.
// let dummy_id = i % (MAX_ENTRY_IDS as i32);
// let last_id = hash(&serialize(&dummy_id).unwrap()); // Semi-unique hash
// {
// let mut last_ids = last_ids.lock().unwrap();
// if !last_ids.contains(&last_id) {
// last_ids.insert(last_id);
// bank.register_entry_id(&last_id);
// }
// }
//
// // Seed the 'from' account.
// let rando0 = Keypair::new();
// let tx = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, last_id);
// bank.process_transaction(&tx).unwrap();
//
// let rando1 = Keypair::new();
// let tx = Transaction::new(&rando0, rando1.pubkey(), 2, last_id);
// bank.process_transaction(&tx).unwrap();
//
// // Finally, return a transaction that's unique
// Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
// })
// .collect();
//
// let banking_stage = EventProcessor::new(bank, &mint.last_id(), None);
//
// let now = Instant::now();
// assert!(banking_stage.process_transactions(transactions).is_ok());
// let duration = now.elapsed();
// let sec = duration.as_secs() as f64 + duration.subsec_nanos() as f64 / 1_000_000_000.0;
// let tps = txs as f64 / sec;
//
// // Ensure that all transactions were successfully logged.
// drop(banking_stage.historian_input);
// let entries: Vec<Entry> = banking_stage.output.lock().unwrap().iter().collect();
// assert_eq!(entries.len(), 1);
// assert_eq!(entries[0].transactions.len(), txs as usize);
//
// println!("{} tps", tps);
// }
fn check_txs(receiver: &Receiver<Signal>, ref_tx_count: usize) {
fn check_txs(receiver: &Receiver<Vec<Entry>>, ref_tx_count: usize) {
let mut total = 0;
loop {
let signal = receiver.recv().unwrap();
if let Signal::Transactions(transactions) = signal {
total += transactions.len();
if total >= ref_tx_count {
break;
let entries = receiver.recv_timeout(Duration::new(1, 0));
if let Ok(entries) = entries {
for entry in &entries {
total += entry.transactions.len();
}
} else {
assert!(false);
break;
}
if total >= ref_tx_count {
break;
}
}
assert_eq!(total, ref_tx_count);
@ -97,116 +43,68 @@ fn check_txs(receiver: &Receiver<Signal>, ref_tx_count: usize) {
#[bench]
fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
let tx = 10_000_usize;
let txes = 1000 * NUM_THREADS;
let mint_total = 1_000_000_000_000;
let mint = Mint::new(mint_total);
let num_dst_accounts = 8 * 1024;
let num_src_accounts = 8 * 1024;
let srckeys: Vec<_> = (0..num_src_accounts).map(|_| Keypair::new()).collect();
let dstkeys: Vec<_> = (0..num_dst_accounts)
.map(|_| Keypair::new().pubkey())
.collect();
let transactions: Vec<_> = (0..tx)
.map(|i| {
Transaction::new(
&srckeys[i % num_src_accounts],
dstkeys[i % num_dst_accounts],
i as i64,
mint.last_id(),
)
})
.collect();
let (verified_sender, verified_receiver) = channel();
let (signal_sender, signal_receiver) = channel();
let packet_recycler = PacketRecycler::default();
let setup_transactions: Vec<_> = (0..num_src_accounts)
.map(|i| {
Transaction::new(
&mint.keypair(),
srckeys[i].pubkey(),
mint_total / num_src_accounts as i64,
mint.last_id(),
)
})
.collect();
bencher.iter(move || {
let bank = Arc::new(Bank::new(&mint));
let verified_setup: Vec<_> =
to_packets_chunked(&packet_recycler, &setup_transactions.clone(), tx)
.into_iter()
.map(|x| {
let len = (*x).read().unwrap().packets.len();
(x, iter::repeat(1).take(len).collect())
})
.collect();
verified_sender.send(verified_setup).unwrap();
BankingStage::process_packets(&bank, &verified_receiver, &signal_sender, &packet_recycler)
.unwrap();
check_txs(&signal_receiver, num_src_accounts);
let verified: Vec<_> = to_packets_chunked(&packet_recycler, &transactions.clone(), 192)
.into_iter()
.map(|x| {
let len = (*x).read().unwrap().packets.len();
(x, iter::repeat(1).take(len).collect())
})
.collect();
verified_sender.send(verified).unwrap();
BankingStage::process_packets(&bank, &verified_receiver, &signal_sender, &packet_recycler)
.unwrap();
check_txs(&signal_receiver, tx);
});
}
#[bench]
fn bench_banking_stage_single_from(bencher: &mut Bencher) {
let tx = 10_000_usize;
let mint = Mint::new(1_000_000_000_000);
let mut pubkeys = Vec::new();
let num_keys = 8;
for _ in 0..num_keys {
pubkeys.push(Keypair::new().pubkey());
}
let transactions: Vec<_> = (0..tx)
let bank = Arc::new(Bank::new(&mint));
let dummy = Transaction::system_move(
&mint.keypair(),
mint.keypair().pubkey(),
1,
mint.last_id(),
0,
);
let transactions: Vec<_> = (0..txes)
.into_par_iter()
.map(|i| {
Transaction::new(
&mint.keypair(),
pubkeys[i % num_keys],
i as i64,
mint.last_id(),
)
})
.collect();
let (verified_sender, verified_receiver) = channel();
let (signal_sender, signal_receiver) = channel();
let packet_recycler = PacketRecycler::default();
.map(|_| {
let mut new = dummy.clone();
let from: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
let to: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
new.keys[0] = Pubkey::new(&from[0..32]);
new.keys[1] = Pubkey::new(&to[0..32]);
new.signature = Signature::new(&sig[0..64]);
new
}).collect();
// fund all the accounts
transactions.iter().for_each(|tx| {
let fund = Transaction::system_move(
&mint.keypair(),
tx.keys[0],
mint_total / txes as i64,
mint.last_id(),
0,
);
assert!(bank.process_transaction(&fund).is_ok());
});
//sanity check, make sure all the transactions can execute sequentially
transactions.iter().for_each(|tx| {
let res = bank.process_transaction(&tx);
assert!(res.is_ok(), "sanity test transactions");
});
bank.clear_signatures();
//sanity check, make sure all the transactions can execute in parallel
let res = bank.process_transactions(&transactions);
for r in res {
assert!(r.is_ok(), "sanity parallel execution");
}
bank.clear_signatures();
let verified: Vec<_> = to_packets_chunked(&transactions.clone(), 192)
.into_iter()
.map(|x| {
let len = x.read().unwrap().packets.len();
(x, iter::repeat(1).take(len).collect())
}).collect();
let (_stage, signal_receiver) = BankingStage::new(&bank, verified_receiver, Default::default());
bencher.iter(move || {
let bank = Arc::new(Bank::new(&mint));
let verified: Vec<_> = to_packets_chunked(&packet_recycler, &transactions.clone(), tx)
.into_iter()
.map(|x| {
let len = (*x).read().unwrap().packets.len();
(x, iter::repeat(1).take(len).collect())
})
.collect();
verified_sender.send(verified).unwrap();
BankingStage::process_packets(&bank, &verified_receiver, &signal_sender, &packet_recycler)
.unwrap();
check_txs(&signal_receiver, tx);
for v in verified.chunks(verified.len() / NUM_THREADS) {
verified_sender.send(v.to_vec()).unwrap();
}
check_txs(&signal_receiver, txes);
bank.clear_signatures();
// make sure the tx last id is still registered
bank.register_entry_id(&mint.last_id());
});
}

benches/ledger.rs

@ -4,8 +4,8 @@ extern crate test;
use solana::hash::{hash, Hash};
use solana::ledger::{next_entries, reconstruct_entries_from_blobs, Block};
use solana::packet::BlobRecycler;
use solana::signature::{Keypair, KeypairUtil};
use solana::system_transaction::SystemTransaction;
use solana::transaction::Transaction;
use test::Bencher;
@ -14,13 +14,12 @@ fn bench_block_to_blobs_to_block(bencher: &mut Bencher) {
let zero = Hash::default();
let one = hash(&zero.as_ref());
let keypair = Keypair::new();
let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, one);
let tx0 = Transaction::system_move(&keypair, keypair.pubkey(), 1, one, 0);
let transactions = vec![tx0; 10];
let entries = next_entries(&zero, 1, transactions);
let blob_recycler = BlobRecycler::default();
bencher.iter(|| {
let blobs = entries.to_blobs(&blob_recycler);
let blobs = entries.to_blobs();
assert_eq!(reconstruct_entries_from_blobs(blobs).unwrap(), entries);
});
}

benches/sigverify.rs

@ -4,9 +4,9 @@ extern crate rayon;
extern crate solana;
extern crate test;
use solana::packet::{to_packets, PacketRecycler};
use solana::packet::to_packets;
use solana::sigverify;
use solana::transaction::test_tx;
use solana::system_transaction::test_tx;
use test::Bencher;
#[bench]
@ -14,8 +14,7 @@ fn bench_sigverify(bencher: &mut Bencher) {
let tx = test_tx();
// generate packet vector
let packet_recycler = PacketRecycler::default();
let batches = to_packets(&packet_recycler, &vec![tx; 128]);
let batches = to_packets(&vec![tx; 128]);
// verify packets
bencher.iter(|| {

ci/buildkite-secondary.yml (new file)

@ -0,0 +1,7 @@
steps:
- command: "ci/snap.sh"
timeout_in_minutes: 40
name: "snap [public]"
- command: "ci/docker-solana/build.sh"
timeout_in_minutes: 20
name: "docker-solana"

ci/buildkite-snap.yml (deleted)

@ -1,4 +0,0 @@
steps:
- command: "ci/snap.sh"
timeout_in_minutes: 40
name: "snap [public]"

ci/buildkite.yml

@ -1,14 +1,14 @@
steps:
- command: "ci/docker-run.sh solanalabs/rust:1.28.0 ci/test-stable.sh"
- command: "ci/docker-run.sh solanalabs/rust:1.29.1 ci/test-stable.sh"
name: "stable [public]"
env:
CARGO_TARGET_CACHE_NAME: "stable"
timeout_in_minutes: 30
# - command: "ci/docker-run.sh solanalabs/rust-nightly ci/test-bench.sh"
# name: "bench [public]"
# env:
# CARGO_TARGET_CACHE_NAME: "nightly"
# timeout_in_minutes: 30
- command: "ci/docker-run.sh solanalabs/rust-nightly ci/test-bench.sh"
name: "bench [public]"
env:
CARGO_TARGET_CACHE_NAME: "nightly"
timeout_in_minutes: 30
- command: "ci/shellcheck.sh"
name: "shellcheck [public]"
timeout_in_minutes: 20
@ -24,13 +24,14 @@ steps:
timeout_in_minutes: 20
agents:
- "queue=cuda"
- command: "ci/test-large-network.sh || true"
name: "large-network [public] [ignored]"
env:
CARGO_TARGET_CACHE_NAME: "stable"
timeout_in_minutes: 20
agents:
- "queue=large"
# TODO: Fix and re-enable test-large-network.sh
# - command: "ci/test-large-network.sh || true"
# name: "large-network [public] [ignored]"
# env:
# CARGO_TARGET_CACHE_NAME: "stable"
# timeout_in_minutes: 20
# agents:
# - "queue=large"
- command: "ci/pr-snap.sh"
timeout_in_minutes: 20
name: "snap [public]"
@ -38,7 +39,7 @@ steps:
- command: "ci/publish-crate.sh"
timeout_in_minutes: 20
name: "publish crate [public]"
- trigger: "solana-snap"
- trigger: "solana-secondary"
branches: "!pull/*"
async: true
build:

View File

@ -7,11 +7,18 @@ usage() {
echo a CI-appropriate environment.
echo
echo "--nopull Skip the dockerhub image update"
echo "--shell Skip command and enter an interactive shell"
echo
}
cd "$(dirname "$0")/.."
INTERACTIVE=false
if [[ $1 = --shell ]]; then
INTERACTIVE=true
shift
fi
NOPULL=false
if [[ $1 = --nopull ]]; then
NOPULL=true
@ -64,5 +71,15 @@ ARGS+=(
--env SNAPCRAFT_CREDENTIALS_KEY
)
if $INTERACTIVE; then
if [[ -n $1 ]]; then
echo
echo "Note: '$*' ignored due to --shell argument"
echo
fi
set -x
exec docker run --interactive --tty "${ARGS[@]}" "$IMAGE" bash
fi
set -x
exec docker run "${ARGS[@]}" "$IMAGE" "$@"

ci/docker-rust/Dockerfile

@ -1,6 +1,6 @@
# Note: when the rust version (1.28) is changed also modify
# Note: when the rust version is changed also modify
# ci/buildkite.yml to pick up the new image tag
FROM rust:1.28
FROM rust:1.29.1
RUN set -x && \
apt update && \
@ -18,6 +18,7 @@ RUN set -x && \
sudo \
&& \
rustup component add rustfmt-preview && \
rustup component add clippy-preview && \
rm -rf /var/lib/apt/lists/* && \
rustc --version && \
cargo --version

ci/docker-solana/.gitignore (new file)

@ -0,0 +1 @@
cargo-install/

ci/docker-solana/Dockerfile (new file)

@ -0,0 +1,13 @@
FROM debian:stretch
# JSON RPC port
EXPOSE 8899/tcp
# Install libssl
RUN apt update && \
apt-get install -y libssl-dev && \
rm -rf /var/lib/apt/lists/*
COPY usr/bin /usr/bin/
ENTRYPOINT [ "/usr/bin/solana-entrypoint.sh" ]
CMD [""]

ci/docker-solana/README.md (new file)

@ -0,0 +1,17 @@
## Minimal Solana Docker image
This image is automatically updated by CI
https://hub.docker.com/r/solanalabs/solana/
### Usage:
Run the latest beta image:
```bash
$ docker run --rm -p 8899:8899 solanalabs/solana:beta
```
Run the latest edge image:
```bash
$ docker run --rm -p 8899:8899 solanalabs/solana:edge
```
Port *8899* is the JSON RPC port, which is used by clients to communicate with the network.

ci/docker-solana/build.sh (new executable file)

@ -0,0 +1,38 @@
#!/bin/bash -ex
cd "$(dirname "$0")"
eval "$(../channel-info.sh)"
if [[ $BUILDKITE_BRANCH = "$STABLE_CHANNEL" ]]; then
CHANNEL=stable
elif [[ $BUILDKITE_BRANCH = "$EDGE_CHANNEL" ]]; then
CHANNEL=edge
elif [[ $BUILDKITE_BRANCH = "$BETA_CHANNEL" ]]; then
CHANNEL=beta
fi
if [[ -z $CHANNEL ]]; then
echo Unable to determine channel to publish into, exiting.
exit 0
fi
rm -rf usr/
../docker-run.sh solanalabs/rust:1.29.1 \
cargo install --path . --root ci/docker-solana/usr
cp -f entrypoint.sh usr/bin/solana-entrypoint.sh
docker build -t solanalabs/solana:$CHANNEL .
maybeEcho=
if [[ -z $CI ]]; then
echo "Not CI, skipping |docker push|"
maybeEcho="echo"
else
(
set +x
if [[ -n $DOCKER_PASSWORD && -n $DOCKER_USERNAME ]]; then
echo "$DOCKER_PASSWORD" | docker login --username "$DOCKER_USERNAME" --password-stdin
fi
)
fi
$maybeEcho docker push solanalabs/solana:$CHANNEL

ci/docker-solana/entrypoint.sh (new executable file)

@ -0,0 +1,23 @@
#!/bin/bash -ex
export RUST_LOG=solana=info
export RUST_BACKTRACE=1
solana-keygen -o /config/leader-keypair.json
solana-keygen -o /config/drone-keypair.json
solana-genesis --tokens=1000000000 --ledger /ledger < /config/drone-keypair.json
solana-fullnode-config --keypair=/config/leader-keypair.json -l > /config/leader-config.json
solana-drone --keypair /config/drone-keypair.json --network 127.0.0.1:8001 &
drone=$!
solana-fullnode --identity /config/leader-config.json --ledger /ledger/ &
fullnode=$!
abort() {
kill "$drone" "$fullnode"
}
trap abort SIGINT SIGTERM
wait "$fullnode"
kill "$drone" "$fullnode"


@ -73,7 +73,7 @@ echo "--- Node count"
set -x
client_id=/tmp/client-id.json-$$
$solana_keygen -o $client_id
$solana_bench_tps --identity $client_id --num-nodes 3 --converge-only
$solana_bench_tps --identity $client_id --num-nodes 3 --reject-extra-nodes --converge-only
rm -rf $client_id
) || flag_error

ci/snap.sh

@ -10,14 +10,14 @@ fi
eval "$(ci/channel-info.sh)"
if [[ $BUILDKITE_BRANCH = "$STABLE_CHANNEL" ]]; then
SNAP_CHANNEL=stable
CHANNEL=stable
elif [[ $BUILDKITE_BRANCH = "$EDGE_CHANNEL" ]]; then
SNAP_CHANNEL=edge
CHANNEL=edge
elif [[ $BUILDKITE_BRANCH = "$BETA_CHANNEL" ]]; then
SNAP_CHANNEL=beta
CHANNEL=beta
fi
if [[ -z $SNAP_CHANNEL ]]; then
if [[ -z $CHANNEL ]]; then
echo Unable to determine channel to publish into, exiting.
exit 0
fi
@ -51,11 +51,11 @@ if [[ ! -x /usr/bin/multilog ]]; then
sudo apt-get install -y daemontools
fi
echo --- build: $SNAP_CHANNEL channel
echo --- build: $CHANNEL channel
snapcraft
source ci/upload_ci_artifact.sh
upload_ci_artifact solana_*.snap
echo --- publish: $SNAP_CHANNEL channel
$DRYRUN snapcraft push solana_*.snap --release $SNAP_CHANNEL
echo --- publish: $CHANNEL channel
$DRYRUN snapcraft push solana_*.snap --release $CHANNEL

ci/test-bench.sh

@ -10,4 +10,8 @@ _() {
"$@"
}
_ cargo bench --features=unstable --verbose
set -o pipefail
BENCH_FILE=bench_output.log
_ cargo bench --features=unstable --verbose -- -Z unstable-options --format=json | tee $BENCH_FILE
_ cargo run --release --bin solana-upload-perf -- $BENCH_FILE

ci/test-nightly.sh

@ -13,10 +13,6 @@ _() {
_ cargo build --verbose --features unstable
_ cargo test --verbose --features=unstable
# TODO: Re-enable warnings-as-errors after clippy offers a way to not warn on unscoped lint names.
#_ cargo clippy -- --deny=warnings
_ cargo clippy
exit 0
# Coverage disabled (see issue #433)

ci/test-stable-perf.sh

@ -9,6 +9,7 @@ if ! ci/version-check.sh stable; then
ci/version-check.sh stable
fi
export RUST_BACKTRACE=1
export RUSTFLAGS="-D warnings"
./fetch-perf-libs.sh
export LD_LIBRARY_PATH=$PWD/target/perf-libs:/usr/local/cuda/lib64:$LD_LIBRARY_PATH

ci/test-stable.sh

@ -4,6 +4,7 @@ cd "$(dirname "$0")/.."
ci/version-check.sh stable
export RUST_BACKTRACE=1
export RUSTFLAGS="-D warnings"
_() {
echo "--- $*"

ci/testnet-deploy.sh

@ -4,11 +4,13 @@ cd "$(dirname "$0")"/..
zone=
leaderAddress=
leaderMachineType=
clientNodeCount=0
validatorNodeCount=10
publicNetwork=false
snapChannel=edge
delete=false
enableGpu=false
usage() {
exitcode=0
@ -22,7 +24,7 @@ usage: $0 [name] [zone] [options...]
Deploys a CD testnet
name - name of the network
zone - GCE to deploy the network into
zone - zone to deploy the network into
options:
-s edge|beta|stable - Deploy the specified Snap release channel
@ -30,6 +32,8 @@ Deploys a CD testnet
-n [number] - Number of validator nodes (default: $validatorNodeCount)
-c [number] - Number of client nodes (default: $clientNodeCount)
-P - Use public network IP addresses (default: $publicNetwork)
-G - Enable GPU, and set count/type of GPUs to use (e.g. n1-standard-16 --accelerator count=4,type=nvidia-tesla-k80)
-g - Enable GPU (default: $enableGpu)
-a [address] - Set the leader node's external IP address to this GCE address
-d - Delete the network
@ -45,7 +49,7 @@ zone=$2
[[ -n $zone ]] || usage "Zone not specified"
shift 2
while getopts "h?p:Pn:c:s:a:d" opt; do
while getopts "h?p:Pn:c:s:gG:a:d" opt; do
case $opt in
h | \?)
usage
@ -69,6 +73,13 @@ while getopts "h?p:Pn:c:s:a:d" opt; do
;;
esac
;;
g)
enableGpu=true
;;
G)
enableGpu=true
leaderMachineType=$OPTARG
;;
a)
leaderAddress=$OPTARG
;;
@ -86,11 +97,18 @@ gce_create_args=(
-a "$leaderAddress"
-c "$clientNodeCount"
-n "$validatorNodeCount"
-g
-p "$netName"
-z "$zone"
)
if $enableGpu; then
if [[ -z $leaderMachineType ]]; then
gce_create_args+=(-g)
else
gce_create_args+=(-G "$leaderMachineType")
fi
fi
if $publicNetwork; then
gce_create_args+=(-P)
fi
@ -98,7 +116,7 @@ fi
set -x
echo --- gce.sh delete
time net/gce.sh delete -p "$netName"
time net/gce.sh delete -z "$zone" -p "$netName"
if $delete; then
exit 0
fi
@ -108,6 +126,11 @@ time net/gce.sh create "${gce_create_args[@]}"
net/init-metrics.sh -e
echo --- net.sh start
time net/net.sh start -s "$snapChannel"
maybeRejectExtraNodes=
if ! $publicNetwork; then
maybeRejectExtraNodes="-o rejectExtraNodes"
fi
# shellcheck disable=SC2086 # Don't want to double quote maybeRejectExtraNodes
time net/net.sh start -s "$snapChannel" $maybeRejectExtraNodes
exit 0

ci/testnet-sanity.sh

@ -9,11 +9,12 @@ usage() {
echo "Error: $*"
fi
cat <<EOF
usage: $0 [name]
usage: $0 [name] [zone]
Sanity check a CD testnet
name - name of the network
zone - zone of the network
Note: the SOLANA_METRICS_CONFIG environment variable is used to configure
metrics
@ -22,15 +23,18 @@ EOF
}
netName=$1
zone=$2
[[ -n $netName ]] || usage ""
[[ -n $zone ]] || usage "Zone not specified"
set -x
echo --- gce.sh config
net/gce.sh config -p "$netName"
net/gce.sh config -p "$netName" -z "$zone"
net/init-metrics.sh -e
echo --- net.sh sanity
net/net.sh sanity \
${NO_LEDGER_VERIFY:+-o noLedgerVerify} \
${NO_VALIDATOR_SANITY:+-o noValidatorSanity} \
${REJECT_EXTRA_NODES:+-o rejectExtraNodes} \
exit 0

ci/version-check.sh

@ -23,8 +23,8 @@ nightly)
require cargo 1.29.[0-9]+-nightly
;;
stable)
require rustc 1.28.[0-9]+
require cargo 1.28.[0-9]+
require rustc 1.29.[0-9]+
require cargo 1.29.[0-9]+
;;
*)
echo Error: unknown argument: "$1"

common/Cargo.toml (new file)

@ -0,0 +1,22 @@
[package]
name = "solana_program_interface"
version = "0.1.0"
authors = [
"Anatoly Yakovenko <anatoly@solana.com>",
"Greg Fitzgerald <greg@solana.com>",
"Stephen Akridge <stephen@solana.com>",
"Michael Vines <mvines@solana.com>",
"Rob Walker <rob@solana.com>",
"Pankaj Garg <pankaj@solana.com>",
"Tyera Eulberg <tyera@solana.com>",
"Jack May <jack@solana.com>",
]
[dependencies]
bincode = "1.0.0"
bs58 = "0.2.0"
generic-array = { version = "0.12.0", default-features = false, features = ["serde"] }
serde = "1.0.27"
serde_derive = "1.0.27"

common/src/account.rs (new file)

@ -0,0 +1,29 @@
use pubkey::Pubkey;
/// An Account with userdata that is stored on chain
#[derive(Serialize, Deserialize, Debug, Clone, Default)]
pub struct Account {
/// tokens in the account
pub tokens: i64,
/// user data
/// A transaction can write to its userdata
pub userdata: Vec<u8>,
/// contract id this contract belongs to
pub program_id: Pubkey,
}
impl Account {
pub fn new(tokens: i64, space: usize, program_id: Pubkey) -> Account {
Account {
tokens,
userdata: vec![0u8; space],
program_id,
}
}
}
#[derive(Debug)]
pub struct KeyedAccount<'a> {
pub key: &'a Pubkey,
pub account: &'a mut Account,
}

common/src/lib.rs (new file)

@ -0,0 +1,7 @@
pub mod account;
pub mod pubkey;
extern crate bincode;
extern crate bs58;
extern crate generic_array;
#[macro_use]
extern crate serde_derive;

common/src/pubkey.rs (new file)

@ -0,0 +1,31 @@
use bs58;
use generic_array::typenum::U32;
use generic_array::GenericArray;
use std::fmt;
#[derive(Serialize, Deserialize, Clone, Copy, Default, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct Pubkey(GenericArray<u8, U32>);
impl Pubkey {
pub fn new(pubkey_vec: &[u8]) -> Self {
Pubkey(GenericArray::clone_from_slice(&pubkey_vec))
}
}
impl AsRef<[u8]> for Pubkey {
fn as_ref(&self) -> &[u8] {
&self.0[..]
}
}
impl fmt::Debug for Pubkey {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", bs58::encode(self.0).into_string())
}
}
impl fmt::Display for Pubkey {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", bs58::encode(self.0).into_string())
}
}
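Taken together, the new `solana_program_interface` crate gives programs a small, dependency-light surface. A hypothetical usage sketch (not part of this diff):

```rust
extern crate solana_program_interface;

use solana_program_interface::account::Account;
use solana_program_interface::pubkey::Pubkey;

fn main() {
    // An all-zero id, like the system program id mentioned above.
    let program_id = Pubkey::new(&[0u8; 32]);
    // 100 tokens and 64 bytes of zeroed userdata owned by `program_id`.
    let account = Account::new(100, 64, program_id);
    println!("{} owns {} tokens", account.program_id, account.tokens);
}
```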

doc/json-rpc.md

@ -8,7 +8,7 @@ To interact with a Solana node inside a JavaScript application, use the [solana-
RPC Endpoint
---
**Default port:** 8899
**Default port:** 8899
eg. http://localhost:8899, http://192.168.1.88:8899
Methods
@ -17,7 +17,9 @@ Methods
* [confirmTransaction](#confirmtransaction)
* [getAddress](#getaddress)
* [getBalance](#getbalance)
* [getAccountInfo](#getaccountinfo)
* [getLastId](#getlastid)
* [getSignatureStatus](#getsignaturestatus)
* [getTransactionCount](#gettransactioncount)
* [requestAirdrop](#requestairdrop)
* [sendTransaction](#sendtransaction)
@ -96,6 +98,30 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "
---
### getAccountInfo
Returns all information associated with the account of the provided Pubkey
##### Parameters:
* `string` - Pubkey of account to query, as base-58 encoded string
##### Results:
The result field will be a JSON object with the following sub fields:
* `tokens`, number of tokens assigned to this account, as a signed 64-bit integer
* `program_id`, array of 32 bytes representing the program this account has been assigned to
* `userdata`, array of bytes representing any userdata associated with the account
##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getAccountInfo", "params":["FVxxngPx368XvMCoeskdd6U8cZJFsfa1BEtGWqyAxRj4"]}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":{"program_id":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"tokens":1,"userdata":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"id":1}
```
---
### getLastId
Returns the last entry ID from the ledger
@ -116,6 +142,31 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
---
### getSignatureStatus
Returns the status of a given signature. This method is similar to
[confirmTransaction](#confirmtransaction) but provides more resolution for error
events.
##### Parameters:
* `string` - Signature of Transaction to confirm, as base-58 encoded string
##### Results:
* `string` - Transaction status:
* `Confirmed` - Transaction was successful
* `SignatureNotFound` - Unknown transaction
* `ProgramRuntimeError` - An error occurred in the program that processed this Transaction
* `GenericFailure` - Some other error occurred. **Note**: In the future new Transaction statuses may be added to this list. It's safe to assume that all new statuses will be more specific error conditions that previously presented as `GenericFailure`
##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getSignatureStatus", "params":["5VERv8NMvzbJMEkV8xnrLkEaWRtSz9CosKDYjCJjBRnbJLgp8uirBgmQpjKhoR4tjF3ZpRzrFmBV6UjKdiSZkQUW"]}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":"SignatureNotFound","id":1}
```
---
### getTransactionCount
Returns the current Transaction count from the ledger

doc/testnet.md

@ -1,8 +1,11 @@
# TestNet debugging info
Currently we have two testnets, 'perf' and 'master', both on the master branch of the solana repo. Deploys happen
at the top of every hour with the latest code. 'perf' has more cores for the client machine to flood the network
with transactions until failure.
Currently we have three testnets:
* `testnet` - public beta channel testnet accessible via testnet.solana.com. Runs 24/7
* `testnet-perf` - private beta channel testnet with clients trying to flood the network
with transactions until failure. Runs 24/7
* `testnet-master` - private edge channel testnet with clients trying to flood the network
with transactions until failure. Runs on weekday mornings for a couple hours
## Deploy process
@ -12,17 +15,21 @@ Validators are selected based on their machine name and everyone gets the binari
## Where are the testnet logs?
For the client they are put in `/tmp/solana`; for validators and leaders they are in `/var/snap/solana/current/`.
You can also see the backtrace of the client by ssh'ing into the client node and doing:
Attach to the testnet first by running one of:
```bash
$ sudo -u testnet-deploy
$ tmux attach -t solana
$ net/gce.sh config testnet-solana-com
$ net/gce.sh config master-testnet-solana-com
$ net/gce.sh config perf-testnet-solana-com
```
## How do I reset the testnet?
Then run:
```bash
$ net/ssh.sh
```
for log location details
Through buildkite.
## How do I reset the testnet?
Manually trigger the [testnet-deploy](https://buildkite.com/solana-labs/testnet-deploy/) pipeline
## How can I scale the tx generation rate?
@ -32,13 +39,9 @@ variable `RAYON_NUM_THREADS=<xx>`
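For example, a sketch of overriding the thread count when launching a client (the script path and value here are illustrative, not prescriptive):
```bash
$ RAYON_NUM_THREADS=4 multinode-demo/client.sh testnet.solana.com
```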
## How can I test a change on the testnet?
Currently, a merged PR is the only way to test a change on the testnet.
Currently, a merged PR is the only way to test a change on the testnet. But you
can run your own testnet using the scripts in the `net/` directory.
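For example, a minimal run mirroring the Quick Start in `net/README.md` (node counts are illustrative; billing starts at `create`):
```bash
$ cd net/
$ ./gce.sh create -n 5 -c 1  # 5 validators, 1 client
$ ./net.sh start
$ ./gce.sh delete            # tear down when done
```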
## Adjusting the number of clients or validators on the testnet
Through the [testnet-deploy](https://buildkite.com/solana-labs/testnet-deploy/) settings.
1. Go to the [GCP Instance Group](https://console.cloud.google.com/compute/instanceGroups/list?project=principal-lane-200702) tab
2. Find the client or validator instance group you'd like to adjust
3. Edit it (pencil icon), change the "Number of instances", then click "Save" button
4. Refresh until the change to number of instances has been executed
5. Click the "New Build" button on the [testnet-deploy](https://buildkite.com/solana-labs/testnet-deploy/)
buildkite job to initiate a redeploy of the network with the updated instance count.


@ -15,7 +15,7 @@ mkdir -p target/perf-libs
cd target/perf-libs
(
set -x
curl https://solana-perf.s3.amazonaws.com/master/x86_64-unknown-linux-gnu/solana-perf.tgz | tar zxvf -
curl https://solana-perf.s3.amazonaws.com/v0.9.0/x86_64-unknown-linux-gnu/solana-perf.tgz | tar zxvf -
)
if [[ -r /usr/local/cuda/version.txt && -r cuda-version.txt ]]; then


@ -18,4 +18,8 @@ usage() {
exit 1
}
$solana_bench_tps "$@"
if [[ -z $1 ]]; then # default behavior
$solana_bench_tps --identity config-private/client-id.json --network 127.0.0.1:8001 --duration 90
else
$solana_bench_tps "$@"
fi


@ -8,9 +8,9 @@
#
rsync=rsync
leader_logger="cat"
validator_logger="cat"
drone_logger="cat"
leader_logger="tee leader.log"
validator_logger="tee validator.log"
drone_logger="tee drone.log"
if [[ $(uname) != Linux ]]; then
# Protect against unsupported configurations to prevent non-obvious errors
@ -28,13 +28,7 @@ fi
if [[ -d $SNAP ]]; then # Running inside a Linux Snap?
solana_program() {
declare program="$1"
if [[ "$program" = wallet || "$program" = bench-tps ]]; then
# TODO: Merge wallet.sh/client.sh functionality into
# solana-wallet/solana-demo-client proper and remove this special case
printf "%s/bin/solana-%s" "$SNAP" "$program"
else
printf "%s/command-%s.wrapper" "$SNAP" "$program"
fi
printf "%s/command-%s.wrapper" "$SNAP" "$program"
}
rsync="$SNAP"/bin/rsync
multilog="$SNAP/bin/multilog t s16777215 n200"
@ -114,6 +108,12 @@ tune_networking() {
sysctl net.core.rmem_default 2>/dev/null 1>/dev/null &&
sudo sysctl -w net.core.rmem_default=26214400 1>/dev/null 2>/dev/null
sysctl net.core.wmem_max 2>/dev/null 1>/dev/null &&
sudo sysctl -w net.core.wmem_max=67108864 1>/dev/null 2>/dev/null
sysctl net.core.wmem_default 2>/dev/null 1>/dev/null &&
sudo sysctl -w net.core.wmem_default=26214400 1>/dev/null 2>/dev/null
) || true
fi


@ -34,6 +34,7 @@ ip_address_arg=-l
num_tokens=1000000000
node_type_leader=true
node_type_validator=true
node_type_client=true
while getopts "h?n:lpt:" opt; do
case $opt in
h|\?)
@ -55,10 +56,17 @@ while getopts "h?n:lpt:" opt; do
leader)
node_type_leader=true
node_type_validator=false
node_type_client=false
;;
validator)
node_type_leader=false
node_type_validator=true
node_type_client=false
;;
client)
node_type_leader=false
node_type_validator=false
node_type_client=true
;;
*)
usage "Error: unknown node type: $node_type"
@ -74,13 +82,19 @@ done
set -e
if $node_type_leader; then
for i in "$SOLANA_CONFIG_DIR" "$SOLANA_CONFIG_PRIVATE_DIR"; do
echo "Cleaning $i"
rm -rvf "$i"
mkdir -p "$i"
done
for i in "$SOLANA_CONFIG_DIR" "$SOLANA_CONFIG_VALIDATOR_DIR" "$SOLANA_CONFIG_PRIVATE_DIR"; do
echo "Cleaning $i"
rm -rvf "$i"
mkdir -p "$i"
done
if $node_type_client; then
client_id_path="$SOLANA_CONFIG_PRIVATE_DIR"/client-id.json
$solana_keygen -o "$client_id_path"
ls -lhR "$SOLANA_CONFIG_PRIVATE_DIR"/
fi
if $node_type_leader; then
leader_address_args=("$ip_address_arg")
leader_id_path="$SOLANA_CONFIG_PRIVATE_DIR"/leader-id.json
mint_path="$SOLANA_CONFIG_PRIVATE_DIR"/mint.json
@ -102,11 +116,6 @@ fi
if $node_type_validator; then
echo "Cleaning $SOLANA_CONFIG_VALIDATOR_DIR"
rm -rvf "$SOLANA_CONFIG_VALIDATOR_DIR"
mkdir -p "$SOLANA_CONFIG_VALIDATOR_DIR"
validator_address_args=("$ip_address_arg" -b 9000)
validator_id_path="$SOLANA_CONFIG_PRIVATE_DIR"/validator-id.json


@ -1,50 +0,0 @@
#!/bin/bash
#
# Runs solana-wallet against the specified network
#
# usage: $0 <rsync network path to solana repo on leader machine>
#
here=$(dirname "$0")
# shellcheck source=multinode-demo/common.sh
source "$here"/common.sh
# shellcheck source=scripts/oom-score-adj.sh
source "$here"/../scripts/oom-score-adj.sh
# if $1 isn't host:path, something.com, or a valid local path
if [[ ${1%:} != "$1" || "$1" =~ [^.]\.[^.] || -d $1 ]]; then
leader=$1 # interpret
shift
else
if [[ -d "$SNAP" ]]; then
leader=testnet.solana.com # Default to testnet when running as a Snap
else
leader=$here/.. # Default to local solana repo
fi
fi
if [[ "$1" = "reset" ]]; then
echo Wallet resetting
rm -rf "$SOLANA_CONFIG_CLIENT_DIR"
exit 0
fi
rsync_leader_url=$(rsync_url "$leader")
set -e
mkdir -p "$SOLANA_CONFIG_CLIENT_DIR"
if [[ ! -r "$SOLANA_CONFIG_CLIENT_DIR"/leader.json ]]; then
echo "Fetching leader configuration from $rsync_leader_url"
$rsync -Pz "$rsync_leader_url"/config/leader.json "$SOLANA_CONFIG_CLIENT_DIR"/
fi
client_id_path="$SOLANA_CONFIG_CLIENT_DIR"/id.json
if [[ ! -r $client_id_path ]]; then
echo "Generating client identity: $client_id_path"
$solana_keygen -o "$client_id_path"
fi
# shellcheck disable=SC2086 # $solana_wallet should not be quoted
exec $solana_wallet \
-l "$SOLANA_CONFIG_CLIENT_DIR"/leader.json -k "$client_id_path" --timeout 10 "$@"


@ -5,15 +5,30 @@ intended to be both dev and CD friendly.
### User Account Prerequisites
Log in to GCP with:
GCP and AWS are supported.
#### GCP
First authenticate with
```bash
$ gcloud auth login
```
Also ensure that `$(whoami)` is the name of an InfluxDB user account with enough
access to create a new database.
#### AWS
Obtain your credentials from the AWS IAM Console and configure the AWS CLI with
```bash
$ aws configure
```
More information on AWS CLI configuration can be found [here](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-quick-configuration)
### Metrics configuration
Ensure that `$(whoami)` is the name of an InfluxDB user account with enough
access to create a new InfluxDB database. Ask mvines@ for help if needed.
## Quick Start
NOTE: This example uses GCP. If you are using AWS, replace `./gce.sh` with
`./ec2.sh` in the commands.
```bash
$ cd net/
$ ./gce.sh create -n 5 -c 1 #<-- Create a GCE testnet with 5 validators, 1 client (billing starts here)
@ -32,6 +47,10 @@ network over public IP addresses:
```bash
$ ./gce.sh create -P ...
```
or
```bash
$ ./ec2.sh create -P ...
```
### Deploying a Snap-based network
To deploy the latest pre-built `edge` channel Snap (ie, latest from the `master`
@ -46,6 +65,10 @@ First ensure the network instances are created with GPU enabled:
```bash
$ ./gce.sh create -g ...
```
or
```bash
$ ./ec2.sh create -g ...
```
If deploying a Snap-based network nothing further is required, as GPU presence
is detected at runtime and the CUDA build is auto selected.
@ -58,9 +81,20 @@ $ ./net.sh start -f "cuda,erasure"
### How to interact with a CD testnet deployed by ci/testnet-deploy.sh
**AWS-Specific Extra Setup**: Follow the steps in `scripts/add-solana-user-authorized_keys.sh`,
then redeploy the testnet before continuing in this section.
Taking **master-testnet-solana-com** as an example, configure your workspace for
the testnet using:
```
```bash
$ ./gce.sh config -p master-testnet-solana-com
$ ./ssh.sh # <-- Details on how to ssh into any testnet node
```
or
```bash
$ ./ec2.sh config -p master-testnet-solana-com
```
Then run the following for details on how to ssh into any testnet node
```bash
$ ./ssh.sh
```

net/ec2.sh Symbolic link

@ -0,0 +1 @@
gce.sh


@ -1,27 +1,49 @@
#!/bin/bash -e
here=$(dirname "$0")
# shellcheck source=net/scripts/gcloud.sh
source "$here"/scripts/gcloud.sh
# shellcheck source=net/common.sh
source "$here"/common.sh
cloudProvider=$(basename "$0" .sh)
bootDiskType=""
case $cloudProvider in
gce)
# shellcheck source=net/scripts/gce-provider.sh
source "$here"/scripts/gce-provider.sh
imageName="ubuntu-16-04-cuda-9-2-new"
cpuLeaderMachineType=n1-standard-16
gpuLeaderMachineType="$cpuLeaderMachineType --accelerator count=4,type=nvidia-tesla-k80"
leaderMachineType=$cpuLeaderMachineType
validatorMachineType=n1-standard-4
clientMachineType=n1-standard-16
;;
ec2)
# shellcheck source=net/scripts/ec2-provider.sh
source "$here"/scripts/ec2-provider.sh
imageName="ami-0466e26ccc0e752c1"
cpuLeaderMachineType=m4.4xlarge
gpuLeaderMachineType=p2.xlarge
leaderMachineType=$cpuLeaderMachineType
validatorMachineType=m4.xlarge
clientMachineType=m4.4xlarge
;;
*)
echo "Error: Unknown cloud provider: $cloudProvider"
;;
esac
prefix=testnet-dev-${USER//[^A-Za-z0-9]/}
validatorNodeCount=5
clientNodeCount=1
leaderBootDiskSize=1TB
leaderMachineType=n1-standard-16
leaderAccelerator=
validatorMachineType=n1-standard-4
validatorBootDiskSize=$leaderBootDiskSize
validatorAccelerator=
clientMachineType=n1-standard-16
clientBootDiskSize=40GB
clientAccelerator=
leaderBootDiskSizeInGb=1000
validatorBootDiskSizeInGb=$leaderBootDiskSizeInGb
clientBootDiskSizeInGb=75
imageName="ubuntu-16-04-cuda-9-2-new"
publicNetwork=false
zone="us-west1-b"
enableGpu=false
leaderAddress=
usage() {
@ -33,7 +55,7 @@ usage() {
cat <<EOF
usage: $0 [create|config|delete] [common options] [command-specific options]
Configure a GCE-based testnet
Manage testnet instances
create - create a new testnet (implies 'config')
config - configure the testnet and write a config file describing it
@ -47,10 +69,15 @@ Configure a GCE-based testnet
-n [number] - Number of validator nodes (default: $validatorNodeCount)
-c [number] - Number of client nodes (default: $clientNodeCount)
-P - Use public network IP addresses (default: $publicNetwork)
-z [zone] - GCP Zone for the nodes (default: $zone)
-i [imageName] - Existing image on GCE (default: $imageName)
-g - Enable GPU
-a [address] - Set the leader node's external IP address to this GCE address
-z [zone] - Zone for the nodes (default: $zone)
-g - Enable GPU (default: $enableGpu)
-G - Enable GPU, and set count/type of GPUs to use (e.g. $cpuLeaderMachineType --accelerator count=4,type=nvidia-tesla-k80)
-a [address] - Set the leader node's external IP address to this value.
For GCE, [address] is the "name" of the desired External
IP Address.
For EC2, [address] is the "allocation ID" of the desired
Elastic IP.
-d [disk-type] - Specify a boot disk type (default: none). Use pd-ssd to get an SSD on GCE.
config-specific options:
none
@ -68,7 +95,7 @@ command=$1
shift
[[ $command = create || $command = config || $command = delete ]] || usage "Invalid command: $command"
while getopts "h?p:Pi:n:c:z:ga:" opt; do
while getopts "h?p:Pn:c:z:gG:a:d:" opt; do
case $opt in
h | \?)
usage
@ -80,9 +107,6 @@ while getopts "h?p:Pi:n:c:z:ga:" opt; do
P)
publicNetwork=true
;;
i)
imageName=$OPTARG
;;
n)
validatorNodeCount=$OPTARG
;;
@ -90,14 +114,22 @@ while getopts "h?p:Pi:n:c:z:ga:" opt; do
clientNodeCount=$OPTARG
;;
z)
zone=$OPTARG
cloud_SetZone "$OPTARG"
;;
g)
leaderAccelerator="count=4,type=nvidia-tesla-k80"
enableGpu=true
leaderMachineType="$gpuLeaderMachineType"
;;
G)
enableGpu=true
leaderMachineType="$OPTARG"
;;
a)
leaderAddress=$OPTARG
;;
d)
bootDiskType=$OPTARG
;;
*)
usage "Error: unhandled option: $opt"
;;
@ -108,6 +140,37 @@ shift $((OPTIND - 1))
[[ -z $1 ]] || usage "Unexpected argument: $1"
sshPrivateKey="$netConfigDir/id_$prefix"
# cloud_ForEachInstance [cmd] [extra args to cmd]
#
# Execute a command for each element in the `instances` array
#
# cmd - The command to execute on each instance
# The command will receive arguments followed by any
# additional arguments supplied to cloud_ForEachInstance:
# name - name of the instance
# publicIp - The public IP address of this instance
# privateIp - The private IP address of this instance
# count - Monotonically increasing count for each
# invocation of cmd, starting at 1
# ... - Extra args to cmd..
#
#
cloud_ForEachInstance() {
declare cmd="$1"
shift
[[ -n $cmd ]] || { echo cloud_ForEachInstance: cmd not specified; exit 1; }
declare count=1
for info in "${instances[@]}"; do
declare name publicIp privateIp
IFS=: read -r name publicIp privateIp < <(echo "$info")
eval "$cmd" "$name" "$publicIp" "$privateIp" "$count" "$@"
count=$((count + 1))
done
}
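# Example usage, as seen in prepareInstancesAndWriteConfigFile below:
#   cloud_ForEachInstance recordInstanceIp leaderIp
#   cloud_ForEachInstance waitForStartupComplete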
prepareInstancesAndWriteConfigFile() {
$metricsWriteDatapoint "testnet-deploy net-config-begin=1"
@ -122,10 +185,10 @@ EOF
recordInstanceIp() {
declare name="$1"
declare publicIp="$3"
declare privateIp="$4"
declare publicIp="$2"
declare privateIp="$3"
declare arrayName="$6"
declare arrayName="$5"
echo "$arrayName+=($publicIp) # $name" >> "$configFile"
if [[ $arrayName = "leaderIp" ]]; then
@ -139,121 +202,133 @@ EOF
waitForStartupComplete() {
declare name="$1"
declare publicIp="$3"
declare publicIp="$2"
echo "Waiting for $name to finish booting..."
(
for i in $(seq 1 30); do
if (set -x; ssh "${sshOptions[@]}" "$publicIp" "test -f /.gce-startup-complete"); then
if (set -x; ssh "${sshOptions[@]}" "$publicIp" "test -f /.instance-startup-complete"); then
break
fi
sleep 2
echo "Retry $i..."
done
)
echo "$name has booted."
}
echo "Looking for leader instance..."
gcloud_FindInstances "name=$prefix-leader" show
cloud_FindInstance "$prefix-leader"
[[ ${#instances[@]} -eq 1 ]] || {
echo "Unable to find leader"
exit 1
}
echo "Fetching $sshPrivateKey from $leaderName"
(
rm -rf "$sshPrivateKey"{,pub}
declare leaderName
declare leaderZone
declare leaderIp
IFS=: read -r leaderName leaderZone leaderIp _ < <(echo "${instances[0]}")
IFS=: read -r leaderName leaderIp _ < <(echo "${instances[0]}")
set -x
# Try to ping the machine first.
timeout 60s bash -c "set -o pipefail; until ping -c 3 $leaderIp | tr - _; do echo .; done"
# Try to ping the machine first. There can be a delay between when the
# instance is reported as RUNNING and when it's reachable over the network
timeout 30s bash -c "set -o pipefail; until ping -c 3 $leaderIp | tr - _; do echo .; done"
if [[ ! -r $sshPrivateKey ]]; then
echo "Fetching $sshPrivateKey from $leaderName"
# Try to scp in a couple times, sshd may not yet be up even though the
# machine can be pinged...
set -o pipefail
for i in $(seq 1 10); do
if gcloud compute scp --zone "$leaderZone" \
"$leaderName:/solana-id_ecdsa" "$sshPrivateKey"; then
break
fi
sleep 1
echo "Retry $i..."
done
# Try to scp in a couple times, sshd may not yet be up even though the
# machine can be pinged...
set -x -o pipefail
for i in $(seq 1 30); do
if cloud_FetchFile "$leaderName" "$leaderIp" /solana-id_ecdsa "$sshPrivateKey"; then
break
fi
chmod 400 "$sshPrivateKey"
sleep 1
echo "Retry $i..."
done
chmod 400 "$sshPrivateKey"
ls -l "$sshPrivateKey"
fi
)
echo "leaderIp=()" >> "$configFile"
gcloud_ForEachInstance recordInstanceIp leaderIp
gcloud_ForEachInstance waitForStartupComplete
cloud_ForEachInstance recordInstanceIp leaderIp
cloud_ForEachInstance waitForStartupComplete
echo "Looking for validator instances..."
gcloud_FindInstances "name~^$prefix-validator" show
cloud_FindInstances "$prefix-validator"
[[ ${#instances[@]} -gt 0 ]] || {
echo "Unable to find validators"
exit 1
}
echo "validatorIpList=()" >> "$configFile"
gcloud_ForEachInstance recordInstanceIp validatorIpList
gcloud_ForEachInstance waitForStartupComplete
cloud_ForEachInstance recordInstanceIp validatorIpList
cloud_ForEachInstance waitForStartupComplete
echo "clientIpList=()" >> "$configFile"
echo "Looking for client instances..."
gcloud_FindInstances "name~^$prefix-client" show
cloud_FindInstances "$prefix-client"
[[ ${#instances[@]} -eq 0 ]] || {
gcloud_ForEachInstance recordInstanceIp clientIpList
gcloud_ForEachInstance waitForStartupComplete
cloud_ForEachInstance recordInstanceIp clientIpList
cloud_ForEachInstance waitForStartupComplete
}
echo "Wrote $configFile"
$metricsWriteDatapoint "testnet-deploy net-config-complete=1"
}
case $command in
delete)
delete() {
$metricsWriteDatapoint "testnet-deploy net-delete-begin=1"
# Delete the leader node first to prevent unusual metrics on the dashboard
# during shutdown.
# TODO: It would be better to fully cut-off metrics reporting before any
# instances are deleted.
for filter in "^$prefix-leader" "^$prefix-"; do
gcloud_FindInstances "name~$filter"
for filter in "$prefix-leader" "$prefix-"; do
echo "Searching for instances: $filter"
cloud_FindInstances "$filter"
if [[ ${#instances[@]} -eq 0 ]]; then
echo "No instances found matching '$filter'"
else
gcloud_DeleteInstances true
cloud_DeleteInstances true
fi
done
rm -f "$configFile"
$metricsWriteDatapoint "testnet-deploy net-delete-complete=1"
}
case $command in
delete)
delete
;;
create)
[[ -n $validatorNodeCount ]] || usage "Need number of nodes"
if [[ $validatorNodeCount -le 0 ]]; then
usage "One or more validator nodes is required"
fi
delete
$metricsWriteDatapoint "testnet-deploy net-create-begin=1"
rm -rf "$sshPrivateKey"{,.pub}
ssh-keygen -t ecdsa -N '' -f "$sshPrivateKey"
# Note: using rsa because |aws ec2 import-key-pair| seems to fail for ecdsa
ssh-keygen -t rsa -N '' -f "$sshPrivateKey"
printNetworkInfo() {
cat <<EOF
========================================================================================
Network composition:
Leader = $leaderMachineType (GPU=${leaderAccelerator:-none})
Validators = $validatorNodeCount x $validatorMachineType (GPU=${validatorAccelerator:-none})
Client(s) = $clientNodeCount x $clientMachineType (GPU=${clientAccelerator:-none})
Leader = $leaderMachineType (GPU=$enableGpu)
Validators = $validatorNodeCount x $validatorMachineType
Client(s) = $clientNodeCount x $clientMachineType
========================================================================================
@ -261,7 +336,7 @@ EOF
}
printNetworkInfo
declare startupScript="$netConfigDir"/gce-startup-script.sh
declare startupScript="$netConfigDir"/instance-startup-script.sh
cat > "$startupScript" <<EOF
#!/bin/bash -ex
# autogenerated at $(date)
@ -270,11 +345,12 @@ cat > /etc/motd <<EOM
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
This instance has not been fully configured.
See "startup-script" log messages in /var/log/syslog for status:
$ sudo cat /var/log/syslog | grep startup-script
See startup script log messages in /var/log/syslog for status:
$ sudo cat /var/log/syslog | egrep \\(startup-script\\|cloud-init\)
To block until setup is complete, run:
$ until [[ -f /.gce-startup-complete ]]; do sleep 1; done
$ until [[ -f /.instance-startup-complete ]]; do sleep 1; done
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
EOM
@ -296,31 +372,32 @@ $(
cat \
disable-background-upgrades.sh \
create-solana-user.sh \
add-solana-user-authorized_keys.sh \
install-earlyoom.sh \
install-rsync.sh \
install-libssl-compatability.sh \
install-rsync.sh \
)
cat > /etc/motd <<EOM
$(printNetworkInfo)
EOM
touch /.gce-startup-complete
touch /.instance-startup-complete
EOF
gcloud_CreateInstances "$prefix-leader" 1 "$zone" \
"$imageName" "$leaderMachineType" "$leaderBootDiskSize" "$leaderAccelerator" \
"$startupScript" "$leaderAddress"
cloud_CreateInstances "$prefix" "$prefix-leader" 1 \
"$imageName" "$leaderMachineType" "$leaderBootDiskSizeInGb" \
"$startupScript" "$leaderAddress" "$bootDiskType"
gcloud_CreateInstances "$prefix-validator" "$validatorNodeCount" "$zone" \
"$imageName" "$validatorMachineType" "$validatorBootDiskSize" "$validatorAccelerator" \
"$startupScript" ""
cloud_CreateInstances "$prefix" "$prefix-validator" "$validatorNodeCount" \
"$imageName" "$validatorMachineType" "$validatorBootDiskSizeInGb" \
"$startupScript" "" "$bootDiskType"
if [[ $clientNodeCount -gt 0 ]]; then
gcloud_CreateInstances "$prefix-client" "$clientNodeCount" "$zone" \
"$imageName" "$clientMachineType" "$clientBootDiskSize" "$clientAccelerator" \
"$startupScript" ""
cloud_CreateInstances "$prefix" "$prefix-client" "$clientNodeCount" \
"$imageName" "$clientMachineType" "$clientBootDiskSizeInGb" \
"$startupScript" "" "$bootDiskType"
fi
$metricsWriteDatapoint "testnet-deploy net-create-complete=1"


@ -34,6 +34,7 @@ Operate a configured testnet
sanity/start-specific options:
-o noLedgerVerify - Skip ledger verification
-o noValidatorSanity - Skip validator sanity
-o rejectExtraNodes - Require the exact number of nodes
stop-specific options:
none
@ -78,7 +79,7 @@ while getopts "h?S:s:o:f:" opt; do
;;
o)
case $OPTARG in
noLedgerVerify|noValidatorSanity)
noLedgerVerify|noValidatorSanity|rejectExtraNodes)
sanityExtraArgs="$sanityExtraArgs -o $OPTARG"
;;
*)
@ -253,8 +254,14 @@ start() {
SECONDS=0
pids=()
loopCount=0
for ipAddress in "${validatorIpList[@]}"; do
startValidator "$ipAddress"
# Stagger validator startup time. If too many validators boot up
# simultaneously, the leader node gets more rsync requests from the
# validators than it can handle.
((loopCount++ % 2 == 0)) && sleep 2
done
for pid in "${pids[@]}"; do


@ -27,6 +27,7 @@ missing() {
ledgerVerify=true
validatorSanity=true
rejectExtraNodes=false
while [[ $1 = -o ]]; do
opt="$2"
shift 2
@ -37,6 +38,9 @@ while [[ $1 = -o ]]; do
noValidatorSanity)
validatorSanity=false
;;
rejectExtraNodes)
rejectExtraNodes=true
;;
*)
echo "Error: unknown option: $opt"
exit 1
@ -82,14 +86,25 @@ esac
echo "--- $entrypointIp: wallet sanity"
(
set -x
scripts/wallet-sanity.sh "$entrypointRsyncUrl"
scripts/wallet-sanity.sh "$entrypointIp:8001"
)
echo "+++ $entrypointIp: node count ($numNodes expected)"
(
set -x
$solana_keygen -o "$client_id"
$solana_bench_tps --network "$entrypointIp:8001" --identity "$client_id" --num-nodes "$numNodes" --converge-only
maybeRejectExtraNodes=
if $rejectExtraNodes; then
maybeRejectExtraNodes="--reject-extra-nodes"
fi
$solana_bench_tps \
--network "$entrypointIp:8001" \
--identity "$client_id" \
--num-nodes "$numNodes" \
$maybeRejectExtraNodes \
--converge-only
)
echo "--- $entrypointIp: verify ledger"


@ -0,0 +1,20 @@
#!/bin/bash -ex
[[ $(uname) = Linux ]] || exit 1
[[ $USER = root ]] || exit 1
[[ -d /home/solana/.ssh ]] || exit 1
# /solana-authorized_keys contains the public keys for users that should
# automatically be granted access to ALL testnets.
#
# To add an entry into this list:
# 1. Run: ssh-keygen -t ecdsa -N '' -f ~/.ssh/id-solana-testnet
# 2. Inline ~/.ssh/id-solana-testnet.pub below
cat > /solana-authorized_keys <<EOF
ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFBNwLw0i+rI312gWshojFlNw9NV7WfaKeeUsYADqOvM2o4yrO2pPw+sgW8W+/rPpVyH7zU9WVRgTME8NgFV1Vc=
EOF
sudo -u solana bash -c "
cat /solana-authorized_keys >> /home/solana/.ssh/authorized_keys
"

net/scripts/ec2-provider.sh Normal file

@ -0,0 +1,240 @@
# |source| this file
#
# Utilities for working with EC2 instances
#
zone=
region=
cloud_SetZone() {
zone="$1"
# AWS region is zone with the last character removed
region="${zone:0:$((${#zone} - 1))}"
}
# Set the default zone
cloud_SetZone "us-east-1b"
# sshPrivateKey should be globally defined whenever this function is called.
#
# TODO: Remove usage of the sshPrivateKey global
__cloud_SshPrivateKeyCheck() {
# shellcheck disable=SC2154
if [[ -z $sshPrivateKey ]]; then
echo Error: sshPrivateKey not defined
exit 1
fi
if [[ ! -r $sshPrivateKey ]]; then
echo "Error: file is not readable: $sshPrivateKey"
exit 1
fi
}
#
# __cloud_FindInstances
#
# Find instances with name matching the specified pattern.
#
# For each matching instance, an entry in the `instances` array will be added with the
# following information about the instance:
# "name:public IP:private IP"
#
# filter - The instances to filter on
#
# examples:
# $ __cloud_FindInstances "exact-machine-name"
# $ __cloud_FindInstances "all-machines-with-a-common-machine-prefix*"
#
__cloud_FindInstances() {
declare filter="$1"
instances=()
declare name publicIp privateIp
while read -r name publicIp privateIp; do
printf "%-30s | publicIp=%-16s privateIp=%s\n" "$name" "$publicIp" "$privateIp"
instances+=("$name:$publicIp:$privateIp")
done < <(aws ec2 describe-instances \
--region "$region" \
--filters \
"Name=tag:name,Values=$filter" \
"Name=instance-state-name,Values=pending,running" \
--query "Reservations[].Instances[].[InstanceId,PublicIpAddress,PrivateIpAddress]" \
--output text
)
}
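# e.g. a resulting `instances` entry (values are illustrative; the "name"
# column is the EC2 InstanceId returned by the query above):
#   "i-0123456789abcdef0:54.186.2.3:10.138.0.4"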
#
# cloud_FindInstances [namePrefix]
#
# Find instances with names matching the specified prefix
#
# For each matching instance, an entry in the `instances` array will be added with the
# following information about the instance:
# "name:public IP:private IP"
#
# namePrefix - The instance name prefix to look for
#
# examples:
# $ cloud_FindInstances all-machines-with-a-common-machine-prefix
#
cloud_FindInstances() {
declare namePrefix="$1"
__cloud_FindInstances "$namePrefix*"
}
#
# cloud_FindInstance [name]
#
# Find an instance with a name matching the exact pattern.
#
# For each matching instance, an entry in the `instances` array will be added with the
# following information about the instance:
# "name:public IP:private IP"
#
# name - The instance name to look for
#
# examples:
# $ cloud_FindInstance exact-machine-name
#
cloud_FindInstance() {
declare name="$1"
__cloud_FindInstances "$name"
}
#
# cloud_CreateInstances [networkName] [namePrefix] [numNodes] [imageName]
# [machineType] [bootDiskSize] [startupScript] [address]
#
# Creates one or more identical instances.
#
# networkName - unique name of this testnet
# namePrefix - unique string to prefix all the instance names with
# numNodes - number of instances to create
# imageName - Disk image for the instances
# machineType - EC2 instance type
# bootDiskSize - Optional size of the boot disk in GB
# startupScript - Optional startup script to execute when the instance boots
# address - Optional allocation ID of the Elastic IP address to attach to
#           the instance. Requires that |numNodes| = 1 and that the address
#           has been provisioned in the region that is hosting |zone|
#
# Tip: use cloud_FindInstances to locate the instances once this function
# returns
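#
# Example invocation (sketch; the AMI and instance type mirror the ec2
# defaults in gce.sh, and startup.sh is a hypothetical script):
#   cloud_CreateInstances my-testnet my-testnet-validator 3 \
#     ami-0466e26ccc0e752c1 m4.xlarge 1000 startup.sh ""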
cloud_CreateInstances() {
declare networkName="$1"
declare namePrefix="$2"
declare numNodes="$3"
declare imageName="$4"
declare machineType="$5"
declare optionalBootDiskSize="$6"
declare optionalStartupScript="$7"
declare optionalAddress="$8"
__cloud_SshPrivateKeyCheck
(
set -x
aws ec2 delete-key-pair --region "$region" --key-name "$networkName"
aws ec2 import-key-pair --region "$region" --key-name "$networkName" \
--public-key-material file://"${sshPrivateKey}".pub
)
declare -a args
args=(
--key-name "$networkName"
--count "$numNodes"
--region "$region"
--placement "AvailabilityZone=$zone"
--security-groups testnet
--image-id "$imageName"
--instance-type "$machineType"
--tag-specifications "ResourceType=instance,Tags=[{Key=name,Value=$namePrefix}]"
)
if [[ -n $optionalBootDiskSize ]]; then
args+=(
--block-device-mapping "[{\"DeviceName\": \"/dev/sda1\", \"Ebs\": { \"VolumeSize\": $optionalBootDiskSize }}]"
)
fi
if [[ -n $optionalStartupScript ]]; then
args+=(
--user-data "file://$optionalStartupScript"
)
fi
if [[ -n $optionalAddress ]]; then
[[ $numNodes = 1 ]] || {
echo "Error: address may not be supplied when provisioning multiple nodes: $optionalAddress"
exit 1
}
fi
(
set -x
aws ec2 run-instances "${args[@]}"
)
if [[ -n $optionalAddress ]]; then
cloud_FindInstance "$namePrefix"
if [[ ${#instances[@]} -ne 1 ]]; then
echo "Failed to find newly created instance: $namePrefix"
fi
declare instanceId
IFS=: read -r instanceId _ < <(echo "${instances[0]}")
(
set -x
# TODO: Poll that the instance has moved to the 'running' state instead of
# blindly sleeping for 30 seconds...
sleep 30
aws ec2 associate-address \
--instance-id "$instanceId" \
--region "$region" \
--allocation-id "$optionalAddress"
)
fi
}
#
# cloud_DeleteInstances
#
# Deletes all the instances listed in the `instances` array
#
cloud_DeleteInstances() {
if [[ ${#instances[@]} -eq 0 ]]; then
echo No instances to delete
return
fi
declare names=("${instances[@]/:*/}")
(
set -x
aws ec2 terminate-instances --region "$region" --instance-ids "${names[@]}"
)
}
#
# cloud_FetchFile [instanceName] [publicIp] [remoteFile] [localFile]
#
# Fetch a file from the given instance. This function uses a cloud-specific
# mechanism to fetch the file
#
cloud_FetchFile() {
# shellcheck disable=SC2034 # instanceName is unused
declare instanceName="$1"
declare publicIp="$2"
declare remoteFile="$3"
declare localFile="$4"
__cloud_SshPrivateKeyCheck
(
set -x
scp \
-o "StrictHostKeyChecking=no" \
-o "UserKnownHostsFile=/dev/null" \
-o "User=solana" \
-o "IdentityFile=$sshPrivateKey" \
-o "LogLevel=ERROR" \
-F /dev/null \
"solana@$publicIp:$remoteFile" "$localFile"
)
}
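# Example, as used by gce.sh to fetch the shared ssh private key from the leader:
#   cloud_FetchFile "$leaderName" "$leaderIp" /solana-id_ecdsa "$sshPrivateKey"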

net/scripts/gce-provider.sh Normal file

@ -0,0 +1,201 @@
# |source| this file
#
# Utilities for working with GCE instances
#
# Default zone
zone="us-west1-b"
cloud_SetZone() {
zone="$1"
}
#
# __cloud_FindInstances
#
# Find instances matching the specified pattern.
#
# For each matching instance, an entry in the `instances` array will be added with the
# following information about the instance:
# "name:zone:public IP:private IP"
#
# filter - The instances to filter on
#
# examples:
# $ __cloud_FindInstances "name=exact-machine-name"
# $ __cloud_FindInstances "name~^all-machines-with-a-common-machine-prefix"
#
__cloud_FindInstances() {
declare filter="$1"
instances=()
declare name zone publicIp privateIp status
while read -r name publicIp privateIp status; do
printf "%-30s | publicIp=%-16s privateIp=%s staus=%s\n" "$name" "$publicIp" "$privateIp" "$status"
instances+=("$name:$publicIp:$privateIp")
done < <(gcloud compute instances list \
--filter "$filter" \
--format 'value(name,networkInterfaces[0].accessConfigs[0].natIP,networkInterfaces[0].networkIP,status)')
}
#
# cloud_FindInstances [namePrefix]
#
# Find instances with names matching the specified prefix
#
# For each matching instance, an entry in the `instances` array will be added with the
# following information about the instance:
# "name:public IP:private IP"
#
# namePrefix - The instance name prefix to look for
#
# examples:
# $ cloud_FindInstances all-machines-with-a-common-machine-prefix
#
cloud_FindInstances() {
declare namePrefix="$1"
__cloud_FindInstances "name~^$namePrefix"
}
#
# cloud_FindInstance [name]
#
# Find an instance with a name matching the exact pattern.
#
# For each matching instance, an entry in the `instances` array will be added with the
# following information about the instance:
# "name:public IP:private IP"
#
# name - The instance name to look for
#
# examples:
# $ cloud_FindInstance exact-machine-name
#
cloud_FindInstance() {
declare name="$1"
__cloud_FindInstances "name=$name"
}
#
# cloud_CreateInstances [networkName] [namePrefix] [numNodes] [imageName]
#                       [machineType] [bootDiskSize] [startupScript]
#                       [address] [bootDiskType]
#
# Creates one more identical instances.
#
# networkName - unique name of this testnet
# namePrefix - unique string to prefix all the instance names with
# numNodes - number of instances to create
# imageName - Disk image for the instances
# machineType - GCE machine type. Note that this may also include an
# `--accelerator=` or other |gcloud compute instances create|
# options
# bootDiskSize - Optional size of the boot disk in GB
# startupScript - Optional startup script to execute when the instance boots
# address - Optional name of the GCE static IP address to attach to the
# instance. Requires that |numNodes| = 1 and that addressName
# has been provisioned in the GCE region that is hosting `$zone`
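# bootDiskType  - Optional boot disk type. Use pd-ssd to get an SSD on GCE.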
#
# Tip: use cloud_FindInstances to locate the instances once this function
# returns
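#
# Example invocation (sketch; mirrors how gce.sh embeds --accelerator flags in
# machineType to request a GPU leader; startup.sh is a hypothetical script):
#   cloud_CreateInstances my-testnet my-testnet-leader 1 ubuntu-16-04-cuda-9-2-new \
#     "n1-standard-16 --accelerator count=4,type=nvidia-tesla-k80" 1000 startup.sh "" pd-ssd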
cloud_CreateInstances() {
declare networkName="$1"
declare namePrefix="$2"
declare numNodes="$3"
declare imageName="$4"
declare machineType="$5"
declare optionalBootDiskSize="$6"
declare optionalStartupScript="$7"
declare optionalAddress="$8"
declare optionalBootDiskType="$9"
declare nodes
if [[ $numNodes = 1 ]]; then
nodes=("$namePrefix")
else
read -ra nodes <<<$(seq -f "${namePrefix}%0${#numNodes}g" 1 "$numNodes")
fi
declare -a args
args=(
--zone "$zone"
--tags testnet
--metadata "testnet=$networkName"
--image "$imageName"
--maintenance-policy TERMINATE
--no-restart-on-failure
)
# shellcheck disable=SC2206 # Do not want to quote $machineType as it may contain extra args
args+=(--machine-type $machineType)
if [[ -n $optionalBootDiskSize ]]; then
args+=(
--boot-disk-size "${optionalBootDiskSize}GB"
)
fi
if [[ -n $optionalStartupScript ]]; then
args+=(
--metadata-from-file "startup-script=$optionalStartupScript"
)
fi
if [[ -n $optionalBootDiskType ]]; then
args+=(
--boot-disk-type "${optionalBootDiskType}"
)
fi
if [[ -n $optionalAddress ]]; then
[[ $numNodes = 1 ]] || {
echo "Error: address may not be supplied when provisioning multiple nodes: $optionalAddress"
exit 1
}
args+=(
--address "$optionalAddress"
)
fi
(
set -x
gcloud beta compute instances create "${nodes[@]}" "${args[@]}"
)
}
#
# cloud_DeleteInstances
#
# Deletes all the instances listed in the `instances` array
#
cloud_DeleteInstances() {
if [[ ${#instances[@]} -eq 0 ]]; then
echo No instances to delete
return
fi
declare names=("${instances[@]/:*/}")
(
set -x
gcloud beta compute instances delete --zone "$zone" --quiet "${names[@]}"
)
}
#
# cloud_FetchFile [instanceName] [publicIp] [remoteFile] [localFile]
#
# Fetch a file from the given instance. This function uses a cloud-specific
# mechanism to fetch the file
#
cloud_FetchFile() {
declare instanceName="$1"
# shellcheck disable=SC2034 # publicIp is unused
declare publicIp="$2"
declare remoteFile="$3"
declare localFile="$4"
(
set -x
gcloud compute scp --zone "$zone" "$instanceName:$remoteFile" "$localFile"
)
}


@ -1,187 +0,0 @@
# |source| this file
#
# Utilities for working with gcloud
#
#
# gcloud_FindInstances [filter] [options]
#
# Find instances matching the specified pattern.
#
# For each matching instance, an entry in the `instances` array will be added with the
# following information about the instance:
# "name:zone:public IP:private IP"
#
# filter - The instances to filter on
# options - If set to the string "show", the list of instances will be echoed
# to stdout
#
# examples:
# $ gcloud_FindInstances "name=exact-machine-name"
# $ gcloud_FindInstances "name~^all-machines-with-a-common-machine-prefix"
#
gcloud_FindInstances() {
declare filter="$1"
declare options="$2"
instances=()
declare name zone publicIp privateIp status
while read -r name zone publicIp privateIp status; do
if [[ $status != RUNNING ]]; then
echo "Warning: $name is not RUNNING, ignoring it."
continue
fi
if [[ $options = show ]]; then
printf "%-30s | %-16s publicIp=%-16s privateIp=%s\n" "$name" "$zone" "$publicIp" "$privateIp"
fi
instances+=("$name:$zone:$publicIp:$privateIp")
done < <(gcloud compute instances list \
--filter="$filter" \
--format 'value(name,zone,networkInterfaces[0].accessConfigs[0].natIP,networkInterfaces[0].networkIP,status)')
}
#
# gcloud_ForEachInstance [cmd] [extra args to cmd]
#
# Execute a command for each element in the `instances` array
#
# cmd - The command to execute on each instance
# The command will receive arguments followed by any
# additional arguments supplied to gcloud_ForEachInstance:
# name - name of the instance
# zone - zone the instance is located in
# publicIp - The public IP address of this instance
# privateIp - The private IP address of this instance
# count - Monotonically increasing count for each
# invocation of cmd, starting at 1
# ... - Extra args to cmd..
#
#
gcloud_ForEachInstance() {
declare cmd="$1"
shift
[[ -n $cmd ]] || { echo gcloud_ForEachInstance: cmd not specified; exit 1; }
declare count=1
for info in "${instances[@]}"; do
declare name zone publicIp privateIp
IFS=: read -r name zone publicIp privateIp < <(echo "$info")
eval "$cmd" "$name" "$zone" "$publicIp" "$privateIp" "$count" "$@"
count=$((count + 1))
done
}
#
# gcloud_CreateInstances [namePrefix] [numNodes] [zone] [imageName]
# [machineType] [bootDiskSize] [accelerator]
# [startupScript] [address]
#
# Creates one or more identical instances.
#
# namePrefix - unique string to prefix all the instance names with
# numNodes - number of instances to create
# zone - zone to create the instances in
# imageName - Disk image for the instances
# machineType - GCE machine type
# bootDiskSize - Optional size of the boot disk
# accelerator - Optional accelerator to attach to the instance(s);
#               e.g., request 4 K80 GPUs with "count=4,type=nvidia-tesla-k80"
# startupScript - Optional startup script to execute when the instance boots
# address - Optional name of the GCE static IP address to attach to the
# instance. Requires that |numNodes| = 1 and that addressName
# has been provisioned in the GCE region that is hosting |zone|
#
# Tip: use gcloud_FindInstances to locate the instances once this function
# returns
gcloud_CreateInstances() {
declare namePrefix="$1"
declare numNodes="$2"
declare zone="$3"
declare imageName="$4"
declare machineType="$5"
declare optionalBootDiskSize="$6"
declare optionalAccelerator="$7"
declare optionalStartupScript="$8"
declare optionalAddress="$9"
declare nodes
if [[ $numNodes = 1 ]]; then
nodes=("$namePrefix")
else
read -ra nodes <<<$(seq -f "${namePrefix}%0${#numNodes}g" 1 "$numNodes")
fi
declare -a args
args=(
"--zone=$zone"
"--tags=testnet"
"--image=$imageName"
"--machine-type=$machineType"
)
if [[ -n $optionalBootDiskSize ]]; then
args+=(
"--boot-disk-size=$optionalBootDiskSize"
)
fi
if [[ -n $optionalAccelerator ]]; then
args+=(
"--accelerator=$optionalAccelerator"
--maintenance-policy TERMINATE
--restart-on-failure
)
fi
if [[ -n $optionalStartupScript ]]; then
args+=(
--metadata-from-file "startup-script=$optionalStartupScript"
)
fi
if [[ -n $optionalAddress ]]; then
[[ $numNodes = 1 ]] || {
echo "Error: address may not be supplied when provisioning multiple nodes: $optionalAddress"
exit 1
}
args+=(
"--address=$optionalAddress"
)
fi
(
set -x
gcloud beta compute instances create "${nodes[@]}" "${args[@]}"
)
}
#
# gcloud_DeleteInstances [yes]
#
# Deletes all the instances listed in the `instances` array
#
# If yes = "true", skip the delete confirmation
#
gcloud_DeleteInstances() {
declare maybeQuiet=
if [[ $1 = true ]]; then
maybeQuiet=--quiet
fi
if [[ ${#instances[0]} -eq 0 ]]; then
echo No instances to delete
return
fi
declare names=("${instances[@]/:*/}")
# Assume all instances are in the same zone
# TODO: One day this assumption will be invalid
declare zone
IFS=: read -r _ zone _ < <(echo "${instances[0]}")
(
set -x
gcloud beta compute instances delete --zone "$zone" $maybeQuiet "${names[@]}"
)
}

net/scripts/install-docker.sh Executable file

@ -0,0 +1,25 @@
#!/bin/bash -ex
[[ $(uname) = Linux ]] || exit 1
[[ $USER = root ]] || exit 1
apt-get update
apt-get install -y \
apt-transport-https \
ca-certificates \
curl \
software-properties-common
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
apt-get update
apt-get install -y docker-ce
docker run hello-world
# Grant the solana user access to docker
if id solana; then
addgroup solana docker
fi


@ -13,8 +13,8 @@ sysctl -w kernel.sysrq=$(( $(cat /proc/sys/kernel/sysrq) | 64 ))
if command -v earlyoom; then
systemctl status earlyoom
else
wget http://ftp.us.debian.org/debian/pool/main/e/earlyoom/earlyoom_1.1-2_amd64.deb
apt install --quiet --yes ./earlyoom_1.1-2_amd64.deb
wget -r -l1 -np http://ftp.us.debian.org/debian/pool/main/e/earlyoom/ -A 'earlyoom_1.1-*_amd64.deb' -e robots=off -nd
apt install --quiet --yes ./earlyoom_1.1-*_amd64.deb
cat > earlyoom <<OOM
# use the kernel OOM killer, trigger at 20% available RAM,


@ -0,0 +1,23 @@
[package]
name = "move_funds"
version = "0.1.0"
authors = [
"Anatoly Yakovenko <anatoly@solana.com>",
"Greg Fitzgerald <greg@solana.com>",
"Stephen Akridge <stephen@solana.com>",
"Michael Vines <mvines@solana.com>",
"Rob Walker <rob@solana.com>",
"Pankaj Garg <pankaj@solana.com>",
"Tyera Eulberg <tyera@solana.com>",
"Jack May <jack@solana.com>",
]
[dependencies]
bincode = "1.0.0"
generic-array = { version = "0.12.0", default-features = false, features = ["serde"] }
solana_program_interface = { path = "../../common" }
[lib]
name = "move_funds"
crate-type = ["dylib"]


@ -0,0 +1,48 @@
extern crate bincode;
extern crate solana_program_interface;
use bincode::deserialize;
use solana_program_interface::account::KeyedAccount;
#[no_mangle]
pub extern "C" fn process(infos: &mut Vec<KeyedAccount>, data: &[u8]) {
let tokens: i64 = deserialize(data).unwrap();
if infos[0].account.tokens >= tokens {
infos[0].account.tokens -= tokens;
infos[1].account.tokens += tokens;
} else {
println!(
"Insufficient funds, asked {}, only had {}",
tokens, infos[0].account.tokens
);
}
}
#[cfg(test)]
mod tests {
use super::*;
use bincode::serialize;
use solana_program_interface::account::Account;
use solana_program_interface::pubkey::Pubkey;
#[test]
fn test_move_funds() {
let tokens: i64 = 100;
let data: Vec<u8> = serialize(&tokens).unwrap();
let keys = vec![Pubkey::default(); 2];
let mut accounts = vec![Account::default(), Account::default()];
accounts[0].tokens = 100;
accounts[1].tokens = 1;
{
let mut infos: Vec<KeyedAccount> = Vec::new();
for (key, account) in keys.iter().zip(&mut accounts).collect::<Vec<_>>() {
infos.push(KeyedAccount { key, account });
}
process(&mut infos, &data);
}
assert_eq!(0, accounts[0].tokens);
assert_eq!(101, accounts[1].tokens);
}
}

programs/noop/Cargo.toml Normal file

@ -0,0 +1,21 @@
[package]
name = "noop"
version = "0.1.0"
authors = [
"Anatoly Yakovenko <anatoly@solana.com>",
"Greg Fitzgerald <greg@solana.com>",
"Stephen Akridge <stephen@solana.com>",
"Michael Vines <mvines@solana.com>",
"Rob Walker <rob@solana.com>",
"Pankaj Garg <pankaj@solana.com>",
"Tyera Eulberg <tyera@solana.com>",
"Jack May <jack@solana.com>",
]
[dependencies]
solana_program_interface = { path = "../../common" }
[lib]
name = "noop"
crate-type = ["dylib"]

programs/noop/src/lib.rs Normal file

@ -0,0 +1,6 @@
extern crate solana_program_interface;
use solana_program_interface::account::KeyedAccount;
#[no_mangle]
pub extern "C" fn process(_infos: &mut Vec<KeyedAccount>, _data: &[u8]) {}

programs/print/Cargo.toml Normal file

@ -0,0 +1,21 @@
[package]
name = "print"
version = "0.1.0"
authors = [
"Anatoly Yakovenko <anatoly@solana.com>",
"Greg Fitzgerald <greg@solana.com>",
"Stephen Akridge <stephen@solana.com>",
"Michael Vines <mvines@solana.com>",
"Rob Walker <rob@solana.com>",
"Pankaj Garg <pankaj@solana.com>",
"Tyera Eulberg <tyera@solana.com>",
"Jack May <jack@solana.com>",
]
[dependencies]
solana_program_interface = { path = "../../common" }
[lib]
name = "print"
crate-type = ["dylib"]


@ -0,0 +1,9 @@
extern crate solana_program_interface;
use solana_program_interface::account::KeyedAccount;
#[no_mangle]
pub extern "C" fn process(infos: &mut Vec<KeyedAccount>, _data: &[u8]) {
println!("AccountInfos: {:#?}", infos);
//println!("data: {:#?}", data);
}


@ -1,77 +0,0 @@
Two players want to play tic-tac-toe with each other on Solana.
The tic-tac-toe program has already been provisioned on the network, and the
program author has advertised the following information to potential gamers:
* `tictactoe_publickey` - the program's public key
* `tictactoe_gamestate_size` - the number of bytes needed to maintain the game state
The game state is a well-documented data structure consisting of:
- Player 1's public key
- Player 2's public key
- Game status. An 8-bit value where:
* 0 = game uninitialized
* 1 = Player 1's turn
* 2 = Player 2's turn
* 3 = Player 1 won
* 4 = Player 2 won
- Current board configuration. A 3x3 character array containing the values '\0', 'X' or 'O'
### Game Setup
1. Two players want to start a game. Player 2 sends Player 1 their public key,
`player2_publickey` off-chain (IM, email, etc)
2. Player 1 creates a new keypair to represent the game state, `(gamestate_publickey,
gamestate_privatekey)`.
3. Player 1 issues an allocate_memory transaction, assigning that memory page to the
tic-tac-toe program. The `memory_fee` is used to *rent* the memory page for the
duration of the game and is subtracted from current account balance of Player
1:
```
allocate_memory(gamestate_publickey, tictactoe_publickey, tictactoe_gamestate_size, memory_fee)
```
4. Game state is then initialized by issuing a *new* call transaction to the
tic-tac-toe program. This transaction is signed by `gamestate_privatekey`, known only
to Player 1.
```
call(tictactoe_publickey, gamestate_publickey, 'new', player1_publickey, player2_publickey)
```
5. Once the game is initialized, Player 1 shares `gamestate_publickey` with
Player 2 off-chain (IM, email, etc)
Note that it's likely each player will prefer to generate a game-specific keypair
rather than sharing their primary public key (`player1_publickey`,
`player2_publickey`) with each other and the tic-tac-toe program.
### Game Play
Both players poll the network, via a **TBD off-chain RPC API**, to read the
current game state from the `gamestate_publickey` memory page.
When the *Game status* field indicates it's their turn, the player issues a
*move* call transaction passing in the board position (1..9) that they want to
mark as X or O:
```
call(tictactoe_publickey, gamestate_publickey, 'move', position)
```
The program will reject the transaction if it was not signed by the player whose
turn it is.
The outcome of the *move* call is also observed by polling the current game state via
the **TBD off-chain RPC API**.
### Game Cancellation
At any time Player 1 may conclude the game by issuing:
```
call(tictactoe_publickey, gamestate_publickey, 'abort')
```
causing any remaining *rent* tokens assigned to the `gamestate_publickey` page
to be transferred back to Player 1 by the tic-tac-toe program. Lastly, the
network recognizes the empty account and frees the `gamestate_publickey` memory
page.


@ -0,0 +1,124 @@
### Wallet CLI
The general form is:
```
$ solana-wallet [common-options] [command] [command-specific options]
```
`common-options` include:
* `--fee xyz` - Transaction fee (0 by default)
* `--output file` - Write the raw Transaction to a file instead of sending it
`command` variants:
* `pay`
* `cancel`
* `send-signature`
* `send-timestamp`
#### Unconditional Immediate Transfer
```sh
// Command
$ solana-wallet pay <PUBKEY> 123
// Return
<TX_SIGNATURE>
```
#### Post-Dated Transfer
```sh
// Command
$ solana-wallet pay <PUBKEY> 123 \
--after 2018-12-24T23:59:00 --require-timestamp-from <PUBKEY>
// Return
{signature: <TX_SIGNATURE>, processId: <PROCESS_ID>}
```
*`require-timestamp-from` is optional. If not provided, the transaction will expect a timestamp signed by this wallet's secret key*
#### Authorized Transfer
A third party must send a signature to unlock the tokens.
```sh
// Command
$ solana-wallet pay <PUBKEY> 123 \
--require-signature-from <PUBKEY>
// Return
{signature: <TX_SIGNATURE>, processId: <PROCESS_ID>}
```
#### Post-Dated and Authorized Transfer
```sh
// Command
$ solana-wallet pay <PUBKEY> 123 \
--after 2018-12-24T23:59 --require-timestamp-from <PUBKEY> \
--require-signature-from <PUBKEY>
// Return
{signature: <TX_SIGNATURE>, processId: <PROCESS_ID>}
```
#### Multiple Witnesses
```sh
// Command
$ solana-wallet pay <PUBKEY> 123 \
--require-signature-from <PUBKEY> \
--require-signature-from <PUBKEY>
// Return
{signature: <TX_SIGNATURE>, processId: <PROCESS_ID>}
```
#### Cancelable Transfer
```sh
// Command
$ solana-wallet pay <PUBKEY> 123 \
--require-signature-from <PUBKEY> \
--cancelable
// Return
{signature: <TX_SIGNATURE>, processId: <PROCESS_ID>}
```
#### Cancel Transfer
```sh
// Command
$ solana-wallet cancel <PROCESS_ID>
// Return
<TX_SIGNATURE>
```
#### Send Signature
```sh
// Command
$ solana-wallet send-signature <PUBKEY> <PROCESS_ID>
// Return
<TX_SIGNATURE>
```
#### Indicate Elapsed Time
Use the current system time:
```sh
// Command
$ solana-wallet send-timestamp <PUBKEY> <PROCESS_ID>
// Return
<TX_SIGNATURE>
```
Or specify some other arbitrary timestamp:
```sh
// Command
$ solana-wallet send-timestamp <PUBKEY> <PROCESS_ID> --date 2018-12-24T23:59:00
// Return
<TX_SIGNATURE>
```
## Javascript solana-web3.js Interface
*TBD, but will look similar to what the Wallet CLI offers wrapped up in a
Javascript object*


@ -10,6 +10,8 @@ cd "$(dirname "$0")"
# shellcheck source=scripts/configure-metrics.sh
source configure-metrics.sh
packets_sent=0
packets_sent_diff=0
packets_received=0
packets_received_diff=0
receive_errors=0
@ -22,6 +24,10 @@ update_netstat() {
net_stat=$(netstat -suna)
declare stats
stats=$(echo "$net_stat" | awk 'BEGIN {tmp_var = 0} /packets sent/ {tmp_var = $1} END { print tmp_var }')
packets_sent_diff=$((stats - packets_sent))
packets_sent="$stats"
stats=$(echo "$net_stat" | awk 'BEGIN {tmp_var = 0} /packets received/ {tmp_var = $1} END { print tmp_var }')
packets_received_diff=$((stats - packets_received))
packets_received="$stats"
@ -39,7 +45,7 @@ update_netstat
while true; do
update_netstat
report="packets_received=$packets_received_diff,receive_errors=$receive_errors_diff,rcvbuf_errors=$rcvbuf_errors_diff"
report="packets_sent=$packets_sent_diff,packets_received=$packets_received_diff,receive_errors=$receive_errors_diff,rcvbuf_errors=$rcvbuf_errors_diff"
echo "$report"
./metrics-write-datapoint.sh "net-stats,hostname=$HOSTNAME $report"


@ -5,12 +5,13 @@
cd "$(dirname "$0")"/..
if [[ -n "$USE_SNAP" ]]; then
# TODO: Merge wallet.sh functionality into solana-wallet proper and
# remove this USE_SNAP case
wallet="solana.wallet $1"
# shellcheck source=multinode-demo/common.sh
source multinode-demo/common.sh
if [[ -z $1 ]]; then # no network argument, use default
entrypoint=()
else
wallet="multinode-demo/wallet.sh $1"
entrypoint=(-n "$1")
fi
# Tokens transferred to this address are lost forever...
@ -19,7 +20,7 @@ garbage_address=vS3ngn1TfQmpsW1Z4NkLuqNAQFF3dYQw8UZ6TCx9bmq
check_balance_output() {
declare expected_output="$1"
exec 42>&1
output=$($wallet balance | tee >(cat - >&42))
output=$($solana_wallet "${entrypoint[@]}" balance | tee >(cat - >&42))
if [[ ! "$output" =~ $expected_output ]]; then
echo "Balance is incorrect. Expected: $expected_output"
exit 1
@ -28,18 +29,18 @@ check_balance_output() {
pay_and_confirm() {
exec 42>&1
signature=$($wallet pay "$@" | tee >(cat - >&42))
$wallet confirm "$signature"
signature=$($solana_wallet "${entrypoint[@]}" pay "$@" | tee >(cat - >&42))
$solana_wallet "${entrypoint[@]}" confirm "$signature"
}
$wallet reset
$wallet address
$solana_keygen
$solana_wallet "${entrypoint[@]}" address
check_balance_output "No account found" "Your balance is: 0"
$wallet airdrop --tokens 60
$solana_wallet "${entrypoint[@]}" airdrop 60
check_balance_output "Your balance is: 60"
$wallet airdrop --tokens 40
$solana_wallet "${entrypoint[@]}" airdrop 40
check_balance_output "Your balance is: 100"
pay_and_confirm --to $garbage_address --tokens 99
pay_and_confirm $garbage_address 99
check_balance_output "Your balance is: 1"
echo PASS


@ -55,9 +55,7 @@ apps:
- network-bind
- home
wallet:
# TODO: Merge wallet.sh functionality into solana-wallet proper
command: multinode-demo/wallet.sh
#command: solana-wallet
command: solana-wallet
plugs:
- network
- home

File diff suppressed because it is too large


@ -5,56 +5,121 @@
use bank::Bank;
use bincode::deserialize;
use counter::Counter;
use entry::Entry;
use log::Level;
use packet::{PacketRecycler, Packets, SharedPackets};
use packet::Packets;
use poh_recorder::PohRecorder;
use rayon::prelude::*;
use record_stage::Signal;
use result::{Error, Result};
use service::Service;
use sigverify_stage::VerifiedPackets;
use std::net::SocketAddr;
use std::sync::atomic::AtomicUsize;
use std::sync::mpsc::{channel, Receiver, RecvTimeoutError, Sender};
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::mpsc::{channel, Receiver, RecvTimeoutError};
use std::sync::{Arc, Mutex};
use std::thread::sleep;
use std::thread::{self, Builder, JoinHandle};
use std::time::Duration;
use std::time::Instant;
use timing;
use transaction::Transaction;
// number of threads is 1 until mt bank is ready
pub const NUM_THREADS: usize = 1;
/// Stores the stage's thread handle and output receiver.
pub struct BankingStage {
/// Handle to the stage's thread.
thread_hdl: JoinHandle<()>,
thread_hdls: Vec<JoinHandle<()>>,
}
pub enum Config {
/// * `Tick` - Run full PoH thread. Tick is a rough estimate of how many hashes to roll before transmitting a new entry.
Tick(usize),
/// * `Sleep` - Low power mode. Sleep is a rough estimate of how long to sleep before rolling the
/// PoH once and producing 1 tick.
Sleep(Duration),
}
impl Default for Config {
fn default() -> Config {
// TODO: Change this to Tick to enable PoH
Config::Sleep(Duration::from_millis(500))
}
}
impl BankingStage {
/// Create the stage using `bank`. Exit when `verified_receiver` is dropped.
/// Discard input packets using `packet_recycler` to minimize memory
/// allocations in a previous stage such as the `fetch_stage`.
pub fn new(
bank: Arc<Bank>,
verified_receiver: Receiver<Vec<(SharedPackets, Vec<u8>)>>,
packet_recycler: PacketRecycler,
) -> (Self, Receiver<Signal>) {
let (signal_sender, signal_receiver) = channel();
let thread_hdl = Builder::new()
.name("solana-banking-stage".to_string())
.spawn(move || loop {
if let Err(e) = Self::process_packets(
&bank,
&verified_receiver,
&signal_sender,
&packet_recycler,
) {
bank: &Arc<Bank>,
verified_receiver: Receiver<VerifiedPackets>,
config: Config,
) -> (Self, Receiver<Vec<Entry>>) {
let (entry_sender, entry_receiver) = channel();
let shared_verified_receiver = Arc::new(Mutex::new(verified_receiver));
let poh = PohRecorder::new(bank.clone(), entry_sender);
let tick_poh = poh.clone();
// The tick producer is headless, so when it exits it should notify the banking stage.
// Since channels are not used to talk between these threads, an AtomicBool is used as a
// signal.
let poh_exit = Arc::new(AtomicBool::new(false));
let banking_exit = poh_exit.clone();
// Single thread to generate entries from many banks.
// This thread talks to poh_service and broadcasts the entries once they have been recorded.
// Once an entry has been recorded, its last_id is registered with the bank.
let tick_producer = Builder::new()
.name("solana-banking-stage-tick_producer".to_string())
.spawn(move || {
if let Err(e) = Self::tick_producer(&tick_poh, &config, &poh_exit) {
match e {
Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
_ => error!("{:?}", e),
Error::SendError => (),
_ => error!(
"solana-banking-stage-tick_producer unexpected error {:?}",
e
),
}
}
})
.unwrap();
(BankingStage { thread_hdl }, signal_receiver)
debug!("tick producer exiting");
poh_exit.store(true, Ordering::Relaxed);
}).unwrap();
// Many banks that process transactions in parallel.
let mut thread_hdls: Vec<JoinHandle<()>> = (0..NUM_THREADS)
.into_iter()
.map(|_| {
let thread_bank = bank.clone();
let thread_verified_receiver = shared_verified_receiver.clone();
let thread_poh = poh.clone();
let thread_banking_exit = banking_exit.clone();
Builder::new()
.name("solana-banking-stage-tx".to_string())
.spawn(move || {
loop {
if let Err(e) = Self::process_packets(
&thread_bank,
&thread_verified_receiver,
&thread_poh,
) {
debug!("got error {:?}", e);
match e {
Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => {
break
}
Error::RecvError(_) => break,
Error::SendError => break,
_ => error!("solana-banking-stage-tx {:?}", e),
}
}
if thread_banking_exit.load(Ordering::Relaxed) {
debug!("tick service exited");
break;
}
}
thread_banking_exit.store(true, Ordering::Relaxed);
}).unwrap()
}).collect();
thread_hdls.push(tick_producer);
(BankingStage { thread_hdls }, entry_receiver)
}
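// Illustrative sketch (hypothetical helper) of the exit-signal pattern used
// above: the tick producer has no channel back to the worker threads, so it
// flips a shared AtomicBool on the way out, and each worker polls the flag
// between batches of work.
fn example_exit_signal() {
    let exit = Arc::new(AtomicBool::new(false));
    let worker_exit = exit.clone();
    let worker = thread::spawn(move || {
        while !worker_exit.load(Ordering::Relaxed) {
            // one batch of work per iteration, then re-check the flag
        }
    });
    exit.store(true, Ordering::Relaxed); // producer exiting
    worker.join().unwrap();
}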
/// Convert the transactions from a blob of binary data to a vector of transactions and
@ -66,21 +131,75 @@ impl BankingStage {
deserialize(&x.data[0..x.meta.size])
.map(|req| (req, x.meta.addr()))
.ok()
}).collect()
}
fn tick_producer(poh: &PohRecorder, config: &Config, poh_exit: &AtomicBool) -> Result<()> {
loop {
match *config {
Config::Tick(num) => {
for _ in 0..num {
poh.hash();
}
}
Config::Sleep(duration) => {
sleep(duration);
}
}
poh.tick()?;
if poh_exit.load(Ordering::Relaxed) {
debug!("tick service exited");
return Ok(());
}
}
}
fn process_transactions(
bank: &Arc<Bank>,
transactions: &[Transaction],
poh: &PohRecorder,
) -> Result<()> {
debug!("transactions: {}", transactions.len());
let mut chunk_start = 0;
while chunk_start != transactions.len() {
let chunk_end = chunk_start + Entry::num_will_fit(&transactions[chunk_start..]);
let results = bank.process_transactions(&transactions[chunk_start..chunk_end]);
let processed_transactions: Vec<_> = transactions[chunk_start..chunk_end]
.into_iter()
.enumerate()
.filter_map(|(i, x)| match results[i] {
Ok(_) => Some(x.clone()),
Err(ref e) => {
debug!("process transaction failed {:?}", e);
None
}
}).collect();
if !processed_transactions.is_empty() {
let hash = Transaction::hash(&processed_transactions);
debug!("processed ok: {} {}", processed_transactions.len(), hash);
poh.record(hash, processed_transactions)?;
}
chunk_start = chunk_end;
}
debug!("done process_transactions");
Ok(())
}
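// Illustrative sketch of the chunking invariant above: each recorded batch is
// sized with Entry::num_will_fit so that one Entry never overflows a blob. A
// generic version of the same loop shape (hypothetical helper; assumes the
// sizing function returns at least 1 for a non-empty slice, as
// Entry::num_will_fit does for transactions):
fn example_chunks<'a, T>(items: &'a [T], num_will_fit: impl Fn(&[T]) -> usize) -> Vec<&'a [T]> {
    let mut chunks = Vec::new();
    let mut start = 0;
    while start != items.len() {
        // size each chunk by how many remaining items fit in one unit
        let end = start + num_will_fit(&items[start..]);
        chunks.push(&items[start..end]);
        start = end;
    }
    chunks
}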
/// Process the incoming packets.
pub fn process_packets(
bank: &Arc<Bank>,
verified_receiver: &Arc<Mutex<Receiver<VerifiedPackets>>>,
poh: &PohRecorder,
) -> Result<()> {
let recv_start = Instant::now();
let mms = verified_receiver
.lock()
.unwrap()
.recv_timeout(Duration::from_millis(100))?;
let mut reqs_len = 0;
let mms_len = mms.len();
info!(
@ -89,33 +208,35 @@ impl BankingStage {
timing::duration_as_ms(&recv_start.elapsed()),
mms.len(),
);
inc_new_counter_info!("banking_stage-entries_received", mms_len);
let bank_starting_tx_count = bank.transaction_count();
let count = mms.iter().map(|x| x.1.len()).sum();
let proc_start = Instant::now();
for (msgs, vers) in mms {
let transactions = Self::deserialize_transactions(&msgs.read().unwrap());
reqs_len += transactions.len();
debug!("transactions received {}", transactions.len());
let transactions: Vec<_> = transactions
.into_iter()
.zip(vers)
.filter_map(|(tx, ver)| match tx {
None => None,
Some((tx, _addr)) => if ver != 0 {
Some(tx)
} else {
None
},
}).collect();
debug!("verified transactions {}", transactions.len());
Self::process_transactions(bank, &transactions, poh)?;
}
let total_time_s = timing::duration_as_s(&proc_start.elapsed());
let total_time_ms = timing::duration_as_ms(&proc_start.elapsed());
info!(
@ -136,72 +257,157 @@ impl BankingStage {
}
impl Service for BankingStage {
type JoinReturnType = ();
fn join(self) -> thread::Result<()> {
for thread_hdl in self.thread_hdls {
thread_hdl.join()?;
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
use bank::Bank;
use ledger::Block;
use mint::Mint;
use packet::to_packets;
use signature::{Keypair, KeypairUtil};
use std::thread::sleep;
use system_transaction::SystemTransaction;
use transaction::Transaction;
#[test]
fn test_banking_stage_shutdown1() {
let bank = Bank::new(&Mint::new(2));
let (verified_sender, verified_receiver) = channel();
let (banking_stage, _entry_receiver) =
BankingStage::new(&Arc::new(bank), verified_receiver, Default::default());
drop(verified_sender);
assert_eq!(banking_stage.join().unwrap(), ());
}
#[test]
fn test_banking_stage_shutdown2() {
let bank = Bank::new(&Mint::new(2));
let (_verified_sender, verified_receiver) = channel();
let (banking_stage, entry_receiver) =
BankingStage::new(&Arc::new(bank), verified_receiver, Default::default());
drop(entry_receiver);
assert_eq!(banking_stage.join().unwrap(), ());
}
#[test]
fn test_banking_stage_tick() {
let bank = Arc::new(Bank::new(&Mint::new(2)));
let start_hash = bank.last_id();
let (verified_sender, verified_receiver) = channel();
let (banking_stage, entry_receiver) = BankingStage::new(
&bank,
verified_receiver,
Config::Sleep(Duration::from_millis(1)),
);
sleep(Duration::from_millis(500));
drop(verified_sender);
let entries: Vec<_> = entry_receiver.iter().flat_map(|x| x).collect();
assert!(entries.len() != 0);
assert!(entries.verify(&start_hash));
assert_eq!(entries[entries.len() - 1].id, bank.last_id());
assert_eq!(banking_stage.join().unwrap(), ());
}
#[test]
fn test_banking_stage_entries_only() {
let mint = Mint::new(2);
let bank = Arc::new(Bank::new(&mint));
let start_hash = bank.last_id();
let (verified_sender, verified_receiver) = channel();
let (banking_stage, entry_receiver) =
BankingStage::new(&bank, verified_receiver, Default::default());
// good tx
let keypair = mint.keypair();
let tx = Transaction::system_new(&keypair, keypair.pubkey(), 1, start_hash);
// good tx, but no verify
let tx_no_ver = Transaction::system_new(&keypair, keypair.pubkey(), 1, start_hash);
// bad tx, AccountNotFound
let keypair = Keypair::new();
let tx_anf = Transaction::system_new(&keypair, keypair.pubkey(), 1, start_hash);
// send 'em over
let packets = to_packets(&[tx, tx_no_ver, tx_anf]);
// glad they all fit
assert_eq!(packets.len(), 1);
verified_sender // tx, no_ver, anf
.send(vec![(packets[0].clone(), vec![1u8, 0u8, 1u8])])
.unwrap();
drop(verified_sender);
//receive entries + ticks
let entries: Vec<_> = entry_receiver.iter().map(|x| x).collect();
assert!(entries.len() >= 1);
let mut last_id = start_hash;
entries.iter().for_each(|entries| {
assert_eq!(entries.len(), 1);
assert!(entries.verify(&last_id));
last_id = entries.last().unwrap().id;
});
drop(entry_receiver);
assert_eq!(banking_stage.join().unwrap(), ());
}
#[test]
fn test_banking_stage_entryfication() {
// In this attack we'll demonstrate that a verifier can interpret the ledger
// differently if either the server doesn't signal the ledger to add an
// Entry OR if the verifier tries to parallelize across multiple Entries.
let mint = Mint::new(2);
let bank = Arc::new(Bank::new(&mint));
let (verified_sender, verified_receiver) = channel();
let (banking_stage, entry_receiver) =
BankingStage::new(&bank, verified_receiver, Default::default());
// Process a batch that includes a transaction that receives two tokens.
let alice = Keypair::new();
let tx = Transaction::system_new(&mint.keypair(), alice.pubkey(), 2, mint.last_id());
let packets = to_packets(&[tx]);
verified_sender
.send(vec![(packets[0].clone(), vec![1u8])])
.unwrap();
// Process a second batch that spends one of those tokens.
let tx = Transaction::system_new(&alice, mint.pubkey(), 1, mint.last_id());
let packets = to_packets(&[tx]);
verified_sender
.send(vec![(packets[0].clone(), vec![1u8])])
.unwrap();
drop(verified_sender);
assert_eq!(banking_stage.join().unwrap(), ());
// Collect the ledger and feed it to a new bank.
let entries: Vec<_> = entry_receiver.iter().flat_map(|x| x).collect();
// same assertion as running through the bank, really...
assert!(entries.len() >= 2);
// Assert the user holds one token, not two. If the stage only outputs one
// entry, then the second transaction will be rejected, because it drives
// the account balance below zero before the credit is added.
let bank = Bank::new(&mint);
for entry in entries {
assert!(
bank.process_transactions(&entry.transactions)
.into_iter()
.all(|x| x.is_ok())
);
}
assert_eq!(bank.get_balance(&alice.pubkey()), 1);
}
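// Worked through: the mint starts with 2 tokens; the first entry credits
// alice 2 (mint -> 0, alice -> 2) and the second debits 1 back (alice -> 1).
// Had both transactions landed in a single entry, a verifier replaying that
// entry in parallel could apply the debit against alice's pre-credit balance
// of 0, which is why the assertion above requires at least two entries.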
}


@ -3,7 +3,7 @@ extern crate solana;
use clap::{App, Arg};
use solana::netutil::bind_to;
use solana::packet::{Packet, SharedPackets, BLOB_SIZE, PACKET_DATA_SIZE};
use solana::result::Result;
use solana::streamer::{receiver, PacketReceiver};
use std::cmp::max;
@ -16,9 +16,9 @@ use std::thread::{spawn, JoinHandle};
use std::time::Duration;
use std::time::SystemTime;
fn producer(addr: &SocketAddr, exit: Arc<AtomicBool>) -> JoinHandle<()> {
let send = UdpSocket::bind("0.0.0.0:0").unwrap();
let msgs = SharedPackets::default();
let msgs_ = msgs.clone();
msgs.write().unwrap().packets.resize(10, Packet::default());
for w in &mut msgs.write().unwrap().packets {
@ -40,12 +40,7 @@ fn producer(addr: &SocketAddr, recycler: &PacketRecycler, exit: Arc<AtomicBool>)
})
}
fn sink(exit: Arc<AtomicBool>, rvs: Arc<AtomicUsize>, r: PacketReceiver) -> JoinHandle<()> {
spawn(move || loop {
if exit.load(Ordering::Relaxed) {
return;
@ -53,7 +48,6 @@ fn sink(
let timer = Duration::new(1, 0);
if let Ok(msgs) = r.recv_timeout(timer) {
rvs.fetch_add(msgs.read().unwrap().packets.len(), Ordering::Relaxed);
recycler.recycle(msgs, "sink");
}
})
}
@ -68,8 +62,7 @@ fn main() -> Result<()> {
.value_name("NUM")
.takes_value(true)
.help("Use NUM receive sockets"),
).get_matches();
if let Some(n) = matches.value_of("num-recv-sockets") {
num_sockets = max(num_sockets, n.to_string().parse().expect("integer"));
@ -79,7 +72,6 @@ fn main() -> Result<()> {
let mut addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0);
let exit = Arc::new(AtomicBool::new(false));
let mut read_channels = Vec::new();
let mut read_threads = Vec::new();
@ -95,19 +87,19 @@ fn main() -> Result<()> {
read_threads.push(receiver(
Arc::new(read),
exit.clone(),
s_reader,
"bench-streamer",
));
}
let t_producer1 = producer(&addr, exit.clone());
let t_producer2 = producer(&addr, exit.clone());
let t_producer3 = producer(&addr, exit.clone());
let rvs = Arc::new(AtomicUsize::new(0));
let sink_threads: Vec<_> = read_channels
.into_iter()
.map(|r_reader| sink(exit.clone(), rvs.clone(), r_reader))
.collect();
let start = SystemTime::now();
let start_val = rvs.load(Ordering::Relaxed);


@ -17,9 +17,9 @@ use solana::hash::Hash;
use solana::logger;
use solana::metrics;
use solana::ncp::Ncp;
use solana::service::Service;
use solana::signature::{read_keypair, GenKeys, Keypair, KeypairUtil};
use solana::system_transaction::SystemTransaction;
use solana::thin_client::{poll_gossip_for_leader, ThinClient};
use solana::timing::{duration_as_ms, duration_as_s};
use solana::transaction::Transaction;
@ -32,7 +32,6 @@ use std::sync::atomic::{AtomicBool, AtomicIsize, AtomicUsize, Ordering};
use std::sync::{Arc, RwLock};
use std::thread::sleep;
use std::thread::Builder;
use std::time::Duration;
use std::time::Instant;
@ -135,8 +134,7 @@ fn send_barrier_transaction(barrier_client: &mut ThinClient, last_id: &mut Hash,
.add_tag(
"op",
influxdb::Value::String("send_barrier_transaction".to_string()),
).add_field("poll_count", influxdb::Value::Integer(poll_count))
.add_field("duration", influxdb::Value::Integer(duration_ms as i64))
.to_owned(),
);
@ -147,8 +145,7 @@ fn send_barrier_transaction(barrier_client: &mut ThinClient, last_id: &mut Hash,
&id.pubkey(),
&Duration::from_millis(100),
&Duration::from_secs(10),
).expect("Failed to get balance");
if balance != 1 {
panic!("Expected an account balance of 1 (balance: {}", balance);
}
@ -191,12 +188,11 @@ fn generate_txs(
.par_iter()
.map(|keypair| {
if !reclaim {
Transaction::system_new(&id, keypair.pubkey(), 1, *last_id)
} else {
Transaction::system_new(keypair, id.pubkey(), 1, *last_id)
}
}).collect();
let duration = signing_start.elapsed();
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
@ -214,8 +210,7 @@ fn generate_txs(
.add_field(
"duration",
influxdb::Value::Integer(duration_as_ms(&duration) as i64),
).to_owned(),
);
let sz = transactions.len() / threads;
@ -267,8 +262,7 @@ fn do_tx_transfers(
.add_field(
"duration",
influxdb::Value::Integer(duration_as_ms(&transfer_start.elapsed()) as i64),
).add_field("count", influxdb::Value::Integer(tx_len as i64))
.to_owned(),
);
}
@ -412,7 +406,7 @@ fn main() {
.long("network")
.value_name("HOST:PORT")
.takes_value(true)
.help("rendezvous with the network at this gossip entry point, defaults to 127.0.0.1:8001"),
.help("Rendezvous with the network at this gossip entry point; defaults to 127.0.0.1:8001"),
)
.arg(
Arg::with_name("identity")
@ -421,7 +415,7 @@ fn main() {
.value_name("PATH")
.takes_value(true)
.required(true)
.help("file containing a client identity (keypair)"),
.help("File containing a client identity (keypair)"),
)
.arg(
Arg::with_name("num-nodes")
@ -429,7 +423,12 @@ fn main() {
.long("num-nodes")
.value_name("NUM")
.takes_value(true)
.help("wait for NUM nodes to converge"),
.help("Wait for NUM nodes to converge"),
)
.arg(
Arg::with_name("reject-extra-nodes")
.long("reject-extra-nodes")
.help("Require exactly `num-nodes` on convergence. Appropriate only for internal networks"),
)
.arg(
Arg::with_name("threads")
@ -437,31 +436,31 @@ fn main() {
.long("threads")
.value_name("NUM")
.takes_value(true)
.help("number of threads"),
.help("Number of threads"),
)
.arg(
Arg::with_name("duration")
.long("duration")
.value_name("SECS")
.takes_value(true)
.help("run benchmark for SECS seconds then exit, default is forever"),
.help("Seconds to run benchmark, then exit; default is forever"),
)
.arg(
Arg::with_name("converge-only")
.long("converge-only")
.help("exit immediately after converging"),
.help("Exit immediately after converging"),
)
.arg(
Arg::with_name("sustained")
.long("sustained")
.help("use sustained performance mode vs. peak mode. This overlaps the tx generation with transfers."),
.help("Use sustained performance mode vs. peak mode. This overlaps the tx generation with transfers."),
)
.arg(
Arg::with_name("tx_count")
.long("tx_count")
.value_name("NUM")
.takes_value(true)
.help("number of transactions to send per batch")
.help("Number of transactions to send per batch")
)
.get_matches();
@ -507,8 +506,7 @@ fn main() {
let leader = poll_gossip_for_leader(network, None).expect("unable to find leader on network");
let exit_signal = Arc::new(AtomicBool::new(false));
let (nodes, leader, ncp) = converge(&leader, &exit_signal, num_nodes);
if nodes.len() < num_nodes {
println!(
@ -517,6 +515,14 @@ fn main() {
);
exit(1);
}
if matches.is_present("reject-extra-nodes") && nodes.len() > num_nodes {
println!(
"Error: Extra nodes discovered. Expecting exactly {}",
num_nodes
);
exit(1);
}
if leader.is_none() {
println!("no leader");
exit(1);
@ -578,10 +584,8 @@ fn main() {
.name("solana-client-sample".to_string())
.spawn(move || {
sample_tx_count(&exit_signal, &maxes, first_tx_count, &v, sample_period);
}).unwrap()
}).collect();
let shared_txs: Arc<RwLock<VecDeque<Vec<Transaction>>>> =
Arc::new(RwLock::new(VecDeque::new()));
@ -606,10 +610,8 @@ fn main() {
&shared_tx_active_thread_count,
&total_tx_sent_count,
);
}).unwrap()
}).collect();
// generate and send transactions for the specified duration
let start = Instant::now();
@ -678,17 +680,14 @@ fn main() {
);
ncp.join().unwrap();
}
fn converge(
leader: &NodeInfo,
exit_signal: &Arc<AtomicBool>,
num_nodes: usize,
) -> (Vec<NodeInfo>, Option<NodeInfo>, Ncp) {
// Let's spy on the network
let (node, gossip_socket) = Crdt::spy_node();
let mut spy_crdt = Crdt::new(node).expect("Crdt::new");
@ -696,14 +695,7 @@ fn converge(
spy_crdt.set_leader(leader.id);
let spy_ref = Arc::new(RwLock::new(spy_crdt));
let window = Arc::new(RwLock::new(default_window()));
let ncp = Ncp::new(&spy_ref, window, None, gossip_socket, exit_signal.clone());
let mut v: Vec<NodeInfo> = vec![];
// wait for the network to converge, 30 seconds should be plenty
for _ in 0..30 {
@ -734,9 +726,8 @@ fn converge(
}
sleep(Duration::new(1, 0));
}
let leader = spy_ref.read().unwrap().leader_data().cloned();
(v, leader, ncp)
}
#[cfg(test)]


@ -47,32 +47,28 @@ fn main() -> Result<(), Box<error::Error>> {
.value_name("HOST:PORT")
.takes_value(true)
.required(true)
.help("rendezvous with the network at this gossip entry point"),
)
.arg(
.help("Rendezvous with the network at this gossip entry point"),
).arg(
Arg::with_name("keypair")
.short("k")
.long("keypair")
.value_name("PATH")
.takes_value(true)
.required(true)
.help("File to read the client's keypair from"),
)
.arg(
.help("File from which to read the mint's keypair"),
).arg(
Arg::with_name("slice")
.long("slice")
.value_name("SECONDS")
.value_name("SECS")
.takes_value(true)
.help("Time slice over which to limit requests to drone"),
).arg(
Arg::with_name("cap")
.long("cap")
.value_name("NUMBER")
.value_name("NUM")
.takes_value(true)
.help("Request limit for time slice"),
).get_matches();
let network = matches
.value_of("network")
@ -159,8 +155,7 @@ fn main() -> Result<(), Box<error::Error>> {
io::ErrorKind::Other,
format!("Drone response: {:?}", err),
))
})).then(|_| Ok(()));
tokio::spawn(server)
});
tokio::run(done);


@ -7,12 +7,14 @@ extern crate solana;
use clap::{App, Arg};
use solana::crdt::FULLNODE_PORT_RANGE;
use solana::fullnode::Config;
use solana::logger;
use solana::netutil::{get_ip_addr, get_public_ip_addr, parse_port_or_addr};
use solana::signature::read_pkcs8;
use std::io;
use std::net::SocketAddr;
fn main() {
logger::setup();
let matches = App::new("fullnode-config")
.version(crate_version!())
.arg(
@ -20,32 +22,28 @@ fn main() {
.short("l")
.long("local")
.takes_value(false)
.help("detect network address from local machine configuration"),
)
.arg(
.help("Detect network address from local machine configuration"),
).arg(
Arg::with_name("keypair")
.short("k")
.long("keypair")
.value_name("PATH")
.takes_value(true)
.help("/path/to/id.json"),
).arg(
Arg::with_name("public")
.short("p")
.long("public")
.takes_value(false)
.help("detect public network address using public servers"),
)
.arg(
.help("Detect public network address using public servers"),
).arg(
Arg::with_name("bind")
.short("b")
.long("bind")
.value_name("PORT")
.takes_value(true)
.help("bind to port or address"),
)
.get_matches();
.help("Bind to port or address"),
).get_matches();
let bind_addr: SocketAddr = {
let mut bind_addr = parse_port_or_addr(matches.value_of("bind"), FULLNODE_PORT_RANGE.0);


@ -11,10 +11,9 @@ use clap::{App, Arg};
use solana::client::mk_client;
use solana::crdt::Node;
use solana::drone::DRONE_PORT;
use solana::fullnode::{Config, Fullnode, FullnodeReturnType};
use solana::logger;
use solana::metrics::set_panic_hook;
use solana::signature::{Keypair, KeypairUtil};
use solana::thin_client::poll_gossip_for_leader;
use solana::wallet::request_airdrop;
@ -33,19 +32,17 @@ fn main() -> () {
Arg::with_name("identity")
.short("i")
.long("identity")
.value_name("FILE")
.value_name("PATH")
.takes_value(true)
.help("run with the identity found in FILE"),
)
.arg(
.help("Run with the identity found in FILE"),
).arg(
Arg::with_name("network")
.short("n")
.long("network")
.value_name("HOST:PORT")
.takes_value(true)
.help("connect/rendezvous with the network at this gossip entry point"),
)
.arg(
.help("Rendezvous with the network at this gossip entry point"),
).arg(
Arg::with_name("ledger")
.short("l")
.long("ledger")
@ -53,8 +50,7 @@ fn main() -> () {
.takes_value(true)
.required(true)
.help("use DIR as persistent ledger location"),
)
.get_matches();
).get_matches();
let (keypair, ncp) = if let Some(i) = matches.value_of("identity") {
let path = i.to_string();
@ -87,7 +83,7 @@ fn main() -> () {
let node_info = node.info.clone();
let pubkey = keypair.pubkey();
let mut fullnode = Fullnode::new(node, ledger_path, keypair, network, false, None);
// airdrop stuff, probably goes away at some point
let leader = match network {
@ -127,5 +123,15 @@ fn main() -> () {
}
}
loop {
let status = fullnode.handle_role_transition();
match status {
Ok(Some(FullnodeReturnType::LeaderRotation)) => (),
_ => {
// Fullnode tpu/tvu exited for some unexpected
// reason, so exit
exit(1);
}
}
}
}


@ -21,21 +21,19 @@ fn main() -> Result<(), Box<error::Error>> {
Arg::with_name("tokens")
.short("t")
.long("tokens")
.value_name("NUMBER")
.value_name("NUM")
.takes_value(true)
.required(true)
.help("Number of tokens with which to initialize mint"),
).arg(
Arg::with_name("ledger")
.short("l")
.long("ledger")
.value_name("DIR")
.takes_value(true)
.required(true)
.help("use DIR as persistent ledger location"),
)
.get_matches();
.help("Use directory as persistent ledger location"),
).get_matches();
let tokens = value_t_or_exit!(matches, "tokens", i64);
let ledger_path = matches.value_of("ledger").unwrap();


@ -3,14 +3,11 @@ extern crate clap;
extern crate dirs;
extern crate serde_json;
extern crate solana;
use clap::{App, Arg};
use solana::wallet::gen_keypair_file;
use std::error;
fn main() -> Result<(), Box<error::Error>> {
let matches = App::new("solana-keygen")
@ -21,13 +18,8 @@ fn main() -> Result<(), Box<error::Error>> {
.long("outfile")
.value_name("PATH")
.takes_value(true)
.help("path to generated file"),
)
.get_matches();
let rnd = SystemRandom::new();
let pkcs8_bytes = Ed25519KeyPair::generate_pkcs8(&rnd)?;
let serialized = serde_json::to_string(&pkcs8_bytes.to_vec())?;
.help("Path to generated file"),
).get_matches();
let mut path = dirs::home_dir().expect("home directory");
let outfile = if matches.is_present("outfile") {
@ -37,15 +29,9 @@ fn main() -> Result<(), Box<error::Error>> {
path.to_str().unwrap()
};
let serialized_keypair = gen_keypair_file(outfile.to_string())?;
if outfile == "-" {
println!("{}", serialized);
} else {
if let Some(outdir) = Path::new(outfile).parent() {
fs::create_dir_all(outdir)?;
}
let mut f = File::create(outfile)?;
f.write_all(&serialized.into_bytes())?;
println!("{}", serialized_keypair);
}
Ok(())
}


@ -21,7 +21,7 @@ fn main() {
.value_name("DIR")
.takes_value(true)
.required(true)
.help("use DIR for ledger location"),
.help("Use directory for ledger location"),
)
.arg(
Arg::with_name("head")
@ -29,13 +29,19 @@ fn main() {
.long("head")
.value_name("NUM")
.takes_value(true)
.help("at most the first NUM entries in ledger\n (only applies to verify, print, json commands)"),
.help("Limit to at most the first NUM entries in ledger\n (only applies to verify, print, json commands)"),
)
.arg(
Arg::with_name("precheck")
.short("p")
.long("precheck")
.help("use ledger_verify() to check internal ledger consistency before proceeding"),
.help("Use ledger_verify() to check internal ledger consistency before proceeding"),
)
.arg(
Arg::with_name("continue")
.short("c")
.long("continue")
.help("Continue verify even if verification fails"),
)
.subcommand(SubCommand::with_name("print").about("Print the ledger"))
.subcommand(SubCommand::with_name("json").about("Print the ledger in JSON format"))
@ -50,6 +56,7 @@ fn main() {
exit(1);
}
}
let entries = match read_ledger(ledger_path, true) {
Ok(entries) => entries,
Err(err) => {
@ -112,7 +119,9 @@ fn main() {
if let Err(e) = bank.process_ledger(genesis) {
eprintln!("verify failed at genesis err: {:?}", e);
if !matches.is_present("continue") {
exit(1);
}
}
}
let entries = entries.map(|e| e.unwrap());
@ -122,9 +131,17 @@ fn main() {
if i >= head {
break;
}
if !entry.verify(&bank.last_id()) {
eprintln!("entry.verify() failed at entry[{}]", i + 2);
if !matches.is_present("continue") {
exit(1);
}
}
if let Err(e) = bank.process_entry(&entry) {
eprintln!("verify failed at entry[{}], err: {:?}", i + 2, e);
if !matches.is_present("continue") {
exit(1);
}
}
}
}

src/bin/replicator.rs Normal file

@ -0,0 +1,106 @@
#[macro_use]
extern crate clap;
extern crate getopts;
extern crate serde_json;
#[macro_use]
extern crate solana;
use clap::{App, Arg};
use solana::crdt::Node;
use solana::fullnode::Config;
use solana::logger;
use solana::replicator::Replicator;
use solana::signature::{Keypair, KeypairUtil};
use std::fs::File;
use std::net::{Ipv4Addr, SocketAddr};
use std::process::exit;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread::sleep;
use std::time::Duration;
fn main() {
logger::setup();
let matches = App::new("replicator")
.version(crate_version!())
.arg(
Arg::with_name("identity")
.short("i")
.long("identity")
.value_name("PATH")
.takes_value(true)
.help("Run with the identity found in FILE"),
).arg(
Arg::with_name("network")
.short("n")
.long("network")
.value_name("HOST:PORT")
.takes_value(true)
.help("Rendezvous with the network at this gossip entry point"),
).arg(
Arg::with_name("ledger")
.short("l")
.long("ledger")
.value_name("DIR")
.takes_value(true)
.required(true)
.help("use DIR as persistent ledger location"),
).get_matches();
let ledger_path = matches.value_of("ledger");
let (keypair, ncp) = if let Some(i) = matches.value_of("identity") {
let path = i.to_string();
if let Ok(file) = File::open(path.clone()) {
let parse: serde_json::Result<Config> = serde_json::from_reader(file);
if let Ok(data) = parse {
(data.keypair(), data.node_info.contact_info.ncp)
} else {
eprintln!("failed to parse {}", path);
exit(1);
}
} else {
eprintln!("failed to read {}", path);
exit(1);
}
} else {
(Keypair::new(), socketaddr!([127, 0, 0, 1], 8700))
};
let node = Node::new_with_external_ip(keypair.pubkey(), &ncp);
println!(
"replicating the data with keypair: {:?} ncp:{:?}",
keypair.pubkey(),
ncp
);
println!("my node: {:?}", node);
let exit = Arc::new(AtomicBool::new(false));
let done = Arc::new(AtomicBool::new(false));
let network_addr = matches
.value_of("network")
.map(|network| network.parse().expect("failed to parse network address"));
// TODO: ask network what slice we should store
let entry_height = 0;
let replicator = Replicator::new(
entry_height,
5,
&exit,
ledger_path,
node,
network_addr,
done.clone(),
);
while !done.load(Ordering::Relaxed) {
sleep(Duration::from_millis(100));
}
println!("Done downloading ledger");
replicator.join();
}
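// Example invocation (illustrative; flags as defined above, paths and
// addresses hypothetical):
//
//   replicator --ledger /path/to/ledger --network 127.0.0.1:8001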

src/bin/upload-perf.rs Normal file

@ -0,0 +1,50 @@
extern crate influx_db_client;
extern crate serde_json;
extern crate solana;
use influx_db_client as influxdb;
use serde_json::Value;
use solana::metrics;
use std::env;
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::process::Command;
fn main() {
let args: Vec<String> = env::args().collect();
// Open the path in read-only mode, returns `io::Result<File>`
let fname = &args[1];
let file = match File::open(fname) {
Err(why) => panic!("couldn't open {}: {:?}", fname, why),
Ok(file) => file,
};
let git_output = Command::new("git")
.args(&["rev-parse", "HEAD"])
.output()
.expect("failed to execute git rev-parse");
let git_commit_hash = String::from_utf8_lossy(&git_output.stdout);
let trimmed_hash = git_commit_hash.trim().to_string();
println!("uploading hash: {}", trimmed_hash);
for line in BufReader::new(file).lines() {
if let Ok(v) = serde_json::from_str(&line.unwrap()) {
let v: Value = v;
if v["type"] == "bench" {
println!("{}", v);
println!(" {}", v["type"]);
let median = v["median"].to_string().parse().unwrap();
let deviation = v["deviation"].to_string().parse().unwrap();
metrics::submit(
influxdb::Point::new(&v["name"].as_str().unwrap().trim_matches('\"'))
.add_field("median", influxdb::Value::Integer(median))
.add_field("deviation", influxdb::Value::Integer(deviation))
.add_field(
"commit",
influxdb::Value::String(git_commit_hash.trim().to_string()),
).to_owned(),
);
}
}
}
metrics::flush();
}
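// Illustrative input line (field names taken from the parsing above; the
// producer of this JSON is assumed to be nightly bench output):
//
//   {"type": "bench", "name": "bench_banking_stage", "median": 1234, "deviation": 56}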


@ -1,170 +1,45 @@
extern crate atty;
extern crate bincode;
extern crate bs58;
#[macro_use]
extern crate clap;
extern crate dirs;
extern crate serde_json;
#[macro_use]
extern crate solana;
use clap::{App, Arg, ArgMatches, SubCommand};
use solana::drone::DRONE_PORT;
use solana::logger;
use solana::rpc::RPC_PORT;
use solana::signature::{read_keypair, KeypairUtil};
use solana::thin_client::poll_gossip_for_leader;
use solana::wallet::{gen_keypair_file, parse_command, process_command, WalletConfig, WalletError};
use std::error;
use std::net::SocketAddr;
pub fn parse_args(matches: &ArgMatches) -> Result<WalletConfig, Box<error::Error>> {
let network = if let Some(addr) = matches.value_of("network") {
addr.parse().or_else(|_| {
Err(WalletError::BadParameter(
"Invalid network location".to_string(),
))
})?
} else {
socketaddr!("127.0.0.1:8001")
};
let timeout = if let Some(secs) = matches.value_of("timeout") {
Some(secs.to_string().parse().expect("integer"))
} else {
None
};
let mut path = dirs::home_dir().expect("home directory");
let id_path = if matches.is_present("keypair") {
matches.value_of("keypair").unwrap()
} else {
path.extend(&[".config", "solana", "id.json"]);
if !path.exists() {
gen_keypair_file(path.to_str().unwrap().to_string())?;
println!("New keypair generated at: {:?}", path.to_str().unwrap());
}
path.to_str().unwrap()
};
let id = read_keypair(id_path).or_else(|err| {
@ -174,157 +49,196 @@ fn parse_args() -> Result<WalletConfig, Box<error::Error>> {
)))
})?;
let leader = poll_gossip_for_leader(network, timeout)?;
let mut drone_addr = leader.contact_info.tpu;
drone_addr.set_port(DRONE_PORT);
let rpc_addr = if let Some(proxy) = matches.value_of("proxy") {
proxy.to_string()
} else {
let rpc_port = if let Some(port) = matches.value_of("rpc-port") {
port.to_string().parse().expect("integer")
} else {
RPC_PORT
};
let mut rpc_addr = leader.contact_info.tpu;
rpc_addr.set_port(rpc_port);
format!("http://{}", rpc_addr.to_string())
};
let command = parse_command(id.pubkey(), &matches)?;
Ok(WalletConfig {
leader,
id,
drone_addr, // TODO: Add an option for this.
rpc_addr,
command,
})
}
fn main() -> Result<(), Box<error::Error>> {
logger::setup();
let matches = App::new("solana-wallet")
.version(crate_version!())
.arg(
Arg::with_name("network")
.short("n")
.long("network")
.value_name("HOST:PORT")
.takes_value(true)
.help("Rendezvous with the network at this gossip entry point; defaults to 127.0.0.1:8001"),
).arg(
Arg::with_name("keypair")
.short("k")
.long("keypair")
.value_name("PATH")
.takes_value(true)
.help("/path/to/id.json"),
).arg(
Arg::with_name("timeout")
.long("timeout")
.value_name("SECS")
.takes_value(true)
.help("Max seconds to wait to get necessary gossip from the network"),
).arg(
Arg::with_name("rpc-port")
.long("port")
.takes_value(true)
.value_name("NUM")
.help("Optional rpc-port configuration to connect to non-default nodes")
).arg(
Arg::with_name("proxy")
.long("proxy")
.takes_value(true)
.value_name("URL")
.help("Address of TLS proxy")
.conflicts_with("rpc-port")
).subcommand(SubCommand::with_name("address").about("Get your public key"))
.subcommand(
SubCommand::with_name("airdrop")
.about("Request a batch of tokens")
.arg(
Arg::with_name("tokens")
.index(1)
.value_name("NUM")
.takes_value(true)
.required(true)
.help("The number of tokens to request"),
),
).subcommand(SubCommand::with_name("balance").about("Get your balance"))
.subcommand(
SubCommand::with_name("cancel")
.about("Cancel a transfer")
.arg(
Arg::with_name("process-id")
.index(1)
.value_name("PROCESS_ID")
.takes_value(true)
.required(true)
.help("The process id of the transfer to cancel"),
),
).subcommand(
SubCommand::with_name("confirm")
.about("Confirm transaction by signature")
.arg(
Arg::with_name("signature")
.index(1)
.value_name("SIGNATURE")
.takes_value(true)
.required(true)
.help("The transaction signature to confirm"),
),
).subcommand(
SubCommand::with_name("pay")
.about("Send a payment")
.arg(
Arg::with_name("to")
.index(1)
.value_name("PUBKEY")
.takes_value(true)
.required(true)
.help("The pubkey of recipient"),
).arg(
Arg::with_name("tokens")
.index(2)
.value_name("NUM")
.takes_value(true)
.required(true)
.help("The number of tokens to send"),
).arg(
Arg::with_name("timestamp")
.long("after")
.value_name("DATETIME")
.takes_value(true)
.help("A timestamp after which transaction will execute"),
).arg(
Arg::with_name("timestamp-pubkey")
.long("require-timestamp-from")
.value_name("PUBKEY")
.takes_value(true)
.requires("timestamp")
.help("Require timestamp from this third party"),
).arg(
Arg::with_name("witness")
.long("require-signature-from")
.value_name("PUBKEY")
.takes_value(true)
.multiple(true)
.use_delimiter(true)
.help("Any third party signatures required to unlock the tokens"),
).arg(
Arg::with_name("cancelable")
.long("cancelable")
.takes_value(false),
),
).subcommand(
SubCommand::with_name("send-signature")
.about("Send a signature to authorize a transfer")
.arg(
Arg::with_name("to")
.index(1)
.value_name("PUBKEY")
.takes_value(true)
.required(true)
.help("The pubkey of recipient"),
).arg(
Arg::with_name("process-id")
.index(2)
.value_name("PROCESS_ID")
.takes_value(true)
.required(true)
.help("The process id of the transfer to authorize")
)
).subcommand(
SubCommand::with_name("send-timestamp")
.about("Send a timestamp to unlock a transfer")
.arg(
Arg::with_name("to")
.index(1)
.value_name("PUBKEY")
.takes_value(true)
.required(true)
.help("The pubkey of recipient"),
).arg(
Arg::with_name("process-id")
.index(2)
.value_name("PROCESS_ID")
.takes_value(true)
.required(true)
.help("The process id of the transfer to unlock")
).arg(
Arg::with_name("datetime")
.long("date")
.value_name("DATETIME")
.takes_value(true)
.help("Optional arbitrary timestamp to apply")
)
).get_matches();
let config = parse_args(&matches)?;
let result = process_command(&config)?;
println!("{}", result);
Ok(())
}
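// Example invocations (illustrative; subcommands and flags as defined above,
// placeholder values hypothetical):
//
//   solana-wallet airdrop 50
//   solana-wallet pay <PUBKEY> 10
//   solana-wallet pay <PUBKEY> 10 --after "2018-12-24T23:59:00Z" --require-timestamp-from <PUBKEY>
//   solana-wallet confirm <SIGNATURE>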


@ -1,6 +1,5 @@
//! The `blob_fetch_stage` pulls blobs from UDP sockets and sends them to a channel.
use service::Service;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
@ -15,24 +14,17 @@ pub struct BlobFetchStage {
}
impl BlobFetchStage {
pub fn new(socket: Arc<UdpSocket>, exit: Arc<AtomicBool>) -> (Self, BlobReceiver) {
Self::new_multi_socket(vec![socket], exit)
}
pub fn new_multi_socket(
sockets: Vec<Arc<UdpSocket>>,
exit: Arc<AtomicBool>,
) -> (Self, BlobReceiver) {
let (sender, receiver) = channel();
let thread_hdls: Vec<_> = sockets
.into_iter()
.map(|socket| streamer::blob_receiver(socket, exit.clone(), sender.clone()))
.collect();
(BlobFetchStage { exit, thread_hdls }, receiver)
@ -44,12 +36,10 @@ impl BlobFetchStage {
}
impl Service for BlobFetchStage {
type JoinReturnType = ();
fn join(self) -> thread::Result<()> {
for thread_hdl in self.thread_hdls {
thread_hdl.join()?;
}
Ok(())


@ -2,48 +2,74 @@
//!
use counter::Counter;
use crdt::{Crdt, CrdtError, NodeInfo};
use entry::Entry;
#[cfg(feature = "erasure")]
use erasure;
use ledger::Block;
use log::Level;
use packet::SharedBlobs;
use rayon::prelude::*;
use result::{Error, Result};
use service::Service;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::mpsc::{Receiver, RecvTimeoutError};
use std::sync::{Arc, RwLock};
use std::thread::{self, Builder, JoinHandle};
use std::time::{Duration, Instant};
use timing::duration_as_ms;
use window::{self, SharedWindow, WindowIndex, WindowUtil, WINDOW_SIZE};
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum BroadcastStageReturnType {
LeaderRotation,
ChannelDisconnected,
}
fn broadcast(
crdt: &Arc<RwLock<Crdt>>,
leader_rotation_interval: u64,
node_info: &NodeInfo,
broadcast_table: &[NodeInfo],
window: &SharedWindow,
receiver: &Receiver<Vec<Entry>>,
sock: &UdpSocket,
transmit_index: &mut WindowIndex,
receive_index: &mut u64,
) -> Result<()> {
let id = node_info.id;
let timer = Duration::new(1, 0);
let entries = receiver.recv_timeout(timer)?;
let now = Instant::now();
let mut num_entries = entries.len();
let mut ventries = Vec::new();
ventries.push(entries);
while let Ok(entries) = receiver.try_recv() {
num_entries += entries.len();
ventries.push(entries);
}
inc_new_counter_info!("broadcast_stage-entries_received", num_entries);
let to_blobs_start = Instant::now();
let dq: SharedBlobs = ventries
.into_par_iter()
.flat_map(|p| p.to_blobs())
.collect();
let to_blobs_elapsed = duration_as_ms(&to_blobs_start.elapsed());
let blobs_vec: SharedBlobs = dq.into_iter().collect();
let blobs_chunking = Instant::now();
// We could receive more blobs than window slots so
// break them up into window-sized chunks to process
let blobs_chunked = blobs_vec.chunks(WINDOW_SIZE as usize).map(|x| x.to_vec());
let chunking_elapsed = duration_as_ms(&blobs_chunking.elapsed());
trace!("{}", window.read().unwrap().print(&id, *receive_index));
let broadcast_start = Instant::now();
for mut blobs in blobs_chunked {
let blobs_len = blobs.len();
trace!("{}: broadcast blobs.len: {}", id, blobs_len);
@ -60,33 +86,31 @@ fn broadcast(
for b in &blobs {
let ix = b.read().unwrap().get_index().expect("blob index");
let pos = (ix % WINDOW_SIZE) as usize;
if let Some(x) = win[pos].data.take() {
trace!(
"{} popped {} at {}",
id,
x.read().unwrap().get_index().unwrap(),
pos
);
recycler.recycle(x, "broadcast-data");
}
if let Some(x) = win[pos].coding.take() {
trace!(
"{} popped {} at {}",
id,
x.read().unwrap().get_index().unwrap(),
pos
);
recycler.recycle(x, "broadcast-coding");
}
trace!("{} null {}", id, pos);
}
for b in &blobs {
let ix = b.read().unwrap().get_index().expect("blob index");
let pos = (ix % WINDOW_SIZE) as usize;
trace!("{} caching {} at {}", id, ix, pos);
assert!(win[pos].data.is_none());
win[pos].data = Some(b.clone());
}
}
@ -96,7 +120,6 @@ fn broadcast(
erasure::generate_coding(
&id,
&mut window.write().unwrap(),
*receive_index,
blobs_len,
&mut transmit_index.coding,
@ -107,6 +130,8 @@ fn broadcast(
// Send blobs out from the window
Crdt::broadcast(
crdt,
leader_rotation_interval,
&node_info,
&broadcast_table,
&window,
@ -115,11 +140,40 @@ fn broadcast(
*receive_index,
)?;
}
let broadcast_elapsed = duration_as_ms(&broadcast_start.elapsed());
inc_new_counter_info!(
"broadcast_stage-time_ms",
duration_as_ms(&now.elapsed()) as usize
);
info!(
"broadcast: {} entries, blob time {} chunking time {} broadcast time {}",
num_entries, to_blobs_elapsed, chunking_elapsed, broadcast_elapsed
);
Ok(())
}
// Implement a destructor for the BroadcastStage thread to signal it exited
// even on panics
struct Finalizer {
exit_sender: Arc<AtomicBool>,
}
impl Finalizer {
fn new(exit_sender: Arc<AtomicBool>) -> Self {
Finalizer { exit_sender }
}
}
// Implement a destructor for Finalizer.
impl Drop for Finalizer {
fn drop(&mut self) {
self.exit_sender.clone().store(true, Ordering::Relaxed);
}
}
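// Illustrative sketch (hypothetical helper): because a Finalizer lives on the
// spawned thread's stack, its Drop impl runs on normal return and on an
// unwinding panic alike, so the exit flag is set either way.
fn example_finalizer_guard(exit_sender: Arc<AtomicBool>) {
    let _exit = Finalizer::new(exit_sender);
    // ... thread body that may panic; the flag is still set on unwind ...
}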
pub struct BroadcastStage {
thread_hdl: JoinHandle<BroadcastStageReturnType>,
}
impl BroadcastStage {
@ -128,29 +182,51 @@ impl BroadcastStage {
crdt: &Arc<RwLock<Crdt>>,
window: &SharedWindow,
entry_height: u64,
receiver: &Receiver<Vec<Entry>>,
) -> BroadcastStageReturnType {
let mut transmit_index = WindowIndex {
data: entry_height,
coding: entry_height,
};
let mut receive_index = entry_height;
let me;
let leader_rotation_interval;
{
let rcrdt = crdt.read().unwrap();
me = rcrdt.my_data().clone();
leader_rotation_interval = rcrdt.get_leader_rotation_interval();
}
loop {
if transmit_index.data % (leader_rotation_interval as u64) == 0 {
let rcrdt = crdt.read().unwrap();
let my_id = rcrdt.my_data().id;
match rcrdt.get_scheduled_leader(transmit_index.data) {
Some(id) if id == my_id => (),
// If the leader stays in power for the next
// round as well, then we don't exit. Otherwise, exit.
_ => {
return BroadcastStageReturnType::LeaderRotation;
}
}
}
let broadcast_table = crdt.read().unwrap().compute_broadcast_table();
if let Err(e) = broadcast(
crdt,
leader_rotation_interval,
&me,
&broadcast_table,
&window,
&receiver,
&sock,
&mut transmit_index,
&mut receive_index,
) {
match e {
Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => {
return BroadcastStageReturnType::ChannelDisconnected
}
Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
Error::CrdtError(CrdtError::NoPeers) => (), // TODO: Why are the unit-tests throwing hundreds of these?
_ => {
@ -169,33 +245,184 @@ impl BroadcastStage {
/// * `exit` - Boolean to signal system exit.
/// * `crdt` - CRDT structure
/// * `window` - Cache of blobs that we have broadcast
/// * `receiver` - Receive channel for blobs to be retransmitted to all the layer 1 nodes.
/// * `exit_sender` - Set to true when this stage exits, allows rest of Tpu to exit cleanly. Otherwise,
/// when a Tpu stage closes, it only closes the stages that come after it. The stages
/// that come before could be blocked on a receive, and never notice that they need to
/// exit. Now, if any stage of the Tpu closes, it will lead to closing the WriteStage (b/c
/// WriteStage is the last stage in the pipeline), which will then close Broadcast stage,
/// which will then close FetchStage in the Tpu, and then the rest of the Tpu,
/// completing the cycle.
pub fn new(
sock: UdpSocket,
crdt: Arc<RwLock<Crdt>>,
window: SharedWindow,
entry_height: u64,
receiver: Receiver<Vec<Entry>>,
exit_sender: Arc<AtomicBool>,
) -> Self {
let thread_hdl = Builder::new()
.name("solana-broadcaster".to_string())
.spawn(move || {
let _exit = Finalizer::new(exit_sender);
Self::run(&sock, &crdt, &window, entry_height, &receiver)
}).unwrap();
BroadcastStage { thread_hdl }
}
}
impl Service for BroadcastStage {
type JoinReturnType = BroadcastStageReturnType;
fn join(self) -> thread::Result<BroadcastStageReturnType> {
self.thread_hdl.join()
}
}
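// Sketch of how a caller might act on the join() result (the variable name
// `broadcast_stage` is hypothetical):
//
// match broadcast_stage.join().unwrap() {
//     BroadcastStageReturnType::LeaderRotation => { /* hand off leadership */ }
//     BroadcastStageReturnType::ChannelDisconnected => { /* upstream closed; shut down */ }
// }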
#[cfg(test)]
mod tests {
use broadcast_stage::{BroadcastStage, BroadcastStageReturnType};
use crdt::{Crdt, Node};
use entry::Entry;
use ledger::next_entries_mut;
use mint::Mint;
use service::Service;
use signature::{Keypair, KeypairUtil};
use solana_program_interface::pubkey::Pubkey;
use std::cmp;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::{channel, Sender};
use std::sync::{Arc, RwLock};
use window::{new_window_from_entries, SharedWindow};
struct DummyBroadcastStage {
my_id: Pubkey,
buddy_id: Pubkey,
broadcast_stage: BroadcastStage,
shared_window: SharedWindow,
entry_sender: Sender<Vec<Entry>>,
crdt: Arc<RwLock<Crdt>>,
entries: Vec<Entry>,
}
fn setup_dummy_broadcast_stage(leader_rotation_interval: u64) -> DummyBroadcastStage {
// Setup dummy leader info
let leader_keypair = Keypair::new();
let my_id = leader_keypair.pubkey();
let leader_info = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
// Give the leader somebody to broadcast to so he isn't lonely
let buddy_keypair = Keypair::new();
let buddy_id = buddy_keypair.pubkey();
let broadcast_buddy = Node::new_localhost_with_pubkey(buddy_keypair.pubkey());
// Fill the crdt with the buddy's info
let mut crdt = Crdt::new(leader_info.info.clone()).expect("Crdt::new");
crdt.insert(&broadcast_buddy.info);
crdt.set_leader_rotation_interval(leader_rotation_interval);
let crdt = Arc::new(RwLock::new(crdt));
// Make dummy initial entries
let mint = Mint::new(10000);
let entries = mint.create_entries();
let entry_height = entries.len() as u64;
// Setup a window
let window = new_window_from_entries(&entries, entry_height, &leader_info.info);
let shared_window = Arc::new(RwLock::new(window));
let (entry_sender, entry_receiver) = channel();
let exit_sender = Arc::new(AtomicBool::new(false));
// Start up the broadcast stage
let broadcast_stage = BroadcastStage::new(
leader_info.sockets.broadcast,
crdt.clone(),
shared_window.clone(),
entry_height,
entry_receiver,
exit_sender,
);
DummyBroadcastStage {
my_id,
buddy_id,
broadcast_stage,
shared_window,
entry_sender,
crdt,
entries,
}
}
fn find_highest_window_index(shared_window: &SharedWindow) -> u64 {
let window = shared_window.read().unwrap();
window.iter().fold(0, |m, w_slot| {
if let Some(ref blob) = w_slot.data {
cmp::max(m, blob.read().unwrap().get_index().unwrap())
} else {
m
}
})
}
#[test]
fn test_broadcast_stage_leader_rotation_exit() {
let leader_rotation_interval = 10;
let broadcast_info = setup_dummy_broadcast_stage(leader_rotation_interval);
{
let mut wcrdt = broadcast_info.crdt.write().unwrap();
// Set the leader for the next rotation to be myself
wcrdt.set_scheduled_leader(leader_rotation_interval, broadcast_info.my_id);
}
let genesis_len = broadcast_info.entries.len() as u64;
let mut last_id = broadcast_info
.entries
.last()
.expect("Ledger should not be empty")
.id;
let mut num_hashes = 0;
// Input enough entries to make exactly leader_rotation_interval entries, which will
// trigger a check for leader rotation. Because the next scheduled leader
// is ourselves, we won't exit
for _ in genesis_len..leader_rotation_interval {
let new_entry = next_entries_mut(&mut last_id, &mut num_hashes, vec![]);
broadcast_info.entry_sender.send(new_entry).unwrap();
}
// Set the scheduled next leader in the crdt to the other buddy on the network
broadcast_info
.crdt
.write()
.unwrap()
.set_scheduled_leader(2 * leader_rotation_interval, broadcast_info.buddy_id);
// Input another leader_rotation_interval dummy entries, which will take us
// past the point of the leader rotation. The broadcast stage will see that
// it's no longer the leader after checking the crdt, and exit
for _ in 0..leader_rotation_interval {
let new_entry = next_entries_mut(&mut last_id, &mut num_hashes, vec![]);
match broadcast_info.entry_sender.send(new_entry) {
// We disconnected, break out of loop and check the results
Err(_) => break,
_ => (),
};
}
// Make sure the threads closed cleanly
assert_eq!(
broadcast_info.broadcast_stage.join().unwrap(),
BroadcastStageReturnType::LeaderRotation
);
let highest_index = find_highest_window_index(&broadcast_info.shared_window);
// The blob index is zero indexed, so it will always be one behind the entry height
// which starts at one.
assert_eq!(highest_index, 2 * leader_rotation_interval - 1);
}
}

View File

@ -4,8 +4,8 @@
//! `Payment`, the payment is executed.
use chrono::prelude::*;
use payment_plan::{Payment, Witness};
use solana_program_interface::pubkey::Pubkey;
use std::mem;
/// A data type representing a `Witness` that the payment plan is waiting on.
@ -44,6 +44,9 @@ pub enum Budget {
/// Either make a payment after one condition or a different payment after another
/// condition, which ever condition is satisfied first.
Or((Condition, Payment), (Condition, Payment)),
/// Make a payment after both of two conditions are satisfied
And(Condition, Condition, Payment),
}
impl Budget {
@ -57,6 +60,15 @@ impl Budget {
Budget::After(Condition::Signature(from), Payment { tokens, to })
}
/// Create a budget that pays `tokens` to `to` after being witnessed by 2x `from`s
pub fn new_2_2_multisig_payment(from0: Pubkey, from1: Pubkey, tokens: i64, to: Pubkey) -> Self {
Budget::And(
Condition::Signature(from0),
Condition::Signature(from1),
Payment { tokens, to },
)
}
/// Create a budget that pays `tokens` to `to` after the given DateTime.
pub fn new_future_payment(dt: DateTime<Utc>, from: Pubkey, tokens: i64, to: Pubkey) -> Self {
Budget::After(Condition::Timestamp(dt, from), Payment { tokens, to })
@ -75,11 +87,9 @@ impl Budget {
(Condition::Signature(from), Payment { tokens, to: from }),
)
}
/// Return Payment if the budget requires no additional Witnesses.
pub fn final_payment(&self) -> Option<Payment> {
match self {
Budget::Pay(payment) => Some(payment.clone()),
_ => None,
@ -87,25 +97,41 @@ impl PaymentPlan for Budget {
}
/// Return true if the budget spends exactly `spendable_tokens`.
pub fn verify(&self, spendable_tokens: i64) -> bool {
match self {
Budget::Pay(payment) | Budget::After(_, payment) | Budget::And(_, _, payment) => {
payment.tokens == spendable_tokens
}
Budget::Or(a, b) => a.1.tokens == spendable_tokens && b.1.tokens == spendable_tokens,
}
}
/// Apply a witness to the budget to see if the budget can be reduced.
/// If so, modify the budget in-place.
pub fn apply_witness(&mut self, witness: &Witness, from: &Pubkey) {
let new_budget = match self {
Budget::After(cond, payment) if cond.is_satisfied(witness, from) => {
Some(Budget::Pay(payment.clone()))
}
Budget::Or((cond, payment), _) if cond.is_satisfied(witness, from) => {
Some(Budget::Pay(payment.clone()))
}
Budget::Or(_, (cond, payment)) if cond.is_satisfied(witness, from) => {
Some(Budget::Pay(payment.clone()))
}
Budget::And(cond0, cond1, payment) => {
if cond0.is_satisfied(witness, from) {
Some(Budget::After(cond1.clone(), payment.clone()))
} else if cond1.is_satisfied(witness, from) {
Some(Budget::After(cond0.clone(), payment.clone()))
} else {
None
}
}
_ => None,
};
if let Some(budget) = new_budget {
mem::replace(self, budget);
}
}
}
@ -191,4 +217,14 @@ mod tests {
budget.apply_witness(&Witness::Signature, &from);
assert_eq!(budget, Budget::new_payment(42, from));
}
#[test]
fn test_2_2_multisig_payment() {
let from0 = Keypair::new().pubkey();
let from1 = Keypair::new().pubkey();
let to = Pubkey::default();
let mut budget = Budget::new_2_2_multisig_payment(from0, from1, 42, to);
budget.apply_witness(&Witness::Signature, &from0);
assert_eq!(budget, Budget::new_authorized_payment(from1, 42, to));
}
}
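To make the new `And` reduction concrete, a small walk-through using the crate modules above (the test here only applies the first witness): the first signature demotes `And` to `After`, and the second reduces it to `Pay`.

use budget::Budget;
use payment_plan::Witness;
use signature::{Keypair, KeypairUtil};

fn main() {
    let from0 = Keypair::new().pubkey();
    let from1 = Keypair::new().pubkey();
    let to = Keypair::new().pubkey();
    let mut budget = Budget::new_2_2_multisig_payment(from0, from1, 42, to);
    // First witness: And(s0, s1, pay) becomes After(s1, pay), still pending.
    budget.apply_witness(&Witness::Signature, &from0);
    assert!(budget.final_payment().is_none());
    // Second witness: After(s1, pay) becomes Pay(pay), ready to execute.
    budget.apply_witness(&Witness::Signature, &from1);
    assert_eq!(budget.final_payment().unwrap().tokens, 42);
}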

37
src/budget_instruction.rs Normal file
View File

@ -0,0 +1,37 @@
use budget::Budget;
use chrono::prelude::{DateTime, Utc};
/// A smart contract.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Contract {
/// The number of tokens allocated to the `Budget` and any transaction fees.
pub tokens: i64,
pub budget: Budget,
}
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Vote {
/// We send some gossip-specific membership information through the vote to shortcut
/// liveness voting.
/// The version of the CRDT struct that the last_id of this network voted with
pub version: u64,
/// The version of the CRDT struct that has the same network configuration as this one
pub contact_info_version: u64,
// TODO: add signature of the state here as well
}
/// An instruction to progress the smart contract.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Instruction {
/// Declare and instantiate `Contract`.
NewContract(Contract),
/// Tell the payment plan to acknowledge that the given `DateTime` has passed.
ApplyTimestamp(DateTime<Utc>),
/// Tell the payment plan that the `NewContract` with `Signature` has been
/// signed by the containing transaction's `Pubkey`.
ApplySignature,
/// Vote for a PoH that is equal to the last_id of this transaction
NewVote(Vote),
}
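These instructions travel as bincode inside a transaction's userdata (see `test_sdk_serialize` further down); a small sketch, assuming bincode 1.x defaults, of how the enum tag appears on the wire:

use bincode::serialize;
use budget_instruction::Instruction;

fn main() {
    // Enum variants serialize as a 4-byte little-endian tag; ApplySignature
    // is variant index 2, so its userdata is exactly [2, 0, 0, 0], matching
    // the assertion in test_sdk_serialize below.
    let userdata = serialize(&Instruction::ApplySignature).unwrap();
    assert_eq!(userdata, vec![2, 0, 0, 0]);
}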

570
src/budget_program.rs Normal file
View File

@ -0,0 +1,570 @@
//! budget program
use bincode::{self, deserialize, serialize_into, serialized_size};
use budget::Budget;
use budget_instruction::Instruction;
use chrono::prelude::{DateTime, Utc};
use payment_plan::Witness;
use solana_program_interface::account::Account;
use solana_program_interface::pubkey::Pubkey;
use std::io;
use transaction::Transaction;
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub enum BudgetError {
InsufficientFunds(Pubkey),
ContractAlreadyExists(Pubkey),
ContractNotPending(Pubkey),
SourceIsPendingContract(Pubkey),
UninitializedContract(Pubkey),
NegativeTokens,
DestinationMissing(Pubkey),
FailedWitness,
UserdataTooSmall,
UserdataDeserializeFailure,
}
#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq)]
pub struct BudgetState {
pub initialized: bool,
pub pending_budget: Option<Budget>,
}
pub const BUDGET_PROGRAM_ID: [u8; 32] = [
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
];
impl BudgetState {
fn is_pending(&self) -> bool {
self.pending_budget != None
}
pub fn id() -> Pubkey {
Pubkey::new(&BUDGET_PROGRAM_ID)
}
pub fn check_id(program_id: &Pubkey) -> bool {
program_id.as_ref() == BUDGET_PROGRAM_ID
}
/// Process a Witness Signature. Any payment plans waiting on this signature
/// will progress one step.
fn apply_signature(
&mut self,
keys: &[Pubkey],
account: &mut [Account],
) -> Result<(), BudgetError> {
let mut final_payment = None;
if let Some(ref mut budget) = self.pending_budget {
budget.apply_witness(&Witness::Signature, &keys[0]);
final_payment = budget.final_payment();
}
if let Some(payment) = final_payment {
if keys.len() < 2 || payment.to != keys[2] {
trace!("destination missing");
return Err(BudgetError::DestinationMissing(payment.to));
}
self.pending_budget = None;
account[1].tokens -= payment.tokens;
account[2].tokens += payment.tokens;
}
Ok(())
}
/// Process a Witness Timestamp. Any payment plans waiting on this timestamp
/// will progress one step.
fn apply_timestamp(
&mut self,
keys: &[Pubkey],
accounts: &mut [Account],
dt: DateTime<Utc>,
) -> Result<(), BudgetError> {
// Check to see if any timelocked transactions can be completed.
let mut final_payment = None;
if let Some(ref mut budget) = self.pending_budget {
budget.apply_witness(&Witness::Timestamp(dt), &keys[0]);
final_payment = budget.final_payment();
}
if let Some(payment) = final_payment {
if keys.len() < 2 || payment.to != keys[2] {
trace!("destination missing");
return Err(BudgetError::DestinationMissing(payment.to));
}
self.pending_budget = None;
accounts[1].tokens -= payment.tokens;
accounts[2].tokens += payment.tokens;
}
Ok(())
}
/// Deduct tokens from the source account if it has sufficient funds and the contract isn't
/// pending
fn apply_debits_to_budget_state(
tx: &Transaction,
accounts: &mut [Account],
instruction: &Instruction,
) -> Result<(), BudgetError> {
{
// if the source account userdata is not empty, this is a pending contract
if !accounts[0].userdata.is_empty() {
trace!("source is pending");
return Err(BudgetError::SourceIsPendingContract(tx.keys[0]));
}
if let Instruction::NewContract(contract) = &instruction {
if contract.tokens < 0 {
trace!("negative tokens");
return Err(BudgetError::NegativeTokens);
}
if accounts[0].tokens < contract.tokens {
trace!("insufficient funds");
return Err(BudgetError::InsufficientFunds(tx.keys[0]));
} else {
accounts[0].tokens -= contract.tokens;
}
};
}
Ok(())
}
/// Apply only a transaction's credits.
/// Note: It is safe to apply credits from multiple transactions in parallel.
fn apply_credits_to_budget_state(
tx: &Transaction,
accounts: &mut [Account],
instruction: &Instruction,
) -> Result<(), BudgetError> {
match instruction {
Instruction::NewContract(contract) => {
let budget = contract.budget.clone();
if let Some(payment) = budget.final_payment() {
accounts[1].tokens += payment.tokens;
Ok(())
} else {
let existing = Self::deserialize(&accounts[1].userdata).ok();
if Some(true) == existing.map(|x| x.initialized) {
trace!("contract already exists");
Err(BudgetError::ContractAlreadyExists(tx.keys[1]))
} else {
let mut state = BudgetState::default();
state.pending_budget = Some(budget);
accounts[1].tokens += contract.tokens;
state.initialized = true;
state.serialize(&mut accounts[1].userdata)
}
}
}
Instruction::ApplyTimestamp(dt) => {
if let Ok(mut state) = Self::deserialize(&accounts[1].userdata) {
if !state.is_pending() {
Err(BudgetError::ContractNotPending(tx.keys[1]))
} else if !state.initialized {
trace!("contract is uninitialized");
Err(BudgetError::UninitializedContract(tx.keys[1]))
} else {
trace!("apply timestamp");
state.apply_timestamp(&tx.keys, accounts, *dt)?;
trace!("apply timestamp committed");
state.serialize(&mut accounts[1].userdata)
}
} else {
Err(BudgetError::UninitializedContract(tx.keys[1]))
}
}
Instruction::ApplySignature => {
if let Ok(mut state) = Self::deserialize(&accounts[1].userdata) {
if !state.is_pending() {
Err(BudgetError::ContractNotPending(tx.keys[1]))
} else if !state.initialized {
trace!("contract is uninitialized");
Err(BudgetError::UninitializedContract(tx.keys[1]))
} else {
trace!("apply signature");
state.apply_signature(&tx.keys, accounts)?;
trace!("apply signature committed");
state.serialize(&mut accounts[1].userdata)
}
} else {
Err(BudgetError::UninitializedContract(tx.keys[1]))
}
}
Instruction::NewVote(_vote) => {
// TODO: move vote instruction into a different contract
trace!("GOT VOTE! last_id={}", tx.last_id);
Ok(())
}
}
}
fn serialize(&self, output: &mut [u8]) -> Result<(), BudgetError> {
let len = serialized_size(self).unwrap() as u64;
if output.len() < len as usize {
warn!(
"{} bytes required to serialize, only have {} bytes",
len,
output.len()
);
return Err(BudgetError::UserdataTooSmall);
}
{
let writer = io::BufWriter::new(&mut output[..8]);
serialize_into(writer, &len).unwrap();
}
{
let writer = io::BufWriter::new(&mut output[8..8 + len as usize]);
serialize_into(writer, self).unwrap();
}
Ok(())
}
pub fn deserialize(input: &[u8]) -> bincode::Result<Self> {
if input.len() < 8 {
return Err(Box::new(bincode::ErrorKind::SizeLimit));
}
let len: u64 = deserialize(&input[..8]).unwrap();
if len < 2 {
return Err(Box::new(bincode::ErrorKind::SizeLimit));
}
if input.len() < 8 + len as usize {
return Err(Box::new(bincode::ErrorKind::SizeLimit));
}
deserialize(&input[8..8 + len as usize])
}
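A stand-alone sketch of the layout that `serialize` and `deserialize` agree on above, assuming bincode 1.x defaults (a u64 length encoded as 8 little-endian bytes, followed by the bincode-encoded state):

use bincode::{deserialize, serialize};

fn main() {
    // Stand-in payload for a serialized BudgetState.
    let payload = serialize(&(true, Some(7u64))).unwrap();
    let len = payload.len() as u64;
    let mut userdata = vec![0u8; 8 + payload.len()];
    userdata[..8].copy_from_slice(&serialize(&len).unwrap()); // 8-byte LE prefix
    userdata[8..].copy_from_slice(&payload);

    // Reading it back the way deserialize() does: length first, then payload.
    let stored_len: u64 = deserialize(&userdata[..8]).unwrap();
    assert_eq!(stored_len as usize, payload.len());
}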
/// Budget DSL contract interface
/// * tx - the transaction
/// * accounts[0] - The source of the tokens
/// * accounts[1] - The contract context. Once the contract has been completed, the tokens can
/// be spent from this account.
/// * accounts[2] - The destination of the payment (checked against the transaction's keys[2])
pub fn process_transaction(
tx: &Transaction,
accounts: &mut [Account],
) -> Result<(), BudgetError> {
if let Ok(instruction) = deserialize(&tx.userdata) {
trace!("process_transaction: {:?}", instruction);
Self::apply_debits_to_budget_state(tx, accounts, &instruction)
.and_then(|_| Self::apply_credits_to_budget_state(tx, accounts, &instruction))
} else {
info!("Invalid transaction userdata: {:?}", tx.userdata);
Err(BudgetError::UserdataDeserializeFailure)
}
}
//TODO the contract needs to provide a "get_balance" introspection call of the userdata
pub fn get_balance(account: &Account) -> i64 {
if let Ok(state) = deserialize(&account.userdata) {
let state: BudgetState = state;
if state.is_pending() {
0
} else {
account.tokens
}
} else {
account.tokens
}
}
}
#[cfg(test)]
mod test {
use bincode::serialize;
use budget_program::{BudgetError, BudgetState};
use budget_transaction::BudgetTransaction;
use chrono::prelude::{DateTime, NaiveDate, Utc};
use hash::Hash;
use signature::{GenKeys, Keypair, KeypairUtil};
use solana_program_interface::account::Account;
use solana_program_interface::pubkey::Pubkey;
use transaction::Transaction;
#[test]
fn test_serializer() {
let mut a = Account::new(0, 512, BudgetState::id());
let b = BudgetState::default();
b.serialize(&mut a.userdata).unwrap();
let buf = serialize(&b).unwrap();
assert_eq!(a.userdata[8..8 + buf.len()], buf[0..]);
let c = BudgetState::deserialize(&a.userdata).unwrap();
assert_eq!(b, c);
}
#[test]
fn test_serializer_userdata_too_small() {
let mut a = Account::new(0, 1, BudgetState::id());
let b = BudgetState::default();
assert_eq!(
b.serialize(&mut a.userdata),
Err(BudgetError::UserdataTooSmall)
);
}
#[test]
fn test_invalid_instruction() {
let mut accounts = vec![
Account::new(1, 0, BudgetState::id()),
Account::new(0, 512, BudgetState::id()),
];
let from = Keypair::new();
let contract = Keypair::new();
let tx = Transaction::new(
&from,
&[contract.pubkey()],
BudgetState::id(),
vec![1, 2, 3], // <== garbage instruction
Hash::default(),
0,
);
assert!(BudgetState::process_transaction(&tx, &mut accounts).is_err());
}
#[test]
fn test_transfer_on_date() {
let mut accounts = vec![
Account::new(1, 0, BudgetState::id()),
Account::new(0, 512, BudgetState::id()),
Account::new(0, 0, BudgetState::id()),
];
let from_account = 0;
let contract_account = 1;
let to_account = 2;
let from = Keypair::new();
let contract = Keypair::new();
let to = Keypair::new();
let rando = Keypair::new();
let dt = Utc::now();
let tx = Transaction::budget_new_on_date(
&from,
to.pubkey(),
contract.pubkey(),
dt,
from.pubkey(),
None,
1,
Hash::default(),
);
BudgetState::process_transaction(&tx, &mut accounts).unwrap();
assert_eq!(accounts[from_account].tokens, 0);
assert_eq!(accounts[contract_account].tokens, 1);
let state = BudgetState::deserialize(&accounts[contract_account].userdata).unwrap();
assert!(state.is_pending());
// Attack! Try to payout to a rando key
let tx = Transaction::budget_new_timestamp(
&from,
contract.pubkey(),
rando.pubkey(),
dt,
Hash::default(),
);
assert_eq!(
BudgetState::process_transaction(&tx, &mut accounts),
Err(BudgetError::DestinationMissing(to.pubkey()))
);
assert_eq!(accounts[from_account].tokens, 0);
assert_eq!(accounts[contract_account].tokens, 1);
assert_eq!(accounts[to_account].tokens, 0);
let state = BudgetState::deserialize(&accounts[contract_account].userdata).unwrap();
assert!(state.is_pending());
// Now, acknowledge the time in the condition occurred and
// that pubkey's funds are now available.
let tx = Transaction::budget_new_timestamp(
&from,
contract.pubkey(),
to.pubkey(),
dt,
Hash::default(),
);
BudgetState::process_transaction(&tx, &mut accounts).unwrap();
assert_eq!(accounts[from_account].tokens, 0);
assert_eq!(accounts[contract_account].tokens, 0);
assert_eq!(accounts[to_account].tokens, 1);
let state = BudgetState::deserialize(&accounts[contract_account].userdata).unwrap();
assert!(!state.is_pending());
// try to replay the timestamp contract
assert_eq!(
BudgetState::process_transaction(&tx, &mut accounts),
Err(BudgetError::ContractNotPending(contract.pubkey()))
);
assert_eq!(accounts[from_account].tokens, 0);
assert_eq!(accounts[contract_account].tokens, 0);
assert_eq!(accounts[to_account].tokens, 1);
}
#[test]
fn test_cancel_transfer() {
let mut accounts = vec![
Account::new(1, 0, BudgetState::id()),
Account::new(0, 512, BudgetState::id()),
Account::new(0, 0, BudgetState::id()),
];
let from_account = 0;
let contract_account = 1;
let pay_account = 2;
let from = Keypair::new();
let contract = Keypair::new();
let to = Keypair::new();
let dt = Utc::now();
let tx = Transaction::budget_new_on_date(
&from,
to.pubkey(),
contract.pubkey(),
dt,
from.pubkey(),
Some(from.pubkey()),
1,
Hash::default(),
);
BudgetState::process_transaction(&tx, &mut accounts).unwrap();
assert_eq!(accounts[from_account].tokens, 0);
assert_eq!(accounts[contract_account].tokens, 1);
let state = BudgetState::deserialize(&accounts[contract_account].userdata).unwrap();
assert!(state.is_pending());
// Attack! try to put the tokens into the wrong account with cancel
let tx =
Transaction::budget_new_signature(&to, contract.pubkey(), to.pubkey(), Hash::default());
// unit test hack, the `from account` is passed instead of the `to` account to avoid
// creating more account vectors
BudgetState::process_transaction(&tx, &mut accounts).unwrap();
// nothing should be changed because apply witness didn't finalize a payment
assert_eq!(accounts[from_account].tokens, 0);
assert_eq!(accounts[contract_account].tokens, 1);
// this would be the `to.pubkey()` account
assert_eq!(accounts[pay_account].tokens, 0);
// Now, cancel the transaction. from gets her funds back
let tx = Transaction::budget_new_signature(
&from,
contract.pubkey(),
from.pubkey(),
Hash::default(),
);
BudgetState::process_transaction(&tx, &mut accounts).unwrap();
assert_eq!(accounts[from_account].tokens, 0);
assert_eq!(accounts[contract_account].tokens, 0);
assert_eq!(accounts[pay_account].tokens, 1);
// try to replay the signature contract
let tx = Transaction::budget_new_signature(
&from,
contract.pubkey(),
from.pubkey(),
Hash::default(),
);
assert_eq!(
BudgetState::process_transaction(&tx, &mut accounts),
Err(BudgetError::ContractNotPending(contract.pubkey()))
);
assert_eq!(accounts[from_account].tokens, 0);
assert_eq!(accounts[contract_account].tokens, 0);
assert_eq!(accounts[pay_account].tokens, 1);
}
#[test]
fn test_userdata_too_small() {
let mut accounts = vec![
Account::new(1, 0, BudgetState::id()),
Account::new(1, 0, BudgetState::id()), // <== userdata is 0, which is not enough
Account::new(1, 0, BudgetState::id()),
];
let from = Keypair::new();
let contract = Keypair::new();
let to = Keypair::new();
let tx = Transaction::budget_new_on_date(
&from,
to.pubkey(),
contract.pubkey(),
Utc::now(),
from.pubkey(),
None,
1,
Hash::default(),
);
assert!(BudgetState::process_transaction(&tx, &mut accounts).is_err());
assert!(BudgetState::deserialize(&accounts[1].userdata).is_err());
let tx = Transaction::budget_new_timestamp(
&from,
contract.pubkey(),
to.pubkey(),
Utc::now(),
Hash::default(),
);
assert!(BudgetState::process_transaction(&tx, &mut accounts).is_err());
assert!(BudgetState::deserialize(&accounts[1].userdata).is_err());
// Success if there was no panic...
}
/// Detect binary changes in the serialized contract userdata, which could have a downstream
/// affect on SDKs and DApps
#[test]
fn test_sdk_serialize() {
let keypair = &GenKeys::new([0u8; 32]).gen_n_keypairs(1)[0];
let to = Pubkey::new(&[
1, 1, 1, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 8, 7, 6, 5, 4,
1, 1, 1,
]);
let contract = Pubkey::new(&[
2, 2, 2, 4, 5, 6, 7, 8, 9, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 9, 8, 7, 6, 5, 4,
2, 2, 2,
]);
let date =
DateTime::<Utc>::from_utc(NaiveDate::from_ymd(2016, 7, 8).and_hms(9, 10, 11), Utc);
let date_iso8601 = "2016-07-08T09:10:11Z";
let tx = Transaction::budget_new(&keypair, to, 192, Hash::default());
assert_eq!(
tx.userdata,
vec![
0, 0, 0, 0, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 192, 0, 0, 0, 0, 0, 0, 0, 1, 1,
1, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 8, 7, 6, 5, 4, 1,
1, 1
]
);
let tx = Transaction::budget_new_on_date(
&keypair,
to,
contract,
date,
keypair.pubkey(),
Some(keypair.pubkey()),
192,
Hash::default(),
);
assert_eq!(
tx.userdata,
vec![
0, 0, 0, 0, 192, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 20, 0, 0, 0, 0, 0, 0,
0, 50, 48, 49, 54, 45, 48, 55, 45, 48, 56, 84, 48, 57, 58, 49, 48, 58, 49, 49, 90,
32, 253, 186, 201, 177, 11, 117, 135, 187, 167, 181, 188, 22, 59, 206, 105, 231,
150, 215, 30, 78, 212, 76, 16, 252, 180, 72, 134, 137, 247, 161, 68, 192, 0, 0, 0,
0, 0, 0, 0, 1, 1, 1, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9,
8, 7, 6, 5, 4, 1, 1, 1, 1, 0, 0, 0, 32, 253, 186, 201, 177, 11, 117, 135, 187, 167,
181, 188, 22, 59, 206, 105, 231, 150, 215, 30, 78, 212, 76, 16, 252, 180, 72, 134,
137, 247, 161, 68, 192, 0, 0, 0, 0, 0, 0, 0, 32, 253, 186, 201, 177, 11, 117, 135,
187, 167, 181, 188, 22, 59, 206, 105, 231, 150, 215, 30, 78, 212, 76, 16, 252, 180,
72, 134, 137, 247, 161, 68
]
);
// ApplyTimestamp(date)
let tx = Transaction::budget_new_timestamp(
&keypair,
keypair.pubkey(),
to,
date,
Hash::default(),
);
let mut expected_userdata = vec![1, 0, 0, 0, 20, 0, 0, 0, 0, 0, 0, 0];
expected_userdata.extend(date_iso8601.as_bytes());
assert_eq!(tx.userdata, expected_userdata);
// ApplySignature
let tx = Transaction::budget_new_signature(&keypair, keypair.pubkey(), to, Hash::default());
assert_eq!(tx.userdata, vec![2, 0, 0, 0]);
}
}

347
src/budget_transaction.rs Normal file
View File

@ -0,0 +1,347 @@
//! The `budget_transaction` module provides functionality for creating Budget transactions.
use bincode::{deserialize, serialize};
use budget::{Budget, Condition};
use budget_instruction::{Contract, Instruction, Vote};
use budget_program::BudgetState;
use chrono::prelude::*;
use hash::Hash;
use payment_plan::Payment;
use signature::Keypair;
use solana_program_interface::pubkey::Pubkey;
use transaction::Transaction;
pub trait BudgetTransaction {
fn budget_new_taxed(
from_keypair: &Keypair,
to: Pubkey,
tokens: i64,
fee: i64,
last_id: Hash,
) -> Self;
fn budget_new(from_keypair: &Keypair, to: Pubkey, tokens: i64, last_id: Hash) -> Self;
fn budget_new_timestamp(
from_keypair: &Keypair,
contract: Pubkey,
to: Pubkey,
dt: DateTime<Utc>,
last_id: Hash,
) -> Self;
fn budget_new_signature(
from_keypair: &Keypair,
contract: Pubkey,
to: Pubkey,
last_id: Hash,
) -> Self;
fn budget_new_vote(from_keypair: &Keypair, vote: Vote, last_id: Hash, fee: i64) -> Self;
fn budget_new_on_date(
from_keypair: &Keypair,
to: Pubkey,
contract: Pubkey,
dt: DateTime<Utc>,
dt_pubkey: Pubkey,
cancelable: Option<Pubkey>,
tokens: i64,
last_id: Hash,
) -> Self;
fn budget_new_when_signed(
from_keypair: &Keypair,
to: Pubkey,
contract: Pubkey,
witness: Pubkey,
cancelable: Option<Pubkey>,
tokens: i64,
last_id: Hash,
) -> Self;
fn vote(&self) -> Option<(Pubkey, Vote, Hash)>;
fn instruction(&self) -> Option<Instruction>;
fn verify_plan(&self) -> bool;
}
impl BudgetTransaction for Transaction {
/// Create and sign a new Transaction. Used for unit-testing.
fn budget_new_taxed(
from_keypair: &Keypair,
to: Pubkey,
tokens: i64,
fee: i64,
last_id: Hash,
) -> Self {
let payment = Payment {
tokens: tokens - fee,
to,
};
let budget = Budget::Pay(payment);
let instruction = Instruction::NewContract(Contract { budget, tokens });
let userdata = serialize(&instruction).unwrap();
Self::new(
from_keypair,
&[to],
BudgetState::id(),
userdata,
last_id,
fee,
)
}
/// Create and sign a new Transaction. Used for unit-testing.
fn budget_new(from_keypair: &Keypair, to: Pubkey, tokens: i64, last_id: Hash) -> Self {
Self::budget_new_taxed(from_keypair, to, tokens, 0, last_id)
}
/// Create and sign a new Witness Timestamp. Used for unit-testing.
fn budget_new_timestamp(
from_keypair: &Keypair,
contract: Pubkey,
to: Pubkey,
dt: DateTime<Utc>,
last_id: Hash,
) -> Self {
let instruction = Instruction::ApplyTimestamp(dt);
let userdata = serialize(&instruction).unwrap();
Self::new(
from_keypair,
&[contract, to],
BudgetState::id(),
userdata,
last_id,
0,
)
}
/// Create and sign a new Witness Signature. Used for unit-testing.
fn budget_new_signature(
from_keypair: &Keypair,
contract: Pubkey,
to: Pubkey,
last_id: Hash,
) -> Self {
let instruction = Instruction::ApplySignature;
let userdata = serialize(&instruction).unwrap();
Self::new(
from_keypair,
&[contract, to],
BudgetState::id(),
userdata,
last_id,
0,
)
}
fn budget_new_vote(from_keypair: &Keypair, vote: Vote, last_id: Hash, fee: i64) -> Self {
let instruction = Instruction::NewVote(vote);
let userdata = serialize(&instruction).expect("serialize instruction");
Self::new(from_keypair, &[], BudgetState::id(), userdata, last_id, fee)
}
/// Create and sign a postdated Transaction. Used for unit-testing.
fn budget_new_on_date(
from_keypair: &Keypair,
to: Pubkey,
contract: Pubkey,
dt: DateTime<Utc>,
dt_pubkey: Pubkey,
cancelable: Option<Pubkey>,
tokens: i64,
last_id: Hash,
) -> Self {
let budget = if let Some(from) = cancelable {
Budget::Or(
(Condition::Timestamp(dt, dt_pubkey), Payment { tokens, to }),
(Condition::Signature(from), Payment { tokens, to: from }),
)
} else {
Budget::After(Condition::Timestamp(dt, dt_pubkey), Payment { tokens, to })
};
let instruction = Instruction::NewContract(Contract { budget, tokens });
let userdata = serialize(&instruction).expect("serialize instruction");
Self::new(
from_keypair,
&[contract],
BudgetState::id(),
userdata,
last_id,
0,
)
}
/// Create and sign a multisig Transaction.
fn budget_new_when_signed(
from_keypair: &Keypair,
to: Pubkey,
contract: Pubkey,
witness: Pubkey,
cancelable: Option<Pubkey>,
tokens: i64,
last_id: Hash,
) -> Self {
let budget = if let Some(from) = cancelable {
Budget::Or(
(Condition::Signature(witness), Payment { tokens, to }),
(Condition::Signature(from), Payment { tokens, to: from }),
)
} else {
Budget::After(Condition::Signature(witness), Payment { tokens, to })
};
let instruction = Instruction::NewContract(Contract { budget, tokens });
let userdata = serialize(&instruction).expect("serialize instruction");
Self::new(
from_keypair,
&[contract],
BudgetState::id(),
userdata,
last_id,
0,
)
}
fn vote(&self) -> Option<(Pubkey, Vote, Hash)> {
if let Some(Instruction::NewVote(vote)) = self.instruction() {
Some((*self.from(), vote, self.last_id))
} else {
None
}
}
fn instruction(&self) -> Option<Instruction> {
deserialize(&self.userdata).ok()
}
/// Verify only the payment plan.
fn verify_plan(&self) -> bool {
if let Some(Instruction::NewContract(contract)) = self.instruction() {
self.fee >= 0
&& self.fee <= contract.tokens
&& contract.budget.verify(contract.tokens - self.fee)
} else {
true
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use bincode::{deserialize, serialize};
use signature::KeypairUtil;
#[test]
fn test_claim() {
let keypair = Keypair::new();
let zero = Hash::default();
let tx0 = Transaction::budget_new(&keypair, keypair.pubkey(), 42, zero);
assert!(tx0.verify_plan());
}
#[test]
fn test_transfer() {
let zero = Hash::default();
let keypair0 = Keypair::new();
let keypair1 = Keypair::new();
let pubkey1 = keypair1.pubkey();
let tx0 = Transaction::budget_new(&keypair0, pubkey1, 42, zero);
assert!(tx0.verify_plan());
}
#[test]
fn test_transfer_with_fee() {
let zero = Hash::default();
let keypair0 = Keypair::new();
let pubkey1 = Keypair::new().pubkey();
assert!(Transaction::budget_new_taxed(&keypair0, pubkey1, 1, 1, zero).verify_plan());
assert!(!Transaction::budget_new_taxed(&keypair0, pubkey1, 1, 2, zero).verify_plan());
assert!(!Transaction::budget_new_taxed(&keypair0, pubkey1, 1, -1, zero).verify_plan());
}
#[test]
fn test_serialize_claim() {
let budget = Budget::Pay(Payment {
tokens: 0,
to: Default::default(),
});
let instruction = Instruction::NewContract(Contract { budget, tokens: 0 });
let userdata = serialize(&instruction).unwrap();
let claim0 = Transaction {
keys: vec![],
last_id: Default::default(),
signature: Default::default(),
program_id: Default::default(),
fee: 0,
userdata,
};
let buf = serialize(&claim0).unwrap();
let claim1: Transaction = deserialize(&buf).unwrap();
assert_eq!(claim1, claim0);
}
#[test]
fn test_token_attack() {
let zero = Hash::default();
let keypair = Keypair::new();
let pubkey = keypair.pubkey();
let mut tx = Transaction::budget_new(&keypair, pubkey, 42, zero);
let mut instruction = tx.instruction().unwrap();
if let Instruction::NewContract(ref mut contract) = instruction {
contract.tokens = 1_000_000; // <-- attack, part 1!
if let Budget::Pay(ref mut payment) = contract.budget {
payment.tokens = contract.tokens; // <-- attack, part 2!
}
}
tx.userdata = serialize(&instruction).unwrap();
assert!(tx.verify_plan());
assert!(!tx.verify_signature());
}
#[test]
fn test_hijack_attack() {
let keypair0 = Keypair::new();
let keypair1 = Keypair::new();
let thief_keypair = Keypair::new();
let pubkey1 = keypair1.pubkey();
let zero = Hash::default();
let mut tx = Transaction::budget_new(&keypair0, pubkey1, 42, zero);
let mut instruction = tx.instruction();
if let Some(Instruction::NewContract(ref mut contract)) = instruction {
if let Budget::Pay(ref mut payment) = contract.budget {
payment.to = thief_keypair.pubkey(); // <-- attack!
}
}
tx.userdata = serialize(&instruction).unwrap();
assert!(tx.verify_plan());
assert!(!tx.verify_signature());
}
#[test]
fn test_overspend_attack() {
let keypair0 = Keypair::new();
let keypair1 = Keypair::new();
let zero = Hash::default();
let mut tx = Transaction::budget_new(&keypair0, keypair1.pubkey(), 1, zero);
let mut instruction = tx.instruction().unwrap();
if let Instruction::NewContract(ref mut contract) = instruction {
if let Budget::Pay(ref mut payment) = contract.budget {
payment.tokens = 2; // <-- attack!
}
}
tx.userdata = serialize(&instruction).unwrap();
assert!(!tx.verify_plan());
// Also, ensure all branches of the plan spend all tokens
let mut instruction = tx.instruction().unwrap();
if let Instruction::NewContract(ref mut contract) = instruction {
if let Budget::Pay(ref mut payment) = contract.budget {
payment.tokens = 0; // <-- whoops!
}
}
tx.userdata = serialize(&instruction).unwrap();
assert!(!tx.verify_plan());
}
}

View File

@ -2,7 +2,7 @@ use crdt::{CrdtError, NodeInfo};
use rand::distributions::{Distribution, Weighted, WeightedChoice};
use rand::thread_rng;
use result::Result;
use solana_program_interface::pubkey::Pubkey;
use std;
use std::collections::HashMap;
@ -192,7 +192,8 @@ impl<'a> ChooseGossipPeerStrategy for ChooseWeightedPeerStrategy<'a> {
mod tests {
use choose_gossip_peer_strategy::{ChooseWeightedPeerStrategy, DEFAULT_WEIGHT};
use logger;
use signature::{Keypair, KeypairUtil};
use solana_program_interface::pubkey::Pubkey;
use std;
use std::collections::HashMap;

View File

@ -86,8 +86,7 @@ impl Counter {
.add_field(
"count",
influxdb::Value::Integer(counts as i64 - lastlog as i64),
).to_owned(),
);
self.lastlog
.compare_and_swap(lastlog, counts, Ordering::Relaxed);

View File

@ -13,17 +13,19 @@
//!
//! Bank needs to provide an interface for us to query the stake weight
use bincode::{deserialize, serialize};
use budget_instruction::Vote;
use choose_gossip_peer_strategy::{ChooseGossipPeerStrategy, ChooseWeightedPeerStrategy};
use counter::Counter;
use hash::Hash;
use ledger::LedgerWindow;
use log::Level;
use netutil::{bind_in_range, bind_to, multi_bind_in_range};
use packet::{to_blob, Blob, SharedBlob, BLOB_SIZE};
use rand::{thread_rng, Rng};
use rayon::prelude::*;
use result::{Error, Result};
use signature::{Keypair, KeypairUtil};
use solana_program_interface::pubkey::Pubkey;
use std;
use std::collections::HashMap;
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
@ -33,10 +35,10 @@ use std::thread::{sleep, Builder, JoinHandle};
use std::time::{Duration, Instant};
use streamer::{BlobReceiver, BlobSender};
use timing::{duration_as_ms, timestamp};
use window::{SharedWindow, WindowIndex};
pub const FULLNODE_PORT_RANGE: (u16, u16) = (8000, 10_000);
/// milliseconds we sleep for between gossip requests
const GOSSIP_SLEEP_MILLIS: u64 = 100;
const GOSSIP_PURGE_MILLIS: u64 = 15000;
@ -81,6 +83,8 @@ pub struct ContactInfo {
pub rpu: SocketAddr,
/// transactions address
pub tpu: SocketAddr,
/// storage data address
pub storage_addr: SocketAddr,
/// if this structure changes update this value as well
/// Always update `NodeInfo` version too
/// This separate version for addresses allows us to use the `Vote`
@ -115,6 +119,7 @@ impl NodeInfo {
tvu: SocketAddr,
rpu: SocketAddr,
tpu: SocketAddr,
storage_addr: SocketAddr,
) -> Self {
NodeInfo {
id,
@ -124,6 +129,7 @@ impl NodeInfo {
tvu,
rpu,
tpu,
storage_addr,
version: 0,
},
leader_id: Pubkey::default(),
@ -133,19 +139,30 @@ impl NodeInfo {
}
}
pub fn new_localhost(id: Pubkey) -> Self {
Self::new(
id,
socketaddr!("127.0.0.1:1234"),
socketaddr!("127.0.0.1:1235"),
socketaddr!("127.0.0.1:1236"),
socketaddr!("127.0.0.1:1237"),
socketaddr!("127.0.0.1:1238"),
)
}
#[cfg(test)]
/// NodeInfo with unspecified addresses for adversarial testing.
pub fn new_unspecified() -> Self {
let addr = socketaddr!(0, 0);
assert!(addr.ip().is_unspecified());
Self::new(Keypair::new().pubkey(), addr, addr, addr, addr, addr)
}
#[cfg(test)]
/// NodeInfo with multicast addresses for adversarial testing.
pub fn new_multicast() -> Self {
let addr = socketaddr!("224.0.1.255:1000");
assert!(addr.ip().is_multicast());
Self::new(Keypair::new().pubkey(), addr, addr, addr, addr, addr)
}
fn next_port(addr: &SocketAddr, nxt: u16) -> SocketAddr {
let mut nxt_addr = *addr;
@ -163,6 +180,7 @@ impl NodeInfo {
replicate_addr,
requests_addr,
transactions_addr,
"0.0.0.0:0".parse().unwrap(),
)
}
pub fn new_with_socketaddr(bind_addr: &SocketAddr) -> Self {
@ -172,7 +190,7 @@ impl NodeInfo {
//
pub fn new_entry_point(gossip_addr: &SocketAddr) -> Self {
let daddr: SocketAddr = socketaddr!("0.0.0.0:0");
NodeInfo::new(Pubkey::default(), *gossip_addr, daddr, daddr, daddr, daddr)
}
}
@ -205,7 +223,14 @@ pub struct Crdt {
/// last time we heard from anyone getting a message from this public key
/// these are rumors and shouldn't be trusted directly
external_liveness: HashMap<Pubkey, HashMap<Pubkey, u64>>,
/// TODO: Clearly not the correct implementation of this, but a temporary abstraction
/// for testing
pub scheduled_leaders: HashMap<u64, Pubkey>,
// TODO: Is there a better way to do this? We didn't make this a constant because
// we want to be able to set it in integration tests so that the tests don't time out.
pub leader_rotation_interval: u64,
}
// TODO These messages should be signed, and go through the gpu pipeline for spam filtering
#[derive(Serialize, Deserialize, Debug)]
enum Protocol {
@ -235,6 +260,8 @@ impl Crdt {
external_liveness: HashMap::new(),
id: node_info.id,
update_index: 1,
scheduled_leaders: HashMap::new(),
leader_rotation_interval: 100,
};
me.local.insert(node_info.id, me.update_index);
me.table.insert(node_info.id, node_info);
@ -277,8 +304,7 @@ impl Crdt {
node.contact_info.rpu.to_string(),
node.contact_info.tpu.to_string()
)
}).collect();
format!(
" NodeInfo.contact_info | Node identifier\n\
@ -298,6 +324,38 @@ impl Crdt {
self.insert(&me);
}
// TODO: Dummy leader scheduler, need to implement actual leader scheduling.
pub fn get_scheduled_leader(&self, entry_height: u64) -> Option<Pubkey> {
match self.scheduled_leaders.get(&entry_height) {
Some(x) => Some(*x),
None => Some(self.my_data().leader_id),
}
}
pub fn set_leader_rotation_interval(&mut self, leader_rotation_interval: u64) {
self.leader_rotation_interval = leader_rotation_interval;
}
pub fn get_leader_rotation_interval(&self) -> u64 {
self.leader_rotation_interval
}
// TODO: Dummy leader schedule setter, need to implement actual leader scheduling.
pub fn set_scheduled_leader(&mut self, entry_height: u64, new_leader_id: Pubkey) -> () {
self.scheduled_leaders.insert(entry_height, new_leader_id);
}
pub fn get_valid_peers(&self) -> Vec<NodeInfo> {
let me = self.my_data().id;
self.table
.values()
.into_iter()
.filter(|x| x.id != me)
.filter(|x| Crdt::is_valid_address(&x.contact_info.rpu))
.cloned()
.collect()
}
pub fn get_external_liveness_entry(&self, key: &Pubkey) -> Option<&HashMap<Pubkey, u64>> {
self.external_liveness.get(key)
}
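These scheduler hooks are what the broadcast and write stages poll at each interval boundary; a rough sketch of that check, as a hypothetical helper (not code from this diff) assuming the `Crdt` and `Pubkey` types in scope here:

use std::sync::{Arc, RwLock};

// Hypothetical helper: returns true when this node should hand off
// leadership at `entry_height`, per the dummy scheduler above.
fn should_rotate(crdt: &Arc<RwLock<Crdt>>, entry_height: u64, my_id: Pubkey) -> bool {
    let rcrdt = crdt.read().unwrap();
    entry_height % rcrdt.get_leader_rotation_interval() == 0
        && rcrdt.get_scheduled_leader(entry_height) != Some(my_id)
}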
@ -402,8 +460,7 @@ impl Crdt {
trace!("{} purge skipped {} {} {}", self.id, k, now - v, limit);
None
}
}).collect();
inc_new_counter_info!("crdt-purge-count", dead_ids.len());
@ -450,8 +507,7 @@ impl Crdt {
trace!("{}:broadcast node {} {}", me.id, v.id, v.contact_info.tvu);
true
}
}).cloned()
.collect();
cloned_table
}
@ -460,6 +516,8 @@ impl Crdt {
/// # Remarks
/// We need to avoid having obj locked while doing any io, such as the `send_to`
pub fn broadcast(
crdt: &Arc<RwLock<Crdt>>,
leader_rotation_interval: u64,
me: &NodeInfo,
broadcast_table: &[NodeInfo],
window: &SharedWindow,
@ -468,7 +526,7 @@ impl Crdt {
received_index: u64,
) -> Result<()> {
if broadcast_table.is_empty() {
debug!("{}:not enough peers in crdt table", me.id);
inc_new_counter_info!("crdt-broadcast-not_enough_peers_error", 1);
Err(CrdtError::NoPeers)?;
}
@ -483,8 +541,10 @@ impl Crdt {
let old_transmit_index = transmit_index.data;
// enumerate all the blobs in the window, those are the indices
// transmit them to nodes, starting from a different node. Add one
// to the capacity in case we want to send an extra blob notifying the
// next leader about the blob right before leader rotation
let mut orders = Vec::with_capacity((received_index - transmit_index.data + 1) as usize);
let window_l = window.read().unwrap();
let mut br_idx = transmit_index.data as usize % broadcast_table.len();
@ -499,6 +559,21 @@ impl Crdt {
br_idx
);
// Make sure the next leader in line knows about the last entry before rotation
// so he can initiate repairs if necessary
let entry_height = idx + 1;
if entry_height % leader_rotation_interval == 0 {
let next_leader_id = crdt.read().unwrap().get_scheduled_leader(entry_height);
if next_leader_id.is_some() && next_leader_id != Some(me.id) {
let info_result = broadcast_table
.iter()
.position(|n| n.id == next_leader_id.unwrap());
if let Some(index) = info_result {
orders.push((window_l[w_idx].data.clone(), &broadcast_table[index]));
}
}
}
orders.push((window_l[w_idx].data.clone(), &broadcast_table[br_idx]));
br_idx += 1;
br_idx %= broadcast_table.len();
@ -531,7 +606,7 @@ impl Crdt {
// only leader should be broadcasting
assert!(me.leader_id != v.id);
let bl = b.unwrap();
let blob = bl.read().unwrap();
//TODO profile this, may need multiple sockets for par_iter
trace!(
"{}: BROADCAST idx: {} sz: {} to {},{} coding: {}",
@ -552,8 +627,7 @@ impl Crdt {
v.contact_info.tvu
);
e
}).collect();
trace!("broadcast results {}", errs.len());
for e in errs {
@ -607,8 +681,7 @@ impl Crdt {
} else {
true
}
}).collect();
trace!("retransmit orders {}", orders.len());
let errs: Vec<_> = orders
.par_iter()
@ -623,8 +696,7 @@ impl Crdt {
//TODO profile this, may need multiple sockets for par_iter
assert!(rblob.meta.size <= BLOB_SIZE);
s.send_to(&rblob.data[..rblob.meta.size], &v.contact_info.tvu)
}).collect();
for e in errs {
if let Err(e) = &e {
inc_new_counter_info!("crdt-retransmit-send_to_error", 1, 1);
@ -666,8 +738,7 @@ impl Crdt {
r.id != Pubkey::default()
&& (Self::is_valid_address(&r.contact_info.tpu)
|| Self::is_valid_address(&r.contact_info.tvu))
}).map(|x| x.ledger_state.last_id)
.collect()
}
@ -702,8 +773,7 @@ impl Crdt {
v.id != self.id
&& !v.contact_info.ncp.ip().is_unspecified()
&& !v.contact_info.ncp.ip().is_multicast()
}).collect();
let choose_peer_strategy = ChooseWeightedPeerStrategy::new(
&self.remote,
@ -745,11 +815,7 @@ impl Crdt {
}
/// At random pick a node and try to get updated changes from them
fn run_gossip(obj: &Arc<RwLock<Self>>, blob_sender: &BlobSender) -> Result<()> {
//TODO we need to keep track of stakes and weight the selection by stake size
//TODO cache sockets
@ -762,7 +828,7 @@ impl Crdt {
// TODO this will get chatty, so we need to first ask for number of updates since
// then only ask for specific data that we dont have
let blob = to_blob(req, remote_gossip_addr)?;
blob_sender.send(vec![blob])?;
Ok(())
}
@ -846,7 +912,6 @@ impl Crdt {
/// randomly pick a node and ask them for updates asynchronously
pub fn gossip(
obj: Arc<RwLock<Self>>,
blob_sender: BlobSender,
exit: Arc<AtomicBool>,
) -> JoinHandle<()> {
@ -854,7 +919,7 @@ impl Crdt {
.name("solana-gossip".to_string())
.spawn(move || loop {
let start = timestamp();
let _ = Self::run_gossip(&obj, &blob_sender);
if exit.load(Ordering::Relaxed) {
return;
}
@ -867,8 +932,7 @@ impl Crdt {
let time_left = GOSSIP_SLEEP_MILLIS - elapsed;
sleep(Duration::from_millis(time_left));
}
}).unwrap()
}
fn run_window_request(
from: &NodeInfo,
@ -877,10 +941,9 @@ impl Crdt {
ledger_window: &mut Option<&mut LedgerWindow>,
me: &NodeInfo,
ix: u64,
) -> Option<SharedBlob> {
let pos = (ix as usize) % window.read().unwrap().len();
if let Some(ref mut blob) = &mut window.write().unwrap()[pos].data {
let mut wblob = blob.write().unwrap();
let blob_ix = wblob.get_index().expect("run_window_request get_index");
if blob_ix == ix {
@ -900,7 +963,7 @@ impl Crdt {
sender_id = me.id
}
let out = SharedBlob::default();
// copy to avoid doing IO inside the lock
{
@ -930,7 +993,6 @@ impl Crdt {
inc_new_counter_info!("crdt-window-request-ledger", 1);
let out = entry.to_blob(
Some(ix),
Some(me.id), // causes retransmission if I'm the leader
Some(from_addr),
@ -957,18 +1019,12 @@ impl Crdt {
obj: &Arc<RwLock<Self>>,
window: &SharedWindow,
ledger_window: &mut Option<&mut LedgerWindow>,
blob: &Blob,
) -> Option<SharedBlob> {
match deserialize(&blob.data[..blob.meta.size]) {
Ok(request) => {
Crdt::handle_protocol(obj, &blob.meta.addr(), request, window, ledger_window)
}
Err(_) => {
warn!("deserialize crdt packet failed");
None
@ -982,7 +1038,6 @@ impl Crdt {
request: Protocol,
window: &SharedWindow,
ledger_window: &mut Option<&mut LedgerWindow>,
) -> Option<SharedBlob> {
match request {
// TODO sigverify these
@ -1051,7 +1106,7 @@ impl Crdt {
} else {
let rsp = Protocol::ReceiveUpdates(from_id, ups, data, liveness);
if let Ok(r) = to_blob(rsp, from.contact_info.ncp) {
trace!(
"sending updates me {} len {} to {} {}",
id,
@ -1108,15 +1163,8 @@ impl Crdt {
let me = me.read().unwrap().my_data().clone();
inc_new_counter_info!("crdt-window-request-recv", 1);
trace!("{}: received RequestWindowIndex {} {} ", me.id, from.id, ix,);
let res =
Self::run_window_request(&from, &from_addr, &window, ledger_window, &me, ix);
report_time_spent(
"RequestWindowIndex",
&now.elapsed(),
@ -1132,7 +1180,6 @@ impl Crdt {
obj: &Arc<RwLock<Self>>,
window: &SharedWindow,
ledger_window: &mut Option<&mut LedgerWindow>,
requests_receiver: &BlobReceiver,
response_sender: &BlobSender,
) -> Result<()> {
@ -1144,16 +1191,10 @@ impl Crdt {
}
let mut resps = Vec::new();
for req in reqs {
if let Some(resp) = Self::handle_blob(obj, window, ledger_window, &req.read().unwrap())
{
resps.push(resp);
}
}
response_sender.send(resps)?;
Ok(())
@ -1162,7 +1203,6 @@ impl Crdt {
me: Arc<RwLock<Self>>,
window: SharedWindow,
ledger_path: Option<&str>,
requests_receiver: BlobReceiver,
response_sender: BlobSender,
exit: Arc<AtomicBool>,
@ -1176,7 +1216,6 @@ impl Crdt {
&me,
&window,
&mut ledger_window.as_mut(),
&requests_receiver,
&response_sender,
);
@ -1191,8 +1230,7 @@ impl Crdt {
me.table.len()
);
}
}).unwrap()
}
fn is_valid_ip(addr: IpAddr) -> bool {
@ -1212,15 +1250,16 @@ impl Crdt {
let pubkey = Keypair::new().pubkey();
let daddr = socketaddr_any!();
let node = NodeInfo::new(pubkey, daddr, daddr, daddr, daddr, daddr);
(node, gossip_socket)
}
}
#[derive(Debug)]
pub struct Sockets {
pub gossip: UdpSocket,
pub requests: UdpSocket,
pub replicate: Vec<UdpSocket>,
pub transaction: Vec<UdpSocket>,
pub respond: UdpSocket,
pub broadcast: UdpSocket,
@ -1228,6 +1267,7 @@ pub struct Sockets {
pub retransmit: UdpSocket,
}
#[derive(Debug)]
pub struct Node {
pub info: NodeInfo,
pub sockets: Sockets,
@ -1248,19 +1288,21 @@ impl Node {
let respond = UdpSocket::bind("0.0.0.0:0").unwrap();
let broadcast = UdpSocket::bind("0.0.0.0:0").unwrap();
let retransmit = UdpSocket::bind("0.0.0.0:0").unwrap();
let storage = UdpSocket::bind("0.0.0.0:0").unwrap();
let info = NodeInfo::new(
pubkey,
gossip.local_addr().unwrap(),
replicate.local_addr().unwrap(),
requests.local_addr().unwrap(),
transaction.local_addr().unwrap(),
storage.local_addr().unwrap(),
);
Node {
info,
sockets: Sockets {
gossip,
requests,
replicate: vec![replicate],
transaction: vec![transaction],
respond,
broadcast,
@ -1280,15 +1322,18 @@ impl Node {
bind()
};
let (replicate_port, replicate_sockets) =
multi_bind_in_range(FULLNODE_PORT_RANGE, 8).expect("tvu multi_bind");
let (requests_port, requests) = bind();
let (transaction_port, transaction_sockets) =
multi_bind_in_range(FULLNODE_PORT_RANGE, 32).expect("tpu multi_bind");
let (_, repair) = bind();
let (_, broadcast) = bind();
let (_, retransmit) = bind();
let (storage_port, _) = bind();
// Responses are sent from the same Udp port as requests are received
// from, in hopes that a NAT sitting in the middle will route the
@ -1301,6 +1346,7 @@ impl Node {
SocketAddr::new(ncp.ip(), replicate_port),
SocketAddr::new(ncp.ip(), requests_port),
SocketAddr::new(ncp.ip(), transaction_port),
SocketAddr::new(ncp.ip(), storage_port),
);
trace!("new NodeInfo: {:?}", info);
@ -1309,7 +1355,7 @@ impl Node {
sockets: Sockets {
gossip,
requests,
replicate: replicate_sockets,
transaction: transaction_sockets,
respond,
broadcast,
@ -1329,6 +1375,7 @@ fn report_time_spent(label: &str, time: &Duration, extra: &str) {
#[cfg(test)]
mod tests {
use budget_instruction::Vote;
use crdt::{
Crdt, CrdtError, Node, NodeInfo, Protocol, FULLNODE_PORT_RANGE, GOSSIP_PURGE_MILLIS,
GOSSIP_SLEEP_MILLIS, MIN_TABLE_SIZE,
@ -1337,9 +1384,10 @@ mod tests {
use hash::{hash, Hash};
use ledger::{LedgerWindow, LedgerWriter};
use logger;
use packet::SharedBlob;
use result::Error;
use signature::{Keypair, KeypairUtil};
use solana_program_interface::pubkey::Pubkey;
use std::fs::remove_dir_all;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::sync::atomic::{AtomicBool, Ordering};
@ -1347,18 +1395,11 @@ mod tests {
use std::sync::{Arc, RwLock};
use std::thread::sleep;
use std::time::Duration;
use window::default_window;
#[test]
fn insert_test() {
let mut d = NodeInfo::new_localhost(Keypair::new().pubkey());
assert_eq!(d.version, 0);
let mut crdt = Crdt::new(d.clone()).unwrap();
assert_eq!(crdt.table[&d.id].version, 0);
@ -1458,27 +1499,9 @@ mod tests {
}
#[test]
fn update_test() {
let d1 = NodeInfo::new_localhost(Keypair::new().pubkey());
let d2 = NodeInfo::new_localhost(Keypair::new().pubkey());
let d3 = NodeInfo::new_localhost(Keypair::new().pubkey());
let mut crdt = Crdt::new(d1.clone()).expect("Crdt::new");
let (key, ix, ups) = crdt.get_updates_since(0);
assert_eq!(key, d1.id);
@ -1514,13 +1537,7 @@ mod tests {
}
#[test]
fn window_index_request() {
let me = NodeInfo::new_localhost(Keypair::new().pubkey());
let mut crdt = Crdt::new(me).expect("Crdt::new");
let rv = crdt.window_index_request(0);
assert_matches!(rv, Err(Error::CrdtError(CrdtError::NoPeers)));
@ -1532,6 +1549,7 @@ mod tests {
socketaddr!([127, 0, 0, 1], 1235),
socketaddr!([127, 0, 0, 1], 1236),
socketaddr!([127, 0, 0, 1], 1237),
socketaddr!([127, 0, 0, 1], 1238),
);
crdt.insert(&nxt);
let rv = crdt.window_index_request(0).unwrap();
@ -1545,6 +1563,7 @@ mod tests {
socketaddr!([127, 0, 0, 1], 1235),
socketaddr!([127, 0, 0, 1], 1236),
socketaddr!([127, 0, 0, 1], 1237),
socketaddr!([127, 0, 0, 1], 1238),
);
crdt.insert(&nxt);
let mut one = false;
@ -1570,6 +1589,7 @@ mod tests {
socketaddr!("127.0.0.1:127"),
socketaddr!("127.0.0.1:127"),
socketaddr!("127.0.0.1:127"),
socketaddr!("127.0.0.1:127"),
);
let mut crdt = Crdt::new(me).expect("Crdt::new");
@ -1588,23 +1608,11 @@ mod tests {
/// test that gossip requests are eventually generated for all nodes
#[test]
fn gossip_request() {
let me = NodeInfo::new_localhost(Keypair::new().pubkey());
let mut crdt = Crdt::new(me.clone()).expect("Crdt::new");
let rv = crdt.gossip_request();
assert_matches!(rv, Err(Error::CrdtError(CrdtError::NoPeers)));
let nxt1 = NodeInfo::new_localhost(Keypair::new().pubkey());
crdt.insert(&nxt1);
@ -1616,10 +1624,9 @@ mod tests {
// check that the service works
// and that it eventually produces a request for both nodes
let (sender, reader) = channel();
let exit = Arc::new(AtomicBool::new(false));
let obj = Arc::new(RwLock::new(crdt));
let thread = Crdt::gossip(obj, sender, exit.clone());
let mut one = false;
let mut two = false;
for _ in 0..30 {
@ -1727,44 +1734,20 @@ mod tests {
socketaddr!("127.0.0.1:1235"),
socketaddr!("127.0.0.1:1236"),
socketaddr!("127.0.0.1:1237"),
socketaddr!("127.0.0.1:1238"),
);
let rv = Crdt::run_window_request(&me, &socketaddr_any!(), &window, &mut None, &me, 0);
assert!(rv.is_none());
let out = SharedBlob::default();
out.write().unwrap().meta.size = 200;
window.write().unwrap()[0].data = Some(out);
let rv = Crdt::run_window_request(&me, &socketaddr_any!(), &window, &mut None, &me, 0);
assert!(rv.is_some());
let v = rv.unwrap();
//test we copied the blob
assert_eq!(v.read().unwrap().meta.size, 200);
let len = window.read().unwrap().len() as u64;
let rv = Crdt::run_window_request(&me, &socketaddr_any!(), &window, &mut None, &me, len);
assert!(rv.is_none());
fn tmp_ledger(name: &str) -> String {
@ -1793,7 +1776,6 @@ mod tests {
&mut Some(&mut ledger_window),
&me,
1,
);
assert!(rv.is_some());
@ -1810,20 +1792,11 @@ mod tests {
let mock_peer = NodeInfo::new_with_socketaddr(&socketaddr!("127.0.0.1:1234"));
// Simulate handling a repair request from mock_peer
let rv =
Crdt::run_window_request(&mock_peer, &socketaddr_any!(), &window, &mut None, &me, 0);
assert!(rv.is_none());
let blob = SharedBlob::default();
let blob_size = 200;
blob.write().unwrap().meta.size = blob_size;
window.write().unwrap()[0].data = Some(blob);
@ -1837,7 +1810,6 @@ mod tests {
&mut None,
&me,
0,
).unwrap();
let blob = shared_blob.read().unwrap();
// Test we copied the blob
@ -1895,6 +1867,7 @@ mod tests {
socketaddr_any!(),
socketaddr!("127.0.0.1:1236"),
socketaddr_any!(),
socketaddr_any!(),
);
leader3.ledger_state.last_id = hash(b"3");
let mut crdt = Crdt::new(leader0.clone()).expect("Crdt::new");
@ -1911,7 +1884,6 @@ mod tests {
fn protocol_requestupdate_alive() {
logger::setup();
let window = Arc::new(RwLock::new(default_window()));
let node = NodeInfo::new_with_socketaddr(&socketaddr!("127.0.0.1:1234"));
let node_with_same_addr = NodeInfo::new_with_socketaddr(&socketaddr!("127.0.0.1:1234"));
@ -1925,37 +1897,18 @@ mod tests {
let request = Protocol::RequestUpdates(1, node.clone());
assert!(
Crdt::handle_protocol(&obj, &node.contact_info.ncp, request, &window, &mut None,)
.is_none()
);
let request = Protocol::RequestUpdates(1, node_with_same_addr.clone());
assert!(
Crdt::handle_protocol(
&obj,
&node.contact_info.ncp,
request,
&window,
&mut None,
&recycler
).is_none()
Crdt::handle_protocol(&obj, &node.contact_info.ncp, request, &window, &mut None,)
.is_none()
);
let request = Protocol::RequestUpdates(1, node_with_diff_addr.clone());
Crdt::handle_protocol(
&obj,
&node.contact_info.ncp,
request,
&window,
&mut None,
&recycler,
);
Crdt::handle_protocol(&obj, &node.contact_info.ncp, request, &window, &mut None);
let me = obj.write().unwrap();
@ -1984,13 +1937,7 @@ mod tests {
#[test]
fn test_default_leader() {
logger::setup();
let node_info = NodeInfo::new(
Keypair::new().pubkey(),
socketaddr!("127.0.0.1:1234"),
socketaddr!("127.0.0.1:1235"),
socketaddr!("127.0.0.1:1236"),
socketaddr!("127.0.0.1:1237"),
);
let node_info = NodeInfo::new_localhost(Keypair::new().pubkey());
let mut crdt = Crdt::new(node_info).unwrap();
let network_entry_point = NodeInfo::new_entry_point(&socketaddr!("127.0.0.1:1239"));
crdt.insert(&network_entry_point);
@ -2002,7 +1949,10 @@ mod tests {
let ip = Ipv4Addr::from(0);
let node = Node::new_with_external_ip(Keypair::new().pubkey(), &socketaddr!(ip, 0));
assert_eq!(node.sockets.gossip.local_addr().unwrap().ip(), ip);
assert_eq!(node.sockets.replicate.local_addr().unwrap().ip(), ip);
assert!(node.sockets.replicate.len() > 1);
for tx_socket in node.sockets.replicate.iter() {
assert_eq!(tx_socket.local_addr().unwrap().ip(), ip);
}
assert_eq!(node.sockets.requests.local_addr().unwrap().ip(), ip);
assert!(node.sockets.transaction.len() > 1);
for tx_socket in node.sockets.transaction.iter() {
@ -2012,8 +1962,12 @@ mod tests {
assert!(node.sockets.gossip.local_addr().unwrap().port() >= FULLNODE_PORT_RANGE.0);
assert!(node.sockets.gossip.local_addr().unwrap().port() < FULLNODE_PORT_RANGE.1);
assert!(node.sockets.replicate.local_addr().unwrap().port() >= FULLNODE_PORT_RANGE.0);
assert!(node.sockets.replicate.local_addr().unwrap().port() < FULLNODE_PORT_RANGE.1);
let tx_port = node.sockets.replicate[0].local_addr().unwrap().port();
assert!(tx_port >= FULLNODE_PORT_RANGE.0);
assert!(tx_port < FULLNODE_PORT_RANGE.1);
for tx_socket in node.sockets.replicate.iter() {
assert_eq!(tx_socket.local_addr().unwrap().port(), tx_port);
}
assert!(node.sockets.requests.local_addr().unwrap().port() >= FULLNODE_PORT_RANGE.0);
assert!(node.sockets.requests.local_addr().unwrap().port() < FULLNODE_PORT_RANGE.1);
let tx_port = node.sockets.transaction[0].local_addr().unwrap().port();
@ -2031,7 +1985,10 @@ mod tests {
let ip = IpAddr::V4(Ipv4Addr::from(0));
let node = Node::new_with_external_ip(Keypair::new().pubkey(), &socketaddr!(0, 8050));
assert_eq!(node.sockets.gossip.local_addr().unwrap().ip(), ip);
assert_eq!(node.sockets.replicate.local_addr().unwrap().ip(), ip);
assert!(node.sockets.replicate.len() > 1);
for tx_socket in node.sockets.replicate.iter() {
assert_eq!(tx_socket.local_addr().unwrap().ip(), ip);
}
assert_eq!(node.sockets.requests.local_addr().unwrap().ip(), ip);
assert!(node.sockets.transaction.len() > 1);
for tx_socket in node.sockets.transaction.iter() {
@ -2040,8 +1997,12 @@ mod tests {
assert_eq!(node.sockets.repair.local_addr().unwrap().ip(), ip);
assert_eq!(node.sockets.gossip.local_addr().unwrap().port(), 8050);
assert!(node.sockets.replicate.local_addr().unwrap().port() >= FULLNODE_PORT_RANGE.0);
assert!(node.sockets.replicate.local_addr().unwrap().port() < FULLNODE_PORT_RANGE.1);
let tx_port = node.sockets.replicate[0].local_addr().unwrap().port();
assert!(tx_port >= FULLNODE_PORT_RANGE.0);
assert!(tx_port < FULLNODE_PORT_RANGE.1);
for tx_socket in node.sockets.replicate.iter() {
assert_eq!(tx_socket.local_addr().unwrap().port(), tx_port);
}
assert!(node.sockets.requests.local_addr().unwrap().port() >= FULLNODE_PORT_RANGE.0);
assert!(node.sockets.requests.local_addr().unwrap().port() < FULLNODE_PORT_RANGE.1);
let tx_port = node.sockets.transaction[0].local_addr().unwrap().port();


@ -4,15 +4,25 @@
//! checking requests against a request cap for a given time_slice
//! and (to come) an IP rate limit.
use bincode::{deserialize, serialize};
use bytes::Bytes;
use influx_db_client as influxdb;
use metrics;
use signature::Signature;
use signature::{Keypair, Pubkey};
use signature::{Keypair, Signature};
use solana_program_interface::pubkey::Pubkey;
use std::io;
use std::io::{Error, ErrorKind};
use std::net::{IpAddr, SocketAddr, UdpSocket};
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
use std::sync::mpsc::Sender;
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::Duration;
use system_transaction::SystemTransaction;
use thin_client::{poll_gossip_for_leader, ThinClient};
use tokio;
use tokio::net::TcpListener;
use tokio::prelude::*;
use tokio_codec::{BytesCodec, Decoder};
use transaction::Transaction;
pub const TIME_SLICE: u64 = 60;
@ -118,7 +128,7 @@ impl Drone {
airdrop_request_amount, client_pubkey
);
request_amount = airdrop_request_amount;
Transaction::new(
Transaction::system_new(
&self.mint_keypair,
client_pubkey,
airdrop_request_amount as i64,
@ -134,12 +144,10 @@ impl Drone {
.add_field(
"request_amount",
influxdb::Value::Integer(request_amount as i64),
)
.add_field(
).add_field(
"request_current",
influxdb::Value::Integer(self.request_current as i64),
)
.to_owned(),
).to_owned(),
);
client.retry_transfer_signed(&tx, 10)
} else {
@ -154,6 +162,65 @@ impl Drop for Drone {
}
}
pub fn run_local_drone(mint_keypair: Keypair, network: SocketAddr, sender: Sender<SocketAddr>) {
thread::spawn(move || {
let drone_addr = socketaddr!(0, 0);
let drone = Arc::new(Mutex::new(Drone::new(
mint_keypair,
drone_addr,
network,
None,
None,
)));
let socket = TcpListener::bind(&drone_addr).unwrap();
sender.send(socket.local_addr().unwrap()).unwrap();
info!("Drone started. Listening on: {}", drone_addr);
let done = socket
.incoming()
.map_err(|e| debug!("failed to accept socket; error = {:?}", e))
.for_each(move |socket| {
let drone2 = drone.clone();
let framed = BytesCodec::new().framed(socket);
let (writer, reader) = framed.split();
let processor = reader.and_then(move |bytes| {
let req: DroneRequest = deserialize(&bytes).or_else(|err| {
Err(io::Error::new(
io::ErrorKind::Other,
format!("deserialize packet in drone: {:?}", err),
))
})?;
info!("Airdrop requested...");
let res1 = drone2.lock().unwrap().send_airdrop(req);
match res1 {
Ok(_) => info!("Airdrop sent!"),
Err(_) => info!("Request limit reached for this time slice"),
}
let response = res1?;
info!("Airdrop tx signature: {:?}", response);
let response_vec = serialize(&response).or_else(|err| {
Err(io::Error::new(
io::ErrorKind::Other,
format!("serialize signature in drone: {:?}", err),
))
})?;
let response_bytes = Bytes::from(response_vec.clone());
Ok(response_bytes)
});
let server = writer
.send_all(processor.or_else(|err| {
Err(io::Error::new(
io::ErrorKind::Other,
format!("Drone response: {:?}", err),
))
})).then(|_| Ok(()));
tokio::spawn(server)
});
tokio::run(done);
});
}
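For reference, a client of this endpoint writes one bincode-serialized DroneRequest over TCP and reads back a bincode-serialized Signature. A minimal blocking sketch; the GetAirdrop variant and its field names are inferred from the log fields above, not confirmed by this diff:
fn request_airdrop_sketch(drone_addr: &SocketAddr, client_pubkey: Pubkey) -> io::Result<Signature> {
    use std::io::{Read, Write};
    use std::net::TcpStream;
    // Hypothetical request shape; see DroneRequest in this module.
    let req = DroneRequest::GetAirdrop {
        airdrop_request_amount: 50,
        client_pubkey,
    };
    let mut stream = TcpStream::connect(drone_addr)?;
    stream.write_all(&serialize(&req).unwrap())?;
    let mut buf = Vec::new();
    stream.read_to_end(&mut buf)?; // the drone replies with the airdrop's Signature
    deserialize(&buf).map_err(|err| io::Error::new(io::ErrorKind::Other, format!("{:?}", err)))
}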
#[cfg(test)]
mod tests {
use bank::Bank;
@ -269,8 +336,10 @@ mod tests {
&[],
leader,
None,
Some(&ledger_path),
&ledger_path,
false,
None,
Some(0),
);
let mut addr: SocketAddr = "0.0.0.0:9900".parse().expect("bind to drone socket");
@ -307,7 +376,7 @@ mod tests {
let leader_keypair = Keypair::new();
let leader = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
let leader_data = leader.info.clone();
let server = Fullnode::new(leader, &ledger_path, leader_keypair, None, false);
let server = Fullnode::new(leader, &ledger_path, leader_keypair, None, false, None);
let requests_socket = UdpSocket::bind("0.0.0.0:0").expect("drone bind to requests socket");
let transactions_socket =

src/dynamic_program.rs Normal file

@ -0,0 +1,245 @@
extern crate bincode;
extern crate generic_array;
use libc;
use libloading;
use solana_program_interface::account::KeyedAccount;
use std::path::PathBuf;
#[cfg(debug_assertions)]
const CARGO_PROFILE: &str = "debug";
#[cfg(not(debug_assertions))]
const CARGO_PROFILE: &str = "release";
/// Dynamic link library prefix
#[cfg(unix)]
const PLATFORM_FILE_PREFIX: &str = "lib";
/// Dynamic link library prefix
#[cfg(windows)]
const PLATFORM_FILE_PREFIX: &str = "";
/// Dynamic link library file extension specific to the platform
#[cfg(any(target_os = "macos", target_os = "ios"))]
const PLATFORM_FILE_EXTENSION: &str = "dylib";
/// Dynamic link library file extension specific to the platform
#[cfg(all(unix, not(any(target_os = "macos", target_os = "ios"))))]
const PLATFORM_FILE_EXTENSION: &str = "so";
/// Dynamic link library file extension specific to the platform
#[cfg(windows)]
const PLATFORM_FILE_EXTENSION: &str = "dll";
/// Creates a platform-specific file path
fn create_library_path(name: &str) -> PathBuf {
let mut path = PathBuf::new();
path.push("target");
path.push(CARGO_PROFILE);
path.push("deps");
path.push(PLATFORM_FILE_PREFIX.to_string() + name);
path.set_extension(PLATFORM_FILE_EXTENSION);
path
}
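Concretely, this maps a program name to the platform's cdylib artifact under target/; for illustration:
// create_library_path("noop") resolves to, e.g.:
//   Linux, debug profile:     target/debug/deps/libnoop.so
//   macOS, release profile:   target/release/deps/libnoop.dylib
//   Windows, release profile: target/release/deps/noop.dll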
// All programs export a symbol named process()
const ENTRYPOINT: &str = "process";
type Entrypoint = unsafe extern "C" fn(infos: &mut Vec<KeyedAccount>, data: &[u8]);
#[derive(Debug)]
pub enum DynamicProgram {
/// Native program
/// * Transaction::keys[0..] - program dependent
/// * name - name of the program, translated to a file path of the program module
/// * userdata - program specific user data
Native {
name: String,
library: libloading::Library,
},
/// Bpf program
/// * Transaction::keys[0..] - program dependent
/// * TODO BPF specific stuff
/// * userdata - program specific user data
Bpf { userdata: Vec<u8> },
}
impl DynamicProgram {
pub fn new(name: String) -> Self {
// TODO determine what kind of module to load
// create native program
let path = create_library_path(&name);
// TODO linux tls bug can cause crash on dlclose, workaround by never unloading
let os_lib =
libloading::os::unix::Library::open(Some(path), libc::RTLD_NODELETE | libc::RTLD_NOW)
.unwrap();
let library = libloading::Library::from(os_lib);
DynamicProgram::Native { name, library }
}
pub fn call(&self, infos: &mut Vec<KeyedAccount>, data: &[u8]) {
match self {
DynamicProgram::Native { name, library } => unsafe {
let entrypoint: libloading::Symbol<Entrypoint> =
match library.get(ENTRYPOINT.as_bytes()) {
Ok(s) => s,
Err(e) => panic!(
"{:?} Unable to find {:?} in program {}",
e, ENTRYPOINT, name
),
};
entrypoint(infos, data);
},
DynamicProgram::Bpf { .. } => {
// TODO BPF
println!{"Bpf program not supported"}
}
}
}
}
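Any native program loadable here just exports a C-ABI symbol named process with the Entrypoint signature above. A minimal sketch of such a crate (hypothetical cdylib; only the symbol name and signature come from this file):
// lib.rs of a hypothetical `noop` program crate, built as a cdylib
extern crate solana_program_interface;
use solana_program_interface::account::KeyedAccount;

#[no_mangle]
pub extern "C" fn process(infos: &mut Vec<KeyedAccount>, _data: &[u8]) {
    // no-op: a real program would inspect `_data` and mutate the accounts
    let _ = infos;
}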
#[cfg(test)]
mod tests {
use super::*;
use bincode::serialize;
use solana_program_interface::account::Account;
use solana_program_interface::pubkey::Pubkey;
use std::path::Path;
use std::thread;
#[test]
fn test_create_library_path() {
let path = create_library_path("noop");
assert_eq!(true, Path::new(&path).exists());
let path = create_library_path("print");
assert_eq!(true, Path::new(&path).exists());
let path = create_library_path("move_funds");
assert_eq!(true, Path::new(&path).exists());
}
#[test]
fn test_program_noop() {
let data: Vec<u8> = vec![0];
let keys = vec![Pubkey::default(); 2];
let mut accounts = vec![Account::default(), Account::default()];
accounts[0].tokens = 100;
accounts[1].tokens = 1;
{
let mut infos: Vec<_> = (&keys)
.into_iter()
.zip(&mut accounts)
.map(|(key, account)| KeyedAccount { key, account })
.collect();
let dp = DynamicProgram::new("noop".to_string());
dp.call(&mut infos, &data);
}
}
#[test]
#[ignore]
fn test_program_print() {
let data: Vec<u8> = vec![0];
let keys = vec![Pubkey::default(); 2];
let mut accounts = vec![Account::default(), Account::default()];
accounts[0].tokens = 100;
accounts[1].tokens = 1;
{
let mut infos: Vec<_> = (&keys)
.into_iter()
.zip(&mut accounts)
.map(|(key, account)| KeyedAccount { key, account })
.collect();
let dp = DynamicProgram::new("print".to_string());
dp.call(&mut infos, &data);
}
}
#[test]
fn test_program_move_funds_success() {
let tokens: i64 = 100;
let data: Vec<u8> = serialize(&tokens).unwrap();
let keys = vec![Pubkey::default(); 2];
let mut accounts = vec![Account::default(), Account::default()];
accounts[0].tokens = 100;
accounts[1].tokens = 1;
{
let mut infos: Vec<_> = (&keys)
.into_iter()
.zip(&mut accounts)
.map(|(key, account)| KeyedAccount { key, account })
.collect();
let dp = DynamicProgram::new("move_funds".to_string());
dp.call(&mut infos, &data);
}
assert_eq!(0, accounts[0].tokens);
assert_eq!(101, accounts[1].tokens);
}
#[test]
fn test_program_move_funds_insufficient_funds() {
let tokens: i64 = 100;
let data: Vec<u8> = serialize(&tokens).unwrap();
let keys = vec![Pubkey::default(); 2];
let mut accounts = vec![Account::default(), Account::default()];
accounts[0].tokens = 10;
accounts[1].tokens = 1;
{
let mut infos: Vec<_> = (&keys)
.into_iter()
.zip(&mut accounts)
.map(|(key, account)| KeyedAccount { key, account })
.collect();
let dp = DynamicProgram::new("move_funds".to_string());
dp.call(&mut infos, &data);
}
assert_eq!(10, accounts[0].tokens);
assert_eq!(1, accounts[1].tokens);
}
#[test]
fn test_program_move_funds_success_many_threads() {
let num_threads = 42; // number of threads to spawn
let num_iters = 100; // number of iterations of test in each thread
let mut threads = Vec::new();
for _t in 0..num_threads {
threads.push(thread::spawn(move || {
for _i in 0..num_iters {
{
let tokens: i64 = 100;
let data: Vec<u8> = serialize(&tokens).unwrap();
let keys = vec![Pubkey::default(); 2];
let mut accounts = vec![Account::default(), Account::default()];
accounts[0].tokens = 100;
accounts[1].tokens = 1;
{
let mut infos: Vec<_> = (&keys)
.into_iter()
.zip(&mut accounts)
.map(|(key, account)| KeyedAccount { key, account })
.collect();
let dp = DynamicProgram::new("move_funds".to_string());
dp.call(&mut infos, &data);
}
assert_eq!(0, accounts[0].tokens);
assert_eq!(101, accounts[1].tokens);
}
}
}));
}
for thread in threads {
thread.join().unwrap();
}
}
// TODO add more tests to validate the Userdata and Account data is
// moving across the boundary correctly
}


@ -3,14 +3,20 @@
//! transactions within it. Entries cannot be reordered, and its field `num_hashes`
//! represents an approximate amount of time since the last Entry was created.
use bincode::{serialize_into, serialized_size};
use hash::{extend_and_hash, hash, Hash};
use packet::{BlobRecycler, SharedBlob, BLOB_DATA_SIZE};
use budget_transaction::BudgetTransaction;
use hash::Hash;
use packet::{SharedBlob, BLOB_DATA_SIZE};
use poh::Poh;
use rayon::prelude::*;
use signature::Pubkey;
use solana_program_interface::pubkey::Pubkey;
use std::io::Cursor;
use std::net::SocketAddr;
use std::sync::mpsc::{Receiver, Sender};
use transaction::Transaction;
pub type EntrySender = Sender<Vec<Entry>>;
pub type EntryReceiver = Receiver<Vec<Entry>>;
/// Each Entry contains three pieces of data. The `num_hashes` field is the number
/// of hashes performed since the previous entry. The `id` field is the result
/// of hashing `id` from the previous entry `num_hashes` times. The `transactions`
@ -38,30 +44,17 @@ pub struct Entry {
/// generated. They may have been observed before a previous Entry ID but were
/// pushed back into this list to ensure deterministic interpretation of the ledger.
pub transactions: Vec<Transaction>,
/// Indication that:
/// 1. the next Entry in the ledger has transactions that can potentially
/// be verified in parallel with these transactions
/// 2. this Entry can be left out of the bank's entry_id cache for
/// purposes of duplicate rejection
pub has_more: bool,
}
impl Entry {
/// Creates the next Entry `num_hashes` after `start_hash`.
pub fn new(
start_hash: &Hash,
num_hashes: u64,
transactions: Vec<Transaction>,
has_more: bool,
) -> Self {
pub fn new(start_hash: &Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Self {
let num_hashes = num_hashes + if transactions.is_empty() { 0 } else { 1 };
let id = next_hash(start_hash, 0, &transactions);
let entry = Entry {
num_hashes,
id,
transactions,
has_more,
};
let size = serialized_size(&entry).unwrap();
@ -77,12 +70,11 @@ impl Entry {
pub fn to_blob(
&self,
blob_recycler: &BlobRecycler,
idx: Option<u64>,
id: Option<Pubkey>,
addr: Option<&SocketAddr>,
) -> SharedBlob {
let blob = blob_recycler.allocate();
let blob = SharedBlob::default();
{
let mut blob_w = blob.write().unwrap();
let pos = {
@ -111,8 +103,44 @@ impl Entry {
num_hashes: 0,
id: Hash::default(),
transactions,
has_more: false,
}).unwrap() <= BLOB_DATA_SIZE as u64
}).unwrap()
<= BLOB_DATA_SIZE as u64
}
pub fn num_will_fit(transactions: &[Transaction]) -> usize {
if transactions.is_empty() {
return 0;
}
let mut num = transactions.len();
let mut upper = transactions.len();
let mut lower = 1; // if one won't fit, we have a lot of TODOs
let mut next = transactions.len(); // optimistic
loop {
debug!(
"num {}, upper {} lower {} next {} transactions.len() {}",
num,
upper,
lower,
next,
transactions.len()
);
if Entry::will_fit(transactions[..num].to_vec()) {
next = (upper + num) / 2;
lower = num;
debug!("num {} fits, maybe too well? trying {}", num, next);
} else {
next = (lower + num) / 2;
upper = num;
debug!("num {} doesn't fit! trying {}", num, next);
}
// same as last time
if next == num {
debug!("converged on num {}", num);
break;
}
num = next;
}
num
}
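From the caller's side, num_will_fit reports the largest prefix of the batch whose Entry still serializes under BLOB_DATA_SIZE, so a batch can be chunked like this (a sketch; the real loop lives in ledger.rs's next_entries_mut):
// Hypothetical chunking of a transaction batch into blob-sized entries:
let mut entries = Vec::new();
let mut chunk_start = 0;
while chunk_start < transactions.len() {
    let chunk_end = chunk_start + Entry::num_will_fit(&transactions[chunk_start..]);
    let entry = Entry::new(&start_hash, 0, transactions[chunk_start..chunk_end].to_vec());
    start_hash = entry.id; // chain the next chunk off this entry
    entries.push(entry);
    chunk_start = chunk_end;
}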
/// Creates the next Tick Entry `num_hashes` after `start_hash`.
@ -120,9 +148,8 @@ impl Entry {
start_hash: &mut Hash,
num_hashes: &mut u64,
transactions: Vec<Transaction>,
has_more: bool,
) -> Self {
let entry = Self::new(start_hash, *num_hashes, transactions, has_more);
let entry = Self::new(start_hash, *num_hashes, transactions);
*start_hash = entry.id;
*num_hashes = 0;
assert!(serialized_size(&entry).unwrap() <= BLOB_DATA_SIZE as u64);
@ -136,7 +163,6 @@ impl Entry {
num_hashes,
id: *id,
transactions: vec![],
has_more: false,
}
}
@ -165,33 +191,25 @@ impl Entry {
}
}
fn add_transaction_data(hash_data: &mut Vec<u8>, tx: &Transaction) {
hash_data.push(0u8);
hash_data.extend_from_slice(&tx.signature.as_ref());
}
/// Creates the hash `num_hashes` after `start_hash`. If the transaction contains
/// a signature, the final hash will be a hash of both the previous ID and
/// the signature. If num_hashes is zero and there's no transaction data,
/// start_hash is returned.
fn next_hash(start_hash: &Hash, num_hashes: u64, transactions: &[Transaction]) -> Hash {
let mut id = *start_hash;
if num_hashes == 0 && transactions.is_empty() {
return *start_hash;
}
let mut poh = Poh::new(*start_hash);
for _ in 1..num_hashes {
id = hash(&id.as_ref());
poh.hash();
}
// Hash all the transaction data
let mut hash_data = vec![];
for tx in transactions {
add_transaction_data(&mut hash_data, tx);
}
if !hash_data.is_empty() {
extend_and_hash(&id, &hash_data)
} else if num_hashes != 0 {
hash(&id.as_ref())
if transactions.is_empty() {
poh.tick().id
} else {
id
poh.record(Transaction::hash(transactions)).id
}
}
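The Poh-based rewrite preserves the old accounting: the loop above advances the chain num_hashes - 1 times, and the final tick() or record() supplies the last step. The assumed Poh contract, for illustration:
// Assumed Poh behavior (an illustration, not part of this diff):
//   poh.hash()       one chain step:  id = hash(&id)
//   poh.tick()       one more step, returning the new id
//   poh.record(mix)  one more step that also folds `mix` into the hash
// So next_hash performs exactly num_hashes steps, mixing in the combined
// transaction hash on the final step when transactions are present.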
@ -202,17 +220,18 @@ pub fn next_entry(start_hash: &Hash, num_hashes: u64, transactions: Vec<Transact
num_hashes,
id: next_hash(start_hash, num_hashes, &transactions),
transactions,
has_more: false,
}
}
#[cfg(test)]
mod tests {
use super::*;
use budget_transaction::BudgetTransaction;
use chrono::prelude::*;
use entry::Entry;
use hash::hash;
use signature::{Keypair, KeypairUtil};
use system_transaction::SystemTransaction;
use transaction::Transaction;
#[test]
@ -231,9 +250,9 @@ mod tests {
// First, verify entries
let keypair = Keypair::new();
let tx0 = Transaction::new(&keypair, keypair.pubkey(), 0, zero);
let tx1 = Transaction::new(&keypair, keypair.pubkey(), 1, zero);
let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()], false);
let tx0 = Transaction::system_new(&keypair, keypair.pubkey(), 0, zero);
let tx1 = Transaction::system_new(&keypair, keypair.pubkey(), 1, zero);
let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()]);
assert!(e0.verify(&zero));
// Next, swap two transactions and ensure verification fails.
@ -248,9 +267,16 @@ mod tests {
// First, verify entries
let keypair = Keypair::new();
let tx0 = Transaction::new_timestamp(&keypair, keypair.pubkey(), Utc::now(), zero);
let tx1 = Transaction::new_signature(&keypair, keypair.pubkey(), Default::default(), zero);
let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()], false);
let tx0 = Transaction::budget_new_timestamp(
&keypair,
keypair.pubkey(),
keypair.pubkey(),
Utc::now(),
zero,
);
let tx1 =
Transaction::budget_new_signature(&keypair, keypair.pubkey(), keypair.pubkey(), zero);
let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()]);
assert!(e0.verify(&zero));
// Next, swap two witness transactions and ensure verification fails.
@ -271,7 +297,13 @@ mod tests {
assert_eq!(tick.id, zero);
let keypair = Keypair::new();
let tx0 = Transaction::new_timestamp(&keypair, keypair.pubkey(), Utc::now(), zero);
let tx0 = Transaction::budget_new_timestamp(
&keypair,
keypair.pubkey(),
keypair.pubkey(),
Utc::now(),
zero,
);
let entry0 = next_entry(&zero, 1, vec![tx0.clone()]);
assert_eq!(entry0.num_hashes, 1);
assert_eq!(entry0.id, next_hash(&zero, 1, &vec![tx0]));
@ -282,7 +314,7 @@ mod tests {
fn test_next_entry_panic() {
let zero = Hash::default();
let keypair = Keypair::new();
let tx = Transaction::new(&keypair, keypair.pubkey(), 0, zero);
let tx = Transaction::system_new(&keypair, keypair.pubkey(), 0, zero);
next_entry(&zero, 0, vec![tx]);
}
}


@ -44,9 +44,8 @@ impl<'a, W: Write> EntryWriter<'a, W> {
fn write_and_register_entry(&mut self, entry: &Entry) -> io::Result<()> {
trace!("write_and_register_entry entry");
if !entry.has_more {
self.bank.register_entry_id(&entry.id);
}
self.bank.register_entry_id(&entry.id);
Self::write_entry(&mut self.writer, entry)
}
@ -101,46 +100,8 @@ pub fn read_entries<R: BufRead>(reader: R) -> impl Iterator<Item = io::Result<En
#[cfg(test)]
mod tests {
use super::*;
use bincode::serialize;
use ledger;
use mint::Mint;
use packet::BLOB_DATA_SIZE;
use packet::PACKET_DATA_SIZE;
use signature::{Keypair, KeypairUtil};
use std::io::Cursor;
use transaction::Transaction;
#[test]
fn test_dont_register_partial_entries() {
let mint = Mint::new(1);
let bank = Bank::new(&mint);
let writer = io::sink();
let mut entry_writer = EntryWriter::new(&bank, writer);
let keypair = Keypair::new();
let tx = Transaction::new(&mint.keypair(), keypair.pubkey(), 1, mint.last_id());
let tx_size = serialize(&tx).unwrap().len();
assert!(tx_size <= PACKET_DATA_SIZE);
assert!(BLOB_DATA_SIZE >= PACKET_DATA_SIZE);
let threshold = (BLOB_DATA_SIZE / tx_size) - 1; // PACKET_DATA_SIZE is transaction size
// Verify large entries are split up and the first sets has_more.
let txs = vec![tx.clone(); threshold * 2];
let entries = ledger::next_entries(&mint.last_id(), 0, txs);
assert_eq!(entries.len(), 2);
assert!(entries[0].has_more);
assert!(!entries[1].has_more);
// Verify that write_and_register_entry doesn't register the first entries after a split.
assert_eq!(bank.last_id(), mint.last_id());
entry_writer.write_and_register_entry(&entries[0]).unwrap();
assert_eq!(bank.last_id(), mint.last_id());
// Verify that write_and_register_entry registers the final entry after a split.
entry_writer.write_and_register_entry(&entries[1]).unwrap();
assert_eq!(bank.last_id(), entries[1].id);
}
/// Same as read_entries() but parsing a buffer and returning a vector.
fn read_entries_from_buf(s: &[u8]) -> io::Result<Vec<Entry>> {


@ -1,6 +1,6 @@
// Support erasure coding
use packet::{BlobRecycler, SharedBlob, BLOB_DATA_SIZE, BLOB_HEADER_SIZE};
use signature::Pubkey;
use packet::{SharedBlob, BLOB_DATA_SIZE, BLOB_HEADER_SIZE};
use solana_program_interface::pubkey::Pubkey;
use std::cmp;
use std::mem;
use std::result;
@ -75,7 +75,7 @@ pub const ERASURE_W: i32 = 32;
// There are some alignment restrictions, blocks should be aligned by 16 bytes
// which means their size should be >= 16 bytes
pub fn generate_coding_blocks(coding: &mut [&mut [u8]], data: &[&[u8]]) -> Result<()> {
if data.len() == 0 {
if data.is_empty() {
return Ok(());
}
let k = data.len() as i32;
@ -130,7 +130,7 @@ pub fn decode_blocks(
coding: &mut [&mut [u8]],
erasures: &[i32],
) -> Result<()> {
if data.len() == 0 {
if data.is_empty() {
return Ok(());
}
let block_len = data[0].len();
@ -217,7 +217,6 @@ pub fn decode_blocks(
pub fn generate_coding(
id: &Pubkey,
window: &mut [WindowSlot],
recycler: &BlobRecycler,
receive_index: u64,
num_blobs: usize,
transmit_index_coding: &mut u64,
@ -285,7 +284,7 @@ pub fn generate_coding(
let n = i % window.len();
assert!(window[n].coding.is_none());
window[n].coding = Some(recycler.allocate());
window[n].coding = Some(SharedBlob::default());
let coding = window[n].coding.clone().unwrap();
let mut coding_wl = coding.write().unwrap();
@ -316,10 +315,7 @@ pub fn generate_coding(
coding_blobs.push(coding.clone());
}
let data_locks: Vec<_> = data_blobs
.iter()
.map(|b| b.read().expect("'data_locks' of data_blobs"))
.collect();
let data_locks: Vec<_> = data_blobs.iter().map(|b| b.read().unwrap()).collect();
let data_ptrs: Vec<_> = data_locks
.iter()
@ -327,13 +323,9 @@ pub fn generate_coding(
.map(|(i, l)| {
trace!("{} i: {} data: {}", id, i, l.data[0]);
&l.data[..max_data_size]
})
.collect();
}).collect();
let mut coding_locks: Vec<_> = coding_blobs
.iter()
.map(|b| b.write().expect("'coding_locks' of coding_blobs"))
.collect();
let mut coding_locks: Vec<_> = coding_blobs.iter().map(|b| b.write().unwrap()).collect();
let mut coding_ptrs: Vec<_> = coding_locks
.iter_mut()
@ -341,8 +333,7 @@ pub fn generate_coding(
.map(|(i, l)| {
trace!("{} i: {} coding: {}", id, i, l.data[0],);
&mut l.data_mut()[..max_data_size]
})
.collect();
}).collect();
generate_coding_blocks(coding_ptrs.as_mut_slice(), &data_ptrs)?;
debug!(
@ -358,14 +349,8 @@ pub fn generate_coding(
// true if slot is empty
// true if slot is stale (i.e. has the wrong index), old blob is flushed
// false if slot has a blob with the right index
fn is_missing(
id: &Pubkey,
idx: u64,
window_slot: &mut Option<SharedBlob>,
recycler: &BlobRecycler,
c_or_d: &str,
) -> bool {
if let Some(blob) = mem::replace(window_slot, None) {
fn is_missing(id: &Pubkey, idx: u64, window_slot: &mut Option<SharedBlob>, c_or_d: &str) -> bool {
if let Some(blob) = window_slot.take() {
let blob_idx = blob.read().unwrap().get_index().unwrap();
if blob_idx == idx {
trace!("recover {}: idx: {} good {}", id, idx, c_or_d);
@ -380,8 +365,6 @@ fn is_missing(
c_or_d,
blob_idx,
);
// recycle it
recycler.recycle(blob, "is_missing");
true
}
} else {
@ -400,7 +383,6 @@ fn find_missing(
block_start_idx: u64,
block_start: usize,
window: &mut [WindowSlot],
recycler: &BlobRecycler,
) -> (usize, usize) {
let mut data_missing = 0;
let mut coding_missing = 0;
@ -412,11 +394,11 @@ fn find_missing(
let idx = (i - block_start) as u64 + block_start_idx;
let n = i % window.len();
if is_missing(id, idx, &mut window[n].data, recycler, "data") {
if is_missing(id, idx, &mut window[n].data, "data") {
data_missing += 1;
}
if i >= coding_start && is_missing(id, idx, &mut window[n].coding, recycler, "coding") {
if i >= coding_start && is_missing(id, idx, &mut window[n].coding, "coding") {
coding_missing += 1;
}
}
@ -425,17 +407,10 @@ fn find_missing(
// Recover a missing block into window
// missing blocks should be None or old...
// Use recycler to allocate new ones.
// If not enough coding or data blocks are present to restore
// any of the blocks, the block is skipped.
// Side effect: old blobs in a block are None'd
pub fn recover(
id: &Pubkey,
recycler: &BlobRecycler,
window: &mut [WindowSlot],
start_idx: u64,
start: usize,
) -> Result<()> {
pub fn recover(id: &Pubkey, window: &mut [WindowSlot], start_idx: u64, start: usize) -> Result<()> {
let block_start = start - (start % NUM_DATA);
let block_start_idx = start_idx - (start_idx % NUM_DATA as u64);
@ -452,8 +427,7 @@ pub fn recover(
block_end
);
let (data_missing, coding_missing) =
find_missing(id, block_start_idx, block_start, window, recycler);
let (data_missing, coding_missing) = find_missing(id, block_start_idx, block_start, window);
// if we're not missing data, or if too much is missing but we have enough coding
if data_missing == 0 {
@ -496,7 +470,7 @@ pub fn recover(
}
blobs.push(b);
} else {
let n = recycler.allocate();
let n = SharedBlob::default();
window[j].data = Some(n.clone());
// mark the missing memory
blobs.push(n);
@ -517,7 +491,7 @@ pub fn recover(
}
blobs.push(b);
} else {
let n = recycler.allocate();
let n = SharedBlob::default();
window[j].coding = Some(n.clone());
//mark the missing memory
blobs.push(n);
@ -543,7 +517,7 @@ pub fn recover(
trace!("erasures[]: {} {:?} data_size: {}", id, erasures, size,);
//lock everything for write
for b in &blobs {
locks.push(b.write().expect("'locks' arr in pb fn recover"));
locks.push(b.write().unwrap());
}
{
@ -620,9 +594,10 @@ mod test {
use crdt;
use erasure;
use logger;
use packet::{BlobRecycler, BLOB_DATA_SIZE, BLOB_HEADER_SIZE, BLOB_SIZE};
use packet::{SharedBlob, BLOB_DATA_SIZE, BLOB_HEADER_SIZE, BLOB_SIZE};
use rand::{thread_rng, Rng};
use signature::{Keypair, KeypairUtil, Pubkey};
use signature::{Keypair, KeypairUtil};
use solana_program_interface::pubkey::Pubkey;
// use std::sync::{Arc, RwLock};
use window::{index_blobs, WindowSlot};
@ -694,7 +669,7 @@ mod test {
} else {
print!("data null ");
}
println!("");
println!();
print!("window({:>w$}): ", i, w = 2);
if w.coding.is_some() {
let window_l1 = w.coding.clone().unwrap();
@ -710,16 +685,12 @@ mod test {
} else {
print!("coding null");
}
println!("");
println!();
}
}
const WINDOW_SIZE: usize = 64;
fn generate_window(
blob_recycler: &BlobRecycler,
offset: usize,
num_blobs: usize,
) -> Vec<WindowSlot> {
fn generate_window(offset: usize, num_blobs: usize) -> Vec<WindowSlot> {
let mut window = vec![
WindowSlot {
data: None,
@ -730,7 +701,7 @@ mod test {
];
let mut blobs = Vec::with_capacity(num_blobs);
for i in 0..num_blobs {
let b = blob_recycler.allocate();
let b = SharedBlob::default();
let b_ = b.clone();
let mut w = b.write().unwrap();
// generate a random length, multiple of 4 between 8 and 32
@ -755,13 +726,7 @@ mod test {
blobs.push(b_);
}
let d = crdt::NodeInfo::new(
Keypair::new().pubkey(),
"127.0.0.1:1234".parse().unwrap(),
"127.0.0.1:1235".parse().unwrap(),
"127.0.0.1:1236".parse().unwrap(),
"127.0.0.1:1237".parse().unwrap(),
);
let d = crdt::NodeInfo::new_localhost(Keypair::new().pubkey());
assert!(index_blobs(&d, &blobs, &mut (offset as u64)).is_ok());
for b in blobs {
let idx = b.read().unwrap().get_index().unwrap() as usize % WINDOW_SIZE;
@ -787,39 +752,13 @@ mod test {
}
}
fn pollute_recycler(blob_recycler: &BlobRecycler) {
let mut blobs = Vec::with_capacity(WINDOW_SIZE * 2);
for _ in 0..WINDOW_SIZE * 10 {
let blob = blob_recycler.allocate();
{
let mut b_l = blob.write().unwrap();
for i in 0..BLOB_SIZE {
b_l.data[i] = thread_rng().gen();
}
// some of the blobs should previously been used for coding
if thread_rng().gen_bool(erasure::NUM_CODING as f64 / erasure::NUM_DATA as f64) {
b_l.set_coding().unwrap();
}
}
blobs.push(blob);
}
for blob in blobs {
blob_recycler.recycle(blob, "pollute_recycler");
}
}
#[test]
pub fn test_window_recover_basic() {
logger::setup();
let blob_recycler = BlobRecycler::default();
pollute_recycler(&blob_recycler);
// Generate a window
let offset = 0;
let num_blobs = erasure::NUM_DATA + 2;
let mut window = generate_window(&blob_recycler, WINDOW_SIZE, num_blobs);
let mut window = generate_window(WINDOW_SIZE, num_blobs);
for slot in &window {
if let Some(blob) = &slot.data {
@ -835,14 +774,8 @@ mod test {
let mut index = (erasure::NUM_DATA + 2) as u64;
let id = Pubkey::default();
assert!(
erasure::generate_coding(
&id,
&mut window,
&blob_recycler,
offset as u64,
num_blobs,
&mut index
).is_ok()
erasure::generate_coding(&id, &mut window, offset as u64, num_blobs, &mut index)
.is_ok()
);
assert_eq!(index, (erasure::NUM_DATA - erasure::NUM_CODING) as u64);
@ -861,15 +794,7 @@ mod test {
scramble_window_tails(&mut window, num_blobs);
// Recover it from coding
assert!(
erasure::recover(
&id,
&blob_recycler,
&mut window,
(offset + WINDOW_SIZE) as u64,
offset,
).is_ok()
);
assert!(erasure::recover(&id, &mut window, (offset + WINDOW_SIZE) as u64, offset,).is_ok());
println!("** after-recover:");
print_window(&window);
@ -901,25 +826,12 @@ mod test {
// Create a hole in the window
let refwindow = window[erase_offset].data.clone();
window[erase_offset].data = None;
blob_recycler.recycle(
window[erase_offset].coding.clone().unwrap(),
"window_recover_basic",
);
window[erase_offset].coding = None;
print_window(&window);
// Recover it from coding
assert!(
erasure::recover(
&id,
&blob_recycler,
&mut window,
(offset + WINDOW_SIZE) as u64,
offset,
).is_ok()
);
assert!(erasure::recover(&id, &mut window, (offset + WINDOW_SIZE) as u64, offset,).is_ok());
println!("** after-recover:");
print_window(&window);
@ -957,15 +869,7 @@ mod test {
print_window(&window);
// Recover it from coding
assert!(
erasure::recover(
&id,
&blob_recycler,
&mut window,
(offset + WINDOW_SIZE) as u64,
offset,
).is_ok()
);
assert!(erasure::recover(&id, &mut window, (offset + WINDOW_SIZE) as u64, offset,).is_ok());
println!("** after-recover:");
print_window(&window);
@ -1003,11 +907,10 @@ mod test {
// #[ignore]
// pub fn test_window_recover() {
// logger::setup();
// let blob_recycler = BlobRecycler::default();
// let offset = 4;
// let data_len = 16;
// let num_blobs = erasure::NUM_DATA + 2;
// let (mut window, blobs_len) = generate_window(data_len, &blob_recycler, offset, num_blobs);
// let (mut window, blobs_len) = generate_window(data_len, offset, num_blobs);
// println!("** after-gen:");
// print_window(&window);
// assert!(erasure::generate_coding(&mut window, offset, blobs_len).is_ok());
@ -1024,7 +927,7 @@ mod test {
// window_l0.write().unwrap().data[0] = 55;
// println!("** after-nulling:");
// print_window(&window);
// assert!(erasure::recover(&blob_recycler, &mut window, offset, offset + blobs_len).is_ok());
// assert!(erasure::recover(&mut window, offset, offset + blobs_len).is_ok());
// println!("** after-restore:");
// print_window(&window);
// let window_l = window[offset + 1].clone().unwrap();


@ -1,6 +1,5 @@
//! The `fetch_stage` batches input from a UDP socket and sends it to a channel.
use packet::PacketRecycler;
use service::Service;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
@ -15,25 +14,18 @@ pub struct FetchStage {
}
impl FetchStage {
pub fn new(
sockets: Vec<UdpSocket>,
exit: Arc<AtomicBool>,
recycler: &PacketRecycler,
) -> (Self, PacketReceiver) {
pub fn new(sockets: Vec<UdpSocket>, exit: Arc<AtomicBool>) -> (Self, PacketReceiver) {
let tx_sockets = sockets.into_iter().map(Arc::new).collect();
Self::new_multi_socket(tx_sockets, exit, recycler)
Self::new_multi_socket(tx_sockets, exit)
}
pub fn new_multi_socket(
sockets: Vec<Arc<UdpSocket>>,
exit: Arc<AtomicBool>,
recycler: &PacketRecycler,
) -> (Self, PacketReceiver) {
let (sender, receiver) = channel();
let thread_hdls: Vec<_> = sockets
.into_iter()
.map(|socket| {
streamer::receiver(socket, exit.clone(), recycler.clone(), sender.clone())
})
.map(|socket| streamer::receiver(socket, exit.clone(), sender.clone(), "fetch-stage"))
.collect();
(FetchStage { exit, thread_hdls }, receiver)
@ -45,12 +37,10 @@ impl FetchStage {
}
impl Service for FetchStage {
fn thread_hdls(self) -> Vec<JoinHandle<()>> {
self.thread_hdls
}
type JoinReturnType = ();
fn join(self) -> thread::Result<()> {
for thread_hdl in self.thread_hdls() {
for thread_hdl in self.thread_hdls {
thread_hdl.join()?;
}
Ok(())


@ -7,23 +7,90 @@ use drone::DRONE_PORT;
use entry::Entry;
use ledger::read_ledger;
use ncp::Ncp;
use packet::BlobRecycler;
use rpc::{JsonRpcService, RPC_PORT};
use rpu::Rpu;
use service::Service;
use signature::{Keypair, KeypairUtil};
use solana_program_interface::pubkey::Pubkey;
use std::net::UdpSocket;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
use std::thread::{JoinHandle, Result};
use tpu::Tpu;
use tvu::Tvu;
use std::thread::Result;
use tpu::{Tpu, TpuReturnType};
use tvu::{Tvu, TvuReturnType};
use untrusted::Input;
use window;
pub enum NodeRole {
Leader(LeaderServices),
Validator(ValidatorServices),
}
pub struct LeaderServices {
tpu: Tpu,
broadcast_stage: BroadcastStage,
}
impl LeaderServices {
fn new(tpu: Tpu, broadcast_stage: BroadcastStage) -> Self {
LeaderServices {
tpu,
broadcast_stage,
}
}
pub fn join(self) -> Result<Option<TpuReturnType>> {
self.broadcast_stage.join()?;
self.tpu.join()
}
pub fn exit(&self) -> () {
self.tpu.exit();
}
}
pub struct ValidatorServices {
tvu: Tvu,
}
impl ValidatorServices {
fn new(tvu: Tvu) -> Self {
ValidatorServices { tvu }
}
pub fn join(self) -> Result<Option<TvuReturnType>> {
self.tvu.join()
}
pub fn exit(&self) -> () {
self.tvu.exit()
}
}
pub enum FullnodeReturnType {
LeaderRotation,
}
pub struct Fullnode {
pub node_role: Option<NodeRole>,
keypair: Arc<Keypair>,
exit: Arc<AtomicBool>,
thread_hdls: Vec<JoinHandle<()>>,
rpu: Option<Rpu>,
rpc_service: JsonRpcService,
ncp: Ncp,
bank: Arc<Bank>,
crdt: Arc<RwLock<Crdt>>,
ledger_path: String,
sigverify_disabled: bool,
shared_window: window::SharedWindow,
replicate_socket: Vec<UdpSocket>,
repair_socket: UdpSocket,
retransmit_socket: UdpSocket,
transaction_sockets: Vec<UdpSocket>,
broadcast_socket: UdpSocket,
requests_socket: UdpSocket,
respond_socket: UdpSocket,
}
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
@ -55,24 +122,14 @@ impl Fullnode {
keypair: Keypair,
leader_addr: Option<SocketAddr>,
sigverify_disabled: bool,
leader_rotation_interval: Option<u64>,
) -> Self {
info!("creating bank...");
let bank = Bank::new_default(leader_addr.is_none());
let entries = read_ledger(ledger_path, true).expect("opening ledger");
let entries = entries
.map(|e| e.unwrap_or_else(|err| panic!("failed to parse entry. error: {}", err)));
info!("processing ledger...");
let (entry_height, ledger_tail) = bank.process_ledger(entries).expect("process_ledger");
// entry_height is the network-wide agreed height of the ledger.
// initialize it from the input ledger
info!("processed {} ledger...", entry_height);
let (bank, entry_height, ledger_tail) = Self::new_bank_from_ledger(ledger_path);
info!("creating networking stack...");
let local_gossip_addr = node.sockets.gossip.local_addr().unwrap();
info!(
"starting... local gossip address: {} (advertising {})",
local_gossip_addr, node.info.contact_info.ncp
@ -88,16 +145,18 @@ impl Fullnode {
&ledger_tail,
node,
leader_info.as_ref(),
Some(ledger_path),
ledger_path,
sigverify_disabled,
leader_rotation_interval,
None,
);
match leader_addr {
Some(leader_addr) => {
info!(
"validator ready... local request address: {} (advertising {}) connected to: {}",
local_requests_addr, requests_addr, leader_addr
);
"validator ready... local request address: {} (advertising {}) connected to: {}",
local_requests_addr, requests_addr, leader_addr
);
}
None => {
info!(
@ -160,6 +219,7 @@ impl Fullnode {
/// `--------` | | `------------`
/// `-------------------------------`
/// ```
#[cfg_attr(feature = "cargo-clippy", allow(too_many_arguments))]
pub fn new_with_bank(
keypair: Keypair,
bank: Bank,
@ -167,30 +227,38 @@ impl Fullnode {
ledger_tail: &[Entry],
mut node: Node,
leader_info: Option<&NodeInfo>,
ledger_path: Option<&str>,
ledger_path: &str,
sigverify_disabled: bool,
leader_rotation_interval: Option<u64>,
rpc_port: Option<u16>,
) -> Self {
if leader_info.is_none() {
node.info.leader_id = node.info.id;
}
let exit = Arc::new(AtomicBool::new(false));
let bank = Arc::new(bank);
let blob_recycler = BlobRecycler::default();
let mut thread_hdls = vec![];
let rpu = Rpu::new(
let rpu = Some(Rpu::new(
&bank,
node.sockets.requests,
node.sockets.respond,
&blob_recycler,
exit.clone(),
);
thread_hdls.extend(rpu.thread_hdls());
node.sockets
.requests
.try_clone()
.expect("Failed to clone requests socket"),
node.sockets
.respond
.try_clone()
.expect("Failed to clone respond socket"),
));
// TODO: this code assumes this node is the leader
let mut drone_addr = node.info.contact_info.tpu;
drone_addr.set_port(DRONE_PORT);
let rpc_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::from(0)), RPC_PORT);
// Use custom RPC port, if provided (`Some(port)`)
// RPC port may be any open port on the node
// If rpc_port == `None`, node will listen on the default RPC_PORT from Rpc module
// If rpc_port == `Some(0)`, node will dynamically choose any open port. Useful for tests.
let rpc_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::from(0)), rpc_port.unwrap_or(RPC_PORT));
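// For illustration (not in the original diff), the cases resolve as:
//   rpc_port == None       -> 0.0.0.0:RPC_PORT (module default)
//   rpc_port == Some(0)    -> 0.0.0.0:0, the OS assigns a free port (tests)
//   rpc_port == Some(8899) -> 0.0.0.0:8899 (operator-chosen port)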
let rpc_service = JsonRpcService::new(
&bank,
node.info.contact_info.tpu,
@ -198,99 +266,283 @@ impl Fullnode {
rpc_addr,
exit.clone(),
);
thread_hdls.extend(rpc_service.thread_hdls());
let window =
window::new_window_from_entries(ledger_tail, entry_height, &node.info, &blob_recycler);
let window = window::new_window_from_entries(ledger_tail, entry_height, &node.info);
let shared_window = Arc::new(RwLock::new(window));
let crdt = Arc::new(RwLock::new(Crdt::new(node.info).expect("Crdt::new")));
let mut crdt = Crdt::new(node.info).expect("Crdt::new");
if let Some(interval) = leader_rotation_interval {
crdt.set_leader_rotation_interval(interval);
}
let crdt = Arc::new(RwLock::new(crdt));
let ncp = Ncp::new(
&crdt,
shared_window.clone(),
blob_recycler.clone(),
ledger_path,
Some(ledger_path),
node.sockets.gossip,
exit.clone(),
);
thread_hdls.extend(ncp.thread_hdls());
let keypair = Arc::new(keypair);
let node_role;
match leader_info {
Some(leader_info) => {
// Start in validator mode.
// TODO: let Crdt get that data from the network?
crdt.write().unwrap().insert(leader_info);
let tvu = Tvu::new(
keypair,
keypair.clone(),
&bank,
entry_height,
crdt,
shared_window,
blob_recycler.clone(),
node.sockets.replicate,
node.sockets.repair,
node.sockets.retransmit,
ledger_path,
exit.clone(),
crdt.clone(),
shared_window.clone(),
node.sockets
.replicate
.iter()
.map(|s| s.try_clone().expect("Failed to clone replicate sockets"))
.collect(),
node.sockets
.repair
.try_clone()
.expect("Failed to clone repair socket"),
node.sockets
.retransmit
.try_clone()
.expect("Failed to clone retransmit socket"),
Some(ledger_path),
);
thread_hdls.extend(tvu.thread_hdls());
let validator_state = ValidatorServices::new(tvu);
node_role = Some(NodeRole::Validator(validator_state));
}
None => {
// Start in leader mode.
let ledger_path = ledger_path.expect("ledger path");
let tick_duration = None;
// TODO: To light up PoH, uncomment the following line:
//let tick_duration = Some(Duration::from_millis(1000));
let (tpu, blob_receiver) = Tpu::new(
keypair,
let (tpu, entry_receiver, tpu_exit) = Tpu::new(
keypair.clone(),
&bank,
&crdt,
tick_duration,
node.sockets.transaction,
blob_recycler.clone(),
exit.clone(),
Default::default(),
node.sockets
.transaction
.iter()
.map(|s| s.try_clone().expect("Failed to clone transaction sockets"))
.collect(),
ledger_path,
sigverify_disabled,
entry_height,
);
thread_hdls.extend(tpu.thread_hdls());
let broadcast_stage = BroadcastStage::new(
node.sockets.broadcast,
crdt,
shared_window,
node.sockets
.broadcast
.try_clone()
.expect("Failed to clone broadcast socket"),
crdt.clone(),
shared_window.clone(),
entry_height,
blob_recycler.clone(),
blob_receiver,
entry_receiver,
tpu_exit,
);
thread_hdls.extend(broadcast_stage.thread_hdls());
let leader_state = LeaderServices::new(tpu, broadcast_stage);
node_role = Some(NodeRole::Leader(leader_state));
}
}
Fullnode { exit, thread_hdls }
Fullnode {
keypair,
crdt,
shared_window,
bank,
sigverify_disabled,
rpu,
ncp,
rpc_service,
node_role,
ledger_path: ledger_path.to_owned(),
exit,
replicate_socket: node.sockets.replicate,
repair_socket: node.sockets.repair,
retransmit_socket: node.sockets.retransmit,
transaction_sockets: node.sockets.transaction,
broadcast_socket: node.sockets.broadcast,
requests_socket: node.sockets.requests,
respond_socket: node.sockets.respond,
}
}
fn leader_to_validator(&mut self) -> Result<()> {
// TODO: We can avoid building the bank again once RecordStage is
// integrated with BankingStage
let (bank, entry_height, _) = Self::new_bank_from_ledger(&self.ledger_path);
self.bank = Arc::new(bank);
{
let mut wcrdt = self.crdt.write().unwrap();
let scheduled_leader = wcrdt.get_scheduled_leader(entry_height);
match scheduled_leader {
//TODO: Handle the case where we don't know who the next
//scheduled leader is
None => (),
Some(leader_id) => wcrdt.set_leader(leader_id),
}
}
// Make a new RPU to serve requests out of the new bank we've created
// instead of the old one
if self.rpu.is_some() {
let old_rpu = self.rpu.take().unwrap();
old_rpu.close()?;
self.rpu = Some(Rpu::new(
&self.bank,
self.requests_socket
.try_clone()
.expect("Failed to clone requests socket"),
self.respond_socket
.try_clone()
.expect("Failed to clone respond socket"),
));
}
let tvu = Tvu::new(
self.keypair.clone(),
&self.bank,
entry_height,
self.crdt.clone(),
self.shared_window.clone(),
self.replicate_socket
.iter()
.map(|s| s.try_clone().expect("Failed to clone replicate sockets"))
.collect(),
self.repair_socket
.try_clone()
.expect("Failed to clone repair socket"),
self.retransmit_socket
.try_clone()
.expect("Failed to clone retransmit socket"),
Some(&self.ledger_path),
);
let validator_state = ValidatorServices::new(tvu);
self.node_role = Some(NodeRole::Validator(validator_state));
Ok(())
}
fn validator_to_leader(&mut self, entry_height: u64) {
self.crdt.write().unwrap().set_leader(self.keypair.pubkey());
let (tpu, blob_receiver, tpu_exit) = Tpu::new(
self.keypair.clone(),
&self.bank,
&self.crdt,
Default::default(),
self.transaction_sockets
.iter()
.map(|s| s.try_clone().expect("Failed to clone transaction sockets"))
.collect(),
&self.ledger_path,
self.sigverify_disabled,
entry_height,
);
let broadcast_stage = BroadcastStage::new(
self.broadcast_socket
.try_clone()
.expect("Failed to clone broadcast socket"),
self.crdt.clone(),
self.shared_window.clone(),
entry_height,
blob_receiver,
tpu_exit,
);
let leader_state = LeaderServices::new(tpu, broadcast_stage);
self.node_role = Some(NodeRole::Leader(leader_state));
}
pub fn handle_role_transition(&mut self) -> Result<Option<FullnodeReturnType>> {
let node_role = self.node_role.take();
match node_role {
Some(NodeRole::Leader(leader_services)) => match leader_services.join()? {
Some(TpuReturnType::LeaderRotation) => {
self.leader_to_validator()?;
Ok(Some(FullnodeReturnType::LeaderRotation))
}
_ => Ok(None),
},
Some(NodeRole::Validator(validator_services)) => match validator_services.join()? {
Some(TvuReturnType::LeaderRotation(entry_height)) => {
self.validator_to_leader(entry_height);
Ok(Some(FullnodeReturnType::LeaderRotation))
}
_ => Ok(None),
},
None => Ok(None),
}
}
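Callers are expected to drive this in a loop: join the active role's services and re-enter when they signal a rotation. A hypothetical driver, with all variable names assumed:
let mut fullnode = Fullnode::new(node, &ledger_path, keypair, leader_addr, false, None);
loop {
    match fullnode.handle_role_transition().unwrap() {
        Some(FullnodeReturnType::LeaderRotation) => (), // roles swapped; keep serving
        None => break, // services exited without requesting rotation
    }
}
fullnode.close().unwrap();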
//used for notifying many nodes in parallel to exit
pub fn exit(&self) {
self.exit.store(true, Ordering::Relaxed);
if let Some(ref rpu) = self.rpu {
rpu.exit();
}
match self.node_role {
Some(NodeRole::Leader(ref leader_services)) => leader_services.exit(),
Some(NodeRole::Validator(ref validator_services)) => validator_services.exit(),
_ => (),
}
}
pub fn close(self) -> Result<()> {
pub fn close(self) -> Result<(Option<FullnodeReturnType>)> {
self.exit();
self.join()
}
// TODO: only used for testing, get rid of this once we have actual
// leader scheduling
pub fn set_scheduled_leader(&self, leader_id: Pubkey, entry_height: u64) {
self.crdt
.write()
.unwrap()
.set_scheduled_leader(entry_height, leader_id);
}
fn new_bank_from_ledger(ledger_path: &str) -> (Bank, u64, Vec<Entry>) {
let bank = Bank::new_default(false);
let entries = read_ledger(ledger_path, true).expect("opening ledger");
let entries = entries
.map(|e| e.unwrap_or_else(|err| panic!("failed to parse entry. error: {}", err)));
info!("processing ledger...");
let (entry_height, ledger_tail) = bank.process_ledger(entries).expect("process_ledger");
// entry_height is the network-wide agreed height of the ledger.
// initialize it from the input ledger
info!("processed {} ledger...", entry_height);
(bank, entry_height, ledger_tail)
}
}
impl Service for Fullnode {
fn thread_hdls(self) -> Vec<JoinHandle<()>> {
self.thread_hdls
}
type JoinReturnType = Option<FullnodeReturnType>;
fn join(self) -> Result<()> {
for thread_hdl in self.thread_hdls() {
thread_hdl.join()?;
fn join(self) -> Result<Option<FullnodeReturnType>> {
if let Some(rpu) = self.rpu {
rpu.join()?;
}
Ok(())
self.ncp.join()?;
self.rpc_service.join()?;
match self.node_role {
Some(NodeRole::Validator(validator_service)) => {
if let Some(TvuReturnType::LeaderRotation(_)) = validator_service.join()? {
return Ok(Some(FullnodeReturnType::LeaderRotation));
}
}
Some(NodeRole::Leader(leader_service)) => {
if let Some(TpuReturnType::LeaderRotation) = leader_service.join()? {
return Ok(Some(FullnodeReturnType::LeaderRotation));
}
}
_ => (),
}
Ok(None)
}
}
@ -298,33 +550,67 @@ impl Service for Fullnode {
mod tests {
use bank::Bank;
use crdt::Node;
use fullnode::Fullnode;
use mint::Mint;
use fullnode::{Fullnode, FullnodeReturnType};
use ledger::genesis;
use packet::make_consecutive_blobs;
use service::Service;
use signature::{Keypair, KeypairUtil};
use std::cmp;
use std::fs::remove_dir_all;
use std::net::UdpSocket;
use std::sync::mpsc::channel;
use std::sync::Arc;
use streamer::responder;
#[test]
fn validator_exit() {
let keypair = Keypair::new();
let tn = Node::new_localhost_with_pubkey(keypair.pubkey());
let alice = Mint::new(10_000);
let (alice, validator_ledger_path) = genesis("validator_exit", 10_000);
let bank = Bank::new(&alice);
let entry = tn.info.clone();
let v = Fullnode::new_with_bank(keypair, bank, 0, &[], tn, Some(&entry), None, false);
let v = Fullnode::new_with_bank(
keypair,
bank,
0,
&[],
tn,
Some(&entry),
&validator_ledger_path,
false,
None,
Some(0),
);
v.close().unwrap();
remove_dir_all(validator_ledger_path).unwrap();
}
#[test]
fn validator_parallel_exit() {
let mut ledger_paths = vec![];
let vals: Vec<Fullnode> = (0..2)
.map(|_| {
.map(|i| {
let keypair = Keypair::new();
let tn = Node::new_localhost_with_pubkey(keypair.pubkey());
let alice = Mint::new(10_000);
let (alice, validator_ledger_path) =
genesis(&format!("validator_parallel_exit_{}", i), 10_000);
ledger_paths.push(validator_ledger_path.clone());
let bank = Bank::new(&alice);
let entry = tn.info.clone();
Fullnode::new_with_bank(keypair, bank, 0, &[], tn, Some(&entry), None, false)
})
.collect();
Fullnode::new_with_bank(
keypair,
bank,
0,
&[],
tn,
Some(&entry),
&validator_ledger_path,
false,
None,
Some(0),
)
}).collect();
//each validator can exit in parallel to speed up the many sequential calls to `join`
vals.iter().for_each(|v| v.exit());
//while join is called sequentially, the above exit call notified all the
@ -332,5 +618,98 @@ mod tests {
vals.into_iter().for_each(|v| {
v.join().unwrap();
});
for path in ledger_paths {
remove_dir_all(path).unwrap();
}
}
#[test]
fn test_validator_to_leader_transition() {
// Make a leader identity
let leader_keypair = Keypair::new();
let leader_node = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
let leader_id = leader_node.info.id;
let leader_ncp = leader_node.info.contact_info.ncp;
// Start the validator node
let leader_rotation_interval = 10;
let (mint, validator_ledger_path) = genesis("test_validator_to_leader_transition", 10_000);
let validator_keypair = Keypair::new();
let validator_node = Node::new_localhost_with_pubkey(validator_keypair.pubkey());
let validator_info = validator_node.info.clone();
let mut validator = Fullnode::new(
validator_node,
&validator_ledger_path,
validator_keypair,
Some(leader_ncp),
false,
Some(leader_rotation_interval),
);
// Set the leader schedule for the validator
let my_leader_begin_epoch = 2;
for i in 0..my_leader_begin_epoch {
validator.set_scheduled_leader(leader_id, leader_rotation_interval * i);
}
validator.set_scheduled_leader(
validator_info.id,
my_leader_begin_epoch * leader_rotation_interval,
);
// Send blobs to the validator from our mock leader
let t_responder = {
let (s_responder, r_responder) = channel();
let blob_sockets: Vec<Arc<UdpSocket>> = leader_node
.sockets
.replicate
.into_iter()
.map(Arc::new)
.collect();
let t_responder = responder(
"test_validator_to_leader_transition",
blob_sockets[0].clone(),
r_responder,
);
// Send the blobs out of order, in reverse. Also send an extra
// "extra_blobs" number of blobs to make sure the window stops in the right place.
let extra_blobs = cmp::max(leader_rotation_interval / 3, 1);
let total_blobs_to_send =
my_leader_begin_epoch * leader_rotation_interval + extra_blobs;
let genesis_entries = mint.create_entries();
let last_id = genesis_entries
.last()
.expect("expected at least one genesis entry")
.id;
let tvu_address = &validator_info.contact_info.tvu;
let msgs =
make_consecutive_blobs(leader_id, total_blobs_to_send, last_id, &tvu_address)
.into_iter()
.rev()
.collect();
s_responder.send(msgs).expect("send");
t_responder
};
// Wait for validator to shut down tvu and restart tpu
match validator.handle_role_transition().unwrap() {
Some(FullnodeReturnType::LeaderRotation) => (),
_ => panic!("Expected reason for exit to be leader rotation"),
}
// Check the validator ledger to make sure it's the right height
let (_, entry_height, _) = Fullnode::new_bank_from_ledger(&validator_ledger_path);
assert_eq!(
entry_height,
my_leader_begin_epoch * leader_rotation_interval
);
// Shut down
t_responder.join().expect("responder thread join");
validator.close().unwrap();
remove_dir_all(&validator_ledger_path).unwrap();
}
}


@ -9,6 +9,29 @@ use std::fmt;
#[derive(Serialize, Deserialize, Clone, Copy, Default, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct Hash(GenericArray<u8, U32>);
#[derive(Clone, Default)]
pub struct Hasher {
hasher: Sha256,
}
impl Hasher {
pub fn hash(&mut self, val: &[u8]) -> () {
self.hasher.input(val);
}
pub fn hashv(&mut self, vals: &[&[u8]]) -> () {
for val in vals {
self.hash(val);
}
}
pub fn result(self) -> Hash {
// At the time of this writing, the sha2 library is stuck on an old version
// of generic_array (0.9.0). Decouple ourselves with a clone to our version.
Hash(GenericArray::clone_from_slice(
self.hasher.result().as_slice(),
))
}
}
impl AsRef<[u8]> for Hash {
fn as_ref(&self) -> &[u8] {
&self.0[..]
@ -26,14 +49,22 @@ impl fmt::Display for Hash {
write!(f, "{}", bs58::encode(self.0).into_string())
}
}
impl Hash {
pub fn new(hash_slice: &[u8]) -> Self {
Hash(GenericArray::clone_from_slice(&hash_slice))
}
}
/// Return a Sha256 hash for the given data.
pub fn hashv(vals: &[&[u8]]) -> Hash {
let mut hasher = Hasher::default();
hasher.hashv(vals);
hasher.result()
}
/// Return a Sha256 hash for the given data.
pub fn hash(val: &[u8]) -> Hash {
let mut hasher = Sha256::default();
hasher.input(val);
// At the time of this writing, the sha2 library is stuck on an old version
// of generic_array (0.9.0). Decouple ourselves with a clone to our version.
Hash(GenericArray::clone_from_slice(hasher.result().as_slice()))
hashv(&[val])
}
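Because SHA-256 is a streaming hash, feeding pieces incrementally through Hasher matches hashing the concatenation in one shot; a quick illustrative check:
let one_shot = hash(b"solana");
let mut hasher = Hasher::default();
hasher.hash(&b"sol"[..]);
hasher.hash(&b"ana"[..]);
assert_eq!(one_shot, hasher.result());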
/// Return the hash of the given hash extended with the given value.


@ -3,19 +3,26 @@
//! access read to a persistent file-based ledger.
use bincode::{self, deserialize, deserialize_from, serialize_into, serialized_size};
use budget_instruction::Vote;
use budget_transaction::BudgetTransaction;
use entry::Entry;
use hash::Hash;
use log::Level::Trace;
use packet::{self, SharedBlob, BLOB_DATA_SIZE};
#[cfg(test)]
use mint::Mint;
use packet::{SharedBlob, BLOB_DATA_SIZE};
use rayon::prelude::*;
use result::{Error, Result};
use signature::Pubkey;
#[cfg(test)]
use signature::{Keypair, KeypairUtil};
use solana_program_interface::pubkey::Pubkey;
use std::fs::{create_dir_all, remove_dir_all, File, OpenOptions};
use std::io::prelude::*;
use std::io::{self, BufReader, BufWriter, Seek, SeekFrom};
use std::mem::size_of;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::path::Path;
use transaction::{Transaction, Vote};
use transaction::Transaction;
use window::WINDOW_SIZE;
//
@ -392,27 +399,12 @@ pub fn read_ledger(
Ok(LedgerReader { data })
}
///// copy_ledger doesn't fix up the "from" ledger
//pub fn copy_ledger(from: &str, to: &str) -> io::Result<()> {
// let mut to = LedgerWriter::new(to, true)?;
//
// let from = Path::new(&from);
//
// // for a copy, we read "readonly" from data
// let data = File::open(from.join("data"))?;
//
// for entry in (LedgerReader { data }) {
// let entry = entry?;
// to.write_entry(&entry)?;
// }
// Ok(())
//}
// a Block is a slice of Entries
pub trait Block {
/// Verifies the hashes and counts of a slice of transactions are all consistent.
fn verify(&self, start_hash: &Hash) -> bool;
fn to_blobs(&self, blob_recycler: &packet::BlobRecycler) -> Vec<SharedBlob>;
fn to_blobs(&self) -> Vec<SharedBlob>;
fn to_blobs_with_id(&self, id: Pubkey, start_id: u64, addr: &SocketAddr) -> Vec<SharedBlob>;
fn votes(&self) -> Vec<(Pubkey, Vote, Hash)>;
}
@ -433,16 +425,26 @@ impl Block for [Entry] {
})
}
-    fn to_blobs(&self, blob_recycler: &packet::BlobRecycler) -> Vec<SharedBlob> {
+    fn to_blobs_with_id(&self, id: Pubkey, start_idx: u64, addr: &SocketAddr) -> Vec<SharedBlob> {
         self.iter()
-            .map(|entry| entry.to_blob(blob_recycler, None, None, None))
+            .enumerate()
+            .map(|(i, entry)| entry.to_blob(Some(start_idx + i as u64), Some(id), Some(&addr)))
             .collect()
     }
+    fn to_blobs(&self) -> Vec<SharedBlob> {
+        let default_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0);
+        self.to_blobs_with_id(Pubkey::default(), 0, &default_addr)
+    }
     fn votes(&self) -> Vec<(Pubkey, Vote, Hash)> {
         self.iter()
-            .flat_map(|entry| entry.transactions.iter().filter_map(Transaction::vote))
-            .collect()
+            .flat_map(|entry| {
+                entry
+                    .transactions
+                    .iter()
+                    .filter_map(BudgetTransaction::vote)
+            }).collect()
     }
}
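The new to_blobs_with_id gives each blob a consecutive index starting at start_idx, which is how the window and broadcast code addresses blobs; to_blobs keeps the old no-argument shape by delegating with defaults. A sketch of the indexing contract (not in the diff; crate-root `solana` paths are assumed for illustration):

extern crate solana;

use solana::entry::Entry;
use solana::hash::Hash;
use solana::ledger::Block;
use solana::signature::{Keypair, KeypairUtil};
use std::net::SocketAddr;

fn main() {
    // Three empty entries are enough to observe the index assignment.
    let entries = vec![Entry::new(&Hash::default(), 0, vec![]); 3];
    let id = Keypair::new().pubkey();
    let addr: SocketAddr = "127.0.0.1:0".parse().unwrap();

    let blobs = entries.to_blobs_with_id(id, 7, &addr);
    let indices: Vec<u64> = blobs
        .iter()
        .map(|b| b.read().unwrap().get_index().unwrap())
        .collect();
    assert_eq!(indices, vec![7, 8, 9]);
}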
@ -474,10 +476,10 @@ pub fn next_entries_mut(
num_hashes: &mut u64,
transactions: Vec<Transaction>,
) -> Vec<Entry> {
-    // TODO: find a magic number that works better than | ?
-    //                                                  V
+    // TODO: ?? find a number that works better than |?
+    //                                               V
     if transactions.is_empty() || transactions.len() == 1 {
-        vec![Entry::new_mut(start_hash, num_hashes, transactions, false)]
+        vec![Entry::new_mut(start_hash, num_hashes, transactions)]
} else {
let mut chunk_start = 0;
let mut entries = Vec::new();
@ -521,7 +523,6 @@ pub fn next_entries_mut(
start_hash,
num_hashes,
transactions[chunk_start..chunk_end].to_vec(),
-                transactions.len() - chunk_end > 0,
));
chunk_start = chunk_end;
}
@ -541,26 +542,37 @@ pub fn next_entries(
next_entries_mut(&mut id, &mut num_hashes, transactions)
}
+#[cfg(test)]
+pub fn tmp_ledger_path(name: &str) -> String {
+    let keypair = Keypair::new();
+    format!("/tmp/tmp-ledger-{}-{}", name, keypair.pubkey())
+}
+#[cfg(test)]
+pub fn genesis(name: &str, num: i64) -> (Mint, String) {
+    let mint = Mint::new(num);
+    let path = tmp_ledger_path(name);
+    let mut writer = LedgerWriter::open(&path, true).unwrap();
+    writer.write_entries(mint.create_entries()).unwrap();
+    (mint, path)
+}
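Together these helpers stand up a funded Mint and a matching on-disk ledger for tests. A hedged sketch (hypothetical test name, not in this diff) of how they compose with read_ledger above, written as it would sit inside the tests module that follows:

    #[test]
    fn test_genesis_round_trip() {
        let (mint, path) = genesis("test_genesis_round_trip", 100);

        // The two entries written by Mint::create_entries read back verbatim.
        let read: Vec<Entry> = read_ledger(&path).unwrap().map(|e| e.unwrap()).collect();
        assert_eq!(read, mint.create_entries());

        let _ignored = remove_dir_all(&path);
    }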
#[cfg(test)]
mod tests {
use super::*;
     use bincode::serialized_size;
+    use budget_instruction::Vote;
+    use budget_transaction::BudgetTransaction;
     use chrono::prelude::*;
     use entry::{next_entry, Entry};
     use hash::hash;
-    use packet::{BlobRecycler, BLOB_DATA_SIZE, PACKET_DATA_SIZE};
+    use packet::{to_blobs, BLOB_DATA_SIZE, PACKET_DATA_SIZE};
     use signature::{Keypair, KeypairUtil};
     use std;
     use std::net::{IpAddr, Ipv4Addr, SocketAddr};
-    use transaction::{Transaction, Vote};
-    fn tmp_ledger_path(name: &str) -> String {
-        use std::env;
-        let out_dir = env::var("OUT_DIR").unwrap_or_else(|_| "target".to_string());
-        let keypair = Keypair::new();
-        format!("{}/tmp-ledger-{}-{}", out_dir, name, keypair.pubkey())
-    }
+    use transaction::Transaction;
#[test]
fn test_verify_slice() {
@ -590,23 +602,22 @@ mod tests {
Entry::new_mut(
&mut id,
&mut num_hashes,
-                vec![Transaction::new_timestamp(
+                vec![Transaction::budget_new_timestamp(
                     &keypair,
+                    keypair.pubkey(),
                     keypair.pubkey(),
                     Utc::now(),
                     one,
                 )],
-                false,
             )
-        })
-        .collect()
+        }).collect()
}
fn make_test_entries() -> Vec<Entry> {
let zero = Hash::default();
let one = hash(&zero.as_ref());
let keypair = Keypair::new();
-        let tx0 = Transaction::new_vote(
+        let tx0 = Transaction::budget_new_vote(
&keypair,
Vote {
version: 0,
@ -615,7 +626,13 @@ mod tests {
one,
1,
);
-        let tx1 = Transaction::new_timestamp(&keypair, keypair.pubkey(), Utc::now(), one);
+        let tx1 = Transaction::budget_new_timestamp(
+            &keypair,
+            keypair.pubkey(),
+            keypair.pubkey(),
+            Utc::now(),
+            one,
+        );
//
// TODO: this magic number and the mix of transaction types
// is designed to fill up a Blob more or less exactly,
@ -637,8 +654,7 @@ mod tests {
logger::setup();
let entries = make_test_entries();
-        let blob_recycler = BlobRecycler::default();
-        let blob_q = entries.to_blobs(&blob_recycler);
+        let blob_q = entries.to_blobs();
assert_eq!(reconstruct_entries_from_blobs(blob_q).unwrap(), entries);
}
@ -647,9 +663,8 @@ mod tests {
fn test_bad_blobs_attack() {
use logger;
logger::setup();
-        let blob_recycler = BlobRecycler::default();
         let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
-        let blobs_q = packet::to_blobs(vec![(0, addr)], &blob_recycler).unwrap(); // <-- attack!
+        let blobs_q = to_blobs(vec![(0, addr)]).unwrap(); // <-- attack!
assert!(reconstruct_entries_from_blobs(blobs_q).is_err());
}
@ -660,7 +675,7 @@ mod tests {
let id = Hash::default();
let next_id = hash(&id.as_ref());
let keypair = Keypair::new();
-        let tx_small = Transaction::new_vote(
+        let tx_small = Transaction::budget_new_vote(
&keypair,
Vote {
version: 0,
@ -669,7 +684,7 @@ mod tests {
next_id,
2,
);
-        let tx_large = Transaction::new(&keypair, keypair.pubkey(), 1, next_id);
+        let tx_large = Transaction::budget_new(&keypair, keypair.pubkey(), 1, next_id);
let tx_small_size = serialized_size(&tx_small).unwrap() as usize;
let tx_large_size = serialized_size(&tx_large).unwrap() as usize;
@ -677,7 +692,6 @@ mod tests {
num_hashes: 0,
id: Hash::default(),
transactions: vec![],
-            has_more: false,
}).unwrap() as usize;
assert!(tx_small_size < tx_large_size);
assert!(tx_large_size < PACKET_DATA_SIZE);
@ -694,8 +708,6 @@ mod tests {
let transactions = vec![tx_small.clone(); threshold * 2];
let entries0 = next_entries(&id, 0, transactions.clone());
assert_eq!(entries0.len(), 2);
-        assert!(entries0[0].has_more);
-        assert!(!entries0[entries0.len() - 1].has_more);
assert!(entries0.verify(&id));
// verify the split with small transactions followed by large
@ -707,8 +719,6 @@ mod tests {
let entries0 = next_entries(&id, 0, transactions.clone());
assert!(entries0.len() >= 2);
-        assert!(entries0[0].has_more);
-        assert!(!entries0[entries0.len() - 1].has_more);
assert!(entries0.verify(&id));
}
@ -859,31 +869,4 @@ mod tests {
let _ignored = remove_dir_all(&ledger_path);
}
-    // #[test]
-    // fn test_copy_ledger() {
-    //     use logger;
-    //     logger::setup();
-    //
-    //     let from = tmp_ledger_path("test_ledger_copy_from");
-    //     let entries = make_tiny_test_entries(10);
-    //
-    //     let mut writer = LedgerWriter::new(&from, true).unwrap();
-    //     writer.write_entries(entries.clone()).unwrap();
-    //
-    //     let to = tmp_ledger_path("test_ledger_copy_to");
-    //
-    //     copy_ledger(&from, &to).unwrap();
-    //
-    //     let mut read_entries = vec![];
-    //     for x in read_ledger(&to).unwrap() {
-    //         let entry = x.unwrap();
-    //         trace!("entry... {:?}", entry);
-    //         read_entries.push(entry);
-    //     }
-    //     assert_eq!(read_entries, entries);
-    //
-    //     std::fs::remove_dir_all(from).unwrap();
-    //     std::fs::remove_dir_all(to).unwrap();
-    // }
}

View File

@ -14,11 +14,15 @@ pub mod banking_stage;
pub mod blob_fetch_stage;
pub mod broadcast_stage;
pub mod budget;
+pub mod budget_instruction;
+pub mod budget_transaction;
pub mod choose_gossip_peer_strategy;
pub mod client;
#[macro_use]
pub mod crdt;
+pub mod budget_program;
pub mod drone;
+pub mod dynamic_program;
pub mod entry;
pub mod entry_writer;
#[cfg(feature = "erasure")]
@ -34,9 +38,11 @@ pub mod ncp;
pub mod netutil;
pub mod packet;
pub mod payment_plan;
-pub mod record_stage;
-pub mod recorder;
+pub mod poh;
+pub mod poh_recorder;
+pub mod recvmmsg;
 pub mod replicate_stage;
+pub mod replicator;
pub mod request;
pub mod request_processor;
pub mod request_stage;
@ -48,9 +54,16 @@ pub mod service;
pub mod signature;
pub mod sigverify;
pub mod sigverify_stage;
+pub mod storage_program;
+pub mod store_ledger_stage;
pub mod streamer;
+pub mod system_program;
+pub mod system_transaction;
pub mod thin_client;
+pub mod tictactoe_dashboard_program;
+pub mod tictactoe_program;
pub mod timing;
+pub mod token_program;
pub mod tpu;
pub mod transaction;
pub mod tvu;
@ -62,27 +75,38 @@ pub mod write_stage;
extern crate bincode;
extern crate bs58;
extern crate byteorder;
extern crate bytes;
extern crate chrono;
extern crate clap;
extern crate dirs;
extern crate generic_array;
+extern crate ipnetwork;
extern crate itertools;
-extern crate jsonrpc_core;
-#[macro_use]
-extern crate jsonrpc_macros;
-extern crate jsonrpc_http_server;
extern crate libc;
+extern crate libloading;
#[macro_use]
extern crate log;
extern crate nix;
+extern crate pnet_datalink;
extern crate rayon;
extern crate reqwest;
extern crate ring;
extern crate serde;
#[macro_use]
extern crate serde_derive;
-extern crate pnet_datalink;
#[macro_use]
extern crate serde_json;
extern crate serde_cbor;
extern crate sha2;
extern crate socket2;
+extern crate solana_jsonrpc_core as jsonrpc_core;
+extern crate solana_jsonrpc_http_server as jsonrpc_http_server;
+#[macro_use]
+extern crate solana_jsonrpc_macros as jsonrpc_macros;
+extern crate solana_program_interface;
extern crate sys_info;
extern crate tokio;
extern crate tokio_codec;
extern crate untrusted;
#[cfg(test)]

View File

@ -342,12 +342,10 @@ mod test {
             .add_field(
                 "random_bool",
                 influxdb::Value::Boolean(random::<u8>() < 128),
-            )
-            .add_field(
+            ).add_field(
                 "random_int",
                 influxdb::Value::Integer(random::<u8>() as i64),
-            )
-            .to_owned();
+            ).to_owned();
agent.submit(point);
}

View File

@ -3,7 +3,9 @@
use entry::Entry;
use hash::{hash, Hash};
use ring::rand::SystemRandom;
-use signature::{Keypair, KeypairUtil, Pubkey};
+use signature::{Keypair, KeypairUtil};
+use solana_program_interface::pubkey::Pubkey;
+use system_transaction::SystemTransaction;
use transaction::Transaction;
use untrusted::Input;
@ -52,13 +54,13 @@ impl Mint {
pub fn create_transactions(&self) -> Vec<Transaction> {
let keypair = self.keypair();
-        let tx = Transaction::new(&keypair, self.pubkey(), self.tokens, self.seed());
+        let tx = Transaction::system_move(&keypair, self.pubkey(), self.tokens, self.seed(), 0);
vec![tx]
}
pub fn create_entries(&self) -> Vec<Entry> {
-        let e0 = Entry::new(&self.seed(), 0, vec![], false);
-        let e1 = Entry::new(&e0.id, 0, self.create_transactions(), false);
+        let e0 = Entry::new(&self.seed(), 0, vec![]);
+        let e1 = Entry::new(&e0.id, 0, self.create_transactions());
vec![e0, e1]
}
}
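A small usage sketch (not part of this diff): the two entries a Mint produces already form a valid micro-ledger, which the Block trait from the ledger module can check, assuming crate-root `solana` paths:

extern crate solana;

use solana::ledger::Block;
use solana::mint::Mint;

fn main() {
    let mint = Mint::new(100);

    // e0 performs no hashing, so verification can start from its own id;
    // e1 carries the initial token move and chains from e0.
    let entries = mint.create_entries();
    assert!(entries.verify(&entries[0].id));
}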
@ -66,18 +68,18 @@ impl Mint {
#[cfg(test)]
mod tests {
use super::*;
-    use budget::Budget;
+    use bincode::deserialize;
     use ledger::Block;
-    use transaction::{Instruction, Plan};
+    use system_program::SystemProgram;
#[test]
fn test_create_transactions() {
let mut transactions = Mint::new(100).create_transactions().into_iter();
let tx = transactions.next().unwrap();
-        if let Instruction::NewContract(contract) = tx.instruction() {
-            if let Plan::Budget(Budget::Pay(payment)) = contract.plan {
-                assert_eq!(*tx.from(), payment.to);
-            }
-        }
+        assert!(SystemProgram::check_id(&tx.program_id));
+        let instruction: SystemProgram = deserialize(&tx.userdata).unwrap();
+        if let SystemProgram::Move { tokens } = instruction {
+            assert_eq!(tokens, 100);
+        }
assert_eq!(transactions.next(), None);
}

View File

@ -1,7 +1,6 @@
//! The `ncp` module implements the network control plane.
use crdt::Crdt;
-use packet::BlobRecycler;
use service::Service;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
@ -20,7 +19,6 @@ impl Ncp {
pub fn new(
crdt: &Arc<RwLock<Crdt>>,
window: SharedWindow,
-        blob_recycler: BlobRecycler,
ledger_path: Option<&str>,
gossip_socket: UdpSocket,
exit: Arc<AtomicBool>,
@ -32,29 +30,19 @@ impl Ncp {
&crdt.read().unwrap().id.as_ref()[..4],
gossip_socket.local_addr().unwrap()
);
-        let t_receiver = streamer::blob_receiver(
-            gossip_socket.clone(),
-            exit.clone(),
-            blob_recycler.clone(),
-            request_sender,
-        );
+        let t_receiver =
+            streamer::blob_receiver(gossip_socket.clone(), exit.clone(), request_sender);
let (response_sender, response_receiver) = channel();
-        let t_responder = streamer::responder(
-            "ncp",
-            gossip_socket,
-            blob_recycler.clone(),
-            response_receiver,
-        );
+        let t_responder = streamer::responder("ncp", gossip_socket, response_receiver);
let t_listen = Crdt::listen(
crdt.clone(),
window,
ledger_path,
-            blob_recycler.clone(),
request_receiver,
response_sender.clone(),
exit.clone(),
);
-        let t_gossip = Crdt::gossip(crdt.clone(), blob_recycler, response_sender, exit.clone());
+        let t_gossip = Crdt::gossip(crdt.clone(), response_sender, exit.clone());
let thread_hdls = vec![t_receiver, t_responder, t_listen, t_gossip];
Ncp { exit, thread_hdls }
}
@ -66,12 +54,10 @@ impl Ncp {
}
impl Service for Ncp {
-    fn thread_hdls(self) -> Vec<JoinHandle<()>> {
-        self.thread_hdls
-    }
+    type JoinReturnType = ();
     fn join(self) -> thread::Result<()> {
-        for thread_hdl in self.thread_hdls() {
+        for thread_hdl in self.thread_hdls {
thread_hdl.join()?;
}
Ok(())
@ -82,7 +68,6 @@ impl Service for Ncp {
mod tests {
use crdt::{Crdt, Node};
use ncp::Ncp;
-    use packet::BlobRecycler;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, RwLock};
@ -95,14 +80,7 @@ mod tests {
let crdt = Crdt::new(tn.info.clone()).expect("Crdt::new");
let c = Arc::new(RwLock::new(crdt));
let w = Arc::new(RwLock::new(vec![]));
-        let d = Ncp::new(
-            &c,
-            w,
-            BlobRecycler::default(),
-            None,
-            tn.sockets.gossip,
-            exit.clone(),
-        );
+        let d = Ncp::new(&c, w, None, tn.sockets.gossip, exit.clone());
d.close().expect("thread join");
}
}

View File

@ -48,22 +48,42 @@ pub fn parse_port_or_addr(optstr: Option<&str>, default_port: u16) -> SocketAddr
}
}
-pub fn get_ip_addr() -> Option<IpAddr> {
-    for iface in datalink::interfaces() {
+fn find_eth0ish_ip_addr(ifaces: &mut Vec<datalink::NetworkInterface>) -> Option<IpAddr> {
+    // put eth0 and wifi0, etc. up front of our list of candidates
+    ifaces.sort_by(|a, b| {
+        a.name
+            .chars()
+            .last()
+            .unwrap()
+            .cmp(&b.name.chars().last().unwrap())
+    });
+    for iface in ifaces.clone() {
+        trace!("get_ip_addr considering iface {}", iface.name);
         for p in iface.ips {
-            if !p.ip().is_loopback() && !p.ip().is_multicast() {
-                match p.ip() {
-                    IpAddr::V4(addr) => {
-                        if !addr.is_link_local() {
-                            return Some(p.ip());
-                        }
-                    }
-                    IpAddr::V6(_addr) => {
-                        // Select an ipv6 address if the config is selected
-                        #[cfg(feature = "ipv6")]
-                        {
-                            return Some(p.ip());
-                        }
-                    }
-                }
-            }
+            trace!(" ip {}", p);
+            if p.ip().is_loopback() {
+                trace!(" loopback");
+                continue;
+            }
+            if p.ip().is_multicast() {
+                trace!(" multicast");
+                continue;
+            }
+            match p.ip() {
+                IpAddr::V4(addr) => {
+                    if addr.is_link_local() {
+                        trace!(" link local");
+                        continue;
+                    }
+                    trace!(" picked {}", p.ip());
+                    return Some(p.ip());
+                }
+                IpAddr::V6(_addr) => {
+                    // Select an ipv6 address if the config is selected
+                    #[cfg(feature = "ipv6")]
+                    {
+                        return Some(p.ip());
+                    }
+                }
+            }
         }
@ -72,6 +92,12 @@ pub fn get_ip_addr() -> Option<IpAddr> {
None
}
+pub fn get_ip_addr() -> Option<IpAddr> {
+    let mut ifaces = datalink::interfaces();
+    find_eth0ish_ip_addr(&mut ifaces)
+}
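The comparator sorts interface names by their final character only, so anything ending in '0' (eth0, wifi0) floats ahead of eth1..eth9 and of "lo". A standalone sketch of the induced ordering (illustration only, plain std):

fn main() {
    let mut names = vec!["lo", "eth3", "wifi0", "eth2", "eth0"];
    names.sort_by(|a, b| a.chars().last().unwrap().cmp(&b.chars().last().unwrap()));

    // Names ending in '0' sort first; the sort is stable, so equal keys
    // keep their original relative order.
    assert_eq!(names, vec!["wifi0", "eth0", "eth2", "eth3", "lo"]);
}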
fn udp_socket(reuseaddr: bool) -> io::Result<Socket> {
let sock = Socket::new(Domain::ipv4(), Type::dgram(), None)?;
let sock_fd = sock.as_raw_fd();
@ -135,7 +161,74 @@ pub fn bind_to(port: u16, reuseaddr: bool) -> io::Result<UdpSocket> {
#[cfg(test)]
mod tests {
use ipnetwork::IpNetwork;
use logger;
use netutil::*;
use pnet_datalink as datalink;
#[test]
fn test_find_eth0ish_ip_addr() {
logger::setup();
macro_rules! mock_interface {
($name:ident, $ip_mask:expr) => {
datalink::NetworkInterface {
name: stringify!($name).to_string(),
index: 0,
mac: None,
ips: vec![IpNetwork::V4($ip_mask.parse().unwrap())],
flags: 0,
}
};
}
// loopback bad
assert_eq!(
find_eth0ish_ip_addr(&mut vec![mock_interface!(lo, "127.0.0.1/24")]),
None
);
// multicast bad
assert_eq!(
find_eth0ish_ip_addr(&mut vec![mock_interface!(eth0, "224.0.1.5/24")]),
None
);
// finds "wifi0"
assert_eq!(
find_eth0ish_ip_addr(&mut vec![
mock_interface!(eth0, "224.0.1.5/24"),
mock_interface!(eth2, "192.168.137.1/8"),
mock_interface!(eth3, "10.0.75.1/8"),
mock_interface!(eth4, "172.22.140.113/4"),
mock_interface!(lo, "127.0.0.1/24"),
mock_interface!(wifi0, "192.168.1.184/8"),
]),
Some(mock_interface!(wifi0, "192.168.1.184/8").ips[0].ip())
);
// finds "wifi0" in the middle
assert_eq!(
find_eth0ish_ip_addr(&mut vec![
mock_interface!(eth0, "224.0.1.5/24"),
mock_interface!(eth2, "192.168.137.1/8"),
mock_interface!(eth3, "10.0.75.1/8"),
mock_interface!(wifi0, "192.168.1.184/8"),
mock_interface!(eth4, "172.22.140.113/4"),
mock_interface!(lo, "127.0.0.1/24"),
]),
Some(mock_interface!(wifi0, "192.168.1.184/8").ips[0].ip())
);
// picks "eth2", is lowest valid "eth"
assert_eq!(
find_eth0ish_ip_addr(&mut vec![
mock_interface!(eth0, "224.0.1.5/24"),
mock_interface!(eth2, "192.168.137.1/8"),
mock_interface!(eth3, "10.0.75.1/8"),
mock_interface!(eth4, "172.22.140.113/4"),
mock_interface!(lo, "127.0.0.1/24"),
]),
Some(mock_interface!(eth2, "192.168.137.1/8").ips[0].ip())
);
}
#[test]
fn test_parse_port_or_addr() {

View File

@ -2,27 +2,30 @@
 use bincode::{deserialize, serialize};
 use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
 use counter::Counter;
+#[cfg(test)]
+use hash::Hash;
+#[cfg(test)]
+use ledger::{next_entries_mut, Block};
 use log::Level;
+use recvmmsg::{recv_mmsg, NUM_RCVMMSGS};
 use result::{Error, Result};
 use serde::Serialize;
-use signature::Pubkey;
+use solana_program_interface::pubkey::Pubkey;
 use std::fmt;
 use std::io;
 use std::mem::size_of;
 use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket};
 use std::sync::atomic::AtomicUsize;
-use std::sync::{Arc, Mutex, RwLock};
+use std::sync::{Arc, RwLock};
 pub type SharedPackets = Arc<RwLock<Packets>>;
 pub type SharedBlob = Arc<RwLock<Blob>>;
 pub type SharedBlobs = Vec<SharedBlob>;
-pub type PacketRecycler = Recycler<Packets>;
-pub type BlobRecycler = Recycler<Blob>;
 pub const NUM_PACKETS: usize = 1024 * 8;
 pub const BLOB_SIZE: usize = (64 * 1024 - 128); // wikipedia says there should be 20b for ipv4 headers
 pub const BLOB_DATA_SIZE: usize = BLOB_SIZE - (BLOB_HEADER_SIZE * 2);
-pub const PACKET_DATA_SIZE: usize = 256;
+pub const PACKET_DATA_SIZE: usize = 512;
 pub const NUM_BLOBS: usize = (NUM_PACKETS * PACKET_DATA_SIZE) / BLOB_SIZE;
#[derive(Clone, Default, Debug, PartialEq)]
@ -62,19 +65,6 @@ impl Default for Packet {
}
}
-pub trait Reset {
-    // Reset trait is an object that can re-initialize important parts
-    // of itself, similar to Default, but not necessarily a full clear
-    // also, we do it in-place.
-    fn reset(&mut self);
-}
-impl Reset for Packet {
-    fn reset(&mut self) {
-        self.meta = Meta::default();
-    }
-}
impl Meta {
pub fn addr(&self) -> SocketAddr {
if !self.v6 {
@ -129,14 +119,6 @@ impl Default for Packets {
}
}
-impl Reset for Packets {
-    fn reset(&mut self) {
-        for i in 0..self.packets.len() {
-            self.packets[i].reset();
-        }
-    }
-}
#[derive(Clone)]
pub struct Blob {
pub data: [u8; BLOB_SIZE],
@ -164,82 +146,12 @@ impl Default for Blob {
}
}
-impl Reset for Blob {
-    fn reset(&mut self) {
-        self.meta = Meta::default();
-        self.data[..BLOB_HEADER_SIZE].copy_from_slice(&[0u8; BLOB_HEADER_SIZE]);
-    }
-}
#[derive(Debug)]
pub enum BlobError {
/// the Blob's meta and data are not self-consistent
BadState,
}
-pub struct Recycler<T> {
-    #[cfg_attr(feature = "cargo-clippy", allow(type_complexity))]
-    gc: Arc<Mutex<Vec<(Arc<RwLock<T>>, &'static str)>>>,
-}
-impl<T: Default> Default for Recycler<T> {
-    fn default() -> Recycler<T> {
-        Recycler {
-            gc: Arc::new(Mutex::new(vec![])),
-        }
-    }
-}
-impl<T: Default> Clone for Recycler<T> {
-    fn clone(&self) -> Recycler<T> {
-        Recycler {
-            gc: self.gc.clone(),
-        }
-    }
-}
-impl<T: Default + Reset> Recycler<T> {
-    pub fn allocate(&self) -> Arc<RwLock<T>> {
-        let mut gc = self.gc.lock().expect("recycler lock in pb fn allocate");
-        loop {
-            if let Some((x, who)) = gc.pop() {
-                // Only return the item if this recycler is the last reference to it.
-                // Remove this check once `T` holds a Weak reference back to this
-                // recycler and implements `Drop`. At the time of this writing, Weak can't
-                // be passed across threads ('alloc' is a nightly-only API), and so our
-                // reference-counted recyclables are awkwardly being recycled by hand,
-                // which allows this race condition to exist.
-                if Arc::strong_count(&x) > 1 {
-                    // Commenting out this message, is annoying for known use case of
-                    // validator hanging onto a blob in the window, but also sending it over
-                    // to retransmmit_request
-                    //
-                    // warn!("Recycled item still in use. Booting it.");
-                    trace!(
-                        "Recycled item from \"{}\" still in use. {} Booting it.",
-                        who,
-                        Arc::strong_count(&x)
-                    );
-                    continue;
-                }
-                {
-                    let mut w = x.write().unwrap();
-                    w.reset();
-                }
-                return x;
-            } else {
-                return Arc::new(RwLock::new(Default::default()));
-            }
-        }
-    }
-    pub fn recycle(&self, x: Arc<RwLock<T>>, who: &'static str) {
-        let mut gc = self.gc.lock().expect("recycler lock in pub fn recycle");
-        gc.push((x, who));
-    }
-}
impl Packets {
fn run_read_from(&mut self, socket: &UdpSocket) -> Result<usize> {
self.packets.resize(NUM_PACKETS, Packet::default());
@ -251,31 +163,30 @@ impl Packets {
         // * read until it fails
         // * set it back to blocking before returning
         socket.set_nonblocking(false)?;
-        for p in &mut self.packets {
-            p.meta.size = 0;
-            trace!("receiving on {}", socket.local_addr().unwrap());
-            match socket.recv_from(&mut p.data) {
+        trace!("receiving on {}", socket.local_addr().unwrap());
+        loop {
+            match recv_mmsg(socket, &mut self.packets[i..]) {
                 Err(_) if i > 0 => {
-                    inc_new_counter_info!("packets-recv_count", i);
-                    debug!("got {:?} messages on {}", i, socket.local_addr().unwrap());
-                    break;
+                    socket.set_nonblocking(true)?;
+                    return Ok(i);
                 }
                 Err(e) => {
                     trace!("recv_from err {:?}", e);
                     return Err(Error::IO(e));
                 }
-                Ok((nrecv, from)) => {
-                    p.meta.size = nrecv;
-                    p.meta.set_addr(&from);
-                    trace!("got {} bytes from {}", nrecv, from);
-                    if i == 0 {
-                        socket.set_nonblocking(true)?;
+                Ok(npkts) => {
+                    trace!("got {} packets", npkts);
+                    i += npkts;
+                    if npkts != NUM_RCVMMSGS {
+                        socket.set_nonblocking(true)?;
+                        inc_new_counter_info!("packets-recv_count", i);
+                        return Ok(i);
                     }
                 }
             }
-            i += 1;
         }
-        Ok(i)
     }
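Callers are unchanged: recv_from still blocks for the first datagram, then drains whatever recv_mmsg can batch before restoring non-blocking mode. A sketch (not in the diff), assuming the crate root `solana` and a local loopback socket:

extern crate solana;

use solana::packet::Packets;
use std::net::UdpSocket;

fn main() -> std::io::Result<()> {
    let reader = UdpSocket::bind("127.0.0.1:0")?;
    let sender = UdpSocket::bind("127.0.0.1:0")?;
    sender.send_to(&[0u8; 8], reader.local_addr()?)?;

    let mut batch = Packets::default();
    // Blocks for the first datagram, then gathers up to NUM_RCVMMSGS
    // packets per recv_mmsg call until the socket would block.
    batch.recv_from(&reader).expect("recv");
    assert!(!batch.packets.is_empty());
    Ok(())
}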
pub fn recv_from(&mut self, socket: &UdpSocket) -> Result<()> {
let sz = self.run_read_from(socket)?;
@ -292,14 +203,10 @@ impl Packets {
}
}
-pub fn to_packets_chunked<T: Serialize>(
-    r: &PacketRecycler,
-    xs: &[T],
-    chunks: usize,
-) -> Vec<SharedPackets> {
+pub fn to_packets_chunked<T: Serialize>(xs: &[T], chunks: usize) -> Vec<SharedPackets> {
     let mut out = vec![];
     for x in xs.chunks(chunks) {
-        let p = r.allocate();
+        let mut p = SharedPackets::default();
p.write()
.unwrap()
.packets
@ -315,16 +222,12 @@ pub fn to_packets_chunked<T: Serialize>(
out
}
-pub fn to_packets<T: Serialize>(r: &PacketRecycler, xs: &[T]) -> Vec<SharedPackets> {
-    to_packets_chunked(r, xs, NUM_PACKETS)
+pub fn to_packets<T: Serialize>(xs: &[T]) -> Vec<SharedPackets> {
+    to_packets_chunked(xs, NUM_PACKETS)
}
-pub fn to_blob<T: Serialize>(
-    resp: T,
-    rsp_addr: SocketAddr,
-    blob_recycler: &BlobRecycler,
-) -> Result<SharedBlob> {
-    let blob = blob_recycler.allocate();
+pub fn to_blob<T: Serialize>(resp: T, rsp_addr: SocketAddr) -> Result<SharedBlob> {
+    let blob = SharedBlob::default();
{
let mut b = blob.write().unwrap();
let v = serialize(&resp)?;
@ -337,13 +240,10 @@ pub fn to_blob<T: Serialize>(
Ok(blob)
}
-pub fn to_blobs<T: Serialize>(
-    rsps: Vec<(T, SocketAddr)>,
-    blob_recycler: &BlobRecycler,
-) -> Result<SharedBlobs> {
+pub fn to_blobs<T: Serialize>(rsps: Vec<(T, SocketAddr)>) -> Result<SharedBlobs> {
     let mut blobs = Vec::new();
     for (resp, rsp_addr) in rsps {
-        blobs.push(to_blob(resp, rsp_addr, blob_recycler)?);
+        blobs.push(to_blob(resp, rsp_addr)?);
}
Ok(blobs)
}
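Without the recycler, any Serialize response can be turned straight into a SharedBlob addressed to its destination. An illustrative sketch (not in the diff; crate-root `solana` paths assumed):

extern crate solana;

use solana::packet::to_blob;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};

fn main() {
    let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8000);

    // The blob records both the serialized payload and its destination.
    let blob = to_blob("pong".to_string(), addr).expect("serialize into blob");
    let b = blob.read().unwrap();
    assert_eq!(b.meta.addr(), addr);
    assert!(b.meta.size > 0);
}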
@ -441,7 +341,19 @@ impl Blob {
self.meta.size = new_size;
self.set_data_size(new_size as u64).unwrap();
}
-    pub fn recv_from(re: &BlobRecycler, socket: &UdpSocket) -> Result<SharedBlobs> {
+    pub fn recv_blob(socket: &UdpSocket, r: &SharedBlob) -> io::Result<()> {
+        let mut p = r.write().unwrap();
+        trace!("receiving on {}", socket.local_addr().unwrap());
+        let (nrecv, from) = socket.recv_from(&mut p.data)?;
+        p.meta.size = nrecv;
+        p.meta.set_addr(&from);
+        trace!("got {} bytes from {}", nrecv, from);
+        Ok(())
+    }
+    pub fn recv_from(socket: &UdpSocket) -> Result<SharedBlobs> {
let mut v = Vec::new();
//DOCUMENTED SIDE-EFFECT
//Performance out of the IO without poll
@ -451,39 +363,31 @@ impl Blob {
// * set it back to blocking before returning
socket.set_nonblocking(false)?;
         for i in 0..NUM_BLOBS {
-            let r = re.allocate();
-            {
-                let mut p = r.write().expect("'r' write lock in pub fn recv_from");
-                trace!("receiving on {}", socket.local_addr().unwrap());
-                match socket.recv_from(&mut p.data) {
-                    Err(_) if i > 0 => {
-                        trace!("got {:?} messages on {}", i, socket.local_addr().unwrap());
-                        break;
-                    }
-                    Err(e) => {
-                        if e.kind() != io::ErrorKind::WouldBlock {
-                            info!("recv_from err {:?}", e);
-                        }
-                        return Err(Error::IO(e));
-                    }
-                    Ok((nrecv, from)) => {
-                        p.meta.size = nrecv;
-                        p.meta.set_addr(&from);
-                        trace!("got {} bytes from {}", nrecv, from);
-                        if i == 0 {
-                            socket.set_nonblocking(true)?;
-                        }
-                    }
-                }
-            }
+            let r = SharedBlob::default();
+            match Blob::recv_blob(socket, &r) {
+                Err(_) if i > 0 => {
+                    trace!("got {:?} messages on {}", i, socket.local_addr().unwrap());
+                    break;
+                }
+                Err(e) => {
+                    if e.kind() != io::ErrorKind::WouldBlock {
+                        info!("recv_from err {:?}", e);
+                    }
+                    return Err(Error::IO(e));
+                }
+                Ok(()) => if i == 0 {
+                    socket.set_nonblocking(true)?;
+                },
+            }
             v.push(r);
         }
Ok(v)
}
-    pub fn send_to(re: &BlobRecycler, socket: &UdpSocket, v: SharedBlobs) -> Result<()> {
+    pub fn send_to(socket: &UdpSocket, v: SharedBlobs) -> Result<()> {
         for r in v {
             {
-                let p = r.read().expect("'r' read lock in pub fn send_to");
+                let p = r.read().unwrap();
let a = p.meta.addr();
if let Err(e) = socket.send_to(&p.data[..p.meta.size], &a) {
warn!(
@ -493,104 +397,47 @@ impl Blob {
Err(e)?;
}
}
re.recycle(r, "send_to");
}
Ok(())
}
}
+#[cfg(test)]
+pub fn make_consecutive_blobs(
+    me_id: Pubkey,
+    num_blobs_to_make: u64,
+    start_hash: Hash,
+    addr: &SocketAddr,
+) -> SharedBlobs {
+    let mut last_hash = start_hash;
+    let mut num_hashes = 0;
+    let mut all_entries = Vec::with_capacity(num_blobs_to_make as usize);
+    for _ in 0..num_blobs_to_make {
+        all_entries.extend(next_entries_mut(&mut last_hash, &mut num_hashes, vec![]));
+    }
+    let mut new_blobs = all_entries.to_blobs_with_id(me_id, 0, addr);
+    new_blobs.truncate(num_blobs_to_make as usize);
+    new_blobs
+}
#[cfg(test)]
mod tests {
     use packet::{
-        to_packets, Blob, BlobRecycler, Meta, Packet, PacketRecycler, Packets, Recycler, Reset,
-        BLOB_HEADER_SIZE, NUM_PACKETS, PACKET_DATA_SIZE,
+        to_packets, Blob, Meta, Packet, Packets, SharedBlob, SharedPackets, NUM_PACKETS,
+        PACKET_DATA_SIZE,
     };
use request::Request;
use std::io;
use std::io::Write;
use std::net::UdpSocket;
use std::sync::Arc;
-    #[test]
-    pub fn packet_recycler_test() {
-        let r = PacketRecycler::default();
-        let p = r.allocate();
-        r.recycle(p, "recycler_test");
-        assert_eq!(r.gc.lock().unwrap().len(), 1);
-        let _ = r.allocate();
-        assert_eq!(r.gc.lock().unwrap().len(), 0);
-    }
-    impl Reset for u8 {
-        fn reset(&mut self) {
-            *self = Default::default();
-        }
-    }
-    #[test]
-    pub fn test_leaked_recyclable() {
-        // Ensure that the recycler won't return an item
-        // that is still referenced outside the recycler.
-        let r = Recycler::<u8>::default();
-        let x0 = r.allocate();
-        r.recycle(x0.clone(), "leaked_recyclable:1");
-        assert_eq!(Arc::strong_count(&x0), 2);
-        assert_eq!(r.gc.lock().unwrap().len(), 1);
-        let x1 = r.allocate();
-        assert_eq!(Arc::strong_count(&x1), 1);
-        assert_eq!(r.gc.lock().unwrap().len(), 0);
-    }
-    #[test]
-    pub fn test_leaked_recyclable_recursion() {
-        // In the case of a leaked recyclable, ensure the recycler drops its lock before recursing.
-        let r = Recycler::<u8>::default();
-        let x0 = r.allocate();
-        let x1 = r.allocate();
-        r.recycle(x0, "leaked_recyclable_recursion:1"); // <-- allocate() of this will require locking the recycler's stack.
-        r.recycle(x1.clone(), "leaked_recyclable_recursion:2"); // <-- allocate() of this will cause it to be dropped and recurse.
-        assert_eq!(Arc::strong_count(&x1), 2);
-        assert_eq!(r.gc.lock().unwrap().len(), 2);
-        r.allocate(); // Ensure lock is released before recursing.
-        assert_eq!(r.gc.lock().unwrap().len(), 0);
-    }
-    #[test]
-    pub fn test_recycling_is_happening() {
-        // Test the case in allocate() which should return a re-used object and not allocate a new
-        // one.
-        let r = PacketRecycler::default();
-        let x0 = r.allocate();
-        {
-            x0.write().unwrap().packets.resize(1, Packet::default());
-        }
-        r.recycle(x0, "recycle");
-        let x1 = r.allocate();
-        assert_ne!(
-            x1.read().unwrap().packets.len(),
-            Packets::default().packets.len()
-        );
-    }
-    #[test]
-    pub fn blob_recycler_test() {
-        let r = BlobRecycler::default();
-        let p = r.allocate();
-        r.recycle(p, "blob_recycler_test");
-        assert_eq!(r.gc.lock().unwrap().len(), 1);
-        let _ = r.allocate();
-        assert_eq!(r.gc.lock().unwrap().len(), 0);
-    }
#[test]
pub fn packet_send_recv() {
let reader = UdpSocket::bind("127.0.0.1:0").expect("bind");
let addr = reader.local_addr().unwrap();
let sender = UdpSocket::bind("127.0.0.1:0").expect("bind");
let saddr = sender.local_addr().unwrap();
-        let r = PacketRecycler::default();
-        let p = r.allocate();
+        let p = SharedPackets::default();
p.write().unwrap().packets.resize(10, Packet::default());
for m in p.write().unwrap().packets.iter_mut() {
m.meta.set_addr(&addr);
@ -602,23 +449,20 @@ mod tests {
assert_eq!(m.meta.size, PACKET_DATA_SIZE);
assert_eq!(m.meta.addr(), saddr);
}
r.recycle(p, "packet_send_recv");
}
#[test]
fn test_to_packets() {
let tx = Request::GetTransactionCount;
-        let re = PacketRecycler::default();
-        let rv = to_packets(&re, &vec![tx.clone(); 1]);
+        let rv = to_packets(&vec![tx.clone(); 1]);
         assert_eq!(rv.len(), 1);
         assert_eq!(rv[0].read().unwrap().packets.len(), 1);
-        let rv = to_packets(&re, &vec![tx.clone(); NUM_PACKETS]);
+        let rv = to_packets(&vec![tx.clone(); NUM_PACKETS]);
         assert_eq!(rv.len(), 1);
         assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);
-        let rv = to_packets(&re, &vec![tx.clone(); NUM_PACKETS + 1]);
+        let rv = to_packets(&vec![tx.clone(); NUM_PACKETS + 1]);
assert_eq!(rv.len(), 2);
assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);
assert_eq!(rv[1].read().unwrap().packets.len(), 1);
@ -630,17 +474,16 @@ mod tests {
let reader = UdpSocket::bind("127.0.0.1:0").expect("bind");
let addr = reader.local_addr().unwrap();
let sender = UdpSocket::bind("127.0.0.1:0").expect("bind");
-        let r = BlobRecycler::default();
-        let p = r.allocate();
+        let p = SharedBlob::default();
         p.write().unwrap().meta.set_addr(&addr);
         p.write().unwrap().meta.size = 1024;
         let v = vec![p];
-        Blob::send_to(&r, &sender, v).unwrap();
+        Blob::send_to(&sender, v).unwrap();
         trace!("send_to");
-        let rv = Blob::recv_from(&r, &reader).unwrap();
+        let rv = Blob::recv_from(&reader).unwrap();
         trace!("recv_from");
         assert_eq!(rv.len(), 1);
-        assert_eq!(rv[0].write().unwrap().meta.size, 1024);
+        assert_eq!(rv[0].read().unwrap().meta.size, 1024);
}
#[cfg(all(feature = "ipv6", test))]
@ -649,17 +492,15 @@ mod tests {
let reader = UdpSocket::bind("[::1]:0").expect("bind");
let addr = reader.local_addr().unwrap();
let sender = UdpSocket::bind("[::1]:0").expect("bind");
-        let r = BlobRecycler::default();
-        let p = r.allocate();
-        p.write().unwrap().meta.set_addr(&addr);
-        p.write().unwrap().meta.size = 1024;
+        let p = SharedBlob::default();
+        p.as_mut().unwrap().meta.set_addr(&addr);
+        p.as_mut().unwrap().meta.size = 1024;
         let mut v = VecDeque::default();
         v.push_back(p);
-        Blob::send_to(&r, &sender, &mut v).unwrap();
+        Blob::send_to(&sender, &mut v).unwrap();
-        let mut rv = Blob::recv_from(&r, &reader).unwrap();
+        let mut rv = Blob::recv_from(&reader).unwrap();
         let rp = rv.pop_front().unwrap();
-        assert_eq!(rp.write().unwrap().meta.size, 1024);
-        r.recycle(rp, "blob_ip6_send_recv");
+        assert_eq!(rp.as_mut().meta.size, 1024);
}
#[test]
@ -676,8 +517,6 @@ mod tests {
b.data_mut()[0] = 1;
assert_eq!(b.data()[0], 1);
assert_eq!(b.get_index().unwrap(), <u64>::max_value());
-        b.reset();
-        assert!(b.data[..BLOB_HEADER_SIZE].starts_with(&[0u8; BLOB_HEADER_SIZE]));
assert_eq!(b.meta, Meta::default());
}

View File

@ -4,7 +4,7 @@
//! `Payment`, the payment is executed.
use chrono::prelude::*;
-use signature::Pubkey;
+use solana_program_interface::pubkey::Pubkey;
/// The types of events a payment plan can process.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
@ -25,16 +25,3 @@ pub struct Payment {
/// The `Pubkey` that `tokens` should be paid to.
pub to: Pubkey,
}
-/// Interface to smart contracts.
-pub trait PaymentPlan {
-    /// Return Payment if the payment plan requires no additional Witnesses.
-    fn final_payment(&self) -> Option<Payment>;
-    /// Return true if the plan spends exactly `spendable_tokens`.
-    fn verify(&self, spendable_tokens: i64) -> bool;
-    /// Apply a witness to the payment plan to see if the plan can be reduced.
-    /// If so, modify the plan in-place.
-    fn apply_witness(&mut self, witness: &Witness, from: &Pubkey);
-}

src/poh.rs Normal file (98 lines)
View File

@ -0,0 +1,98 @@
//! The `Poh` module provides an object for generating a Proof of History.
//! It records hashes of items on behalf of its users.
use hash::{hash, hashv, Hash};
pub struct Poh {
last_hash: Hash,
num_hashes: u64,
}
#[derive(Debug)]
pub struct PohEntry {
pub num_hashes: u64,
pub id: Hash,
pub mixin: Option<Hash>,
}
impl Poh {
pub fn new(last_hash: Hash) -> Self {
Poh {
last_hash,
num_hashes: 0,
}
}
pub fn hash(&mut self) {
self.last_hash = hash(&self.last_hash.as_ref());
self.num_hashes += 1;
}
pub fn record(&mut self, mixin: Hash) -> PohEntry {
let num_hashes = self.num_hashes + 1;
self.last_hash = hashv(&[&self.last_hash.as_ref(), &mixin.as_ref()]);
self.num_hashes = 0;
PohEntry {
num_hashes,
id: self.last_hash,
mixin: Some(mixin),
}
}
    // emission of Ticks (i.e. PohEntries without a mixin) allows
    // validators to parallelize the work of catching up
pub fn tick(&mut self) -> PohEntry {
self.hash();
let num_hashes = self.num_hashes;
self.num_hashes = 0;
PohEntry {
num_hashes,
id: self.last_hash,
mixin: None,
}
}
}
#[cfg(test)]
pub fn verify(initial: Hash, entries: &[PohEntry]) -> bool {
let mut last_hash = initial;
for entry in entries {
assert!(entry.num_hashes != 0);
for _ in 1..entry.num_hashes {
last_hash = hash(&last_hash.as_ref());
}
let id = match entry.mixin {
Some(mixin) => hashv(&[&last_hash.as_ref(), &mixin.as_ref()]),
None => hash(&last_hash.as_ref()),
};
if id != entry.id {
return false;
}
last_hash = id;
}
true
}
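A worked sketch (hypothetical test, not in this file) of the replay contract verify enforces, written as it would sit in the tests module below:

    #[test]
    fn test_poh_verify_sketch() {
        use hash::{hash, Hash};
        use poh::{self, Poh};

        let seed = Hash::default();
        let mut poh = Poh::new(seed);

        // A tick, a mixed-in "transaction", then another tick.
        let entries = vec![poh.tick(), poh.record(hash(b"some transaction")), poh.tick()];

        // Replaying num_hashes from the seed must land on every entry's id.
        assert!(poh::verify(seed, &entries));
    }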
#[cfg(test)]
mod tests {
use hash::Hash;
use poh::{self, PohEntry};
#[test]
#[should_panic]
fn test_poh_verify_assert() {
poh::verify(
Hash::default(),
&[PohEntry {
num_hashes: 0,
id: Hash::default(),
mixin: None,
}],
);
}
}

src/poh_recorder.rs Normal file (95 lines)
View File

@ -0,0 +1,95 @@
//! The `poh_recorder` module provides an object for synchronizing with Proof of History.
//! It synchronizes PoH, the bank's register_entry_id, and the ledger.
//!
use bank::Bank;
use entry::Entry;
use hash::Hash;
use poh::Poh;
use result::Result;
use std::sync::mpsc::Sender;
use std::sync::{Arc, Mutex};
use transaction::Transaction;
#[derive(Clone)]
pub struct PohRecorder {
poh: Arc<Mutex<Poh>>,
bank: Arc<Bank>,
sender: Sender<Vec<Entry>>,
}
impl PohRecorder {
/// A recorder to synchronize PoH with the following data structures
/// * bank - the LastId's queue is updated on `tick` and `record` events
/// * sender - the Entry channel that outputs to the ledger
pub fn new(bank: Arc<Bank>, sender: Sender<Vec<Entry>>) -> Self {
let poh = Arc::new(Mutex::new(Poh::new(bank.last_id())));
PohRecorder { poh, bank, sender }
}
pub fn hash(&self) {
// TODO: amortize the cost of this lock by doing the loop in here for
// some min amount of hashes
let mut poh = self.poh.lock().unwrap();
poh.hash()
}
pub fn tick(&self) -> Result<()> {
// Register and send the entry out while holding the lock.
        // This guarantees that PoH order, Entry production, and the bank's LastId queue stay consistent
let mut poh = self.poh.lock().unwrap();
let tick = poh.tick();
self.bank.register_entry_id(&tick.id);
let entry = Entry {
num_hashes: tick.num_hashes,
id: tick.id,
transactions: vec![],
};
self.sender.send(vec![entry])?;
Ok(())
}
pub fn record(&self, mixin: Hash, txs: Vec<Transaction>) -> Result<()> {
// Register and send the entry out while holding the lock.
        // This guarantees that PoH order, Entry production, and the bank's LastId queue stay consistent.
let mut poh = self.poh.lock().unwrap();
let tick = poh.record(mixin);
self.bank.register_entry_id(&tick.id);
let entry = Entry {
num_hashes: tick.num_hashes,
id: tick.id,
transactions: txs,
};
self.sender.send(vec![entry])?;
Ok(())
}
}
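PohRecorder derives Clone so one handle can tick from a timer thread while another records transactions; the shared Mutex<Poh> keeps the resulting entry stream ordered. A sketch (not in this file), assuming crate-root `solana` paths:

extern crate solana;

use solana::bank::Bank;
use solana::hash::hash;
use solana::mint::Mint;
use solana::poh_recorder::PohRecorder;
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread;

fn main() {
    let mint = Mint::new(1);
    let bank = Arc::new(Bank::new(&mint));
    let (entry_sender, entry_receiver) = channel();
    let recorder = PohRecorder::new(bank, entry_sender);

    // One clone ticks concurrently while the original records.
    let ticker = recorder.clone();
    let t = thread::spawn(move || ticker.tick().expect("tick"));
    recorder.record(hash(b"tx"), vec![]).expect("record");
    t.join().unwrap();

    // Exactly one tick entry and one record entry arrive.
    assert_eq!(entry_receiver.iter().take(2).count(), 2);
}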
#[cfg(test)]
mod tests {
use super::*;
use hash::hash;
use mint::Mint;
use std::sync::mpsc::channel;
use std::sync::Arc;
#[test]
fn test_poh() {
let mint = Mint::new(1);
let bank = Arc::new(Bank::new(&mint));
let (entry_sender, entry_receiver) = channel();
let poh_recorder = PohRecorder::new(bank, entry_sender);
//send some data
let h1 = hash(b"hello world!");
assert!(poh_recorder.record(h1, vec![]).is_ok());
assert!(poh_recorder.tick().is_ok());
//get some events
let _ = entry_receiver.recv().unwrap();
let _ = entry_receiver.recv().unwrap();
//make sure it handles channel close correctly
drop(entry_receiver);
assert!(poh_recorder.tick().is_err());
}
}

Some files were not shown because too many files have changed in this diff.