Compare commits

...

105 Commits

Author SHA1 Message Date
cd3ff5c335 V0.12.3, cherry pick 3523 and 3529 (#3531)
* validator confirmation

* validator confirmaiton

* remove leader confirmaiton

* hang out on progress until fork is confirmed

* use the right id for delegate id

* fixup! hang out on progress until fork is confirmed

* fixup! use the right id for delegate id

* version bump
2019-03-28 05:59:42 -07:00
e55249e63f propagate TESTNET_DB_HOST env variable to next step in buildkite 2019-03-26 15:00:33 -07:00
10bc0c6ee2 Add provisions to specify a database server in testnet manager buildkite 2019-03-26 15:00:33 -07:00
ed14b78d81 also check the delegate_id 2019-03-26 13:44:53 -07:00
7f404941bb remove status_cache.freeze (#3509) 2019-03-26 12:10:46 -07:00
6d45ac1bc7 Record the current nodes locktower votes from the bank (#3502)
* observed_locktower_stats

* fixup! observed_locktower_stats
2019-03-26 11:45:59 -07:00
fabb6d2092 delay freeze of status_cache until squash (#3503) 2019-03-26 11:37:38 -07:00
93cea4c86c Remove rewards crate from publishing script 2019-03-25 21:34:54 -07:00
5fb35f79c3 Added stats for locktower in testnet dashboard 2019-03-25 21:11:37 -07:00
da11274b63 Add support for influx cloud 2019-03-25 21:11:37 -07:00
5d70e2efa9 0.12.2 2019-03-25 20:38:48 -07:00
8f181b4350 keep track of locktower slots and stakes 2019-03-25 16:36:19 -07:00
48844924e5 Setup staking (#3480) 2019-03-25 14:19:14 -07:00
f84593ad5f Revert "Disable accounts squash call from bank"
This reverts commit 7685ba2805.
2019-03-25 12:21:32 -07:00
0469dc52ac Ensure accounts are unlocked (#3458) 2019-03-25 12:21:32 -07:00
4cf418f33f Fix wrong keypair 2019-03-23 16:33:50 -07:00
6c46fcfa4e Restart node test (#3459)
* Add test to local_cluster for restarting a node

* fix so that we don't hit end of epoch - leader not found before trying to transfer
2019-03-23 15:00:23 -07:00
12ec5304f2 Revert "fix so that we don't hit end of epoch - leader not found before trying to transfer"
Revert "Add test to local_cluster for restarting a node"
2019-03-22 21:46:08 -07:00
e32f798d5f fix so that we don't hit end of epoch - leader not found before trying to transfer 2019-03-22 20:47:32 -07:00
68a8b955bc Add test to local_cluster for restarting a node 2019-03-22 19:30:14 -07:00
f479021c0f Update leader slot in poh recorder if we skipped it (#3451)
* reset poh recorder with the original start slot
2019-03-22 17:35:54 -07:00
b91afb7079 Remove attempt to update the cluster, just restart it (v0.12 is not ready for update) 2019-03-22 16:51:53 -07:00
e189c429d5 Refrain from trying to configure a staking account that was previously configured 2019-03-22 16:51:53 -07:00
6a1904664c Demote log level 2019-03-22 16:51:53 -07:00
3285cf8047 Retry more for a new blockhash 2019-03-22 10:56:59 -07:00
bdee3a25f2 Add --poll-for-new-genesis-block flag 2019-03-22 00:44:31 -07:00
8655df0520 Use same gossip port for all testnet nodes 2019-03-21 23:56:23 -07:00
c43eecb8ca Include multinode-demo scripts in release tarball 2019-03-21 22:12:07 -07:00
18f45ebc2c Use installed binaries if not within the cargo workspace 2019-03-21 22:12:07 -07:00
fd28642603 Run a drone on blockstreamer nodes 2019-03-21 22:12:07 -07:00
038583b466 Kill all node processes (blockexplorer) 2019-03-21 22:12:07 -07:00
ed138d392d Fixup ledger path 2019-03-21 17:06:05 -07:00
58f1f0a28b solana-install doesn't exist on v0.12 2019-03-21 16:49:41 -07:00
330d9330b0 Ensure current crate versions match the tag before publishing to crates.io 2019-03-21 16:27:44 -07:00
d626a89c88 / 2019-03-21 16:27:06 -07:00
db5d22e532 Upload tarball as a github release asset 2019-03-21 16:27:06 -07:00
aa8759744e Add script to upload github release assets 2019-03-21 16:27:06 -07:00
060db36c34 Add GITHUB_TOKEN 2019-03-21 16:27:06 -07:00
fa1ea1c458 Switch version file from .txt to .yaml; add target tuple to version.yml 2019-03-21 16:27:06 -07:00
7685ba2805 Disable accounts squash call from bank
- It's asserting and killing testnet
- temporary solution for beacons
2019-03-21 16:01:43 -07:00
a0d940acf0 allow empty ancestors 2019-03-21 16:01:43 -07:00
f4c914a630 Clear progress map on squash (#3377) 2019-03-21 16:01:43 -07:00
eede274cfe fix is_locked_out logic 2019-03-21 16:01:43 -07:00
4df79b653b PR comments 2019-03-21 16:01:43 -07:00
a2c1fa7cb4 Modify bank_forks to support squashing/filtering new root and also don't remove parents from bank_forks when inserting, otherwise we lose potential fork points when querying blocktree for child slots 2019-03-21 16:01:43 -07:00
95cead91a5 Decendent is not a word 2019-03-21 16:01:43 -07:00
89c42ecd3f Implement locktower voting (#3251)
* locktower components and tests

* integrate locktower into replay stage

* track locktower duration

* make sure threshold is checked after simulating the vote

* check vote lockouts using the VoteState program

* duplicate vote test

* epoch stakes

* disable impossible to verify tests
2019-03-21 16:01:43 -07:00
f93c9f052f Ensure genesis ledger directory is populated on all validator nodes
This allows all nodes to serve the genesis ledger over rsync instead of
just the bootstrap leader
2019-03-21 15:55:12 -07:00
e2871053bd Get client-id.json out of the genesis ledger directory 2019-03-21 15:55:08 -07:00
351c9c33d2 change num threads in banking stage bench 2019-03-21 15:00:30 -07:00
59f2a478b7 v0.12 specific stability changes 2019-03-21 15:00:30 -07:00
3f7cd4adc4 Ignore broken tests that are fixed on master
- ignoring, as cherry picking from master will bring in other
  unnecessary dependent changes
2019-03-21 13:45:41 -07:00
4318854a64 ignore broken test 2019-03-21 13:45:41 -07:00
430740b691 use ticks per slot to check if the current tick is in the leader slot 2019-03-21 13:45:41 -07:00
797603a0fe address review comments 2019-03-21 13:45:41 -07:00
f402139991 change pubkey to ref 2019-03-21 13:45:41 -07:00
4db72d85d7 find next leader slot before resetting working bank in Poh recorder 2019-03-21 13:45:41 -07:00
007e17c290 Check if poh recorder has over stepped the leader slot 2019-03-21 13:45:41 -07:00
ad7e727938 Use same VM type for validators as leader, if CUDA is enabled (#3253)
- Since all nodes are created equal
2019-03-21 13:45:41 -07:00
3d5eeab6d9 stop copying Blooms (#3379)
* stop copying Blooms

* fixup

* clippy
2019-03-21 13:45:41 -07:00
8278585545 Avoid panic on duplicate account indices 2019-03-19 16:06:50 -07:00
061d6ec8fd fix formatting 2019-03-19 11:21:00 -07:00
000cc27e53 Schedule node for consecutive slots as leader (#3353)
* Also tweak epoch and slot duration

* new test for leader schedule
2019-03-19 11:21:00 -07:00
9b3092b965 Report how many grace ticks were afforded to previous leader (#3350) 2019-03-19 11:21:00 -07:00
ca819fc4fb Fix leader rotation counter 2019-03-19 11:21:00 -07:00
5ff8f57c0e Remove dangling thin_client 2019-03-18 22:20:14 -07:00
4798612560 Reduce log level for periodic debug messages 2019-03-15 16:02:52 -07:00
9760cb2e6a add support for finding the next slot a node will be leader (#3298) 2019-03-15 15:02:20 -07:00
46b3b3a1c6 Give last leader some grace ticks to catch up (#3299)
* Wait for last leader for some ticks

* New tests and fixed existing tests
2019-03-15 15:02:20 -07:00
1e70f85e83 [v0.12] Reduce ticks per second (#3287)
* Reduce ticks per second

- It's improving TPS. Temp fix for beacons timeframe

* Fix confirmation test
2019-03-15 14:15:54 -07:00
b2d6681762 Bump log level for better CI logs 2019-03-15 07:48:57 -07:00
1b51cba778 Avoid stray '' when rust version is not specified 2019-03-14 21:32:25 -07:00
19ab7333aa cloud_DeleteInstances() now waits for the instances to be terminated 2019-03-14 21:17:36 -07:00
b0e6604b9a Revert "Block until instances are confirmed to be deleted"
This reverts commit 5e40a5bfc1.
2019-03-14 21:17:30 -07:00
9ce1d5e990 Upgrade nightly rust version 2019-03-14 20:37:44 -07:00
facc47cb62 Preserve original nightly name 2019-03-14 20:37:44 -07:00
3dba8b7952 Overhaul cargo/rustc version management 2019-03-14 20:37:44 -07:00
5e40a5bfc1 Block until instances are confirmed to be deleted 2019-03-14 16:20:35 -07:00
c60baf99f3 Rename userdata to data (#3282)
* Rename userdata to data

Instead of saying "userdata", which is ambiguous and imprecise,
say "instruction data" or "account data".

Also, add `ProgramError::InvalidInstructionData`

Fixes #2761
2019-03-14 13:04:42 -07:00
de04884c1b Fix flag to disable leader-rotation (#3243) 2019-03-14 12:08:53 -07:00
e666509409 Don't vote for empty leader transmissions (#3248)
* Don't vote for empty leader transmissions

* Add is_delta flag to bank to detect empty leader transmissions

* Plumb new is_votable flag through replay stage

* Fix PohRecorder tests

* Change is_delta to AtomicBool to avoid making Bank references mutable

* Reset start slot in poh_recorder when working bank is cleared, so that connsecutive TPU's will start from the correct place

* Use proper max tick height calculation

* Test for not voting on empty transmission

* tests for is_votable
2019-03-13 14:32:04 -07:00
28aff96d21 Replace stale --no-signer usage with --no-voting 2019-03-13 13:56:57 -07:00
242975f8cd Remove duplicate --rpc-drone-address 2019-03-13 13:23:18 -07:00
c6ba6cac83 Revert "Add case for --rpc-drone-address"
This reverts commit dc67dd3357.
2019-03-13 13:15:49 -07:00
dc67dd3357 Add case for --rpc-drone-address 2019-03-13 13:03:54 -07:00
733c2a0b07 Enable rpc for all testnet nodes 2019-03-13 10:51:49 -07:00
07d6212d18 Drop socat for iptables 2019-03-13 10:16:28 -07:00
c20d60e4cf Run socat in the background 2019-03-13 08:18:10 -07:00
7147f03efe tell blockexplorer to run on port 8080 (#3237)
* tell blockexplorer to run on port 8080

* forward port 80 to 5000 for a blockexplorer node
2019-03-13 07:37:28 -07:00
6740cb5b02 Replay Stage start_leader() can use wrong parent fork() (#3238)
*  Make sure start_leader starts on the last voted block, not necessarily the biggest indexed bank in frozen_slots()

* Fix tvu test
2019-03-13 03:16:13 -07:00
1e8e99cc3e Move and rename cluster_client 2019-03-12 23:07:48 -06:00
ef7f30e09f Update publish script 2019-03-12 23:07:48 -06:00
ca8e0ec7ae Move thin client tests to integration test suite 2019-03-12 23:07:48 -06:00
2a4f4b3e53 Update crate references 2019-03-12 23:07:48 -06:00
7cecd3851a Add solana-client crate 2019-03-12 23:07:48 -06:00
4d189f2c38 Cargo.lock 2019-03-12 23:07:48 -06:00
9a232475a7 0.12.1 2019-03-12 13:42:47 -07:00
09c9897591 Adjust crate list 2019-03-12 13:36:18 -07:00
06d7573478 Adjust readme path 2019-03-12 13:36:13 -07:00
0b55ffa368 Move programs/system into runtime/ 2019-03-12 12:25:47 -05:00
ae750bb16b Filter vote accounts with no delegate from being selected in Rotation (#3224) 2019-03-11 21:32:19 -07:00
80b2f2f6b7 Update current leader information in metrics and dashboard 2019-03-11 18:47:27 -07:00
6684d84fbc Provide drone's host address while setting up staking account 2019-03-11 18:20:27 -07:00
dc02abae3c Keep stable dashboard on stable channel at all times 2019-03-11 16:19:35 -07:00
6caec655d3 Move testnet/testnet-perf to the stable channel 2019-03-11 16:15:47 -07:00
150 changed files with 3949 additions and 1675 deletions

View File

@ -1,10 +1,12 @@
{ {
"_public_key": "ae29f4f7ad2fc92de70d470e411c8426d5d48db8817c9e3dae574b122192335f", "_public_key": "ae29f4f7ad2fc92de70d470e411c8426d5d48db8817c9e3dae574b122192335f",
"environment": { "environment": {
"CODECOV_TOKEN": "EJ[1:Kqnm+k1Z4p8nr7GqMczXnzh6azTk39tj3bAbCKPitUc=:EzVa4Gpj2Qn5OhZQlVfGFchuROgupvnW:CbWc6sNh1GCrAbrncxDjW00zUAD/Sa+ccg7CFSz8Ua6LnCYnSddTBxJWcJEbEs0MrjuZRQ==]", "CODECOV_TOKEN": "EJ[1:+7nLVR8NlnN48zgaJPPXF9JOZDXVNHDZLeARlCFHyRk=:rHBSqXK7uSnveA4qwUxARZjTNZcA0hXU:ko8lLGwPECpVm19znWBRxKEpMF7xpTHBCEzVOxRar2wDThw4lNDAKqTS61vtkJLtdkHtug==]",
"CRATES_IO_TOKEN": "EJ[1:Kqnm+k1Z4p8nr7GqMczXnzh6azTk39tj3bAbCKPitUc=:qF7QrUM8j+19mptcE1YS71CqmrCM13Ah:TZCatJeT1egCHiufE6cGFC1VsdJkKaaqV6QKWkEsMPBKvOAdaZbbVz9Kl+lGnIsF]", "CRATES_IO_TOKEN": "EJ[1:+7nLVR8NlnN48zgaJPPXF9JOZDXVNHDZLeARlCFHyRk=:NzN6y0ooXJBYvxB589khepthSxhKFkLB:ZTTFZh2A/kB2SAgjJJAMbwAfanRlzxOCNMVcA2MXBCpQHJeeZGULg+0MLACYswfS]",
"INFLUX_DATABASE": "EJ[1:Kqnm+k1Z4p8nr7GqMczXnzh6azTk39tj3bAbCKPitUc=:PetD/4c/EbkQmFEcK21g3cBBAPwFqHEw:wvYmDZRajy2WngVFs9AlwyHk]", "GITHUB_TOKEN": "EJ[1:+7nLVR8NlnN48zgaJPPXF9JOZDXVNHDZLeARlCFHyRk=:iy0Fnxeo0aslTCvgXc5Ddj2ly6ZsQ8gK:GNOOj/kZUJ2rYKxTbLyVKtajWNoGQ3PcChwfEB4HdN18qDHlB96Z7gx01Pcf0qeIHODOWRtxlH4=]",
"INFLUX_USERNAME": "EJ[1:Kqnm+k1Z4p8nr7GqMczXnzh6azTk39tj3bAbCKPitUc=:WcnqZdmDFtJJ01Zu5LbeGgbYGfRzBdFc:a7c5zDDtCOu5L1Qd2NKkxT6kljyBcbck]", "INFLUX_DATABASE": "EJ[1:+7nLVR8NlnN48zgaJPPXF9JOZDXVNHDZLeARlCFHyRk=:Ly/TpIRF0oCxmiBWv225S3mX8s6pfQR+:+tXGB2c9rRCVDcgNO1IDOo89]",
"INFLUX_PASSWORD": "EJ[1:Kqnm+k1Z4p8nr7GqMczXnzh6azTk39tj3bAbCKPitUc=:LIZgP9Tp9yE9OlpV8iogmLOI7iW7SiU3:x0nYdT1A6sxu+O+MMLIN19d2t6rrK1qJ3+HnoWG3PDodsXjz06YJWQKU/mx6saqH+QbGtGV5mk0=]" "INFLUX_PASSWORD": "EJ[1:+7nLVR8NlnN48zgaJPPXF9JOZDXVNHDZLeARlCFHyRk=:ycrq1uQLoSfI932czD+krUOaJeLWpeq6:2iS7ukp/C7wVD3IT0GvQVcwccWGyLr4UocStF/XiDi0OB/N3YKIKN8SQU4ob1b6StAPZ/XOHmag=]",
"INFLUX_USERNAME": "EJ[1:+7nLVR8NlnN48zgaJPPXF9JOZDXVNHDZLeARlCFHyRk=:35hBKofakZ4Db/u0TOW53RXoNWzJTIcl:HWREcMTrgZ8DGB0ZupgSzNWr/tVyE06P]",
"SOLANA_INSTALL_UPDATE_MANIFEST_KEYPAIR_x86_64_unknown_linux_gnu": "EJ[1:+7nLVR8NlnN48zgaJPPXF9JOZDXVNHDZLeARlCFHyRk=:kRz8CyJYKAg/AiwgLrcRNDJAmlRX2zvX:uV1XV6y2Fb+dN4Z9BIMPBRiNS3n+NL8GlJXyu1i7meIsph1DzfLg4Thcp5Mj9nUsFNLgqQgjnsa5C4XNY/h5AgMSzRrJxVj7RhVTRmDJ5/Vjq6v7wCMRfBOvF3rITsV4zTwWSV8yafFmS+ZQ+QJTRgtYsuoYAUNZ06IEebfDHcuNwws72hEGoD9w43hOLSpyEOmXbtZ9h1lIRxrgsrhYDpBlU5LkhDeTXAX5M5dwYxyquJFRwd5quGDV5DYsCh9bAkbjAyjWYymVJ78U9YJIQHT9izzQqTDlMQN49EbLo7MDIaC7O7HVtb7unDJs+DRejbHacoyWVulqVVwu3GRiZezu8zdjwzGHphMMxOtKQaidnqYgflNp/O01I8wZRgR1alsGcmIhEhI8YV/IvQ==]"
} }
} }

310
Cargo.lock generated
View File

@ -1967,7 +1967,7 @@ dependencies = [
[[package]] [[package]]
name = "solana" name = "solana"
version = "0.12.0" version = "0.12.3"
dependencies = [ dependencies = [
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"bs58 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "bs58 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
@ -1990,24 +1990,24 @@ dependencies = [
"rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rayon 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", "rayon 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
"reqwest 0.9.11 (registry+https://github.com/rust-lang/crates.io-index)",
"ring 0.13.5 (registry+https://github.com/rust-lang/crates.io-index)", "ring 0.13.5 (registry+https://github.com/rust-lang/crates.io-index)",
"rocksdb 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", "rocksdb 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-budget-api 0.12.0", "solana-budget-api 0.12.3",
"solana-budget-program 0.12.0", "solana-budget-program 0.12.3",
"solana-drone 0.12.0", "solana-client 0.12.3",
"solana-logger 0.12.0", "solana-drone 0.12.3",
"solana-metrics 0.12.0", "solana-logger 0.12.3",
"solana-netutil 0.12.0", "solana-metrics 0.12.3",
"solana-runtime 0.12.0", "solana-netutil 0.12.3",
"solana-sdk 0.12.0", "solana-runtime 0.12.3",
"solana-storage-api 0.12.0", "solana-sdk 0.12.3",
"solana-vote-api 0.12.0", "solana-storage-api 0.12.3",
"solana-vote-program 0.12.0", "solana-vote-api 0.12.3",
"solana-vote-signer 0.12.0", "solana-vote-program 0.12.3",
"solana-vote-signer 0.12.3",
"sys-info 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)", "sys-info 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)", "tokio 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
@ -2016,86 +2016,104 @@ dependencies = [
[[package]] [[package]]
name = "solana-bench-streamer" name = "solana-bench-streamer"
version = "0.12.0" version = "0.12.3"
dependencies = [ dependencies = [
"clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)", "clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)",
"solana 0.12.0", "solana 0.12.3",
"solana-logger 0.12.0", "solana-logger 0.12.3",
"solana-netutil 0.12.0", "solana-netutil 0.12.3",
] ]
[[package]] [[package]]
name = "solana-bench-tps" name = "solana-bench-tps"
version = "0.12.0" version = "0.12.3"
dependencies = [ dependencies = [
"clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)", "clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rayon 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", "rayon 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
"solana 0.12.0", "solana 0.12.3",
"solana-drone 0.12.0", "solana-client 0.12.3",
"solana-logger 0.12.0", "solana-drone 0.12.3",
"solana-metrics 0.12.0", "solana-logger 0.12.3",
"solana-sdk 0.12.0", "solana-metrics 0.12.3",
"solana-sdk 0.12.3",
] ]
[[package]] [[package]]
name = "solana-bpf-programs" name = "solana-bpf-programs"
version = "0.12.0" version = "0.12.3"
dependencies = [ dependencies = [
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
"elf 0.0.10 (registry+https://github.com/rust-lang/crates.io-index)", "elf 0.0.10 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-bpfloader 0.12.0", "solana-bpfloader 0.12.3",
"solana-logger 0.12.0", "solana-logger 0.12.3",
"solana-runtime 0.12.0", "solana-runtime 0.12.3",
"solana-sdk 0.12.0", "solana-sdk 0.12.3",
"solana_rbpf 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", "solana_rbpf 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
"walkdir 2.2.7 (registry+https://github.com/rust-lang/crates.io-index)", "walkdir 2.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]] [[package]]
name = "solana-bpfloader" name = "solana-bpfloader"
version = "0.12.0" version = "0.12.3"
dependencies = [ dependencies = [
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.50 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.50 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-logger 0.12.0", "solana-logger 0.12.3",
"solana-sdk 0.12.0", "solana-sdk 0.12.3",
"solana_rbpf 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", "solana_rbpf 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]] [[package]]
name = "solana-budget-api" name = "solana-budget-api"
version = "0.12.0" version = "0.12.3"
dependencies = [ dependencies = [
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"chrono 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "chrono 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-sdk 0.12.0", "solana-sdk 0.12.3",
] ]
[[package]] [[package]]
name = "solana-budget-program" name = "solana-budget-program"
version = "0.12.0" version = "0.12.3"
dependencies = [ dependencies = [
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"chrono 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "chrono 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-budget-api 0.12.0", "solana-budget-api 0.12.3",
"solana-logger 0.12.0", "solana-logger 0.12.3",
"solana-runtime 0.12.0", "solana-runtime 0.12.3",
"solana-sdk 0.12.0", "solana-sdk 0.12.3",
]
[[package]]
name = "solana-client"
version = "0.12.3"
dependencies = [
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"bs58 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"jsonrpc-core 10.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"jsonrpc-http-server 10.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"reqwest 0.9.11 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-logger 0.12.3",
"solana-metrics 0.12.3",
"solana-netutil 0.12.3",
"solana-sdk 0.12.3",
] ]
[[package]] [[package]]
name = "solana-drone" name = "solana-drone"
version = "0.12.0" version = "0.12.3"
dependencies = [ dependencies = [
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
@ -2104,95 +2122,95 @@ dependencies = [
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-logger 0.12.0", "solana-logger 0.12.3",
"solana-metrics 0.12.0", "solana-metrics 0.12.3",
"solana-sdk 0.12.0", "solana-sdk 0.12.3",
"tokio 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)", "tokio 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]] [[package]]
name = "solana-failure" name = "solana-failure"
version = "0.12.0" version = "0.12.3"
dependencies = [ dependencies = [
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-runtime 0.12.0", "solana-runtime 0.12.3",
"solana-sdk 0.12.0", "solana-sdk 0.12.3",
] ]
[[package]] [[package]]
name = "solana-fullnode" name = "solana-fullnode"
version = "0.12.0" version = "0.12.3"
dependencies = [ dependencies = [
"clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)", "clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
"solana 0.12.0", "solana 0.12.3",
"solana-drone 0.12.0", "solana-drone 0.12.3",
"solana-logger 0.12.0", "solana-logger 0.12.3",
"solana-metrics 0.12.0", "solana-metrics 0.12.3",
"solana-netutil 0.12.0", "solana-netutil 0.12.3",
"solana-runtime 0.12.0", "solana-runtime 0.12.3",
"solana-sdk 0.12.0", "solana-sdk 0.12.3",
"solana-vote-api 0.12.0", "solana-vote-api 0.12.3",
"solana-vote-signer 0.12.0", "solana-vote-signer 0.12.3",
] ]
[[package]] [[package]]
name = "solana-genesis" name = "solana-genesis"
version = "0.12.0" version = "0.12.3"
dependencies = [ dependencies = [
"clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)", "clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
"solana 0.12.0", "solana 0.12.3",
"solana-sdk 0.12.0", "solana-sdk 0.12.3",
] ]
[[package]] [[package]]
name = "solana-keygen" name = "solana-keygen"
version = "0.12.0" version = "0.12.3"
dependencies = [ dependencies = [
"clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)", "clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)",
"dirs 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "dirs 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-sdk 0.12.0", "solana-sdk 0.12.3",
] ]
[[package]] [[package]]
name = "solana-ledger-tool" name = "solana-ledger-tool"
version = "0.12.0" version = "0.12.3"
dependencies = [ dependencies = [
"assert_cmd 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", "assert_cmd 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
"clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)", "clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
"solana 0.12.0", "solana 0.12.3",
"solana-logger 0.12.0", "solana-logger 0.12.3",
"solana-runtime 0.12.0", "solana-runtime 0.12.3",
"solana-sdk 0.12.0", "solana-sdk 0.12.3",
] ]
[[package]] [[package]]
name = "solana-logger" name = "solana-logger"
version = "0.12.0" version = "0.12.3"
dependencies = [ dependencies = [
"env_logger 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]] [[package]]
name = "solana-metrics" name = "solana-metrics"
version = "0.12.0" version = "0.12.3"
dependencies = [ dependencies = [
"influx_db_client 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "influx_db_client 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
"reqwest 0.9.11 (registry+https://github.com/rust-lang/crates.io-index)", "reqwest 0.9.11 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-sdk 0.12.0", "solana-sdk 0.12.3",
"sys-info 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)", "sys-info 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]] [[package]]
name = "solana-netutil" name = "solana-netutil"
version = "0.12.0" version = "0.12.3"
dependencies = [ dependencies = [
"ipnetwork 0.12.8 (registry+https://github.com/rust-lang/crates.io-index)", "ipnetwork 0.12.8 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
@ -2201,57 +2219,57 @@ dependencies = [
"rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
"reqwest 0.9.11 (registry+https://github.com/rust-lang/crates.io-index)", "reqwest 0.9.11 (registry+https://github.com/rust-lang/crates.io-index)",
"socket2 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "socket2 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-logger 0.12.0", "solana-logger 0.12.3",
] ]
[[package]] [[package]]
name = "solana-noop" name = "solana-noop"
version = "0.12.0" version = "0.12.3"
dependencies = [ dependencies = [
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-logger 0.12.0", "solana-logger 0.12.3",
"solana-runtime 0.12.0", "solana-runtime 0.12.3",
"solana-sdk 0.12.0", "solana-sdk 0.12.3",
] ]
[[package]] [[package]]
name = "solana-replicator" name = "solana-replicator"
version = "0.12.0" version = "0.12.3"
dependencies = [ dependencies = [
"clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)", "clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)",
"solana 0.12.0", "solana 0.12.3",
"solana-logger 0.12.0", "solana-logger 0.12.3",
"solana-netutil 0.12.0", "solana-netutil 0.12.3",
"solana-sdk 0.12.0", "solana-sdk 0.12.3",
] ]
[[package]] [[package]]
name = "solana-rewards-api" name = "solana-rewards-api"
version = "0.12.0" version = "0.12.3"
dependencies = [ dependencies = [
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-sdk 0.12.0", "solana-sdk 0.12.3",
"solana-vote-api 0.12.0", "solana-vote-api 0.12.3",
] ]
[[package]] [[package]]
name = "solana-rewards-program" name = "solana-rewards-program"
version = "0.12.0" version = "0.12.3"
dependencies = [ dependencies = [
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-logger 0.12.0", "solana-logger 0.12.3",
"solana-rewards-api 0.12.0", "solana-rewards-api 0.12.3",
"solana-runtime 0.12.0", "solana-runtime 0.12.3",
"solana-sdk 0.12.0", "solana-sdk 0.12.3",
"solana-vote-api 0.12.0", "solana-vote-api 0.12.3",
] ]
[[package]] [[package]]
name = "solana-runtime" name = "solana-runtime"
version = "0.12.0" version = "0.12.3"
dependencies = [ dependencies = [
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"bv 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", "bv 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
@ -2266,19 +2284,18 @@ dependencies = [
"serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-budget-api 0.12.0", "solana-budget-api 0.12.3",
"solana-logger 0.12.0", "solana-logger 0.12.3",
"solana-metrics 0.12.0", "solana-metrics 0.12.3",
"solana-sdk 0.12.0", "solana-sdk 0.12.3",
"solana-storage-api 0.12.0", "solana-storage-api 0.12.3",
"solana-system-program 0.12.0", "solana-token-api 0.12.3",
"solana-token-api 0.12.0", "solana-vote-api 0.12.3",
"solana-vote-api 0.12.0",
] ]
[[package]] [[package]]
name = "solana-sdk" name = "solana-sdk"
version = "0.12.0" version = "0.12.3"
dependencies = [ dependencies = [
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"bs58 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "bs58 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
@ -2298,97 +2315,85 @@ dependencies = [
[[package]] [[package]]
name = "solana-storage-api" name = "solana-storage-api"
version = "0.12.0" version = "0.12.3"
dependencies = [ dependencies = [
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-sdk 0.12.0", "solana-sdk 0.12.3",
] ]
[[package]] [[package]]
name = "solana-storage-program" name = "solana-storage-program"
version = "0.12.0" version = "0.12.3"
dependencies = [ dependencies = [
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-logger 0.12.0", "solana-logger 0.12.3",
"solana-runtime 0.12.0", "solana-runtime 0.12.3",
"solana-sdk 0.12.0", "solana-sdk 0.12.3",
"solana-storage-api 0.12.0", "solana-storage-api 0.12.3",
]
[[package]]
name = "solana-system-program"
version = "0.12.0"
dependencies = [
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-runtime 0.12.0",
"solana-sdk 0.12.0",
] ]
[[package]] [[package]]
name = "solana-token-api" name = "solana-token-api"
version = "0.12.0" version = "0.12.3"
dependencies = [ dependencies = [
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-sdk 0.12.0", "solana-sdk 0.12.3",
] ]
[[package]] [[package]]
name = "solana-token-program" name = "solana-token-program"
version = "0.12.0" version = "0.12.3"
dependencies = [ dependencies = [
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-logger 0.12.0", "solana-logger 0.12.3",
"solana-sdk 0.12.0", "solana-sdk 0.12.3",
] ]
[[package]] [[package]]
name = "solana-upload-perf" name = "solana-upload-perf"
version = "0.12.0" version = "0.12.3"
dependencies = [ dependencies = [
"serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-metrics 0.12.0", "solana-metrics 0.12.3",
] ]
[[package]] [[package]]
name = "solana-vote-api" name = "solana-vote-api"
version = "0.12.0" version = "0.12.3"
dependencies = [ dependencies = [
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-sdk 0.12.0", "solana-sdk 0.12.3",
] ]
[[package]] [[package]]
name = "solana-vote-program" name = "solana-vote-program"
version = "0.12.0" version = "0.12.3"
dependencies = [ dependencies = [
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-logger 0.12.0", "solana-logger 0.12.3",
"solana-metrics 0.12.0", "solana-metrics 0.12.3",
"solana-runtime 0.12.0", "solana-runtime 0.12.3",
"solana-sdk 0.12.0", "solana-sdk 0.12.3",
"solana-vote-api 0.12.0", "solana-vote-api 0.12.3",
] ]
[[package]] [[package]]
name = "solana-vote-signer" name = "solana-vote-signer"
version = "0.12.0" version = "0.12.3"
dependencies = [ dependencies = [
"bs58 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "bs58 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)", "clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)",
@ -2398,13 +2403,13 @@ dependencies = [
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-metrics 0.12.0", "solana-metrics 0.12.3",
"solana-sdk 0.12.0", "solana-sdk 0.12.3",
] ]
[[package]] [[package]]
name = "solana-wallet" name = "solana-wallet"
version = "0.12.0" version = "0.12.3"
dependencies = [ dependencies = [
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"bs58 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "bs58 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
@ -2413,18 +2418,20 @@ dependencies = [
"dirs 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "dirs 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
"solana 0.12.0", "solana 0.12.3",
"solana-budget-api 0.12.0", "solana-budget-api 0.12.3",
"solana-drone 0.12.0", "solana-budget-program 0.12.3",
"solana-logger 0.12.0", "solana-client 0.12.3",
"solana-sdk 0.12.0", "solana-drone 0.12.3",
"solana-vote-api 0.12.0", "solana-logger 0.12.3",
"solana-vote-signer 0.12.0", "solana-sdk 0.12.3",
"solana-vote-api 0.12.3",
"solana-vote-signer 0.12.3",
] ]
[[package]] [[package]]
name = "solana-workspace" name = "solana-workspace"
version = "0.12.0" version = "0.12.3"
dependencies = [ dependencies = [
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"bs58 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "bs58 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
@ -2434,11 +2441,14 @@ dependencies = [
"rayon 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", "rayon 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
"reqwest 0.9.11 (registry+https://github.com/rust-lang/crates.io-index)", "reqwest 0.9.11 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
"solana 0.12.0", "solana 0.12.3",
"solana-logger 0.12.0", "solana-budget-program 0.12.3",
"solana-netutil 0.12.0", "solana-client 0.12.3",
"solana-runtime 0.12.0", "solana-logger 0.12.3",
"solana-sdk 0.12.0", "solana-netutil 0.12.3",
"solana-runtime 0.12.3",
"solana-sdk 0.12.3",
"solana-vote-api 0.12.3",
"sys-info 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)", "sys-info 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)",
] ]

View File

@ -1,7 +1,7 @@
[package] [package]
name = "solana-workspace" name = "solana-workspace"
description = "Blockchain, Rebuilt for Scale" description = "Blockchain, Rebuilt for Scale"
version = "0.12.0" version = "0.12.3"
documentation = "https://docs.rs/solana" documentation = "https://docs.rs/solana"
homepage = "https://solana.com/" homepage = "https://solana.com/"
readme = "README.md" readme = "README.md"
@ -27,11 +27,14 @@ rand = "0.6.5"
rayon = "1.0.0" rayon = "1.0.0"
reqwest = "0.9.11" reqwest = "0.9.11"
serde_json = "1.0.39" serde_json = "1.0.39"
solana = { path = "core", version = "0.12.0" } solana = { path = "core", version = "0.12.3" }
solana-logger = { path = "logger", version = "0.12.0" } solana-budget-program = { path = "programs/budget", version = "0.12.3" }
solana-netutil = { path = "netutil", version = "0.12.0" } solana-client = { path = "client", version = "0.12.3" }
solana-runtime = { path = "runtime", version = "0.12.0" } solana-logger = { path = "logger", version = "0.12.3" }
solana-sdk = { path = "sdk", version = "0.12.0" } solana-netutil = { path = "netutil", version = "0.12.3" }
solana-runtime = { path = "runtime", version = "0.12.3" }
solana-sdk = { path = "sdk", version = "0.12.3" }
solana-vote-api = { path = "programs/vote_api", version = "0.12.3" }
sys-info = "0.5.6" sys-info = "0.5.6"
[[bench]] [[bench]]
@ -78,7 +81,6 @@ members = [
"programs/rewards_api", "programs/rewards_api",
"programs/storage", "programs/storage",
"programs/storage_api", "programs/storage_api",
"programs/system",
"programs/vote", "programs/vote",
"programs/vote_api", "programs/vote_api",
"replicator", "replicator",

View File

@ -2,16 +2,16 @@
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018" edition = "2018"
name = "solana-bench-streamer" name = "solana-bench-streamer"
version = "0.12.0" version = "0.12.3"
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0" license = "Apache-2.0"
homepage = "https://solana.com/" homepage = "https://solana.com/"
[dependencies] [dependencies]
clap = "2.32.0" clap = "2.32.0"
solana = { path = "../core", version = "0.12.0" } solana = { path = "../core", version = "0.12.3" }
solana-logger = { path = "../logger", version = "0.12.0" } solana-logger = { path = "../logger", version = "0.12.3" }
solana-netutil = { path = "../netutil", version = "0.12.0" } solana-netutil = { path = "../netutil", version = "0.12.3" }
[features] [features]
cuda = ["solana/cuda"] cuda = ["solana/cuda"]

View File

@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018" edition = "2018"
name = "solana-bench-tps" name = "solana-bench-tps"
version = "0.12.0" version = "0.12.3"
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0" license = "Apache-2.0"
homepage = "https://solana.com/" homepage = "https://solana.com/"
@ -11,11 +11,12 @@ homepage = "https://solana.com/"
clap = "2.32.0" clap = "2.32.0"
rayon = "1.0.3" rayon = "1.0.3"
serde_json = "1.0.39" serde_json = "1.0.39"
solana = { path = "../core", version = "0.12.0" } solana = { path = "../core", version = "0.12.3" }
solana-drone = { path = "../drone", version = "0.12.0" } solana-client = { path = "../client", version = "0.12.3" }
solana-logger = { path = "../logger", version = "0.12.0" } solana-drone = { path = "../drone", version = "0.12.3" }
solana-metrics = { path = "../metrics", version = "0.12.0" } solana-logger = { path = "../logger", version = "0.12.3" }
solana-sdk = { path = "../sdk", version = "0.12.0" } solana-metrics = { path = "../metrics", version = "0.12.3" }
solana-sdk = { path = "../sdk", version = "0.12.3" }
[features] [features]
cuda = ["solana/cuda"] cuda = ["solana/cuda"]

View File

@ -1,9 +1,10 @@
use solana_metrics; use solana_metrics;
use rayon::prelude::*; use rayon::prelude::*;
use solana::client::mk_client; use solana::cluster_info::FULLNODE_PORT_RANGE;
use solana::contact_info::ContactInfo; use solana::contact_info::ContactInfo;
use solana::thin_client::ThinClient; use solana_client::client::create_client;
use solana_client::thin_client::ThinClient;
use solana_drone::drone::request_airdrop_transaction; use solana_drone::drone::request_airdrop_transaction;
use solana_metrics::influxdb; use solana_metrics::influxdb;
use solana_sdk::hash::Hash; use solana_sdk::hash::Hash;
@ -51,7 +52,7 @@ pub fn sample_tx_count(
v: &ContactInfo, v: &ContactInfo,
sample_period: u64, sample_period: u64,
) { ) {
let mut client = mk_client(&v); let mut client = create_client(v.client_facing_addr(), FULLNODE_PORT_RANGE);
let mut now = Instant::now(); let mut now = Instant::now();
let mut initial_tx_count = client.transaction_count(); let mut initial_tx_count = client.transaction_count();
let mut max_tps = 0.0; let mut max_tps = 0.0;
@ -181,7 +182,7 @@ pub fn generate_txs(
reclaim: bool, reclaim: bool,
contact_info: &ContactInfo, contact_info: &ContactInfo,
) { ) {
let mut client = mk_client(contact_info); let mut client = create_client(contact_info.client_facing_addr(), FULLNODE_PORT_RANGE);
let blockhash = client.get_recent_blockhash(); let blockhash = client.get_recent_blockhash();
let tx_count = source.len(); let tx_count = source.len();
println!("Signing transactions... {} (reclaim={})", tx_count, reclaim); println!("Signing transactions... {} (reclaim={})", tx_count, reclaim);
@ -241,7 +242,7 @@ pub fn do_tx_transfers(
total_tx_sent_count: &Arc<AtomicUsize>, total_tx_sent_count: &Arc<AtomicUsize>,
thread_batch_sleep_ms: usize, thread_batch_sleep_ms: usize,
) { ) {
let client = mk_client(&contact_info); let client = create_client(contact_info.client_facing_addr(), FULLNODE_PORT_RANGE);
loop { loop {
if thread_batch_sleep_ms > 0 { if thread_batch_sleep_ms > 0 {
sleep(Duration::from_millis(thread_batch_sleep_ms as u64)); sleep(Duration::from_millis(thread_batch_sleep_ms as u64));

View File

@ -2,9 +2,10 @@ mod bench;
mod cli; mod cli;
use crate::bench::*; use crate::bench::*;
use solana::client::mk_client; use solana::cluster_info::FULLNODE_PORT_RANGE;
use solana::gen_keys::GenKeys; use solana::gen_keys::GenKeys;
use solana::gossip_service::discover; use solana::gossip_service::discover;
use solana_client::client::create_client;
use solana_metrics; use solana_metrics;
use solana_sdk::signature::{Keypair, KeypairUtil}; use solana_sdk::signature::{Keypair, KeypairUtil};
use std::collections::VecDeque; use std::collections::VecDeque;
@ -62,8 +63,9 @@ fn main() {
} }
let cluster_entrypoint = nodes[0].clone(); // Pick the first node, why not? let cluster_entrypoint = nodes[0].clone(); // Pick the first node, why not?
let mut client = mk_client(&cluster_entrypoint); let mut client = create_client(cluster_entrypoint.client_facing_addr(), FULLNODE_PORT_RANGE);
let mut barrier_client = mk_client(&cluster_entrypoint); let mut barrier_client =
create_client(cluster_entrypoint.client_facing_addr(), FULLNODE_PORT_RANGE);
let mut seed = [0u8; 32]; let mut seed = [0u8; 32];
seed.copy_from_slice(&id.public_key_bytes()[..32]); seed.copy_from_slice(&id.public_key_bytes()[..32]);

View File

@ -45,7 +45,7 @@ fn check_txs(receiver: &Receiver<WorkingBankEntries>, ref_tx_count: usize) {
#[bench] #[bench]
#[ignore] #[ignore]
fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) { fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
let num_threads = BankingStage::num_threads() as usize; let num_threads = 4;
// a multiple of packet chunk 2X duplicates to avoid races // a multiple of packet chunk 2X duplicates to avoid races
let txes = 192 * 50 * num_threads * 2; let txes = 192 * 50 * num_threads * 2;
let mint_total = 1_000_000_000_000; let mint_total = 1_000_000_000_000;
@ -137,7 +137,7 @@ fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
#[ignore] #[ignore]
fn bench_banking_stage_multi_programs(bencher: &mut Bencher) { fn bench_banking_stage_multi_programs(bencher: &mut Bencher) {
let progs = 4; let progs = 4;
let num_threads = BankingStage::num_threads() as usize; let num_threads = 4;
// a multiple of packet chunk 2X duplicates to avoid races // a multiple of packet chunk 2X duplicates to avoid races
let txes = 96 * 100 * num_threads * 2; let txes = 96 * 100 * num_threads * 2;
let mint_total = 1_000_000_000_000; let mint_total = 1_000_000_000_000;

View File

@ -102,7 +102,7 @@ pub fn test_large_invalid_gossip_nodes(
let cluster = discover(&entry_point_info, num_nodes); let cluster = discover(&entry_point_info, num_nodes);
// Poison the cluster. // Poison the cluster.
let mut client = mk_client(&entry_point_info); let mut client = create_client(entry_point_info.client_facing_addr(), FULLNODE_PORT_RANGE);
for _ in 0..(num_nodes * 100) { for _ in 0..(num_nodes * 100) {
client.gossip_push( client.gossip_push(
cluster_info::invalid_contact_info() cluster_info::invalid_contact_info()
@ -112,7 +112,7 @@ pub fn test_large_invalid_gossip_nodes(
// Force refresh of the active set. // Force refresh of the active set.
for node in &cluster { for node in &cluster {
let mut client = mk_client(&node); let mut client = create_client(node.client_facing_addr(), FULLNODE_PORT_RANGE);
client.gossip_refresh_active_set(); client.gossip_refresh_active_set();
} }

View File

@ -124,7 +124,7 @@ The result field will be a JSON object with the following sub fields:
* `lamports`, number of lamports assigned to this account, as a signed 64-bit integer * `lamports`, number of lamports assigned to this account, as a signed 64-bit integer
* `owner`, array of 32 bytes representing the program this account has been assigned to * `owner`, array of 32 bytes representing the program this account has been assigned to
* `userdata`, array of bytes representing any userdata associated with the account * `data`, array of bytes representing any data associated with the account
* `executable`, boolean indicating if the account contains a program (and is strictly read-only) * `executable`, boolean indicating if the account contains a program (and is strictly read-only)
* `loader`, array of 32 bytes representing the loader for this program (if `executable`), otherwise all * `loader`, array of 32 bytes representing the loader for this program (if `executable`), otherwise all
@ -134,7 +134,7 @@ The result field will be a JSON object with the following sub fields:
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getAccountInfo", "params":["2gVkYWexTHR5Hb2aLeQN3tnngvWzisFKXDUPrgMHpdST"]}' http://localhost:8899 curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getAccountInfo", "params":["2gVkYWexTHR5Hb2aLeQN3tnngvWzisFKXDUPrgMHpdST"]}' http://localhost:8899
// Result // Result
{"jsonrpc":"2.0","result":{"executable":false,"loader":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"userdata":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"id":1} {"jsonrpc":"2.0","result":{"executable":false,"loader":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"id":1}
``` ```
--- ---
@ -254,7 +254,7 @@ After connect to the RPC PubSub websocket at `ws://<ADDRESS>/`:
--- ---
### accountSubscribe ### accountSubscribe
Subscribe to an account to receive notifications when the lamports or userdata Subscribe to an account to receive notifications when the lamports or data
for a given account public key changes for a given account public key changes
##### Parameters: ##### Parameters:
@ -274,7 +274,7 @@ for a given account public key changes
##### Notification Format: ##### Notification Format:
```bash ```bash
{"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"executable":false,"loader":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"userdata":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"subscription":0}} {"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"executable":false,"loader":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"subscription":0}}
``` ```
--- ---
@ -300,7 +300,7 @@ Unsubscribe from account change notifications
--- ---
### programSubscribe ### programSubscribe
Subscribe to a program to receive notifications when the lamports or userdata Subscribe to a program to receive notifications when the lamports or data
for a given account owned by the program changes for a given account owned by the program changes
##### Parameters: ##### Parameters:
@ -322,7 +322,7 @@ for a given account owned by the program changes
* `string` - account Pubkey, as base-58 encoded string * `string` - account Pubkey, as base-58 encoded string
* `object` - account info JSON object (see [getAccountInfo](#getaccountinfo) for field details) * `object` - account info JSON object (see [getAccountInfo](#getaccountinfo) for field details)
```bash ```bash
{"jsonrpc":"2.0","method":"programNotification","params":{{"result":["8Rshv2oMkPu5E4opXTRyuyBeZBqQ4S477VG26wUTFxUM",{"executable":false,"lamports":1,"owner":[129,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"userdata":[1,1,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,49,56,45,49,50,45,50,52,84,50,51,58,53,57,58,48,48,90,235,233,39,152,15,44,117,176,41,89,100,86,45,61,2,44,251,46,212,37,35,118,163,189,247,84,27,235,178,62,55,89,0,0,0,0,50,0,0,0,0,0,0,0,235,233,39,152,15,44,117,176,41,89,100,86,45,61,2,44,251,46,212,37,35,118,163,189,247,84,27,235,178,62,45,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}],"subscription":0}} {"jsonrpc":"2.0","method":"programNotification","params":{{"result":["8Rshv2oMkPu5E4opXTRyuyBeZBqQ4S477VG26wUTFxUM",{"executable":false,"lamports":1,"owner":[129,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"data":[1,1,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,49,56,45,49,50,45,50,52,84,50,51,58,53,57,58,48,48,90,235,233,39,152,15,44,117,176,41,89,100,86,45,61,2,44,251,46,212,37,35,118,163,189,247,84,27,235,178,62,55,89,0,0,0,0,50,0,0,0,0,0,0,0,235,233,39,152,15,44,117,176,41,89,100,86,45,61,2,44,251,46,212,37,35,118,163,189,247,84,27,235,178,62,45,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}],"subscription":0}}
``` ```
--- ---

View File

@ -40,7 +40,7 @@ retransmitted twice around the network.
4. CrdsValue for vote should look like this ``` Votes(Vec<Transaction>) ``` 4. CrdsValue for vote should look like this ``` Votes(Vec<Transaction>) ```
Each vote transaction should maintain a `wallclock` in its userdata. The merge Each vote transaction should maintain a `wallclock` in its data. The merge
strategy for Votes will keep the last N set of votes as configured by the local strategy for Votes will keep the last N set of votes as configured by the local
client. For push/pull the vector is traversed recursively and each Transaction client. For push/pull the vector is traversed recursively and each Transaction
is treated as an individual CrdsValue with its own local wallclock and is treated as an individual CrdsValue with its own local wallclock and

View File

@ -6,7 +6,7 @@ separating program code from the state it operates on, the runtime is able to
choreograph concurrent access. Transactions accessing only credit-only choreograph concurrent access. Transactions accessing only credit-only
accounts are executed in parallel whereas transactions accessing writable accounts are executed in parallel whereas transactions accessing writable
accounts are serialized. The runtime interacts with the program through an accounts are serialized. The runtime interacts with the program through an
entrypoint with a well-defined interface. The userdata stored in an account is entrypoint with a well-defined interface. The data stored in an account is
an opaque type, an array of bytes. The program has full control over its an opaque type, an array of bytes. The program has full control over its
contents. contents.
@ -42,7 +42,7 @@ programs can be executed in parallel.
The runtime enforces the following rules: The runtime enforces the following rules:
1. Only the *owner* program may modify the contents of an account. This means 1. Only the *owner* program may modify the contents of an account. This means
that upon assignment userdata vector is guaranteed to be zero. that upon assignment data vector is guaranteed to be zero.
2. Total balances on all the accounts is equal before and after execution of a 2. Total balances on all the accounts is equal before and after execution of a
transaction. transaction.
@ -59,11 +59,11 @@ accounts.
## SystemProgram Interface ## SystemProgram Interface
The interface is best described by the `Instruction::userdata` that the user The interface is best described by the `Instruction::data` that the user
encodes. encodes.
* `CreateAccount` - This allows the user to create an account with an allocated * `CreateAccount` - This allows the user to create an account with an allocated
userdata array and assign it to a Program. data array and assign it to a Program.
* `Assign` - Allows the user to assign an existing account to a program. * `Assign` - Allows the user to assign an existing account to a program.
@ -73,10 +73,10 @@ userdata array and assign it to a Program.
For blockchain to function correctly, the program code must be resilient to user For blockchain to function correctly, the program code must be resilient to user
inputs. That is why in this design the program specific code is the only code inputs. That is why in this design the program specific code is the only code
that can change the state of the userdata byte array in the Accounts that are that can change the state of the data byte array in the Accounts that are
assigned to it. It is also the reason why `Assign` or `CreateAccount` must zero assigned to it. It is also the reason why `Assign` or `CreateAccount` must zero
out the userdata. Otherwise there would be no possible way for the program to out the data. Otherwise there would be no possible way for the program to
distinguish the recently assigned account userdata from a natively generated distinguish the recently assigned account data from a natively generated
state transition without some additional metadata from the runtime to indicate state transition without some additional metadata from the runtime to indicate
that this memory is assigned instead of natively generated. that this memory is assigned instead of natively generated.
@ -94,12 +94,12 @@ instruction can be composed into a single transaction with the call to the
program itself. program itself.
* `CreateAccount` and `Assign` guarantee that when account is assigned to the * `CreateAccount` and `Assign` guarantee that when account is assigned to the
program, the Account's userdata is zero initialized. program, the Account's data is zero initialized.
* Once assigned to program an Account cannot be reassigned. * Once assigned to program an Account cannot be reassigned.
* Runtime guarantees that a program's code is the only code that can modify * Runtime guarantees that a program's code is the only code that can modify
Account userdata that the Account is assigned to. Account data that the Account is assigned to.
* Runtime guarantees that the program can only spend lamports that are in * Runtime guarantees that the program can only spend lamports that are in
accounts that are assigned to it. accounts that are assigned to it.

View File

@ -2,7 +2,7 @@ steps:
- command: "ci/shellcheck.sh" - command: "ci/shellcheck.sh"
name: "shellcheck" name: "shellcheck"
timeout_in_minutes: 5 timeout_in_minutes: 5
- command: "ci/docker-run.sh solanalabs/rust:1.32.0 ci/test-checks.sh" - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-checks.sh"
name: "checks" name: "checks"
timeout_in_minutes: 15 timeout_in_minutes: 15
- wait - wait
@ -14,10 +14,10 @@ steps:
- command: "ci/test-bench.sh" - command: "ci/test-bench.sh"
name: "bench" name: "bench"
timeout_in_minutes: 20 timeout_in_minutes: 20
- command: "ci/docker-run.sh solanalabs/rust:1.32.0 ci/test-stable.sh" - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-stable.sh"
name: "stable" name: "stable"
timeout_in_minutes: 20 timeout_in_minutes: 20
- command: "ci/docker-run.sh solanalabs/rust-nightly:2019-01-31 ci/test-coverage.sh" - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-coverage.sh"
name: "coverage" name: "coverage"
timeout_in_minutes: 20 timeout_in_minutes: 20
# TODO: Fix and re-enable test-large-network.sh # TODO: Fix and re-enable test-large-network.sh

View File

@ -4,11 +4,9 @@ ARG date
RUN set -x \ RUN set -x \
&& rustup install nightly-$date \ && rustup install nightly-$date \
&& rustup show \ && rustup show \
&& mv /usr/local/rustup/toolchains/nightly-$date-* \
/usr/local/rustup/toolchains/nightly-x86_64-unknown-linux-gnu \
&& rustup show \ && rustup show \
&& rustc --version \ && rustc --version \
&& cargo --version \ && cargo --version \
&& rustc +nightly --version \ && rustc +nightly-$date --version \
&& cargo +nightly --version && cargo +nightly-$date --version

View File

@ -19,7 +19,7 @@ To update the pinned version:
to confirm the new nightly image builds. Fix any issues as needed to confirm the new nightly image builds. Fix any issues as needed
1. Run `docker login` to enable pushing images to Docker Hub, if you're authorized. 1. Run `docker login` to enable pushing images to Docker Hub, if you're authorized.
1. Run `CI=true ci/docker-rust-nightly/build.sh YYYY-MM-DD` to push the new nightly image to dockerhub.com. 1. Run `CI=true ci/docker-rust-nightly/build.sh YYYY-MM-DD` to push the new nightly image to dockerhub.com.
1. Modify the `solanalabs/rust-nightly:YYYY-MM-DD` reference in `ci/buildkite.yml` from the previous to 1. Modify the `solanalabs/rust-nightly:YYYY-MM-DD` reference in `ci/rust-version.sh` from the previous to
new *YYYY-MM-DD* value, send a PR with this change and any codebase adjustments needed. new *YYYY-MM-DD* value, send a PR with this change and any codebase adjustments needed.
## Troubleshooting ## Troubleshooting

View File

@ -24,9 +24,10 @@ fi
build() { build() {
$genPipeline && return $genPipeline && return
ci/version-check-with-upgrade.sh stable source ci/rust-version.sh stable
_ scripts/ulimit-n.sh _ scripts/ulimit-n.sh
_ cargo build --all _ cargo +$rust_stable build --all
} }
runTest() { runTest() {

View File

@ -55,7 +55,7 @@ while getopts "ch?i:k:brxR" opt; do
restartInterval=$OPTARG restartInterval=$OPTARG
;; ;;
b) b)
maybeNoLeaderRotation="--no-leader-rotation" maybeNoLeaderRotation="--only-bootstrap-stake"
;; ;;
x) x)
extraNodes=$((extraNodes + 1)) extraNodes=$((extraNodes + 1))

View File

@ -1,7 +1,7 @@
#!/usr/bin/env bash #!/usr/bin/env bash
set -e set -e
cd "$(dirname "$0")/.." cd "$(dirname "$0")/.."
source ci/semver_bash/semver.sh
# List of internal crates to publish # List of internal crates to publish
# #
@ -16,14 +16,17 @@ CRATES=(
sdk sdk
keygen keygen
metrics metrics
client
drone drone
programs/{budget,bpf_loader,native_loader,noop,system,vote} programs/{budget_api,storage_api,token_api,vote_api}
runtime
programs/{budget,bpf_loader,storage,token,vote}
vote-signer
core core
fullnode fullnode
genesis genesis
ledger-tool ledger-tool
wallet wallet
runtime
) )
@ -33,6 +36,9 @@ CRATES=(
exit 0 exit 0
} }
semverParseInto "$TRIGGERED_BUILDKITE_TAG" MAJOR MINOR PATCH SPECIAL
expectedCrateVersion="$MAJOR.$MINOR.$PATCH$SPECIAL"
[[ -n "$CRATES_IO_TOKEN" ]] || { [[ -n "$CRATES_IO_TOKEN" ]] || {
echo CRATES_IO_TOKEN undefined echo CRATES_IO_TOKEN undefined
exit 1 exit 1
@ -46,13 +52,17 @@ for crate in "${CRATES[@]}"; do
exit 1 exit 1
fi fi
echo "-- $crate" echo "-- $crate"
# TODO: Ensure the published version matches the contents of grep -q "^version = \"$expectedCrateVersion\"$" Cargo.toml || {
# TRIGGERED_BUILDKITE_TAG echo "Error: $crate/Cargo.toml version is not $expectedCrateVersion"
exit 1
}
( (
set -x set -x
# TODO: the rocksdb package does not build with the stock rust docker image, # TODO: the rocksdb package does not build with the stock rust docker image,
# so use the solana rust docker image until this is resolved upstream # so use the solana rust docker image until this is resolved upstream
ci/docker-run.sh solanalabs/rust:1.31.0 bash -exc "cd $crate; $cargoCommand" source ci/rust-version.sh
ci/docker-run.sh "$rust_stable_docker_image" bash -exc "cd $crate; $cargoCommand"
#ci/docker-run.sh rust bash -exc "cd $crate; $cargoCommand" #ci/docker-run.sh rust bash -exc "cd $crate; $cargoCommand"
) )
done done

View File

@ -45,11 +45,7 @@ beta)
CHANNEL_BRANCH=$BETA_CHANNEL CHANNEL_BRANCH=$BETA_CHANNEL
;; ;;
stable) stable)
if [[ -n $BETA_CHANNEL_LATEST_TAG ]]; then
CHANNEL_BRANCH=$BETA_CHANNEL
else
CHANNEL_BRANCH=$STABLE_CHANNEL CHANNEL_BRANCH=$STABLE_CHANNEL
fi
;; ;;
*) *)
echo "Error: Invalid PUBLISH_CHANNEL=$PUBLISH_CHANNEL" echo "Error: Invalid PUBLISH_CHANNEL=$PUBLISH_CHANNEL"

View File

@ -11,10 +11,13 @@ fi
eval "$(ci/channel-info.sh)" eval "$(ci/channel-info.sh)"
TAG=
if [[ -n "$BUILDKITE_TAG" ]]; then if [[ -n "$BUILDKITE_TAG" ]]; then
CHANNEL_OR_TAG=$BUILDKITE_TAG CHANNEL_OR_TAG=$BUILDKITE_TAG
TAG="$BUILDKITE_TAG"
elif [[ -n "$TRIGGERED_BUILDKITE_TAG" ]]; then elif [[ -n "$TRIGGERED_BUILDKITE_TAG" ]]; then
CHANNEL_OR_TAG=$TRIGGERED_BUILDKITE_TAG CHANNEL_OR_TAG=$TRIGGERED_BUILDKITE_TAG
TAG="$TRIGGERED_BUILDKITE_TAG"
else else
CHANNEL_OR_TAG=$CHANNEL CHANNEL_OR_TAG=$CHANNEL
fi fi
@ -24,18 +27,34 @@ if [[ -z $CHANNEL_OR_TAG ]]; then
exit 1 exit 1
fi fi
case "$(uname)" in
Darwin)
TARGET=x86_64-apple-darwin
;;
Linux)
TARGET=x86_64-unknown-linux-gnu
;;
*)
TARGET=unknown-unknown-unknown
;;
esac
echo --- Creating tarball echo --- Creating tarball
( (
set -x set -x
rm -rf solana-release/ rm -rf solana-release/
mkdir solana-release/ mkdir solana-release/
(
echo "$CHANNEL_OR_TAG"
git rev-parse HEAD
) > solana-release/version.txt
scripts/cargo-install-all.sh solana-release COMMIT="$(git rev-parse HEAD)"
(
echo "channel: $CHANNEL"
echo "commit: $COMMIT"
echo "target: $TARGET"
) > solana-release/version.yml
source ci/rust-version.sh stable
scripts/cargo-install-all.sh +"$rust_stable" solana-release
./fetch-perf-libs.sh ./fetch-perf-libs.sh
# shellcheck source=/dev/null # shellcheck source=/dev/null
@ -45,20 +64,22 @@ echo --- Creating tarball
cargo install --path . --features=cuda --root ../solana-release-cuda cargo install --path . --features=cuda --root ../solana-release-cuda
) )
cp solana-release-cuda/bin/solana-fullnode solana-release/bin/solana-fullnode-cuda cp solana-release-cuda/bin/solana-fullnode solana-release/bin/solana-fullnode-cuda
cp -a scripts multinode-demo solana-release/
tar jvcf solana-release.tar.bz2 solana-release/ tar jvcf solana-release-$TARGET.tar.bz2 solana-release/
) )
echo --- Saving build artifacts echo --- Saving build artifacts
source ci/upload-ci-artifact.sh source ci/upload-ci-artifact.sh
upload-ci-artifact solana-release.tar.bz2 upload-ci-artifact solana-release-$TARGET.tar.bz2
if [[ -n $DO_NOT_PUBLISH_TAR ]]; then if [[ -n $DO_NOT_PUBLISH_TAR ]]; then
echo Skipped due to DO_NOT_PUBLISH_TAR echo Skipped due to DO_NOT_PUBLISH_TAR
exit 0 exit 0
fi fi
echo --- AWS S3 Store file=solana-release-$TARGET.tar.bz2
echo --- AWS S3 Store: $file
( (
set -x set -x
$DRYRUN docker run \ $DRYRUN docker run \
@ -67,11 +88,14 @@ echo --- AWS S3 Store
--env AWS_SECRET_ACCESS_KEY \ --env AWS_SECRET_ACCESS_KEY \
--volume "$PWD:/solana" \ --volume "$PWD:/solana" \
eremite/aws-cli:2018.12.18 \ eremite/aws-cli:2018.12.18 \
/usr/bin/s3cmd --acl-public put /solana/solana-release.tar.bz2 \ /usr/bin/s3cmd --acl-public put /solana/"$file" s3://solana-release/"$CHANNEL_OR_TAG"/"$file"
s3://solana-release/"$CHANNEL_OR_TAG"/solana-release.tar.bz2
echo Published to: echo Published to:
$DRYRUN ci/format-url.sh http://solana-release.s3.amazonaws.com/"$CHANNEL_OR_TAG"/solana-release.tar.bz2 $DRYRUN ci/format-url.sh http://solana-release.s3.amazonaws.com/"$CHANNEL_OR_TAG"/"$file"
) )
if [[ -n $TAG ]]; then
ci/upload-github-release-asset.sh $file
fi
echo --- ok echo --- ok

45
ci/rust-version.sh Normal file
View File

@ -0,0 +1,45 @@
#
# This file maintains the rust versions for use by CI.
#
# Build with stable rust, updating the stable toolchain if necessary:
# $ source ci/rust-version.sh stable
# $ cargo +"$rust_stable" build
#
# Build with nightly rust, updating the nightly toolchain if necessary:
# $ source ci/rust-version.sh nightly
# $ cargo +"$rust_nightly" build
#
# Obtain the environment variables without any automatic toolchain updating:
# $ source ci/rust-version.sh
#
export rust_stable=1.32.0
export rust_stable_docker_image=solanalabs/rust:1.32.0
export rust_nightly=nightly-2019-03-14
export rust_nightly_docker_image=solanalabs/rust-nightly:2019-03-14
[[ -z $1 ]] || (
rustup_install() {
declare toolchain=$1
if ! cargo +"$toolchain" -V; then
rustup install "$toolchain"
cargo +"$toolchain" -V
fi
}
set -e
cd "$(dirname "${BASH_SOURCE[0]}")"
case $1 in
stable)
rustup_install "$rust_stable"
;;
nightly)
rustup_install "$rust_nightly"
;;
*)
echo "Note: ignoring unknown argument: $1"
;;
esac
)

View File

@ -24,7 +24,7 @@ source ci/_
source ci/upload-ci-artifact.sh source ci/upload-ci-artifact.sh
eval "$(ci/channel-info.sh)" eval "$(ci/channel-info.sh)"
ci/version-check-with-upgrade.sh nightly source ci/rust-version.sh nightly
set -o pipefail set -o pipefail
export RUST_BACKTRACE=1 export RUST_BACKTRACE=1
@ -39,7 +39,7 @@ fi
BENCH_FILE=bench_output.log BENCH_FILE=bench_output.log
BENCH_ARTIFACT=current_bench_results.log BENCH_ARTIFACT=current_bench_results.log
_ cargo +nightly bench ${V:+--verbose} \ _ cargo +$rust_nightly bench ${V:+--verbose} \
-- -Z unstable-options --format=json | tee "$BENCH_FILE" -- -Z unstable-options --format=json | tee "$BENCH_FILE"
# Run bpf benches # Run bpf benches
@ -47,11 +47,11 @@ echo --- program/bpf
( (
set -x set -x
cd programs/bpf cd programs/bpf
cargo +nightly bench ${V:+--verbose} --features=bpf_c \ cargo +$rust_nightly bench ${V:+--verbose} --features=bpf_c \
-- -Z unstable-options --format=json --nocapture | tee -a ../../../"$BENCH_FILE" -- -Z unstable-options --format=json --nocapture | tee -a ../../../"$BENCH_FILE"
) )
_ cargo +nightly run --release --package solana-upload-perf \ _ cargo +$rust_nightly run --release --package solana-upload-perf \
-- "$BENCH_FILE" "$TARGET_BRANCH" "$UPLOAD_METRICS" > "$BENCH_ARTIFACT" -- "$BENCH_FILE" "$TARGET_BRANCH" "$UPLOAD_METRICS" > "$BENCH_ARTIFACT"
upload-ci-artifact "$BENCH_ARTIFACT" upload-ci-artifact "$BENCH_ARTIFACT"

View File

@ -4,14 +4,14 @@ set -e
cd "$(dirname "$0")/.." cd "$(dirname "$0")/.."
source ci/_ source ci/_
ci/version-check.sh stable source ci/rust-version.sh stable
export RUST_BACKTRACE=1 export RUST_BACKTRACE=1
export RUSTFLAGS="-D warnings" export RUSTFLAGS="-D warnings"
_ cargo fmt --all -- --check _ cargo +"$rust_stable" fmt --all -- --check
_ cargo clippy --all -- --version _ cargo +"$rust_stable" clippy --all -- --version
_ cargo clippy --all -- --deny=warnings _ cargo +"$rust_stable" clippy --all -- --deny=warnings
_ ci/audit.sh _ ci/audit.sh
_ ci/nits.sh _ ci/nits.sh
_ book/build.sh _ book/build.sh

View File

@ -21,7 +21,6 @@ ci/affects-files.sh \
} }
source ci/upload-ci-artifact.sh source ci/upload-ci-artifact.sh
ci/version-check-with-upgrade.sh nightly
source scripts/ulimit-n.sh source scripts/ulimit-n.sh
scripts/coverage.sh scripts/coverage.sh

View File

@ -4,9 +4,7 @@ set -e
here=$(dirname "$0") here=$(dirname "$0")
cd "$here"/.. cd "$here"/..
# This job doesn't run within a container, try once to upgrade tooling on a source ci/rust-version.sh stable
# version check failure
ci/version-check-with-upgrade.sh stable
export RUST_BACKTRACE=1 export RUST_BACKTRACE=1
@ -39,4 +37,4 @@ fi
set -x set -x
export SOLANA_DYNAMIC_NODES=120 export SOLANA_DYNAMIC_NODES=120
exec cargo test --release --features=erasure test_multi_node_dynamic_network -- --ignored exec cargo +"$rust_stable" test --release --features=erasure test_multi_node_dynamic_network -- --ignored

View File

@ -10,7 +10,8 @@ annotate() {
} }
} }
ci/version-check-with-upgrade.sh stable source ci/rust-version.sh stable
export RUST_BACKTRACE=1 export RUST_BACKTRACE=1
export RUSTFLAGS="-D warnings" export RUSTFLAGS="-D warnings"
source scripts/ulimit-n.sh source scripts/ulimit-n.sh
@ -24,9 +25,9 @@ case $testName in
test-stable) test-stable)
echo "Executing $testName" echo "Executing $testName"
_ cargo build --all ${V:+--verbose} _ cargo +"$rust_stable" build --all ${V:+--verbose}
_ cargo test --all ${V:+--verbose} -- --nocapture --test-threads=1 _ cargo +"$rust_stable" test --all ${V:+--verbose} -- --nocapture --test-threads=1
_ cargo test --manifest-path programs/system/Cargo.toml _ cargo +"$rust_stable" test --manifest-path runtime/Cargo.toml
;; ;;
test-stable-perf) test-stable-perf)
echo "Executing $testName" echo "Executing $testName"
@ -48,7 +49,9 @@ test-stable-perf)
# BPF program tests # BPF program tests
_ make -C programs/bpf/c tests _ make -C programs/bpf/c tests
_ programs/bpf/rust/noop/build.sh # Must be built out of band _ programs/bpf/rust/noop/build.sh # Must be built out of band
_ cargo test --manifest-path programs/bpf/Cargo.toml --no-default-features --features=bpf_c,bpf_rust _ cargo +"$rust_stable" test \
--manifest-path programs/bpf/Cargo.toml \
--no-default-features --features=bpf_c,bpf_rust
# Run root package tests with these features # Run root package tests with these features
ROOT_FEATURES=erasure,chacha ROOT_FEATURES=erasure,chacha
@ -67,9 +70,9 @@ test-stable-perf)
fi fi
# Run root package library tests # Run root package library tests
_ cargo build --all ${V:+--verbose} --features="$ROOT_FEATURES" _ cargo +"$rust_stable" build --all ${V:+--verbose} --features="$ROOT_FEATURES"
_ cargo test --all --lib ${V:+--verbose} --features="$ROOT_FEATURES" -- --nocapture --test-threads=1 _ cargo +"$rust_stable" test --all --lib ${V:+--verbose} --features="$ROOT_FEATURES" -- --nocapture --test-threads=1
_ cargo test --manifest-path programs/system/Cargo.toml _ cargo +"$rust_stable" test --manifest-path runtime/Cargo.toml
# Run root package integration tests # Run root package integration tests
for test in tests/*.rs; do for test in tests/*.rs; do
@ -77,7 +80,7 @@ test-stable-perf)
test=${test%.rs} # basename x .rs test=${test%.rs} # basename x .rs
( (
export RUST_LOG="$test"=trace,$RUST_LOG export RUST_LOG="$test"=trace,$RUST_LOG
_ cargo test --all ${V:+--verbose} --features="$ROOT_FEATURES" --test="$test" \ _ cargo +"$rust_stable" test --all ${V:+--verbose} --features="$ROOT_FEATURES" --test="$test" \
-- --test-threads=1 --nocapture -- --test-threads=1 --nocapture
) )
done done

View File

@ -64,6 +64,10 @@ EOF
exit 0 exit 0
fi fi
if [[ -n $TESTNET_DB_HOST ]]; then
SOLANA_METRICS_PARTIAL_CONFIG="host=$TESTNET_DB_HOST,$SOLANA_METRICS_PARTIAL_CONFIG"
fi
export SOLANA_METRICS_CONFIG="db=$TESTNET,$SOLANA_METRICS_PARTIAL_CONFIG" export SOLANA_METRICS_CONFIG="db=$TESTNET,$SOLANA_METRICS_PARTIAL_CONFIG"
echo "SOLANA_METRICS_CONFIG: $SOLANA_METRICS_CONFIG" echo "SOLANA_METRICS_CONFIG: $SOLANA_METRICS_CONFIG"
source scripts/configure-metrics.sh source scripts/configure-metrics.sh
@ -81,13 +85,8 @@ testnet-beta|testnet-beta-perf)
CHANNEL_BRANCH=$BETA_CHANNEL CHANNEL_BRANCH=$BETA_CHANNEL
;; ;;
testnet|testnet-perf) testnet|testnet-perf)
if [[ -n $BETA_CHANNEL_LATEST_TAG ]]; then
CHANNEL_OR_TAG=$BETA_CHANNEL_LATEST_TAG
CHANNEL_BRANCH=$BETA_CHANNEL
else
CHANNEL_OR_TAG=$STABLE_CHANNEL_LATEST_TAG CHANNEL_OR_TAG=$STABLE_CHANNEL_LATEST_TAG
CHANNEL_BRANCH=$STABLE_CHANNEL CHANNEL_BRANCH=$STABLE_CHANNEL
fi
;; ;;
*) *)
echo "Error: Invalid TESTNET=$TESTNET" echo "Error: Invalid TESTNET=$TESTNET"
@ -107,6 +106,7 @@ steps:
env: env:
TESTNET: "$TESTNET" TESTNET: "$TESTNET"
TESTNET_OP: "$TESTNET_OP" TESTNET_OP: "$TESTNET_OP"
TESTNET_DB_HOST: "$TESTNET_DB_HOST"
EOF EOF
) | buildkite-agent pipeline upload ) | buildkite-agent pipeline upload
exit 0 exit 0
@ -285,28 +285,17 @@ stop)
stop stop
;; ;;
update-or-restart) update-or-restart)
if start "" update; then echo "+++ Restarting the network"
echo Update successful
else
echo "+++ Update failed, restarting the network"
$metricsWriteDatapoint "testnet-manager update-failure=1"
start start
fi
;; ;;
sanity-or-restart) sanity-or-restart)
if sanity; then if sanity; then
echo Pass echo Pass
else else
echo "+++ Sanity failed, updating the network" echo "+++ Sanity failed, restarting the network"
$metricsWriteDatapoint "testnet-manager sanity-failure=1" $metricsWriteDatapoint "testnet-manager sanity-failure=1"
if start "" update; then
echo Update successful
else
echo "+++ Update failed, restarting the network"
$metricsWriteDatapoint "testnet-manager update-failure=1"
start start
fi fi
fi
;; ;;
esac esac

View File

@ -0,0 +1,50 @@
#!/usr/bin/env bash
#
# Uploads one or more files to a github release
#
# Prerequisites
# 1) GITHUB_TOKEN defined in the environment
# 2) TAG defined in the environment
#
set -e
REPO_SLUG=solana-labs/solana
if [[ -z $1 ]]; then
echo No files specified
exit 1
fi
if [[ -z $GITHUB_TOKEN ]]; then
echo Error: GITHUB_TOKEN not defined
exit 1
fi
if [[ -n $BUILDKITE_TAG ]]; then
TAG=$BUILDKITE_TAG
elif [[ -n $TRIGGERED_BUILDKITE_TAG ]]; then
TAG=$TRIGGERED_BUILDKITE_TAG
fi
if [[ -z $TAG ]]; then
echo Error: TAG not defined
exit 1
fi
releaseId=$( \
curl -s "https://api.github.com/repos/$REPO_SLUG/releases/tags/$TAG" \
| grep -m 1 \"id\": \
| sed -ne 's/^[^0-9]*\([0-9]*\),$/\1/p' \
)
echo "Github release id for $TAG is $releaseId"
for file in "$@"; do
echo "--- Uploading $file to tag $TAG of $REPO_SLUG"
curl \
--data-binary @"$file" \
-H "Authorization: token $GITHUB_TOKEN" \
-H "Content-Type: application/octet-stream" \
"https://uploads.github.com/repos/$REPO_SLUG/releases/$releaseId/assets?name=$(basename "$file")"
echo
done

View File

@ -1,10 +0,0 @@
#!/usr/bin/env bash
set -e
cd "$(dirname "$0")"
channel=${1:-stable}
if ! ./version-check.sh "$channel"; then
rustup install "$channel"
./version-check.sh "$channel"
fi

View File

@ -1,37 +0,0 @@
#!/usr/bin/env bash
set -e
require() {
declare expectedProgram="$1"
declare expectedVersion="$2"
shift 2
read -r program version _ < <($expectedProgram "$@" -V)
declare ok=true
[[ $program = "$expectedProgram" ]] || ok=false
[[ $version =~ $expectedVersion ]] || ok=false
echo "Found $program $version"
if ! $ok; then
echo Error: expected "$expectedProgram $expectedVersion"
exit 1
fi
}
case ${1:-stable} in
nightly)
require rustc 1.34.[0-9]+-nightly +nightly
require cargo 1.34.[0-9]+-nightly +nightly
;;
stable)
require rustc 1.32.[0-9]+
require cargo 1.32.[0-9]+
;;
*)
echo Error: unknown argument: "$1"
exit 1
;;
esac
exit 0

24
client/Cargo.toml Normal file
View File

@ -0,0 +1,24 @@
[package]
name = "solana-client"
version = "0.12.3"
description = "Solana Client"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
homepage = "https://solana.com/"
license = "Apache-2.0"
edition = "2018"
[dependencies]
bincode = "1.1.2"
bs58 = "0.2.0"
log = "0.4.2"
reqwest = "0.9.11"
serde_json = "1.0.39"
solana-metrics = { path = "../metrics", version = "0.12.3" }
solana-netutil = { path = "../netutil", version = "0.12.3" }
solana-sdk = { path = "../sdk", version = "0.12.3" }
[dev-dependencies]
jsonrpc-core = "10.1.0"
jsonrpc-http-server = "10.1.0"
solana-logger = { path = "../logger", version = "0.12.3" }

17
client/src/client.rs Normal file
View File

@ -0,0 +1,17 @@
use crate::thin_client::ThinClient;
use std::net::SocketAddr;
use std::time::Duration;
pub fn create_client((rpc, tpu): (SocketAddr, SocketAddr), range: (u16, u16)) -> ThinClient {
let (_, transactions_socket) = solana_netutil::bind_in_range(range).unwrap();
ThinClient::new(rpc, tpu, transactions_socket)
}
pub fn create_client_with_timeout(
(rpc, tpu): (SocketAddr, SocketAddr),
range: (u16, u16),
timeout: Duration,
) -> ThinClient {
let (_, transactions_socket) = solana_netutil::bind_in_range(range).unwrap();
ThinClient::new_with_timeout(rpc, tpu, transactions_socket, timeout)
}

4
client/src/lib.rs Normal file
View File

@ -0,0 +1,4 @@
pub mod client;
pub mod rpc_mock;
pub mod rpc_request;
pub mod thin_client;

View File

@ -1,7 +1,7 @@
// Implementation of RpcRequestHandler trait for testing Rpc requests without i/o // Implementation of RpcRequestHandler trait for testing Rpc requests without i/o
use crate::rpc_request::{RpcRequest, RpcRequestHandler}; use crate::rpc_request::{RpcRequest, RpcRequestHandler};
use serde_json::{Number, Value}; use serde_json::{json, Number, Value};
use solana_sdk::hash::Hash; use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey; use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil}; use solana_sdk::signature::{Keypair, KeypairUtil};

View File

@ -1,6 +1,7 @@
use log::*;
use reqwest; use reqwest;
use reqwest::header::CONTENT_TYPE; use reqwest::header::CONTENT_TYPE;
use serde_json::{self, Value}; use serde_json::{json, Value};
use solana_sdk::timing::{DEFAULT_TICKS_PER_SLOT, NUM_TICKS_PER_SECOND}; use solana_sdk::timing::{DEFAULT_TICKS_PER_SLOT, NUM_TICKS_PER_SECOND};
use std::net::SocketAddr; use std::net::SocketAddr;
use std::thread::sleep; use std::thread::sleep;
@ -203,7 +204,7 @@ mod tests {
use jsonrpc_core::{Error, IoHandler, Params}; use jsonrpc_core::{Error, IoHandler, Params};
use jsonrpc_http_server::{AccessControlAllowOrigin, DomainsValidation, ServerBuilder}; use jsonrpc_http_server::{AccessControlAllowOrigin, DomainsValidation, ServerBuilder};
use serde_json::Number; use serde_json::Number;
use std::net::Ipv4Addr; use solana_logger;
use std::sync::mpsc::channel; use std::sync::mpsc::channel;
use std::thread; use std::thread;
@ -239,7 +240,7 @@ mod tests {
fn test_make_rpc_request() { fn test_make_rpc_request() {
let (sender, receiver) = channel(); let (sender, receiver) = channel();
thread::spawn(move || { thread::spawn(move || {
let rpc_addr = socketaddr!(0, 0); let rpc_addr = "0.0.0.0:0".parse().unwrap();
let mut io = IoHandler::default(); let mut io = IoHandler::default();
// Successful request // Successful request
io.add_method("getBalance", |_params: Params| { io.add_method("getBalance", |_params: Params| {
@ -298,7 +299,7 @@ mod tests {
// 2. Tell the client to start using it // 2. Tell the client to start using it
// 3. Delay for 1.5 seconds before starting the server to ensure the client will fail // 3. Delay for 1.5 seconds before starting the server to ensure the client will fail
// and need to retry // and need to retry
let rpc_addr = socketaddr!(0, 4242); let rpc_addr: SocketAddr = "0.0.0.0:4242".parse().unwrap();
sender.send(rpc_addr.clone()).unwrap(); sender.send(rpc_addr.clone()).unwrap();
sleep(Duration::from_millis(1500)); sleep(Duration::from_millis(1500));

View File

@ -3,17 +3,16 @@
//! messages to the network directly. The binary encoding of its messages are //! messages to the network directly. The binary encoding of its messages are
//! unstable and may change in future releases. //! unstable and may change in future releases.
use crate::contact_info::ContactInfo;
use crate::fullnode::{Fullnode, FullnodeConfig};
use crate::packet::PACKET_DATA_SIZE;
use crate::rpc_request::{RpcClient, RpcRequest, RpcRequestHandler}; use crate::rpc_request::{RpcClient, RpcRequest, RpcRequestHandler};
use bincode::serialize_into; use bincode::serialize_into;
use bs58; use bs58;
use serde_json; use log::*;
use serde_json::json;
use solana_metrics; use solana_metrics;
use solana_metrics::influxdb; use solana_metrics::influxdb;
use solana_sdk::account::Account; use solana_sdk::account::Account;
use solana_sdk::hash::Hash; use solana_sdk::hash::Hash;
use solana_sdk::packet::PACKET_DATA_SIZE;
use solana_sdk::pubkey::Pubkey; use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil, Signature}; use solana_sdk::signature::{Keypair, KeypairUtil, Signature};
use solana_sdk::system_transaction::SystemTransaction; use solana_sdk::system_transaction::SystemTransaction;
@ -22,7 +21,6 @@ use solana_sdk::transaction::Transaction;
use std; use std;
use std::io; use std::io;
use std::net::{SocketAddr, UdpSocket}; use std::net::{SocketAddr, UdpSocket};
use std::sync::Arc;
use std::thread::sleep; use std::thread::sleep;
use std::time::Duration; use std::time::Duration;
use std::time::Instant; use std::time::Instant;
@ -144,7 +142,7 @@ impl ThinClient {
result result
} }
pub fn get_account_userdata(&mut self, pubkey: &Pubkey) -> io::Result<Option<Vec<u8>>> { pub fn get_account_data(&mut self, pubkey: &Pubkey) -> io::Result<Option<Vec<u8>>> {
let params = json!([format!("{}", pubkey)]); let params = json!([format!("{}", pubkey)]);
let response = let response =
self.rpc_client self.rpc_client
@ -153,13 +151,13 @@ impl ThinClient {
Ok(account_json) => { Ok(account_json) => {
let account: Account = let account: Account =
serde_json::from_value(account_json).expect("deserialize account"); serde_json::from_value(account_json).expect("deserialize account");
Ok(Some(account.userdata)) Ok(Some(account.data))
} }
Err(error) => { Err(error) => {
debug!("get_account_userdata failed: {:?}", error); debug!("get_account_data failed: {:?}", error);
Err(io::Error::new( Err(io::Error::new(
io::ErrorKind::Other, io::ErrorKind::Other,
"get_account_userdata failed", "get_account_data failed",
)) ))
} }
} }
@ -411,211 +409,3 @@ pub fn retry_get_balance(
} }
None None
} }
pub fn new_fullnode() -> (Fullnode, ContactInfo, Keypair, String) {
use crate::blocktree::create_new_tmp_ledger;
use crate::cluster_info::Node;
use crate::fullnode::Fullnode;
use solana_sdk::genesis_block::GenesisBlock;
use solana_sdk::signature::KeypairUtil;
let node_keypair = Arc::new(Keypair::new());
let node = Node::new_localhost_with_pubkey(&node_keypair.pubkey());
let contact_info = node.info.clone();
let (genesis_block, mint_keypair) = GenesisBlock::new_with_leader(10_000, &contact_info.id, 42);
let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_block);
let voting_keypair = Keypair::new();
let node = Fullnode::new(
node,
&node_keypair,
&ledger_path,
&voting_keypair.pubkey(),
voting_keypair,
None,
&FullnodeConfig::default(),
);
(node, contact_info, mint_keypair, ledger_path)
}
#[cfg(test)]
mod tests {
use super::*;
use crate::client::mk_client;
use crate::gossip_service::discover;
use bincode::{deserialize, serialize};
use solana_sdk::system_instruction::SystemInstruction;
use solana_vote_api::vote_state::VoteState;
use solana_vote_api::vote_transaction::VoteTransaction;
use std::fs::remove_dir_all;
#[test]
fn test_thin_client_basic() {
solana_logger::setup();
let (server, leader_data, alice, ledger_path) = new_fullnode();
let bob_pubkey = Keypair::new().pubkey();
discover(&leader_data.gossip, 1).unwrap();
let mut client = mk_client(&leader_data);
let transaction_count = client.transaction_count();
assert_eq!(transaction_count, 0);
let blockhash = client.get_recent_blockhash();
info!("test_thin_client blockhash: {:?}", blockhash);
let signature = client
.transfer(500, &alice, &bob_pubkey, &blockhash)
.unwrap();
info!("test_thin_client signature: {:?}", signature);
client.poll_for_signature(&signature).unwrap();
let balance = client.get_balance(&bob_pubkey);
assert_eq!(balance.unwrap(), 500);
let transaction_count = client.transaction_count();
assert_eq!(transaction_count, 1);
server.close().unwrap();
remove_dir_all(ledger_path).unwrap();
}
#[test]
#[ignore]
fn test_bad_sig() {
solana_logger::setup();
let (server, leader_data, alice, ledger_path) = new_fullnode();
let bob_pubkey = Keypair::new().pubkey();
discover(&leader_data.gossip, 1).unwrap();
let mut client = mk_client(&leader_data);
let blockhash = client.get_recent_blockhash();
let tx = SystemTransaction::new_account(&alice, &bob_pubkey, 500, blockhash, 0);
let _sig = client.transfer_signed(&tx).unwrap();
let blockhash = client.get_recent_blockhash();
let mut tr2 = SystemTransaction::new_account(&alice, &bob_pubkey, 501, blockhash, 0);
let mut instruction2 = deserialize(tr2.userdata(0)).unwrap();
if let SystemInstruction::Move { ref mut lamports } = instruction2 {
*lamports = 502;
}
tr2.instructions[0].userdata = serialize(&instruction2).unwrap();
let signature = client.transfer_signed(&tr2).unwrap();
client.poll_for_signature(&signature).unwrap();
let balance = client.get_balance(&bob_pubkey);
assert_eq!(balance.unwrap(), 1001);
server.close().unwrap();
remove_dir_all(ledger_path).unwrap();
}
#[test]
fn test_register_vote_account() {
solana_logger::setup();
let (server, leader_data, alice, ledger_path) = new_fullnode();
discover(&leader_data.gossip, 1).unwrap();
let mut client = mk_client(&leader_data);
// Create the validator account, transfer some lamports to that account
let validator_keypair = Keypair::new();
let blockhash = client.get_recent_blockhash();
let signature = client
.transfer(500, &alice, &validator_keypair.pubkey(), &blockhash)
.unwrap();
client.poll_for_signature(&signature).unwrap();
// Create and register the vote account
let validator_vote_account_keypair = Keypair::new();
let vote_account_id = validator_vote_account_keypair.pubkey();
let blockhash = client.get_recent_blockhash();
let transaction =
VoteTransaction::new_account(&validator_keypair, &vote_account_id, blockhash, 1, 1);
let signature = client.transfer_signed(&transaction).unwrap();
client.poll_for_signature(&signature).unwrap();
let balance = retry_get_balance(&mut client, &vote_account_id, Some(1))
.expect("Expected balance for new account to exist");
assert_eq!(balance, 1);
const LAST: usize = 30;
for run in 0..=LAST {
let account_user_data = client
.get_account_userdata(&vote_account_id)
.expect("Expected valid response for account userdata")
.expect("Expected valid account userdata to exist after account creation");
let vote_state = VoteState::deserialize(&account_user_data);
if vote_state.map(|vote_state| vote_state.delegate_id) == Ok(vote_account_id) {
break;
}
if run == LAST {
panic!("Expected successful vote account registration");
}
sleep(Duration::from_millis(900));
}
server.close().unwrap();
remove_dir_all(ledger_path).unwrap();
}
#[test]
fn test_transaction_count() {
// set a bogus address, see that we don't hang
solana_logger::setup();
let addr = "0.0.0.0:1234".parse().unwrap();
let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut client =
ThinClient::new_with_timeout(addr, addr, transactions_socket, Duration::from_secs(2));
assert_eq!(client.transaction_count(), 0);
}
#[test]
fn test_zero_balance_after_nonzero() {
solana_logger::setup();
let (server, leader_data, alice, ledger_path) = new_fullnode();
let bob_keypair = Keypair::new();
discover(&leader_data.gossip, 1).unwrap();
let mut client = mk_client(&leader_data);
let blockhash = client.get_recent_blockhash();
info!("test_thin_client blockhash: {:?}", blockhash);
let starting_alice_balance = client.poll_get_balance(&alice.pubkey()).unwrap();
info!("Alice has {} lamports", starting_alice_balance);
info!("Give Bob 500 lamports");
let signature = client
.transfer(500, &alice, &bob_keypair.pubkey(), &blockhash)
.unwrap();
client.poll_for_signature(&signature).unwrap();
let bob_balance = client.poll_get_balance(&bob_keypair.pubkey());
assert_eq!(bob_balance.unwrap(), 500);
info!("Take Bob's 500 lamports away");
let signature = client
.transfer(500, &bob_keypair, &alice.pubkey(), &blockhash)
.unwrap();
client.poll_for_signature(&signature).unwrap();
let alice_balance = client.poll_get_balance(&alice.pubkey()).unwrap();
assert_eq!(alice_balance, starting_alice_balance);
info!("Should get an error when Bob's balance hits zero and is purged");
let bob_balance = client.poll_get_balance(&bob_keypair.pubkey());
info!("Bob's balance is {:?}", bob_balance);
assert!(bob_balance.is_err(),);
server.close().unwrap();
remove_dir_all(ledger_path).unwrap();
}
}

View File

@ -1,10 +1,10 @@
[package] [package]
name = "solana" name = "solana"
description = "Blockchain, Rebuilt for Scale" description = "Blockchain, Rebuilt for Scale"
version = "0.12.0" version = "0.12.3"
documentation = "https://docs.rs/solana" documentation = "https://docs.rs/solana"
homepage = "https://solana.com/" homepage = "https://solana.com/"
readme = "README.md" readme = "../README.md"
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
license = "Apache-2.0" license = "Apache-2.0"
@ -39,22 +39,22 @@ nix = "0.13.0"
rand = "0.6.5" rand = "0.6.5"
rand_chacha = "0.1.1" rand_chacha = "0.1.1"
rayon = "1.0.0" rayon = "1.0.0"
reqwest = "0.9.11"
ring = "0.13.2" ring = "0.13.2"
rocksdb = "0.11.0" rocksdb = "0.11.0"
serde = "1.0.89" serde = "1.0.89"
serde_derive = "1.0.88" serde_derive = "1.0.88"
serde_json = "1.0.39" serde_json = "1.0.39"
solana-budget-api = { path = "../programs/budget_api", version = "0.12.0" } solana-budget-api = { path = "../programs/budget_api", version = "0.12.3" }
solana-drone = { path = "../drone", version = "0.12.0" } solana-client = { path = "../client", version = "0.12.3" }
solana-logger = { path = "../logger", version = "0.12.0" } solana-drone = { path = "../drone", version = "0.12.3" }
solana-metrics = { path = "../metrics", version = "0.12.0" } solana-logger = { path = "../logger", version = "0.12.3" }
solana-netutil = { path = "../netutil", version = "0.12.0" } solana-metrics = { path = "../metrics", version = "0.12.3" }
solana-runtime = { path = "../runtime", version = "0.12.0" } solana-netutil = { path = "../netutil", version = "0.12.3" }
solana-sdk = { path = "../sdk", version = "0.12.0" } solana-runtime = { path = "../runtime", version = "0.12.3" }
solana-storage-api = { path = "../programs/storage_api", version = "0.12.0" } solana-sdk = { path = "../sdk", version = "0.12.3" }
solana-vote-api = { path = "../programs/vote_api", version = "0.12.0" } solana-storage-api = { path = "../programs/storage_api", version = "0.12.3" }
solana-vote-signer = { path = "../vote-signer", version = "0.12.0" } solana-vote-api = { path = "../programs/vote_api", version = "0.12.3" }
solana-vote-signer = { path = "../vote-signer", version = "0.12.3" }
sys-info = "0.5.6" sys-info = "0.5.6"
tokio = "0.1" tokio = "0.1"
tokio-codec = "0.1" tokio-codec = "0.1"
@ -63,6 +63,5 @@ untrusted = "0.6.2"
[dev-dependencies] [dev-dependencies]
hex-literal = "0.1.3" hex-literal = "0.1.3"
matches = "0.1.6" matches = "0.1.6"
solana-vote-program = { path = "../programs/vote", version = "0.12.0" } solana-vote-program = { path = "../programs/vote", version = "0.12.3" }
solana-budget-program = { path = "../programs/budget", version = "0.12.0" } solana-budget-program = { path = "../programs/budget", version = "0.12.3" }

View File

@ -1,7 +1,7 @@
//! The `bank_forks` module implments BankForks a DAG of checkpointed Banks //! The `bank_forks` module implments BankForks a DAG of checkpointed Banks
use hashbrown::{HashMap, HashSet};
use solana_runtime::bank::Bank; use solana_runtime::bank::Bank;
use std::collections::HashMap;
use std::ops::Index; use std::ops::Index;
use std::sync::Arc; use std::sync::Arc;
@ -27,17 +27,40 @@ impl BankForks {
working_bank, working_bank,
} }
} }
/// Create a map of bank slot id to the set of ancestors for the bank slot.
pub fn ancestors(&self) -> HashMap<u64, HashSet<u64>> {
let mut ancestors = HashMap::new();
for bank in self.banks.values() {
let set = bank.parents().into_iter().map(|b| b.slot()).collect();
ancestors.insert(bank.slot(), set);
}
ancestors
}
/// Create a map of bank slot id to the set of all of its descendants
pub fn descendants(&self) -> HashMap<u64, HashSet<u64>> {
let mut descendants = HashMap::new();
for bank in self.banks.values() {
let _ = descendants.entry(bank.slot()).or_insert(HashSet::new());
for parent in bank.parents() {
descendants
.entry(parent.slot())
.or_insert(HashSet::new())
.insert(bank.slot());
}
}
descendants
}
pub fn frozen_banks(&self) -> HashMap<u64, Arc<Bank>> { pub fn frozen_banks(&self) -> HashMap<u64, Arc<Bank>> {
let mut frozen_banks: Vec<Arc<Bank>> = vec![];
frozen_banks.extend(self.banks.values().filter(|v| v.is_frozen()).cloned());
frozen_banks.extend(
self.banks self.banks
.iter() .iter()
.flat_map(|(_, v)| v.parents()) .filter(|(_, b)| b.is_frozen())
.filter(|v| v.is_frozen()), .map(|(k, b)| (*k, b.clone()))
); .collect()
frozen_banks.into_iter().map(|b| (b.slot(), b)).collect()
} }
pub fn active_banks(&self) -> Vec<u64> { pub fn active_banks(&self) -> Vec<u64> {
self.banks self.banks
.iter() .iter()
@ -45,6 +68,7 @@ impl BankForks {
.map(|(k, _v)| *k) .map(|(k, _v)| *k)
.collect() .collect()
} }
pub fn get(&self, bank_slot: u64) -> Option<&Arc<Bank>> { pub fn get(&self, bank_slot: u64) -> Option<&Arc<Bank>> {
self.banks.get(&bank_slot) self.banks.get(&bank_slot)
} }
@ -61,30 +85,32 @@ impl BankForks {
} }
} }
// TODO: use the bank's own ID instead of receiving a parameter? pub fn insert(&mut self, bank: Bank) {
pub fn insert(&mut self, bank_slot: u64, bank: Bank) { let bank = Arc::new(bank);
let mut bank = Arc::new(bank); let prev = self.banks.insert(bank.slot(), bank.clone());
assert_eq!(bank_slot, bank.slot());
let prev = self.banks.insert(bank_slot, bank.clone());
assert!(prev.is_none()); assert!(prev.is_none());
self.working_bank = bank.clone(); self.working_bank = bank.clone();
// TODO: this really only needs to look at the first
// parent if we're always calling insert()
// when we construct a child bank
while let Some(parent) = bank.parent() {
if let Some(prev) = self.banks.remove(&parent.slot()) {
assert!(Arc::ptr_eq(&prev, &parent));
}
bank = parent;
}
} }
// TODO: really want to kill this... // TODO: really want to kill this...
pub fn working_bank(&self) -> Arc<Bank> { pub fn working_bank(&self) -> Arc<Bank> {
self.working_bank.clone() self.working_bank.clone()
} }
pub fn set_root(&mut self, root: u64) {
let root_bank = self
.banks
.get(&root)
.expect("root bank didn't exist in bank_forks");
root_bank.squash();
self.prune_non_root(root);
}
fn prune_non_root(&mut self, root: u64) {
self.banks
.retain(|slot, bank| *slot >= root || bank.is_in_subtree_of(root))
}
} }
#[cfg(test)] #[cfg(test)]
@ -101,18 +127,53 @@ mod tests {
let mut bank_forks = BankForks::new(0, bank); let mut bank_forks = BankForks::new(0, bank);
let child_bank = Bank::new_from_parent(&bank_forks[0u64], &Pubkey::default(), 1); let child_bank = Bank::new_from_parent(&bank_forks[0u64], &Pubkey::default(), 1);
child_bank.register_tick(&Hash::default()); child_bank.register_tick(&Hash::default());
bank_forks.insert(1, child_bank); bank_forks.insert(child_bank);
assert_eq!(bank_forks[1u64].tick_height(), 1); assert_eq!(bank_forks[1u64].tick_height(), 1);
assert_eq!(bank_forks.working_bank().tick_height(), 1); assert_eq!(bank_forks.working_bank().tick_height(), 1);
} }
#[test]
fn test_bank_forks_descendants() {
let (genesis_block, _) = GenesisBlock::new(10_000);
let bank = Bank::new(&genesis_block);
let mut bank_forks = BankForks::new(0, bank);
let bank0 = bank_forks[0].clone();
let bank = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
bank_forks.insert(bank);
let bank = Bank::new_from_parent(&bank0, &Pubkey::default(), 2);
bank_forks.insert(bank);
let descendants = bank_forks.descendants();
let children: Vec<u64> = descendants[&0].iter().cloned().collect();
assert_eq!(children, vec![1, 2]);
assert!(descendants[&1].is_empty());
assert!(descendants[&2].is_empty());
}
#[test]
fn test_bank_forks_ancestors() {
let (genesis_block, _) = GenesisBlock::new(10_000);
let bank = Bank::new(&genesis_block);
let mut bank_forks = BankForks::new(0, bank);
let bank0 = bank_forks[0].clone();
let bank = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
bank_forks.insert(bank);
let bank = Bank::new_from_parent(&bank0, &Pubkey::default(), 2);
bank_forks.insert(bank);
let ancestors = bank_forks.ancestors();
assert!(ancestors[&0].is_empty());
let parents: Vec<u64> = ancestors[&1].iter().cloned().collect();
assert_eq!(parents, vec![0]);
let parents: Vec<u64> = ancestors[&2].iter().cloned().collect();
assert_eq!(parents, vec![0]);
}
#[test] #[test]
fn test_bank_forks_frozen_banks() { fn test_bank_forks_frozen_banks() {
let (genesis_block, _) = GenesisBlock::new(10_000); let (genesis_block, _) = GenesisBlock::new(10_000);
let bank = Bank::new(&genesis_block); let bank = Bank::new(&genesis_block);
let mut bank_forks = BankForks::new(0, bank); let mut bank_forks = BankForks::new(0, bank);
let child_bank = Bank::new_from_parent(&bank_forks[0u64], &Pubkey::default(), 1); let child_bank = Bank::new_from_parent(&bank_forks[0u64], &Pubkey::default(), 1);
bank_forks.insert(1, child_bank); bank_forks.insert(child_bank);
assert!(bank_forks.frozen_banks().get(&0).is_some()); assert!(bank_forks.frozen_banks().get(&0).is_some());
assert!(bank_forks.frozen_banks().get(&1).is_none()); assert!(bank_forks.frozen_banks().get(&1).is_none());
} }
@ -123,7 +184,7 @@ mod tests {
let bank = Bank::new(&genesis_block); let bank = Bank::new(&genesis_block);
let mut bank_forks = BankForks::new(0, bank); let mut bank_forks = BankForks::new(0, bank);
let child_bank = Bank::new_from_parent(&bank_forks[0u64], &Pubkey::default(), 1); let child_bank = Bank::new_from_parent(&bank_forks[0u64], &Pubkey::default(), 1);
bank_forks.insert(1, child_bank); bank_forks.insert(child_bank);
assert_eq!(bank_forks.active_banks(), vec![1]); assert_eq!(bank_forks.active_banks(), vec![1]);
} }

View File

@ -4,7 +4,6 @@
use crate::cluster_info::ClusterInfo; use crate::cluster_info::ClusterInfo;
use crate::entry::Entry; use crate::entry::Entry;
use crate::leader_confirmation_service::LeaderConfirmationService;
use crate::leader_schedule_utils; use crate::leader_schedule_utils;
use crate::packet; use crate::packet;
use crate::packet::SharedPackets; use crate::packet::SharedPackets;
@ -17,6 +16,7 @@ use crate::sigverify_stage::VerifiedPackets;
use bincode::deserialize; use bincode::deserialize;
use solana_metrics::counter::Counter; use solana_metrics::counter::Counter;
use solana_runtime::bank::{self, Bank, BankError}; use solana_runtime::bank::{self, Bank, BankError};
use solana_sdk::pubkey::Pubkey;
use solana_sdk::timing::{self, duration_as_us, MAX_RECENT_BLOCKHASHES}; use solana_sdk::timing::{self, duration_as_us, MAX_RECENT_BLOCKHASHES};
use solana_sdk::transaction::Transaction; use solana_sdk::transaction::Transaction;
use std::net::UdpSocket; use std::net::UdpSocket;
@ -26,13 +26,9 @@ use std::sync::{Arc, Mutex, RwLock};
use std::thread::{self, Builder, JoinHandle}; use std::thread::{self, Builder, JoinHandle};
use std::time::Duration; use std::time::Duration;
use std::time::Instant; use std::time::Instant;
use sys_info;
pub type UnprocessedPackets = Vec<(SharedPackets, usize)>; // `usize` is the index of the first unprocessed packet in `SharedPackets` pub type UnprocessedPackets = Vec<(SharedPackets, usize)>; // `usize` is the index of the first unprocessed packet in `SharedPackets`
// number of threads is 1 until mt bank is ready
pub const NUM_THREADS: u32 = 10;
/// Stores the stage's thread handle and output receiver. /// Stores the stage's thread handle and output receiver.
pub struct BankingStage { pub struct BankingStage {
bank_thread_hdls: Vec<JoinHandle<()>>, bank_thread_hdls: Vec<JoinHandle<()>>,
@ -53,10 +49,8 @@ impl BankingStage {
// Once an entry has been recorded, its blockhash is registered with the bank. // Once an entry has been recorded, its blockhash is registered with the bank.
let exit = Arc::new(AtomicBool::new(false)); let exit = Arc::new(AtomicBool::new(false));
// Single thread to compute confirmation
let lcs_handle = LeaderConfirmationService::start(&poh_recorder, exit.clone());
// Many banks that process transactions in parallel. // Many banks that process transactions in parallel.
let mut bank_thread_hdls: Vec<JoinHandle<()>> = (0..Self::num_threads()) let bank_thread_hdls: Vec<JoinHandle<()>> = (0..4)
.map(|_| { .map(|_| {
let verified_receiver = verified_receiver.clone(); let verified_receiver = verified_receiver.clone();
let poh_recorder = poh_recorder.clone(); let poh_recorder = poh_recorder.clone();
@ -71,7 +65,6 @@ impl BankingStage {
.unwrap() .unwrap()
}) })
.collect(); .collect();
bank_thread_hdls.push(lcs_handle);
Self { bank_thread_hdls } Self { bank_thread_hdls }
} }
@ -135,7 +128,9 @@ impl BankingStage {
// Buffer the packets if I am the next leader // Buffer the packets if I am the next leader
// or, if it was getting sent to me // or, if it was getting sent to me
let leader_id = match poh_recorder.lock().unwrap().bank() { let leader_id = match poh_recorder.lock().unwrap().bank() {
Some(bank) => leader_schedule_utils::slot_leader_at(bank.slot() + 1, &bank).unwrap(), Some(bank) => {
leader_schedule_utils::slot_leader_at(bank.slot() + 1, &bank).unwrap_or_default()
}
None => rcluster_info None => rcluster_info
.leader_data() .leader_data()
.map(|x| x.id) .map(|x| x.id)
@ -186,10 +181,6 @@ impl BankingStage {
} }
} }
pub fn num_threads() -> u32 {
sys_info::cpu_num().unwrap_or(NUM_THREADS)
}
/// Convert the transactions from a blob of binary data to a vector of transactions /// Convert the transactions from a blob of binary data to a vector of transactions
fn deserialize_transactions(p: &Packets) -> Vec<Option<Transaction>> { fn deserialize_transactions(p: &Packets) -> Vec<Option<Transaction>> {
p.packets p.packets
@ -228,24 +219,22 @@ impl BankingStage {
Ok(()) Ok(())
} }
pub fn process_and_record_transactions( fn process_and_record_transactions_locked(
bank: &Bank, bank: &Bank,
txs: &[Transaction], txs: &[Transaction],
poh: &Arc<Mutex<PohRecorder>>, poh: &Arc<Mutex<PohRecorder>>,
lock_results: &[bank::Result<()>],
) -> Result<()> { ) -> Result<()> {
let now = Instant::now();
// Once accounts are locked, other threads cannot encode transactions that will modify the
// same account state
let lock_results = bank.lock_accounts(txs);
let lock_time = now.elapsed();
let now = Instant::now(); let now = Instant::now();
// Use a shorter maximum age when adding transactions into the pipeline. This will reduce // Use a shorter maximum age when adding transactions into the pipeline. This will reduce
// the likelihood of any single thread getting starved and processing old ids. // the likelihood of any single thread getting starved and processing old ids.
// TODO: Banking stage threads should be prioritized to complete faster then this queue // TODO: Banking stage threads should be prioritized to complete faster then this queue
// expires. // expires.
let (loaded_accounts, results) = let (loaded_accounts, results) = bank.load_and_execute_transactions(
bank.load_and_execute_transactions(txs, lock_results, MAX_RECENT_BLOCKHASHES / 2); txs,
lock_results.to_vec(),
MAX_RECENT_BLOCKHASHES / 2,
);
let load_execute_time = now.elapsed(); let load_execute_time = now.elapsed();
let record_time = { let record_time = {
@ -260,21 +249,45 @@ impl BankingStage {
now.elapsed() now.elapsed()
}; };
let now = Instant::now();
// Once the accounts are new transactions can enter the pipeline to process them
bank.unlock_accounts(&txs, &results);
let unlock_time = now.elapsed();
debug!( debug!(
"bank: {} lock: {}us load_execute: {}us record: {}us commit: {}us unlock: {}us txs_len: {}", "bank: {} load_execute: {}us record: {}us commit: {}us txs_len: {}",
bank.slot(), bank.slot(),
duration_as_us(&lock_time),
duration_as_us(&load_execute_time), duration_as_us(&load_execute_time),
duration_as_us(&record_time), duration_as_us(&record_time),
duration_as_us(&commit_time), duration_as_us(&commit_time),
txs.len(),
);
Ok(())
}
pub fn process_and_record_transactions(
bank: &Bank,
txs: &[Transaction],
poh: &Arc<Mutex<PohRecorder>>,
) -> Result<()> {
let now = Instant::now();
// Once accounts are locked, other threads cannot encode transactions that will modify the
// same account state
let lock_results = bank.lock_accounts(txs);
let lock_time = now.elapsed();
let results = Self::process_and_record_transactions_locked(bank, txs, poh, &lock_results);
let now = Instant::now();
// Once the accounts are new transactions can enter the pipeline to process them
bank.unlock_accounts(&txs, &lock_results);
let unlock_time = now.elapsed();
debug!(
"bank: {} lock: {}us unlock: {}us txs_len: {}",
bank.slot(),
duration_as_us(&lock_time),
duration_as_us(&unlock_time), duration_as_us(&unlock_time),
txs.len(), txs.len(),
); );
Ok(())
results
} }
/// Sends transactions to the bank. /// Sends transactions to the bank.
@ -431,8 +444,14 @@ pub fn create_test_recorder(
Receiver<WorkingBankEntries>, Receiver<WorkingBankEntries>,
) { ) {
let exit = Arc::new(AtomicBool::new(false)); let exit = Arc::new(AtomicBool::new(false));
let (poh_recorder, entry_receiver) = let (poh_recorder, entry_receiver) = PohRecorder::new(
PohRecorder::new(bank.tick_height(), bank.last_blockhash()); bank.tick_height(),
bank.last_blockhash(),
bank.slot(),
Some(4),
bank.ticks_per_slot(),
&Pubkey::default(),
);
let poh_recorder = Arc::new(Mutex::new(poh_recorder)); let poh_recorder = Arc::new(Mutex::new(poh_recorder));
let poh_service = PohService::new(poh_recorder.clone(), &PohServiceConfig::default(), &exit); let poh_service = PohService::new(poh_recorder.clone(), &PohServiceConfig::default(), &exit);
(exit, poh_recorder, poh_service, entry_receiver) (exit, poh_recorder, poh_service, entry_receiver)
@ -640,8 +659,14 @@ mod tests {
max_tick_height: std::u64::MAX, max_tick_height: std::u64::MAX,
}; };
let (poh_recorder, entry_receiver) = let (poh_recorder, entry_receiver) = PohRecorder::new(
PohRecorder::new(bank.tick_height(), bank.last_blockhash()); bank.tick_height(),
bank.last_blockhash(),
bank.slot(),
None,
bank.ticks_per_slot(),
&Pubkey::default(),
);
let poh_recorder = Arc::new(Mutex::new(poh_recorder)); let poh_recorder = Arc::new(Mutex::new(poh_recorder));
poh_recorder.lock().unwrap().set_working_bank(working_bank); poh_recorder.lock().unwrap().set_working_bank(working_bank);
@ -693,8 +718,14 @@ mod tests {
min_tick_height: bank.tick_height(), min_tick_height: bank.tick_height(),
max_tick_height: bank.tick_height() + 1, max_tick_height: bank.tick_height() + 1,
}; };
let (poh_recorder, entry_receiver) = let (poh_recorder, entry_receiver) = PohRecorder::new(
PohRecorder::new(bank.tick_height(), bank.last_blockhash()); bank.tick_height(),
bank.last_blockhash(),
bank.slot(),
Some(4),
bank.ticks_per_slot(),
&pubkey,
);
let poh_recorder = Arc::new(Mutex::new(poh_recorder)); let poh_recorder = Arc::new(Mutex::new(poh_recorder));
poh_recorder.lock().unwrap().set_working_bank(working_bank); poh_recorder.lock().unwrap().set_working_bank(working_bank);

View File

@ -138,14 +138,13 @@ pub fn process_blocktree(
warn!("entry0 not present"); warn!("entry0 not present");
return Err(BankError::LedgerVerificationFailed); return Err(BankError::LedgerVerificationFailed);
} }
let entry0 = &entries[0]; let entry0 = entries.remove(0);
if !(entry0.is_tick() && entry0.verify(&last_entry_hash)) { if !(entry0.is_tick() && entry0.verify(&last_entry_hash)) {
warn!("Ledger proof of history failed at entry0"); warn!("Ledger proof of history failed at entry0");
return Err(BankError::LedgerVerificationFailed); return Err(BankError::LedgerVerificationFailed);
} }
last_entry_hash = entry0.hash; last_entry_hash = entry0.hash;
entry_height += 1; entry_height += 1;
entries = entries.drain(1..).collect();
} }
if !entries.is_empty() { if !entries.is_empty() {

View File

@ -1,14 +0,0 @@
use crate::cluster_info::FULLNODE_PORT_RANGE;
use crate::contact_info::ContactInfo;
use crate::thin_client::ThinClient;
use std::time::Duration;
pub fn mk_client(r: &ContactInfo) -> ThinClient {
let (_, transactions_socket) = solana_netutil::bind_in_range(FULLNODE_PORT_RANGE).unwrap();
ThinClient::new(r.rpc, r.tpu, transactions_socket)
}
pub fn mk_client_with_timeout(r: &ContactInfo, timeout: Duration) -> ThinClient {
let (_, transactions_socket) = solana_netutil::bind_in_range(FULLNODE_PORT_RANGE).unwrap();
ThinClient::new_with_timeout(r.rpc, r.tpu, transactions_socket, timeout)
}

6
core/src/cluster.rs Normal file
View File

@ -0,0 +1,6 @@
use solana_sdk::pubkey::Pubkey;
pub trait Cluster {
fn get_node_ids(&self) -> Vec<Pubkey>;
fn restart_node(&mut self, pubkey: Pubkey);
}

View File

@ -3,14 +3,18 @@ use crate::blocktree::Blocktree;
/// ///
/// All tests must start from an entry point and a funding keypair and /// All tests must start from an entry point and a funding keypair and
/// discover the rest of the network. /// discover the rest of the network.
use crate::client::mk_client; use crate::cluster_info::FULLNODE_PORT_RANGE;
use crate::contact_info::ContactInfo; use crate::contact_info::ContactInfo;
use crate::entry::{Entry, EntrySlice}; use crate::entry::{Entry, EntrySlice};
use crate::gossip_service::discover; use crate::gossip_service::discover;
use crate::poh_service::PohServiceConfig;
use solana_client::client::create_client;
use solana_sdk::hash::Hash; use solana_sdk::hash::Hash;
use solana_sdk::signature::{Keypair, KeypairUtil}; use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::system_transaction::SystemTransaction; use solana_sdk::system_transaction::SystemTransaction;
use solana_sdk::timing::{DEFAULT_SLOTS_PER_EPOCH, DEFAULT_TICKS_PER_SLOT, NUM_TICKS_PER_SECOND}; use solana_sdk::timing::{
duration_as_ms, DEFAULT_SLOTS_PER_EPOCH, DEFAULT_TICKS_PER_SLOT, NUM_TICKS_PER_SECOND,
};
use std::thread::sleep; use std::thread::sleep;
use std::time::Duration; use std::time::Duration;
@ -26,7 +30,7 @@ pub fn spend_and_verify_all_nodes(
assert!(cluster_nodes.len() >= nodes); assert!(cluster_nodes.len() >= nodes);
for ingress_node in &cluster_nodes { for ingress_node in &cluster_nodes {
let random_keypair = Keypair::new(); let random_keypair = Keypair::new();
let mut client = mk_client(&ingress_node); let mut client = create_client(ingress_node.client_facing_addr(), FULLNODE_PORT_RANGE);
let bal = client let bal = client
.poll_get_balance(&funding_keypair.pubkey()) .poll_get_balance(&funding_keypair.pubkey())
.expect("balance in source"); .expect("balance in source");
@ -42,14 +46,14 @@ pub fn spend_and_verify_all_nodes(
.retry_transfer(&funding_keypair, &mut transaction, 5) .retry_transfer(&funding_keypair, &mut transaction, 5)
.unwrap(); .unwrap();
for validator in &cluster_nodes { for validator in &cluster_nodes {
let mut client = mk_client(&validator); let mut client = create_client(validator.client_facing_addr(), FULLNODE_PORT_RANGE);
client.poll_for_signature(&sig).unwrap(); client.poll_for_signature(&sig).unwrap();
} }
} }
} }
pub fn send_many_transactions(node: &ContactInfo, funding_keypair: &Keypair, num_txs: u64) { pub fn send_many_transactions(node: &ContactInfo, funding_keypair: &Keypair, num_txs: u64) {
let mut client = mk_client(node); let mut client = create_client(node.client_facing_addr(), FULLNODE_PORT_RANGE);
for _ in 0..num_txs { for _ in 0..num_txs {
let random_keypair = Keypair::new(); let random_keypair = Keypair::new();
let bal = client let bal = client
@ -73,12 +77,12 @@ pub fn fullnode_exit(entry_point_info: &ContactInfo, nodes: usize) {
let cluster_nodes = discover(&entry_point_info.gossip, nodes).unwrap(); let cluster_nodes = discover(&entry_point_info.gossip, nodes).unwrap();
assert!(cluster_nodes.len() >= nodes); assert!(cluster_nodes.len() >= nodes);
for node in &cluster_nodes { for node in &cluster_nodes {
let mut client = mk_client(&node); let mut client = create_client(node.client_facing_addr(), FULLNODE_PORT_RANGE);
assert!(client.fullnode_exit().unwrap()); assert!(client.fullnode_exit().unwrap());
} }
sleep(Duration::from_millis(SLOT_MILLIS)); sleep(Duration::from_millis(SLOT_MILLIS));
for node in &cluster_nodes { for node in &cluster_nodes {
let mut client = mk_client(&node); let mut client = create_client(node.client_facing_addr(), FULLNODE_PORT_RANGE);
assert!(client.fullnode_exit().is_err()); assert!(client.fullnode_exit().is_err());
} }
} }
@ -116,6 +120,25 @@ pub fn verify_ledger_ticks(ledger_path: &str, ticks_per_slot: usize) {
} }
} }
pub fn sleep_n_epochs(
num_epochs: f64,
config: &PohServiceConfig,
ticks_per_slot: u64,
slots_per_epoch: u64,
) {
let num_ticks_per_second = {
match config {
PohServiceConfig::Sleep(d) => (1000 / duration_as_ms(d)) as f64,
_ => panic!("Unsuppported tick config for testing"),
}
};
let num_ticks_to_sleep = num_epochs * ticks_per_slot as f64 * slots_per_epoch as f64;
sleep(Duration::from_secs(
((num_ticks_to_sleep + num_ticks_per_second - 1.0) / num_ticks_per_second) as u64,
));
}
pub fn kill_entry_and_spend_and_verify_rest( pub fn kill_entry_and_spend_and_verify_rest(
entry_point_info: &ContactInfo, entry_point_info: &ContactInfo,
funding_keypair: &Keypair, funding_keypair: &Keypair,
@ -124,7 +147,7 @@ pub fn kill_entry_and_spend_and_verify_rest(
solana_logger::setup(); solana_logger::setup();
let cluster_nodes = discover(&entry_point_info.gossip, nodes).unwrap(); let cluster_nodes = discover(&entry_point_info.gossip, nodes).unwrap();
assert!(cluster_nodes.len() >= nodes); assert!(cluster_nodes.len() >= nodes);
let mut client = mk_client(&entry_point_info); let mut client = create_client(entry_point_info.client_facing_addr(), FULLNODE_PORT_RANGE);
info!("sleeping for an epoch"); info!("sleeping for an epoch");
sleep(Duration::from_millis(SLOT_MILLIS * DEFAULT_SLOTS_PER_EPOCH)); sleep(Duration::from_millis(SLOT_MILLIS * DEFAULT_SLOTS_PER_EPOCH));
info!("done sleeping for an epoch"); info!("done sleeping for an epoch");
@ -138,7 +161,7 @@ pub fn kill_entry_and_spend_and_verify_rest(
continue; continue;
} }
let random_keypair = Keypair::new(); let random_keypair = Keypair::new();
let mut client = mk_client(&ingress_node); let mut client = create_client(ingress_node.client_facing_addr(), FULLNODE_PORT_RANGE);
let bal = client let bal = client
.poll_get_balance(&funding_keypair.pubkey()) .poll_get_balance(&funding_keypair.pubkey())
.expect("balance in source"); .expect("balance in source");
@ -157,7 +180,7 @@ pub fn kill_entry_and_spend_and_verify_rest(
if validator.id == entry_point_info.id { if validator.id == entry_point_info.id {
continue; continue;
} }
let mut client = mk_client(&validator); let mut client = create_client(validator.client_facing_addr(), FULLNODE_PORT_RANGE);
client.poll_for_signature(&sig).unwrap(); client.poll_for_signature(&sig).unwrap();
} }
} }

View File

@ -196,6 +196,10 @@ impl ContactInfo {
pub fn is_valid_address(addr: &SocketAddr) -> bool { pub fn is_valid_address(addr: &SocketAddr) -> bool {
(addr.port() != 0) && Self::is_valid_ip(addr.ip()) (addr.port() != 0) && Self::is_valid_ip(addr.ip())
} }
pub fn client_facing_addr(&self) -> (SocketAddr, SocketAddr) {
(self.rpc, self.tpu)
}
} }
impl Signable for ContactInfo { impl Signable for ContactInfo {

View File

@ -68,7 +68,7 @@ impl CrdsValueLabel {
} }
impl Vote { impl Vote {
// TODO: it might make sense for the transaction to encode the wallclock in the userdata // TODO: it might make sense for the transaction to encode the wallclock in the data
pub fn new(transaction: Transaction, wallclock: u64) -> Self { pub fn new(transaction: Transaction, wallclock: u64) -> Self {
Vote { Vote {
transaction, transaction,

View File

@ -9,6 +9,7 @@ use crate::entry::create_ticks;
use crate::entry::next_entry_mut; use crate::entry::next_entry_mut;
use crate::entry::Entry; use crate::entry::Entry;
use crate::gossip_service::GossipService; use crate::gossip_service::GossipService;
use crate::leader_schedule_utils;
use crate::poh_recorder::PohRecorder; use crate::poh_recorder::PohRecorder;
use crate::poh_service::{PohService, PohServiceConfig}; use crate::poh_service::{PohService, PohServiceConfig};
use crate::rpc::JsonRpcConfig; use crate::rpc::JsonRpcConfig;
@ -36,6 +37,7 @@ use std::thread::JoinHandle;
use std::thread::{spawn, Result}; use std::thread::{spawn, Result};
use std::time::Duration; use std::time::Duration;
#[derive(Clone)]
pub struct FullnodeConfig { pub struct FullnodeConfig {
pub sigverify_disabled: bool, pub sigverify_disabled: bool,
pub voting_disabled: bool, pub voting_disabled: bool,
@ -106,8 +108,14 @@ impl Fullnode {
bank.tick_height(), bank.tick_height(),
bank.last_blockhash(), bank.last_blockhash(),
); );
let (poh_recorder, entry_receiver) = let (poh_recorder, entry_receiver) = PohRecorder::new(
PohRecorder::new(bank.tick_height(), bank.last_blockhash()); bank.tick_height(),
bank.last_blockhash(),
bank.slot(),
leader_schedule_utils::next_leader_slot(&id, bank.slot(), &bank),
bank.ticks_per_slot(),
&id,
);
let poh_recorder = Arc::new(Mutex::new(poh_recorder)); let poh_recorder = Arc::new(Mutex::new(poh_recorder));
let poh_service = PohService::new(poh_recorder.clone(), &config.tick_config, &exit); let poh_service = PohService::new(poh_recorder.clone(), &config.tick_config, &exit);
poh_recorder.lock().unwrap().clear_bank_signal = poh_recorder.lock().unwrap().clear_bank_signal =
@ -258,15 +266,6 @@ impl Fullnode {
// Used for notifying many nodes in parallel to exit // Used for notifying many nodes in parallel to exit
pub fn exit(&self) { pub fn exit(&self) {
self.exit.store(true, Ordering::Relaxed); self.exit.store(true, Ordering::Relaxed);
// Need to force the poh_recorder to drop the WorkingBank,
// which contains the channel to BroadcastStage. This should be
// sufficient as long as no other rotations are happening that
// can cause the Tpu to restart a BankingStage and reset a
// WorkingBank in poh_recorder. It follows no other rotations can be
// in motion because exit()/close() are only called by the run() loop
// which is the sole initiator of rotations.
self.poh_recorder.lock().unwrap().clear_bank();
} }
pub fn close(self) -> Result<()> { pub fn close(self) -> Result<()> {
@ -372,6 +371,31 @@ pub fn make_active_set_entries(
(entries, voting_keypair) (entries, voting_keypair)
} }
pub fn new_fullnode_for_tests() -> (Fullnode, ContactInfo, Keypair, String) {
use crate::blocktree::create_new_tmp_ledger;
use crate::cluster_info::Node;
let node_keypair = Arc::new(Keypair::new());
let node = Node::new_localhost_with_pubkey(&node_keypair.pubkey());
let contact_info = node.info.clone();
let (genesis_block, mint_keypair) = GenesisBlock::new_with_leader(10_000, &contact_info.id, 42);
let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_block);
let voting_keypair = Keypair::new();
let node = Fullnode::new(
node,
&node_keypair,
&ledger_path,
&voting_keypair.pubkey(),
voting_keypair,
None,
&FullnodeConfig::default(),
);
(node, contact_info, mint_keypair, ledger_path)
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;

View File

@ -67,7 +67,7 @@ pub fn discover(gossip_addr: &SocketAddr, num_nodes: usize) -> std::io::Result<V
while now.elapsed() < Duration::from_secs(30) { while now.elapsed() < Duration::from_secs(30) {
let rpc_peers = spy_ref.read().unwrap().rpc_peers(); let rpc_peers = spy_ref.read().unwrap().rpc_peers();
if rpc_peers.len() >= num_nodes { if rpc_peers.len() >= num_nodes {
trace!( info!(
"discover success in {}s...\n{}", "discover success in {}s...\n{}",
now.elapsed().as_secs(), now.elapsed().as_secs(),
spy_ref.read().unwrap().contact_info_trace() spy_ref.read().unwrap().contact_info_trace()

View File

@ -1,182 +0,0 @@
//! The `leader_confirmation_service` module implements the tools necessary
//! to generate a thread which regularly calculates the last confirmation times
//! observed by the leader
use crate::poh_recorder::PohRecorder;
use solana_metrics::{influxdb, submit};
use solana_runtime::bank::Bank;
use solana_sdk::timing;
use solana_vote_api::vote_state::VoteState;
use std::result;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use std::thread::sleep;
use std::thread::{Builder, JoinHandle};
use std::time::Duration;
/// Errors produced while computing leader confirmation.
#[derive(Debug, PartialEq, Eq)]
pub enum ConfirmationError {
    /// No slot has yet gathered votes from a 2/3 stake supermajority.
    NoValidSupermajority,
}
/// Interval (ms) between confirmation recomputations in the service loop.
pub const COMPUTE_CONFIRMATION_MS: u64 = 100;
/// Unit struct namespacing the confirmation computation and its service thread.
pub struct LeaderConfirmationService {}
impl LeaderConfirmationService {
    /// Scan all vote accounts and return the most recent timestamp at which a
    /// 2/3 supermajority of the total observed stake had voted on some slot.
    ///
    /// `last_valid_validator_timestamp` is the previously observed
    /// supermajority timestamp; when no supermajority is currently reachable
    /// it is used to report (via metrics) how long confirmation has stalled.
    ///
    /// Returns `Err(ConfirmationError::NoValidSupermajority)` when the staked
    /// votes do not reach the 2/3 threshold.
    fn get_last_supermajority_timestamp(
        bank: &Bank,
        last_valid_validator_timestamp: u64,
    ) -> result::Result<u64, ConfirmationError> {
        let mut total_stake = 0;
        let mut slots_and_stakes: Vec<(u64, u64)> = vec![];
        // Hold an accounts_db read lock as briefly as possible, just long enough to collect all
        // the vote states
        bank.vote_accounts().for_each(|(_, account)| {
            total_stake += account.lamports;
            let vote_state = VoteState::deserialize(&account.userdata).unwrap();
            // Only the most recent vote of each account contributes a
            // (slot, stake) pair to the supermajority calculation.
            if let Some(stake_and_state) = vote_state
                .votes
                .back()
                .map(|vote| (vote.slot, account.lamports))
            {
                slots_and_stakes.push(stake_and_state);
            }
        });
        // Integer 2/3 threshold of all stake observed above.
        let super_majority_stake = (2 * total_stake) / 3;
        if let Some(last_valid_validator_timestamp) =
            bank.get_confirmation_timestamp(slots_and_stakes, super_majority_stake)
        {
            return Ok(last_valid_validator_timestamp);
        }
        if last_valid_validator_timestamp != 0 {
            // No supermajority right now: report how long confirmation has
            // been stalled since the last good timestamp.
            // NOTE(review): assumes `now >= last_valid_validator_timestamp`;
            // a wall-clock step backwards would underflow — confirm the
            // timestamp source is monotonic enough for this.
            let now = timing::timestamp();
            submit(
                influxdb::Point::new(&"leader-confirmation")
                    .add_field(
                        "duration_ms",
                        influxdb::Value::Integer((now - last_valid_validator_timestamp) as i64),
                    )
                    .to_owned(),
            );
        }
        Err(ConfirmationError::NoValidSupermajority)
    }

    /// Recompute confirmation for `bank`; when a newer supermajority timestamp
    /// is found, update `last_valid_validator_timestamp` in place and submit
    /// the confirmation latency (ms) to metrics.
    pub fn compute_confirmation(bank: &Bank, last_valid_validator_timestamp: &mut u64) {
        if let Ok(super_majority_timestamp) =
            Self::get_last_supermajority_timestamp(bank, *last_valid_validator_timestamp)
        {
            let now = timing::timestamp();
            let confirmation_ms = now - super_majority_timestamp;
            *last_valid_validator_timestamp = super_majority_timestamp;
            submit(
                influxdb::Point::new(&"leader-confirmation")
                    .add_field(
                        "duration_ms",
                        influxdb::Value::Integer(confirmation_ms as i64),
                    )
                    .to_owned(),
            );
        }
    }

    /// Create a new LeaderConfirmationService for computing confirmation.
    ///
    /// Spawns a named thread that, until `exit` is set, polls the working bank
    /// currently held by `poh_recorder` every `COMPUTE_CONFIRMATION_MS` ms and
    /// recomputes confirmation against it.
    pub fn start(poh_recorder: &Arc<Mutex<PohRecorder>>, exit: Arc<AtomicBool>) -> JoinHandle<()> {
        let poh_recorder = poh_recorder.clone();
        Builder::new()
            .name("solana-leader-confirmation-service".to_string())
            .spawn(move || {
                let mut last_valid_validator_timestamp = 0;
                loop {
                    if exit.load(Ordering::Relaxed) {
                        break;
                    }
                    // dont hold this lock too long
                    let maybe_bank = poh_recorder.lock().unwrap().bank();
                    if let Some(ref bank) = maybe_bank {
                        Self::compute_confirmation(bank, &mut last_valid_validator_timestamp);
                    }
                    sleep(Duration::from_millis(COMPUTE_CONFIRMATION_MS));
                }
            })
            .unwrap()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::voting_keypair::tests::{new_vote_account, push_vote};
    use bincode::serialize;
    use solana_sdk::genesis_block::GenesisBlock;
    use solana_sdk::hash::hash;
    use solana_sdk::pubkey::Pubkey;
    use solana_sdk::signature::{Keypair, KeypairUtil};
    use solana_vote_api::vote_transaction::VoteTransaction;
    use std::sync::Arc;

    /// compute_confirmation must leave the timestamp untouched while fewer
    /// than 2/3 of the stake has voted, and set a positive timestamp once the
    /// supermajority threshold is crossed.
    #[test]
    fn test_compute_confirmation() {
        solana_logger::setup();
        let (genesis_block, mint_keypair) = GenesisBlock::new(1234);
        let mut tick_hash = genesis_block.hash();
        let mut bank = Arc::new(Bank::new(&genesis_block));
        // Move the bank up 10 slots
        for slot in 1..=10 {
            let max_tick_height = slot * bank.ticks_per_slot() - 1;
            // Register ticks until this slot is full, hashing forward so each
            // tick gets a distinct hash.
            while bank.tick_height() != max_tick_height {
                tick_hash = hash(&serialize(&tick_hash).unwrap());
                bank.register_tick(&tick_hash);
            }
            bank = Arc::new(Bank::new_from_parent(&bank, &Pubkey::default(), slot));
        }
        let blockhash = bank.last_blockhash();
        // Create a total of 10 vote accounts, each will have a balance of 1 (after giving 1 to
        // their vote account), for a total staking pool of 10 lamports.
        let vote_accounts: Vec<_> = (0..10)
            .map(|i| {
                // Create new validator to vote
                let validator_keypair = Arc::new(Keypair::new());
                let voting_keypair = Keypair::new();
                let voting_pubkey = voting_keypair.pubkey();
                // Give the validator some lamports
                bank.transfer(2, &mint_keypair, &validator_keypair.pubkey(), blockhash)
                    .unwrap();
                new_vote_account(&validator_keypair, &voting_pubkey, &bank, 1);
                // Only 6 of the 10 accounts vote up front — deliberately
                // below the 2/3 supermajority.
                if i < 6 {
                    push_vote(&voting_keypair, &bank, (i + 1) as u64);
                }
                (voting_keypair, validator_keypair)
            })
            .collect();
        // There isn't 2/3 consensus, so the bank's confirmation value should be the default
        let mut last_confirmation_time = 0;
        LeaderConfirmationService::compute_confirmation(&bank, &mut last_confirmation_time);
        assert_eq!(last_confirmation_time, 0);
        // Get another validator to vote, so we now have 2/3 consensus
        let voting_keypair = &vote_accounts[7].0;
        let vote_tx =
            VoteTransaction::new_vote(&voting_keypair.pubkey(), voting_keypair, 7, blockhash, 0);
        bank.process_transaction(&vote_tx).unwrap();
        LeaderConfirmationService::compute_confirmation(&bank, &mut last_confirmation_time);
        assert!(last_confirmation_time > 0);
    }
}

View File

@ -12,18 +12,29 @@ pub struct LeaderSchedule {
impl LeaderSchedule { impl LeaderSchedule {
// Note: passing in zero stakers will cause a panic. // Note: passing in zero stakers will cause a panic.
pub fn new(ids_and_stakes: &[(Pubkey, u64)], seed: [u8; 32], len: u64) -> Self { pub fn new(ids_and_stakes: &[(Pubkey, u64)], seed: [u8; 32], len: u64, repeat: u64) -> Self {
let (ids, stakes): (Vec<_>, Vec<_>) = ids_and_stakes.iter().cloned().unzip(); let (ids, stakes): (Vec<_>, Vec<_>) = ids_and_stakes.iter().cloned().unzip();
let rng = &mut ChaChaRng::from_seed(seed); let rng = &mut ChaChaRng::from_seed(seed);
let weighted_index = WeightedIndex::new(stakes).unwrap(); let weighted_index = WeightedIndex::new(stakes).unwrap();
let slot_leaders = (0..len).map(|_| ids[weighted_index.sample(rng)]).collect(); let mut current_node = Pubkey::default();
let slot_leaders = (0..len)
.map(|i| {
if i % repeat == 0 {
current_node = ids[weighted_index.sample(rng)];
current_node
} else {
current_node
}
})
.collect();
Self { slot_leaders } Self { slot_leaders }
} }
} }
impl Index<usize> for LeaderSchedule { impl Index<u64> for LeaderSchedule {
type Output = Pubkey; type Output = Pubkey;
fn index(&self, index: usize) -> &Pubkey { fn index(&self, index: u64) -> &Pubkey {
let index = index as usize;
&self.slot_leaders[index % self.slot_leaders.len()] &self.slot_leaders[index % self.slot_leaders.len()]
} }
} }
@ -56,10 +67,76 @@ mod tests {
let mut seed_bytes = [0u8; 32]; let mut seed_bytes = [0u8; 32];
seed_bytes.copy_from_slice(seed.as_ref()); seed_bytes.copy_from_slice(seed.as_ref());
let len = num_keys * 10; let len = num_keys * 10;
let leader_schedule = LeaderSchedule::new(&stakes, seed_bytes, len); let leader_schedule = LeaderSchedule::new(&stakes, seed_bytes, len, 1);
let leader_schedule2 = LeaderSchedule::new(&stakes, seed_bytes, len); let leader_schedule2 = LeaderSchedule::new(&stakes, seed_bytes, len, 1);
assert_eq!(leader_schedule.slot_leaders.len() as u64, len); assert_eq!(leader_schedule.slot_leaders.len() as u64, len);
// Check that the same schedule is reproducibly generated // Check that the same schedule is reproducibly generated
assert_eq!(leader_schedule, leader_schedule2); assert_eq!(leader_schedule, leader_schedule2);
} }
#[test]
fn test_repeated_leader_schedule() {
let num_keys = 10;
let stakes: Vec<_> = (0..num_keys)
.map(|i| (Keypair::new().pubkey(), i))
.collect();
let seed = Keypair::new().pubkey();
let mut seed_bytes = [0u8; 32];
seed_bytes.copy_from_slice(seed.as_ref());
let len = num_keys * 10;
let repeat = 8;
let leader_schedule = LeaderSchedule::new(&stakes, seed_bytes, len, repeat);
assert_eq!(leader_schedule.slot_leaders.len() as u64, len);
let mut leader_node = Pubkey::default();
for (i, node) in leader_schedule.slot_leaders.iter().enumerate() {
if i % repeat as usize == 0 {
leader_node = *node;
} else {
assert_eq!(leader_node, *node);
}
}
}
#[test]
fn test_repeated_leader_schedule_specific() {
let alice_pubkey = Keypair::new().pubkey();
let bob_pubkey = Keypair::new().pubkey();
let stakes = vec![(alice_pubkey, 2), (bob_pubkey, 1)];
let seed = Pubkey::default();
let mut seed_bytes = [0u8; 32];
seed_bytes.copy_from_slice(seed.as_ref());
let len = 8;
// What the schedule looks like without any repeats
let leaders1 = LeaderSchedule::new(&stakes, seed_bytes, len, 1).slot_leaders;
// What the schedule looks like with repeats
let leaders2 = LeaderSchedule::new(&stakes, seed_bytes, len, 2).slot_leaders;
assert_eq!(leaders1.len(), leaders2.len());
let leaders1_expected = vec![
alice_pubkey,
alice_pubkey,
alice_pubkey,
bob_pubkey,
alice_pubkey,
alice_pubkey,
alice_pubkey,
alice_pubkey,
];
let leaders2_expected = vec![
alice_pubkey,
alice_pubkey,
alice_pubkey,
alice_pubkey,
alice_pubkey,
alice_pubkey,
bob_pubkey,
bob_pubkey,
];
assert_eq!(leaders1, leaders1_expected);
assert_eq!(leaders2, leaders2_expected);
}
} }

View File

@ -2,6 +2,7 @@ use crate::leader_schedule::LeaderSchedule;
use crate::staking_utils; use crate::staking_utils;
use solana_runtime::bank::Bank; use solana_runtime::bank::Bank;
use solana_sdk::pubkey::Pubkey; use solana_sdk::pubkey::Pubkey;
use solana_sdk::timing::NUM_CONSECUTIVE_LEADER_SLOTS;
/// Return the leader schedule for the given epoch. /// Return the leader schedule for the given epoch.
fn leader_schedule(epoch_height: u64, bank: &Bank) -> Option<LeaderSchedule> { fn leader_schedule(epoch_height: u64, bank: &Bank) -> Option<LeaderSchedule> {
@ -10,7 +11,12 @@ fn leader_schedule(epoch_height: u64, bank: &Bank) -> Option<LeaderSchedule> {
seed[0..8].copy_from_slice(&epoch_height.to_le_bytes()); seed[0..8].copy_from_slice(&epoch_height.to_le_bytes());
let mut stakes: Vec<_> = stakes.into_iter().collect(); let mut stakes: Vec<_> = stakes.into_iter().collect();
sort_stakes(&mut stakes); sort_stakes(&mut stakes);
LeaderSchedule::new(&stakes, seed, bank.get_slots_in_epoch(epoch_height)) LeaderSchedule::new(
&stakes,
seed,
bank.get_slots_in_epoch(epoch_height),
NUM_CONSECUTIVE_LEADER_SLOTS,
)
}) })
} }
@ -34,7 +40,33 @@ fn sort_stakes(stakes: &mut Vec<(Pubkey, u64)>) {
pub fn slot_leader_at(slot: u64, bank: &Bank) -> Option<Pubkey> { pub fn slot_leader_at(slot: u64, bank: &Bank) -> Option<Pubkey> {
let (epoch, slot_index) = bank.get_epoch_and_slot_index(slot); let (epoch, slot_index) = bank.get_epoch_and_slot_index(slot);
leader_schedule(epoch, bank).map(|leader_schedule| leader_schedule[slot_index as usize]) leader_schedule(epoch, bank).map(|leader_schedule| leader_schedule[slot_index])
}
/// Return the next slot after the given current_slot that the given node will be leader
pub fn next_leader_slot(pubkey: &Pubkey, mut current_slot: u64, bank: &Bank) -> Option<u64> {
let (mut epoch, mut start_index) = bank.get_epoch_and_slot_index(current_slot + 1);
while let Some(leader_schedule) = leader_schedule(epoch, bank) {
// clippy thinks I should do this:
// for (i, <item>) in leader_schedule
// .iter()
// .enumerate()
// .take(bank.get_slots_in_epoch(epoch))
// .skip(from_slot_index + 1) {
//
// but leader_schedule doesn't implement Iter...
#[allow(clippy::needless_range_loop)]
for i in start_index..bank.get_slots_in_epoch(epoch) {
current_slot += 1;
if *pubkey == leader_schedule[i] {
return Some(current_slot);
}
}
epoch += 1;
start_index = 0;
}
None
} }
// Returns the number of ticks remaining from the specified tick_height to the end of the // Returns the number of ticks remaining from the specified tick_height to the end of the
@ -43,8 +75,8 @@ pub fn num_ticks_left_in_slot(bank: &Bank, tick_height: u64) -> u64 {
bank.ticks_per_slot() - tick_height % bank.ticks_per_slot() - 1 bank.ticks_per_slot() - tick_height % bank.ticks_per_slot() - 1
} }
pub fn tick_height_to_slot(bank: &Bank, tick_height: u64) -> u64 { pub fn tick_height_to_slot(ticks_per_slot: u64, tick_height: u64) -> u64 {
tick_height / bank.ticks_per_slot() tick_height / ticks_per_slot
} }
#[cfg(test)] #[cfg(test)]
@ -54,6 +86,40 @@ mod tests {
use solana_sdk::genesis_block::{GenesisBlock, BOOTSTRAP_LEADER_LAMPORTS}; use solana_sdk::genesis_block::{GenesisBlock, BOOTSTRAP_LEADER_LAMPORTS};
use solana_sdk::signature::{Keypair, KeypairUtil}; use solana_sdk::signature::{Keypair, KeypairUtil};
#[test]
fn test_next_leader_slot() {
let pubkey = Keypair::new().pubkey();
let mut genesis_block = GenesisBlock::new_with_leader(
BOOTSTRAP_LEADER_LAMPORTS,
&pubkey,
BOOTSTRAP_LEADER_LAMPORTS,
)
.0;
genesis_block.epoch_warmup = false;
let bank = Bank::new(&genesis_block);
assert_eq!(slot_leader_at(bank.slot(), &bank).unwrap(), pubkey);
assert_eq!(next_leader_slot(&pubkey, 0, &bank), Some(1));
assert_eq!(next_leader_slot(&pubkey, 1, &bank), Some(2));
assert_eq!(
next_leader_slot(
&pubkey,
2 * genesis_block.slots_per_epoch - 1, // no schedule generated for epoch 2
&bank
),
None
);
assert_eq!(
next_leader_slot(
&Keypair::new().pubkey(), // not in leader_schedule
0,
&bank
),
None
);
}
#[test] #[test]
fn test_leader_schedule_via_bank() { fn test_leader_schedule_via_bank() {
let pubkey = Keypair::new().pubkey(); let pubkey = Keypair::new().pubkey();
@ -66,8 +132,12 @@ mod tests {
let ids_and_stakes: Vec<_> = staking_utils::delegated_stakes(&bank).into_iter().collect(); let ids_and_stakes: Vec<_> = staking_utils::delegated_stakes(&bank).into_iter().collect();
let seed = [0u8; 32]; let seed = [0u8; 32];
let leader_schedule = let leader_schedule = LeaderSchedule::new(
LeaderSchedule::new(&ids_and_stakes, seed, genesis_block.slots_per_epoch); &ids_and_stakes,
seed,
genesis_block.slots_per_epoch,
NUM_CONSECUTIVE_LEADER_SLOTS,
);
assert_eq!(leader_schedule[0], pubkey); assert_eq!(leader_schedule[0], pubkey);
assert_eq!(leader_schedule[1], pubkey); assert_eq!(leader_schedule[1], pubkey);

View File

@ -3,7 +3,6 @@
//! [Fullnode](server/struct.Fullnode.html)) as well as hooks to GPU implementations of its most //! [Fullnode](server/struct.Fullnode.html)) as well as hooks to GPU implementations of its most
//! paralellizable components (i.e. [SigVerify](sigverify/index.html)). It also includes //! paralellizable components (i.e. [SigVerify](sigverify/index.html)). It also includes
//! command-line tools to spin up fullnodes and a Rust library //! command-line tools to spin up fullnodes and a Rust library
//! (see [ThinClient](thin_client/struct.ThinClient.html)) to interact with them.
//! //!
pub mod bank_forks; pub mod bank_forks;
@ -14,7 +13,6 @@ pub mod broadcast_stage;
pub mod chacha; pub mod chacha;
#[cfg(all(feature = "chacha", feature = "cuda"))] #[cfg(all(feature = "chacha", feature = "cuda"))]
pub mod chacha_cuda; pub mod chacha_cuda;
pub mod client;
pub mod cluster_info_vote_listener; pub mod cluster_info_vote_listener;
#[macro_use] #[macro_use]
pub mod contact_info; pub mod contact_info;
@ -29,6 +27,7 @@ pub mod blocktree;
pub mod blockstream; pub mod blockstream;
pub mod blockstream_service; pub mod blockstream_service;
pub mod blocktree_processor; pub mod blocktree_processor;
pub mod cluster;
pub mod cluster_info; pub mod cluster_info;
pub mod cluster_tests; pub mod cluster_tests;
pub mod db_window; pub mod db_window;
@ -41,11 +40,11 @@ pub mod gen_keys;
pub mod gossip_service; pub mod gossip_service;
#[cfg(feature = "kvstore")] #[cfg(feature = "kvstore")]
pub mod kvstore; pub mod kvstore;
pub mod leader_confirmation_service;
pub mod leader_schedule; pub mod leader_schedule;
pub mod leader_schedule_utils; pub mod leader_schedule_utils;
pub mod local_cluster; pub mod local_cluster;
pub mod local_vote_signer_service; pub mod local_vote_signer_service;
pub mod locktower;
pub mod packet; pub mod packet;
pub mod poh; pub mod poh;
pub mod poh_recorder; pub mod poh_recorder;
@ -57,10 +56,8 @@ pub mod replicator;
pub mod result; pub mod result;
pub mod retransmit_stage; pub mod retransmit_stage;
pub mod rpc; pub mod rpc;
pub mod rpc_mock;
pub mod rpc_pubsub; pub mod rpc_pubsub;
pub mod rpc_pubsub_service; pub mod rpc_pubsub_service;
pub mod rpc_request;
pub mod rpc_service; pub mod rpc_service;
pub mod rpc_status; pub mod rpc_status;
pub mod rpc_subscriptions; pub mod rpc_subscriptions;
@ -71,7 +68,6 @@ pub mod staking_utils;
pub mod storage_stage; pub mod storage_stage;
pub mod streamer; pub mod streamer;
pub mod test_tx; pub mod test_tx;
pub mod thin_client;
pub mod tpu; pub mod tpu;
pub mod tvu; pub mod tvu;
pub mod voting_keypair; pub mod voting_keypair;

View File

@ -1,29 +1,47 @@
use crate::blocktree::{create_new_tmp_ledger, tmp_copy_blocktree}; use crate::blocktree::{create_new_tmp_ledger, tmp_copy_blocktree};
use crate::client::mk_client; use crate::cluster::Cluster;
use crate::cluster_info::Node; use crate::cluster_info::{Node, FULLNODE_PORT_RANGE};
use crate::contact_info::ContactInfo; use crate::contact_info::ContactInfo;
use crate::fullnode::{Fullnode, FullnodeConfig}; use crate::fullnode::{Fullnode, FullnodeConfig};
use crate::gossip_service::discover; use crate::gossip_service::discover;
use crate::service::Service; use crate::service::Service;
use crate::thin_client::retry_get_balance; use solana_client::client::create_client;
use crate::thin_client::ThinClient; use solana_client::thin_client::{retry_get_balance, ThinClient};
use solana_sdk::genesis_block::GenesisBlock; use solana_sdk::genesis_block::GenesisBlock;
use solana_sdk::pubkey::Pubkey; use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil}; use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::system_transaction::SystemTransaction; use solana_sdk::system_transaction::SystemTransaction;
use solana_sdk::timing::DEFAULT_SLOTS_PER_EPOCH;
use solana_sdk::timing::DEFAULT_TICKS_PER_SLOT;
use solana_vote_api::vote_state::VoteState; use solana_vote_api::vote_state::VoteState;
use solana_vote_api::vote_transaction::VoteTransaction; use solana_vote_api::vote_transaction::VoteTransaction;
use std::collections::HashMap;
use std::fs::remove_dir_all; use std::fs::remove_dir_all;
use std::io::{Error, ErrorKind, Result}; use std::io::{Error, ErrorKind, Result};
use std::sync::Arc; use std::sync::Arc;
/// Per-node bookkeeping retained by LocalCluster so a fullnode can be
/// restarted later with the same identity and ledger.
pub struct FullnodeInfo {
    // Node identity keypair, shared with the running Fullnode.
    pub keypair: Arc<Keypair>,
    // Filesystem path of this node's ledger; removed on cluster close.
    pub ledger_path: String,
}
impl FullnodeInfo {
    /// Bundle a node's identity keypair with its ledger path.
    fn new(keypair: Arc<Keypair>, ledger_path: String) -> Self {
        Self {
            keypair,
            ledger_path,
        }
    }
}
pub struct LocalCluster { pub struct LocalCluster {
/// Keypair with funding to particpiate in the network /// Keypair with funding to particpiate in the network
pub funding_keypair: Keypair, pub funding_keypair: Keypair,
pub fullnode_config: FullnodeConfig,
/// Entry point from which the rest of the network can be discovered /// Entry point from which the rest of the network can be discovered
pub entry_point_info: ContactInfo, pub entry_point_info: ContactInfo,
pub ledger_paths: Vec<String>, pub fullnodes: HashMap<Pubkey, Fullnode>,
fullnodes: Vec<Fullnode>, pub fullnode_infos: HashMap<Pubkey, FullnodeInfo>,
} }
impl LocalCluster { impl LocalCluster {
@ -36,17 +54,32 @@ impl LocalCluster {
node_stakes: &[u64], node_stakes: &[u64],
cluster_lamports: u64, cluster_lamports: u64,
fullnode_config: &FullnodeConfig, fullnode_config: &FullnodeConfig,
) -> Self {
Self::new_with_tick_config(
node_stakes,
cluster_lamports,
fullnode_config,
DEFAULT_TICKS_PER_SLOT,
DEFAULT_SLOTS_PER_EPOCH,
)
}
pub fn new_with_tick_config(
node_stakes: &[u64],
cluster_lamports: u64,
fullnode_config: &FullnodeConfig,
ticks_per_slot: u64,
slots_per_epoch: u64,
) -> Self { ) -> Self {
let leader_keypair = Arc::new(Keypair::new()); let leader_keypair = Arc::new(Keypair::new());
let leader_pubkey = leader_keypair.pubkey(); let leader_pubkey = leader_keypair.pubkey();
let leader_node = Node::new_localhost_with_pubkey(&leader_keypair.pubkey()); let leader_node = Node::new_localhost_with_pubkey(&leader_keypair.pubkey());
let (genesis_block, mint_keypair) = let (mut genesis_block, mint_keypair) =
GenesisBlock::new_with_leader(cluster_lamports, &leader_pubkey, node_stakes[0]); GenesisBlock::new_with_leader(cluster_lamports, &leader_pubkey, node_stakes[0]);
genesis_block.ticks_per_slot = ticks_per_slot;
genesis_block.slots_per_epoch = slots_per_epoch;
let (genesis_ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_block); let (genesis_ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_block);
let leader_ledger_path = tmp_copy_blocktree!(&genesis_ledger_path); let leader_ledger_path = tmp_copy_blocktree!(&genesis_ledger_path);
let mut ledger_paths = vec![];
ledger_paths.push(genesis_ledger_path.clone());
ledger_paths.push(leader_ledger_path.clone());
let voting_keypair = Keypair::new(); let voting_keypair = Keypair::new();
let leader_contact_info = leader_node.info.clone(); let leader_contact_info = leader_node.info.clone();
let leader_server = Fullnode::new( let leader_server = Fullnode::new(
@ -58,8 +91,18 @@ impl LocalCluster {
None, None,
fullnode_config, fullnode_config,
); );
let mut fullnodes = vec![leader_server]; let mut fullnodes = HashMap::new();
let mut client = mk_client(&leader_contact_info); let mut fullnode_infos = HashMap::new();
fullnodes.insert(leader_pubkey, leader_server);
fullnode_infos.insert(
leader_pubkey,
FullnodeInfo::new(leader_keypair.clone(), leader_ledger_path),
);
let mut client = create_client(
leader_contact_info.client_facing_addr(),
FULLNODE_PORT_RANGE,
);
for stake in &node_stakes[1..] { for stake in &node_stakes[1..] {
// Must have enough tokens to fund vote account and set delegate // Must have enough tokens to fund vote account and set delegate
assert!(*stake > 2); assert!(*stake > 2);
@ -68,7 +111,6 @@ impl LocalCluster {
let validator_pubkey = validator_keypair.pubkey(); let validator_pubkey = validator_keypair.pubkey();
let validator_node = Node::new_localhost_with_pubkey(&validator_keypair.pubkey()); let validator_node = Node::new_localhost_with_pubkey(&validator_keypair.pubkey());
let ledger_path = tmp_copy_blocktree!(&genesis_ledger_path); let ledger_path = tmp_copy_blocktree!(&genesis_ledger_path);
ledger_paths.push(ledger_path.clone());
// Send each validator some lamports to vote // Send each validator some lamports to vote
let validator_balance = let validator_balance =
@ -94,34 +136,40 @@ impl LocalCluster {
Some(&leader_contact_info), Some(&leader_contact_info),
fullnode_config, fullnode_config,
); );
fullnodes.push(validator_server); fullnodes.insert(validator_keypair.pubkey(), validator_server);
fullnode_infos.insert(
validator_keypair.pubkey(),
FullnodeInfo::new(validator_keypair.clone(), ledger_path),
);
} }
discover(&leader_contact_info.gossip, node_stakes.len()).unwrap(); discover(&leader_contact_info.gossip, node_stakes.len()).unwrap();
Self { Self {
funding_keypair: mint_keypair, funding_keypair: mint_keypair,
entry_point_info: leader_contact_info, entry_point_info: leader_contact_info,
fullnodes, fullnodes,
ledger_paths, fullnode_config: fullnode_config.clone(),
fullnode_infos,
} }
} }
pub fn exit(&self) { pub fn exit(&self) {
for node in &self.fullnodes { for node in self.fullnodes.values() {
node.exit(); node.exit();
} }
} }
pub fn close_preserve_ledgers(&mut self) { pub fn close_preserve_ledgers(&mut self) {
self.exit(); self.exit();
while let Some(node) = self.fullnodes.pop() { for (_, node) in self.fullnodes.drain() {
node.join().unwrap(); node.join().unwrap();
} }
} }
fn close(&mut self) { fn close(&mut self) {
self.close_preserve_ledgers(); self.close_preserve_ledgers();
for path in &self.ledger_paths { for info in self.fullnode_infos.values() {
remove_dir_all(path).unwrap_or_else(|_| panic!("Unable to remove {}", path)); remove_dir_all(&info.ledger_path)
.unwrap_or_else(|_| panic!("Unable to remove {}", info.ledger_path));
} }
} }
@ -185,7 +233,7 @@ impl LocalCluster {
} }
info!("Checking for vote account registration"); info!("Checking for vote account registration");
let vote_account_user_data = client.get_account_userdata(&vote_account_pubkey); let vote_account_user_data = client.get_account_data(&vote_account_pubkey);
if let Ok(Some(vote_account_user_data)) = vote_account_user_data { if let Ok(Some(vote_account_user_data)) = vote_account_user_data {
if let Ok(vote_state) = VoteState::deserialize(&vote_account_user_data) { if let Ok(vote_state) = VoteState::deserialize(&vote_account_user_data) {
if vote_state.delegate_id == delegate_id { if vote_state.delegate_id == delegate_id {
@ -201,6 +249,38 @@ impl LocalCluster {
} }
} }
impl Cluster for LocalCluster {
    /// Stop the fullnode identified by `pubkey`, then start a fresh Fullnode
    /// with the same identity keypair and ledger but a brand-new voting
    /// keypair.
    // NOTE(review): panics if `pubkey` is not a current cluster member
    // (both `remove(...).unwrap()` and the `fullnode_infos` index) — confirm
    // callers only restart known nodes.
    fn restart_node(&mut self, pubkey: Pubkey) {
        // Shut down the fullnode
        let node = self.fullnodes.remove(&pubkey).unwrap();
        node.exit();
        node.join().unwrap();

        // Restart the node
        let fullnode_info = &self.fullnode_infos[&pubkey];
        let node = Node::new_localhost_with_pubkey(&fullnode_info.keypair.pubkey());
        // Restarting allocates new localhost sockets; if this node was the
        // cluster entry point, re-advertise its fresh contact info.
        if pubkey == self.entry_point_info.id {
            self.entry_point_info = node.info.clone();
        }
        let new_voting_keypair = Keypair::new();
        let restarted_node = Fullnode::new(
            node,
            &fullnode_info.keypair,
            &fullnode_info.ledger_path,
            &new_voting_keypair.pubkey(),
            new_voting_keypair,
            None,
            &self.fullnode_config,
        );
        self.fullnodes.insert(pubkey, restarted_node);
    }

    /// Return the identity pubkeys of all fullnodes currently running.
    fn get_node_ids(&self) -> Vec<Pubkey> {
        self.fullnodes.keys().cloned().collect()
    }
}
impl Drop for LocalCluster { impl Drop for LocalCluster {
fn drop(&mut self) { fn drop(&mut self) {
self.close(); self.close();
@ -222,6 +302,6 @@ mod test {
solana_logger::setup(); solana_logger::setup();
let mut fullnode_exit = FullnodeConfig::default(); let mut fullnode_exit = FullnodeConfig::default();
fullnode_exit.rpc_config.enable_fullnode_exit = true; fullnode_exit.rpc_config.enable_fullnode_exit = true;
let _cluster = LocalCluster::new_with_config(&[3], 100, &fullnode_exit); let _cluster = LocalCluster::new_with_tick_config(&[3], 100, &fullnode_exit, 16, 16);
} }
} }

693
core/src/locktower.rs Normal file
View File

@ -0,0 +1,693 @@
use crate::bank_forks::BankForks;
use crate::staking_utils;
use hashbrown::{HashMap, HashSet};
use solana_metrics::influxdb;
use solana_runtime::bank::Bank;
use solana_sdk::account::Account;
use solana_sdk::pubkey::Pubkey;
use solana_vote_api::vote_instruction::Vote;
use solana_vote_api::vote_state::{Lockout, VoteState, MAX_LOCKOUT_HISTORY};
/// How many votes back (depth in the lockout tower) the stake threshold is
/// checked before a new vote is allowed.
pub const VOTE_THRESHOLD_DEPTH: usize = 8;
/// Fraction of total stake that must back the fork at that depth.
pub const VOTE_THRESHOLD_SIZE: f64 = 2f64 / 3f64;
/// Snapshot of the stake distribution used for one epoch of lockout math.
#[derive(Default)]
pub struct EpochStakes {
    // Epoch this snapshot was taken for (new_from_bank stores the bank's
    // epoch here despite the field name).
    slot: u64,
    // Stake (lamports) per voting-account pubkey.
    stakes: HashMap<Pubkey, u64>,
    // Stake attributed to this node's own delegate id (0 if absent).
    self_staked: u64,
    // Sum of all entries in `stakes`.
    total_staked: u64,
    // This node's delegate identity.
    delegate_id: Pubkey,
}
/// Aggregated vote weight observed for a single slot.
#[derive(Default, Debug)]
pub struct StakeLockout {
    // Accumulated lockout contribution for the slot.
    lockout: u64,
    // Total stake (lamports) backing the slot.
    stake: u64,
}
/// Fork-selection state: this node's own vote lockouts plus the epoch stake
/// snapshot and threshold parameters that gate new votes.
#[derive(Default)]
pub struct Locktower {
    // Stake distribution for the epoch currently being evaluated.
    epoch_stakes: EpochStakes,
    // How many votes back check_vote_stake_threshold looks.
    threshold_depth: usize,
    // Fraction of total stake required at that depth.
    threshold_size: f64,
    // This node's vote state (the tower of lockouts).
    lockouts: VoteState,
}
impl EpochStakes {
    /// Build a snapshot from an explicit stake map.
    ///
    /// `slot` is the epoch the snapshot describes; `delegate_id` selects which
    /// entry of `stakes` counts as this node's own stake (0 if not present).
    pub fn new(slot: u64, stakes: HashMap<Pubkey, u64>, delegate_id: &Pubkey) -> Self {
        let total_staked = stakes.values().sum();
        let self_staked = *stakes.get(&delegate_id).unwrap_or(&0);
        Self {
            slot,
            stakes,
            total_staked,
            self_staked,
            delegate_id: *delegate_id,
        }
    }
    /// Single-account snapshot (default pubkey, epoch 0) for unit tests.
    pub fn new_for_tests(lamports: u64) -> Self {
        Self::new(
            0,
            vec![(Pubkey::default(), lamports)].into_iter().collect(),
            &Pubkey::default(),
        )
    }
    /// Build a snapshot from (pubkey, account) pairs; the first account's key
    /// is used as the delegate id.
    // NOTE(review): indexes accounts[0], so this panics on an empty slice —
    // confirm callers always supply at least one account.
    pub fn new_from_stake_accounts(slot: u64, accounts: &[(Pubkey, Account)]) -> Self {
        let stakes = accounts.iter().map(|(k, v)| (*k, v.lamports)).collect();
        Self::new(slot, stakes, &accounts[0].0)
    }
    /// Snapshot the vote-account balances of `bank`'s current epoch, with
    /// `my_id` as this node's delegate id.
    pub fn new_from_bank(bank: &Bank, my_id: &Pubkey) -> Self {
        let bank_epoch = bank.get_epoch_and_slot_index(bank.slot()).0;
        let stakes = staking_utils::vote_account_balances_at_epoch(bank, bank_epoch)
            .expect("voting require a bank with stakes");
        Self::new(bank_epoch, stakes, my_id)
    }
}
impl Locktower {
pub fn new_from_forks(bank_forks: &BankForks, my_id: &Pubkey) -> Self {
//TODO: which bank to start with?
let mut frozen_banks: Vec<_> = bank_forks.frozen_banks().values().cloned().collect();
frozen_banks.sort_by_key(|b| (b.parents().len(), b.slot()));
if let Some(bank) = frozen_banks.last() {
Self::new_from_bank(bank, my_id)
} else {
Self::default()
}
}
pub fn new_from_bank(bank: &Bank, my_id: &Pubkey) -> Self {
let current_epoch = bank.get_epoch_and_slot_index(bank.slot()).0;
let mut lockouts = VoteState::default();
if let Some(iter) = staking_utils::node_staked_accounts_at_epoch(bank, current_epoch) {
for (delegate_id, _, account) in iter {
if *delegate_id == *my_id {
let state = VoteState::deserialize(&account.data).expect("votes");
if lockouts.votes.len() < state.votes.len() {
//TODO: which state to init with?
lockouts = state;
}
}
}
}
let epoch_stakes = EpochStakes::new_from_bank(bank, my_id);
Self {
epoch_stakes,
threshold_depth: VOTE_THRESHOLD_DEPTH,
threshold_size: VOTE_THRESHOLD_SIZE,
lockouts,
}
}
pub fn new(epoch_stakes: EpochStakes, threshold_depth: usize, threshold_size: f64) -> Self {
Self {
epoch_stakes,
threshold_depth,
threshold_size,
lockouts: VoteState::default(),
}
}
pub fn collect_vote_lockouts<F>(
&self,
bank_slot: u64,
vote_accounts: F,
ancestors: &HashMap<u64, HashSet<u64>>,
) -> HashMap<u64, StakeLockout>
where
F: Iterator<Item = (Pubkey, Account)>,
{
let mut stake_lockouts = HashMap::new();
for (key, account) in vote_accounts {
let lamports: u64 = *self.epoch_stakes.stakes.get(&key).unwrap_or(&0);
if lamports == 0 {
continue;
}
let mut vote_state: VoteState = VoteState::deserialize(&account.data)
.expect("bank should always have valid VoteState data");
if key == self.epoch_stakes.delegate_id
|| vote_state.delegate_id == self.epoch_stakes.delegate_id
{
debug!("vote state {:?}", vote_state);
debug!(
"observed slot {}",
vote_state.nth_recent_vote(0).map(|v| v.slot).unwrap_or(0) as i64
);
debug!("observed root {}", vote_state.root_slot.unwrap_or(0) as i64);
solana_metrics::submit(
influxdb::Point::new("counter-locktower-observed")
.add_field(
"slot",
influxdb::Value::Integer(
vote_state.nth_recent_vote(0).map(|v| v.slot).unwrap_or(0) as i64,
),
)
.add_field(
"root",
influxdb::Value::Integer(vote_state.root_slot.unwrap_or(0) as i64),
)
.to_owned(),
);
}
let start_root = vote_state.root_slot;
vote_state.process_vote(Vote { slot: bank_slot });
for vote in &vote_state.votes {
Self::update_ancestor_lockouts(&mut stake_lockouts, &vote, ancestors);
}
if start_root != vote_state.root_slot {
if let Some(root) = start_root {
let vote = Lockout {
confirmation_count: MAX_LOCKOUT_HISTORY as u32,
slot: root,
};
trace!("ROOT: {}", vote.slot);
Self::update_ancestor_lockouts(&mut stake_lockouts, &vote, ancestors);
}
}
if let Some(root) = vote_state.root_slot {
let vote = Lockout {
confirmation_count: MAX_LOCKOUT_HISTORY as u32,
slot: root,
};
Self::update_ancestor_lockouts(&mut stake_lockouts, &vote, ancestors);
}
// each account hash a stake for all the forks in the active tree for this bank
Self::update_ancestor_stakes(&mut stake_lockouts, bank_slot, lamports, ancestors);
}
stake_lockouts
}
pub fn is_slot_confirmed(&self, slot: u64, lockouts: &HashMap<u64, StakeLockout>) -> bool {
lockouts
.get(&slot)
.map(|lockout| {
(lockout.stake as f64 / self.epoch_stakes.total_staked as f64) > self.threshold_size
})
.unwrap_or(false)
}
pub fn is_recent_epoch(&self, bank: &Bank) -> bool {
let bank_epoch = bank.get_epoch_and_slot_index(bank.slot()).0;
bank_epoch >= self.epoch_stakes.slot
}
pub fn update_epoch(&mut self, bank: &Bank) {
let bank_epoch = bank.get_epoch_and_slot_index(bank.slot()).0;
if bank_epoch != self.epoch_stakes.slot {
assert!(
bank_epoch > self.epoch_stakes.slot,
"epoch_stakes cannot move backwards"
);
self.epoch_stakes = EpochStakes::new_from_bank(bank, &self.epoch_stakes.delegate_id);
solana_metrics::submit(
influxdb::Point::new("counter-locktower-epoch")
.add_field(
"slot",
influxdb::Value::Integer(self.epoch_stakes.slot as i64),
)
.add_field(
"self_staked",
influxdb::Value::Integer(self.epoch_stakes.self_staked as i64),
)
.add_field(
"total_staked",
influxdb::Value::Integer(self.epoch_stakes.total_staked as i64),
)
.to_owned(),
);
}
}
pub fn record_vote(&mut self, slot: u64) -> Option<u64> {
    // Snapshot the root before applying the vote so we can tell whether
    // this vote advanced it.
    let previous_root = self.lockouts.root_slot;
    self.lockouts.process_vote(Vote { slot });
    solana_metrics::submit(
        influxdb::Point::new("counter-locktower-vote")
            .add_field("latest", influxdb::Value::Integer(slot as i64))
            .add_field(
                "root",
                influxdb::Value::Integer(self.lockouts.root_slot.unwrap_or(0) as i64),
            )
            .to_owned(),
    );
    // Report the new root only when this vote moved it forward; a changed
    // root is always Some, so the unwrap cannot fail.
    if self.lockouts.root_slot == previous_root {
        None
    } else {
        Some(self.lockouts.root_slot.unwrap())
    }
}
pub fn calculate_weight(&self, stake_lockouts: &HashMap<u64, StakeLockout>) -> u128 {
    // Fork weight is the stake-weighted lockout summed over every slot
    // strictly newer than the current root; rooted slots (and their
    // ancestors) no longer contribute.
    let root = self.lockouts.root_slot;
    stake_lockouts
        .iter()
        .filter(|&(slot, _)| root.map_or(true, |r| *slot > r))
        .map(|(_, stake_lockout)| {
            u128::from(stake_lockout.lockout) * u128::from(stake_lockout.stake)
        })
        .sum()
}
/// Returns true if this node's lockout tower already contains a vote for `slot`.
pub fn has_voted(&self, slot: u64) -> bool {
    // Idiomatic linear scan via `Iterator::any` instead of a manual
    // loop-with-early-return; behavior is unchanged.
    self.lockouts.votes.iter().any(|vote| vote.slot == slot)
}
pub fn is_locked_out(&self, slot: u64, descendants: &HashMap<u64, HashSet<u64>>) -> bool {
    // Simulate the vote on a scratch copy of the tower, then require that
    // every surviving lockout slot (and the root, if one exists) is an
    // ancestor of `slot`. Panics if a vote slot is missing from
    // `descendants`, same as the original indexing.
    let mut lockouts = self.lockouts.clone();
    lockouts.process_vote(Vote { slot });
    let conflicting_lockout = lockouts
        .votes
        .iter()
        .filter(|vote| vote.slot != slot)
        .any(|vote| !descendants[&vote.slot].contains(&slot));
    if conflicting_lockout {
        return true;
    }
    match lockouts.root_slot {
        Some(root) => !descendants[&root].contains(&slot),
        None => false,
    }
}
pub fn check_vote_stake_threshold(
    &self,
    slot: u64,
    stake_lockouts: &HashMap<u64, StakeLockout>,
) -> bool {
    // Simulate the vote, then check that the fork holding the vote at
    // `threshold_depth` has more than `threshold_size` of the epoch's total
    // stake behind it. A tower that is not yet that deep passes trivially;
    // a fork with no recorded stake fails.
    let mut lockouts = self.lockouts.clone();
    lockouts.process_vote(Vote { slot });
    lockouts
        .nth_recent_vote(self.threshold_depth)
        .map_or(true, |vote| {
            stake_lockouts.get(&vote.slot).map_or(false, |fork_stake| {
                (fork_stake.stake as f64 / self.epoch_stakes.total_staked as f64)
                    > self.threshold_size
            })
        })
}
/// Update lockouts for all the ancestors
///
/// A vote for a slot is implicitly a vote for its entire ancestry, so the
/// vote's lockout accrues to the slot and every one of its ancestors.
fn update_ancestor_lockouts(
    stake_lockouts: &mut HashMap<u64, StakeLockout>,
    vote: &Lockout,
    ancestors: &HashMap<u64, HashSet<u64>>,
) {
    let mut slot_with_ancestors = vec![vote.slot];
    // `if let` avoids the eagerly-built `unwrap_or(&HashSet::new())`
    // argument, which was constructed even when the lookup succeeded
    // (clippy: or_fun_call).
    if let Some(slot_ancestors) = ancestors.get(&vote.slot) {
        slot_with_ancestors.extend(slot_ancestors);
    }
    for slot in slot_with_ancestors {
        // `entry().or_default()` already yields `&mut StakeLockout`; the
        // original's extra `&mut` produced a redundant `&mut &mut` borrow.
        let entry = stake_lockouts.entry(slot).or_default();
        entry.lockout += vote.lockout();
    }
}
/// Update stake for all the ancestors.
/// Note, stake is the same for all the ancestor.
fn update_ancestor_stakes(
    stake_lockouts: &mut HashMap<u64, StakeLockout>,
    slot: u64,
    lamports: u64,
    ancestors: &HashMap<u64, HashSet<u64>>,
) {
    let mut slot_with_ancestors = vec![slot];
    // `if let` avoids building a throwaway empty HashSet as an
    // always-evaluated `unwrap_or` argument (clippy: or_fun_call).
    if let Some(slot_ancestors) = ancestors.get(&slot) {
        slot_with_ancestors.extend(slot_ancestors);
    }
    for slot in slot_with_ancestors {
        // Redundant `&mut` on the `entry().or_default()` reference removed.
        let entry = stake_lockouts.entry(slot).or_default();
        entry.stake += lamports;
    }
}
}
#[cfg(test)]
mod test {
    use super::*;
    use solana_sdk::signature::{Keypair, KeypairUtil};

    // Builds one vote account per (lamports, voted-slots) pair: each account
    // gets 1024 bytes of data, the given stake, a VoteState that has
    // processed the listed votes in order, and a freshly generated pubkey.
    fn gen_accounts(stake_votes: &[(u64, &[u64])]) -> Vec<(Pubkey, Account)> {
        let mut accounts = vec![];
        for (lamports, votes) in stake_votes {
            let mut account = Account::default();
            account.data = vec![0; 1024];
            account.lamports = *lamports;
            let mut vote_state = VoteState::default();
            for slot in *votes {
                vote_state.process_vote(Vote { slot: *slot });
            }
            vote_state
                .serialize(&mut account.data)
                .expect("serialize state");
            accounts.push((Keypair::new().pubkey(), account));
        }
        accounts
    }

    // Accounts absent from the epoch stakes contribute nothing: collecting
    // lockouts over them yields an empty map.
    #[test]
    fn test_collect_vote_lockouts_no_epoch_stakes() {
        let accounts = gen_accounts(&[(1, &[0])]);
        let epoch_stakes = EpochStakes::new_for_tests(2);
        let locktower = Locktower::new(epoch_stakes, 0, 0.67);
        let ancestors = vec![(1, vec![0].into_iter().collect()), (0, HashSet::new())]
            .into_iter()
            .collect();
        let staked_lockouts = locktower.collect_vote_lockouts(1, accounts.into_iter(), &ancestors);
        assert!(staked_lockouts.is_empty());
    }

    // Stake and lockout from multiple accounts voting on the same slot are
    // summed per slot.
    #[test]
    fn test_collect_vote_lockouts_sums() {
        //two accounts voting for slot 0 with 1 token staked
        let accounts = gen_accounts(&[(1, &[0]), (1, &[0])]);
        let epoch_stakes = EpochStakes::new_from_stake_accounts(0, &accounts);
        let locktower = Locktower::new(epoch_stakes, 0, 0.67);
        let ancestors = vec![(1, vec![0].into_iter().collect()), (0, HashSet::new())]
            .into_iter()
            .collect();
        let staked_lockouts = locktower.collect_vote_lockouts(1, accounts.into_iter(), &ancestors);
        assert_eq!(staked_lockouts[&0].stake, 2);
        assert_eq!(staked_lockouts[&0].lockout, 2 + 2 + 4 + 4);
    }

    // Voting MAX_LOCKOUT_HISTORY + 1 times roots slot 0; the rooted slot's
    // lockout must include the maximal lockout contributions.
    #[test]
    fn test_collect_vote_lockouts_root() {
        let votes: Vec<u64> = (0..MAX_LOCKOUT_HISTORY as u64).into_iter().collect();
        //two accounts voting for slot 0 with 1 token staked
        let accounts = gen_accounts(&[(1, &votes), (1, &votes)]);
        let epoch_stakes = EpochStakes::new_from_stake_accounts(0, &accounts);
        let mut locktower = Locktower::new(epoch_stakes, 0, 0.67);
        let mut ancestors = HashMap::new();
        for i in 0..(MAX_LOCKOUT_HISTORY + 1) {
            locktower.record_vote(i as u64);
            ancestors.insert(i as u64, (0..i as u64).into_iter().collect());
        }
        assert_eq!(locktower.lockouts.root_slot, Some(0));
        let staked_lockouts = locktower.collect_vote_lockouts(
            MAX_LOCKOUT_HISTORY as u64,
            accounts.into_iter(),
            &ancestors,
        );
        for i in 0..MAX_LOCKOUT_HISTORY {
            assert_eq!(staked_lockouts[&(i as u64)].stake, 2);
        }
        // should be the sum of all the weights for root
        assert!(staked_lockouts[&0].lockout > (2 * (1 << MAX_LOCKOUT_HISTORY)));
    }

    // Slots at or below the root are excluded from the weight sum.
    #[test]
    fn test_calculate_weight_skips_root() {
        let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 0, 0.67);
        locktower.lockouts.root_slot = Some(1);
        let stakes = vec![
            (
                0,
                StakeLockout {
                    stake: 1,
                    lockout: 8,
                },
            ),
            (
                1,
                StakeLockout {
                    stake: 1,
                    lockout: 8,
                },
            ),
        ]
        .into_iter()
        .collect();
        assert_eq!(locktower.calculate_weight(&stakes), 0u128);
    }

    // With no root, weight is stake * lockout for each entry.
    #[test]
    fn test_calculate_weight() {
        let locktower = Locktower::new(EpochStakes::new_for_tests(2), 0, 0.67);
        let stakes = vec![(
            0,
            StakeLockout {
                stake: 1,
                lockout: 8,
            },
        )]
        .into_iter()
        .collect();
        assert_eq!(locktower.calculate_weight(&stakes), 8u128);
    }

    // An empty tower is shallower than threshold_depth, so the threshold
    // check passes trivially.
    #[test]
    fn test_check_vote_threshold_without_votes() {
        let locktower = Locktower::new(EpochStakes::new_for_tests(2), 1, 0.67);
        let stakes = vec![(
            0,
            StakeLockout {
                stake: 1,
                lockout: 8,
            },
        )]
        .into_iter()
        .collect();
        assert!(locktower.check_vote_stake_threshold(0, &stakes));
    }

    // 1 of 2 total stake is below the 0.67 threshold: not confirmed.
    #[test]
    fn test_is_slot_confirmed_not_enough_stake_failure() {
        let locktower = Locktower::new(EpochStakes::new_for_tests(2), 1, 0.67);
        let stakes = vec![(
            0,
            StakeLockout {
                stake: 1,
                lockout: 8,
            },
        )]
        .into_iter()
        .collect();
        assert!(!locktower.is_slot_confirmed(0, &stakes));
    }

    // A slot with no stake entry at all is not confirmed.
    #[test]
    fn test_is_slot_confirmed_unknown_slot() {
        let locktower = Locktower::new(EpochStakes::new_for_tests(2), 1, 0.67);
        let stakes = HashMap::new();
        assert!(!locktower.is_slot_confirmed(0, &stakes));
    }

    // 2 of 2 total stake exceeds the threshold: confirmed.
    #[test]
    fn test_is_slot_confirmed_pass() {
        let locktower = Locktower::new(EpochStakes::new_for_tests(2), 1, 0.67);
        let stakes = vec![(
            0,
            StakeLockout {
                stake: 2,
                lockout: 8,
            },
        )]
        .into_iter()
        .collect();
        assert!(locktower.is_slot_confirmed(0, &stakes));
    }

    // With no prior votes and no root, nothing can lock us out.
    #[test]
    fn test_is_locked_out_empty() {
        let locktower = Locktower::new(EpochStakes::new_for_tests(2), 0, 0.67);
        let descendants = HashMap::new();
        assert!(!locktower.is_locked_out(0, &descendants));
    }

    // Voting on a descendant of the root is allowed.
    #[test]
    fn test_is_locked_out_root_slot_child_pass() {
        let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 0, 0.67);
        let descendants = vec![(0, vec![1].into_iter().collect())]
            .into_iter()
            .collect();
        locktower.lockouts.root_slot = Some(0);
        assert!(!locktower.is_locked_out(1, &descendants));
    }

    // Voting on a slot that is not a descendant of the root is locked out.
    #[test]
    fn test_is_locked_out_root_slot_sibling_fail() {
        let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 0, 0.67);
        let descendants = vec![(0, vec![1].into_iter().collect())]
            .into_iter()
            .collect();
        locktower.lockouts.root_slot = Some(0);
        assert!(locktower.is_locked_out(2, &descendants));
    }

    // has_voted reflects exactly the recorded slots.
    #[test]
    fn test_check_already_voted() {
        let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 0, 0.67);
        locktower.record_vote(0);
        assert!(locktower.has_voted(0));
        assert!(!locktower.has_voted(1));
    }

    // Re-voting an older slot after a newer vote is a lockout violation.
    #[test]
    fn test_is_locked_out_double_vote() {
        let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 0, 0.67);
        let descendants = vec![(0, vec![1].into_iter().collect()), (1, HashSet::new())]
            .into_iter()
            .collect();
        locktower.record_vote(0);
        locktower.record_vote(1);
        assert!(locktower.is_locked_out(0, &descendants));
    }

    // Voting on a direct descendant of the last vote is allowed.
    #[test]
    fn test_is_locked_out_child() {
        let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 0, 0.67);
        let descendants = vec![(0, vec![1].into_iter().collect())]
            .into_iter()
            .collect();
        locktower.record_vote(0);
        assert!(!locktower.is_locked_out(1, &descendants));
    }

    // Voting on a sibling of the last vote is locked out.
    #[test]
    fn test_is_locked_out_sibling() {
        let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 0, 0.67);
        let descendants = vec![
            (0, vec![1, 2].into_iter().collect()),
            (1, HashSet::new()),
            (2, HashSet::new()),
        ]
        .into_iter()
        .collect();
        locktower.record_vote(0);
        locktower.record_vote(1);
        assert!(locktower.is_locked_out(2, &descendants));
    }

    // An expired vote (slot 1) is popped when voting slot 4; the remaining
    // tower is [0 (conf 2), 4 (conf 1)].
    #[test]
    fn test_is_locked_out_last_vote_expired() {
        let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 0, 0.67);
        let descendants = vec![(0, vec![1, 4].into_iter().collect()), (1, HashSet::new())]
            .into_iter()
            .collect();
        locktower.record_vote(0);
        locktower.record_vote(1);
        assert!(!locktower.is_locked_out(4, &descendants));
        locktower.record_vote(4);
        assert_eq!(locktower.lockouts.votes[0].slot, 0);
        assert_eq!(locktower.lockouts.votes[0].confirmation_count, 2);
        assert_eq!(locktower.lockouts.votes[1].slot, 4);
        assert_eq!(locktower.lockouts.votes[1].confirmation_count, 1);
    }

    // Fork at threshold depth has 1 of 2 stake: below 0.67, vote rejected.
    #[test]
    fn test_check_vote_threshold_below_threshold() {
        let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 1, 0.67);
        let stakes = vec![(
            0,
            StakeLockout {
                stake: 1,
                lockout: 8,
            },
        )]
        .into_iter()
        .collect();
        locktower.record_vote(0);
        assert!(!locktower.check_vote_stake_threshold(1, &stakes));
    }

    // Fork at threshold depth has 2 of 2 stake: above 0.67, vote allowed.
    #[test]
    fn test_check_vote_threshold_above_threshold() {
        let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 1, 0.67);
        let stakes = vec![(
            0,
            StakeLockout {
                stake: 2,
                lockout: 8,
            },
        )]
        .into_iter()
        .collect();
        locktower.record_vote(0);
        assert!(locktower.check_vote_stake_threshold(1, &stakes));
    }

    // Voting slot 6 expires votes 1 and 2; the threshold-depth vote is then
    // slot 0, which carries enough stake.
    #[test]
    fn test_check_vote_threshold_above_threshold_after_pop() {
        let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 1, 0.67);
        let stakes = vec![(
            0,
            StakeLockout {
                stake: 2,
                lockout: 8,
            },
        )]
        .into_iter()
        .collect();
        locktower.record_vote(0);
        locktower.record_vote(1);
        locktower.record_vote(2);
        assert!(locktower.check_vote_stake_threshold(6, &stakes));
    }

    // No stake recorded for the threshold-depth fork: vote rejected.
    #[test]
    fn test_check_vote_threshold_above_threshold_no_stake() {
        let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 1, 0.67);
        let stakes = HashMap::new();
        locktower.record_vote(0);
        assert!(!locktower.check_vote_stake_threshold(1, &stakes));
    }

    // A vote's lockout propagates to the voted slot and every ancestor.
    #[test]
    fn test_lockout_is_updated_for_entire_branch() {
        let mut stake_lockouts = HashMap::new();
        let vote = Lockout {
            slot: 2,
            confirmation_count: 1,
        };
        let set: HashSet<u64> = vec![0u64, 1u64].into_iter().collect();
        let mut ancestors = HashMap::new();
        ancestors.insert(2, set);
        let set: HashSet<u64> = vec![0u64].into_iter().collect();
        ancestors.insert(1, set);
        Locktower::update_ancestor_lockouts(&mut stake_lockouts, &vote, &ancestors);
        assert_eq!(stake_lockouts[&0].lockout, 2);
        assert_eq!(stake_lockouts[&1].lockout, 2);
        assert_eq!(stake_lockouts[&2].lockout, 2);
    }

    // A later vote on slot 1 only adds lockout to slot 1 and below, leaving
    // slot 2's lockout unchanged.
    #[test]
    fn test_lockout_is_updated_for_slot_or_lower() {
        let mut stake_lockouts = HashMap::new();
        let set: HashSet<u64> = vec![0u64, 1u64].into_iter().collect();
        let mut ancestors = HashMap::new();
        ancestors.insert(2, set);
        let set: HashSet<u64> = vec![0u64].into_iter().collect();
        ancestors.insert(1, set);
        let vote = Lockout {
            slot: 2,
            confirmation_count: 1,
        };
        Locktower::update_ancestor_lockouts(&mut stake_lockouts, &vote, &ancestors);
        let vote = Lockout {
            slot: 1,
            confirmation_count: 2,
        };
        Locktower::update_ancestor_lockouts(&mut stake_lockouts, &vote, &ancestors);
        assert_eq!(stake_lockouts[&0].lockout, 2 + 4);
        assert_eq!(stake_lockouts[&1].lockout, 2 + 4);
        assert_eq!(stake_lockouts[&2].lockout, 2);
    }

    // An account's stake propagates unchanged to the slot and all ancestors.
    #[test]
    fn test_stake_is_updated_for_entire_branch() {
        let mut stake_lockouts = HashMap::new();
        let mut account = Account::default();
        account.lamports = 1;
        let set: HashSet<u64> = vec![0u64, 1u64].into_iter().collect();
        let ancestors: HashMap<u64, HashSet<u64>> = [(2u64, set)].into_iter().cloned().collect();
        Locktower::update_ancestor_stakes(&mut stake_lockouts, 2, account.lamports, &ancestors);
        assert_eq!(stake_lockouts[&0].stake, 1);
        assert_eq!(stake_lockouts[&1].stake, 1);
        assert_eq!(stake_lockouts[&2].stake, 1);
    }
}

View File

@ -11,14 +11,18 @@
//! * recorded entry must be >= WorkingBank::min_tick_height && entry must be < WorkingBank::man_tick_height //! * recorded entry must be >= WorkingBank::min_tick_height && entry must be < WorkingBank::man_tick_height
//! //!
use crate::entry::Entry; use crate::entry::Entry;
use crate::leader_schedule_utils;
use crate::poh::Poh; use crate::poh::Poh;
use crate::result::{Error, Result}; use crate::result::{Error, Result};
use solana_runtime::bank::Bank; use solana_runtime::bank::Bank;
use solana_sdk::hash::Hash; use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::transaction::Transaction; use solana_sdk::transaction::Transaction;
use std::sync::mpsc::{channel, Receiver, Sender, SyncSender}; use std::sync::mpsc::{channel, Receiver, Sender, SyncSender};
use std::sync::Arc; use std::sync::Arc;
const MAX_LAST_LEADER_GRACE_TICKS_FACTOR: u64 = 2;
#[derive(Debug, PartialEq, Eq, Clone)] #[derive(Debug, PartialEq, Eq, Clone)]
pub enum PohRecorderError { pub enum PohRecorderError {
InvalidCallingObject, InvalidCallingObject,
@ -37,15 +41,32 @@ pub struct WorkingBank {
pub struct PohRecorder { pub struct PohRecorder {
pub poh: Poh, pub poh: Poh,
pub clear_bank_signal: Option<SyncSender<bool>>,
start_slot: u64,
start_tick: u64,
tick_cache: Vec<(Entry, u64)>, tick_cache: Vec<(Entry, u64)>,
working_bank: Option<WorkingBank>, working_bank: Option<WorkingBank>,
sender: Sender<WorkingBankEntries>, sender: Sender<WorkingBankEntries>,
pub clear_bank_signal: Option<SyncSender<bool>>, start_leader_at_tick: Option<u64>,
last_leader_tick: Option<u64>,
max_last_leader_grace_ticks: u64,
id: Pubkey,
} }
impl PohRecorder { impl PohRecorder {
pub fn clear_bank(&mut self) { pub fn clear_bank(&mut self) {
self.working_bank = None; if let Some(working_bank) = self.working_bank.take() {
let bank = working_bank.bank;
let next_leader_slot =
leader_schedule_utils::next_leader_slot(&self.id, bank.slot(), &bank);
let (start_leader_at_tick, last_leader_tick) = Self::compute_leader_slot_ticks(
&next_leader_slot,
bank.ticks_per_slot(),
self.max_last_leader_grace_ticks,
);
self.start_leader_at_tick = start_leader_at_tick;
self.last_leader_tick = last_leader_tick;
}
if let Some(ref signal) = self.clear_bank_signal { if let Some(ref signal) = self.clear_bank_signal {
let _ = signal.try_send(true); let _ = signal.try_send(true);
} }
@ -57,35 +78,94 @@ impl PohRecorder {
self.poh.hash(); self.poh.hash();
} }
pub fn start_slot(&self) -> u64 {
self.start_slot
}
pub fn bank(&self) -> Option<Arc<Bank>> { pub fn bank(&self) -> Option<Arc<Bank>> {
self.working_bank.clone().map(|w| w.bank) self.working_bank.clone().map(|w| w.bank)
} }
pub fn tick_height(&self) -> u64 { pub fn tick_height(&self) -> u64 {
self.poh.tick_height self.poh.tick_height
} }
// synchronize PoH with a bank
pub fn reset(&mut self, tick_height: u64, blockhash: Hash) { // returns if leader tick has reached, and how many grace ticks were afforded
self.clear_bank(); pub fn reached_leader_tick(&self) -> (bool, u64) {
let existing = self.tick_cache.iter().any(|(entry, entry_tick_height)| { self.start_leader_at_tick
if entry.hash == blockhash { .map(|target_tick| {
assert_eq!(*entry_tick_height, tick_height); debug!(
} "Current tick {}, start tick {} target {}, grace {}",
entry.hash == blockhash self.tick_height(),
}); self.start_tick,
if existing { target_tick,
info!( self.max_last_leader_grace_ticks
"reset skipped for: {},{}", );
self.poh.hash, self.poh.tick_height
let leader_ideal_start_tick =
target_tick.saturating_sub(self.max_last_leader_grace_ticks);
// Is the current tick in the same slot as the target tick?
// Check if either grace period has expired,
// or target tick is = grace period (i.e. poh recorder was just reset)
if self.tick_height() <= self.last_leader_tick.unwrap_or(0)
&& (self.tick_height() >= target_tick
|| self.max_last_leader_grace_ticks
>= target_tick.saturating_sub(self.start_tick))
{
return (
true,
self.tick_height().saturating_sub(leader_ideal_start_tick),
); );
return;
} }
(false, 0)
})
.unwrap_or((false, 0))
}
fn compute_leader_slot_ticks(
next_leader_slot: &Option<u64>,
ticks_per_slot: u64,
grace_ticks: u64,
) -> (Option<u64>, Option<u64>) {
next_leader_slot
.map(|slot| {
(
Some(slot * ticks_per_slot + grace_ticks),
Some((slot + 1) * ticks_per_slot - 1),
)
})
.unwrap_or((None, None))
}
// synchronize PoH with a bank
pub fn reset(
&mut self,
tick_height: u64,
blockhash: Hash,
start_slot: u64,
my_next_leader_slot: Option<u64>,
ticks_per_slot: u64,
) {
self.clear_bank();
let mut cache = vec![]; let mut cache = vec![];
info!( info!(
"reset poh from: {},{} to: {},{}", "reset poh from: {},{} to: {},{}",
self.poh.hash, self.poh.tick_height, blockhash, tick_height, self.poh.hash, self.poh.tick_height, blockhash, tick_height,
); );
std::mem::swap(&mut cache, &mut self.tick_cache); std::mem::swap(&mut cache, &mut self.tick_cache);
self.start_slot = start_slot;
self.start_tick = tick_height + 1;
self.poh = Poh::new(blockhash, tick_height); self.poh = Poh::new(blockhash, tick_height);
self.max_last_leader_grace_ticks = ticks_per_slot / MAX_LAST_LEADER_GRACE_TICKS_FACTOR;
let (start_leader_at_tick, last_leader_tick) = Self::compute_leader_slot_ticks(
&my_next_leader_slot,
ticks_per_slot,
self.max_last_leader_grace_ticks,
);
self.start_leader_at_tick = start_leader_at_tick;
self.last_leader_tick = last_leader_tick;
} }
pub fn set_working_bank(&mut self, working_bank: WorkingBank) { pub fn set_working_bank(&mut self, working_bank: WorkingBank) {
@ -151,6 +231,7 @@ impl PohRecorder {
"poh_record: max_tick_height reached, setting working bank {} to None", "poh_record: max_tick_height reached, setting working bank {} to None",
working_bank.bank.slot() working_bank.bank.slot()
); );
self.start_slot = working_bank.max_tick_height / working_bank.bank.ticks_per_slot();
self.clear_bank(); self.clear_bank();
} }
if e.is_err() { if e.is_err() {
@ -166,6 +247,10 @@ impl PohRecorder {
} }
pub fn tick(&mut self) { pub fn tick(&mut self) {
if self.start_leader_at_tick.is_none() {
return;
}
let tick = self.generate_tick(); let tick = self.generate_tick();
trace!("tick {}", tick.1); trace!("tick {}", tick.1);
self.tick_cache.push(tick); self.tick_cache.push(tick);
@ -180,9 +265,22 @@ impl PohRecorder {
/// A recorder to synchronize PoH with the following data structures /// A recorder to synchronize PoH with the following data structures
/// * bank - the LastId's queue is updated on `tick` and `record` events /// * bank - the LastId's queue is updated on `tick` and `record` events
/// * sender - the Entry channel that outputs to the ledger /// * sender - the Entry channel that outputs to the ledger
pub fn new(tick_height: u64, last_entry_hash: Hash) -> (Self, Receiver<WorkingBankEntries>) { pub fn new(
tick_height: u64,
last_entry_hash: Hash,
start_slot: u64,
my_leader_slot_index: Option<u64>,
ticks_per_slot: u64,
id: &Pubkey,
) -> (Self, Receiver<WorkingBankEntries>) {
let poh = Poh::new(last_entry_hash, tick_height); let poh = Poh::new(last_entry_hash, tick_height);
let (sender, receiver) = channel(); let (sender, receiver) = channel();
let max_last_leader_grace_ticks = ticks_per_slot / MAX_LAST_LEADER_GRACE_TICKS_FACTOR;
let (start_leader_at_tick, last_leader_tick) = Self::compute_leader_slot_ticks(
&my_leader_slot_index,
ticks_per_slot,
max_last_leader_grace_ticks,
);
( (
PohRecorder { PohRecorder {
poh, poh,
@ -190,6 +288,12 @@ impl PohRecorder {
working_bank: None, working_bank: None,
sender, sender,
clear_bank_signal: None, clear_bank_signal: None,
start_slot,
start_tick: tick_height + 1,
start_leader_at_tick,
last_leader_tick,
max_last_leader_grace_ticks,
id: *id,
}, },
receiver, receiver,
) )
@ -235,13 +339,21 @@ mod tests {
use crate::test_tx::test_tx; use crate::test_tx::test_tx;
use solana_sdk::genesis_block::GenesisBlock; use solana_sdk::genesis_block::GenesisBlock;
use solana_sdk::hash::hash; use solana_sdk::hash::hash;
use solana_sdk::timing::DEFAULT_TICKS_PER_SLOT;
use std::sync::mpsc::sync_channel; use std::sync::mpsc::sync_channel;
use std::sync::Arc; use std::sync::Arc;
#[test] #[test]
fn test_poh_recorder_no_zero_tick() { fn test_poh_recorder_no_zero_tick() {
let prev_hash = Hash::default(); let prev_hash = Hash::default();
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(0, prev_hash); let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
0,
prev_hash,
0,
Some(4),
DEFAULT_TICKS_PER_SLOT,
&Pubkey::default(),
);
poh_recorder.tick(); poh_recorder.tick();
assert_eq!(poh_recorder.tick_cache.len(), 1); assert_eq!(poh_recorder.tick_cache.len(), 1);
assert_eq!(poh_recorder.tick_cache[0].1, 1); assert_eq!(poh_recorder.tick_cache[0].1, 1);
@ -251,7 +363,14 @@ mod tests {
#[test] #[test]
fn test_poh_recorder_tick_height_is_last_tick() { fn test_poh_recorder_tick_height_is_last_tick() {
let prev_hash = Hash::default(); let prev_hash = Hash::default();
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(0, prev_hash); let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
0,
prev_hash,
0,
Some(4),
DEFAULT_TICKS_PER_SLOT,
&Pubkey::default(),
);
poh_recorder.tick(); poh_recorder.tick();
poh_recorder.tick(); poh_recorder.tick();
assert_eq!(poh_recorder.tick_cache.len(), 2); assert_eq!(poh_recorder.tick_cache.len(), 2);
@ -261,10 +380,17 @@ mod tests {
#[test] #[test]
fn test_poh_recorder_reset_clears_cache() { fn test_poh_recorder_reset_clears_cache() {
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(0, Hash::default()); let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
0,
Hash::default(),
0,
Some(4),
DEFAULT_TICKS_PER_SLOT,
&Pubkey::default(),
);
poh_recorder.tick(); poh_recorder.tick();
assert_eq!(poh_recorder.tick_cache.len(), 1); assert_eq!(poh_recorder.tick_cache.len(), 1);
poh_recorder.reset(0, Hash::default()); poh_recorder.reset(0, Hash::default(), 0, Some(4), DEFAULT_TICKS_PER_SLOT);
assert_eq!(poh_recorder.tick_cache.len(), 0); assert_eq!(poh_recorder.tick_cache.len(), 0);
} }
@ -273,7 +399,14 @@ mod tests {
let (genesis_block, _mint_keypair) = GenesisBlock::new(2); let (genesis_block, _mint_keypair) = GenesisBlock::new(2);
let bank = Arc::new(Bank::new(&genesis_block)); let bank = Arc::new(Bank::new(&genesis_block));
let prev_hash = bank.last_blockhash(); let prev_hash = bank.last_blockhash();
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(0, prev_hash); let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
0,
prev_hash,
0,
Some(4),
bank.ticks_per_slot(),
&Pubkey::default(),
);
let working_bank = WorkingBank { let working_bank = WorkingBank {
bank, bank,
@ -291,7 +424,14 @@ mod tests {
let (genesis_block, _mint_keypair) = GenesisBlock::new(2); let (genesis_block, _mint_keypair) = GenesisBlock::new(2);
let bank = Arc::new(Bank::new(&genesis_block)); let bank = Arc::new(Bank::new(&genesis_block));
let prev_hash = bank.last_blockhash(); let prev_hash = bank.last_blockhash();
let (mut poh_recorder, entry_receiver) = PohRecorder::new(0, prev_hash); let (mut poh_recorder, entry_receiver) = PohRecorder::new(
0,
prev_hash,
0,
Some(4),
bank.ticks_per_slot(),
&Pubkey::default(),
);
let working_bank = WorkingBank { let working_bank = WorkingBank {
bank: bank.clone(), bank: bank.clone(),
@ -321,7 +461,14 @@ mod tests {
let (genesis_block, _mint_keypair) = GenesisBlock::new(2); let (genesis_block, _mint_keypair) = GenesisBlock::new(2);
let bank = Arc::new(Bank::new(&genesis_block)); let bank = Arc::new(Bank::new(&genesis_block));
let prev_hash = bank.last_blockhash(); let prev_hash = bank.last_blockhash();
let (mut poh_recorder, entry_receiver) = PohRecorder::new(0, prev_hash); let (mut poh_recorder, entry_receiver) = PohRecorder::new(
0,
prev_hash,
0,
Some(4),
bank.ticks_per_slot(),
&Pubkey::default(),
);
poh_recorder.tick(); poh_recorder.tick();
poh_recorder.tick(); poh_recorder.tick();
@ -349,7 +496,14 @@ mod tests {
let (genesis_block, _mint_keypair) = GenesisBlock::new(2); let (genesis_block, _mint_keypair) = GenesisBlock::new(2);
let bank = Arc::new(Bank::new(&genesis_block)); let bank = Arc::new(Bank::new(&genesis_block));
let prev_hash = bank.last_blockhash(); let prev_hash = bank.last_blockhash();
let (mut poh_recorder, entry_receiver) = PohRecorder::new(0, prev_hash); let (mut poh_recorder, entry_receiver) = PohRecorder::new(
0,
prev_hash,
0,
Some(4),
bank.ticks_per_slot(),
&Pubkey::default(),
);
let working_bank = WorkingBank { let working_bank = WorkingBank {
bank, bank,
@ -369,7 +523,14 @@ mod tests {
let (genesis_block, _mint_keypair) = GenesisBlock::new(2); let (genesis_block, _mint_keypair) = GenesisBlock::new(2);
let bank = Arc::new(Bank::new(&genesis_block)); let bank = Arc::new(Bank::new(&genesis_block));
let prev_hash = bank.last_blockhash(); let prev_hash = bank.last_blockhash();
let (mut poh_recorder, entry_receiver) = PohRecorder::new(0, prev_hash); let (mut poh_recorder, entry_receiver) = PohRecorder::new(
0,
prev_hash,
0,
Some(4),
bank.ticks_per_slot(),
&Pubkey::default(),
);
let working_bank = WorkingBank { let working_bank = WorkingBank {
bank, bank,
@ -398,7 +559,14 @@ mod tests {
let (genesis_block, _mint_keypair) = GenesisBlock::new(2); let (genesis_block, _mint_keypair) = GenesisBlock::new(2);
let bank = Arc::new(Bank::new(&genesis_block)); let bank = Arc::new(Bank::new(&genesis_block));
let prev_hash = bank.last_blockhash(); let prev_hash = bank.last_blockhash();
let (mut poh_recorder, entry_receiver) = PohRecorder::new(0, prev_hash); let (mut poh_recorder, entry_receiver) = PohRecorder::new(
0,
prev_hash,
0,
Some(4),
bank.ticks_per_slot(),
&Pubkey::default(),
);
let working_bank = WorkingBank { let working_bank = WorkingBank {
bank, bank,
@ -424,7 +592,14 @@ mod tests {
let (genesis_block, _mint_keypair) = GenesisBlock::new(2); let (genesis_block, _mint_keypair) = GenesisBlock::new(2);
let bank = Arc::new(Bank::new(&genesis_block)); let bank = Arc::new(Bank::new(&genesis_block));
let prev_hash = bank.last_blockhash(); let prev_hash = bank.last_blockhash();
let (mut poh_recorder, entry_receiver) = PohRecorder::new(0, prev_hash); let (mut poh_recorder, entry_receiver) = PohRecorder::new(
0,
prev_hash,
0,
Some(4),
bank.ticks_per_slot(),
&Pubkey::default(),
);
let working_bank = WorkingBank { let working_bank = WorkingBank {
bank, bank,
@ -443,55 +618,66 @@ mod tests {
#[test] #[test]
fn test_reset_current() { fn test_reset_current() {
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(0, Hash::default()); let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
0,
Hash::default(),
0,
Some(4),
DEFAULT_TICKS_PER_SLOT,
&Pubkey::default(),
);
poh_recorder.tick(); poh_recorder.tick();
poh_recorder.tick(); poh_recorder.tick();
assert_eq!(poh_recorder.tick_cache.len(), 2); assert_eq!(poh_recorder.tick_cache.len(), 2);
poh_recorder.reset(poh_recorder.poh.tick_height, poh_recorder.poh.hash); poh_recorder.reset(
assert_eq!(poh_recorder.tick_cache.len(), 2); poh_recorder.poh.tick_height,
poh_recorder.poh.hash,
0,
Some(4),
DEFAULT_TICKS_PER_SLOT,
);
assert_eq!(poh_recorder.tick_cache.len(), 0);
} }
#[test] #[test]
fn test_reset_with_cached() { fn test_reset_with_cached() {
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(0, Hash::default()); let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
0,
Hash::default(),
0,
Some(4),
DEFAULT_TICKS_PER_SLOT,
&Pubkey::default(),
);
poh_recorder.tick(); poh_recorder.tick();
poh_recorder.tick(); poh_recorder.tick();
assert_eq!(poh_recorder.tick_cache.len(), 2); assert_eq!(poh_recorder.tick_cache.len(), 2);
poh_recorder.reset( poh_recorder.reset(
poh_recorder.tick_cache[0].1, poh_recorder.tick_cache[0].1,
poh_recorder.tick_cache[0].0.hash, poh_recorder.tick_cache[0].0.hash,
0,
Some(4),
DEFAULT_TICKS_PER_SLOT,
); );
assert_eq!(poh_recorder.tick_cache.len(), 2); assert_eq!(poh_recorder.tick_cache.len(), 0);
poh_recorder.reset(
poh_recorder.tick_cache[1].1,
poh_recorder.tick_cache[1].0.hash,
);
assert_eq!(poh_recorder.tick_cache.len(), 2);
}
#[test]
#[should_panic]
fn test_reset_with_cached_bad_height() {
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(0, Hash::default());
poh_recorder.tick();
poh_recorder.tick();
assert_eq!(poh_recorder.tick_cache.len(), 2);
//mixed up heights
poh_recorder.reset(
poh_recorder.tick_cache[0].1,
poh_recorder.tick_cache[1].0.hash,
);
} }
#[test] #[test]
fn test_reset_to_new_value() { fn test_reset_to_new_value() {
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(0, Hash::default()); let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
0,
Hash::default(),
0,
Some(4),
DEFAULT_TICKS_PER_SLOT,
&Pubkey::default(),
);
poh_recorder.tick(); poh_recorder.tick();
poh_recorder.tick(); poh_recorder.tick();
poh_recorder.tick(); poh_recorder.tick();
assert_eq!(poh_recorder.tick_cache.len(), 3); assert_eq!(poh_recorder.tick_cache.len(), 3);
assert_eq!(poh_recorder.poh.tick_height, 3); assert_eq!(poh_recorder.poh.tick_height, 3);
poh_recorder.reset(1, hash(b"hello")); poh_recorder.reset(1, hash(b"hello"), 0, Some(4), DEFAULT_TICKS_PER_SLOT);
assert_eq!(poh_recorder.tick_cache.len(), 0); assert_eq!(poh_recorder.tick_cache.len(), 0);
poh_recorder.tick(); poh_recorder.tick();
assert_eq!(poh_recorder.poh.tick_height, 2); assert_eq!(poh_recorder.poh.tick_height, 2);
@ -501,14 +687,22 @@ mod tests {
fn test_reset_clear_bank() { fn test_reset_clear_bank() {
let (genesis_block, _mint_keypair) = GenesisBlock::new(2); let (genesis_block, _mint_keypair) = GenesisBlock::new(2);
let bank = Arc::new(Bank::new(&genesis_block)); let bank = Arc::new(Bank::new(&genesis_block));
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(0, Hash::default()); let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
0,
Hash::default(),
0,
Some(4),
bank.ticks_per_slot(),
&Pubkey::default(),
);
let ticks_per_slot = bank.ticks_per_slot();
let working_bank = WorkingBank { let working_bank = WorkingBank {
bank, bank,
min_tick_height: 2, min_tick_height: 2,
max_tick_height: 3, max_tick_height: 3,
}; };
poh_recorder.set_working_bank(working_bank); poh_recorder.set_working_bank(working_bank);
poh_recorder.reset(1, hash(b"hello")); poh_recorder.reset(1, hash(b"hello"), 0, Some(4), ticks_per_slot);
assert!(poh_recorder.working_bank.is_none()); assert!(poh_recorder.working_bank.is_none());
} }
@ -516,11 +710,217 @@ mod tests {
pub fn test_clear_signal() { pub fn test_clear_signal() {
let (genesis_block, _mint_keypair) = GenesisBlock::new(2); let (genesis_block, _mint_keypair) = GenesisBlock::new(2);
let bank = Arc::new(Bank::new(&genesis_block)); let bank = Arc::new(Bank::new(&genesis_block));
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(0, Hash::default()); let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
0,
Hash::default(),
0,
None,
bank.ticks_per_slot(),
&Pubkey::default(),
);
let (sender, receiver) = sync_channel(1); let (sender, receiver) = sync_channel(1);
poh_recorder.set_bank(&bank); poh_recorder.set_bank(&bank);
poh_recorder.clear_bank_signal = Some(sender); poh_recorder.clear_bank_signal = Some(sender);
poh_recorder.clear_bank(); poh_recorder.clear_bank();
assert!(receiver.try_recv().is_ok()); assert!(receiver.try_recv().is_ok());
} }
#[test]
fn test_poh_recorder_reset_start_slot() {
let ticks_per_slot = 5;
let (mut genesis_block, _mint_keypair) = GenesisBlock::new(2);
genesis_block.ticks_per_slot = ticks_per_slot;
let bank = Arc::new(Bank::new(&genesis_block));
let prev_hash = bank.last_blockhash();
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
0,
prev_hash,
0,
Some(4),
bank.ticks_per_slot(),
&Pubkey::default(),
);
let end_slot = 3;
let max_tick_height = (end_slot + 1) * ticks_per_slot - 1;
let working_bank = WorkingBank {
bank,
min_tick_height: 1,
max_tick_height,
};
poh_recorder.set_working_bank(working_bank);
for _ in 0..max_tick_height {
poh_recorder.tick();
}
let tx = test_tx();
let h1 = hash(b"hello world!");
assert!(poh_recorder.record(h1, vec![tx.clone()]).is_err());
assert!(poh_recorder.working_bank.is_none());
// Make sure the starting slot is updated
assert_eq!(poh_recorder.start_slot(), end_slot);
}
#[test]
fn test_reached_leader_tick() {
let (genesis_block, _mint_keypair) = GenesisBlock::new(2);
let bank = Arc::new(Bank::new(&genesis_block));
let prev_hash = bank.last_blockhash();
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
0,
prev_hash,
0,
None,
bank.ticks_per_slot(),
&Pubkey::default(),
);
// Test that with no leader slot, we don't reach the leader tick
assert_eq!(poh_recorder.reached_leader_tick().0, false);
for _ in 0..bank.ticks_per_slot() {
poh_recorder.tick();
}
// Tick should not be recorded
assert_eq!(poh_recorder.tick_height(), 0);
// Test that with no leader slot, we don't reach the leader tick after sending some ticks
assert_eq!(poh_recorder.reached_leader_tick().0, false);
poh_recorder.reset(
poh_recorder.tick_height(),
bank.last_blockhash(),
0,
None,
bank.ticks_per_slot(),
);
// Test that with no leader slot in reset(), we don't reach the leader tick
assert_eq!(poh_recorder.reached_leader_tick().0, false);
// Provide a leader slot 1 slot down
poh_recorder.reset(
bank.ticks_per_slot(),
bank.last_blockhash(),
0,
Some(2),
bank.ticks_per_slot(),
);
let init_ticks = poh_recorder.tick_height();
// Send one slot worth of ticks
for _ in 0..bank.ticks_per_slot() {
poh_recorder.tick();
}
// Tick should be recorded
assert_eq!(
poh_recorder.tick_height(),
init_ticks + bank.ticks_per_slot()
);
// Test that we don't reach the leader tick because of grace ticks
assert_eq!(poh_recorder.reached_leader_tick().0, false);
// reset poh now. it should discard the grace ticks wait
poh_recorder.reset(
poh_recorder.tick_height(),
bank.last_blockhash(),
1,
Some(2),
bank.ticks_per_slot(),
);
// without sending more ticks, we should be leader now
assert_eq!(poh_recorder.reached_leader_tick().0, true);
assert_eq!(poh_recorder.reached_leader_tick().1, 0);
// Now test that with grace ticks we can reach leader ticks
// Set the leader slot 1 slot down
poh_recorder.reset(
poh_recorder.tick_height(),
bank.last_blockhash(),
2,
Some(3),
bank.ticks_per_slot(),
);
// Send one slot worth of ticks
for _ in 0..bank.ticks_per_slot() {
poh_recorder.tick();
}
// We are not the leader yet, as expected
assert_eq!(poh_recorder.reached_leader_tick().0, false);
// Send 1 less tick than the grace ticks
for _ in 0..bank.ticks_per_slot() / MAX_LAST_LEADER_GRACE_TICKS_FACTOR - 1 {
poh_recorder.tick();
}
// We are still not the leader
assert_eq!(poh_recorder.reached_leader_tick().0, false);
// Send one more tick
poh_recorder.tick();
// We should be the leader now
assert_eq!(poh_recorder.reached_leader_tick().0, true);
assert_eq!(
poh_recorder.reached_leader_tick().1,
bank.ticks_per_slot() / MAX_LAST_LEADER_GRACE_TICKS_FACTOR
);
// Let's test that correct grace ticks are reported
// Set the leader slot 1 slot down
poh_recorder.reset(
poh_recorder.tick_height(),
bank.last_blockhash(),
3,
Some(4),
bank.ticks_per_slot(),
);
// Send remaining ticks for the slot (remember we sent extra ticks in the previous part of the test)
for _ in bank.ticks_per_slot() / MAX_LAST_LEADER_GRACE_TICKS_FACTOR..bank.ticks_per_slot() {
poh_recorder.tick();
}
// Send one extra tick before resetting (so that there's one grace tick)
poh_recorder.tick();
// We are not the leader yet, as expected
assert_eq!(poh_recorder.reached_leader_tick().0, false);
poh_recorder.reset(
poh_recorder.tick_height(),
bank.last_blockhash(),
3,
Some(4),
bank.ticks_per_slot(),
);
// without sending more ticks, we should be leader now
assert_eq!(poh_recorder.reached_leader_tick().0, true);
assert_eq!(poh_recorder.reached_leader_tick().1, 1);
// Let's test that if a node overshoots the ticks for its target
// leader slot, reached_leader_tick() will return false
// Set the leader slot 1 slot down
poh_recorder.reset(
poh_recorder.tick_height(),
bank.last_blockhash(),
4,
Some(5),
bank.ticks_per_slot(),
);
// Send remaining ticks for the slot (remember we sent extra ticks in the previous part of the test)
for _ in 0..4 * bank.ticks_per_slot() {
poh_recorder.tick();
}
// We are not the leader, as expected
assert_eq!(poh_recorder.reached_leader_tick().0, false);
}
} }

View File

@ -104,13 +104,21 @@ mod tests {
use solana_runtime::bank::Bank; use solana_runtime::bank::Bank;
use solana_sdk::genesis_block::GenesisBlock; use solana_sdk::genesis_block::GenesisBlock;
use solana_sdk::hash::hash; use solana_sdk::hash::hash;
use solana_sdk::pubkey::Pubkey;
#[test] #[test]
fn test_poh_service() { fn test_poh_service() {
let (genesis_block, _mint_keypair) = GenesisBlock::new(2); let (genesis_block, _mint_keypair) = GenesisBlock::new(2);
let bank = Arc::new(Bank::new(&genesis_block)); let bank = Arc::new(Bank::new(&genesis_block));
let prev_hash = bank.last_blockhash(); let prev_hash = bank.last_blockhash();
let (poh_recorder, entry_receiver) = PohRecorder::new(bank.tick_height(), prev_hash); let (poh_recorder, entry_receiver) = PohRecorder::new(
bank.tick_height(),
prev_hash,
bank.slot(),
Some(4),
bank.ticks_per_slot(),
&Pubkey::default(),
);
let poh_recorder = Arc::new(Mutex::new(poh_recorder)); let poh_recorder = Arc::new(Mutex::new(poh_recorder));
let exit = Arc::new(AtomicBool::new(false)); let exit = Arc::new(AtomicBool::new(false));
let working_bank = WorkingBank { let working_bank = WorkingBank {

View File

@ -6,21 +6,23 @@ use crate::blocktree_processor;
use crate::cluster_info::ClusterInfo; use crate::cluster_info::ClusterInfo;
use crate::entry::{Entry, EntryReceiver, EntrySender, EntrySlice}; use crate::entry::{Entry, EntryReceiver, EntrySender, EntrySlice};
use crate::leader_schedule_utils; use crate::leader_schedule_utils;
use crate::locktower::{Locktower, StakeLockout};
use crate::packet::BlobError; use crate::packet::BlobError;
use crate::poh_recorder::PohRecorder; use crate::poh_recorder::PohRecorder;
use crate::result; use crate::result;
use crate::rpc_subscriptions::RpcSubscriptions; use crate::rpc_subscriptions::RpcSubscriptions;
use crate::service::Service; use crate::service::Service;
use hashbrown::HashMap;
use solana_metrics::counter::Counter; use solana_metrics::counter::Counter;
use solana_metrics::influxdb;
use solana_runtime::bank::Bank; use solana_runtime::bank::Bank;
use solana_sdk::hash::Hash; use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey; use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::KeypairUtil; use solana_sdk::signature::KeypairUtil;
use solana_sdk::timing::duration_as_ms; use solana_sdk::timing::{self, duration_as_ms};
use solana_vote_api::vote_transaction::VoteTransaction; use solana_vote_api::vote_transaction::VoteTransaction;
use std::collections::HashMap;
use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver, RecvTimeoutError}; use std::sync::mpsc::{channel, Receiver, RecvTimeoutError, Sender};
use std::sync::{Arc, Mutex, RwLock}; use std::sync::{Arc, Mutex, RwLock};
use std::thread::{self, Builder, JoinHandle}; use std::thread::{self, Builder, JoinHandle};
use std::time::Duration; use std::time::Duration;
@ -50,6 +52,22 @@ pub struct ReplayStage {
t_replay: JoinHandle<result::Result<()>>, t_replay: JoinHandle<result::Result<()>>,
} }
#[derive(Default)]
struct ForkProgress {
last_entry: Hash,
num_blobs: usize,
started_ms: u64,
}
impl ForkProgress {
pub fn new(last_entry: Hash) -> Self {
Self {
last_entry,
num_blobs: 0,
started_ms: timing::timestamp(),
}
}
}
impl ReplayStage { impl ReplayStage {
#[allow(clippy::new_ret_no_self, clippy::too_many_arguments)] #[allow(clippy::new_ret_no_self, clippy::too_many_arguments)]
pub fn new<T>( pub fn new<T>(
@ -76,6 +94,8 @@ impl ReplayStage {
let poh_recorder = poh_recorder.clone(); let poh_recorder = poh_recorder.clone();
let my_id = *my_id; let my_id = *my_id;
let vote_account = *vote_account; let vote_account = *vote_account;
let mut ticks_per_slot = 0;
let mut locktower = Locktower::new_from_forks(&bank_forks.read().unwrap(), &my_id);
// Start the replay stage loop // Start the replay stage loop
let t_replay = Builder::new() let t_replay = Builder::new()
@ -92,10 +112,10 @@ impl ReplayStage {
Self::generate_new_bank_forks(&blocktree, &mut bank_forks.write().unwrap()); Self::generate_new_bank_forks(&blocktree, &mut bank_forks.write().unwrap());
let active_banks = bank_forks.read().unwrap().active_banks(); let active_banks = bank_forks.read().unwrap().active_banks();
trace!("active banks {:?}", active_banks); trace!("active banks {:?}", active_banks);
let mut votable: Vec<u64> = vec![];
let mut is_tpu_bank_active = poh_recorder.lock().unwrap().bank().is_some(); let mut is_tpu_bank_active = poh_recorder.lock().unwrap().bank().is_some();
for bank_slot in &active_banks { for bank_slot in &active_banks {
let bank = bank_forks.read().unwrap().get(*bank_slot).unwrap().clone(); let bank = bank_forks.read().unwrap().get(*bank_slot).unwrap().clone();
ticks_per_slot = bank.ticks_per_slot();
if bank.collector_id() != my_id { if bank.collector_id() != my_id {
Self::replay_blocktree_into_bank( Self::replay_blocktree_into_bank(
&bank, &bank,
@ -106,51 +126,80 @@ impl ReplayStage {
} }
let max_tick_height = (*bank_slot + 1) * bank.ticks_per_slot() - 1; let max_tick_height = (*bank_slot + 1) * bank.ticks_per_slot() - 1;
if bank.tick_height() == max_tick_height { if bank.tick_height() == max_tick_height {
bank.freeze(); Self::process_completed_bank(&my_id, bank, &slot_full_sender);
info!("bank frozen {}", bank.slot());
votable.push(*bank_slot);
progress.remove(bank_slot);
if let Err(e) =
slot_full_sender.send((bank.slot(), bank.collector_id()))
{
info!("{} slot_full alert failed: {:?}", my_id, e);
} }
} }
}
// TODO: fork selection
// vote on the latest one for now
votable.sort();
if let Some(latest_slot_vote) = votable.last() { if ticks_per_slot == 0 {
let parent = bank_forks let frozen_banks = bank_forks.read().unwrap().frozen_banks();
.read() let bank = frozen_banks.values().next().unwrap();
.unwrap() ticks_per_slot = bank.ticks_per_slot();
.get(*latest_slot_vote) }
.unwrap()
.clone();
subscriptions.notify_subscribers(&parent); let votable =
Self::generate_votable_banks(&bank_forks, &locktower, &mut progress);
if let Some((_, bank)) = votable.last() {
subscriptions.notify_subscribers(&bank);
if let Some(ref voting_keypair) = voting_keypair { if let Some(ref voting_keypair) = voting_keypair {
let keypair = voting_keypair.as_ref(); let keypair = voting_keypair.as_ref();
let vote = VoteTransaction::new_vote( let vote = VoteTransaction::new_vote(
&vote_account, &vote_account,
keypair, keypair,
*latest_slot_vote, bank.slot(),
parent.last_blockhash(), bank.last_blockhash(),
0, 0,
); );
if let Some(new_root) = locktower.record_vote(bank.slot()) {
bank_forks.write().unwrap().set_root(new_root);
Self::handle_new_root(&bank_forks, &mut progress);
}
locktower.update_epoch(&bank);
cluster_info.write().unwrap().push_vote(vote); cluster_info.write().unwrap().push_vote(vote);
} }
poh_recorder let next_leader_slot =
.lock() leader_schedule_utils::next_leader_slot(&my_id, bank.slot(), &bank);
.unwrap() poh_recorder.lock().unwrap().reset(
.reset(parent.tick_height(), parent.last_blockhash()); bank.tick_height(),
bank.last_blockhash(),
bank.slot(),
next_leader_slot,
ticks_per_slot,
);
debug!(
"{:?} voted and reset poh at {}. next leader slot {:?}",
my_id,
bank.tick_height(),
next_leader_slot
);
is_tpu_bank_active = false; is_tpu_bank_active = false;
} }
let (reached_leader_tick, grace_ticks) = if !is_tpu_bank_active {
let poh = poh_recorder.lock().unwrap();
poh.reached_leader_tick()
} else {
(false, 0)
};
if !is_tpu_bank_active { if !is_tpu_bank_active {
Self::start_leader(&my_id, &bank_forks, &poh_recorder, &cluster_info); assert!(ticks_per_slot > 0);
let poh_tick_height = poh_recorder.lock().unwrap().tick_height();
let poh_slot = leader_schedule_utils::tick_height_to_slot(
ticks_per_slot,
poh_tick_height + 1,
);
Self::start_leader(
&my_id,
&bank_forks,
&poh_recorder,
&cluster_info,
&blocktree,
poh_slot,
reached_leader_tick,
grace_ticks,
);
} }
inc_new_counter_info!( inc_new_counter_info!(
@ -179,21 +228,41 @@ impl ReplayStage {
bank_forks: &Arc<RwLock<BankForks>>, bank_forks: &Arc<RwLock<BankForks>>,
poh_recorder: &Arc<Mutex<PohRecorder>>, poh_recorder: &Arc<Mutex<PohRecorder>>,
cluster_info: &Arc<RwLock<ClusterInfo>>, cluster_info: &Arc<RwLock<ClusterInfo>>,
blocktree: &Blocktree,
poh_slot: u64,
reached_leader_tick: bool,
grace_ticks: u64,
) { ) {
let frozen = bank_forks.read().unwrap().frozen_banks(); trace!("{} checking poh slot {}", my_id, poh_slot);
if blocktree.meta(poh_slot).unwrap().is_some() {
// We've already broadcasted entries for this slot, skip it
// Since we are skipping our leader slot, let's tell poh recorder when we should be
// leader again
if reached_leader_tick {
let _ = bank_forks.read().unwrap().get(poh_slot).map(|bank| {
let next_leader_slot =
leader_schedule_utils::next_leader_slot(&my_id, bank.slot(), &bank);
let mut poh = poh_recorder.lock().unwrap();
let start_slot = poh.start_slot();
poh.reset(
bank.tick_height(),
bank.last_blockhash(),
start_slot,
next_leader_slot,
bank.ticks_per_slot(),
);
});
}
// TODO: fork selection
let mut newest_frozen: Vec<(&u64, &Arc<Bank>)> = frozen.iter().collect();
newest_frozen.sort_by_key(|x| *x.0);
if let Some((_, parent)) = newest_frozen.last() {
let poh_tick_height = poh_recorder.lock().unwrap().tick_height();
let poh_slot = leader_schedule_utils::tick_height_to_slot(parent, poh_tick_height + 1);
trace!("checking poh slot for leader {}", poh_slot);
if frozen.get(&poh_slot).is_some() {
// Already been a leader for this slot, skip it
return; return;
} }
if bank_forks.read().unwrap().get(poh_slot).is_none() { if bank_forks.read().unwrap().get(poh_slot).is_none() {
let frozen = bank_forks.read().unwrap().frozen_banks();
let parent_slot = poh_recorder.lock().unwrap().start_slot();
assert!(frozen.contains_key(&parent_slot));
let parent = &frozen[&parent_slot];
leader_schedule_utils::slot_leader_at(poh_slot, parent) leader_schedule_utils::slot_leader_at(poh_slot, parent)
.map(|next_leader| { .map(|next_leader| {
debug!( debug!(
@ -201,10 +270,21 @@ impl ReplayStage {
my_id, next_leader, poh_slot my_id, next_leader, poh_slot
); );
cluster_info.write().unwrap().set_leader(&next_leader); cluster_info.write().unwrap().set_leader(&next_leader);
if next_leader == *my_id { if next_leader == *my_id && reached_leader_tick {
debug!("starting tpu for slot {}", poh_slot); debug!("{} starting tpu for slot {}", my_id, poh_slot);
solana_metrics::submit(
influxdb::Point::new("counter-replay_stage-new_leader")
.add_field(
"count",
influxdb::Value::Integer(poh_slot as i64),
)
.add_field(
"grace",
influxdb::Value::Integer(grace_ticks as i64),
)
.to_owned(),);
let tpu_bank = Bank::new_from_parent(parent, my_id, poh_slot); let tpu_bank = Bank::new_from_parent(parent, my_id, poh_slot);
bank_forks.write().unwrap().insert(poh_slot, tpu_bank); bank_forks.write().unwrap().insert(tpu_bank);
if let Some(tpu_bank) = bank_forks.read().unwrap().get(poh_slot).cloned() { if let Some(tpu_bank) = bank_forks.read().unwrap().get(poh_slot).cloned() {
assert_eq!( assert_eq!(
bank_forks.read().unwrap().working_bank().slot(), bank_forks.read().unwrap().working_bank().slot(),
@ -221,18 +301,15 @@ impl ReplayStage {
} }
}) })
.or_else(|| { .or_else(|| {
error!("No next leader found"); error!("{} No next leader found", my_id);
None None
}); });
} }
} else {
error!("No frozen banks available!");
} }
} fn replay_blocktree_into_bank(
pub fn replay_blocktree_into_bank(
bank: &Bank, bank: &Bank,
blocktree: &Blocktree, blocktree: &Blocktree,
progress: &mut HashMap<u64, (Hash, usize)>, progress: &mut HashMap<u64, ForkProgress>,
forward_entry_sender: &EntrySender, forward_entry_sender: &EntrySender,
) -> result::Result<()> { ) -> result::Result<()> {
let (entries, num) = Self::load_blocktree_entries(bank, blocktree, progress)?; let (entries, num) = Self::load_blocktree_entries(bank, blocktree, progress)?;
@ -250,32 +327,128 @@ impl ReplayStage {
Ok(()) Ok(())
} }
pub fn load_blocktree_entries( fn generate_votable_banks(
bank_forks: &Arc<RwLock<BankForks>>,
locktower: &Locktower,
progress: &mut HashMap<u64, ForkProgress>,
) -> Vec<(u128, Arc<Bank>)> {
let locktower_start = Instant::now();
// Locktower voting
let descendants = bank_forks.read().unwrap().descendants();
let ancestors = bank_forks.read().unwrap().ancestors();
let frozen_banks = bank_forks.read().unwrap().frozen_banks();
trace!("frozen_banks {}", frozen_banks.len());
let mut votable: Vec<(u128, Arc<Bank>)> = frozen_banks
.values()
.filter(|b| {
let is_votable = b.is_votable();
trace!("bank is votable: {} {}", b.slot(), is_votable);
is_votable
})
.filter(|b| {
let is_recent_epoch = locktower.is_recent_epoch(b);
trace!("bank is is_recent_epoch: {} {}", b.slot(), is_recent_epoch);
is_recent_epoch
})
.filter(|b| {
let has_voted = locktower.has_voted(b.slot());
trace!("bank is has_voted: {} {}", b.slot(), has_voted);
!has_voted
})
.filter(|b| {
let is_locked_out = locktower.is_locked_out(b.slot(), &descendants);
trace!("bank is is_locked_out: {} {}", b.slot(), is_locked_out);
!is_locked_out
})
.map(|bank| {
(
bank,
locktower.collect_vote_lockouts(bank.slot(), bank.vote_accounts(), &ancestors),
)
})
.filter(|(b, stake_lockouts)| {
let vote_threshold =
locktower.check_vote_stake_threshold(b.slot(), &stake_lockouts);
Self::confirm_forks(locktower, stake_lockouts, progress);
debug!("bank vote_threshold: {} {}", b.slot(), vote_threshold);
vote_threshold
})
.map(|(b, stake_lockouts)| (locktower.calculate_weight(&stake_lockouts), b.clone()))
.collect();
votable.sort_by_key(|b| b.0);
let ms = timing::duration_as_ms(&locktower_start.elapsed());
trace!("votable_banks {}", votable.len());
if !votable.is_empty() {
let weights: Vec<u128> = votable.iter().map(|x| x.0).collect();
info!(
"@{:?} locktower duration: {:?} len: {} weights: {:?}",
timing::timestamp(),
ms,
votable.len(),
weights
);
}
inc_new_counter_info!("replay_stage-locktower_duration", ms as usize);
votable
}
fn confirm_forks(
locktower: &Locktower,
stake_lockouts: &HashMap<u64, StakeLockout>,
progress: &mut HashMap<u64, ForkProgress>,
) {
progress.retain(|slot, prog| {
let duration = timing::timestamp() - prog.started_ms;
if locktower.is_slot_confirmed(*slot, stake_lockouts) {
info!("validator fork confirmed {} {}", *slot, duration);
solana_metrics::submit(
influxdb::Point::new(&"validator-confirmation")
.add_field("duration_ms", influxdb::Value::Integer(duration as i64))
.to_owned(),
);
false
} else {
debug!(
"validator fork not confirmed {} {} {:?}",
*slot,
duration,
stake_lockouts.get(slot)
);
true
}
});
}
fn load_blocktree_entries(
bank: &Bank, bank: &Bank,
blocktree: &Blocktree, blocktree: &Blocktree,
progress: &mut HashMap<u64, (Hash, usize)>, progress: &mut HashMap<u64, ForkProgress>,
) -> result::Result<(Vec<Entry>, usize)> { ) -> result::Result<(Vec<Entry>, usize)> {
let bank_slot = bank.slot(); let bank_slot = bank.slot();
let bank_progress = &mut progress let bank_progress = &mut progress
.entry(bank_slot) .entry(bank_slot)
.or_insert((bank.last_blockhash(), 0)); .or_insert(ForkProgress::new(bank.last_blockhash()));
blocktree.get_slot_entries_with_blob_count(bank_slot, bank_progress.1 as u64, None) blocktree.get_slot_entries_with_blob_count(bank_slot, bank_progress.num_blobs as u64, None)
} }
pub fn replay_entries_into_bank( fn replay_entries_into_bank(
bank: &Bank, bank: &Bank,
entries: Vec<Entry>, entries: Vec<Entry>,
progress: &mut HashMap<u64, (Hash, usize)>, progress: &mut HashMap<u64, ForkProgress>,
forward_entry_sender: &EntrySender, forward_entry_sender: &EntrySender,
num: usize, num: usize,
) -> result::Result<()> { ) -> result::Result<()> {
let bank_progress = &mut progress let bank_progress = &mut progress
.entry(bank.slot()) .entry(bank.slot())
.or_insert((bank.last_blockhash(), 0)); .or_insert(ForkProgress::new(bank.last_blockhash()));
let result = Self::verify_and_process_entries(&bank, &entries, &bank_progress.0); let result = Self::verify_and_process_entries(&bank, &entries, &bank_progress.last_entry);
bank_progress.1 += num; bank_progress.num_blobs += num;
if let Some(last_entry) = entries.last() { if let Some(last_entry) = entries.last() {
bank_progress.0 = last_entry.hash; bank_progress.last_entry = last_entry.hash;
} }
if result.is_ok() { if result.is_ok() {
forward_entry_sender.send(entries)?; forward_entry_sender.send(entries)?;
@ -303,6 +476,26 @@ impl ReplayStage {
Ok(()) Ok(())
} }
fn handle_new_root(
bank_forks: &Arc<RwLock<BankForks>>,
progress: &mut HashMap<u64, ForkProgress>,
) {
let r_bank_forks = bank_forks.read().unwrap();
progress.retain(|k, _| r_bank_forks.get(*k).is_some());
}
fn process_completed_bank(
my_id: &Pubkey,
bank: Arc<Bank>,
slot_full_sender: &Sender<(u64, Pubkey)>,
) {
bank.freeze();
info!("bank frozen {}", bank.slot());
if let Err(e) = slot_full_sender.send((bank.slot(), bank.collector_id())) {
trace!("{} slot_full alert failed: {:?}", my_id, e);
}
}
fn generate_new_bank_forks(blocktree: &Blocktree, forks: &mut BankForks) { fn generate_new_bank_forks(blocktree: &Blocktree, forks: &mut BankForks) {
// Find the next slot that chains to the old slot // Find the next slot that chains to the old slot
let frozen_banks = forks.frozen_banks(); let frozen_banks = forks.frozen_banks();
@ -311,6 +504,7 @@ impl ReplayStage {
let next_slots = blocktree let next_slots = blocktree
.get_slots_since(&frozen_bank_slots) .get_slots_since(&frozen_bank_slots)
.expect("Db error"); .expect("Db error");
// Filter out what we've already seen
trace!("generate new forks {:?}", next_slots); trace!("generate new forks {:?}", next_slots);
for (parent_id, children) in next_slots { for (parent_id, children) in next_slots {
let parent_bank = frozen_banks let parent_bank = frozen_banks
@ -318,20 +512,13 @@ impl ReplayStage {
.expect("missing parent in bank forks") .expect("missing parent in bank forks")
.clone(); .clone();
for child_id in children { for child_id in children {
if frozen_banks.get(&child_id).is_some() {
trace!("child already frozen {}", child_id);
continue;
}
if forks.get(child_id).is_some() { if forks.get(child_id).is_some() {
trace!("child already active {}", child_id); trace!("child already active or frozen {}", child_id);
continue; continue;
} }
let leader = leader_schedule_utils::slot_leader_at(child_id, &parent_bank).unwrap(); let leader = leader_schedule_utils::slot_leader_at(child_id, &parent_bank).unwrap();
info!("new fork:{} parent:{}", child_id, parent_id); info!("new fork:{} parent:{}", child_id, parent_id);
forks.insert( forks.insert(Bank::new_from_parent(&parent_bank, &leader, child_id));
child_id,
Bank::new_from_parent(&parent_bank, &leader, child_id),
);
} }
} }
} }
@ -349,11 +536,12 @@ impl Service for ReplayStage {
mod test { mod test {
use super::*; use super::*;
use crate::banking_stage::create_test_recorder; use crate::banking_stage::create_test_recorder;
use crate::blocktree::create_new_tmp_ledger; use crate::blocktree::{create_new_tmp_ledger, get_tmp_ledger_path};
use crate::cluster_info::{ClusterInfo, Node}; use crate::cluster_info::{ClusterInfo, Node};
use crate::entry::create_ticks; use crate::entry::create_ticks;
use crate::entry::{next_entry_mut, Entry}; use crate::entry::{next_entry_mut, Entry};
use crate::fullnode::new_banks_from_blocktree; use crate::fullnode::new_banks_from_blocktree;
use crate::packet::Blob;
use crate::replay_stage::ReplayStage; use crate::replay_stage::ReplayStage;
use crate::result::Error; use crate::result::Error;
use solana_sdk::genesis_block::GenesisBlock; use solana_sdk::genesis_block::GenesisBlock;
@ -486,4 +674,50 @@ mod test {
} }
assert!(forward_entry_receiver.try_recv().is_err()); assert!(forward_entry_receiver.try_recv().is_err());
} }
#[test]
fn test_child_slots_of_same_parent() {
let ledger_path = get_tmp_ledger_path!();
{
let blocktree = Arc::new(
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
);
let genesis_block = GenesisBlock::new(10_000).0;
let bank0 = Bank::new(&genesis_block);
let mut bank_forks = BankForks::new(0, bank0);
bank_forks.working_bank().freeze();
// Insert blob for slot 1, generate new forks, check result
let mut blob_slot_1 = Blob::default();
blob_slot_1.set_slot(1);
blob_slot_1.set_parent(0);
blocktree.insert_data_blobs(&vec![blob_slot_1]).unwrap();
assert!(bank_forks.get(1).is_none());
ReplayStage::generate_new_bank_forks(&blocktree, &mut bank_forks);
assert!(bank_forks.get(1).is_some());
// Insert blob for slot 3, generate new forks, check result
let mut blob_slot_2 = Blob::default();
blob_slot_2.set_slot(2);
blob_slot_2.set_parent(0);
blocktree.insert_data_blobs(&vec![blob_slot_2]).unwrap();
assert!(bank_forks.get(2).is_none());
ReplayStage::generate_new_bank_forks(&blocktree, &mut bank_forks);
assert!(bank_forks.get(1).is_some());
assert!(bank_forks.get(2).is_some());
}
let _ignored = remove_dir_all(&ledger_path);
}
#[test]
fn test_handle_new_root() {
let bank0 = Bank::default();
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank0)));
let mut progress = HashMap::new();
progress.insert(5, ForkProgress::new(Hash::default()));
ReplayStage::handle_new_root(&bank_forks, &mut progress);
assert!(progress.is_empty());
}
} }

View File

@ -3,19 +3,19 @@ use crate::blocktree::Blocktree;
use crate::blocktree_processor; use crate::blocktree_processor;
#[cfg(feature = "chacha")] #[cfg(feature = "chacha")]
use crate::chacha::{chacha_cbc_encrypt_ledger, CHACHA_BLOCK_SIZE}; use crate::chacha::{chacha_cbc_encrypt_ledger, CHACHA_BLOCK_SIZE};
use crate::client::mk_client; use crate::cluster_info::{ClusterInfo, Node, FULLNODE_PORT_RANGE};
use crate::cluster_info::{ClusterInfo, Node};
use crate::contact_info::ContactInfo; use crate::contact_info::ContactInfo;
use crate::gossip_service::GossipService; use crate::gossip_service::GossipService;
use crate::result::Result; use crate::result::Result;
use crate::rpc_request::{RpcClient, RpcRequest, RpcRequestHandler};
use crate::service::Service; use crate::service::Service;
use crate::storage_stage::{get_segment_from_entry, ENTRIES_PER_SEGMENT}; use crate::storage_stage::{get_segment_from_entry, ENTRIES_PER_SEGMENT};
use crate::streamer::BlobReceiver; use crate::streamer::BlobReceiver;
use crate::thin_client::{retry_get_balance, ThinClient};
use crate::window_service::WindowService; use crate::window_service::WindowService;
use rand::thread_rng; use rand::thread_rng;
use rand::Rng; use rand::Rng;
use solana_client::client::create_client;
use solana_client::rpc_request::{RpcClient, RpcRequest, RpcRequestHandler};
use solana_client::thin_client::{retry_get_balance, ThinClient};
use solana_drone::drone::{request_airdrop_transaction, DRONE_PORT}; use solana_drone::drone::{request_airdrop_transaction, DRONE_PORT};
use solana_sdk::genesis_block::GenesisBlock; use solana_sdk::genesis_block::GenesisBlock;
use solana_sdk::hash::{Hash, Hasher}; use solana_sdk::hash::{Hash, Hasher};
@ -205,7 +205,7 @@ impl Replicator {
cluster_info_w.insert_self(contact_info); cluster_info_w.insert_self(contact_info);
} }
let mut client = mk_client(leader_info); let mut client = create_client(leader_info.client_facing_addr(), FULLNODE_PORT_RANGE);
Self::get_airdrop_lamports(&mut client, &keypair, &leader_info); Self::get_airdrop_lamports(&mut client, &keypair, &leader_info);
info!("Done downloading ledger at {}", ledger_path); info!("Done downloading ledger at {}", ledger_path);

View File

@ -513,7 +513,7 @@ mod tests {
"result":{ "result":{
"owner": [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], "owner": [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"lamports": 20, "lamports": 20,
"userdata": [], "data": [],
"executable": false "executable": false
}, },
"id":1} "id":1}

View File

@ -17,7 +17,7 @@ use std::sync::{atomic, Arc};
pub trait RpcSolPubSub { pub trait RpcSolPubSub {
type Metadata; type Metadata;
// Get notification every time account userdata is changed // Get notification every time account data is changed
// Accepts pubkey parameter as base-58 encoded string // Accepts pubkey parameter as base-58 encoded string
#[pubsub( #[pubsub(
subscription = "accountNotification", subscription = "accountNotification",
@ -34,7 +34,7 @@ pub trait RpcSolPubSub {
)] )]
fn account_unsubscribe(&self, _: Option<Self::Metadata>, _: SubscriptionId) -> Result<bool>; fn account_unsubscribe(&self, _: Option<Self::Metadata>, _: SubscriptionId) -> Result<bool>;
// Get notification every time account userdata owned by a particular program is changed // Get notification every time account data owned by a particular program is changed
// Accepts pubkey parameter as base-58 encoded string // Accepts pubkey parameter as base-58 encoded string
#[pubsub( #[pubsub(
subscription = "programNotification", subscription = "programNotification",
@ -365,10 +365,7 @@ mod tests {
// Test signature confirmation notification #1 // Test signature confirmation notification #1
let string = receiver.poll(); let string = receiver.poll();
let expected_userdata = arc_bank let expected_data = arc_bank.get_account(&contract_state.pubkey()).unwrap().data;
.get_account(&contract_state.pubkey())
.unwrap()
.userdata;
let expected = json!({ let expected = json!({
"jsonrpc": "2.0", "jsonrpc": "2.0",
"method": "accountNotification", "method": "accountNotification",
@ -376,7 +373,7 @@ mod tests {
"result": { "result": {
"owner": budget_program_id, "owner": budget_program_id,
"lamports": 51, "lamports": 51,
"userdata": expected_userdata, "data": expected_data,
"executable": executable, "executable": executable,
}, },
"subscription": 0, "subscription": 0,

View File

@ -236,7 +236,7 @@ mod tests {
subscriptions.check_account(&alice.pubkey(), &account); subscriptions.check_account(&alice.pubkey(), &account);
let string = transport_receiver.poll(); let string = transport_receiver.poll();
if let Async::Ready(Some(response)) = string.unwrap() { if let Async::Ready(Some(response)) = string.unwrap() {
let expected = format!(r#"{{"jsonrpc":"2.0","method":"accountNotification","params":{{"result":{{"executable":false,"lamports":1,"owner":[129,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"userdata":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}},"subscription":0}}}}"#); let expected = format!(r#"{{"jsonrpc":"2.0","method":"accountNotification","params":{{"result":{{"data":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"executable":false,"lamports":1,"owner":[129,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}},"subscription":0}}}}"#);
assert_eq!(expected, response); assert_eq!(expected, response);
} }
@ -282,7 +282,7 @@ mod tests {
subscriptions.check_program(&solana_budget_api::id(), &alice.pubkey(), &account); subscriptions.check_program(&solana_budget_api::id(), &alice.pubkey(), &account);
let string = transport_receiver.poll(); let string = transport_receiver.poll();
if let Async::Ready(Some(response)) = string.unwrap() { if let Async::Ready(Some(response)) = string.unwrap() {
let expected = format!(r#"{{"jsonrpc":"2.0","method":"programNotification","params":{{"result":["{:?}",{{"executable":false,"lamports":1,"owner":[129,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"userdata":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}}],"subscription":0}}}}"#, alice.pubkey()); let expected = format!(r#"{{"jsonrpc":"2.0","method":"programNotification","params":{{"result":["{:?}",{{"data":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"executable":false,"lamports":1,"owner":[129,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}}],"subscription":0}}}}"#, alice.pubkey());
assert_eq!(expected, response); assert_eq!(expected, response);
} }

View File

@ -395,10 +395,10 @@ mod tests {
} }
#[test] #[test]
fn test_system_transaction_userdata_layout() { fn test_system_transaction_data_layout() {
use crate::packet::PACKET_DATA_SIZE; use crate::packet::PACKET_DATA_SIZE;
let mut tx0 = test_tx(); let mut tx0 = test_tx();
tx0.instructions[0].userdata = vec![1, 2, 3]; tx0.instructions[0].data = vec![1, 2, 3];
let message0a = tx0.message(); let message0a = tx0.message();
let tx_bytes = serialize(&tx0).unwrap(); let tx_bytes = serialize(&tx0).unwrap();
assert!(tx_bytes.len() < PACKET_DATA_SIZE); assert!(tx_bytes.len() < PACKET_DATA_SIZE);
@ -408,9 +408,9 @@ mod tests {
); );
let tx1 = deserialize(&tx_bytes).unwrap(); let tx1 = deserialize(&tx_bytes).unwrap();
assert_eq!(tx0, tx1); assert_eq!(tx0, tx1);
assert_eq!(tx1.instructions[0].userdata, vec![1, 2, 3]); assert_eq!(tx1.instructions[0].data, vec![1, 2, 3]);
tx0.instructions[0].userdata = vec![1, 2, 4]; tx0.instructions[0].data = vec![1, 2, 4];
let message0b = tx0.message(); let message0b = tx0.message();
assert_ne!(message0a, message0b); assert_ne!(message0a, message0b);
} }

View File

@ -58,17 +58,26 @@ fn node_staked_accounts(bank: &Bank) -> impl Iterator<Item = (Pubkey, u64, Accou
}) })
} }
fn node_staked_accounts_at_epoch( pub fn node_staked_accounts_at_epoch(
bank: &Bank, bank: &Bank,
epoch_height: u64, epoch_height: u64,
) -> Option<impl Iterator<Item = (&Pubkey, u64, &Account)>> { ) -> Option<impl Iterator<Item = (&Pubkey, u64, &Account)>> {
bank.epoch_vote_accounts(epoch_height).map(|epoch_state| { bank.epoch_vote_accounts(epoch_height).map(|epoch_state| {
epoch_state.into_iter().filter_map(|(account_id, account)| { epoch_state
.into_iter()
.filter_map(|(account_id, account)| {
filter_zero_balances(account).map(|stake| (account_id, stake, account)) filter_zero_balances(account).map(|stake| (account_id, stake, account))
}) })
.filter(|(account_id, _, account)| filter_no_delegate(account_id, account))
}) })
} }
fn filter_no_delegate(account_id: &Pubkey, account: &Account) -> bool {
VoteState::deserialize(&account.data)
.map(|vote_state| vote_state.delegate_id != *account_id)
.unwrap_or(false)
}
fn filter_zero_balances(account: &Account) -> Option<u64> { fn filter_zero_balances(account: &Account) -> Option<u64> {
let balance = Bank::read_balance(&account); let balance = Bank::read_balance(&account);
if balance > 0 { if balance > 0 {
@ -82,7 +91,7 @@ fn to_vote_state(
node_staked_accounts: impl Iterator<Item = (impl Borrow<Pubkey>, u64, impl Borrow<Account>)>, node_staked_accounts: impl Iterator<Item = (impl Borrow<Pubkey>, u64, impl Borrow<Account>)>,
) -> impl Iterator<Item = (u64, VoteState)> { ) -> impl Iterator<Item = (u64, VoteState)> {
node_staked_accounts.filter_map(|(_, stake, account)| { node_staked_accounts.filter_map(|(_, stake, account)| {
VoteState::deserialize(&account.borrow().userdata) VoteState::deserialize(&account.borrow().data)
.ok() .ok()
.map(|vote_state| (stake, vote_state)) .map(|vote_state| (stake, vote_state))
}) })
@ -189,7 +198,13 @@ mod tests {
// Make a mint vote account. Because the mint has nonzero stake, this // Make a mint vote account. Because the mint has nonzero stake, this
// should show up in the active set // should show up in the active set
voting_keypair_tests::new_vote_account_with_vote(&mint_keypair, &bank_voter, &bank, 499, 0); voting_keypair_tests::new_vote_account_with_delegate(
&mint_keypair,
&bank_voter,
&mint_keypair.pubkey(),
&bank,
499,
);
// soonest slot that could be a new epoch is 1 // soonest slot that could be a new epoch is 1
let mut slot = 1; let mut slot = 1;

View File

@ -5,14 +5,14 @@
use crate::blocktree::Blocktree; use crate::blocktree::Blocktree;
#[cfg(all(feature = "chacha", feature = "cuda"))] #[cfg(all(feature = "chacha", feature = "cuda"))]
use crate::chacha_cuda::chacha_cbc_encrypt_file_many_keys; use crate::chacha_cuda::chacha_cbc_encrypt_file_many_keys;
use crate::client::mk_client_with_timeout; use crate::cluster_info::{ClusterInfo, FULLNODE_PORT_RANGE};
use crate::cluster_info::ClusterInfo;
use crate::entry::{Entry, EntryReceiver}; use crate::entry::{Entry, EntryReceiver};
use crate::result::{Error, Result}; use crate::result::{Error, Result};
use crate::service::Service; use crate::service::Service;
use bincode::deserialize; use bincode::deserialize;
use rand::{Rng, SeedableRng}; use rand::{Rng, SeedableRng};
use rand_chacha::ChaChaRng; use rand_chacha::ChaChaRng;
use solana_client::client::create_client_with_timeout;
use solana_sdk::hash::Hash; use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey; use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, Signature}; use solana_sdk::signature::{Keypair, Signature};
@ -228,10 +228,14 @@ impl StorageStage {
account_to_create: Option<Pubkey>, account_to_create: Option<Pubkey>,
) -> io::Result<()> { ) -> io::Result<()> {
let contact_info = cluster_info.read().unwrap().my_data(); let contact_info = cluster_info.read().unwrap().my_data();
let mut client = mk_client_with_timeout(&contact_info, Duration::from_secs(5)); let mut client = create_client_with_timeout(
contact_info.client_facing_addr(),
FULLNODE_PORT_RANGE,
Duration::from_secs(5),
);
if let Some(account) = account_to_create { if let Some(account) = account_to_create {
if client.get_account_userdata(&account).is_ok() { if client.get_account_data(&account).is_ok() {
return Ok(()); return Ok(());
} }
} }
@ -379,7 +383,7 @@ impl StorageStage {
*current_key_idx += size_of::<Signature>(); *current_key_idx += size_of::<Signature>();
*current_key_idx %= storage_keys.len(); *current_key_idx %= storage_keys.len();
} else if solana_storage_api::check_id(&program_id) { } else if solana_storage_api::check_id(&program_id) {
match deserialize(&tx.instructions[i].userdata) { match deserialize(&tx.instructions[i].data) {
Ok(StorageProgram::SubmitMiningProof { Ok(StorageProgram::SubmitMiningProof {
entry_height: proof_entry_height, entry_height: proof_entry_height,
.. ..

View File

@ -1,7 +1,7 @@
//! The `vote_signer_proxy` votes on the `blockhash` of the bank at a regular cadence //! The `vote_signer_proxy` votes on the `blockhash` of the bank at a regular cadence
use crate::rpc_request::{RpcClient, RpcRequest};
use jsonrpc_core; use jsonrpc_core;
use solana_client::rpc_request::{RpcClient, RpcRequest};
use solana_sdk::pubkey::Pubkey; use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil, Signature}; use solana_sdk::signature::{Keypair, KeypairUtil, Signature};
use solana_vote_signer::rpc::LocalVoteSigner; use solana_vote_signer::rpc::LocalVoteSigner;
@ -120,6 +120,25 @@ pub mod tests {
bank.process_transaction(&tx).unwrap(); bank.process_transaction(&tx).unwrap();
} }
pub fn new_vote_account_with_delegate(
from_keypair: &Keypair,
voting_keypair: &Keypair,
delegate: &Pubkey,
bank: &Bank,
lamports: u64,
) {
let blockhash = bank.last_blockhash();
let tx = VoteTransaction::new_account_with_delegate(
from_keypair,
voting_keypair,
delegate,
blockhash,
lamports,
0,
);
bank.process_transaction(&tx).unwrap();
}
pub fn push_vote<T: KeypairUtil>(voting_keypair: &T, bank: &Bank, slot: u64) { pub fn push_vote<T: KeypairUtil>(voting_keypair: &T, bank: &Bank, slot: u64) {
let blockhash = bank.last_blockhash(); let blockhash = bank.last_blockhash();
let tx = let tx =

View File

@ -1,6 +1,6 @@
[package] [package]
name = "solana-drone" name = "solana-drone"
version = "0.12.0" version = "0.12.3"
description = "Solana Drone" description = "Solana Drone"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@ -19,9 +19,9 @@ clap = "2.31"
log = "0.4.2" log = "0.4.2"
serde = "1.0.89" serde = "1.0.89"
serde_derive = "1.0.89" serde_derive = "1.0.89"
solana-logger = { path = "../logger", version = "0.12.0" } solana-logger = { path = "../logger", version = "0.12.3" }
solana-sdk = { path = "../sdk", version = "0.12.0" } solana-sdk = { path = "../sdk", version = "0.12.3" }
solana-metrics = { path = "../metrics", version = "0.12.0" } solana-metrics = { path = "../metrics", version = "0.12.3" }
tokio = "0.1" tokio = "0.1"
tokio-codec = "0.1" tokio-codec = "0.1"

View File

@ -372,7 +372,7 @@ mod tests {
assert_eq!(tx.program_ids, vec![system_program::id()]); assert_eq!(tx.program_ids, vec![system_program::id()]);
assert_eq!(tx.instructions.len(), 1); assert_eq!(tx.instructions.len(), 1);
let instruction: SystemInstruction = deserialize(&tx.instructions[0].userdata).unwrap(); let instruction: SystemInstruction = deserialize(&tx.instructions[0].data).unwrap();
assert_eq!( assert_eq!(
instruction, instruction,
SystemInstruction::CreateAccount { SystemInstruction::CreateAccount {

View File

@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018" edition = "2018"
name = "solana-fullnode" name = "solana-fullnode"
description = "Blockchain, Rebuilt for Scale" description = "Blockchain, Rebuilt for Scale"
version = "0.12.0" version = "0.12.3"
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0" license = "Apache-2.0"
homepage = "https://solana.com/" homepage = "https://solana.com/"
@ -12,15 +12,15 @@ homepage = "https://solana.com/"
clap = "2.32.0" clap = "2.32.0"
log = "0.4.2" log = "0.4.2"
serde_json = "1.0.39" serde_json = "1.0.39"
solana = { path = "../core", version = "0.12.0" } solana = { path = "../core", version = "0.12.3" }
solana-drone = { path = "../drone", version = "0.12.0" } solana-drone = { path = "../drone", version = "0.12.3" }
solana-logger = { path = "../logger", version = "0.12.0" } solana-logger = { path = "../logger", version = "0.12.3" }
solana-netutil = { path = "../netutil", version = "0.12.0" } solana-netutil = { path = "../netutil", version = "0.12.3" }
solana-metrics = { path = "../metrics", version = "0.12.0" } solana-metrics = { path = "../metrics", version = "0.12.3" }
solana-runtime = { path = "../runtime", version = "0.12.0" } solana-runtime = { path = "../runtime", version = "0.12.3" }
solana-sdk = { path = "../sdk", version = "0.12.0" } solana-sdk = { path = "../sdk", version = "0.12.3" }
solana-vote-api = { path = "../programs/vote_api", version = "0.12.0" } solana-vote-api = { path = "../programs/vote_api", version = "0.12.3" }
solana-vote-signer = { path = "../vote-signer", version = "0.12.0" } solana-vote-signer = { path = "../vote-signer", version = "0.12.3" }
[features] [features]
chacha = ["solana/chacha"] chacha = ["solana/chacha"]

View File

@ -5,7 +5,6 @@ use solana::contact_info::ContactInfo;
use solana::fullnode::{Fullnode, FullnodeConfig}; use solana::fullnode::{Fullnode, FullnodeConfig};
use solana::local_vote_signer_service::LocalVoteSignerService; use solana::local_vote_signer_service::LocalVoteSignerService;
use solana::service::Service; use solana::service::Service;
use solana_sdk::genesis_block::GenesisBlock;
use solana_sdk::signature::{read_keypair, Keypair, KeypairUtil}; use solana_sdk::signature::{read_keypair, Keypair, KeypairUtil};
use std::fs::File; use std::fs::File;
use std::process::exit; use std::process::exit;
@ -70,11 +69,6 @@ fn main() {
.takes_value(true) .takes_value(true)
.help("Rendezvous with the cluster at this gossip entry point"), .help("Rendezvous with the cluster at this gossip entry point"),
) )
.arg(
Arg::with_name("no_leader_rotation")
.long("no-leader-rotation")
.help("Disable leader rotation"),
)
.arg( .arg(
Arg::with_name("no_voting") Arg::with_name("no_voting")
.long("no-voting") .long("no-voting")
@ -169,8 +163,6 @@ fn main() {
fullnode_config.voting_disabled = matches.is_present("no_voting"); fullnode_config.voting_disabled = matches.is_present("no_voting");
let use_only_bootstrap_leader = matches.is_present("no_leader_rotation");
if matches.is_present("enable_rpc_exit") { if matches.is_present("enable_rpc_exit") {
fullnode_config.rpc_config.enable_fullnode_exit = true; fullnode_config.rpc_config.enable_fullnode_exit = true;
} }
@ -233,11 +225,6 @@ fn main() {
node.info.rpc.set_port(rpc_port); node.info.rpc.set_port(rpc_port);
node.info.rpc_pubsub.set_port(rpc_pubsub_port); node.info.rpc_pubsub.set_port(rpc_pubsub_port);
let genesis_block = GenesisBlock::load(ledger_path).expect("Unable to load genesis block");
if use_only_bootstrap_leader && node.info.id != genesis_block.bootstrap_leader_id {
fullnode_config.voting_disabled = true;
}
let fullnode = Fullnode::new( let fullnode = Fullnode::new(
node, node,
&keypair, &keypair,

View File

@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018" edition = "2018"
name = "solana-genesis" name = "solana-genesis"
description = "Blockchain, Rebuilt for Scale" description = "Blockchain, Rebuilt for Scale"
version = "0.12.0" version = "0.12.3"
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0" license = "Apache-2.0"
homepage = "https://solana.com/" homepage = "https://solana.com/"
@ -11,8 +11,8 @@ homepage = "https://solana.com/"
[dependencies] [dependencies]
clap = "2.32.0" clap = "2.32.0"
serde_json = "1.0.39" serde_json = "1.0.39"
solana = { path = "../core", version = "0.12.0" } solana = { path = "../core", version = "0.12.3" }
solana-sdk = { path = "../sdk", version = "0.12.0" } solana-sdk = { path = "../sdk", version = "0.12.3" }
[features] [features]
cuda = ["solana/cuda"] cuda = ["solana/cuda"]

View File

@ -1,6 +1,6 @@
[package] [package]
name = "solana-keygen" name = "solana-keygen"
version = "0.12.0" version = "0.12.3"
description = "Solana key generation utility" description = "Solana key generation utility"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@ -14,7 +14,7 @@ cuda = []
[dependencies] [dependencies]
dirs = "1.0.5" dirs = "1.0.5"
clap = "2.31" clap = "2.31"
solana-sdk = { path = "../sdk", version = "0.12.0" } solana-sdk = { path = "../sdk", version = "0.12.3" }
[[bin]] [[bin]]
name = "solana-keygen" name = "solana-keygen"

View File

@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018" edition = "2018"
name = "solana-ledger-tool" name = "solana-ledger-tool"
description = "Blockchain, Rebuilt for Scale" description = "Blockchain, Rebuilt for Scale"
version = "0.12.0" version = "0.12.3"
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0" license = "Apache-2.0"
homepage = "https://solana.com/" homepage = "https://solana.com/"
@ -11,10 +11,10 @@ homepage = "https://solana.com/"
[dependencies] [dependencies]
clap = "2.32.0" clap = "2.32.0"
serde_json = "1.0.39" serde_json = "1.0.39"
solana = { path = "../core", version = "0.12.0" } solana = { path = "../core", version = "0.12.3" }
solana-sdk = { path = "../sdk", version = "0.12.0" } solana-sdk = { path = "../sdk", version = "0.12.3" }
solana-logger = { path = "../logger", version = "0.12.0" } solana-logger = { path = "../logger", version = "0.12.3" }
solana-runtime = { path = "../runtime", version = "0.12.0" } solana-runtime = { path = "../runtime", version = "0.12.3" }
[dev-dependencies] [dev-dependencies]
assert_cmd = "0.11" assert_cmd = "0.11"

View File

@ -1,6 +1,6 @@
[package] [package]
name = "solana-logger" name = "solana-logger"
version = "0.12.0" version = "0.12.3"
description = "Solana Logger" description = "Solana Logger"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"

View File

@ -1,6 +1,6 @@
[package] [package]
name = "solana-metrics" name = "solana-metrics"
version = "0.12.0" version = "0.12.3"
description = "Solana Metrics" description = "Solana Metrics"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@ -14,7 +14,7 @@ log = "0.4.2"
reqwest = "0.9.11" reqwest = "0.9.11"
lazy_static = "1.3.0" lazy_static = "1.3.0"
sys-info = "0.5.6" sys-info = "0.5.6"
solana-sdk = { path = "../sdk", version = "0.12.0" } solana-sdk = { path = "../sdk", version = "0.12.3" }
[dev-dependencies] [dev-dependencies]
rand = "0.6.5" rand = "0.6.5"

View File

@ -15,8 +15,8 @@
"editable": true, "editable": true,
"gnetId": null, "gnetId": null,
"graphTooltip": 0, "graphTooltip": 0,
"id": 251, "id": 399,
"iteration": 1549301870214, "iteration": 1553559957575,
"links": [ "links": [
{ {
"asDropdown": true, "asDropdown": true,
@ -1412,7 +1412,7 @@
} }
], ],
"thresholds": "", "thresholds": "",
"title": "Last Leader Rotation Tick", "title": "Last Leader Rotation Slot",
"type": "singlestat", "type": "singlestat",
"valueFontSize": "70%", "valueFontSize": "70%",
"valueMaps": [ "valueMaps": [
@ -3811,7 +3811,7 @@
"x": 0, "x": 0,
"y": 49 "y": 49
}, },
"id": 42, "id": 34,
"legend": { "legend": {
"alignAsTable": false, "alignAsTable": false,
"avg": false, "avg": false,
@ -4001,7 +4001,7 @@
"x": 12, "x": 12,
"y": 49 "y": 49
}, },
"id": 41, "id": 35,
"legend": { "legend": {
"alignAsTable": false, "alignAsTable": false,
"avg": false, "avg": false,
@ -4156,7 +4156,7 @@
"x": 0, "x": 0,
"y": 54 "y": 54
}, },
"id": 34, "id": 36,
"legend": { "legend": {
"alignAsTable": false, "alignAsTable": false,
"avg": false, "avg": false,
@ -4496,7 +4496,7 @@
"x": 12, "x": 12,
"y": 54 "y": 54
}, },
"id": 40, "id": 37,
"legend": { "legend": {
"alignAsTable": false, "alignAsTable": false,
"avg": false, "avg": false,
@ -4727,7 +4727,7 @@
"x": 0, "x": 0,
"y": 60 "y": 60
}, },
"id": 35, "id": 38,
"legend": { "legend": {
"alignAsTable": false, "alignAsTable": false,
"avg": false, "avg": false,
@ -4997,7 +4997,7 @@
"x": 12, "x": 12,
"y": 60 "y": 60
}, },
"id": 43, "id": 39,
"legend": { "legend": {
"alignAsTable": false, "alignAsTable": false,
"avg": false, "avg": false,
@ -5100,6 +5100,249 @@
"alignLevel": null "alignLevel": null
} }
}, },
{
"aliasColors": {
"cluster-info.repair": "#ba43a9",
"counter-replay_stage-new_leader.last": "#00ffbb",
"window-service.receive": "#b7dbab",
"window-stage.consumed": "#5195ce"
},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "Solana Metrics (read-only)",
"fill": 1,
"gridPos": {
"h": 6,
"w": 12,
"x": 0,
"y": 66
},
"id": 44,
"legend": {
"alignAsTable": false,
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": false,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"percentage": false,
"pointradius": 2,
"points": true,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"groupBy": [
{
"params": [
"$__interval"
],
"type": "time"
},
{
"params": [
"null"
],
"type": "fill"
}
],
"measurement": "counter-cluster_info-vote-count",
"orderByTime": "ASC",
"policy": "autogen",
"query": "SELECT last(\"latest\") - last(\"root\") FROM \"$testnet\".\"autogen\".\"counter-locktower-vote\" WHERE host_id =~ /$hostid/ AND $timeFilter GROUP BY time($__interval)",
"rawQuery": true,
"refId": "A",
"resultFormat": "time_series",
"select": [
[
{
"params": [
"count"
],
"type": "field"
},
{
"params": [],
"type": "sum"
}
]
],
"tags": []
}
],
"thresholds": [],
"timeFrom": null,
"timeShift": null,
"title": "Locktower Distance in Latest and Root Slot ($hostid)",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {
"cluster-info.repair": "#ba43a9",
"counter-locktower-vote.last": "#00ffbb",
"counter-replay_stage-new_leader.last": "#00ffbb",
"window-service.receive": "#b7dbab",
"window-stage.consumed": "#5195ce"
},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "Solana Metrics (read-only)",
"fill": 1,
"gridPos": {
"h": 6,
"w": 12,
"x": 12,
"y": 66
},
"id": 45,
"legend": {
"alignAsTable": false,
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": false,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"percentage": false,
"pointradius": 2,
"points": true,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"groupBy": [
{
"params": [
"$__interval"
],
"type": "time"
},
{
"params": [
"null"
],
"type": "fill"
}
],
"measurement": "counter-cluster_info-vote-count",
"orderByTime": "ASC",
"policy": "autogen",
"query": "SELECT last(\"root\") FROM \"$testnet\".\"autogen\".\"counter-locktower-vote\" WHERE host_id =~ /$hostid/ AND $timeFilter GROUP BY time($__interval)",
"rawQuery": true,
"refId": "A",
"resultFormat": "time_series",
"select": [
[
{
"params": [
"count"
],
"type": "field"
},
{
"params": [],
"type": "sum"
}
]
],
"tags": []
}
],
"thresholds": [],
"timeFrom": null,
"timeShift": null,
"title": "Locktower Root Slot ($hostid)",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{ {
"aliasColors": {}, "aliasColors": {},
"bars": false, "bars": false,
@ -5111,9 +5354,9 @@
"h": 5, "h": 5,
"w": 12, "w": 12,
"x": 0, "x": 0,
"y": 66 "y": 72
}, },
"id": 36, "id": 40,
"legend": { "legend": {
"alignAsTable": false, "alignAsTable": false,
"avg": false, "avg": false,
@ -5343,9 +5586,9 @@
"h": 5, "h": 5,
"w": 12, "w": 12,
"x": 12, "x": 12,
"y": 66 "y": 72
}, },
"id": 38, "id": 41,
"legend": { "legend": {
"alignAsTable": false, "alignAsTable": false,
"avg": false, "avg": false,
@ -5496,9 +5739,9 @@
"h": 1, "h": 1,
"w": 24, "w": 24,
"x": 0, "x": 0,
"y": 71 "y": 77
}, },
"id": 44, "id": 42,
"panels": [], "panels": [],
"title": "Signature Verification", "title": "Signature Verification",
"type": "row" "type": "row"
@ -5514,9 +5757,9 @@
"h": 5, "h": 5,
"w": 12, "w": 12,
"x": 0, "x": 0,
"y": 72 "y": 78
}, },
"id": 45, "id": 43,
"legend": { "legend": {
"avg": false, "avg": false,
"current": false, "current": false,
@ -5701,5 +5944,5 @@
"timezone": "", "timezone": "",
"title": "Testnet Monitor (edge)", "title": "Testnet Monitor (edge)",
"uid": "testnet-edge", "uid": "testnet-edge",
"version": 116 "version": 117
} }

View File

@ -10,6 +10,46 @@ source "$here"/common.sh
# shellcheck source=scripts/oom-score-adj.sh # shellcheck source=scripts/oom-score-adj.sh
source "$here"/../scripts/oom-score-adj.sh source "$here"/../scripts/oom-score-adj.sh
if [[ $1 = -h ]]; then
fullnode_usage "$@"
fi
extra_fullnode_args=()
setup_stakes=true
while [[ ${1:0:1} = - ]]; do
if [[ $1 = --blockstream ]]; then
extra_fullnode_args+=("$1" "$2")
shift 2
elif [[ $1 = --enable-rpc-exit ]]; then
extra_fullnode_args+=("$1")
shift
elif [[ $1 = --init-complete-file ]]; then
extra_fullnode_args+=("$1" "$2")
shift 2
elif [[ $1 = --only-bootstrap-stake ]]; then
setup_stakes=false
shift
elif [[ $1 = --public-address ]]; then
extra_fullnode_args+=("$1")
shift
elif [[ $1 = --no-signer ]]; then
extra_fullnode_args+=("$1")
shift
elif [[ $1 = --rpc-port ]]; then
extra_fullnode_args+=("$1" "$2")
shift 2
else
echo "Unknown argument: $1"
exit 1
fi
done
if [[ -n $3 ]]; then
fullnode_usage "$@"
fi
[[ -f "$SOLANA_CONFIG_DIR"/bootstrap-leader-id.json ]] || { [[ -f "$SOLANA_CONFIG_DIR"/bootstrap-leader-id.json ]] || {
echo "$SOLANA_CONFIG_DIR/bootstrap-leader-id.json not found, create it by running:" echo "$SOLANA_CONFIG_DIR/bootstrap-leader-id.json not found, create it by running:"
echo echo
@ -27,12 +67,10 @@ tune_system
$solana_ledger_tool --ledger "$SOLANA_CONFIG_DIR"/bootstrap-leader-ledger verify $solana_ledger_tool --ledger "$SOLANA_CONFIG_DIR"/bootstrap-leader-ledger verify
bootstrap_leader_id_path="$SOLANA_CONFIG_DIR"/bootstrap-leader-id.json bootstrap_leader_id_path="$SOLANA_CONFIG_DIR"/bootstrap-leader-id.json
bootstrap_leader_staker_id_path="$SOLANA_CONFIG_DIR"/bootstrap-leader-staker-id.json bootstrap_leader_staker_id_path="$SOLANA_CONFIG_DIR"/bootstrap-leader-staker-id.json
bootstrap_leader_staker_id=$($solana_wallet --keypair "$bootstrap_leader_staker_id_path" address) bootstrap_leader_staker_id=$($solana_wallet --keypair "$bootstrap_leader_staker_id_path" address)
set -x
trap 'kill "$pid" && wait "$pid"' INT TERM ERR trap 'kill "$pid" && wait "$pid"' INT TERM ERR
$program \ $program \
--identity "$bootstrap_leader_id_path" \ --identity "$bootstrap_leader_id_path" \
@ -42,11 +80,13 @@ $program \
--accounts "$SOLANA_CONFIG_DIR"/bootstrap-leader-accounts \ --accounts "$SOLANA_CONFIG_DIR"/bootstrap-leader-accounts \
--rpc-port 8899 \ --rpc-port 8899 \
--rpc-drone-address 127.0.0.1:9900 \ --rpc-drone-address 127.0.0.1:9900 \
"$@" \ "${extra_fullnode_args[@]}" \
> >($bootstrap_leader_logger) 2>&1 & > >($bootstrap_leader_logger) 2>&1 &
pid=$! pid=$!
oom_score_adj "$pid" 1000 oom_score_adj "$pid" 1000
setup_fullnode_staking 127.0.0.1 "$bootstrap_leader_id_path" "$bootstrap_leader_staker_id_path" if [[ $setup_stakes = true ]] ; then
setup_fullnode_staking 127.0.0.1 "$bootstrap_leader_id_path" "$bootstrap_leader_staker_id_path"
fi
wait "$pid" wait "$pid"

View File

@ -21,7 +21,8 @@ if [[ $(uname) != Linux ]]; then
fi fi
fi fi
if [[ -n $USE_INSTALL ]]; then # Assume |./scripts/cargo-install-all.sh| was run
if [[ -n $USE_INSTALL || ! -f "$(dirname "${BASH_SOURCE[0]}")"/../Cargo.toml ]]; then
solana_program() { solana_program() {
declare program="$1" declare program="$1"
printf "solana-%s" "$program" printf "solana-%s" "$program"
@ -150,6 +151,11 @@ setup_fullnode_staking() {
declare staker_id declare staker_id
staker_id=$($solana_wallet --keypair "$staker_id_path" address) staker_id=$($solana_wallet --keypair "$staker_id_path" address)
if [[ -f "$staker_id_path".configured ]]; then
echo "Staking account has already been configured"
return 0
fi
# A fullnode requires 43 lamports to function: # A fullnode requires 43 lamports to function:
# - one lamport to keep the node identity public key valid. TODO: really?? # - one lamport to keep the node identity public key valid. TODO: really??
# - 42 more for the staker account we fund # - 42 more for the staker account we fund
@ -158,19 +164,44 @@ setup_fullnode_staking() {
# A little wrong, fund the staking account from the # A little wrong, fund the staking account from the
# to the node. Maybe next time consider doing this the opposite # to the node. Maybe next time consider doing this the opposite
# way or use an ephemeral account # way or use an ephemeral account
$solana_wallet --keypair "$fullnode_id_path" \ $solana_wallet --keypair "$fullnode_id_path" --host "$drone_address" \
create-staking-account "$staker_id" 42 || return $? create-staking-account "$staker_id" 42 || return $?
# as the staker, set the node as the delegate and the staker as # as the staker, set the node as the delegate and the staker as
# the vote-signer # the vote-signer
$solana_wallet --keypair "$staker_id_path" \ $solana_wallet --keypair "$staker_id_path" --host "$drone_address" \
configure-staking-account \ configure-staking-account \
--delegate-account "$fullnode_id" \ --delegate-account "$fullnode_id" \
--authorize-voter "$staker_id" || return $? --authorize-voter "$staker_id" || return $?
touch "$staker_id_path".configured
return 0 return 0
} }
fullnode_usage() {
if [[ -n $1 ]]; then
echo "$*"
echo
fi
cat <<EOF
usage: $0 [-x] [--blockstream PATH] [--init-complete-file FILE] [--only-bootstrap-stake] [--no-signer] [--rpc-port port] [rsync network path to bootstrap leader configuration] [network entry point]
Start a full node on the specified network
-x - start a new, dynamically-configured full node. Does not apply to the bootstrap leader
-X [label] - start or restart a dynamically-configured full node with
the specified label. Does not apply to the bootstrap leader
--blockstream PATH - open blockstream at this unix domain socket location
--init-complete-file FILE - create this file, if it doesn't already exist, once node initialization is complete
--only-bootstrap-stake - only stake the bootstrap leader, effectively disabling leader rotation
--public-address - advertise public machine address in gossip. By default the local machine address is advertised
--no-signer - start node without vote signer
--rpc-port port - custom RPC port for this node
EOF
exit 1
}
# The directory on the bootstrap leader that is rsynced by other full nodes as # The directory on the bootstrap leader that is rsynced by other full nodes as
# they boot (TODO: Eventually this should go away) # they boot (TODO: Eventually this should go away)

View File

@ -9,37 +9,15 @@ source "$here"/common.sh
# shellcheck source=scripts/oom-score-adj.sh # shellcheck source=scripts/oom-score-adj.sh
source "$here"/../scripts/oom-score-adj.sh source "$here"/../scripts/oom-score-adj.sh
usage() {
if [[ -n $1 ]]; then
echo "$*"
echo
fi
cat <<EOF
usage: $0 [-x] [--blockstream PATH] [--init-complete-file FILE] [--no-leader-rotation] [--no-signer] [--rpc-port port] [rsync network path to bootstrap leader configuration] [network entry point]
Start a full node on the specified network
-x - start a new, dynamically-configured full node
-X [label] - start or restart a dynamically-configured full node with
the specified label
--blockstream PATH - open blockstream at this unix domain socket location
--init-complete-file FILE - create this file, if it doesn't already exist, once node initialization is complete
--no-leader-rotation - disable leader rotation
--public-address - advertise public machine address in gossip. By default the local machine address is advertised
--no-signer - start node without vote signer
--rpc-port port - custom RPC port for this node
EOF
exit 1
}
if [[ $1 = -h ]]; then if [[ $1 = -h ]]; then
usage fullnode_usage "$@"
fi fi
gossip_port=9000 gossip_port=9000
extra_fullnode_args=() extra_fullnode_args=()
self_setup=0 self_setup=0
setup_stakes=1
poll_for_new_genesis_block=0
while [[ ${1:0:1} = - ]]; do while [[ ${1:0:1} = - ]]; do
if [[ $1 = -X ]]; then if [[ $1 = -X ]]; then
@ -50,6 +28,9 @@ while [[ ${1:0:1} = - ]]; do
self_setup=1 self_setup=1
self_setup_label=$$ self_setup_label=$$
shift shift
elif [[ $1 = --poll-for-new-genesis-block ]]; then
poll_for_new_genesis_block=1
shift
elif [[ $1 = --blockstream ]]; then elif [[ $1 = --blockstream ]]; then
extra_fullnode_args+=("$1" "$2") extra_fullnode_args+=("$1" "$2")
shift 2 shift 2
@ -59,15 +40,18 @@ while [[ ${1:0:1} = - ]]; do
elif [[ $1 = --init-complete-file ]]; then elif [[ $1 = --init-complete-file ]]; then
extra_fullnode_args+=("$1" "$2") extra_fullnode_args+=("$1" "$2")
shift 2 shift 2
elif [[ $1 = --no-leader-rotation ]]; then elif [[ $1 = --only-bootstrap-stake ]]; then
extra_fullnode_args+=("$1") setup_stakes=0
shift shift
elif [[ $1 = --public-address ]]; then elif [[ $1 = --public-address ]]; then
extra_fullnode_args+=("$1") extra_fullnode_args+=("$1")
shift shift
elif [[ $1 = --no-signer ]]; then elif [[ $1 = --no-voting ]]; then
extra_fullnode_args+=("$1") extra_fullnode_args+=("$1")
shift shift
elif [[ $1 = --gossip-port ]]; then
gossip_port=$2
shift 2
elif [[ $1 = --rpc-port ]]; then elif [[ $1 = --rpc-port ]]; then
extra_fullnode_args+=("$1" "$2") extra_fullnode_args+=("$1" "$2")
shift 2 shift 2
@ -78,7 +62,7 @@ while [[ ${1:0:1} = - ]]; do
done done
if [[ -n $3 ]]; then if [[ -n $3 ]]; then
usage fullnode_usage "$@"
fi fi
find_leader() { find_leader() {
@ -86,7 +70,7 @@ find_leader() {
declare shift=0 declare shift=0
if [[ -z $1 ]]; then if [[ -z $1 ]]; then
leader=${here}/.. # Default to local tree for rsync leader=$PWD # Default to local tree for rsync
leader_address=127.0.0.1:8001 # Default to local leader leader_address=127.0.0.1:8001 # Default to local leader
elif [[ -z $2 ]]; then elif [[ -z $2 ]]; then
leader=$1 leader=$1
@ -189,19 +173,23 @@ rsync_url() { # adds the 'rsync://` prefix to URLs that need it
rsync_leader_url=$(rsync_url "$leader") rsync_leader_url=$(rsync_url "$leader")
set -ex set -e
if [[ ! -d "$ledger_config_dir" ]]; then
$rsync -vPr "$rsync_leader_url"/config/ledger/ "$ledger_config_dir" secs_to_next_genesis_poll=0
[[ -d $ledger_config_dir ]] || { PS4="$(basename "$0"): "
echo "Unable to retrieve ledger from $rsync_leader_url" while true; do
exit 1 set -x
} if [[ ! -d "$SOLANA_RSYNC_CONFIG_DIR"/ledger ]]; then
$rsync -vPr "$rsync_leader_url"/config/ledger "$SOLANA_RSYNC_CONFIG_DIR"
fi
if [[ ! -d "$ledger_config_dir" ]]; then
cp -a "$SOLANA_RSYNC_CONFIG_DIR"/ledger/ "$ledger_config_dir"
$solana_ledger_tool --ledger "$ledger_config_dir" verify $solana_ledger_tool --ledger "$ledger_config_dir" verify
fi
fi trap 'kill "$pid" && wait "$pid"' INT TERM ERR
$program \
trap 'kill "$pid" && wait "$pid"' INT TERM ERR
$program \
--gossip-port "$gossip_port" \ --gossip-port "$gossip_port" \
--identity "$fullnode_id_path" \ --identity "$fullnode_id_path" \
--voting-keypair "$fullnode_staker_id_path" \ --voting-keypair "$fullnode_staker_id_path" \
@ -212,9 +200,35 @@ $program \
--rpc-drone-address "${leader_address%:*}:9900" \ --rpc-drone-address "${leader_address%:*}:9900" \
"${extra_fullnode_args[@]}" \ "${extra_fullnode_args[@]}" \
> >($fullnode_logger) 2>&1 & > >($fullnode_logger) 2>&1 &
pid=$! pid=$!
oom_score_adj "$pid" 1000 oom_score_adj "$pid" 1000
setup_fullnode_staking "${leader_address%:*}" "$fullnode_id_path" "$fullnode_staker_id_path" if ((setup_stakes)); then
setup_fullnode_staking "${leader_address%:*}" "$fullnode_id_path" "$fullnode_staker_id_path"
fi
set +x
wait "$pid" while true; do
if ! kill -0 "$pid"; then
wait "$pid"
exit 0
fi
sleep 1
if ((poll_for_new_genesis_block)); then
if ((!secs_to_next_genesis_poll)); then
secs_to_next_genesis_poll=60
$rsync -r "$rsync_leader_url"/config/ledger "$SOLANA_RSYNC_CONFIG_DIR" || true
if [[ -n $(diff "$SOLANA_RSYNC_CONFIG_DIR"/ledger/genesis.json "$ledger_config_dir"/genesis.json 2>&1) ]]; then
echo "############## New genesis detected, restarting fullnode ##############"
rm -rf "$ledger_config_dir"
kill "$pid" || true
wait "$pid" || true
break
fi
fi
((secs_to_next_genesis_poll--))
fi
done
done

View File

@ -15,7 +15,7 @@ gce)
cpuBootstrapLeaderMachineType=n1-standard-16 cpuBootstrapLeaderMachineType=n1-standard-16
gpuBootstrapLeaderMachineType="$cpuBootstrapLeaderMachineType --accelerator count=4,type=nvidia-tesla-k80" gpuBootstrapLeaderMachineType="$cpuBootstrapLeaderMachineType --accelerator count=4,type=nvidia-tesla-k80"
bootstrapLeaderMachineType=$cpuBootstrapLeaderMachineType bootstrapLeaderMachineType=$cpuBootstrapLeaderMachineType
fullNodeMachineType=n1-standard-16 fullNodeMachineType=$cpuBootstrapLeaderMachineType
clientMachineType=n1-standard-16 clientMachineType=n1-standard-16
blockstreamerMachineType=n1-standard-8 blockstreamerMachineType=n1-standard-8
;; ;;
@ -133,10 +133,12 @@ while getopts "h?p:Pn:c:z:gG:a:d:bu" opt; do
g) g)
enableGpu=true enableGpu=true
bootstrapLeaderMachineType=$gpuBootstrapLeaderMachineType bootstrapLeaderMachineType=$gpuBootstrapLeaderMachineType
fullNodeMachineType=$bootstrapLeaderMachineType
;; ;;
G) G)
enableGpu=true enableGpu=true
bootstrapLeaderMachineType="$OPTARG" bootstrapLeaderMachineType="$OPTARG"
fullNodeMachineType=$bootstrapLeaderMachineType
;; ;;
a) a)
customAddress=$OPTARG customAddress=$OPTARG
@ -225,10 +227,6 @@ ec2)
;; ;;
esac esac
if $leaderRotation; then
fullNodeMachineType=$bootstrapLeaderMachineType
fi
# cloud_ForEachInstance [cmd] [extra args to cmd] # cloud_ForEachInstance [cmd] [extra args to cmd]
# #
# Execute a command for each element in the `instances` array # Execute a command for each element in the `instances` array

View File

@ -12,11 +12,12 @@ usage() {
echo "Error: $*" echo "Error: $*"
fi fi
cat <<EOF cat <<EOF
usage: $0 [-e] [-d] [username] usage: $0 [-e] [-d] [-c] [username]
Creates a testnet dev metrics database Creates a testnet dev metrics database
username InfluxDB user with access to create a new database username InfluxDB user with access to create a new database
-c Use Influx Cloud instance
-d Delete the database instead of creating it -d Delete the database instead of creating it
-e Assume database already exists and SOLANA_METRICS_CONFIG is -e Assume database already exists and SOLANA_METRICS_CONFIG is
defined in the environment already defined in the environment already
@ -29,12 +30,16 @@ loadConfigFile
useEnv=false useEnv=false
delete=false delete=false
while getopts "hde" opt; do host="https://metrics.solana.com:8086"
while getopts "hcde" opt; do
case $opt in case $opt in
h|\?) h|\?)
usage usage
exit 0 exit 0
;; ;;
c)
host="https://clocktower-f1d56615.influxcloud.net:8086"
;;
d) d)
delete=true delete=true
;; ;;
@ -62,7 +67,7 @@ else
query() { query() {
echo "$*" echo "$*"
curl -XPOST \ curl -XPOST \
"https://metrics.solana.com:8086/query?u=${username}&p=${password}" \ "$host/query?u=${username}&p=${password}" \
--data-urlencode "q=$*" --data-urlencode "q=$*"
} }
@ -73,7 +78,7 @@ else
query "GRANT READ ON \"$netBasename\" TO \"ro\"" query "GRANT READ ON \"$netBasename\" TO \"ro\""
query "GRANT WRITE ON \"$netBasename\" TO \"scratch_writer\"" query "GRANT WRITE ON \"$netBasename\" TO \"scratch_writer\""
SOLANA_METRICS_CONFIG="db=$netBasename,u=scratch_writer,p=topsecret" SOLANA_METRICS_CONFIG="host=$host,db=$netBasename,u=scratch_writer,p=topsecret"
fi fi
echo "export SOLANA_METRICS_CONFIG=\"$SOLANA_METRICS_CONFIG\"" >> "$configFile" echo "export SOLANA_METRICS_CONFIG=\"$SOLANA_METRICS_CONFIG\"" >> "$configFile"

View File

@ -117,7 +117,8 @@ loadConfigFile
build() { build() {
declare MAYBE_DOCKER= declare MAYBE_DOCKER=
if [[ $(uname) != Linux ]]; then if [[ $(uname) != Linux ]]; then
MAYBE_DOCKER="ci/docker-run.sh solanalabs/rust" source ci/rust-version.sh
MAYBE_DOCKER="ci/docker-run.sh +$rust_stable_docker_image"
fi fi
SECONDS=0 SECONDS=0
( (
@ -277,7 +278,8 @@ start() {
rm -f "$SOLANA_ROOT"/solana-release.tar.bz2 rm -f "$SOLANA_ROOT"/solana-release.tar.bz2
( (
set -x set -x
curl -o "$SOLANA_ROOT"/solana-release.tar.bz2 http://solana-release.s3.amazonaws.com/"$releaseChannel"/solana-release.tar.bz2 curl -o "$SOLANA_ROOT"/solana-release.tar.bz2 \
http://solana-release.s3.amazonaws.com/"$releaseChannel"/solana-release-x86_64-unknown-linux-gnu.tar.bz2
) )
tarballFilename="$SOLANA_ROOT"/solana-release.tar.bz2 tarballFilename="$SOLANA_ROOT"/solana-release.tar.bz2
fi fi
@ -285,7 +287,7 @@ start() {
set -x set -x
rm -rf "$SOLANA_ROOT"/solana-release rm -rf "$SOLANA_ROOT"/solana-release
(cd "$SOLANA_ROOT"; tar jxv) < "$tarballFilename" (cd "$SOLANA_ROOT"; tar jxv) < "$tarballFilename"
cat "$SOLANA_ROOT"/solana-release/version.txt cat "$SOLANA_ROOT"/solana-release/version.yml
) )
;; ;;
local) local)
@ -370,7 +372,10 @@ start() {
case $deployMethod in case $deployMethod in
tar) tar)
networkVersion="$( networkVersion="$(
tail -n1 "$SOLANA_ROOT"/solana-release/version.txt || echo "tar-unknown" (
set -o pipefail
grep "^version: " "$SOLANA_ROOT"/solana-release/version.yml | head -n1 | cut -d\ -f2
) || echo "tar-unknown"
)" )"
;; ;;
local) local)
@ -406,7 +411,7 @@ stopNode() {
pgid=\$(ps opgid= \$(cat \$pid) | tr -d '[:space:]') pgid=\$(ps opgid= \$(cat \$pid) | tr -d '[:space:]')
sudo kill -- -\$pgid sudo kill -- -\$pgid
done done
for pattern in solana- remote-; do for pattern in node solana- remote-; do
pkill -9 \$pattern pkill -9 \$pattern
done done
" "

View File

@ -57,6 +57,7 @@ clientCommand="\
--duration 7500 \ --duration 7500 \
--sustained \ --sustained \
--threads $threadCount \ --threads $threadCount \
--tx_count 10000 \
" "
tmux new -s solana-bench-tps -d " tmux new -s solana-bench-tps -d "

View File

@ -76,7 +76,7 @@ local|tar)
maybeNoLeaderRotation= maybeNoLeaderRotation=
if ! $leaderRotation; then if ! $leaderRotation; then
maybeNoLeaderRotation="--no-leader-rotation" maybeNoLeaderRotation="--only-bootstrap-stake"
fi fi
maybePublicAddress= maybePublicAddress=
if $publicNetwork; then if $publicNetwork; then
@ -96,7 +96,7 @@ local|tar)
args=() args=()
if ! $leaderRotation; then if ! $leaderRotation; then
args+=("--no-leader-rotation") args+=("--only-bootstrap-stake")
fi fi
if $publicNetwork; then if $publicNetwork; then
args+=("--public-address") args+=("--public-address")
@ -104,18 +104,41 @@ local|tar)
if [[ $nodeType = blockstreamer ]]; then if [[ $nodeType = blockstreamer ]]; then
args+=( args+=(
--blockstream /tmp/solana-blockstream.sock --blockstream /tmp/solana-blockstream.sock
--no-signer --no-voting
) )
fi fi
args+=(
--gossip-port 8001
--rpc-port 8899
)
set -x set -x
if [[ $skipSetup != true ]]; then if [[ $skipSetup != true ]]; then
./multinode-demo/setup.sh -t fullnode ./multinode-demo/setup.sh -t fullnode
fi fi
if [[ $nodeType = blockstreamer ]]; then if [[ $nodeType = blockstreamer ]]; then
# Sneak the mint-id.json from the bootstrap leader and run another drone
# with it on the blockstreamer node. Typically the blockstreamer node has
# a static IP/DNS name for hosting the blockexplorer web app, and is
# a location that somebody would expect to be able to airdrop from
scp "$entrypointIp":~/solana/config-local/mint-id.json config-local/
./multinode-demo/drone.sh > drone.log 2>&1 &
npm install @solana/blockexplorer@1 npm install @solana/blockexplorer@1
npx solana-blockexplorer > blockexplorer.log 2>&1 & npx solana-blockexplorer > blockexplorer.log 2>&1 &
# Confirm the blockexplorer is accessible
curl --head --retry 3 --retry-connrefused http://localhost:5000/
# Redirect port 80 to port 5000
sudo iptables -A INPUT -p tcp --dport 80 -j ACCEPT
sudo iptables -A INPUT -p tcp --dport 5000 -j ACCEPT
sudo iptables -A PREROUTING -t nat -p tcp --dport 80 -j REDIRECT --to-port 5000
# Confirm the blockexplorer is now globally accessible
curl --head "$(curl ifconfig.io)"
fi fi
./multinode-demo/fullnode.sh "${args[@]}" "$entrypointIp":~/solana "$entrypointIp:8001" > fullnode.log 2>&1 & ./multinode-demo/fullnode.sh "${args[@]}" "$entrypointIp":~/solana "$entrypointIp:8001" > fullnode.log 2>&1 &
;; ;;

View File

@ -72,7 +72,7 @@ local|tar)
solana_keygen=solana-keygen solana_keygen=solana-keygen
ledger=config-local/bootstrap-leader-ledger ledger=config-local/bootstrap-leader-ledger
client_id=config/client-id.json client_id=config-local/client-id.json
;; ;;
*) *)
echo "Unknown deployment method: $deployMethod" echo "Unknown deployment method: $deployMethod"

View File

@ -59,7 +59,7 @@ __cloud_FindInstances() {
"Name=tag:name,Values=$filter" \ "Name=tag:name,Values=$filter" \
"Name=instance-state-name,Values=pending,running" \ "Name=instance-state-name,Values=pending,running" \
--query "Reservations[].Instances[].[InstanceId,PublicIpAddress,PrivateIpAddress]" \ --query "Reservations[].Instances[].[InstanceId,PublicIpAddress,PrivateIpAddress]" \
--output text --output text \
) )
} }
@ -223,11 +223,32 @@ cloud_DeleteInstances() {
echo No instances to delete echo No instances to delete
return return
fi fi
declare names=("${instances[@]/:*/}") declare names=("${instances[@]/:*/}")
( (
set -x set -x
aws ec2 terminate-instances --region "$region" --instance-ids "${names[@]}" aws ec2 terminate-instances --region "$region" --instance-ids "${names[@]}"
) )
# Wait until the instances are terminated
for name in "${names[@]}"; do
while true; do
declare instanceState
instanceState=$(\
aws ec2 describe-instances \
--region "$region" \
--instance-ids "$name" \
--query "Reservations[].Instances[].State.Name" \
--output text \
)
echo "$name: $instanceState"
if [[ $instanceState = terminated ]]; then
break;
fi
sleep 2
done
done
} }

View File

@ -1,6 +1,6 @@
[package] [package]
name = "solana-netutil" name = "solana-netutil"
version = "0.12.0" version = "0.12.3"
description = "Solana Network Utilities" description = "Solana Network Utilities"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@ -18,7 +18,7 @@ reqwest = "0.9.0"
socket2 = "0.3.8" socket2 = "0.3.8"
[dev-dependencies] [dev-dependencies]
solana-logger = { path = "../logger", version = "0.12.0" } solana-logger = { path = "../logger", version = "0.12.3" }
[lib] [lib]
name = "solana_netutil" name = "solana_netutil"

View File

@ -1,7 +1,7 @@
[package] [package]
name = "solana-bpf-programs" name = "solana-bpf-programs"
description = "Blockchain, Rebuilt for Scale" description = "Blockchain, Rebuilt for Scale"
version = "0.12.0" version = "0.12.3"
documentation = "https://docs.rs/solana" documentation = "https://docs.rs/solana"
homepage = "https://solana.com/" homepage = "https://solana.com/"
readme = "README.md" readme = "README.md"
@ -22,10 +22,10 @@ bincode = "1.1.2"
byteorder = "1.3.1" byteorder = "1.3.1"
elf = "0.0.10" elf = "0.0.10"
solana_rbpf = "=0.1.10" solana_rbpf = "=0.1.10"
solana-bpfloader = { path = "../bpf_loader", version = "0.12.0" } solana-bpfloader = { path = "../bpf_loader", version = "0.12.3" }
solana-logger = { path = "../../logger", version = "0.12.0" } solana-logger = { path = "../../logger", version = "0.12.3" }
solana-runtime = { path = "../../runtime", version = "0.12.0" } solana-runtime = { path = "../../runtime", version = "0.12.3" }
solana-sdk = { path = "../../sdk", version = "0.12.0" } solana-sdk = { path = "../../sdk", version = "0.12.3" }
[[bench]] [[bench]]
name = "bpf_loader" name = "bpf_loader"

View File

@ -3,7 +3,7 @@
[package] [package]
name = "solana-bpf-rust-noop" name = "solana-bpf-rust-noop"
version = "0.12.0" version = "0.12.3"
description = "Solana BPF noop program written in Rust" description = "Solana BPF noop program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"

View File

@ -123,8 +123,8 @@ pub fn sol_log_params(ka: &[SolKeyedAccount], data: &[u8]) {
sol_log_key(&k.key); sol_log_key(&k.key);
sol_log("- Lamports"); sol_log("- Lamports");
sol_log_64(0, 0, 0, 0, k.lamports); sol_log_64(0, 0, 0, 0, k.lamports);
sol_log("- Userdata"); sol_log("- AccountData");
sol_log_slice(k.userdata); sol_log_slice(k.data);
sol_log("- Owner"); sol_log("- Owner");
sol_log_key(&k.owner); sol_log_key(&k.owner);
} }
@ -148,7 +148,7 @@ pub struct SolKeyedAccount<'a> {
/// Number of lamports owned by this account /// Number of lamports owned by this account
pub lamports: u64, pub lamports: u64,
/// On-chain data within this account /// On-chain data within this account
pub userdata: &'a [u8], pub data: &'a [u8],
/// Program that owns this account /// Program that owns this account
pub owner: SolPubkey<'a>, pub owner: SolPubkey<'a>,
} }
@ -200,15 +200,15 @@ pub extern "C" fn entrypoint(input: *mut u8) -> bool {
}; };
offset += size_of::<u64>(); offset += size_of::<u64>();
let userdata_length = unsafe { let data_length = unsafe {
#[allow(clippy::cast_ptr_alignment)] #[allow(clippy::cast_ptr_alignment)]
let userdata_length_ptr: *const u64 = input.add(offset) as *const u64; let data_length_ptr: *const u64 = input.add(offset) as *const u64;
*userdata_length_ptr *data_length_ptr
} as usize; } as usize;
offset += size_of::<u64>(); offset += size_of::<u64>();
let userdata = unsafe { from_raw_parts(input.add(offset), userdata_length) }; let data = unsafe { from_raw_parts(input.add(offset), data_length) };
offset += userdata_length; offset += data_length;
let owner_slice = unsafe { from_raw_parts(input.add(offset), SIZE_PUBKEY) }; let owner_slice = unsafe { from_raw_parts(input.add(offset), SIZE_PUBKEY) };
let owner = SolPubkey { key: &owner_slice }; let owner = SolPubkey { key: &owner_slice };
@ -218,7 +218,7 @@ pub extern "C" fn entrypoint(input: *mut u8) -> bool {
key, key,
is_signer, is_signer,
lamports, lamports,
userdata, data,
owner, owner,
}]; }];
@ -386,7 +386,7 @@ mod tests {
assert_eq!(SIZE_PUBKEY, ka[0].key.key.len()); assert_eq!(SIZE_PUBKEY, ka[0].key.key.len());
assert_eq!(key, ka[0].key.key); assert_eq!(key, ka[0].key.key);
assert_eq!(48, ka[0].lamports); assert_eq!(48, ka[0].lamports);
assert_eq!(1, ka[0].userdata.len()); assert_eq!(1, ka[0].data.len());
let owner = [0; 32]; let owner = [0; 32];
assert_eq!(SIZE_PUBKEY, ka[0].owner.key.len()); assert_eq!(SIZE_PUBKEY, ka[0].owner.key.len());
assert_eq!(owner, ka[0].owner.key); assert_eq!(owner, ka[0].owner.key);

View File

@ -1,6 +1,6 @@
[package] [package]
name = "solana-bpfloader" name = "solana-bpfloader"
version = "0.12.0" version = "0.12.3"
description = "Solana BPF Loader" description = "Solana BPF Loader"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@ -15,8 +15,8 @@ libc = "0.2.50"
log = "0.4.2" log = "0.4.2"
solana_rbpf = "=0.1.10" solana_rbpf = "=0.1.10"
serde = "1.0.89" serde = "1.0.89"
solana-logger = { path = "../../logger", version = "0.12.0" } solana-logger = { path = "../../logger", version = "0.12.3" }
solana-sdk = { path = "../../sdk", version = "0.12.0" } solana-sdk = { path = "../../sdk", version = "0.12.3" }
[lib] [lib]
name = "solana_bpf_loader" name = "solana_bpf_loader"

View File

@ -148,9 +148,9 @@ fn serialize_parameters(
.unwrap(); .unwrap();
v.write_all(info.unsigned_key().as_ref()).unwrap(); v.write_all(info.unsigned_key().as_ref()).unwrap();
v.write_u64::<LittleEndian>(info.account.lamports).unwrap(); v.write_u64::<LittleEndian>(info.account.lamports).unwrap();
v.write_u64::<LittleEndian>(info.account.userdata.len() as u64) v.write_u64::<LittleEndian>(info.account.data.len() as u64)
.unwrap(); .unwrap();
v.write_all(&info.account.userdata).unwrap(); v.write_all(&info.account.data).unwrap();
v.write_all(info.account.owner.as_ref()).unwrap(); v.write_all(info.account.owner.as_ref()).unwrap();
} }
v.write_u64::<LittleEndian>(data.len() as u64).unwrap(); v.write_u64::<LittleEndian>(data.len() as u64).unwrap();
@ -171,10 +171,10 @@ fn deserialize_parameters(keyed_accounts: &mut [KeyedAccount], buffer: &[u8]) {
start += mem::size_of::<u64>() // skip lamports start += mem::size_of::<u64>() // skip lamports
+ mem::size_of::<u64>(); // skip length tag + mem::size_of::<u64>(); // skip length tag
let end = start + info.account.userdata.len(); let end = start + info.account.data.len();
info.account.userdata.clone_from_slice(&buffer[start..end]); info.account.data.clone_from_slice(&buffer[start..end]);
start += info.account.userdata.len() // skip userdata start += info.account.data.len() // skip data
+ mem::size_of::<Pubkey>(); // skip owner + mem::size_of::<Pubkey>(); // skip owner
} }
} }
@ -190,7 +190,7 @@ fn entrypoint(
if keyed_accounts[0].account.executable { if keyed_accounts[0].account.executable {
let (progs, params) = keyed_accounts.split_at_mut(1); let (progs, params) = keyed_accounts.split_at_mut(1);
let prog = &progs[0].account.userdata; let prog = &progs[0].account.data;
info!("Call BPF program"); info!("Call BPF program");
//dump_program(keyed_accounts[0].key, prog); //dump_program(keyed_accounts[0].key, prog);
let mut vm = match create_vm(prog) { let mut vm = match create_vm(prog) {
@ -228,15 +228,15 @@ fn entrypoint(
let offset = offset as usize; let offset = offset as usize;
let len = bytes.len(); let len = bytes.len();
debug!("Write: offset={} length={}", offset, len); debug!("Write: offset={} length={}", offset, len);
if keyed_accounts[0].account.userdata.len() < offset + len { if keyed_accounts[0].account.data.len() < offset + len {
warn!( warn!(
"Write overflow: {} < {}", "Write overflow: {} < {}",
keyed_accounts[0].account.userdata.len(), keyed_accounts[0].account.data.len(),
offset + len offset + len
); );
return Err(ProgramError::GenericError); return Err(ProgramError::GenericError);
} }
keyed_accounts[0].account.userdata[offset..offset + len].copy_from_slice(&bytes); keyed_accounts[0].account.data[offset..offset + len].copy_from_slice(&bytes);
} }
LoaderInstruction::Finalize => { LoaderInstruction::Finalize => {
keyed_accounts[0].account.executable = true; keyed_accounts[0].account.executable = true;

View File

@ -1,6 +1,6 @@
[package] [package]
name = "solana-budget-program" name = "solana-budget-program"
version = "0.12.0" version = "0.12.3"
description = "Solana budget program" description = "Solana budget program"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@ -14,12 +14,12 @@ chrono = { version = "0.4.0", features = ["serde"] }
log = "0.4.2" log = "0.4.2"
serde = "1.0.89" serde = "1.0.89"
serde_derive = "1.0.89" serde_derive = "1.0.89"
solana-budget-api = { path = "../budget_api", version = "0.12.0" } solana-budget-api = { path = "../budget_api", version = "0.12.3" }
solana-logger = { path = "../../logger", version = "0.12.0" } solana-logger = { path = "../../logger", version = "0.12.3" }
solana-sdk = { path = "../../sdk", version = "0.12.0" } solana-sdk = { path = "../../sdk", version = "0.12.3" }
[dev-dependencies] [dev-dependencies]
solana-runtime = { path = "../../runtime", version = "0.12.0" } solana-runtime = { path = "../../runtime", version = "0.12.3" }
[lib] [lib]
name = "solana_budget_program" name = "solana_budget_program"

View File

@ -87,7 +87,7 @@ fn apply_debits(
keyed_accounts[0].account.lamports += payment.lamports; keyed_accounts[0].account.lamports += payment.lamports;
Ok(()) Ok(())
} else { } else {
let existing = BudgetState::deserialize(&keyed_accounts[0].account.userdata).ok(); let existing = BudgetState::deserialize(&keyed_accounts[0].account.data).ok();
if Some(true) == existing.map(|x| x.initialized) { if Some(true) == existing.map(|x| x.initialized) {
trace!("contract already exists"); trace!("contract already exists");
Err(BudgetError::ContractAlreadyExists) Err(BudgetError::ContractAlreadyExists)
@ -95,13 +95,12 @@ fn apply_debits(
let mut budget_state = BudgetState::default(); let mut budget_state = BudgetState::default();
budget_state.pending_budget = Some(expr); budget_state.pending_budget = Some(expr);
budget_state.initialized = true; budget_state.initialized = true;
budget_state.serialize(&mut keyed_accounts[0].account.userdata) budget_state.serialize(&mut keyed_accounts[0].account.data)
} }
} }
} }
BudgetInstruction::ApplyTimestamp(dt) => { BudgetInstruction::ApplyTimestamp(dt) => {
if let Ok(mut budget_state) = if let Ok(mut budget_state) = BudgetState::deserialize(&keyed_accounts[1].account.data)
BudgetState::deserialize(&keyed_accounts[1].account.userdata)
{ {
if !budget_state.is_pending() { if !budget_state.is_pending() {
Err(BudgetError::ContractNotPending) Err(BudgetError::ContractNotPending)
@ -112,15 +111,14 @@ fn apply_debits(
trace!("apply timestamp"); trace!("apply timestamp");
apply_timestamp(&mut budget_state, keyed_accounts, *dt)?; apply_timestamp(&mut budget_state, keyed_accounts, *dt)?;
trace!("apply timestamp committed"); trace!("apply timestamp committed");
budget_state.serialize(&mut keyed_accounts[1].account.userdata) budget_state.serialize(&mut keyed_accounts[1].account.data)
} }
} else { } else {
Err(BudgetError::UninitializedContract) Err(BudgetError::UninitializedContract)
} }
} }
BudgetInstruction::ApplySignature => { BudgetInstruction::ApplySignature => {
if let Ok(mut budget_state) = if let Ok(mut budget_state) = BudgetState::deserialize(&keyed_accounts[1].account.data)
BudgetState::deserialize(&keyed_accounts[1].account.userdata)
{ {
if !budget_state.is_pending() { if !budget_state.is_pending() {
Err(BudgetError::ContractNotPending) Err(BudgetError::ContractNotPending)
@ -131,7 +129,7 @@ fn apply_debits(
trace!("apply signature"); trace!("apply signature");
apply_signature(&mut budget_state, keyed_accounts)?; apply_signature(&mut budget_state, keyed_accounts)?;
trace!("apply signature committed"); trace!("apply signature committed");
budget_state.serialize(&mut keyed_accounts[1].account.userdata) budget_state.serialize(&mut keyed_accounts[1].account.data)
} }
} else { } else {
Err(BudgetError::UninitializedContract) Err(BudgetError::UninitializedContract)
@ -146,8 +144,8 @@ pub fn process_instruction(
data: &[u8], data: &[u8],
) -> Result<(), BudgetError> { ) -> Result<(), BudgetError> {
let instruction = deserialize(data).map_err(|err| { let instruction = deserialize(data).map_err(|err| {
info!("Invalid transaction userdata: {:?} {:?}", data, err); info!("Invalid transaction data: {:?} {:?}", data, err);
BudgetError::UserdataDeserializeFailure BudgetError::AccountDataDeserializeFailure
})?; })?;
trace!("process_instruction: {:?}", instruction); trace!("process_instruction: {:?}", instruction);
@ -178,12 +176,12 @@ mod test {
let mut accounts = vec![Account::new(1, 0, &id()), Account::new(0, 512, &id())]; let mut accounts = vec![Account::new(1, 0, &id()), Account::new(0, 512, &id())];
let from = Keypair::new(); let from = Keypair::new();
let contract = Keypair::new(); let contract = Keypair::new();
let userdata = (1u8, 2u8, 3u8); let data = (1u8, 2u8, 3u8);
let tx = Transaction::new( let tx = Transaction::new(
&from, &from,
&[contract.pubkey()], &[contract.pubkey()],
&id(), &id(),
&userdata, &data,
Hash::default(), Hash::default(),
0, 0,
); );
@ -285,7 +283,7 @@ mod test {
process_transaction(&tx, &mut accounts).unwrap(); process_transaction(&tx, &mut accounts).unwrap();
assert_eq!(accounts[from_account].lamports, 0); assert_eq!(accounts[from_account].lamports, 0);
assert_eq!(accounts[contract_account].lamports, 1); assert_eq!(accounts[contract_account].lamports, 1);
let budget_state = BudgetState::deserialize(&accounts[contract_account].userdata).unwrap(); let budget_state = BudgetState::deserialize(&accounts[contract_account].data).unwrap();
assert!(budget_state.is_pending()); assert!(budget_state.is_pending());
// Attack! Try to payout to a rando key // Attack! Try to payout to a rando key
@ -304,7 +302,7 @@ mod test {
assert_eq!(accounts[contract_account].lamports, 1); assert_eq!(accounts[contract_account].lamports, 1);
assert_eq!(accounts[to_account].lamports, 0); assert_eq!(accounts[to_account].lamports, 0);
let budget_state = BudgetState::deserialize(&accounts[contract_account].userdata).unwrap(); let budget_state = BudgetState::deserialize(&accounts[contract_account].data).unwrap();
assert!(budget_state.is_pending()); assert!(budget_state.is_pending());
// Now, acknowledge the time in the condition occurred and // Now, acknowledge the time in the condition occurred and
@ -321,7 +319,7 @@ mod test {
assert_eq!(accounts[contract_account].lamports, 0); assert_eq!(accounts[contract_account].lamports, 0);
assert_eq!(accounts[to_account].lamports, 1); assert_eq!(accounts[to_account].lamports, 1);
let budget_state = BudgetState::deserialize(&accounts[contract_account].userdata).unwrap(); let budget_state = BudgetState::deserialize(&accounts[contract_account].data).unwrap();
assert!(!budget_state.is_pending()); assert!(!budget_state.is_pending());
// try to replay the timestamp contract // try to replay the timestamp contract
@ -356,7 +354,7 @@ mod test {
process_transaction(&tx, &mut accounts).unwrap(); process_transaction(&tx, &mut accounts).unwrap();
assert_eq!(accounts[from_account].lamports, 0); assert_eq!(accounts[from_account].lamports, 0);
assert_eq!(accounts[contract_account].lamports, 1); assert_eq!(accounts[contract_account].lamports, 1);
let budget_state = BudgetState::deserialize(&accounts[contract_account].userdata).unwrap(); let budget_state = BudgetState::deserialize(&accounts[contract_account].data).unwrap();
assert!(budget_state.is_pending()); assert!(budget_state.is_pending());
// Attack! try to put the lamports into the wrong account with cancel // Attack! try to put the lamports into the wrong account with cancel

View File

@ -1,6 +1,6 @@
[package] [package]
name = "solana-budget-api" name = "solana-budget-api"
version = "0.12.0" version = "0.12.3"
description = "Solana Budget program API" description = "Solana Budget program API"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@ -13,7 +13,7 @@ bincode = "1.1.2"
chrono = { version = "0.4.0", features = ["serde"] } chrono = { version = "0.4.0", features = ["serde"] }
serde = "1.0.89" serde = "1.0.89"
serde_derive = "1.0.89" serde_derive = "1.0.89"
solana-sdk = { path = "../../sdk", version = "0.12.0" } solana-sdk = { path = "../../sdk", version = "0.12.3" }
[lib] [lib]
name = "solana_budget_api" name = "solana_budget_api"

View File

@ -12,8 +12,8 @@ pub enum BudgetError {
UninitializedContract, UninitializedContract,
DestinationMissing, DestinationMissing,
FailedWitness, FailedWitness,
UserdataTooSmall, AccountDataTooSmall,
UserdataDeserializeFailure, AccountDataDeserializeFailure,
UnsignedKey, UnsignedKey,
} }
@ -37,7 +37,7 @@ impl BudgetState {
pub fn serialize(&self, output: &mut [u8]) -> Result<(), BudgetError> { pub fn serialize(&self, output: &mut [u8]) -> Result<(), BudgetError> {
serialize_into(output, self).map_err(|err| match *err { serialize_into(output, self).map_err(|err| match *err {
_ => BudgetError::UserdataTooSmall, _ => BudgetError::AccountDataTooSmall,
}) })
} }
@ -56,18 +56,18 @@ mod test {
fn test_serializer() { fn test_serializer() {
let mut a = Account::new(0, 512, &id()); let mut a = Account::new(0, 512, &id());
let b = BudgetState::default(); let b = BudgetState::default();
b.serialize(&mut a.userdata).unwrap(); b.serialize(&mut a.data).unwrap();
let c = BudgetState::deserialize(&a.userdata).unwrap(); let c = BudgetState::deserialize(&a.data).unwrap();
assert_eq!(b, c); assert_eq!(b, c);
} }
#[test] #[test]
fn test_serializer_userdata_too_small() { fn test_serializer_data_too_small() {
let mut a = Account::new(0, 1, &id()); let mut a = Account::new(0, 1, &id());
let b = BudgetState::default(); let b = BudgetState::default();
assert_eq!( assert_eq!(
b.serialize(&mut a.userdata), b.serialize(&mut a.data),
Err(BudgetError::UserdataTooSmall) Err(BudgetError::AccountDataTooSmall)
); );
} }
} }

View File

@ -151,11 +151,11 @@ impl BudgetTransaction {
} }
pub fn system_instruction(tx: &Transaction, index: usize) -> Option<SystemInstruction> { pub fn system_instruction(tx: &Transaction, index: usize) -> Option<SystemInstruction> {
deserialize(&tx.userdata(index)).ok() deserialize(&tx.data(index)).ok()
} }
pub fn instruction(tx: &Transaction, index: usize) -> Option<BudgetInstruction> { pub fn instruction(tx: &Transaction, index: usize) -> Option<BudgetInstruction> {
deserialize(&tx.userdata(index)).ok() deserialize(&tx.data(index)).ok()
} }
/// Verify only the payment plan. /// Verify only the payment plan.
@ -236,9 +236,9 @@ mod tests {
payment.lamports = *lamports; // <-- attack, part 2! payment.lamports = *lamports; // <-- attack, part 2!
} }
} }
tx.instructions[1].userdata = serialize(&instruction).unwrap(); tx.instructions[1].data = serialize(&instruction).unwrap();
} }
tx.instructions[0].userdata = serialize(&system_instruction).unwrap(); tx.instructions[0].data = serialize(&system_instruction).unwrap();
assert!(BudgetTransaction::verify_plan(&tx)); assert!(BudgetTransaction::verify_plan(&tx));
assert!(!tx.verify_signature()); assert!(!tx.verify_signature());
} }
@ -257,7 +257,7 @@ mod tests {
payment.to = thief_keypair.pubkey(); // <-- attack! payment.to = thief_keypair.pubkey(); // <-- attack!
} }
} }
tx.instructions[1].userdata = serialize(&instruction).unwrap(); tx.instructions[1].data = serialize(&instruction).unwrap();
assert!(BudgetTransaction::verify_plan(&tx)); assert!(BudgetTransaction::verify_plan(&tx));
assert!(!tx.verify_signature()); assert!(!tx.verify_signature());
} }
@ -274,7 +274,7 @@ mod tests {
payment.lamports = 2; // <-- attack! payment.lamports = 2; // <-- attack!
} }
} }
tx.instructions[1].userdata = serialize(&instruction).unwrap(); tx.instructions[1].data = serialize(&instruction).unwrap();
assert!(!BudgetTransaction::verify_plan(&tx)); assert!(!BudgetTransaction::verify_plan(&tx));
// Also, ensure all branchs of the plan spend all lamports // Also, ensure all branchs of the plan spend all lamports
@ -284,7 +284,7 @@ mod tests {
payment.lamports = 0; // <-- whoops! payment.lamports = 0; // <-- whoops!
} }
} }
tx.instructions[1].userdata = serialize(&instruction).unwrap(); tx.instructions[1].data = serialize(&instruction).unwrap();
assert!(!BudgetTransaction::verify_plan(&tx)); assert!(!BudgetTransaction::verify_plan(&tx));
} }
} }

View File

@ -1,6 +1,6 @@
[package] [package]
name = "solana-failure" name = "solana-failure"
version = "0.12.0" version = "0.12.3"
description = "Solana failure program" description = "Solana failure program"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@ -9,11 +9,11 @@ homepage = "https://solana.com/"
edition = "2018" edition = "2018"
[dependencies] [dependencies]
solana-sdk = { path = "../../sdk", version = "0.12.0" } solana-sdk = { path = "../../sdk", version = "0.12.3" }
log = "0.4.2" log = "0.4.2"
[dev-dependencies] [dev-dependencies]
solana-runtime = { path = "../../runtime", version = "0.12.0" } solana-runtime = { path = "../../runtime", version = "0.12.3" }
[lib] [lib]
name = "failure" name = "failure"

View File

@ -1,6 +1,6 @@
[package] [package]
name = "solana-noop" name = "solana-noop"
version = "0.12.0" version = "0.12.3"
description = "Solana noop program" description = "Solana noop program"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@ -9,12 +9,12 @@ homepage = "https://solana.com/"
edition = "2018" edition = "2018"
[dependencies] [dependencies]
solana-sdk = { path = "../../sdk", version = "0.12.0" } solana-sdk = { path = "../../sdk", version = "0.12.3" }
solana-logger = { path = "../../logger", version = "0.12.0" } solana-logger = { path = "../../logger", version = "0.12.3" }
log = "0.4.2" log = "0.4.2"
[dev-dependencies] [dev-dependencies]
solana-runtime = { path = "../../runtime", version = "0.12.0" } solana-runtime = { path = "../../runtime", version = "0.12.3" }
[lib] [lib]
name = "noop" name = "noop"

View File

@ -1,6 +1,6 @@
[package] [package]
name = "solana-rewards-program" name = "solana-rewards-program"
version = "0.12.0" version = "0.12.3"
description = "Solana rewards program" description = "Solana rewards program"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@ -11,13 +11,13 @@ edition = "2018"
[dependencies] [dependencies]
bincode = "1.1.2" bincode = "1.1.2"
log = "0.4.2" log = "0.4.2"
solana-logger = { path = "../../logger", version = "0.12.0" } solana-logger = { path = "../../logger", version = "0.12.3" }
solana-sdk = { path = "../../sdk", version = "0.12.0" } solana-sdk = { path = "../../sdk", version = "0.12.3" }
solana-rewards-api = { path = "../rewards_api", version = "0.12.0" } solana-rewards-api = { path = "../rewards_api", version = "0.12.3" }
solana-vote-api = { path = "../vote_api", version = "0.12.0" } solana-vote-api = { path = "../vote_api", version = "0.12.3" }
[dev-dependencies] [dev-dependencies]
solana-runtime = { path = "../../runtime", version = "0.12.0" } solana-runtime = { path = "../../runtime", version = "0.12.3" }
[lib] [lib]
name = "solana_rewards_program" name = "solana_rewards_program"

View File

@ -44,7 +44,7 @@ fn redeem_vote_credits(keyed_accounts: &mut [KeyedAccount]) -> Result<(), Progra
// VoteInstruction::ClearCredits and that it points to the same vote account // VoteInstruction::ClearCredits and that it points to the same vote account
// as keyed_accounts[0]. // as keyed_accounts[0].
let vote_state = VoteState::deserialize(&keyed_accounts[0].account.userdata)?; let vote_state = VoteState::deserialize(&keyed_accounts[0].account.data)?;
// TODO: This assumes the stake is static. If not, it should use the account value // TODO: This assumes the stake is static. If not, it should use the account value
// at the time of voting, not at credit redemption. // at the time of voting, not at credit redemption.
@ -75,7 +75,7 @@ fn entrypoint(
trace!("process_instruction: {:?}", data); trace!("process_instruction: {:?}", data);
trace!("keyed_accounts: {:?}", keyed_accounts); trace!("keyed_accounts: {:?}", keyed_accounts);
match deserialize(data).map_err(|_| ProgramError::InvalidUserdata)? { match deserialize(data).map_err(|_| ProgramError::InvalidInstructionData)? {
RewardsInstruction::RedeemVoteCredits => redeem_vote_credits(keyed_accounts), RewardsInstruction::RedeemVoteCredits => redeem_vote_credits(keyed_accounts),
} }
} }

View File

@ -52,7 +52,7 @@ impl<'a> RewardsBank<'a> {
self.bank.register_tick(&hash(blockhash.as_ref())); self.bank.register_tick(&hash(blockhash.as_ref()));
let vote_account = self.bank.get_account(&vote_keypair.pubkey()).unwrap(); let vote_account = self.bank.get_account(&vote_keypair.pubkey()).unwrap();
Ok(VoteState::deserialize(&vote_account.userdata).unwrap()) Ok(VoteState::deserialize(&vote_account.data).unwrap())
} }
fn redeem_credits(&self, rewards_id: &Pubkey, vote_keypair: &Keypair) -> Result<VoteState> { fn redeem_credits(&self, rewards_id: &Pubkey, vote_keypair: &Keypair) -> Result<VoteState> {
@ -60,7 +60,7 @@ impl<'a> RewardsBank<'a> {
let tx = RewardsTransaction::new_redeem_credits(&vote_keypair, rewards_id, blockhash, 0); let tx = RewardsTransaction::new_redeem_credits(&vote_keypair, rewards_id, blockhash, 0);
self.bank.process_transaction(&tx)?; self.bank.process_transaction(&tx)?;
let vote_account = self.bank.get_account(&vote_keypair.pubkey()).unwrap(); let vote_account = self.bank.get_account(&vote_keypair.pubkey()).unwrap();
Ok(VoteState::deserialize(&vote_account.userdata).unwrap()) Ok(VoteState::deserialize(&vote_account.data).unwrap())
} }
} }

Some files were not shown because too many files have changed in this diff Show More