Compare commits

..

28 Commits

Author SHA1 Message Date
Pankaj Garg
cfe91c67f9 Revert "Revert "Disable accounts squash call from bank""
This reverts commit f84593ad5f.
2019-04-03 06:59:21 -07:00
carllin
0f9c516570 Cherry pick #3572 and #3459 (#3608)
* Fix skipping half leader slot

* Get rid of unnecessary frozen banks
2019-04-02 17:57:00 -07:00
Pankaj Garg
e4aba9c900 Remove second block streamer from testnet beta 2019-04-02 15:38:02 -07:00
Pankaj Garg
88aea74ab1 Don't use external node ssh key if one is not configured 2019-04-02 14:52:30 -07:00
carllin
357f28ae4e Implement finalizer so that all locked accounts are dropped (#3585) (#3598)
* Implement finalizer so that all locked accounts are dropped when finalizer goes out of scope

* Add test for tx error with lock conflict

* Fix double unlock from destructor running after a call to unlock
2019-04-02 13:49:09 -07:00
Pankaj Garg
7c9c667c5c Deploy beta testnet with 100 nodes across AWS and GCP 2019-04-02 12:44:42 -07:00
Pankaj Garg
d62d1174d0 Support for configuring testnet nodes across multiple cloud services 2019-04-01 19:56:01 -07:00
Pankaj Garg
5d5ae1c962 Increase node count in beta testnet 2019-04-01 13:02:13 -07:00
carllin
bf11d7ef43 Fix resetting PohRecorder to wrong bank (#3553) (#3574)
* Check whether future slot already has transmission
2019-03-30 02:06:49 -07:00
Pankaj Garg
9568a1da03 Send metrics data to the correct/configured database host 2019-03-29 13:12:01 -07:00
Pankaj Garg
f11a3d9796 Fix EC2 scripts for blockstream startup 2019-03-28 15:56:28 -07:00
Pankaj Garg
8a7e8aacdd enable leader rotation in beta testnet 2019-03-28 14:34:30 -07:00
Pankaj Garg
e3750fb73d fix clippy errors 2019-03-28 14:34:30 -07:00
Pankaj Garg
d78b09492e review comments 2019-03-28 14:34:30 -07:00
Pankaj Garg
d573b55821 fix the ip address that's stored in the config file 2019-03-28 14:34:30 -07:00
Pankaj Garg
18cad565cf fix shell-check errors 2019-03-28 14:34:30 -07:00
Pankaj Garg
9abf90b443 Added support for multi-region cloud testnet 2019-03-28 14:34:30 -07:00
anatoly yakovenko
cd3ff5c335 V0.12.3, cherry pick 3523 and 3529 (#3531)
* validator confirmation

* validator confirmaiton

* remove leader confirmaiton

* hang out on progress until fork is confirmed

* use the right id for delegate id

* fixup! hang out on progress until fork is confirmed

* fixup! use the right id for delegate id

* version bump
2019-03-28 05:59:42 -07:00
Pankaj Garg
e55249e63f propagate TESTNET_DB_HOST env variable to next step in buildkite 2019-03-26 15:00:33 -07:00
Pankaj Garg
10bc0c6ee2 Add provisions to specify a database server in testnet manager buildkite 2019-03-26 15:00:33 -07:00
Anatoly Yakovenko
ed14b78d81 also check the delegate_id 2019-03-26 13:44:53 -07:00
Rob Walker
7f404941bb remove status_cache.freeze (#3509) 2019-03-26 12:10:46 -07:00
anatoly yakovenko
6d45ac1bc7 Record the current nodes locktower votes from the bank (#3502)
* observed_locktower_stats

* fixup! observed_locktower_stats
2019-03-26 11:45:59 -07:00
Rob Walker
fabb6d2092 delay freeze of status_cache until squash (#3503) 2019-03-26 11:37:38 -07:00
Pankaj Garg
93cea4c86c Remove rewards crate from publishing script 2019-03-25 21:34:54 -07:00
Pankaj Garg
5fb35f79c3 Added stats for locktower in testnet dashboard 2019-03-25 21:11:37 -07:00
Pankaj Garg
da11274b63 Add support for influx cloud 2019-03-25 21:11:37 -07:00
Pankaj Garg
5d70e2efa9 0.12.2 2019-03-25 20:38:48 -07:00
63 changed files with 2439 additions and 1663 deletions

296
Cargo.lock generated
View File

@@ -1967,7 +1967,7 @@ dependencies = [
[[package]]
name = "solana"
version = "0.12.1"
version = "0.12.3"
dependencies = [
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"bs58 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1995,19 +1995,19 @@ dependencies = [
"serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-budget-api 0.12.1",
"solana-budget-program 0.12.1",
"solana-client 0.12.1",
"solana-drone 0.12.1",
"solana-logger 0.12.1",
"solana-metrics 0.12.1",
"solana-netutil 0.12.1",
"solana-runtime 0.12.1",
"solana-sdk 0.12.1",
"solana-storage-api 0.12.1",
"solana-vote-api 0.12.1",
"solana-vote-program 0.12.1",
"solana-vote-signer 0.12.1",
"solana-budget-api 0.12.3",
"solana-budget-program 0.12.3",
"solana-client 0.12.3",
"solana-drone 0.12.3",
"solana-logger 0.12.3",
"solana-metrics 0.12.3",
"solana-netutil 0.12.3",
"solana-runtime 0.12.3",
"solana-sdk 0.12.3",
"solana-storage-api 0.12.3",
"solana-vote-api 0.12.3",
"solana-vote-program 0.12.3",
"solana-vote-signer 0.12.3",
"sys-info 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2016,87 +2016,87 @@ dependencies = [
[[package]]
name = "solana-bench-streamer"
version = "0.12.1"
version = "0.12.3"
dependencies = [
"clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)",
"solana 0.12.1",
"solana-logger 0.12.1",
"solana-netutil 0.12.1",
"solana 0.12.3",
"solana-logger 0.12.3",
"solana-netutil 0.12.3",
]
[[package]]
name = "solana-bench-tps"
version = "0.12.1"
version = "0.12.3"
dependencies = [
"clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rayon 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
"solana 0.12.1",
"solana-client 0.12.1",
"solana-drone 0.12.1",
"solana-logger 0.12.1",
"solana-metrics 0.12.1",
"solana-sdk 0.12.1",
"solana 0.12.3",
"solana-client 0.12.3",
"solana-drone 0.12.3",
"solana-logger 0.12.3",
"solana-metrics 0.12.3",
"solana-sdk 0.12.3",
]
[[package]]
name = "solana-bpf-programs"
version = "0.12.1"
version = "0.12.3"
dependencies = [
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
"elf 0.0.10 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-bpfloader 0.12.1",
"solana-logger 0.12.1",
"solana-runtime 0.12.1",
"solana-sdk 0.12.1",
"solana-bpfloader 0.12.3",
"solana-logger 0.12.3",
"solana-runtime 0.12.3",
"solana-sdk 0.12.3",
"solana_rbpf 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
"walkdir 2.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "solana-bpfloader"
version = "0.12.1"
version = "0.12.3"
dependencies = [
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.50 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-logger 0.12.1",
"solana-sdk 0.12.1",
"solana-logger 0.12.3",
"solana-sdk 0.12.3",
"solana_rbpf 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "solana-budget-api"
version = "0.12.1"
version = "0.12.3"
dependencies = [
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"chrono 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-sdk 0.12.1",
"solana-sdk 0.12.3",
]
[[package]]
name = "solana-budget-program"
version = "0.12.1"
version = "0.12.3"
dependencies = [
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"chrono 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-budget-api 0.12.1",
"solana-logger 0.12.1",
"solana-runtime 0.12.1",
"solana-sdk 0.12.1",
"solana-budget-api 0.12.3",
"solana-logger 0.12.3",
"solana-runtime 0.12.3",
"solana-sdk 0.12.3",
]
[[package]]
name = "solana-client"
version = "0.12.1"
version = "0.12.3"
dependencies = [
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"bs58 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2105,15 +2105,15 @@ dependencies = [
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"reqwest 0.9.11 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-logger 0.12.1",
"solana-metrics 0.12.1",
"solana-netutil 0.12.1",
"solana-sdk 0.12.1",
"solana-logger 0.12.3",
"solana-metrics 0.12.3",
"solana-netutil 0.12.3",
"solana-sdk 0.12.3",
]
[[package]]
name = "solana-drone"
version = "0.12.1"
version = "0.12.3"
dependencies = [
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2122,95 +2122,95 @@ dependencies = [
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-logger 0.12.1",
"solana-metrics 0.12.1",
"solana-sdk 0.12.1",
"solana-logger 0.12.3",
"solana-metrics 0.12.3",
"solana-sdk 0.12.3",
"tokio 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "solana-failure"
version = "0.12.1"
version = "0.12.3"
dependencies = [
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-runtime 0.12.1",
"solana-sdk 0.12.1",
"solana-runtime 0.12.3",
"solana-sdk 0.12.3",
]
[[package]]
name = "solana-fullnode"
version = "0.12.1"
version = "0.12.3"
dependencies = [
"clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
"solana 0.12.1",
"solana-drone 0.12.1",
"solana-logger 0.12.1",
"solana-metrics 0.12.1",
"solana-netutil 0.12.1",
"solana-runtime 0.12.1",
"solana-sdk 0.12.1",
"solana-vote-api 0.12.1",
"solana-vote-signer 0.12.1",
"solana 0.12.3",
"solana-drone 0.12.3",
"solana-logger 0.12.3",
"solana-metrics 0.12.3",
"solana-netutil 0.12.3",
"solana-runtime 0.12.3",
"solana-sdk 0.12.3",
"solana-vote-api 0.12.3",
"solana-vote-signer 0.12.3",
]
[[package]]
name = "solana-genesis"
version = "0.12.1"
version = "0.12.3"
dependencies = [
"clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
"solana 0.12.1",
"solana-sdk 0.12.1",
"solana 0.12.3",
"solana-sdk 0.12.3",
]
[[package]]
name = "solana-keygen"
version = "0.12.1"
version = "0.12.3"
dependencies = [
"clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)",
"dirs 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-sdk 0.12.1",
"solana-sdk 0.12.3",
]
[[package]]
name = "solana-ledger-tool"
version = "0.12.1"
version = "0.12.3"
dependencies = [
"assert_cmd 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
"clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
"solana 0.12.1",
"solana-logger 0.12.1",
"solana-runtime 0.12.1",
"solana-sdk 0.12.1",
"solana 0.12.3",
"solana-logger 0.12.3",
"solana-runtime 0.12.3",
"solana-sdk 0.12.3",
]
[[package]]
name = "solana-logger"
version = "0.12.1"
version = "0.12.3"
dependencies = [
"env_logger 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "solana-metrics"
version = "0.12.1"
version = "0.12.3"
dependencies = [
"influx_db_client 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
"reqwest 0.9.11 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-sdk 0.12.1",
"solana-sdk 0.12.3",
"sys-info 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "solana-netutil"
version = "0.12.1"
version = "0.12.3"
dependencies = [
"ipnetwork 0.12.8 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2219,57 +2219,57 @@ dependencies = [
"rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
"reqwest 0.9.11 (registry+https://github.com/rust-lang/crates.io-index)",
"socket2 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-logger 0.12.1",
"solana-logger 0.12.3",
]
[[package]]
name = "solana-noop"
version = "0.12.1"
version = "0.12.3"
dependencies = [
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-logger 0.12.1",
"solana-runtime 0.12.1",
"solana-sdk 0.12.1",
"solana-logger 0.12.3",
"solana-runtime 0.12.3",
"solana-sdk 0.12.3",
]
[[package]]
name = "solana-replicator"
version = "0.12.1"
version = "0.12.3"
dependencies = [
"clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)",
"solana 0.12.1",
"solana-logger 0.12.1",
"solana-netutil 0.12.1",
"solana-sdk 0.12.1",
"solana 0.12.3",
"solana-logger 0.12.3",
"solana-netutil 0.12.3",
"solana-sdk 0.12.3",
]
[[package]]
name = "solana-rewards-api"
version = "0.12.1"
version = "0.12.3"
dependencies = [
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-sdk 0.12.1",
"solana-vote-api 0.12.1",
"solana-sdk 0.12.3",
"solana-vote-api 0.12.3",
]
[[package]]
name = "solana-rewards-program"
version = "0.12.1"
version = "0.12.3"
dependencies = [
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-logger 0.12.1",
"solana-rewards-api 0.12.1",
"solana-runtime 0.12.1",
"solana-sdk 0.12.1",
"solana-vote-api 0.12.1",
"solana-logger 0.12.3",
"solana-rewards-api 0.12.3",
"solana-runtime 0.12.3",
"solana-sdk 0.12.3",
"solana-vote-api 0.12.3",
]
[[package]]
name = "solana-runtime"
version = "0.12.1"
version = "0.12.3"
dependencies = [
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"bv 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2284,18 +2284,18 @@ dependencies = [
"serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-budget-api 0.12.1",
"solana-logger 0.12.1",
"solana-metrics 0.12.1",
"solana-sdk 0.12.1",
"solana-storage-api 0.12.1",
"solana-token-api 0.12.1",
"solana-vote-api 0.12.1",
"solana-budget-api 0.12.3",
"solana-logger 0.12.3",
"solana-metrics 0.12.3",
"solana-sdk 0.12.3",
"solana-storage-api 0.12.3",
"solana-token-api 0.12.3",
"solana-vote-api 0.12.3",
]
[[package]]
name = "solana-sdk"
version = "0.12.1"
version = "0.12.3"
dependencies = [
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"bs58 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2315,85 +2315,85 @@ dependencies = [
[[package]]
name = "solana-storage-api"
version = "0.12.1"
version = "0.12.3"
dependencies = [
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-sdk 0.12.1",
"solana-sdk 0.12.3",
]
[[package]]
name = "solana-storage-program"
version = "0.12.1"
version = "0.12.3"
dependencies = [
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-logger 0.12.1",
"solana-runtime 0.12.1",
"solana-sdk 0.12.1",
"solana-storage-api 0.12.1",
"solana-logger 0.12.3",
"solana-runtime 0.12.3",
"solana-sdk 0.12.3",
"solana-storage-api 0.12.3",
]
[[package]]
name = "solana-token-api"
version = "0.12.1"
version = "0.12.3"
dependencies = [
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-sdk 0.12.1",
"solana-sdk 0.12.3",
]
[[package]]
name = "solana-token-program"
version = "0.12.1"
version = "0.12.3"
dependencies = [
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-logger 0.12.1",
"solana-sdk 0.12.1",
"solana-logger 0.12.3",
"solana-sdk 0.12.3",
]
[[package]]
name = "solana-upload-perf"
version = "0.12.1"
version = "0.12.3"
dependencies = [
"serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-metrics 0.12.1",
"solana-metrics 0.12.3",
]
[[package]]
name = "solana-vote-api"
version = "0.12.1"
version = "0.12.3"
dependencies = [
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-sdk 0.12.1",
"solana-sdk 0.12.3",
]
[[package]]
name = "solana-vote-program"
version = "0.12.1"
version = "0.12.3"
dependencies = [
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-logger 0.12.1",
"solana-metrics 0.12.1",
"solana-runtime 0.12.1",
"solana-sdk 0.12.1",
"solana-vote-api 0.12.1",
"solana-logger 0.12.3",
"solana-metrics 0.12.3",
"solana-runtime 0.12.3",
"solana-sdk 0.12.3",
"solana-vote-api 0.12.3",
]
[[package]]
name = "solana-vote-signer"
version = "0.12.1"
version = "0.12.3"
dependencies = [
"bs58 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2403,13 +2403,13 @@ dependencies = [
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-metrics 0.12.1",
"solana-sdk 0.12.1",
"solana-metrics 0.12.3",
"solana-sdk 0.12.3",
]
[[package]]
name = "solana-wallet"
version = "0.12.1"
version = "0.12.3"
dependencies = [
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"bs58 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2418,20 +2418,20 @@ dependencies = [
"dirs 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
"solana 0.12.1",
"solana-budget-api 0.12.1",
"solana-budget-program 0.12.1",
"solana-client 0.12.1",
"solana-drone 0.12.1",
"solana-logger 0.12.1",
"solana-sdk 0.12.1",
"solana-vote-api 0.12.1",
"solana-vote-signer 0.12.1",
"solana 0.12.3",
"solana-budget-api 0.12.3",
"solana-budget-program 0.12.3",
"solana-client 0.12.3",
"solana-drone 0.12.3",
"solana-logger 0.12.3",
"solana-sdk 0.12.3",
"solana-vote-api 0.12.3",
"solana-vote-signer 0.12.3",
]
[[package]]
name = "solana-workspace"
version = "0.12.1"
version = "0.12.3"
dependencies = [
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"bs58 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2441,14 +2441,14 @@ dependencies = [
"rayon 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
"reqwest 0.9.11 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
"solana 0.12.1",
"solana-budget-program 0.12.1",
"solana-client 0.12.1",
"solana-logger 0.12.1",
"solana-netutil 0.12.1",
"solana-runtime 0.12.1",
"solana-sdk 0.12.1",
"solana-vote-api 0.12.1",
"solana 0.12.3",
"solana-budget-program 0.12.3",
"solana-client 0.12.3",
"solana-logger 0.12.3",
"solana-netutil 0.12.3",
"solana-runtime 0.12.3",
"solana-sdk 0.12.3",
"solana-vote-api 0.12.3",
"sys-info 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)",
]

View File

@@ -1,7 +1,7 @@
[package]
name = "solana-workspace"
description = "Blockchain, Rebuilt for Scale"
version = "0.12.1"
version = "0.12.3"
documentation = "https://docs.rs/solana"
homepage = "https://solana.com/"
readme = "README.md"
@@ -27,14 +27,14 @@ rand = "0.6.5"
rayon = "1.0.0"
reqwest = "0.9.11"
serde_json = "1.0.39"
solana = { path = "core", version = "0.12.1" }
solana-budget-program = { path = "programs/budget", version = "0.12.1" }
solana-client = { path = "client", version = "0.12.1" }
solana-logger = { path = "logger", version = "0.12.1" }
solana-netutil = { path = "netutil", version = "0.12.1" }
solana-runtime = { path = "runtime", version = "0.12.1" }
solana-sdk = { path = "sdk", version = "0.12.1" }
solana-vote-api = { path = "programs/vote_api", version = "0.12.1" }
solana = { path = "core", version = "0.12.3" }
solana-budget-program = { path = "programs/budget", version = "0.12.3" }
solana-client = { path = "client", version = "0.12.3" }
solana-logger = { path = "logger", version = "0.12.3" }
solana-netutil = { path = "netutil", version = "0.12.3" }
solana-runtime = { path = "runtime", version = "0.12.3" }
solana-sdk = { path = "sdk", version = "0.12.3" }
solana-vote-api = { path = "programs/vote_api", version = "0.12.3" }
sys-info = "0.5.6"
[[bench]]

View File

@@ -2,16 +2,16 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-streamer"
version = "0.12.1"
version = "0.12.3"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
clap = "2.32.0"
solana = { path = "../core", version = "0.12.1" }
solana-logger = { path = "../logger", version = "0.12.1" }
solana-netutil = { path = "../netutil", version = "0.12.1" }
solana = { path = "../core", version = "0.12.3" }
solana-logger = { path = "../logger", version = "0.12.3" }
solana-netutil = { path = "../netutil", version = "0.12.3" }
[features]
cuda = ["solana/cuda"]

View File

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-tps"
version = "0.12.1"
version = "0.12.3"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -11,12 +11,12 @@ homepage = "https://solana.com/"
clap = "2.32.0"
rayon = "1.0.3"
serde_json = "1.0.39"
solana = { path = "../core", version = "0.12.1" }
solana-client = { path = "../client", version = "0.12.1" }
solana-drone = { path = "../drone", version = "0.12.1" }
solana-logger = { path = "../logger", version = "0.12.1" }
solana-metrics = { path = "../metrics", version = "0.12.1" }
solana-sdk = { path = "../sdk", version = "0.12.1" }
solana = { path = "../core", version = "0.12.3" }
solana-client = { path = "../client", version = "0.12.3" }
solana-drone = { path = "../drone", version = "0.12.3" }
solana-logger = { path = "../logger", version = "0.12.3" }
solana-metrics = { path = "../metrics", version = "0.12.3" }
solana-sdk = { path = "../sdk", version = "0.12.3" }
[features]
cuda = ["solana/cuda"]

View File

@@ -1,10 +1,13 @@
#![feature(test)]
extern crate test;
#[macro_use]
extern crate solana;
use rand::{thread_rng, Rng};
use rayon::prelude::*;
use solana::banking_stage::{create_test_recorder, BankingStage};
use solana::blocktree::{get_tmp_ledger_path, Blocktree};
use solana::cluster_info::ClusterInfo;
use solana::cluster_info::Node;
use solana::packet::to_packets_chunked;
@@ -104,33 +107,41 @@ fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
(x, iter::repeat(1).take(len).collect())
})
.collect();
let (exit, poh_recorder, poh_service, signal_receiver) = create_test_recorder(&bank);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let _banking_stage = BankingStage::new(&cluster_info, &poh_recorder, verified_receiver);
poh_recorder.lock().unwrap().set_bank(&bank);
let ledger_path = get_tmp_ledger_path!();
{
let blocktree = Arc::new(
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
);
let (exit, poh_recorder, poh_service, signal_receiver) =
create_test_recorder(&bank, &blocktree);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let _banking_stage = BankingStage::new(&cluster_info, &poh_recorder, verified_receiver);
poh_recorder.lock().unwrap().set_bank(&bank);
let mut id = genesis_block.hash();
for _ in 0..(MAX_RECENT_BLOCKHASHES * DEFAULT_TICKS_PER_SLOT as usize) {
id = hash(&id.as_ref());
bank.register_tick(&id);
}
let half_len = verified.len() / 2;
let mut start = 0;
bencher.iter(move || {
// make sure the transactions are still valid
bank.register_tick(&genesis_block.hash());
for v in verified[start..start + half_len].chunks(verified.len() / num_threads) {
verified_sender.send(v.to_vec()).unwrap();
let mut id = genesis_block.hash();
for _ in 0..(MAX_RECENT_BLOCKHASHES * DEFAULT_TICKS_PER_SLOT as usize) {
id = hash(&id.as_ref());
bank.register_tick(&id);
}
check_txs(&signal_receiver, txes / 2);
bank.clear_signatures();
start += half_len;
start %= verified.len();
});
exit.store(true, Ordering::Relaxed);
poh_service.join().unwrap();
let half_len = verified.len() / 2;
let mut start = 0;
bencher.iter(move || {
// make sure the transactions are still valid
bank.register_tick(&genesis_block.hash());
for v in verified[start..start + half_len].chunks(verified.len() / num_threads) {
verified_sender.send(v.to_vec()).unwrap();
}
check_txs(&signal_receiver, txes / 2);
bank.clear_signatures();
start += half_len;
start %= verified.len();
});
exit.store(true, Ordering::Relaxed);
poh_service.join().unwrap();
}
Blocktree::destroy(&ledger_path).unwrap();
}
#[bench]
@@ -211,31 +222,40 @@ fn bench_banking_stage_multi_programs(bencher: &mut Bencher) {
(x, iter::repeat(1).take(len).collect())
})
.collect();
let (exit, poh_recorder, poh_service, signal_receiver) = create_test_recorder(&bank);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let _banking_stage = BankingStage::new(&cluster_info, &poh_recorder, verified_receiver);
poh_recorder.lock().unwrap().set_bank(&bank);
let mut id = genesis_block.hash();
for _ in 0..(MAX_RECENT_BLOCKHASHES * DEFAULT_TICKS_PER_SLOT as usize) {
id = hash(&id.as_ref());
bank.register_tick(&id);
}
let ledger_path = get_tmp_ledger_path!();
{
let blocktree = Arc::new(
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
);
let (exit, poh_recorder, poh_service, signal_receiver) =
create_test_recorder(&bank, &blocktree);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let _banking_stage = BankingStage::new(&cluster_info, &poh_recorder, verified_receiver);
poh_recorder.lock().unwrap().set_bank(&bank);
let half_len = verified.len() / 2;
let mut start = 0;
bencher.iter(move || {
// make sure the transactions are still valid
bank.register_tick(&genesis_block.hash());
for v in verified[start..start + half_len].chunks(verified.len() / num_threads) {
verified_sender.send(v.to_vec()).unwrap();
let mut id = genesis_block.hash();
for _ in 0..(MAX_RECENT_BLOCKHASHES * DEFAULT_TICKS_PER_SLOT as usize) {
id = hash(&id.as_ref());
bank.register_tick(&id);
}
check_txs(&signal_receiver, txes / 2);
bank.clear_signatures();
start += half_len;
start %= verified.len();
});
exit.store(true, Ordering::Relaxed);
poh_service.join().unwrap();
let half_len = verified.len() / 2;
let mut start = 0;
bencher.iter(move || {
// make sure the transactions are still valid
bank.register_tick(&genesis_block.hash());
for v in verified[start..start + half_len].chunks(verified.len() / num_threads) {
verified_sender.send(v.to_vec()).unwrap();
}
check_txs(&signal_receiver, txes / 2);
bank.clear_signatures();
start += half_len;
start %= verified.len();
});
exit.store(true, Ordering::Relaxed);
poh_service.join().unwrap();
}
Blocktree::destroy(&ledger_path).unwrap();
}

View File

@@ -18,9 +18,9 @@ CRATES=(
metrics
client
drone
programs/{budget_api,rewards_api,storage_api,token_api,vote_api}
programs/{budget_api,storage_api,token_api,vote_api}
runtime
programs/{budget,bpf_loader,vote,rewards,storage,token,vote}
programs/{budget,bpf_loader,storage,token,vote}
vote-signer
core
fullnode

View File

@@ -11,6 +11,8 @@ clientNodeCount=0
additionalFullNodeCount=10
publicNetwork=false
skipSetup=false
skipStart=false
externalNode=false
tarChannelOrTag=edge
delete=false
enableGpu=false
@@ -65,7 +67,7 @@ zone=$3
[[ -n $zone ]] || usage "Zone not specified"
shift 3
while getopts "h?p:Pn:c:t:gG:a:Dbd:ru" opt; do
while getopts "h?p:Pn:c:t:gG:a:Dbd:rusx" opt; do
case $opt in
h | \?)
usage
@@ -111,6 +113,12 @@ while getopts "h?p:Pn:c:t:gG:a:Dbd:ru" opt; do
r)
skipSetup=true
;;
s)
skipStart=true
;;
x)
externalNode=true
;;
u)
blockstreamer=true
;;
@@ -142,7 +150,7 @@ set -x
if ! $skipSetup; then
echo "--- $cloudProvider.sh delete"
time net/"$cloudProvider".sh delete -z "$zone" -p "$netName"
time net/"$cloudProvider".sh delete -z "$zone" -p "$netName" ${externalNode:+-x}
if $delete; then
exit 0
fi
@@ -180,6 +188,10 @@ if ! $skipSetup; then
create_args+=(-P)
fi
if $externalNode; then
create_args+=(-x)
fi
time net/"$cloudProvider".sh create "${create_args[@]}"
else
echo "--- $cloudProvider.sh config"
@@ -218,19 +230,22 @@ if $skipSetup; then
fi
ok=true
(
if $skipSetup; then
# TODO: Enable rolling updates
#op=update
op=restart
else
op=start
fi
if ! $skipStart; then
(
if $skipSetup; then
# TODO: Enable rolling updates
#op=update
op=restart
else
op=start
fi
# shellcheck disable=SC2086 # Don't want to double quote maybeRejectExtraNodes
time net/net.sh $op -t "$tarChannelOrTag" \
$maybeSkipSetup $maybeRejectExtraNodes $maybeNoValidatorSanity $maybeNoLedgerVerify
) || ok=false
# shellcheck disable=SC2086 # Don't want to double quote maybeRejectExtraNodes
time net/net.sh $op -t "$tarChannelOrTag" \
$maybeSkipSetup $maybeRejectExtraNodes $maybeNoValidatorSanity $maybeNoLedgerVerify
) || ok=false
net/net.sh logs
fi
net/net.sh logs
$ok

View File

@@ -64,6 +64,10 @@ EOF
exit 0
fi
if [[ -n $TESTNET_DB_HOST ]]; then
SOLANA_METRICS_PARTIAL_CONFIG="host=$TESTNET_DB_HOST,$SOLANA_METRICS_PARTIAL_CONFIG"
fi
export SOLANA_METRICS_CONFIG="db=$TESTNET,$SOLANA_METRICS_PARTIAL_CONFIG"
echo "SOLANA_METRICS_CONFIG: $SOLANA_METRICS_CONFIG"
source scripts/configure-metrics.sh
@@ -102,6 +106,7 @@ steps:
env:
TESTNET: "$TESTNET"
TESTNET_OP: "$TESTNET_OP"
TESTNET_DB_HOST: "$TESTNET_DB_HOST"
EOF
) | buildkite-agent pipeline upload
exit 0
@@ -205,8 +210,13 @@ start() {
NO_VALIDATOR_SANITY=1 \
RUST_LOG=solana=info \
ci/testnet-deploy.sh beta-testnet-solana-com ec2 us-west-1a \
-t "$CHANNEL_OR_TAG" -n 3 -c 0 -u -P -a eipalloc-0f286cf8a0771ce35 \
-b \
-t "$CHANNEL_OR_TAG" -n 35 -c 0 -s -u -P -a eipalloc-0f286cf8a0771ce35 \
${maybeReuseLedger:+-r} \
${maybeDelete:+-D}
NO_VALIDATOR_SANITY=1 \
RUST_LOG=solana=info \
ci/testnet-deploy.sh beta-testnet-solana-com gce us-west1-a \
-t "$CHANNEL_OR_TAG" -n 65 -c 0 -x -P \
${maybeReuseLedger:+-r} \
${maybeDelete:+-D}
)

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-client"
version = "0.12.1"
version = "0.12.3"
description = "Solana Client"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -14,11 +14,11 @@ bs58 = "0.2.0"
log = "0.4.2"
reqwest = "0.9.11"
serde_json = "1.0.39"
solana-metrics = { path = "../metrics", version = "0.12.1" }
solana-netutil = { path = "../netutil", version = "0.12.1" }
solana-sdk = { path = "../sdk", version = "0.12.1" }
solana-metrics = { path = "../metrics", version = "0.12.3" }
solana-netutil = { path = "../netutil", version = "0.12.3" }
solana-sdk = { path = "../sdk", version = "0.12.3" }
[dev-dependencies]
jsonrpc-core = "10.1.0"
jsonrpc-http-server = "10.1.0"
solana-logger = { path = "../logger", version = "0.12.1" }
solana-logger = { path = "../logger", version = "0.12.3" }

View File

@@ -1,7 +1,7 @@
[package]
name = "solana"
description = "Blockchain, Rebuilt for Scale"
version = "0.12.1"
version = "0.12.3"
documentation = "https://docs.rs/solana"
homepage = "https://solana.com/"
readme = "../README.md"
@@ -44,17 +44,17 @@ rocksdb = "0.11.0"
serde = "1.0.89"
serde_derive = "1.0.88"
serde_json = "1.0.39"
solana-budget-api = { path = "../programs/budget_api", version = "0.12.1" }
solana-client = { path = "../client", version = "0.12.1" }
solana-drone = { path = "../drone", version = "0.12.1" }
solana-logger = { path = "../logger", version = "0.12.1" }
solana-metrics = { path = "../metrics", version = "0.12.1" }
solana-netutil = { path = "../netutil", version = "0.12.1" }
solana-runtime = { path = "../runtime", version = "0.12.1" }
solana-sdk = { path = "../sdk", version = "0.12.1" }
solana-storage-api = { path = "../programs/storage_api", version = "0.12.1" }
solana-vote-api = { path = "../programs/vote_api", version = "0.12.1" }
solana-vote-signer = { path = "../vote-signer", version = "0.12.1" }
solana-budget-api = { path = "../programs/budget_api", version = "0.12.3" }
solana-client = { path = "../client", version = "0.12.3" }
solana-drone = { path = "../drone", version = "0.12.3" }
solana-logger = { path = "../logger", version = "0.12.3" }
solana-metrics = { path = "../metrics", version = "0.12.3" }
solana-netutil = { path = "../netutil", version = "0.12.3" }
solana-runtime = { path = "../runtime", version = "0.12.3" }
solana-sdk = { path = "../sdk", version = "0.12.3" }
solana-storage-api = { path = "../programs/storage_api", version = "0.12.3" }
solana-vote-api = { path = "../programs/vote_api", version = "0.12.3" }
solana-vote-signer = { path = "../vote-signer", version = "0.12.3" }
sys-info = "0.5.6"
tokio = "0.1"
tokio-codec = "0.1"
@@ -63,5 +63,5 @@ untrusted = "0.6.2"
[dev-dependencies]
hex-literal = "0.1.3"
matches = "0.1.6"
solana-vote-program = { path = "../programs/vote", version = "0.12.1" }
solana-budget-program = { path = "../programs/budget", version = "0.12.1" }
solana-vote-program = { path = "../programs/vote", version = "0.12.3" }
solana-budget-program = { path = "../programs/budget", version = "0.12.3" }

View File

@@ -1,10 +1,9 @@
//! The `banking_stage` processes Transaction messages. It is intended to be used
//! to contruct a software pipeline. The stage uses all available CPU cores and
//! can do its processing in parallel with signature verification on the GPU.
use crate::blocktree::Blocktree;
use crate::cluster_info::ClusterInfo;
use crate::entry::Entry;
use crate::leader_confirmation_service::LeaderConfirmationService;
use crate::leader_schedule_utils;
use crate::packet;
use crate::packet::SharedPackets;
@@ -17,6 +16,7 @@ use crate::sigverify_stage::VerifiedPackets;
use bincode::deserialize;
use solana_metrics::counter::Counter;
use solana_runtime::bank::{self, Bank, BankError};
use solana_runtime::locked_accounts_results::LockedAccountsResults;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::timing::{self, duration_as_us, MAX_RECENT_BLOCKHASHES};
use solana_sdk::transaction::Transaction;
@@ -50,10 +50,8 @@ impl BankingStage {
// Once an entry has been recorded, its blockhash is registered with the bank.
let exit = Arc::new(AtomicBool::new(false));
// Single thread to compute confirmation
let lcs_handle = LeaderConfirmationService::start(&poh_recorder, exit.clone());
// Many banks that process transactions in parallel.
let mut bank_thread_hdls: Vec<JoinHandle<()>> = (0..4)
let bank_thread_hdls: Vec<JoinHandle<()>> = (0..4)
.map(|_| {
let verified_receiver = verified_receiver.clone();
let poh_recorder = poh_recorder.clone();
@@ -68,7 +66,6 @@ impl BankingStage {
.unwrap()
})
.collect();
bank_thread_hdls.push(lcs_handle);
Self { bank_thread_hdls }
}
@@ -227,18 +224,15 @@ impl BankingStage {
bank: &Bank,
txs: &[Transaction],
poh: &Arc<Mutex<PohRecorder>>,
lock_results: &[bank::Result<()>],
lock_results: &LockedAccountsResults,
) -> Result<()> {
let now = Instant::now();
// Use a shorter maximum age when adding transactions into the pipeline. This will reduce
// the likelihood of any single thread getting starved and processing old ids.
// TODO: Banking stage threads should be prioritized to complete faster then this queue
// expires.
let (loaded_accounts, results) = bank.load_and_execute_transactions(
txs,
lock_results.to_vec(),
MAX_RECENT_BLOCKHASHES / 2,
);
let (loaded_accounts, results) =
bank.load_and_execute_transactions(txs, lock_results, MAX_RECENT_BLOCKHASHES / 2);
let load_execute_time = now.elapsed();
let record_time = {
@@ -280,7 +274,7 @@ impl BankingStage {
let now = Instant::now();
// Once the accounts are new transactions can enter the pipeline to process them
bank.unlock_accounts(&txs, &lock_results);
drop(lock_results);
let unlock_time = now.elapsed();
debug!(
@@ -441,6 +435,7 @@ impl Service for BankingStage {
pub fn create_test_recorder(
bank: &Arc<Bank>,
blocktree: &Arc<Blocktree>,
) -> (
Arc<AtomicBool>,
Arc<Mutex<PohRecorder>>,
@@ -455,6 +450,7 @@ pub fn create_test_recorder(
Some(4),
bank.ticks_per_slot(),
&Pubkey::default(),
blocktree,
);
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
let poh_service = PohService::new(poh_recorder.clone(), &PohServiceConfig::default(), &exit);
@@ -464,10 +460,12 @@ pub fn create_test_recorder(
#[cfg(test)]
mod tests {
use super::*;
use crate::blocktree::get_tmp_ledger_path;
use crate::cluster_info::Node;
use crate::entry::EntrySlice;
use crate::packet::to_packets;
use crate::poh_recorder::WorkingBank;
use crate::{get_tmp_ledger_path, tmp_ledger_name};
use solana_sdk::genesis_block::GenesisBlock;
use solana_sdk::native_program::ProgramError;
use solana_sdk::signature::{Keypair, KeypairUtil};
@@ -480,14 +478,22 @@ mod tests {
let (genesis_block, _mint_keypair) = GenesisBlock::new(2);
let bank = Arc::new(Bank::new(&genesis_block));
let (verified_sender, verified_receiver) = channel();
let (exit, poh_recorder, poh_service, _entry_receiever) = create_test_recorder(&bank);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let banking_stage = BankingStage::new(&cluster_info, &poh_recorder, verified_receiver);
drop(verified_sender);
exit.store(true, Ordering::Relaxed);
banking_stage.join().unwrap();
poh_service.join().unwrap();
let ledger_path = get_tmp_ledger_path!();
{
let blocktree = Arc::new(
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
);
let (exit, poh_recorder, poh_service, _entry_receiever) =
create_test_recorder(&bank, &blocktree);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let banking_stage = BankingStage::new(&cluster_info, &poh_recorder, verified_receiver);
drop(verified_sender);
exit.store(true, Ordering::Relaxed);
banking_stage.join().unwrap();
poh_service.join().unwrap();
}
Blocktree::destroy(&ledger_path).unwrap();
}
#[test]
@@ -498,28 +504,36 @@ mod tests {
let bank = Arc::new(Bank::new(&genesis_block));
let start_hash = bank.last_blockhash();
let (verified_sender, verified_receiver) = channel();
let (exit, poh_recorder, poh_service, entry_receiver) = create_test_recorder(&bank);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
poh_recorder.lock().unwrap().set_bank(&bank);
let banking_stage = BankingStage::new(&cluster_info, &poh_recorder, verified_receiver);
trace!("sending bank");
sleep(Duration::from_millis(600));
drop(verified_sender);
exit.store(true, Ordering::Relaxed);
poh_service.join().unwrap();
drop(poh_recorder);
let ledger_path = get_tmp_ledger_path!();
{
let blocktree = Arc::new(
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
);
let (exit, poh_recorder, poh_service, entry_receiver) =
create_test_recorder(&bank, &blocktree);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
poh_recorder.lock().unwrap().set_bank(&bank);
let banking_stage = BankingStage::new(&cluster_info, &poh_recorder, verified_receiver);
trace!("sending bank");
sleep(Duration::from_millis(600));
drop(verified_sender);
exit.store(true, Ordering::Relaxed);
poh_service.join().unwrap();
drop(poh_recorder);
trace!("getting entries");
let entries: Vec<_> = entry_receiver
.iter()
.flat_map(|x| x.1.into_iter().map(|e| e.0))
.collect();
trace!("done");
assert_eq!(entries.len(), genesis_block.ticks_per_slot as usize - 1);
assert!(entries.verify(&start_hash));
assert_eq!(entries[entries.len() - 1].hash, bank.last_blockhash());
banking_stage.join().unwrap();
trace!("getting entries");
let entries: Vec<_> = entry_receiver
.iter()
.flat_map(|x| x.1.into_iter().map(|e| e.0))
.collect();
trace!("done");
assert_eq!(entries.len(), genesis_block.ticks_per_slot as usize - 1);
assert!(entries.verify(&start_hash));
assert_eq!(entries[entries.len() - 1].hash, bank.last_blockhash());
banking_stage.join().unwrap();
}
Blocktree::destroy(&ledger_path).unwrap();
}
#[test]
@@ -528,54 +542,63 @@ mod tests {
let bank = Arc::new(Bank::new(&genesis_block));
let start_hash = bank.last_blockhash();
let (verified_sender, verified_receiver) = channel();
let (exit, poh_recorder, poh_service, entry_receiver) = create_test_recorder(&bank);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
poh_recorder.lock().unwrap().set_bank(&bank);
let banking_stage = BankingStage::new(&cluster_info, &poh_recorder, verified_receiver);
let ledger_path = get_tmp_ledger_path!();
{
let blocktree = Arc::new(
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
);
let (exit, poh_recorder, poh_service, entry_receiver) =
create_test_recorder(&bank, &blocktree);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
poh_recorder.lock().unwrap().set_bank(&bank);
let banking_stage = BankingStage::new(&cluster_info, &poh_recorder, verified_receiver);
// good tx
let keypair = mint_keypair;
let tx = SystemTransaction::new_account(&keypair, &keypair.pubkey(), 1, start_hash, 0);
// good tx
let keypair = mint_keypair;
let tx = SystemTransaction::new_account(&keypair, &keypair.pubkey(), 1, start_hash, 0);
// good tx, but no verify
let tx_no_ver =
SystemTransaction::new_account(&keypair, &keypair.pubkey(), 1, start_hash, 0);
// good tx, but no verify
let tx_no_ver =
SystemTransaction::new_account(&keypair, &keypair.pubkey(), 1, start_hash, 0);
// bad tx, AccountNotFound
let keypair = Keypair::new();
let tx_anf = SystemTransaction::new_account(&keypair, &keypair.pubkey(), 1, start_hash, 0);
// bad tx, AccountNotFound
let keypair = Keypair::new();
let tx_anf =
SystemTransaction::new_account(&keypair, &keypair.pubkey(), 1, start_hash, 0);
// send 'em over
let packets = to_packets(&[tx, tx_no_ver, tx_anf]);
// send 'em over
let packets = to_packets(&[tx, tx_no_ver, tx_anf]);
// glad they all fit
assert_eq!(packets.len(), 1);
verified_sender // tx, no_ver, anf
.send(vec![(packets[0].clone(), vec![1u8, 0u8, 1u8])])
.unwrap();
// glad they all fit
assert_eq!(packets.len(), 1);
verified_sender // tx, no_ver, anf
.send(vec![(packets[0].clone(), vec![1u8, 0u8, 1u8])])
.unwrap();
drop(verified_sender);
exit.store(true, Ordering::Relaxed);
poh_service.join().unwrap();
drop(poh_recorder);
drop(verified_sender);
exit.store(true, Ordering::Relaxed);
poh_service.join().unwrap();
drop(poh_recorder);
//receive entries + ticks
let entries: Vec<Vec<Entry>> = entry_receiver
.iter()
.map(|x| x.1.into_iter().map(|e| e.0).collect())
.collect();
//receive entries + ticks
let entries: Vec<Vec<Entry>> = entry_receiver
.iter()
.map(|x| x.1.into_iter().map(|e| e.0).collect())
.collect();
assert!(entries.len() >= 1);
assert!(entries.len() >= 1);
let mut blockhash = start_hash;
entries.iter().for_each(|entries| {
assert_eq!(entries.len(), 1);
assert!(entries.verify(&blockhash));
blockhash = entries.last().unwrap().hash;
});
drop(entry_receiver);
banking_stage.join().unwrap();
let mut blockhash = start_hash;
entries.iter().for_each(|entries| {
assert_eq!(entries.len(), 1);
assert!(entries.verify(&blockhash));
blockhash = entries.last().unwrap().hash;
});
drop(entry_receiver);
banking_stage.join().unwrap();
}
Blocktree::destroy(&ledger_path).unwrap();
}
#[test]
@@ -586,71 +609,74 @@ mod tests {
let (genesis_block, mint_keypair) = GenesisBlock::new(2);
let bank = Arc::new(Bank::new(&genesis_block));
let (verified_sender, verified_receiver) = channel();
let (exit, poh_recorder, poh_service, entry_receiver) = create_test_recorder(&bank);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
poh_recorder.lock().unwrap().set_bank(&bank);
let _banking_stage = BankingStage::new(&cluster_info, &poh_recorder, verified_receiver);
let ledger_path = get_tmp_ledger_path!();
{
let blocktree = Arc::new(
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
);
let (exit, poh_recorder, poh_service, entry_receiver) =
create_test_recorder(&bank, &blocktree);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
poh_recorder.lock().unwrap().set_bank(&bank);
let _banking_stage = BankingStage::new(&cluster_info, &poh_recorder, verified_receiver);
// Process a batch that includes a transaction that receives two lamports.
let alice = Keypair::new();
let tx = SystemTransaction::new_account(
&mint_keypair,
&alice.pubkey(),
2,
genesis_block.hash(),
0,
);
// Process a batch that includes a transaction that receives two lamports.
let alice = Keypair::new();
let tx = SystemTransaction::new_account(
&mint_keypair,
&alice.pubkey(),
2,
genesis_block.hash(),
0,
);
let packets = to_packets(&[tx]);
verified_sender
.send(vec![(packets[0].clone(), vec![1u8])])
.unwrap();
let packets = to_packets(&[tx]);
verified_sender
.send(vec![(packets[0].clone(), vec![1u8])])
.unwrap();
// Process a second batch that spends one of those lamports.
let tx = SystemTransaction::new_account(
&alice,
&mint_keypair.pubkey(),
1,
genesis_block.hash(),
0,
);
let packets = to_packets(&[tx]);
verified_sender
.send(vec![(packets[0].clone(), vec![1u8])])
.unwrap();
// Process a second batch that spends one of those lamports.
let tx = SystemTransaction::new_account(
&alice,
&mint_keypair.pubkey(),
1,
genesis_block.hash(),
0,
);
let packets = to_packets(&[tx]);
verified_sender
.send(vec![(packets[0].clone(), vec![1u8])])
.unwrap();
drop(verified_sender);
exit.store(true, Ordering::Relaxed);
poh_service.join().unwrap();
drop(poh_recorder);
drop(verified_sender);
exit.store(true, Ordering::Relaxed);
poh_service.join().unwrap();
drop(poh_recorder);
// Poll the entry_receiver, feeding it into a new bank
// until the balance is what we expect.
let bank = Bank::new(&genesis_block);
for _ in 0..10 {
let entries: Vec<_> = entry_receiver
.iter()
.flat_map(|x| x.1.into_iter().map(|e| e.0))
.collect();
for entry in &entries {
bank.process_transactions(&entry.transactions)
// Poll the entry_receiver, feeding it into a new bank
// until the balance is what we expect.
let bank = Bank::new(&genesis_block);
for _ in 0..10 {
let entries: Vec<_> = entry_receiver
.iter()
.for_each(|x| assert_eq!(*x, Ok(())));
}
.flat_map(|x| x.1.into_iter().map(|e| e.0))
.collect();
if bank.get_balance(&alice.pubkey()) == 1 {
break;
}
for entry in &entries {
bank.process_transactions(&entry.transactions)
.iter()
.for_each(|x| assert_eq!(*x, Ok(())));
}
sleep(Duration::from_millis(100));
if bank.get_balance(&alice.pubkey()) == 1 {
break;
}
sleep(Duration::from_millis(100));
}
}
// Assert the user holds one lamport, not two. If the stage only outputs one
// entry, then the second transaction will be rejected, because it drives
// the account balance below zero before the credit is added.
assert_eq!(bank.get_balance(&alice.pubkey()), 1);
Blocktree::destroy(&ledger_path).unwrap();
}
#[test]
@@ -663,43 +689,50 @@ mod tests {
max_tick_height: std::u64::MAX,
};
let (poh_recorder, entry_receiver) = PohRecorder::new(
bank.tick_height(),
bank.last_blockhash(),
bank.slot(),
None,
bank.ticks_per_slot(),
&Pubkey::default(),
);
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let (poh_recorder, entry_receiver) = PohRecorder::new(
bank.tick_height(),
bank.last_blockhash(),
bank.slot(),
None,
bank.ticks_per_slot(),
&Pubkey::default(),
&Arc::new(blocktree),
);
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
poh_recorder.lock().unwrap().set_working_bank(working_bank);
let pubkey = Keypair::new().pubkey();
poh_recorder.lock().unwrap().set_working_bank(working_bank);
let pubkey = Keypair::new().pubkey();
let transactions = vec![
SystemTransaction::new_move(&mint_keypair, &pubkey, 1, genesis_block.hash(), 0),
SystemTransaction::new_move(&mint_keypair, &pubkey, 1, genesis_block.hash(), 0),
];
let transactions = vec![
SystemTransaction::new_move(&mint_keypair, &pubkey, 1, genesis_block.hash(), 0),
SystemTransaction::new_move(&mint_keypair, &pubkey, 1, genesis_block.hash(), 0),
];
let mut results = vec![Ok(()), Ok(())];
BankingStage::record_transactions(&transactions, &results, &poh_recorder).unwrap();
let (_, entries) = entry_receiver.recv().unwrap();
assert_eq!(entries[0].0.transactions.len(), transactions.len());
let mut results = vec![Ok(()), Ok(())];
BankingStage::record_transactions(&transactions, &results, &poh_recorder).unwrap();
let (_, entries) = entry_receiver.recv().unwrap();
assert_eq!(entries[0].0.transactions.len(), transactions.len());
// ProgramErrors should still be recorded
results[0] = Err(BankError::ProgramError(
1,
ProgramError::ResultWithNegativeLamports,
));
BankingStage::record_transactions(&transactions, &results, &poh_recorder).unwrap();
let (_, entries) = entry_receiver.recv().unwrap();
assert_eq!(entries[0].0.transactions.len(), transactions.len());
// ProgramErrors should still be recorded
results[0] = Err(BankError::ProgramError(
1,
ProgramError::ResultWithNegativeLamports,
));
BankingStage::record_transactions(&transactions, &results, &poh_recorder).unwrap();
let (_, entries) = entry_receiver.recv().unwrap();
assert_eq!(entries[0].0.transactions.len(), transactions.len());
// Other BankErrors should not be recorded
results[0] = Err(BankError::AccountNotFound);
BankingStage::record_transactions(&transactions, &results, &poh_recorder).unwrap();
let (_, entries) = entry_receiver.recv().unwrap();
assert_eq!(entries[0].0.transactions.len(), transactions.len() - 1);
// Other BankErrors should not be recorded
results[0] = Err(BankError::AccountNotFound);
BankingStage::record_transactions(&transactions, &results, &poh_recorder).unwrap();
let (_, entries) = entry_receiver.recv().unwrap();
assert_eq!(entries[0].0.transactions.len(), transactions.len() - 1);
}
Blocktree::destroy(&ledger_path).unwrap();
}
#[test]
@@ -722,53 +755,61 @@ mod tests {
min_tick_height: bank.tick_height(),
max_tick_height: bank.tick_height() + 1,
};
let (poh_recorder, entry_receiver) = PohRecorder::new(
bank.tick_height(),
bank.last_blockhash(),
bank.slot(),
Some(4),
bank.ticks_per_slot(),
&pubkey,
);
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let (poh_recorder, entry_receiver) = PohRecorder::new(
bank.tick_height(),
bank.last_blockhash(),
bank.slot(),
Some(4),
bank.ticks_per_slot(),
&pubkey,
&Arc::new(blocktree),
);
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
poh_recorder.lock().unwrap().set_working_bank(working_bank);
poh_recorder.lock().unwrap().set_working_bank(working_bank);
BankingStage::process_and_record_transactions(&bank, &transactions, &poh_recorder).unwrap();
poh_recorder.lock().unwrap().tick();
BankingStage::process_and_record_transactions(&bank, &transactions, &poh_recorder)
.unwrap();
poh_recorder.lock().unwrap().tick();
let mut done = false;
// read entries until I find mine, might be ticks...
while let Ok((_, entries)) = entry_receiver.recv() {
for (entry, _) in entries {
if !entry.is_tick() {
trace!("got entry");
assert_eq!(entry.transactions.len(), transactions.len());
assert_eq!(bank.get_balance(&pubkey), 1);
done = true;
let mut done = false;
// read entries until I find mine, might be ticks...
while let Ok((_, entries)) = entry_receiver.recv() {
for (entry, _) in entries {
if !entry.is_tick() {
trace!("got entry");
assert_eq!(entry.transactions.len(), transactions.len());
assert_eq!(bank.get_balance(&pubkey), 1);
done = true;
}
}
if done {
break;
}
}
if done {
break;
}
trace!("done ticking");
assert_eq!(done, true);
let transactions = vec![SystemTransaction::new_move(
&mint_keypair,
&pubkey,
2,
genesis_block.hash(),
0,
)];
assert_matches!(
BankingStage::process_and_record_transactions(&bank, &transactions, &poh_recorder),
Err(Error::PohRecorderError(PohRecorderError::MaxHeightReached))
);
assert_eq!(bank.get_balance(&pubkey), 1);
}
trace!("done ticking");
assert_eq!(done, true);
let transactions = vec![SystemTransaction::new_move(
&mint_keypair,
&pubkey,
2,
genesis_block.hash(),
0,
)];
assert_matches!(
BankingStage::process_and_record_transactions(&bank, &transactions, &poh_recorder),
Err(Error::PohRecorderError(PohRecorderError::MaxHeightReached))
);
assert_eq!(bank.get_balance(&pubkey), 1);
Blocktree::destroy(&ledger_path).unwrap();
}
}

View File

@@ -5,6 +5,7 @@ use crate::leader_schedule_utils;
use rayon::prelude::*;
use solana_metrics::counter::Counter;
use solana_runtime::bank::{Bank, BankError, Result};
use solana_runtime::locked_accounts_results::LockedAccountsResults;
use solana_sdk::genesis_block::GenesisBlock;
use solana_sdk::timing::duration_as_ms;
use solana_sdk::timing::MAX_RECENT_BLOCKHASHES;
@@ -27,17 +28,16 @@ fn first_err(results: &[Result<()>]) -> Result<()> {
Ok(())
}
fn par_execute_entries(bank: &Bank, entries: &[(&Entry, Vec<Result<()>>)]) -> Result<()> {
fn par_execute_entries(bank: &Bank, entries: &[(&Entry, LockedAccountsResults)]) -> Result<()> {
inc_new_counter_info!("bank-par_execute_entries-count", entries.len());
let results: Vec<Result<()>> = entries
.into_par_iter()
.map(|(e, lock_results)| {
.map(|(e, locked_accounts)| {
let results = bank.load_execute_and_commit_transactions(
&e.transactions,
lock_results.to_vec(),
locked_accounts,
MAX_RECENT_BLOCKHASHES,
);
bank.unlock_accounts(&e.transactions, &results);
first_err(&results)
})
.collect();
@@ -65,11 +65,12 @@ fn par_process_entries(bank: &Bank, entries: &[Entry]) -> Result<()> {
let lock_results = bank.lock_accounts(&entry.transactions);
// if any of the locks error out
// execute the current group
if first_err(&lock_results).is_err() {
if first_err(lock_results.locked_accounts_results()).is_err() {
par_execute_entries(bank, &mt_group)?;
// Drop all the locks on accounts by clearing the LockedAccountsFinalizer's in the
// mt_group
mt_group = vec![];
//reset the lock and push the entry
bank.unlock_accounts(&entry.transactions, &lock_results);
drop(lock_results);
let lock_results = bank.lock_accounts(&entry.transactions);
mt_group.push((entry, lock_results));
} else {
@@ -627,7 +628,98 @@ mod tests {
}
#[test]
fn test_par_process_entries_2_entries_par() {
fn test_process_entries_2_txes_collision_and_error() {
let (genesis_block, mint_keypair) = GenesisBlock::new(1000);
let bank = Bank::new(&genesis_block);
let keypair1 = Keypair::new();
let keypair2 = Keypair::new();
let keypair3 = Keypair::new();
let keypair4 = Keypair::new();
// fund: put 4 in each of 1 and 2
assert_matches!(
bank.transfer(4, &mint_keypair, &keypair1.pubkey(), bank.last_blockhash()),
Ok(_)
);
assert_matches!(
bank.transfer(4, &mint_keypair, &keypair2.pubkey(), bank.last_blockhash()),
Ok(_)
);
assert_matches!(
bank.transfer(4, &mint_keypair, &keypair4.pubkey(), bank.last_blockhash()),
Ok(_)
);
// construct an Entry whose 2nd transaction would cause a lock conflict with previous entry
let entry_1_to_mint = next_entry(
&bank.last_blockhash(),
1,
vec![
SystemTransaction::new_account(
&keypair1,
&mint_keypair.pubkey(),
1,
bank.last_blockhash(),
0,
),
SystemTransaction::new_move(
&keypair4,
&keypair4.pubkey(),
1,
Hash::default(), // Should cause a transaction failure with BlockhashNotFound
0,
),
],
);
let entry_2_to_3_mint_to_1 = next_entry(
&entry_1_to_mint.hash,
1,
vec![
SystemTransaction::new_account(
&keypair2,
&keypair3.pubkey(),
2,
bank.last_blockhash(),
0,
), // should be fine
SystemTransaction::new_account(
&keypair1,
&mint_keypair.pubkey(),
2,
bank.last_blockhash(),
0,
), // will collide
],
);
assert!(process_entries(
&bank,
&[entry_1_to_mint.clone(), entry_2_to_3_mint_to_1.clone()]
)
.is_err());
// First transaction in first entry succeeded, so keypair1 lost 1 lamport
assert_eq!(bank.get_balance(&keypair1.pubkey()), 3);
assert_eq!(bank.get_balance(&keypair2.pubkey()), 4);
// Check all accounts are unlocked
let txs1 = &entry_1_to_mint.transactions[..];
let txs2 = &entry_2_to_3_mint_to_1.transactions[..];
let locked_accounts1 = bank.lock_accounts(txs1);
for result in locked_accounts1.locked_accounts_results() {
assert!(result.is_ok());
}
// txs1 and txs2 have accounts that conflict, so we must drop txs1 first
drop(locked_accounts1);
let locked_accounts2 = bank.lock_accounts(txs2);
for result in locked_accounts2.locked_accounts_results() {
assert!(result.is_ok());
}
}
#[test]
fn test_process_entries_2_entries_par() {
let (genesis_block, mint_keypair) = GenesisBlock::new(1000);
let bank = Bank::new(&genesis_block);
let keypair1 = Keypair::new();

View File

@@ -108,13 +108,15 @@ impl Fullnode {
bank.tick_height(),
bank.last_blockhash(),
);
let blocktree = Arc::new(blocktree);
let (poh_recorder, entry_receiver) = PohRecorder::new(
bank.tick_height(),
bank.last_blockhash(),
bank.slot(),
leader_schedule_utils::next_leader_slot(&id, bank.slot(), &bank),
leader_schedule_utils::next_leader_slot(&id, bank.slot(), &bank, Some(&blocktree)),
bank.ticks_per_slot(),
&id,
&blocktree,
);
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
let poh_service = PohService::new(poh_recorder.clone(), &config.tick_config, &exit);
@@ -133,7 +135,6 @@ impl Fullnode {
node.sockets.gossip.local_addr().unwrap()
);
let blocktree = Arc::new(blocktree);
let bank_forks = Arc::new(RwLock::new(bank_forks));
node.info.wallclock = timestamp();

View File

@@ -1,192 +0,0 @@
//! The `leader_confirmation_service` module implements the tools necessary
//! to generate a thread which regularly calculates the last confirmation times
//! observed by the leader
use crate::poh_recorder::PohRecorder;
use solana_metrics::{influxdb, submit};
use solana_runtime::bank::Bank;
use solana_sdk::timing;
use solana_vote_api::vote_state::VoteState;
use std::result;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use std::thread::sleep;
use std::thread::{Builder, JoinHandle};
use std::time::Duration;
#[derive(Debug, PartialEq, Eq)]
pub enum ConfirmationError {
NoValidSupermajority,
}
pub const COMPUTE_CONFIRMATION_MS: u64 = 100;
pub struct LeaderConfirmationService {}
impl LeaderConfirmationService {
fn get_last_supermajority_timestamp(
bank: &Bank,
last_valid_validator_timestamp: u64,
) -> result::Result<u64, ConfirmationError> {
let mut total_stake = 0;
let mut slots_and_stakes: Vec<(u64, u64)> = vec![];
// Hold an accounts_db read lock as briefly as possible, just long enough to collect all
// the vote states
bank.vote_accounts().for_each(|(_, account)| {
total_stake += account.lamports;
let vote_state = VoteState::deserialize(&account.data).unwrap();
if let Some(stake_and_state) = vote_state
.votes
.back()
.map(|vote| (vote.slot, account.lamports))
{
slots_and_stakes.push(stake_and_state);
}
});
let super_majority_stake = (2 * total_stake) / 3;
if let Some(last_valid_validator_timestamp) =
bank.get_confirmation_timestamp(slots_and_stakes, super_majority_stake)
{
return Ok(last_valid_validator_timestamp);
}
if last_valid_validator_timestamp != 0 {
let now = timing::timestamp();
submit(
influxdb::Point::new(&"leader-confirmation")
.add_field(
"duration_ms",
influxdb::Value::Integer((now - last_valid_validator_timestamp) as i64),
)
.to_owned(),
);
}
Err(ConfirmationError::NoValidSupermajority)
}
pub fn compute_confirmation(bank: &Bank, last_valid_validator_timestamp: &mut u64) {
if let Ok(super_majority_timestamp) =
Self::get_last_supermajority_timestamp(bank, *last_valid_validator_timestamp)
{
let now = timing::timestamp();
let confirmation_ms = now - super_majority_timestamp;
*last_valid_validator_timestamp = super_majority_timestamp;
submit(
influxdb::Point::new(&"leader-confirmation")
.add_field(
"duration_ms",
influxdb::Value::Integer(confirmation_ms as i64),
)
.to_owned(),
);
}
}
/// Create a new LeaderConfirmationService for computing confirmation.
pub fn start(poh_recorder: &Arc<Mutex<PohRecorder>>, exit: Arc<AtomicBool>) -> JoinHandle<()> {
let poh_recorder = poh_recorder.clone();
Builder::new()
.name("solana-leader-confirmation-service".to_string())
.spawn(move || {
let mut last_valid_validator_timestamp = 0;
loop {
if exit.load(Ordering::Relaxed) {
break;
}
// dont hold this lock too long
let maybe_bank = poh_recorder.lock().unwrap().bank();
if let Some(ref bank) = maybe_bank {
Self::compute_confirmation(bank, &mut last_valid_validator_timestamp);
}
sleep(Duration::from_millis(COMPUTE_CONFIRMATION_MS));
}
})
.unwrap()
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::voting_keypair::tests::{new_vote_account, push_vote};
    use bincode::serialize;
    use solana_sdk::genesis_block::GenesisBlock;
    use solana_sdk::hash::hash;
    use solana_sdk::pubkey::Pubkey;
    use solana_sdk::signature::{Keypair, KeypairUtil};
    use solana_sdk::timing::MAX_RECENT_BLOCKHASHES;
    use solana_vote_api::vote_transaction::VoteTransaction;
    use std::sync::Arc;

    /// End-to-end check of `compute_confirmation`: with only 6 of 10 staked
    /// validators voting (60%, below the 2/3 supermajority) the confirmation
    /// timestamp stays at its default of 0; after a 7th validator votes (70%),
    /// a non-zero confirmation timestamp is recorded.
    #[test]
    fn test_compute_confirmation() {
        solana_logger::setup();

        let (genesis_block, mint_keypair) = GenesisBlock::new(1234);
        let mut tick_hash = genesis_block.hash();
        let mut bank = Arc::new(Bank::new(&genesis_block));

        // Move the bank up MAX_RECENT_BLOCKHASHES slots, ticking each slot to
        // its final tick height before creating the next child bank.
        for slot in 1..=MAX_RECENT_BLOCKHASHES as u64 {
            let max_tick_height = slot * bank.ticks_per_slot() - 1;
            while bank.tick_height() != max_tick_height {
                // Derive each tick hash from the previous one.
                tick_hash = hash(&serialize(&tick_hash).unwrap());
                bank.register_tick(&tick_hash);
            }
            bank = Arc::new(Bank::new_from_parent(&bank, &Pubkey::default(), slot));
        }

        let blockhash = bank.last_blockhash();

        // Create a total of 10 vote accounts, each will have a balance of 1 (after giving 1 to
        // their vote account), for a total staking pool of 10 lamports.
        let vote_accounts: Vec<_> = (0..10)
            .map(|i| {
                // Create new validator to vote
                let validator_keypair = Arc::new(Keypair::new());
                let voting_keypair = Keypair::new();
                let voting_pubkey = voting_keypair.pubkey();

                // Give the validator some lamports
                bank.transfer(2, &mint_keypair, &validator_keypair.pubkey(), blockhash)
                    .unwrap();
                new_vote_account(&validator_keypair, &voting_pubkey, &bank, 1);

                // Only the first 6 validators vote here — deliberately one
                // short of the 2/3 supermajority.
                if i < 6 {
                    push_vote(
                        &voting_keypair,
                        &bank,
                        MAX_RECENT_BLOCKHASHES.saturating_sub(i) as u64,
                    );
                }
                (voting_keypair, validator_keypair)
            })
            .collect();

        // There isn't 2/3 consensus, so the bank's confirmation value should be the default
        let mut last_confirmation_time = 0;
        LeaderConfirmationService::compute_confirmation(&bank, &mut last_confirmation_time);
        assert_eq!(last_confirmation_time, 0);

        // Get another validator to vote, so we now have 2/3 consensus
        let voting_keypair = &vote_accounts[7].0;
        let vote_tx = VoteTransaction::new_vote(
            &voting_keypair.pubkey(),
            voting_keypair,
            MAX_RECENT_BLOCKHASHES as u64,
            blockhash,
            0,
        );
        bank.process_transaction(&vote_tx).unwrap();

        // 7 of 10 lamports have now voted recently, so a confirmation
        // timestamp must be produced.
        LeaderConfirmationService::compute_confirmation(&bank, &mut last_confirmation_time);
        assert!(last_confirmation_time > 0);
    }
}

View File

@@ -1,3 +1,4 @@
use crate::blocktree::Blocktree;
use crate::leader_schedule::LeaderSchedule;
use crate::staking_utils;
use solana_runtime::bank::Bank;
@@ -44,7 +45,12 @@ pub fn slot_leader_at(slot: u64, bank: &Bank) -> Option<Pubkey> {
}
/// Return the next slot after the given current_slot that the given node will be leader
pub fn next_leader_slot(pubkey: &Pubkey, mut current_slot: u64, bank: &Bank) -> Option<u64> {
pub fn next_leader_slot(
pubkey: &Pubkey,
mut current_slot: u64,
bank: &Bank,
blocktree: Option<&Blocktree>,
) -> Option<u64> {
let (mut epoch, mut start_index) = bank.get_epoch_and_slot_index(current_slot + 1);
while let Some(leader_schedule) = leader_schedule(epoch, bank) {
// clippy thinks I should do this:
@@ -59,6 +65,15 @@ pub fn next_leader_slot(pubkey: &Pubkey, mut current_slot: u64, bank: &Bank) ->
for i in start_index..bank.get_slots_in_epoch(epoch) {
current_slot += 1;
if *pubkey == leader_schedule[i] {
if let Some(blocktree) = blocktree {
if let Some(meta) = blocktree.meta(current_slot).unwrap() {
// We have already sent a blob for this slot, so skip it
if meta.received > 0 {
continue;
}
}
}
return Some(current_slot);
}
}
@@ -82,9 +97,13 @@ pub fn tick_height_to_slot(ticks_per_slot: u64, tick_height: u64) -> u64 {
#[cfg(test)]
mod tests {
use super::*;
use crate::blocktree::get_tmp_ledger_path;
use crate::blocktree::tests::make_slot_entries;
use crate::staking_utils;
use crate::voting_keypair::tests::new_vote_account_with_delegate;
use solana_sdk::genesis_block::{GenesisBlock, BOOTSTRAP_LEADER_LAMPORTS};
use solana_sdk::signature::{Keypair, KeypairUtil};
use std::sync::Arc;
#[test]
fn test_next_leader_slot() {
@@ -99,13 +118,14 @@ mod tests {
let bank = Bank::new(&genesis_block);
assert_eq!(slot_leader_at(bank.slot(), &bank).unwrap(), pubkey);
assert_eq!(next_leader_slot(&pubkey, 0, &bank), Some(1));
assert_eq!(next_leader_slot(&pubkey, 1, &bank), Some(2));
assert_eq!(next_leader_slot(&pubkey, 0, &bank, None), Some(1));
assert_eq!(next_leader_slot(&pubkey, 1, &bank, None), Some(2));
assert_eq!(
next_leader_slot(
&pubkey,
2 * genesis_block.slots_per_epoch - 1, // no schedule generated for epoch 2
&bank
&bank,
None
),
None
);
@@ -114,12 +134,133 @@ mod tests {
next_leader_slot(
&Keypair::new().pubkey(), // not in leader_schedule
0,
&bank
&bank,
None
),
None
);
}
#[test]
fn test_next_leader_slot_blocktree() {
let pubkey = Keypair::new().pubkey();
let mut genesis_block = GenesisBlock::new_with_leader(
BOOTSTRAP_LEADER_LAMPORTS,
&pubkey,
BOOTSTRAP_LEADER_LAMPORTS,
)
.0;
genesis_block.epoch_warmup = false;
let bank = Bank::new(&genesis_block);
let ledger_path = get_tmp_ledger_path!();
{
let blocktree = Arc::new(
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
);
assert_eq!(slot_leader_at(bank.slot(), &bank).unwrap(), pubkey);
// Check that the next leader slot after 0 is slot 1
assert_eq!(
next_leader_slot(&pubkey, 0, &bank, Some(&blocktree)),
Some(1)
);
// Write a blob into slot 2 that chains to slot 1,
// but slot 1 is empty so should not be skipped
let (blobs, _) = make_slot_entries(2, 1, 1);
blocktree.write_blobs(&blobs[..]).unwrap();
assert_eq!(
next_leader_slot(&pubkey, 0, &bank, Some(&blocktree)),
Some(1)
);
// Write a blob into slot 1
let (blobs, _) = make_slot_entries(1, 0, 1);
// Check that slot 1 and 2 are skipped
blocktree.write_blobs(&blobs[..]).unwrap();
assert_eq!(
next_leader_slot(&pubkey, 0, &bank, Some(&blocktree)),
Some(3)
);
// Integrity checks
assert_eq!(
next_leader_slot(
&pubkey,
2 * genesis_block.slots_per_epoch - 1, // no schedule generated for epoch 2
&bank,
Some(&blocktree)
),
None
);
assert_eq!(
next_leader_slot(
&Keypair::new().pubkey(), // not in leader_schedule
0,
&bank,
Some(&blocktree)
),
None
);
}
Blocktree::destroy(&ledger_path).unwrap();
}
#[test]
fn test_next_leader_slot_next_epoch() {
let pubkey = Keypair::new().pubkey();
let (mut genesis_block, mint_keypair) = GenesisBlock::new_with_leader(
2 * BOOTSTRAP_LEADER_LAMPORTS,
&pubkey,
BOOTSTRAP_LEADER_LAMPORTS,
);
genesis_block.epoch_warmup = false;
let bank = Bank::new(&genesis_block);
let delegate_id = Keypair::new().pubkey();
// Create new vote account
let new_voting_keypair = Keypair::new();
new_vote_account_with_delegate(
&mint_keypair,
&new_voting_keypair,
&delegate_id,
&bank,
BOOTSTRAP_LEADER_LAMPORTS,
);
// Have to wait until the epoch after the epoch stakes generated at genesis
// for the new votes to take effect.
let mut target_slot = 1;
let epoch = bank.get_stakers_epoch(0);
while bank.get_stakers_epoch(target_slot) == epoch {
target_slot += 1;
}
let bank = Bank::new_from_parent(&Arc::new(bank), &Pubkey::default(), target_slot);
let mut expected_slot = 0;
let epoch = bank.get_stakers_epoch(target_slot);
for i in 0..epoch {
expected_slot += bank.get_slots_in_epoch(i);
}
let schedule = leader_schedule(epoch, &bank).unwrap();
let mut index = 0;
while schedule[index] != delegate_id {
index += 1
}
expected_slot += index;
assert_eq!(
next_leader_slot(&delegate_id, 0, &bank, None),
Some(expected_slot),
);
}
#[test]
fn test_leader_schedule_via_bank() {
let pubkey = Keypair::new().pubkey();

View File

@@ -40,7 +40,6 @@ pub mod gen_keys;
pub mod gossip_service;
#[cfg(feature = "kvstore")]
pub mod kvstore;
pub mod leader_confirmation_service;
pub mod leader_schedule;
pub mod leader_schedule_utils;
pub mod local_cluster;

View File

@@ -17,9 +17,10 @@ pub struct EpochStakes {
stakes: HashMap<Pubkey, u64>,
self_staked: u64,
total_staked: u64,
delegate_id: Pubkey,
}
#[derive(Default)]
#[derive(Default, Debug)]
pub struct StakeLockout {
lockout: u64,
stake: u64,
@@ -34,14 +35,15 @@ pub struct Locktower {
}
impl EpochStakes {
pub fn new(slot: u64, stakes: HashMap<Pubkey, u64>, self_id: &Pubkey) -> Self {
pub fn new(slot: u64, stakes: HashMap<Pubkey, u64>, delegate_id: &Pubkey) -> Self {
let total_staked = stakes.values().sum();
let self_staked = *stakes.get(&self_id).unwrap_or(&0);
let self_staked = *stakes.get(&delegate_id).unwrap_or(&0);
Self {
slot,
stakes,
total_staked,
self_staked,
delegate_id: *delegate_id,
}
}
pub fn new_for_tests(lamports: u64) -> Self {
@@ -55,32 +57,32 @@ impl EpochStakes {
let stakes = accounts.iter().map(|(k, v)| (*k, v.lamports)).collect();
Self::new(slot, stakes, &accounts[0].0)
}
pub fn new_from_bank(bank: &Bank) -> Self {
pub fn new_from_bank(bank: &Bank, my_id: &Pubkey) -> Self {
let bank_epoch = bank.get_epoch_and_slot_index(bank.slot()).0;
let stakes = staking_utils::vote_account_balances_at_epoch(bank, bank_epoch)
.expect("voting require a bank with stakes");
Self::new(bank_epoch, stakes, &bank.collector_id())
Self::new(bank_epoch, stakes, my_id)
}
}
impl Locktower {
pub fn new_from_forks(bank_forks: &BankForks) -> Self {
pub fn new_from_forks(bank_forks: &BankForks, my_id: &Pubkey) -> Self {
//TODO: which bank to start with?
let mut frozen_banks: Vec<_> = bank_forks.frozen_banks().values().cloned().collect();
frozen_banks.sort_by_key(|b| (b.parents().len(), b.slot()));
if let Some(bank) = frozen_banks.last() {
Self::new_from_bank(bank)
Self::new_from_bank(bank, my_id)
} else {
Self::default()
}
}
pub fn new_from_bank(bank: &Bank) -> Self {
pub fn new_from_bank(bank: &Bank, my_id: &Pubkey) -> Self {
let current_epoch = bank.get_epoch_and_slot_index(bank.slot()).0;
let mut lockouts = VoteState::default();
if let Some(iter) = staking_utils::node_staked_accounts_at_epoch(bank, current_epoch) {
for (delegate_id, _, account) in iter {
if *delegate_id == bank.collector_id() {
if *delegate_id == *my_id {
let state = VoteState::deserialize(&account.data).expect("votes");
if lockouts.votes.len() < state.votes.len() {
//TODO: which state to init with?
@@ -89,7 +91,7 @@ impl Locktower {
}
}
}
let epoch_stakes = EpochStakes::new_from_bank(bank);
let epoch_stakes = EpochStakes::new_from_bank(bank, my_id);
Self {
epoch_stakes,
threshold_depth: VOTE_THRESHOLD_DEPTH,
@@ -122,6 +124,31 @@ impl Locktower {
}
let mut vote_state: VoteState = VoteState::deserialize(&account.data)
.expect("bank should always have valid VoteState data");
if key == self.epoch_stakes.delegate_id
|| vote_state.delegate_id == self.epoch_stakes.delegate_id
{
debug!("vote state {:?}", vote_state);
debug!(
"observed slot {}",
vote_state.nth_recent_vote(0).map(|v| v.slot).unwrap_or(0) as i64
);
debug!("observed root {}", vote_state.root_slot.unwrap_or(0) as i64);
solana_metrics::submit(
influxdb::Point::new("counter-locktower-observed")
.add_field(
"slot",
influxdb::Value::Integer(
vote_state.nth_recent_vote(0).map(|v| v.slot).unwrap_or(0) as i64,
),
)
.add_field(
"root",
influxdb::Value::Integer(vote_state.root_slot.unwrap_or(0) as i64),
)
.to_owned(),
);
}
let start_root = vote_state.root_slot;
vote_state.process_vote(Vote { slot: bank_slot });
for vote in &vote_state.votes {
@@ -150,6 +177,20 @@ impl Locktower {
stake_lockouts
}
pub fn is_slot_confirmed(&self, slot: u64, lockouts: &HashMap<u64, StakeLockout>) -> bool {
lockouts
.get(&slot)
.map(|lockout| {
(lockout.stake as f64 / self.epoch_stakes.total_staked as f64) > self.threshold_size
})
.unwrap_or(false)
}
pub fn is_recent_epoch(&self, bank: &Bank) -> bool {
let bank_epoch = bank.get_epoch_and_slot_index(bank.slot()).0;
bank_epoch >= self.epoch_stakes.slot
}
pub fn update_epoch(&mut self, bank: &Bank) {
let bank_epoch = bank.get_epoch_and_slot_index(bank.slot()).0;
if bank_epoch != self.epoch_stakes.slot {
@@ -157,7 +198,7 @@ impl Locktower {
bank_epoch > self.epoch_stakes.slot,
"epoch_stakes cannot move backwards"
);
self.epoch_stakes = EpochStakes::new_from_bank(bank);
self.epoch_stakes = EpochStakes::new_from_bank(bank, &self.epoch_stakes.delegate_id);
solana_metrics::submit(
influxdb::Point::new("counter-locktower-epoch")
.add_field(
@@ -415,6 +456,43 @@ mod test {
assert!(locktower.check_vote_stake_threshold(0, &stakes));
}
#[test]
fn test_is_slot_confirmed_not_enough_stake_failure() {
let locktower = Locktower::new(EpochStakes::new_for_tests(2), 1, 0.67);
let stakes = vec![(
0,
StakeLockout {
stake: 1,
lockout: 8,
},
)]
.into_iter()
.collect();
assert!(!locktower.is_slot_confirmed(0, &stakes));
}
#[test]
fn test_is_slot_confirmed_unknown_slot() {
let locktower = Locktower::new(EpochStakes::new_for_tests(2), 1, 0.67);
let stakes = HashMap::new();
assert!(!locktower.is_slot_confirmed(0, &stakes));
}
#[test]
fn test_is_slot_confirmed_pass() {
let locktower = Locktower::new(EpochStakes::new_for_tests(2), 1, 0.67);
let stakes = vec![(
0,
StakeLockout {
stake: 2,
lockout: 8,
},
)]
.into_iter()
.collect();
assert!(locktower.is_slot_confirmed(0, &stakes));
}
#[test]
fn test_is_locked_out_empty() {
let locktower = Locktower::new(EpochStakes::new_for_tests(2), 0, 0.67);

File diff suppressed because it is too large Load Diff

View File

@@ -1,6 +1,5 @@
//! The `poh_service` module implements a service that records the passing of
//! "ticks", a measure of time in the PoH stream
use crate::poh_recorder::PohRecorder;
use crate::service::Service;
use solana_sdk::timing::NUM_TICKS_PER_SECOND;
@@ -98,6 +97,7 @@ impl Service for PohService {
#[cfg(test)]
mod tests {
use super::*;
use crate::blocktree::{get_tmp_ledger_path, Blocktree};
use crate::poh_recorder::WorkingBank;
use crate::result::Result;
use crate::test_tx::test_tx;
@@ -111,83 +111,90 @@ mod tests {
let (genesis_block, _mint_keypair) = GenesisBlock::new(2);
let bank = Arc::new(Bank::new(&genesis_block));
let prev_hash = bank.last_blockhash();
let (poh_recorder, entry_receiver) = PohRecorder::new(
bank.tick_height(),
prev_hash,
bank.slot(),
Some(4),
bank.ticks_per_slot(),
&Pubkey::default(),
);
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
let exit = Arc::new(AtomicBool::new(false));
let working_bank = WorkingBank {
bank: bank.clone(),
min_tick_height: bank.tick_height(),
max_tick_height: std::u64::MAX,
};
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let (poh_recorder, entry_receiver) = PohRecorder::new(
bank.tick_height(),
prev_hash,
bank.slot(),
Some(4),
bank.ticks_per_slot(),
&Pubkey::default(),
&Arc::new(blocktree),
);
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
let exit = Arc::new(AtomicBool::new(false));
let working_bank = WorkingBank {
bank: bank.clone(),
min_tick_height: bank.tick_height(),
max_tick_height: std::u64::MAX,
};
let entry_producer: JoinHandle<Result<()>> = {
let poh_recorder = poh_recorder.clone();
let exit = exit.clone();
let entry_producer: JoinHandle<Result<()>> = {
let poh_recorder = poh_recorder.clone();
let exit = exit.clone();
Builder::new()
.name("solana-poh-service-entry_producer".to_string())
.spawn(move || {
loop {
// send some data
let h1 = hash(b"hello world!");
let tx = test_tx();
poh_recorder.lock().unwrap().record(h1, vec![tx]).unwrap();
Builder::new()
.name("solana-poh-service-entry_producer".to_string())
.spawn(move || {
loop {
// send some data
let h1 = hash(b"hello world!");
let tx = test_tx();
poh_recorder.lock().unwrap().record(h1, vec![tx]).unwrap();
if exit.load(Ordering::Relaxed) {
break Ok(());
if exit.load(Ordering::Relaxed) {
break Ok(());
}
}
}
})
.unwrap()
};
})
.unwrap()
};
const HASHES_PER_TICK: u64 = 2;
let poh_service = PohService::new(
poh_recorder.clone(),
&PohServiceConfig::Tick(HASHES_PER_TICK as usize),
&exit,
);
poh_recorder.lock().unwrap().set_working_bank(working_bank);
const HASHES_PER_TICK: u64 = 2;
let poh_service = PohService::new(
poh_recorder.clone(),
&PohServiceConfig::Tick(HASHES_PER_TICK as usize),
&exit,
);
poh_recorder.lock().unwrap().set_working_bank(working_bank);
// get some events
let mut hashes = 0;
let mut need_tick = true;
let mut need_entry = true;
let mut need_partial = true;
// get some events
let mut hashes = 0;
let mut need_tick = true;
let mut need_entry = true;
let mut need_partial = true;
while need_tick || need_entry || need_partial {
for entry in entry_receiver.recv().unwrap().1 {
let entry = &entry.0;
if entry.is_tick() {
assert!(entry.num_hashes <= HASHES_PER_TICK);
while need_tick || need_entry || need_partial {
for entry in entry_receiver.recv().unwrap().1 {
let entry = &entry.0;
if entry.is_tick() {
assert!(entry.num_hashes <= HASHES_PER_TICK);
if entry.num_hashes == HASHES_PER_TICK {
need_tick = false;
if entry.num_hashes == HASHES_PER_TICK {
need_tick = false;
} else {
need_partial = false;
}
hashes += entry.num_hashes;
assert_eq!(hashes, HASHES_PER_TICK);
hashes = 0;
} else {
need_partial = false;
assert!(entry.num_hashes >= 1);
need_entry = false;
hashes += entry.num_hashes - 1;
}
hashes += entry.num_hashes;
assert_eq!(hashes, HASHES_PER_TICK);
hashes = 0;
} else {
assert!(entry.num_hashes >= 1);
need_entry = false;
hashes += entry.num_hashes - 1;
}
}
exit.store(true, Ordering::Relaxed);
let _ = poh_service.join().unwrap();
let _ = entry_producer.join().unwrap();
}
exit.store(true, Ordering::Relaxed);
let _ = poh_service.join().unwrap();
let _ = entry_producer.join().unwrap();
Blocktree::destroy(&ledger_path).unwrap();
}
}

View File

@@ -6,12 +6,13 @@ use crate::blocktree_processor;
use crate::cluster_info::ClusterInfo;
use crate::entry::{Entry, EntryReceiver, EntrySender, EntrySlice};
use crate::leader_schedule_utils;
use crate::locktower::Locktower;
use crate::locktower::{Locktower, StakeLockout};
use crate::packet::BlobError;
use crate::poh_recorder::PohRecorder;
use crate::result;
use crate::rpc_subscriptions::RpcSubscriptions;
use crate::service::Service;
use hashbrown::HashMap;
use solana_metrics::counter::Counter;
use solana_metrics::influxdb;
use solana_runtime::bank::Bank;
@@ -20,7 +21,6 @@ use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::KeypairUtil;
use solana_sdk::timing::{self, duration_as_ms};
use solana_vote_api::vote_transaction::VoteTransaction;
use std::collections::HashMap;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver, RecvTimeoutError, Sender};
use std::sync::{Arc, Mutex, RwLock};
@@ -52,6 +52,22 @@ pub struct ReplayStage {
t_replay: JoinHandle<result::Result<()>>,
}
#[derive(Default)]
struct ForkProgress {
last_entry: Hash,
num_blobs: usize,
started_ms: u64,
}
impl ForkProgress {
pub fn new(last_entry: Hash) -> Self {
Self {
last_entry,
num_blobs: 0,
started_ms: timing::timestamp(),
}
}
}
impl ReplayStage {
#[allow(clippy::new_ret_no_self, clippy::too_many_arguments)]
pub fn new<T>(
@@ -79,7 +95,7 @@ impl ReplayStage {
let my_id = *my_id;
let vote_account = *vote_account;
let mut ticks_per_slot = 0;
let mut locktower = Locktower::new_from_forks(&bank_forks.read().unwrap());
let mut locktower = Locktower::new_from_forks(&bank_forks.read().unwrap(), &my_id);
// Start the replay stage loop
let t_replay = Builder::new()
@@ -110,12 +126,7 @@ impl ReplayStage {
}
let max_tick_height = (*bank_slot + 1) * bank.ticks_per_slot() - 1;
if bank.tick_height() == max_tick_height {
Self::process_completed_bank(
&my_id,
bank,
&mut progress,
&slot_full_sender,
);
Self::process_completed_bank(&my_id, bank, &slot_full_sender);
}
}
@@ -125,65 +136,8 @@ impl ReplayStage {
ticks_per_slot = bank.ticks_per_slot();
}
let locktower_start = Instant::now();
// Locktower voting
let descendants = bank_forks.read().unwrap().descendants();
let ancestors = bank_forks.read().unwrap().ancestors();
let frozen_banks = bank_forks.read().unwrap().frozen_banks();
trace!("frozen_banks {}", frozen_banks.len());
let mut votable: Vec<(u128, Arc<Bank>)> = frozen_banks
.values()
.filter(|b| {
let is_votable = b.is_votable();
trace!("bank is votable: {} {}", b.slot(), is_votable);
is_votable
})
.filter(|b| {
let has_voted = locktower.has_voted(b.slot());
trace!("bank is has_voted: {} {}", b.slot(), has_voted);
!has_voted
})
.filter(|b| {
let is_locked_out = locktower.is_locked_out(b.slot(), &descendants);
trace!("bank is is_locked_out: {} {}", b.slot(), is_locked_out);
!is_locked_out
})
.map(|bank| {
(
bank,
locktower.collect_vote_lockouts(
bank.slot(),
bank.vote_accounts(),
&ancestors,
),
)
})
.filter(|(b, stake_lockouts)| {
let vote_threshold =
locktower.check_vote_stake_threshold(b.slot(), &stake_lockouts);
trace!("bank vote_threshold: {} {}", b.slot(), vote_threshold);
vote_threshold
})
.map(|(b, stake_lockouts)| {
(locktower.calculate_weight(&stake_lockouts), b.clone())
})
.collect();
votable.sort_by_key(|b| b.0);
trace!("votable_banks {}", votable.len());
let ms = timing::duration_as_ms(&locktower_start.elapsed());
if !votable.is_empty() {
let weights: Vec<u128> = votable.iter().map(|x| x.0).collect();
info!(
"@{:?} locktower duration: {:?} len: {} weights: {:?}",
timing::timestamp(),
ms,
votable.len(),
weights
);
}
inc_new_counter_info!("replay_stage-locktower_duration", ms as usize);
let votable =
Self::generate_votable_banks(&bank_forks, &locktower, &mut progress);
if let Some((_, bank)) = votable.last() {
subscriptions.notify_subscribers(&bank);
@@ -204,8 +158,12 @@ impl ReplayStage {
locktower.update_epoch(&bank);
cluster_info.write().unwrap().push_vote(vote);
}
let next_leader_slot =
leader_schedule_utils::next_leader_slot(&my_id, bank.slot(), &bank);
let next_leader_slot = leader_schedule_utils::next_leader_slot(
&my_id,
bank.slot(),
&bank,
Some(&blocktree),
);
poh_recorder.lock().unwrap().reset(
bank.tick_height(),
bank.last_blockhash(),
@@ -241,7 +199,6 @@ impl ReplayStage {
&bank_forks,
&poh_recorder,
&cluster_info,
&blocktree,
poh_slot,
reached_leader_tick,
grace_ticks,
@@ -274,42 +231,22 @@ impl ReplayStage {
bank_forks: &Arc<RwLock<BankForks>>,
poh_recorder: &Arc<Mutex<PohRecorder>>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
blocktree: &Blocktree,
poh_slot: u64,
reached_leader_tick: bool,
grace_ticks: u64,
) {
trace!("{} checking poh slot {}", my_id, poh_slot);
if blocktree.meta(poh_slot).unwrap().is_some() {
// We've already broadcasted entries for this slot, skip it
// Since we are skipping our leader slot, let's tell poh recorder when we should be
// leader again
if reached_leader_tick {
let _ = bank_forks.read().unwrap().get(poh_slot).map(|bank| {
let next_leader_slot =
leader_schedule_utils::next_leader_slot(&my_id, bank.slot(), &bank);
let mut poh = poh_recorder.lock().unwrap();
let start_slot = poh.start_slot();
poh.reset(
bank.tick_height(),
bank.last_blockhash(),
start_slot,
next_leader_slot,
bank.ticks_per_slot(),
);
});
}
return;
}
if bank_forks.read().unwrap().get(poh_slot).is_none() {
let frozen = bank_forks.read().unwrap().frozen_banks();
let parent_slot = poh_recorder.lock().unwrap().start_slot();
assert!(frozen.contains_key(&parent_slot));
let parent = &frozen[&parent_slot];
let parent = {
let r_bf = bank_forks.read().unwrap();
r_bf.get(parent_slot)
.expect("start slot doesn't exist in bank forks")
.clone()
};
assert!(parent.is_frozen());
leader_schedule_utils::slot_leader_at(poh_slot, parent)
leader_schedule_utils::slot_leader_at(poh_slot, &parent)
.map(|next_leader| {
debug!(
"me: {} leader {} at poh slot {}",
@@ -329,7 +266,7 @@ impl ReplayStage {
influxdb::Value::Integer(grace_ticks as i64),
)
.to_owned(),);
let tpu_bank = Bank::new_from_parent(parent, my_id, poh_slot);
let tpu_bank = Bank::new_from_parent(&parent, my_id, poh_slot);
bank_forks.write().unwrap().insert(tpu_bank);
if let Some(tpu_bank) = bank_forks.read().unwrap().get(poh_slot).cloned() {
assert_eq!(
@@ -352,10 +289,10 @@ impl ReplayStage {
});
}
}
pub fn replay_blocktree_into_bank(
fn replay_blocktree_into_bank(
bank: &Bank,
blocktree: &Blocktree,
progress: &mut HashMap<u64, (Hash, usize)>,
progress: &mut HashMap<u64, ForkProgress>,
forward_entry_sender: &EntrySender,
) -> result::Result<()> {
let (entries, num) = Self::load_blocktree_entries(bank, blocktree, progress)?;
@@ -373,32 +310,128 @@ impl ReplayStage {
Ok(())
}
pub fn load_blocktree_entries(
fn generate_votable_banks(
bank_forks: &Arc<RwLock<BankForks>>,
locktower: &Locktower,
progress: &mut HashMap<u64, ForkProgress>,
) -> Vec<(u128, Arc<Bank>)> {
let locktower_start = Instant::now();
// Locktower voting
let descendants = bank_forks.read().unwrap().descendants();
let ancestors = bank_forks.read().unwrap().ancestors();
let frozen_banks = bank_forks.read().unwrap().frozen_banks();
trace!("frozen_banks {}", frozen_banks.len());
let mut votable: Vec<(u128, Arc<Bank>)> = frozen_banks
.values()
.filter(|b| {
let is_votable = b.is_votable();
trace!("bank is votable: {} {}", b.slot(), is_votable);
is_votable
})
.filter(|b| {
let is_recent_epoch = locktower.is_recent_epoch(b);
trace!("bank is is_recent_epoch: {} {}", b.slot(), is_recent_epoch);
is_recent_epoch
})
.filter(|b| {
let has_voted = locktower.has_voted(b.slot());
trace!("bank is has_voted: {} {}", b.slot(), has_voted);
!has_voted
})
.filter(|b| {
let is_locked_out = locktower.is_locked_out(b.slot(), &descendants);
trace!("bank is is_locked_out: {} {}", b.slot(), is_locked_out);
!is_locked_out
})
.map(|bank| {
(
bank,
locktower.collect_vote_lockouts(bank.slot(), bank.vote_accounts(), &ancestors),
)
})
.filter(|(b, stake_lockouts)| {
let vote_threshold =
locktower.check_vote_stake_threshold(b.slot(), &stake_lockouts);
Self::confirm_forks(locktower, stake_lockouts, progress);
debug!("bank vote_threshold: {} {}", b.slot(), vote_threshold);
vote_threshold
})
.map(|(b, stake_lockouts)| (locktower.calculate_weight(&stake_lockouts), b.clone()))
.collect();
votable.sort_by_key(|b| b.0);
let ms = timing::duration_as_ms(&locktower_start.elapsed());
trace!("votable_banks {}", votable.len());
if !votable.is_empty() {
let weights: Vec<u128> = votable.iter().map(|x| x.0).collect();
info!(
"@{:?} locktower duration: {:?} len: {} weights: {:?}",
timing::timestamp(),
ms,
votable.len(),
weights
);
}
inc_new_counter_info!("replay_stage-locktower_duration", ms as usize);
votable
}
fn confirm_forks(
locktower: &Locktower,
stake_lockouts: &HashMap<u64, StakeLockout>,
progress: &mut HashMap<u64, ForkProgress>,
) {
progress.retain(|slot, prog| {
let duration = timing::timestamp() - prog.started_ms;
if locktower.is_slot_confirmed(*slot, stake_lockouts) {
info!("validator fork confirmed {} {}", *slot, duration);
solana_metrics::submit(
influxdb::Point::new(&"validator-confirmation")
.add_field("duration_ms", influxdb::Value::Integer(duration as i64))
.to_owned(),
);
false
} else {
debug!(
"validator fork not confirmed {} {} {:?}",
*slot,
duration,
stake_lockouts.get(slot)
);
true
}
});
}
fn load_blocktree_entries(
bank: &Bank,
blocktree: &Blocktree,
progress: &mut HashMap<u64, (Hash, usize)>,
progress: &mut HashMap<u64, ForkProgress>,
) -> result::Result<(Vec<Entry>, usize)> {
let bank_slot = bank.slot();
let bank_progress = &mut progress
.entry(bank_slot)
.or_insert((bank.last_blockhash(), 0));
blocktree.get_slot_entries_with_blob_count(bank_slot, bank_progress.1 as u64, None)
.or_insert(ForkProgress::new(bank.last_blockhash()));
blocktree.get_slot_entries_with_blob_count(bank_slot, bank_progress.num_blobs as u64, None)
}
pub fn replay_entries_into_bank(
fn replay_entries_into_bank(
bank: &Bank,
entries: Vec<Entry>,
progress: &mut HashMap<u64, (Hash, usize)>,
progress: &mut HashMap<u64, ForkProgress>,
forward_entry_sender: &EntrySender,
num: usize,
) -> result::Result<()> {
let bank_progress = &mut progress
.entry(bank.slot())
.or_insert((bank.last_blockhash(), 0));
let result = Self::verify_and_process_entries(&bank, &entries, &bank_progress.0);
bank_progress.1 += num;
.or_insert(ForkProgress::new(bank.last_blockhash()));
let result = Self::verify_and_process_entries(&bank, &entries, &bank_progress.last_entry);
bank_progress.num_blobs += num;
if let Some(last_entry) = entries.last() {
bank_progress.0 = last_entry.hash;
bank_progress.last_entry = last_entry.hash;
}
if result.is_ok() {
forward_entry_sender.send(entries)?;
@@ -428,7 +461,7 @@ impl ReplayStage {
fn handle_new_root(
bank_forks: &Arc<RwLock<BankForks>>,
progress: &mut HashMap<u64, (Hash, usize)>,
progress: &mut HashMap<u64, ForkProgress>,
) {
let r_bank_forks = bank_forks.read().unwrap();
progress.retain(|k, _| r_bank_forks.get(*k).is_some());
@@ -437,12 +470,10 @@ impl ReplayStage {
fn process_completed_bank(
my_id: &Pubkey,
bank: Arc<Bank>,
progress: &mut HashMap<u64, (Hash, usize)>,
slot_full_sender: &Sender<(u64, Pubkey)>,
) {
bank.freeze();
info!("bank frozen {}", bank.slot());
progress.remove(&bank.slot());
if let Err(e) = slot_full_sender.send((bank.slot(), bank.collector_id())) {
trace!("{} slot_full alert failed: {:?}", my_id, e);
}
@@ -531,7 +562,8 @@ mod test {
let bank = bank_forks.working_bank();
let blocktree = Arc::new(blocktree);
let (exit, poh_recorder, poh_service, _entry_receiver) = create_test_recorder(&bank);
let (exit, poh_recorder, poh_service, _entry_receiver) =
create_test_recorder(&bank, &blocktree);
let (replay_stage, _slot_full_receiver, ledger_writer_recv) = ReplayStage::new(
&my_keypair.pubkey(),
&voting_keypair.pubkey(),
@@ -668,7 +700,7 @@ mod test {
let bank0 = Bank::default();
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank0)));
let mut progress = HashMap::new();
progress.insert(5, (Hash::default(), 0));
progress.insert(5, ForkProgress::new(Hash::default()));
ReplayStage::handle_new_root(&bank_forks, &mut progress);
assert!(progress.is_empty());
}

View File

@@ -202,8 +202,10 @@ pub mod tests {
let blocktree_path = get_tmp_ledger_path!();
let (blocktree, l_receiver) = Blocktree::open_with_signal(&blocktree_path)
.expect("Expected to successfully open ledger");
let blocktree = Arc::new(blocktree);
let bank = bank_forks.working_bank();
let (exit, poh_recorder, poh_service, _entry_receiver) = create_test_recorder(&bank);
let (exit, poh_recorder, poh_service, _entry_receiver) =
create_test_recorder(&bank, &blocktree);
let voting_keypair = Keypair::new();
let tvu = Tvu::new(
&voting_keypair.pubkey(),
@@ -218,7 +220,7 @@ pub mod tests {
fetch: target1.sockets.tvu,
}
},
Arc::new(blocktree),
blocktree,
STORAGE_ROTATE_TEST_COUNT,
&StorageState::default(),
None,

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-drone"
version = "0.12.1"
version = "0.12.3"
description = "Solana Drone"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -19,9 +19,9 @@ clap = "2.31"
log = "0.4.2"
serde = "1.0.89"
serde_derive = "1.0.89"
solana-logger = { path = "../logger", version = "0.12.1" }
solana-sdk = { path = "../sdk", version = "0.12.1" }
solana-metrics = { path = "../metrics", version = "0.12.1" }
solana-logger = { path = "../logger", version = "0.12.3" }
solana-sdk = { path = "../sdk", version = "0.12.3" }
solana-metrics = { path = "../metrics", version = "0.12.3" }
tokio = "0.1"
tokio-codec = "0.1"

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-fullnode"
description = "Blockchain, Rebuilt for Scale"
version = "0.12.1"
version = "0.12.3"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -12,15 +12,15 @@ homepage = "https://solana.com/"
clap = "2.32.0"
log = "0.4.2"
serde_json = "1.0.39"
solana = { path = "../core", version = "0.12.1" }
solana-drone = { path = "../drone", version = "0.12.1" }
solana-logger = { path = "../logger", version = "0.12.1" }
solana-netutil = { path = "../netutil", version = "0.12.1" }
solana-metrics = { path = "../metrics", version = "0.12.1" }
solana-runtime = { path = "../runtime", version = "0.12.1" }
solana-sdk = { path = "../sdk", version = "0.12.1" }
solana-vote-api = { path = "../programs/vote_api", version = "0.12.1" }
solana-vote-signer = { path = "../vote-signer", version = "0.12.1" }
solana = { path = "../core", version = "0.12.3" }
solana-drone = { path = "../drone", version = "0.12.3" }
solana-logger = { path = "../logger", version = "0.12.3" }
solana-netutil = { path = "../netutil", version = "0.12.3" }
solana-metrics = { path = "../metrics", version = "0.12.3" }
solana-runtime = { path = "../runtime", version = "0.12.3" }
solana-sdk = { path = "../sdk", version = "0.12.3" }
solana-vote-api = { path = "../programs/vote_api", version = "0.12.3" }
solana-vote-signer = { path = "../vote-signer", version = "0.12.3" }
[features]
chacha = ["solana/chacha"]

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-genesis"
description = "Blockchain, Rebuilt for Scale"
version = "0.12.1"
version = "0.12.3"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -11,8 +11,8 @@ homepage = "https://solana.com/"
[dependencies]
clap = "2.32.0"
serde_json = "1.0.39"
solana = { path = "../core", version = "0.12.1" }
solana-sdk = { path = "../sdk", version = "0.12.1" }
solana = { path = "../core", version = "0.12.3" }
solana-sdk = { path = "../sdk", version = "0.12.3" }
[features]
cuda = ["solana/cuda"]

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-keygen"
version = "0.12.1"
version = "0.12.3"
description = "Solana key generation utility"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -14,7 +14,7 @@ cuda = []
[dependencies]
dirs = "1.0.5"
clap = "2.31"
solana-sdk = { path = "../sdk", version = "0.12.1" }
solana-sdk = { path = "../sdk", version = "0.12.3" }
[[bin]]
name = "solana-keygen"

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-ledger-tool"
description = "Blockchain, Rebuilt for Scale"
version = "0.12.1"
version = "0.12.3"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -11,10 +11,10 @@ homepage = "https://solana.com/"
[dependencies]
clap = "2.32.0"
serde_json = "1.0.39"
solana = { path = "../core", version = "0.12.1" }
solana-sdk = { path = "../sdk", version = "0.12.1" }
solana-logger = { path = "../logger", version = "0.12.1" }
solana-runtime = { path = "../runtime", version = "0.12.1" }
solana = { path = "../core", version = "0.12.3" }
solana-sdk = { path = "../sdk", version = "0.12.3" }
solana-logger = { path = "../logger", version = "0.12.3" }
solana-runtime = { path = "../runtime", version = "0.12.3" }
[dev-dependencies]
assert_cmd = "0.11"

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-logger"
version = "0.12.1"
version = "0.12.3"
description = "Solana Logger"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-metrics"
version = "0.12.1"
version = "0.12.3"
description = "Solana Metrics"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -14,7 +14,7 @@ log = "0.4.2"
reqwest = "0.9.11"
lazy_static = "1.3.0"
sys-info = "0.5.6"
solana-sdk = { path = "../sdk", version = "0.12.1" }
solana-sdk = { path = "../sdk", version = "0.12.3" }
[dev-dependencies]
rand = "0.6.5"

View File

@@ -15,8 +15,8 @@
"editable": true,
"gnetId": null,
"graphTooltip": 0,
"id": 251,
"iteration": 1549301870214,
"id": 399,
"iteration": 1553559957575,
"links": [
{
"asDropdown": true,
@@ -3811,7 +3811,7 @@
"x": 0,
"y": 49
},
"id": 42,
"id": 34,
"legend": {
"alignAsTable": false,
"avg": false,
@@ -4001,7 +4001,7 @@
"x": 12,
"y": 49
},
"id": 41,
"id": 35,
"legend": {
"alignAsTable": false,
"avg": false,
@@ -4156,7 +4156,7 @@
"x": 0,
"y": 54
},
"id": 34,
"id": 36,
"legend": {
"alignAsTable": false,
"avg": false,
@@ -4496,7 +4496,7 @@
"x": 12,
"y": 54
},
"id": 40,
"id": 37,
"legend": {
"alignAsTable": false,
"avg": false,
@@ -4727,7 +4727,7 @@
"x": 0,
"y": 60
},
"id": 35,
"id": 38,
"legend": {
"alignAsTable": false,
"avg": false,
@@ -4997,7 +4997,7 @@
"x": 12,
"y": 60
},
"id": 43,
"id": 39,
"legend": {
"alignAsTable": false,
"avg": false,
@@ -5100,6 +5100,249 @@
"alignLevel": null
}
},
{
"aliasColors": {
"cluster-info.repair": "#ba43a9",
"counter-replay_stage-new_leader.last": "#00ffbb",
"window-service.receive": "#b7dbab",
"window-stage.consumed": "#5195ce"
},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "Solana Metrics (read-only)",
"fill": 1,
"gridPos": {
"h": 6,
"w": 12,
"x": 0,
"y": 66
},
"id": 44,
"legend": {
"alignAsTable": false,
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": false,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"percentage": false,
"pointradius": 2,
"points": true,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"groupBy": [
{
"params": [
"$__interval"
],
"type": "time"
},
{
"params": [
"null"
],
"type": "fill"
}
],
"measurement": "counter-cluster_info-vote-count",
"orderByTime": "ASC",
"policy": "autogen",
"query": "SELECT last(\"latest\") - last(\"root\") FROM \"$testnet\".\"autogen\".\"counter-locktower-vote\" WHERE host_id =~ /$hostid/ AND $timeFilter GROUP BY time($__interval)",
"rawQuery": true,
"refId": "A",
"resultFormat": "time_series",
"select": [
[
{
"params": [
"count"
],
"type": "field"
},
{
"params": [],
"type": "sum"
}
]
],
"tags": []
}
],
"thresholds": [],
"timeFrom": null,
"timeShift": null,
"title": "Locktower Distance in Latest and Root Slot ($hostid)",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {
"cluster-info.repair": "#ba43a9",
"counter-locktower-vote.last": "#00ffbb",
"counter-replay_stage-new_leader.last": "#00ffbb",
"window-service.receive": "#b7dbab",
"window-stage.consumed": "#5195ce"
},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "Solana Metrics (read-only)",
"fill": 1,
"gridPos": {
"h": 6,
"w": 12,
"x": 12,
"y": 66
},
"id": 45,
"legend": {
"alignAsTable": false,
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": false,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"percentage": false,
"pointradius": 2,
"points": true,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"groupBy": [
{
"params": [
"$__interval"
],
"type": "time"
},
{
"params": [
"null"
],
"type": "fill"
}
],
"measurement": "counter-cluster_info-vote-count",
"orderByTime": "ASC",
"policy": "autogen",
"query": "SELECT last(\"root\") FROM \"$testnet\".\"autogen\".\"counter-locktower-vote\" WHERE host_id =~ /$hostid/ AND $timeFilter GROUP BY time($__interval)",
"rawQuery": true,
"refId": "A",
"resultFormat": "time_series",
"select": [
[
{
"params": [
"count"
],
"type": "field"
},
{
"params": [],
"type": "sum"
}
]
],
"tags": []
}
],
"thresholds": [],
"timeFrom": null,
"timeShift": null,
"title": "Locktower Root Slot ($hostid)",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
@@ -5111,9 +5354,9 @@
"h": 5,
"w": 12,
"x": 0,
"y": 66
"y": 72
},
"id": 36,
"id": 40,
"legend": {
"alignAsTable": false,
"avg": false,
@@ -5343,9 +5586,9 @@
"h": 5,
"w": 12,
"x": 12,
"y": 66
"y": 72
},
"id": 38,
"id": 41,
"legend": {
"alignAsTable": false,
"avg": false,
@@ -5496,9 +5739,9 @@
"h": 1,
"w": 24,
"x": 0,
"y": 71
"y": 77
},
"id": 44,
"id": 42,
"panels": [],
"title": "Signature Verification",
"type": "row"
@@ -5514,9 +5757,9 @@
"h": 5,
"w": 12,
"x": 0,
"y": 72
"y": 78
},
"id": 45,
"id": 43,
"legend": {
"avg": false,
"current": false,
@@ -5701,5 +5944,5 @@
"timezone": "",
"title": "Testnet Monitor (edge)",
"uid": "testnet-edge",
"version": 116
"version": 117
}

View File

@@ -24,6 +24,7 @@ entrypointIp=
publicNetwork=
netBasename=
sshPrivateKey=
externalNodeSshKey=
sshOptions=()
fullnodeIpList=()
fullnodeIpListPrivate=()
@@ -41,8 +42,9 @@ buildSshOptions() {
-o "User=solana"
-o "IdentityFile=$sshPrivateKey"
-o "LogLevel=ERROR"
-F /dev/null
)
[[ -z $externalNodeSshKey ]] || sshOptions+=(-o "IdentityFile=$externalNodeSshKey")
}
loadConfigFile() {

View File

@@ -26,7 +26,7 @@ ec2)
cpuBootstrapLeaderMachineType=m4.4xlarge
gpuBootstrapLeaderMachineType=p2.xlarge
bootstrapLeaderMachineType=$cpuBootstrapLeaderMachineType
fullNodeMachineType=m4.2xlarge
fullNodeMachineType=$cpuBootstrapLeaderMachineType
clientMachineType=m4.2xlarge
blockstreamerMachineType=m4.2xlarge
;;
@@ -42,11 +42,20 @@ clientNodeCount=1
blockstreamer=false
fullNodeBootDiskSizeInGb=1000
clientBootDiskSizeInGb=75
externalNodes=false
publicNetwork=false
enableGpu=false
customAddress=
leaderRotation=true
zones=()
containsZone() {
local e match="$1"
shift
for e; do [[ "$e" == "$match" ]] && return 0; done
return 1
}
usage() {
exitcode=0
@@ -106,7 +115,7 @@ shift
[[ $command = create || $command = config || $command = info || $command = delete ]] ||
usage "Invalid command: $command"
while getopts "h?p:Pn:c:z:gG:a:d:bu" opt; do
while getopts "h?p:Pn:c:z:gG:a:d:bux" opt; do
case $opt in
h | \?)
usage
@@ -125,7 +134,7 @@ while getopts "h?p:Pn:c:z:gG:a:d:bu" opt; do
clientNodeCount=$OPTARG
;;
z)
cloud_SetZone "$OPTARG"
containsZone "$OPTARG" "${zones[@]}" || zones+=("$OPTARG")
;;
b)
leaderRotation=false
@@ -149,6 +158,9 @@ while getopts "h?p:Pn:c:z:gG:a:d:bu" opt; do
u)
blockstreamer=true
;;
x)
externalNodes=true
;;
*)
usage "unhandled option: $opt"
;;
@@ -156,6 +168,8 @@ while getopts "h?p:Pn:c:z:gG:a:d:bu" opt; do
done
shift $((OPTIND - 1))
[[ ${#zones[@]} -gt 0 ]] || zones+=("$(cloud_DefaultZone)")
[[ -z $1 ]] || usage "Unexpected argument: $1"
if [[ $cloudProvider = ec2 ]]; then
# EC2 keys can't be retrieved from running instances like GCE keys can so save
@@ -168,59 +182,8 @@ fi
case $cloudProvider in
gce)
if $enableGpu; then
# Custom Ubuntu 18.04 LTS image with CUDA 9.2 and CUDA 10.0 installed
#
# TODO: Unfortunately this image is not public. When this becomes an issue,
# use the stock Ubuntu 18.04 image and programmatically install CUDA after the
# instance boots
#
imageName="ubuntu-1804-bionic-v20181029-with-cuda-10-and-cuda-9-2"
else
# Upstream Ubuntu 18.04 LTS image
imageName="ubuntu-1804-bionic-v20181029 --image-project ubuntu-os-cloud"
fi
;;
ec2)
if $enableGpu; then
#
# Custom Ubuntu 18.04 LTS image with CUDA 9.2 and CUDA 10.0 installed
#
# TODO: Unfortunately these AMIs are not public. When this becomes an issue,
# use the stock Ubuntu 18.04 image and programmatically install CUDA after the
# instance boots
#
case $region in
us-east-1)
imageName="ami-0a8bd6fb204473f78"
;;
us-west-1)
imageName="ami-07011f0795513c59d"
;;
us-west-2)
imageName="ami-0a11ef42b62b82b68"
;;
*)
usage "Unsupported region: $region"
;;
esac
else
# Select an upstream Ubuntu 18.04 AMI from https://cloud-images.ubuntu.com/locator/ec2/
case $region in
us-east-1)
imageName="ami-0a313d6098716f372"
;;
us-west-1)
imageName="ami-06397100adf427136"
;;
us-west-2)
imageName="ami-0dc34f4b016c9ce49"
;;
*)
usage "Unsupported region: $region"
;;
esac
fi
;;
*)
echo "Error: Unknown cloud provider: $cloudProvider"
@@ -250,7 +213,7 @@ cloud_ForEachInstance() {
declare count=1
for info in "${instances[@]}"; do
declare name publicIp privateIp
IFS=: read -r name publicIp privateIp < <(echo "$info")
IFS=: read -r name publicIp privateIp zone < <(echo "$info")
eval "$cmd" "$name" "$publicIp" "$privateIp" "$count" "$@"
count=$((count + 1))
@@ -260,13 +223,18 @@ cloud_ForEachInstance() {
prepareInstancesAndWriteConfigFile() {
$metricsWriteDatapoint "testnet-deploy net-config-begin=1"
cat >> "$configFile" <<EOF
if $externalNodes; then
echo "Appending to existing config file"
echo "externalNodeSshKey=$sshPrivateKey" >> "$configFile"
else
cat >> "$configFile" <<EOF
# autogenerated at $(date)
netBasename=$prefix
publicNetwork=$publicNetwork
sshPrivateKey=$sshPrivateKey
leaderRotation=$leaderRotation
EOF
fi
buildSshOptions
@@ -303,57 +271,68 @@ EOF
echo "$name has booted."
}
echo "Looking for bootstrap leader instance..."
cloud_FindInstance "$prefix-bootstrap-leader"
[[ ${#instances[@]} -eq 1 ]] || {
echo "Unable to find bootstrap leader"
exit 1
}
if $externalNodes; then
echo "Bootstrap leader is already configured"
else
echo "Looking for bootstrap leader instance..."
cloud_FindInstance "$prefix-bootstrap-leader"
[[ ${#instances[@]} -eq 1 ]] || {
echo "Unable to find bootstrap leader"
exit 1
}
(
declare nodeName
declare nodeIp
IFS=: read -r nodeName nodeIp _ < <(echo "${instances[0]}")
(
declare nodeName
declare nodeIp
declare nodeZone
IFS=: read -r nodeName nodeIp _ nodeZone < <(echo "${instances[0]}")
# Try to ping the machine first.
timeout 90s bash -c "set -o pipefail; until ping -c 3 $nodeIp | tr - _; do echo .; done"
# Try to ping the machine first.
timeout 90s bash -c "set -o pipefail; until ping -c 3 $nodeIp | tr - _; do echo .; done"
if [[ ! -r $sshPrivateKey ]]; then
echo "Fetching $sshPrivateKey from $nodeName"
if [[ ! -r $sshPrivateKey ]]; then
echo "Fetching $sshPrivateKey from $nodeName"
# Try to scp in a couple times, sshd may not yet be up even though the
# machine can be pinged...
set -x -o pipefail
for i in $(seq 1 30); do
if cloud_FetchFile "$nodeName" "$nodeIp" /solana-id_ecdsa "$sshPrivateKey"; then
break
fi
# Try to scp in a couple times, sshd may not yet be up even though the
# machine can be pinged...
set -x -o pipefail
for i in $(seq 1 30); do
if cloud_FetchFile "$nodeName" "$nodeIp" /solana-id_ecdsa "$sshPrivateKey" "$nodeZone"; then
break
fi
sleep 1
echo "Retry $i..."
done
sleep 1
echo "Retry $i..."
done
chmod 400 "$sshPrivateKey"
ls -l "$sshPrivateKey"
fi
)
chmod 400 "$sshPrivateKey"
ls -l "$sshPrivateKey"
fi
)
echo "fullnodeIpList=()" >> "$configFile"
echo "fullnodeIpListPrivate=()" >> "$configFile"
cloud_ForEachInstance recordInstanceIp fullnodeIpList
cloud_ForEachInstance waitForStartupComplete
echo "fullnodeIpList=()" >> "$configFile"
echo "fullnodeIpListPrivate=()" >> "$configFile"
cloud_ForEachInstance recordInstanceIp fullnodeIpList
cloud_ForEachInstance waitForStartupComplete
fi
echo "Looking for additional fullnode instances..."
cloud_FindInstances "$prefix-fullnode"
[[ ${#instances[@]} -gt 0 ]] || {
echo "Unable to find additional fullnodes"
exit 1
}
cloud_ForEachInstance recordInstanceIp fullnodeIpList
cloud_ForEachInstance waitForStartupComplete
for zone in "${zones[@]}"; do
cloud_FindInstances "$prefix-$zone-fullnode"
[[ ${#instances[@]} -gt 0 ]] || {
echo "Unable to find additional fullnodes"
exit 1
}
cloud_ForEachInstance recordInstanceIp fullnodeIpList
cloud_ForEachInstance waitForStartupComplete
done
echo "clientIpList=()" >> "$configFile"
echo "clientIpListPrivate=()" >> "$configFile"
if $externalNodes; then
echo "Let's not reset the current client configuration"
else
echo "clientIpList=()" >> "$configFile"
echo "clientIpListPrivate=()" >> "$configFile"
fi
echo "Looking for client bencher instances..."
cloud_FindInstances "$prefix-client"
[[ ${#instances[@]} -eq 0 ]] || {
@@ -361,8 +340,12 @@ EOF
cloud_ForEachInstance waitForStartupComplete
}
echo "blockstreamerIpList=()" >> "$configFile"
echo "blockstreamerIpListPrivate=()" >> "$configFile"
if $externalNodes; then
echo "Let's not reset the current blockstream configuration"
else
echo "blockstreamerIpList=()" >> "$configFile"
echo "blockstreamerIpListPrivate=()" >> "$configFile"
fi
echo "Looking for blockstreamer instances..."
cloud_FindInstances "$prefix-blockstreamer"
[[ ${#instances[@]} -eq 0 ]] || {
@@ -381,7 +364,14 @@ delete() {
# during shutdown (only applicable when leader rotation is disabled).
# TODO: It would be better to fully cut-off metrics reporting before any
# instances are deleted.
for filter in "$prefix-bootstrap-leader" "$prefix-"; do
filters=("$prefix-bootstrap-leader")
for zone in "${zones[@]}"; do
filters+=("$prefix-$zone")
done
# Filter for all other nodes (client, blockstreamer)
filters+=("$prefix-")
for filter in "${filters[@]}"; do
echo "Searching for instances: $filter"
cloud_FindInstances "$filter"
@@ -391,7 +381,11 @@ delete() {
cloud_DeleteInstances true
fi
done
rm -f "$configFile"
if $externalNodes; then
echo "Let's not delete the current configuration file"
else
rm -f "$configFile"
fi
$metricsWriteDatapoint "testnet-deploy net-delete-complete=1"
@@ -501,25 +495,40 @@ EOF
bootstrapLeaderAddress=$customAddress
fi
cloud_Initialize "$prefix"
for zone in "${zones[@]}"; do
cloud_Initialize "$prefix" "$zone"
done
cloud_CreateInstances "$prefix" "$prefix-bootstrap-leader" 1 \
"$imageName" "$bootstrapLeaderMachineType" "$fullNodeBootDiskSizeInGb" \
"$startupScript" "$bootstrapLeaderAddress" "$bootDiskType"
if $externalNodes; then
echo "Bootstrap leader is already configured"
else
cloud_CreateInstances "$prefix" "$prefix-bootstrap-leader" 1 \
"$enableGpu" "$bootstrapLeaderMachineType" "${zones[0]}" "$fullNodeBootDiskSizeInGb" \
"$startupScript" "$bootstrapLeaderAddress" "$bootDiskType"
fi
cloud_CreateInstances "$prefix" "$prefix-fullnode" "$additionalFullNodeCount" \
"$imageName" "$fullNodeMachineType" "$fullNodeBootDiskSizeInGb" \
"$startupScript" "" "$bootDiskType"
num_zones=${#zones[@]}
numNodesPerZone=$((additionalFullNodeCount / num_zones))
numLeftOverNodes=$((additionalFullNodeCount % num_zones))
for ((i=0; i < "$num_zones"; i++)); do
zone=${zones[i]}
if [[ $i -eq $((num_zones - 1)) ]]; then
numNodesPerZone=$((numNodesPerZone + numLeftOverNodes))
fi
cloud_CreateInstances "$prefix" "$prefix-$zone-fullnode" "$numNodesPerZone" \
"$enableGpu" "$fullNodeMachineType" "$zone" "$fullNodeBootDiskSizeInGb" \
"$startupScript" "" "$bootDiskType"
done
if [[ $clientNodeCount -gt 0 ]]; then
cloud_CreateInstances "$prefix" "$prefix-client" "$clientNodeCount" \
"$imageName" "$clientMachineType" "$clientBootDiskSizeInGb" \
"$enableGpu" "$clientMachineType" "${zones[0]}" "$clientBootDiskSizeInGb" \
"$startupScript" "" "$bootDiskType"
fi
if $blockstreamer; then
cloud_CreateInstances "$prefix" "$prefix-blockstreamer" "1" \
"$imageName" "$blockstreamerMachineType" "$fullNodeBootDiskSizeInGb" \
"$enableGpu" "$blockstreamerMachineType" "${zones[0]}" "$fullNodeBootDiskSizeInGb" \
"$startupScript" "$blockstreamerAddress" "$bootDiskType"
fi

View File

@@ -12,11 +12,12 @@ usage() {
echo "Error: $*"
fi
cat <<EOF
usage: $0 [-e] [-d] [username]
usage: $0 [-e] [-d] [-c] [username]
Creates a testnet dev metrics database
username InfluxDB user with access to create a new database
-c Use Influx Cloud instance
-d Delete the database instead of creating it
-e Assume database already exists and SOLANA_METRICS_CONFIG is
defined in the environment already
@@ -29,12 +30,16 @@ loadConfigFile
useEnv=false
delete=false
while getopts "hde" opt; do
host="https://metrics.solana.com:8086"
while getopts "hcde" opt; do
case $opt in
h|\?)
usage
exit 0
;;
c)
host="https://clocktower-f1d56615.influxcloud.net:8086"
;;
d)
delete=true
;;
@@ -62,7 +67,7 @@ else
query() {
echo "$*"
curl -XPOST \
"https://metrics.solana.com:8086/query?u=${username}&p=${password}" \
"$host/query?u=${username}&p=${password}" \
--data-urlencode "q=$*"
}
@@ -73,7 +78,7 @@ else
query "GRANT READ ON \"$netBasename\" TO \"ro\""
query "GRANT WRITE ON \"$netBasename\" TO \"scratch_writer\""
SOLANA_METRICS_CONFIG="db=$netBasename,u=scratch_writer,p=topsecret"
SOLANA_METRICS_CONFIG="host=$host,db=$netBasename,u=scratch_writer,p=topsecret"
fi
echo "export SOLANA_METRICS_CONFIG=\"$SOLANA_METRICS_CONFIG\"" >> "$configFile"

View File

@@ -163,6 +163,7 @@ startCommon() {
mkdir -p ~/.cargo/bin
"
fi
[[ -z "$externalNodeSshKey" ]] || ssh-copy-id -f -i "$externalNodeSshKey" "${sshOptions[@]}" "solana@$ipAddress"
rsync -vPrc -e "ssh ${sshOptions[*]}" \
"$SOLANA_ROOT"/{fetch-perf-libs.sh,scripts,net,multinode-demo} \
"$ipAddress":~/solana/

View File

@@ -3,17 +3,17 @@
# Utilities for working with EC2 instances
#
zone=
region=
cloud_SetZone() {
zone="$1"
# AWS region is zone with the last character removed
region="${zone:0:$((${#zone} - 1))}"
cloud_DefaultZone() {
echo "us-east-1b"
}
# Set the default zone
cloud_SetZone "us-east-1b"
# AWS region is zone with the last character removed
__cloud_GetRegion() {
declare zone="$1"
# AWS region is zone with the last character removed
declare region="${zone:0:$((${#zone} - 1))}"
echo "$region"
}
# sshPrivateKey should be globally defined whenever this function is called.
#
@@ -49,18 +49,22 @@ __cloud_FindInstances() {
declare filter="$1"
instances=()
declare name publicIp privateIp
while read -r name publicIp privateIp; do
printf "%-30s | publicIp=%-16s privateIp=%s\n" "$name" "$publicIp" "$privateIp"
instances+=("$name:$publicIp:$privateIp")
done < <(aws ec2 describe-instances \
--region "$region" \
--filters \
"Name=tag:name,Values=$filter" \
"Name=instance-state-name,Values=pending,running" \
--query "Reservations[].Instances[].[InstanceId,PublicIpAddress,PrivateIpAddress]" \
--output text \
)
declare -a regions=("us-east-1" "us-west-1" "us-west-2")
for region in "${regions[@]}"
do
declare name publicIp privateIp
while read -r name publicIp privateIp zone; do
printf "%-30s | publicIp=%-16s privateIp=%s zone=%s\n" "$name" "$publicIp" "$privateIp" "$zone"
instances+=("$name:$publicIp:$privateIp:$zone")
done < <(aws ec2 describe-instances \
--region "$region" \
--filters \
"Name=tag:name,Values=$filter" \
"Name=instance-state-name,Values=pending,running" \
--query "Reservations[].Instances[].[InstanceId,PublicIpAddress,PrivateIpAddress,Placement.AvailabilityZone]" \
--output text \
)
done
}
#
@@ -111,6 +115,9 @@ cloud_FindInstance() {
# This function will be called before |cloud_CreateInstances|
cloud_Initialize() {
declare networkName="$1"
declare zone="$2"
declare region=
region=$(__cloud_GetRegion "$zone")
__cloud_SshPrivateKeyCheck
(
@@ -152,11 +159,54 @@ cloud_CreateInstances() {
declare networkName="$1"
declare namePrefix="$2"
declare numNodes="$3"
declare imageName="$4"
declare enableGpu="$4"
declare machineType="$5"
declare optionalBootDiskSize="$6"
declare optionalStartupScript="$7"
declare optionalAddress="$8"
declare zone="$6"
declare optionalBootDiskSize="$7"
declare optionalStartupScript="$8"
declare optionalAddress="$9"
declare region=
region=$(__cloud_GetRegion "$zone")
if $enableGpu; then
#
# Custom Ubuntu 18.04 LTS image with CUDA 9.2 and CUDA 10.0 installed
#
# TODO: Unfortunately these AMIs are not public. When this becomes an issue,
# use the stock Ubuntu 18.04 image and programmatically install CUDA after the
# instance boots
#
case $region in
us-east-1)
imageName="ami-0a8bd6fb204473f78"
;;
us-west-1)
imageName="ami-07011f0795513c59d"
;;
us-west-2)
imageName="ami-0a11ef42b62b82b68"
;;
*)
usage "Unsupported region: $region"
;;
esac
else
# Select an upstream Ubuntu 18.04 AMI from https://cloud-images.ubuntu.com/locator/ec2/
case $region in
us-east-1)
imageName="ami-0a313d6098716f372"
;;
us-west-1)
imageName="ami-06397100adf427136"
;;
us-west-2)
imageName="ami-0dc34f4b016c9ce49"
;;
*)
usage "Unsupported region: $region"
;;
esac
fi
declare -a args
args=(
@@ -199,12 +249,14 @@ cloud_CreateInstances() {
fi
declare instanceId
IFS=: read -r instanceId _ < <(echo "${instances[0]}")
IFS=: read -r instanceId publicIp privateIp zone < <(echo "${instances[0]}")
(
set -x
# TODO: Poll that the instance has moved to the 'running' state instead of
# blindly sleeping for 30 seconds...
sleep 30
declare region=
region=$(__cloud_GetRegion "$zone")
aws ec2 associate-address \
--instance-id "$instanceId" \
--region "$region" \
@@ -225,6 +277,9 @@ cloud_DeleteInstances() {
fi
declare names=("${instances[@]/:*/}")
declare zones=("${instances[@]/*:/}")
declare region=
region=$(__cloud_GetRegion "${zones[0]}")
(
set -x

View File

@@ -4,12 +4,10 @@
#
# Default zone
zone="us-west1-b"
cloud_SetZone() {
zone="$1"
cloud_DefaultZone() {
echo "us-west1-b"
}
#
# __cloud_FindInstances
#
@@ -30,13 +28,13 @@ __cloud_FindInstances() {
instances=()
declare name zone publicIp privateIp status
while read -r name publicIp privateIp status; do
printf "%-30s | publicIp=%-16s privateIp=%s status=%s\n" "$name" "$publicIp" "$privateIp" "$status"
while read -r name publicIp privateIp status zone; do
printf "%-30s | publicIp=%-16s privateIp=%s status=%s zone=%s\n" "$name" "$publicIp" "$privateIp" "$status" "$zone"
instances+=("$name:$publicIp:$privateIp")
instances+=("$name:$publicIp:$privateIp:$zone")
done < <(gcloud compute instances list \
--filter "$filter" \
--format 'value(name,networkInterfaces[0].accessConfigs[0].natIP,networkInterfaces[0].networkIP,status)')
--format 'value(name,networkInterfaces[0].accessConfigs[0].natIP,networkInterfaces[0].networkIP,status,zone)')
}
#
# cloud_FindInstances [namePrefix]
@@ -119,12 +117,26 @@ cloud_CreateInstances() {
declare networkName="$1"
declare namePrefix="$2"
declare numNodes="$3"
declare imageName="$4"
declare enableGpu="$4"
declare machineType="$5"
declare optionalBootDiskSize="$6"
declare optionalStartupScript="$7"
declare optionalAddress="$8"
declare optionalBootDiskType="$9"
declare zone="$6"
declare optionalBootDiskSize="$7"
declare optionalStartupScript="$8"
declare optionalAddress="$9"
declare optionalBootDiskType="${10}"
if $enableGpu; then
# Custom Ubuntu 18.04 LTS image with CUDA 9.2 and CUDA 10.0 installed
#
# TODO: Unfortunately this image is not public. When this becomes an issue,
# use the stock Ubuntu 18.04 image and programmatically install CUDA after the
# instance boots
#
imageName="ubuntu-1804-bionic-v20181029-with-cuda-10-and-cuda-9-2"
else
# Upstream Ubuntu 18.04 LTS image
imageName="ubuntu-1804-bionic-v20181029 --image-project ubuntu-os-cloud"
fi
declare -a nodes
if [[ $numNodes = 1 ]]; then
@@ -192,11 +204,13 @@ cloud_DeleteInstances() {
echo No instances to delete
return
fi
declare names=("${instances[@]/:*/}")
declare zones=("${instances[@]/*:/}")
(
set -x
gcloud beta compute instances delete --zone "$zone" --quiet "${names[@]}"
gcloud beta compute instances delete --zone "${zones[0]}" --quiet "${names[@]}"
)
}
@@ -213,6 +227,7 @@ cloud_FetchFile() {
declare publicIp="$2"
declare remoteFile="$3"
declare localFile="$4"
declare zone="$5"
(
set -x

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-netutil"
version = "0.12.1"
version = "0.12.3"
description = "Solana Network Utilities"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -18,7 +18,7 @@ reqwest = "0.9.0"
socket2 = "0.3.8"
[dev-dependencies]
solana-logger = { path = "../logger", version = "0.12.1" }
solana-logger = { path = "../logger", version = "0.12.3" }
[lib]
name = "solana_netutil"

View File

@@ -1,7 +1,7 @@
[package]
name = "solana-bpf-programs"
description = "Blockchain, Rebuilt for Scale"
version = "0.12.1"
version = "0.12.3"
documentation = "https://docs.rs/solana"
homepage = "https://solana.com/"
readme = "README.md"
@@ -22,10 +22,10 @@ bincode = "1.1.2"
byteorder = "1.3.1"
elf = "0.0.10"
solana_rbpf = "=0.1.10"
solana-bpfloader = { path = "../bpf_loader", version = "0.12.1" }
solana-logger = { path = "../../logger", version = "0.12.1" }
solana-runtime = { path = "../../runtime", version = "0.12.1" }
solana-sdk = { path = "../../sdk", version = "0.12.1" }
solana-bpfloader = { path = "../bpf_loader", version = "0.12.3" }
solana-logger = { path = "../../logger", version = "0.12.3" }
solana-runtime = { path = "../../runtime", version = "0.12.3" }
solana-sdk = { path = "../../sdk", version = "0.12.3" }
[[bench]]
name = "bpf_loader"

View File

@@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-noop"
version = "0.12.1"
version = "0.12.3"
description = "Solana BPF noop program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-bpfloader"
version = "0.12.1"
version = "0.12.3"
description = "Solana BPF Loader"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -15,8 +15,8 @@ libc = "0.2.50"
log = "0.4.2"
solana_rbpf = "=0.1.10"
serde = "1.0.89"
solana-logger = { path = "../../logger", version = "0.12.1" }
solana-sdk = { path = "../../sdk", version = "0.12.1" }
solana-logger = { path = "../../logger", version = "0.12.3" }
solana-sdk = { path = "../../sdk", version = "0.12.3" }
[lib]
name = "solana_bpf_loader"

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-budget-program"
version = "0.12.1"
version = "0.12.3"
description = "Solana budget program"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -14,12 +14,12 @@ chrono = { version = "0.4.0", features = ["serde"] }
log = "0.4.2"
serde = "1.0.89"
serde_derive = "1.0.89"
solana-budget-api = { path = "../budget_api", version = "0.12.1" }
solana-logger = { path = "../../logger", version = "0.12.1" }
solana-sdk = { path = "../../sdk", version = "0.12.1" }
solana-budget-api = { path = "../budget_api", version = "0.12.3" }
solana-logger = { path = "../../logger", version = "0.12.3" }
solana-sdk = { path = "../../sdk", version = "0.12.3" }
[dev-dependencies]
solana-runtime = { path = "../../runtime", version = "0.12.1" }
solana-runtime = { path = "../../runtime", version = "0.12.3" }
[lib]
name = "solana_budget_program"

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-budget-api"
version = "0.12.1"
version = "0.12.3"
description = "Solana Budget program API"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -13,7 +13,7 @@ bincode = "1.1.2"
chrono = { version = "0.4.0", features = ["serde"] }
serde = "1.0.89"
serde_derive = "1.0.89"
solana-sdk = { path = "../../sdk", version = "0.12.1" }
solana-sdk = { path = "../../sdk", version = "0.12.3" }
[lib]
name = "solana_budget_api"

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-failure"
version = "0.12.1"
version = "0.12.3"
description = "Solana failure program"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -9,11 +9,11 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk = { path = "../../sdk", version = "0.12.1" }
solana-sdk = { path = "../../sdk", version = "0.12.3" }
log = "0.4.2"
[dev-dependencies]
solana-runtime = { path = "../../runtime", version = "0.12.1" }
solana-runtime = { path = "../../runtime", version = "0.12.3" }
[lib]
name = "failure"

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-noop"
version = "0.12.1"
version = "0.12.3"
description = "Solana noop program"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -9,12 +9,12 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk = { path = "../../sdk", version = "0.12.1" }
solana-logger = { path = "../../logger", version = "0.12.1" }
solana-sdk = { path = "../../sdk", version = "0.12.3" }
solana-logger = { path = "../../logger", version = "0.12.3" }
log = "0.4.2"
[dev-dependencies]
solana-runtime = { path = "../../runtime", version = "0.12.1" }
solana-runtime = { path = "../../runtime", version = "0.12.3" }
[lib]
name = "noop"

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-rewards-program"
version = "0.12.1"
version = "0.12.3"
description = "Solana rewards program"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,13 +11,13 @@ edition = "2018"
[dependencies]
bincode = "1.1.2"
log = "0.4.2"
solana-logger = { path = "../../logger", version = "0.12.1" }
solana-sdk = { path = "../../sdk", version = "0.12.1" }
solana-rewards-api = { path = "../rewards_api", version = "0.12.1" }
solana-vote-api = { path = "../vote_api", version = "0.12.1" }
solana-logger = { path = "../../logger", version = "0.12.3" }
solana-sdk = { path = "../../sdk", version = "0.12.3" }
solana-rewards-api = { path = "../rewards_api", version = "0.12.3" }
solana-vote-api = { path = "../vote_api", version = "0.12.3" }
[dev-dependencies]
solana-runtime = { path = "../../runtime", version = "0.12.1" }
solana-runtime = { path = "../../runtime", version = "0.12.3" }
[lib]
name = "solana_rewards_program"

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-rewards-api"
version = "0.12.1"
version = "0.12.3"
description = "Solana rewards API"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,8 +12,8 @@ edition = "2018"
bincode = "1.1.2"
serde = "1.0.89"
serde_derive = "1.0.89"
solana-sdk = { path = "../../sdk", version = "0.12.1" }
solana-vote-api = { path = "../vote_api", version = "0.12.1" }
solana-sdk = { path = "../../sdk", version = "0.12.3" }
solana-vote-api = { path = "../vote_api", version = "0.12.3" }
[lib]
name = "solana_rewards_api"

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-storage-program"
version = "0.12.1"
version = "0.12.3"
description = "Solana storage program"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -13,12 +13,12 @@ bincode = "1.1.2"
log = "0.4.2"
serde = "1.0.89"
serde_derive = "1.0.89"
solana-logger = { path = "../../logger", version = "0.12.1" }
solana-sdk = { path = "../../sdk", version = "0.12.1" }
solana-storage-api = { path = "../storage_api", version = "0.12.1" }
solana-logger = { path = "../../logger", version = "0.12.3" }
solana-sdk = { path = "../../sdk", version = "0.12.3" }
solana-storage-api = { path = "../storage_api", version = "0.12.3" }
[dev-dependencies]
solana-runtime = { path = "../../runtime", version = "0.12.1" }
solana-runtime = { path = "../../runtime", version = "0.12.3" }
[lib]
name = "solana_storage_program"

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-storage-api"
version = "0.12.1"
version = "0.12.3"
description = "Solana Storage program API"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,7 +12,7 @@ edition = "2018"
bincode = "1.1.2"
serde = "1.0.89"
serde_derive = "1.0.89"
solana-sdk = { path = "../../sdk", version = "0.12.1" }
solana-sdk = { path = "../../sdk", version = "0.12.3" }
[lib]
name = "solana_storage_api"

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-token-program"
version = "0.12.1"
version = "0.12.3"
description = "Solana token program"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -13,8 +13,8 @@ bincode = "1.1.2"
log = "0.4.2"
serde = "1.0.89"
serde_derive = "1.0.89"
solana-logger = { path = "../../logger", version = "0.12.1" }
solana-sdk = { path = "../../sdk", version = "0.12.1" }
solana-logger = { path = "../../logger", version = "0.12.3" }
solana-sdk = { path = "../../sdk", version = "0.12.3" }
[lib]
name = "solana_token_program"

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-token-api"
version = "0.12.1"
version = "0.12.3"
description = "Solana Token API"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,7 +12,7 @@ edition = "2018"
bincode = "1.1.2"
serde = "1.0.89"
serde_derive = "1.0.89"
solana-sdk = { path = "../../sdk", version = "0.12.1" }
solana-sdk = { path = "../../sdk", version = "0.12.3" }
[lib]
name = "solana_token_api"

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-vote-program"
version = "0.12.1"
version = "0.12.3"
description = "Solana vote program"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,13 +11,13 @@ edition = "2018"
[dependencies]
bincode = "1.1.2"
log = "0.4.2"
solana-logger = { path = "../../logger", version = "0.12.1" }
solana-metrics = { path = "../../metrics", version = "0.12.1" }
solana-sdk = { path = "../../sdk", version = "0.12.1" }
solana-vote-api = { path = "../vote_api", version = "0.12.1" }
solana-logger = { path = "../../logger", version = "0.12.3" }
solana-metrics = { path = "../../metrics", version = "0.12.3" }
solana-sdk = { path = "../../sdk", version = "0.12.3" }
solana-vote-api = { path = "../vote_api", version = "0.12.3" }
[dev-dependencies]
solana-runtime = { path = "../../runtime", version = "0.12.1" }
solana-runtime = { path = "../../runtime", version = "0.12.3" }
[lib]
name = "solana_vote_program"

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-vote-api"
version = "0.12.1"
version = "0.12.3"
description = "Solana Vote program API"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -13,7 +13,7 @@ bincode = "1.1.2"
log = "0.4.2"
serde = "1.0.89"
serde_derive = "1.0.89"
solana-sdk = { path = "../../sdk", version = "0.12.1" }
solana-sdk = { path = "../../sdk", version = "0.12.3" }
[lib]
name = "solana_vote_api"

View File

@@ -2,17 +2,17 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-replicator"
version = "0.12.1"
version = "0.12.3"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
clap = "2.32.0"
solana = { path = "../core", version = "0.12.1" }
solana-logger = { path = "../logger", version = "0.12.1" }
solana-netutil = { path = "../netutil", version = "0.12.1" }
solana-sdk = { path = "../sdk", version = "0.12.1" }
solana = { path = "../core", version = "0.12.3" }
solana-logger = { path = "../logger", version = "0.12.3" }
solana-netutil = { path = "../netutil", version = "0.12.3" }
solana-sdk = { path = "../sdk", version = "0.12.3" }
[features]
chacha = ["solana/chacha"]

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-runtime"
version = "0.12.1"
version = "0.12.3"
description = "Solana runtime"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -22,13 +22,13 @@ rand = "0.6.5"
serde = "1.0.88"
serde_derive = "1.0.88"
serde_json = "1.0.38"
solana-budget-api = { path = "../programs/budget_api", version = "0.12.1" }
solana-logger = { path = "../logger", version = "0.12.1" }
solana-metrics = { path = "../metrics", version = "0.12.1" }
solana-sdk = { path = "../sdk", version = "0.12.1" }
solana-storage-api = { path = "../programs/storage_api", version = "0.12.1" }
solana-token-api = { path = "../programs/token_api", version = "0.12.1" }
solana-vote-api = { path = "../programs/vote_api", version = "0.12.1" }
solana-budget-api = { path = "../programs/budget_api", version = "0.12.3" }
solana-logger = { path = "../logger", version = "0.12.3" }
solana-metrics = { path = "../metrics", version = "0.12.3" }
solana-sdk = { path = "../sdk", version = "0.12.3" }
solana-storage-api = { path = "../programs/storage_api", version = "0.12.3" }
solana-token-api = { path = "../programs/token_api", version = "0.12.3" }
solana-vote-api = { path = "../programs/vote_api", version = "0.12.3" }
[lib]
name = "solana_runtime"

View File

@@ -727,6 +727,7 @@ impl AccountsDB {
.map_or(0, |fork_info| fork_info.transaction_count)
}
#[allow(dead_code)]
fn remove_parents(&self, fork: Fork) -> Vec<Fork> {
let mut info = self.fork_infos.write().unwrap();
let fork_info = info.get_mut(&fork).unwrap();
@@ -743,6 +744,7 @@ impl AccountsDB {
.is_empty()
}
#[allow(dead_code)]
fn get_merged_account_map(
&self,
fork: Fork,
@@ -763,6 +765,7 @@ impl AccountsDB {
}
/// make fork a root, i.e. forget its heritage
#[allow(dead_code)]
fn squash(&self, fork: Fork) {
let parents = self.remove_parents(fork);
@@ -991,6 +994,7 @@ impl Accounts {
/// accounts starts with an empty data structure for every child/fork
/// this function squashes all the parents into this instance
#[allow(dead_code)]
pub fn squash(&self, fork: Fork) {
assert!(!self.account_locks.lock().unwrap().contains_key(&fork));
self.accounts_db.squash(fork);

View File

@@ -5,6 +5,7 @@
use crate::accounts::{Accounts, ErrorCounters, InstructionAccounts, InstructionLoaders};
use crate::hash_queue::HashQueue;
use crate::locked_accounts_results::LockedAccountsResults;
use crate::runtime::{self, RuntimeError};
use crate::status_cache::StatusCache;
use bincode::serialize;
@@ -289,7 +290,7 @@ impl Bank {
// freeze is a one-way trip, idempotent
*hash = self.hash_internal_state();
}
self.status_cache.write().unwrap().freeze();
// self.status_cache.write().unwrap().freeze();
}
/// squash the parent's state up into this Bank,
@@ -300,11 +301,15 @@ impl Bank {
let parents = self.parents();
*self.parent.write().unwrap() = None;
self.accounts().squash(self.accounts_id);
// self.accounts().squash(self.accounts_id);
let parent_caches: Vec<_> = parents
.iter()
.map(|b| b.status_cache.read().unwrap())
.map(|p| {
let mut parent = p.status_cache.write().unwrap();
parent.freeze();
parent
})
.collect();
self.status_cache.write().unwrap().squash(&parent_caches);
}
@@ -485,18 +490,28 @@ impl Bank {
.map_or(Ok(()), |sig| self.get_signature_status(sig).unwrap())
}
pub fn lock_accounts(&self, txs: &[Transaction]) -> Vec<Result<()>> {
pub fn lock_accounts<'a, 'b>(
&'a self,
txs: &'b [Transaction],
) -> LockedAccountsResults<'a, 'b> {
if self.is_frozen() {
warn!("=========== FIXME: lock_accounts() working on a frozen bank! ================");
}
// TODO: put this assert back in
// assert!(!self.is_frozen());
self.accounts().lock_accounts(self.accounts_id, txs)
let results = self.accounts().lock_accounts(self.accounts_id, txs);
LockedAccountsResults::new(results, &self, txs)
}
pub fn unlock_accounts(&self, txs: &[Transaction], results: &[Result<()>]) {
self.accounts()
.unlock_accounts(self.accounts_id, txs, results)
pub fn unlock_accounts(&self, locked_accounts_results: &mut LockedAccountsResults) {
if locked_accounts_results.needs_unlock {
locked_accounts_results.needs_unlock = false;
self.accounts().unlock_accounts(
self.accounts_id,
locked_accounts_results.transactions(),
locked_accounts_results.locked_accounts_results(),
)
}
}
fn load_accounts(
@@ -508,22 +523,23 @@ impl Bank {
self.accounts()
.load_accounts(self.accounts_id, txs, results, error_counters)
}
fn check_age(
&self,
txs: &[Transaction],
lock_results: Vec<Result<()>>,
lock_results: &LockedAccountsResults,
max_age: usize,
error_counters: &mut ErrorCounters,
) -> Vec<Result<()>> {
let hash_queue = self.blockhash_queue.read().unwrap();
txs.iter()
.zip(lock_results.into_iter())
.zip(lock_results.locked_accounts_results())
.map(|(tx, lock_res)| {
if lock_res.is_ok() && !hash_queue.check_entry_age(tx.recent_blockhash, max_age) {
error_counters.reserve_blockhash += 1;
Err(BankError::BlockhashNotFound)
} else {
lock_res
lock_res.clone()
}
})
.collect()
@@ -556,7 +572,7 @@ impl Bank {
pub fn load_and_execute_transactions(
&self,
txs: &[Transaction],
lock_results: Vec<Result<()>>,
lock_results: &LockedAccountsResults,
max_age: usize,
) -> (
Vec<Result<(InstructionAccounts, InstructionLoaders)>>,
@@ -714,7 +730,7 @@ impl Bank {
pub fn load_execute_and_commit_transactions(
&self,
txs: &[Transaction],
lock_results: Vec<Result<()>>,
lock_results: &LockedAccountsResults,
max_age: usize,
) -> Vec<Result<()>> {
let (loaded_accounts, executed) =
@@ -726,10 +742,7 @@ impl Bank {
#[must_use]
pub fn process_transactions(&self, txs: &[Transaction]) -> Vec<Result<()>> {
let lock_results = self.lock_accounts(txs);
let results =
self.load_execute_and_commit_transactions(txs, lock_results, MAX_RECENT_BLOCKHASHES);
self.unlock_accounts(txs, &results);
results
self.load_execute_and_commit_transactions(txs, &lock_results, MAX_RECENT_BLOCKHASHES)
}
/// Create, sign, and process a Transaction from `keypair` to `to` of
@@ -1287,7 +1300,7 @@ mod tests {
let lock_result = bank.lock_accounts(&pay_alice);
let results_alice = bank.load_execute_and_commit_transactions(
&pay_alice,
lock_result,
&lock_result,
MAX_RECENT_BLOCKHASHES,
);
assert_eq!(results_alice[0], Ok(()));
@@ -1304,7 +1317,7 @@ mod tests {
Err(BankError::AccountInUse)
);
bank.unlock_accounts(&pay_alice, &results_alice);
drop(lock_result);
assert!(bank
.transfer(2, &mint_keypair, &bob.pubkey(), genesis_block.hash())

View File

@@ -4,6 +4,7 @@ pub mod bank;
pub mod bloom;
mod hash_queue;
pub mod loader_utils;
pub mod locked_accounts_results;
mod native_loader;
pub mod runtime;
mod status_cache;

View File

@@ -0,0 +1,42 @@
use crate::bank::{Bank, Result};
use solana_sdk::transaction::Transaction;
// Represents the results of trying to lock a set of accounts
pub struct LockedAccountsResults<'a, 'b> {
locked_accounts_results: Vec<Result<()>>,
bank: &'a Bank,
transactions: &'b [Transaction],
pub(crate) needs_unlock: bool,
}
impl<'a, 'b> LockedAccountsResults<'a, 'b> {
pub fn new(
locked_accounts_results: Vec<Result<()>>,
bank: &'a Bank,
transactions: &'b [Transaction],
) -> Self {
Self {
locked_accounts_results,
bank,
transactions,
needs_unlock: true,
}
}
pub fn locked_accounts_results(&self) -> &Vec<Result<()>> {
&self.locked_accounts_results
}
pub fn transactions(&self) -> &[Transaction] {
self.transactions
}
}
// Unlock all locked accounts in destructor.
impl<'a, 'b> Drop for LockedAccountsResults<'a, 'b> {
fn drop(&mut self) {
if self.needs_unlock {
self.bank.unlock_accounts(self)
}
}
}

View File

@@ -15,6 +15,12 @@ if [[ -z $INFLUX_DATABASE || -z $INFLUX_USERNAME || -z $INFLUX_PASSWORD ]]; then
exit 0
fi
echo "https://metrics.solana.com:8086/write?db=${INFLUX_DATABASE}&u=${INFLUX_USERNAME}&p=${INFLUX_PASSWORD}" \
host="https://metrics.solana.com:8086"
if [[ -n $INFLUX_HOST ]]; then
host="$INFLUX_HOST"
fi
echo "${host}/write?db=${INFLUX_DATABASE}&u=${INFLUX_USERNAME}&p=${INFLUX_PASSWORD}" \
| xargs curl --max-time 5 -XPOST --data-binary "$point"
exit 0

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-sdk"
version = "0.12.1"
version = "0.12.3"
description = "Solana SDK"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"

View File

@@ -34,7 +34,6 @@ fn new_gossip(
GossipService::new(&cluster_info, None, None, gossip, exit)
}
/// Test that message sent from leader to target1 and replayed to target2
#[test]
fn test_replay() {
solana_logger::setup();
@@ -99,8 +98,9 @@ fn test_replay() {
let dr_1 = new_gossip(cref1.clone(), target1.sockets.gossip, &exit);
let voting_keypair = Keypair::new();
let blocktree = Arc::new(blocktree);
let (poh_service_exit, poh_recorder, poh_service, _entry_receiver) =
create_test_recorder(&bank);
create_test_recorder(&bank, &blocktree);
let tvu = Tvu::new(
&voting_keypair.pubkey(),
Some(Arc::new(voting_keypair)),
@@ -114,7 +114,7 @@ fn test_replay() {
fetch: target1.sockets.tvu,
}
},
Arc::new(blocktree),
blocktree,
STORAGE_ROTATE_TEST_COUNT,
&StorageState::default(),
None,
@@ -185,6 +185,7 @@ fn test_replay() {
dr_1.join().unwrap();
t_receiver.join().unwrap();
t_responder.join().unwrap();
drop(poh_recorder);
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
let _ignored = remove_dir_all(&blocktree_path);
}

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-upload-perf"
version = "0.12.1"
version = "0.12.3"
description = "Metrics Upload Utility"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -10,7 +10,7 @@ homepage = "https://solana.com/"
[dependencies]
serde_json = "1.0.39"
solana-metrics = { path = "../metrics", version = "0.12.1" }
solana-metrics = { path = "../metrics", version = "0.12.3" }
[[bin]]
name = "solana-upload-perf"

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-vote-signer"
description = "Solana Vote Signing Service"
version = "0.12.1"
version = "0.12.3"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -17,8 +17,8 @@ jsonrpc-derive = "10.1.0"
jsonrpc-http-server = "10.1.0"
serde = "1.0.89"
serde_json = "1.0.39"
solana-sdk = { path = "../sdk", version = "0.12.1" }
solana-metrics = { path = "../metrics", version = "0.12.1" }
solana-sdk = { path = "../sdk", version = "0.12.3" }
solana-metrics = { path = "../metrics", version = "0.12.3" }
[lib]
name = "solana_vote_signer"

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-wallet"
description = "Blockchain, Rebuilt for Scale"
version = "0.12.1"
version = "0.12.3"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -16,17 +16,17 @@ chrono = { version = "0.4.0", features = ["serde"] }
dirs = "1.0.5"
log = "0.4.2"
serde_json = "1.0.39"
solana = { path = "../core", version = "0.12.1" }
solana-budget-api = { path = "../programs/budget_api", version = "0.12.1" }
solana-client = { path = "../client", version = "0.12.1" }
solana-drone = { path = "../drone", version = "0.12.1" }
solana-logger = { path = "../logger", version = "0.12.1" }
solana-sdk = { path = "../sdk", version = "0.12.1" }
solana-vote-api = { path = "../programs/vote_api", version = "0.12.1" }
solana-vote-signer = { path = "../vote-signer", version = "0.12.1" }
solana = { path = "../core", version = "0.12.3" }
solana-budget-api = { path = "../programs/budget_api", version = "0.12.3" }
solana-client = { path = "../client", version = "0.12.3" }
solana-drone = { path = "../drone", version = "0.12.3" }
solana-logger = { path = "../logger", version = "0.12.3" }
solana-sdk = { path = "../sdk", version = "0.12.3" }
solana-vote-api = { path = "../programs/vote_api", version = "0.12.3" }
solana-vote-signer = { path = "../vote-signer", version = "0.12.3" }
[dev-dependencies]
solana-budget-program = { path = "../programs/budget", version = "0.12.1" }
solana-budget-program = { path = "../programs/budget", version = "0.12.3" }
[features]
cuda = ["solana/cuda"]