Compare commits
104 Commits
| Author | SHA1 | Date |
|---|---|---|
| | e55249e63f | |
| | 10bc0c6ee2 | |
| | ed14b78d81 | |
| | 7f404941bb | |
| | 6d45ac1bc7 | |
| | fabb6d2092 | |
| | 93cea4c86c | |
| | 5fb35f79c3 | |
| | da11274b63 | |
| | 5d70e2efa9 | |
| | 8f181b4350 | |
| | 48844924e5 | |
| | f84593ad5f | |
| | 0469dc52ac | |
| | 4cf418f33f | |
| | 6c46fcfa4e | |
| | 12ec5304f2 | |
| | e32f798d5f | |
| | 68a8b955bc | |
| | f479021c0f | |
| | b91afb7079 | |
| | e189c429d5 | |
| | 6a1904664c | |
| | 3285cf8047 | |
| | bdee3a25f2 | |
| | 8655df0520 | |
| | c43eecb8ca | |
| | 18f45ebc2c | |
| | fd28642603 | |
| | 038583b466 | |
| | ed138d392d | |
| | 58f1f0a28b | |
| | 330d9330b0 | |
| | d626a89c88 | |
| | db5d22e532 | |
| | aa8759744e | |
| | 060db36c34 | |
| | fa1ea1c458 | |
| | 7685ba2805 | |
| | a0d940acf0 | |
| | f4c914a630 | |
| | eede274cfe | |
| | 4df79b653b | |
| | a2c1fa7cb4 | |
| | 95cead91a5 | |
| | 89c42ecd3f | |
| | f93c9f052f | |
| | e2871053bd | |
| | 351c9c33d2 | |
| | 59f2a478b7 | |
| | 3f7cd4adc4 | |
| | 4318854a64 | |
| | 430740b691 | |
| | 797603a0fe | |
| | f402139991 | |
| | 4db72d85d7 | |
| | 007e17c290 | |
| | ad7e727938 | |
| | 3d5eeab6d9 | |
| | 8278585545 | |
| | 061d6ec8fd | |
| | 000cc27e53 | |
| | 9b3092b965 | |
| | ca819fc4fb | |
| | 5ff8f57c0e | |
| | 4798612560 | |
| | 9760cb2e6a | |
| | 46b3b3a1c6 | |
| | 1e70f85e83 | |
| | b2d6681762 | |
| | 1b51cba778 | |
| | 19ab7333aa | |
| | b0e6604b9a | |
| | 9ce1d5e990 | |
| | facc47cb62 | |
| | 3dba8b7952 | |
| | 5e40a5bfc1 | |
| | c60baf99f3 | |
| | de04884c1b | |
| | e666509409 | |
| | 28aff96d21 | |
| | 242975f8cd | |
| | c6ba6cac83 | |
| | dc67dd3357 | |
| | 733c2a0b07 | |
| | 07d6212d18 | |
| | c20d60e4cf | |
| | 7147f03efe | |
| | 6740cb5b02 | |
| | 1e8e99cc3e | |
| | ef7f30e09f | |
| | ca8e0ec7ae | |
| | 2a4f4b3e53 | |
| | 7cecd3851a | |
| | 4d189f2c38 | |
| | 9a232475a7 | |
| | 09c9897591 | |
| | 06d7573478 | |
| | 0b55ffa368 | |
| | ae750bb16b | |
| | 80b2f2f6b7 | |
| | 6684d84fbc | |
| | dc02abae3c | |
| | 6caec655d3 | |
12 .buildkite/env/secrets.ejson vendored
@@ -1,10 +1,12 @@
{
"_public_key": "ae29f4f7ad2fc92de70d470e411c8426d5d48db8817c9e3dae574b122192335f",
"environment": {
"CODECOV_TOKEN": "EJ[1:Kqnm+k1Z4p8nr7GqMczXnzh6azTk39tj3bAbCKPitUc=:EzVa4Gpj2Qn5OhZQlVfGFchuROgupvnW:CbWc6sNh1GCrAbrncxDjW00zUAD/Sa+ccg7CFSz8Ua6LnCYnSddTBxJWcJEbEs0MrjuZRQ==]",
"CRATES_IO_TOKEN": "EJ[1:Kqnm+k1Z4p8nr7GqMczXnzh6azTk39tj3bAbCKPitUc=:qF7QrUM8j+19mptcE1YS71CqmrCM13Ah:TZCatJeT1egCHiufE6cGFC1VsdJkKaaqV6QKWkEsMPBKvOAdaZbbVz9Kl+lGnIsF]",
"INFLUX_DATABASE": "EJ[1:Kqnm+k1Z4p8nr7GqMczXnzh6azTk39tj3bAbCKPitUc=:PetD/4c/EbkQmFEcK21g3cBBAPwFqHEw:wvYmDZRajy2WngVFs9AlwyHk]",
"INFLUX_USERNAME": "EJ[1:Kqnm+k1Z4p8nr7GqMczXnzh6azTk39tj3bAbCKPitUc=:WcnqZdmDFtJJ01Zu5LbeGgbYGfRzBdFc:a7c5zDDtCOu5L1Qd2NKkxT6kljyBcbck]",
"INFLUX_PASSWORD": "EJ[1:Kqnm+k1Z4p8nr7GqMczXnzh6azTk39tj3bAbCKPitUc=:LIZgP9Tp9yE9OlpV8iogmLOI7iW7SiU3:x0nYdT1A6sxu+O+MMLIN19d2t6rrK1qJ3+HnoWG3PDodsXjz06YJWQKU/mx6saqH+QbGtGV5mk0=]"
"CODECOV_TOKEN": "EJ[1:+7nLVR8NlnN48zgaJPPXF9JOZDXVNHDZLeARlCFHyRk=:rHBSqXK7uSnveA4qwUxARZjTNZcA0hXU:ko8lLGwPECpVm19znWBRxKEpMF7xpTHBCEzVOxRar2wDThw4lNDAKqTS61vtkJLtdkHtug==]",
"CRATES_IO_TOKEN": "EJ[1:+7nLVR8NlnN48zgaJPPXF9JOZDXVNHDZLeARlCFHyRk=:NzN6y0ooXJBYvxB589khepthSxhKFkLB:ZTTFZh2A/kB2SAgjJJAMbwAfanRlzxOCNMVcA2MXBCpQHJeeZGULg+0MLACYswfS]",
"GITHUB_TOKEN": "EJ[1:+7nLVR8NlnN48zgaJPPXF9JOZDXVNHDZLeARlCFHyRk=:iy0Fnxeo0aslTCvgXc5Ddj2ly6ZsQ8gK:GNOOj/kZUJ2rYKxTbLyVKtajWNoGQ3PcChwfEB4HdN18qDHlB96Z7gx01Pcf0qeIHODOWRtxlH4=]",
"INFLUX_DATABASE": "EJ[1:+7nLVR8NlnN48zgaJPPXF9JOZDXVNHDZLeARlCFHyRk=:Ly/TpIRF0oCxmiBWv225S3mX8s6pfQR+:+tXGB2c9rRCVDcgNO1IDOo89]",
"INFLUX_PASSWORD": "EJ[1:+7nLVR8NlnN48zgaJPPXF9JOZDXVNHDZLeARlCFHyRk=:ycrq1uQLoSfI932czD+krUOaJeLWpeq6:2iS7ukp/C7wVD3IT0GvQVcwccWGyLr4UocStF/XiDi0OB/N3YKIKN8SQU4ob1b6StAPZ/XOHmag=]",
"INFLUX_USERNAME": "EJ[1:+7nLVR8NlnN48zgaJPPXF9JOZDXVNHDZLeARlCFHyRk=:35hBKofakZ4Db/u0TOW53RXoNWzJTIcl:HWREcMTrgZ8DGB0ZupgSzNWr/tVyE06P]",
"SOLANA_INSTALL_UPDATE_MANIFEST_KEYPAIR_x86_64_unknown_linux_gnu": "EJ[1:+7nLVR8NlnN48zgaJPPXF9JOZDXVNHDZLeARlCFHyRk=:kRz8CyJYKAg/AiwgLrcRNDJAmlRX2zvX:uV1XV6y2Fb+dN4Z9BIMPBRiNS3n+NL8GlJXyu1i7meIsph1DzfLg4Thcp5Mj9nUsFNLgqQgjnsa5C4XNY/h5AgMSzRrJxVj7RhVTRmDJ5/Vjq6v7wCMRfBOvF3rITsV4zTwWSV8yafFmS+ZQ+QJTRgtYsuoYAUNZ06IEebfDHcuNwws72hEGoD9w43hOLSpyEOmXbtZ9h1lIRxrgsrhYDpBlU5LkhDeTXAX5M5dwYxyquJFRwd5quGDV5DYsCh9bAkbjAyjWYymVJ78U9YJIQHT9izzQqTDlMQN49EbLo7MDIaC7O7HVtb7unDJs+DRejbHacoyWVulqVVwu3GRiZezu8zdjwzGHphMMxOtKQaidnqYgflNp/O01I8wZRgR1alsGcmIhEhI8YV/IvQ==]"
}
}
310 Cargo.lock generated
@ -1967,7 +1967,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "solana"
|
||||
version = "0.12.0"
|
||||
version = "0.12.2"
|
||||
dependencies = [
|
||||
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"bs58 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
@ -1990,24 +1990,24 @@ dependencies = [
|
||||
"rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rayon 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"reqwest 0.9.11 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"ring 0.13.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rocksdb 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"solana-budget-api 0.12.0",
|
||||
"solana-budget-program 0.12.0",
|
||||
"solana-drone 0.12.0",
|
||||
"solana-logger 0.12.0",
|
||||
"solana-metrics 0.12.0",
|
||||
"solana-netutil 0.12.0",
|
||||
"solana-runtime 0.12.0",
|
||||
"solana-sdk 0.12.0",
|
||||
"solana-storage-api 0.12.0",
|
||||
"solana-vote-api 0.12.0",
|
||||
"solana-vote-program 0.12.0",
|
||||
"solana-vote-signer 0.12.0",
|
||||
"solana-budget-api 0.12.2",
|
||||
"solana-budget-program 0.12.2",
|
||||
"solana-client 0.12.2",
|
||||
"solana-drone 0.12.2",
|
||||
"solana-logger 0.12.2",
|
||||
"solana-metrics 0.12.2",
|
||||
"solana-netutil 0.12.2",
|
||||
"solana-runtime 0.12.2",
|
||||
"solana-sdk 0.12.2",
|
||||
"solana-storage-api 0.12.2",
|
||||
"solana-vote-api 0.12.2",
|
||||
"solana-vote-program 0.12.2",
|
||||
"solana-vote-signer 0.12.2",
|
||||
"sys-info 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"tokio 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
@ -2016,86 +2016,104 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "solana-bench-streamer"
|
||||
version = "0.12.0"
|
||||
version = "0.12.2"
|
||||
dependencies = [
|
||||
"clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"solana 0.12.0",
|
||||
"solana-logger 0.12.0",
|
||||
"solana-netutil 0.12.0",
|
||||
"solana 0.12.2",
|
||||
"solana-logger 0.12.2",
|
||||
"solana-netutil 0.12.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "solana-bench-tps"
|
||||
version = "0.12.0"
|
||||
version = "0.12.2"
|
||||
dependencies = [
|
||||
"clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rayon 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"solana 0.12.0",
|
||||
"solana-drone 0.12.0",
|
||||
"solana-logger 0.12.0",
|
||||
"solana-metrics 0.12.0",
|
||||
"solana-sdk 0.12.0",
|
||||
"solana 0.12.2",
|
||||
"solana-client 0.12.2",
|
||||
"solana-drone 0.12.2",
|
||||
"solana-logger 0.12.2",
|
||||
"solana-metrics 0.12.2",
|
||||
"solana-sdk 0.12.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "solana-bpf-programs"
|
||||
version = "0.12.0"
|
||||
version = "0.12.2"
|
||||
dependencies = [
|
||||
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"elf 0.0.10 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"solana-bpfloader 0.12.0",
|
||||
"solana-logger 0.12.0",
|
||||
"solana-runtime 0.12.0",
|
||||
"solana-sdk 0.12.0",
|
||||
"solana-bpfloader 0.12.2",
|
||||
"solana-logger 0.12.2",
|
||||
"solana-runtime 0.12.2",
|
||||
"solana-sdk 0.12.2",
|
||||
"solana_rbpf 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"walkdir 2.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "solana-bpfloader"
|
||||
version = "0.12.0"
|
||||
version = "0.12.2"
|
||||
dependencies = [
|
||||
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"libc 0.2.50 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"solana-logger 0.12.0",
|
||||
"solana-sdk 0.12.0",
|
||||
"solana-logger 0.12.2",
|
||||
"solana-sdk 0.12.2",
|
||||
"solana_rbpf 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "solana-budget-api"
|
||||
version = "0.12.0"
|
||||
version = "0.12.2"
|
||||
dependencies = [
|
||||
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"chrono 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"solana-sdk 0.12.0",
|
||||
"solana-sdk 0.12.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "solana-budget-program"
|
||||
version = "0.12.0"
|
||||
version = "0.12.2"
|
||||
dependencies = [
|
||||
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"chrono 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"solana-budget-api 0.12.0",
|
||||
"solana-logger 0.12.0",
|
||||
"solana-runtime 0.12.0",
|
||||
"solana-sdk 0.12.0",
|
||||
"solana-budget-api 0.12.2",
|
||||
"solana-logger 0.12.2",
|
||||
"solana-runtime 0.12.2",
|
||||
"solana-sdk 0.12.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "solana-client"
|
||||
version = "0.12.2"
|
||||
dependencies = [
|
||||
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"bs58 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"jsonrpc-core 10.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"jsonrpc-http-server 10.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"reqwest 0.9.11 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"solana-logger 0.12.2",
|
||||
"solana-metrics 0.12.2",
|
||||
"solana-netutil 0.12.2",
|
||||
"solana-sdk 0.12.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "solana-drone"
|
||||
version = "0.12.0"
|
||||
version = "0.12.2"
|
||||
dependencies = [
|
||||
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
@ -2104,95 +2122,95 @@ dependencies = [
|
||||
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"solana-logger 0.12.0",
|
||||
"solana-metrics 0.12.0",
|
||||
"solana-sdk 0.12.0",
|
||||
"solana-logger 0.12.2",
|
||||
"solana-metrics 0.12.2",
|
||||
"solana-sdk 0.12.2",
|
||||
"tokio 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "solana-failure"
|
||||
version = "0.12.0"
|
||||
version = "0.12.2"
|
||||
dependencies = [
|
||||
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"solana-runtime 0.12.0",
|
||||
"solana-sdk 0.12.0",
|
||||
"solana-runtime 0.12.2",
|
||||
"solana-sdk 0.12.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "solana-fullnode"
|
||||
version = "0.12.0"
|
||||
version = "0.12.2"
|
||||
dependencies = [
|
||||
"clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"solana 0.12.0",
|
||||
"solana-drone 0.12.0",
|
||||
"solana-logger 0.12.0",
|
||||
"solana-metrics 0.12.0",
|
||||
"solana-netutil 0.12.0",
|
||||
"solana-runtime 0.12.0",
|
||||
"solana-sdk 0.12.0",
|
||||
"solana-vote-api 0.12.0",
|
||||
"solana-vote-signer 0.12.0",
|
||||
"solana 0.12.2",
|
||||
"solana-drone 0.12.2",
|
||||
"solana-logger 0.12.2",
|
||||
"solana-metrics 0.12.2",
|
||||
"solana-netutil 0.12.2",
|
||||
"solana-runtime 0.12.2",
|
||||
"solana-sdk 0.12.2",
|
||||
"solana-vote-api 0.12.2",
|
||||
"solana-vote-signer 0.12.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "solana-genesis"
|
||||
version = "0.12.0"
|
||||
version = "0.12.2"
|
||||
dependencies = [
|
||||
"clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"solana 0.12.0",
|
||||
"solana-sdk 0.12.0",
|
||||
"solana 0.12.2",
|
||||
"solana-sdk 0.12.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "solana-keygen"
|
||||
version = "0.12.0"
|
||||
version = "0.12.2"
|
||||
dependencies = [
|
||||
"clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"dirs 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"solana-sdk 0.12.0",
|
||||
"solana-sdk 0.12.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "solana-ledger-tool"
|
||||
version = "0.12.0"
|
||||
version = "0.12.2"
|
||||
dependencies = [
|
||||
"assert_cmd 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"solana 0.12.0",
|
||||
"solana-logger 0.12.0",
|
||||
"solana-runtime 0.12.0",
|
||||
"solana-sdk 0.12.0",
|
||||
"solana 0.12.2",
|
||||
"solana-logger 0.12.2",
|
||||
"solana-runtime 0.12.2",
|
||||
"solana-sdk 0.12.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "solana-logger"
|
||||
version = "0.12.0"
|
||||
version = "0.12.2"
|
||||
dependencies = [
|
||||
"env_logger 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "solana-metrics"
|
||||
version = "0.12.0"
|
||||
version = "0.12.2"
|
||||
dependencies = [
|
||||
"influx_db_client 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"reqwest 0.9.11 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"solana-sdk 0.12.0",
|
||||
"solana-sdk 0.12.2",
|
||||
"sys-info 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "solana-netutil"
|
||||
version = "0.12.0"
|
||||
version = "0.12.2"
|
||||
dependencies = [
|
||||
"ipnetwork 0.12.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
@ -2201,57 +2219,57 @@ dependencies = [
|
||||
"rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"reqwest 0.9.11 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"socket2 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"solana-logger 0.12.0",
|
||||
"solana-logger 0.12.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "solana-noop"
|
||||
version = "0.12.0"
|
||||
version = "0.12.2"
|
||||
dependencies = [
|
||||
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"solana-logger 0.12.0",
|
||||
"solana-runtime 0.12.0",
|
||||
"solana-sdk 0.12.0",
|
||||
"solana-logger 0.12.2",
|
||||
"solana-runtime 0.12.2",
|
||||
"solana-sdk 0.12.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "solana-replicator"
|
||||
version = "0.12.0"
|
||||
version = "0.12.2"
|
||||
dependencies = [
|
||||
"clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"solana 0.12.0",
|
||||
"solana-logger 0.12.0",
|
||||
"solana-netutil 0.12.0",
|
||||
"solana-sdk 0.12.0",
|
||||
"solana 0.12.2",
|
||||
"solana-logger 0.12.2",
|
||||
"solana-netutil 0.12.2",
|
||||
"solana-sdk 0.12.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "solana-rewards-api"
|
||||
version = "0.12.0"
|
||||
version = "0.12.2"
|
||||
dependencies = [
|
||||
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"solana-sdk 0.12.0",
|
||||
"solana-vote-api 0.12.0",
|
||||
"solana-sdk 0.12.2",
|
||||
"solana-vote-api 0.12.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "solana-rewards-program"
|
||||
version = "0.12.0"
|
||||
version = "0.12.2"
|
||||
dependencies = [
|
||||
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"solana-logger 0.12.0",
|
||||
"solana-rewards-api 0.12.0",
|
||||
"solana-runtime 0.12.0",
|
||||
"solana-sdk 0.12.0",
|
||||
"solana-vote-api 0.12.0",
|
||||
"solana-logger 0.12.2",
|
||||
"solana-rewards-api 0.12.2",
|
||||
"solana-runtime 0.12.2",
|
||||
"solana-sdk 0.12.2",
|
||||
"solana-vote-api 0.12.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "solana-runtime"
|
||||
version = "0.12.0"
|
||||
version = "0.12.2"
|
||||
dependencies = [
|
||||
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"bv 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
@ -2266,19 +2284,18 @@ dependencies = [
|
||||
"serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"solana-budget-api 0.12.0",
|
||||
"solana-logger 0.12.0",
|
||||
"solana-metrics 0.12.0",
|
||||
"solana-sdk 0.12.0",
|
||||
"solana-storage-api 0.12.0",
|
||||
"solana-system-program 0.12.0",
|
||||
"solana-token-api 0.12.0",
|
||||
"solana-vote-api 0.12.0",
|
||||
"solana-budget-api 0.12.2",
|
||||
"solana-logger 0.12.2",
|
||||
"solana-metrics 0.12.2",
|
||||
"solana-sdk 0.12.2",
|
||||
"solana-storage-api 0.12.2",
|
||||
"solana-token-api 0.12.2",
|
||||
"solana-vote-api 0.12.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "solana-sdk"
|
||||
version = "0.12.0"
|
||||
version = "0.12.2"
|
||||
dependencies = [
|
||||
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"bs58 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
@ -2298,97 +2315,85 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "solana-storage-api"
|
||||
version = "0.12.0"
|
||||
version = "0.12.2"
|
||||
dependencies = [
|
||||
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"solana-sdk 0.12.0",
|
||||
"solana-sdk 0.12.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "solana-storage-program"
|
||||
version = "0.12.0"
|
||||
version = "0.12.2"
|
||||
dependencies = [
|
||||
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"solana-logger 0.12.0",
|
||||
"solana-runtime 0.12.0",
|
||||
"solana-sdk 0.12.0",
|
||||
"solana-storage-api 0.12.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "solana-system-program"
|
||||
version = "0.12.0"
|
||||
dependencies = [
|
||||
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"solana-runtime 0.12.0",
|
||||
"solana-sdk 0.12.0",
|
||||
"solana-logger 0.12.2",
|
||||
"solana-runtime 0.12.2",
|
||||
"solana-sdk 0.12.2",
|
||||
"solana-storage-api 0.12.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "solana-token-api"
|
||||
version = "0.12.0"
|
||||
version = "0.12.2"
|
||||
dependencies = [
|
||||
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"solana-sdk 0.12.0",
|
||||
"solana-sdk 0.12.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "solana-token-program"
|
||||
version = "0.12.0"
|
||||
version = "0.12.2"
|
||||
dependencies = [
|
||||
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"solana-logger 0.12.0",
|
||||
"solana-sdk 0.12.0",
|
||||
"solana-logger 0.12.2",
|
||||
"solana-sdk 0.12.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "solana-upload-perf"
|
||||
version = "0.12.0"
|
||||
version = "0.12.2"
|
||||
dependencies = [
|
||||
"serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"solana-metrics 0.12.0",
|
||||
"solana-metrics 0.12.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "solana-vote-api"
|
||||
version = "0.12.0"
|
||||
version = "0.12.2"
|
||||
dependencies = [
|
||||
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"solana-sdk 0.12.0",
|
||||
"solana-sdk 0.12.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "solana-vote-program"
|
||||
version = "0.12.0"
|
||||
version = "0.12.2"
|
||||
dependencies = [
|
||||
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"solana-logger 0.12.0",
|
||||
"solana-metrics 0.12.0",
|
||||
"solana-runtime 0.12.0",
|
||||
"solana-sdk 0.12.0",
|
||||
"solana-vote-api 0.12.0",
|
||||
"solana-logger 0.12.2",
|
||||
"solana-metrics 0.12.2",
|
||||
"solana-runtime 0.12.2",
|
||||
"solana-sdk 0.12.2",
|
||||
"solana-vote-api 0.12.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "solana-vote-signer"
|
||||
version = "0.12.0"
|
||||
version = "0.12.2"
|
||||
dependencies = [
|
||||
"bs58 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
@ -2398,13 +2403,13 @@ dependencies = [
|
||||
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"solana-metrics 0.12.0",
|
||||
"solana-sdk 0.12.0",
|
||||
"solana-metrics 0.12.2",
|
||||
"solana-sdk 0.12.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "solana-wallet"
|
||||
version = "0.12.0"
|
||||
version = "0.12.2"
|
||||
dependencies = [
|
||||
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"bs58 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
@ -2413,18 +2418,20 @@ dependencies = [
|
||||
"dirs 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"solana 0.12.0",
|
||||
"solana-budget-api 0.12.0",
|
||||
"solana-drone 0.12.0",
|
||||
"solana-logger 0.12.0",
|
||||
"solana-sdk 0.12.0",
|
||||
"solana-vote-api 0.12.0",
|
||||
"solana-vote-signer 0.12.0",
|
||||
"solana 0.12.2",
|
||||
"solana-budget-api 0.12.2",
|
||||
"solana-budget-program 0.12.2",
|
||||
"solana-client 0.12.2",
|
||||
"solana-drone 0.12.2",
|
||||
"solana-logger 0.12.2",
|
||||
"solana-sdk 0.12.2",
|
||||
"solana-vote-api 0.12.2",
|
||||
"solana-vote-signer 0.12.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "solana-workspace"
|
||||
version = "0.12.0"
|
||||
version = "0.12.2"
|
||||
dependencies = [
|
||||
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"bs58 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
@ -2434,11 +2441,14 @@ dependencies = [
|
||||
"rayon 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"reqwest 0.9.11 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"solana 0.12.0",
|
||||
"solana-logger 0.12.0",
|
||||
"solana-netutil 0.12.0",
|
||||
"solana-runtime 0.12.0",
|
||||
"solana-sdk 0.12.0",
|
||||
"solana 0.12.2",
|
||||
"solana-budget-program 0.12.2",
|
||||
"solana-client 0.12.2",
|
||||
"solana-logger 0.12.2",
|
||||
"solana-netutil 0.12.2",
|
||||
"solana-runtime 0.12.2",
|
||||
"solana-sdk 0.12.2",
|
||||
"solana-vote-api 0.12.2",
|
||||
"sys-info 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
16 Cargo.toml
@@ -1,7 +1,7 @@
[package]
name = "solana-workspace"
description = "Blockchain, Rebuilt for Scale"
version = "0.12.0"
version = "0.12.2"
documentation = "https://docs.rs/solana"
homepage = "https://solana.com/"
readme = "README.md"
@@ -27,11 +27,14 @@ rand = "0.6.5"
rayon = "1.0.0"
reqwest = "0.9.11"
serde_json = "1.0.39"
solana = { path = "core", version = "0.12.0" }
solana-logger = { path = "logger", version = "0.12.0" }
solana-netutil = { path = "netutil", version = "0.12.0" }
solana-runtime = { path = "runtime", version = "0.12.0" }
solana-sdk = { path = "sdk", version = "0.12.0" }
solana = { path = "core", version = "0.12.2" }
solana-budget-program = { path = "programs/budget", version = "0.12.2" }
solana-client = { path = "client", version = "0.12.2" }
solana-logger = { path = "logger", version = "0.12.2" }
solana-netutil = { path = "netutil", version = "0.12.2" }
solana-runtime = { path = "runtime", version = "0.12.2" }
solana-sdk = { path = "sdk", version = "0.12.2" }
solana-vote-api = { path = "programs/vote_api", version = "0.12.2" }
sys-info = "0.5.6"

[[bench]]
@@ -78,7 +81,6 @@ members = [
    "programs/rewards_api",
    "programs/storage",
    "programs/storage_api",
    "programs/system",
    "programs/vote",
    "programs/vote_api",
    "replicator",
@@ -2,16 +2,16 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-streamer"
version = "0.12.0"
version = "0.12.2"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

[dependencies]
clap = "2.32.0"
solana = { path = "../core", version = "0.12.0" }
solana-logger = { path = "../logger", version = "0.12.0" }
solana-netutil = { path = "../netutil", version = "0.12.0" }
solana = { path = "../core", version = "0.12.2" }
solana-logger = { path = "../logger", version = "0.12.2" }
solana-netutil = { path = "../netutil", version = "0.12.2" }

[features]
cuda = ["solana/cuda"]
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-tps"
version = "0.12.0"
version = "0.12.2"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -11,11 +11,12 @@ homepage = "https://solana.com/"
clap = "2.32.0"
rayon = "1.0.3"
serde_json = "1.0.39"
solana = { path = "../core", version = "0.12.0" }
solana-drone = { path = "../drone", version = "0.12.0" }
solana-logger = { path = "../logger", version = "0.12.0" }
solana-metrics = { path = "../metrics", version = "0.12.0" }
solana-sdk = { path = "../sdk", version = "0.12.0" }
solana = { path = "../core", version = "0.12.2" }
solana-client = { path = "../client", version = "0.12.2" }
solana-drone = { path = "../drone", version = "0.12.2" }
solana-logger = { path = "../logger", version = "0.12.2" }
solana-metrics = { path = "../metrics", version = "0.12.2" }
solana-sdk = { path = "../sdk", version = "0.12.2" }

[features]
cuda = ["solana/cuda"]
@@ -1,9 +1,10 @@
use solana_metrics;

use rayon::prelude::*;
use solana::client::mk_client;
use solana::cluster_info::FULLNODE_PORT_RANGE;
use solana::contact_info::ContactInfo;
use solana::thin_client::ThinClient;
use solana_client::client::create_client;
use solana_client::thin_client::ThinClient;
use solana_drone::drone::request_airdrop_transaction;
use solana_metrics::influxdb;
use solana_sdk::hash::Hash;
@@ -51,7 +52,7 @@ pub fn sample_tx_count(
    v: &ContactInfo,
    sample_period: u64,
) {
    let mut client = mk_client(&v);
    let mut client = create_client(v.client_facing_addr(), FULLNODE_PORT_RANGE);
    let mut now = Instant::now();
    let mut initial_tx_count = client.transaction_count();
    let mut max_tps = 0.0;
@@ -181,7 +182,7 @@ pub fn generate_txs(
    reclaim: bool,
    contact_info: &ContactInfo,
) {
    let mut client = mk_client(contact_info);
    let mut client = create_client(contact_info.client_facing_addr(), FULLNODE_PORT_RANGE);
    let blockhash = client.get_recent_blockhash();
    let tx_count = source.len();
    println!("Signing transactions... {} (reclaim={})", tx_count, reclaim);
@@ -241,7 +242,7 @@ pub fn do_tx_transfers(
    total_tx_sent_count: &Arc<AtomicUsize>,
    thread_batch_sleep_ms: usize,
) {
    let client = mk_client(&contact_info);
    let client = create_client(contact_info.client_facing_addr(), FULLNODE_PORT_RANGE);
    loop {
        if thread_batch_sleep_ms > 0 {
            sleep(Duration::from_millis(thread_batch_sleep_ms as u64));
@@ -2,9 +2,10 @@ mod bench;
mod cli;

use crate::bench::*;
use solana::client::mk_client;
use solana::cluster_info::FULLNODE_PORT_RANGE;
use solana::gen_keys::GenKeys;
use solana::gossip_service::discover;
use solana_client::client::create_client;
use solana_metrics;
use solana_sdk::signature::{Keypair, KeypairUtil};
use std::collections::VecDeque;
@@ -62,8 +63,9 @@ fn main() {
    }
    let cluster_entrypoint = nodes[0].clone(); // Pick the first node, why not?

    let mut client = mk_client(&cluster_entrypoint);
    let mut barrier_client = mk_client(&cluster_entrypoint);
    let mut client = create_client(cluster_entrypoint.client_facing_addr(), FULLNODE_PORT_RANGE);
    let mut barrier_client =
        create_client(cluster_entrypoint.client_facing_addr(), FULLNODE_PORT_RANGE);

    let mut seed = [0u8; 32];
    seed.copy_from_slice(&id.public_key_bytes()[..32]);
@@ -45,7 +45,7 @@ fn check_txs(receiver: &Receiver<WorkingBankEntries>, ref_tx_count: usize) {
#[bench]
#[ignore]
fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
    let num_threads = BankingStage::num_threads() as usize;
    let num_threads = 4;
    // a multiple of packet chunk 2X duplicates to avoid races
    let txes = 192 * 50 * num_threads * 2;
    let mint_total = 1_000_000_000_000;
@@ -137,7 +137,7 @@ fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
#[ignore]
fn bench_banking_stage_multi_programs(bencher: &mut Bencher) {
    let progs = 4;
    let num_threads = BankingStage::num_threads() as usize;
    let num_threads = 4;
    // a multiple of packet chunk 2X duplicates to avoid races
    let txes = 96 * 100 * num_threads * 2;
    let mint_total = 1_000_000_000_000;
@@ -102,7 +102,7 @@ pub fn test_large_invalid_gossip_nodes(
    let cluster = discover(&entry_point_info, num_nodes);

    // Poison the cluster.
    let mut client = mk_client(&entry_point_info);
    let mut client = create_client(entry_point_info.client_facing_addr(), FULLNODE_PORT_RANGE);
    for _ in 0..(num_nodes * 100) {
        client.gossip_push(
            cluster_info::invalid_contact_info()
@@ -112,7 +112,7 @@ pub fn test_large_invalid_gossip_nodes(

    // Force refresh of the active set.
    for node in &cluster {
        let mut client = mk_client(&node);
        let mut client = create_client(node.client_facing_addr(), FULLNODE_PORT_RANGE);
        client.gossip_refresh_active_set();
    }
@@ -124,7 +124,7 @@ The result field will be a JSON object with the following sub fields:

* `lamports`, number of lamports assigned to this account, as a signed 64-bit integer
* `owner`, array of 32 bytes representing the program this account has been assigned to
* `userdata`, array of bytes representing any userdata associated with the account
* `data`, array of bytes representing any data associated with the account
* `executable`, boolean indicating if the account contains a program (and is strictly read-only)
* `loader`, array of 32 bytes representing the loader for this program (if `executable`), otherwise all

@@ -134,7 +134,7 @@ The result field will be a JSON object with the following sub fields:
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getAccountInfo", "params":["2gVkYWexTHR5Hb2aLeQN3tnngvWzisFKXDUPrgMHpdST"]}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":{"executable":false,"loader":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"userdata":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"id":1}
{"jsonrpc":"2.0","result":{"executable":false,"loader":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"id":1}
```

---
@@ -254,7 +254,7 @@ After connect to the RPC PubSub websocket at `ws://<ADDRESS>/`:
---

### accountSubscribe
Subscribe to an account to receive notifications when the lamports or userdata
Subscribe to an account to receive notifications when the lamports or data
for a given account public key changes

##### Parameters:
@@ -274,7 +274,7 @@ for a given account public key changes

##### Notification Format:
```bash
{"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"executable":false,"loader":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"userdata":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"subscription":0}}
{"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"executable":false,"loader":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"subscription":0}}
```

---
@@ -300,7 +300,7 @@ Unsubscribe from account change notifications
---

### programSubscribe
Subscribe to a program to receive notifications when the lamports or userdata
Subscribe to a program to receive notifications when the lamports or data
for a given account owned by the program changes

##### Parameters:
@@ -322,7 +322,7 @@ for a given account owned by the program changes
* `string` - account Pubkey, as base-58 encoded string
* `object` - account info JSON object (see [getAccountInfo](#getaccountinfo) for field details)
```bash
{"jsonrpc":"2.0","method":"programNotification","params":{{"result":["8Rshv2oMkPu5E4opXTRyuyBeZBqQ4S477VG26wUTFxUM",{"executable":false,"lamports":1,"owner":[129,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"userdata":[1,1,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,49,56,45,49,50,45,50,52,84,50,51,58,53,57,58,48,48,90,235,233,39,152,15,44,117,176,41,89,100,86,45,61,2,44,251,46,212,37,35,118,163,189,247,84,27,235,178,62,55,89,0,0,0,0,50,0,0,0,0,0,0,0,235,233,39,152,15,44,117,176,41,89,100,86,45,61,2,44,251,46,212,37,35,118,163,189,247,84,27,235,178,62,45,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}],"subscription":0}}
{"jsonrpc":"2.0","method":"programNotification","params":{{"result":["8Rshv2oMkPu5E4opXTRyuyBeZBqQ4S477VG26wUTFxUM",{"executable":false,"lamports":1,"owner":[129,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"data":[1,1,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,49,56,45,49,50,45,50,52,84,50,51,58,53,57,58,48,48,90,235,233,39,152,15,44,117,176,41,89,100,86,45,61,2,44,251,46,212,37,35,118,163,189,247,84,27,235,178,62,55,89,0,0,0,0,50,0,0,0,0,0,0,0,235,233,39,152,15,44,117,176,41,89,100,86,45,61,2,44,251,46,212,37,35,118,163,189,247,84,27,235,178,62,45,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}],"subscription":0}}
```

---
@@ -40,7 +40,7 @@ retransmitted twice around the network.

4. CrdsValue for vote should look like this ``` Votes(Vec<Transaction>) ```

Each vote transaction should maintain a `wallclock` in its userdata. The merge
Each vote transaction should maintain a `wallclock` in its data. The merge
strategy for Votes will keep the last N set of votes as configured by the local
client. For push/pull the vector is traversed recursively and each Transaction
is treated as an individual CrdsValue with its own local wallclock and
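The "keep the last N votes, ordered by the sender's wallclock" merge described in this design note can be sketched in plain Rust. This is only an illustration of the strategy under stated assumptions; `VoteRecord`, `MAX_VOTES`, and `merge_vote` are hypothetical names, not the actual CrdsValue types in the codebase.

```rust
use std::collections::VecDeque;

/// Hypothetical stand-in for a vote carried in gossip; not the real CrdsValue type.
struct VoteRecord {
    wallclock: u64,   // sender's local wallclock, carried with the vote
    payload: Vec<u8>, // serialized vote transaction
}

const MAX_VOTES: usize = 32; // "last N" as configured by the local client

/// Keep `votes` sorted by wallclock and bounded to the most recent MAX_VOTES entries.
fn merge_vote(votes: &mut VecDeque<VoteRecord>, new: VoteRecord) {
    // Find the first entry that is newer than the incoming vote and insert before it,
    // so the deque stays ordered oldest-to-newest.
    let pos = votes
        .iter()
        .position(|v| v.wallclock > new.wallclock)
        .unwrap_or(votes.len());
    votes.insert(pos, new);
    // Trim from the front so only the most recent MAX_VOTES remain.
    while votes.len() > MAX_VOTES {
        votes.pop_front();
    }
}

fn main() {
    let mut votes = VecDeque::new();
    for wallclock in vec![3u64, 1, 2] {
        merge_vote(&mut votes, VoteRecord { wallclock, payload: vec![] });
    }
    assert_eq!(votes.front().unwrap().wallclock, 1); // oldest kept at the front
    assert_eq!(votes.back().unwrap().wallclock, 3);  // newest at the back
}
```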
@@ -6,7 +6,7 @@ separating program code from the state it operates on, the runtime is able to
choreograph concurrent access. Transactions accessing only credit-only
accounts are executed in parallel whereas transactions accessing writable
accounts are serialized. The runtime interacts with the program through an
entrypoint with a well-defined interface. The userdata stored in an account is
entrypoint with a well-defined interface. The data stored in an account is
an opaque type, an array of bytes. The program has full control over its
contents.

@@ -42,7 +42,7 @@ programs can be executed in parallel.
The runtime enforces the following rules:

1. Only the *owner* program may modify the contents of an account. This means
that upon assignment userdata vector is guaranteed to be zero.
that upon assignment data vector is guaranteed to be zero.

2. Total balances on all the accounts is equal before and after execution of a
transaction.
@@ -59,11 +59,11 @@ accounts.

## SystemProgram Interface

The interface is best described by the `Instruction::userdata` that the user
The interface is best described by the `Instruction::data` that the user
encodes.

* `CreateAccount` - This allows the user to create an account with an allocated
userdata array and assign it to a Program.
data array and assign it to a Program.

* `Assign` - Allows the user to assign an existing account to a program.

@@ -73,10 +73,10 @@ userdata array and assign it to a Program.

For blockchain to function correctly, the program code must be resilient to user
inputs. That is why in this design the program specific code is the only code
that can change the state of the userdata byte array in the Accounts that are
that can change the state of the data byte array in the Accounts that are
assigned to it. It is also the reason why `Assign` or `CreateAccount` must zero
out the userdata. Otherwise there would be no possible way for the program to
distinguish the recently assigned account userdata from a natively generated
out the data. Otherwise there would be no possible way for the program to
distinguish the recently assigned account data from a natively generated
state transition without some additional metadata from the runtime to indicate
that this memory is assigned instead of natively generated.

@@ -94,12 +94,12 @@ instruction can be composed into a single transaction with the call to the
program itself.

* `CreateAccount` and `Assign` guarantee that when account is assigned to the
program, the Account's userdata is zero initialized.
program, the Account's data is zero initialized.

* Once assigned to program an Account cannot be reassigned.

* Runtime guarantees that a program's code is the only code that can modify
Account userdata that the Account is assigned to.
Account data that the Account is assigned to.

* Runtime guarantees that the program can only spend lamports that are in
accounts that are assigned to it.
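A minimal sketch of the two guarantees described in this doc change (owner-only writes, and zero-initialized data on assignment) in plain Rust. The `Account` and `Pubkey` types below are simplified stand-ins for illustration only, not the solana-sdk definitions.

```rust
/// Simplified stand-ins for illustration; not the solana-sdk types.
#[derive(Clone, PartialEq, Debug)]
struct Pubkey([u8; 32]);

struct Account {
    lamports: u64,
    data: Vec<u8>,
    owner: Pubkey,
}

impl Account {
    /// Assigning an account to a new owner zeroes its data, so the new owner
    /// can never mistake leftover bytes for state it generated itself.
    fn assign(&mut self, new_owner: Pubkey) {
        self.owner = new_owner;
        self.data = vec![0; self.data.len()];
    }

    /// Only the owning program may mutate the data array.
    fn write_data(&mut self, caller: &Pubkey, bytes: &[u8]) -> Result<(), &'static str> {
        if &self.owner != caller {
            return Err("only the owner program may modify account data");
        }
        self.data[..bytes.len()].copy_from_slice(bytes);
        Ok(())
    }
}

fn main() {
    let system = Pubkey([0; 32]);
    let budget = Pubkey([1; 32]);
    let mut account = Account { lamports: 1, data: vec![0; 8], owner: system.clone() };

    account.assign(budget.clone());
    assert!(account.write_data(&system, b"nope").is_err()); // non-owner is rejected
    assert!(account.write_data(&budget, b"ok").is_ok());    // owner may write
    println!("lamports: {}, data: {:?}", account.lamports, account.data);
}
```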
@@ -2,7 +2,7 @@ steps:
  - command: "ci/shellcheck.sh"
    name: "shellcheck"
    timeout_in_minutes: 5
  - command: "ci/docker-run.sh solanalabs/rust:1.32.0 ci/test-checks.sh"
  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-checks.sh"
    name: "checks"
    timeout_in_minutes: 15
  - wait
@@ -14,10 +14,10 @@ steps:
  - command: "ci/test-bench.sh"
    name: "bench"
    timeout_in_minutes: 20
  - command: "ci/docker-run.sh solanalabs/rust:1.32.0 ci/test-stable.sh"
  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-stable.sh"
    name: "stable"
    timeout_in_minutes: 20
  - command: "ci/docker-run.sh solanalabs/rust-nightly:2019-01-31 ci/test-coverage.sh"
  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-coverage.sh"
    name: "coverage"
    timeout_in_minutes: 20
  # TODO: Fix and re-enable test-large-network.sh
@@ -4,11 +4,9 @@ ARG date
RUN set -x \
    && rustup install nightly-$date \
    && rustup show \
    && mv /usr/local/rustup/toolchains/nightly-$date-* \
        /usr/local/rustup/toolchains/nightly-x86_64-unknown-linux-gnu \
    && rustup show \
    && rustc --version \
    && cargo --version \
    && rustc +nightly --version \
    && cargo +nightly --version
    && rustc +nightly-$date --version \
    && cargo +nightly-$date --version
@@ -19,7 +19,7 @@ To update the pinned version:
   to confirm the new nightly image builds. Fix any issues as needed
1. Run `docker login` to enable pushing images to Docker Hub, if you're authorized.
1. Run `CI=true ci/docker-rust-nightly/build.sh YYYY-MM-DD` to push the new nightly image to dockerhub.com.
1. Modify the `solanalabs/rust-nightly:YYYY-MM-DD` reference in `ci/buildkite.yml` from the previous to
1. Modify the `solanalabs/rust-nightly:YYYY-MM-DD` reference in `ci/rust-version.sh` from the previous to
   new *YYYY-MM-DD* value, send a PR with this change and any codebase adjustments needed.

## Troubleshooting
@@ -24,9 +24,10 @@ fi

build() {
  $genPipeline && return
  ci/version-check-with-upgrade.sh stable
  source ci/rust-version.sh stable

  _ scripts/ulimit-n.sh
  _ cargo build --all
  _ cargo +$rust_stable build --all
}

runTest() {
@@ -55,7 +55,7 @@ while getopts "ch?i:k:brxR" opt; do
    restartInterval=$OPTARG
    ;;
  b)
    maybeNoLeaderRotation="--no-leader-rotation"
    maybeNoLeaderRotation="--only-bootstrap-stake"
    ;;
  x)
    extraNodes=$((extraNodes + 1))
@@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -e
cd "$(dirname "$0")/.."

source ci/semver_bash/semver.sh

# List of internal crates to publish
#
@@ -16,14 +16,17 @@ CRATES=(
  sdk
  keygen
  metrics
  client
  drone
  programs/{budget,bpf_loader,native_loader,noop,system,vote}
  programs/{budget_api,storage_api,token_api,vote_api}
  runtime
  programs/{budget,bpf_loader,storage,token,vote}
  vote-signer
  core
  fullnode
  genesis
  ledger-tool
  wallet
  runtime
)

@@ -33,6 +36,9 @@ CRATES=(
  exit 0
}

semverParseInto "$TRIGGERED_BUILDKITE_TAG" MAJOR MINOR PATCH SPECIAL
expectedCrateVersion="$MAJOR.$MINOR.$PATCH$SPECIAL"

[[ -n "$CRATES_IO_TOKEN" ]] || {
  echo CRATES_IO_TOKEN undefined
  exit 1
@@ -46,13 +52,17 @@ for crate in "${CRATES[@]}"; do
    exit 1
  fi
  echo "-- $crate"
  # TODO: Ensure the published version matches the contents of
  # TRIGGERED_BUILDKITE_TAG
  grep -q "^version = \"$expectedCrateVersion\"$" Cargo.toml || {
    echo "Error: $crate/Cargo.toml version is not $expectedCrateVersion"
    exit 1
  }

  (
    set -x
    # TODO: the rocksdb package does not build with the stock rust docker image,
    # so use the solana rust docker image until this is resolved upstream
    ci/docker-run.sh solanalabs/rust:1.31.0 bash -exc "cd $crate; $cargoCommand"
    source ci/rust-version.sh
    ci/docker-run.sh "$rust_stable_docker_image" bash -exc "cd $crate; $cargoCommand"
    #ci/docker-run.sh rust bash -exc "cd $crate; $cargoCommand"
  )
done
@@ -45,11 +45,7 @@ beta)
  CHANNEL_BRANCH=$BETA_CHANNEL
  ;;
stable)
  if [[ -n $BETA_CHANNEL_LATEST_TAG ]]; then
    CHANNEL_BRANCH=$BETA_CHANNEL
  else
    CHANNEL_BRANCH=$STABLE_CHANNEL
  fi
  CHANNEL_BRANCH=$STABLE_CHANNEL
  ;;
*)
  echo "Error: Invalid PUBLISH_CHANNEL=$PUBLISH_CHANNEL"
@@ -11,10 +11,13 @@ fi

eval "$(ci/channel-info.sh)"

TAG=
if [[ -n "$BUILDKITE_TAG" ]]; then
  CHANNEL_OR_TAG=$BUILDKITE_TAG
  TAG="$BUILDKITE_TAG"
elif [[ -n "$TRIGGERED_BUILDKITE_TAG" ]]; then
  CHANNEL_OR_TAG=$TRIGGERED_BUILDKITE_TAG
  TAG="$TRIGGERED_BUILDKITE_TAG"
else
  CHANNEL_OR_TAG=$CHANNEL
fi
@@ -24,18 +27,34 @@ if [[ -z $CHANNEL_OR_TAG ]]; then
  exit 1
fi

case "$(uname)" in
Darwin)
  TARGET=x86_64-apple-darwin
  ;;
Linux)
  TARGET=x86_64-unknown-linux-gnu
  ;;
*)
  TARGET=unknown-unknown-unknown
  ;;
esac

echo --- Creating tarball
(
  set -x
  rm -rf solana-release/
  mkdir solana-release/
  (
    echo "$CHANNEL_OR_TAG"
    git rev-parse HEAD
  ) > solana-release/version.txt

  scripts/cargo-install-all.sh solana-release
  COMMIT="$(git rev-parse HEAD)"

  (
    echo "channel: $CHANNEL"
    echo "commit: $COMMIT"
    echo "target: $TARGET"
  ) > solana-release/version.yml

  source ci/rust-version.sh stable
  scripts/cargo-install-all.sh +"$rust_stable" solana-release

  ./fetch-perf-libs.sh
  # shellcheck source=/dev/null
@@ -45,20 +64,22 @@ echo --- Creating tarball
    cargo install --path . --features=cuda --root ../solana-release-cuda
  )
  cp solana-release-cuda/bin/solana-fullnode solana-release/bin/solana-fullnode-cuda
  cp -a scripts multinode-demo solana-release/

  tar jvcf solana-release.tar.bz2 solana-release/
  tar jvcf solana-release-$TARGET.tar.bz2 solana-release/
)

echo --- Saving build artifacts
source ci/upload-ci-artifact.sh
upload-ci-artifact solana-release.tar.bz2
upload-ci-artifact solana-release-$TARGET.tar.bz2

if [[ -n $DO_NOT_PUBLISH_TAR ]]; then
  echo Skipped due to DO_NOT_PUBLISH_TAR
  exit 0
fi

echo --- AWS S3 Store
file=solana-release-$TARGET.tar.bz2
echo --- AWS S3 Store: $file
(
  set -x
  $DRYRUN docker run \
@@ -67,11 +88,14 @@ echo --- AWS S3 Store
    --env AWS_SECRET_ACCESS_KEY \
    --volume "$PWD:/solana" \
    eremite/aws-cli:2018.12.18 \
    /usr/bin/s3cmd --acl-public put /solana/solana-release.tar.bz2 \
      s3://solana-release/"$CHANNEL_OR_TAG"/solana-release.tar.bz2
    /usr/bin/s3cmd --acl-public put /solana/"$file" s3://solana-release/"$CHANNEL_OR_TAG"/"$file"

  echo Published to:
  $DRYRUN ci/format-url.sh http://solana-release.s3.amazonaws.com/"$CHANNEL_OR_TAG"/solana-release.tar.bz2
  $DRYRUN ci/format-url.sh http://solana-release.s3.amazonaws.com/"$CHANNEL_OR_TAG"/"$file"
)

if [[ -n $TAG ]]; then
  ci/upload-github-release-asset.sh $file
fi

echo --- ok
45 ci/rust-version.sh Normal file
@@ -0,0 +1,45 @@
#
# This file maintains the rust versions for use by CI.
#
# Build with stable rust, updating the stable toolchain if necessary:
#   $ source ci/rust-version.sh stable
#   $ cargo +"$rust_stable" build
#
# Build with nightly rust, updating the nightly toolchain if necessary:
#   $ source ci/rust-version.sh nightly
#   $ cargo +"$rust_nightly" build
#
# Obtain the environment variables without any automatic toolchain updating:
#   $ source ci/rust-version.sh
#

export rust_stable=1.32.0
export rust_stable_docker_image=solanalabs/rust:1.32.0

export rust_nightly=nightly-2019-03-14
export rust_nightly_docker_image=solanalabs/rust-nightly:2019-03-14

[[ -z $1 ]] || (

  rustup_install() {
    declare toolchain=$1
    if ! cargo +"$toolchain" -V; then
      rustup install "$toolchain"
      cargo +"$toolchain" -V
    fi
  }

  set -e
  cd "$(dirname "${BASH_SOURCE[0]}")"
  case $1 in
  stable)
    rustup_install "$rust_stable"
    ;;
  nightly)
    rustup_install "$rust_nightly"
    ;;
  *)
    echo "Note: ignoring unknown argument: $1"
    ;;
  esac
)
@@ -24,7 +24,7 @@ source ci/_
source ci/upload-ci-artifact.sh

eval "$(ci/channel-info.sh)"
ci/version-check-with-upgrade.sh nightly
source ci/rust-version.sh nightly

set -o pipefail
export RUST_BACKTRACE=1
@@ -39,7 +39,7 @@ fi

BENCH_FILE=bench_output.log
BENCH_ARTIFACT=current_bench_results.log
_ cargo +nightly bench ${V:+--verbose} \
_ cargo +$rust_nightly bench ${V:+--verbose} \
  -- -Z unstable-options --format=json | tee "$BENCH_FILE"

# Run bpf benches
@@ -47,11 +47,11 @@ echo --- program/bpf
(
  set -x
  cd programs/bpf
  cargo +nightly bench ${V:+--verbose} --features=bpf_c \
  cargo +$rust_nightly bench ${V:+--verbose} --features=bpf_c \
    -- -Z unstable-options --format=json --nocapture | tee -a ../../../"$BENCH_FILE"
)

_ cargo +nightly run --release --package solana-upload-perf \
_ cargo +$rust_nightly run --release --package solana-upload-perf \
  -- "$BENCH_FILE" "$TARGET_BRANCH" "$UPLOAD_METRICS" > "$BENCH_ARTIFACT"

upload-ci-artifact "$BENCH_ARTIFACT"
@@ -4,14 +4,14 @@ set -e
cd "$(dirname "$0")/.."

source ci/_
ci/version-check.sh stable
source ci/rust-version.sh stable

export RUST_BACKTRACE=1
export RUSTFLAGS="-D warnings"

_ cargo fmt --all -- --check
_ cargo clippy --all -- --version
_ cargo clippy --all -- --deny=warnings
_ cargo +"$rust_stable" fmt --all -- --check
_ cargo +"$rust_stable" clippy --all -- --version
_ cargo +"$rust_stable" clippy --all -- --deny=warnings
_ ci/audit.sh
_ ci/nits.sh
_ book/build.sh
@@ -21,7 +21,6 @@ ci/affects-files.sh \
}

source ci/upload-ci-artifact.sh
ci/version-check-with-upgrade.sh nightly
source scripts/ulimit-n.sh

scripts/coverage.sh
@@ -4,9 +4,7 @@ set -e
here=$(dirname "$0")
cd "$here"/..

# This job doesn't run within a container, try once to upgrade tooling on a
# version check failure
ci/version-check-with-upgrade.sh stable
source ci/rust-version.sh stable

export RUST_BACKTRACE=1

@@ -39,4 +37,4 @@ fi

set -x
export SOLANA_DYNAMIC_NODES=120
exec cargo test --release --features=erasure test_multi_node_dynamic_network -- --ignored
exec cargo +"$rust_stable" test --release --features=erasure test_multi_node_dynamic_network -- --ignored
@ -10,7 +10,8 @@ annotate() {
}
}

ci/version-check-with-upgrade.sh stable
source ci/rust-version.sh stable

export RUST_BACKTRACE=1
export RUSTFLAGS="-D warnings"
source scripts/ulimit-n.sh
@ -24,9 +25,9 @@ case $testName in
test-stable)
  echo "Executing $testName"

  _ cargo build --all ${V:+--verbose}
  _ cargo test --all ${V:+--verbose} -- --nocapture --test-threads=1
  _ cargo test --manifest-path programs/system/Cargo.toml
  _ cargo +"$rust_stable" build --all ${V:+--verbose}
  _ cargo +"$rust_stable" test --all ${V:+--verbose} -- --nocapture --test-threads=1
  _ cargo +"$rust_stable" test --manifest-path runtime/Cargo.toml
  ;;
test-stable-perf)
  echo "Executing $testName"
@ -48,7 +49,9 @@ test-stable-perf)
  # BPF program tests
  _ make -C programs/bpf/c tests
  _ programs/bpf/rust/noop/build.sh # Must be built out of band
  _ cargo test --manifest-path programs/bpf/Cargo.toml --no-default-features --features=bpf_c,bpf_rust
  _ cargo +"$rust_stable" test \
    --manifest-path programs/bpf/Cargo.toml \
    --no-default-features --features=bpf_c,bpf_rust

  # Run root package tests with these features
  ROOT_FEATURES=erasure,chacha
@ -67,9 +70,9 @@ test-stable-perf)
  fi

  # Run root package library tests
  _ cargo build --all ${V:+--verbose} --features="$ROOT_FEATURES"
  _ cargo test --all --lib ${V:+--verbose} --features="$ROOT_FEATURES" -- --nocapture --test-threads=1
  _ cargo test --manifest-path programs/system/Cargo.toml
  _ cargo +"$rust_stable" build --all ${V:+--verbose} --features="$ROOT_FEATURES"
  _ cargo +"$rust_stable" test --all --lib ${V:+--verbose} --features="$ROOT_FEATURES" -- --nocapture --test-threads=1
  _ cargo +"$rust_stable" test --manifest-path runtime/Cargo.toml

  # Run root package integration tests
  for test in tests/*.rs; do
@ -77,7 +80,7 @@ test-stable-perf)
    test=${test%.rs} # basename x .rs
    (
      export RUST_LOG="$test"=trace,$RUST_LOG
      _ cargo test --all ${V:+--verbose} --features="$ROOT_FEATURES" --test="$test" \
      _ cargo +"$rust_stable" test --all ${V:+--verbose} --features="$ROOT_FEATURES" --test="$test" \
        -- --test-threads=1 --nocapture
    )
  done

@ -64,6 +64,10 @@ EOF
  exit 0
fi

if [[ -n $TESTNET_DB_HOST ]]; then
  SOLANA_METRICS_PARTIAL_CONFIG="host=$TESTNET_DB_HOST,$SOLANA_METRICS_PARTIAL_CONFIG"
fi

export SOLANA_METRICS_CONFIG="db=$TESTNET,$SOLANA_METRICS_PARTIAL_CONFIG"
echo "SOLANA_METRICS_CONFIG: $SOLANA_METRICS_CONFIG"
source scripts/configure-metrics.sh
@ -81,13 +85,8 @@ testnet-beta|testnet-beta-perf)
  CHANNEL_BRANCH=$BETA_CHANNEL
  ;;
testnet|testnet-perf)
  if [[ -n $BETA_CHANNEL_LATEST_TAG ]]; then
    CHANNEL_OR_TAG=$BETA_CHANNEL_LATEST_TAG
    CHANNEL_BRANCH=$BETA_CHANNEL
  else
    CHANNEL_OR_TAG=$STABLE_CHANNEL_LATEST_TAG
    CHANNEL_BRANCH=$STABLE_CHANNEL
  fi
  CHANNEL_OR_TAG=$STABLE_CHANNEL_LATEST_TAG
  CHANNEL_BRANCH=$STABLE_CHANNEL
  ;;
*)
  echo "Error: Invalid TESTNET=$TESTNET"
@ -107,6 +106,7 @@ steps:
    env:
      TESTNET: "$TESTNET"
      TESTNET_OP: "$TESTNET_OP"
      TESTNET_DB_HOST: "$TESTNET_DB_HOST"
EOF
  ) | buildkite-agent pipeline upload
  exit 0
@ -285,27 +285,16 @@ stop)
  stop
  ;;
update-or-restart)
  if start "" update; then
    echo Update successful
  else
    echo "+++ Update failed, restarting the network"
    $metricsWriteDatapoint "testnet-manager update-failure=1"
    start
  fi
  echo "+++ Restarting the network"
  start
  ;;
sanity-or-restart)
  if sanity; then
    echo Pass
  else
    echo "+++ Sanity failed, updating the network"
    echo "+++ Sanity failed, restarting the network"
    $metricsWriteDatapoint "testnet-manager sanity-failure=1"
    if start "" update; then
      echo Update successful
    else
      echo "+++ Update failed, restarting the network"
      $metricsWriteDatapoint "testnet-manager update-failure=1"
      start
    fi
    start
  fi
  ;;
esac

50 ci/upload-github-release-asset.sh Executable file
@ -0,0 +1,50 @@
#!/usr/bin/env bash
#
# Uploads one or more files to a github release
#
# Prerequisites
# 1) GITHUB_TOKEN defined in the environment
# 2) TAG defined in the environment
#
set -e

REPO_SLUG=solana-labs/solana

if [[ -z $1 ]]; then
  echo No files specified
  exit 1
fi

if [[ -z $GITHUB_TOKEN ]]; then
  echo Error: GITHUB_TOKEN not defined
  exit 1
fi

if [[ -n $BUILDKITE_TAG ]]; then
  TAG=$BUILDKITE_TAG
elif [[ -n $TRIGGERED_BUILDKITE_TAG ]]; then
  TAG=$TRIGGERED_BUILDKITE_TAG
fi

if [[ -z $TAG ]]; then
  echo Error: TAG not defined
  exit 1
fi

releaseId=$( \
  curl -s "https://api.github.com/repos/$REPO_SLUG/releases/tags/$TAG" \
  | grep -m 1 \"id\": \
  | sed -ne 's/^[^0-9]*\([0-9]*\),$/\1/p' \
)
echo "Github release id for $TAG is $releaseId"

for file in "$@"; do
  echo "--- Uploading $file to tag $TAG of $REPO_SLUG"
  curl \
    --data-binary @"$file" \
    -H "Authorization: token $GITHUB_TOKEN" \
    -H "Content-Type: application/octet-stream" \
    "https://uploads.github.com/repos/$REPO_SLUG/releases/$releaseId/assets?name=$(basename "$file")"
  echo
done

@ -1,10 +0,0 @@
#!/usr/bin/env bash
set -e

cd "$(dirname "$0")"

channel=${1:-stable}
if ! ./version-check.sh "$channel"; then
  rustup install "$channel"
  ./version-check.sh "$channel"
fi
@ -1,37 +0,0 @@
#!/usr/bin/env bash
set -e

require() {
  declare expectedProgram="$1"
  declare expectedVersion="$2"
  shift 2

  read -r program version _ < <($expectedProgram "$@" -V)

  declare ok=true
  [[ $program = "$expectedProgram" ]] || ok=false
  [[ $version =~ $expectedVersion ]] || ok=false

  echo "Found $program $version"
  if ! $ok; then
    echo Error: expected "$expectedProgram $expectedVersion"
    exit 1
  fi
}

case ${1:-stable} in
nightly)
  require rustc 1.34.[0-9]+-nightly +nightly
  require cargo 1.34.[0-9]+-nightly +nightly
  ;;
stable)
  require rustc 1.32.[0-9]+
  require cargo 1.32.[0-9]+
  ;;
*)
  echo Error: unknown argument: "$1"
  exit 1
  ;;
esac

exit 0
24 client/Cargo.toml Normal file
@ -0,0 +1,24 @@
[package]
name = "solana-client"
version = "0.12.2"
description = "Solana Client"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
homepage = "https://solana.com/"
license = "Apache-2.0"
edition = "2018"

[dependencies]
bincode = "1.1.2"
bs58 = "0.2.0"
log = "0.4.2"
reqwest = "0.9.11"
serde_json = "1.0.39"
solana-metrics = { path = "../metrics", version = "0.12.2" }
solana-netutil = { path = "../netutil", version = "0.12.2" }
solana-sdk = { path = "../sdk", version = "0.12.2" }

[dev-dependencies]
jsonrpc-core = "10.1.0"
jsonrpc-http-server = "10.1.0"
solana-logger = { path = "../logger", version = "0.12.2" }
17 client/src/client.rs Normal file
@ -0,0 +1,17 @@
use crate::thin_client::ThinClient;
use std::net::SocketAddr;
use std::time::Duration;

pub fn create_client((rpc, tpu): (SocketAddr, SocketAddr), range: (u16, u16)) -> ThinClient {
    let (_, transactions_socket) = solana_netutil::bind_in_range(range).unwrap();
    ThinClient::new(rpc, tpu, transactions_socket)
}

pub fn create_client_with_timeout(
    (rpc, tpu): (SocketAddr, SocketAddr),
    range: (u16, u16),
    timeout: Duration,
) -> ThinClient {
    let (_, transactions_socket) = solana_netutil::bind_in_range(range).unwrap();
    ThinClient::new_with_timeout(rpc, tpu, transactions_socket, timeout)
}
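The two constructors above differ only in whether a receive timeout is applied to the client's sockets; the caller now supplies the node's (rpc, tpu) address pair and a local port range instead of a ContactInfo, which is what lets this code live outside the core crate. A minimal usage sketch follows; the addresses and port range are illustrative assumptions, not values taken from this diff.

use std::net::SocketAddr;
use std::time::Duration;

use solana_client::client::{create_client, create_client_with_timeout};
use solana_client::thin_client::ThinClient;

fn connect() -> (ThinClient, ThinClient) {
    // Hypothetical RPC and TPU addresses of the node to contact.
    let rpc: SocketAddr = "127.0.0.1:8899".parse().unwrap();
    let tpu: SocketAddr = "127.0.0.1:8900".parse().unwrap();

    // Illustrative local port range for the transaction socket; the callers
    // in this diff pass the core crate's FULLNODE_PORT_RANGE instead.
    let range = (8000u16, 10_000u16);

    // One client that blocks on responses, and one that gives up after 2s.
    let client = create_client((rpc, tpu), range);
    let timed = create_client_with_timeout((rpc, tpu), range, Duration::from_secs(2));
    (client, timed)
}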
4 client/src/lib.rs Normal file
@ -0,0 +1,4 @@
pub mod client;
pub mod rpc_mock;
pub mod rpc_request;
pub mod thin_client;
@ -1,7 +1,7 @@
|
||||
// Implementation of RpcRequestHandler trait for testing Rpc requests without i/o
|
||||
|
||||
use crate::rpc_request::{RpcRequest, RpcRequestHandler};
|
||||
use serde_json::{Number, Value};
|
||||
use serde_json::{json, Number, Value};
|
||||
use solana_sdk::hash::Hash;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_sdk::signature::{Keypair, KeypairUtil};
|
@ -1,6 +1,7 @@
|
||||
use log::*;
|
||||
use reqwest;
|
||||
use reqwest::header::CONTENT_TYPE;
|
||||
use serde_json::{self, Value};
|
||||
use serde_json::{json, Value};
|
||||
use solana_sdk::timing::{DEFAULT_TICKS_PER_SLOT, NUM_TICKS_PER_SECOND};
|
||||
use std::net::SocketAddr;
|
||||
use std::thread::sleep;
|
||||
@ -203,7 +204,7 @@ mod tests {
|
||||
use jsonrpc_core::{Error, IoHandler, Params};
|
||||
use jsonrpc_http_server::{AccessControlAllowOrigin, DomainsValidation, ServerBuilder};
|
||||
use serde_json::Number;
|
||||
use std::net::Ipv4Addr;
|
||||
use solana_logger;
|
||||
use std::sync::mpsc::channel;
|
||||
use std::thread;
|
||||
|
||||
@ -239,7 +240,7 @@ mod tests {
|
||||
fn test_make_rpc_request() {
|
||||
let (sender, receiver) = channel();
|
||||
thread::spawn(move || {
|
||||
let rpc_addr = socketaddr!(0, 0);
|
||||
let rpc_addr = "0.0.0.0:0".parse().unwrap();
|
||||
let mut io = IoHandler::default();
|
||||
// Successful request
|
||||
io.add_method("getBalance", |_params: Params| {
|
||||
@ -298,7 +299,7 @@ mod tests {
|
||||
// 2. Tell the client to start using it
|
||||
// 3. Delay for 1.5 seconds before starting the server to ensure the client will fail
|
||||
// and need to retry
|
||||
let rpc_addr = socketaddr!(0, 4242);
|
||||
let rpc_addr: SocketAddr = "0.0.0.0:4242".parse().unwrap();
|
||||
sender.send(rpc_addr.clone()).unwrap();
|
||||
sleep(Duration::from_millis(1500));
|
||||
|
@ -3,17 +3,16 @@
|
||||
//! messages to the network directly. The binary encoding of its messages are
|
||||
//! unstable and may change in future releases.
|
||||
|
||||
use crate::contact_info::ContactInfo;
|
||||
use crate::fullnode::{Fullnode, FullnodeConfig};
|
||||
use crate::packet::PACKET_DATA_SIZE;
|
||||
use crate::rpc_request::{RpcClient, RpcRequest, RpcRequestHandler};
|
||||
use bincode::serialize_into;
|
||||
use bs58;
|
||||
use serde_json;
|
||||
use log::*;
|
||||
use serde_json::json;
|
||||
use solana_metrics;
|
||||
use solana_metrics::influxdb;
|
||||
use solana_sdk::account::Account;
|
||||
use solana_sdk::hash::Hash;
|
||||
use solana_sdk::packet::PACKET_DATA_SIZE;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_sdk::signature::{Keypair, KeypairUtil, Signature};
|
||||
use solana_sdk::system_transaction::SystemTransaction;
|
||||
@ -22,7 +21,6 @@ use solana_sdk::transaction::Transaction;
|
||||
use std;
|
||||
use std::io;
|
||||
use std::net::{SocketAddr, UdpSocket};
|
||||
use std::sync::Arc;
|
||||
use std::thread::sleep;
|
||||
use std::time::Duration;
|
||||
use std::time::Instant;
|
||||
@ -144,7 +142,7 @@ impl ThinClient {
|
||||
result
|
||||
}
|
||||
|
||||
pub fn get_account_userdata(&mut self, pubkey: &Pubkey) -> io::Result<Option<Vec<u8>>> {
|
||||
pub fn get_account_data(&mut self, pubkey: &Pubkey) -> io::Result<Option<Vec<u8>>> {
|
||||
let params = json!([format!("{}", pubkey)]);
|
||||
let response =
|
||||
self.rpc_client
|
||||
@ -153,13 +151,13 @@ impl ThinClient {
|
||||
Ok(account_json) => {
|
||||
let account: Account =
|
||||
serde_json::from_value(account_json).expect("deserialize account");
|
||||
Ok(Some(account.userdata))
|
||||
Ok(Some(account.data))
|
||||
}
|
||||
Err(error) => {
|
||||
debug!("get_account_userdata failed: {:?}", error);
|
||||
debug!("get_account_data failed: {:?}", error);
|
||||
Err(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
"get_account_userdata failed",
|
||||
"get_account_data failed",
|
||||
))
|
||||
}
|
||||
}
|
||||
@ -411,211 +409,3 @@ pub fn retry_get_balance(
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
pub fn new_fullnode() -> (Fullnode, ContactInfo, Keypair, String) {
|
||||
use crate::blocktree::create_new_tmp_ledger;
|
||||
use crate::cluster_info::Node;
|
||||
use crate::fullnode::Fullnode;
|
||||
use solana_sdk::genesis_block::GenesisBlock;
|
||||
use solana_sdk::signature::KeypairUtil;
|
||||
|
||||
let node_keypair = Arc::new(Keypair::new());
|
||||
let node = Node::new_localhost_with_pubkey(&node_keypair.pubkey());
|
||||
let contact_info = node.info.clone();
|
||||
|
||||
let (genesis_block, mint_keypair) = GenesisBlock::new_with_leader(10_000, &contact_info.id, 42);
|
||||
let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_block);
|
||||
|
||||
let voting_keypair = Keypair::new();
|
||||
let node = Fullnode::new(
|
||||
node,
|
||||
&node_keypair,
|
||||
&ledger_path,
|
||||
&voting_keypair.pubkey(),
|
||||
voting_keypair,
|
||||
None,
|
||||
&FullnodeConfig::default(),
|
||||
);
|
||||
|
||||
(node, contact_info, mint_keypair, ledger_path)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::client::mk_client;
|
||||
use crate::gossip_service::discover;
|
||||
use bincode::{deserialize, serialize};
|
||||
use solana_sdk::system_instruction::SystemInstruction;
|
||||
use solana_vote_api::vote_state::VoteState;
|
||||
use solana_vote_api::vote_transaction::VoteTransaction;
|
||||
use std::fs::remove_dir_all;
|
||||
|
||||
#[test]
|
||||
fn test_thin_client_basic() {
|
||||
solana_logger::setup();
|
||||
let (server, leader_data, alice, ledger_path) = new_fullnode();
|
||||
let bob_pubkey = Keypair::new().pubkey();
|
||||
discover(&leader_data.gossip, 1).unwrap();
|
||||
|
||||
let mut client = mk_client(&leader_data);
|
||||
|
||||
let transaction_count = client.transaction_count();
|
||||
assert_eq!(transaction_count, 0);
|
||||
|
||||
let blockhash = client.get_recent_blockhash();
|
||||
info!("test_thin_client blockhash: {:?}", blockhash);
|
||||
|
||||
let signature = client
|
||||
.transfer(500, &alice, &bob_pubkey, &blockhash)
|
||||
.unwrap();
|
||||
info!("test_thin_client signature: {:?}", signature);
|
||||
client.poll_for_signature(&signature).unwrap();
|
||||
|
||||
let balance = client.get_balance(&bob_pubkey);
|
||||
assert_eq!(balance.unwrap(), 500);
|
||||
|
||||
let transaction_count = client.transaction_count();
|
||||
assert_eq!(transaction_count, 1);
|
||||
server.close().unwrap();
|
||||
remove_dir_all(ledger_path).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[ignore]
|
||||
fn test_bad_sig() {
|
||||
solana_logger::setup();
|
||||
let (server, leader_data, alice, ledger_path) = new_fullnode();
|
||||
let bob_pubkey = Keypair::new().pubkey();
|
||||
discover(&leader_data.gossip, 1).unwrap();
|
||||
|
||||
let mut client = mk_client(&leader_data);
|
||||
|
||||
let blockhash = client.get_recent_blockhash();
|
||||
|
||||
let tx = SystemTransaction::new_account(&alice, &bob_pubkey, 500, blockhash, 0);
|
||||
|
||||
let _sig = client.transfer_signed(&tx).unwrap();
|
||||
|
||||
let blockhash = client.get_recent_blockhash();
|
||||
|
||||
let mut tr2 = SystemTransaction::new_account(&alice, &bob_pubkey, 501, blockhash, 0);
|
||||
let mut instruction2 = deserialize(tr2.userdata(0)).unwrap();
|
||||
if let SystemInstruction::Move { ref mut lamports } = instruction2 {
|
||||
*lamports = 502;
|
||||
}
|
||||
tr2.instructions[0].userdata = serialize(&instruction2).unwrap();
|
||||
let signature = client.transfer_signed(&tr2).unwrap();
|
||||
client.poll_for_signature(&signature).unwrap();
|
||||
|
||||
let balance = client.get_balance(&bob_pubkey);
|
||||
assert_eq!(balance.unwrap(), 1001);
|
||||
server.close().unwrap();
|
||||
remove_dir_all(ledger_path).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_register_vote_account() {
|
||||
solana_logger::setup();
|
||||
let (server, leader_data, alice, ledger_path) = new_fullnode();
|
||||
discover(&leader_data.gossip, 1).unwrap();
|
||||
|
||||
let mut client = mk_client(&leader_data);
|
||||
|
||||
// Create the validator account, transfer some lamports to that account
|
||||
let validator_keypair = Keypair::new();
|
||||
let blockhash = client.get_recent_blockhash();
|
||||
let signature = client
|
||||
.transfer(500, &alice, &validator_keypair.pubkey(), &blockhash)
|
||||
.unwrap();
|
||||
|
||||
client.poll_for_signature(&signature).unwrap();
|
||||
|
||||
// Create and register the vote account
|
||||
let validator_vote_account_keypair = Keypair::new();
|
||||
let vote_account_id = validator_vote_account_keypair.pubkey();
|
||||
let blockhash = client.get_recent_blockhash();
|
||||
|
||||
let transaction =
|
||||
VoteTransaction::new_account(&validator_keypair, &vote_account_id, blockhash, 1, 1);
|
||||
let signature = client.transfer_signed(&transaction).unwrap();
|
||||
client.poll_for_signature(&signature).unwrap();
|
||||
|
||||
let balance = retry_get_balance(&mut client, &vote_account_id, Some(1))
|
||||
.expect("Expected balance for new account to exist");
|
||||
assert_eq!(balance, 1);
|
||||
|
||||
const LAST: usize = 30;
|
||||
for run in 0..=LAST {
|
||||
let account_user_data = client
|
||||
.get_account_userdata(&vote_account_id)
|
||||
.expect("Expected valid response for account userdata")
|
||||
.expect("Expected valid account userdata to exist after account creation");
|
||||
|
||||
let vote_state = VoteState::deserialize(&account_user_data);
|
||||
|
||||
if vote_state.map(|vote_state| vote_state.delegate_id) == Ok(vote_account_id) {
|
||||
break;
|
||||
}
|
||||
|
||||
if run == LAST {
|
||||
panic!("Expected successful vote account registration");
|
||||
}
|
||||
sleep(Duration::from_millis(900));
|
||||
}
|
||||
|
||||
server.close().unwrap();
|
||||
remove_dir_all(ledger_path).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_transaction_count() {
|
||||
// set a bogus address, see that we don't hang
|
||||
solana_logger::setup();
|
||||
let addr = "0.0.0.0:1234".parse().unwrap();
|
||||
let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
let mut client =
|
||||
ThinClient::new_with_timeout(addr, addr, transactions_socket, Duration::from_secs(2));
|
||||
assert_eq!(client.transaction_count(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_zero_balance_after_nonzero() {
|
||||
solana_logger::setup();
|
||||
let (server, leader_data, alice, ledger_path) = new_fullnode();
|
||||
let bob_keypair = Keypair::new();
|
||||
discover(&leader_data.gossip, 1).unwrap();
|
||||
|
||||
let mut client = mk_client(&leader_data);
|
||||
let blockhash = client.get_recent_blockhash();
|
||||
info!("test_thin_client blockhash: {:?}", blockhash);
|
||||
|
||||
let starting_alice_balance = client.poll_get_balance(&alice.pubkey()).unwrap();
|
||||
info!("Alice has {} lamports", starting_alice_balance);
|
||||
|
||||
info!("Give Bob 500 lamports");
|
||||
let signature = client
|
||||
.transfer(500, &alice, &bob_keypair.pubkey(), &blockhash)
|
||||
.unwrap();
|
||||
client.poll_for_signature(&signature).unwrap();
|
||||
|
||||
let bob_balance = client.poll_get_balance(&bob_keypair.pubkey());
|
||||
assert_eq!(bob_balance.unwrap(), 500);
|
||||
|
||||
info!("Take Bob's 500 lamports away");
|
||||
let signature = client
|
||||
.transfer(500, &bob_keypair, &alice.pubkey(), &blockhash)
|
||||
.unwrap();
|
||||
client.poll_for_signature(&signature).unwrap();
|
||||
let alice_balance = client.poll_get_balance(&alice.pubkey()).unwrap();
|
||||
assert_eq!(alice_balance, starting_alice_balance);
|
||||
|
||||
info!("Should get an error when Bob's balance hits zero and is purged");
|
||||
let bob_balance = client.poll_get_balance(&bob_keypair.pubkey());
|
||||
info!("Bob's balance is {:?}", bob_balance);
|
||||
assert!(bob_balance.is_err(),);
|
||||
|
||||
server.close().unwrap();
|
||||
remove_dir_all(ledger_path).unwrap();
|
||||
}
|
||||
}
|
@ -1,10 +1,10 @@
|
||||
[package]
|
||||
name = "solana"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "0.12.0"
|
||||
version = "0.12.2"
|
||||
documentation = "https://docs.rs/solana"
|
||||
homepage = "https://solana.com/"
|
||||
readme = "README.md"
|
||||
readme = "../README.md"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
license = "Apache-2.0"
|
||||
@ -39,22 +39,22 @@ nix = "0.13.0"
|
||||
rand = "0.6.5"
|
||||
rand_chacha = "0.1.1"
|
||||
rayon = "1.0.0"
|
||||
reqwest = "0.9.11"
|
||||
ring = "0.13.2"
|
||||
rocksdb = "0.11.0"
|
||||
serde = "1.0.89"
|
||||
serde_derive = "1.0.88"
|
||||
serde_json = "1.0.39"
|
||||
solana-budget-api = { path = "../programs/budget_api", version = "0.12.0" }
|
||||
solana-drone = { path = "../drone", version = "0.12.0" }
|
||||
solana-logger = { path = "../logger", version = "0.12.0" }
|
||||
solana-metrics = { path = "../metrics", version = "0.12.0" }
|
||||
solana-netutil = { path = "../netutil", version = "0.12.0" }
|
||||
solana-runtime = { path = "../runtime", version = "0.12.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.12.0" }
|
||||
solana-storage-api = { path = "../programs/storage_api", version = "0.12.0" }
|
||||
solana-vote-api = { path = "../programs/vote_api", version = "0.12.0" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "0.12.0" }
|
||||
solana-budget-api = { path = "../programs/budget_api", version = "0.12.2" }
|
||||
solana-client = { path = "../client", version = "0.12.2" }
|
||||
solana-drone = { path = "../drone", version = "0.12.2" }
|
||||
solana-logger = { path = "../logger", version = "0.12.2" }
|
||||
solana-metrics = { path = "../metrics", version = "0.12.2" }
|
||||
solana-netutil = { path = "../netutil", version = "0.12.2" }
|
||||
solana-runtime = { path = "../runtime", version = "0.12.2" }
|
||||
solana-sdk = { path = "../sdk", version = "0.12.2" }
|
||||
solana-storage-api = { path = "../programs/storage_api", version = "0.12.2" }
|
||||
solana-vote-api = { path = "../programs/vote_api", version = "0.12.2" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "0.12.2" }
|
||||
sys-info = "0.5.6"
|
||||
tokio = "0.1"
|
||||
tokio-codec = "0.1"
|
||||
@ -63,6 +63,5 @@ untrusted = "0.6.2"
|
||||
[dev-dependencies]
|
||||
hex-literal = "0.1.3"
|
||||
matches = "0.1.6"
|
||||
solana-vote-program = { path = "../programs/vote", version = "0.12.0" }
|
||||
solana-budget-program = { path = "../programs/budget", version = "0.12.0" }
|
||||
|
||||
solana-vote-program = { path = "../programs/vote", version = "0.12.2" }
|
||||
solana-budget-program = { path = "../programs/budget", version = "0.12.2" }
|
||||
|
@ -1,7 +1,7 @@
|
||||
//! The `bank_forks` module implments BankForks a DAG of checkpointed Banks
|
||||
|
||||
use hashbrown::{HashMap, HashSet};
|
||||
use solana_runtime::bank::Bank;
|
||||
use std::collections::HashMap;
|
||||
use std::ops::Index;
|
||||
use std::sync::Arc;
|
||||
|
||||
@ -27,17 +27,40 @@ impl BankForks {
|
||||
working_bank,
|
||||
}
|
||||
}
|
||||
pub fn frozen_banks(&self) -> HashMap<u64, Arc<Bank>> {
|
||||
let mut frozen_banks: Vec<Arc<Bank>> = vec![];
|
||||
frozen_banks.extend(self.banks.values().filter(|v| v.is_frozen()).cloned());
|
||||
frozen_banks.extend(
|
||||
self.banks
|
||||
.iter()
|
||||
.flat_map(|(_, v)| v.parents())
|
||||
.filter(|v| v.is_frozen()),
|
||||
);
|
||||
frozen_banks.into_iter().map(|b| (b.slot(), b)).collect()
|
||||
|
||||
/// Create a map of bank slot id to the set of ancestors for the bank slot.
|
||||
pub fn ancestors(&self) -> HashMap<u64, HashSet<u64>> {
|
||||
let mut ancestors = HashMap::new();
|
||||
for bank in self.banks.values() {
|
||||
let set = bank.parents().into_iter().map(|b| b.slot()).collect();
|
||||
ancestors.insert(bank.slot(), set);
|
||||
}
|
||||
ancestors
|
||||
}
|
||||
|
||||
/// Create a map of bank slot id to the set of all of its descendants
|
||||
pub fn descendants(&self) -> HashMap<u64, HashSet<u64>> {
|
||||
let mut descendants = HashMap::new();
|
||||
for bank in self.banks.values() {
|
||||
let _ = descendants.entry(bank.slot()).or_insert(HashSet::new());
|
||||
for parent in bank.parents() {
|
||||
descendants
|
||||
.entry(parent.slot())
|
||||
.or_insert(HashSet::new())
|
||||
.insert(bank.slot());
|
||||
}
|
||||
}
|
||||
descendants
|
||||
}
|
||||
|
||||
pub fn frozen_banks(&self) -> HashMap<u64, Arc<Bank>> {
|
||||
self.banks
|
||||
.iter()
|
||||
.filter(|(_, b)| b.is_frozen())
|
||||
.map(|(k, b)| (*k, b.clone()))
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub fn active_banks(&self) -> Vec<u64> {
|
||||
self.banks
|
||||
.iter()
|
||||
@ -45,6 +68,7 @@ impl BankForks {
|
||||
.map(|(k, _v)| *k)
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub fn get(&self, bank_slot: u64) -> Option<&Arc<Bank>> {
|
||||
self.banks.get(&bank_slot)
|
||||
}
|
||||
@ -61,30 +85,32 @@ impl BankForks {
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: use the bank's own ID instead of receiving a parameter?
|
||||
pub fn insert(&mut self, bank_slot: u64, bank: Bank) {
|
||||
let mut bank = Arc::new(bank);
|
||||
assert_eq!(bank_slot, bank.slot());
|
||||
let prev = self.banks.insert(bank_slot, bank.clone());
|
||||
pub fn insert(&mut self, bank: Bank) {
|
||||
let bank = Arc::new(bank);
|
||||
let prev = self.banks.insert(bank.slot(), bank.clone());
|
||||
assert!(prev.is_none());
|
||||
|
||||
self.working_bank = bank.clone();
|
||||
|
||||
// TODO: this really only needs to look at the first
|
||||
// parent if we're always calling insert()
|
||||
// when we construct a child bank
|
||||
while let Some(parent) = bank.parent() {
|
||||
if let Some(prev) = self.banks.remove(&parent.slot()) {
|
||||
assert!(Arc::ptr_eq(&prev, &parent));
|
||||
}
|
||||
bank = parent;
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: really want to kill this...
|
||||
pub fn working_bank(&self) -> Arc<Bank> {
|
||||
self.working_bank.clone()
|
||||
}
|
||||
|
||||
pub fn set_root(&mut self, root: u64) {
|
||||
let root_bank = self
|
||||
.banks
|
||||
.get(&root)
|
||||
.expect("root bank didn't exist in bank_forks");
|
||||
root_bank.squash();
|
||||
self.prune_non_root(root);
|
||||
}
|
||||
|
||||
fn prune_non_root(&mut self, root: u64) {
|
||||
self.banks
|
||||
.retain(|slot, bank| *slot >= root || bank.is_in_subtree_of(root))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@ -101,18 +127,53 @@ mod tests {
|
||||
let mut bank_forks = BankForks::new(0, bank);
|
||||
let child_bank = Bank::new_from_parent(&bank_forks[0u64], &Pubkey::default(), 1);
|
||||
child_bank.register_tick(&Hash::default());
|
||||
bank_forks.insert(1, child_bank);
|
||||
bank_forks.insert(child_bank);
|
||||
assert_eq!(bank_forks[1u64].tick_height(), 1);
|
||||
assert_eq!(bank_forks.working_bank().tick_height(), 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bank_forks_descendants() {
|
||||
let (genesis_block, _) = GenesisBlock::new(10_000);
|
||||
let bank = Bank::new(&genesis_block);
|
||||
let mut bank_forks = BankForks::new(0, bank);
|
||||
let bank0 = bank_forks[0].clone();
|
||||
let bank = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
|
||||
bank_forks.insert(bank);
|
||||
let bank = Bank::new_from_parent(&bank0, &Pubkey::default(), 2);
|
||||
bank_forks.insert(bank);
|
||||
let descendants = bank_forks.descendants();
|
||||
let children: Vec<u64> = descendants[&0].iter().cloned().collect();
|
||||
assert_eq!(children, vec![1, 2]);
|
||||
assert!(descendants[&1].is_empty());
|
||||
assert!(descendants[&2].is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bank_forks_ancestors() {
|
||||
let (genesis_block, _) = GenesisBlock::new(10_000);
|
||||
let bank = Bank::new(&genesis_block);
|
||||
let mut bank_forks = BankForks::new(0, bank);
|
||||
let bank0 = bank_forks[0].clone();
|
||||
let bank = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
|
||||
bank_forks.insert(bank);
|
||||
let bank = Bank::new_from_parent(&bank0, &Pubkey::default(), 2);
|
||||
bank_forks.insert(bank);
|
||||
let ancestors = bank_forks.ancestors();
|
||||
assert!(ancestors[&0].is_empty());
|
||||
let parents: Vec<u64> = ancestors[&1].iter().cloned().collect();
|
||||
assert_eq!(parents, vec![0]);
|
||||
let parents: Vec<u64> = ancestors[&2].iter().cloned().collect();
|
||||
assert_eq!(parents, vec![0]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bank_forks_frozen_banks() {
|
||||
let (genesis_block, _) = GenesisBlock::new(10_000);
|
||||
let bank = Bank::new(&genesis_block);
|
||||
let mut bank_forks = BankForks::new(0, bank);
|
||||
let child_bank = Bank::new_from_parent(&bank_forks[0u64], &Pubkey::default(), 1);
|
||||
bank_forks.insert(1, child_bank);
|
||||
bank_forks.insert(child_bank);
|
||||
assert!(bank_forks.frozen_banks().get(&0).is_some());
|
||||
assert!(bank_forks.frozen_banks().get(&1).is_none());
|
||||
}
|
||||
@ -123,7 +184,7 @@ mod tests {
|
||||
let bank = Bank::new(&genesis_block);
|
||||
let mut bank_forks = BankForks::new(0, bank);
|
||||
let child_bank = Bank::new_from_parent(&bank_forks[0u64], &Pubkey::default(), 1);
|
||||
bank_forks.insert(1, child_bank);
|
||||
bank_forks.insert(child_bank);
|
||||
assert_eq!(bank_forks.active_banks(), vec![1]);
|
||||
}
|
||||
|
||||
|
@ -17,6 +17,7 @@ use crate::sigverify_stage::VerifiedPackets;
|
||||
use bincode::deserialize;
|
||||
use solana_metrics::counter::Counter;
|
||||
use solana_runtime::bank::{self, Bank, BankError};
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_sdk::timing::{self, duration_as_us, MAX_RECENT_BLOCKHASHES};
|
||||
use solana_sdk::transaction::Transaction;
|
||||
use std::net::UdpSocket;
|
||||
@ -26,13 +27,9 @@ use std::sync::{Arc, Mutex, RwLock};
|
||||
use std::thread::{self, Builder, JoinHandle};
|
||||
use std::time::Duration;
|
||||
use std::time::Instant;
|
||||
use sys_info;
|
||||
|
||||
pub type UnprocessedPackets = Vec<(SharedPackets, usize)>; // `usize` is the index of the first unprocessed packet in `SharedPackets`
|
||||
|
||||
// number of threads is 1 until mt bank is ready
|
||||
pub const NUM_THREADS: u32 = 10;
|
||||
|
||||
/// Stores the stage's thread handle and output receiver.
|
||||
pub struct BankingStage {
|
||||
bank_thread_hdls: Vec<JoinHandle<()>>,
|
||||
@ -56,7 +53,7 @@ impl BankingStage {
|
||||
// Single thread to compute confirmation
|
||||
let lcs_handle = LeaderConfirmationService::start(&poh_recorder, exit.clone());
|
||||
// Many banks that process transactions in parallel.
|
||||
let mut bank_thread_hdls: Vec<JoinHandle<()>> = (0..Self::num_threads())
|
||||
let mut bank_thread_hdls: Vec<JoinHandle<()>> = (0..4)
|
||||
.map(|_| {
|
||||
let verified_receiver = verified_receiver.clone();
|
||||
let poh_recorder = poh_recorder.clone();
|
||||
@ -135,7 +132,9 @@ impl BankingStage {
|
||||
// Buffer the packets if I am the next leader
|
||||
// or, if it was getting sent to me
|
||||
let leader_id = match poh_recorder.lock().unwrap().bank() {
|
||||
Some(bank) => leader_schedule_utils::slot_leader_at(bank.slot() + 1, &bank).unwrap(),
|
||||
Some(bank) => {
|
||||
leader_schedule_utils::slot_leader_at(bank.slot() + 1, &bank).unwrap_or_default()
|
||||
}
|
||||
None => rcluster_info
|
||||
.leader_data()
|
||||
.map(|x| x.id)
|
||||
@ -186,10 +185,6 @@ impl BankingStage {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn num_threads() -> u32 {
|
||||
sys_info::cpu_num().unwrap_or(NUM_THREADS)
|
||||
}
|
||||
|
||||
/// Convert the transactions from a blob of binary data to a vector of transactions
|
||||
fn deserialize_transactions(p: &Packets) -> Vec<Option<Transaction>> {
|
||||
p.packets
|
||||
@ -228,24 +223,22 @@ impl BankingStage {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn process_and_record_transactions(
|
||||
fn process_and_record_transactions_locked(
|
||||
bank: &Bank,
|
||||
txs: &[Transaction],
|
||||
poh: &Arc<Mutex<PohRecorder>>,
|
||||
lock_results: &[bank::Result<()>],
|
||||
) -> Result<()> {
|
||||
let now = Instant::now();
|
||||
// Once accounts are locked, other threads cannot encode transactions that will modify the
|
||||
// same account state
|
||||
let lock_results = bank.lock_accounts(txs);
|
||||
let lock_time = now.elapsed();
|
||||
|
||||
let now = Instant::now();
|
||||
// Use a shorter maximum age when adding transactions into the pipeline. This will reduce
|
||||
// the likelihood of any single thread getting starved and processing old ids.
|
||||
// TODO: Banking stage threads should be prioritized to complete faster then this queue
|
||||
// expires.
|
||||
let (loaded_accounts, results) =
|
||||
bank.load_and_execute_transactions(txs, lock_results, MAX_RECENT_BLOCKHASHES / 2);
|
||||
let (loaded_accounts, results) = bank.load_and_execute_transactions(
|
||||
txs,
|
||||
lock_results.to_vec(),
|
||||
MAX_RECENT_BLOCKHASHES / 2,
|
||||
);
|
||||
let load_execute_time = now.elapsed();
|
||||
|
||||
let record_time = {
|
||||
@ -260,21 +253,45 @@ impl BankingStage {
|
||||
now.elapsed()
|
||||
};
|
||||
|
||||
let now = Instant::now();
|
||||
// Once the accounts are new transactions can enter the pipeline to process them
|
||||
bank.unlock_accounts(&txs, &results);
|
||||
let unlock_time = now.elapsed();
|
||||
debug!(
|
||||
"bank: {} lock: {}us load_execute: {}us record: {}us commit: {}us unlock: {}us txs_len: {}",
|
||||
"bank: {} load_execute: {}us record: {}us commit: {}us txs_len: {}",
|
||||
bank.slot(),
|
||||
duration_as_us(&lock_time),
|
||||
duration_as_us(&load_execute_time),
|
||||
duration_as_us(&record_time),
|
||||
duration_as_us(&commit_time),
|
||||
txs.len(),
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn process_and_record_transactions(
|
||||
bank: &Bank,
|
||||
txs: &[Transaction],
|
||||
poh: &Arc<Mutex<PohRecorder>>,
|
||||
) -> Result<()> {
|
||||
let now = Instant::now();
|
||||
// Once accounts are locked, other threads cannot encode transactions that will modify the
|
||||
// same account state
|
||||
let lock_results = bank.lock_accounts(txs);
|
||||
let lock_time = now.elapsed();
|
||||
|
||||
let results = Self::process_and_record_transactions_locked(bank, txs, poh, &lock_results);
|
||||
|
||||
let now = Instant::now();
|
||||
// Once the accounts are new transactions can enter the pipeline to process them
|
||||
bank.unlock_accounts(&txs, &lock_results);
|
||||
let unlock_time = now.elapsed();
|
||||
|
||||
debug!(
|
||||
"bank: {} lock: {}us unlock: {}us txs_len: {}",
|
||||
bank.slot(),
|
||||
duration_as_us(&lock_time),
|
||||
duration_as_us(&unlock_time),
|
||||
txs.len(),
|
||||
);
|
||||
Ok(())
|
||||
|
||||
results
|
||||
}
|
||||
|
||||
/// Sends transactions to the bank.
|
||||
@ -431,8 +448,14 @@ pub fn create_test_recorder(
|
||||
Receiver<WorkingBankEntries>,
|
||||
) {
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let (poh_recorder, entry_receiver) =
|
||||
PohRecorder::new(bank.tick_height(), bank.last_blockhash());
|
||||
let (poh_recorder, entry_receiver) = PohRecorder::new(
|
||||
bank.tick_height(),
|
||||
bank.last_blockhash(),
|
||||
bank.slot(),
|
||||
Some(4),
|
||||
bank.ticks_per_slot(),
|
||||
&Pubkey::default(),
|
||||
);
|
||||
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
|
||||
let poh_service = PohService::new(poh_recorder.clone(), &PohServiceConfig::default(), &exit);
|
||||
(exit, poh_recorder, poh_service, entry_receiver)
|
||||
@ -640,8 +663,14 @@ mod tests {
|
||||
max_tick_height: std::u64::MAX,
|
||||
};
|
||||
|
||||
let (poh_recorder, entry_receiver) =
|
||||
PohRecorder::new(bank.tick_height(), bank.last_blockhash());
|
||||
let (poh_recorder, entry_receiver) = PohRecorder::new(
|
||||
bank.tick_height(),
|
||||
bank.last_blockhash(),
|
||||
bank.slot(),
|
||||
None,
|
||||
bank.ticks_per_slot(),
|
||||
&Pubkey::default(),
|
||||
);
|
||||
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
|
||||
|
||||
poh_recorder.lock().unwrap().set_working_bank(working_bank);
|
||||
@ -693,8 +722,14 @@ mod tests {
|
||||
min_tick_height: bank.tick_height(),
|
||||
max_tick_height: bank.tick_height() + 1,
|
||||
};
|
||||
let (poh_recorder, entry_receiver) =
|
||||
PohRecorder::new(bank.tick_height(), bank.last_blockhash());
|
||||
let (poh_recorder, entry_receiver) = PohRecorder::new(
|
||||
bank.tick_height(),
|
||||
bank.last_blockhash(),
|
||||
bank.slot(),
|
||||
Some(4),
|
||||
bank.ticks_per_slot(),
|
||||
&pubkey,
|
||||
);
|
||||
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
|
||||
|
||||
poh_recorder.lock().unwrap().set_working_bank(working_bank);
|
||||
|
@ -138,14 +138,13 @@ pub fn process_blocktree(
        warn!("entry0 not present");
        return Err(BankError::LedgerVerificationFailed);
    }
    let entry0 = &entries[0];
    let entry0 = entries.remove(0);
    if !(entry0.is_tick() && entry0.verify(&last_entry_hash)) {
        warn!("Ledger proof of history failed at entry0");
        return Err(BankError::LedgerVerificationFailed);
    }
    last_entry_hash = entry0.hash;
    entry_height += 1;
    entries = entries.drain(1..).collect();
}

if !entries.is_empty() {
@ -1,14 +0,0 @@
use crate::cluster_info::FULLNODE_PORT_RANGE;
use crate::contact_info::ContactInfo;
use crate::thin_client::ThinClient;
use std::time::Duration;

pub fn mk_client(r: &ContactInfo) -> ThinClient {
    let (_, transactions_socket) = solana_netutil::bind_in_range(FULLNODE_PORT_RANGE).unwrap();
    ThinClient::new(r.rpc, r.tpu, transactions_socket)
}

pub fn mk_client_with_timeout(r: &ContactInfo, timeout: Duration) -> ThinClient {
    let (_, transactions_socket) = solana_netutil::bind_in_range(FULLNODE_PORT_RANGE).unwrap();
    ThinClient::new_with_timeout(r.rpc, r.tpu, transactions_socket, timeout)
}
6 core/src/cluster.rs Normal file
@ -0,0 +1,6 @@
use solana_sdk::pubkey::Pubkey;

pub trait Cluster {
    fn get_node_ids(&self) -> Vec<Pubkey>;
    fn restart_node(&mut self, pubkey: Pubkey);
}
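The new Cluster trait gives the integration tests a uniform handle on a set of running nodes: enumerate them, and restart one by pubkey. A sketch of an implementor is below; ToyCluster, NodeHandle, and their fields are assumptions for illustration, not the crate's actual test harness.

use std::collections::HashMap;

use solana_sdk::pubkey::Pubkey;

use crate::cluster::Cluster;

// Placeholder for whatever per-node state a real harness keeps
// (process handles, ledger paths, keypairs, ...).
struct NodeHandle;

// Hypothetical harness tracking one handle per node id.
struct ToyCluster {
    nodes: HashMap<Pubkey, NodeHandle>,
}

impl Cluster for ToyCluster {
    fn get_node_ids(&self) -> Vec<Pubkey> {
        self.nodes.keys().cloned().collect()
    }

    fn restart_node(&mut self, pubkey: Pubkey) {
        // A real implementor would stop and relaunch the fullnode here;
        // this sketch just swaps in a fresh placeholder handle.
        self.nodes.insert(pubkey, NodeHandle);
    }
}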
@ -3,14 +3,18 @@ use crate::blocktree::Blocktree;
|
||||
///
|
||||
/// All tests must start from an entry point and a funding keypair and
|
||||
/// discover the rest of the network.
|
||||
use crate::client::mk_client;
|
||||
use crate::cluster_info::FULLNODE_PORT_RANGE;
|
||||
use crate::contact_info::ContactInfo;
|
||||
use crate::entry::{Entry, EntrySlice};
|
||||
use crate::gossip_service::discover;
|
||||
use crate::poh_service::PohServiceConfig;
|
||||
use solana_client::client::create_client;
|
||||
use solana_sdk::hash::Hash;
|
||||
use solana_sdk::signature::{Keypair, KeypairUtil};
|
||||
use solana_sdk::system_transaction::SystemTransaction;
|
||||
use solana_sdk::timing::{DEFAULT_SLOTS_PER_EPOCH, DEFAULT_TICKS_PER_SLOT, NUM_TICKS_PER_SECOND};
|
||||
use solana_sdk::timing::{
|
||||
duration_as_ms, DEFAULT_SLOTS_PER_EPOCH, DEFAULT_TICKS_PER_SLOT, NUM_TICKS_PER_SECOND,
|
||||
};
|
||||
use std::thread::sleep;
|
||||
use std::time::Duration;
|
||||
|
||||
@ -26,7 +30,7 @@ pub fn spend_and_verify_all_nodes(
|
||||
assert!(cluster_nodes.len() >= nodes);
|
||||
for ingress_node in &cluster_nodes {
|
||||
let random_keypair = Keypair::new();
|
||||
let mut client = mk_client(&ingress_node);
|
||||
let mut client = create_client(ingress_node.client_facing_addr(), FULLNODE_PORT_RANGE);
|
||||
let bal = client
|
||||
.poll_get_balance(&funding_keypair.pubkey())
|
||||
.expect("balance in source");
|
||||
@ -42,14 +46,14 @@ pub fn spend_and_verify_all_nodes(
|
||||
.retry_transfer(&funding_keypair, &mut transaction, 5)
|
||||
.unwrap();
|
||||
for validator in &cluster_nodes {
|
||||
let mut client = mk_client(&validator);
|
||||
let mut client = create_client(validator.client_facing_addr(), FULLNODE_PORT_RANGE);
|
||||
client.poll_for_signature(&sig).unwrap();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn send_many_transactions(node: &ContactInfo, funding_keypair: &Keypair, num_txs: u64) {
|
||||
let mut client = mk_client(node);
|
||||
let mut client = create_client(node.client_facing_addr(), FULLNODE_PORT_RANGE);
|
||||
for _ in 0..num_txs {
|
||||
let random_keypair = Keypair::new();
|
||||
let bal = client
|
||||
@ -73,12 +77,12 @@ pub fn fullnode_exit(entry_point_info: &ContactInfo, nodes: usize) {
|
||||
let cluster_nodes = discover(&entry_point_info.gossip, nodes).unwrap();
|
||||
assert!(cluster_nodes.len() >= nodes);
|
||||
for node in &cluster_nodes {
|
||||
let mut client = mk_client(&node);
|
||||
let mut client = create_client(node.client_facing_addr(), FULLNODE_PORT_RANGE);
|
||||
assert!(client.fullnode_exit().unwrap());
|
||||
}
|
||||
sleep(Duration::from_millis(SLOT_MILLIS));
|
||||
for node in &cluster_nodes {
|
||||
let mut client = mk_client(&node);
|
||||
let mut client = create_client(node.client_facing_addr(), FULLNODE_PORT_RANGE);
|
||||
assert!(client.fullnode_exit().is_err());
|
||||
}
|
||||
}
|
||||
@ -116,6 +120,25 @@ pub fn verify_ledger_ticks(ledger_path: &str, ticks_per_slot: usize) {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn sleep_n_epochs(
|
||||
num_epochs: f64,
|
||||
config: &PohServiceConfig,
|
||||
ticks_per_slot: u64,
|
||||
slots_per_epoch: u64,
|
||||
) {
|
||||
let num_ticks_per_second = {
|
||||
match config {
|
||||
PohServiceConfig::Sleep(d) => (1000 / duration_as_ms(d)) as f64,
|
||||
_ => panic!("Unsuppported tick config for testing"),
|
||||
}
|
||||
};
|
||||
|
||||
let num_ticks_to_sleep = num_epochs * ticks_per_slot as f64 * slots_per_epoch as f64;
|
||||
sleep(Duration::from_secs(
|
||||
((num_ticks_to_sleep + num_ticks_per_second - 1.0) / num_ticks_per_second) as u64,
|
||||
));
|
||||
}
|
||||
|
||||
pub fn kill_entry_and_spend_and_verify_rest(
|
||||
entry_point_info: &ContactInfo,
|
||||
funding_keypair: &Keypair,
|
||||
@ -124,7 +147,7 @@ pub fn kill_entry_and_spend_and_verify_rest(
|
||||
solana_logger::setup();
|
||||
let cluster_nodes = discover(&entry_point_info.gossip, nodes).unwrap();
|
||||
assert!(cluster_nodes.len() >= nodes);
|
||||
let mut client = mk_client(&entry_point_info);
|
||||
let mut client = create_client(entry_point_info.client_facing_addr(), FULLNODE_PORT_RANGE);
|
||||
info!("sleeping for an epoch");
|
||||
sleep(Duration::from_millis(SLOT_MILLIS * DEFAULT_SLOTS_PER_EPOCH));
|
||||
info!("done sleeping for an epoch");
|
||||
@ -138,7 +161,7 @@ pub fn kill_entry_and_spend_and_verify_rest(
|
||||
continue;
|
||||
}
|
||||
let random_keypair = Keypair::new();
|
||||
let mut client = mk_client(&ingress_node);
|
||||
let mut client = create_client(ingress_node.client_facing_addr(), FULLNODE_PORT_RANGE);
|
||||
let bal = client
|
||||
.poll_get_balance(&funding_keypair.pubkey())
|
||||
.expect("balance in source");
|
||||
@ -157,7 +180,7 @@ pub fn kill_entry_and_spend_and_verify_rest(
|
||||
if validator.id == entry_point_info.id {
|
||||
continue;
|
||||
}
|
||||
let mut client = mk_client(&validator);
|
||||
let mut client = create_client(validator.client_facing_addr(), FULLNODE_PORT_RANGE);
|
||||
client.poll_for_signature(&sig).unwrap();
|
||||
}
|
||||
}
|
||||
|
@ -196,6 +196,10 @@ impl ContactInfo {
    pub fn is_valid_address(addr: &SocketAddr) -> bool {
        (addr.port() != 0) && Self::is_valid_ip(addr.ip())
    }

    pub fn client_facing_addr(&self) -> (SocketAddr, SocketAddr) {
        (self.rpc, self.tpu)
    }
}

impl Signable for ContactInfo {
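client_facing_addr packages the node's advertised RPC and TPU sockets so a ContactInfo can be handed straight to the new create_client constructor, as the cluster test changes later in this diff do. A hedged one-liner, assuming the surrounding core-crate paths:

use crate::cluster_info::FULLNODE_PORT_RANGE;
use crate::contact_info::ContactInfo;
use solana_client::client::create_client;
use solana_client::thin_client::ThinClient;

// Build a ThinClient for a gossip-discovered node.
fn client_for(node: &ContactInfo) -> ThinClient {
    create_client(node.client_facing_addr(), FULLNODE_PORT_RANGE)
}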
@ -68,7 +68,7 @@ impl CrdsValueLabel {
}

impl Vote {
    // TODO: it might make sense for the transaction to encode the wallclock in the userdata
    // TODO: it might make sense for the transaction to encode the wallclock in the data
    pub fn new(transaction: Transaction, wallclock: u64) -> Self {
        Vote {
            transaction,
@ -9,6 +9,7 @@ use crate::entry::create_ticks;
|
||||
use crate::entry::next_entry_mut;
|
||||
use crate::entry::Entry;
|
||||
use crate::gossip_service::GossipService;
|
||||
use crate::leader_schedule_utils;
|
||||
use crate::poh_recorder::PohRecorder;
|
||||
use crate::poh_service::{PohService, PohServiceConfig};
|
||||
use crate::rpc::JsonRpcConfig;
|
||||
@ -36,6 +37,7 @@ use std::thread::JoinHandle;
|
||||
use std::thread::{spawn, Result};
|
||||
use std::time::Duration;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct FullnodeConfig {
|
||||
pub sigverify_disabled: bool,
|
||||
pub voting_disabled: bool,
|
||||
@ -106,8 +108,14 @@ impl Fullnode {
|
||||
bank.tick_height(),
|
||||
bank.last_blockhash(),
|
||||
);
|
||||
let (poh_recorder, entry_receiver) =
|
||||
PohRecorder::new(bank.tick_height(), bank.last_blockhash());
|
||||
let (poh_recorder, entry_receiver) = PohRecorder::new(
|
||||
bank.tick_height(),
|
||||
bank.last_blockhash(),
|
||||
bank.slot(),
|
||||
leader_schedule_utils::next_leader_slot(&id, bank.slot(), &bank),
|
||||
bank.ticks_per_slot(),
|
||||
&id,
|
||||
);
|
||||
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
|
||||
let poh_service = PohService::new(poh_recorder.clone(), &config.tick_config, &exit);
|
||||
poh_recorder.lock().unwrap().clear_bank_signal =
|
||||
@ -258,15 +266,6 @@ impl Fullnode {
|
||||
// Used for notifying many nodes in parallel to exit
|
||||
pub fn exit(&self) {
|
||||
self.exit.store(true, Ordering::Relaxed);
|
||||
|
||||
// Need to force the poh_recorder to drop the WorkingBank,
|
||||
// which contains the channel to BroadcastStage. This should be
|
||||
// sufficient as long as no other rotations are happening that
|
||||
// can cause the Tpu to restart a BankingStage and reset a
|
||||
// WorkingBank in poh_recorder. It follows no other rotations can be
|
||||
// in motion because exit()/close() are only called by the run() loop
|
||||
// which is the sole initiator of rotations.
|
||||
self.poh_recorder.lock().unwrap().clear_bank();
|
||||
}
|
||||
|
||||
pub fn close(self) -> Result<()> {
|
||||
@ -372,6 +371,31 @@ pub fn make_active_set_entries(
|
||||
(entries, voting_keypair)
|
||||
}
|
||||
|
||||
pub fn new_fullnode_for_tests() -> (Fullnode, ContactInfo, Keypair, String) {
|
||||
use crate::blocktree::create_new_tmp_ledger;
|
||||
use crate::cluster_info::Node;
|
||||
|
||||
let node_keypair = Arc::new(Keypair::new());
|
||||
let node = Node::new_localhost_with_pubkey(&node_keypair.pubkey());
|
||||
let contact_info = node.info.clone();
|
||||
|
||||
let (genesis_block, mint_keypair) = GenesisBlock::new_with_leader(10_000, &contact_info.id, 42);
|
||||
let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_block);
|
||||
|
||||
let voting_keypair = Keypair::new();
|
||||
let node = Fullnode::new(
|
||||
node,
|
||||
&node_keypair,
|
||||
&ledger_path,
|
||||
&voting_keypair.pubkey(),
|
||||
voting_keypair,
|
||||
None,
|
||||
&FullnodeConfig::default(),
|
||||
);
|
||||
|
||||
(node, contact_info, mint_keypair, ledger_path)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
@ -67,7 +67,7 @@ pub fn discover(gossip_addr: &SocketAddr, num_nodes: usize) -> std::io::Result<V
    while now.elapsed() < Duration::from_secs(30) {
        let rpc_peers = spy_ref.read().unwrap().rpc_peers();
        if rpc_peers.len() >= num_nodes {
            trace!(
            info!(
                "discover success in {}s...\n{}",
                now.elapsed().as_secs(),
                spy_ref.read().unwrap().contact_info_trace()
@ -33,7 +33,7 @@ impl LeaderConfirmationService {
|
||||
// the vote states
|
||||
bank.vote_accounts().for_each(|(_, account)| {
|
||||
total_stake += account.lamports;
|
||||
let vote_state = VoteState::deserialize(&account.userdata).unwrap();
|
||||
let vote_state = VoteState::deserialize(&account.data).unwrap();
|
||||
if let Some(stake_and_state) = vote_state
|
||||
.votes
|
||||
.back()
|
||||
@ -118,6 +118,7 @@ mod tests {
|
||||
use solana_sdk::hash::hash;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_sdk::signature::{Keypair, KeypairUtil};
|
||||
use solana_sdk::timing::MAX_RECENT_BLOCKHASHES;
|
||||
use solana_vote_api::vote_transaction::VoteTransaction;
|
||||
use std::sync::Arc;
|
||||
|
||||
@ -130,8 +131,8 @@ mod tests {
|
||||
|
||||
let mut bank = Arc::new(Bank::new(&genesis_block));
|
||||
|
||||
// Move the bank up 10 slots
|
||||
for slot in 1..=10 {
|
||||
// Move the bank up MAX_RECENT_BLOCKHASHES slots
|
||||
for slot in 1..=MAX_RECENT_BLOCKHASHES as u64 {
|
||||
let max_tick_height = slot * bank.ticks_per_slot() - 1;
|
||||
|
||||
while bank.tick_height() != max_tick_height {
|
||||
@ -159,7 +160,11 @@ mod tests {
|
||||
new_vote_account(&validator_keypair, &voting_pubkey, &bank, 1);
|
||||
|
||||
if i < 6 {
|
||||
push_vote(&voting_keypair, &bank, (i + 1) as u64);
|
||||
push_vote(
|
||||
&voting_keypair,
|
||||
&bank,
|
||||
MAX_RECENT_BLOCKHASHES.saturating_sub(i) as u64,
|
||||
);
|
||||
}
|
||||
(voting_keypair, validator_keypair)
|
||||
})
|
||||
@ -172,8 +177,13 @@ mod tests {
|
||||
|
||||
// Get another validator to vote, so we now have 2/3 consensus
|
||||
let voting_keypair = &vote_accounts[7].0;
|
||||
let vote_tx =
|
||||
VoteTransaction::new_vote(&voting_keypair.pubkey(), voting_keypair, 7, blockhash, 0);
|
||||
let vote_tx = VoteTransaction::new_vote(
|
||||
&voting_keypair.pubkey(),
|
||||
voting_keypair,
|
||||
MAX_RECENT_BLOCKHASHES as u64,
|
||||
blockhash,
|
||||
0,
|
||||
);
|
||||
bank.process_transaction(&vote_tx).unwrap();
|
||||
|
||||
LeaderConfirmationService::compute_confirmation(&bank, &mut last_confirmation_time);
|
||||
|
@ -12,18 +12,29 @@ pub struct LeaderSchedule {
|
||||
|
||||
impl LeaderSchedule {
|
||||
// Note: passing in zero stakers will cause a panic.
|
||||
pub fn new(ids_and_stakes: &[(Pubkey, u64)], seed: [u8; 32], len: u64) -> Self {
|
||||
pub fn new(ids_and_stakes: &[(Pubkey, u64)], seed: [u8; 32], len: u64, repeat: u64) -> Self {
|
||||
let (ids, stakes): (Vec<_>, Vec<_>) = ids_and_stakes.iter().cloned().unzip();
|
||||
let rng = &mut ChaChaRng::from_seed(seed);
|
||||
let weighted_index = WeightedIndex::new(stakes).unwrap();
|
||||
let slot_leaders = (0..len).map(|_| ids[weighted_index.sample(rng)]).collect();
|
||||
let mut current_node = Pubkey::default();
|
||||
let slot_leaders = (0..len)
|
||||
.map(|i| {
|
||||
if i % repeat == 0 {
|
||||
current_node = ids[weighted_index.sample(rng)];
|
||||
current_node
|
||||
} else {
|
||||
current_node
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
Self { slot_leaders }
|
||||
}
|
||||
}
|
||||
|
||||
impl Index<usize> for LeaderSchedule {
|
||||
impl Index<u64> for LeaderSchedule {
|
||||
type Output = Pubkey;
|
||||
fn index(&self, index: usize) -> &Pubkey {
|
||||
fn index(&self, index: u64) -> &Pubkey {
|
||||
let index = index as usize;
|
||||
&self.slot_leaders[index % self.slot_leaders.len()]
|
||||
}
|
||||
}
|
||||
@ -56,10 +67,76 @@ mod tests {
|
||||
let mut seed_bytes = [0u8; 32];
|
||||
seed_bytes.copy_from_slice(seed.as_ref());
|
||||
let len = num_keys * 10;
|
||||
let leader_schedule = LeaderSchedule::new(&stakes, seed_bytes, len);
|
||||
let leader_schedule2 = LeaderSchedule::new(&stakes, seed_bytes, len);
|
||||
let leader_schedule = LeaderSchedule::new(&stakes, seed_bytes, len, 1);
|
||||
let leader_schedule2 = LeaderSchedule::new(&stakes, seed_bytes, len, 1);
|
||||
assert_eq!(leader_schedule.slot_leaders.len() as u64, len);
|
||||
// Check that the same schedule is reproducibly generated
|
||||
assert_eq!(leader_schedule, leader_schedule2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_repeated_leader_schedule() {
|
||||
let num_keys = 10;
|
||||
let stakes: Vec<_> = (0..num_keys)
|
||||
.map(|i| (Keypair::new().pubkey(), i))
|
||||
.collect();
|
||||
|
||||
let seed = Keypair::new().pubkey();
|
||||
let mut seed_bytes = [0u8; 32];
|
||||
seed_bytes.copy_from_slice(seed.as_ref());
|
||||
let len = num_keys * 10;
|
||||
let repeat = 8;
|
||||
let leader_schedule = LeaderSchedule::new(&stakes, seed_bytes, len, repeat);
|
||||
assert_eq!(leader_schedule.slot_leaders.len() as u64, len);
|
||||
let mut leader_node = Pubkey::default();
|
||||
for (i, node) in leader_schedule.slot_leaders.iter().enumerate() {
|
||||
if i % repeat as usize == 0 {
|
||||
leader_node = *node;
|
||||
} else {
|
||||
assert_eq!(leader_node, *node);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_repeated_leader_schedule_specific() {
|
||||
let alice_pubkey = Keypair::new().pubkey();
|
||||
let bob_pubkey = Keypair::new().pubkey();
|
||||
let stakes = vec![(alice_pubkey, 2), (bob_pubkey, 1)];
|
||||
|
||||
let seed = Pubkey::default();
|
||||
let mut seed_bytes = [0u8; 32];
|
||||
seed_bytes.copy_from_slice(seed.as_ref());
|
||||
let len = 8;
|
||||
// What the schedule looks like without any repeats
|
||||
let leaders1 = LeaderSchedule::new(&stakes, seed_bytes, len, 1).slot_leaders;
|
||||
|
||||
// What the schedule looks like with repeats
|
||||
let leaders2 = LeaderSchedule::new(&stakes, seed_bytes, len, 2).slot_leaders;
|
||||
assert_eq!(leaders1.len(), leaders2.len());
|
||||
|
||||
let leaders1_expected = vec![
|
||||
alice_pubkey,
|
||||
alice_pubkey,
|
||||
alice_pubkey,
|
||||
bob_pubkey,
|
||||
alice_pubkey,
|
||||
alice_pubkey,
|
||||
alice_pubkey,
|
||||
alice_pubkey,
|
||||
];
|
||||
let leaders2_expected = vec![
|
||||
alice_pubkey,
|
||||
alice_pubkey,
|
||||
alice_pubkey,
|
||||
alice_pubkey,
|
||||
alice_pubkey,
|
||||
alice_pubkey,
|
||||
bob_pubkey,
|
||||
bob_pubkey,
|
||||
];
|
||||
|
||||
assert_eq!(leaders1, leaders1_expected);
|
||||
assert_eq!(leaders2, leaders2_expected);
|
||||
}
|
||||
}
|
||||
|
@ -2,6 +2,7 @@ use crate::leader_schedule::LeaderSchedule;
|
||||
use crate::staking_utils;
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_sdk::timing::NUM_CONSECUTIVE_LEADER_SLOTS;
|
||||
|
||||
/// Return the leader schedule for the given epoch.
|
||||
fn leader_schedule(epoch_height: u64, bank: &Bank) -> Option<LeaderSchedule> {
|
||||
@ -10,7 +11,12 @@ fn leader_schedule(epoch_height: u64, bank: &Bank) -> Option<LeaderSchedule> {
|
||||
seed[0..8].copy_from_slice(&epoch_height.to_le_bytes());
|
||||
let mut stakes: Vec<_> = stakes.into_iter().collect();
|
||||
sort_stakes(&mut stakes);
|
||||
LeaderSchedule::new(&stakes, seed, bank.get_slots_in_epoch(epoch_height))
|
||||
LeaderSchedule::new(
|
||||
&stakes,
|
||||
seed,
|
||||
bank.get_slots_in_epoch(epoch_height),
|
||||
NUM_CONSECUTIVE_LEADER_SLOTS,
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
@ -34,7 +40,33 @@ fn sort_stakes(stakes: &mut Vec<(Pubkey, u64)>) {
|
||||
pub fn slot_leader_at(slot: u64, bank: &Bank) -> Option<Pubkey> {
|
||||
let (epoch, slot_index) = bank.get_epoch_and_slot_index(slot);
|
||||
|
||||
leader_schedule(epoch, bank).map(|leader_schedule| leader_schedule[slot_index as usize])
|
||||
leader_schedule(epoch, bank).map(|leader_schedule| leader_schedule[slot_index])
|
||||
}
|
||||
|
||||
/// Return the next slot after the given current_slot that the given node will be leader
|
||||
pub fn next_leader_slot(pubkey: &Pubkey, mut current_slot: u64, bank: &Bank) -> Option<u64> {
|
||||
let (mut epoch, mut start_index) = bank.get_epoch_and_slot_index(current_slot + 1);
|
||||
while let Some(leader_schedule) = leader_schedule(epoch, bank) {
|
||||
// clippy thinks I should do this:
|
||||
// for (i, <item>) in leader_schedule
|
||||
// .iter()
|
||||
// .enumerate()
|
||||
// .take(bank.get_slots_in_epoch(epoch))
|
||||
// .skip(from_slot_index + 1) {
|
||||
//
|
||||
// but leader_schedule doesn't implement Iter...
|
||||
#[allow(clippy::needless_range_loop)]
|
||||
for i in start_index..bank.get_slots_in_epoch(epoch) {
|
||||
current_slot += 1;
|
||||
if *pubkey == leader_schedule[i] {
|
||||
return Some(current_slot);
|
||||
}
|
||||
}
|
||||
|
||||
epoch += 1;
|
||||
start_index = 0;
|
||||
}
|
||||
None
|
||||
}
|
||||
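// Sketch, not part of this change: a caller that knows its own identity can ask how far
// away its next leader slot is. `my_pubkey` is illustrative; `bank` is the working bank.
fn slots_until_my_turn(my_pubkey: &Pubkey, bank: &Bank) -> Option<u64> {
    // next_leader_slot scans schedules epoch by epoch and returns the first slot after the
    // current one that is assigned to `my_pubkey`, or None if no schedule can be generated.
    next_leader_slot(my_pubkey, bank.slot(), bank).map(|slot| slot - bank.slot())
}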
|
||||
// Returns the number of ticks remaining from the specified tick_height to the end of the
|
||||
@ -43,8 +75,8 @@ pub fn num_ticks_left_in_slot(bank: &Bank, tick_height: u64) -> u64 {
|
||||
bank.ticks_per_slot() - tick_height % bank.ticks_per_slot() - 1
|
||||
}
|
||||
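// Worked example: with 8 ticks per slot and tick_height = 10, num_ticks_left_in_slot
// returns 8 - (10 % 8) - 1 = 5, i.e. five more ticks before the slot boundary.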
|
||||
pub fn tick_height_to_slot(bank: &Bank, tick_height: u64) -> u64 {
|
||||
tick_height / bank.ticks_per_slot()
|
||||
pub fn tick_height_to_slot(ticks_per_slot: u64, tick_height: u64) -> u64 {
|
||||
tick_height / ticks_per_slot
|
||||
}
|
||||
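// Sketch, not part of this change: the bank itself is no longer needed here, only its tick
// rate. Assuming 8 ticks per slot:
#[test]
fn tick_height_to_slot_sketch() {
    assert_eq!(tick_height_to_slot(8, 0), 0); // first tick of slot 0
    assert_eq!(tick_height_to_slot(8, 7), 0); // last tick of slot 0
    assert_eq!(tick_height_to_slot(8, 8), 1); // first tick of slot 1
    assert_eq!(tick_height_to_slot(8, 17), 2); // 17 / 8 == 2
}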
|
||||
#[cfg(test)]
|
||||
@ -54,6 +86,40 @@ mod tests {
|
||||
use solana_sdk::genesis_block::{GenesisBlock, BOOTSTRAP_LEADER_LAMPORTS};
|
||||
use solana_sdk::signature::{Keypair, KeypairUtil};
|
||||
|
||||
#[test]
|
||||
fn test_next_leader_slot() {
|
||||
let pubkey = Keypair::new().pubkey();
|
||||
let mut genesis_block = GenesisBlock::new_with_leader(
|
||||
BOOTSTRAP_LEADER_LAMPORTS,
|
||||
&pubkey,
|
||||
BOOTSTRAP_LEADER_LAMPORTS,
|
||||
)
|
||||
.0;
|
||||
genesis_block.epoch_warmup = false;
|
||||
|
||||
let bank = Bank::new(&genesis_block);
|
||||
assert_eq!(slot_leader_at(bank.slot(), &bank).unwrap(), pubkey);
|
||||
assert_eq!(next_leader_slot(&pubkey, 0, &bank), Some(1));
|
||||
assert_eq!(next_leader_slot(&pubkey, 1, &bank), Some(2));
|
||||
assert_eq!(
|
||||
next_leader_slot(
|
||||
&pubkey,
|
||||
2 * genesis_block.slots_per_epoch - 1, // no schedule generated for epoch 2
|
||||
&bank
|
||||
),
|
||||
None
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
next_leader_slot(
|
||||
&Keypair::new().pubkey(), // not in leader_schedule
|
||||
0,
|
||||
&bank
|
||||
),
|
||||
None
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_leader_schedule_via_bank() {
|
||||
let pubkey = Keypair::new().pubkey();
|
||||
@ -66,8 +132,12 @@ mod tests {
|
||||
|
||||
let ids_and_stakes: Vec<_> = staking_utils::delegated_stakes(&bank).into_iter().collect();
|
||||
let seed = [0u8; 32];
|
||||
let leader_schedule =
|
||||
LeaderSchedule::new(&ids_and_stakes, seed, genesis_block.slots_per_epoch);
|
||||
let leader_schedule = LeaderSchedule::new(
|
||||
&ids_and_stakes,
|
||||
seed,
|
||||
genesis_block.slots_per_epoch,
|
||||
NUM_CONSECUTIVE_LEADER_SLOTS,
|
||||
);
|
||||
|
||||
assert_eq!(leader_schedule[0], pubkey);
|
||||
assert_eq!(leader_schedule[1], pubkey);
|
||||
|
@ -3,7 +3,6 @@
|
||||
//! [Fullnode](server/struct.Fullnode.html)) as well as hooks to GPU implementations of its most
|
||||
//! parallelizable components (i.e. [SigVerify](sigverify/index.html)). It also includes
|
||||
//! command-line tools to spin up fullnodes and a Rust library
|
||||
//! (see [ThinClient](thin_client/struct.ThinClient.html)) to interact with them.
|
||||
//!
|
||||
|
||||
pub mod bank_forks;
|
||||
@ -14,7 +13,6 @@ pub mod broadcast_stage;
|
||||
pub mod chacha;
|
||||
#[cfg(all(feature = "chacha", feature = "cuda"))]
|
||||
pub mod chacha_cuda;
|
||||
pub mod client;
|
||||
pub mod cluster_info_vote_listener;
|
||||
#[macro_use]
|
||||
pub mod contact_info;
|
||||
@ -29,6 +27,7 @@ pub mod blocktree;
|
||||
pub mod blockstream;
|
||||
pub mod blockstream_service;
|
||||
pub mod blocktree_processor;
|
||||
pub mod cluster;
|
||||
pub mod cluster_info;
|
||||
pub mod cluster_tests;
|
||||
pub mod db_window;
|
||||
@ -46,6 +45,7 @@ pub mod leader_schedule;
|
||||
pub mod leader_schedule_utils;
|
||||
pub mod local_cluster;
|
||||
pub mod local_vote_signer_service;
|
||||
pub mod locktower;
|
||||
pub mod packet;
|
||||
pub mod poh;
|
||||
pub mod poh_recorder;
|
||||
@ -57,10 +57,8 @@ pub mod replicator;
|
||||
pub mod result;
|
||||
pub mod retransmit_stage;
|
||||
pub mod rpc;
|
||||
pub mod rpc_mock;
|
||||
pub mod rpc_pubsub;
|
||||
pub mod rpc_pubsub_service;
|
||||
pub mod rpc_request;
|
||||
pub mod rpc_service;
|
||||
pub mod rpc_status;
|
||||
pub mod rpc_subscriptions;
|
||||
@ -71,7 +69,6 @@ pub mod staking_utils;
|
||||
pub mod storage_stage;
|
||||
pub mod streamer;
|
||||
pub mod test_tx;
|
||||
pub mod thin_client;
|
||||
pub mod tpu;
|
||||
pub mod tvu;
|
||||
pub mod voting_keypair;
|
||||
|
@ -1,29 +1,47 @@
|
||||
use crate::blocktree::{create_new_tmp_ledger, tmp_copy_blocktree};
|
||||
use crate::client::mk_client;
|
||||
use crate::cluster_info::Node;
|
||||
use crate::cluster::Cluster;
|
||||
use crate::cluster_info::{Node, FULLNODE_PORT_RANGE};
|
||||
use crate::contact_info::ContactInfo;
|
||||
use crate::fullnode::{Fullnode, FullnodeConfig};
|
||||
use crate::gossip_service::discover;
|
||||
use crate::service::Service;
|
||||
use crate::thin_client::retry_get_balance;
|
||||
use crate::thin_client::ThinClient;
|
||||
use solana_client::client::create_client;
|
||||
use solana_client::thin_client::{retry_get_balance, ThinClient};
|
||||
use solana_sdk::genesis_block::GenesisBlock;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_sdk::signature::{Keypair, KeypairUtil};
|
||||
use solana_sdk::system_transaction::SystemTransaction;
|
||||
use solana_sdk::timing::DEFAULT_SLOTS_PER_EPOCH;
|
||||
use solana_sdk::timing::DEFAULT_TICKS_PER_SLOT;
|
||||
use solana_vote_api::vote_state::VoteState;
|
||||
use solana_vote_api::vote_transaction::VoteTransaction;
|
||||
use std::collections::HashMap;
|
||||
use std::fs::remove_dir_all;
|
||||
use std::io::{Error, ErrorKind, Result};
|
||||
use std::sync::Arc;
|
||||
|
||||
pub struct FullnodeInfo {
|
||||
pub keypair: Arc<Keypair>,
|
||||
pub ledger_path: String,
|
||||
}
|
||||
|
||||
impl FullnodeInfo {
|
||||
fn new(keypair: Arc<Keypair>, ledger_path: String) -> Self {
|
||||
Self {
|
||||
keypair,
|
||||
ledger_path,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct LocalCluster {
|
||||
/// Keypair with funding to participate in the network
|
||||
pub funding_keypair: Keypair,
|
||||
pub fullnode_config: FullnodeConfig,
|
||||
/// Entry point from which the rest of the network can be discovered
|
||||
pub entry_point_info: ContactInfo,
|
||||
pub ledger_paths: Vec<String>,
|
||||
fullnodes: Vec<Fullnode>,
|
||||
pub fullnodes: HashMap<Pubkey, Fullnode>,
|
||||
pub fullnode_infos: HashMap<Pubkey, FullnodeInfo>,
|
||||
}
|
||||
|
||||
impl LocalCluster {
|
||||
@ -36,17 +54,32 @@ impl LocalCluster {
|
||||
node_stakes: &[u64],
|
||||
cluster_lamports: u64,
|
||||
fullnode_config: &FullnodeConfig,
|
||||
) -> Self {
|
||||
Self::new_with_tick_config(
|
||||
node_stakes,
|
||||
cluster_lamports,
|
||||
fullnode_config,
|
||||
DEFAULT_TICKS_PER_SLOT,
|
||||
DEFAULT_SLOTS_PER_EPOCH,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn new_with_tick_config(
|
||||
node_stakes: &[u64],
|
||||
cluster_lamports: u64,
|
||||
fullnode_config: &FullnodeConfig,
|
||||
ticks_per_slot: u64,
|
||||
slots_per_epoch: u64,
|
||||
) -> Self {
|
||||
let leader_keypair = Arc::new(Keypair::new());
|
||||
let leader_pubkey = leader_keypair.pubkey();
|
||||
let leader_node = Node::new_localhost_with_pubkey(&leader_keypair.pubkey());
|
||||
let (genesis_block, mint_keypair) =
|
||||
let (mut genesis_block, mint_keypair) =
|
||||
GenesisBlock::new_with_leader(cluster_lamports, &leader_pubkey, node_stakes[0]);
|
||||
genesis_block.ticks_per_slot = ticks_per_slot;
|
||||
genesis_block.slots_per_epoch = slots_per_epoch;
|
||||
let (genesis_ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_block);
|
||||
let leader_ledger_path = tmp_copy_blocktree!(&genesis_ledger_path);
|
||||
let mut ledger_paths = vec![];
|
||||
ledger_paths.push(genesis_ledger_path.clone());
|
||||
ledger_paths.push(leader_ledger_path.clone());
|
||||
let voting_keypair = Keypair::new();
|
||||
let leader_contact_info = leader_node.info.clone();
|
||||
let leader_server = Fullnode::new(
|
||||
@ -58,8 +91,18 @@ impl LocalCluster {
|
||||
None,
|
||||
fullnode_config,
|
||||
);
|
||||
let mut fullnodes = vec![leader_server];
|
||||
let mut client = mk_client(&leader_contact_info);
|
||||
let mut fullnodes = HashMap::new();
|
||||
let mut fullnode_infos = HashMap::new();
|
||||
fullnodes.insert(leader_pubkey, leader_server);
|
||||
fullnode_infos.insert(
|
||||
leader_pubkey,
|
||||
FullnodeInfo::new(leader_keypair.clone(), leader_ledger_path),
|
||||
);
|
||||
|
||||
let mut client = create_client(
|
||||
leader_contact_info.client_facing_addr(),
|
||||
FULLNODE_PORT_RANGE,
|
||||
);
|
||||
for stake in &node_stakes[1..] {
|
||||
// Must have enough tokens to fund vote account and set delegate
|
||||
assert!(*stake > 2);
|
||||
@ -68,7 +111,6 @@ impl LocalCluster {
|
||||
let validator_pubkey = validator_keypair.pubkey();
|
||||
let validator_node = Node::new_localhost_with_pubkey(&validator_keypair.pubkey());
|
||||
let ledger_path = tmp_copy_blocktree!(&genesis_ledger_path);
|
||||
ledger_paths.push(ledger_path.clone());
|
||||
|
||||
// Send each validator some lamports to vote
|
||||
let validator_balance =
|
||||
@ -94,34 +136,40 @@ impl LocalCluster {
|
||||
Some(&leader_contact_info),
|
||||
fullnode_config,
|
||||
);
|
||||
fullnodes.push(validator_server);
|
||||
fullnodes.insert(validator_keypair.pubkey(), validator_server);
|
||||
fullnode_infos.insert(
|
||||
validator_keypair.pubkey(),
|
||||
FullnodeInfo::new(validator_keypair.clone(), ledger_path),
|
||||
);
|
||||
}
|
||||
discover(&leader_contact_info.gossip, node_stakes.len()).unwrap();
|
||||
Self {
|
||||
funding_keypair: mint_keypair,
|
||||
entry_point_info: leader_contact_info,
|
||||
fullnodes,
|
||||
ledger_paths,
|
||||
fullnode_config: fullnode_config.clone(),
|
||||
fullnode_infos,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn exit(&self) {
|
||||
for node in &self.fullnodes {
|
||||
for node in self.fullnodes.values() {
|
||||
node.exit();
|
||||
}
|
||||
}
|
||||
|
||||
pub fn close_preserve_ledgers(&mut self) {
|
||||
self.exit();
|
||||
while let Some(node) = self.fullnodes.pop() {
|
||||
for (_, node) in self.fullnodes.drain() {
|
||||
node.join().unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
fn close(&mut self) {
|
||||
self.close_preserve_ledgers();
|
||||
for path in &self.ledger_paths {
|
||||
remove_dir_all(path).unwrap_or_else(|_| panic!("Unable to remove {}", path));
|
||||
for info in self.fullnode_infos.values() {
|
||||
remove_dir_all(&info.ledger_path)
|
||||
.unwrap_or_else(|_| panic!("Unable to remove {}", info.ledger_path));
|
||||
}
|
||||
}
|
||||
|
||||
@ -185,7 +233,7 @@ impl LocalCluster {
|
||||
}
|
||||
|
||||
info!("Checking for vote account registration");
|
||||
let vote_account_user_data = client.get_account_userdata(&vote_account_pubkey);
|
||||
let vote_account_user_data = client.get_account_data(&vote_account_pubkey);
|
||||
if let Ok(Some(vote_account_user_data)) = vote_account_user_data {
|
||||
if let Ok(vote_state) = VoteState::deserialize(&vote_account_user_data) {
|
||||
if vote_state.delegate_id == delegate_id {
|
||||
@ -201,6 +249,38 @@ impl LocalCluster {
|
||||
}
|
||||
}
|
||||
|
||||
impl Cluster for LocalCluster {
|
||||
fn restart_node(&mut self, pubkey: Pubkey) {
|
||||
// Shut down the fullnode
|
||||
let node = self.fullnodes.remove(&pubkey).unwrap();
|
||||
node.exit();
|
||||
node.join().unwrap();
|
||||
|
||||
// Restart the node
|
||||
let fullnode_info = &self.fullnode_infos[&pubkey];
|
||||
let node = Node::new_localhost_with_pubkey(&fullnode_info.keypair.pubkey());
|
||||
if pubkey == self.entry_point_info.id {
|
||||
self.entry_point_info = node.info.clone();
|
||||
}
|
||||
let new_voting_keypair = Keypair::new();
|
||||
let restarted_node = Fullnode::new(
|
||||
node,
|
||||
&fullnode_info.keypair,
|
||||
&fullnode_info.ledger_path,
|
||||
&new_voting_keypair.pubkey(),
|
||||
new_voting_keypair,
|
||||
None,
|
||||
&self.fullnode_config,
|
||||
);
|
||||
|
||||
self.fullnodes.insert(pubkey, restarted_node);
|
||||
}
|
||||
|
||||
fn get_node_ids(&self) -> Vec<Pubkey> {
|
||||
self.fullnodes.keys().cloned().collect()
|
||||
}
|
||||
}
|
||||
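// Sketch, not part of this change: with the Cluster impl above, tests can restart every
// node generically. `cluster` is assumed to be a running LocalCluster.
fn restart_all_nodes(cluster: &mut LocalCluster) {
    // Collect the ids first so the mutable borrow for restart_node does not conflict.
    for id in cluster.get_node_ids() {
        cluster.restart_node(id);
    }
}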
|
||||
impl Drop for LocalCluster {
|
||||
fn drop(&mut self) {
|
||||
self.close();
|
||||
@ -222,6 +302,6 @@ mod test {
|
||||
solana_logger::setup();
|
||||
let mut fullnode_exit = FullnodeConfig::default();
|
||||
fullnode_exit.rpc_config.enable_fullnode_exit = true;
|
||||
let _cluster = LocalCluster::new_with_config(&[3], 100, &fullnode_exit);
|
||||
let _cluster = LocalCluster::new_with_tick_config(&[3], 100, &fullnode_exit, 16, 16);
|
||||
}
|
||||
}
|
||||
|
635
core/src/locktower.rs
Normal file
@ -0,0 +1,635 @@
|
||||
use crate::bank_forks::BankForks;
|
||||
use crate::staking_utils;
|
||||
use hashbrown::{HashMap, HashSet};
|
||||
use solana_metrics::influxdb;
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::account::Account;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_vote_api::vote_instruction::Vote;
|
||||
use solana_vote_api::vote_state::{Lockout, VoteState, MAX_LOCKOUT_HISTORY};
|
||||
|
||||
pub const VOTE_THRESHOLD_DEPTH: usize = 8;
|
||||
pub const VOTE_THRESHOLD_SIZE: f64 = 2f64 / 3f64;
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct EpochStakes {
|
||||
slot: u64,
|
||||
stakes: HashMap<Pubkey, u64>,
|
||||
self_staked: u64,
|
||||
total_staked: u64,
|
||||
delegate_id: Pubkey,
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct StakeLockout {
|
||||
lockout: u64,
|
||||
stake: u64,
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct Locktower {
|
||||
epoch_stakes: EpochStakes,
|
||||
threshold_depth: usize,
|
||||
threshold_size: f64,
|
||||
lockouts: VoteState,
|
||||
}
|
||||
|
||||
impl EpochStakes {
|
||||
pub fn new(slot: u64, stakes: HashMap<Pubkey, u64>, delegate_id: &Pubkey) -> Self {
|
||||
let total_staked = stakes.values().sum();
|
||||
let self_staked = *stakes.get(&delegate_id).unwrap_or(&0);
|
||||
Self {
|
||||
slot,
|
||||
stakes,
|
||||
total_staked,
|
||||
self_staked,
|
||||
delegate_id: *delegate_id,
|
||||
}
|
||||
}
|
||||
pub fn new_for_tests(lamports: u64) -> Self {
|
||||
Self::new(
|
||||
0,
|
||||
vec![(Pubkey::default(), lamports)].into_iter().collect(),
|
||||
&Pubkey::default(),
|
||||
)
|
||||
}
|
||||
pub fn new_from_stake_accounts(slot: u64, accounts: &[(Pubkey, Account)]) -> Self {
|
||||
let stakes = accounts.iter().map(|(k, v)| (*k, v.lamports)).collect();
|
||||
Self::new(slot, stakes, &accounts[0].0)
|
||||
}
|
||||
pub fn new_from_bank(bank: &Bank) -> Self {
|
||||
let bank_epoch = bank.get_epoch_and_slot_index(bank.slot()).0;
|
||||
let stakes = staking_utils::vote_account_balances_at_epoch(bank, bank_epoch)
|
||||
.expect("voting require a bank with stakes");
|
||||
Self::new(bank_epoch, stakes, &bank.collector_id())
|
||||
}
|
||||
}
|
||||
|
||||
impl Locktower {
|
||||
pub fn new_from_forks(bank_forks: &BankForks) -> Self {
|
||||
//TODO: which bank to start with?
|
||||
let mut frozen_banks: Vec<_> = bank_forks.frozen_banks().values().cloned().collect();
|
||||
frozen_banks.sort_by_key(|b| (b.parents().len(), b.slot()));
|
||||
if let Some(bank) = frozen_banks.last() {
|
||||
Self::new_from_bank(bank)
|
||||
} else {
|
||||
Self::default()
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new_from_bank(bank: &Bank) -> Self {
|
||||
let current_epoch = bank.get_epoch_and_slot_index(bank.slot()).0;
|
||||
let mut lockouts = VoteState::default();
|
||||
if let Some(iter) = staking_utils::node_staked_accounts_at_epoch(bank, current_epoch) {
|
||||
for (delegate_id, _, account) in iter {
|
||||
if *delegate_id == bank.collector_id() {
|
||||
let state = VoteState::deserialize(&account.data).expect("votes");
|
||||
if lockouts.votes.len() < state.votes.len() {
|
||||
//TODO: which state to init with?
|
||||
lockouts = state;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
let epoch_stakes = EpochStakes::new_from_bank(bank);
|
||||
Self {
|
||||
epoch_stakes,
|
||||
threshold_depth: VOTE_THRESHOLD_DEPTH,
|
||||
threshold_size: VOTE_THRESHOLD_SIZE,
|
||||
lockouts,
|
||||
}
|
||||
}
|
||||
pub fn new(epoch_stakes: EpochStakes, threshold_depth: usize, threshold_size: f64) -> Self {
|
||||
Self {
|
||||
epoch_stakes,
|
||||
threshold_depth,
|
||||
threshold_size,
|
||||
lockouts: VoteState::default(),
|
||||
}
|
||||
}
|
||||
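/// Aggregate, for every fork slot, the voting stake and accumulated lockouts across all
/// staked vote accounts, after simulating a vote on `bank_slot` for each of them.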
pub fn collect_vote_lockouts<F>(
|
||||
&self,
|
||||
bank_slot: u64,
|
||||
vote_accounts: F,
|
||||
ancestors: &HashMap<u64, HashSet<u64>>,
|
||||
) -> HashMap<u64, StakeLockout>
|
||||
where
|
||||
F: Iterator<Item = (Pubkey, Account)>,
|
||||
{
|
||||
let mut stake_lockouts = HashMap::new();
|
||||
for (key, account) in vote_accounts {
|
||||
let lamports: u64 = *self.epoch_stakes.stakes.get(&key).unwrap_or(&0);
|
||||
if lamports == 0 {
|
||||
continue;
|
||||
}
|
||||
let mut vote_state: VoteState = VoteState::deserialize(&account.data)
|
||||
.expect("bank should always have valid VoteState data");
|
||||
if key == self.epoch_stakes.delegate_id
|
||||
|| vote_state.delegate_id == self.epoch_stakes.delegate_id
|
||||
{
|
||||
solana_metrics::submit(
|
||||
influxdb::Point::new("counter-locktower-observed")
|
||||
.add_field(
|
||||
"slot",
|
||||
influxdb::Value::Integer(
|
||||
vote_state.nth_recent_vote(0).map(|v| v.slot).unwrap_or(0) as i64,
|
||||
),
|
||||
)
|
||||
.add_field(
|
||||
"root",
|
||||
influxdb::Value::Integer(vote_state.root_slot.unwrap_or(0) as i64),
|
||||
)
|
||||
.to_owned(),
|
||||
);
|
||||
}
|
||||
let start_root = vote_state.root_slot;
|
||||
vote_state.process_vote(Vote { slot: bank_slot });
|
||||
for vote in &vote_state.votes {
|
||||
Self::update_ancestor_lockouts(&mut stake_lockouts, &vote, ancestors);
|
||||
}
|
||||
if start_root != vote_state.root_slot {
|
||||
if let Some(root) = start_root {
|
||||
let vote = Lockout {
|
||||
confirmation_count: MAX_LOCKOUT_HISTORY as u32,
|
||||
slot: root,
|
||||
};
|
||||
trace!("ROOT: {}", vote.slot);
|
||||
Self::update_ancestor_lockouts(&mut stake_lockouts, &vote, ancestors);
|
||||
}
|
||||
}
|
||||
if let Some(root) = vote_state.root_slot {
|
||||
let vote = Lockout {
|
||||
confirmation_count: MAX_LOCKOUT_HISTORY as u32,
|
||||
slot: root,
|
||||
};
|
||||
Self::update_ancestor_lockouts(&mut stake_lockouts, &vote, ancestors);
|
||||
}
|
||||
// each account has a stake for all the forks in the active tree for this bank
|
||||
Self::update_ancestor_stakes(&mut stake_lockouts, bank_slot, lamports, ancestors);
|
||||
}
|
||||
stake_lockouts
|
||||
}
|
||||
|
||||
pub fn update_epoch(&mut self, bank: &Bank) {
|
||||
let bank_epoch = bank.get_epoch_and_slot_index(bank.slot()).0;
|
||||
if bank_epoch != self.epoch_stakes.slot {
|
||||
assert!(
|
||||
bank_epoch > self.epoch_stakes.slot,
|
||||
"epoch_stakes cannot move backwards"
|
||||
);
|
||||
self.epoch_stakes = EpochStakes::new_from_bank(bank);
|
||||
solana_metrics::submit(
|
||||
influxdb::Point::new("counter-locktower-epoch")
|
||||
.add_field(
|
||||
"slot",
|
||||
influxdb::Value::Integer(self.epoch_stakes.slot as i64),
|
||||
)
|
||||
.add_field(
|
||||
"self_staked",
|
||||
influxdb::Value::Integer(self.epoch_stakes.self_staked as i64),
|
||||
)
|
||||
.add_field(
|
||||
"total_staked",
|
||||
influxdb::Value::Integer(self.epoch_stakes.total_staked as i64),
|
||||
)
|
||||
.to_owned(),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
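/// Record a vote on `slot` in the local lockouts, returning the new root slot if this
/// vote caused the root to advance.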
pub fn record_vote(&mut self, slot: u64) -> Option<u64> {
|
||||
let root_slot = self.lockouts.root_slot;
|
||||
self.lockouts.process_vote(Vote { slot });
|
||||
solana_metrics::submit(
|
||||
influxdb::Point::new("counter-locktower-vote")
|
||||
.add_field("latest", influxdb::Value::Integer(slot as i64))
|
||||
.add_field(
|
||||
"root",
|
||||
influxdb::Value::Integer(self.lockouts.root_slot.unwrap_or(0) as i64),
|
||||
)
|
||||
.to_owned(),
|
||||
);
|
||||
if root_slot != self.lockouts.root_slot {
|
||||
Some(self.lockouts.root_slot.unwrap())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
pub fn calculate_weight(&self, stake_lockouts: &HashMap<u64, StakeLockout>) -> u128 {
|
||||
let mut sum = 0u128;
|
||||
let root_slot = self.lockouts.root_slot.unwrap_or(0);
|
||||
for (slot, stake_lockout) in stake_lockouts {
|
||||
if self.lockouts.root_slot.is_some() && *slot <= root_slot {
|
||||
continue;
|
||||
}
|
||||
sum += u128::from(stake_lockout.lockout) * u128::from(stake_lockout.stake)
|
||||
}
|
||||
sum
|
||||
}
|
||||
|
||||
pub fn has_voted(&self, slot: u64) -> bool {
|
||||
for vote in &self.lockouts.votes {
|
||||
if vote.slot == slot {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
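/// Simulate a vote on `slot` and return true if any still-active prior lockout (or the
/// resulting root) is on a fork that does not contain `slot`.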
pub fn is_locked_out(&self, slot: u64, descendants: &HashMap<u64, HashSet<u64>>) -> bool {
|
||||
let mut lockouts = self.lockouts.clone();
|
||||
lockouts.process_vote(Vote { slot });
|
||||
for vote in &lockouts.votes {
|
||||
if vote.slot == slot {
|
||||
continue;
|
||||
}
|
||||
if !descendants[&vote.slot].contains(&slot) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
if let Some(root) = lockouts.root_slot {
|
||||
!descendants[&root].contains(&slot)
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
pub fn check_vote_stake_threshold(
|
||||
&self,
|
||||
slot: u64,
|
||||
stake_lockouts: &HashMap<u64, StakeLockout>,
|
||||
) -> bool {
|
||||
let mut lockouts = self.lockouts.clone();
|
||||
lockouts.process_vote(Vote { slot });
|
||||
let vote = lockouts.nth_recent_vote(self.threshold_depth);
|
||||
if let Some(vote) = vote {
|
||||
if let Some(fork_stake) = stake_lockouts.get(&vote.slot) {
|
||||
(fork_stake.stake as f64 / self.epoch_stakes.total_staked as f64)
|
||||
> self.threshold_size
|
||||
} else {
|
||||
false
|
||||
}
|
||||
} else {
|
||||
true
|
||||
}
|
||||
}
|
||||
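// Worked example: with total_staked = 2 and threshold_size = 0.67, the vote at
// threshold_depth passes only if its fork carries stake 2 (2/2 > 0.67); a fork with
// stake 1 fails (1/2 <= 0.67). If there is no vote at that depth yet, the check passes
// trivially (see the threshold tests below).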
|
||||
/// Update lockouts for all the ancestors
|
||||
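/// e.g. a vote on slot 2 whose lockout() is 2 and whose ancestors are {0, 1} adds 2 to
/// the lockout entries of slots 0, 1 and 2 (see test_lockout_is_updated_for_entire_branch).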
fn update_ancestor_lockouts(
|
||||
stake_lockouts: &mut HashMap<u64, StakeLockout>,
|
||||
vote: &Lockout,
|
||||
ancestors: &HashMap<u64, HashSet<u64>>,
|
||||
) {
|
||||
let mut slot_with_ancestors = vec![vote.slot];
|
||||
slot_with_ancestors.extend(ancestors.get(&vote.slot).unwrap_or(&HashSet::new()));
|
||||
for slot in slot_with_ancestors {
|
||||
let entry = &mut stake_lockouts.entry(slot).or_default();
|
||||
entry.lockout += vote.lockout();
|
||||
}
|
||||
}
|
||||
|
||||
/// Update stake for all the ancestors.
|
||||
/// Note that the stake is the same for all the ancestors.
|
||||
fn update_ancestor_stakes(
|
||||
stake_lockouts: &mut HashMap<u64, StakeLockout>,
|
||||
slot: u64,
|
||||
lamports: u64,
|
||||
ancestors: &HashMap<u64, HashSet<u64>>,
|
||||
) {
|
||||
let mut slot_with_ancestors = vec![slot];
|
||||
slot_with_ancestors.extend(ancestors.get(&slot).unwrap_or(&HashSet::new()));
|
||||
for slot in slot_with_ancestors {
|
||||
let entry = &mut stake_lockouts.entry(slot).or_default();
|
||||
entry.stake += lamports;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use solana_sdk::signature::{Keypair, KeypairUtil};
|
||||
|
||||
fn gen_accounts(stake_votes: &[(u64, &[u64])]) -> Vec<(Pubkey, Account)> {
|
||||
let mut accounts = vec![];
|
||||
for (lamports, votes) in stake_votes {
|
||||
let mut account = Account::default();
|
||||
account.data = vec![0; 1024];
|
||||
account.lamports = *lamports;
|
||||
let mut vote_state = VoteState::default();
|
||||
for slot in *votes {
|
||||
vote_state.process_vote(Vote { slot: *slot });
|
||||
}
|
||||
vote_state
|
||||
.serialize(&mut account.data)
|
||||
.expect("serialize state");
|
||||
accounts.push((Keypair::new().pubkey(), account));
|
||||
}
|
||||
accounts
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_collect_vote_lockouts_no_epoch_stakes() {
|
||||
let accounts = gen_accounts(&[(1, &[0])]);
|
||||
let epoch_stakes = EpochStakes::new_for_tests(2);
|
||||
let locktower = Locktower::new(epoch_stakes, 0, 0.67);
|
||||
let ancestors = vec![(1, vec![0].into_iter().collect()), (0, HashSet::new())]
|
||||
.into_iter()
|
||||
.collect();
|
||||
let staked_lockouts = locktower.collect_vote_lockouts(1, accounts.into_iter(), &ancestors);
|
||||
assert!(staked_lockouts.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_collect_vote_lockouts_sums() {
|
||||
//two accounts voting for slot 0 with 1 token staked
|
||||
let accounts = gen_accounts(&[(1, &[0]), (1, &[0])]);
|
||||
let epoch_stakes = EpochStakes::new_from_stake_accounts(0, &accounts);
|
||||
let locktower = Locktower::new(epoch_stakes, 0, 0.67);
|
||||
let ancestors = vec![(1, vec![0].into_iter().collect()), (0, HashSet::new())]
|
||||
.into_iter()
|
||||
.collect();
|
||||
let staked_lockouts = locktower.collect_vote_lockouts(1, accounts.into_iter(), &ancestors);
|
||||
assert_eq!(staked_lockouts[&0].stake, 2);
|
||||
assert_eq!(staked_lockouts[&0].lockout, 2 + 2 + 4 + 4);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_collect_vote_lockouts_root() {
|
||||
let votes: Vec<u64> = (0..MAX_LOCKOUT_HISTORY as u64).into_iter().collect();
|
||||
//two accounts voting for slot 0 with 1 token staked
|
||||
let accounts = gen_accounts(&[(1, &votes), (1, &votes)]);
|
||||
let epoch_stakes = EpochStakes::new_from_stake_accounts(0, &accounts);
|
||||
let mut locktower = Locktower::new(epoch_stakes, 0, 0.67);
|
||||
let mut ancestors = HashMap::new();
|
||||
for i in 0..(MAX_LOCKOUT_HISTORY + 1) {
|
||||
locktower.record_vote(i as u64);
|
||||
ancestors.insert(i as u64, (0..i as u64).into_iter().collect());
|
||||
}
|
||||
assert_eq!(locktower.lockouts.root_slot, Some(0));
|
||||
let staked_lockouts = locktower.collect_vote_lockouts(
|
||||
MAX_LOCKOUT_HISTORY as u64,
|
||||
accounts.into_iter(),
|
||||
&ancestors,
|
||||
);
|
||||
for i in 0..MAX_LOCKOUT_HISTORY {
|
||||
assert_eq!(staked_lockouts[&(i as u64)].stake, 2);
|
||||
}
|
||||
// should be the sum of all the weights for root
|
||||
assert!(staked_lockouts[&0].lockout > (2 * (1 << MAX_LOCKOUT_HISTORY)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_calculate_weight_skips_root() {
|
||||
let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 0, 0.67);
|
||||
locktower.lockouts.root_slot = Some(1);
|
||||
let stakes = vec![
|
||||
(
|
||||
0,
|
||||
StakeLockout {
|
||||
stake: 1,
|
||||
lockout: 8,
|
||||
},
|
||||
),
|
||||
(
|
||||
1,
|
||||
StakeLockout {
|
||||
stake: 1,
|
||||
lockout: 8,
|
||||
},
|
||||
),
|
||||
]
|
||||
.into_iter()
|
||||
.collect();
|
||||
assert_eq!(locktower.calculate_weight(&stakes), 0u128);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_calculate_weight() {
|
||||
let locktower = Locktower::new(EpochStakes::new_for_tests(2), 0, 0.67);
|
||||
let stakes = vec![(
|
||||
0,
|
||||
StakeLockout {
|
||||
stake: 1,
|
||||
lockout: 8,
|
||||
},
|
||||
)]
|
||||
.into_iter()
|
||||
.collect();
|
||||
assert_eq!(locktower.calculate_weight(&stakes), 8u128);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_check_vote_threshold_without_votes() {
|
||||
let locktower = Locktower::new(EpochStakes::new_for_tests(2), 1, 0.67);
|
||||
let stakes = vec![(
|
||||
0,
|
||||
StakeLockout {
|
||||
stake: 1,
|
||||
lockout: 8,
|
||||
},
|
||||
)]
|
||||
.into_iter()
|
||||
.collect();
|
||||
assert!(locktower.check_vote_stake_threshold(0, &stakes));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_is_locked_out_empty() {
|
||||
let locktower = Locktower::new(EpochStakes::new_for_tests(2), 0, 0.67);
|
||||
let descendants = HashMap::new();
|
||||
assert!(!locktower.is_locked_out(0, &descendants));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_is_locked_out_root_slot_child_pass() {
|
||||
let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 0, 0.67);
|
||||
let descendants = vec![(0, vec![1].into_iter().collect())]
|
||||
.into_iter()
|
||||
.collect();
|
||||
locktower.lockouts.root_slot = Some(0);
|
||||
assert!(!locktower.is_locked_out(1, &descendants));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_is_locked_out_root_slot_sibling_fail() {
|
||||
let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 0, 0.67);
|
||||
let descendants = vec![(0, vec![1].into_iter().collect())]
|
||||
.into_iter()
|
||||
.collect();
|
||||
locktower.lockouts.root_slot = Some(0);
|
||||
assert!(locktower.is_locked_out(2, &descendants));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_check_already_voted() {
|
||||
let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 0, 0.67);
|
||||
locktower.record_vote(0);
|
||||
assert!(locktower.has_voted(0));
|
||||
assert!(!locktower.has_voted(1));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_is_locked_out_double_vote() {
|
||||
let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 0, 0.67);
|
||||
let descendants = vec![(0, vec![1].into_iter().collect()), (1, HashSet::new())]
|
||||
.into_iter()
|
||||
.collect();
|
||||
locktower.record_vote(0);
|
||||
locktower.record_vote(1);
|
||||
assert!(locktower.is_locked_out(0, &descendants));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_is_locked_out_child() {
|
||||
let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 0, 0.67);
|
||||
let descendants = vec![(0, vec![1].into_iter().collect())]
|
||||
.into_iter()
|
||||
.collect();
|
||||
locktower.record_vote(0);
|
||||
assert!(!locktower.is_locked_out(1, &descendants));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_is_locked_out_sibling() {
|
||||
let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 0, 0.67);
|
||||
let descendants = vec![
|
||||
(0, vec![1, 2].into_iter().collect()),
|
||||
(1, HashSet::new()),
|
||||
(2, HashSet::new()),
|
||||
]
|
||||
.into_iter()
|
||||
.collect();
|
||||
locktower.record_vote(0);
|
||||
locktower.record_vote(1);
|
||||
assert!(locktower.is_locked_out(2, &descendants));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_is_locked_out_last_vote_expired() {
|
||||
let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 0, 0.67);
|
||||
let descendants = vec![(0, vec![1, 4].into_iter().collect()), (1, HashSet::new())]
|
||||
.into_iter()
|
||||
.collect();
|
||||
locktower.record_vote(0);
|
||||
locktower.record_vote(1);
|
||||
assert!(!locktower.is_locked_out(4, &descendants));
|
||||
locktower.record_vote(4);
|
||||
assert_eq!(locktower.lockouts.votes[0].slot, 0);
|
||||
assert_eq!(locktower.lockouts.votes[0].confirmation_count, 2);
|
||||
assert_eq!(locktower.lockouts.votes[1].slot, 4);
|
||||
assert_eq!(locktower.lockouts.votes[1].confirmation_count, 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_check_vote_threshold_below_threshold() {
|
||||
let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 1, 0.67);
|
||||
let stakes = vec![(
|
||||
0,
|
||||
StakeLockout {
|
||||
stake: 1,
|
||||
lockout: 8,
|
||||
},
|
||||
)]
|
||||
.into_iter()
|
||||
.collect();
|
||||
locktower.record_vote(0);
|
||||
assert!(!locktower.check_vote_stake_threshold(1, &stakes));
|
||||
}
|
||||
#[test]
|
||||
fn test_check_vote_threshold_above_threshold() {
|
||||
let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 1, 0.67);
|
||||
let stakes = vec![(
|
||||
0,
|
||||
StakeLockout {
|
||||
stake: 2,
|
||||
lockout: 8,
|
||||
},
|
||||
)]
|
||||
.into_iter()
|
||||
.collect();
|
||||
locktower.record_vote(0);
|
||||
assert!(locktower.check_vote_stake_threshold(1, &stakes));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_check_vote_threshold_above_threshold_after_pop() {
|
||||
let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 1, 0.67);
|
||||
let stakes = vec![(
|
||||
0,
|
||||
StakeLockout {
|
||||
stake: 2,
|
||||
lockout: 8,
|
||||
},
|
||||
)]
|
||||
.into_iter()
|
||||
.collect();
|
||||
locktower.record_vote(0);
|
||||
locktower.record_vote(1);
|
||||
locktower.record_vote(2);
|
||||
assert!(locktower.check_vote_stake_threshold(6, &stakes));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_check_vote_threshold_above_threshold_no_stake() {
|
||||
let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 1, 0.67);
|
||||
let stakes = HashMap::new();
|
||||
locktower.record_vote(0);
|
||||
assert!(!locktower.check_vote_stake_threshold(1, &stakes));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_lockout_is_updated_for_entire_branch() {
|
||||
let mut stake_lockouts = HashMap::new();
|
||||
let vote = Lockout {
|
||||
slot: 2,
|
||||
confirmation_count: 1,
|
||||
};
|
||||
let set: HashSet<u64> = vec![0u64, 1u64].into_iter().collect();
|
||||
let mut ancestors = HashMap::new();
|
||||
ancestors.insert(2, set);
|
||||
let set: HashSet<u64> = vec![0u64].into_iter().collect();
|
||||
ancestors.insert(1, set);
|
||||
Locktower::update_ancestor_lockouts(&mut stake_lockouts, &vote, &ancestors);
|
||||
assert_eq!(stake_lockouts[&0].lockout, 2);
|
||||
assert_eq!(stake_lockouts[&1].lockout, 2);
|
||||
assert_eq!(stake_lockouts[&2].lockout, 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_lockout_is_updated_for_slot_or_lower() {
|
||||
let mut stake_lockouts = HashMap::new();
|
||||
let set: HashSet<u64> = vec![0u64, 1u64].into_iter().collect();
|
||||
let mut ancestors = HashMap::new();
|
||||
ancestors.insert(2, set);
|
||||
let set: HashSet<u64> = vec![0u64].into_iter().collect();
|
||||
ancestors.insert(1, set);
|
||||
let vote = Lockout {
|
||||
slot: 2,
|
||||
confirmation_count: 1,
|
||||
};
|
||||
Locktower::update_ancestor_lockouts(&mut stake_lockouts, &vote, &ancestors);
|
||||
let vote = Lockout {
|
||||
slot: 1,
|
||||
confirmation_count: 2,
|
||||
};
|
||||
Locktower::update_ancestor_lockouts(&mut stake_lockouts, &vote, &ancestors);
|
||||
assert_eq!(stake_lockouts[&0].lockout, 2 + 4);
|
||||
assert_eq!(stake_lockouts[&1].lockout, 2 + 4);
|
||||
assert_eq!(stake_lockouts[&2].lockout, 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_stake_is_updated_for_entire_branch() {
|
||||
let mut stake_lockouts = HashMap::new();
|
||||
let mut account = Account::default();
|
||||
account.lamports = 1;
|
||||
let set: HashSet<u64> = vec![0u64, 1u64].into_iter().collect();
|
||||
let ancestors: HashMap<u64, HashSet<u64>> = [(2u64, set)].into_iter().cloned().collect();
|
||||
Locktower::update_ancestor_stakes(&mut stake_lockouts, 2, account.lamports, &ancestors);
|
||||
assert_eq!(stake_lockouts[&0].stake, 1);
|
||||
assert_eq!(stake_lockouts[&1].stake, 1);
|
||||
assert_eq!(stake_lockouts[&2].stake, 1);
|
||||
}
|
||||
}
|
@ -11,14 +11,18 @@
|
||||
//! * recorded entry must be >= WorkingBank::min_tick_height && entry must be < WorkingBank::max_tick_height
|
||||
//!
|
||||
use crate::entry::Entry;
|
||||
use crate::leader_schedule_utils;
|
||||
use crate::poh::Poh;
|
||||
use crate::result::{Error, Result};
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::hash::Hash;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_sdk::transaction::Transaction;
|
||||
use std::sync::mpsc::{channel, Receiver, Sender, SyncSender};
|
||||
use std::sync::Arc;
|
||||
|
||||
const MAX_LAST_LEADER_GRACE_TICKS_FACTOR: u64 = 2;
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, Clone)]
|
||||
pub enum PohRecorderError {
|
||||
InvalidCallingObject,
|
||||
@ -37,15 +41,32 @@ pub struct WorkingBank {
|
||||
|
||||
pub struct PohRecorder {
|
||||
pub poh: Poh,
|
||||
pub clear_bank_signal: Option<SyncSender<bool>>,
|
||||
start_slot: u64,
|
||||
start_tick: u64,
|
||||
tick_cache: Vec<(Entry, u64)>,
|
||||
working_bank: Option<WorkingBank>,
|
||||
sender: Sender<WorkingBankEntries>,
|
||||
pub clear_bank_signal: Option<SyncSender<bool>>,
|
||||
start_leader_at_tick: Option<u64>,
|
||||
last_leader_tick: Option<u64>,
|
||||
max_last_leader_grace_ticks: u64,
|
||||
id: Pubkey,
|
||||
}
|
||||
|
||||
impl PohRecorder {
|
||||
pub fn clear_bank(&mut self) {
|
||||
self.working_bank = None;
|
||||
if let Some(working_bank) = self.working_bank.take() {
|
||||
let bank = working_bank.bank;
|
||||
let next_leader_slot =
|
||||
leader_schedule_utils::next_leader_slot(&self.id, bank.slot(), &bank);
|
||||
let (start_leader_at_tick, last_leader_tick) = Self::compute_leader_slot_ticks(
|
||||
&next_leader_slot,
|
||||
bank.ticks_per_slot(),
|
||||
self.max_last_leader_grace_ticks,
|
||||
);
|
||||
self.start_leader_at_tick = start_leader_at_tick;
|
||||
self.last_leader_tick = last_leader_tick;
|
||||
}
|
||||
if let Some(ref signal) = self.clear_bank_signal {
|
||||
let _ = signal.try_send(true);
|
||||
}
|
||||
@ -57,35 +78,94 @@ impl PohRecorder {
|
||||
self.poh.hash();
|
||||
}
|
||||
|
||||
pub fn start_slot(&self) -> u64 {
|
||||
self.start_slot
|
||||
}
|
||||
|
||||
pub fn bank(&self) -> Option<Arc<Bank>> {
|
||||
self.working_bank.clone().map(|w| w.bank)
|
||||
}
|
||||
|
||||
pub fn tick_height(&self) -> u64 {
|
||||
self.poh.tick_height
|
||||
}
|
||||
|
||||
// Returns whether the leader tick has been reached, and how many grace ticks were afforded
|
||||
pub fn reached_leader_tick(&self) -> (bool, u64) {
|
||||
self.start_leader_at_tick
|
||||
.map(|target_tick| {
|
||||
debug!(
|
||||
"Current tick {}, start tick {} target {}, grace {}",
|
||||
self.tick_height(),
|
||||
self.start_tick,
|
||||
target_tick,
|
||||
self.max_last_leader_grace_ticks
|
||||
);
|
||||
|
||||
let leader_ideal_start_tick =
|
||||
target_tick.saturating_sub(self.max_last_leader_grace_ticks);
|
||||
|
||||
// Is the current tick in the same slot as the target tick?
// Check whether the grace period has expired,
// or whether the target tick is within the grace period of the start tick
// (i.e. the PoH recorder was just reset).
|
||||
if self.tick_height() <= self.last_leader_tick.unwrap_or(0)
|
||||
&& (self.tick_height() >= target_tick
|
||||
|| self.max_last_leader_grace_ticks
|
||||
>= target_tick.saturating_sub(self.start_tick))
|
||||
{
|
||||
return (
|
||||
true,
|
||||
self.tick_height().saturating_sub(leader_ideal_start_tick),
|
||||
);
|
||||
}
|
||||
|
||||
(false, 0)
|
||||
})
|
||||
.unwrap_or((false, 0))
|
||||
}
|
||||
|
||||
fn compute_leader_slot_ticks(
|
||||
next_leader_slot: &Option<u64>,
|
||||
ticks_per_slot: u64,
|
||||
grace_ticks: u64,
|
||||
) -> (Option<u64>, Option<u64>) {
|
||||
next_leader_slot
|
||||
.map(|slot| {
|
||||
(
|
||||
Some(slot * ticks_per_slot + grace_ticks),
|
||||
Some((slot + 1) * ticks_per_slot - 1),
|
||||
)
|
||||
})
|
||||
.unwrap_or((None, None))
|
||||
}
|
||||
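// Worked example: for next_leader_slot = Some(2), ticks_per_slot = 8 and grace_ticks = 4,
// this returns (Some(2 * 8 + 4), Some(3 * 8 - 1)) = (Some(20), Some(23)): the node may
// begin up to 4 grace ticks after its slot starts at tick 16, and tick 23 is the last
// tick of slot 2. A None next_leader_slot yields (None, None).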
|
||||
// synchronize PoH with a bank
|
||||
pub fn reset(&mut self, tick_height: u64, blockhash: Hash) {
|
||||
pub fn reset(
|
||||
&mut self,
|
||||
tick_height: u64,
|
||||
blockhash: Hash,
|
||||
start_slot: u64,
|
||||
my_next_leader_slot: Option<u64>,
|
||||
ticks_per_slot: u64,
|
||||
) {
|
||||
self.clear_bank();
|
||||
let existing = self.tick_cache.iter().any(|(entry, entry_tick_height)| {
|
||||
if entry.hash == blockhash {
|
||||
assert_eq!(*entry_tick_height, tick_height);
|
||||
}
|
||||
entry.hash == blockhash
|
||||
});
|
||||
if existing {
|
||||
info!(
|
||||
"reset skipped for: {},{}",
|
||||
self.poh.hash, self.poh.tick_height
|
||||
);
|
||||
return;
|
||||
}
|
||||
let mut cache = vec![];
|
||||
info!(
|
||||
"reset poh from: {},{} to: {},{}",
|
||||
self.poh.hash, self.poh.tick_height, blockhash, tick_height,
|
||||
);
|
||||
std::mem::swap(&mut cache, &mut self.tick_cache);
|
||||
self.start_slot = start_slot;
|
||||
self.start_tick = tick_height + 1;
|
||||
self.poh = Poh::new(blockhash, tick_height);
|
||||
self.max_last_leader_grace_ticks = ticks_per_slot / MAX_LAST_LEADER_GRACE_TICKS_FACTOR;
|
||||
let (start_leader_at_tick, last_leader_tick) = Self::compute_leader_slot_ticks(
|
||||
&my_next_leader_slot,
|
||||
ticks_per_slot,
|
||||
self.max_last_leader_grace_ticks,
|
||||
);
|
||||
self.start_leader_at_tick = start_leader_at_tick;
|
||||
self.last_leader_tick = last_leader_tick;
|
||||
}
|
||||
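// Note (sketch, not part of this change): callers that previously did
//     poh_recorder.reset(tick_height, blockhash);
// now also pass the start slot, this node's next leader slot (if known) and the tick rate,
//     poh_recorder.reset(tick_height, blockhash, start_slot, Some(next_slot), ticks_per_slot);
// so that the grace-tick window above can be recomputed; the names here are illustrative.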
|
||||
pub fn set_working_bank(&mut self, working_bank: WorkingBank) {
|
||||
@ -151,6 +231,7 @@ impl PohRecorder {
|
||||
"poh_record: max_tick_height reached, setting working bank {} to None",
|
||||
working_bank.bank.slot()
|
||||
);
|
||||
self.start_slot = working_bank.max_tick_height / working_bank.bank.ticks_per_slot();
|
||||
self.clear_bank();
|
||||
}
|
||||
if e.is_err() {
|
||||
@ -166,6 +247,10 @@ impl PohRecorder {
|
||||
}
|
||||
|
||||
pub fn tick(&mut self) {
|
||||
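// No-op when this node has no upcoming leader slot: ticks are neither generated nor cached.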
if self.start_leader_at_tick.is_none() {
|
||||
return;
|
||||
}
|
||||
|
||||
let tick = self.generate_tick();
|
||||
trace!("tick {}", tick.1);
|
||||
self.tick_cache.push(tick);
|
||||
@ -180,9 +265,22 @@ impl PohRecorder {
|
||||
/// A recorder to synchronize PoH with the following data structures
|
||||
/// * bank - the LastId's queue is updated on `tick` and `record` events
|
||||
/// * sender - the Entry channel that outputs to the ledger
|
||||
pub fn new(tick_height: u64, last_entry_hash: Hash) -> (Self, Receiver<WorkingBankEntries>) {
|
||||
pub fn new(
|
||||
tick_height: u64,
|
||||
last_entry_hash: Hash,
|
||||
start_slot: u64,
|
||||
my_leader_slot_index: Option<u64>,
|
||||
ticks_per_slot: u64,
|
||||
id: &Pubkey,
|
||||
) -> (Self, Receiver<WorkingBankEntries>) {
|
||||
let poh = Poh::new(last_entry_hash, tick_height);
|
||||
let (sender, receiver) = channel();
|
||||
let max_last_leader_grace_ticks = ticks_per_slot / MAX_LAST_LEADER_GRACE_TICKS_FACTOR;
|
||||
let (start_leader_at_tick, last_leader_tick) = Self::compute_leader_slot_ticks(
|
||||
&my_leader_slot_index,
|
||||
ticks_per_slot,
|
||||
max_last_leader_grace_ticks,
|
||||
);
|
||||
(
|
||||
PohRecorder {
|
||||
poh,
|
||||
@ -190,6 +288,12 @@ impl PohRecorder {
|
||||
working_bank: None,
|
||||
sender,
|
||||
clear_bank_signal: None,
|
||||
start_slot,
|
||||
start_tick: tick_height + 1,
|
||||
start_leader_at_tick,
|
||||
last_leader_tick,
|
||||
max_last_leader_grace_ticks,
|
||||
id: *id,
|
||||
},
|
||||
receiver,
|
||||
)
|
||||
@ -235,13 +339,21 @@ mod tests {
|
||||
use crate::test_tx::test_tx;
|
||||
use solana_sdk::genesis_block::GenesisBlock;
|
||||
use solana_sdk::hash::hash;
|
||||
use solana_sdk::timing::DEFAULT_TICKS_PER_SLOT;
|
||||
use std::sync::mpsc::sync_channel;
|
||||
use std::sync::Arc;
|
||||
|
||||
#[test]
|
||||
fn test_poh_recorder_no_zero_tick() {
|
||||
let prev_hash = Hash::default();
|
||||
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(0, prev_hash);
|
||||
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
|
||||
0,
|
||||
prev_hash,
|
||||
0,
|
||||
Some(4),
|
||||
DEFAULT_TICKS_PER_SLOT,
|
||||
&Pubkey::default(),
|
||||
);
|
||||
poh_recorder.tick();
|
||||
assert_eq!(poh_recorder.tick_cache.len(), 1);
|
||||
assert_eq!(poh_recorder.tick_cache[0].1, 1);
|
||||
@ -251,7 +363,14 @@ mod tests {
|
||||
#[test]
|
||||
fn test_poh_recorder_tick_height_is_last_tick() {
|
||||
let prev_hash = Hash::default();
|
||||
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(0, prev_hash);
|
||||
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
|
||||
0,
|
||||
prev_hash,
|
||||
0,
|
||||
Some(4),
|
||||
DEFAULT_TICKS_PER_SLOT,
|
||||
&Pubkey::default(),
|
||||
);
|
||||
poh_recorder.tick();
|
||||
poh_recorder.tick();
|
||||
assert_eq!(poh_recorder.tick_cache.len(), 2);
|
||||
@ -261,10 +380,17 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_poh_recorder_reset_clears_cache() {
|
||||
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(0, Hash::default());
|
||||
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
|
||||
0,
|
||||
Hash::default(),
|
||||
0,
|
||||
Some(4),
|
||||
DEFAULT_TICKS_PER_SLOT,
|
||||
&Pubkey::default(),
|
||||
);
|
||||
poh_recorder.tick();
|
||||
assert_eq!(poh_recorder.tick_cache.len(), 1);
|
||||
poh_recorder.reset(0, Hash::default());
|
||||
poh_recorder.reset(0, Hash::default(), 0, Some(4), DEFAULT_TICKS_PER_SLOT);
|
||||
assert_eq!(poh_recorder.tick_cache.len(), 0);
|
||||
}
|
||||
|
||||
@ -273,7 +399,14 @@ mod tests {
|
||||
let (genesis_block, _mint_keypair) = GenesisBlock::new(2);
|
||||
let bank = Arc::new(Bank::new(&genesis_block));
|
||||
let prev_hash = bank.last_blockhash();
|
||||
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(0, prev_hash);
|
||||
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
|
||||
0,
|
||||
prev_hash,
|
||||
0,
|
||||
Some(4),
|
||||
bank.ticks_per_slot(),
|
||||
&Pubkey::default(),
|
||||
);
|
||||
|
||||
let working_bank = WorkingBank {
|
||||
bank,
|
||||
@ -291,7 +424,14 @@ mod tests {
|
||||
let (genesis_block, _mint_keypair) = GenesisBlock::new(2);
|
||||
let bank = Arc::new(Bank::new(&genesis_block));
|
||||
let prev_hash = bank.last_blockhash();
|
||||
let (mut poh_recorder, entry_receiver) = PohRecorder::new(0, prev_hash);
|
||||
let (mut poh_recorder, entry_receiver) = PohRecorder::new(
|
||||
0,
|
||||
prev_hash,
|
||||
0,
|
||||
Some(4),
|
||||
bank.ticks_per_slot(),
|
||||
&Pubkey::default(),
|
||||
);
|
||||
|
||||
let working_bank = WorkingBank {
|
||||
bank: bank.clone(),
|
||||
@ -321,7 +461,14 @@ mod tests {
|
||||
let (genesis_block, _mint_keypair) = GenesisBlock::new(2);
|
||||
let bank = Arc::new(Bank::new(&genesis_block));
|
||||
let prev_hash = bank.last_blockhash();
|
||||
let (mut poh_recorder, entry_receiver) = PohRecorder::new(0, prev_hash);
|
||||
let (mut poh_recorder, entry_receiver) = PohRecorder::new(
|
||||
0,
|
||||
prev_hash,
|
||||
0,
|
||||
Some(4),
|
||||
bank.ticks_per_slot(),
|
||||
&Pubkey::default(),
|
||||
);
|
||||
|
||||
poh_recorder.tick();
|
||||
poh_recorder.tick();
|
||||
@ -349,7 +496,14 @@ mod tests {
|
||||
let (genesis_block, _mint_keypair) = GenesisBlock::new(2);
|
||||
let bank = Arc::new(Bank::new(&genesis_block));
|
||||
let prev_hash = bank.last_blockhash();
|
||||
let (mut poh_recorder, entry_receiver) = PohRecorder::new(0, prev_hash);
|
||||
let (mut poh_recorder, entry_receiver) = PohRecorder::new(
|
||||
0,
|
||||
prev_hash,
|
||||
0,
|
||||
Some(4),
|
||||
bank.ticks_per_slot(),
|
||||
&Pubkey::default(),
|
||||
);
|
||||
|
||||
let working_bank = WorkingBank {
|
||||
bank,
|
||||
@ -369,7 +523,14 @@ mod tests {
|
||||
let (genesis_block, _mint_keypair) = GenesisBlock::new(2);
|
||||
let bank = Arc::new(Bank::new(&genesis_block));
|
||||
let prev_hash = bank.last_blockhash();
|
||||
let (mut poh_recorder, entry_receiver) = PohRecorder::new(0, prev_hash);
|
||||
let (mut poh_recorder, entry_receiver) = PohRecorder::new(
|
||||
0,
|
||||
prev_hash,
|
||||
0,
|
||||
Some(4),
|
||||
bank.ticks_per_slot(),
|
||||
&Pubkey::default(),
|
||||
);
|
||||
|
||||
let working_bank = WorkingBank {
|
||||
bank,
|
||||
@ -398,7 +559,14 @@ mod tests {
|
||||
let (genesis_block, _mint_keypair) = GenesisBlock::new(2);
|
||||
let bank = Arc::new(Bank::new(&genesis_block));
|
||||
let prev_hash = bank.last_blockhash();
|
||||
let (mut poh_recorder, entry_receiver) = PohRecorder::new(0, prev_hash);
|
||||
let (mut poh_recorder, entry_receiver) = PohRecorder::new(
|
||||
0,
|
||||
prev_hash,
|
||||
0,
|
||||
Some(4),
|
||||
bank.ticks_per_slot(),
|
||||
&Pubkey::default(),
|
||||
);
|
||||
|
||||
let working_bank = WorkingBank {
|
||||
bank,
|
||||
@ -424,7 +592,14 @@ mod tests {
|
||||
let (genesis_block, _mint_keypair) = GenesisBlock::new(2);
|
||||
let bank = Arc::new(Bank::new(&genesis_block));
|
||||
let prev_hash = bank.last_blockhash();
|
||||
let (mut poh_recorder, entry_receiver) = PohRecorder::new(0, prev_hash);
|
||||
let (mut poh_recorder, entry_receiver) = PohRecorder::new(
|
||||
0,
|
||||
prev_hash,
|
||||
0,
|
||||
Some(4),
|
||||
bank.ticks_per_slot(),
|
||||
&Pubkey::default(),
|
||||
);
|
||||
|
||||
let working_bank = WorkingBank {
|
||||
bank,
|
||||
@ -443,55 +618,66 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_reset_current() {
|
||||
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(0, Hash::default());
|
||||
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
|
||||
0,
|
||||
Hash::default(),
|
||||
0,
|
||||
Some(4),
|
||||
DEFAULT_TICKS_PER_SLOT,
|
||||
&Pubkey::default(),
|
||||
);
|
||||
poh_recorder.tick();
|
||||
poh_recorder.tick();
|
||||
assert_eq!(poh_recorder.tick_cache.len(), 2);
|
||||
poh_recorder.reset(poh_recorder.poh.tick_height, poh_recorder.poh.hash);
|
||||
assert_eq!(poh_recorder.tick_cache.len(), 2);
|
||||
poh_recorder.reset(
|
||||
poh_recorder.poh.tick_height,
|
||||
poh_recorder.poh.hash,
|
||||
0,
|
||||
Some(4),
|
||||
DEFAULT_TICKS_PER_SLOT,
|
||||
);
|
||||
assert_eq!(poh_recorder.tick_cache.len(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_reset_with_cached() {
|
||||
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(0, Hash::default());
|
||||
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
|
||||
0,
|
||||
Hash::default(),
|
||||
0,
|
||||
Some(4),
|
||||
DEFAULT_TICKS_PER_SLOT,
|
||||
&Pubkey::default(),
|
||||
);
|
||||
poh_recorder.tick();
|
||||
poh_recorder.tick();
|
||||
assert_eq!(poh_recorder.tick_cache.len(), 2);
|
||||
poh_recorder.reset(
|
||||
poh_recorder.tick_cache[0].1,
|
||||
poh_recorder.tick_cache[0].0.hash,
|
||||
0,
|
||||
Some(4),
|
||||
DEFAULT_TICKS_PER_SLOT,
|
||||
);
|
||||
assert_eq!(poh_recorder.tick_cache.len(), 2);
|
||||
poh_recorder.reset(
|
||||
poh_recorder.tick_cache[1].1,
|
||||
poh_recorder.tick_cache[1].0.hash,
|
||||
);
|
||||
assert_eq!(poh_recorder.tick_cache.len(), 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn test_reset_with_cached_bad_height() {
|
||||
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(0, Hash::default());
|
||||
poh_recorder.tick();
|
||||
poh_recorder.tick();
|
||||
assert_eq!(poh_recorder.tick_cache.len(), 2);
|
||||
//mixed up heights
|
||||
poh_recorder.reset(
|
||||
poh_recorder.tick_cache[0].1,
|
||||
poh_recorder.tick_cache[1].0.hash,
|
||||
);
|
||||
assert_eq!(poh_recorder.tick_cache.len(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_reset_to_new_value() {
|
||||
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(0, Hash::default());
|
||||
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
|
||||
0,
|
||||
Hash::default(),
|
||||
0,
|
||||
Some(4),
|
||||
DEFAULT_TICKS_PER_SLOT,
|
||||
&Pubkey::default(),
|
||||
);
|
||||
poh_recorder.tick();
|
||||
poh_recorder.tick();
|
||||
poh_recorder.tick();
|
||||
assert_eq!(poh_recorder.tick_cache.len(), 3);
|
||||
assert_eq!(poh_recorder.poh.tick_height, 3);
|
||||
poh_recorder.reset(1, hash(b"hello"));
|
||||
poh_recorder.reset(1, hash(b"hello"), 0, Some(4), DEFAULT_TICKS_PER_SLOT);
|
||||
assert_eq!(poh_recorder.tick_cache.len(), 0);
|
||||
poh_recorder.tick();
|
||||
assert_eq!(poh_recorder.poh.tick_height, 2);
|
||||
@ -501,14 +687,22 @@ mod tests {
|
||||
fn test_reset_clear_bank() {
|
||||
let (genesis_block, _mint_keypair) = GenesisBlock::new(2);
|
||||
let bank = Arc::new(Bank::new(&genesis_block));
|
||||
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(0, Hash::default());
|
||||
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
|
||||
0,
|
||||
Hash::default(),
|
||||
0,
|
||||
Some(4),
|
||||
bank.ticks_per_slot(),
|
||||
&Pubkey::default(),
|
||||
);
|
||||
let ticks_per_slot = bank.ticks_per_slot();
|
||||
let working_bank = WorkingBank {
|
||||
bank,
|
||||
min_tick_height: 2,
|
||||
max_tick_height: 3,
|
||||
};
|
||||
poh_recorder.set_working_bank(working_bank);
|
||||
poh_recorder.reset(1, hash(b"hello"));
|
||||
poh_recorder.reset(1, hash(b"hello"), 0, Some(4), ticks_per_slot);
|
||||
assert!(poh_recorder.working_bank.is_none());
|
||||
}
|
||||
|
||||
@ -516,11 +710,217 @@ mod tests {
|
||||
pub fn test_clear_signal() {
|
||||
let (genesis_block, _mint_keypair) = GenesisBlock::new(2);
|
||||
let bank = Arc::new(Bank::new(&genesis_block));
|
||||
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(0, Hash::default());
|
||||
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
|
||||
0,
|
||||
Hash::default(),
|
||||
0,
|
||||
None,
|
||||
bank.ticks_per_slot(),
|
||||
&Pubkey::default(),
|
||||
);
|
||||
let (sender, receiver) = sync_channel(1);
|
||||
poh_recorder.set_bank(&bank);
|
||||
poh_recorder.clear_bank_signal = Some(sender);
|
||||
poh_recorder.clear_bank();
|
||||
assert!(receiver.try_recv().is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_poh_recorder_reset_start_slot() {
|
||||
let ticks_per_slot = 5;
|
||||
let (mut genesis_block, _mint_keypair) = GenesisBlock::new(2);
|
||||
genesis_block.ticks_per_slot = ticks_per_slot;
|
||||
let bank = Arc::new(Bank::new(&genesis_block));
|
||||
|
||||
let prev_hash = bank.last_blockhash();
|
||||
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
|
||||
0,
|
||||
prev_hash,
|
||||
0,
|
||||
Some(4),
|
||||
bank.ticks_per_slot(),
|
||||
&Pubkey::default(),
|
||||
);
|
||||
|
||||
let end_slot = 3;
|
||||
let max_tick_height = (end_slot + 1) * ticks_per_slot - 1;
|
||||
let working_bank = WorkingBank {
|
||||
bank,
|
||||
min_tick_height: 1,
|
||||
max_tick_height,
|
||||
};
|
||||
|
||||
poh_recorder.set_working_bank(working_bank);
|
||||
for _ in 0..max_tick_height {
|
||||
poh_recorder.tick();
|
||||
}
|
||||
|
||||
let tx = test_tx();
|
||||
let h1 = hash(b"hello world!");
|
||||
assert!(poh_recorder.record(h1, vec![tx.clone()]).is_err());
|
||||
assert!(poh_recorder.working_bank.is_none());
|
||||
// Make sure the starting slot is updated
|
||||
assert_eq!(poh_recorder.start_slot(), end_slot);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_reached_leader_tick() {
|
||||
let (genesis_block, _mint_keypair) = GenesisBlock::new(2);
|
||||
let bank = Arc::new(Bank::new(&genesis_block));
|
||||
let prev_hash = bank.last_blockhash();
|
||||
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
|
||||
0,
|
||||
prev_hash,
|
||||
0,
|
||||
None,
|
||||
bank.ticks_per_slot(),
|
||||
&Pubkey::default(),
|
||||
);
|
||||
|
||||
// Test that with no leader slot, we don't reach the leader tick
|
||||
assert_eq!(poh_recorder.reached_leader_tick().0, false);
|
||||
|
||||
for _ in 0..bank.ticks_per_slot() {
|
||||
poh_recorder.tick();
|
||||
}
|
||||
|
||||
// Tick should not be recorded
|
||||
assert_eq!(poh_recorder.tick_height(), 0);
|
||||
|
||||
// Test that with no leader slot, we don't reach the leader tick after sending some ticks
|
||||
assert_eq!(poh_recorder.reached_leader_tick().0, false);
|
||||
|
||||
poh_recorder.reset(
|
||||
poh_recorder.tick_height(),
|
||||
bank.last_blockhash(),
|
||||
0,
|
||||
None,
|
||||
bank.ticks_per_slot(),
|
||||
);
|
||||
|
||||
// Test that with no leader slot in reset(), we don't reach the leader tick
|
||||
assert_eq!(poh_recorder.reached_leader_tick().0, false);
|
||||
|
||||
// Provide a leader slot 1 slot down
|
||||
poh_recorder.reset(
|
||||
bank.ticks_per_slot(),
|
||||
bank.last_blockhash(),
|
||||
0,
|
||||
Some(2),
|
||||
bank.ticks_per_slot(),
|
||||
);
|
||||
|
||||
let init_ticks = poh_recorder.tick_height();
|
||||
|
||||
// Send one slot worth of ticks
|
||||
for _ in 0..bank.ticks_per_slot() {
|
||||
poh_recorder.tick();
|
||||
}
|
||||
|
||||
// Tick should be recorded
|
||||
assert_eq!(
|
||||
poh_recorder.tick_height(),
|
||||
init_ticks + bank.ticks_per_slot()
|
||||
);
|
||||
|
||||
// Test that we don't reach the leader tick because of grace ticks
|
||||
assert_eq!(poh_recorder.reached_leader_tick().0, false);
|
||||
|
||||
// Reset the PoH recorder now; it should discard the grace-tick wait
|
||||
poh_recorder.reset(
|
||||
poh_recorder.tick_height(),
|
||||
bank.last_blockhash(),
|
||||
1,
|
||||
Some(2),
|
||||
bank.ticks_per_slot(),
|
||||
);
|
||||
// without sending more ticks, we should be leader now
|
||||
assert_eq!(poh_recorder.reached_leader_tick().0, true);
|
||||
assert_eq!(poh_recorder.reached_leader_tick().1, 0);
|
||||
|
||||
// Now test that with grace ticks we can reach leader ticks
|
||||
// Set the leader slot 1 slot down
|
||||
poh_recorder.reset(
|
||||
poh_recorder.tick_height(),
|
||||
bank.last_blockhash(),
|
||||
2,
|
||||
Some(3),
|
||||
bank.ticks_per_slot(),
|
||||
);
|
||||
|
||||
// Send one slot worth of ticks
|
||||
for _ in 0..bank.ticks_per_slot() {
|
||||
poh_recorder.tick();
|
||||
}
|
||||
|
||||
// We are not the leader yet, as expected
|
||||
assert_eq!(poh_recorder.reached_leader_tick().0, false);
|
||||
|
||||
// Send 1 less tick than the grace ticks
|
||||
for _ in 0..bank.ticks_per_slot() / MAX_LAST_LEADER_GRACE_TICKS_FACTOR - 1 {
|
||||
poh_recorder.tick();
|
||||
}
|
||||
// We are still not the leader
|
||||
assert_eq!(poh_recorder.reached_leader_tick().0, false);
|
||||
|
||||
// Send one more tick
|
||||
poh_recorder.tick();
|
||||
|
||||
// We should be the leader now
|
||||
assert_eq!(poh_recorder.reached_leader_tick().0, true);
|
||||
assert_eq!(
|
||||
poh_recorder.reached_leader_tick().1,
|
||||
bank.ticks_per_slot() / MAX_LAST_LEADER_GRACE_TICKS_FACTOR
|
||||
);
|
||||
|
||||
// Let's test that correct grace ticks are reported
|
||||
// Set the leader slot 1 slot down
|
||||
poh_recorder.reset(
|
||||
poh_recorder.tick_height(),
|
||||
bank.last_blockhash(),
|
||||
3,
|
||||
Some(4),
|
||||
bank.ticks_per_slot(),
|
||||
);
|
||||
|
||||
// Send remaining ticks for the slot (remember we sent extra ticks in the previous part of the test)
|
||||
for _ in bank.ticks_per_slot() / MAX_LAST_LEADER_GRACE_TICKS_FACTOR..bank.ticks_per_slot() {
|
||||
poh_recorder.tick();
|
||||
}
|
||||
|
||||
// Send one extra tick before resetting (so that there's one grace tick)
|
||||
poh_recorder.tick();
|
||||
|
||||
// We are not the leader yet, as expected
|
||||
assert_eq!(poh_recorder.reached_leader_tick().0, false);
|
||||
poh_recorder.reset(
|
||||
poh_recorder.tick_height(),
|
||||
bank.last_blockhash(),
|
||||
3,
|
||||
Some(4),
|
||||
bank.ticks_per_slot(),
|
||||
);
|
||||
// Without sending more ticks, we should be the leader now
|
||||
assert_eq!(poh_recorder.reached_leader_tick().0, true);
|
||||
assert_eq!(poh_recorder.reached_leader_tick().1, 1);
|
||||
|
||||
// Let's test that if a node overshoots the ticks for its target
|
||||
// leader slot, reached_leader_tick() will return false
|
||||
// Set the leader slot 1 slot down
|
||||
poh_recorder.reset(
|
||||
poh_recorder.tick_height(),
|
||||
bank.last_blockhash(),
|
||||
4,
|
||||
Some(5),
|
||||
bank.ticks_per_slot(),
|
||||
);
|
||||
|
||||
// Overshoot the target leader slot by sending several slots worth of ticks
|
||||
for _ in 0..4 * bank.ticks_per_slot() {
|
||||
poh_recorder.tick();
|
||||
}
|
||||
|
||||
// We are not the leader, as expected
|
||||
assert_eq!(poh_recorder.reached_leader_tick().0, false);
|
||||
}
|
||||
}
|
||||
|
@ -104,13 +104,21 @@ mod tests {
    use solana_runtime::bank::Bank;
    use solana_sdk::genesis_block::GenesisBlock;
    use solana_sdk::hash::hash;
    use solana_sdk::pubkey::Pubkey;

    #[test]
    fn test_poh_service() {
        let (genesis_block, _mint_keypair) = GenesisBlock::new(2);
        let bank = Arc::new(Bank::new(&genesis_block));
        let prev_hash = bank.last_blockhash();
        let (poh_recorder, entry_receiver) = PohRecorder::new(bank.tick_height(), prev_hash);
        let (poh_recorder, entry_receiver) = PohRecorder::new(
            bank.tick_height(),
            prev_hash,
            bank.slot(),
            Some(4),
            bank.ticks_per_slot(),
            &Pubkey::default(),
        );
        let poh_recorder = Arc::new(Mutex::new(poh_recorder));
        let exit = Arc::new(AtomicBool::new(false));
        let working_bank = WorkingBank {
|
||||
|
@ -6,21 +6,23 @@ use crate::blocktree_processor;
|
||||
use crate::cluster_info::ClusterInfo;
|
||||
use crate::entry::{Entry, EntryReceiver, EntrySender, EntrySlice};
|
||||
use crate::leader_schedule_utils;
|
||||
use crate::locktower::Locktower;
|
||||
use crate::packet::BlobError;
|
||||
use crate::poh_recorder::PohRecorder;
|
||||
use crate::result;
|
||||
use crate::rpc_subscriptions::RpcSubscriptions;
|
||||
use crate::service::Service;
|
||||
use solana_metrics::counter::Counter;
|
||||
use solana_metrics::influxdb;
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::hash::Hash;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_sdk::signature::KeypairUtil;
|
||||
use solana_sdk::timing::duration_as_ms;
|
||||
use solana_sdk::timing::{self, duration_as_ms};
|
||||
use solana_vote_api::vote_transaction::VoteTransaction;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::mpsc::{channel, Receiver, RecvTimeoutError};
|
||||
use std::sync::mpsc::{channel, Receiver, RecvTimeoutError, Sender};
|
||||
use std::sync::{Arc, Mutex, RwLock};
|
||||
use std::thread::{self, Builder, JoinHandle};
|
||||
use std::time::Duration;
|
||||
@ -76,6 +78,8 @@ impl ReplayStage {
|
||||
let poh_recorder = poh_recorder.clone();
|
||||
let my_id = *my_id;
|
||||
let vote_account = *vote_account;
|
||||
let mut ticks_per_slot = 0;
|
||||
let mut locktower = Locktower::new_from_forks(&bank_forks.read().unwrap());
|
||||
|
||||
// Start the replay stage loop
|
||||
let t_replay = Builder::new()
|
||||
@ -92,10 +96,10 @@ impl ReplayStage {
|
||||
Self::generate_new_bank_forks(&blocktree, &mut bank_forks.write().unwrap());
|
||||
let active_banks = bank_forks.read().unwrap().active_banks();
|
||||
trace!("active banks {:?}", active_banks);
|
||||
let mut votable: Vec<u64> = vec![];
|
||||
let mut is_tpu_bank_active = poh_recorder.lock().unwrap().bank().is_some();
|
||||
for bank_slot in &active_banks {
|
||||
let bank = bank_forks.read().unwrap().get(*bank_slot).unwrap().clone();
|
||||
ticks_per_slot = bank.ticks_per_slot();
|
||||
if bank.collector_id() != my_id {
|
||||
Self::replay_blocktree_into_bank(
|
||||
&bank,
|
||||
@ -106,51 +110,142 @@ impl ReplayStage {
|
||||
}
|
||||
let max_tick_height = (*bank_slot + 1) * bank.ticks_per_slot() - 1;
|
||||
if bank.tick_height() == max_tick_height {
|
||||
bank.freeze();
|
||||
info!("bank frozen {}", bank.slot());
|
||||
votable.push(*bank_slot);
|
||||
progress.remove(bank_slot);
|
||||
if let Err(e) =
|
||||
slot_full_sender.send((bank.slot(), bank.collector_id()))
|
||||
{
|
||||
info!("{} slot_full alert failed: {:?}", my_id, e);
|
||||
}
|
||||
Self::process_completed_bank(
|
||||
&my_id,
|
||||
bank,
|
||||
&mut progress,
|
||||
&slot_full_sender,
|
||||
);
|
||||
}
|
||||
}
|
||||
// TODO: fork selection
|
||||
// vote on the latest one for now
|
||||
votable.sort();
|
||||
|
||||
if let Some(latest_slot_vote) = votable.last() {
|
||||
let parent = bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(*latest_slot_vote)
|
||||
.unwrap()
|
||||
.clone();
|
||||
if ticks_per_slot == 0 {
|
||||
let frozen_banks = bank_forks.read().unwrap().frozen_banks();
|
||||
let bank = frozen_banks.values().next().unwrap();
|
||||
ticks_per_slot = bank.ticks_per_slot();
|
||||
}
|
||||
|
||||
subscriptions.notify_subscribers(&parent);
|
||||
let locktower_start = Instant::now();
|
||||
// Locktower voting
|
||||
let descendants = bank_forks.read().unwrap().descendants();
|
||||
let ancestors = bank_forks.read().unwrap().ancestors();
|
||||
let frozen_banks = bank_forks.read().unwrap().frozen_banks();
|
||||
|
||||
trace!("frozen_banks {}", frozen_banks.len());
|
||||
let mut votable: Vec<(u128, Arc<Bank>)> = frozen_banks
|
||||
.values()
|
||||
.filter(|b| {
|
||||
let is_votable = b.is_votable();
|
||||
trace!("bank is votable: {} {}", b.slot(), is_votable);
|
||||
is_votable
|
||||
})
|
||||
.filter(|b| {
|
||||
let has_voted = locktower.has_voted(b.slot());
|
||||
trace!("bank is has_voted: {} {}", b.slot(), has_voted);
|
||||
!has_voted
|
||||
})
|
||||
.filter(|b| {
|
||||
let is_locked_out = locktower.is_locked_out(b.slot(), &descendants);
|
||||
trace!("bank is is_locked_out: {} {}", b.slot(), is_locked_out);
|
||||
!is_locked_out
|
||||
})
|
||||
.map(|bank| {
|
||||
(
|
||||
bank,
|
||||
locktower.collect_vote_lockouts(
|
||||
bank.slot(),
|
||||
bank.vote_accounts(),
|
||||
&ancestors,
|
||||
),
|
||||
)
|
||||
})
|
||||
.filter(|(b, stake_lockouts)| {
|
||||
let vote_threshold =
|
||||
locktower.check_vote_stake_threshold(b.slot(), &stake_lockouts);
|
||||
trace!("bank vote_threshold: {} {}", b.slot(), vote_threshold);
|
||||
vote_threshold
|
||||
})
|
||||
.map(|(b, stake_lockouts)| {
|
||||
(locktower.calculate_weight(&stake_lockouts), b.clone())
|
||||
})
|
||||
.collect();
|
||||
|
||||
votable.sort_by_key(|b| b.0);
|
||||
trace!("votable_banks {}", votable.len());
|
||||
let ms = timing::duration_as_ms(&locktower_start.elapsed());
|
||||
if !votable.is_empty() {
|
||||
let weights: Vec<u128> = votable.iter().map(|x| x.0).collect();
|
||||
info!(
|
||||
"@{:?} locktower duration: {:?} len: {} weights: {:?}",
|
||||
timing::timestamp(),
|
||||
ms,
|
||||
votable.len(),
|
||||
weights
|
||||
);
|
||||
}
|
||||
inc_new_counter_info!("replay_stage-locktower_duration", ms as usize);
|
||||
|
||||
if let Some((_, bank)) = votable.last() {
|
||||
subscriptions.notify_subscribers(&bank);
|
||||
|
||||
if let Some(ref voting_keypair) = voting_keypair {
|
||||
let keypair = voting_keypair.as_ref();
|
||||
let vote = VoteTransaction::new_vote(
|
||||
&vote_account,
|
||||
keypair,
|
||||
*latest_slot_vote,
|
||||
parent.last_blockhash(),
|
||||
bank.slot(),
|
||||
bank.last_blockhash(),
|
||||
0,
|
||||
);
|
||||
if let Some(new_root) = locktower.record_vote(bank.slot()) {
|
||||
bank_forks.write().unwrap().set_root(new_root);
|
||||
Self::handle_new_root(&bank_forks, &mut progress);
|
||||
}
|
||||
locktower.update_epoch(&bank);
|
||||
cluster_info.write().unwrap().push_vote(vote);
|
||||
}
|
||||
poh_recorder
|
||||
.lock()
|
||||
.unwrap()
|
||||
.reset(parent.tick_height(), parent.last_blockhash());
|
||||
let next_leader_slot =
|
||||
leader_schedule_utils::next_leader_slot(&my_id, bank.slot(), &bank);
|
||||
poh_recorder.lock().unwrap().reset(
|
||||
bank.tick_height(),
|
||||
bank.last_blockhash(),
|
||||
bank.slot(),
|
||||
next_leader_slot,
|
||||
ticks_per_slot,
|
||||
);
|
||||
debug!(
|
||||
"{:?} voted and reset poh at {}. next leader slot {:?}",
|
||||
my_id,
|
||||
bank.tick_height(),
|
||||
next_leader_slot
|
||||
);
|
||||
is_tpu_bank_active = false;
|
||||
}
|
||||
|
||||
let (reached_leader_tick, grace_ticks) = if !is_tpu_bank_active {
|
||||
let poh = poh_recorder.lock().unwrap();
|
||||
poh.reached_leader_tick()
|
||||
} else {
|
||||
(false, 0)
|
||||
};
|
||||
|
||||
if !is_tpu_bank_active {
|
||||
Self::start_leader(&my_id, &bank_forks, &poh_recorder, &cluster_info);
|
||||
assert!(ticks_per_slot > 0);
|
||||
let poh_tick_height = poh_recorder.lock().unwrap().tick_height();
|
||||
let poh_slot = leader_schedule_utils::tick_height_to_slot(
|
||||
ticks_per_slot,
|
||||
poh_tick_height + 1,
|
||||
);
|
||||
Self::start_leader(
|
||||
&my_id,
|
||||
&bank_forks,
|
||||
&poh_recorder,
|
||||
&cluster_info,
|
||||
&blocktree,
|
||||
poh_slot,
|
||||
reached_leader_tick,
|
||||
grace_ticks,
|
||||
);
|
||||
}
|
||||
|
||||
inc_new_counter_info!(
|
||||
@ -179,54 +274,82 @@ impl ReplayStage {
|
||||
bank_forks: &Arc<RwLock<BankForks>>,
|
||||
poh_recorder: &Arc<Mutex<PohRecorder>>,
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
blocktree: &Blocktree,
|
||||
poh_slot: u64,
|
||||
reached_leader_tick: bool,
|
||||
grace_ticks: u64,
|
||||
) {
|
||||
let frozen = bank_forks.read().unwrap().frozen_banks();
|
||||
trace!("{} checking poh slot {}", my_id, poh_slot);
|
||||
if blocktree.meta(poh_slot).unwrap().is_some() {
|
||||
// We've already broadcasted entries for this slot, skip it
|
||||
|
||||
// TODO: fork selection
|
||||
let mut newest_frozen: Vec<(&u64, &Arc<Bank>)> = frozen.iter().collect();
|
||||
newest_frozen.sort_by_key(|x| *x.0);
|
||||
if let Some((_, parent)) = newest_frozen.last() {
|
||||
let poh_tick_height = poh_recorder.lock().unwrap().tick_height();
|
||||
let poh_slot = leader_schedule_utils::tick_height_to_slot(parent, poh_tick_height + 1);
|
||||
trace!("checking poh slot for leader {}", poh_slot);
|
||||
if frozen.get(&poh_slot).is_some() {
|
||||
// We've already been the leader for this slot, skip it
|
||||
return;
|
||||
// Since we are skipping our leader slot, let's tell poh recorder when we should be
|
||||
// leader again
|
||||
if reached_leader_tick {
|
||||
let _ = bank_forks.read().unwrap().get(poh_slot).map(|bank| {
|
||||
let next_leader_slot =
|
||||
leader_schedule_utils::next_leader_slot(&my_id, bank.slot(), &bank);
|
||||
let mut poh = poh_recorder.lock().unwrap();
|
||||
let start_slot = poh.start_slot();
|
||||
poh.reset(
|
||||
bank.tick_height(),
|
||||
bank.last_blockhash(),
|
||||
start_slot,
|
||||
next_leader_slot,
|
||||
bank.ticks_per_slot(),
|
||||
);
|
||||
});
|
||||
}
|
||||
if bank_forks.read().unwrap().get(poh_slot).is_none() {
|
||||
leader_schedule_utils::slot_leader_at(poh_slot, parent)
|
||||
.map(|next_leader| {
|
||||
debug!(
|
||||
"me: {} leader {} at poh slot {}",
|
||||
my_id, next_leader, poh_slot
|
||||
);
|
||||
cluster_info.write().unwrap().set_leader(&next_leader);
|
||||
if next_leader == *my_id {
|
||||
debug!("starting tpu for slot {}", poh_slot);
|
||||
let tpu_bank = Bank::new_from_parent(parent, my_id, poh_slot);
|
||||
bank_forks.write().unwrap().insert(poh_slot, tpu_bank);
|
||||
if let Some(tpu_bank) = bank_forks.read().unwrap().get(poh_slot).cloned() {
|
||||
assert_eq!(
|
||||
bank_forks.read().unwrap().working_bank().slot(),
|
||||
tpu_bank.slot()
|
||||
);
|
||||
debug!(
|
||||
"poh_recorder new working bank: me: {} next_slot: {} next_leader: {}",
|
||||
my_id,
|
||||
tpu_bank.slot(),
|
||||
next_leader
|
||||
);
|
||||
poh_recorder.lock().unwrap().set_bank(&tpu_bank);
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
if bank_forks.read().unwrap().get(poh_slot).is_none() {
|
||||
let frozen = bank_forks.read().unwrap().frozen_banks();
|
||||
let parent_slot = poh_recorder.lock().unwrap().start_slot();
|
||||
assert!(frozen.contains_key(&parent_slot));
|
||||
let parent = &frozen[&parent_slot];
|
||||
|
||||
leader_schedule_utils::slot_leader_at(poh_slot, parent)
|
||||
.map(|next_leader| {
|
||||
debug!(
|
||||
"me: {} leader {} at poh slot {}",
|
||||
my_id, next_leader, poh_slot
|
||||
);
|
||||
cluster_info.write().unwrap().set_leader(&next_leader);
|
||||
if next_leader == *my_id && reached_leader_tick {
|
||||
debug!("{} starting tpu for slot {}", my_id, poh_slot);
|
||||
solana_metrics::submit(
|
||||
influxdb::Point::new("counter-replay_stage-new_leader")
|
||||
.add_field(
|
||||
"count",
|
||||
influxdb::Value::Integer(poh_slot as i64),
|
||||
)
|
||||
.add_field(
|
||||
"grace",
|
||||
influxdb::Value::Integer(grace_ticks as i64),
|
||||
)
|
||||
.to_owned(),);
|
||||
let tpu_bank = Bank::new_from_parent(parent, my_id, poh_slot);
|
||||
bank_forks.write().unwrap().insert(tpu_bank);
|
||||
if let Some(tpu_bank) = bank_forks.read().unwrap().get(poh_slot).cloned() {
|
||||
assert_eq!(
|
||||
bank_forks.read().unwrap().working_bank().slot(),
|
||||
tpu_bank.slot()
|
||||
);
|
||||
debug!(
|
||||
"poh_recorder new working bank: me: {} next_slot: {} next_leader: {}",
|
||||
my_id,
|
||||
tpu_bank.slot(),
|
||||
next_leader
|
||||
);
|
||||
poh_recorder.lock().unwrap().set_bank(&tpu_bank);
|
||||
}
|
||||
})
|
||||
.or_else(|| {
|
||||
error!("No next leader found");
|
||||
None
|
||||
});
|
||||
}
|
||||
} else {
|
||||
error!("No frozen banks available!");
|
||||
}
|
||||
})
|
||||
.or_else(|| {
|
||||
error!("{} No next leader found", my_id);
|
||||
None
|
||||
});
|
||||
}
|
||||
}
|
||||
pub fn replay_blocktree_into_bank(
|
||||
@ -303,6 +426,28 @@ impl ReplayStage {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn handle_new_root(
|
||||
bank_forks: &Arc<RwLock<BankForks>>,
|
||||
progress: &mut HashMap<u64, (Hash, usize)>,
|
||||
) {
|
||||
let r_bank_forks = bank_forks.read().unwrap();
|
||||
progress.retain(|k, _| r_bank_forks.get(*k).is_some());
|
||||
}
|
||||
|
||||
fn process_completed_bank(
|
||||
my_id: &Pubkey,
|
||||
bank: Arc<Bank>,
|
||||
progress: &mut HashMap<u64, (Hash, usize)>,
|
||||
slot_full_sender: &Sender<(u64, Pubkey)>,
|
||||
) {
|
||||
bank.freeze();
|
||||
info!("bank frozen {}", bank.slot());
|
||||
progress.remove(&bank.slot());
|
||||
if let Err(e) = slot_full_sender.send((bank.slot(), bank.collector_id())) {
|
||||
trace!("{} slot_full alert failed: {:?}", my_id, e);
|
||||
}
|
||||
}
|
||||
|
||||
fn generate_new_bank_forks(blocktree: &Blocktree, forks: &mut BankForks) {
|
||||
// Find the next slot that chains to the old slot
|
||||
let frozen_banks = forks.frozen_banks();
|
||||
@ -311,6 +456,7 @@ impl ReplayStage {
|
||||
let next_slots = blocktree
|
||||
.get_slots_since(&frozen_bank_slots)
|
||||
.expect("Db error");
|
||||
// Filter out what we've already seen
|
||||
trace!("generate new forks {:?}", next_slots);
|
||||
for (parent_id, children) in next_slots {
|
||||
let parent_bank = frozen_banks
|
||||
@ -318,20 +464,13 @@ impl ReplayStage {
|
||||
.expect("missing parent in bank forks")
|
||||
.clone();
|
||||
for child_id in children {
|
||||
if frozen_banks.get(&child_id).is_some() {
|
||||
trace!("child already frozen {}", child_id);
|
||||
continue;
|
||||
}
|
||||
if forks.get(child_id).is_some() {
|
||||
trace!("child already active {}", child_id);
|
||||
trace!("child already active or frozen {}", child_id);
|
||||
continue;
|
||||
}
|
||||
let leader = leader_schedule_utils::slot_leader_at(child_id, &parent_bank).unwrap();
|
||||
info!("new fork:{} parent:{}", child_id, parent_id);
|
||||
forks.insert(
|
||||
child_id,
|
||||
Bank::new_from_parent(&parent_bank, &leader, child_id),
|
||||
);
|
||||
forks.insert(Bank::new_from_parent(&parent_bank, &leader, child_id));
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -349,11 +488,12 @@ impl Service for ReplayStage {
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::banking_stage::create_test_recorder;
|
||||
use crate::blocktree::create_new_tmp_ledger;
|
||||
use crate::blocktree::{create_new_tmp_ledger, get_tmp_ledger_path};
|
||||
use crate::cluster_info::{ClusterInfo, Node};
|
||||
use crate::entry::create_ticks;
|
||||
use crate::entry::{next_entry_mut, Entry};
|
||||
use crate::fullnode::new_banks_from_blocktree;
|
||||
use crate::packet::Blob;
|
||||
use crate::replay_stage::ReplayStage;
|
||||
use crate::result::Error;
|
||||
use solana_sdk::genesis_block::GenesisBlock;
|
||||
@ -486,4 +626,50 @@ mod test {
|
||||
}
|
||||
assert!(forward_entry_receiver.try_recv().is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_child_slots_of_same_parent() {
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
{
|
||||
let blocktree = Arc::new(
|
||||
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
|
||||
);
|
||||
|
||||
let genesis_block = GenesisBlock::new(10_000).0;
|
||||
let bank0 = Bank::new(&genesis_block);
|
||||
let mut bank_forks = BankForks::new(0, bank0);
|
||||
bank_forks.working_bank().freeze();
|
||||
|
||||
// Insert blob for slot 1, generate new forks, check result
|
||||
let mut blob_slot_1 = Blob::default();
|
||||
blob_slot_1.set_slot(1);
|
||||
blob_slot_1.set_parent(0);
|
||||
blocktree.insert_data_blobs(&vec![blob_slot_1]).unwrap();
|
||||
assert!(bank_forks.get(1).is_none());
|
||||
ReplayStage::generate_new_bank_forks(&blocktree, &mut bank_forks);
|
||||
assert!(bank_forks.get(1).is_some());
|
||||
|
||||
// Insert blob for slot 3, generate new forks, check result
|
||||
let mut blob_slot_2 = Blob::default();
|
||||
blob_slot_2.set_slot(2);
|
||||
blob_slot_2.set_parent(0);
|
||||
blocktree.insert_data_blobs(&vec![blob_slot_2]).unwrap();
|
||||
assert!(bank_forks.get(2).is_none());
|
||||
ReplayStage::generate_new_bank_forks(&blocktree, &mut bank_forks);
|
||||
assert!(bank_forks.get(1).is_some());
|
||||
assert!(bank_forks.get(2).is_some());
|
||||
}
|
||||
|
||||
let _ignored = remove_dir_all(&ledger_path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_handle_new_root() {
|
||||
let bank0 = Bank::default();
|
||||
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank0)));
|
||||
let mut progress = HashMap::new();
|
||||
progress.insert(5, (Hash::default(), 0));
|
||||
ReplayStage::handle_new_root(&bank_forks, &mut progress);
|
||||
assert!(progress.is_empty());
|
||||
}
|
||||
}
|
||||
|
@ -3,19 +3,19 @@ use crate::blocktree::Blocktree;
|
||||
use crate::blocktree_processor;
|
||||
#[cfg(feature = "chacha")]
|
||||
use crate::chacha::{chacha_cbc_encrypt_ledger, CHACHA_BLOCK_SIZE};
|
||||
use crate::client::mk_client;
|
||||
use crate::cluster_info::{ClusterInfo, Node};
|
||||
use crate::cluster_info::{ClusterInfo, Node, FULLNODE_PORT_RANGE};
|
||||
use crate::contact_info::ContactInfo;
|
||||
use crate::gossip_service::GossipService;
|
||||
use crate::result::Result;
|
||||
use crate::rpc_request::{RpcClient, RpcRequest, RpcRequestHandler};
|
||||
use crate::service::Service;
|
||||
use crate::storage_stage::{get_segment_from_entry, ENTRIES_PER_SEGMENT};
|
||||
use crate::streamer::BlobReceiver;
|
||||
use crate::thin_client::{retry_get_balance, ThinClient};
|
||||
use crate::window_service::WindowService;
|
||||
use rand::thread_rng;
|
||||
use rand::Rng;
|
||||
use solana_client::client::create_client;
|
||||
use solana_client::rpc_request::{RpcClient, RpcRequest, RpcRequestHandler};
|
||||
use solana_client::thin_client::{retry_get_balance, ThinClient};
|
||||
use solana_drone::drone::{request_airdrop_transaction, DRONE_PORT};
|
||||
use solana_sdk::genesis_block::GenesisBlock;
|
||||
use solana_sdk::hash::{Hash, Hasher};
|
||||
@ -205,7 +205,7 @@ impl Replicator {
|
||||
cluster_info_w.insert_self(contact_info);
|
||||
}
|
||||
|
||||
let mut client = mk_client(leader_info);
|
||||
let mut client = create_client(leader_info.client_facing_addr(), FULLNODE_PORT_RANGE);
|
||||
|
||||
Self::get_airdrop_lamports(&mut client, &keypair, &leader_info);
|
||||
info!("Done downloading ledger at {}", ledger_path);
|
||||
|
@ -513,7 +513,7 @@ mod tests {
            "result":{
              "owner": [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
              "lamports": 20,
              "userdata": [],
              "data": [],
              "executable": false
            },
            "id":1}
|
||||
|
@ -17,7 +17,7 @@ use std::sync::{atomic, Arc};
|
||||
pub trait RpcSolPubSub {
|
||||
type Metadata;
|
||||
|
||||
// Get notification every time account userdata is changed
|
||||
// Get notification every time account data is changed
|
||||
// Accepts pubkey parameter as base-58 encoded string
|
||||
#[pubsub(
|
||||
subscription = "accountNotification",
|
||||
@ -34,7 +34,7 @@ pub trait RpcSolPubSub {
|
||||
)]
|
||||
fn account_unsubscribe(&self, _: Option<Self::Metadata>, _: SubscriptionId) -> Result<bool>;
|
||||
|
||||
// Get notification every time account userdata owned by a particular program is changed
|
||||
// Get notification every time account data owned by a particular program is changed
|
||||
// Accepts pubkey parameter as base-58 encoded string
|
||||
#[pubsub(
|
||||
subscription = "programNotification",
|
||||
@ -365,10 +365,7 @@ mod tests {
|
||||
|
||||
// Test signature confirmation notification #1
|
||||
let string = receiver.poll();
|
||||
let expected_userdata = arc_bank
|
||||
.get_account(&contract_state.pubkey())
|
||||
.unwrap()
|
||||
.userdata;
|
||||
let expected_data = arc_bank.get_account(&contract_state.pubkey()).unwrap().data;
|
||||
let expected = json!({
|
||||
"jsonrpc": "2.0",
|
||||
"method": "accountNotification",
|
||||
@ -376,7 +373,7 @@ mod tests {
|
||||
"result": {
|
||||
"owner": budget_program_id,
|
||||
"lamports": 51,
|
||||
"userdata": expected_userdata,
|
||||
"data": expected_data,
|
||||
"executable": executable,
|
||||
},
|
||||
"subscription": 0,
|
||||
|
@ -236,7 +236,7 @@ mod tests {
|
||||
subscriptions.check_account(&alice.pubkey(), &account);
|
||||
let string = transport_receiver.poll();
|
||||
if let Async::Ready(Some(response)) = string.unwrap() {
|
||||
let expected = format!(r#"{{"jsonrpc":"2.0","method":"accountNotification","params":{{"result":{{"executable":false,"lamports":1,"owner":[129,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"userdata":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}},"subscription":0}}}}"#);
|
||||
let expected = format!(r#"{{"jsonrpc":"2.0","method":"accountNotification","params":{{"result":{{"data":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"executable":false,"lamports":1,"owner":[129,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}},"subscription":0}}}}"#);
|
||||
assert_eq!(expected, response);
|
||||
}
|
||||
|
||||
@ -282,7 +282,7 @@ mod tests {
|
||||
subscriptions.check_program(&solana_budget_api::id(), &alice.pubkey(), &account);
|
||||
let string = transport_receiver.poll();
|
||||
if let Async::Ready(Some(response)) = string.unwrap() {
|
||||
let expected = format!(r#"{{"jsonrpc":"2.0","method":"programNotification","params":{{"result":["{:?}",{{"executable":false,"lamports":1,"owner":[129,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"userdata":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}}],"subscription":0}}}}"#, alice.pubkey());
|
||||
let expected = format!(r#"{{"jsonrpc":"2.0","method":"programNotification","params":{{"result":["{:?}",{{"data":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"executable":false,"lamports":1,"owner":[129,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}}],"subscription":0}}}}"#, alice.pubkey());
|
||||
assert_eq!(expected, response);
|
||||
}
|
||||
|
||||
|
@ -395,10 +395,10 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_system_transaction_userdata_layout() {
|
||||
fn test_system_transaction_data_layout() {
|
||||
use crate::packet::PACKET_DATA_SIZE;
|
||||
let mut tx0 = test_tx();
|
||||
tx0.instructions[0].userdata = vec![1, 2, 3];
|
||||
tx0.instructions[0].data = vec![1, 2, 3];
|
||||
let message0a = tx0.message();
|
||||
let tx_bytes = serialize(&tx0).unwrap();
|
||||
assert!(tx_bytes.len() < PACKET_DATA_SIZE);
|
||||
@ -408,9 +408,9 @@ mod tests {
|
||||
);
|
||||
let tx1 = deserialize(&tx_bytes).unwrap();
|
||||
assert_eq!(tx0, tx1);
|
||||
assert_eq!(tx1.instructions[0].userdata, vec![1, 2, 3]);
|
||||
assert_eq!(tx1.instructions[0].data, vec![1, 2, 3]);
|
||||
|
||||
tx0.instructions[0].userdata = vec![1, 2, 4];
|
||||
tx0.instructions[0].data = vec![1, 2, 4];
|
||||
let message0b = tx0.message();
|
||||
assert_ne!(message0a, message0b);
|
||||
}
|
||||
|
@ -58,17 +58,26 @@ fn node_staked_accounts(bank: &Bank) -> impl Iterator<Item = (Pubkey, u64, Accou
|
||||
})
|
||||
}
|
||||
|
||||
fn node_staked_accounts_at_epoch(
|
||||
pub fn node_staked_accounts_at_epoch(
|
||||
bank: &Bank,
|
||||
epoch_height: u64,
|
||||
) -> Option<impl Iterator<Item = (&Pubkey, u64, &Account)>> {
|
||||
bank.epoch_vote_accounts(epoch_height).map(|epoch_state| {
|
||||
epoch_state.into_iter().filter_map(|(account_id, account)| {
|
||||
filter_zero_balances(account).map(|stake| (account_id, stake, account))
|
||||
})
|
||||
epoch_state
|
||||
.into_iter()
|
||||
.filter_map(|(account_id, account)| {
|
||||
filter_zero_balances(account).map(|stake| (account_id, stake, account))
|
||||
})
|
||||
.filter(|(account_id, _, account)| filter_no_delegate(account_id, account))
|
||||
})
|
||||
}
|
||||
|
||||
fn filter_no_delegate(account_id: &Pubkey, account: &Account) -> bool {
|
||||
VoteState::deserialize(&account.data)
|
||||
.map(|vote_state| vote_state.delegate_id != *account_id)
|
||||
.unwrap_or(false)
|
||||
}
|
||||
|
||||
fn filter_zero_balances(account: &Account) -> Option<u64> {
|
||||
let balance = Bank::read_balance(&account);
|
||||
if balance > 0 {
|
||||
@ -82,7 +91,7 @@ fn to_vote_state(
|
||||
node_staked_accounts: impl Iterator<Item = (impl Borrow<Pubkey>, u64, impl Borrow<Account>)>,
|
||||
) -> impl Iterator<Item = (u64, VoteState)> {
|
||||
node_staked_accounts.filter_map(|(_, stake, account)| {
|
||||
VoteState::deserialize(&account.borrow().userdata)
|
||||
VoteState::deserialize(&account.borrow().data)
|
||||
.ok()
|
||||
.map(|vote_state| (stake, vote_state))
|
||||
})
|
||||
@ -189,7 +198,13 @@ mod tests {
|
||||
|
||||
// Make a mint vote account. Because the mint has nonzero stake, this
|
||||
// should show up in the active set
|
||||
voting_keypair_tests::new_vote_account_with_vote(&mint_keypair, &bank_voter, &bank, 499, 0);
|
||||
voting_keypair_tests::new_vote_account_with_delegate(
|
||||
&mint_keypair,
|
||||
&bank_voter,
|
||||
&mint_keypair.pubkey(),
|
||||
&bank,
|
||||
499,
|
||||
);
|
||||
|
||||
// soonest slot that could be a new epoch is 1
|
||||
let mut slot = 1;
|
||||
|
@ -5,14 +5,14 @@
|
||||
use crate::blocktree::Blocktree;
|
||||
#[cfg(all(feature = "chacha", feature = "cuda"))]
|
||||
use crate::chacha_cuda::chacha_cbc_encrypt_file_many_keys;
|
||||
use crate::client::mk_client_with_timeout;
|
||||
use crate::cluster_info::ClusterInfo;
|
||||
use crate::cluster_info::{ClusterInfo, FULLNODE_PORT_RANGE};
|
||||
use crate::entry::{Entry, EntryReceiver};
|
||||
use crate::result::{Error, Result};
|
||||
use crate::service::Service;
|
||||
use bincode::deserialize;
|
||||
use rand::{Rng, SeedableRng};
|
||||
use rand_chacha::ChaChaRng;
|
||||
use solana_client::client::create_client_with_timeout;
|
||||
use solana_sdk::hash::Hash;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_sdk::signature::{Keypair, Signature};
|
||||
@ -228,10 +228,14 @@ impl StorageStage {
|
||||
account_to_create: Option<Pubkey>,
|
||||
) -> io::Result<()> {
|
||||
let contact_info = cluster_info.read().unwrap().my_data();
|
||||
let mut client = mk_client_with_timeout(&contact_info, Duration::from_secs(5));
|
||||
let mut client = create_client_with_timeout(
|
||||
contact_info.client_facing_addr(),
|
||||
FULLNODE_PORT_RANGE,
|
||||
Duration::from_secs(5),
|
||||
);
|
||||
|
||||
if let Some(account) = account_to_create {
|
||||
if client.get_account_userdata(&account).is_ok() {
|
||||
if client.get_account_data(&account).is_ok() {
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
@ -379,7 +383,7 @@ impl StorageStage {
|
||||
*current_key_idx += size_of::<Signature>();
|
||||
*current_key_idx %= storage_keys.len();
|
||||
} else if solana_storage_api::check_id(&program_id) {
|
||||
match deserialize(&tx.instructions[i].userdata) {
|
||||
match deserialize(&tx.instructions[i].data) {
|
||||
Ok(StorageProgram::SubmitMiningProof {
|
||||
entry_height: proof_entry_height,
|
||||
..
|
||||
|
@ -1,7 +1,7 @@
|
||||
//! The `vote_signer_proxy` votes on the `blockhash` of the bank at a regular cadence
|
||||
|
||||
use crate::rpc_request::{RpcClient, RpcRequest};
|
||||
use jsonrpc_core;
|
||||
use solana_client::rpc_request::{RpcClient, RpcRequest};
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_sdk::signature::{Keypair, KeypairUtil, Signature};
|
||||
use solana_vote_signer::rpc::LocalVoteSigner;
|
||||
@ -120,6 +120,25 @@ pub mod tests {
|
||||
bank.process_transaction(&tx).unwrap();
|
||||
}
|
||||
|
||||
pub fn new_vote_account_with_delegate(
|
||||
from_keypair: &Keypair,
|
||||
voting_keypair: &Keypair,
|
||||
delegate: &Pubkey,
|
||||
bank: &Bank,
|
||||
lamports: u64,
|
||||
) {
|
||||
let blockhash = bank.last_blockhash();
|
||||
let tx = VoteTransaction::new_account_with_delegate(
|
||||
from_keypair,
|
||||
voting_keypair,
|
||||
delegate,
|
||||
blockhash,
|
||||
lamports,
|
||||
0,
|
||||
);
|
||||
bank.process_transaction(&tx).unwrap();
|
||||
}
|
||||
|
||||
pub fn push_vote<T: KeypairUtil>(voting_keypair: &T, bank: &Bank, slot: u64) {
|
||||
let blockhash = bank.last_blockhash();
|
||||
let tx =
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-drone"
|
||||
version = "0.12.0"
|
||||
version = "0.12.2"
|
||||
description = "Solana Drone"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -19,9 +19,9 @@ clap = "2.31"
|
||||
log = "0.4.2"
|
||||
serde = "1.0.89"
|
||||
serde_derive = "1.0.89"
|
||||
solana-logger = { path = "../logger", version = "0.12.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.12.0" }
|
||||
solana-metrics = { path = "../metrics", version = "0.12.0" }
|
||||
solana-logger = { path = "../logger", version = "0.12.2" }
|
||||
solana-sdk = { path = "../sdk", version = "0.12.2" }
|
||||
solana-metrics = { path = "../metrics", version = "0.12.2" }
|
||||
tokio = "0.1"
|
||||
tokio-codec = "0.1"
|
||||
|
||||
|
@ -372,7 +372,7 @@ mod tests {
|
||||
assert_eq!(tx.program_ids, vec![system_program::id()]);
|
||||
|
||||
assert_eq!(tx.instructions.len(), 1);
|
||||
let instruction: SystemInstruction = deserialize(&tx.instructions[0].userdata).unwrap();
|
||||
let instruction: SystemInstruction = deserialize(&tx.instructions[0].data).unwrap();
|
||||
assert_eq!(
|
||||
instruction,
|
||||
SystemInstruction::CreateAccount {
|
||||
|
@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-fullnode"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "0.12.0"
|
||||
version = "0.12.2"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@ -12,15 +12,15 @@ homepage = "https://solana.com/"
|
||||
clap = "2.32.0"
|
||||
log = "0.4.2"
|
||||
serde_json = "1.0.39"
|
||||
solana = { path = "../core", version = "0.12.0" }
|
||||
solana-drone = { path = "../drone", version = "0.12.0" }
|
||||
solana-logger = { path = "../logger", version = "0.12.0" }
|
||||
solana-netutil = { path = "../netutil", version = "0.12.0" }
|
||||
solana-metrics = { path = "../metrics", version = "0.12.0" }
|
||||
solana-runtime = { path = "../runtime", version = "0.12.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.12.0" }
|
||||
solana-vote-api = { path = "../programs/vote_api", version = "0.12.0" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "0.12.0" }
|
||||
solana = { path = "../core", version = "0.12.2" }
|
||||
solana-drone = { path = "../drone", version = "0.12.2" }
|
||||
solana-logger = { path = "../logger", version = "0.12.2" }
|
||||
solana-netutil = { path = "../netutil", version = "0.12.2" }
|
||||
solana-metrics = { path = "../metrics", version = "0.12.2" }
|
||||
solana-runtime = { path = "../runtime", version = "0.12.2" }
|
||||
solana-sdk = { path = "../sdk", version = "0.12.2" }
|
||||
solana-vote-api = { path = "../programs/vote_api", version = "0.12.2" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "0.12.2" }
|
||||
|
||||
[features]
|
||||
chacha = ["solana/chacha"]
|
||||
|
@ -5,7 +5,6 @@ use solana::contact_info::ContactInfo;
|
||||
use solana::fullnode::{Fullnode, FullnodeConfig};
|
||||
use solana::local_vote_signer_service::LocalVoteSignerService;
|
||||
use solana::service::Service;
|
||||
use solana_sdk::genesis_block::GenesisBlock;
|
||||
use solana_sdk::signature::{read_keypair, Keypair, KeypairUtil};
|
||||
use std::fs::File;
|
||||
use std::process::exit;
|
||||
@ -70,11 +69,6 @@ fn main() {
|
||||
.takes_value(true)
|
||||
.help("Rendezvous with the cluster at this gossip entry point"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("no_leader_rotation")
|
||||
.long("no-leader-rotation")
|
||||
.help("Disable leader rotation"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("no_voting")
|
||||
.long("no-voting")
|
||||
@ -169,8 +163,6 @@ fn main() {
|
||||
|
||||
fullnode_config.voting_disabled = matches.is_present("no_voting");
|
||||
|
||||
let use_only_bootstrap_leader = matches.is_present("no_leader_rotation");
|
||||
|
||||
if matches.is_present("enable_rpc_exit") {
|
||||
fullnode_config.rpc_config.enable_fullnode_exit = true;
|
||||
}
|
||||
@ -233,11 +225,6 @@ fn main() {
|
||||
node.info.rpc.set_port(rpc_port);
|
||||
node.info.rpc_pubsub.set_port(rpc_pubsub_port);
|
||||
|
||||
let genesis_block = GenesisBlock::load(ledger_path).expect("Unable to load genesis block");
|
||||
if use_only_bootstrap_leader && node.info.id != genesis_block.bootstrap_leader_id {
|
||||
fullnode_config.voting_disabled = true;
|
||||
}
|
||||
|
||||
let fullnode = Fullnode::new(
|
||||
node,
|
||||
&keypair,
|
||||
|
@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-genesis"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "0.12.0"
|
||||
version = "0.12.2"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@ -11,8 +11,8 @@ homepage = "https://solana.com/"
|
||||
[dependencies]
|
||||
clap = "2.32.0"
|
||||
serde_json = "1.0.39"
|
||||
solana = { path = "../core", version = "0.12.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.12.0" }
|
||||
solana = { path = "../core", version = "0.12.2" }
|
||||
solana-sdk = { path = "../sdk", version = "0.12.2" }
|
||||
|
||||
[features]
|
||||
cuda = ["solana/cuda"]
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-keygen"
|
||||
version = "0.12.0"
|
||||
version = "0.12.2"
|
||||
description = "Solana key generation utility"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -14,7 +14,7 @@ cuda = []
|
||||
[dependencies]
|
||||
dirs = "1.0.5"
|
||||
clap = "2.31"
|
||||
solana-sdk = { path = "../sdk", version = "0.12.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.12.2" }
|
||||
|
||||
[[bin]]
|
||||
name = "solana-keygen"
|
||||
|
@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-ledger-tool"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "0.12.0"
|
||||
version = "0.12.2"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@ -11,10 +11,10 @@ homepage = "https://solana.com/"
|
||||
[dependencies]
|
||||
clap = "2.32.0"
|
||||
serde_json = "1.0.39"
|
||||
solana = { path = "../core", version = "0.12.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.12.0" }
|
||||
solana-logger = { path = "../logger", version = "0.12.0" }
|
||||
solana-runtime = { path = "../runtime", version = "0.12.0" }
|
||||
solana = { path = "../core", version = "0.12.2" }
|
||||
solana-sdk = { path = "../sdk", version = "0.12.2" }
|
||||
solana-logger = { path = "../logger", version = "0.12.2" }
|
||||
solana-runtime = { path = "../runtime", version = "0.12.2" }
|
||||
|
||||
[dev-dependencies]
|
||||
assert_cmd = "0.11"
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-logger"
|
||||
version = "0.12.0"
|
||||
version = "0.12.2"
|
||||
description = "Solana Logger"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-metrics"
|
||||
version = "0.12.0"
|
||||
version = "0.12.2"
|
||||
description = "Solana Metrics"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -14,7 +14,7 @@ log = "0.4.2"
|
||||
reqwest = "0.9.11"
|
||||
lazy_static = "1.3.0"
|
||||
sys-info = "0.5.6"
|
||||
solana-sdk = { path = "../sdk", version = "0.12.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.12.2" }
|
||||
|
||||
[dev-dependencies]
|
||||
rand = "0.6.5"
|
||||
|
@ -15,8 +15,8 @@
|
||||
"editable": true,
|
||||
"gnetId": null,
|
||||
"graphTooltip": 0,
|
||||
"id": 251,
|
||||
"iteration": 1549301870214,
|
||||
"id": 399,
|
||||
"iteration": 1553559957575,
|
||||
"links": [
|
||||
{
|
||||
"asDropdown": true,
|
||||
@ -1412,7 +1412,7 @@
|
||||
}
|
||||
],
|
||||
"thresholds": "",
|
||||
"title": "Last Leader Rotation Tick",
|
||||
"title": "Last Leader Rotation Slot",
|
||||
"type": "singlestat",
|
||||
"valueFontSize": "70%",
|
||||
"valueMaps": [
|
||||
@ -3811,7 +3811,7 @@
|
||||
"x": 0,
|
||||
"y": 49
|
||||
},
|
||||
"id": 42,
|
||||
"id": 34,
|
||||
"legend": {
|
||||
"alignAsTable": false,
|
||||
"avg": false,
|
||||
@ -4001,7 +4001,7 @@
|
||||
"x": 12,
|
||||
"y": 49
|
||||
},
|
||||
"id": 41,
|
||||
"id": 35,
|
||||
"legend": {
|
||||
"alignAsTable": false,
|
||||
"avg": false,
|
||||
@ -4156,7 +4156,7 @@
|
||||
"x": 0,
|
||||
"y": 54
|
||||
},
|
||||
"id": 34,
|
||||
"id": 36,
|
||||
"legend": {
|
||||
"alignAsTable": false,
|
||||
"avg": false,
|
||||
@ -4496,7 +4496,7 @@
|
||||
"x": 12,
|
||||
"y": 54
|
||||
},
|
||||
"id": 40,
|
||||
"id": 37,
|
||||
"legend": {
|
||||
"alignAsTable": false,
|
||||
"avg": false,
|
||||
@ -4727,7 +4727,7 @@
|
||||
"x": 0,
|
||||
"y": 60
|
||||
},
|
||||
"id": 35,
|
||||
"id": 38,
|
||||
"legend": {
|
||||
"alignAsTable": false,
|
||||
"avg": false,
|
||||
@ -4997,7 +4997,7 @@
|
||||
"x": 12,
|
||||
"y": 60
|
||||
},
|
||||
"id": 43,
|
||||
"id": 39,
|
||||
"legend": {
|
||||
"alignAsTable": false,
|
||||
"avg": false,
|
||||
@ -5100,6 +5100,249 @@
|
||||
"alignLevel": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"aliasColors": {
|
||||
"cluster-info.repair": "#ba43a9",
|
||||
"counter-replay_stage-new_leader.last": "#00ffbb",
|
||||
"window-service.receive": "#b7dbab",
|
||||
"window-stage.consumed": "#5195ce"
|
||||
},
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"datasource": "Solana Metrics (read-only)",
|
||||
"fill": 1,
|
||||
"gridPos": {
|
||||
"h": 6,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 66
|
||||
},
|
||||
"id": 44,
|
||||
"legend": {
|
||||
"alignAsTable": false,
|
||||
"avg": false,
|
||||
"current": false,
|
||||
"max": false,
|
||||
"min": false,
|
||||
"show": true,
|
||||
"total": false,
|
||||
"values": false
|
||||
},
|
||||
"lines": false,
|
||||
"linewidth": 1,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"percentage": false,
|
||||
"pointradius": 2,
|
||||
"points": true,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"groupBy": [
|
||||
{
|
||||
"params": [
|
||||
"$__interval"
|
||||
],
|
||||
"type": "time"
|
||||
},
|
||||
{
|
||||
"params": [
|
||||
"null"
|
||||
],
|
||||
"type": "fill"
|
||||
}
|
||||
],
|
||||
"measurement": "counter-cluster_info-vote-count",
|
||||
"orderByTime": "ASC",
|
||||
"policy": "autogen",
|
||||
"query": "SELECT last(\"latest\") - last(\"root\") FROM \"$testnet\".\"autogen\".\"counter-locktower-vote\" WHERE host_id =~ /$hostid/ AND $timeFilter GROUP BY time($__interval)",
|
||||
"rawQuery": true,
|
||||
"refId": "A",
|
||||
"resultFormat": "time_series",
|
||||
"select": [
|
||||
[
|
||||
{
|
||||
"params": [
|
||||
"count"
|
||||
],
|
||||
"type": "field"
|
||||
},
|
||||
{
|
||||
"params": [],
|
||||
"type": "sum"
|
||||
}
|
||||
]
|
||||
],
|
||||
"tags": []
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeShift": null,
|
||||
"title": "Locktower Distance in Latest and Root Slot ($hostid)",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
}
|
||||
],
|
||||
"yaxis": {
|
||||
"align": false,
|
||||
"alignLevel": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"aliasColors": {
|
||||
"cluster-info.repair": "#ba43a9",
|
||||
"counter-locktower-vote.last": "#00ffbb",
|
||||
"counter-replay_stage-new_leader.last": "#00ffbb",
|
||||
"window-service.receive": "#b7dbab",
|
||||
"window-stage.consumed": "#5195ce"
|
||||
},
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"datasource": "Solana Metrics (read-only)",
|
||||
"fill": 1,
|
||||
"gridPos": {
|
||||
"h": 6,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 66
|
||||
},
|
||||
"id": 45,
|
||||
"legend": {
|
||||
"alignAsTable": false,
|
||||
"avg": false,
|
||||
"current": false,
|
||||
"max": false,
|
||||
"min": false,
|
||||
"show": true,
|
||||
"total": false,
|
||||
"values": false
|
||||
},
|
||||
"lines": false,
|
||||
"linewidth": 1,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"percentage": false,
|
||||
"pointradius": 2,
|
||||
"points": true,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"groupBy": [
|
||||
{
|
||||
"params": [
|
||||
"$__interval"
|
||||
],
|
||||
"type": "time"
|
||||
},
|
||||
{
|
||||
"params": [
|
||||
"null"
|
||||
],
|
||||
"type": "fill"
|
||||
}
|
||||
],
|
||||
"measurement": "counter-cluster_info-vote-count",
|
||||
"orderByTime": "ASC",
|
||||
"policy": "autogen",
|
||||
"query": "SELECT last(\"root\") FROM \"$testnet\".\"autogen\".\"counter-locktower-vote\" WHERE host_id =~ /$hostid/ AND $timeFilter GROUP BY time($__interval)",
|
||||
"rawQuery": true,
|
||||
"refId": "A",
|
||||
"resultFormat": "time_series",
|
||||
"select": [
|
||||
[
|
||||
{
|
||||
"params": [
|
||||
"count"
|
||||
],
|
||||
"type": "field"
|
||||
},
|
||||
{
|
||||
"params": [],
|
||||
"type": "sum"
|
||||
}
|
||||
]
|
||||
],
|
||||
"tags": []
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeShift": null,
|
||||
"title": "Locktower Root Slot ($hostid)",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
}
|
||||
],
|
||||
"yaxis": {
|
||||
"align": false,
|
||||
"alignLevel": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"aliasColors": {},
|
||||
"bars": false,
|
||||
@ -5111,9 +5354,9 @@
|
||||
"h": 5,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 66
|
||||
"y": 72
|
||||
},
|
||||
"id": 36,
|
||||
"id": 40,
|
||||
"legend": {
|
||||
"alignAsTable": false,
|
||||
"avg": false,
|
||||
@ -5343,9 +5586,9 @@
|
||||
"h": 5,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 66
|
||||
"y": 72
|
||||
},
|
||||
"id": 38,
|
||||
"id": 41,
|
||||
"legend": {
|
||||
"alignAsTable": false,
|
||||
"avg": false,
|
||||
@ -5496,9 +5739,9 @@
|
||||
"h": 1,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 71
|
||||
"y": 77
|
||||
},
|
||||
"id": 44,
|
||||
"id": 42,
|
||||
"panels": [],
|
||||
"title": "Signature Verification",
|
||||
"type": "row"
|
||||
@ -5514,9 +5757,9 @@
|
||||
"h": 5,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 72
|
||||
"y": 78
|
||||
},
|
||||
"id": 45,
|
||||
"id": 43,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
@ -5701,5 +5944,5 @@
|
||||
"timezone": "",
|
||||
"title": "Testnet Monitor (edge)",
|
||||
"uid": "testnet-edge",
|
||||
"version": 116
|
||||
"version": 117
|
||||
}
|
||||
|
@ -10,6 +10,46 @@ source "$here"/common.sh
|
||||
# shellcheck source=scripts/oom-score-adj.sh
|
||||
source "$here"/../scripts/oom-score-adj.sh
|
||||
|
||||
if [[ $1 = -h ]]; then
|
||||
fullnode_usage "$@"
|
||||
fi
|
||||
|
||||
extra_fullnode_args=()
|
||||
setup_stakes=true
|
||||
|
||||
while [[ ${1:0:1} = - ]]; do
|
||||
if [[ $1 = --blockstream ]]; then
|
||||
extra_fullnode_args+=("$1" "$2")
|
||||
shift 2
|
||||
elif [[ $1 = --enable-rpc-exit ]]; then
|
||||
extra_fullnode_args+=("$1")
|
||||
shift
|
||||
elif [[ $1 = --init-complete-file ]]; then
|
||||
extra_fullnode_args+=("$1" "$2")
|
||||
shift 2
|
||||
elif [[ $1 = --only-bootstrap-stake ]]; then
|
||||
setup_stakes=false
|
||||
shift
|
||||
elif [[ $1 = --public-address ]]; then
|
||||
extra_fullnode_args+=("$1")
|
||||
shift
|
||||
elif [[ $1 = --no-signer ]]; then
|
||||
extra_fullnode_args+=("$1")
|
||||
shift
|
||||
elif [[ $1 = --rpc-port ]]; then
|
||||
extra_fullnode_args+=("$1" "$2")
|
||||
shift 2
|
||||
else
|
||||
echo "Unknown argument: $1"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
if [[ -n $3 ]]; then
|
||||
fullnode_usage "$@"
|
||||
fi
|
||||
|
||||
|
||||
[[ -f "$SOLANA_CONFIG_DIR"/bootstrap-leader-id.json ]] || {
|
||||
echo "$SOLANA_CONFIG_DIR/bootstrap-leader-id.json not found, create it by running:"
|
||||
echo
|
||||
@ -27,12 +67,10 @@ tune_system
|
||||
|
||||
$solana_ledger_tool --ledger "$SOLANA_CONFIG_DIR"/bootstrap-leader-ledger verify
|
||||
|
||||
|
||||
bootstrap_leader_id_path="$SOLANA_CONFIG_DIR"/bootstrap-leader-id.json
|
||||
bootstrap_leader_staker_id_path="$SOLANA_CONFIG_DIR"/bootstrap-leader-staker-id.json
|
||||
bootstrap_leader_staker_id=$($solana_wallet --keypair "$bootstrap_leader_staker_id_path" address)
|
||||
|
||||
set -x
|
||||
trap 'kill "$pid" && wait "$pid"' INT TERM ERR
|
||||
$program \
|
||||
--identity "$bootstrap_leader_id_path" \
|
||||
@ -42,11 +80,13 @@ $program \
|
||||
--accounts "$SOLANA_CONFIG_DIR"/bootstrap-leader-accounts \
|
||||
--rpc-port 8899 \
|
||||
--rpc-drone-address 127.0.0.1:9900 \
|
||||
"$@" \
|
||||
"${extra_fullnode_args[@]}" \
|
||||
> >($bootstrap_leader_logger) 2>&1 &
|
||||
pid=$!
|
||||
oom_score_adj "$pid" 1000
|
||||
|
||||
setup_fullnode_staking 127.0.0.1 "$bootstrap_leader_id_path" "$bootstrap_leader_staker_id_path"
|
||||
if [[ $setup_stakes = true ]] ; then
|
||||
setup_fullnode_staking 127.0.0.1 "$bootstrap_leader_id_path" "$bootstrap_leader_staker_id_path"
|
||||
fi
|
||||
|
||||
wait "$pid"
|
||||
|
@ -21,7 +21,8 @@ if [[ $(uname) != Linux ]]; then
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ -n $USE_INSTALL ]]; then # Assume |./scripts/cargo-install-all.sh| was run
|
||||
|
||||
if [[ -n $USE_INSTALL || ! -f "$(dirname "${BASH_SOURCE[0]}")"/../Cargo.toml ]]; then
|
||||
solana_program() {
|
||||
declare program="$1"
|
||||
printf "solana-%s" "$program"
|
||||
@ -150,6 +151,11 @@ setup_fullnode_staking() {
|
||||
declare staker_id
|
||||
staker_id=$($solana_wallet --keypair "$staker_id_path" address)
|
||||
|
||||
if [[ -f "$staker_id_path".configured ]]; then
|
||||
echo "Staking account has already been configured"
|
||||
return 0
|
||||
fi
|
||||
|
||||
# A fullnode requires 43 lamports to function:
|
||||
# - one lamport to keep the node identity public key valid. TODO: really??
|
||||
# - 42 more for the staker account we fund
|
||||
@ -158,19 +164,44 @@ setup_fullnode_staking() {
|
||||
# A little wrong, fund the staking account from the
|
||||
# to the node. Maybe next time consider doing this the opposite
|
||||
# way or use an ephemeral account
|
||||
$solana_wallet --keypair "$fullnode_id_path" \
|
||||
$solana_wallet --keypair "$fullnode_id_path" --host "$drone_address" \
|
||||
create-staking-account "$staker_id" 42 || return $?
|
||||
|
||||
# as the staker, set the node as the delegate and the staker as
|
||||
# the vote-signer
|
||||
$solana_wallet --keypair "$staker_id_path" \
|
||||
$solana_wallet --keypair "$staker_id_path" --host "$drone_address" \
|
||||
configure-staking-account \
|
||||
--delegate-account "$fullnode_id" \
|
||||
--authorize-voter "$staker_id" || return $?
|
||||
|
||||
|
||||
touch "$staker_id_path".configured
|
||||
return 0
|
||||
}
|
||||
|
||||
fullnode_usage() {
|
||||
if [[ -n $1 ]]; then
|
||||
echo "$*"
|
||||
echo
|
||||
fi
|
||||
cat <<EOF
|
||||
usage: $0 [-x] [--blockstream PATH] [--init-complete-file FILE] [--only-bootstrap-stake] [--no-signer] [--rpc-port port] [rsync network path to bootstrap leader configuration] [network entry point]
|
||||
|
||||
Start a full node on the specified network
|
||||
|
||||
-x - start a new, dynamically-configured full node. Does not apply to the bootstrap leader
|
||||
-X [label] - start or restart a dynamically-configured full node with
|
||||
the specified label. Does not apply to the bootstrap leader
|
||||
--blockstream PATH - open blockstream at this unix domain socket location
|
||||
--init-complete-file FILE - create this file, if it doesn't already exist, once node initialization is complete
|
||||
--only-bootstrap-stake - only stake the bootstrap leader, effectively disabling leader rotation
|
||||
--public-address - advertise public machine address in gossip. By default the local machine address is advertised
|
||||
--no-signer - start node without vote signer
|
||||
--rpc-port port - custom RPC port for this node
|
||||
|
||||
EOF
|
||||
exit 1
|
||||
}
|
||||
|
||||
# The directory on the bootstrap leader that is rsynced by other full nodes as
|
||||
# they boot (TODO: Eventually this should go away)
|
||||
|
@ -9,37 +9,15 @@ source "$here"/common.sh
|
||||
# shellcheck source=scripts/oom-score-adj.sh
|
||||
source "$here"/../scripts/oom-score-adj.sh
|
||||
|
||||
usage() {
|
||||
if [[ -n $1 ]]; then
|
||||
echo "$*"
|
||||
echo
|
||||
fi
|
||||
cat <<EOF
|
||||
usage: $0 [-x] [--blockstream PATH] [--init-complete-file FILE] [--no-leader-rotation] [--no-signer] [--rpc-port port] [rsync network path to bootstrap leader configuration] [network entry point]
|
||||
|
||||
Start a full node on the specified network
|
||||
|
||||
-x - start a new, dynamically-configured full node
|
||||
-X [label] - start or restart a dynamically-configured full node with
|
||||
the specified label
|
||||
--blockstream PATH - open blockstream at this unix domain socket location
|
||||
--init-complete-file FILE - create this file, if it doesn't already exist, once node initialization is complete
|
||||
--no-leader-rotation - disable leader rotation
|
||||
--public-address - advertise public machine address in gossip. By default the local machine address is advertised
|
||||
--no-signer - start node without vote signer
|
||||
--rpc-port port - custom RPC port for this node
|
||||
|
||||
EOF
|
||||
exit 1
|
||||
}
|
||||
|
||||
if [[ $1 = -h ]]; then
|
||||
usage
|
||||
fullnode_usage "$@"
|
||||
fi
|
||||
|
||||
gossip_port=9000
|
||||
extra_fullnode_args=()
|
||||
self_setup=0
|
||||
setup_stakes=1
|
||||
poll_for_new_genesis_block=0
|
||||
|
||||
while [[ ${1:0:1} = - ]]; do
|
||||
if [[ $1 = -X ]]; then
|
||||
@@ -50,6 +28,9 @@ while [[ ${1:0:1} = - ]]; do
self_setup=1
self_setup_label=$$
shift
elif [[ $1 = --poll-for-new-genesis-block ]]; then
poll_for_new_genesis_block=1
shift
elif [[ $1 = --blockstream ]]; then
extra_fullnode_args+=("$1" "$2")
shift 2
@@ -59,15 +40,18 @@ while [[ ${1:0:1} = - ]]; do
elif [[ $1 = --init-complete-file ]]; then
extra_fullnode_args+=("$1" "$2")
shift 2
elif [[ $1 = --no-leader-rotation ]]; then
extra_fullnode_args+=("$1")
elif [[ $1 = --only-bootstrap-stake ]]; then
setup_stakes=0
shift
elif [[ $1 = --public-address ]]; then
extra_fullnode_args+=("$1")
shift
elif [[ $1 = --no-signer ]]; then
elif [[ $1 = --no-voting ]]; then
extra_fullnode_args+=("$1")
shift
elif [[ $1 = --gossip-port ]]; then
gossip_port=$2
shift 2
elif [[ $1 = --rpc-port ]]; then
extra_fullnode_args+=("$1" "$2")
shift 2
@@ -78,7 +62,7 @@ while [[ ${1:0:1} = - ]]; do
done

if [[ -n $3 ]]; then
usage
fullnode_usage "$@"
fi

find_leader() {
@@ -86,7 +70,7 @@ find_leader() {
declare shift=0

if [[ -z $1 ]]; then
leader=${here}/.. # Default to local tree for rsync
leader=$PWD # Default to local tree for rsync
leader_address=127.0.0.1:8001 # Default to local leader
elif [[ -z $2 ]]; then
leader=$1
@@ -189,32 +173,62 @@ rsync_url() { # adds the 'rsync://` prefix to URLs that need it


rsync_leader_url=$(rsync_url "$leader")
set -ex
if [[ ! -d "$ledger_config_dir" ]]; then
$rsync -vPr "$rsync_leader_url"/config/ledger/ "$ledger_config_dir"
[[ -d $ledger_config_dir ]] || {
echo "Unable to retrieve ledger from $rsync_leader_url"
exit 1
}
$solana_ledger_tool --ledger "$ledger_config_dir" verify
set -e

fi
secs_to_next_genesis_poll=0
PS4="$(basename "$0"): "
while true; do
set -x
if [[ ! -d "$SOLANA_RSYNC_CONFIG_DIR"/ledger ]]; then
$rsync -vPr "$rsync_leader_url"/config/ledger "$SOLANA_RSYNC_CONFIG_DIR"
fi

trap 'kill "$pid" && wait "$pid"' INT TERM ERR
$program \
--gossip-port "$gossip_port" \
--identity "$fullnode_id_path" \
--voting-keypair "$fullnode_staker_id_path" \
--staking-account "$fullnode_staker_id" \
--network "$leader_address" \
--ledger "$ledger_config_dir" \
--accounts "$accounts_config_dir" \
--rpc-drone-address "${leader_address%:*}:9900" \
"${extra_fullnode_args[@]}" \
> >($fullnode_logger) 2>&1 &
pid=$!
oom_score_adj "$pid" 1000
if [[ ! -d "$ledger_config_dir" ]]; then
cp -a "$SOLANA_RSYNC_CONFIG_DIR"/ledger/ "$ledger_config_dir"
$solana_ledger_tool --ledger "$ledger_config_dir" verify
fi

setup_fullnode_staking "${leader_address%:*}" "$fullnode_id_path" "$fullnode_staker_id_path"
trap 'kill "$pid" && wait "$pid"' INT TERM ERR
$program \
--gossip-port "$gossip_port" \
--identity "$fullnode_id_path" \
--voting-keypair "$fullnode_staker_id_path" \
--staking-account "$fullnode_staker_id" \
--network "$leader_address" \
--ledger "$ledger_config_dir" \
--accounts "$accounts_config_dir" \
--rpc-drone-address "${leader_address%:*}:9900" \
"${extra_fullnode_args[@]}" \
> >($fullnode_logger) 2>&1 &
pid=$!
oom_score_adj "$pid" 1000

wait "$pid"
if ((setup_stakes)); then
setup_fullnode_staking "${leader_address%:*}" "$fullnode_id_path" "$fullnode_staker_id_path"
fi
set +x

while true; do
if ! kill -0 "$pid"; then
wait "$pid"
exit 0
fi
sleep 1

if ((poll_for_new_genesis_block)); then
if ((!secs_to_next_genesis_poll)); then
secs_to_next_genesis_poll=60

$rsync -r "$rsync_leader_url"/config/ledger "$SOLANA_RSYNC_CONFIG_DIR" || true
if [[ -n $(diff "$SOLANA_RSYNC_CONFIG_DIR"/ledger/genesis.json "$ledger_config_dir"/genesis.json 2>&1) ]]; then
echo "############## New genesis detected, restarting fullnode ##############"
rm -rf "$ledger_config_dir"
kill "$pid" || true
wait "$pid" || true
break
fi
fi
((secs_to_next_genesis_poll--))
fi
done
done
@@ -15,7 +15,7 @@ gce)
cpuBootstrapLeaderMachineType=n1-standard-16
gpuBootstrapLeaderMachineType="$cpuBootstrapLeaderMachineType --accelerator count=4,type=nvidia-tesla-k80"
bootstrapLeaderMachineType=$cpuBootstrapLeaderMachineType
fullNodeMachineType=n1-standard-16
fullNodeMachineType=$cpuBootstrapLeaderMachineType
clientMachineType=n1-standard-16
blockstreamerMachineType=n1-standard-8
;;
@@ -133,10 +133,12 @@ while getopts "h?p:Pn:c:z:gG:a:d:bu" opt; do
g)
enableGpu=true
bootstrapLeaderMachineType=$gpuBootstrapLeaderMachineType
fullNodeMachineType=$bootstrapLeaderMachineType
;;
G)
enableGpu=true
bootstrapLeaderMachineType="$OPTARG"
fullNodeMachineType=$bootstrapLeaderMachineType
;;
a)
customAddress=$OPTARG
@@ -225,10 +227,6 @@ ec2)
;;
esac

if $leaderRotation; then
fullNodeMachineType=$bootstrapLeaderMachineType
fi

# cloud_ForEachInstance [cmd] [extra args to cmd]
#
# Execute a command for each element in the `instances` array
@@ -12,11 +12,12 @@ usage() {
echo "Error: $*"
fi
cat <<EOF
usage: $0 [-e] [-d] [username]
usage: $0 [-e] [-d] [-c] [username]

Creates a testnet dev metrics database

username InfluxDB user with access to create a new database
-c Use Influx Cloud instance
-d Delete the database instead of creating it
-e Assume database already exists and SOLANA_METRICS_CONFIG is
defined in the environment already
@@ -29,12 +30,16 @@ loadConfigFile

useEnv=false
delete=false
while getopts "hde" opt; do
host="https://metrics.solana.com:8086"
while getopts "hcde" opt; do
case $opt in
h|\?)
usage
exit 0
;;
c)
host="https://clocktower-f1d56615.influxcloud.net:8086"
;;
d)
delete=true
;;
@@ -62,7 +67,7 @@ else
query() {
echo "$*"
curl -XPOST \
"https://metrics.solana.com:8086/query?u=${username}&p=${password}" \
"$host/query?u=${username}&p=${password}" \
--data-urlencode "q=$*"
}

@@ -73,7 +78,7 @@ else
query "GRANT READ ON \"$netBasename\" TO \"ro\""
query "GRANT WRITE ON \"$netBasename\" TO \"scratch_writer\""

SOLANA_METRICS_CONFIG="db=$netBasename,u=scratch_writer,p=topsecret"
SOLANA_METRICS_CONFIG="host=$host,db=$netBasename,u=scratch_writer,p=topsecret"
fi

echo "export SOLANA_METRICS_CONFIG=\"$SOLANA_METRICS_CONFIG\"" >> "$configFile"
15
net/net.sh
@@ -117,7 +117,8 @@ loadConfigFile
build() {
declare MAYBE_DOCKER=
if [[ $(uname) != Linux ]]; then
MAYBE_DOCKER="ci/docker-run.sh solanalabs/rust"
source ci/rust-version.sh
MAYBE_DOCKER="ci/docker-run.sh +$rust_stable_docker_image"
fi
SECONDS=0
(
@@ -277,7 +278,8 @@ start() {
rm -f "$SOLANA_ROOT"/solana-release.tar.bz2
(
set -x
curl -o "$SOLANA_ROOT"/solana-release.tar.bz2 http://solana-release.s3.amazonaws.com/"$releaseChannel"/solana-release.tar.bz2
curl -o "$SOLANA_ROOT"/solana-release.tar.bz2 \
http://solana-release.s3.amazonaws.com/"$releaseChannel"/solana-release-x86_64-unknown-linux-gnu.tar.bz2
)
tarballFilename="$SOLANA_ROOT"/solana-release.tar.bz2
fi
@@ -285,7 +287,7 @@ start() {
set -x
rm -rf "$SOLANA_ROOT"/solana-release
(cd "$SOLANA_ROOT"; tar jxv) < "$tarballFilename"
cat "$SOLANA_ROOT"/solana-release/version.txt
cat "$SOLANA_ROOT"/solana-release/version.yml
)
;;
local)
@@ -370,7 +372,10 @@ start() {
case $deployMethod in
tar)
networkVersion="$(
tail -n1 "$SOLANA_ROOT"/solana-release/version.txt || echo "tar-unknown"
(
set -o pipefail
grep "^version: " "$SOLANA_ROOT"/solana-release/version.yml | head -n1 | cut -d\ -f2
) || echo "tar-unknown"
)"
;;
local)
@@ -406,7 +411,7 @@ stopNode() {
pgid=\$(ps opgid= \$(cat \$pid) | tr -d '[:space:]')
sudo kill -- -\$pgid
done
for pattern in solana- remote-; do
for pattern in node solana- remote-; do
pkill -9 \$pattern
done
"
@@ -57,6 +57,7 @@ clientCommand="\
--duration 7500 \
--sustained \
--threads $threadCount \
--tx_count 10000 \
"

tmux new -s solana-bench-tps -d "
@@ -76,7 +76,7 @@ local|tar)

maybeNoLeaderRotation=
if ! $leaderRotation; then
maybeNoLeaderRotation="--no-leader-rotation"
maybeNoLeaderRotation="--only-bootstrap-stake"
fi
maybePublicAddress=
if $publicNetwork; then
@@ -96,7 +96,7 @@ local|tar)

args=()
if ! $leaderRotation; then
args+=("--no-leader-rotation")
args+=("--only-bootstrap-stake")
fi
if $publicNetwork; then
args+=("--public-address")
@@ -104,18 +104,41 @@ local|tar)
if [[ $nodeType = blockstreamer ]]; then
args+=(
--blockstream /tmp/solana-blockstream.sock
--no-signer
--no-voting
)
fi

args+=(
--gossip-port 8001
--rpc-port 8899
)

set -x
if [[ $skipSetup != true ]]; then
./multinode-demo/setup.sh -t fullnode
fi

if [[ $nodeType = blockstreamer ]]; then
# Sneak the mint-id.json from the bootstrap leader and run another drone
# with it on the blockstreamer node. Typically the blockstreamer node has
# a static IP/DNS name for hosting the blockexplorer web app, and is
# a location that somebody would expect to be able to airdrop from
scp "$entrypointIp":~/solana/config-local/mint-id.json config-local/
./multinode-demo/drone.sh > drone.log 2>&1 &

npm install @solana/blockexplorer@1
npx solana-blockexplorer > blockexplorer.log 2>&1 &

# Confirm the blockexplorer is accessible
curl --head --retry 3 --retry-connrefused http://localhost:5000/

# Redirect port 80 to port 5000
sudo iptables -A INPUT -p tcp --dport 80 -j ACCEPT
sudo iptables -A INPUT -p tcp --dport 5000 -j ACCEPT
sudo iptables -A PREROUTING -t nat -p tcp --dport 80 -j REDIRECT --to-port 5000

# Confirm the blockexplorer is now globally accessible
curl --head "$(curl ifconfig.io)"
fi
./multinode-demo/fullnode.sh "${args[@]}" "$entrypointIp":~/solana "$entrypointIp:8001" > fullnode.log 2>&1 &
;;
@@ -72,7 +72,7 @@ local|tar)
solana_keygen=solana-keygen

ledger=config-local/bootstrap-leader-ledger
client_id=config/client-id.json
client_id=config-local/client-id.json
;;
*)
echo "Unknown deployment method: $deployMethod"
@@ -59,7 +59,7 @@ __cloud_FindInstances() {
"Name=tag:name,Values=$filter" \
"Name=instance-state-name,Values=pending,running" \
--query "Reservations[].Instances[].[InstanceId,PublicIpAddress,PrivateIpAddress]" \
--output text
--output text \
)
}

@@ -223,11 +223,32 @@ cloud_DeleteInstances() {
echo No instances to delete
return
fi

declare names=("${instances[@]/:*/}")

(
set -x
aws ec2 terminate-instances --region "$region" --instance-ids "${names[@]}"
)

# Wait until the instances are terminated
for name in "${names[@]}"; do
while true; do
declare instanceState
instanceState=$(\
aws ec2 describe-instances \
--region "$region" \
--instance-ids "$name" \
--query "Reservations[].Instances[].State.Name" \
--output text \
)
echo "$name: $instanceState"
if [[ $instanceState = terminated ]]; then
break;
fi
sleep 2
done
done
}

@@ -1,6 +1,6 @@
[package]
name = "solana-netutil"
version = "0.12.0"
version = "0.12.2"
description = "Solana Network Utilities"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -18,7 +18,7 @@ reqwest = "0.9.0"
socket2 = "0.3.8"

[dev-dependencies]
solana-logger = { path = "../logger", version = "0.12.0" }
solana-logger = { path = "../logger", version = "0.12.2" }

[lib]
name = "solana_netutil"
@@ -1,7 +1,7 @@
[package]
name = "solana-bpf-programs"
description = "Blockchain, Rebuilt for Scale"
version = "0.12.0"
version = "0.12.2"
documentation = "https://docs.rs/solana"
homepage = "https://solana.com/"
readme = "README.md"
@@ -22,10 +22,10 @@ bincode = "1.1.2"
byteorder = "1.3.1"
elf = "0.0.10"
solana_rbpf = "=0.1.10"
solana-bpfloader = { path = "../bpf_loader", version = "0.12.0" }
solana-logger = { path = "../../logger", version = "0.12.0" }
solana-runtime = { path = "../../runtime", version = "0.12.0" }
solana-sdk = { path = "../../sdk", version = "0.12.0" }
solana-bpfloader = { path = "../bpf_loader", version = "0.12.2" }
solana-logger = { path = "../../logger", version = "0.12.2" }
solana-runtime = { path = "../../runtime", version = "0.12.2" }
solana-sdk = { path = "../../sdk", version = "0.12.2" }

[[bench]]
name = "bpf_loader"
@@ -3,7 +3,7 @@

[package]
name = "solana-bpf-rust-noop"
version = "0.12.0"
version = "0.12.2"
description = "Solana BPF noop program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -123,8 +123,8 @@ pub fn sol_log_params(ka: &[SolKeyedAccount], data: &[u8]) {
sol_log_key(&k.key);
sol_log("- Lamports");
sol_log_64(0, 0, 0, 0, k.lamports);
sol_log("- Userdata");
sol_log_slice(k.userdata);
sol_log("- AccountData");
sol_log_slice(k.data);
sol_log("- Owner");
sol_log_key(&k.owner);
}
@@ -148,7 +148,7 @@ pub struct SolKeyedAccount<'a> {
/// Number of lamports owned by this account
pub lamports: u64,
/// On-chain data within this account
pub userdata: &'a [u8],
pub data: &'a [u8],
/// Program that owns this account
pub owner: SolPubkey<'a>,
}
@@ -200,15 +200,15 @@ pub extern "C" fn entrypoint(input: *mut u8) -> bool {
};
offset += size_of::<u64>();

let userdata_length = unsafe {
let data_length = unsafe {
#[allow(clippy::cast_ptr_alignment)]
let userdata_length_ptr: *const u64 = input.add(offset) as *const u64;
*userdata_length_ptr
let data_length_ptr: *const u64 = input.add(offset) as *const u64;
*data_length_ptr
} as usize;
offset += size_of::<u64>();

let userdata = unsafe { from_raw_parts(input.add(offset), userdata_length) };
offset += userdata_length;
let data = unsafe { from_raw_parts(input.add(offset), data_length) };
offset += data_length;

let owner_slice = unsafe { from_raw_parts(input.add(offset), SIZE_PUBKEY) };
let owner = SolPubkey { key: &owner_slice };
@@ -218,7 +218,7 @@ pub extern "C" fn entrypoint(input: *mut u8) -> bool {
key,
is_signer,
lamports,
userdata,
data,
owner,
}];

@@ -386,7 +386,7 @@ mod tests {
assert_eq!(SIZE_PUBKEY, ka[0].key.key.len());
assert_eq!(key, ka[0].key.key);
assert_eq!(48, ka[0].lamports);
assert_eq!(1, ka[0].userdata.len());
assert_eq!(1, ka[0].data.len());
let owner = [0; 32];
assert_eq!(SIZE_PUBKEY, ka[0].owner.key.len());
assert_eq!(owner, ka[0].owner.key);
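The entrypoint hunk above parses each account as a little-endian u64 length followed by that many bytes of account data (formerly `userdata`, now `data`). Below is a minimal, standalone sketch of that length-prefixed parsing step; it is not the SDK's actual entrypoint, and `read_len_prefixed` is an invented helper used only for illustration (it copies the length out before decoding it, to avoid an unaligned read).

```rust
use std::mem::size_of;
use std::slice::from_raw_parts;

/// Read a little-endian u64 length, then borrow that many bytes as the
/// account's `data` slice, advancing `offset` past both.
unsafe fn read_len_prefixed<'a>(input: *const u8, offset: &mut usize) -> &'a [u8] {
    let mut len_bytes = [0u8; 8];
    std::ptr::copy_nonoverlapping(input.add(*offset), len_bytes.as_mut_ptr(), 8);
    let len = u64::from_le_bytes(len_bytes) as usize;
    *offset += size_of::<u64>();

    let bytes = from_raw_parts(input.add(*offset), len);
    *offset += len;
    bytes
}

fn main() {
    // Build an input buffer: u64 length prefix (3) followed by three data bytes.
    let mut buf = 3u64.to_le_bytes().to_vec();
    buf.extend_from_slice(&[10, 20, 30]);

    let mut offset = 0;
    let data = unsafe { read_len_prefixed(buf.as_ptr(), &mut offset) };
    assert_eq!(data, &[10, 20, 30]);
    assert_eq!(offset, 8 + 3);
}
```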
@@ -1,6 +1,6 @@
[package]
name = "solana-bpfloader"
version = "0.12.0"
version = "0.12.2"
description = "Solana BPF Loader"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -15,8 +15,8 @@ libc = "0.2.50"
log = "0.4.2"
solana_rbpf = "=0.1.10"
serde = "1.0.89"
solana-logger = { path = "../../logger", version = "0.12.0" }
solana-sdk = { path = "../../sdk", version = "0.12.0" }
solana-logger = { path = "../../logger", version = "0.12.2" }
solana-sdk = { path = "../../sdk", version = "0.12.2" }

[lib]
name = "solana_bpf_loader"
@@ -148,9 +148,9 @@ fn serialize_parameters(
.unwrap();
v.write_all(info.unsigned_key().as_ref()).unwrap();
v.write_u64::<LittleEndian>(info.account.lamports).unwrap();
v.write_u64::<LittleEndian>(info.account.userdata.len() as u64)
v.write_u64::<LittleEndian>(info.account.data.len() as u64)
.unwrap();
v.write_all(&info.account.userdata).unwrap();
v.write_all(&info.account.data).unwrap();
v.write_all(info.account.owner.as_ref()).unwrap();
}
v.write_u64::<LittleEndian>(data.len() as u64).unwrap();
@@ -171,10 +171,10 @@ fn deserialize_parameters(keyed_accounts: &mut [KeyedAccount], buffer: &[u8]) {

start += mem::size_of::<u64>() // skip lamports
+ mem::size_of::<u64>(); // skip length tag
let end = start + info.account.userdata.len();
info.account.userdata.clone_from_slice(&buffer[start..end]);
let end = start + info.account.data.len();
info.account.data.clone_from_slice(&buffer[start..end]);

start += info.account.userdata.len() // skip userdata
start += info.account.data.len() // skip data
+ mem::size_of::<Pubkey>(); // skip owner
}
}
@@ -190,7 +190,7 @@ fn entrypoint(

if keyed_accounts[0].account.executable {
let (progs, params) = keyed_accounts.split_at_mut(1);
let prog = &progs[0].account.userdata;
let prog = &progs[0].account.data;
info!("Call BPF program");
//dump_program(keyed_accounts[0].key, prog);
let mut vm = match create_vm(prog) {
@@ -228,15 +228,15 @@ fn entrypoint(
let offset = offset as usize;
let len = bytes.len();
debug!("Write: offset={} length={}", offset, len);
if keyed_accounts[0].account.userdata.len() < offset + len {
if keyed_accounts[0].account.data.len() < offset + len {
warn!(
"Write overflow: {} < {}",
keyed_accounts[0].account.userdata.len(),
keyed_accounts[0].account.data.len(),
offset + len
);
return Err(ProgramError::GenericError);
}
keyed_accounts[0].account.userdata[offset..offset + len].copy_from_slice(&bytes);
keyed_accounts[0].account.data[offset..offset + len].copy_from_slice(&bytes);
}
LoaderInstruction::Finalize => {
keyed_accounts[0].account.executable = true;
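The serialize_parameters/deserialize_parameters hunks above write each account's renamed `data` field with a u64 length prefix via the byteorder crate. A minimal, self-contained sketch of that layout follows; `MiniAccount` is an invented stand-in (not the real solana-sdk Account type), and it assumes `byteorder` as a dependency, mirroring only the lamports + length + bytes portion of the real format.

```rust
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use std::io::Read;

struct MiniAccount {
    lamports: u64,
    data: Vec<u8>,
}

// Write lamports, then a u64 length tag, then the account data bytes.
fn serialize(account: &MiniAccount) -> Vec<u8> {
    let mut v = Vec::new();
    v.write_u64::<LittleEndian>(account.lamports).unwrap();
    v.write_u64::<LittleEndian>(account.data.len() as u64).unwrap();
    v.extend_from_slice(&account.data);
    v
}

// Read the fields back in the same order.
fn deserialize(buffer: &[u8]) -> MiniAccount {
    let mut cursor = std::io::Cursor::new(buffer);
    let lamports = cursor.read_u64::<LittleEndian>().unwrap();
    let len = cursor.read_u64::<LittleEndian>().unwrap() as usize;
    let mut data = vec![0u8; len];
    cursor.read_exact(&mut data).unwrap();
    MiniAccount { lamports, data }
}

fn main() {
    let account = MiniAccount { lamports: 42, data: vec![1, 2, 3] };
    let round_trip = deserialize(&serialize(&account));
    assert_eq!(round_trip.lamports, 42);
    assert_eq!(round_trip.data, vec![1, 2, 3]);
}
```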
@@ -1,6 +1,6 @@
[package]
name = "solana-budget-program"
version = "0.12.0"
version = "0.12.2"
description = "Solana budget program"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -14,12 +14,12 @@ chrono = { version = "0.4.0", features = ["serde"] }
log = "0.4.2"
serde = "1.0.89"
serde_derive = "1.0.89"
solana-budget-api = { path = "../budget_api", version = "0.12.0" }
solana-logger = { path = "../../logger", version = "0.12.0" }
solana-sdk = { path = "../../sdk", version = "0.12.0" }
solana-budget-api = { path = "../budget_api", version = "0.12.2" }
solana-logger = { path = "../../logger", version = "0.12.2" }
solana-sdk = { path = "../../sdk", version = "0.12.2" }

[dev-dependencies]
solana-runtime = { path = "../../runtime", version = "0.12.0" }
solana-runtime = { path = "../../runtime", version = "0.12.2" }

[lib]
name = "solana_budget_program"
@@ -87,7 +87,7 @@ fn apply_debits(
keyed_accounts[0].account.lamports += payment.lamports;
Ok(())
} else {
let existing = BudgetState::deserialize(&keyed_accounts[0].account.userdata).ok();
let existing = BudgetState::deserialize(&keyed_accounts[0].account.data).ok();
if Some(true) == existing.map(|x| x.initialized) {
trace!("contract already exists");
Err(BudgetError::ContractAlreadyExists)
@@ -95,13 +95,12 @@ fn apply_debits(
let mut budget_state = BudgetState::default();
budget_state.pending_budget = Some(expr);
budget_state.initialized = true;
budget_state.serialize(&mut keyed_accounts[0].account.userdata)
budget_state.serialize(&mut keyed_accounts[0].account.data)
}
}
}
BudgetInstruction::ApplyTimestamp(dt) => {
if let Ok(mut budget_state) =
BudgetState::deserialize(&keyed_accounts[1].account.userdata)
if let Ok(mut budget_state) = BudgetState::deserialize(&keyed_accounts[1].account.data)
{
if !budget_state.is_pending() {
Err(BudgetError::ContractNotPending)
@@ -112,15 +111,14 @@ fn apply_debits(
trace!("apply timestamp");
apply_timestamp(&mut budget_state, keyed_accounts, *dt)?;
trace!("apply timestamp committed");
budget_state.serialize(&mut keyed_accounts[1].account.userdata)
budget_state.serialize(&mut keyed_accounts[1].account.data)
}
} else {
Err(BudgetError::UninitializedContract)
}
}
BudgetInstruction::ApplySignature => {
if let Ok(mut budget_state) =
BudgetState::deserialize(&keyed_accounts[1].account.userdata)
if let Ok(mut budget_state) = BudgetState::deserialize(&keyed_accounts[1].account.data)
{
if !budget_state.is_pending() {
Err(BudgetError::ContractNotPending)
@@ -131,7 +129,7 @@ fn apply_debits(
trace!("apply signature");
apply_signature(&mut budget_state, keyed_accounts)?;
trace!("apply signature committed");
budget_state.serialize(&mut keyed_accounts[1].account.userdata)
budget_state.serialize(&mut keyed_accounts[1].account.data)
}
} else {
Err(BudgetError::UninitializedContract)
@@ -146,8 +144,8 @@ pub fn process_instruction(
data: &[u8],
) -> Result<(), BudgetError> {
let instruction = deserialize(data).map_err(|err| {
info!("Invalid transaction userdata: {:?} {:?}", data, err);
BudgetError::UserdataDeserializeFailure
info!("Invalid transaction data: {:?} {:?}", data, err);
BudgetError::AccountDataDeserializeFailure
})?;

trace!("process_instruction: {:?}", instruction);
@@ -178,12 +176,12 @@ mod test {
let mut accounts = vec![Account::new(1, 0, &id()), Account::new(0, 512, &id())];
let from = Keypair::new();
let contract = Keypair::new();
let userdata = (1u8, 2u8, 3u8);
let data = (1u8, 2u8, 3u8);
let tx = Transaction::new(
&from,
&[contract.pubkey()],
&id(),
&userdata,
&data,
Hash::default(),
0,
);
@@ -285,7 +283,7 @@ mod test {
process_transaction(&tx, &mut accounts).unwrap();
assert_eq!(accounts[from_account].lamports, 0);
assert_eq!(accounts[contract_account].lamports, 1);
let budget_state = BudgetState::deserialize(&accounts[contract_account].userdata).unwrap();
let budget_state = BudgetState::deserialize(&accounts[contract_account].data).unwrap();
assert!(budget_state.is_pending());

// Attack! Try to payout to a rando key
@@ -304,7 +302,7 @@ mod test {
assert_eq!(accounts[contract_account].lamports, 1);
assert_eq!(accounts[to_account].lamports, 0);

let budget_state = BudgetState::deserialize(&accounts[contract_account].userdata).unwrap();
let budget_state = BudgetState::deserialize(&accounts[contract_account].data).unwrap();
assert!(budget_state.is_pending());

// Now, acknowledge the time in the condition occurred and
@@ -321,7 +319,7 @@ mod test {
assert_eq!(accounts[contract_account].lamports, 0);
assert_eq!(accounts[to_account].lamports, 1);

let budget_state = BudgetState::deserialize(&accounts[contract_account].userdata).unwrap();
let budget_state = BudgetState::deserialize(&accounts[contract_account].data).unwrap();
assert!(!budget_state.is_pending());

// try to replay the timestamp contract
@@ -356,7 +354,7 @@ mod test {
process_transaction(&tx, &mut accounts).unwrap();
assert_eq!(accounts[from_account].lamports, 0);
assert_eq!(accounts[contract_account].lamports, 1);
let budget_state = BudgetState::deserialize(&accounts[contract_account].userdata).unwrap();
let budget_state = BudgetState::deserialize(&accounts[contract_account].data).unwrap();
assert!(budget_state.is_pending());

// Attack! try to put the lamports into the wrong account with cancel
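The process_instruction hunk above decodes the instruction from plain `data` and maps a failed decode to the renamed `AccountDataDeserializeFailure` variant. Here is a hedged, toy-typed sketch of that pattern (ToyInstruction and ToyError are invented, not the real solana-budget-program API), assuming serde with the derive feature and bincode as dependencies.

```rust
use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize)]
enum ToyInstruction {
    InitializeAccount,
    ApplySignature,
}

#[derive(Debug, PartialEq)]
enum ToyError {
    AccountDataDeserializeFailure,
}

// Decode the instruction from raw bytes; a bad payload maps to the
// "account data" flavored error, as in the hunk above.
fn process_instruction(data: &[u8]) -> Result<ToyInstruction, ToyError> {
    bincode::deserialize(data).map_err(|err| {
        eprintln!("Invalid transaction data: {:?} {:?}", data, err);
        ToyError::AccountDataDeserializeFailure
    })
}

fn main() {
    let data = bincode::serialize(&ToyInstruction::ApplySignature).unwrap();
    assert!(process_instruction(&data).is_ok());

    // Garbage input fails to decode and surfaces the renamed error.
    assert_eq!(
        process_instruction(&[0xffu8; 3]).unwrap_err(),
        ToyError::AccountDataDeserializeFailure
    );
}
```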
@@ -1,6 +1,6 @@
[package]
name = "solana-budget-api"
version = "0.12.0"
version = "0.12.2"
description = "Solana Budget program API"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -13,7 +13,7 @@ bincode = "1.1.2"
chrono = { version = "0.4.0", features = ["serde"] }
serde = "1.0.89"
serde_derive = "1.0.89"
solana-sdk = { path = "../../sdk", version = "0.12.0" }
solana-sdk = { path = "../../sdk", version = "0.12.2" }

[lib]
name = "solana_budget_api"
@@ -12,8 +12,8 @@ pub enum BudgetError {
UninitializedContract,
DestinationMissing,
FailedWitness,
UserdataTooSmall,
UserdataDeserializeFailure,
AccountDataTooSmall,
AccountDataDeserializeFailure,
UnsignedKey,
}

@@ -37,7 +37,7 @@ impl BudgetState {

pub fn serialize(&self, output: &mut [u8]) -> Result<(), BudgetError> {
serialize_into(output, self).map_err(|err| match *err {
_ => BudgetError::UserdataTooSmall,
_ => BudgetError::AccountDataTooSmall,
})
}

@@ -56,18 +56,18 @@ mod test {
fn test_serializer() {
let mut a = Account::new(0, 512, &id());
let b = BudgetState::default();
b.serialize(&mut a.userdata).unwrap();
let c = BudgetState::deserialize(&a.userdata).unwrap();
b.serialize(&mut a.data).unwrap();
let c = BudgetState::deserialize(&a.data).unwrap();
assert_eq!(b, c);
}

#[test]
fn test_serializer_userdata_too_small() {
fn test_serializer_data_too_small() {
let mut a = Account::new(0, 1, &id());
let b = BudgetState::default();
assert_eq!(
b.serialize(&mut a.userdata),
Err(BudgetError::UserdataTooSmall)
b.serialize(&mut a.data),
Err(BudgetError::AccountDataTooSmall)
);
}
}
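The tests above exercise serializing state into a fixed-size account `data` buffer and the renamed AccountDataTooSmall error. A minimal sketch of the same pattern, with invented ToyState/ToyError types (not the real BudgetState), assuming serde's derive feature plus bincode's default helpers, which tolerate trailing zero bytes when reading the state back out of a roomy buffer:

```rust
use bincode::{deserialize, serialize_into};
use serde::{Deserialize, Serialize};

#[derive(Debug, Default, PartialEq, Serialize, Deserialize)]
struct ToyState {
    initialized: bool,
    pending: Option<u64>,
}

#[derive(Debug, PartialEq)]
enum ToyError {
    AccountDataTooSmall,
}

impl ToyState {
    // Write the state into the account's data buffer; if the buffer cannot
    // hold the encoding, map the write failure to AccountDataTooSmall.
    fn serialize(&self, output: &mut [u8]) -> Result<(), ToyError> {
        serialize_into(output, self).map_err(|_| ToyError::AccountDataTooSmall)
    }
}

fn main() {
    let state = ToyState::default();

    let mut data = vec![0u8; 512]; // roomy account data: succeeds
    state.serialize(&mut data).unwrap();
    assert_eq!(deserialize::<ToyState>(&data).unwrap(), state);

    let mut tiny = vec![0u8; 1]; // too small: maps to AccountDataTooSmall
    assert_eq!(state.serialize(&mut tiny), Err(ToyError::AccountDataTooSmall));
}
```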
@@ -151,11 +151,11 @@ impl BudgetTransaction {
}

pub fn system_instruction(tx: &Transaction, index: usize) -> Option<SystemInstruction> {
deserialize(&tx.userdata(index)).ok()
deserialize(&tx.data(index)).ok()
}

pub fn instruction(tx: &Transaction, index: usize) -> Option<BudgetInstruction> {
deserialize(&tx.userdata(index)).ok()
deserialize(&tx.data(index)).ok()
}

/// Verify only the payment plan.
@@ -236,9 +236,9 @@ mod tests {
payment.lamports = *lamports; // <-- attack, part 2!
}
}
tx.instructions[1].userdata = serialize(&instruction).unwrap();
tx.instructions[1].data = serialize(&instruction).unwrap();
}
tx.instructions[0].userdata = serialize(&system_instruction).unwrap();
tx.instructions[0].data = serialize(&system_instruction).unwrap();
assert!(BudgetTransaction::verify_plan(&tx));
assert!(!tx.verify_signature());
}
@@ -257,7 +257,7 @@ mod tests {
payment.to = thief_keypair.pubkey(); // <-- attack!
}
}
tx.instructions[1].userdata = serialize(&instruction).unwrap();
tx.instructions[1].data = serialize(&instruction).unwrap();
assert!(BudgetTransaction::verify_plan(&tx));
assert!(!tx.verify_signature());
}
@@ -274,7 +274,7 @@ mod tests {
payment.lamports = 2; // <-- attack!
}
}
tx.instructions[1].userdata = serialize(&instruction).unwrap();
tx.instructions[1].data = serialize(&instruction).unwrap();
assert!(!BudgetTransaction::verify_plan(&tx));

// Also, ensure all branchs of the plan spend all lamports
@@ -284,7 +284,7 @@ mod tests {
payment.lamports = 0; // <-- whoops!
}
}
tx.instructions[1].userdata = serialize(&instruction).unwrap();
tx.instructions[1].data = serialize(&instruction).unwrap();
assert!(!BudgetTransaction::verify_plan(&tx));
}
}
@@ -1,6 +1,6 @@
[package]
name = "solana-failure"
version = "0.12.0"
version = "0.12.2"
description = "Solana failure program"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -9,11 +9,11 @@ homepage = "https://solana.com/"
edition = "2018"

[dependencies]
solana-sdk = { path = "../../sdk", version = "0.12.0" }
solana-sdk = { path = "../../sdk", version = "0.12.2" }
log = "0.4.2"

[dev-dependencies]
solana-runtime = { path = "../../runtime", version = "0.12.0" }
solana-runtime = { path = "../../runtime", version = "0.12.2" }

[lib]
name = "failure"
@@ -1,6 +1,6 @@
[package]
name = "solana-noop"
version = "0.12.0"
version = "0.12.2"
description = "Solana noop program"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -9,12 +9,12 @@ homepage = "https://solana.com/"
edition = "2018"

[dependencies]
solana-sdk = { path = "../../sdk", version = "0.12.0" }
solana-logger = { path = "../../logger", version = "0.12.0" }
solana-sdk = { path = "../../sdk", version = "0.12.2" }
solana-logger = { path = "../../logger", version = "0.12.2" }
log = "0.4.2"

[dev-dependencies]
solana-runtime = { path = "../../runtime", version = "0.12.0" }
solana-runtime = { path = "../../runtime", version = "0.12.2" }

[lib]
name = "noop"
@@ -1,6 +1,6 @@
[package]
name = "solana-rewards-program"
version = "0.12.0"
version = "0.12.2"
description = "Solana rewards program"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,13 +11,13 @@ edition = "2018"
[dependencies]
bincode = "1.1.2"
log = "0.4.2"
solana-logger = { path = "../../logger", version = "0.12.0" }
solana-sdk = { path = "../../sdk", version = "0.12.0" }
solana-rewards-api = { path = "../rewards_api", version = "0.12.0" }
solana-vote-api = { path = "../vote_api", version = "0.12.0" }
solana-logger = { path = "../../logger", version = "0.12.2" }
solana-sdk = { path = "../../sdk", version = "0.12.2" }
solana-rewards-api = { path = "../rewards_api", version = "0.12.2" }
solana-vote-api = { path = "../vote_api", version = "0.12.2" }

[dev-dependencies]
solana-runtime = { path = "../../runtime", version = "0.12.0" }
solana-runtime = { path = "../../runtime", version = "0.12.2" }

[lib]
name = "solana_rewards_program"
@@ -44,7 +44,7 @@ fn redeem_vote_credits(keyed_accounts: &mut [KeyedAccount]) -> Result<(), Progra
// VoteInstruction::ClearCredits and that it points to the same vote account
// as keyed_accounts[0].

let vote_state = VoteState::deserialize(&keyed_accounts[0].account.userdata)?;
let vote_state = VoteState::deserialize(&keyed_accounts[0].account.data)?;

// TODO: This assumes the stake is static. If not, it should use the account value
// at the time of voting, not at credit redemption.
@@ -75,7 +75,7 @@ fn entrypoint(
trace!("process_instruction: {:?}", data);
trace!("keyed_accounts: {:?}", keyed_accounts);

match deserialize(data).map_err(|_| ProgramError::InvalidUserdata)? {
match deserialize(data).map_err(|_| ProgramError::InvalidInstructionData)? {
RewardsInstruction::RedeemVoteCredits => redeem_vote_credits(keyed_accounts),
}
}
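In the hunk above, redeem_vote_credits now reads the vote state out of the account's `data` field rather than the old `userdata` field. A short hedged sketch of that read-state-from-account-data step, using invented ToyVoteState/ToyAccount types (the real VoteState lives in solana-vote-api) and bincode for the encoding:

```rust
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
struct ToyVoteState {
    credits: u64,
}

struct ToyAccount {
    data: Vec<u8>,
}

// Pull the vote state out of the account's `data` bytes, surfacing a decode
// failure as an error the caller can report.
fn redeem_vote_credits(vote_account: &ToyAccount) -> Result<u64, String> {
    let state: ToyVoteState = bincode::deserialize(&vote_account.data)
        .map_err(|err| format!("invalid vote state: {:?}", err))?;
    Ok(state.credits)
}

fn main() {
    let state = ToyVoteState { credits: 7 };
    let account = ToyAccount { data: bincode::serialize(&state).unwrap() };
    assert_eq!(redeem_vote_credits(&account), Ok(7));
}
```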
@@ -52,7 +52,7 @@ impl<'a> RewardsBank<'a> {
self.bank.register_tick(&hash(blockhash.as_ref()));

let vote_account = self.bank.get_account(&vote_keypair.pubkey()).unwrap();
Ok(VoteState::deserialize(&vote_account.userdata).unwrap())
Ok(VoteState::deserialize(&vote_account.data).unwrap())
}

fn redeem_credits(&self, rewards_id: &Pubkey, vote_keypair: &Keypair) -> Result<VoteState> {
@@ -60,7 +60,7 @@ impl<'a> RewardsBank<'a> {
let tx = RewardsTransaction::new_redeem_credits(&vote_keypair, rewards_id, blockhash, 0);
self.bank.process_transaction(&tx)?;
let vote_account = self.bank.get_account(&vote_keypair.pubkey()).unwrap();
Ok(VoteState::deserialize(&vote_account.userdata).unwrap())
Ok(VoteState::deserialize(&vote_account.data).unwrap())
}
}

Some files were not shown because too many files have changed in this diff.