Compare commits


10 Commits

Rob Walker
b4adb1c266 Cherry pick fix for freeze (#4459)

* check freeze before updating slot_hashes (#4448)

* check freeze before updating slot_hashes

* fixup

* add more information to dropped vote warning (#4449)

* add more information to dropped vote warning

* fixup

2019-05-28 20:35:54 -06:00

Rob Walker
b9b541441b update book with passive staking (#4451) (#4455)

* update book with passive staking (#4451)

* undelete votestate etc

2019-05-28 16:01:53 -07:00

Michael Vines
e510d4e272 Drop influxcloud (#4461)
2019-05-28 15:34:36 -07:00

Michael Vines
9341e64ec7 Lock down blockexplorer version (#4462)
2019-05-28 15:26:53 -07:00

Tyera Eulberg
d934f94e05 Add commented correct future test lines
2019-05-28 09:59:07 -06:00

Tyera Eulberg
59dc123fa8 Add test indicating need for credit-only account handling
2019-05-28 09:58:57 -06:00

Michael Vines
0faea87c84 Revert --retry-on-http-error usage, Travis CI's wget doesn't recognize it
2019-05-27 19:34:46 -07:00

carllin
19137ce3f4 Bump logging level of validator processing errors (#4444)

automerge

2019-05-27 15:36:35 -07:00

Michael Vines
8bdeb2d1ed Use nohup and sleep a little to improve stability when launching a node
2019-05-27 14:00:24 -07:00

Michael Vines
d29a45266b data_dir -> data-dir
2019-05-27 07:32:29 -07:00
388 changed files with 15031 additions and 15832 deletions


@@ -1,41 +0,0 @@
-os: Visual Studio 2017
-version: '{build}'
-branches:
-  only:
-    - master
-    - /^v[0-9.]+/
-cache:
-  - '%USERPROFILE%\.cargo'
-  - '%APPVEYOR_BUILD_FOLDER%\target'
-build_script:
-  - bash ci/publish-tarball.sh
-notifications:
-  - provider: Slack
-    incoming_webhook:
-      secure: 6HTXVh+FBz29LGJb+taFOo9dqoADfo9xyAszeyXZF5Ub9t5NERytKAR35B2wb+uIOOCBF8+JhmH4437Cgf/ti4IqvURzW1QReXK7eQhn1EI=
-    channel: ci-status
-    on_build_success: false
-    on_build_failure: true
-    on_build_status_changed: true
-deploy:
-  - provider: S3
-    access_key_id:
-      secure: ptvqM/yvgeTeA12XOzybH1KYNh95AdfEvqoH9mvP2ic=
-    secret_access_key:
-      secure: IkrgBlz5hdxvwcJdMXyyHUrpWhKa6fXLOD/8rm/rjKqYCdrba9B8V1nLZVrzXGGy
-    bucket: release.solana.com
-    region: us-west-1
-    set_public: true
-  - provider: GitHub
-    auth_token:
-      secure: vQ3jMl5LQrit6+TQONA3ZgQjZ/Ej62BN2ReVb2NSOwjITHMu1131hjc3dOrMEZL6
-    draft: false
-    prerelease: false
-    on:
-      appveyor_repo_tag: true


@@ -1 +0,0 @@
-/secrets_unencrypted.ejson


@@ -1,14 +1,12 @@
 {
   "_public_key": "ae29f4f7ad2fc92de70d470e411c8426d5d48db8817c9e3dae574b122192335f",
   "environment": {
-    "CODECOV_TOKEN": "EJ[1:8iZ6baJB4fbBV+XDsrUooyGAnGL/8Ol+4Qd0zKh5YjI=:ks2/ElgxwgxqgmFcxTHANNLmj23YH74h:U4uzRONRfiQyqy6HrPQ/e7OnBUY4HkW37R0iekkF3KJ9UGnHqT1UvwgVbDqLahtDIJ4rWw==]",
-    "CRATES_IO_TOKEN": "EJ[1:8iZ6baJB4fbBV+XDsrUooyGAnGL/8Ol+4Qd0zKh5YjI=:lKMh3aLW+jyRrfS/c7yvkpB+TaPhXqLq:j0v27EbaPgwRdHZAbsM0FlAnt3r9ScQrFbWJYOAZtM3qestEiByTlKpZ0eyF/823]",
-    "GITHUB_TOKEN": "EJ[1:8iZ6baJB4fbBV+XDsrUooyGAnGL/8Ol+4Qd0zKh5YjI=:Ll78c3jGpYqnTwR7HJq3mNNUC7pOv9Lu:GrInO2r8MjmP5c54szkyygdsrW5KQYkDgJQUVyFEPyG8SWfchyM9Gur8RV0a+cdwuxNkHLi4U2M=]",
-    "INFLUX_DATABASE": "EJ[1:8iZ6baJB4fbBV+XDsrUooyGAnGL/8Ol+4Qd0zKh5YjI=:IlH/ZLTXv3SwlY3TVyAPCX2KzLRY6iG3:gGmUGSU/kCfR/mTwKONaUC/X]",
-    "INFLUX_PASSWORD": "EJ[1:8iZ6baJB4fbBV+XDsrUooyGAnGL/8Ol+4Qd0zKh5YjI=:o2qm95GU4VrrcC4OU06jjPvCwKZy/CZF:OW2ga3kLOQJvaDEdGRJ+gn3L2ckFm8AJZtv9wj/GeUIKDH2A4uBPTHsAH9PMe6zujpuHGk3qbeg=]",
-    "INFLUX_USERNAME": "EJ[1:8iZ6baJB4fbBV+XDsrUooyGAnGL/8Ol+4Qd0zKh5YjI=:yDWW/uIHsJqOTDYskZoSx3pzoB1vztWY:2z31oTA3g0Xs9fCczGNJRcx8xf/hFCed]",
-    "SOLANA_INSTALL_UPDATE_MANIFEST_KEYPAIR_x86_64_unknown_linux_gnu": "EJ[1:8iZ6baJB4fbBV+XDsrUooyGAnGL/8Ol+4Qd0zKh5YjI=:RqRaHlYUvGPNFJa6gmciaYM3tRJTURUH:q78/3GTHCN3Uqx9z4nOBjPZcO1lOazNoB/mdhGRDFsnAqVd2hU8zbKkqLrZfLlGqyD8WQOFuw5oTJR9qWg6L9LcOyj3pGL8jWF2yjgZxdtNMXnkbSrCWLooWBBLT61jYQnEwg73gT8ld3Q8EVv3T+MeSMu6FnPz+0+bqQCAGgfqksP4hsUAJGzgZu+i0tNOdlT7fxnh5KJK/yFM/CKgN2sRwEjukA9hXsffyB61g2zqzTDJxCUDLbCVrCkA/bfUk7Of/t0W5t0nK1H3oyGZEc/lRMauCknDBka3Gz11dVss2QT19WQNh0u7bHVaT/U4lepX1j9Zv]",
-    "SOLANA_INSTALL_UPDATE_MANIFEST_KEYPAIR_x86_64_apple_darwin": "EJ[1:8iZ6baJB4fbBV+XDsrUooyGAnGL/8Ol+4Qd0zKh5YjI=:wFDl3INEnA3EQDHRX40avqGe1OMoJxyy:6ncCRVRTIRuYI5o/gayeuWCudWvmKNYr8KEHAWeTq34a5bdcKInBdKhjmjX+wLHqsEwQ5gcyhcxy4Ri2mbuN6AHazfZOZlubQkGlyUOAIYO5D5jkbyIh40DAtjVzo1MD/0HsW9zdGOzqUKp5xJJeDsbR4F153jbxa7fvwF90Q4UQjYFTKAtExEmHtDGSJG48ToVwTabTV/OnISMIggDZBviIv2QWHvXgK07b2mUj34rHJywEDGN1nj5rITTDdUeRcB1x4BAMOe94kTFPSTaj/OszvYlGECt8rkKFqbm092qL+XLfiBaImqe/WJHRCnAj6Don]",
-    "SOLANA_INSTALL_UPDATE_MANIFEST_KEYPAIR_x86_64_pc_windows_msvc": "EJ[1:8iZ6baJB4fbBV+XDsrUooyGAnGL/8Ol+4Qd0zKh5YjI=:wAh+dBuZopv6vruVOYegUcq/aBnbksT1:qIJfCfDvDWiqicMOkmbJs/0n7UJLKNmgMQaKzeQ8J7Q60YpXbtWzKVW3tS6lzlgf64m3MrPXyo1C+mWh6jkjsb18T/OfggZy1ZHM4AcsOC6/ldUkV5YtuxUQuAmd5jCuV/R7iuYY8Z66AcfAevlb+bnLpgIifdA8fh/IktOo58nZUQwZDdppAacmftsLc6Frn5Er6A6+EXpxK1nmnlmLJ4AJztqlh6X0r+JvE2O7qeoZUXrIegnkxo7Aay7I/dd8zdYpp7ICSiTEtfVN/xNIu/5QmTRU7gWoz7cPl9epq4aiEALzPOzb6KVOiRcsOg+TlFvLQ71Ik5o=]"
+    "CODECOV_TOKEN": "EJ[1:eSGdiZR0Qi0g7qnsI+qJ5H+/ik+H2qL3ned/cBdv/SY=:jA0WqO70coUtF0iokRdgtCR/lF/lETAI:d/Wl8Tdl6xVh/B39cTf1DaQkomR7I/2vMhvxd1msJ++BjI2l3p2dFoGsXqWT+/os8VgiPg==]",
+    "CRATES_IO_TOKEN": "EJ[1:eSGdiZR0Qi0g7qnsI+qJ5H+/ik+H2qL3ned/cBdv/SY=:2FaZ6k4RGH8luyNRaN6yeZUQDNAu2KwC:XeYe0tCAivYE0F9HEWM79mAI6kNbfYaqP7k7yY+SBDvs0341U9BdGZp7SErbHleS]",
+    "GITHUB_TOKEN": "EJ[1:eSGdiZR0Qi0g7qnsI+qJ5H+/ik+H2qL3ned/cBdv/SY=:9kh4DGPiGDcUU7ejSFWg3gTW8nrOM09Q:b+GE07Wu6/bEnkDZcUtf48vTKAFphrCSt3tNNER9h6A+wZ80k499edw4pbDdl9kEvxB30fFwrLQ=]",
+    "INFLUX_DATABASE": "EJ[1:eSGdiZR0Qi0g7qnsI+qJ5H+/ik+H2qL3ned/cBdv/SY=:rCHsYi0rc7dmvr1V3wEgNoaNIyr+9ClM:omjVcOqM7vwt44kJ+As4BjJL]",
+    "INFLUX_PASSWORD": "EJ[1:eSGdiZR0Qi0g7qnsI+qJ5H+/ik+H2qL3ned/cBdv/SY=:bP5Gw1Vy66viKFKO41o2Gho998XajH/5:khkCYz2LFvkJkk7R4xY1Hfz1yU3/NENjauiUkPhXA+dmg1qOIToxEagCgIkRwyeCiYaoCR6CZyw=]",
+    "INFLUX_USERNAME": "EJ[1:eSGdiZR0Qi0g7qnsI+qJ5H+/ik+H2qL3ned/cBdv/SY=:ZamCvza2W9/bZRGSkqDu55xNN04XKKhp:5jlmCOdFbpL7EFez41zCbLfk3ZZlfmhI]",
+    "SOLANA_INSTALL_UPDATE_MANIFEST_KEYPAIR_x86_64_unknown_linux_gnu": "EJ[1:eSGdiZR0Qi0g7qnsI+qJ5H+/ik+H2qL3ned/cBdv/SY=:Oi2nsRxnvWnnBYsB6KwEDzLPcYgpYojU:ELbvjXkXKlgFCMES45R+fxG7Ex43WHWErjMbxZoqasxyr7GSH66hQzUWqiQSJyT4ukYrRhRC9YrsKKGkjACLU57X4EGIy9TuLgTnyBYhPnxLYStC3y/7o/MB5FCTt5wHJw3/A9p+me5+T4UmyZ7OeP21NhDUCGQcb0040VwYWS78klW2aQESJJ6wTI1xboE8/zC0vtnB/u50+LydbKEyb21r6y3OH9FYNEpSwIspWKcgpruJdQSCnDoKxP9YR1yzvk2rabss13LJNdV1Y6mQNIdP4OIFQhCs6dXT253RTl5qdZ0MruHwlp8wX4btOuYDcCoM5exr]"
   }
 }


@@ -1,8 +1,6 @@
 CI_BUILD_START=$(date +%s)
 export CI_BUILD_START
-source ci/env.sh
 #
 # Kill any running docker containers, which are potentially left over from the
 # previous CI job

.gitignore (vendored, 5 changed lines)

@@ -2,10 +2,9 @@
 /book/src/img/
 /book/src/tests.ok
 /farf/
-/metrics/scripts/lib/
 /solana-release/
-/solana-release.tar.bz2
-/solana-metrics/
-/solana-metrics.tar.bz2
+solana-release.tar.bz2
 /target/
 **/*.rs.bk


@@ -1,44 +0,0 @@
-os:
-  - osx
-language: rust
-cache: cargo
-rust:
-  - 1.35.0
-install:
-  - source ci/rust-version.sh
-  - test $rust_stable = $TRAVIS_RUST_VERSION # Update .travis.yml rust version above when this fails
-script:
-  - source ci/env.sh
-  - ci/publish-tarball.sh
-branches:
-  only:
-    - master
-    - /^v\d+\.\d+(\.\d+)?(-\S*)?$/
-notifications:
-  slack:
-    on_success: change
-    secure: F4IjOE05MyaMOdPRL+r8qhs7jBvv4yDM3RmFKE1zNXnfUOqV4X38oQM1EI+YVsgpMQLj/pxnEB7wcTE4Bf86N6moLssEULCpvAuMVoXj4QbWdomLX+01WbFa6fLVeNQIg45NHrz2XzVBhoKOrMNnl+QI5mbR2AlS5oqsudHsXDnyLzZtd4Y5SDMdYG1zVWM01+oNNjgNfjcCGmOE/K0CnOMl6GPi3X9C34tJ19P2XT7MTDsz1/IfEF7fro2Q8DHEYL9dchJMoisXSkem5z7IDQkGzXsWdWT4NnndUvmd1MlTCE9qgoXDqRf95Qh8sB1Dz08HtvgfaosP2XjtNTfDI9BBYS15Ibw9y7PchAJE1luteNjF35EOy6OgmCLw/YpnweqfuNViBZz+yOPWXVC0kxnPIXKZ1wyH9ibeH6E4hr7a8o9SV/6SiWIlbYF+IR9jPXyTCLP/cc3sYljPWxDnhWFwFdRVIi3PbVAhVu7uWtVUO17Oc9gtGPgs/GrhOMkJfwQPXaudRJDpVZowxTX4x9kefNotlMAMRgq+Drbmgt4eEBiCNp0ITWgh17BiE1U09WS3myuduhoct85+FoVeaUkp1sxzHVtGsNQH0hcz7WcpZyOM+AwistJA/qzeEDQao5zi1eKWPbO2xAhi2rV1bDH6bPf/4lDBwLRqSiwvlWU=
-deploy:
-  - provider: s3
-    access_key_id: $AWS_ACCESS_KEY_ID
-    secret_access_key: $AWS_SECRET_ACCESS_KEY
-    bucket: release.solana.com
-    region: us-west-1
-    skip_cleanup: true
-    acl: public_read
-    local_dir: travis-s3-upload
-    on:
-      all_branches: true
-  - provider: releases
-    api_key: $GITHUB_TOKEN
-    skip_cleanup: true
-    file_glob: true
-    file: travis-release-upload/*
-    on:
-      tags: true

Cargo.lock (generated, 1145 changed lines): file diff suppressed because it is too large.


@@ -3,7 +3,6 @@ members = [
     "bench-exchange",
     "bench-streamer",
     "bench-tps",
-    "chacha-sys",
     "client",
     "core",
     "drone",
@@ -15,12 +14,10 @@ members = [
     "kvstore",
     "ledger-tool",
     "logger",
-    "merkle-tree",
     "metrics",
     "netutil",
     "programs/bpf",
-    "programs/bpf_loader_api",
-    "programs/bpf_loader_program",
+    "programs/bpf_loader",
     "programs/budget_api",
     "programs/budget_program",
     "programs/config_api",


@@ -30,40 +30,6 @@ Before you jump into the code, review the online book [Solana: Blockchain Rebuil
 (The _latest_ development version of the online book is also [available here](https://solana-labs.github.io/book-edge/).)
-Release Binaries
-===
-Official release binaries are available at [Github Releases](https://github.com/solana-labs/solana/releases).
-Additionally we provide pre-release binaries for the latest code on the edge and
-beta channels. Note that these pre-release binaries may be less stable than an
-official release.
-### Edge channel
-#### Linux (x86_64-unknown-linux-gnu)
-* [solana.tar.bz2](http://release.solana.com/edge/solana-release-x86_64-unknown-linux-gnu.tar.bz2)
-* [solana-install-init](http://release.solana.com/edge/solana-install-init-x86_64-unknown-linux-gnu) as a stand-alone executable
-#### mac OS (x86_64-apple-darwin)
-* [solana.tar.bz2](http://release.solana.com/edge/solana-release-x86_64-apple-darwin.tar.bz2)
-* [solana-install-init](http://release.solana.com/edge/solana-install-init-x86_64-apple-darwin) as a stand-alone executable
-#### Windows (x86_64-pc-windows-msvc)
-* [solana.tar.bz2](http://release.solana.com/edge/solana-release-x86_64-pc-windows-msvc.tar.bz2)
-* [solana-install-init.exe](http://release.solana.com/edge/solana-install-init-x86_64-pc-windows-msvc.exe) as a stand-alone executable
-#### All platforms
-* [solana-metrics.tar.bz2](http://release.solana.com.s3.amazonaws.com/edge/solana-metrics.tar.bz2)
-### Beta channel
-#### Linux (x86_64-unknown-linux-gnu)
-* [solana.tar.bz2](http://release.solana.com/beta/solana-release-x86_64-unknown-linux-gnu.tar.bz2)
-* [solana-install-init](http://release.solana.com/beta/solana-install-init-x86_64-unknown-linux-gnu) as a stand-alone executable
-#### mac OS (x86_64-apple-darwin)
-* [solana.tar.bz2](http://release.solana.com/beta/solana-release-x86_64-apple-darwin.tar.bz2)
-* [solana-install-init](http://release.solana.com/beta/solana-install-init-x86_64-apple-darwin) as a stand-alone executable
-#### Windows (x86_64-pc-windows-msvc)
-* [solana.tar.bz2](http://release.solana.com/beta/solana-release-x86_64-pc-windows-msvc.tar.bz2)
-* [solana-install-init.exe](http://release.solana.com/beta/solana-install-init-x86_64-pc-windows-msvc.exe) as a stand-alone executable
-#### All platforms
-* [solana-metrics.tar.bz2](http://release.solana.com.s3.amazonaws.com/beta/solana-metrics.tar.bz2)
 Developing
 ===


@@ -2,41 +2,39 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-bench-exchange"
-version = "0.16.1"
+version = "0.15.0"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
-publish = false
 [dependencies]
-bincode = "1.1.4"
 bs58 = "0.2.0"
 clap = "2.32.0"
+bincode = "1.1.4"
 env_logger = "0.6.0"
 itertools = "0.8.0"
 log = "0.4.6"
-num-derive = "0.2"
 num-traits = "0.2"
+num-derive = "0.2"
 rand = "0.6.5"
-rayon = "1.1.0"
-serde = "1.0.92"
-serde_derive = "1.0.92"
-serde_json = "1.0.39"
-serde_yaml = "0.8.9"
+rayon = "1.0.3"
+serde = "1.0.91"
+serde_derive = "1.0.91"
+serde_json = "1.0.38"
 # solana-runtime = { path = "../solana/runtime"}
-solana = { path = "../core", version = "0.16.1" }
-solana-client = { path = "../client", version = "0.16.1" }
-solana-drone = { path = "../drone", version = "0.16.1" }
-solana-exchange-api = { path = "../programs/exchange_api", version = "0.16.1" }
-solana-exchange-program = { path = "../programs/exchange_program", version = "0.16.1" }
-solana-logger = { path = "../logger", version = "0.16.1" }
-solana-metrics = { path = "../metrics", version = "0.16.1" }
-solana-netutil = { path = "../netutil", version = "0.16.1" }
-solana-runtime = { path = "../runtime", version = "0.16.1" }
-solana-sdk = { path = "../sdk", version = "0.16.1" }
-untrusted = "0.6.2"
+solana = { path = "../core", version = "0.15.0" }
+solana-client = { path = "../client", version = "0.15.0" }
+solana-drone = { path = "../drone", version = "0.15.0" }
+solana-exchange-api = { path = "../programs/exchange_api", version = "0.15.0" }
+solana-exchange-program = { path = "../programs/exchange_program", version = "0.15.0" }
+solana-logger = { path = "../logger", version = "0.15.0" }
+solana-metrics = { path = "../metrics", version = "0.15.0" }
+solana-netutil = { path = "../netutil", version = "0.15.0" }
+solana-runtime = { path = "../runtime", version = "0.15.0" }
+solana-sdk = { path = "../sdk", version = "0.15.0" }
 ws = "0.8.1"
+untrusted = "0.6.2"
 [features]
 cuda = ["solana/cuda"]
+erasure = []


@@ -20,12 +20,9 @@ use solana_sdk::system_instruction;
 use solana_sdk::timing::{duration_as_ms, duration_as_s};
 use solana_sdk::transaction::Transaction;
 use std::cmp;
-use std::collections::{HashMap, VecDeque};
-use std::fs::File;
-use std::io::prelude::*;
+use std::collections::VecDeque;
 use std::mem;
 use std::net::SocketAddr;
-use std::path::Path;
 use std::process::exit;
 use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
 use std::sync::mpsc::{channel, Receiver, Sender};
@@ -51,8 +48,6 @@ pub struct Config {
     pub batch_size: usize,
     pub chunk_size: usize,
     pub account_groups: usize,
-    pub client_ids_and_stake_file: String,
-    pub read_from_client_file: bool,
 }
 impl Default for Config {
@@ -66,38 +61,10 @@ impl Default for Config {
             batch_size: 10,
             chunk_size: 10,
             account_groups: 100,
-            client_ids_and_stake_file: String::new(),
-            read_from_client_file: false,
         }
     }
 }
-pub fn create_client_accounts_file(
-    client_ids_and_stake_file: &str,
-    batch_size: usize,
-    account_groups: usize,
-    fund_amount: u64,
-) {
-    let accounts_in_groups = batch_size * account_groups;
-    const NUM_KEYPAIR_GROUPS: u64 = 2;
-    let total_keys = accounts_in_groups as u64 * NUM_KEYPAIR_GROUPS;
-    let keypairs = generate_keypairs(total_keys);
-    let mut accounts = HashMap::new();
-    keypairs.iter().for_each(|keypair| {
-        accounts.insert(
-            serde_json::to_string(&keypair.to_bytes().to_vec()).unwrap(),
-            fund_amount,
-        );
-    });
-    let serialized = serde_yaml::to_string(&accounts).unwrap();
-    let path = Path::new(&client_ids_and_stake_file);
-    let mut file = File::create(path).unwrap();
-    file.write_all(&serialized.into_bytes()).unwrap();
-}
 pub fn do_bench_exchange<T>(clients: Vec<T>, config: Config)
 where
     T: 'static + Client + Send + Sync,
@@ -111,8 +78,6 @@ where
         batch_size,
         chunk_size,
         account_groups,
-        client_ids_and_stake_file,
-        read_from_client_file,
     } = config;
     info!(
@@ -127,55 +92,35 @@
     );
     let accounts_in_groups = batch_size * account_groups;
-    const NUM_KEYPAIR_GROUPS: u64 = 2;
-    let total_keys = accounts_in_groups as u64 * NUM_KEYPAIR_GROUPS;
-    let mut signer_keypairs = if read_from_client_file {
-        let path = Path::new(&client_ids_and_stake_file);
-        let file = File::open(path).unwrap();
-        let accounts: HashMap<String, u64> = serde_yaml::from_reader(file).unwrap();
-        accounts
-            .into_iter()
-            .map(|(keypair, _)| {
-                let bytes: Vec<u8> = serde_json::from_str(keypair.as_str()).unwrap();
-                Keypair::from_bytes(&bytes).unwrap()
-            })
-            .collect()
-    } else {
-        info!("Generating {:?} signer keys", total_keys);
-        generate_keypairs(total_keys)
-    };
-    let trader_signers: Vec<_> = signer_keypairs
-        .drain(0..accounts_in_groups)
-        .map(Arc::new)
-        .collect();
-    let swapper_signers: Vec<_> = signer_keypairs
-        .drain(0..accounts_in_groups)
-        .map(Arc::new)
-        .collect();
+    let exit_signal = Arc::new(AtomicBool::new(false));
     let clients: Vec<_> = clients.into_iter().map(Arc::new).collect();
     let client = clients[0].as_ref();
-    if !read_from_client_file {
-        info!("Fund trader accounts");
-        fund_keys(client, &identity, &trader_signers, fund_amount);
-        info!("Fund swapper accounts");
-        fund_keys(client, &identity, &swapper_signers, fund_amount);
-    }
-    info!("Generating {:?} account keys", total_keys);
-    let mut account_keypairs = generate_keypairs(total_keys);
-    let src_pubkeys: Vec<_> = account_keypairs
-        .drain(0..accounts_in_groups)
-        .map(|keypair| keypair.pubkey())
-        .collect();
-    let profit_pubkeys: Vec<_> = account_keypairs
-        .drain(0..accounts_in_groups)
-        .map(|keypair| keypair.pubkey())
-        .collect();
+    const NUM_KEYPAIR_GROUPS: u64 = 4;
+    let total_keys = accounts_in_groups as u64 * NUM_KEYPAIR_GROUPS;
+    info!("Generating {:?} keys", total_keys);
+    let mut keypairs = generate_keypairs(total_keys);
+    let trader_signers: Vec<_> = keypairs
+        .drain(0..accounts_in_groups)
+        .map(Arc::new)
+        .collect();
+    let swapper_signers: Vec<_> = keypairs
+        .drain(0..accounts_in_groups)
+        .map(Arc::new)
+        .collect();
+    let src_pubkeys: Vec<_> = keypairs
+        .drain(0..accounts_in_groups)
+        .map(|keypair| keypair.pubkey())
+        .collect();
+    let profit_pubkeys: Vec<_> = keypairs
+        .drain(0..accounts_in_groups)
+        .map(|keypair| keypair.pubkey())
+        .collect();
+    info!("Fund trader accounts");
+    fund_keys(client, &identity, &trader_signers, fund_amount);
+    info!("Fund swapper accounts");
+    fund_keys(client, &identity, &swapper_signers, fund_amount);
     info!("Create {:?} source token accounts", src_pubkeys.len());
     create_token_accounts(client, &trader_signers, &src_pubkeys);
@@ -191,7 +136,6 @@ where
         transfer_delay
     );
-    let exit_signal = Arc::new(AtomicBool::new(false));
     let shared_txs: SharedTransactions = Arc::new(RwLock::new(VecDeque::new()));
     let total_txs_sent_count = Arc::new(AtomicUsize::new(0));
     let s_threads: Vec<_> = (0..threads)
@@ -948,7 +892,7 @@ pub fn airdrop_lamports(client: &Client, drone_addr: &SocketAddr, id: &Keypair,
 #[cfg(test)]
 mod tests {
     use super::*;
-    use solana::gossip_service::{discover_cluster, get_multi_client};
+    use solana::gossip_service::{discover_cluster, get_clients};
     use solana::local_cluster::{ClusterConfig, LocalCluster};
     use solana::validator::ValidatorConfig;
     use solana_drone::drone::run_local_drone;
@@ -963,6 +907,7 @@
         solana_logger::setup();
         const NUM_NODES: usize = 1;
+        let validator_config = ValidatorConfig::default();
         let mut config = Config::default();
         config.identity = Keypair::new();
@@ -984,7 +929,7 @@
         let cluster = LocalCluster::new(&ClusterConfig {
             node_stakes: vec![100_000; NUM_NODES],
             cluster_lamports: 100_000_000_000_000,
-            validator_configs: vec![ValidatorConfig::default(); NUM_NODES],
+            validator_config,
             native_instruction_processors: [solana_exchange_program!()].to_vec(),
             ..ClusterConfig::default()
         });
@@ -1007,20 +952,25 @@
             exit(1);
         });
-        let (client, num_clients) = get_multi_client(&nodes);
-        info!("clients: {}", num_clients);
-        assert!(num_clients >= NUM_NODES);
+        let clients = get_clients(&nodes);
+        if clients.len() < NUM_NODES {
+            error!(
+                "Error: Insufficient nodes discovered. Expecting {} or more",
+                NUM_NODES
+            );
+            exit(1);
+        }
         const NUM_SIGNERS: u64 = 2;
         airdrop_lamports(
-            &client,
+            &clients[0],
             &drone_addr,
             &config.identity,
             fund_amount * (accounts_in_groups + 1) as u64 * NUM_SIGNERS,
         );
-        do_bench_exchange(vec![client], config);
+        do_bench_exchange(clients, config);
     }
     #[test]
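The reworked setup above generates one keypair vector and drains it into four equal groups (`NUM_KEYPAIR_GROUPS = 4`): trader signers, swapper signers, source pubkeys, and profit pubkeys. Below is a minimal, self-contained sketch of that `Vec::drain` partitioning pattern, with plain integers standing in for keypairs; the variable names are illustrative only.

```rust
// Sketch of the drain-based partitioning used in do_bench_exchange, with u64s
// standing in for Keypair. Each drain(0..n) removes the first n elements in
// place, so four successive drains split one vector into four equal groups.
fn main() {
    let accounts_in_groups = 3;
    const NUM_KEYPAIR_GROUPS: usize = 4;
    let mut keypairs: Vec<u64> =
        (0..(accounts_in_groups * NUM_KEYPAIR_GROUPS) as u64).collect();

    let trader: Vec<u64> = keypairs.drain(0..accounts_in_groups).collect();
    let swapper: Vec<u64> = keypairs.drain(0..accounts_in_groups).collect();
    let src: Vec<u64> = keypairs.drain(0..accounts_in_groups).collect();
    let profit: Vec<u64> = keypairs.drain(0..accounts_in_groups).collect();

    assert_eq!(trader, vec![0, 1, 2]);
    assert_eq!(swapper, vec![3, 4, 5]);
    assert_eq!(src, vec![6, 7, 8]);
    assert_eq!(profit, vec![9, 10, 11]);
    assert!(keypairs.is_empty()); // nothing left after the fourth drain
}
```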


@@ -18,9 +18,6 @@ pub struct Config {
     pub batch_size: usize,
     pub chunk_size: usize,
     pub account_groups: usize,
-    pub client_ids_and_stake_file: String,
-    pub write_to_client_file: bool,
-    pub read_from_client_file: bool,
 }
 impl Default for Config {
@@ -37,9 +34,6 @@ impl Default for Config {
             batch_size: 100,
             chunk_size: 100,
             account_groups: 100,
-            client_ids_and_stake_file: String::new(),
-            write_to_client_file: false,
-            read_from_client_file: false,
         }
     }
 }
@@ -147,20 +141,6 @@ pub fn build_args<'a, 'b>() -> App<'a, 'b> {
                .default_value("10")
                .help("Number of account groups to cycle for each batch"),
         )
-        .arg(
-            Arg::with_name("write-client-keys")
-                .long("write-client-keys")
-                .value_name("FILENAME")
-                .takes_value(true)
-                .help("Generate client keys and stakes and write the list to YAML file"),
-        )
-        .arg(
-            Arg::with_name("read-client-keys")
-                .long("read-client-keys")
-                .value_name("FILENAME")
-                .takes_value(true)
-                .help("Read client keys and stakes from the YAML file"),
-        )
 }
 pub fn extract_args<'a>(matches: &ArgMatches<'a>) -> Config {
@@ -204,15 +184,5 @@ pub fn extract_args<'a>(matches: &ArgMatches<'a>) -> Config {
     args.account_groups = value_t!(matches.value_of("account-groups"), usize)
         .expect("Failed to parse account-groups");
-    if let Some(s) = matches.value_of("write-client-keys") {
-        args.write_to_client_file = true;
-        args.client_ids_and_stake_file = s.to_string();
-    }
-    if let Some(s) = matches.value_of("read-client-keys") {
-        assert!(!args.write_to_client_file);
-        args.read_from_client_file = true;
-        args.client_ids_and_stake_file = s.to_string();
-    }
     args
 }


@@ -6,9 +6,9 @@ pub mod order_book;
 #[macro_use]
 extern crate solana_exchange_program;
-use crate::bench::{airdrop_lamports, create_client_accounts_file, do_bench_exchange, Config};
+use crate::bench::{airdrop_lamports, do_bench_exchange, Config};
 use log::*;
-use solana::gossip_service::{discover_cluster, get_multi_client};
+use solana::gossip_service::{discover_cluster, get_clients};
 use solana_sdk::signature::KeypairUtil;
 fn main() {
@@ -30,12 +30,33 @@
         batch_size,
         chunk_size,
         account_groups,
-        client_ids_and_stake_file,
-        write_to_client_file,
-        read_from_client_file,
         ..
     } = cli_config;
+    info!("Connecting to the cluster");
+    let (nodes, _replicators) =
+        discover_cluster(&entrypoint_addr, num_nodes).unwrap_or_else(|_| {
+            panic!("Failed to discover nodes");
+        });
+    let clients = get_clients(&nodes);
+    info!("{} nodes found", clients.len());
+    if clients.len() < num_nodes {
+        panic!("Error: Insufficient nodes discovered");
+    }
+    info!("Funding keypair: {}", identity.pubkey());
+    let accounts_in_groups = batch_size * account_groups;
+    const NUM_SIGNERS: u64 = 2;
+    airdrop_lamports(
+        &clients[0],
+        &drone_addr,
+        &identity,
+        fund_amount * (accounts_in_groups + 1) as u64 * NUM_SIGNERS,
+    );
     let config = Config {
         identity,
         threads,
@@ -45,43 +66,7 @@
         batch_size,
         chunk_size,
         account_groups,
-        client_ids_and_stake_file,
-        read_from_client_file,
     };
-    if write_to_client_file {
-        create_client_accounts_file(
-            &config.client_ids_and_stake_file,
-            config.batch_size,
-            config.account_groups,
-            config.fund_amount,
-        );
-    } else {
-        info!("Connecting to the cluster");
-        let (nodes, _replicators) =
-            discover_cluster(&entrypoint_addr, num_nodes).unwrap_or_else(|_| {
-                panic!("Failed to discover nodes");
-            });
-        let (client, num_clients) = get_multi_client(&nodes);
-        info!("{} nodes found", num_clients);
-        if num_clients < num_nodes {
-            panic!("Error: Insufficient nodes discovered");
-        }
-        if !read_from_client_file {
-            info!("Funding keypair: {}", config.identity.pubkey());
-            let accounts_in_groups = batch_size * account_groups;
-            const NUM_SIGNERS: u64 = 2;
-            airdrop_lamports(
-                &client,
-                &drone_addr,
-                &config.identity,
-                fund_amount * (accounts_in_groups + 1) as u64 * NUM_SIGNERS,
-            );
-        }
-        do_bench_exchange(vec![client], config);
-    }
+    do_bench_exchange(clients, config);
 }


@@ -2,17 +2,17 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-bench-streamer"
-version = "0.16.1"
+version = "0.15.0"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
 [dependencies]
 clap = "2.33.0"
-solana = { path = "../core", version = "0.16.1" }
-solana-logger = { path = "../logger", version = "0.16.1" }
-solana-netutil = { path = "../netutil", version = "0.16.1" }
+solana = { path = "../core", version = "0.15.0" }
+solana-logger = { path = "../logger", version = "0.15.0" }
+solana-netutil = { path = "../netutil", version = "0.15.0" }
 [features]
 cuda = ["solana/cuda"]
+erasure = []


@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-bench-tps"
-version = "0.16.1"
+version = "0.15.0"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -10,20 +10,17 @@ homepage = "https://solana.com/"
 [dependencies]
 clap = "2.33.0"
 log = "0.4.6"
-rayon = "1.1.0"
-serde = "1.0.92"
-serde_derive = "1.0.92"
+rayon = "1.0.3"
 serde_json = "1.0.39"
-serde_yaml = "0.8.9"
-solana = { path = "../core", version = "0.16.1" }
-solana-client = { path = "../client", version = "0.16.1" }
-solana-drone = { path = "../drone", version = "0.16.1" }
-solana-logger = { path = "../logger", version = "0.16.1" }
-solana-metrics = { path = "../metrics", version = "0.16.1" }
-solana-netutil = { path = "../netutil", version = "0.16.1" }
-solana-runtime = { path = "../runtime", version = "0.16.1" }
-solana-sdk = { path = "../sdk", version = "0.16.1" }
+solana = { path = "../core", version = "0.15.0" }
+solana-client = { path = "../client", version = "0.15.0" }
+solana-drone = { path = "../drone", version = "0.15.0" }
+solana-logger = { path = "../logger", version = "0.15.0" }
+solana-metrics = { path = "../metrics", version = "0.15.0" }
+solana-netutil = { path = "../netutil", version = "0.15.0" }
+solana-runtime = { path = "../runtime", version = "0.15.0" }
+solana-sdk = { path = "../sdk", version = "0.15.0" }
 [features]
 cuda = ["solana/cuda"]
+erasure = []


@@ -17,6 +17,7 @@ use solana_sdk::transaction::Transaction;
 use std::cmp;
 use std::collections::VecDeque;
 use std::net::SocketAddr;
+use std::process::exit;
 use std::sync::atomic::{AtomicBool, AtomicIsize, AtomicUsize, Ordering};
 use std::sync::{Arc, RwLock};
 use std::thread::sleep;
@@ -24,15 +25,8 @@ use std::thread::Builder;
 use std::time::Duration;
 use std::time::Instant;
-pub const MAX_SPENDS_PER_TX: u64 = 4;
-pub const NUM_LAMPORTS_PER_ACCOUNT: u64 = 128;
-#[derive(Debug)]
-pub enum BenchTpsError {
-    AirdropFailure,
-}
-pub type Result<T> = std::result::Result<T, BenchTpsError>;
+pub const MAX_SPENDS_PER_TX: usize = 4;
+pub const NUM_LAMPORTS_PER_ACCOUNT: u64 = 20;
 pub type SharedTransactions = Arc<RwLock<VecDeque<Vec<(Transaction, u64)>>>>;
@@ -341,13 +335,8 @@ fn verify_funding_transfer<T: Client>(client: &T, tx: &Transaction, amount: u64)
 /// fund the dests keys by spending all of the source keys into MAX_SPENDS_PER_TX
 /// on every iteration. This allows us to replay the transfers because the source is either empty,
 /// or full
-pub fn fund_keys<T: Client>(
-    client: &T,
-    source: &Keypair,
-    dests: &[Keypair],
-    total: u64,
-    lamports_per_signature: u64,
-) {
+pub fn fund_keys<T: Client>(client: &T, source: &Keypair, dests: &[Keypair], lamports: u64) {
+    let total = lamports * dests.len() as u64;
     let mut funded: Vec<(&Keypair, u64)> = vec![(source, total)];
     let mut notfunded: Vec<&Keypair> = dests.iter().collect();
@@ -357,12 +346,12 @@ pub fn fund_keys<T: Client>(
         let mut to_fund = vec![];
         println!("creating from... {}", funded.len());
         for f in &mut funded {
-            let max_units = cmp::min(notfunded.len() as u64, MAX_SPENDS_PER_TX);
+            let max_units = cmp::min(notfunded.len(), MAX_SPENDS_PER_TX);
             if max_units == 0 {
                 break;
             }
-            let start = notfunded.len() - max_units as usize;
-            let per_unit = (f.1 - max_units * lamports_per_signature) / max_units;
+            let start = notfunded.len() - max_units;
+            let per_unit = f.1 / (max_units as u64);
             let moves: Vec<_> = notfunded[start..]
                 .iter()
                 .map(|k| (k.pubkey(), per_unit))
@@ -453,7 +442,7 @@ pub fn airdrop_lamports<T: Client>(
     drone_addr: &SocketAddr,
     id: &Keypair,
     tx_count: u64,
-) -> Result<()> {
+) {
     let starting_balance = client.get_balance(&id.pubkey()).unwrap_or(0);
     metrics_submit_lamport_balance(starting_balance);
     println!("starting balance {}", starting_balance);
@@ -502,10 +491,9 @@ pub fn airdrop_lamports<T: Client>(
                 current_balance,
                 starting_balance
             );
-            return Err(BenchTpsError::AirdropFailure);
+            exit(1);
         }
     }
-    Ok(())
 }
 fn compute_and_report_stats(
@@ -582,16 +570,19 @@ fn should_switch_directions(num_lamports_per_account: u64, i: u64) -> bool {
     i % (num_lamports_per_account / 4) == 0 && (i >= (3 * num_lamports_per_account) / 4)
 }
-pub fn generate_keypairs(seed_keypair: &Keypair, count: u64) -> Vec<Keypair> {
+pub fn generate_keypairs(seed_keypair: &Keypair, count: usize) -> Vec<Keypair> {
     let mut seed = [0u8; 32];
     seed.copy_from_slice(&seed_keypair.to_bytes()[..32]);
     let mut rnd = GenKeys::new(seed);
-    let mut total_keys = 1;
-    while total_keys < count {
-        total_keys *= MAX_SPENDS_PER_TX;
+    let mut total_keys = 0;
+    let mut target = count;
+    while target > 1 {
+        total_keys += target;
+        // Use the upper bound for this division otherwise it may not generate enough keys
+        target = (target + MAX_SPENDS_PER_TX - 1) / MAX_SPENDS_PER_TX;
     }
-    rnd.gen_n_keypairs(total_keys)
+    rnd.gen_n_keypairs(total_keys as u64)
 }
 pub fn generate_and_fund_keypairs<T: Client>(
@@ -600,9 +591,9 @@ pub fn generate_and_fund_keypairs<T: Client>(
     funding_pubkey: &Keypair,
     tx_count: usize,
     lamports_per_account: u64,
-) -> Result<(Vec<Keypair>, u64)> {
+) -> (Vec<Keypair>, u64) {
     info!("Creating {} keypairs...", tx_count * 2);
-    let mut keypairs = generate_keypairs(funding_pubkey, tx_count as u64 * 2);
+    let mut keypairs = generate_keypairs(funding_pubkey, tx_count * 2);
     info!("Get lamports...");
@@ -613,27 +604,19 @@ pub fn generate_and_fund_keypairs<T: Client>(
         .unwrap_or(0);
     if lamports_per_account > last_keypair_balance {
-        let (_, fee_calculator) = client.get_recent_blockhash().unwrap();
-        let extra =
-            lamports_per_account - last_keypair_balance + fee_calculator.max_lamports_per_signature;
+        let extra = lamports_per_account - last_keypair_balance;
         let total = extra * (keypairs.len() as u64);
         if client.get_balance(&funding_pubkey.pubkey()).unwrap_or(0) < total {
-            airdrop_lamports(client, &drone_addr.unwrap(), funding_pubkey, total)?;
+            airdrop_lamports(client, &drone_addr.unwrap(), funding_pubkey, total);
        }
         info!("adding more lamports {}", extra);
-        fund_keys(
-            client,
-            funding_pubkey,
-            &keypairs,
-            total,
-            fee_calculator.max_lamports_per_signature,
-        );
+        fund_keys(client, funding_pubkey, &keypairs, extra);
     }
     // 'generate_keypairs' generates extra keys to be able to have size-aligned funding batches for fund_keys.
     keypairs.truncate(2 * tx_count);
-    Ok((keypairs, last_keypair_balance))
+    (keypairs, last_keypair_balance)
 }
 #[cfg(test)]
@@ -668,11 +651,12 @@ mod tests {
     #[test]
     fn test_bench_tps_local_cluster() {
         solana_logger::setup();
+        let validator_config = ValidatorConfig::default();
         const NUM_NODES: usize = 1;
         let cluster = LocalCluster::new(&ClusterConfig {
             node_stakes: vec![999_990; NUM_NODES],
             cluster_lamports: 2_000_000,
-            validator_configs: vec![ValidatorConfig::default(); NUM_NODES],
+            validator_config,
             ..ClusterConfig::default()
         });
@@ -699,8 +683,7 @@ mod tests {
             &config.id,
             config.tx_count,
             lamports_per_account,
-        )
-        .unwrap();
+        );
         let total = do_bench_tps(vec![client], config, keypairs, 0);
         assert!(total > 100);
@@ -718,7 +701,7 @@ mod tests {
         config.duration = Duration::from_secs(5);
         let (keypairs, _keypair_balance) =
-            generate_and_fund_keypairs(&clients[0], None, &config.id, config.tx_count, 20).unwrap();
+            generate_and_fund_keypairs(&clients[0], None, &config.id, config.tx_count, 20);
         do_bench_tps(clients, config, keypairs, 0);
     }
@@ -732,10 +715,11 @@ mod tests {
         let lamports = 20;
         let (keypairs, _keypair_balance) =
-            generate_and_fund_keypairs(&client, None, &id, tx_count, lamports).unwrap();
+            generate_and_fund_keypairs(&client, None, &id, tx_count, lamports);
         for kp in &keypairs {
-            assert!(client.get_balance(&kp.pubkey()).unwrap() >= lamports);
+            // TODO: This should be >= lamports, but fails at the moment
+            assert_ne!(client.get_balance(&kp.pubkey()).unwrap(), 0);
         }
     }
 }
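The new `generate_keypairs` sizing loop above walks the funding tree from the leaves up: each round needs ceil(target / MAX_SPENDS_PER_TX) parent keys, and every level of the tree must be generated. A standalone model of that arithmetic follows; `total_keys_needed` is a hypothetical helper that simply mirrors the loop in the diff.

```rust
// Standalone model of the generate_keypairs sizing logic. Starting from
// `count` leaf keys, each funding round needs ceil(target / MAX_SPENDS_PER_TX)
// parent keys, and keys for every level are generated. The upper-bound
// (ceiling) division is what guarantees enough parents when target is not a
// multiple of MAX_SPENDS_PER_TX.
const MAX_SPENDS_PER_TX: usize = 4;

fn total_keys_needed(count: usize) -> usize {
    let mut total_keys = 0;
    let mut target = count;
    while target > 1 {
        total_keys += target;
        target = (target + MAX_SPENDS_PER_TX - 1) / MAX_SPENDS_PER_TX;
    }
    total_keys
}

fn main() {
    // 20 leaf keys need 5 parents, which need 2 grandparents: 20 + 5 + 2 = 27.
    // With floor division the 5-key level would get only 1 parent, and a
    // single source can fund at most 4 children per transaction.
    assert_eq!(total_keys_needed(20), 27);
}
```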


@@ -4,7 +4,6 @@ use std::time::Duration;
 use clap::{crate_description, crate_name, crate_version, App, Arg, ArgMatches};
 use solana_drone::drone::DRONE_PORT;
-use solana_sdk::fee_calculator::FeeCalculator;
 use solana_sdk::signature::{read_keypair, Keypair, KeypairUtil};
 /// Holds the configuration for a single run of the benchmark
@@ -18,10 +17,6 @@ pub struct Config {
     pub tx_count: usize,
     pub thread_batch_sleep_ms: usize,
     pub sustained: bool,
-    pub client_ids_and_stake_file: String,
-    pub write_to_client_file: bool,
-    pub read_from_client_file: bool,
-    pub target_lamports_per_signature: u64,
 }
 impl Default for Config {
@@ -36,10 +31,6 @@ impl Default for Config {
             tx_count: 500_000,
             thread_batch_sleep_ms: 0,
             sustained: false,
-            client_ids_and_stake_file: String::new(),
-            write_to_client_file: false,
-            read_from_client_file: false,
-            target_lamports_per_signature: FeeCalculator::default().target_lamports_per_signature,
         }
     }
 }
@@ -115,30 +106,6 @@ pub fn build_args<'a, 'b>() -> App<'a, 'b> {
                .takes_value(true)
                .help("Per-thread-per-iteration sleep in ms"),
         )
-        .arg(
-            Arg::with_name("write-client-keys")
-                .long("write-client-keys")
-                .value_name("FILENAME")
-                .takes_value(true)
-                .help("Generate client keys and stakes and write the list to YAML file"),
-        )
-        .arg(
-            Arg::with_name("read-client-keys")
-                .long("read-client-keys")
-                .value_name("FILENAME")
-                .takes_value(true)
-                .help("Read client keys and stakes from the YAML file"),
-        )
-        .arg(
-            Arg::with_name("target_lamports_per_signature")
-                .long("target-lamports-per-signature")
-                .value_name("LAMPORTS")
-                .takes_value(true)
-                .help(
-                    "The cost in lamports that the cluster will charge for signature \
-                     verification when the cluster is operating at target-signatures-per-slot",
-                ),
-        )
 }
 /// Parses a clap `ArgMatches` structure into a `Config`
@@ -196,20 +163,5 @@ pub fn extract_args<'a>(matches: &ArgMatches<'a>) -> Config {
     args.sustained = matches.is_present("sustained");
-    if let Some(s) = matches.value_of("write-client-keys") {
-        args.write_to_client_file = true;
-        args.client_ids_and_stake_file = s.to_string();
-    }
-    if let Some(s) = matches.value_of("read-client-keys") {
-        assert!(!args.write_to_client_file);
-        args.read_from_client_file = true;
-        args.client_ids_and_stake_file = s.to_string();
-    }
-    if let Some(v) = matches.value_of("target_lamports_per_signature") {
-        args.target_lamports_per_signature = v.to_string().parse().expect("can't parse lamports");
-    }
     args
 }


@@ -1,21 +1,10 @@
 mod bench;
 mod cli;
-use crate::bench::{
-    do_bench_tps, generate_and_fund_keypairs, generate_keypairs, Config, NUM_LAMPORTS_PER_ACCOUNT,
-};
-use solana::gossip_service::{discover_cluster, get_multi_client};
-use solana_sdk::fee_calculator::FeeCalculator;
-use solana_sdk::signature::Keypair;
-use std::collections::HashMap;
-use std::fs::File;
-use std::io::prelude::*;
-use std::path::Path;
+use crate::bench::{do_bench_tps, generate_and_fund_keypairs, Config, NUM_LAMPORTS_PER_ACCOUNT};
+use solana::gossip_service::{discover_cluster, get_clients};
 use std::process::exit;
-/// Number of signatures for all transactions in ~1 week at ~100K TPS
-pub const NUM_SIGNATURES_FOR_TXS: u64 = 100_000 * 60 * 60 * 24 * 7;
 fn main() {
     solana_logger::setup();
     solana_metrics::set_panic_hook("bench-tps");
@@ -33,44 +22,15 @@
         tx_count,
         thread_batch_sleep_ms,
         sustained,
-        client_ids_and_stake_file,
-        write_to_client_file,
-        read_from_client_file,
-        target_lamports_per_signature,
     } = cli_config;
-    if write_to_client_file {
-        let keypairs = generate_keypairs(&id, tx_count as u64 * 2);
-        let num_accounts = keypairs.len() as u64;
-        let max_fee = FeeCalculator::new(target_lamports_per_signature).max_lamports_per_signature;
-        let num_lamports_per_account = (num_accounts - 1 + NUM_SIGNATURES_FOR_TXS * max_fee)
-            / num_accounts
-            + NUM_LAMPORTS_PER_ACCOUNT;
-        let mut accounts = HashMap::new();
-        keypairs.iter().for_each(|keypair| {
-            accounts.insert(
-                serde_json::to_string(&keypair.to_bytes().to_vec()).unwrap(),
-                num_lamports_per_account,
-            );
-        });
-        let serialized = serde_yaml::to_string(&accounts).unwrap();
-        let path = Path::new(&client_ids_and_stake_file);
-        let mut file = File::create(path).unwrap();
-        file.write_all(&serialized.into_bytes()).unwrap();
-        return;
-    }
     println!("Connecting to the cluster");
     let (nodes, _replicators) =
         discover_cluster(&entrypoint_addr, num_nodes).unwrap_or_else(|err| {
             eprintln!("Failed to discover {} nodes: {:?}", num_nodes, err);
             exit(1);
         });
-    let (client, num_clients) = get_multi_client(&nodes);
-    if nodes.len() < num_clients {
+    if nodes.len() < num_nodes {
         eprintln!(
             "Error: Insufficient nodes discovered. Expecting {} or more",
             num_nodes
@@ -78,33 +38,15 @@
         exit(1);
     }
-    let (keypairs, keypair_balance) = if read_from_client_file {
-        let path = Path::new(&client_ids_and_stake_file);
-        let file = File::open(path).unwrap();
-        let accounts: HashMap<String, u64> = serde_yaml::from_reader(file).unwrap();
-        let mut keypairs = vec![];
-        let mut last_balance = 0;
-        accounts.into_iter().for_each(|(keypair, balance)| {
-            let bytes: Vec<u8> = serde_json::from_str(keypair.as_str()).unwrap();
-            keypairs.push(Keypair::from_bytes(&bytes).unwrap());
-            last_balance = balance;
-        });
-        (keypairs, last_balance)
-    } else {
-        generate_and_fund_keypairs(
-            &client,
-            Some(drone_addr),
-            &id,
-            tx_count,
-            NUM_LAMPORTS_PER_ACCOUNT,
-        )
-        .unwrap_or_else(|e| {
-            eprintln!("Error could not fund keys: {:?}", e);
-            exit(1);
-        })
-    };
+    let clients = get_clients(&nodes);
+    let (keypairs, keypair_balance) = generate_and_fund_keypairs(
+        &clients[0],
+        Some(drone_addr),
+        &id,
+        tx_count,
+        NUM_LAMPORTS_PER_ACCOUNT,
+    );
     let config = Config {
         id,
@@ -115,5 +57,5 @@
         sustained,
     };
-    do_bench_tps(vec![client], config, keypairs, keypair_balance);
+    do_bench_tps(clients, config, keypairs, keypair_balance);
 }


@@ -5,8 +5,6 @@
 - [Terminology](terminology.md)
 - [Getting Started](getting-started.md)
-- [Testnet Participation](testnet-participation.md)
-- [Testnet Replicator](testnet-replicator.md)
 - [Example: Web Wallet](webwallet.md)
 - [Programming Model](programs.md)
@@ -18,7 +16,7 @@
 - [Leader Rotation](leader-rotation.md)
 - [Fork Generation](fork-generation.md)
 - [Managing Forks](managing-forks.md)
-- [Turbine Block Propagation](turbine-block-propagation.md)
+- [Data Plane Fanout](data-plane-fanout.md)
 - [Ledger Replication](ledger-replication.md)
 - [Secure Vote Signing](vote-signing.md)
 - [Stake Delegation and Rewards](stake-delegation-and-rewards.md)
@@ -56,17 +54,16 @@
 - [References](ed_references.md)
 - [Cluster Test Framework](cluster-test-framework.md)
 - [Credit-only Accounts](credit-only-credit-debit-accounts.md)
-- [Deterministic Transaction Fees](transaction-fees.md)
 - [Validator](validator-proposal.md)
 - [Implemented Design Proposals](implemented-proposals.md)
-- [Blocktree](blocktree.md)
-- [Cluster Software Installation and Updates](installer.md)
-- [Deterministic Transaction Fees](transaction-fees.md)
 - [Fork Selection](fork-selection.md)
 - [Leader-to-Leader Transition](leader-leader-transition.md)
 - [Leader-to-Validator Transition](leader-validator-transition.md)
-- [Passive Stake Delegation and Rewards](passive-stake-delegation-and-rewards.md)
-- [Persistent Account Storage](persistent-account-storage.md)
-- [Reliable Vote Transmission](reliable-vote-transmission.md)
-- [Repair Service](repair-service.md)
+- [Testnet Participation](testnet-participation.md)
 - [Testing Programs](testing-programs.md)
+- [Reliable Vote Transmission](reliable-vote-transmission.md)
+- [Persistent Account Storage](persistent-account-storage.md)
+- [Cluster Software Installation and Updates](installer.md)
+- [Passive Stake Delegation and Rewards](passive-stake-delegation-and-rewards.md)


@@ -1,12 +1,12 @@
-# Turbine Block Propagation
-A Solana cluster uses a multi-layer block propagation mechanism called *Turbine*
-to broadcast transaction blobs to all nodes with minimal amount of duplicate
-messages. The cluster divides itself into small collections of nodes, called
-*neighborhoods*. Each node is responsible for sharing any data it receives with
-the other nodes in its neighborhood, as well as propagating the data on to a
-small set of nodes in other neighborhoods. This way each node only has to
-communicate with a small number of nodes.
+# Data Plane Fanout
+A Solana cluster uses a multi-layer mechanism called *data plane fanout* to
+broadcast transaction blobs to all nodes in a very quick and efficient manner.
+In order to establish the fanout, the cluster divides itself into small
+collections of nodes, called *neighborhoods*. Each node is responsible for
+sharing any data it receives with the other nodes in its neighborhood, as well
+as propagating the data on to a small set of nodes in other neighborhoods.
+This way each node only has to communicate with a small number of nodes.
 During its slot, the leader node distributes blobs between the validator nodes
 in the first neighborhood (layer 0). Each validator shares its data within its
@@ -26,14 +26,6 @@ make up layer 0. These will automatically be the highest stake holders, allowing
 the heaviest votes to come back to the leader first. Layer-0 and lower-layer
 nodes use the same logic to find their neighbors and next layer peers.
-To reduce the possibility of attack vectors, each blob is transmitted over a
-random tree of neighborhoods. Each node uses the same set of nodes representing
-the cluster. A random tree is generated from the set for each blob using
-randomness derived from the blob itself. Since the random seed is not known in
-advance, attacks that try to eclipse neighborhoods from certain leaders or
-blocks become very difficult, and should require almost complete control of the
-stake in the cluster.
 ## Layer and Neighborhood Structure
 The current leader makes its initial broadcasts to at most `DATA_PLANE_FANOUT`
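As a rough illustration of the layered layout this page describes, here is a toy layer calculation. It assumes each neighborhood holds `DATA_PLANE_FANOUT` nodes and that every layer holds `DATA_PLANE_FANOUT` times as many nodes as the previous one; both are assumptions for illustration only, not the exact layout the implementation uses.

```rust
// Toy layer assignment for the fanout scheme described above, assuming (for
// illustration only) that each neighborhood holds FANOUT nodes and layer k
// holds FANOUT^(k+1) nodes: layer 0 = FANOUT, layer 1 = FANOUT^2, and so on.
const FANOUT: usize = 200; // stand-in for DATA_PLANE_FANOUT

/// Returns (layer, neighborhood-within-layer) for the node at `index` in the
/// stake-sorted node list that every node can compute for itself.
fn layer_and_neighborhood(index: usize) -> (usize, usize) {
    let mut layer = 0;
    let mut layer_start = 0;
    let mut layer_size = FANOUT;
    while index >= layer_start + layer_size {
        layer_start += layer_size;
        layer_size *= FANOUT;
        layer += 1;
    }
    (layer, (index - layer_start) / FANOUT)
}

fn main() {
    assert_eq!(layer_and_neighborhood(0), (0, 0)); // first layer-0 node
    assert_eq!(layer_and_neighborhood(199), (0, 0)); // still neighborhood 0
    assert_eq!(layer_and_neighborhood(200), (1, 0)); // first layer-1 node
    assert_eq!(layer_and_neighborhood(200 + 200 * 200), (2, 0)); // layer 2
}
```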


@@ -161,7 +161,7 @@ This will dump all the threads stack traces into gdb.txt
 In this example the client connects to our public testnet. To run validators on the testnet you would need to open udp ports `8000-10000`.
 ```bash
-$ ./multinode-demo/client.sh --entrypoint testnet.solana.com:8001 --drone testnet.solana.com:9900 --duration 60 --tx_count 50
+$ ./multinode-demo/client.sh --entrypoint testnet.solana.com:8001 --duration 60
 ```
 You can observe the effects of your client's transactions on our [dashboard](https://metrics.solana.com:3000/d/testnet/testnet-hud?orgId=2&from=now-30m&to=now&refresh=5s&var-testnet=testnet)


@@ -12,18 +12,18 @@ updates is managed using an on-chain update manifest program.
 #### Fetch and run a pre-built installer using a bootstrap curl/shell script
 The easiest install method for supported platforms:
 ```bash
-$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.16.0/install/solana-install-init.sh | sh
+$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.13.0/install/solana-install-init.sh | sh
 ```
 This script will check github for the latest tagged release and download and run the
-`solana-install-init` binary from there.
+`solana-install` binary from there.
 If additional arguments need to be specified during the installation, the
 following shell syntax is used:
 ```bash
-$ init_args=.... # arguments for `solana-install-init ...`
-$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.16.0/install/solana-install-init.sh | sh -s - ${init_args}
+$ init_args=.... # arguments for `solana-installer init ...`
+$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.13.0/install/solana-install-init.sh | sh -s - ${init_args}
 ```
 #### Fetch and run a pre-built installer from a Github release
@@ -31,9 +31,9 @@ With a well-known release URL, a pre-built binary can be obtained for supported
 platforms:
 ```bash
-$ curl -o solana-install-init https://github.com/solana-labs/solana/releases/download/v0.16.0/solana-install-init-x86_64-apple-darwin
-$ chmod +x ./solana-install-init
-$ ./solana-install-init --help
+$ curl -o solana-install https://github.com/solana-labs/solana/releases/download/v0.13.0/solana-install-x86_64-apple-darwin
+$ chmod +x ./solana-install
+$ ./solana-install --help
 ```
 #### Build and run the installer from source
@@ -49,7 +49,7 @@ $ cargo run -- --help
 Given a solana release tarball (as created by `ci/publish-tarball.sh`) that has already been uploaded to a publicly accessible URL,
 the following commands will deploy the update:
 ```bash
-$ solana-keygen new -o update-manifest.json # <-- only generated once, the public key is shared with users
+$ solana-keygen -o update-manifest.json # <-- only generated once, the public key is shared with users
 $ solana-install deploy http://example.com/path/to/solana-release.tar.bz2 update-manifest.json
 ```
@@ -119,7 +119,7 @@ It manages the following files and directories in the user's home directory:
 #### Command-line Interface
 ```manpage
-solana-install 0.16.0
+solana-install 0.13.0
 The solana cluster software installer
 USAGE:


@@ -30,7 +30,6 @@ Methods
* [getSlotLeader](#getslotleader) * [getSlotLeader](#getslotleader)
* [getNumBlocksSinceSignatureConfirmation](#getnumblockssincesignatureconfirmation) * [getNumBlocksSinceSignatureConfirmation](#getnumblockssincesignatureconfirmation)
* [getTransactionCount](#gettransactioncount) * [getTransactionCount](#gettransactioncount)
* [getTotalSupply](#gettotalsupply)
* [getEpochVoteAccounts](#getepochvoteaccounts) * [getEpochVoteAccounts](#getepochvoteaccounts)
* [requestAirdrop](#requestairdrop) * [requestAirdrop](#requestairdrop)
* [sendTransaction](#sendtransaction) * [sendTransaction](#sendtransaction)
@@ -126,7 +125,7 @@ None
##### Results: ##### Results:
The result field will be an array of JSON objects, each with the following sub fields: The result field will be an array of JSON objects, each with the following sub fields:
* `pubkey` - Node public key, as base-58 encoded string * `id` - Node identifier, as base-58 encoded string
* `gossip` - Gossip network address for the node * `gossip` - Gossip network address for the node
* `tpu` - TPU network address for the node * `tpu` - TPU network address for the node
* `rpc` - JSON RPC network address for the node, or `null` if the JSON RPC service is not enabled * `rpc` - JSON RPC network address for the node, or `null` if the JSON RPC service is not enabled
@@ -137,7 +136,7 @@ The result field will be an array of JSON objects, each with the following sub f
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getClusterNodes"}' http://localhost:8899 curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getClusterNodes"}' http://localhost:8899
// Result // Result
{"jsonrpc":"2.0","result":[{"gossip":"10.239.6.48:8001","pubkey":"9QzsJf7LPLj8GkXbYT3LFDKqsj2hHG7TA3xinJHu8epQ","rpc":"10.239.6.48:8899","tpu":"10.239.6.48:8856"}],"id":1} {"jsonrpc":"2.0","result":[{"gossip":"10.239.6.48:8001","id":"9QzsJf7LPLj8GkXbYT3LFDKqsj2hHG7TA3xinJHu8epQ","rpc":"10.239.6.48:8899","tpu":"10.239.6.48:8856"}],"id":1}
``` ```
--- ---
@@ -276,26 +275,6 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
--- ---
### getTotalSupply
Returns the current total supply in Lamports
##### Parameters:
None
##### Results:
* `integer` - Total supply, as unsigned 64-bit integer
##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getTotalSupply"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":10126,"id":1}
```
---
### getEpochVoteAccounts ### getEpochVoteAccounts
Returns the account info and associated stake for all the voting accounts in the current epoch. Returns the account info and associated stake for all the voting accounts in the current epoch.
@@ -303,11 +282,19 @@ Returns the account info and associated stake for all the voting accounts in the
None None
##### Results: ##### Results:
The result field will be an array of JSON objects, each with the following sub fields: An array consisting of vote accounts:
* `votePubkey` - Vote account public key, as base-58 encoded string * `string` - the vote account's Pubkey as base-58 encoded string
* `nodePubkey` - Node public key, as base-58 encoded string * `integer` - the stake, in lamports, delegated to this vote account
* `stake` - the stake, in lamports, delegated to this vote account * `VoteState` - the vote account's state
Each VoteState will be a JSON object with the following sub fields:
* `votes`, array of most recent vote lockouts
* `node_pubkey`, the pubkey of the node that votes using this account
* `authorized_voter_pubkey`, the pubkey of the authorized vote signer for this account
* `commission`, a 32-bit integer used as a fraction (commission/MAX_U32) for rewards payout * `commission`, a 32-bit integer used as a fraction (commission/MAX_U32) for rewards payout
* `root_slot`, the most recent slot this account has achieved maximum lockout
* `credits`, credits accrued by this account for reaching lockouts
##### Example: ##### Example:
```bash ```bash
@@ -315,7 +302,7 @@ The result field will be an array of JSON objects, each with the following sub f
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getEpochVoteAccounts"}' http://localhost:8899 curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getEpochVoteAccounts"}' http://localhost:8899
// Result // Result
{"jsonrpc":"2.0","result":[{"commission":0,"nodePubkey":"Et2RaZJdJRTzTkodUwiHr4H6sLkVmijBFv8tkd7oSSFY","stake":42,"votePubkey":"B4CdWq3NBSoH2wYsVE1CaZSWPo2ZtopE4SJipQhZ3srF"}],"id":1} {"jsonrpc":"2.0","result":[[[84,115,89,23,41,83,221,72,58,23,53,245,195,188,140,161,242,189,200,164,139,214,12,180,84,161,28,151,24,243,159,125],10000000,{"authorized_voter_pubkey":[84,115,89,23,41,83,221,72,58,23,53,245,195,188,140,161,242,189,200,164,139,214,12,180,84,161,28,151,24,243,159,125],"commission":0,"credits":0,"node_pubkey":[49,139,227,211,47,39,69,86,131,244,160,144,228,169,84,143,142,253,83,81,212,110,254,12,242,71,219,135,30,60,157,213],"root_slot":null,"votes":[{"confirmation_count":1,"slot":0}]}]],"id":1}
``` ```
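To make the `commission` field concrete: a reward is split by treating `commission` as the numerator of the fraction commission/MAX_U32. Below is a minimal Rust sketch of that split with an illustrative 10% example; the function names are not part of any API:

```rust
// Split a reward between the vote account (commission) and the staker,
// treating `commission` as the fraction commission / u32::MAX.
fn split_reward(reward: u64, commission: u32) -> (u64, u64) {
    // compute in u128 to avoid overflow of reward * commission
    let vote_cut = (reward as u128 * commission as u128 / u32::MAX as u128) as u64;
    (vote_cut, reward - vote_cut)
}

fn main() {
    let commission = (u32::MAX as u64 / 10) as u32; // ~10%
    let (vote_cut, staker_cut) = split_reward(1_000, commission);
    println!("vote: {} lamports, staker: {} lamports", vote_cut, staker_cut);
}
```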
--- ---
View File
@@ -45,7 +45,7 @@ The upsides compared to guards:
* The timeout is not fixed. * The timeout is not fixed.
* The timeout is local to the leader, and therefore can be clever. The leader's * The timeout is local to the leader, and therefore can be clever. The leader's
heuristic can take into account turbine performance. heuristic can take into account avalanche performance.
* This design doesn't require a ledger hard fork to update. * This design doesn't require a ledger hard fork to update.
View File
@@ -1,18 +1,19 @@
# Ledger Replication # Ledger Replication
At full capacity on a 1gbps network solana will generate 4 petabytes of data At full capacity on a 1gbps network solana will generate 4 petabytes of data
per year. To prevent the network from centralizing around validators that have per year. To prevent the network from centralizing around full nodes that have
to store the full data set this protocol proposes a way for mining nodes to to store the full data set this protocol proposes a way for mining nodes to
provide storage capacity for pieces of the data. provide storage capacity for pieces of the network.
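As a back-of-the-envelope check on that figure, 1 gbps sustained for a year is roughly 4 petabytes:

```rust
// Sanity check of the 4 PB/year claim: 1 gbps sustained for one year.
fn main() {
    let bits_per_sec = 1e9_f64; // 1 gbps
    let secs_per_year = 365.25 * 24.0 * 3600.0;
    let bytes_per_year = bits_per_sec / 8.0 * secs_per_year;
    println!("{:.2} PB/year", bytes_per_year / 1e15); // ~3.94 PB/year
}
```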
The basic idea to Proof of Replication is encrypting a dataset with a public The basic idea to Proof of Replication is encrypting a dataset with a public
symmetric key using CBC encryption, then hash the encrypted dataset. The main symmetric key using CBC encryption, then hash the encrypted dataset. The main
problem with the naive approach is that a dishonest storage node can stream the problem with the naive approach is that a dishonest storage node can stream the
encryption and delete the data as it's hashed. The simple solution is to periodically encryption and delete the data as its hashed. The simple solution is to force
regenerate the hash based on a signed PoH value. This ensures that all the data is present the hash to be done on the reverse of the encryption, or perhaps with a random
during the generation of the proof and it also requires validators to have the order. This ensures that all the data is present during the generation of the
entirety of the encrypted data present for verification of every proof of every identity. proof and it also requires the validator to have the entirety of the encrypted
So the space required to validate is `number_of_proofs * data_size` data present for verification of every proof of every identity. So the space
required to validate is `number_of_proofs * data_size`
## Optimization with PoH ## Optimization with PoH
@@ -28,12 +29,13 @@ core. The total space required for verification is `1_ledger_segment +
## Network ## Network
Validators for PoRep are the same validators that are verifying transactions. Validators for PoRep are the same validators that are verifying transactions.
If a replicator can prove that a validator verified a fake PoRep, then the They have some stake that they have put up as collateral that ensures that
validator will not receive a reward for that storage epoch. their work is honest. If you can prove that a validator verified a fake PoRep,
then the validator will not receive a reward for that storage epoch.
Replicators are specialized *light clients*. They download a part of the Replicators are specialized *light clients*. They download a part of the ledger
ledger (a.k.a Segment) and store it, and provide PoReps of storing the ledger. and store it, and provide PoReps of storing the ledger. For each verified PoRep
For each verified PoRep replicators earn a reward of sol from the mining pool. replicators earn a reward of sol from the mining pool.
## Constraints ## Constraints
@@ -53,8 +55,9 @@ changes to determine what rate it can validate storage proofs.
1. SLOTS\_PER\_SEGMENT: Number of slots in a segment of ledger data. The 1. SLOTS\_PER\_SEGMENT: Number of slots in a segment of ledger data. The
unit of storage for a replicator. unit of storage for a replicator.
2. NUM\_KEY\_ROTATION\_SEGMENTS: Number of segments after which replicators 2. NUM\_KEY\_ROTATION\_TICKS: Number of ticks to save a PoH value and cause a
regenerate their encryption keys and select a new dataset to store. key generation for the section of ledger just generated and the rotation of
another key in the set.
3. NUM\_STORAGE\_PROOFS: Number of storage proofs required for a storage proof 3. NUM\_STORAGE\_PROOFS: Number of storage proofs required for a storage proof
claim to be successfully rewarded. claim to be successfully rewarded.
4. RATIO\_OF\_FAKE\_PROOFS: Ratio of fake proofs to real proofs that a storage 4. RATIO\_OF\_FAKE\_PROOFS: Ratio of fake proofs to real proofs that a storage
@@ -63,40 +66,36 @@ mining proof claim has to contain to be valid for a reward.
proof. proof.
6. NUM\_CHACHA\_ROUNDS: Number of encryption rounds performed to generate 6. NUM\_CHACHA\_ROUNDS: Number of encryption rounds performed to generate
encrypted state. encrypted state.
7. NUM\_SLOTS\_PER\_TURN: Number of slots that define a single storage epoch or
a "turn" of the PoRep game.
### Validator behavior ### Validator behavior
1. Validators join the network and begin looking for replicator accounts at each 1. Validator joins the network and submits a storage validation capacity
storage epoch/turn boundary. transaction which tells the network how many proofs it can process in a given
2. Every turn, Validators sign the PoH value at the boundary and use that signature period defined by NUM\_KEY\_ROTATION\_TICKS.
to randomly pick proofs to verify from each storage account found in the turn boundary. 2. Every NUM\_KEY\_ROTATION\_TICKS the validator stores the PoH value at that
This signed value is also submitted to the validator's storage account and will be used by height.
replicators at a later stage to cross-verify. 3. Validator generates a storage proof confirmation transaction.
3. Every `NUM_SLOTS_PER_TURN` slots the validator advertises the PoH value. This value 4. The storage proof confirmation transaction is integrated into the ledger.
is also served to Replicators via RPC interfaces. 6. Validator responds to RPC interfaces for what the last storage epoch PoH
4. For a given turn N, all validations get locked out until turn N+3 (a gap of 2 turns/epochs). value is and its slot.
At that point, all validations from that turn become available for reward collection (the turn arithmetic is sketched after this list).
5. Any incorrect validations will be marked during the turn in between.
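A minimal sketch of the turn arithmetic above, assuming an illustrative `NUM_SLOTS_PER_TURN` (the real value is a cluster constraint from the list earlier in this chapter):

```rust
// Illustrative turn/lockout bookkeeping; not the actual implementation.
const NUM_SLOTS_PER_TURN: u64 = 1_024; // assumed value for illustration

fn turn_of_slot(slot: u64) -> u64 {
    slot / NUM_SLOTS_PER_TURN
}

// Validations submitted in turn N stay locked out until turn N + 3.
fn rewards_collectable(validation_turn: u64, current_turn: u64) -> bool {
    current_turn >= validation_turn + 3
}
```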
### Replicator behavior ### Replicator behavior
1. Since a replicator is somewhat of a light client and not downloading all the 1. Since a replicator is somewhat of a light client and not downloading all the
ledger data, they have to rely on other validators and replicators for information. ledger data, they have to rely on other full nodes (validators) for
Any given validator may or may not be malicious and give incorrect information, although information. Any given validator may or may not be malicious and give incorrect
there are not any obvious attack vectors that this could accomplish besides having the information, although there are not any obvious attack vectors that this could
replicator do extra wasted work. For many of the operations there are a number of options accomplish besides having the replicator do extra wasted work. For many of the
depending on how paranoid a replicator is: operations there are a number of options depending on how paranoid a replicator
is:
- (a) replicator can ask a validator - (a) replicator can ask a validator
- (b) replicator can ask multiple validators - (b) replicator can ask multiple validators
- (c) replicator can ask other replicators - (c) replicator can subscribe to the full transaction stream and generate
- (d) replicator can subscribe to the full transaction stream and generate the information itself
the information itself (assuming the slot is recent enough) - (d) replicator can subscribe to an abbreviated transaction stream to
- (e) replicator can subscribe to an abbreviated transaction stream to generate the information itself
generate the information itself (assuming the slot is recent enough) 2. A replicator obtains the PoH hash corresponding to the last key rotation
2. A replicator obtains the PoH hash corresponding to the last turn with its slot. along with its slot.
3. The replicator signs the PoH hash with its keypair. That signature is the 3. The replicator signs the PoH hash with its keypair. That signature is the
seed used to pick the segment to replicate and also the encryption key. The seed used to pick the segment to replicate and also the encryption key. The
replicator mods the signature with the slot to get which segment to replicator mods the signature with the slot to get which segment to
@@ -104,67 +103,38 @@ replicate.
4. The replicator retrieves the ledger by asking peer validators and 4. The replicator retrieves the ledger by asking peer validators and
replicators. See 6.5. replicators. See 6.5.
5. The replicator then encrypts that segment with the key with chacha algorithm 5. The replicator then encrypts that segment with the key with chacha algorithm
in CBC mode with `NUM_CHACHA_ROUNDS` of encryption. in CBC mode with NUM\_CHACHA\_ROUNDS of encryption.
6. The replicator initializes a chacha rng with a signed recent PoH value as 6. The replicator initializes a chacha rng with the signature from step 2 as
the seed. the seed.
7. The replicator generates `NUM_STORAGE_SAMPLES` samples in the range of the 7. The replicator generates NUM\_STORAGE\_SAMPLES samples in the range of the
entry size and samples the encrypted segment with sha256 for 32-bytes at each entry size and samples the encrypted segment with sha256 for 32-bytes at each
offset value. Sampling the state should be faster than generating the encrypted offset value. Sampling the state should be faster than generating the encrypted
segment. segment.
8. The replicator sends a PoRep proof transaction which contains its sha state 8. The replicator sends a PoRep proof transaction which contains its sha state
at the end of the sampling operation, its seed and the samples it used to the at the end of the sampling operation, its seed and the samples it used to the
current leader and it is put onto the ledger. current leader and it is put onto the ledger.
9. During a given turn the replicator should submit many proofs for the same segment
and based on the `RATIO_OF_FAKE_PROOFS` some of those proofs must be fake.
10. As the PoRep game enters the next turn, the replicator must submit a
transaction with the mask of which proofs were fake during the last turn. This
transaction will define the rewards for both replicators and validators.
11. Finally, for a turn N, as the PoRep game enters turn N + 3, a replicator's proofs for
turn N will be counted towards their rewards.
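The following Rust sketch illustrates steps 5-8 above. It assumes the segment has already been chacha-CBC encrypted (step 5), substitutes a simple xorshift generator for the chacha rng of step 6, and uses the `sha2` crate for the 32-byte samples of step 7; the constants and names are illustrative:

```rust
use sha2::{Digest, Sha256}; // sha2 = "0.10" (assumed dependency)

const NUM_STORAGE_SAMPLES: usize = 128; // illustrative value

// Stand-in rng; the design seeds a chacha rng with a signed recent PoH value.
fn xorshift64(state: &mut u64) -> u64 {
    *state ^= *state << 13;
    *state ^= *state >> 7;
    *state ^= *state << 17;
    *state
}

/// Returns the sha state and sample offsets to put in the PoRep proof transaction.
fn sample_segment(encrypted_segment: &[u8], seed: u64) -> ([u8; 32], Vec<u64>) {
    assert!(encrypted_segment.len() > 32);
    let mut rng_state = seed.max(1); // xorshift must not start at zero
    let mut hasher = Sha256::new();
    let mut offsets = Vec::with_capacity(NUM_STORAGE_SAMPLES);
    for _ in 0..NUM_STORAGE_SAMPLES {
        let offset = xorshift64(&mut rng_state) % (encrypted_segment.len() as u64 - 32);
        offsets.push(offset);
        // sample 32 bytes of the encrypted state at each offset
        hasher.update(&encrypted_segment[offset as usize..offset as usize + 32]);
    }
    (hasher.finalize().into(), offsets)
}
```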
### The PoRep Game
The Proof of Replication game has 4 primary stages. For each "turn" multiple PoRep
games can be in progress but each in a different stage.
The 4 stages of the PoRep Game are as follows:
1. Proof submission stage
- Replicators: submit as many proofs as possible during this stage
- Validators: No-op
2. Proof verification stage
- Replicators: No-op
- Validators: Select replicators and verify their proofs from the previous turn
3. Proof challenge stage
- Replicators: Submit the proof mask with justifications (for fake proofs submitted 2 turns ago)
- Validators: No-op
4. Reward collection stage
- Replicators: Collect rewards for 3 turns ago
- Validators: Collect rewards for 3 turns ago
For each turn of the PoRep game, both Validators and Replicators evaluate each
stage. The stages are run as separate transactions on the storage program.
### Finding who has a given block of ledger ### Finding who has a given block of ledger
1. Validators monitor the turns in the PoRep game and look at the rooted bank 1. Validators monitor the transaction stream for storage mining proofs, and
at turn boundaries for any proofs. keep a mapping of ledger segments by slot to public keys. When it sees
2. Validators maintain a map of ledger segments and corresponding replicator public keys. a storage mining proof it updates this mapping and provides an RPC interface
The map is updated when a Validator processes a replicator's proofs for a segment. which takes a slot and hands back a list of public keys. The client
The validator provides an RPC interface to access this map. Using this API, clients then looks up in their cluster\_info table to see which network address that
can map a segment to a replicator's network address (correlating it via cluster_info table). corresponds to and sends a repair request to retrieve the necessary blocks of
The clients can then send repair requests to the replicator to retrieve segments. ledger.
3. Validators would need to invalidate this list every N turns. 2. Validators would need to prune this list which it could do by periodically
looking at the oldest entries in its mappings and doing a network query to see
if the storage host is still serving the first entry.
## Sybil attacks ## Sybil attacks
For any random seed, we force everyone to use a signature that is derived from For any random seed, we force everyone to use a signature that is derived from
a PoH hash at the turn boundary. Everyone uses the same count, so the same PoH a PoH hash. Everyone must use the same count, so the same PoH hash is signed by
hash is signed by every participant. The signatures are then each cryptographically every participant. The signatures are then each cryptographically tied to the
tied to the keypair, which prevents a leader from grinding on the resulting keypair, which prevents a leader from grinding on the resulting value for more
value for more than 1 identity. than 1 identity.
Since there are many more client identities than encryption identities, we need Since there are many more client identities than encryption identities, we need
to split the reward for multiple clients, and prevent Sybil attacks from to split the reward for multiple clients, and prevent Sybil attacks from
@@ -185,7 +155,8 @@ the network can reward long lived client identities more than new ones.
showing the initial state for the hash. showing the initial state for the hash.
- If a validator marks real proofs as fake, no on-chain computation can be done - If a validator marks real proofs as fake, no on-chain computation can be done
to distinguish who is correct. Rewards would have to rely on the results from to distinguish who is correct. Rewards would have to rely on the results from
multiple validators to catch bad actors and replicators from being denied rewards. multiple validators in a stake-weighted fashion to catch bad actors and
replicators from being locked out of the network.
- Validator stealing mining proof results for itself. The proofs are derived - Validator stealing mining proof results for itself. The proofs are derived
from a signature from a replicator, since the validator does not know the from a signature from a replicator, since the validator does not know the
private key used to generate the encryption key, it cannot be the generator of private key used to generate the encryption key, it cannot be the generator of
View File
@@ -76,24 +76,21 @@ this field can only be modified by this entity
### StakeState ### StakeState
A StakeState takes one of two forms, StakeState::Stake and StakeState::MiningPool. A StakeState takes one of two forms, StakeState::Delegate and StakeState::MiningPool.
### StakeState::Stake ### StakeState::Delegate
Stake is the current delegation preference of the **staker**. Stake StakeState is the current delegation preference of the **staker**. StakeState
contains the following state information: contains the following state information:
* Account::lamports - The staked lamports.
* `voter_pubkey` - The pubkey of the VoteState instance the lamports are * `voter_pubkey` - The pubkey of the VoteState instance the lamports are
delegated to. delegated to.
* `credits_observed` - The total credits claimed over the lifetime of the * `credits_observed` - The total credits claimed over the lifetime of the
program. program.
* `stake` - The actual activated stake.
* Account::lamports - Lamports available for staking, including any earned as rewards.
### StakeState::MiningPool ### StakeState::MiningPool
There are two approaches to the mining pool. The bank could allow the There are two approaches to the mining pool. The bank could allow the
@@ -108,12 +105,11 @@ tokens stored as `Account::lamports`.
The stakes and the MiningPool are accounts that are owned by the same `Stake` The stakes and the MiningPool are accounts that are owned by the same `Stake`
program. program.
### StakeInstruction::DelegateStake(stake) ### StakeInstruction::Initialize
* `account[0]` - RW - The StakeState::Stake instance. * `account[0]` - RW - The StakeState::Delegate instance.
`StakeState::Stake::credits_observed` is initialized to `VoteState::credits`. `StakeState::Delegate::credits_observed` is initialized to `VoteState::credits`.
`StakeState::Stake::voter_pubkey` is initialized to `account[1]` `StakeState::Delegate::voter_pubkey` is initialized to `account[1]`
`StakeState::Stake::stake` is initialized to `stake`, as long as it's less than account[0].lamports
* `account[1]` - R - The VoteState instance. * `account[1]` - R - The VoteState instance.
@@ -128,7 +124,7 @@ deposited into the StakeState and as validator commission is proportional to
* `account[0]` - RW - The StakeState::MiningPool instance that will fulfill the * `account[0]` - RW - The StakeState::MiningPool instance that will fulfill the
reward. reward.
* `account[1]` - RW - The StakeState::Stake instance that is redeeming votes * `account[1]` - RW - The StakeState::Delegate instance that is redeeming votes
credits. credits.
* `account[2]` - R - The VoteState instance, must be the same as * `account[2]` - R - The VoteState instance, must be the same as
`StakeState::voter_pubkey` `StakeState::voter_pubkey`
@@ -136,7 +132,7 @@ credits.
Reward is paid out for the difference between `VoteState::credits` and Reward is paid out for the difference between `VoteState::credits` and
`StakeState::Delegate.credits_observed`, and `credits_observed` is updated to `StakeState::Delegate.credits_observed`, and `credits_observed` is updated to
`VoteState::credits`. The commission is deposited into the `VoteState` token `VoteState::credits`. The commission is deposited into the `VoteState` token
balance, and the reward is deposited to the `StakeState::Stake` token balance. The balance, and the reward is deposited to the `StakeState::Delegate` token balance. The
reward and the commission is weighted by the `StakeState::lamports` divided by total lamports staked. reward and the commission is weighted by the `StakeState::lamports` divided by total lamports staked.
The Staker or the owner of the Stake program sends a transaction with this The Staker or the owner of the Stake program sends a transaction with this
@@ -150,7 +146,7 @@ stake_state.credits_observed = vote_state.credits;
``` ```
`credits_to_claim` is used to compute the reward and commission, and `credits_to_claim` is used to compute the reward and commission, and
`StakeState::Stake::credits_observed` is updated to the latest `StakeState::Delegate::credits_observed` is updated to the latest
`VoteState::credits` value. `VoteState::credits` value.
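A minimal Rust sketch of this redemption flow. The flat `lamports_per_credit` rate is a stand-in for the stake-weighted payout described above, and the struct shapes are illustrative:

```rust
struct VoteState { credits: u64, commission: u32 }
struct StakeState { credits_observed: u64 }

/// Returns (staker_reward, vote_commission) in lamports.
fn redeem_vote_credits(stake: &mut StakeState, vote: &VoteState, lamports_per_credit: u64) -> (u64, u64) {
    let credits_to_claim = vote.credits.saturating_sub(stake.credits_observed);
    stake.credits_observed = vote.credits; // observe the latest credits
    let reward = credits_to_claim * lamports_per_credit;
    // commission/MAX_U32 of the reward goes to the VoteState balance
    let commission = (reward as u128 * vote.commission as u128 / u32::MAX as u128) as u64;
    (reward - commission, commission)
}
```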
### Collecting network fees into the MiningPool ### Collecting network fees into the MiningPool
@@ -179,13 +175,13 @@ many rewards to be claimed concurrently.
## Passive Delegation ## Passive Delegation
Any number of instances of StakeState::Stake programs can delegate to a single Any number of instances of StakeState::Delegate programs can delegate to a single
VoteState program without an interactive action from the identity controlling VoteState program without an interactive action from the identity controlling
the VoteState program or submitting votes to the program. the VoteState program or submitting votes to the program.
The total stake allocated to a VoteState program can be calculated by the sum of The total stake allocated to a VoteState program can be calculated by the sum of
all the StakeState programs that have the VoteState pubkey as the all the StakeState programs that have the VoteState pubkey as the
`StakeState::Stake::voter_pubkey`. `StakeState::Delegate::voter_pubkey`.
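That sum is straightforward to express; this sketch assumes an illustrative flat list of (voter pubkey, staked lamports) pairs:

```rust
type Pubkey = [u8; 32];

// Total stake delegated to one vote account: the sum of lamports across every
// stake account whose voter_pubkey matches it.
fn total_stake(stakes: &[(Pubkey, u64)], vote_pubkey: &Pubkey) -> u64 {
    stakes
        .iter()
        .filter(|(voter, _)| voter == vote_pubkey)
        .map(|(_, lamports)| *lamports)
        .sum()
}
```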
## Example Callflow ## Example Callflow
View File
@@ -35,9 +35,9 @@ The different protocol strategies to address the above challenges:
* Insert an empty `SlotMeta` in blocktree for `p.slot` if it doesn't already exist. * Insert an empty `SlotMeta` in blocktree for `p.slot` if it doesn't already exist.
* If `p.slot` does exist, update the parent of `p` based on `parents` * If `p.slot` does exist, update the parent of `p` based on `parents`
Note: that once these empty slots are added to blocktree, the `Blob Repair` protocol should attempt to fill those slots. Note: that once these empty slots are added to blocktree, the `Blob Repair` protocol should attempt to fill those slots.
Note: Validators will only accept responses containing blobs within the current verifiable epoch (epoch the validator has a leader schedule for). Note: Validators will only accept responses containing blobs within the current verifiable epoch (epoch the validator has a leader schedule for).
3. Repairmen (Addresses Challenge #3): 3. Repairmen (Addresses Challenge #3):
This part of the repair protocol is the primary mechanism by which new nodes joining the cluster catch up after loading a snapshot. This protocol works in a "forward" fashion, so validators can verify every blob that they receive against a known leader schedule. This part of the repair protocol is the primary mechanism by which new nodes joining the cluster catch up after loading a snapshot. This protocol works in a "forward" fashion, so validators can verify every blob that they receive against a known leader schedule.
View File
@@ -48,7 +48,7 @@ specific parameters will be necessary:
Solana's trustless sense of time and ordering provided by its PoH data Solana's trustless sense of time and ordering provided by its PoH data
structure, along with its structure, along with its
[turbine](https://www.youtube.com/watch?v=qt_gDRXHrHQ&t=1s) data broadcast [avalanche](https://www.youtube.com/watch?v=qt_gDRXHrHQ&t=1s) data broadcast
and transmission design, should provide sub-second transaction confirmation times that scale and transmission design, should provide sub-second transaction confirmation times that scale
with the log of the number of nodes in the cluster. This means we shouldn't with the log of the number of nodes in the cluster. This means we shouldn't
have to restrict the number of validating nodes with a prohibitive 'minimum have to restrict the number of validating nodes with a prohibitive 'minimum
View File
@@ -32,7 +32,7 @@ traversal issues. A cloud-hosted machine works best. **Ensure that IP ports
Prebuilt binaries are available for Linux x86_64 (Ubuntu 18.04 recommended). Prebuilt binaries are available for Linux x86_64 (Ubuntu 18.04 recommended).
MacOS or WSL users may build from source. MacOS or WSL users may build from source.
For a performance testnet with many transactions we have some preliminary recommended setups: For a performance testnet with many transactions we have some preliminary recomended setups:
| | Low end | Medium end | High end | Notes |
| --- | ---------|------------|----------| -- |
@@ -42,13 +42,6 @@ For a performance testnet with many transactions we have some preliminary recomm
| Accounts Drive(s) | None | Samsung 970 Pro 1TB | 2x Samsung 970 Pro 1TB | |
| GPU | 4x Nvidia 1070 or 2x Nvidia 1080 Ti or 2x Nvidia 2070 | 2x Nvidia 2080 Ti | 4x Nvidia 2080 Ti | Any number of cuda-capable GPUs are supported on Linux platforms. |
#### GPU Requirements
CUDA is required to make use of the GPU on your system. The provided Solana
release binaries are built on Ubuntu 18.04 with <a
href="https://developer.nvidia.com/cuda-toolkit-archive">CUDA Toolkit 10.1
update 1"</a>. If your machine is using a different CUDA version then you will
need to rebuild from source.
#### Confirm The Testnet Is Reachable #### Confirm The Testnet Is Reachable
Before attaching a validator node, sanity check that the cluster is accessible Before attaching a validator node, sanity check that the cluster is accessible
to your machine by running some simple commands. If any of the commands fail, to your machine by running some simple commands. If any of the commands fail,
@@ -71,11 +64,11 @@ for more detail on cluster activity.
##### Bootstrap with `solana-install` ##### Bootstrap with `solana-install`
The `solana-install` tool can be used to easily install and upgrade the cluster The `solana-install` tool can be used to easily install and upgrade the cluster
software on Linux x86_64 and mac OS systems. software on Linux x86_64 systems.
```bash ```bash
$ export SOLANA_RELEASE=v0.16.0 # skip this line to install the latest release $ export SOLANA_RELEASE=v0.14.2 # skip this line to install the latest release
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.16.0/install/solana-install-init.sh | sh -s $ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.14.0/install/solana-install-init.sh | sh -s
``` ```
Alternatively build the `solana-install` program from source and run the Alternatively build the `solana-install` program from source and run the
@@ -85,12 +78,11 @@ $ solana-install init
``` ```
After a successful install, `solana-install update` may be used to easily update the cluster After a successful install, `solana-install update` may be used to easily update the cluster
software to a newer version at any time. software to a newer version.
##### Download Prebuilt Binaries ##### Download Prebuilt Binaries
If you would rather not use `solana-install` to manage the install, you can manually download and install the binaries. Binaries are available for Linux x86_64 systems.
###### Linux
Download the binaries by navigating to Download the binaries by navigating to
[https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest), [https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest),
download **solana-release-x86_64-unknown-linux-gnu.tar.bz2**, then extract the download **solana-release-x86_64-unknown-linux-gnu.tar.bz2**, then extract the
@@ -100,17 +92,6 @@ $ tar jxf solana-release-x86_64-unknown-linux-gnu.tar.bz2
$ cd solana-release/ $ cd solana-release/
$ export PATH=$PWD/bin:$PATH $ export PATH=$PWD/bin:$PATH
``` ```
###### mac OS
Download the binaries by navigating to
[https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest),
download **solana-release-x86_64-apple-darwin.tar.bz2**, then extract the
archive:
```bash
$ tar jxf solana-release-x86_64-apple-darwin.tar.bz2
$ cd solana-release/
$ export PATH=$PWD/bin:$PATH
```
##### Build From Source ##### Build From Source
If you are unable to use the prebuilt binaries or prefer to build it yourself If you are unable to use the prebuilt binaries or prefer to build it yourself
from source, navigate to from source, navigate to
@@ -122,12 +103,6 @@ $ ./scripts/cargo-install-all.sh .
$ export PATH=$PWD/bin:$PATH $ export PATH=$PWD/bin:$PATH
``` ```
If building for CUDA, include the `cuda` feature flag as well:
```bash
$ ./scripts/cargo-install-all.sh . cuda
$ export PATH=$PWD/bin:$PATH
```
### Starting The Validator ### Starting The Validator
Sanity check that you are able to interact with the cluster by receiving a small Sanity check that you are able to interact with the cluster by receiving a small
airdrop of lamports from the testnet drone: airdrop of lamports from the testnet drone:
@@ -144,7 +119,7 @@ $ solana-gossip --entrypoint testnet.solana.com:8001 spy
Now configure a key pair for your validator by running: Now configure a key pair for your validator by running:
```bash ```bash
$ solana-keygen new -o ~/validator-keypair.json $ solana-keygen -o validator-keypair.json
``` ```
Then use one of the following commands, depending on your installation Then use one of the following commands, depending on your installation
@@ -153,33 +128,22 @@ choice, to start the node:
If this is a `solana-install`-installation: If this is a `solana-install`-installation:
```bash ```bash
$ clear-config.sh $ clear-config.sh
$ validator.sh --identity ~/validator-keypair.json --poll-for-new-genesis-block testnet.solana.com $ validator.sh --identity validator-keypair.json --poll-for-new-genesis-block testnet.solana.com
``` ```
Alternatively, the `solana-install run` command can be used to run the validator Alternatively, the `solana-install run` command can be used to run the validator
node while periodically checking for and applying software updates: node while periodically checking for and applying software updates:
```bash ```bash
$ clear-config.sh $ clear-config.sh
$ solana-install run validator.sh -- --identity ~/validator-keypair.json --poll-for-new-genesis-block testnet.solana.com $ solana-install run validator.sh -- --identity validator-keypair.json --poll-for-new-genesis-block testnet.solana.com
``` ```
If you built from source: If you built from source:
```bash ```bash
$ USE_INSTALL=1 ./multinode-demo/clear-config.sh $ USE_INSTALL=1 ./multinode-demo/clear-config.sh
$ USE_INSTALL=1 ./multinode-demo/validator.sh --identity ~/validator-keypair.json --poll-for-new-genesis-block testnet.solana.com $ USE_INSTALL=1 ./multinode-demo/validator.sh --identity validator-keypair.json --poll-for-new-genesis-block testnet.solana.com
``` ```
#### Enabling CUDA
By default CUDA is disabled. If your machine has a GPU with CUDA installed,
define the SOLANA_CUDA flag in your environment *before* running any of the
previously mentioned commands:
```bash
$ export SOLANA_CUDA=1
```
When your validator is started, look for the following log message to indicate that CUDA is enabled:
`"[<timestamp> solana::validator] CUDA is enabled"`
#### Controlling local network port allocation #### Controlling local network port allocation
By default the validator will dynamically select available network ports in the By default the validator will dynamically select available network ports in the
8000-10000 range, and may be overridden with `--dynamic-port-range`. For 8000-10000 range, and may be overridden with `--dynamic-port-range`. For
@@ -200,7 +164,7 @@ accounts: ...
The **identity pubkey** for your validator can also be found by running: The **identity pubkey** for your validator can also be found by running:
```bash ```bash
$ solana-keygen pubkey ~/validator-keypair.json $ solana-keygen pubkey validator-keypair.json
``` ```
From another console, confirm the IP address and **identity pubkey** of your validator is visible in the From another console, confirm the IP address and **identity pubkey** of your validator is visible in the
@@ -212,7 +176,7 @@ $ solana-gossip --entrypoint testnet.solana.com:8001 spy
Provide the **vote pubkey** to the `solana-wallet show-vote-account` command to view Provide the **vote pubkey** to the `solana-wallet show-vote-account` command to view
the recent voting activity from your validator: the recent voting activity from your validator:
```bash ```bash
$ solana-wallet show-vote-account 2ozWvfaXQd1X6uKh8jERoRGApDqSqcEy6fF1oN13LL2G $ solana-wallet -n testnet.solana.com show-vote-account 2ozWvfaXQd1X6uKh8jERoRGApDqSqcEy6fF1oN13LL2G
``` ```
The vote pubkey for the validator can also be found by running: The vote pubkey for the validator can also be found by running:
@@ -223,20 +187,13 @@ $ solana-keygen pubkey ~/.local/share/solana/install/active_release/config-local
$ solana-keygen pubkey ./config-local/validator-vote-keypair.json $ solana-keygen pubkey ./config-local/validator-vote-keypair.json
``` ```
### Sharing Metrics From Your Validator
#### Validator Metrics If you have obtained a metrics username/password from the Solana maintainers to
Metrics are available for local monitoring of your validator. help us monitor the health of the testnet, please perform the following steps
before starting the validator to activate metrics reporting:
Docker must be installed and the current user added to the docker group. Then
download `solana-metrics.tar.bz2` from the Github Release and run
```bash ```bash
$ tar jxf solana-metrics.tar.bz2 export u="username obtained from the Solana maintainers"
$ cd solana-metrics/ export p="password obtained from the Solana maintainers"
$ ./start.sh export SOLANA_METRICS_CONFIG="db=testnet,u=${u:?},p=${p:?}"
source scripts/configure-metrics.sh
``` ```
A local InfluxDB and Grafana instance is now running on your machine. Define
`SOLANA_METRICS_CONFIG` in your environment as described at the end of the
`start.sh` output and restart your validator.
Metrics should now be streaming and visible from your local Grafana dashboard.
View File
@@ -1,154 +0,0 @@
## Testnet Replicator
This document describes how to setup a replicator in the testnet
Please note some of the information and instructions described here may change
in future releases.
### Overview
Replicators are specialized light clients. They download a part of the
ledger (a.k.a Segment) and store it. They earn rewards for storing segments.
The testnet features a validator running at testnet.solana.com, which
serves as the entrypoint to the cluster for your replicator node.
Additionally there is a blockexplorer available at
[http://testnet.solana.com/](http://testnet.solana.com/).
The testnet is configured to reset the ledger daily, or sooner
should the hourly automated cluster sanity test fail.
### Machine Requirements
Replicators don't need specialized hardware. Anything with more than
128GB of disk space will be able to participate in the cluster as a replicator node.
Currently the disk space requirements are very low but we expect them to change
in the future.
Prebuilt binaries are available for Linux x86_64 (Ubuntu 18.04 recommended),
macOS, and Windows.
#### Confirm The Testnet Is Reachable
Before starting a replicator node, sanity check that the cluster is accessible
to your machine by running some simple commands. If any of the commands fail,
please retry 5-10 minutes later to confirm the testnet is not just restarting
itself before debugging further.
Fetch the current transaction count over JSON RPC:
```bash
$ curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getTransactionCount"}' http://testnet.solana.com:8899
```
Inspect the blockexplorer at [http://testnet.solana.com/](http://testnet.solana.com/) for activity.
View the [metrics dashboard](
https://metrics.solana.com:3000/d/testnet-beta/testnet-monitor-beta?var-testnet=testnet)
for more detail on cluster activity.
### Replicator Setup
##### Obtaining The Software
##### Bootstrap with `solana-install`
The `solana-install` tool can be used to easily install and upgrade the cluster
software.
##### Linux and mac OS
```bash
$ export SOLANA_RELEASE=v0.16.0 # skip this line to install the latest release
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.16.0/install/solana-install-init.sh | sh -s
```
Alternatively build the `solana-install` program from source and run the
following command to obtain the same result:
```bash
$ solana-install init
```
##### Windows
Download and install **solana-install-init** from
[https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest)
After a successful install, `solana-install update` may be used to
easily update the software to a newer version at any time.
##### Download Prebuilt Binaries
If you would rather not use `solana-install` to manage the install, you can manually download and install the binaries.
##### Linux
Download the binaries by navigating to
[https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest),
download **solana-release-x86_64-unknown-linux-gnu.tar.bz2**, then extract the
archive:
```bash
$ tar jxf solana-release-x86_64-unknown-linux-gnu.tar.bz2
$ cd solana-release/
$ export PATH=$PWD/bin:$PATH
```
##### mac OS
Download the binaries by navigating to
[https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest),
download **solana-release-x86_64-apple-darwin.tar.bz2**, then extract the
archive:
```bash
$ tar jxf solana-release-x86_64-apple-darwin.tar.bz2
$ cd solana-release/
$ export PATH=$PWD/bin:$PATH
```
##### Windows
Download the binaries by navigating to
[https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest),
download **solana-release-x86_64-pc-windows-msvc.tar.bz2**, then extract it into a folder.
It is a good idea to add this extracted folder to your windows PATH.
### Starting The Replicator
Try running the following command to join the gossip network and view all the other nodes in the cluster:
```bash
$ solana-gossip --entrypoint testnet.solana.com:8001 spy
# Press ^C to exit
```
Now configure the keypairs for your replicator. On Windows, first navigate to the
solana install location and open a cmd prompt. Then run:
```bash
$ solana-keygen new -o replicator-keypair.json
$ solana-keygen new -o storage-keypair.json
```
Use solana-keygen to show the public keys for each of the keypairs;
they will be needed in the next step:
- Windows
```bash
# The replicator's identity
$ solana-keygen pubkey replicator-keypair.json
$ solana-keygen pubkey storage-keypair.json
```
- Linux and mac OS
```bash
$ export REPLICATOR_IDENTITY=$(solana-keygen pubkey replicator-keypair.json)
$ export STORAGE_IDENTITY=$(solana-keygen pubkey storage-keypair.json)
```
Then set up the storage accounts for your replicator by running:
```bash
$ solana-wallet --keypair replicator-keypair.json airdrop 100000
$ solana-wallet --keypair replicator-keypair.json create-replicator-storage-account $REPLICATOR_IDENTITY $STORAGE_IDENTITY
```
Note: Every time the testnet restarts, run the wallet steps to set up the replicator accounts again.
To start the replicator:
```bash
$ solana-replicator --entrypoint testnet.solana.com:8001 --identity replicator-keypair.json --storage-keypair storage-keypair.json --ledger replicator-ledger
```
### Verify Replicator Setup
From another console, confirm the IP address and **identity pubkey** of your replicator is visible in the
gossip network by running:
```bash
$ solana-gossip --entrypoint testnet.solana.com:8001 spy
```
Provide the **storage account pubkey** to the `solana-wallet show-storage-account` command to view
the recent mining activity from your replicator:
```bash
$ solana-wallet --keypair storage-keypair.json show-storage-account $STORAGE_IDENTITY
```
View File
@@ -8,14 +8,17 @@ client won't know how much was collected until the transaction is confirmed by
the cluster and the remaining balance is checked. It smells of exactly what we the cluster and the remaining balance is checked. It smells of exactly what we
dislike about Ethereum's "gas", non-determinism. dislike about Ethereum's "gas", non-determinism.
## Implementation Status
This design is not yet implemented, but is written as though it has been. Once
implemented, delete this comment.
### Congestion-driven fees ### Congestion-driven fees
Each validator uses *signatures per slot* (SPS) to estimate network congestion Each validator uses *signatures per slot* (SPS) to estimate network congestion
and *SPS target* to estimate the desired processing capacity of the cluster. and *SPS target* to estimate the desired processing capacity of the cluster.
The validator learns the SPS target from the genesis block, whereas it The validator learns the SPS target from the genesis block, whereas it
calculates SPS from recently processed transactions. The genesis block also calculates SPS from the ledger data in the previous epoch.
defines a target `lamports_per_signature`, which is the fee to charge per
signature when the cluster is operating at *SPS target*.
### Calculating fees ### Calculating fees
@@ -34,11 +37,8 @@ lamports as returned by the fee calculator.
In the first implementation of this design, the only fee parameter is In the first implementation of this design, the only fee parameter is
`lamports_per_signature`. The more signatures the cluster needs to verify, the `lamports_per_signature`. The more signatures the cluster needs to verify, the
higher the fee. The exact number of lamports is determined by the ratio of SPS higher the fee. The exact number of lamports is determined by the ratio of SPS
to the SPS target. At the end of each slot, the cluster lowers to the SPS target. The cluster lowers `lamports_per_signature` when SPS is
`lamports_per_signature` when SPS is below the target and raises it when above below the target and raises it when at or above the target.
the target. The minimum value for `lamports_per_signature` is 50% of the target
`lamports_per_signature` and the maximum value is 10x the target
`lamports_per_signature`.
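A minimal sketch of that adjustment rule; the per-slot step size is an assumption, since the design only fixes the direction of the change and the 50%-10x bounds:

```rust
// Sketch of the per-slot `lamports_per_signature` adjustment described above.
fn adjust_lamports_per_signature(current: u64, target: u64, sps: f64, sps_target: f64) -> u64 {
    let min = target / 2;  // 50% of the target fee
    let max = target * 10; // 10x the target fee
    let step = (target / 20).max(1); // assumed step: 5% of target per slot
    let next = if sps > sps_target {
        current.saturating_add(step) // congestion above target: raise the fee
    } else {
        current.saturating_sub(step) // below target: lower the fee
    };
    next.clamp(min, max)
}
```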
Future parameters might include: Future parameters might include:
View File
@@ -284,18 +284,6 @@ ARGS:
<PATH> /path/to/program.o <PATH> /path/to/program.o
``` ```
```manpage
solana-wallet-fees
Display current cluster fees
USAGE:
solana-wallet fees
FLAGS:
-h, --help Prints help information
-V, --version Prints version information
```
```manpage ```manpage
solana-wallet-get-transaction-count solana-wallet-get-transaction-count
Get current transaction count Get current transaction count
build-perf-libs.sh Executable file
View File
@@ -0,0 +1,23 @@
#!/usr/bin/env bash
#
# Builds perf-libs from the upstream source and installs them into the correct
# location in the tree
#
set -e
cd "$(dirname "$0")"
if [[ -d target/perf-libs ]]; then
echo "target/perf-libs/ already exists, to continue run:"
echo "$ rm -rf target/perf-libs"
exit 1
fi
(
set -x
git clone git@github.com:solana-labs/solana-perf-libs.git target/perf-libs
cd target/perf-libs
make -j"$(nproc)"
make DESTDIR=. install
)
./fetch-perf-libs.sh
View File
@@ -1,12 +0,0 @@
[package]
name = "solana-chacha-sys"
version = "0.16.1"
description = "Solana chacha-sys"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
homepage = "https://solana.com/"
license = "Apache-2.0"
edition = "2018"
[build-dependencies]
cc = "1.0.37"
View File
@@ -1,8 +0,0 @@
extern crate cc;
fn main() {
cc::Build::new()
.file("cpu-crypt/chacha20_core.c")
.file("cpu-crypt/chacha_cbc.c")
.compile("libcpu-crypt");
}
View File
@@ -1 +0,0 @@
release/
View File
@@ -1,25 +0,0 @@
V:=debug
LIB:=cpu-crypt
CFLAGS_common:=-Wall -Werror -pedantic -fPIC
CFLAGS_release:=-march=native -O3 $(CFLAGS_common)
CFLAGS_debug:=-g $(CFLAGS_common)
CFLAGS:=$(CFLAGS_$V)
all: $V/lib$(LIB).a
$V/chacha20_core.o: chacha20_core.c chacha.h
@mkdir -p $(@D)
$(CC) $(CFLAGS) -c $< -o $@
$V/chacha_cbc.o: chacha_cbc.c chacha.h
@mkdir -p $(@D)
$(CC) $(CFLAGS) -c $< -o $@
$V/lib$(LIB).a: $V/chacha20_core.o $V/chacha_cbc.o
$(AR) rcs $@ $^
.PHONY:clean
clean:
rm -rf $V
View File
@@ -1,35 +0,0 @@
#ifndef HEADER_CHACHA_H
# define HEADER_CHACHA_H
#include <string.h>
#include <inttypes.h>
# include <stddef.h>
# ifdef __cplusplus
extern "C" {
# endif
typedef unsigned int u32;
#define CHACHA_KEY_SIZE 32
#define CHACHA_NONCE_SIZE 12
#define CHACHA_BLOCK_SIZE 64
#define CHACHA_ROUNDS 500
void chacha20_encrypt(const u32 input[16],
unsigned char output[64],
int num_rounds);
void chacha20_encrypt_ctr(const uint8_t *in, uint8_t *out, size_t in_len,
const uint8_t key[CHACHA_KEY_SIZE], const uint8_t nonce[CHACHA_NONCE_SIZE],
uint32_t counter);
void chacha20_cbc128_encrypt(const unsigned char* in, unsigned char* out,
uint32_t len, const uint8_t* key,
unsigned char* ivec);
# ifdef __cplusplus
}
# endif
#endif
View File
@@ -1,102 +0,0 @@
#include "chacha.h"
#define ROTL32(v, n) (((v) << (n)) | ((v) >> (32 - (n))))
#define ROTATE(v, c) ROTL32((v), (c))
#define XOR(v, w) ((v) ^ (w))
#define PLUS(x, y) ((x) + (y))
#define U32TO8_LITTLE(p, v) \
{ (p)[0] = ((v) ) & 0xff; (p)[1] = ((v) >> 8) & 0xff; \
(p)[2] = ((v) >> 16) & 0xff; (p)[3] = ((v) >> 24) & 0xff; }
#define U8TO32_LITTLE(p) \
(((u32)((p)[0]) ) | ((u32)((p)[1]) << 8) | \
((u32)((p)[2]) << 16) | ((u32)((p)[3]) << 24) )
#define QUARTERROUND(a,b,c,d) \
x[a] = PLUS(x[a],x[b]); x[d] = ROTATE(XOR(x[d],x[a]),16); \
x[c] = PLUS(x[c],x[d]); x[b] = ROTATE(XOR(x[b],x[c]),12); \
x[a] = PLUS(x[a],x[b]); x[d] = ROTATE(XOR(x[d],x[a]), 8); \
x[c] = PLUS(x[c],x[d]); x[b] = ROTATE(XOR(x[b],x[c]), 7);
// sigma contains the ChaCha constants, which happen to be an ASCII string.
static const uint8_t sigma[16] = { 'e', 'x', 'p', 'a', 'n', 'd', ' ', '3',
'2', '-', 'b', 'y', 't', 'e', ' ', 'k' };
void chacha20_encrypt(const u32 input[16],
unsigned char output[64],
int num_rounds)
{
u32 x[16];
int i;
memcpy(x, input, sizeof(u32) * 16);
for (i = num_rounds; i > 0; i -= 2) {
QUARTERROUND( 0, 4, 8,12)
QUARTERROUND( 1, 5, 9,13)
QUARTERROUND( 2, 6,10,14)
QUARTERROUND( 3, 7,11,15)
QUARTERROUND( 0, 5,10,15)
QUARTERROUND( 1, 6,11,12)
QUARTERROUND( 2, 7, 8,13)
QUARTERROUND( 3, 4, 9,14)
}
for (i = 0; i < 16; ++i) {
x[i] = PLUS(x[i], input[i]);
}
for (i = 0; i < 16; ++i) {
U32TO8_LITTLE(output + 4 * i, x[i]);
}
}
void chacha20_encrypt_ctr(const uint8_t *in, uint8_t *out, size_t in_len,
const uint8_t key[CHACHA_KEY_SIZE],
const uint8_t nonce[CHACHA_NONCE_SIZE],
uint32_t counter)
{
uint32_t input[16];
uint8_t buf[64];
size_t todo, i;
input[0] = U8TO32_LITTLE(sigma + 0);
input[1] = U8TO32_LITTLE(sigma + 4);
input[2] = U8TO32_LITTLE(sigma + 8);
input[3] = U8TO32_LITTLE(sigma + 12);
input[4] = U8TO32_LITTLE(key + 0);
input[5] = U8TO32_LITTLE(key + 4);
input[6] = U8TO32_LITTLE(key + 8);
input[7] = U8TO32_LITTLE(key + 12);
input[8] = U8TO32_LITTLE(key + 16);
input[9] = U8TO32_LITTLE(key + 20);
input[10] = U8TO32_LITTLE(key + 24);
input[11] = U8TO32_LITTLE(key + 28);
input[12] = counter;
input[13] = U8TO32_LITTLE(nonce + 0);
input[14] = U8TO32_LITTLE(nonce + 4);
input[15] = U8TO32_LITTLE(nonce + 8);
while (in_len > 0) {
todo = sizeof(buf);
if (in_len < todo) {
todo = in_len;
}
chacha20_encrypt(input, buf, 20);
for (i = 0; i < todo; i++) {
out[i] = in[i] ^ buf[i];
}
out += todo;
in += todo;
in_len -= todo;
input[12]++;
}
View File

@@ -1,72 +0,0 @@
#include "chacha.h"
#if !defined(STRICT_ALIGNMENT) && !defined(PEDANTIC)
# define STRICT_ALIGNMENT 0
#endif
void chacha20_cbc128_encrypt(const unsigned char* in, unsigned char* out,
uint32_t len, const uint8_t* key,
unsigned char* ivec)
{
size_t n;
unsigned char *iv = ivec;
(void)key;
if (len == 0) {
return;
}
#if !defined(OPENSSL_SMALL_FOOTPRINT)
if (STRICT_ALIGNMENT &&
((size_t)in | (size_t)out | (size_t)ivec) % sizeof(size_t) != 0) {
while (len >= CHACHA_BLOCK_SIZE) {
for (n = 0; n < CHACHA_BLOCK_SIZE; ++n) {
out[n] = in[n] ^ iv[n];
//printf("%x ", out[n]);
}
chacha20_encrypt((const u32*)out, out, CHACHA_ROUNDS);
iv = out;
len -= CHACHA_BLOCK_SIZE;
in += CHACHA_BLOCK_SIZE;
out += CHACHA_BLOCK_SIZE;
}
} else {
while (len >= CHACHA_BLOCK_SIZE) {
for (n = 0; n < CHACHA_BLOCK_SIZE; n += sizeof(size_t)) {
*(size_t *)(out + n) =
*(size_t *)(in + n) ^ *(size_t *)(iv + n);
//printf("%zu ", *(size_t *)(iv + n));
}
chacha20_encrypt((const u32*)out, out, CHACHA_ROUNDS);
iv = out;
len -= CHACHA_BLOCK_SIZE;
in += CHACHA_BLOCK_SIZE;
out += CHACHA_BLOCK_SIZE;
}
}
#endif
while (len) {
for (n = 0; n < CHACHA_BLOCK_SIZE && n < len; ++n) {
out[n] = in[n] ^ iv[n];
}
for (; n < CHACHA_BLOCK_SIZE; ++n) {
out[n] = iv[n];
}
chacha20_encrypt((const u32*)out, out, CHACHA_ROUNDS);
iv = out;
if (len <= CHACHA_BLOCK_SIZE) {
break;
}
len -= CHACHA_BLOCK_SIZE;
in += CHACHA_BLOCK_SIZE;
out += CHACHA_BLOCK_SIZE;
}
memcpy(ivec, iv, CHACHA_BLOCK_SIZE);
}
void chacha20_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t in_len,
const uint8_t key[CHACHA_KEY_SIZE], uint8_t* ivec)
{
chacha20_cbc128_encrypt(in, out, in_len, key, ivec);
}
View File
@@ -1,21 +0,0 @@
extern "C" {
fn chacha20_cbc_encrypt(
input: *const u8,
output: *mut u8,
in_len: usize,
key: *const u8,
ivec: *mut u8,
);
}
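// Safe Rust wrapper over the C `chacha20_cbc_encrypt` declared above; the
// caller must ensure `output` is at least `input.len()` bytes, and `ivec`
// is updated in place by the underlying CBC routine.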
pub fn chacha_cbc_encrypt(input: &[u8], output: &mut [u8], key: &[u8], ivec: &mut [u8]) {
unsafe {
chacha20_cbc_encrypt(
input.as_ptr(),
output.as_mut_ptr(),
input.len(),
key.as_ptr(),
ivec.as_mut_ptr(),
);
}
}
View File
@@ -12,7 +12,7 @@
set -e set -e
cd "$(dirname "$0")"/.. cd "$(dirname "$0")"/..
if [[ -n $CI_PULL_REQUEST ]]; then if ci/is-pr.sh; then
affectedFiles="$(buildkite-agent meta-data get affected_files)" affectedFiles="$(buildkite-agent meta-data get affected_files)"
echo "Affected files in this PR: $affectedFiles" echo "Affected files in this PR: $affectedFiles"
View File
@@ -2,13 +2,13 @@ steps:
- command: "ci/shellcheck.sh" - command: "ci/shellcheck.sh"
name: "shellcheck" name: "shellcheck"
timeout_in_minutes: 5 timeout_in_minutes: 5
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-checks.sh" - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-checks.sh"
name: "checks" name: "checks"
timeout_in_minutes: 15 timeout_in_minutes: 15
- wait - wait
- command: "ci/test-stable-perf.sh" - command: "ci/test-stable-perf.sh"
name: "stable-perf" name: "stable-perf"
timeout_in_minutes: 30 timeout_in_minutes: 20
artifact_paths: "log-*.txt" artifact_paths: "log-*.txt"
agents: agents:
- "queue=cuda" - "queue=cuda"
@@ -21,7 +21,7 @@ steps:
artifact_paths: "log-*.txt" artifact_paths: "log-*.txt"
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-coverage.sh" - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-coverage.sh"
name: "coverage" name: "coverage"
timeout_in_minutes: 40 timeout_in_minutes: 20
# TODO: Fix and re-enable test-large-network.sh # TODO: Fix and re-enable test-large-network.sh
# - command: "ci/test-large-network.sh || true" # - command: "ci/test-large-network.sh || true"
# name: "large-network [ignored]" # name: "large-network [ignored]"
View File
@@ -89,11 +89,11 @@ BETA_CHANNEL_LATEST_TAG=${beta_tag:+v$beta_tag}
STABLE_CHANNEL_LATEST_TAG=${stable_tag:+v$stable_tag} STABLE_CHANNEL_LATEST_TAG=${stable_tag:+v$stable_tag}
if [[ $CI_BRANCH = "$STABLE_CHANNEL" ]]; then if [[ $BUILDKITE_BRANCH = "$STABLE_CHANNEL" ]]; then
CHANNEL=stable CHANNEL=stable
elif [[ $CI_BRANCH = "$EDGE_CHANNEL" ]]; then elif [[ $BUILDKITE_BRANCH = "$EDGE_CHANNEL" ]]; then
CHANNEL=edge CHANNEL=edge
elif [[ $CI_BRANCH = "$BETA_CHANNEL" ]]; then elif [[ $BUILDKITE_BRANCH = "$BETA_CHANNEL" ]]; then
CHANNEL=beta CHANNEL=beta
fi fi
View File
@@ -64,14 +64,11 @@ fi
ARGS+=( ARGS+=(
--env BUILDKITE --env BUILDKITE
--env BUILDKITE_AGENT_ACCESS_TOKEN --env BUILDKITE_AGENT_ACCESS_TOKEN
--env BUILDKITE_BRANCH
--env BUILDKITE_COMMIT
--env BUILDKITE_JOB_ID --env BUILDKITE_JOB_ID
--env BUILDKITE_TAG
--env CI --env CI
--env CI_BRANCH
--env CI_BUILD_ID
--env CI_COMMIT
--env CI_JOB_ID
--env CI_PULL_REQUEST
--env CI_REPO_SLUG
--env CODECOV_TOKEN --env CODECOV_TOKEN
--env CRATES_IO_TOKEN --env CRATES_IO_TOKEN
) )
View File
@@ -3,7 +3,6 @@ ARG date
RUN set -x \ RUN set -x \
&& rustup install nightly-$date \ && rustup install nightly-$date \
&& rustup component add clippy --toolchain=nightly-$date \
&& rustup show \ && rustup show \
&& rustc --version \ && rustc --version \
&& cargo --version \ && cargo --version \
View File
@@ -15,12 +15,12 @@ To update the pinned version:
1. Run `ci/docker-rust-nightly/build.sh` to rebuild the nightly image locally, 1. Run `ci/docker-rust-nightly/build.sh` to rebuild the nightly image locally,
or potentially `ci/docker-rust-nightly/build.sh YYYY-MM-DD` if there's a or potentially `ci/docker-rust-nightly/build.sh YYYY-MM-DD` if there's a
specific YYYY-MM-DD that is desired (default is today's build). specific YYYY-MM-DD that is desired (default is today's build).
1. Update `ci/rust-version.sh` to reflect the new nightly `YYYY-MM-DD`
1. Run `SOLANA_DOCKER_RUN_NOSETUID=1 ci/docker-run.sh --nopull solanalabs/rust-nightly:YYYY-MM-DD ci/test-coverage.sh` 1. Run `SOLANA_DOCKER_RUN_NOSETUID=1 ci/docker-run.sh --nopull solanalabs/rust-nightly:YYYY-MM-DD ci/test-coverage.sh`
to confirm the new nightly image builds. Fix any issues as needed to confirm the new nightly image builds. Fix any issues as needed
1. Run `docker login` to enable pushing images to Docker Hub, if you're authorized. 1. Run `docker login` to enable pushing images to Docker Hub, if you're authorized.
1. Run `CI=true ci/docker-rust-nightly/build.sh YYYY-MM-DD` to push the new nightly image to dockerhub.com. 1. Run `CI=true ci/docker-rust-nightly/build.sh YYYY-MM-DD` to push the new nightly image to dockerhub.com.
1. Send a PR with the `ci/rust-version.sh` change and any codebase adjustments needed. 1. Modify the `solanalabs/rust-nightly:YYYY-MM-DD` reference in `ci/rust-version.sh` from the previous to
new *YYYY-MM-DD* value, send a PR with this change and any codebase adjustments needed.
## Troubleshooting ## Troubleshooting
View File
@@ -1,6 +1,6 @@
# Note: when the rust version is changed also modify # Note: when the rust version is changed also modify
# ci/rust-version.sh to pick up the new image tag # ci/buildkite.yml to pick up the new image tag
FROM rust:1.35.0 FROM rust:1.34.0
RUN set -x \ RUN set -x \
&& apt update \ && apt update \
View File
@@ -1,7 +1,6 @@
Docker image containing rust and some preinstalled packages used in CI. Docker image containing rust and some preinstalled packages used in CI.
This image is manually maintained: This image may be manually updated by running `./build.sh` if you are a member
1. Edit `Dockerfile` to match the desired rust version of the [Solana Labs](https://hub.docker.com/u/solanalabs/) Docker Hub
2. Run `./build.sh` to publish the new image, if you are a member of the [Solana organization, but it is also automatically updated periodically by
Labs](https://hub.docker.com/u/solanalabs/) Docker Hub organization. [this automation](https://buildkite.com/solana-labs/solana-ci-docker-rust).

View File

@@ -1,89 +0,0 @@
#
# Normalized CI environment variables
#
# |source| me
#
if [[ -n $CI ]]; then
export CI=1
if [[ -n $TRAVIS ]]; then
export CI_BRANCH=$TRAVIS_BRANCH
export CI_BUILD_ID=$TRAVIS_BUILD_ID
export CI_COMMIT=$TRAVIS_COMMIT
export CI_JOB_ID=$TRAVIS_JOB_ID
if [[ $TRAVIS_PULL_REQUEST != false ]]; then
export CI_PULL_REQUEST=true
else
export CI_PULL_REQUEST=
fi
export CI_OS_NAME=$TRAVIS_OS_NAME
export CI_REPO_SLUG=$TRAVIS_REPO_SLUG
export CI_TAG=$TRAVIS_TAG
elif [[ -n $BUILDKITE ]]; then
export CI_BRANCH=$BUILDKITE_BRANCH
export CI_BUILD_ID=$BUILDKITE_BUILD_ID
export CI_COMMIT=$BUILDKITE_COMMIT
export CI_JOB_ID=$BUILDKITE_JOB_ID
# The standard BUILDKITE_PULL_REQUEST environment variable is always "false" due
# to how solana-ci-gate is used to trigger PR builds rather than using the
# standard Buildkite PR trigger.
if [[ $CI_BRANCH =~ pull/* ]]; then
export CI_PULL_REQUEST=true
else
export CI_PULL_REQUEST=
fi
export CI_OS_NAME=linux
if [[ -n $BUILDKITE_TRIGGERED_FROM_BUILD_PIPELINE_SLUG ]]; then
# The solana-secondary pipeline should use the slug of the pipeline that
# triggered it
export CI_REPO_SLUG=$BUILDKITE_ORGANIZATION_SLUG/$BUILDKITE_TRIGGERED_FROM_BUILD_PIPELINE_SLUG
else
export CI_REPO_SLUG=$BUILDKITE_ORGANIZATION_SLUG/$BUILDKITE_PIPELINE_SLUG
fi
# TRIGGERED_BUILDKITE_TAG is a workaround to propagate BUILDKITE_TAG into
# the solana-secondary pipeline
if [[ -n $TRIGGERED_BUILDKITE_TAG ]]; then
export CI_TAG=$TRIGGERED_BUILDKITE_TAG
else
export CI_TAG=$BUILDKITE_TAG
fi
elif [[ -n $APPVEYOR ]]; then
export CI_BRANCH=$APPVEYOR_REPO_BRANCH
export CI_BUILD_ID=$APPVEYOR_BUILD_ID
export CI_COMMIT=$APPVEYOR_REPO_COMMIT
export CI_JOB_ID=$APPVEYOR_JOB_ID
if [[ -n $APPVEYOR_PULL_REQUEST_NUMBER ]]; then
export CI_PULL_REQUEST=true
else
export CI_PULL_REQUEST=
fi
if [[ $CI_LINUX = True ]]; then
export CI_OS_NAME=linux
elif [[ $CI_WINDOWS = True ]]; then
export CI_OS_NAME=windows
fi
export CI_REPO_SLUG=$APPVEYOR_REPO_NAME
export CI_TAG=$APPVEYOR_REPO_TAG_NAME
fi
else
export CI=
export CI_BRANCH=
export CI_BUILD_ID=
export CI_COMMIT=
export CI_JOB_ID=
export CI_OS_NAME=
export CI_PULL_REQUEST=
export CI_REPO_SLUG=
export CI_TAG=
fi
cat <<EOF
CI=$CI
CI_BRANCH=$CI_BRANCH
CI_BUILD_ID=$CI_BUILD_ID
CI_COMMIT=$CI_COMMIT
CI_JOB_ID=$CI_JOB_ID
CI_OS_NAME=$CI_OS_NAME
CI_PULL_REQUEST=$CI_PULL_REQUEST
CI_TAG=$CI_TAG
EOF
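
Every downstream script can then branch on the provider-neutral CI_* names alone; a minimal sketch of a consumer (the echoes are illustrative):

source ci/env.sh
if [[ -n $CI_PULL_REQUEST ]]; then
  echo "pull-request build of $CI_BRANCH (job $CI_JOB_ID)"
elif [[ -n $CI_TAG ]]; then
  echo "release build for tag $CI_TAG"
fi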

ci/is-pr.sh Executable file (+9 lines)
View File

@@ -0,0 +1,9 @@
#!/usr/bin/env bash
set -e
#
# The standard BUILDKITE_PULL_REQUEST environment variable is always "false" due
# to how solana-ci-gate is used to trigger PR builds rather than using the
# standard Buildkite PR trigger.
#
[[ $BUILDKITE_BRANCH =~ pull/* ]]
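
A hedged sketch of how a caller uses this helper's exit status (the metrics gating mirrors the test-bench.sh hunk further down):

if ci/is-pr.sh; then
  echo "PR build; skipping metrics upload"
  UPLOAD_METRICS=
else
  UPLOAD_METRICS=upload
fi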

View File

@@ -294,7 +294,7 @@ flag_error() {
} }
if ! $skipSetup; then if ! $skipSetup; then
multinode-demo/setup.sh multinode-demo/setup.sh --hashes-per-tick auto
else else
verifyLedger verifyLedger
fi fi
@@ -307,7 +307,7 @@ while [[ $iteration -le $iterations ]]; do
source multinode-demo/common.sh source multinode-demo/common.sh
set -x set -x
client_keypair=/tmp/client-id.json-$$ client_keypair=/tmp/client-id.json-$$
$solana_keygen new -f -o $client_keypair || exit $? $solana_keygen -o $client_keypair || exit $?
$solana_gossip spy --num-nodes-exactly $numNodes || exit $? $solana_gossip spy --num-nodes-exactly $numNodes || exit $?
rm -rf $client_keypair rm -rf $client_keypair
) || flag_error ) || flag_error

View File

@@ -23,13 +23,11 @@ declare print_free_tree=(
'metrics/src' 'metrics/src'
'netutil/src' 'netutil/src'
'runtime/src' 'runtime/src'
'sdk/bpf/rust/rust-utils'
'sdk/src' 'sdk/src'
'programs/bpf/rust'
'programs/stake_api/src'
'programs/stake_program/src'
'programs/vote_api/src' 'programs/vote_api/src'
'programs/vote_program/src' 'programs/vote_program/src'
'programs/stake_api/src'
'programs/stake_program/src'
) )
if _ git --no-pager grep -n --max-depth=0 "${prints[@]/#/-e }" -- "${print_free_tree[@]}"; then if _ git --no-pager grep -n --max-depth=0 "${prints[@]/#/-e }" -- "${print_free_tree[@]}"; then

View File

@@ -1,65 +0,0 @@
#!/usr/bin/env python
#
# This script figures out the order in which workspace crates must be published to
# crates.io. Along the way it also ensures there are no circular dependencies
# that would cause a |cargo publish| to fail.
#
# On success an ordered list of Cargo.toml files is written to stdout
#
import os
import json
import subprocess
import sys

def load_metadata():
    return json.loads(subprocess.Popen(
        'cargo metadata --no-deps --format-version=1',
        shell=True, stdout=subprocess.PIPE).communicate()[0])

def get_packages():
    metadata = load_metadata()

    manifest_path = dict()

    # Build dictionary of packages and their immediate solana-only dependencies
    dependency_graph = dict()
    for pkg in metadata['packages']:
        manifest_path[pkg['name']] = pkg['manifest_path']
        dependency_graph[pkg['name']] = [x['name'] for x in pkg['dependencies'] if x['name'].startswith('solana')]

    # Check for direct circular dependencies
    circular_dependencies = set()
    for package, dependencies in dependency_graph.items():
        for dependency in dependencies:
            if dependency in dependency_graph and package in dependency_graph[dependency]:
                circular_dependencies.add(' <--> '.join(sorted([package, dependency])))

    for dependency in circular_dependencies:
        sys.stderr.write('Error: Circular dependency: {}\n'.format(dependency))

    if len(circular_dependencies) != 0:
        sys.exit(1)

    # Order dependencies
    sorted_dependency_graph = []
    max_iterations = pow(len(dependency_graph), 2)
    while dependency_graph:
        if max_iterations == 0:
            # TODO: Be more helpful and find the actual cycle for the user
            sys.exit('Error: Circular dependency suspected between these packages: {}\n'.format(' '.join(dependency_graph.keys())))
        max_iterations -= 1

        for package, dependencies in dependency_graph.items():
            for dependency in dependencies:
                if dependency in dependency_graph:
                    break
            else:
                del dependency_graph[package]
                sorted_dependency_graph.append((package, manifest_path[package]))

    return sorted_dependency_graph

for package, manifest in get_packages():
    print os.path.relpath(manifest)

View File

@@ -13,7 +13,7 @@ echo --- create book repo
git config user.email "maintainers@solana.com" git config user.email "maintainers@solana.com"
git config user.name "$(basename "$0")" git config user.name "$(basename "$0")"
git add ./* ./.nojekyll git add ./* ./.nojekyll
git commit -m "${CI_COMMIT:-local}" git commit -m "${BUILDKITE_COMMIT:-local}"
) )
eval "$(ci/channel-info.sh)" eval "$(ci/channel-info.sh)"

View File

@@ -3,21 +3,43 @@ set -e
cd "$(dirname "$0")/.." cd "$(dirname "$0")/.."
source ci/semver_bash/semver.sh source ci/semver_bash/semver.sh
# shellcheck disable=SC2086 # List of internal crates to publish
is_crate_version_uploaded() { #
name=$1 # IMPORTANT: the order of the CRATES *is* significant. Crates must be published
version=$2 # before the crates that depend on them. Note that this information is already
curl https://crates.io/api/v1/crates/${name}/${version} | \ # expressed in the various Cargo.toml files, and ideally would not be duplicated
python3 -c "import sys,json; print('version' in json.load(sys.stdin));" # here. (TODO: figure the crate ordering dynamically)
} #
CRATES=(
kvstore
logger
netutil
sdk
keygen
metrics
client
drone
programs/{budget_api,config_api,stake_api,storage_api,token_api,vote_api,exchange_api}
programs/{vote_program,budget_program,bpf_loader,config_program,exchange_program,failure_program}
programs/{noop_program,stake_program,storage_program,token_program}
runtime
vote-signer
core
validator
genesis
gossip
ledger-tool
wallet
install
)
# Only package/publish if this is a tagged release # Only package/publish if this is a tagged release
[[ -n $CI_TAG ]] || { [[ -n $TRIGGERED_BUILDKITE_TAG ]] || {
echo CI_TAG unset, skipped echo TRIGGERED_BUILDKITE_TAG unset, skipped
exit 0 exit 0
} }
semverParseInto "$CI_TAG" MAJOR MINOR PATCH SPECIAL semverParseInto "$TRIGGERED_BUILDKITE_TAG" MAJOR MINOR PATCH SPECIAL
expectedCrateVersion="$MAJOR.$MINOR.$PATCH$SPECIAL" expectedCrateVersion="$MAJOR.$MINOR.$PATCH$SPECIAL"
[[ -n "$CRATES_IO_TOKEN" ]] || { [[ -n "$CRATES_IO_TOKEN" ]] || {
@@ -27,37 +49,25 @@ expectedCrateVersion="$MAJOR.$MINOR.$PATCH$SPECIAL"
cargoCommand="cargo publish --token $CRATES_IO_TOKEN" cargoCommand="cargo publish --token $CRATES_IO_TOKEN"
Cargo_tomls=$(ci/order-crates-for-publishing.py) for crate in "${CRATES[@]}"; do
if [[ ! -r $crate/Cargo.toml ]]; then
for Cargo_toml in $Cargo_tomls; do echo "Error: $crate/Cargo.toml does not exist"
echo "-- $Cargo_toml" exit 1
grep -q "^version = \"$expectedCrateVersion\"$" "$Cargo_toml" || { fi
echo "Error: $Cargo_toml version is not $expectedCrateVersion" echo "-- $crate"
grep -q "^version = \"$expectedCrateVersion\"$" "$crate"/Cargo.toml || {
echo "Error: $crate/Cargo.toml version is not $expectedCrateVersion"
exit 1 exit 1
} }
( (
set -x set -x
crate=$(dirname "$Cargo_toml")
# TODO: the rocksdb package does not build with the stock rust docker image, # TODO: the rocksdb package does not build with the stock rust docker image,
# so use the solana rust docker image until this is resolved upstream # so use the solana rust docker image until this is resolved upstream
source ci/rust-version.sh source ci/rust-version.sh
ci/docker-run.sh "$rust_stable_docker_image" bash -exc "cd $crate; $cargoCommand" ci/docker-run.sh "$rust_stable_docker_image" bash -exc "cd $crate; $cargoCommand"
) || true # <-- Don't fail. We want to be able to retry the job in cases when a publish fails halfway due to network/cloud issues #ci/docker-run.sh rust bash -exc "cd $crate; $cargoCommand"
)
# shellcheck disable=SC2086
crate_name=$(grep -m 1 '^name = ' $Cargo_toml | cut -f 3 -d ' ' | tr -d \")
numRetries=30
for ((i = 1 ; i <= numRetries ; i++)); do
echo "Attempt ${i} of ${numRetries}"
# shellcheck disable=SC2086
if [[ $(is_crate_version_uploaded $crate_name $expectedCrateVersion) = True ]] ; then
echo "Found ${crate_name} version ${expectedCrateVersion} on crates.io"
break
fi
echo "Did not find ${crate_name} version ${expectedCrateVersion} on crates.io. Sleeping for 2 seconds."
sleep 2
done
done done
exit 0 exit 0
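
The publish-then-poll pattern above can be tried standalone; a sketch with placeholder crate/version values:

# Hypothetical poll mirroring is_crate_version_uploaded above.
name=solana-sdk
version=0.16.1
for i in $(seq 1 30); do
  if curl -s "https://crates.io/api/v1/crates/${name}/${version}" \
      | python3 -c "import sys,json; print('version' in json.load(sys.stdin))" \
      | grep -q True; then
    echo "found ${name} ${version} on crates.io"
    break
  fi
  echo "attempt ${i}/30: not visible yet; sleeping 2s"
  sleep 2
done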

View File

@@ -45,9 +45,7 @@ beta)
CHANNEL_BRANCH=$BETA_CHANNEL CHANNEL_BRANCH=$BETA_CHANNEL
;; ;;
stable) stable)
# Set to whatever branch 'testnet' is on. CHANNEL_BRANCH=$STABLE_CHANNEL
# TODO: Revert to $STABLE_CHANNEL for TdS
CHANNEL_BRANCH=$BETA_CHANNEL
;; ;;
*) *)
echo "Error: Invalid PUBLISH_CHANNEL=$PUBLISH_CHANNEL" echo "Error: Invalid PUBLISH_CHANNEL=$PUBLISH_CHANNEL"
@@ -55,7 +53,7 @@ stable)
;; ;;
esac esac
if [[ $CI_BRANCH != "$CHANNEL_BRANCH" ]]; then if [[ $BUILDKITE_BRANCH != "$CHANNEL_BRANCH" ]]; then
( (
cat <<EOF cat <<EOF
steps: steps:

View File

@@ -3,20 +3,8 @@ set -e
cd "$(dirname "$0")/.." cd "$(dirname "$0")/.."
if [[ -n $APPVEYOR ]]; then
# Bootstrap rust build environment
source ci/env.sh
source ci/rust-version.sh
appveyor DownloadFile https://win.rustup.rs/ -FileName rustup-init.exe
./rustup-init -yv --default-toolchain $rust_stable --default-host x86_64-pc-windows-msvc
export PATH="$PATH:$USERPROFILE/.cargo/bin"
rustc -vV
cargo -vV
fi
DRYRUN= DRYRUN=
if [[ -z $CI_BRANCH ]]; then if [[ -z $BUILDKITE_BRANCH ]]; then
DRYRUN="echo" DRYRUN="echo"
CHANNEL=unknown CHANNEL=unknown
fi fi
@@ -24,9 +12,12 @@ fi
eval "$(ci/channel-info.sh)" eval "$(ci/channel-info.sh)"
TAG= TAG=
if [[ -n "$CI_TAG" ]]; then if [[ -n "$BUILDKITE_TAG" ]]; then
CHANNEL_OR_TAG=$CI_TAG CHANNEL_OR_TAG=$BUILDKITE_TAG
TAG="$CI_TAG" TAG="$BUILDKITE_TAG"
elif [[ -n "$TRIGGERED_BUILDKITE_TAG" ]]; then
CHANNEL_OR_TAG=$TRIGGERED_BUILDKITE_TAG
TAG="$TRIGGERED_BUILDKITE_TAG"
else else
CHANNEL_OR_TAG=$CHANNEL CHANNEL_OR_TAG=$CHANNEL
fi fi
@@ -36,17 +27,12 @@ if [[ -z $CHANNEL_OR_TAG ]]; then
exit 1 exit 1
fi fi
PERF_LIBS=false case "$(uname)" in
case "$CI_OS_NAME" in Darwin)
osx)
TARGET=x86_64-apple-darwin TARGET=x86_64-apple-darwin
;; ;;
linux) Linux)
TARGET=x86_64-unknown-linux-gnu TARGET=x86_64-unknown-linux-gnu
PERF_LIBS=true
;;
windows)
TARGET=x86_64-pc-windows-msvc
;; ;;
*) *)
TARGET=unknown-unknown-unknown TARGET=unknown-unknown-unknown
@@ -70,21 +56,18 @@ echo --- Creating tarball
source ci/rust-version.sh stable source ci/rust-version.sh stable
scripts/cargo-install-all.sh +"$rust_stable" solana-release scripts/cargo-install-all.sh +"$rust_stable" solana-release
if $PERF_LIBS; then rm -rf target/perf-libs
rm -rf target/perf-libs ./fetch-perf-libs.sh
./fetch-perf-libs.sh mkdir solana-release/target
mkdir solana-release/target cp -a target/perf-libs solana-release/target/
cp -a target/perf-libs solana-release/target/
# shellcheck source=/dev/null
source ./target/perf-libs/env.sh
(
cd validator
cargo +"$rust_stable" install --path . --features=cuda --root ../solana-release-cuda
)
cp solana-release-cuda/bin/solana-validator solana-release/bin/solana-validator-cuda
fi
# shellcheck source=/dev/null
source ./target/perf-libs/env.sh
(
cd validator
cargo +"$rust_stable" install --path . --features=cuda --root ../solana-release-cuda
)
cp solana-release-cuda/bin/solana-validator solana-release/bin/solana-validator-cuda
cp -a scripts multinode-demo solana-release/ cp -a scripts multinode-demo solana-release/
# Add a wrapper script for validator.sh # Add a wrapper script for validator.sh
@@ -105,64 +88,41 @@ EOF
set -e set -e
cd "$(dirname "$0")"/.. cd "$(dirname "$0")"/..
export USE_INSTALL=1 export USE_INSTALL=1
exec multinode-demo/clear-config.sh "$@" exec multinode-demo/clear-validator-config.sh "$@"
EOF EOF
chmod +x solana-release/bin/clear-config.sh chmod +x solana-release/bin/clear-config.sh
tar jvcf solana-release-$TARGET.tar.bz2 solana-release/ tar jvcf solana-release-$TARGET.tar.bz2 solana-release/
cp solana-release/bin/solana-install-init solana-install-init-$TARGET cp solana-release/bin/solana-install solana-install-$TARGET
) )
# Metrics tarball is platform agnostic, only publish it from Linux echo --- Saving build artifacts
MAYBE_METRICS_TARBALL= source ci/upload-ci-artifact.sh
if [[ "$CI_OS_NAME" = linux ]]; then upload-ci-artifact solana-release-$TARGET.tar.bz2
metrics/create-metrics-tarball.sh
MAYBE_METRICS_TARBALL=solana-metrics.tar.bz2 if [[ -n $DO_NOT_PUBLISH_TAR ]]; then
echo Skipped due to DO_NOT_PUBLISH_TAR
exit 0
fi fi
source ci/upload-ci-artifact.sh for file in solana-release-$TARGET.tar.bz2 solana-install-$TARGET; do
echo --- AWS S3 Store: $file
(
set -x
$DRYRUN docker run \
--rm \
--env AWS_ACCESS_KEY_ID \
--env AWS_SECRET_ACCESS_KEY \
--volume "$PWD:/solana" \
eremite/aws-cli:2018.12.18 \
/usr/bin/s3cmd --acl-public put /solana/"$file" s3://release.solana.com/"$CHANNEL_OR_TAG"/"$file"
for file in solana-release-$TARGET.tar.bz2 solana-install-init-"$TARGET"* $MAYBE_METRICS_TARBALL; do echo Published to:
upload-ci-artifact "$file" $DRYRUN ci/format-url.sh http://release.solana.com/"$CHANNEL_OR_TAG"/"$file"
)
if [[ -n $DO_NOT_PUBLISH_TAR ]]; then if [[ -n $TAG ]]; then
echo "Skipped $file due to DO_NOT_PUBLISH_TAR" ci/upload-github-release-asset.sh $file
continue
fi
if [[ -n $BUILDKITE ]]; then
echo --- AWS S3 Store: "$file"
(
set -x
$DRYRUN docker run \
--rm \
--env AWS_ACCESS_KEY_ID \
--env AWS_SECRET_ACCESS_KEY \
--volume "$PWD:/solana" \
eremite/aws-cli:2018.12.18 \
/usr/bin/s3cmd --acl-public put /solana/"$file" s3://release.solana.com/"$CHANNEL_OR_TAG"/"$file"
echo Published to:
$DRYRUN ci/format-url.sh http://release.solana.com/"$CHANNEL_OR_TAG"/"$file"
)
if [[ -n $TAG ]]; then
ci/upload-github-release-asset.sh "$file"
fi
elif [[ -n $TRAVIS ]]; then
# .travis.yml uploads everything in the travis-s3-upload/ directory to release.solana.com
mkdir -p travis-s3-upload/"$CHANNEL_OR_TAG"
cp -v "$file" travis-s3-upload/"$CHANNEL_OR_TAG"/
if [[ -n $TAG ]]; then
# .travis.yaml uploads everything in the travis-release-upload/ directory to
# the associated Github Release
mkdir -p travis-release-upload/
cp -v "$file" travis-release-upload/
fi
elif [[ -n $APPVEYOR ]]; then
# Add artifacts for .appveyor.yml to upload
appveyor PushArtifact "$file" -FileName "$CHANNEL_OR_TAG"/"$file"
fi fi
done done
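
Once published, a consumer can pull a channel tarball straight from the bucket; a hedged sketch using the URL layout shown above:

# edge channel, Linux target; both values are illustrative.
curl -sSfL http://release.solana.com/edge/solana-release-x86_64-unknown-linux-gnu.tar.bz2 \
  | tar xjf -
ls solana-release/bin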

View File

@@ -13,14 +13,11 @@
# $ source ci/rust-version.sh # $ source ci/rust-version.sh
# #
stable_version=1.35.0 export rust_stable=1.34.0
nightly_version=2019-06-20 export rust_stable_docker_image=solanalabs/rust:1.34.0
export rust_stable="$stable_version" export rust_nightly=nightly-2019-05-01
export rust_stable_docker_image=solanalabs/rust:"$stable_version" export rust_nightly_docker_image=solanalabs/rust-nightly:2019-05-01
export rust_nightly=nightly-"$nightly_version"
export rust_nightly_docker_image=solanalabs/rust-nightly:"$nightly_version"
[[ -z $1 ]] || ( [[ -z $1 ]] || (
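
Consumers pin the toolchain by sourcing this file and passing the exported name to cargo, a pattern visible throughout this diff; a minimal sketch:

source ci/rust-version.sh stable
cargo +"$rust_stable" build --all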

View File

@@ -30,8 +30,8 @@ set -o pipefail
export RUST_BACKTRACE=1 export RUST_BACKTRACE=1
UPLOAD_METRICS="" UPLOAD_METRICS=""
TARGET_BRANCH=$CI_BRANCH TARGET_BRANCH=$BUILDKITE_BRANCH
if [[ -z $CI_BRANCH ]] || [[ -n $CI_PULL_REQUEST ]]; then if [[ -z $BUILDKITE_BRANCH ]] || ./ci/is-pr.sh; then
TARGET_BRANCH=$EDGE_CHANNEL TARGET_BRANCH=$EDGE_CHANNEL
else else
UPLOAD_METRICS="upload" UPLOAD_METRICS="upload"
@@ -40,10 +40,6 @@ fi
BENCH_FILE=bench_output.log BENCH_FILE=bench_output.log
BENCH_ARTIFACT=current_bench_results.log BENCH_ARTIFACT=current_bench_results.log
# Clear the C dependency files; if a dependency moves, these files are not regenerated
test -d target/debug/bpf && find target/debug/bpf -name '*.d' -delete
test -d target/release/bpf && find target/release/bpf -name '*.d' -delete
# Ensure all dependencies are built # Ensure all dependencies are built
_ cargo +$rust_nightly build --all --release _ cargo +$rust_nightly build --all --release

View File

@@ -5,37 +5,15 @@ cd "$(dirname "$0")/.."
source ci/_ source ci/_
source ci/rust-version.sh stable source ci/rust-version.sh stable
source ci/rust-version.sh nightly
export RUST_BACKTRACE=1 export RUST_BACKTRACE=1
export RUSTFLAGS="-D warnings" export RUSTFLAGS="-D warnings"
do_bpf_check() {
_ cargo +"$rust_stable" fmt --all -- --check
_ cargo +"$rust_nightly" clippy --all -- --version
_ cargo +"$rust_nightly" clippy --all -- --deny=warnings
_ cargo +"$rust_stable" audit
}
(
(
cd sdk/bpf/rust/rust-utils
do_bpf_check
)
for project in programs/bpf/rust/*/ ; do
(
cd "$project"
do_bpf_check
)
done
)
_ cargo +"$rust_stable" fmt --all -- --check _ cargo +"$rust_stable" fmt --all -- --check
_ cargo +"$rust_stable" clippy --all -- --version _ cargo +"$rust_stable" clippy --all -- --version
_ cargo +"$rust_stable" clippy --all -- --deny=warnings _ cargo +"$rust_stable" clippy --all -- --deny=warnings
_ cargo +"$rust_stable" audit _ cargo +"$rust_stable" audit
_ ci/nits.sh _ ci/nits.sh
_ ci/order-crates-for-publishing.py
_ book/build.sh _ book/build.sh
echo --- ok echo --- ok

View File

@@ -25,7 +25,7 @@ source scripts/ulimit-n.sh
scripts/coverage.sh scripts/coverage.sh
report=coverage-"${CI_COMMIT:0:9}".tar.gz report=coverage-"${BUILDKITE_COMMIT:0:9}".tar.gz
mv target/cov/report.tar.gz "$report" mv target/cov/report.tar.gz "$report"
upload-ci-artifact "$report" upload-ci-artifact "$report"
annotate --style success --context lcov-report \ annotate --style success --context lcov-report \
@@ -39,5 +39,5 @@ else
bash <(curl -s https://codecov.io/bash) -X gcov -f target/cov/lcov.info bash <(curl -s https://codecov.io/bash) -X gcov -f target/cov/lcov.info
annotate --style success --context codecov.io \ annotate --style success --context codecov.io \
"CodeCov report: https://codecov.io/github/solana-labs/solana/commit/${CI_COMMIT:0:9}" "CodeCov report: https://codecov.io/github/solana-labs/solana/commit/${BUILDKITE_COMMIT:0:9}"
fi fi

View File

@@ -19,14 +19,7 @@ source scripts/ulimit-n.sh
# Clear cached json keypair files # Clear cached json keypair files
rm -rf "$HOME/.config/solana" rm -rf "$HOME/.config/solana"
# Clear the C dependency files; if a dependency moves, these files are not regenerated # Run the appropriate test based on entrypoint
test -d target/debug/bpf && find target/debug/bpf -name '*.d' -delete
test -d target/release/bpf && find target/release/bpf -name '*.d' -delete
# Clear the BPF sysroot files, they are not automatically rebuilt
rm -rf target/xargo # Issue #3105
# Run the appropriate test based on entrypoint
testName=$(basename "$0" .sh) testName=$(basename "$0" .sh)
case $testName in case $testName in
test-stable) test-stable)
@@ -42,10 +35,8 @@ test-stable-perf)
.rs$ \ .rs$ \
Cargo.lock$ \ Cargo.lock$ \
Cargo.toml$ \ Cargo.toml$ \
^ci/test-stable-perf.sh \ ci/test-stable-perf.sh \
^ci/test-stable.sh \ ci/test-stable.sh \
^core/build.rs \
^fetch-perf-libs.sh \
^programs/ \ ^programs/ \
^sdk/ \ ^sdk/ \
|| { || {
@@ -61,8 +52,10 @@ test-stable-perf)
--no-default-features --features=bpf_c,bpf_rust --no-default-features --features=bpf_c,bpf_rust
# Run root package tests with these features # Run root package tests with these features
ROOT_FEATURES= ROOT_FEATURES=erasure,chacha
if [[ $(uname) = Linux ]]; then if [[ $(uname) = Darwin ]]; then
./build-perf-libs.sh
else
# Enable persistence mode to keep the CUDA kernel driver loaded, avoiding a # Enable persistence mode to keep the CUDA kernel driver loaded, avoiding a
# lengthy and unexpected delay the first time CUDA is involved when the driver # lengthy and unexpected delay the first time CUDA is involved when the driver
# is not yet loaded. # is not yet loaded.
@@ -72,7 +65,7 @@ test-stable-perf)
./fetch-perf-libs.sh ./fetch-perf-libs.sh
# shellcheck source=/dev/null # shellcheck source=/dev/null
source ./target/perf-libs/env.sh source ./target/perf-libs/env.sh
ROOT_FEATURES=cuda ROOT_FEATURES=$ROOT_FEATURES,cuda
fi fi
# Run root package library tests # Run root package library tests
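
With ROOT_FEATURES resolved, the root-package tests run against that feature set; a hedged sketch of the pattern (the exact cargo flags in test-stable.sh may differ):

source ci/rust-version.sh stable
ROOT_FEATURES=cuda   # empty on non-Linux, per the left-hand branch above
cargo +"$rust_stable" test --features="$ROOT_FEATURES"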

View File

@@ -311,9 +311,6 @@ if ! $skipStart; then
if [[ -n $NO_LEDGER_VERIFY ]]; then if [[ -n $NO_LEDGER_VERIFY ]]; then
args+=(-o noLedgerVerify) args+=(-o noLedgerVerify)
fi fi
if [[ -n $NO_INSTALL_CHECK ]]; then
args+=(-o noInstallCheck)
fi
if [[ -n $maybeHashesPerTick ]]; then if [[ -n $maybeHashesPerTick ]]; then
# shellcheck disable=SC2206 # Do not want to quote $maybeHashesPerTick # shellcheck disable=SC2206 # Do not want to quote $maybeHashesPerTick
args+=($maybeHashesPerTick) args+=($maybeHashesPerTick)
@@ -327,11 +324,10 @@ if ! $skipStart; then
args+=(-F) args+=(-F)
fi fi
if $deployUpdateManifest; then # shellcheck disable=SC2154 # SOLANA_INSTALL_UPDATE_MANIFEST_KEYPAIR_x86_64_unknown_linux_gnu comes from .buildkite/env/
rm -f update_manifest_keypair.json if $deployUpdateManifest && [[ -n $SOLANA_INSTALL_UPDATE_MANIFEST_KEYPAIR_x86_64_unknown_linux_gnu ]]; then
args+=(--deploy-update linux) echo "$SOLANA_INSTALL_UPDATE_MANIFEST_KEYPAIR_x86_64_unknown_linux_gnu" > update_manifest_keypair.json
args+=(--deploy-update osx) args+=(-i update_manifest_keypair.json)
args+=(--deploy-update windows)
fi fi
# shellcheck disable=SC2086 # Don't want to double quote the $maybeXYZ variables # shellcheck disable=SC2086 # Don't want to double quote the $maybeXYZ variables

View File

@@ -184,7 +184,7 @@ if [[ -n $TESTNET_TAG ]]; then
CHANNEL_OR_TAG=$TESTNET_TAG CHANNEL_OR_TAG=$TESTNET_TAG
else else
if [[ $CI_BRANCH != "$CHANNEL_BRANCH" ]]; then if [[ $BUILDKITE_BRANCH != "$CHANNEL_BRANCH" ]]; then
( (
cat <<EOF cat <<EOF
steps: steps:
@@ -212,7 +212,6 @@ sanity() {
testnet-edge) testnet-edge)
( (
set -x set -x
NO_INSTALL_CHECK=1 \
NO_LEDGER_VERIFY=1 \ NO_LEDGER_VERIFY=1 \
ci/testnet-sanity.sh edge-testnet-solana-com ec2 us-west-1a ci/testnet-sanity.sh edge-testnet-solana-com ec2 us-west-1a
) )
@@ -229,7 +228,6 @@ sanity() {
testnet-beta) testnet-beta)
( (
set -x set -x
NO_INSTALL_CHECK=1 \
NO_LEDGER_VERIFY=1 \ NO_LEDGER_VERIFY=1 \
ci/testnet-sanity.sh beta-testnet-solana-com ec2 us-west-1a ci/testnet-sanity.sh beta-testnet-solana-com ec2 us-west-1a
) )
@@ -325,7 +323,8 @@ deploy() {
${skipCreate:+-e} \ ${skipCreate:+-e} \
${skipStart:+-s} \ ${skipStart:+-s} \
${maybeStop:+-S} \ ${maybeStop:+-S} \
${maybeDelete:+-D} ${maybeDelete:+-D} \
--hashes-per-tick auto
) )
;; ;;
testnet-edge-perf) testnet-edge-perf)
@@ -339,7 +338,8 @@ deploy() {
${skipCreate:+-e} \ ${skipCreate:+-e} \
${skipStart:+-s} \ ${skipStart:+-s} \
${maybeStop:+-S} \ ${maybeStop:+-S} \
${maybeDelete:+-D} ${maybeDelete:+-D} \
--hashes-per-tick auto
) )
;; ;;
testnet-beta) testnet-beta)
@@ -351,7 +351,8 @@ deploy() {
${skipCreate:+-e} \ ${skipCreate:+-e} \
${skipStart:+-s} \ ${skipStart:+-s} \
${maybeStop:+-S} \ ${maybeStop:+-S} \
${maybeDelete:+-D} ${maybeDelete:+-D} \
--hashes-per-tick auto
) )
;; ;;
testnet-beta-perf) testnet-beta-perf)
@@ -365,7 +366,8 @@ deploy() {
${skipCreate:+-e} \ ${skipCreate:+-e} \
${skipStart:+-s} \ ${skipStart:+-s} \
${maybeStop:+-S} \ ${maybeStop:+-S} \
${maybeDelete:+-D} ${maybeDelete:+-D} \
--hashes-per-tick auto
) )
;; ;;
testnet) testnet)
@@ -409,7 +411,8 @@ deploy() {
${skipCreate:+-e} \ ${skipCreate:+-e} \
${skipStart:+-s} \ ${skipStart:+-s} \
${maybeStop:+-S} \ ${maybeStop:+-S} \
${maybeDelete:+-D} ${maybeDelete:+-D} \
--hashes-per-tick auto
) )
;; ;;
testnet-demo) testnet-demo)
@@ -429,7 +432,8 @@ deploy() {
${skipCreate:+-e} \ ${skipCreate:+-e} \
${maybeSkipStart:+-s} \ ${maybeSkipStart:+-s} \
${maybeStop:+-S} \ ${maybeStop:+-S} \
${maybeDelete:+-D} ${maybeDelete:+-D} \
--hashes-per-tick auto
if [[ -n $GCE_LOW_QUOTA_NODE_COUNT ]]; then if [[ -n $GCE_LOW_QUOTA_NODE_COUNT ]]; then
# shellcheck disable=SC2068 # shellcheck disable=SC2068
@@ -440,7 +444,8 @@ deploy() {
${skipCreate:+-e} \ ${skipCreate:+-e} \
${skipStart:+-s} \ ${skipStart:+-s} \
${maybeStop:+-S} \ ${maybeStop:+-S} \
${maybeDelete:+-D} ${maybeDelete:+-D} \
--hashes-per-tick auto
fi fi
) )
;; ;;

View File

@@ -64,7 +64,6 @@ for zone in "$@"; do
${NO_LEDGER_VERIFY:+-o noLedgerVerify} \ ${NO_LEDGER_VERIFY:+-o noLedgerVerify} \
${NO_VALIDATOR_SANITY:+-o noValidatorSanity} \ ${NO_VALIDATOR_SANITY:+-o noValidatorSanity} \
${REJECT_EXTRA_NODES:+-o rejectExtraNodes} \ ${REJECT_EXTRA_NODES:+-o rejectExtraNodes} \
${NO_INSTALL_CHECK:+-o noInstallCheck} \
$zone || ok=false $zone || ok=false
net/net.sh logs net/net.sh logs

View File

@@ -8,6 +8,8 @@
# #
set -e set -e
REPO_SLUG=solana-labs/solana
if [[ -z $1 ]]; then if [[ -z $1 ]]; then
echo No files specified echo No files specified
exit 1 exit 1
@@ -18,30 +20,31 @@ if [[ -z $GITHUB_TOKEN ]]; then
exit 1 exit 1
fi fi
if [[ -z $CI_TAG ]]; then if [[ -n $BUILDKITE_TAG ]]; then
echo Error: CI_TAG not defined TAG=$BUILDKITE_TAG
exit 1 elif [[ -n $TRIGGERED_BUILDKITE_TAG ]]; then
TAG=$TRIGGERED_BUILDKITE_TAG
fi fi
if [[ -z $CI_REPO_SLUG ]]; then if [[ -z $TAG ]]; then
echo Error: CI_REPO_SLUG not defined echo Error: TAG not defined
exit 1 exit 1
fi fi
releaseId=$( \ releaseId=$( \
curl -s "https://api.github.com/repos/$CI_REPO_SLUG/releases/tags/$CI_TAG" \ curl -s "https://api.github.com/repos/$REPO_SLUG/releases/tags/$TAG" \
| grep -m 1 \"id\": \ | grep -m 1 \"id\": \
| sed -ne 's/^[^0-9]*\([0-9]*\),$/\1/p' \ | sed -ne 's/^[^0-9]*\([0-9]*\),$/\1/p' \
) )
echo "Github release id for $CI_TAG is $releaseId" echo "Github release id for $TAG is $releaseId"
for file in "$@"; do for file in "$@"; do
echo "--- Uploading $file to tag $CI_TAG of $CI_REPO_SLUG" echo "--- Uploading $file to tag $TAG of $REPO_SLUG"
curl \ curl \
--data-binary @"$file" \ --data-binary @"$file" \
-H "Authorization: token $GITHUB_TOKEN" \ -H "Authorization: token $GITHUB_TOKEN" \
-H "Content-Type: application/octet-stream" \ -H "Content-Type: application/octet-stream" \
"https://uploads.github.com/repos/$CI_REPO_SLUG/releases/$releaseId/assets?name=$(basename "$file")" "https://uploads.github.com/repos/$REPO_SLUG/releases/$releaseId/assets?name=$(basename "$file")"
echo echo
done done
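
A sketch of driving this uploader by hand, with placeholder values (a GitHub release for the tag must already exist):

export GITHUB_TOKEN=<token>
export CI_TAG=v0.16.1
export CI_REPO_SLUG=solana-labs/solana
ci/upload-github-release-asset.sh solana-release-x86_64-unknown-linux-gnu.tar.bz2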

View File

@@ -1,6 +1,6 @@
[package] [package]
name = "solana-client" name = "solana-client"
version = "0.16.1" version = "0.15.0"
description = "Solana Client" description = "Solana Client"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@@ -11,18 +11,16 @@ edition = "2018"
[dependencies] [dependencies]
bincode = "1.1.4" bincode = "1.1.4"
bs58 = "0.2.0" bs58 = "0.2.0"
jsonrpc-core = "12.0.0"
log = "0.4.2" log = "0.4.2"
rand = "0.6.5" jsonrpc-core = "10.1.0"
rayon = "1.1.0" reqwest = "0.9.17"
reqwest = "0.9.18" serde = "1.0.89"
serde = "1.0.92" serde_derive = "1.0.91"
serde_derive = "1.0.92"
serde_json = "1.0.39" serde_json = "1.0.39"
solana-netutil = { path = "../netutil", version = "0.16.1" } solana-netutil = { path = "../netutil", version = "0.15.0" }
solana-sdk = { path = "../sdk", version = "0.16.1" } solana-sdk = { path = "../sdk", version = "0.15.0" }
[dev-dependencies] [dev-dependencies]
jsonrpc-core = "12.0.0" jsonrpc-core = "10.1.0"
jsonrpc-http-server = "12.0.0" jsonrpc-http-server = "10.1.0"
solana-logger = { path = "../logger", version = "0.16.1" } solana-logger = { path = "../logger", version = "0.15.0" }

View File

@@ -60,7 +60,6 @@ impl GenericRpcClientRequest for MockRpcClientRequest {
serde_json::to_value(response).unwrap() serde_json::to_value(response).unwrap()
} }
RpcRequest::GetTransactionCount => Value::Number(Number::from(1234)), RpcRequest::GetTransactionCount => Value::Number(Number::from(1234)),
RpcRequest::GetSlot => Value::Number(Number::from(0)),
RpcRequest::SendTransaction => Value::String(SIGNATURE.to_string()), RpcRequest::SendTransaction => Value::String(SIGNATURE.to_string()),
_ => Value::Null, _ => Value::Null,
}; };

View File

@@ -36,18 +36,7 @@ pub fn sample_txs<T>(
total_elapsed = start_time.elapsed(); total_elapsed = start_time.elapsed();
let elapsed = now.elapsed(); let elapsed = now.elapsed();
now = Instant::now(); now = Instant::now();
let mut txs; let mut txs = client.get_transaction_count().expect("transaction count");
match client.get_transaction_count() {
Err(e) => {
// ThinClient with multiple options should pick a better one now.
info!("Couldn't get transaction count {:?}", e);
sleep(Duration::from_secs(sample_period));
continue;
}
Ok(tx_count) => {
txs = tx_count;
}
}
if txs < last_txs { if txs < last_txs {
info!("Expected txs({}) >= last_txs({})", txs, last_txs); info!("Expected txs({}) >= last_txs({})", txs, last_txs);

View File

@@ -75,25 +75,6 @@ impl RpcClient {
Ok(result) Ok(result)
} }
pub fn get_slot(&self) -> io::Result<u64> {
let response = self
.client
.send(&RpcRequest::GetSlot, None, 0)
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("GetSlot request failure: {:?}", err),
)
})?;
serde_json::from_value(response).map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("GetSlot parse failure: {}", err),
)
})
}
pub fn send_and_confirm_transaction<T: KeypairUtil>( pub fn send_and_confirm_transaction<T: KeypairUtil>(
&self, &self,
transaction: &mut Transaction, transaction: &mut Transaction,

View File

@@ -12,7 +12,6 @@ pub enum RpcRequest {
GetNumBlocksSinceSignatureConfirmation, GetNumBlocksSinceSignatureConfirmation,
GetRecentBlockhash, GetRecentBlockhash,
GetSignatureStatus, GetSignatureStatus,
GetSlot,
GetSlotLeader, GetSlotLeader,
GetEpochVoteAccounts, GetEpochVoteAccounts,
GetStorageBlockhash, GetStorageBlockhash,
@@ -40,7 +39,6 @@ impl RpcRequest {
} }
RpcRequest::GetRecentBlockhash => "getRecentBlockhash", RpcRequest::GetRecentBlockhash => "getRecentBlockhash",
RpcRequest::GetSignatureStatus => "getSignatureStatus", RpcRequest::GetSignatureStatus => "getSignatureStatus",
RpcRequest::GetSlot => "getSlot",
RpcRequest::GetSlotLeader => "getSlotLeader", RpcRequest::GetSlotLeader => "getSlotLeader",
RpcRequest::GetEpochVoteAccounts => "getEpochVoteAccounts", RpcRequest::GetEpochVoteAccounts => "getEpochVoteAccounts",
RpcRequest::GetStorageBlockhash => "getStorageBlockhash", RpcRequest::GetStorageBlockhash => "getStorageBlockhash",
@@ -106,10 +104,6 @@ mod tests {
let request = test_request.build_request_json(1, None); let request = test_request.build_request_json(1, None);
assert_eq!(request["method"], "getRecentBlockhash"); assert_eq!(request["method"], "getRecentBlockhash");
let test_request = RpcRequest::GetSlot;
let request = test_request.build_request_json(1, None);
assert_eq!(request["method"], "getSlot");
let test_request = RpcRequest::GetTransactionCount; let test_request = RpcRequest::GetTransactionCount;
let request = test_request.build_request_json(1, None); let request = test_request.build_request_json(1, None);
assert_eq!(request["method"], "getTransactionCount"); assert_eq!(request["method"], "getTransactionCount");
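
The getSlot method exercised by this test can also be hit directly over JSON-RPC; a hedged curl sketch (8899 is the conventional local RPC port; adjust as needed):

curl -s http://127.0.0.1:8899 -X POST -H 'Content-Type: application/json' \
  -d '{"jsonrpc":"2.0","id":1,"method":"getSlot","params":[]}'
# expected shape: {"jsonrpc":"2.0","result":<slot>,"id":1}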

View File

@@ -6,7 +6,6 @@
use crate::rpc_client::RpcClient; use crate::rpc_client::RpcClient;
use bincode::{serialize_into, serialized_size}; use bincode::{serialize_into, serialized_size};
use log::*; use log::*;
use solana_sdk::account::Account;
use solana_sdk::client::{AsyncClient, Client, SyncClient}; use solana_sdk::client::{AsyncClient, Client, SyncClient};
use solana_sdk::fee_calculator::FeeCalculator; use solana_sdk::fee_calculator::FeeCalculator;
use solana_sdk::hash::Hash; use solana_sdk::hash::Hash;
@@ -16,100 +15,17 @@ use solana_sdk::packet::PACKET_DATA_SIZE;
use solana_sdk::pubkey::Pubkey; use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil, Signature}; use solana_sdk::signature::{Keypair, KeypairUtil, Signature};
use solana_sdk::system_instruction; use solana_sdk::system_instruction;
use solana_sdk::timing::duration_as_ms;
use solana_sdk::transaction::{self, Transaction}; use solana_sdk::transaction::{self, Transaction};
use solana_sdk::transport::Result as TransportResult; use solana_sdk::transport::Result as TransportResult;
use std::io; use std::io;
use std::net::{SocketAddr, UdpSocket}; use std::net::{SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; use std::time::Duration;
use std::sync::RwLock;
use std::time::{Duration, Instant};
struct ClientOptimizer {
cur_index: AtomicUsize,
experiment_index: AtomicUsize,
experiment_done: AtomicBool,
times: RwLock<Vec<u64>>,
num_clients: usize,
}
fn min_index(array: &[u64]) -> (u64, usize) {
let mut min_time = std::u64::MAX;
let mut min_index = 0;
for (i, time) in array.iter().enumerate() {
if *time < min_time {
min_time = *time;
min_index = i;
}
}
(min_time, min_index)
}
impl ClientOptimizer {
fn new(num_clients: usize) -> Self {
Self {
cur_index: AtomicUsize::new(0),
experiment_index: AtomicUsize::new(0),
experiment_done: AtomicBool::new(false),
times: RwLock::new(vec![std::u64::MAX; num_clients]),
num_clients,
}
}
fn experiment(&self) -> usize {
if self.experiment_index.load(Ordering::Relaxed) < self.num_clients {
let old = self.experiment_index.fetch_add(1, Ordering::Relaxed);
if old < self.num_clients {
old
} else {
self.best()
}
} else {
self.best()
}
}
fn report(&self, index: usize, time_ms: u64) {
if self.num_clients > 1
&& (!self.experiment_done.load(Ordering::Relaxed) || time_ms == std::u64::MAX)
{
trace!(
"report {} with {} exp: {}",
index,
time_ms,
self.experiment_index.load(Ordering::Relaxed)
);
self.times.write().unwrap()[index] = time_ms;
if index == (self.num_clients - 1) || time_ms == std::u64::MAX {
let times = self.times.read().unwrap();
let (min_time, min_index) = min_index(&times);
trace!(
"done experimenting min: {} time: {} times: {:?}",
min_index,
min_time,
times
);
// Only 1 thread should grab the num_clients-1 index, so this should be ok.
self.cur_index.store(min_index, Ordering::Relaxed);
self.experiment_done.store(true, Ordering::Relaxed);
}
}
}
fn best(&self) -> usize {
self.cur_index.load(Ordering::Relaxed)
}
}
/// An object for querying and sending transactions to the network. /// An object for querying and sending transactions to the network.
pub struct ThinClient { pub struct ThinClient {
transactions_addr: SocketAddr,
transactions_socket: UdpSocket, transactions_socket: UdpSocket,
transactions_addrs: Vec<SocketAddr>, rpc_client: RpcClient,
rpc_clients: Vec<RpcClient>,
optimizer: ClientOptimizer,
} }
impl ThinClient { impl ThinClient {
@@ -143,39 +59,12 @@ impl ThinClient {
rpc_client: RpcClient, rpc_client: RpcClient,
) -> Self { ) -> Self {
Self { Self {
rpc_client,
transactions_addr,
transactions_socket, transactions_socket,
transactions_addrs: vec![transactions_addr],
rpc_clients: vec![rpc_client],
optimizer: ClientOptimizer::new(0),
} }
} }
pub fn new_from_addrs(
transactions_addrs: Vec<SocketAddr>,
transactions_socket: UdpSocket,
rpc_sockets: Vec<SocketAddr>,
) -> Self {
assert!(!transactions_addrs.is_empty());
assert!(!rpc_sockets.is_empty());
assert_eq!(rpc_sockets.len(), transactions_addrs.len());
let rpc_len = rpc_sockets.len();
let rpc_clients: Vec<_> = rpc_sockets.into_iter().map(RpcClient::new_socket).collect();
Self {
transactions_addrs,
transactions_socket,
rpc_clients,
optimizer: ClientOptimizer::new(rpc_len),
}
}
fn transactions_addr(&self) -> &SocketAddr {
&self.transactions_addrs[self.optimizer.best()]
}
fn rpc_client(&self) -> &RpcClient {
&self.rpc_clients[self.optimizer.best()]
}
/// Retry a sending a signed Transaction to the server for processing. /// Retry a sending a signed Transaction to the server for processing.
pub fn retry_transfer_until_confirmed( pub fn retry_transfer_until_confirmed(
&self, &self,
@@ -211,19 +100,15 @@ impl ThinClient {
serialize_into(&mut wr, &transaction) serialize_into(&mut wr, &transaction)
.expect("serialize Transaction in pub fn transfer_signed"); .expect("serialize Transaction in pub fn transfer_signed");
self.transactions_socket self.transactions_socket
.send_to(&buf[..], &self.transactions_addr())?; .send_to(&buf[..], &self.transactions_addr)?;
if self if self
.poll_for_signature_confirmation(&transaction.signatures[0], min_confirmed_blocks) .poll_for_signature_confirmation(&transaction.signatures[0], min_confirmed_blocks)
.is_ok() .is_ok()
{ {
return Ok(transaction.signatures[0]); return Ok(transaction.signatures[0]);
} }
info!( info!("{} tries failed transfer to {}", x, self.transactions_addr);
"{} tries failed transfer to {}", let (blockhash, _fee_calculator) = self.rpc_client.get_recent_blockhash()?;
x,
self.transactions_addr()
);
let (blockhash, _fee_calculator) = self.rpc_client().get_recent_blockhash()?;
transaction.sign(keypairs, blockhash); transaction.sign(keypairs, blockhash);
} }
Err(io::Error::new( Err(io::Error::new(
@@ -238,40 +123,39 @@ impl ThinClient {
polling_frequency: &Duration, polling_frequency: &Duration,
timeout: &Duration, timeout: &Duration,
) -> io::Result<u64> { ) -> io::Result<u64> {
self.rpc_client() self.rpc_client
.poll_balance_with_timeout(pubkey, polling_frequency, timeout) .poll_balance_with_timeout(pubkey, polling_frequency, timeout)
} }
pub fn poll_get_balance(&self, pubkey: &Pubkey) -> io::Result<u64> { pub fn poll_get_balance(&self, pubkey: &Pubkey) -> io::Result<u64> {
self.rpc_client().poll_get_balance(pubkey) self.rpc_client.poll_get_balance(pubkey)
} }
pub fn wait_for_balance(&self, pubkey: &Pubkey, expected_balance: Option<u64>) -> Option<u64> { pub fn wait_for_balance(&self, pubkey: &Pubkey, expected_balance: Option<u64>) -> Option<u64> {
self.rpc_client().wait_for_balance(pubkey, expected_balance) self.rpc_client.wait_for_balance(pubkey, expected_balance)
} }
/// Check a signature in the bank. This method blocks /// Check a signature in the bank. This method blocks
/// until the server sends a response. /// until the server sends a response.
pub fn check_signature(&self, signature: &Signature) -> bool { pub fn check_signature(&self, signature: &Signature) -> bool {
self.rpc_client().check_signature(signature) self.rpc_client.check_signature(signature)
} }
pub fn fullnode_exit(&self) -> io::Result<bool> { pub fn fullnode_exit(&self) -> io::Result<bool> {
self.rpc_client().fullnode_exit() self.rpc_client.fullnode_exit()
} }
pub fn get_num_blocks_since_signature_confirmation( pub fn get_num_blocks_since_signature_confirmation(
&mut self, &mut self,
sig: &Signature, sig: &Signature,
) -> io::Result<usize> { ) -> io::Result<usize> {
self.rpc_client() self.rpc_client
.get_num_blocks_since_signature_confirmation(sig) .get_num_blocks_since_signature_confirmation(sig)
} }
} }
impl Client for ThinClient { impl Client for ThinClient {
fn transactions_addr(&self) -> String { fn transactions_addr(&self) -> String {
self.transactions_addr().to_string() self.transactions_addr.to_string()
} }
} }
@@ -304,40 +188,20 @@ impl SyncClient for ThinClient {
} }
fn get_account_data(&self, pubkey: &Pubkey) -> TransportResult<Option<Vec<u8>>> { fn get_account_data(&self, pubkey: &Pubkey) -> TransportResult<Option<Vec<u8>>> {
Ok(self.rpc_client().get_account_data(pubkey).ok()) Ok(self.rpc_client.get_account_data(pubkey).ok())
}
fn get_account(&self, pubkey: &Pubkey) -> TransportResult<Option<Account>> {
Ok(self.rpc_client().get_account(pubkey).ok())
} }
fn get_balance(&self, pubkey: &Pubkey) -> TransportResult<u64> { fn get_balance(&self, pubkey: &Pubkey) -> TransportResult<u64> {
let balance = self.rpc_client().get_balance(pubkey)?; let balance = self.rpc_client.get_balance(pubkey)?;
Ok(balance) Ok(balance)
} }
fn get_recent_blockhash(&self) -> TransportResult<(Hash, FeeCalculator)> {
let index = self.optimizer.experiment();
let now = Instant::now();
let recent_blockhash = self.rpc_clients[index].get_recent_blockhash();
match recent_blockhash {
Ok(recent_blockhash) => {
self.optimizer.report(index, duration_as_ms(&now.elapsed()));
Ok(recent_blockhash)
}
Err(e) => {
self.optimizer.report(index, std::u64::MAX);
Err(e)?
}
}
}
fn get_signature_status( fn get_signature_status(
&self, &self,
signature: &Signature, signature: &Signature,
) -> TransportResult<Option<transaction::Result<()>>> { ) -> TransportResult<Option<transaction::Result<()>>> {
let status = self let status = self
.rpc_client() .rpc_client
.get_signature_status(&signature.to_string()) .get_signature_status(&signature.to_string())
.map_err(|err| { .map_err(|err| {
io::Error::new( io::Error::new(
@@ -348,29 +212,13 @@ impl SyncClient for ThinClient {
Ok(status) Ok(status)
} }
fn get_slot(&self) -> TransportResult<u64> { fn get_recent_blockhash(&self) -> TransportResult<(Hash, FeeCalculator)> {
let slot = self.rpc_client().get_slot().map_err(|err| { Ok(self.rpc_client.get_recent_blockhash()?)
io::Error::new(
io::ErrorKind::Other,
format!("send_transaction failed with error {:?}", err),
)
})?;
Ok(slot)
} }
fn get_transaction_count(&self) -> TransportResult<u64> { fn get_transaction_count(&self) -> TransportResult<u64> {
let index = self.optimizer.experiment(); let transaction_count = self.rpc_client.get_transaction_count()?;
let now = Instant::now(); Ok(transaction_count)
match self.rpc_client().get_transaction_count() {
Ok(transaction_count) => {
self.optimizer.report(index, duration_as_ms(&now.elapsed()));
Ok(transaction_count)
}
Err(e) => {
self.optimizer.report(index, std::u64::MAX);
Err(e)?
}
}
} }
/// Poll the server until the signature has been confirmed by at least `min_confirmed_blocks` /// Poll the server until the signature has been confirmed by at least `min_confirmed_blocks`
@@ -380,17 +228,16 @@ impl SyncClient for ThinClient {
min_confirmed_blocks: usize, min_confirmed_blocks: usize,
) -> TransportResult<()> { ) -> TransportResult<()> {
Ok(self Ok(self
.rpc_client() .rpc_client
.poll_for_signature_confirmation(signature, min_confirmed_blocks)?) .poll_for_signature_confirmation(signature, min_confirmed_blocks)?)
} }
fn poll_for_signature(&self, signature: &Signature) -> TransportResult<()> { fn poll_for_signature(&self, signature: &Signature) -> TransportResult<()> {
Ok(self.rpc_client().poll_for_signature(signature)?) Ok(self.rpc_client.poll_for_signature(signature)?)
} }
fn get_new_blockhash(&self, blockhash: &Hash) -> TransportResult<(Hash, FeeCalculator)> { fn get_new_blockhash(&self, blockhash: &Hash) -> TransportResult<(Hash, FeeCalculator)> {
let new_blockhash = self.rpc_client().get_new_blockhash(blockhash)?; Ok(self.rpc_client.get_new_blockhash(blockhash)?)
Ok(new_blockhash)
} }
} }
@@ -402,7 +249,7 @@ impl AsyncClient for ThinClient {
.expect("serialize Transaction in pub fn transfer_signed"); .expect("serialize Transaction in pub fn transfer_signed");
assert!(buf.len() < PACKET_DATA_SIZE); assert!(buf.len() < PACKET_DATA_SIZE);
self.transactions_socket self.transactions_socket
.send_to(&buf[..], &self.transactions_addr())?; .send_to(&buf[..], &self.transactions_addr)?;
Ok(transaction.signatures[0]) Ok(transaction.signatures[0])
} }
fn async_send_message( fn async_send_message(
@@ -449,28 +296,3 @@ pub fn create_client_with_timeout(
let (_, transactions_socket) = solana_netutil::bind_in_range(range).unwrap(); let (_, transactions_socket) = solana_netutil::bind_in_range(range).unwrap();
ThinClient::new_socket_with_timeout(rpc, tpu, transactions_socket, timeout) ThinClient::new_socket_with_timeout(rpc, tpu, transactions_socket, timeout)
} }
#[cfg(test)]
mod tests {
use super::*;
use rayon::prelude::*;
#[test]
fn test_client_optimizer() {
solana_logger::setup();
const NUM_CLIENTS: usize = 5;
let optimizer = ClientOptimizer::new(NUM_CLIENTS);
(0..NUM_CLIENTS).into_par_iter().for_each(|_| {
let index = optimizer.experiment();
optimizer.report(index, (NUM_CLIENTS - index) as u64);
});
let index = optimizer.experiment();
optimizer.report(index, 50);
assert_eq!(optimizer.best(), NUM_CLIENTS - 1);
optimizer.report(optimizer.best(), std::u64::MAX);
assert_eq!(optimizer.best(), NUM_CLIENTS - 2);
}
}

View File

@@ -1,7 +1,7 @@
[package] [package]
name = "solana" name = "solana"
description = "Blockchain, Rebuilt for Scale" description = "Blockchain, Rebuilt for Scale"
version = "0.16.1" version = "0.15.0"
documentation = "https://docs.rs/solana" documentation = "https://docs.rs/solana"
homepage = "https://solana.com/" homepage = "https://solana.com/"
readme = "../README.md" readme = "../README.md"
@@ -14,73 +14,69 @@ edition = "2018"
codecov = { repository = "solana-labs/solana", branch = "master", service = "github" } codecov = { repository = "solana-labs/solana", branch = "master", service = "github" }
[features] [features]
chacha = []
cuda = [] cuda = []
erasure = []
kvstore = ["solana-kvstore"] kvstore = ["solana-kvstore"]
[dependencies] [dependencies]
bincode = "1.1.4" bincode = "1.1.4"
bs58 = "0.2.0" bs58 = "0.2.0"
byteorder = "1.3.2" byteorder = "1.3.1"
chrono = { version = "0.4.0", features = ["serde"] } chrono = { version = "0.4.0", features = ["serde"] }
core_affinity = "0.5.9"
crc = { version = "1.8.1", optional = true } crc = { version = "1.8.1", optional = true }
core_affinity = "0.5.9"
hashbrown = "0.2.0" hashbrown = "0.2.0"
indexmap = "1.0" indexmap = "1.0"
itertools = "0.8.0" itertools = "0.8.0"
jsonrpc-core = "12.0.0" jsonrpc-core = "11.0.0"
jsonrpc-derive = "12.0.0" jsonrpc-derive = "11.0.0"
jsonrpc-http-server = "12.0.0" jsonrpc-http-server = "11.0.0"
jsonrpc-pubsub = "12.0.0" jsonrpc-pubsub = "11.0.0"
jsonrpc-ws-server = "12.0.0" jsonrpc-ws-server = "11.0.0"
libc = "0.2.58" libc = "0.2.55"
log = "0.4.2" log = "0.4.2"
memmap = { version = "0.7.0", optional = true } memmap = { version = "0.7.0", optional = true }
nix = "0.14.1" nix = "0.14.0"
num-traits = "0.2"
rand = "0.6.5" rand = "0.6.5"
rand_chacha = "0.1.1" rand_chacha = "0.1.1"
rayon = "1.1.0" rayon = "1.0.0"
reqwest = "0.9.18" reed-solomon-erasure = "3.1.1"
reqwest = "0.9.17"
rocksdb = "0.11.0" rocksdb = "0.11.0"
serde = "1.0.92" serde = "1.0.89"
serde_derive = "1.0.92" serde_derive = "1.0.91"
serde_json = "1.0.39" serde_json = "1.0.39"
solana-budget-api = { path = "../programs/budget_api", version = "0.16.1" } solana-budget-api = { path = "../programs/budget_api", version = "0.15.0" }
solana-budget-program = { path = "../programs/budget_program", version = "0.16.1" } solana-budget-program = { path = "../programs/budget_program", version = "0.15.0" }
solana-chacha-sys = { path = "../chacha-sys", version = "0.16.1" } solana-client = { path = "../client", version = "0.15.0" }
solana-client = { path = "../client", version = "0.16.1" } solana-drone = { path = "../drone", version = "0.15.0" }
solana-config-program = { path = "../programs/config_program", version = "0.16.1" }
solana-drone = { path = "../drone", version = "0.16.1" }
solana-ed25519-dalek = "0.2.0" solana-ed25519-dalek = "0.2.0"
solana-exchange-program = { path = "../programs/exchange_program", version = "0.16.1" } solana-kvstore = { path = "../kvstore", version = "0.15.0" , optional = true }
solana-kvstore = { path = "../kvstore", version = "0.16.1", optional = true } solana-logger = { path = "../logger", version = "0.15.0" }
solana-logger = { path = "../logger", version = "0.16.1" } solana-metrics = { path = "../metrics", version = "0.15.0" }
solana-metrics = { path = "../metrics", version = "0.16.1" } solana-netutil = { path = "../netutil", version = "0.15.0" }
solana-netutil = { path = "../netutil", version = "0.16.1" } solana-runtime = { path = "../runtime", version = "0.15.0" }
solana-runtime = { path = "../runtime", version = "0.16.1" } solana-sdk = { path = "../sdk", version = "0.15.0" }
solana-sdk = { path = "../sdk", version = "0.16.1" } solana-stake-api = { path = "../programs/stake_api", version = "0.15.0" }
solana-stake-api = { path = "../programs/stake_api", version = "0.16.1" } solana-stake-program = { path = "../programs/stake_program", version = "0.15.0" }
solana-stake-program = { path = "../programs/stake_program", version = "0.16.1" } solana-storage-api = { path = "../programs/storage_api", version = "0.15.0" }
solana-storage-api = { path = "../programs/storage_api", version = "0.16.1" } solana-storage-program = { path = "../programs/storage_program", version = "0.15.0" }
solana-storage-program = { path = "../programs/storage_program", version = "0.16.1" } solana-vote-api = { path = "../programs/vote_api", version = "0.15.0" }
solana-vote-api = { path = "../programs/vote_api", version = "0.16.1" } solana-vote-program = { path = "../programs/vote_program", version = "0.15.0" }
solana-vote-program = { path = "../programs/vote_program", version = "0.16.1" } solana-exchange-program = { path = "../programs/exchange_program", version = "0.15.0" }
solana-vote-signer = { path = "../vote-signer", version = "0.16.1" } solana-config-program = { path = "../programs/config_program", version = "0.15.0" }
sys-info = "0.5.7" solana-vote-signer = { path = "../vote-signer", version = "0.15.0" }
sys-info = "0.5.6"
tokio = "0.1" tokio = "0.1"
tokio-codec = "0.1" tokio-codec = "0.1"
untrusted = "0.6.2" untrusted = "0.6.2"
# reed-solomon-erasure's simd_c feature fails to build for x86_64-pc-windows-msvc, use pure-rust
[target.'cfg(windows)'.dependencies]
reed-solomon-erasure = { version = "3.1.1", features = ["pure-rust"] }
[target.'cfg(not(windows))'.dependencies]
reed-solomon-erasure = "3.1.1"
[dev-dependencies] [dev-dependencies]
hex-literal = "0.2.0" hex-literal = "0.2.0"
matches = "0.1.6" matches = "0.1.6"
[[bench]] [[bench]]
name = "banking_stage" name = "banking_stage"
@@ -103,5 +99,5 @@ name = "sigverify_stage"
name = "poh" name = "poh"
[[bench]] [[bench]]
name = "chacha"
required-features = ["chacha"] required-features = ["chacha"]
name = "chacha"

View File

@@ -22,7 +22,7 @@ use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::Signature; use solana_sdk::signature::Signature;
use solana_sdk::system_transaction; use solana_sdk::system_transaction;
use solana_sdk::timing::{ use solana_sdk::timing::{
duration_as_us, timestamp, DEFAULT_TICKS_PER_SLOT, MAX_RECENT_BLOCKHASHES, duration_as_ms, timestamp, DEFAULT_TICKS_PER_SLOT, MAX_RECENT_BLOCKHASHES,
}; };
use std::iter; use std::iter;
use std::sync::atomic::Ordering; use std::sync::atomic::Ordering;
@@ -33,18 +33,16 @@ use test::Bencher;
fn check_txs(receiver: &Arc<Receiver<WorkingBankEntries>>, ref_tx_count: usize) { fn check_txs(receiver: &Arc<Receiver<WorkingBankEntries>>, ref_tx_count: usize) {
let mut total = 0; let mut total = 0;
let now = Instant::now();
loop { loop {
let entries = receiver.recv_timeout(Duration::new(1, 0)); let entries = receiver.recv_timeout(Duration::new(1, 0));
if let Ok((_, entries)) = entries { if let Ok((_, entries)) = entries {
for (entry, _) in &entries { for (entry, _) in &entries {
total += entry.transactions.len(); total += entry.transactions.len();
} }
} } else {
if total >= ref_tx_count {
break; break;
} }
if now.elapsed().as_secs() > 60 { if total >= ref_tx_count {
break; break;
} }
} }
@@ -91,8 +89,7 @@ fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
solana_logger::setup(); solana_logger::setup();
let num_threads = BankingStage::num_threads() as usize; let num_threads = BankingStage::num_threads() as usize;
// a multiple of packet chunk 2X duplicates to avoid races // a multiple of packet chunk 2X duplicates to avoid races
const CHUNKS: usize = 32; let txes = 192 * num_threads * 2;
let txes = 192 * num_threads * CHUNKS;
let mint_total = 1_000_000_000_000; let mint_total = 1_000_000_000_000;
let GenesisBlockInfo { let GenesisBlockInfo {
mut genesis_block, mut genesis_block,
@@ -170,7 +167,7 @@ fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
); );
poh_recorder.lock().unwrap().set_bank(&bank); poh_recorder.lock().unwrap().set_bank(&bank);
let chunk_len = verified.len() / CHUNKS; let half_len = verified.len() / 2;
let mut start = 0; let mut start = 0;
// This is so that the signal_receiver does not go out of scope after the closure. // This is so that the signal_receiver does not go out of scope after the closure.
@@ -180,33 +177,18 @@ fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
let signal_receiver2 = signal_receiver.clone(); let signal_receiver2 = signal_receiver.clone();
bencher.iter(move || { bencher.iter(move || {
let now = Instant::now(); let now = Instant::now();
let mut sent = 0; for v in verified[start..start + half_len].chunks(verified.len() / num_threads) {
trace!("sending... {}..{} {}", start, start + half_len, timestamp());
for v in verified[start..start + chunk_len].chunks(verified.len() / num_threads) {
trace!(
"sending... {}..{} {}",
start,
start + chunk_len,
timestamp()
);
for xv in v {
sent += xv.0.packets.len();
}
verified_sender.send(v.to_vec()).unwrap(); verified_sender.send(v.to_vec()).unwrap();
} }
check_txs(&signal_receiver2, txes / CHUNKS); check_txs(&signal_receiver2, txes / 2);
// This signature clear may not actually clear the signatures
// in this chunk, but since we rotate between 32 chunks then
// we should clear them by the time we come around again to re-use that chunk.
bank.clear_signatures();
trace!( trace!(
"time: {} checked: {} sent: {}", "time: {} checked: {}",
duration_as_us(&now.elapsed()), duration_as_ms(&now.elapsed()),
txes / CHUNKS, txes / 2
sent,
); );
start += chunk_len; bank.clear_signatures();
start += half_len;
start %= verified.len(); start %= verified.len();
}); });
drop(vote_sender); drop(vote_sender);

View File

@@ -1,48 +0,0 @@
#![feature(test)]
extern crate test;
use solana::entry::EntrySlice;
use solana::entry::{next_entry_mut, Entry};
use solana_sdk::hash::{hash, Hash};
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::system_transaction;
use test::Bencher;
const NUM_HASHES: u64 = 400;
const NUM_ENTRIES: usize = 800;
#[bench]
fn bench_poh_verify_ticks(bencher: &mut Bencher) {
let zero = Hash::default();
let mut cur_hash = hash(&zero.as_ref());
let start = *&cur_hash;
let mut ticks: Vec<Entry> = Vec::with_capacity(NUM_ENTRIES);
for _ in 0..NUM_ENTRIES {
ticks.push(next_entry_mut(&mut cur_hash, NUM_HASHES, vec![]));
}
bencher.iter(|| {
ticks.verify(&start);
})
}
#[bench]
fn bench_poh_verify_transaction_entries(bencher: &mut Bencher) {
let zero = Hash::default();
let mut cur_hash = hash(&zero.as_ref());
let start = *&cur_hash;
let keypair1 = Keypair::new();
let pubkey1 = keypair1.pubkey();
let mut ticks: Vec<Entry> = Vec::with_capacity(NUM_ENTRIES);
for _ in 0..NUM_ENTRIES {
let tx = system_transaction::create_user_account(&keypair1, &pubkey1, 42, cur_hash);
ticks.push(next_entry_mut(&mut cur_hash, NUM_HASHES, vec![tx]));
}
bencher.iter(|| {
ticks.verify(&start);
})
}

View File

@@ -5,41 +5,44 @@ use std::path::Path;
fn main() { fn main() {
println!("cargo:rerun-if-changed=build.rs"); println!("cargo:rerun-if-changed=build.rs");
if env::var("CARGO_FEATURE_CUDA").is_ok() { let perf_libs_dir = {
println!("cargo:rustc-cfg=cuda"); let manifest_dir = env::var("CARGO_MANIFEST_DIR").unwrap();
let mut path = Path::new(&manifest_dir);
path = path.parent().unwrap();
path.join(Path::new("target/perf-libs"))
};
let perf_libs_dir = perf_libs_dir.to_str().unwrap();
let perf_libs_dir = { // Ensure `perf_libs_dir` exists. It's been observed that
let manifest_dir = env::var("CARGO_MANIFEST_DIR").unwrap(); // a cargo:rerun-if-changed= directive with a non-existent
let mut path = Path::new(&manifest_dir); // directory triggers a rebuild on every |cargo build| invocation
path = path.parent().unwrap(); fs::create_dir_all(&perf_libs_dir).unwrap_or_else(|err| {
let mut path = path.join(Path::new("target/perf-libs")); if err.kind() != std::io::ErrorKind::AlreadyExists {
path.push( panic!("Unable to create {}: {:?}", perf_libs_dir, err);
env::var("SOLANA_PERF_LIBS_CUDA") }
.unwrap_or_else(|err| panic!("SOLANA_PERF_LIBS_CUDA not defined: {}", err)), });
);
path
};
let perf_libs_dir = perf_libs_dir.to_str().unwrap();
// Ensure `perf_libs_dir` exists. It's been observed that let chacha = !env::var("CARGO_FEATURE_CHACHA").is_err();
// a cargo:rerun-if-changed= directive with a non-existent let cuda = !env::var("CARGO_FEATURE_CUDA").is_err();
// directory triggers a rebuild on every |cargo build| invocation
fs::create_dir_all(&perf_libs_dir).unwrap_or_else(|err| { if chacha || cuda {
if err.kind() != std::io::ErrorKind::AlreadyExists {
panic!("Unable to create {}: {:?}", perf_libs_dir, err);
}
});
println!("cargo:rerun-if-changed={}", perf_libs_dir); println!("cargo:rerun-if-changed={}", perf_libs_dir);
println!("cargo:rustc-link-search=native={}", perf_libs_dir); println!("cargo:rustc-link-search=native={}", perf_libs_dir);
if cfg!(windows) { }
println!("cargo:rerun-if-changed={}/libcuda-crypt.dll", perf_libs_dir); if chacha {
} else if cfg!(target_os = "macos") { println!("cargo:rerun-if-changed={}/libcpu-crypt.a", perf_libs_dir);
println!( }
"cargo:rerun-if-changed={}/libcuda-crypt.dylib", if cuda {
perf_libs_dir let cuda_home = match env::var("CUDA_HOME") {
); Ok(cuda_home) => cuda_home,
} else { Err(_) => String::from("/usr/local/cuda"),
println!("cargo:rerun-if-changed={}/libcuda-crypt.so", perf_libs_dir); };
}
println!("cargo:rerun-if-changed={}/libcuda-crypt.a", perf_libs_dir);
println!("cargo:rustc-link-lib=static=cuda-crypt");
println!("cargo:rustc-link-search=native={}/lib64", cuda_home);
println!("cargo:rustc-link-lib=dylib=cudart");
println!("cargo:rustc-link-lib=dylib=cuda");
println!("cargo:rustc-link-lib=dylib=cudadevrt");
} }
} }
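The build script above drives native linking entirely through cargo's printed directives. A minimal build.rs sketch of that mechanism, assuming only standard cargo behavior — the directory layout is a placeholder, while the directive names and the cuda-crypt library come from the diff itself:

use std::env;
use std::path::Path;

fn main() {
    let manifest_dir = env::var("CARGO_MANIFEST_DIR").unwrap();
    let libs_dir = Path::new(&manifest_dir).join("target/perf-libs");
    let libs_dir = libs_dir.to_str().unwrap();

    // Re-run this script whenever the directory changes.
    println!("cargo:rerun-if-changed={}", libs_dir);
    // Add a native library search path for rustc...
    println!("cargo:rustc-link-search=native={}", libs_dir);
    // ...and link a library only when the matching cargo feature is enabled.
    if env::var("CARGO_FEATURE_CUDA").is_ok() {
        println!("cargo:rustc-link-lib=static=cuda-crypt");
    }
}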

View File

@@ -1,16 +1,10 @@
//! The `bank_forks` module implements BankForks, a DAG of checkpointed Banks //! The `bank_forks` module implements BankForks, a DAG of checkpointed Banks
use bincode::{deserialize_from, serialize_into}; use hashbrown::{HashMap, HashSet};
use solana_metrics::inc_new_counter_info; use solana_metrics::inc_new_counter_info;
use solana_runtime::bank::{Bank, BankRc, StatusCacheRc}; use solana_runtime::bank::Bank;
use solana_sdk::genesis_block::GenesisBlock;
use solana_sdk::timing; use solana_sdk::timing;
use std::collections::{HashMap, HashSet};
use std::fs;
use std::fs::File;
use std::io::{BufReader, BufWriter, Error, ErrorKind};
use std::ops::Index; use std::ops::Index;
use std::path::{Path, PathBuf};
use std::sync::Arc; use std::sync::Arc;
use std::time::Instant; use std::time::Instant;
@@ -18,8 +12,6 @@ pub struct BankForks {
banks: HashMap<u64, Arc<Bank>>, banks: HashMap<u64, Arc<Bank>>,
working_bank: Arc<Bank>, working_bank: Arc<Bank>,
root: u64, root: u64,
slots: HashSet<u64>,
snapshot_path: Option<String>,
} }
impl Index<u64> for BankForks { impl Index<u64> for BankForks {
@@ -38,8 +30,6 @@ impl BankForks {
banks, banks,
working_bank, working_bank,
root: 0, root: 0,
slots: HashSet::new(),
snapshot_path: None,
} }
} }
@@ -55,7 +45,6 @@ impl BankForks {
} }
/// Create a map of bank slot id to the set of all of its descendants /// Create a map of bank slot id to the set of all of its descendants
#[allow(clippy::or_fun_call)]
pub fn descendants(&self) -> HashMap<u64, HashSet<u64>> { pub fn descendants(&self) -> HashMap<u64, HashSet<u64>> {
let mut descendants = HashMap::new(); let mut descendants = HashMap::new();
for bank in self.banks.values() { for bank in self.banks.values() {
@@ -102,8 +91,6 @@ impl BankForks {
root, root,
banks, banks,
working_bank, working_bank,
slots: HashSet::new(),
snapshot_path: None,
} }
} }
@@ -151,211 +138,9 @@ impl BankForks {
} }
fn prune_non_root(&mut self, root: u64) { fn prune_non_root(&mut self, root: u64) {
let slots: HashSet<u64> = self
.banks
.iter()
.filter(|(_, b)| b.is_frozen())
.map(|(k, _)| *k)
.collect();
let descendants = self.descendants(); let descendants = self.descendants();
self.banks self.banks
.retain(|slot, _| descendants[&root].contains(slot)); .retain(|slot, _| descendants[&root].contains(slot))
if self.snapshot_path.is_some() {
let diff: HashSet<_> = slots.symmetric_difference(&self.slots).collect();
trace!("prune non root {} - {:?}", root, diff);
for slot in diff.iter() {
if **slot > root {
let _ = self.add_snapshot(**slot, root);
} else {
BankForks::remove_snapshot(**slot, &self.snapshot_path);
}
}
}
self.slots = slots.clone();
}
fn get_io_error(error: &str) -> Error {
warn!("BankForks error: {:?}", error);
Error::new(ErrorKind::Other, error)
}
fn get_snapshot_path(path: &Option<String>) -> PathBuf {
Path::new(&path.clone().unwrap()).to_path_buf()
}
pub fn add_snapshot(&self, slot: u64, root: u64) -> Result<(), Error> {
let path = BankForks::get_snapshot_path(&self.snapshot_path);
fs::create_dir_all(path.clone())?;
let bank_file = format!("{}", slot);
let bank_file_path = path.join(bank_file);
trace!("path: {:?}", bank_file_path);
let file = File::create(bank_file_path)?;
let mut stream = BufWriter::new(file);
let bank_slot = self.get(slot);
if bank_slot.is_none() {
return Err(BankForks::get_io_error("bank_forks get error"));
}
let bank = bank_slot.unwrap().clone();
serialize_into(&mut stream, &*bank)
.map_err(|_| BankForks::get_io_error("serialize bank error"))?;
let mut parent_slot: u64 = 0;
if let Some(parent_bank) = bank.parent() {
parent_slot = parent_bank.slot();
}
serialize_into(&mut stream, &parent_slot)
.map_err(|_| BankForks::get_io_error("serialize bank parent error"))?;
serialize_into(&mut stream, &root)
.map_err(|_| BankForks::get_io_error("serialize root error"))?;
serialize_into(&mut stream, &bank.src)
.map_err(|_| BankForks::get_io_error("serialize bank status cache error"))?;
serialize_into(&mut stream, &bank.rc)
.map_err(|_| BankForks::get_io_error("serialize bank accounts error"))?;
Ok(())
}
pub fn remove_snapshot(slot: u64, path: &Option<String>) {
let path = BankForks::get_snapshot_path(path);
let bank_file = format!("{}", slot);
let bank_file_path = path.join(bank_file);
let _ = fs::remove_file(bank_file_path);
}
pub fn set_snapshot_config(&mut self, path: Option<String>) {
self.snapshot_path = path;
}
fn load_snapshots(
names: &[u64],
bank0: &mut Bank,
bank_maps: &mut Vec<(u64, u64, Bank)>,
status_cache_rc: &StatusCacheRc,
snapshot_path: &Option<String>,
) -> Option<u64> {
let path = BankForks::get_snapshot_path(snapshot_path);
let mut bank_root: Option<u64> = None;
for bank_slot in names.iter().rev() {
let bank_path = format!("{}", bank_slot);
let bank_file_path = path.join(bank_path.clone());
info!("Load from {:?}", bank_file_path);
let file = File::open(bank_file_path);
if file.is_err() {
warn!("Snapshot file open failed for {}", bank_slot);
continue;
}
let file = file.unwrap();
let mut stream = BufReader::new(file);
let bank: Result<Bank, std::io::Error> = deserialize_from(&mut stream)
.map_err(|_| BankForks::get_io_error("deserialize bank error"));
let slot: Result<u64, std::io::Error> = deserialize_from(&mut stream)
.map_err(|_| BankForks::get_io_error("deserialize bank parent error"));
let parent_slot = if slot.is_ok() { slot.unwrap() } else { 0 };
let root: Result<u64, std::io::Error> = deserialize_from(&mut stream)
.map_err(|_| BankForks::get_io_error("deserialize root error"));
let status_cache: Result<StatusCacheRc, std::io::Error> = deserialize_from(&mut stream)
.map_err(|_| BankForks::get_io_error("deserialize bank status cache error"));
if bank_root.is_none() && bank0.rc.update_from_stream(&mut stream).is_ok() {
bank_root = Some(root.unwrap());
}
if bank_root.is_some() {
match bank {
Ok(v) => {
if status_cache.is_ok() {
status_cache_rc.append(&status_cache.unwrap());
}
bank_maps.push((*bank_slot, parent_slot, v));
}
Err(_) => warn!("Load snapshot failed for {}", bank_slot),
}
} else {
BankForks::remove_snapshot(*bank_slot, snapshot_path);
warn!("Load snapshot rc failed for {}", bank_slot);
}
}
bank_root
}
fn setup_banks(
bank_maps: &mut Vec<(u64, u64, Bank)>,
bank_rc: &BankRc,
status_cache_rc: &StatusCacheRc,
) -> (HashMap<u64, Arc<Bank>>, HashSet<u64>, u64) {
let mut banks = HashMap::new();
let mut slots = HashSet::new();
let (last_slot, last_parent_slot, mut last_bank) = bank_maps.remove(0);
last_bank.set_bank_rc(&bank_rc, &status_cache_rc);
while let Some((slot, parent_slot, mut bank)) = bank_maps.pop() {
bank.set_bank_rc(&bank_rc, &status_cache_rc);
if parent_slot != 0 {
if let Some(parent) = banks.get(&parent_slot) {
bank.set_parent(parent);
}
}
if slot > 0 {
banks.insert(slot, Arc::new(bank));
slots.insert(slot);
}
}
if last_parent_slot != 0 {
if let Some(parent) = banks.get(&last_parent_slot) {
last_bank.set_parent(parent);
}
}
banks.insert(last_slot, Arc::new(last_bank));
slots.insert(last_slot);
(banks, slots, last_slot)
}
pub fn load_from_snapshot(
genesis_block: &GenesisBlock,
account_paths: Option<String>,
snapshot_path: &Option<String>,
) -> Result<Self, Error> {
let path = BankForks::get_snapshot_path(snapshot_path);
let paths = fs::read_dir(path)?;
let mut names = paths
.filter_map(|entry| {
entry.ok().and_then(|e| {
e.path()
.file_name()
.and_then(|n| n.to_str().map(|s| s.parse::<u64>().unwrap()))
})
})
.collect::<Vec<u64>>();
names.sort();
let mut bank_maps = vec![];
let status_cache_rc = StatusCacheRc::default();
let id = (names[names.len() - 1] + 1) as usize;
let mut bank0 =
Bank::create_with_genesis(&genesis_block, account_paths.clone(), &status_cache_rc, id);
bank0.freeze();
let bank_root = BankForks::load_snapshots(
&names,
&mut bank0,
&mut bank_maps,
&status_cache_rc,
snapshot_path,
);
if bank_maps.is_empty() || bank_root.is_none() {
BankForks::remove_snapshot(0, snapshot_path);
return Err(Error::new(ErrorKind::Other, "no snapshots loaded"));
}
let root = bank_root.unwrap();
let (banks, slots, last_slot) =
BankForks::setup_banks(&mut bank_maps, &bank0.rc, &status_cache_rc);
let working_bank = banks[&last_slot].clone();
Ok(BankForks {
banks,
working_bank,
root,
slots,
snapshot_path: snapshot_path.clone(),
})
} }
} }
@@ -365,10 +150,6 @@ mod tests {
use crate::genesis_utils::{create_genesis_block, GenesisBlockInfo}; use crate::genesis_utils::{create_genesis_block, GenesisBlockInfo};
use solana_sdk::hash::Hash; use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey; use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::system_transaction;
use std::env;
use std::fs::remove_dir_all;
#[test] #[test]
fn test_bank_forks() { fn test_bank_forks() {
@@ -393,8 +174,8 @@ mod tests {
let bank = Bank::new_from_parent(&bank0, &Pubkey::default(), 2); let bank = Bank::new_from_parent(&bank0, &Pubkey::default(), 2);
bank_forks.insert(bank); bank_forks.insert(bank);
let descendants = bank_forks.descendants(); let descendants = bank_forks.descendants();
let children: HashSet<u64> = [1u64, 2u64].to_vec().into_iter().collect(); let children: Vec<u64> = descendants[&0].iter().cloned().collect();
assert_eq!(children, *descendants.get(&0).unwrap()); assert_eq!(children, vec![1, 2]);
assert!(descendants[&1].is_empty()); assert!(descendants[&1].is_empty());
assert!(descendants[&2].is_empty()); assert!(descendants[&2].is_empty());
} }
@@ -438,112 +219,4 @@ mod tests {
assert_eq!(bank_forks.active_banks(), vec![1]); assert_eq!(bank_forks.active_banks(), vec![1]);
} }
struct TempPaths {
pub paths: String,
}
#[macro_export]
macro_rules! tmp_bank_accounts_name {
() => {
&format!("{}-{}", file!(), line!())
};
}
#[macro_export]
macro_rules! get_tmp_bank_accounts_path {
() => {
get_tmp_bank_accounts_path(tmp_bank_accounts_name!())
};
}
impl Drop for TempPaths {
fn drop(&mut self) {
let paths: Vec<String> = self.paths.split(',').map(|s| s.to_string()).collect();
paths.iter().for_each(|p| {
let _ignored = remove_dir_all(p);
});
}
}
fn get_paths_vec(paths: &str) -> Vec<String> {
paths.split(',').map(|s| s.to_string()).collect()
}
fn get_tmp_snapshots_path() -> TempPaths {
let out_dir = env::var("OUT_DIR").unwrap_or_else(|_| "target".to_string());
let path = format!("{}/snapshots", out_dir);
TempPaths {
paths: path.to_string(),
}
}
fn get_tmp_bank_accounts_path(paths: &str) -> TempPaths {
let vpaths = get_paths_vec(paths);
let out_dir = env::var("OUT_DIR").unwrap_or_else(|_| "target".to_string());
let vpaths: Vec<_> = vpaths
.iter()
.map(|path| format!("{}/{}", out_dir, path))
.collect();
TempPaths {
paths: vpaths.join(","),
}
}
fn restore_from_snapshot(
genesis_block: &GenesisBlock,
bank_forks: BankForks,
account_paths: Option<String>,
last_slot: u64,
) {
let new =
BankForks::load_from_snapshot(&genesis_block, account_paths, &bank_forks.snapshot_path)
.unwrap();
for (slot, _) in new.banks.iter() {
if *slot > 0 {
let bank = bank_forks.banks.get(slot).unwrap().clone();
let new_bank = new.banks.get(slot).unwrap();
bank.compare_bank(&new_bank);
}
}
assert_eq!(new.working_bank().slot(), last_slot);
for (slot, _) in new.banks.iter() {
BankForks::remove_snapshot(*slot, &bank_forks.snapshot_path);
}
}
#[test]
fn test_bank_forks_snapshot_n() {
solana_logger::setup();
let path = get_tmp_bank_accounts_path!();
let spath = get_tmp_snapshots_path();
let GenesisBlockInfo {
genesis_block,
mint_keypair,
..
} = create_genesis_block(10_000);
for index in 0..10 {
let bank0 = Bank::new_with_paths(&genesis_block, Some(path.paths.clone()));
bank0.freeze();
let slot = bank0.slot();
let mut bank_forks = BankForks::new(0, bank0);
bank_forks.set_snapshot_config(Some(spath.paths.clone()));
bank_forks.add_snapshot(slot, 0).unwrap();
for forks in 0..index {
let bank = Bank::new_from_parent(&bank_forks[forks], &Pubkey::default(), forks + 1);
let key1 = Keypair::new().pubkey();
let tx = system_transaction::create_user_account(
&mint_keypair,
&key1,
1,
genesis_block.hash(),
);
assert_eq!(bank.process_transaction(&tx), Ok(()));
bank.freeze();
let slot = bank.slot();
bank_forks.insert(bank);
bank_forks.add_snapshot(slot, 0).unwrap();
}
restore_from_snapshot(&genesis_block, bank_forks, Some(path.paths.clone()), index);
}
}
} }
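The removed snapshot code depends on a strict ordering invariant: add_snapshot writes the bank, parent slot, root, status cache, and accounts into one stream, and load_snapshots must read them back in exactly that order. A hedged round-trip sketch of that invariant, assuming the bincode crate and using plain integers in place of the real Bank fields:

use bincode::{deserialize_from, serialize_into};
use std::io::Cursor;

fn main() {
    // Write several values into one stream, in a fixed order...
    let mut buf = Vec::new();
    serialize_into(&mut buf, &42u64).unwrap(); // e.g. the slot
    serialize_into(&mut buf, &7u64).unwrap();  // e.g. the parent slot
    serialize_into(&mut buf, &0u64).unwrap();  // e.g. the root

    // ...and read them back in exactly the same order.
    let mut reader = Cursor::new(buf);
    let slot: u64 = deserialize_from(&mut reader).unwrap();
    let parent: u64 = deserialize_from(&mut reader).unwrap();
    let root: u64 = deserialize_from(&mut reader).unwrap();
    assert_eq!((slot, parent, root), (42, 7, 0));
}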

View File

@@ -67,7 +67,8 @@ impl BankingStage {
poh_recorder, poh_recorder,
verified_receiver, verified_receiver,
verified_vote_receiver, verified_vote_receiver,
4, 2, // 1 for voting, 1 for banking.
// More than 2 threads is slower in testnet testing.
) )
} }
@@ -85,7 +86,7 @@ impl BankingStage {
// This thread talks to poh_service and broadcasts the entries once they have been recorded. // This thread talks to poh_service and broadcasts the entries once they have been recorded.
// Once an entry has been recorded, its blockhash is registered with the bank. // Once an entry has been recorded, its blockhash is registered with the bank.
let exit = Arc::new(AtomicBool::new(false)); let exit = Arc::new(AtomicBool::new(false));
let my_pubkey = cluster_info.read().unwrap().id();
// Many banks that process transactions in parallel. // Many banks that process transactions in parallel.
let bank_thread_hdls: Vec<JoinHandle<()>> = (0..num_threads) let bank_thread_hdls: Vec<JoinHandle<()>> = (0..num_threads)
.map(|i| { .map(|i| {
@@ -104,7 +105,6 @@ impl BankingStage {
.name("solana-banking-stage-tx".to_string()) .name("solana-banking-stage-tx".to_string())
.spawn(move || { .spawn(move || {
Self::process_loop( Self::process_loop(
my_pubkey,
&verified_receiver, &verified_receiver,
&poh_recorder, &poh_recorder,
&cluster_info, &cluster_info,
@@ -242,13 +242,14 @@ impl BankingStage {
} }
fn process_buffered_packets( fn process_buffered_packets(
my_pubkey: &Pubkey,
socket: &std::net::UdpSocket, socket: &std::net::UdpSocket,
poh_recorder: &Arc<Mutex<PohRecorder>>, poh_recorder: &Arc<Mutex<PohRecorder>>,
cluster_info: &Arc<RwLock<ClusterInfo>>, cluster_info: &Arc<RwLock<ClusterInfo>>,
buffered_packets: &mut Vec<PacketsAndOffsets>, buffered_packets: &mut Vec<PacketsAndOffsets>,
enable_forwarding: bool, enable_forwarding: bool,
) -> Result<()> { ) -> Result<()> {
let rcluster_info = cluster_info.read().unwrap();
let (decision, next_leader) = { let (decision, next_leader) = {
let poh = poh_recorder.lock().unwrap(); let poh = poh_recorder.lock().unwrap();
let next_leader = poh.next_slot_leader(); let next_leader = poh.next_slot_leader();
@@ -257,7 +258,7 @@ impl BankingStage {
next_leader, next_leader,
poh.bank().is_some(), poh.bank().is_some(),
poh.would_be_leader(DEFAULT_TICKS_PER_SLOT * 2), poh.would_be_leader(DEFAULT_TICKS_PER_SLOT * 2),
my_pubkey, &rcluster_info.id(),
), ),
next_leader, next_leader,
) )
@@ -265,31 +266,28 @@ impl BankingStage {
match decision { match decision {
BufferedPacketsDecision::Consume => { BufferedPacketsDecision::Consume => {
let mut unprocessed = let mut unprocessed = Self::consume_buffered_packets(
Self::consume_buffered_packets(my_pubkey, poh_recorder, buffered_packets)?; &rcluster_info.id(),
poh_recorder,
buffered_packets,
)?;
buffered_packets.append(&mut unprocessed); buffered_packets.append(&mut unprocessed);
Ok(()) Ok(())
} }
BufferedPacketsDecision::Forward => { BufferedPacketsDecision::Forward => {
if enable_forwarding { if enable_forwarding {
next_leader.map_or(Ok(()), |leader_pubkey| { next_leader.map_or(Ok(()), |leader_pubkey| {
let leader_addr = { rcluster_info
cluster_info .lookup(&leader_pubkey)
.read() .map_or(Ok(()), |leader| {
.unwrap() let _ = Self::forward_buffered_packets(
.lookup(&leader_pubkey) &socket,
.map(|leader| leader.tpu_via_blobs) &leader.tpu_via_blobs,
}; &buffered_packets,
);
leader_addr.map_or(Ok(()), |leader_addr| { buffered_packets.clear();
let _ = Self::forward_buffered_packets( Ok(())
&socket, })
&leader_addr,
&buffered_packets,
);
buffered_packets.clear();
Ok(())
})
}) })
} else { } else {
buffered_packets.clear(); buffered_packets.clear();
@@ -301,7 +299,6 @@ impl BankingStage {
} }
pub fn process_loop( pub fn process_loop(
my_pubkey: Pubkey,
verified_receiver: &Arc<Mutex<Receiver<VerifiedPackets>>>, verified_receiver: &Arc<Mutex<Receiver<VerifiedPackets>>>,
poh_recorder: &Arc<Mutex<PohRecorder>>, poh_recorder: &Arc<Mutex<PohRecorder>>,
cluster_info: &Arc<RwLock<ClusterInfo>>, cluster_info: &Arc<RwLock<ClusterInfo>>,
@@ -314,7 +311,6 @@ impl BankingStage {
loop { loop {
if !buffered_packets.is_empty() { if !buffered_packets.is_empty() {
Self::process_buffered_packets( Self::process_buffered_packets(
&my_pubkey,
&socket, &socket,
poh_recorder, poh_recorder,
cluster_info, cluster_info,
@@ -335,11 +331,11 @@ impl BankingStage {
}; };
match Self::process_packets( match Self::process_packets(
&my_pubkey,
&verified_receiver, &verified_receiver,
&poh_recorder, &poh_recorder,
recv_start, recv_start,
recv_timeout, recv_timeout,
cluster_info,
id, id,
) { ) {
Err(Error::RecvTimeoutError(RecvTimeoutError::Timeout)) => (), Err(Error::RecvTimeoutError(RecvTimeoutError::Timeout)) => (),
@@ -374,23 +370,26 @@ impl BankingStage {
.collect() .collect()
} }
fn record_transactions( fn record_transactions<'a, 'b>(
bank_slot: u64, bank: &'a Bank,
txs: &[Transaction], txs: &'b [Transaction],
results: &[transaction::Result<()>], results: &[transaction::Result<()>],
poh: &Arc<Mutex<PohRecorder>>, poh: &Arc<Mutex<PohRecorder>>,
) -> Result<()> { recordable_txs: &'b mut Vec<&'b Transaction>,
) -> Result<LockedAccountsResults<'a, 'b, &'b Transaction>> {
let processed_transactions: Vec<_> = results let processed_transactions: Vec<_> = results
.iter() .iter()
.zip(txs.iter()) .zip(txs.iter())
.filter_map(|(r, x)| { .filter_map(|(r, x)| {
if Bank::can_commit(r) { if Bank::can_commit(r) {
recordable_txs.push(x);
Some(x.clone()) Some(x.clone())
} else { } else {
None None
} }
}) })
.collect(); .collect();
let record_locks = bank.lock_record_accounts(recordable_txs);
debug!("processed: {} ", processed_transactions.len()); debug!("processed: {} ", processed_transactions.len());
// unlock all the accounts with errors which are filtered by the above `filter_map` // unlock all the accounts with errors which are filtered by the above `filter_map`
if !processed_transactions.is_empty() { if !processed_transactions.is_empty() {
@@ -402,16 +401,16 @@ impl BankingStage {
// record and unlock will unlock all the successful transactions // record and unlock will unlock all the successful transactions
poh.lock() poh.lock()
.unwrap() .unwrap()
.record(bank_slot, hash, processed_transactions)?; .record(bank.slot(), hash, processed_transactions)?;
} }
Ok(()) Ok(record_locks)
} }
fn process_and_record_transactions_locked( fn process_and_record_transactions_locked(
bank: &Bank, bank: &Bank,
txs: &[Transaction], txs: &[Transaction],
poh: &Arc<Mutex<PohRecorder>>, poh: &Arc<Mutex<PohRecorder>>,
lock_results: &LockedAccountsResults, lock_results: &LockedAccountsResults<Transaction>,
) -> Result<()> { ) -> Result<()> {
let now = Instant::now(); let now = Instant::now();
// Use a shorter maximum age when adding transactions into the pipeline. This will reduce // Use a shorter maximum age when adding transactions into the pipeline. This will reduce
@@ -424,10 +423,12 @@ impl BankingStage {
let freeze_lock = bank.freeze_lock(); let freeze_lock = bank.freeze_lock();
let record_time = { let mut recordable_txs = vec![];
let (record_time, record_locks) = {
let now = Instant::now(); let now = Instant::now();
Self::record_transactions(bank.slot(), txs, &results, poh)?; let record_locks =
now.elapsed() Self::record_transactions(bank, txs, &results, poh, &mut recordable_txs)?;
(now.elapsed(), record_locks)
}; };
let commit_time = { let commit_time = {
@@ -436,6 +437,7 @@ impl BankingStage {
now.elapsed() now.elapsed()
}; };
drop(record_locks);
drop(freeze_lock); drop(freeze_lock);
debug!( debug!(
@@ -699,11 +701,11 @@ impl BankingStage {
/// Process the incoming packets /// Process the incoming packets
pub fn process_packets( pub fn process_packets(
my_pubkey: &Pubkey,
verified_receiver: &Arc<Mutex<Receiver<VerifiedPackets>>>, verified_receiver: &Arc<Mutex<Receiver<VerifiedPackets>>>,
poh: &Arc<Mutex<PohRecorder>>, poh: &Arc<Mutex<PohRecorder>>,
recv_start: &mut Instant, recv_start: &mut Instant,
recv_timeout: Duration, recv_timeout: Duration,
cluster_info: &Arc<RwLock<ClusterInfo>>,
id: u32, id: u32,
) -> Result<UnprocessedPackets> { ) -> Result<UnprocessedPackets> {
let mms = verified_receiver let mms = verified_receiver
@@ -745,6 +747,7 @@ impl BankingStage {
if processed < verified_txs_len { if processed < verified_txs_len {
let next_leader = poh.lock().unwrap().next_slot_leader(); let next_leader = poh.lock().unwrap().next_slot_leader();
let my_pubkey = cluster_info.read().unwrap().id();
// Walk thru rest of the transactions and filter out the invalid (e.g. too old) ones // Walk thru rest of the transactions and filter out the invalid (e.g. too old) ones
while let Some((msgs, vers)) = mms_iter.next() { while let Some((msgs, vers)) = mms_iter.next() {
let packet_indexes = Self::generate_packet_indexes(vers); let packet_indexes = Self::generate_packet_indexes(vers);
@@ -1180,8 +1183,14 @@ mod tests {
]; ];
let mut results = vec![Ok(()), Ok(())]; let mut results = vec![Ok(()), Ok(())];
BankingStage::record_transactions(bank.slot(), &transactions, &results, &poh_recorder) BankingStage::record_transactions(
.unwrap(); &bank,
&transactions,
&results,
&poh_recorder,
&mut vec![],
)
.unwrap();
let (_, entries) = entry_receiver.recv().unwrap(); let (_, entries) = entry_receiver.recv().unwrap();
assert_eq!(entries[0].0.transactions.len(), transactions.len()); assert_eq!(entries[0].0.transactions.len(), transactions.len());
@@ -1190,15 +1199,27 @@ mod tests {
1, 1,
InstructionError::new_result_with_negative_lamports(), InstructionError::new_result_with_negative_lamports(),
)); ));
BankingStage::record_transactions(bank.slot(), &transactions, &results, &poh_recorder) BankingStage::record_transactions(
.unwrap(); &bank,
&transactions,
&results,
&poh_recorder,
&mut vec![],
)
.unwrap();
let (_, entries) = entry_receiver.recv().unwrap(); let (_, entries) = entry_receiver.recv().unwrap();
assert_eq!(entries[0].0.transactions.len(), transactions.len()); assert_eq!(entries[0].0.transactions.len(), transactions.len());
// Other TransactionErrors should not be recorded // Other TransactionErrors should not be recorded
results[0] = Err(TransactionError::AccountNotFound); results[0] = Err(TransactionError::AccountNotFound);
BankingStage::record_transactions(bank.slot(), &transactions, &results, &poh_recorder) BankingStage::record_transactions(
.unwrap(); &bank,
&transactions,
&results,
&poh_recorder,
&mut vec![],
)
.unwrap();
let (_, entries) = entry_receiver.recv().unwrap(); let (_, entries) = entry_receiver.recv().unwrap();
assert_eq!(entries[0].0.transactions.len(), transactions.len() - 1); assert_eq!(entries[0].0.transactions.len(), transactions.len() - 1);
} }

View File

@@ -10,6 +10,10 @@ use serde_json::json;
use solana_sdk::hash::Hash; use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey; use solana_sdk::pubkey::Pubkey;
use std::cell::RefCell; use std::cell::RefCell;
use std::io::prelude::*;
use std::net::Shutdown;
use std::os::unix::net::UnixStream;
use std::path::Path;
pub trait EntryWriter: std::fmt::Debug { pub trait EntryWriter: std::fmt::Debug {
fn write(&self, payload: String) -> Result<()>; fn write(&self, payload: String) -> Result<()>;
@@ -44,29 +48,16 @@ pub struct EntrySocket {
socket: String, socket: String,
} }
const MESSAGE_TERMINATOR: &str = "\n";
impl EntryWriter for EntrySocket { impl EntryWriter for EntrySocket {
#[cfg(not(windows))]
fn write(&self, payload: String) -> Result<()> { fn write(&self, payload: String) -> Result<()> {
use std::io::prelude::*;
use std::net::Shutdown;
use std::os::unix::net::UnixStream;
use std::path::Path;
const MESSAGE_TERMINATOR: &str = "\n";
let mut socket = UnixStream::connect(Path::new(&self.socket))?; let mut socket = UnixStream::connect(Path::new(&self.socket))?;
socket.write_all(payload.as_bytes())?; socket.write_all(payload.as_bytes())?;
socket.write_all(MESSAGE_TERMINATOR.as_bytes())?; socket.write_all(MESSAGE_TERMINATOR.as_bytes())?;
socket.shutdown(Shutdown::Write)?; socket.shutdown(Shutdown::Write)?;
Ok(()) Ok(())
} }
#[cfg(windows)]
fn write(&self, _payload: String) -> Result<()> {
Err(crate::result::Error::from(std::io::Error::new(
std::io::ErrorKind::Other,
"EntryWriter::write() not implemented for windows",
)))
}
} }
pub trait BlockstreamEvents { pub trait BlockstreamEvents {
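The EntrySocket writer above is a plain Unix-domain-socket protocol: connect, write the payload, append a newline terminator, shut down the write half. A Unix-only sketch of that sequence using only std, with a placeholder socket path that must already have a listener:

use std::io::prelude::*;
use std::net::Shutdown;
use std::os::unix::net::UnixStream;

fn write_payload(socket_path: &str, payload: &str) -> std::io::Result<()> {
    let mut socket = UnixStream::connect(socket_path)?;
    socket.write_all(payload.as_bytes())?;
    socket.write_all(b"\n")?; // message terminator
    socket.shutdown(Shutdown::Write)?;
    Ok(())
}

fn main() {
    // Errors unless a listener exists at this placeholder path.
    if let Err(e) = write_payload("/tmp/blockstream.sock", "{\"dt\":\"...\"}") {
        eprintln!("write failed: {}", e);
    }
}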

View File

@@ -11,7 +11,7 @@ use solana_kvstore as kvstore;
use bincode::deserialize; use bincode::deserialize;
use std::collections::HashMap; use hashbrown::HashMap;
#[cfg(not(feature = "kvstore"))] #[cfg(not(feature = "kvstore"))]
use rocksdb; use rocksdb;
@@ -84,7 +84,6 @@ pub struct Blocktree {
db: Arc<Database>, db: Arc<Database>,
meta_cf: LedgerColumn<cf::SlotMeta>, meta_cf: LedgerColumn<cf::SlotMeta>,
data_cf: LedgerColumn<cf::Data>, data_cf: LedgerColumn<cf::Data>,
dead_slots_cf: LedgerColumn<cf::DeadSlots>,
erasure_cf: LedgerColumn<cf::Coding>, erasure_cf: LedgerColumn<cf::Coding>,
erasure_meta_cf: LedgerColumn<cf::ErasureMeta>, erasure_meta_cf: LedgerColumn<cf::ErasureMeta>,
orphans_cf: LedgerColumn<cf::Orphans>, orphans_cf: LedgerColumn<cf::Orphans>,
@@ -98,8 +97,6 @@ pub struct Blocktree {
pub const META_CF: &str = "meta"; pub const META_CF: &str = "meta";
// Column family for the data in a leader slot // Column family for the data in a leader slot
pub const DATA_CF: &str = "data"; pub const DATA_CF: &str = "data";
// Column family for slots that have been marked as dead
pub const DEAD_SLOTS_CF: &str = "dead_slots";
// Column family for erasure data // Column family for erasure data
pub const ERASURE_CF: &str = "erasure"; pub const ERASURE_CF: &str = "erasure";
pub const ERASURE_META_CF: &str = "erasure_meta"; pub const ERASURE_META_CF: &str = "erasure_meta";
@@ -127,9 +124,6 @@ impl Blocktree {
// Create the data column family // Create the data column family
let data_cf = db.column(); let data_cf = db.column();
// Create the dead slots column family
let dead_slots_cf = db.column();
// Create the erasure column family // Create the erasure column family
let erasure_cf = db.column(); let erasure_cf = db.column();
@@ -149,7 +143,6 @@ impl Blocktree {
db, db,
meta_cf, meta_cf,
data_cf, data_cf,
dead_slots_cf,
erasure_cf, erasure_cf,
erasure_meta_cf, erasure_meta_cf,
orphans_cf, orphans_cf,
@@ -184,15 +177,6 @@ impl Blocktree {
self.meta_cf.get(slot) self.meta_cf.get(slot)
} }
pub fn is_full(&self, slot: u64) -> bool {
if let Ok(meta) = self.meta_cf.get(slot) {
if let Some(meta) = meta {
return meta.is_full();
}
}
false
}
pub fn erasure_meta(&self, slot: u64, set_index: u64) -> Result<Option<ErasureMeta>> { pub fn erasure_meta(&self, slot: u64, set_index: u64) -> Result<Option<ErasureMeta>> {
self.erasure_meta_cf.get((slot, set_index)) self.erasure_meta_cf.get((slot, set_index))
} }
@@ -815,17 +799,7 @@ impl Blocktree {
let result: HashMap<u64, Vec<u64>> = slots let result: HashMap<u64, Vec<u64>> = slots
.iter() .iter()
.zip(slot_metas) .zip(slot_metas)
.filter_map(|(height, meta)| { .filter_map(|(height, meta)| meta.map(|meta| (*height, meta.next_slots)))
meta.map(|meta| {
let valid_next_slots: Vec<u64> = meta
.next_slots
.iter()
.cloned()
.filter(|s| !self.is_dead(*s))
.collect();
(*height, valid_next_slots)
})
})
.collect(); .collect();
Ok(result) Ok(result)
@@ -844,12 +818,18 @@ impl Blocktree {
} }
} }
pub fn set_roots(&self, rooted_slots: &[u64]) -> Result<()> { pub fn set_root(&self, new_root: u64, prev_root: u64) -> Result<()> {
let mut current_slot = new_root;
unsafe { unsafe {
let mut batch_processor = self.db.batch_processor(); let mut batch_processor = self.db.batch_processor();
let mut write_batch = batch_processor.batch()?; let mut write_batch = batch_processor.batch()?;
for slot in rooted_slots { if new_root == 0 {
write_batch.put::<cf::Root>(*slot, &true)?; write_batch.put::<cf::Root>(0, &true)?;
} else {
while current_slot != prev_root {
write_batch.put::<cf::Root>(current_slot, &true)?;
current_slot = self.meta(current_slot).unwrap().unwrap().parent_slot;
}
} }
batch_processor.write(write_batch)?; batch_processor.write(write_batch)?;
@@ -857,22 +837,6 @@ impl Blocktree {
Ok(()) Ok(())
} }
pub fn is_dead(&self, slot: u64) -> bool {
if let Some(true) = self
.db
.get::<cf::DeadSlots>(slot)
.expect("fetch from DeadSlots column family failed")
{
true
} else {
false
}
}
pub fn set_dead_slot(&self, slot: u64) -> Result<()> {
self.dead_slots_cf.put(slot, &true)
}
pub fn get_orphans(&self, max: Option<usize>) -> Vec<u64> { pub fn get_orphans(&self, max: Option<usize>) -> Vec<u64> {
let mut results = vec![]; let mut results = vec![];
@@ -3164,12 +3128,30 @@ pub mod tests {
} }
#[test] #[test]
fn test_set_roots() { fn test_set_root() {
let blocktree_path = get_tmp_ledger_path!(); let blocktree_path = get_tmp_ledger_path!();
let blocktree = Blocktree::open(&blocktree_path).unwrap(); let blocktree = Blocktree::open(&blocktree_path).unwrap();
blocktree.set_root(0, 0).unwrap();
let chained_slots = vec![0, 2, 4, 7, 12, 15]; let chained_slots = vec![0, 2, 4, 7, 12, 15];
blocktree.set_roots(&chained_slots).unwrap(); // Make a chain of slots
let all_blobs = make_chaining_slot_entries(&chained_slots, 10);
// Insert the chain of slots into the ledger
for (slot_blobs, _) in all_blobs {
blocktree.insert_data_blobs(&slot_blobs[..]).unwrap();
}
blocktree.set_root(4, 0).unwrap();
for i in &chained_slots[0..3] {
assert!(blocktree.is_root(*i));
}
for i in &chained_slots[3..] {
assert!(!blocktree.is_root(*i));
}
blocktree.set_root(15, 4).unwrap();
for i in chained_slots { for i in chained_slots {
assert!(blocktree.is_root(i)); assert!(blocktree.is_root(i));

View File

@@ -28,10 +28,6 @@ pub mod columns {
/// Data Column /// Data Column
pub struct Data; pub struct Data;
#[derive(Debug)]
/// Data Column
pub struct DeadSlots;
#[derive(Debug)] #[derive(Debug)]
/// The erasure meta column /// The erasure meta column
pub struct ErasureMeta; pub struct ErasureMeta;

View File

@@ -100,25 +100,6 @@ impl Column<Kvs> for cf::Data {
} }
} }
impl Column<Kvs> for cf::DeadSlots {
const NAME: &'static str = super::DEAD_SLOTS;
type Index = u64;
fn key(slot: u64) -> Key {
let mut key = Key::default();
BigEndian::write_u64(&mut key.0[8..16], slot);
key
}
fn index(key: &Key) -> u64 {
BigEndian::read_u64(&key.0[8..16])
}
}
impl TypedColumn<Kvs> for cf::Root {
type Type = bool;
}
impl Column<Kvs> for cf::Orphans { impl Column<Kvs> for cf::Orphans {
const NAME: &'static str = super::ORPHANS_CF; const NAME: &'static str = super::ORPHANS_CF;
type Index = u64; type Index = u64;

View File

@@ -30,7 +30,7 @@ impl Backend for Rocks {
type Error = rocksdb::Error; type Error = rocksdb::Error;
fn open(path: &Path) -> Result<Rocks> { fn open(path: &Path) -> Result<Rocks> {
use crate::blocktree::db::columns::{Coding, Data, DeadSlots, ErasureMeta, Orphans, Root, SlotMeta}; use crate::blocktree::db::columns::{Coding, Data, ErasureMeta, Orphans, Root, SlotMeta};
fs::create_dir_all(&path)?; fs::create_dir_all(&path)?;
@@ -40,7 +40,6 @@ impl Backend for Rocks {
// Column family names // Column family names
let meta_cf_descriptor = ColumnFamilyDescriptor::new(SlotMeta::NAME, get_cf_options()); let meta_cf_descriptor = ColumnFamilyDescriptor::new(SlotMeta::NAME, get_cf_options());
let data_cf_descriptor = ColumnFamilyDescriptor::new(Data::NAME, get_cf_options()); let data_cf_descriptor = ColumnFamilyDescriptor::new(Data::NAME, get_cf_options());
let dead_slots_cf_descriptor = ColumnFamilyDescriptor::new(DeadSlots::NAME, get_cf_options());
let erasure_cf_descriptor = ColumnFamilyDescriptor::new(Coding::NAME, get_cf_options()); let erasure_cf_descriptor = ColumnFamilyDescriptor::new(Coding::NAME, get_cf_options());
let erasure_meta_cf_descriptor = let erasure_meta_cf_descriptor =
ColumnFamilyDescriptor::new(ErasureMeta::NAME, get_cf_options()); ColumnFamilyDescriptor::new(ErasureMeta::NAME, get_cf_options());
@@ -50,7 +49,6 @@ impl Backend for Rocks {
let cfs = vec![ let cfs = vec![
meta_cf_descriptor, meta_cf_descriptor,
data_cf_descriptor, data_cf_descriptor,
dead_slots_cf_descriptor,
erasure_cf_descriptor, erasure_cf_descriptor,
erasure_meta_cf_descriptor, erasure_meta_cf_descriptor,
orphans_cf_descriptor, orphans_cf_descriptor,
@@ -64,12 +62,11 @@ impl Backend for Rocks {
} }
fn columns(&self) -> Vec<&'static str> { fn columns(&self) -> Vec<&'static str> {
use crate::blocktree::db::columns::{Coding, Data, DeadSlots, ErasureMeta, Orphans, Root, SlotMeta}; use crate::blocktree::db::columns::{Coding, Data, ErasureMeta, Orphans, Root, SlotMeta};
vec![ vec![
Coding::NAME, Coding::NAME,
ErasureMeta::NAME, ErasureMeta::NAME,
DeadSlots::NAME,
Data::NAME, Data::NAME,
Orphans::NAME, Orphans::NAME,
Root::NAME, Root::NAME,
@@ -164,25 +161,6 @@ impl Column<Rocks> for cf::Data {
} }
} }
impl Column<Rocks> for cf::DeadSlots {
const NAME: &'static str = super::DEAD_SLOTS_CF;
type Index = u64;
fn key(slot: u64) -> Vec<u8> {
let mut key = vec![0; 8];
BigEndian::write_u64(&mut key[..], slot);
key
}
fn index(key: &[u8]) -> u64 {
BigEndian::read_u64(&key[..8])
}
}
impl TypedColumn<Rocks> for cf::DeadSlots {
type Type = bool;
}
impl Column<Rocks> for cf::Orphans { impl Column<Rocks> for cf::Orphans {
const NAME: &'static str = super::ORPHANS_CF; const NAME: &'static str = super::ORPHANS_CF;
type Index = u64; type Index = u64;
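Both backends encode u64 column indexes big-endian, so that byte-wise key order in the store matches numeric slot order. A sketch of that property using std's to_be_bytes/from_be_bytes in place of the byteorder crate (try_into is in the Rust 2021 prelude):

fn key(slot: u64) -> [u8; 8] {
    slot.to_be_bytes()
}

fn index(key: &[u8]) -> u64 {
    u64::from_be_bytes(key[..8].try_into().unwrap())
}

fn main() {
    // Round trip.
    assert_eq!(index(&key(42)), 42);
    // Byte-wise key order agrees with numeric slot order.
    assert!(key(1) < key(2) && key(255) < key(256));
}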

View File

@@ -51,7 +51,7 @@ mod tests {
fn test_rooted_slot_iterator() { fn test_rooted_slot_iterator() {
let blocktree_path = get_tmp_ledger_path("test_rooted_slot_iterator"); let blocktree_path = get_tmp_ledger_path("test_rooted_slot_iterator");
let blocktree = Blocktree::open(&blocktree_path).unwrap(); let blocktree = Blocktree::open(&blocktree_path).unwrap();
blocktree.set_roots(&[0]).unwrap(); blocktree.set_root(0, 0).unwrap();
let ticks_per_slot = 5; let ticks_per_slot = 5;
/* /*
Build a blocktree in the ledger with the following fork structure: Build a blocktree in the ledger with the following fork structure:
@@ -98,7 +98,7 @@ mod tests {
fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 4, fork_point, fork_hash); fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 4, fork_point, fork_hash);
// Set a root // Set a root
blocktree.set_roots(&[1, 2, 3]).unwrap(); blocktree.set_root(3, 0).unwrap();
// Trying to get an iterator on a different fork will error // Trying to get an iterator on a different fork will error
assert!(RootedSlotIterator::new(4, &blocktree).is_err()); assert!(RootedSlotIterator::new(4, &blocktree).is_err());

View File

@@ -3,7 +3,6 @@ use crate::blocktree::Blocktree;
use crate::entry::{Entry, EntrySlice}; use crate::entry::{Entry, EntrySlice};
use crate::leader_schedule_cache::LeaderScheduleCache; use crate::leader_schedule_cache::LeaderScheduleCache;
use rayon::prelude::*; use rayon::prelude::*;
use rayon::ThreadPool;
use solana_metrics::{datapoint, datapoint_error, inc_new_counter_debug}; use solana_metrics::{datapoint, datapoint_error, inc_new_counter_debug};
use solana_runtime::bank::Bank; use solana_runtime::bank::Bank;
use solana_runtime::locked_accounts_results::LockedAccountsResults; use solana_runtime::locked_accounts_results::LockedAccountsResults;
@@ -11,18 +10,11 @@ use solana_sdk::genesis_block::GenesisBlock;
use solana_sdk::timing::duration_as_ms; use solana_sdk::timing::duration_as_ms;
use solana_sdk::timing::MAX_RECENT_BLOCKHASHES; use solana_sdk::timing::MAX_RECENT_BLOCKHASHES;
use solana_sdk::transaction::Result; use solana_sdk::transaction::Result;
use solana_sdk::transaction::Transaction;
use std::result; use std::result;
use std::sync::Arc; use std::sync::Arc;
use std::time::{Duration, Instant}; use std::time::{Duration, Instant};
pub const NUM_THREADS: u32 = 10;
use std::cell::RefCell;
thread_local!(static PAR_THREAD_POOL: RefCell<ThreadPool> = RefCell::new(rayon::ThreadPoolBuilder::new()
.num_threads(sys_info::cpu_num().unwrap_or(NUM_THREADS) as usize)
.build()
.unwrap()));
fn first_err(results: &[Result<()>]) -> Result<()> { fn first_err(results: &[Result<()>]) -> Result<()> {
for r in results { for r in results {
if r.is_err() { if r.is_err() {
@@ -32,38 +24,37 @@ fn first_err(results: &[Result<()>]) -> Result<()> {
Ok(()) Ok(())
} }
fn par_execute_entries(bank: &Bank, entries: &[(&Entry, LockedAccountsResults)]) -> Result<()> { fn par_execute_entries(
bank: &Bank,
entries: &[(&Entry, LockedAccountsResults<Transaction>)],
) -> Result<()> {
inc_new_counter_debug!("bank-par_execute_entries-count", entries.len()); inc_new_counter_debug!("bank-par_execute_entries-count", entries.len());
let results: Vec<Result<()>> = PAR_THREAD_POOL.with(|thread_pool| { let results: Vec<Result<()>> = entries
thread_pool.borrow().install(|| { .into_par_iter()
entries .map(|(e, locked_accounts)| {
.into_par_iter() let results = bank.load_execute_and_commit_transactions(
.map(|(e, locked_accounts)| { &e.transactions,
let results = bank.load_execute_and_commit_transactions( locked_accounts,
&e.transactions, MAX_RECENT_BLOCKHASHES,
locked_accounts, );
MAX_RECENT_BLOCKHASHES, let mut first_err = None;
); for (r, tx) in results.iter().zip(e.transactions.iter()) {
let mut first_err = None; if let Err(ref e) = r {
for (r, tx) in results.iter().zip(e.transactions.iter()) { if first_err.is_none() {
if let Err(ref e) = r { first_err = Some(r.clone());
if first_err.is_none() {
first_err = Some(r.clone());
}
if !Bank::can_commit(&r) {
warn!("Unexpected validator error: {:?}, tx: {:?}", e, tx);
datapoint_error!(
"validator_process_entry_error",
("error", format!("error: {:?}, tx: {:?}", e, tx), String)
);
}
}
} }
first_err.unwrap_or(Ok(())) if !Bank::can_commit(&r) {
}) warn!("Unexpected validator error: {:?}, tx: {:?}", e, tx);
.collect() datapoint_error!(
"validator_process_entry_error",
("error", format!("error: {:?}, tx: {:?}", e, tx), String)
);
}
}
}
first_err.unwrap_or(Ok(()))
}) })
}); .collect();
first_err(&results) first_err(&results)
} }
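par_execute_entries follows a common pattern in both versions: execute entries in parallel, collect one Result per entry, then surface the first error only after the parallel pass completes. A minimal sketch of that pattern, assuming the rayon crate and strings in place of real transaction errors:

use rayon::prelude::*;

fn first_err(results: &[Result<(), String>]) -> Result<(), String> {
    for r in results {
        if r.is_err() {
            return r.clone();
        }
    }
    Ok(())
}

fn main() {
    let entries: Vec<u64> = vec![1, 2, 3, 4];
    // Execute in parallel, collecting one Result per entry.
    let results: Vec<Result<(), String>> = entries
        .into_par_iter()
        .map(|e| if e == 3 { Err(format!("entry {} failed", e)) } else { Ok(()) })
        .collect();
    // Only after the whole parallel pass is the first error surfaced.
    assert_eq!(first_err(&results), Err("entry 3 failed".to_string()));
}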
@@ -164,7 +155,7 @@ pub fn process_blocktree(
vec![(slot, meta, bank, entry_height, last_entry_hash)] vec![(slot, meta, bank, entry_height, last_entry_hash)]
}; };
blocktree.set_roots(&[0]).expect("Couldn't set first root"); blocktree.set_root(0, 0).expect("Couldn't set first root");
let leader_schedule_cache = LeaderScheduleCache::new(*pending_slots[0].2.epoch_schedule(), 0); let leader_schedule_cache = LeaderScheduleCache::new(*pending_slots[0].2.epoch_schedule(), 0);
@@ -429,7 +420,7 @@ pub mod tests {
info!("last_fork1_entry.hash: {:?}", last_fork1_entry_hash); info!("last_fork1_entry.hash: {:?}", last_fork1_entry_hash);
info!("last_fork2_entry.hash: {:?}", last_fork2_entry_hash); info!("last_fork2_entry.hash: {:?}", last_fork2_entry_hash);
blocktree.set_roots(&[4, 1, 0]).unwrap(); blocktree.set_root(4, 0).unwrap();
let (bank_forks, bank_forks_info, _) = let (bank_forks, bank_forks_info, _) =
process_blocktree(&genesis_block, &blocktree, None).unwrap(); process_blocktree(&genesis_block, &blocktree, None).unwrap();
@@ -503,7 +494,8 @@ pub mod tests {
info!("last_fork1_entry.hash: {:?}", last_fork1_entry_hash); info!("last_fork1_entry.hash: {:?}", last_fork1_entry_hash);
info!("last_fork2_entry.hash: {:?}", last_fork2_entry_hash); info!("last_fork2_entry.hash: {:?}", last_fork2_entry_hash);
blocktree.set_roots(&[0, 1]).unwrap(); blocktree.set_root(0, 0).unwrap();
blocktree.set_root(1, 0).unwrap();
let (bank_forks, bank_forks_info, _) = let (bank_forks, bank_forks_info, _) =
process_blocktree(&genesis_block, &blocktree, None).unwrap(); process_blocktree(&genesis_block, &blocktree, None).unwrap();
@@ -579,11 +571,10 @@ pub mod tests {
} }
// Set a root on the last slot of the last confirmed epoch // Set a root on the last slot of the last confirmed epoch
let rooted_slots: Vec<_> = (0..=last_slot).collect(); blocktree.set_root(last_slot, 0).unwrap();
blocktree.set_roots(&rooted_slots).unwrap();
// Set a root on the next slot of the confirmed epoch // Set a root on the next slot of the confirmed epoch
blocktree.set_roots(&[last_slot + 1]).unwrap(); blocktree.set_root(last_slot + 1, last_slot).unwrap();
// Check that we can properly restart the ledger / leader scheduler doesn't fail // Check that we can properly restart the ledger / leader scheduler doesn't fail
let (bank_forks, bank_forks_info, _) = let (bank_forks, bank_forks_info, _) =

View File

@@ -1,87 +1,183 @@
//! A stage to broadcast data from a leader node to validators //! A stage to broadcast data from a leader node to validators
use self::fail_entry_verification_broadcast_run::FailEntryVerificationBroadcastRun; //!
use self::standard_broadcast_run::StandardBroadcastRun;
use crate::blocktree::Blocktree; use crate::blocktree::Blocktree;
use crate::cluster_info::{ClusterInfo, ClusterInfoError}; use crate::cluster_info::{ClusterInfo, ClusterInfoError, DATA_PLANE_FANOUT};
use crate::entry::EntrySlice;
use crate::erasure::CodingGenerator; use crate::erasure::CodingGenerator;
use crate::packet::index_blobs_with_genesis;
use crate::poh_recorder::WorkingBankEntries; use crate::poh_recorder::WorkingBankEntries;
use crate::result::{Error, Result}; use crate::result::{Error, Result};
use crate::service::Service; use crate::service::Service;
use crate::staking_utils; use crate::staking_utils;
use rayon::ThreadPool; use rayon::prelude::*;
use solana_metrics::{ use solana_metrics::{
datapoint, inc_new_counter_debug, inc_new_counter_error, inc_new_counter_info, datapoint, inc_new_counter_debug, inc_new_counter_error, inc_new_counter_info,
inc_new_counter_warn,
}; };
use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::timing::duration_as_ms; use solana_sdk::timing::duration_as_ms;
use std::net::UdpSocket; use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{Receiver, RecvTimeoutError}; use std::sync::mpsc::{Receiver, RecvTimeoutError};
use std::sync::{Arc, RwLock}; use std::sync::{Arc, RwLock};
use std::thread::{self, Builder, JoinHandle}; use std::thread::{self, Builder, JoinHandle};
use std::time::Instant; use std::time::{Duration, Instant};
mod broadcast_utils;
mod fail_entry_verification_broadcast_run;
mod standard_broadcast_run;
pub const NUM_THREADS: u32 = 10;
#[derive(Debug, PartialEq, Eq, Clone)] #[derive(Debug, PartialEq, Eq, Clone)]
pub enum BroadcastStageReturnType { pub enum BroadcastStageReturnType {
ChannelDisconnected, ChannelDisconnected,
} }
#[derive(PartialEq, Clone, Debug)] #[derive(Default)]
pub enum BroadcastStageType { struct BroadcastStats {
Standard, num_entries: Vec<usize>,
FailEntryVerification, run_elapsed: Vec<u64>,
to_blobs_elapsed: Vec<u64>,
} }
impl BroadcastStageType { struct Broadcast {
pub fn new_broadcast_stage( id: Pubkey,
&self, coding_generator: CodingGenerator,
sock: UdpSocket, stats: BroadcastStats,
cluster_info: Arc<RwLock<ClusterInfo>>,
receiver: Receiver<WorkingBankEntries>,
exit_sender: &Arc<AtomicBool>,
blocktree: &Arc<Blocktree>,
) -> BroadcastStage {
match self {
BroadcastStageType::Standard => BroadcastStage::new(
sock,
cluster_info,
receiver,
exit_sender,
blocktree,
StandardBroadcastRun::new(),
),
BroadcastStageType::FailEntryVerification => BroadcastStage::new(
sock,
cluster_info,
receiver,
exit_sender,
blocktree,
FailEntryVerificationBroadcastRun::new(),
),
}
}
} }
trait BroadcastRun { impl Broadcast {
fn run( fn run(
&mut self, &mut self,
broadcast: &mut Broadcast,
cluster_info: &Arc<RwLock<ClusterInfo>>, cluster_info: &Arc<RwLock<ClusterInfo>>,
receiver: &Receiver<WorkingBankEntries>, receiver: &Receiver<WorkingBankEntries>,
sock: &UdpSocket, sock: &UdpSocket,
blocktree: &Arc<Blocktree>, blocktree: &Arc<Blocktree>,
) -> Result<()>; genesis_blockhash: &Hash,
} ) -> Result<()> {
let timer = Duration::new(1, 0);
let (mut bank, entries) = receiver.recv_timeout(timer)?;
let mut max_tick_height = bank.max_tick_height();
struct Broadcast { let run_start = Instant::now();
coding_generator: CodingGenerator, let mut num_entries = entries.len();
thread_pool: ThreadPool, let mut ventries = Vec::new();
let mut last_tick = entries.last().map(|v| v.1).unwrap_or(0);
ventries.push(entries);
assert!(last_tick <= max_tick_height);
if last_tick != max_tick_height {
while let Ok((same_bank, entries)) = receiver.try_recv() {
// If the bank changed, that implies the previous slot was interrupted and we do not have to
// broadcast its entries.
if same_bank.slot() != bank.slot() {
num_entries = 0;
ventries.clear();
bank = same_bank.clone();
max_tick_height = bank.max_tick_height();
}
num_entries += entries.len();
last_tick = entries.last().map(|v| v.1).unwrap_or(0);
ventries.push(entries);
assert!(last_tick <= max_tick_height,);
if last_tick == max_tick_height {
break;
}
}
}
let bank_epoch = bank.get_stakers_epoch(bank.slot());
let mut broadcast_table = cluster_info
.read()
.unwrap()
.sorted_tvu_peers(staking_utils::staked_nodes_at_epoch(&bank, bank_epoch).as_ref());
inc_new_counter_warn!("broadcast_service-num_peers", broadcast_table.len() + 1);
// Layer 1, leader nodes are limited to the fanout size.
broadcast_table.truncate(DATA_PLANE_FANOUT);
inc_new_counter_info!("broadcast_service-entries_received", num_entries);
let to_blobs_start = Instant::now();
let blobs: Vec<_> = ventries
.into_par_iter()
.map(|p| {
let entries: Vec<_> = p.into_iter().map(|e| e.0).collect();
entries.to_shared_blobs()
})
.flatten()
.collect();
let blob_index = blocktree
.meta(bank.slot())
.expect("Database error")
.map(|meta| meta.consumed)
.unwrap_or(0);
index_blobs_with_genesis(
&blobs,
&self.id,
genesis_blockhash,
blob_index,
bank.slot(),
bank.parent().map_or(0, |parent| parent.slot()),
);
let contains_last_tick = last_tick == max_tick_height;
if contains_last_tick {
blobs.last().unwrap().write().unwrap().set_is_last_in_slot();
}
blocktree.write_shared_blobs(&blobs)?;
let coding = self.coding_generator.next(&blobs);
let to_blobs_elapsed = duration_as_ms(&to_blobs_start.elapsed());
let broadcast_start = Instant::now();
// Send out data
ClusterInfo::broadcast(&self.id, contains_last_tick, &broadcast_table, sock, &blobs)?;
inc_new_counter_debug!("streamer-broadcast-sent", blobs.len());
// send out erasures
ClusterInfo::broadcast(&self.id, false, &broadcast_table, sock, &coding)?;
self.update_broadcast_stats(
duration_as_ms(&broadcast_start.elapsed()),
duration_as_ms(&run_start.elapsed()),
num_entries,
to_blobs_elapsed,
blob_index,
);
Ok(())
}
fn update_broadcast_stats(
&mut self,
broadcast_elapsed: u64,
run_elapsed: u64,
num_entries: usize,
to_blobs_elapsed: u64,
blob_index: u64,
) {
inc_new_counter_info!("broadcast_service-time_ms", broadcast_elapsed as usize);
self.stats.num_entries.push(num_entries);
self.stats.to_blobs_elapsed.push(to_blobs_elapsed);
self.stats.run_elapsed.push(run_elapsed);
if self.stats.num_entries.len() >= 16 {
info!(
"broadcast: entries: {:?} blob times ms: {:?} broadcast times ms: {:?}",
self.stats.num_entries, self.stats.to_blobs_elapsed, self.stats.run_elapsed
);
self.stats.num_entries.clear();
self.stats.to_blobs_elapsed.clear();
self.stats.run_elapsed.clear();
}
datapoint!("broadcast-service", ("transmit-index", blob_index, i64));
}
} }
// Implement a destructor for the BroadcastStage thread to signal it exited // Implement a destructor for the BroadcastStage thread to signal it exited
@@ -113,21 +209,20 @@ impl BroadcastStage {
cluster_info: &Arc<RwLock<ClusterInfo>>, cluster_info: &Arc<RwLock<ClusterInfo>>,
receiver: &Receiver<WorkingBankEntries>, receiver: &Receiver<WorkingBankEntries>,
blocktree: &Arc<Blocktree>, blocktree: &Arc<Blocktree>,
mut broadcast_stage_run: impl BroadcastRun, genesis_blockhash: &Hash,
) -> BroadcastStageReturnType { ) -> BroadcastStageReturnType {
let me = cluster_info.read().unwrap().my_data().clone();
let coding_generator = CodingGenerator::default(); let coding_generator = CodingGenerator::default();
let mut broadcast = Broadcast { let mut broadcast = Broadcast {
id: me.id,
coding_generator, coding_generator,
thread_pool: rayon::ThreadPoolBuilder::new() stats: BroadcastStats::default(),
.num_threads(sys_info::cpu_num().unwrap_or(NUM_THREADS) as usize)
.build()
.unwrap(),
}; };
loop { loop {
if let Err(e) = if let Err(e) =
broadcast_stage_run.run(&mut broadcast, &cluster_info, receiver, sock, blocktree) broadcast.run(&cluster_info, receiver, sock, blocktree, genesis_blockhash)
{ {
match e { match e {
Error::RecvTimeoutError(RecvTimeoutError::Disconnected) | Error::SendError => { Error::RecvTimeoutError(RecvTimeoutError::Disconnected) | Error::SendError => {
@@ -160,16 +255,17 @@ impl BroadcastStage {
/// which will then close FetchStage in the Tpu, and then the rest of the Tpu, /// which will then close FetchStage in the Tpu, and then the rest of the Tpu,
/// completing the cycle. /// completing the cycle.
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
fn new( pub fn new(
sock: UdpSocket, sock: UdpSocket,
cluster_info: Arc<RwLock<ClusterInfo>>, cluster_info: Arc<RwLock<ClusterInfo>>,
receiver: Receiver<WorkingBankEntries>, receiver: Receiver<WorkingBankEntries>,
exit_sender: &Arc<AtomicBool>, exit_sender: &Arc<AtomicBool>,
blocktree: &Arc<Blocktree>, blocktree: &Arc<Blocktree>,
broadcast_stage_run: impl BroadcastRun + Send + 'static, genesis_blockhash: &Hash,
) -> Self { ) -> Self {
let blocktree = blocktree.clone(); let blocktree = blocktree.clone();
let exit_sender = exit_sender.clone(); let exit_sender = exit_sender.clone();
let genesis_blockhash = *genesis_blockhash;
let thread_hdl = Builder::new() let thread_hdl = Builder::new()
.name("solana-broadcaster".to_string()) .name("solana-broadcaster".to_string())
.spawn(move || { .spawn(move || {
@@ -179,7 +275,7 @@ impl BroadcastStage {
&cluster_info, &cluster_info,
&receiver, &receiver,
&blocktree, &blocktree,
broadcast_stage_run, &genesis_blockhash,
) )
}) })
.unwrap(); .unwrap();
@@ -206,7 +302,6 @@ mod test {
use crate::service::Service; use crate::service::Service;
use solana_runtime::bank::Bank; use solana_runtime::bank::Bank;
use solana_sdk::hash::Hash; use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil}; use solana_sdk::signature::{Keypair, KeypairUtil};
use std::sync::atomic::AtomicBool; use std::sync::atomic::AtomicBool;
use std::sync::mpsc::channel; use std::sync::mpsc::channel;
@@ -252,7 +347,7 @@ mod test {
entry_receiver, entry_receiver,
&exit_sender, &exit_sender,
&blocktree, &blocktree,
StandardBroadcastRun::new(), &Hash::default(),
); );
MockBroadcastStage { MockBroadcastStage {

View File

@@ -1,156 +0,0 @@
use crate::entry::Entry;
use crate::entry::EntrySlice;
use crate::erasure::CodingGenerator;
use crate::packet::{self, SharedBlob};
use crate::poh_recorder::WorkingBankEntries;
use crate::result::Result;
use rayon::prelude::*;
use rayon::ThreadPool;
use solana_runtime::bank::Bank;
use solana_sdk::signature::{Keypair, KeypairUtil, Signable};
use std::sync::mpsc::Receiver;
use std::sync::Arc;
use std::time::{Duration, Instant};
pub(super) struct ReceiveResults {
pub ventries: Vec<Vec<(Entry, u64)>>,
pub num_entries: usize,
pub time_elapsed: Duration,
pub bank: Arc<Bank>,
pub last_tick: u64,
}
impl ReceiveResults {
pub fn new(
ventries: Vec<Vec<(Entry, u64)>>,
num_entries: usize,
time_elapsed: Duration,
bank: Arc<Bank>,
last_tick: u64,
) -> Self {
Self {
ventries,
num_entries,
time_elapsed,
bank,
last_tick,
}
}
}
pub(super) fn recv_slot_blobs(receiver: &Receiver<WorkingBankEntries>) -> Result<ReceiveResults> {
let timer = Duration::new(1, 0);
let (mut bank, entries) = receiver.recv_timeout(timer)?;
let recv_start = Instant::now();
let mut max_tick_height = bank.max_tick_height();
let mut num_entries = entries.len();
let mut ventries = Vec::new();
let mut last_tick = entries.last().map(|v| v.1).unwrap_or(0);
ventries.push(entries);
assert!(last_tick <= max_tick_height);
if last_tick != max_tick_height {
while let Ok((same_bank, entries)) = receiver.try_recv() {
// If the bank changed, that implies the previous slot was interrupted and we do not have to
// broadcast its entries.
if same_bank.slot() != bank.slot() {
num_entries = 0;
ventries.clear();
bank = same_bank.clone();
max_tick_height = bank.max_tick_height();
}
num_entries += entries.len();
last_tick = entries.last().map(|v| v.1).unwrap_or(0);
ventries.push(entries);
assert!(last_tick <= max_tick_height,);
if last_tick == max_tick_height {
break;
}
}
}
let recv_end = recv_start.elapsed();
let receive_results = ReceiveResults::new(ventries, num_entries, recv_end, bank, last_tick);
Ok(receive_results)
}
pub(super) fn entries_to_blobs(
ventries: Vec<Vec<(Entry, u64)>>,
thread_pool: &ThreadPool,
latest_blob_index: u64,
last_tick: u64,
bank: &Bank,
keypair: &Keypair,
coding_generator: &mut CodingGenerator,
) -> (Vec<SharedBlob>, Vec<SharedBlob>) {
let blobs = generate_data_blobs(
ventries,
thread_pool,
latest_blob_index,
last_tick,
&bank,
&keypair,
);
let coding = generate_coding_blobs(&blobs, &thread_pool, coding_generator, &keypair);
(blobs, coding)
}
pub(super) fn generate_data_blobs(
ventries: Vec<Vec<(Entry, u64)>>,
thread_pool: &ThreadPool,
latest_blob_index: u64,
last_tick: u64,
bank: &Bank,
keypair: &Keypair,
) -> Vec<SharedBlob> {
let blobs: Vec<SharedBlob> = thread_pool.install(|| {
ventries
.into_par_iter()
.map(|p| {
let entries: Vec<_> = p.into_iter().map(|e| e.0).collect();
entries.to_shared_blobs()
})
.flatten()
.collect()
});
packet::index_blobs(
&blobs,
&keypair.pubkey(),
latest_blob_index,
bank.slot(),
bank.parent().map_or(0, |parent| parent.slot()),
);
if last_tick == bank.max_tick_height() {
blobs.last().unwrap().write().unwrap().set_is_last_in_slot();
}
// Make sure not to modify the blob header or data after signing it here
thread_pool.install(|| {
blobs.par_iter().for_each(|b| {
b.write().unwrap().sign(keypair);
})
});
blobs
}
pub(super) fn generate_coding_blobs(
blobs: &[SharedBlob],
thread_pool: &ThreadPool,
coding_generator: &mut CodingGenerator,
keypair: &Keypair,
) -> Vec<SharedBlob> {
let coding = coding_generator.next(&blobs);
thread_pool.install(|| {
coding.par_iter().for_each(|c| {
c.write().unwrap().sign(keypair);
})
});
coding
}
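recv_slot_blobs above coalesces entries: one blocking recv, then try_recv in a loop, accumulating batches until the batch carrying the slot's final tick arrives. A self-contained sketch of that loop over an mpsc channel, with plain tick heights in place of entries (the bank-switch handling is omitted):

use std::sync::mpsc::channel;

fn main() {
    let (sender, receiver) = channel::<Vec<u64>>();
    let max_tick_height = 6u64;
    for batch in [vec![1u64, 2], vec![3, 4], vec![5, 6]] {
        sender.send(batch).unwrap();
    }

    // One blocking recv, then drain until the final tick shows up.
    let mut batches = vec![receiver.recv().unwrap()];
    let mut last_tick = *batches.last().unwrap().last().unwrap();
    while last_tick != max_tick_height {
        let batch = receiver.try_recv().unwrap();
        last_tick = *batch.last().unwrap();
        batches.push(batch);
    }
    assert_eq!(batches.concat(), vec![1, 2, 3, 4, 5, 6]);
}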

View File

@@ -1,70 +0,0 @@
use super::*;
use solana_sdk::hash::Hash;

pub(super) struct FailEntryVerificationBroadcastRun {}

impl FailEntryVerificationBroadcastRun {
    pub(super) fn new() -> Self {
        Self {}
    }
}

impl BroadcastRun for FailEntryVerificationBroadcastRun {
    fn run(
        &mut self,
        broadcast: &mut Broadcast,
        cluster_info: &Arc<RwLock<ClusterInfo>>,
        receiver: &Receiver<WorkingBankEntries>,
        sock: &UdpSocket,
        blocktree: &Arc<Blocktree>,
    ) -> Result<()> {
        // 1) Pull entries from banking stage
        let mut receive_results = broadcast_utils::recv_slot_blobs(receiver)?;
        let bank = receive_results.bank.clone();
        let last_tick = receive_results.last_tick;

        // 2) Convert entries to blobs + generate coding blobs. Set a garbage PoH on the last entry
        // in the slot to make verification fail on validators
        if last_tick == bank.max_tick_height() {
            let mut last_entry = receive_results
                .ventries
                .last_mut()
                .unwrap()
                .last_mut()
                .unwrap();
            last_entry.0.hash = Hash::default();
        }

        let keypair = &cluster_info.read().unwrap().keypair.clone();
        let latest_blob_index = blocktree
            .meta(bank.slot())
            .expect("Database error")
            .map(|meta| meta.consumed)
            .unwrap_or(0);

        let (data_blobs, coding_blobs) = broadcast_utils::entries_to_blobs(
            receive_results.ventries,
            &broadcast.thread_pool,
            latest_blob_index,
            last_tick,
            &bank,
            &keypair,
            &mut broadcast.coding_generator,
        );

        blocktree.write_shared_blobs(data_blobs.iter().chain(coding_blobs.iter()))?;

        // 3) Start broadcast step
        let bank_epoch = bank.get_stakers_epoch(bank.slot());
        let stakes = staking_utils::staked_nodes_at_epoch(&bank, bank_epoch);

        // Broadcast data + erasures
        cluster_info.read().unwrap().broadcast(
            sock,
            data_blobs.iter().chain(coding_blobs.iter()),
            stakes.as_ref(),
        )?;
        Ok(())
    }
}

View File

@@ -1,116 +0,0 @@
use super::broadcast_utils;
use super::*;

#[derive(Default)]
struct BroadcastStats {
    num_entries: Vec<usize>,
    run_elapsed: Vec<u64>,
    to_blobs_elapsed: Vec<u64>,
}

pub(super) struct StandardBroadcastRun {
    stats: BroadcastStats,
}

impl StandardBroadcastRun {
    pub(super) fn new() -> Self {
        Self {
            stats: BroadcastStats::default(),
        }
    }

    fn update_broadcast_stats(
        &mut self,
        broadcast_elapsed: u64,
        run_elapsed: u64,
        num_entries: usize,
        to_blobs_elapsed: u64,
        blob_index: u64,
    ) {
        inc_new_counter_info!("broadcast_service-time_ms", broadcast_elapsed as usize);
        self.stats.num_entries.push(num_entries);
        self.stats.to_blobs_elapsed.push(to_blobs_elapsed);
        self.stats.run_elapsed.push(run_elapsed);
        if self.stats.num_entries.len() >= 16 {
            info!(
                "broadcast: entries: {:?} blob times ms: {:?} broadcast times ms: {:?}",
                self.stats.num_entries, self.stats.to_blobs_elapsed, self.stats.run_elapsed
            );
            self.stats.num_entries.clear();
            self.stats.to_blobs_elapsed.clear();
            self.stats.run_elapsed.clear();
        }
        datapoint!("broadcast-service", ("transmit-index", blob_index, i64));
    }
}

impl BroadcastRun for StandardBroadcastRun {
    fn run(
        &mut self,
        broadcast: &mut Broadcast,
        cluster_info: &Arc<RwLock<ClusterInfo>>,
        receiver: &Receiver<WorkingBankEntries>,
        sock: &UdpSocket,
        blocktree: &Arc<Blocktree>,
    ) -> Result<()> {
        // 1) Pull entries from banking stage
        let receive_results = broadcast_utils::recv_slot_blobs(receiver)?;
        let receive_elapsed = receive_results.time_elapsed;
        let num_entries = receive_results.num_entries;
        let bank = receive_results.bank.clone();
        let last_tick = receive_results.last_tick;
        inc_new_counter_info!("broadcast_service-entries_received", num_entries);

        // 2) Convert entries to blobs + generate coding blobs
        let to_blobs_start = Instant::now();
        let keypair = &cluster_info.read().unwrap().keypair.clone();
        let latest_blob_index = blocktree
            .meta(bank.slot())
            .expect("Database error")
            .map(|meta| meta.consumed)
            .unwrap_or(0);

        let (data_blobs, coding_blobs) = broadcast_utils::entries_to_blobs(
            receive_results.ventries,
            &broadcast.thread_pool,
            latest_blob_index,
            last_tick,
            &bank,
            &keypair,
            &mut broadcast.coding_generator,
        );

        blocktree.write_shared_blobs(data_blobs.iter().chain(coding_blobs.iter()))?;
        let to_blobs_elapsed = to_blobs_start.elapsed();

        // 3) Start broadcast step
        let broadcast_start = Instant::now();
        let bank_epoch = bank.get_stakers_epoch(bank.slot());
        let stakes = staking_utils::staked_nodes_at_epoch(&bank, bank_epoch);

        // Broadcast data + erasures
        cluster_info.read().unwrap().broadcast(
            sock,
            data_blobs.iter().chain(coding_blobs.iter()),
            stakes.as_ref(),
        )?;

        inc_new_counter_debug!(
            "streamer-broadcast-sent",
            data_blobs.len() + coding_blobs.len()
        );

        let broadcast_elapsed = broadcast_start.elapsed();
        self.update_broadcast_stats(
            duration_as_ms(&broadcast_elapsed),
            duration_as_ms(&(receive_elapsed + to_blobs_elapsed + broadcast_elapsed)),
            num_entries,
            duration_as_ms(&to_blobs_elapsed),
            latest_blob_index,
        );
        Ok(())
    }
}

View File

@@ -6,11 +6,32 @@ use std::io::{BufWriter, Write};
 use std::path::Path;
 use std::sync::Arc;
-pub use solana_chacha_sys::chacha_cbc_encrypt;
 pub const CHACHA_BLOCK_SIZE: usize = 64;
 pub const CHACHA_KEY_SIZE: usize = 32;
+#[link(name = "cpu-crypt")]
+extern "C" {
+    fn chacha20_cbc_encrypt(
+        input: *const u8,
+        output: *mut u8,
+        in_len: usize,
+        key: *const u8,
+        ivec: *mut u8,
+    );
+}
+pub fn chacha_cbc_encrypt(input: &[u8], output: &mut [u8], key: &[u8], ivec: &mut [u8]) {
+    unsafe {
+        chacha20_cbc_encrypt(
+            input.as_ptr(),
+            output.as_mut_ptr(),
+            input.len(),
+            key.as_ptr(),
+            ivec.as_mut_ptr(),
+        );
+    }
+}
 pub fn chacha_cbc_encrypt_ledger(
     blocktree: &Arc<Blocktree>,
     slice: u64,
@@ -133,7 +154,7 @@ mod tests {
         hasher.hash(&buf[..size]);
         // golden needs to be updated if blob stuff changes....
-        let golden: Hash = "E2HZjSC6VgH4nmEiTbMDATTeBcFjwSYz7QYvU7doGNhD"
+        let golden: Hash = "9xb2Asf7UK5G8WqPwsvzo5xwLi4dixBSDiYKCtYRikA"
             .parse()
             .unwrap();
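
A minimal usage sketch of the v0.15-side safe wrapper above (hypothetical buffer contents; it assumes the `cpu-crypt` C library is linked and that the IV buffer is `CHACHA_BLOCK_SIZE` bytes, as in the ledger-encryption caller):

fn chacha_demo() {
    let input = [1u8; CHACHA_BLOCK_SIZE]; // one 64-byte plaintext block
    let mut output = [0u8; CHACHA_BLOCK_SIZE]; // ciphertext is written here
    let key = [2u8; CHACHA_KEY_SIZE]; // 32-byte key, fixed only for the sketch
    let mut ivec = [3u8; CHACHA_BLOCK_SIZE]; // IV/running state, updated in place
    chacha_cbc_encrypt(&input, &mut output, &key, &mut ivec);
}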

View File

@@ -1,8 +1,6 @@
-use solana_client::thin_client::ThinClient;
 use solana_sdk::pubkey::Pubkey;
 pub trait Cluster {
     fn get_node_pubkeys(&self) -> Vec<Pubkey>;
-    fn get_validator_client(&self, pubkey: &Pubkey) -> Option<ThinClient>;
     fn restart_node(&mut self, pubkey: Pubkey);
 }

View File

@@ -24,17 +24,12 @@ use crate::repair_service::RepairType;
 use crate::result::Result;
 use crate::staking_utils;
 use crate::streamer::{BlobReceiver, BlobSender};
-use crate::weighted_shuffle::weighted_shuffle;
 use bincode::{deserialize, serialize};
 use core::cmp;
-use itertools::Itertools;
-use rand::SeedableRng;
+use hashbrown::HashMap;
 use rand::{thread_rng, Rng};
-use rand_chacha::ChaChaRng;
 use rayon::prelude::*;
-use solana_metrics::{
-    datapoint_debug, inc_new_counter_debug, inc_new_counter_error, inc_new_counter_warn,
-};
+use solana_metrics::{datapoint_debug, inc_new_counter_debug, inc_new_counter_error};
 use solana_netutil::{
     bind_in_range, bind_to, find_available_port_in_range, multi_bind_in_range, PortRange,
 };
@@ -44,11 +39,10 @@ use solana_sdk::pubkey::Pubkey;
 use solana_sdk::signature::{Keypair, KeypairUtil, Signable, Signature};
 use solana_sdk::timing::{duration_as_ms, timestamp};
 use solana_sdk::transaction::Transaction;
-use std::borrow::Borrow;
-use std::borrow::Cow;
 use std::cmp::min;
-use std::collections::{BTreeSet, HashMap};
+use std::collections::BTreeSet;
 use std::fmt;
+use std::io;
 use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::{Arc, RwLock};
@@ -89,7 +83,7 @@ pub struct ClusterInfo {
 pub struct Locality {
     /// The bounds of the neighborhood represented by this locality
     pub neighbor_bounds: (usize, usize),
-    /// The `turbine` layer this locality is in
+    /// The `avalanche` layer this locality is in
     pub layer_ix: usize,
     /// The bounds of the current layer
     pub layer_bounds: (usize, usize),
@@ -128,7 +122,7 @@ impl Signable for PruneData {
         self.pubkey
     }
-    fn signable_data(&self) -> Cow<[u8]> {
+    fn signable_data(&self) -> Vec<u8> {
         #[derive(Serialize)]
         struct SignData {
             pubkey: Pubkey,
@@ -142,7 +136,7 @@ impl Signable for PruneData {
             destination: self.destination,
             wallclock: self.wallclock,
         };
-        Cow::Owned(serialize(&data).expect("serialize PruneData"))
+        serialize(&data).expect("serialize PruneData")
     }
     fn get_signature(&self) -> Signature {
@@ -496,69 +490,57 @@ impl ClusterInfo {
             && !ContactInfo::is_valid_address(&contact_info.tpu)
     }
-    fn stake_weighted_shuffle<S: std::hash::BuildHasher>(
+    fn sort_by_stake<S: std::hash::BuildHasher>(
         peers: &[ContactInfo],
         stakes: Option<&HashMap<Pubkey, u64, S>>,
-        rng: ChaChaRng,
     ) -> Vec<(u64, ContactInfo)> {
-        let (stake_weights, peers_with_stakes): (Vec<_>, Vec<_>) = peers
+        let mut peers_with_stakes: Vec<_> = peers
             .iter()
             .map(|c| {
-                let stake = stakes.map_or(0, |stakes| *stakes.get(&c.id).unwrap_or(&0));
-                // For stake weighted shuffle a valid weight is atleast 1. Weight 0 is
-                // assumed to be missing entry. So let's make sure stake weights are atleast 1
-                (cmp::max(1, stake), (stake, c.clone()))
+                (
+                    stakes.map_or(0, |stakes| *stakes.get(&c.id).unwrap_or(&0)),
+                    c.clone(),
+                )
             })
-            .sorted_by(|(_, (l_stake, l_info)), (_, (r_stake, r_info))| {
-                if r_stake == l_stake {
-                    r_info.id.cmp(&l_info.id)
-                } else {
-                    r_stake.cmp(&l_stake)
-                }
-            })
-            .unzip();
-        let shuffle = weighted_shuffle(stake_weights, rng);
-        let mut out: Vec<(u64, ContactInfo)> = shuffle
-            .iter()
-            .map(|x| peers_with_stakes[*x].clone())
             .collect();
-        out.dedup();
-        out
+        peers_with_stakes.sort_unstable_by(|(l_stake, l_info), (r_stake, r_info)| {
+            if r_stake == l_stake {
+                r_info.id.cmp(&l_info.id)
+            } else {
+                r_stake.cmp(&l_stake)
+            }
+        });
+        peers_with_stakes.dedup();
+        peers_with_stakes
     }
     /// Return sorted Retransmit peers and index of `Self.id()` as if it were in that list
-    pub fn shuffle_peers_and_index<S: std::hash::BuildHasher>(
+    fn sorted_peers_and_index<S: std::hash::BuildHasher>(
         &self,
         stakes: Option<&HashMap<Pubkey, u64, S>>,
-        rng: ChaChaRng,
     ) -> (usize, Vec<ContactInfo>) {
         let mut peers = self.retransmit_peers();
         peers.push(self.lookup(&self.id()).unwrap().clone());
-        let contacts_and_stakes: Vec<_> = ClusterInfo::stake_weighted_shuffle(&peers, stakes, rng);
+        let contacts_and_stakes: Vec<_> = ClusterInfo::sort_by_stake(&peers, stakes);
         let mut index = 0;
         let peers: Vec<_> = contacts_and_stakes
             .into_iter()
             .enumerate()
-            .map(|(i, (_, peer))| {
+            .filter_map(|(i, (_, peer))| {
                 if peer.id == self.id() {
                     index = i;
+                    None
+                } else {
+                    Some(peer)
                 }
-                peer
             })
             .collect();
         (index, peers)
     }
-    pub fn sorted_tvu_peers(
-        &self,
-        stakes: Option<&HashMap<Pubkey, u64>>,
-        rng: ChaChaRng,
-    ) -> Vec<ContactInfo> {
+    pub fn sorted_tvu_peers(&self, stakes: Option<&HashMap<Pubkey, u64>>) -> Vec<ContactInfo> {
         let peers = self.tvu_peers();
-        let peers_with_stakes: Vec<_> = ClusterInfo::stake_weighted_shuffle(&peers, stakes, rng);
+        let peers_with_stakes: Vec<_> = ClusterInfo::sort_by_stake(&peers, stakes);
         peers_with_stakes
             .iter()
             .map(|(_, peer)| (*peer).clone())
@@ -710,39 +692,34 @@ impl ClusterInfo {
     /// broadcast messages from the leader to layer 1 nodes
     /// # Remarks
-    pub fn broadcast<I>(
-        &self,
+    pub fn broadcast(
+        id: &Pubkey,
+        contains_last_tick: bool,
+        broadcast_table: &[ContactInfo],
         s: &UdpSocket,
-        blobs: I,
-        stakes: Option<&HashMap<Pubkey, u64>>,
-    ) -> Result<()>
-    where
-        I: IntoIterator,
-        I::Item: Borrow<SharedBlob>,
-    {
-        let mut last_err = Ok(());
-        let mut broadcast_table_len = 0;
-        let mut blobs_len = 0;
-        blobs.into_iter().for_each(|b| {
-            blobs_len += 1;
-            let blob = b.borrow().read().unwrap();
-            let broadcast_table = self.sorted_tvu_peers(stakes, ChaChaRng::from_seed(blob.seed()));
-            broadcast_table_len = cmp::max(broadcast_table_len, broadcast_table.len());
-            if !broadcast_table.is_empty() {
-                if let Err(e) = s.send_to(&blob.data[..blob.meta.size], &broadcast_table[0].tvu) {
-                    trace!("{}: broadcast result {:?}", self.id(), e);
-                    last_err = Err(e);
-                }
-            }
-        });
-        last_err?;
-        inc_new_counter_debug!("cluster_info-broadcast-max_idx", blobs_len);
-        if broadcast_table_len != 0 {
-            inc_new_counter_warn!("broadcast_service-num_peers", broadcast_table_len + 1);
+        blobs: &[SharedBlob],
+    ) -> Result<()> {
+        if broadcast_table.is_empty() {
+            debug!("{}:not enough peers in cluster_info table", id);
+            inc_new_counter_error!("cluster_info-broadcast-not_enough_peers_error", 1);
+            Err(ClusterInfoError::NoPeers)?;
         }
+        let orders = Self::create_broadcast_orders(contains_last_tick, blobs, broadcast_table);
+        trace!("broadcast orders table {}", orders.len());
+        let errs = Self::send_orders(id, s, orders);
+        for e in errs {
+            if let Err(e) = &e {
+                trace!("{}: broadcast result {:?}", id, e);
+            }
+            e?;
+        }
+        inc_new_counter_debug!("cluster_info-broadcast-max_idx", blobs.len());
         Ok(())
     }
@@ -795,6 +772,94 @@ impl ClusterInfo {
         Ok(())
     }
+    fn send_orders(
+        id: &Pubkey,
+        s: &UdpSocket,
+        orders: Vec<(SharedBlob, Vec<&ContactInfo>)>,
+    ) -> Vec<io::Result<usize>> {
+        orders
+            .into_iter()
+            .flat_map(|(b, vs)| {
+                let blob = b.read().unwrap();
+                let ids_and_tvus = if log_enabled!(log::Level::Trace) {
+                    let v_ids = vs.iter().map(|v| v.id);
+                    let tvus = vs.iter().map(|v| v.tvu);
+                    let ids_and_tvus = v_ids.zip(tvus).collect();
+                    trace!(
+                        "{}: BROADCAST idx: {} sz: {} to {:?} coding: {}",
+                        id,
+                        blob.index(),
+                        blob.meta.size,
+                        ids_and_tvus,
+                        blob.is_coding()
+                    );
+                    ids_and_tvus
+                } else {
+                    vec![]
+                };
+                assert!(blob.meta.size <= BLOB_SIZE);
+                let send_errs_for_blob: Vec<_> = vs
+                    .iter()
+                    .map(move |v| {
+                        let e = s.send_to(&blob.data[..blob.meta.size], &v.tvu);
+                        trace!(
+                            "{}: done broadcast {} to {:?}",
+                            id,
+                            blob.meta.size,
+                            ids_and_tvus
+                        );
+                        e
+                    })
+                    .collect();
+                send_errs_for_blob
+            })
+            .collect()
+    }
+    pub fn create_broadcast_orders<'a, T>(
+        contains_last_tick: bool,
+        blobs: &[T],
+        broadcast_table: &'a [ContactInfo],
+    ) -> Vec<(T, Vec<&'a ContactInfo>)>
+    where
+        T: Clone,
+    {
+        // enumerate all the blobs in the window, those are the indices
+        // transmit them to nodes, starting from a different node.
+        if blobs.is_empty() {
+            return vec![];
+        }
+        let mut orders = Vec::with_capacity(blobs.len());
+        let x = thread_rng().gen_range(0, broadcast_table.len());
+        for (i, blob) in blobs.iter().enumerate() {
+            let br_idx = (x + i) % broadcast_table.len();
+            trace!("broadcast order data br_idx {}", br_idx);
+            orders.push((blob.clone(), vec![&broadcast_table[br_idx]]));
+        }
+        if contains_last_tick {
+            // Broadcast the last tick to everyone on the network so it doesn't get dropped
+            // (Need to maximize probability the next leader in line sees this handoff tick
+            // despite packet drops)
+            // If we had a tick at max_tick_height, then we know it must be the last
+            // Blob in the broadcast, There cannot be an entry that got sent after the
+            // last tick, guaranteed by the PohService).
+            orders.push((
+                blobs.last().unwrap().clone(),
+                broadcast_table.iter().collect(),
+            ));
+        }
+        orders
+    }
     pub fn window_index_request_bytes(&self, slot: u64, blob_index: u64) -> Result<Vec<u8>> {
         let req = Protocol::RequestWindowIndex(self.my_data().clone(), slot, blob_index);
         let out = serialize(&req)?;
@@ -822,34 +887,33 @@
         }
         let n = thread_rng().gen::<usize>() % valid.len();
         let addr = valid[n].gossip; // send the request to the peer's gossip port
-        let out = self.map_repair_request(repair_request)?;
+        let out = {
+            match repair_request {
+                RepairType::Blob(slot, blob_index) => {
+                    datapoint_debug!(
+                        "cluster_info-repair",
+                        ("repair-slot", *slot, i64),
+                        ("repair-ix", *blob_index, i64)
+                    );
+                    self.window_index_request_bytes(*slot, *blob_index)?
+                }
+                RepairType::HighestBlob(slot, blob_index) => {
+                    datapoint_debug!(
+                        "cluster_info-repair_highest",
+                        ("repair-highest-slot", *slot, i64),
+                        ("repair-highest-ix", *blob_index, i64)
+                    );
+                    self.window_highest_index_request_bytes(*slot, *blob_index)?
+                }
+                RepairType::Orphan(slot) => {
+                    datapoint_debug!("cluster_info-repair_orphan", ("repair-orphan", *slot, i64));
+                    self.orphan_bytes(*slot)?
+                }
+            }
+        };
         Ok((addr, out))
     }
-    pub fn map_repair_request(&self, repair_request: &RepairType) -> Result<Vec<u8>> {
-        match repair_request {
-            RepairType::Blob(slot, blob_index) => {
-                datapoint_debug!(
-                    "cluster_info-repair",
-                    ("repair-slot", *slot, i64),
-                    ("repair-ix", *blob_index, i64)
-                );
-                Ok(self.window_index_request_bytes(*slot, *blob_index)?)
-            }
-            RepairType::HighestBlob(slot, blob_index) => {
-                datapoint_debug!(
-                    "cluster_info-repair_highest",
-                    ("repair-highest-slot", *slot, i64),
-                    ("repair-highest-ix", *blob_index, i64)
-                );
-                Ok(self.window_highest_index_request_bytes(*slot, *blob_index)?)
-            }
-            RepairType::Orphan(slot) => {
-                datapoint_debug!("cluster_info-repair_orphan", ("repair-orphan", *slot, i64));
-                Ok(self.orphan_bytes(*slot)?)
-            }
-        }
-    }
     // If the network entrypoint hasn't been discovered yet, add it to the crds table
     fn add_entrypoint(&mut self, pulls: &mut Vec<(Pubkey, Bloom<Hash>, SocketAddr, CrdsValue)>) {
         match &self.entrypoint {
@@ -903,18 +967,18 @@
     }
     fn new_push_requests(&mut self) -> Vec<(SocketAddr, Protocol)> {
         let self_id = self.gossip.id;
-        let (_, push_messages) = self.gossip.new_push_messages(timestamp());
-        push_messages
+        let (_, peers, msgs) = self.gossip.new_push_messages(timestamp());
+        peers
            .into_iter()
-            .filter_map(|(peer, messages)| {
-                let peer_label = CrdsValueLabel::ContactInfo(peer);
+            .filter_map(|p| {
+                let peer_label = CrdsValueLabel::ContactInfo(p);
                 self.gossip
                     .crds
                     .lookup(&peer_label)
                     .and_then(CrdsValue::contact_info)
-                    .map(|p| (p.gossip, messages))
+                    .map(|p| p.gossip)
             })
-            .map(|(peer, msgs)| (peer, Protocol::PushMessage(self_id, msgs)))
+            .map(|peer| (peer, Protocol::PushMessage(self_id, msgs.clone())))
             .collect()
     }
@@ -1423,7 +1487,7 @@
     }
 }
-/// Turbine logic
+/// Avalanche logic
 /// 1 - For the current node find out if it is in layer 1
 /// 1.1 - If yes, then broadcast to all layer 1 nodes
 /// 1 - using the layer 1 index, broadcast to all layer 2 nodes assuming you know neighborhood size
@@ -1431,11 +1495,12 @@
 /// 1 - also check if there are nodes in the next layer and repeat the layer 1 to layer 2 logic
 /// Returns Neighbor Nodes and Children Nodes `(neighbors, children)` for a given node based on its stake (Bank Balance)
-pub fn compute_retransmit_peers(
+pub fn compute_retransmit_peers<S: std::hash::BuildHasher>(
+    stakes: Option<&HashMap<Pubkey, u64, S>>,
+    cluster_info: &Arc<RwLock<ClusterInfo>>,
     fanout: usize,
-    my_index: usize,
-    peers: Vec<ContactInfo>,
 ) -> (Vec<ContactInfo>, Vec<ContactInfo>) {
+    let (my_index, peers) = cluster_info.read().unwrap().sorted_peers_and_index(stakes);
     //calc num_layers and num_neighborhoods using the total number of nodes
     let (num_layers, layer_indices) = ClusterInfo::describe_data_plane(peers.len(), fanout);
@@ -2026,14 +2091,11 @@ mod tests {
         let mut cluster_info = ClusterInfo::new(contact_info.clone(), Arc::new(keypair));
         cluster_info.set_leader(&leader.id);
         cluster_info.insert_info(peer.clone());
-        cluster_info.gossip.refresh_push_active_set(&HashMap::new());
         //check that all types of gossip messages are signed correctly
-        let (_, push_messages) = cluster_info.gossip.new_push_messages(timestamp());
+        let (_, _, vals) = cluster_info.gossip.new_push_messages(timestamp());
         // there should be some pushes ready
-        assert_eq!(push_messages.len() > 0, true);
-        push_messages
-            .values()
-            .for_each(|v| v.par_iter().for_each(|v| assert!(v.verify())));
+        assert!(vals.len() > 0);
+        vals.par_iter().for_each(|v| assert!(v.verify()));
         let (_, _, val) = cluster_info
             .gossip
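
The ordering contract that `sort_by_stake` restores on the v0.15 side is easy to check in isolation: sort descending by stake and break ties by id, so every node derives the same peer order. A self-contained sketch of just that comparator, with plain `(stake, id)` tuples standing in for `ContactInfo`:

fn sort_by_stake_sketch(mut peers: Vec<(u64, u32)>) -> Vec<(u64, u32)> {
    peers.sort_unstable_by(|(l_stake, l_id), (r_stake, r_id)| {
        // Highest stake first; equal stakes fall back to id so the order is deterministic.
        if r_stake == l_stake {
            r_id.cmp(l_id)
        } else {
            r_stake.cmp(l_stake)
        }
    });
    peers.dedup();
    peers
}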

View File

@@ -620,8 +620,8 @@ mod tests {
         blocktree.insert_data_blobs(&blobs).unwrap();
         // Write roots so that these slots will qualify to be sent by the repairman
-        let roots: Vec<_> = (0..=num_slots - 1).collect();
-        blocktree.set_roots(&roots).unwrap();
+        blocktree.set_root(0, 0).unwrap();
+        blocktree.set_root(num_slots - 1, 0).unwrap();
         // Set up my information
         let my_pubkey = Pubkey::new_rand();
@@ -696,8 +696,8 @@ mod tests {
         blocktree.insert_data_blobs(&blobs).unwrap();
         // Write roots so that these slots will qualify to be sent by the repairman
-        let roots: Vec<_> = (0..=slots_per_epoch * 2 - 1).collect();
-        blocktree.set_roots(&roots).unwrap();
+        blocktree.set_root(0, 0).unwrap();
+        blocktree.set_root(slots_per_epoch * 2 - 1, 0).unwrap();
         // Set up my information
         let my_pubkey = Pubkey::new_rand();

View File

@@ -101,10 +101,14 @@ mod tests {
         let votes = (0..MAX_RECENT_VOTES)
             .map(|i| Vote::new(i as u64, Hash::default()))
             .collect::<Vec<_>>();
-        let vote_ix = vote_instruction::vote(&vote_keypair.pubkey(), &vote_keypair.pubkey(), votes);
-
-        let mut vote_tx = Transaction::new_with_payer(vec![vote_ix], Some(&node_keypair.pubkey()));
+        let vote_ix = vote_instruction::vote(
+            &node_keypair.pubkey(),
+            &vote_keypair.pubkey(),
+            &vote_keypair.pubkey(),
+            votes,
+        );
+        let mut vote_tx = Transaction::new_unsigned_instructions(vec![vote_ix]);
         vote_tx.partial_sign(&[&node_keypair], Hash::default());
         vote_tx.partial_sign(&[&vote_keypair], Hash::default());

View File

@@ -8,13 +8,11 @@ use crate::contact_info::ContactInfo;
 use crate::entry::{Entry, EntrySlice};
 use crate::gossip_service::discover_cluster;
 use crate::locktower::VOTE_THRESHOLD_DEPTH;
-use hashbrown::HashSet;
 use solana_client::thin_client::create_client;
-use solana_runtime::epoch_schedule::MINIMUM_SLOTS_PER_EPOCH;
+use solana_runtime::epoch_schedule::MINIMUM_SLOT_LENGTH;
 use solana_sdk::client::SyncClient;
 use solana_sdk::hash::Hash;
 use solana_sdk::poh_config::PohConfig;
-use solana_sdk::pubkey::Pubkey;
 use solana_sdk::signature::{Keypair, KeypairUtil, Signature};
 use solana_sdk::system_transaction;
 use solana_sdk::timing::{
@@ -28,18 +26,14 @@ use std::time::Duration;
 const DEFAULT_SLOT_MILLIS: u64 = (DEFAULT_TICKS_PER_SLOT * 1000) / DEFAULT_NUM_TICKS_PER_SECOND;
 /// Spend and verify from every node in the network
-pub fn spend_and_verify_all_nodes<S: ::std::hash::BuildHasher>(
+pub fn spend_and_verify_all_nodes(
     entry_point_info: &ContactInfo,
     funding_keypair: &Keypair,
     nodes: usize,
-    ignore_nodes: HashSet<Pubkey, S>,
 ) {
     let (cluster_nodes, _) = discover_cluster(&entry_point_info.gossip, nodes).unwrap();
     assert!(cluster_nodes.len() >= nodes);
     for ingress_node in &cluster_nodes {
-        if ignore_nodes.contains(&ingress_node.id) {
-            continue;
-        }
         let random_keypair = Keypair::new();
         let client = create_client(ingress_node.client_facing_addr(), FULLNODE_PORT_RANGE);
         let bal = client
@@ -54,9 +48,6 @@ pub fn spend_and_verify_all_nodes<S: ::std::hash::BuildHasher>(
         .retry_transfer_until_confirmed(&funding_keypair, &mut transaction, 5, confs)
         .unwrap();
     for validator in &cluster_nodes {
-        if ignore_nodes.contains(&validator.id) {
-            continue;
-        }
         let client = create_client(validator.client_facing_addr(), FULLNODE_PORT_RANGE);
         client.poll_for_signature_confirmation(&sig, confs).unwrap();
     }
@@ -150,7 +141,7 @@ pub fn kill_entry_and_spend_and_verify_rest(
     let (cluster_nodes, _) = discover_cluster(&entry_point_info.gossip, nodes).unwrap();
     assert!(cluster_nodes.len() >= nodes);
     let client = create_client(entry_point_info.client_facing_addr(), FULLNODE_PORT_RANGE);
-    let first_two_epoch_slots = MINIMUM_SLOTS_PER_EPOCH * 3;
+    let first_two_epoch_slots = MINIMUM_SLOT_LENGTH * 3;
     for ingress_node in &cluster_nodes {
         client

View File

@@ -6,7 +6,6 @@ use solana_sdk::rpc_port;
 use solana_sdk::signature::{Keypair, KeypairUtil};
 use solana_sdk::signature::{Signable, Signature};
 use solana_sdk::timing::timestamp;
-use std::borrow::Cow;
 use std::cmp::{Ord, Ordering, PartialEq, PartialOrd};
 use std::net::{IpAddr, SocketAddr};
@@ -226,7 +225,7 @@ impl Signable for ContactInfo {
         self.id
     }
-    fn signable_data(&self) -> Cow<[u8]> {
+    fn signable_data(&self) -> Vec<u8> {
         #[derive(Serialize)]
         struct SignData {
             id: Pubkey,
@@ -252,7 +251,7 @@ impl Signable for ContactInfo {
             rpc_pubsub: me.rpc_pubsub,
             wallclock: me.wallclock,
         };
-        Cow::Owned(serialize(&data).expect("failed to serialize ContactInfo"))
+        serialize(&data).expect("failed to serialize ContactInfo")
     }
     fn get_signature(&self) -> Signature {
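
The `signable_data` change here and in `PruneData` swaps master's `Cow<[u8]>` for a plain `Vec<u8>`: the `Cow` form can hand back borrowed bytes without copying, while the `Vec` form always allocates. A tiny illustration of the difference; the one-method trait below is a stand-in for this sketch, not the real `Signable`:

use std::borrow::Cow;

trait SignableData {
    fn signable_data(&self) -> Cow<[u8]>;
}

struct Demo(Vec<u8>);

impl SignableData for Demo {
    fn signable_data(&self) -> Cow<[u8]> {
        // Borrowed: no allocation when the bytes to sign already exist.
        Cow::Borrowed(&self.0)
    }
}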

View File

@@ -8,10 +8,10 @@ use crate::crds_gossip_error::CrdsGossipError;
 use crate::crds_gossip_pull::CrdsGossipPull;
 use crate::crds_gossip_push::{CrdsGossipPush, CRDS_GOSSIP_NUM_ACTIVE};
 use crate::crds_value::CrdsValue;
+use hashbrown::HashMap;
 use solana_runtime::bloom::Bloom;
 use solana_sdk::hash::Hash;
 use solana_sdk::pubkey::Pubkey;
-use std::collections::HashMap;
 ///The min size for bloom filters
 pub const CRDS_GOSSIP_BLOOM_SIZE: usize = 1000;
@@ -65,9 +65,9 @@
             .collect()
     }
-    pub fn new_push_messages(&mut self, now: u64) -> (Pubkey, HashMap<Pubkey, Vec<CrdsValue>>) {
-        let push_messages = self.push.new_push_messages(&self.crds, now);
-        (self.id, push_messages)
+    pub fn new_push_messages(&mut self, now: u64) -> (Pubkey, Vec<Pubkey>, Vec<CrdsValue>) {
+        let (peers, values) = self.push.new_push_messages(&self.crds, now);
+        (self.id, peers, values)
     }
     /// add the `from` to the peer's filter of nodes

View File

@@ -16,13 +16,13 @@ use crate::crds_gossip_error::CrdsGossipError;
 use crate::crds_value::{CrdsValue, CrdsValueLabel};
 use crate::packet::BLOB_DATA_SIZE;
 use bincode::serialized_size;
+use hashbrown::HashMap;
 use rand;
 use rand::distributions::{Distribution, WeightedIndex};
 use solana_runtime::bloom::Bloom;
 use solana_sdk::hash::Hash;
 use solana_sdk::pubkey::Pubkey;
 use std::cmp;
-use std::collections::HashMap;
 use std::collections::VecDeque;
 pub const CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS: u64 = 15000;

View File

@@ -14,20 +14,17 @@ use crate::crds_gossip::{get_stake, get_weight, CRDS_GOSSIP_BLOOM_SIZE};
 use crate::crds_gossip_error::CrdsGossipError;
 use crate::crds_value::{CrdsValue, CrdsValueLabel};
 use crate::packet::BLOB_DATA_SIZE;
-use crate::weighted_shuffle::weighted_shuffle;
 use bincode::serialized_size;
+use hashbrown::HashMap;
 use indexmap::map::IndexMap;
-use itertools::Itertools;
 use rand;
+use rand::distributions::{Distribution, WeightedIndex};
 use rand::seq::SliceRandom;
-use rand::{thread_rng, RngCore, SeedableRng};
-use rand_chacha::ChaChaRng;
 use solana_runtime::bloom::Bloom;
 use solana_sdk::hash::Hash;
 use solana_sdk::pubkey::Pubkey;
 use solana_sdk::timing::timestamp;
 use std::cmp;
-use std::collections::HashMap;
 pub const CRDS_GOSSIP_NUM_ACTIVE: usize = 30;
 pub const CRDS_GOSSIP_PUSH_FANOUT: usize = 6;
@@ -101,7 +98,7 @@
     /// peers.
     /// The list of push messages is created such that all the randomly selected peers have not
     /// pruned the source addresses.
-    pub fn new_push_messages(&mut self, crds: &Crds, now: u64) -> HashMap<Pubkey, Vec<CrdsValue>> {
+    pub fn new_push_messages(&mut self, crds: &Crds, now: u64) -> (Vec<Pubkey>, Vec<CrdsValue>) {
         let max = self.active_set.len();
         let mut nodes: Vec<_> = (0..max).collect();
         nodes.shuffle(&mut rand::thread_rng());
@@ -113,8 +110,15 @@
             .collect();
         let mut total_bytes: usize = 0;
         let mut values = vec![];
-        let mut push_messages: HashMap<Pubkey, Vec<CrdsValue>> = HashMap::new();
         for (label, hash) in &self.push_messages {
+            let mut failed = false;
+            for p in &peers {
+                let filter = self.active_set.get_mut(p);
+                failed |= filter.is_none() || filter.unwrap().contains(&label.pubkey());
+            }
+            if failed {
+                continue;
+            }
             let res = crds.lookup_versioned(label);
             if res.is_none() {
                 continue;
@@ -133,16 +137,10 @@
             }
             values.push(value.clone());
         }
-        for v in values {
-            for p in peers.iter() {
-                let filter = self.active_set.get_mut(p);
-                if filter.is_some() && !filter.unwrap().contains(&v.label().pubkey()) {
-                    push_messages.entry(*p).or_default().push(v.clone());
-                }
-            }
+        for v in &values {
             self.push_messages.remove(&v.label());
         }
-        push_messages
+        (peers, values)
     }
     /// add the `from` to the peer's filter of nodes
@@ -172,36 +170,28 @@
         let need = Self::compute_need(self.num_active, self.active_set.len(), ratio);
         let mut new_items = HashMap::new();
-        let options: Vec<_> = self.push_options(crds, &self_id, stakes);
+        let mut options: Vec<_> = self.push_options(crds, &self_id, stakes);
         if options.is_empty() {
             return;
         }
-        let mut seed = [0; 32];
-        seed[0..8].copy_from_slice(&thread_rng().next_u64().to_le_bytes());
-        let mut shuffle = weighted_shuffle(
-            options.iter().map(|weighted| weighted.0).collect_vec(),
-            ChaChaRng::from_seed(seed),
-        )
-        .into_iter();
         while new_items.len() < need {
-            match shuffle.next() {
-                Some(index) => {
-                    let item = options[index].1;
-                    if self.active_set.get(&item.id).is_some() {
-                        continue;
-                    }
-                    if new_items.get(&item.id).is_some() {
-                        continue;
-                    }
-                    let size = cmp::max(CRDS_GOSSIP_BLOOM_SIZE, network_size);
-                    let mut bloom = Bloom::random(size, 0.1, 1024 * 8 * 4);
-                    bloom.add(&item.id);
-                    new_items.insert(item.id, bloom);
-                }
-                _ => break,
+            let index = WeightedIndex::new(options.iter().map(|weighted| weighted.0));
+            if index.is_err() {
+                break;
             }
+            let index = index.unwrap();
+            let index = index.sample(&mut rand::thread_rng());
+            let item = options[index].1;
+            options.remove(index);
+            if self.active_set.get(&item.id).is_some() {
+                continue;
+            }
+            if new_items.get(&item.id).is_some() {
+                continue;
+            }
+            let size = cmp::max(CRDS_GOSSIP_BLOOM_SIZE, network_size);
+            let bloom = Bloom::random(size, 0.1, 1024 * 8 * 4);
+            new_items.insert(item.id, bloom);
         }
         let mut keys: Vec<Pubkey> = self.active_set.keys().cloned().collect();
         keys.shuffle(&mut rand::thread_rng());
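
The v0.15 loop above rebuilds the `WeightedIndex` on every iteration and removes each sampled entry so a peer cannot be drawn twice. A compact, self-contained sketch of that sample-without-replacement pattern, using generic `(weight, value)` pairs rather than the real push options:

use rand::distributions::{Distribution, WeightedIndex};

fn sample_without_replacement(mut options: Vec<(u64, char)>, need: usize) -> Vec<char> {
    let mut picked = Vec::new();
    while picked.len() < need {
        // Rebuilding keeps the weights in step with the shrinking pool;
        // WeightedIndex::new errors once options is empty or all weights are zero.
        let index = match WeightedIndex::new(options.iter().map(|(w, _)| *w)) {
            Ok(index) => index,
            Err(_) => break,
        };
        let i = index.sample(&mut rand::thread_rng());
        picked.push(options.remove(i).1);
    }
    picked
}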
@@ -276,7 +266,6 @@
 mod test {
     use super::*;
     use crate::contact_info::ContactInfo;
-    use solana_sdk::signature::Signable;
     #[test]
     fn test_process_push() {
@@ -431,34 +420,15 @@
         push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);
         let new_msg = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
-        let mut expected = HashMap::new();
-        expected.insert(peer.label().pubkey(), vec![new_msg.clone()]);
-        assert_eq!(push.process_push_message(&mut crds, new_msg, 0), Ok(None));
-        assert_eq!(push.active_set.len(), 1);
-        assert_eq!(push.new_push_messages(&crds, 0), expected);
-    }
-    #[test]
-    fn test_personalized_push_messages() {
-        let mut crds = Crds::default();
-        let mut push = CrdsGossipPush::default();
-        let peer_1 = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
-        assert_eq!(crds.insert(peer_1.clone(), 0), Ok(None));
-        let peer_2 = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
-        assert_eq!(crds.insert(peer_2.clone(), 0), Ok(None));
-        let peer_3 = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
         assert_eq!(
-            push.process_push_message(&mut crds, peer_3.clone(), 0),
+            push.process_push_message(&mut crds, new_msg.clone(), 0),
             Ok(None)
         );
-        push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);
-        // push 3's contact info to 1 and 2 and 3
-        let new_msg = CrdsValue::ContactInfo(ContactInfo::new_localhost(&peer_3.pubkey(), 0));
-        let mut expected = HashMap::new();
-        expected.insert(peer_1.pubkey(), vec![new_msg.clone()]);
-        expected.insert(peer_2.pubkey(), vec![new_msg.clone()]);
-        assert_eq!(push.active_set.len(), 3);
-        assert_eq!(push.new_push_messages(&crds, 0), expected);
+        assert_eq!(push.active_set.len(), 1);
+        assert_eq!(
+            push.new_push_messages(&crds, 0),
+            (vec![peer.label().pubkey()], vec![new_msg])
+        );
     }
     #[test]
     fn test_process_prune() {
@@ -469,13 +439,15 @@
         push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);
         let new_msg = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
-        let expected = HashMap::new();
         assert_eq!(
             push.process_push_message(&mut crds, new_msg.clone(), 0),
             Ok(None)
         );
         push.process_prune_msg(&peer.label().pubkey(), &[new_msg.label().pubkey()]);
-        assert_eq!(push.new_push_messages(&crds, 0), expected);
+        assert_eq!(
+            push.new_push_messages(&crds, 0),
+            (vec![peer.label().pubkey()], vec![])
+        );
     }
     #[test]
     fn test_purge_old_pending_push_messages() {
@@ -488,13 +460,15 @@
         let mut ci = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
         ci.wallclock = 1;
         let new_msg = CrdsValue::ContactInfo(ci.clone());
-        let expected = HashMap::new();
         assert_eq!(
             push.process_push_message(&mut crds, new_msg.clone(), 1),
             Ok(None)
         );
         push.purge_old_pending_push_messages(&crds, 0);
-        assert_eq!(push.new_push_messages(&crds, 0), expected);
+        assert_eq!(
+            push.new_push_messages(&crds, 0),
+            (vec![peer.label().pubkey()], vec![])
+        );
     }
     #[test]

Some files were not shown because too many files have changed in this diff.