diff --git a/book/src/proposals/vote-signing-to-implement.md b/book/src/proposals/vote-signing-to-implement.md index a6e94e528f..3ca9f31ec0 100644 --- a/book/src/proposals/vote-signing-to-implement.md +++ b/book/src/proposals/vote-signing-to-implement.md @@ -17,7 +17,7 @@ The following sections outline how this architecture would work: * The keypair is ephemeral. A new keypair is generated on node bootup. A - new keypair might also be generated at runtime based on some TBD + new keypair might also be generated at runtime based on some to be determined criteria. @@ -28,7 +28,7 @@ The following sections outline how this architecture would work: signed by a trusted party 3. The stakeholder of the node grants ephemeral key permission to use its stake. - This process is TBD. + This process is to be determined. 4. The node's untrusted, non-enclave software calls trusted enclave software @@ -40,7 +40,7 @@ The following sections outline how this architecture would work: presented with some verifiable data to check before signing the vote. - * The process of generating the verifiable data in untrusted space is TBD + * The process of generating the verifiable data in untrusted space is to be determined ### PoH Verification diff --git a/ci/README.md b/ci/README.md index e120a47b08..45ff539d56 100644 --- a/ci/README.md +++ b/ci/README.md @@ -20,7 +20,7 @@ Create a new Azure-based "queue=default" agent by running the following command: ``` $ az vm create \ --resource-group ci \ - --name XXX \ + --name XYZ \ --image boilerplate \ --admin-username $(whoami) \ --ssh-key-value ~/.ssh/id_rsa.pub @@ -42,11 +42,11 @@ Creating a "queue=cuda" agent follows the same process but additionally: 1. When ready, ssh into the instance and start a root shell with `sudo -i`. Then prepare it for deallocation by running: `waagent -deprovision+user; cd /etc; ln -s ../run/systemd/resolve/stub-resolv.conf resolv.conf` -1. Run `az vm deallocate --resource-group ci --name XXX` -1. Run `az vm generalize --resource-group ci --name XXX` -1. Run `az image create --resource-group ci --source XXX --name boilerplate` +1. Run `az vm deallocate --resource-group ci --name XYZ` +1. Run `az vm generalize --resource-group ci --name XYZ` +1. Run `az image create --resource-group ci --source XYZ --name boilerplate` 1. Goto the `ci` resource group in the Azure portal and remove all resources - with the XXX name in them + with the XYZ name in them ## Reference @@ -95,7 +95,7 @@ VM Instances in each group is manually adjusted. Each Instance group has its own disk image, `ci-default-vX` and `ci-cuda-vY`, where *X* and *Y* are incremented each time the image is changed. -The process to update a disk image is as follows (TODO: make this less manual): +The manual process to update a disk image is as follows: 1. Create a new VM Instance using the disk image to modify. 2. Once the VM boots, ssh to it and modify the disk as desired. diff --git a/ci/buildkite.yml b/ci/buildkite.yml index de887ab754..1cdea19ee6 100644 --- a/ci/buildkite.yml +++ b/ci/buildkite.yml @@ -29,12 +29,6 @@ steps: - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-coverage.sh" name: "coverage" timeout_in_minutes: 40 - # TODO: Fix and re-enable test-large-network.sh - # - command: "ci/test-large-network.sh || true" - # name: "large-network [ignored]" - # timeout_in_minutes: 20 - # agents: - # - "queue=large" - wait - trigger: "solana-secondary" branches: "!pull/*" diff --git a/ci/nits.sh b/ci/nits.sh index 961b192811..28f0f4afc2 100755 --- a/ci/nits.sh +++ b/ci/nits.sh @@ -57,7 +57,7 @@ declare useGithubIssueInsteadOf=( #'TODO' # TODO: Uncomment this line to disable TODOs ) -if _ git --no-pager grep -n --max-depth=0 "${useGithubIssueInsteadOf[@]/#/-e }" -- '*.rs' '*.sh'; then +if _ git --no-pager grep -n --max-depth=0 "${useGithubIssueInsteadOf[@]/#/-e }" -- '*.rs' '*.sh' '*.md'; then exit 1 fi diff --git a/ci/order-crates-for-publishing.py b/ci/order-crates-for-publishing.py index b49a7df721..8ed42ce446 100755 --- a/ci/order-crates-for-publishing.py +++ b/ci/order-crates-for-publishing.py @@ -46,7 +46,7 @@ def get_packages(): max_iterations = pow(len(dependency_graph),2) while dependency_graph: if max_iterations == 0: - # TODO: Be more helpful and find the actual cycle for the user + # One day be more helpful and find the actual cycle for the user... sys.exit('Error: Circular dependency suspected between these packages: {}\n'.format(' '.join(dependency_graph.keys()))) max_iterations -= 1 diff --git a/ci/publish-crate.sh b/ci/publish-crate.sh index 19a2652baf..3e332adf57 100755 --- a/ci/publish-crate.sh +++ b/ci/publish-crate.sh @@ -50,8 +50,8 @@ for Cargo_toml in $Cargo_tomls; do ( set -x crate=$(dirname "$Cargo_toml") - # TODO: the rocksdb package does not build with the stock rust docker image, - # so use the solana rust docker image until this is resolved upstream + # The rocksdb package does not build with the stock rust docker image so use + # the solana rust docker image cargoCommand="cargo publish --token $CRATES_IO_TOKEN" ci/docker-run.sh "$rust_stable_docker_image" bash -exc "cd $crate; $cargoCommand" ) || true # <-- Don't fail. We want to be able to retry the job in cases when a publish fails halfway due to network/cloud issues diff --git a/ci/publish-metrics-dashboard.sh b/ci/publish-metrics-dashboard.sh index 2ab557cb1a..30cc442c98 100755 --- a/ci/publish-metrics-dashboard.sh +++ b/ci/publish-metrics-dashboard.sh @@ -46,7 +46,6 @@ beta) ;; stable) # Set to whatever branch 'testnet' is on. - # TODO: Revert to $STABLE_CHANNEL for TdS CHANNEL_BRANCH=$BETA_CHANNEL ;; *) diff --git a/ci/test-bench.sh b/ci/test-bench.sh index bfa5de3b11..14f6810f5e 100755 --- a/ci/test-bench.sh +++ b/ci/test-bench.sh @@ -70,9 +70,9 @@ _ cargo +$rust_nightly bench --manifest-path programs/bpf/Cargo.toml ${V:+--verb # Run banking bench. Doesn't require nightly, but use since it is already built. _ cargo +$rust_nightly run --release --manifest-path banking_bench/Cargo.toml ${V:+--verbose} | tee -a "$BENCH_FILE" -# TODO: debug why solana-upload-perf takes over 30 minutes to complete. +# `solana-upload-perf` disabled as it can take over 30 minutes to complete for some +# reason exit 0 - _ cargo +$rust_nightly run --release --package solana-upload-perf \ -- "$BENCH_FILE" "$TARGET_BRANCH" "$UPLOAD_METRICS" | tee "$BENCH_ARTIFACT" diff --git a/ci/test-large-network.sh b/ci/test-large-network.sh deleted file mode 100755 index ecba2d5a06..0000000000 --- a/ci/test-large-network.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/env bash -set -e - -here=$(dirname "$0") -cd "$here"/.. - -source ci/rust-version.sh stable - -export RUST_BACKTRACE=1 - -rm -rf target/perf-libs -./fetch-perf-libs.sh -export LD_LIBRARY_PATH=$PWD/target/perf-libs:$LD_LIBRARY_PATH - -export RUST_LOG=multinode=info - -source scripts/ulimit-n.sh - -if [[ $(sysctl -n net.core.rmem_default) -lt 1610612736 ]]; then - echo 'Error: rmem_default too small, run "sudo sysctl -w net.core.rmem_default=1610612736" to continue' - exit 1 -fi - -if [[ $(sysctl -n net.core.rmem_max) -lt 1610612736 ]]; then - echo 'Error: rmem_max too small, run "sudo sysctl -w net.core.rmem_max=1610612736" to continue' - exit 1 -fi - -if [[ $(sysctl -n net.core.wmem_default) -lt 1610612736 ]]; then - echo 'Error: rmem_default too small, run "sudo sysctl -w net.core.wmem_default=1610612736" to continue' - exit 1 -fi - -if [[ $(sysctl -n net.core.wmem_max) -lt 1610612736 ]]; then - echo 'Error: rmem_max too small, run "sudo sysctl -w net.core.wmem_max=1610612736" to continue' - exit 1 -fi - -set -x -export SOLANA_DYNAMIC_NODES=120 -exec cargo +"$rust_stable" test --release --features=erasure test_multi_node_dynamic_network -- --ignored diff --git a/ci/testnet-manager.sh b/ci/testnet-manager.sh index b6a7d3848a..094d26feb7 100755 --- a/ci/testnet-manager.sh +++ b/ci/testnet-manager.sh @@ -636,20 +636,7 @@ sanity-or-restart) else echo "+++ Sanity failed, updating the network" $metricsWriteDatapoint "testnet-manager sanity-failure=1" - - # TODO: Restore attempt to restart the cluster before recreating it - # See https://github.com/solana-labs/solana/issues/3774 - if false; then - if start; then - echo Update successful - else - echo "+++ Update failed, restarting the network" - $metricsWriteDatapoint "testnet-manager update-failure=1" - create-and-start - fi - else - create-and-start - fi + create-and-start fi ;; *) diff --git a/multinode-demo/delegate-stake.sh b/multinode-demo/delegate-stake.sh index 00cec8bb42..088387ce43 100755 --- a/multinode-demo/delegate-stake.sh +++ b/multinode-demo/delegate-stake.sh @@ -88,14 +88,12 @@ if [[ ! -f $vote_keypair_path ]]; then fi if [[ -f $stake_keypair_path ]]; then - # TODO: Add ability to add multiple stakes with this script? echo "Error: $stake_keypair_path already exists" exit 1 fi if ((airdrops_enabled)); then - declare fees=100 # TODO: No hardcoded transaction fees, fetch the current cluster fees - $solana_cli "${common_args[@]}" airdrop $((stake_lamports+fees)) lamports + $solana_cli "${common_args[@]}" airdrop "$stake_lamports" lamports fi $solana_keygen new -o "$stake_keypair_path" diff --git a/net/remote/remote-node.sh b/net/remote/remote-node.sh index 836793acc5..8c9ccbbfe7 100755 --- a/net/remote/remote-node.sh +++ b/net/remote/remote-node.sh @@ -377,9 +377,9 @@ EOF ) if [[ $airdropsEnabled != true ]]; then - echo "TODO: archivers not supported without airdrops" - # TODO: need to provide the `--identity` argument to an existing system - # account with lamports in it + # If this ever becomes a problem, we need to provide the `--identity` + # argument to an existing system account with lamports in it + echo "Error: archivers not supported without airdrops" exit 1 fi diff --git a/net/remote/remote-sanity.sh b/net/remote/remote-sanity.sh index 43fbaa2ba8..49a92ad03f 100755 --- a/net/remote/remote-sanity.sh +++ b/net/remote/remote-sanity.sh @@ -66,7 +66,6 @@ local|tar|skip) solana_cli=solana solana_gossip=solana-gossip solana_install=solana-install - solana_keygen=solana-keygen ;; *) echo "Unknown deployment method: $deployMethod" @@ -85,11 +84,8 @@ fi echo "+++ $sanityTargetIp: validators" ( - # Ensure solana-cli has a keypair even though it doesn't really need one... - # TODO: Remove when https://github.com/solana-labs/solana/issues/6375 is fixed - $solana_keygen new --force -o temp-id.json set -x - $solana_cli --keypair temp-id.json --url http://"$sanityTargetIp":8899 show-validators + $solana_cli --url http://"$sanityTargetIp":8899 show-validators ) echo "+++ $sanityTargetIp: node count ($numSanityNodes expected)" diff --git a/net/scripts/azure-provider.sh b/net/scripts/azure-provider.sh index f49c7604f9..a2a7fbb3fb 100755 --- a/net/scripts/azure-provider.sh +++ b/net/scripts/azure-provider.sh @@ -126,7 +126,7 @@ cloud_Initialize() { declare networkName="$1" # ec2-provider.sh creates firewall rules programmatically, should do the same # here. - echo "TODO: create $networkName firewall rules programmatically instead of assuming the 'testnet' tag exists" + echo "Note: one day create $networkName firewall rules programmatically instead of assuming the 'testnet' tag exists" } # diff --git a/net/scripts/disable-background-upgrades.sh b/net/scripts/disable-background-upgrades.sh index 683da62ab1..e46815155a 100755 --- a/net/scripts/disable-background-upgrades.sh +++ b/net/scripts/disable-background-upgrades.sh @@ -3,8 +3,8 @@ set -ex # # Prevent background upgrades that block |apt-get| # -# TODO: This approach is pretty uncompromising. An alternative solution that -# doesn't involve deleting system files would be welcome. +# This approach is pretty uncompromising. An alternative solution that doesn't +# involve deleting system files would be welcome. [[ $(uname) = Linux ]] || exit 1 [[ $USER = root ]] || exit 1 diff --git a/net/scripts/gce-provider.sh b/net/scripts/gce-provider.sh index 252ee97117..9b53436a38 100755 --- a/net/scripts/gce-provider.sh +++ b/net/scripts/gce-provider.sh @@ -122,7 +122,7 @@ cloud_Initialize() { declare networkName="$1" # ec2-provider.sh creates firewall rules programmatically, should do the same # here. - echo "TODO: create $networkName firewall rules programmatically instead of assuming the 'testnet' tag exists" + echo "Note: one day create $networkName firewall rules programmatically instead of assuming the 'testnet' tag exists" } # @@ -170,8 +170,8 @@ cloud_CreateInstances() { if $enableGpu; then # Custom Ubuntu 18.04 LTS image with CUDA 9.2 and CUDA 10.0 installed # - # TODO: Unfortunately this image is not public. When this becomes an issue, - # use the stock Ubuntu 18.04 image and programmatically install CUDA after the + # Unfortunately this image is not public. When this becomes an issue, use + # the stock Ubuntu 18.04 image and programmatically install CUDA after the # instance boots # imageName="ubuntu-1804-bionic-v20181029-with-cuda-10-and-cuda-9-2"