Compare commits


1 Commit

Author: Dan Schaper
SHA1: 6ce527010b
Message: Initial work on stubbing for lua.
Signed-off-by: Dan Schaper <dan.schaper@pi-hole.net>
Date: 2021-06-21 10:49:46 -07:00
79 changed files with 2668 additions and 2953 deletions


@@ -1,10 +0,0 @@
version: 2
updates:
- package-ecosystem: github-actions
directory: "/"
schedule:
interval: weekly
day: saturday
time: "10:00"
open-pull-requests-limit: 10
target-branch: developement

.github/release.yml (vendored)

@@ -1,7 +0,0 @@
changelog:
exclude:
labels:
- internal
authors:
- dependabot
- github-actions


@@ -1,40 +0,0 @@
name: "CodeQL"
on:
push:
branches:
- master
- development
pull_request:
branches:
- master
- development
schedule:
- cron: '32 11 * * 6'
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
permissions:
actions: read
contents: read
security-events: write
steps:
-
name: Checkout repository
uses: actions/checkout@v2
# Initializes the CodeQL tools for scanning.
-
name: Initialize CodeQL
uses: github/codeql-action/init@v1
with:
languages: 'python'
-
name: Autobuild
uses: github/codeql-action/autobuild@v1
-
name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v1


@@ -1,25 +0,0 @@
name: Mark stale issues
on:
schedule:
- cron: '0 * * * *'
workflow_dispatch:
jobs:
stale:
runs-on: ubuntu-latest
permissions:
issues: write
steps:
- uses: actions/stale@v4
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
days-before-stale: 30
days-before-close: 5
stale-issue-message: 'This issue is stale because it has been open 30 days with no activity. Please comment or update this issue or it will be closed in 5 days.'
stale-issue-label: 'stale'
exempt-issue-labels: 'Internal, Fixed in next release, Bug: Confirmed, Documentation Needed'
exempt-all-issue-assignees: true
operations-per-run: 300


@@ -1,27 +0,0 @@
name: Sync Back to Development
on:
push:
branches:
- master
jobs:
sync-branches:
runs-on: ubuntu-latest
name: Syncing branches
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Opening pull request
id: pull
uses: tretuna/sync-branches@1.4.0
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
FROM_BRANCH: 'master'
TO_BRANCH: 'development'
- name: Label the pull request to ignore for release note generation
uses: actions-ecosystem/action-add-labels@v1
with:
labels: internal
repo: ${{ github.repository }}
number: ${{ steps.pull.outputs.PULL_REQUEST_NUMBER }}


@@ -5,44 +5,21 @@ on:
types: [opened, synchronize, reopened, ready_for_review]
jobs:
- smoke-test:
- if: github.event.pull_request.draft == false
- runs-on: ubuntu-latest
- steps:
- -
- name: Checkout repository
- uses: actions/checkout@v2
- -
- name: Run Smoke Tests
- run: |
- # Ensure scripts in repository are executable
- IFS=$'\n';
- for f in $(find . -name '*.sh'); do if [[ ! -x $f ]]; then echo "$f is not executable" && FAIL=1; fi ;done
- unset IFS;
- # If FAIL is 1 then we fail.
- [[ $FAIL == 1 ]] && exit 1 || echo "Smoke Tests Passed"
distro-test:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
- needs: smoke-test
strategy:
matrix:
- distro: [debian_9, debian_10, debian_11, ubuntu_16, ubuntu_18, ubuntu_20, ubuntu_21, centos_7, centos_8, fedora_33, fedora_34]
+ distro: [debian_9, debian_10, ubuntu_16, ubuntu_18, ubuntu_20, centos_7, centos_8, fedora_32, fedora_33]
env:
DISTRO: ${{matrix.distro}}
steps:
- -
- name: Checkout repository
- uses: actions/checkout@v2
- -
- name: Set up Python 3.8
+ - uses: actions/checkout@v1
+ - name: Set up Python 3.7
uses: actions/setup-python@v2
with:
- python-version: 3.8
+ python-version: 3.7
- -
- name: Install dependencies
+ - name: Install dependencies
run: pip install -r test/requirements.txt
- -
- name: Test with tox
+ - name: Test with tox
run: tox -c test/tox.${DISTRO}.ini

.gitignore (vendored)

@@ -7,6 +7,70 @@ __pycache__
.tox
.eggs
*.egg-info
.idea/
# Created by https://www.gitignore.io/api/jetbrains+iml
### JetBrains+iml ###
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
# All idea files, with exceptions
.idea
!.idea/codeStyles/*
!.idea/codeStyleSettings.xml
# Sensitive or high-churn files:
.idea/**/dataSources/
.idea/**/dataSources.ids
.idea/**/dataSources.xml
.idea/**/dataSources.local.xml
.idea/**/sqlDataSources.xml
.idea/**/dynamic.xml
.idea/**/uiDesigner.xml
# Gradle:
.idea/**/gradle.xml
.idea/**/libraries
# CMake
cmake-build-debug/
# Mongo Explorer plugin:
.idea/**/mongoSettings.xml
## File-based project format:
*.iws
## Plugin-specific files:
# IntelliJ
/out/
# mpeltonen/sbt-idea plugin
.idea_modules/
# JIRA plugin
atlassian-ide-plugin.xml
# Cursive Clojure plugin
.idea/replstate.xml
# Ruby plugin and RubyMine
/.rakeTasks
# Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
fabric.properties
### JetBrains+iml Patch ###
# Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-249601023
*.iml
- .vscode/
+ .idea/misc.xml
*.ipr
# End of https://www.gitignore.io/api/jetbrains+iml

.idea/codeStyleSettings.xml (generated, new file)

@@ -0,0 +1,25 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectCodeStyleSettingsManager">
<option name="PER_PROJECT_SETTINGS">
<value>
<option name="OTHER_INDENT_OPTIONS">
<value>
<option name="INDENT_SIZE" value="2" />
<option name="CONTINUATION_INDENT_SIZE" value="8" />
<option name="TAB_SIZE" value="2" />
<option name="USE_TAB_CHARACTER" value="false" />
<option name="SMART_TABS" value="false" />
<option name="LABEL_INDENT_SIZE" value="0" />
<option name="LABEL_INDENT_ABSOLUTE" value="false" />
<option name="USE_RELATIVE_INDENTS" value="false" />
</value>
</option>
<MarkdownNavigatorCodeStyleSettings>
<option name="RIGHT_MARGIN" value="72" />
</MarkdownNavigatorCodeStyleSettings>
</value>
</option>
<option name="USE_PER_PROJECT_SETTINGS" value="true" />
</component>
</project>

.idea/codeStyles/Project.xml (generated, new file)

@@ -0,0 +1,7 @@
<component name="ProjectCodeStyleConfiguration">
<code_scheme name="Project" version="173">
<MarkdownNavigatorCodeStyleSettings>
<option name="RIGHT_MARGIN" value="72" />
</MarkdownNavigatorCodeStyleSettings>
</code_scheme>
</component>

.idea/codeStyles/codeStyleConfig.xml (generated, new file)

@@ -0,0 +1,5 @@
<component name="ProjectCodeStyleConfiguration">
<state>
<option name="USE_PER_PROJECT_SETTINGS" value="true" />
</state>
</component>


@@ -2,6 +2,111 @@
Please read and understand the contribution guide before creating an issue or pull request.
- The guide can be found here: [https://docs.pi-hole.net/guides/github/contributing/](https://docs.pi-hole.net/guides/github/contributing/)
+ ## Etiquette
- Our goal for Pi-hole is **stability before features**. This means we focus on squashing critical bugs before adding new features. Often, we can do both in tandem, but bugs will take priority over a new feature.
- Pi-hole is open source and [powered by donations](https://pi-hole.net/donate/), and as such, we give our **free time** to build, maintain, and **provide user support** for this project. It would be extremely unfair for us to suffer abuse or anger for our hard work, so please take a moment to consider that.
- Please be considerate towards the developers and other users when raising issues or presenting pull requests.
- Respect our decision(s), and do not be upset or abusive if your submission is not used.
## Viability
When requesting or submitting new features, first consider whether it might be useful to others. Open source projects are used by many people, who may have entirely different needs to your own. Think about whether or not your feature is likely to be used by other users of the project.
## Procedure
**Before filing an issue:**
- Attempt to replicate and **document** the problem, to ensure that it wasn't a coincidental incident.
- Check to make sure your feature suggestion isn't already present within the project.
- Check the pull requests tab to ensure that the bug doesn't have a fix in progress.
- Check the pull requests tab to ensure that the feature isn't already in progress.
**Before submitting a pull request:**
- Check the codebase to ensure that your feature doesn't already exist.
- Check the pull requests to ensure that another person hasn't already submitted the feature or fix.
- Read and understand the [DCO guidelines](https://docs.pi-hole.net/guides/github/contributing/) for the project.
## Technical Requirements
- Submit Pull Requests to the **development branch only**.
- Before Submitting your Pull Request, merge `development` with your new branch and fix any conflicts. (Make sure you don't break anything in development!)
- Please use the [Google Style Guide for Shell](https://google.github.io/styleguide/shell.xml) for your code submission styles.
- Commit Unix line endings.
- Please use the Pi-hole brand: **Pi-hole** (Take a special look at the capitalized 'P' and a low 'h' with a hyphen)
- (Optional fun) keep to the theme of Star Trek/black holes/gravity.
## Forking and Cloning from GitHub to GitHub
1. Fork <https://github.com/pi-hole/pi-hole/> to a repo under a namespace you control, or have permission to use, for example: `https://github.com/<your_namespace>/<your_repo_name>/`. You can do this from the github.com website.
2. Clone `https://github.com/<your_namespace>/<your_repo_name>/` with the tool of your choice.
3. To keep your fork in sync with our repo, add an upstream remote for pi-hole/pi-hole to your repo.
```bash
git remote add upstream https://github.com/pi-hole/pi-hole.git
```
4. Checkout the `development` branch from your fork `https://github.com/<your_namespace>/<your_repo_name>/`.
5. Create a topic/branch, based on the `development` branch code. *Bonus fun to keep to the theme of Star Trek/black holes/gravity.*
6. Make your changes and commit to your topic branch in your repo.
7. Rebase your commits and squash any insignificant commits. See the notes below for an example.
8. Merge `development` into your branch and fix any conflicts.
9. Open a Pull Request to merge your topic branch into our repo's `development` branch.
- Keep in mind the technical requirements from above.
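Taken together, steps 3 through 9 boil down to a short command sequence. The following is an illustrative sketch only: the topic-branch name `my-topic-branch` and the commit message are placeholders, and `origin` is assumed to point at your fork.
```bash
# One-time: add the upstream project as an extra remote (step 3)
git remote add upstream https://github.com/pi-hole/pi-hole.git

# Start a topic branch from an up-to-date development branch (steps 4-5)
git fetch upstream
git checkout development
git merge upstream/development
git checkout -b my-topic-branch

# Commit your work with a DCO sign-off (step 6)
git commit -s -m "Describe your change"

# Before opening the PR, merge development again and resolve any conflicts (step 8)
git merge development
git push -u origin my-topic-branch
```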
## Forking and Cloning from GitHub to other code hosting sites
- Forking is a GitHub concept and cannot be done from GitHub to other git-based code hosting sites. However, those sites may be able to mirror a GitHub repo.
1. To contribute from another code hosting site, you must first complete the steps above to fork our repo to a GitHub namespace you have permission to use, for example: `https://github.com/<your_namespace>/<your_repo_name>/`.
2. Create a repo in your code hosting site, for example: `https://gitlab.com/<your_namespace>/<your_repo_name>/`
3. Follow the instructions from your code hosting site to create a mirror between `https://github.com/<your_namespace>/<your_repo_name>/` and `https://gitlab.com/<your_namespace>/<your_repo_name>/`.
4. When you are ready to create a Pull Request (PR), follow the steps `(starting at step #6)` from [Forking and Cloning from GitHub to GitHub](#forking-and-cloning-from-github-to-github) and create the PR from `https://github.com/<your_namespace>/<your_repo_name>/`.
## Notes for squashing commits with rebase
- To rebase your commits and squash previous commits, you can use:
```bash
git rebase -i your_topic_branch~(number of commits to combine)
```
- For more details visit [gitready.com](http://gitready.com/advanced/2009/02/10/squashing-commits-with-rebase.html)
1. The following would combine the last four commits in the branch `mytopic`.
```bash
git rebase -i mytopic~4
```
2. An editor window opens with the most recent commits indicated: (edit the commands to the left of the commit ID)
```gitattributes
pick 9dff55b2 existing commit comments
squash ebb1a730 existing commit comments
squash 07cc5b50 existing commit comments
reword 9dff55b2 existing commit comments
```
3. Save and close the editor. The next editor window opens: (edit the new commit message). *If you select reword for a commit, an additional editor window will open for you to edit the comment.*
```bash
new commit comments
Signed-off-by: yourname <your email address>
```
4. Save and close the editor for the rebase process to execute. The terminal output should say something like the following:
```bash
Successfully rebased and updated refs/heads/mytopic.
```
5. Once you have a successful rebase, and before you sync your local clone, you have to force push origin to update your repo:
```bash
git push -f origin
```
6. Continue on from step #7 from [Forking and Cloning from GitHub to GitHub](#forking-and-cloning-from-github-to-github)


@@ -11,9 +11,9 @@
</p>
<!-- markdownlint-enable MD033 -->
- The Pi-hole® is a [DNS sinkhole](https://en.wikipedia.org/wiki/DNS_Sinkhole) that protects your devices from unwanted content without installing any client-side software.
+ The Pi-hole® is a [DNS sinkhole](https://en.wikipedia.org/wiki/DNS_Sinkhole) that protects your devices from unwanted content, without installing any client-side software.
- - **Easy-to-install**: our versatile installer walks you through the process and takes less than ten minutes
+ - **Easy-to-install**: our versatile installer walks you through the process, and takes less than ten minutes
- **Resolute**: content is blocked in _non-browser locations_, such as ad-laden mobile apps and smart TVs
- **Responsive**: seamlessly speeds up the feel of everyday browsing by caching DNS queries
- **Lightweight**: runs smoothly with [minimal hardware and software requirements](https://docs.pi-hole.net/main/prerequisites/)
@@ -22,7 +22,7 @@ The Pi-hole® is a [DNS sinkhole](https://en.wikipedia.org/wiki/DNS_Sinkhole) th
- **Versatile**: can optionally function as a [DHCP server](https://discourse.pi-hole.net/t/how-do-i-use-pi-holes-built-in-dhcp-server-and-why-would-i-want-to/3026), ensuring *all* your devices are protected automatically
- **Scalable**: [capable of handling hundreds of millions of queries](https://pi-hole.net/2017/05/24/how-much-traffic-can-pi-hole-handle/) when installed on server-grade hardware
- **Modern**: blocks ads over both IPv4 and IPv6
- - **Free**: open source software that helps ensure _you_ are the sole person in control of your privacy
+ - **Free**: open source software which helps ensure _you_ are the sole person in control of your privacy
-----
@@ -57,21 +57,21 @@ Please refer to the [Pi-hole docker repo](https://github.com/pi-hole/docker-pi-h
Once the installer has been run, you will need to [configure your router to have **DHCP clients use Pi-hole as their DNS server**](https://discourse.pi-hole.net/t/how-do-i-configure-my-devices-to-use-pi-hole-as-their-dns-server/245) which ensures that all devices connecting to your network will have content blocked without any further intervention.
- If your router does not support setting the DNS server, you can [use Pi-hole's built-in DHCP server](https://discourse.pi-hole.net/t/how-do-i-use-pi-holes-built-in-dhcp-server-and-why-would-i-want-to/3026); be sure to disable DHCP on your router first (if it has that feature available).
+ If your router does not support setting the DNS server, you can [use Pi-hole's built-in DHCP server](https://discourse.pi-hole.net/t/how-do-i-use-pi-holes-built-in-dhcp-server-and-why-would-i-want-to/3026); just be sure to disable DHCP on your router first (if it has that feature available).
- As a last resort, you can manually set each device to use Pi-hole as their DNS server.
+ As a last resort, you can always manually set each device to use Pi-hole as their DNS server.
-----
- ## Pi-hole is free but powered by your support
+ ## Pi-hole is free, but powered by your support
- There are many reoccurring costs involved with maintaining free, open source, and privacy-respecting software; expenses which [our volunteer developers](https://github.com/orgs/pi-hole/people) pitch in to cover out-of-pocket. This is just one example of how strongly we feel about our software and the importance of keeping it maintained.
+ There are many reoccurring costs involved with maintaining free, open source, and privacy-respecting software; expenses which [our volunteer developers](https://github.com/orgs/pi-hole/people) pitch in to cover out-of-pocket. This is just one example of how strongly we feel about our software, as well as the importance of keeping it maintained.
Make no mistake: **your support is absolutely vital to help keep us innovating!**
### [Donations](https://pi-hole.net/donate)
- Donating using our Sponsor Button is **extremely helpful** in offsetting a portion of our monthly expenses:
+ Sending a donation using our Sponsor Button is **extremely helpful** in offsetting a portion of our monthly expenses and rewarding our dedicated development team:
### Alternative support
@@ -83,13 +83,13 @@ If you'd rather not donate (_which is okay!_), there are other ways you can help
- [Digital Ocean](https://www.digitalocean.com/?refcode=344d234950e1) _affiliate link_
- [Stickermule](https://www.stickermule.com/unlock?ref_id=9127301701&utm_medium=link&utm_source=invite) _earn a $10 credit after your first purchase_
- [Amazon US](http://www.amazon.com/exec/obidos/redirect-home/pihole09-20) _affiliate link_
- - Spreading the word about our software and how you have benefited from it
+ - Spreading the word about our software, and how you have benefited from it
### Contributing via GitHub
We welcome _everyone_ to contribute to issue reports, suggest new features, and create pull requests.
- If you have something to add - anything from a typo through to a whole new feature, we're happy to check it out! Just make sure to fill out our template when submitting your request; the questions it asks will help the volunteers quickly understand what you're aiming to achieve.
+ If you have something to add - anything from a typo through to a whole new feature, we're happy to check it out! Just make sure to fill out our template when submitting your request; the questions that it asks will help the volunteers quickly understand what you're aiming to achieve.
You'll find that the [install script](https://github.com/pi-hole/pi-hole/blob/master/automated%20install/basic-install.sh) and the [debug script](https://github.com/pi-hole/pi-hole/blob/master/advanced/Scripts/piholeDebug.sh) have an abundance of comments, which will help you better understand how Pi-hole works. They're also a valuable resource to those who want to learn how to write scripts or code a program! We encourage anyone who likes to tinker to read through it and submit a pull request for us to review.
@@ -97,9 +97,9 @@ You'll find that the [install script](https://github.com/pi-hole/pi-hole/blob/ma
## Getting in touch with us
- While we are primarily reachable on our [Discourse User Forum](https://discourse.pi-hole.net/), we can also be found on various social media outlets.
+ While we are primarily reachable on our [Discourse User Forum](https://discourse.pi-hole.net/), we can also be found on a variety of social media outlets.
- **Please be sure to check the FAQs** before starting a new discussion, as we do not have the spare time to reply to every request for assistance.
+ **Please be sure to check the FAQ's** before starting a new discussion. Many user questions already have answers and can be solved without any additional assistance.
- [Frequently Asked Questions](https://discourse.pi-hole.net/c/faqs)
- [Feature Requests](https://discourse.pi-hole.net/c/feature-requests?order=votes)
@@ -125,15 +125,15 @@ Some of the statistics you can integrate include:
- Queries cached
- Unique clients
- Access the API via [`telnet`](https://github.com/pi-hole/FTL), the Web (`admin/api.php`) and Command Line (`pihole -c -j`). You can find out [more details over here](https://discourse.pi-hole.net/t/pi-hole-api/1863).
+ The API can be accessed via [`telnet`](https://github.com/pi-hole/FTL), the Web (`admin/api.php`) and Command Line (`pihole -c -j`). You can find out [more details over here](https://discourse.pi-hole.net/t/pi-hole-api/1863).
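For example (illustrative only; `pi.hole` resolves to your Pi-hole once it is acting as your DNS server, otherwise substitute the device's IP address):
```bash
# Summary statistics from the web API
curl "http://pi.hole/admin/api.php?summaryRaw"

# The same statistics as JSON from the command line
pihole -c -j
```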
### The Command Line Interface
- The [pihole](https://docs.pi-hole.net/core/pihole-command/) command has all the functionality necessary to fully administer the Pi-hole, without the need of the Web Interface. It's fast, user-friendly, and auditable by anyone with an understanding of `bash`.
+ The [pihole](https://docs.pi-hole.net/core/pihole-command/) command has all the functionality necessary to be able to fully administer the Pi-hole, without the need of the Web Interface. It's fast, user-friendly, and auditable by anyone with an understanding of `bash`.
Some notable features include:
- - [Whitelisting, Blacklisting, and Regex](https://docs.pi-hole.net/core/pihole-command/#whitelisting-blacklisting-and-regex)
+ - [Whitelisting, Blacklisting and Regex](https://docs.pi-hole.net/core/pihole-command/#whitelisting-blacklisting-and-regex)
- [Debugging utility](https://docs.pi-hole.net/core/pihole-command/#debugger)
- [Viewing the live log file](https://docs.pi-hole.net/core/pihole-command/#tail)
- [Updating Ad Lists](https://docs.pi-hole.net/core/pihole-command/#gravity)
@@ -149,7 +149,7 @@ This [optional dashboard](https://github.com/pi-hole/AdminLTE) allows you to vie
Some notable features include:
- - Mobile-friendly interface
+ - Mobile friendly interface
- Password protection
- Detailed graphs and doughnut charts
- Top lists of domains and clients


@@ -39,4 +39,6 @@ cache-size=@CACHE_SIZE@
log-queries
log-facility=/var/log/pihole.log
local-ttl=2
log-async


@@ -1,42 +0,0 @@
# Pi-hole: A black hole for Internet advertisements
# (c) 2021 Pi-hole, LLC (https://pi-hole.net)
# Network-wide ad blocking via your own hardware.
#
# RFC 6761 config file for Pi-hole
#
# This file is copyright under the latest version of the EUPL.
# Please see LICENSE file for your rights under this license.
###############################################################################
# FILE AUTOMATICALLY POPULATED BY PI-HOLE INSTALL/UPDATE PROCEDURE. #
# ANY CHANGES MADE TO THIS FILE AFTER INSTALL WILL BE LOST ON THE NEXT UPDATE #
# #
# CHANGES SHOULD BE MADE IN A SEPARATE CONFIG FILE #
# WITHIN /etc/dnsmasq.d/yourname.conf #
###############################################################################
# RFC 6761: Caching DNS servers SHOULD recognize
# test, localhost, invalid
# names as special and SHOULD NOT attempt to look up NS records for them, or
# otherwise query authoritative DNS servers in an attempt to resolve these
# names.
server=/test/
server=/localhost/
server=/invalid/
# The same RFC requests something similar for
# 10.in-addr.arpa. 21.172.in-addr.arpa. 27.172.in-addr.arpa.
# 16.172.in-addr.arpa. 22.172.in-addr.arpa. 28.172.in-addr.arpa.
# 17.172.in-addr.arpa. 23.172.in-addr.arpa. 29.172.in-addr.arpa.
# 18.172.in-addr.arpa. 24.172.in-addr.arpa. 30.172.in-addr.arpa.
# 19.172.in-addr.arpa. 25.172.in-addr.arpa. 31.172.in-addr.arpa.
# 20.172.in-addr.arpa. 26.172.in-addr.arpa. 168.192.in-addr.arpa.
# Pi-hole implements this via the dnsmasq option "bogus-priv" (see
# 01-pihole.conf) because this also covers IPv6.
# OpenWRT furthermore blocks bind, local, onion domains
# see https://git.openwrt.org/?p=openwrt/openwrt.git;a=blob_plain;f=package/network/services/dnsmasq/files/rfc6761.conf;hb=HEAD
# and https://www.iana.org/assignments/special-use-domain-names/special-use-domain-names.xhtml
# We do not include the ".local" rule ourselves, see https://github.com/pi-hole/pi-hole/pull/4282#discussion_r689112972
server=/bind/
server=/onion/


@@ -357,7 +357,7 @@ get_sys_stats() {
ram_used="${ram_raw[1]}"
ram_total="${ram_raw[2]}"
- if [[ "$(pihole status web 2> /dev/null)" -ge "1" ]]; then
+ if [[ "$(pihole status web 2> /dev/null)" == "1" ]]; then
ph_status="${COL_LIGHT_GREEN}Active"
else
ph_status="${COL_LIGHT_RED}Offline"
@@ -498,6 +498,10 @@ chronoFunc() {
printFunc " RAM usage: " "$ram_perc%" "$ram_info" printFunc " RAM usage: " "$ram_perc%" "$ram_info"
printFunc " HDD usage: " "$disk_perc" "$disk_info" printFunc " HDD usage: " "$disk_perc" "$disk_info"
if [[ "$scr_lines" -gt 17 ]] && [[ "$chrono_width" != "small" ]]; then
printFunc " LAN addr: " "${IPV4_ADDRESS/\/*/}" "$lan_info"
fi
if [[ "$DHCP_ACTIVE" == "true" ]]; then if [[ "$DHCP_ACTIVE" == "true" ]]; then
printFunc "DHCP usage: " "$ph_dhcp_percent%" "$dhcp_info" printFunc "DHCP usage: " "$ph_dhcp_percent%" "$dhcp_info"
fi fi

advanced/Scripts/database_migration/gravity-db.sh (mode changed: Executable file → Normal file)

@@ -19,13 +19,13 @@ upgrade_gravityDB(){
auditFile="${piholeDir}/auditlog.list"
# Get database version
- version="$(pihole-FTL sqlite3 "${database}" "SELECT \"value\" FROM \"info\" WHERE \"property\" = 'version';")"
+ version="$(sqlite3 "${database}" "SELECT \"value\" FROM \"info\" WHERE \"property\" = 'version';")"
if [[ "$version" == "1" ]]; then
# This migration script upgrades the gravity.db file by
# adding the domain_audit table
echo -e " ${INFO} Upgrading gravity database from version 1 to 2"
- pihole-FTL sqlite3 "${database}" < "${scriptPath}/1_to_2.sql"
+ sqlite3 "${database}" < "${scriptPath}/1_to_2.sql"
version=2
# Store audit domains in database table
@@ -40,28 +40,28 @@ upgrade_gravityDB(){
# renaming the regex table to regex_blacklist, and
# creating a new regex_whitelist table + corresponding linking table and views
echo -e " ${INFO} Upgrading gravity database from version 2 to 3"
- pihole-FTL sqlite3 "${database}" < "${scriptPath}/2_to_3.sql"
+ sqlite3 "${database}" < "${scriptPath}/2_to_3.sql"
version=3
fi
if [[ "$version" == "3" ]]; then
# This migration script unifies the formally separated domain
# lists into a single table with a UNIQUE domain constraint
echo -e " ${INFO} Upgrading gravity database from version 3 to 4"
- pihole-FTL sqlite3 "${database}" < "${scriptPath}/3_to_4.sql"
+ sqlite3 "${database}" < "${scriptPath}/3_to_4.sql"
version=4
fi
if [[ "$version" == "4" ]]; then
# This migration script upgrades the gravity and list views
# implementing necessary changes for per-client blocking
echo -e " ${INFO} Upgrading gravity database from version 4 to 5"
- pihole-FTL sqlite3 "${database}" < "${scriptPath}/4_to_5.sql"
+ sqlite3 "${database}" < "${scriptPath}/4_to_5.sql"
version=5
fi
if [[ "$version" == "5" ]]; then
# This migration script upgrades the adlist view
# to return an ID used in gravity.sh
echo -e " ${INFO} Upgrading gravity database from version 5 to 6"
- pihole-FTL sqlite3 "${database}" < "${scriptPath}/5_to_6.sql"
+ sqlite3 "${database}" < "${scriptPath}/5_to_6.sql"
version=6
fi
if [[ "$version" == "6" ]]; then
@@ -69,7 +69,7 @@ upgrade_gravityDB(){
# which is automatically associated to all clients not
# having their own group assignments
echo -e " ${INFO} Upgrading gravity database from version 6 to 7"
- pihole-FTL sqlite3 "${database}" < "${scriptPath}/6_to_7.sql"
+ sqlite3 "${database}" < "${scriptPath}/6_to_7.sql"
version=7
fi
if [[ "$version" == "7" ]]; then
@@ -77,21 +77,21 @@ upgrade_gravityDB(){
# to ensure uniqueness on the group name
# We also add date_added and date_modified columns
echo -e " ${INFO} Upgrading gravity database from version 7 to 8"
- pihole-FTL sqlite3 "${database}" < "${scriptPath}/7_to_8.sql"
+ sqlite3 "${database}" < "${scriptPath}/7_to_8.sql"
version=8
fi
if [[ "$version" == "8" ]]; then
# This migration fixes some issues that were introduced
# in the previous migration script.
echo -e " ${INFO} Upgrading gravity database from version 8 to 9"
- pihole-FTL sqlite3 "${database}" < "${scriptPath}/8_to_9.sql"
+ sqlite3 "${database}" < "${scriptPath}/8_to_9.sql"
version=9
fi
if [[ "$version" == "9" ]]; then
# This migration drops unused tables and creates triggers to remove
# obsolete groups assignments when the linked items are deleted
echo -e " ${INFO} Upgrading gravity database from version 9 to 10"
- pihole-FTL sqlite3 "${database}" < "${scriptPath}/9_to_10.sql"
+ sqlite3 "${database}" < "${scriptPath}/9_to_10.sql"
version=10
fi
if [[ "$version" == "10" ]]; then
@@ -101,31 +101,25 @@ upgrade_gravityDB(){
# to keep the copying process generic (needs the same columns in both the
# source and the destination databases).
echo -e " ${INFO} Upgrading gravity database from version 10 to 11"
- pihole-FTL sqlite3 "${database}" < "${scriptPath}/10_to_11.sql"
+ sqlite3 "${database}" < "${scriptPath}/10_to_11.sql"
version=11
fi
if [[ "$version" == "11" ]]; then
# Rename group 0 from "Unassociated" to "Default"
echo -e " ${INFO} Upgrading gravity database from version 11 to 12"
- pihole-FTL sqlite3 "${database}" < "${scriptPath}/11_to_12.sql"
+ sqlite3 "${database}" < "${scriptPath}/11_to_12.sql"
version=12
fi
if [[ "$version" == "12" ]]; then
# Add column date_updated to adlist table
echo -e " ${INFO} Upgrading gravity database from version 12 to 13"
- pihole-FTL sqlite3 "${database}" < "${scriptPath}/12_to_13.sql"
+ sqlite3 "${database}" < "${scriptPath}/12_to_13.sql"
version=13
fi
if [[ "$version" == "13" ]]; then
# Add columns number and status to adlist table
echo -e " ${INFO} Upgrading gravity database from version 13 to 14"
- pihole-FTL sqlite3 "${database}" < "${scriptPath}/13_to_14.sql"
+ sqlite3 "${database}" < "${scriptPath}/13_to_14.sql"
version=14
fi
- if [[ "$version" == "14" ]]; then
- # Changes the vw_adlist created in 5_to_6
- echo -e " ${INFO} Upgrading gravity database from version 14 to 15"
- pihole-FTL sqlite3 "${database}" < "${scriptPath}/14_to_15.sql"
- version=15
- fi
}


@@ -1,15 +0,0 @@
.timeout 30000
PRAGMA FOREIGN_KEYS=OFF;
BEGIN TRANSACTION;
DROP VIEW vw_adlist;
CREATE VIEW vw_adlist AS SELECT DISTINCT address, id
FROM adlist
WHERE enabled = 1
ORDER BY id;
UPDATE info SET value = 15 WHERE property = 'version';
COMMIT;


@@ -23,7 +23,7 @@ fi
# have changed
gravityDBfile="${GRAVITYDB}"
- noReloadRequested=false
+ reload=false
addmode=true
verbose=true
wildcard=false
@@ -35,7 +35,6 @@ typeId=""
comment=""
declare -i domaincount
domaincount=0
reload=false
colfile="/opt/pihole/COL_TABLE"
source ${colfile}
@@ -91,8 +90,7 @@ Options:
-q, --quiet Make output less verbose
-h, --help Show this help dialog
-l, --list Display all your ${listname}listed domains
- --nuke Removes all entries in a list
+ --nuke Removes all entries in a list"
--comment \"text\" Add a comment to the domain. If adding multiple domains the same comment will be used for all"
exit 0
}
@@ -142,18 +140,18 @@ AddDomain() {
domain="$1"
# Is the domain in the list we want to add it to?
- num="$(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT COUNT(*) FROM domainlist WHERE domain = '${domain}';")"
+ num="$(sqlite3 "${gravityDBfile}" "SELECT COUNT(*) FROM domainlist WHERE domain = '${domain}';")"
requestedListname="$(GetListnameFromTypeId "${typeId}")"
if [[ "${num}" -ne 0 ]]; then
- existingTypeId="$(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT type FROM domainlist WHERE domain = '${domain}';")"
+ existingTypeId="$(sqlite3 "${gravityDBfile}" "SELECT type FROM domainlist WHERE domain = '${domain}';")"
if [[ "${existingTypeId}" == "${typeId}" ]]; then
if [[ "${verbose}" == true ]]; then
echo -e " ${INFO} ${1} already exists in ${requestedListname}, no need to add!"
fi
else
existingListname="$(GetListnameFromTypeId "${existingTypeId}")"
- pihole-FTL sqlite3 "${gravityDBfile}" "UPDATE domainlist SET type = ${typeId} WHERE domain='${domain}';"
+ sqlite3 "${gravityDBfile}" "UPDATE domainlist SET type = ${typeId} WHERE domain='${domain}';"
if [[ "${verbose}" == true ]]; then
echo -e " ${INFO} ${1} already exists in ${existingListname}, it has been moved to ${requestedListname}!"
fi
@@ -169,10 +167,10 @@ AddDomain() {
# Insert only the domain here. The enabled and date_added fields will be filled
# with their default values (enabled = true, date_added = current timestamp)
if [[ -z "${comment}" ]]; then
- pihole-FTL sqlite3 "${gravityDBfile}" "INSERT INTO domainlist (domain,type) VALUES ('${domain}',${typeId});"
+ sqlite3 "${gravityDBfile}" "INSERT INTO domainlist (domain,type) VALUES ('${domain}',${typeId});"
else
# also add comment when variable has been set through the "--comment" option
- pihole-FTL sqlite3 "${gravityDBfile}" "INSERT INTO domainlist (domain,type,comment) VALUES ('${domain}',${typeId},'${comment}');"
+ sqlite3 "${gravityDBfile}" "INSERT INTO domainlist (domain,type,comment) VALUES ('${domain}',${typeId},'${comment}');"
fi
}
@@ -181,7 +179,7 @@ RemoveDomain() {
domain="$1"
# Is the domain in the list we want to remove it from?
- num="$(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT COUNT(*) FROM domainlist WHERE domain = '${domain}' AND type = ${typeId};")"
+ num="$(sqlite3 "${gravityDBfile}" "SELECT COUNT(*) FROM domainlist WHERE domain = '${domain}' AND type = ${typeId};")"
requestedListname="$(GetListnameFromTypeId "${typeId}")"
@@ -198,14 +196,14 @@ RemoveDomain() {
fi
reload=true
# Remove it from the current list
- pihole-FTL sqlite3 "${gravityDBfile}" "DELETE FROM domainlist WHERE domain = '${domain}' AND type = ${typeId};"
+ sqlite3 "${gravityDBfile}" "DELETE FROM domainlist WHERE domain = '${domain}' AND type = ${typeId};"
}
Displaylist() {
local count num_pipes domain enabled status nicedate requestedListname
requestedListname="$(GetListnameFromTypeId "${typeId}")"
- data="$(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT domain,enabled,date_modified FROM domainlist WHERE type = ${typeId};" 2> /dev/null)"
+ data="$(sqlite3 "${gravityDBfile}" "SELECT domain,enabled,date_modified FROM domainlist WHERE type = ${typeId};" 2> /dev/null)"
if [[ -z $data ]]; then
echo -e "Not showing empty list"
@@ -243,10 +241,10 @@ Displaylist() {
}
NukeList() {
- count=$(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT COUNT(1) FROM domainlist WHERE type = ${typeId};")
+ count=$(sqlite3 "${gravityDBfile}" "SELECT COUNT(1) FROM domainlist WHERE type = ${typeId};")
listname="$(GetListnameFromTypeId "${typeId}")"
if [ "$count" -gt 0 ];then
- pihole-FTL sqlite3 "${gravityDBfile}" "DELETE FROM domainlist WHERE type = ${typeId};"
+ sqlite3 "${gravityDBfile}" "DELETE FROM domainlist WHERE type = ${typeId};"
echo " ${TICK} Removed ${count} domain(s) from the ${listname}"
else
echo " ${INFO} ${listname} already empty. Nothing to do!"
@@ -270,7 +268,7 @@ while (( "$#" )); do
"--white-wild" | "white-wild" ) typeId=2; wildcard=true;;
"--wild" | "wildcard" ) typeId=3; wildcard=true;;
"--regex" | "regex" ) typeId=3;;
- "-nr"| "--noreload" ) noReloadRequested=true;;
+ "-nr"| "--noreload" ) reload=false;;
"-d" | "--delmode" ) addmode=false;;
"-q" | "--quiet" ) verbose=false;;
"-h" | "--help" ) helpFunc;;
@@ -296,6 +294,6 @@ if $web; then
echo "DONE"
fi
- if [[ ${reload} == true && ${noReloadRequested} == false ]]; then
+ if [[ "${reload}" != false ]]; then
pihole restartdns reload-lists
fi


@@ -39,7 +39,7 @@ flushARP(){
# Truncate network_addresses table in pihole-FTL.db
# This needs to be done before we can truncate the network table due to
# foreign key constraints
- if ! output=$(pihole-FTL sqlite3 "${DBFILE}" "DELETE FROM network_addresses" 2>&1); then
+ if ! output=$(sqlite3 "${DBFILE}" "DELETE FROM network_addresses" 2>&1); then
echo -e "${OVER} ${CROSS} Failed to truncate network_addresses table"
echo " Database location: ${DBFILE}"
echo " Output: ${output}"
@@ -47,7 +47,7 @@ flushARP(){
fi
# Truncate network table in pihole-FTL.db
- if ! output=$(pihole-FTL sqlite3 "${DBFILE}" "DELETE FROM network" 2>&1); then
+ if ! output=$(sqlite3 "${DBFILE}" "DELETE FROM network" 2>&1); then
echo -e "${OVER} ${CROSS} Failed to truncate network table"
echo " Database location: ${DBFILE}"
echo " Output: ${output}"

advanced/Scripts/piholeCheckout.sh (mode changed: Executable file → Normal file)

@@ -166,15 +166,12 @@ checkout() {
checkout_pull_branch "${webInterfaceDir}" "${2}"
elif [[ "${1}" == "ftl" ]] ; then
local path
local oldbranch
path="${2}/${binary}"
oldbranch="$(pihole-FTL -b)"
if check_download_exists "$path"; then
echo " ${TICK} Branch ${2} exists"
echo "${2}" > /etc/pihole/ftlbranch
chmod 644 /etc/pihole/ftlbranch
echo -e " ${INFO} Switching to branch: \"${2}\" from \"${oldbranch}\""
FTLinstall "${binary}"
restart_service pihole-FTL
enable_service pihole-FTL


@@ -1,6 +1,6 @@
#!/usr/bin/env bash
# Pi-hole: A black hole for Internet advertisements
- # (c) 2017 Pi-hole, LLC (https://pi-hole.net)
+ # (c) 2021 Pi-hole (https://pi-hole.net)
# Network-wide ad blocking via your own hardware.
#
# Generates pihole_debug.log to be used for troubleshooting.
@@ -56,6 +56,11 @@ FAQ_BAD_ADDRESS="${COL_CYAN}https://discourse.pi-hole.net/t/why-do-i-see-bad-add
# Other URLs we may use
FORUMS_URL="${COL_CYAN}https://discourse.pi-hole.net${COL_NC}"
TRICORDER_CONTEST="${COL_CYAN}https://pi-hole.net/2016/11/07/crack-our-medical-tricorder-win-a-raspberry-pi-3/${COL_NC}"
# Port numbers used for uploading the debug log
TRICORDER_NC_PORT_NUMBER=9999
TRICORDER_SSL_PORT_NUMBER=9998
# Directories required by Pi-hole
# https://discourse.pi-hole.net/t/what-files-does-pi-hole-use/1684
@@ -73,12 +78,15 @@ HTML_DIRECTORY="/var/www/html"
WEB_GIT_DIRECTORY="${HTML_DIRECTORY}/admin"
#BLOCK_PAGE_DIRECTORY="${HTML_DIRECTORY}/pihole"
SHM_DIRECTORY="/dev/shm"
ETC="/etc"
# Files required by Pi-hole
# https://discourse.pi-hole.net/t/what-files-does-pi-hole-use/1684
PIHOLE_CRON_FILE="${CRON_D_DIRECTORY}/pihole"
PIHOLE_DNS_CONFIG_FILE="${DNSMASQ_D_DIRECTORY}/01-pihole.conf"
PIHOLE_DHCP_CONFIG_FILE="${DNSMASQ_D_DIRECTORY}/02-pihole-dhcp.conf"
PIHOLE_WILDCARD_CONFIG_FILE="${DNSMASQ_D_DIRECTORY}/03-wildcard.conf"
WEB_SERVER_CONFIG_FILE="${WEB_SERVER_CONFIG_DIRECTORY}/lighttpd.conf"
WEB_SERVER_CUSTOM_CONFIG_FILE="${WEB_SERVER_CONFIG_DIRECTORY}/external.conf"
@@ -88,7 +96,6 @@ PIHOLE_LOCAL_HOSTS_FILE="${PIHOLE_DIRECTORY}/local.list"
PIHOLE_LOGROTATE_FILE="${PIHOLE_DIRECTORY}/logrotate"
PIHOLE_SETUP_VARS_FILE="${PIHOLE_DIRECTORY}/setupVars.conf"
PIHOLE_FTL_CONF_FILE="${PIHOLE_DIRECTORY}/pihole-FTL.conf"
PIHOLE_CUSTOM_HOSTS_FILE="${PIHOLE_DIRECTORY}/custom.list"
# Read the value of an FTL config key. The value is printed to stdout.
#
@@ -134,9 +141,6 @@ PIHOLE_FTL_LOG="$(get_ftl_conf_value "LOGFILE" "${LOG_DIRECTORY}/pihole-FTL.log"
PIHOLE_WEB_SERVER_ACCESS_LOG_FILE="${WEB_SERVER_LOG_DIRECTORY}/access.log"
PIHOLE_WEB_SERVER_ERROR_LOG_FILE="${WEB_SERVER_LOG_DIRECTORY}/error.log"
RESOLVCONF="${ETC}/resolv.conf"
DNSMASQ_CONF="${ETC}/dnsmasq.conf"
# An array of operating system "pretty names" that we officially support
# We can loop through the array at any time to see if it matches a value
#SUPPORTED_OS=("Raspbian" "Ubuntu" "Fedora" "Debian" "CentOS")
@@ -161,6 +165,9 @@ PIHOLE_PROCESSES=( "lighttpd" "pihole-FTL" )
# Store the required directories in an array so it can be parsed through
REQUIRED_FILES=("${PIHOLE_CRON_FILE}"
"${PIHOLE_DNS_CONFIG_FILE}"
"${PIHOLE_DHCP_CONFIG_FILE}"
"${PIHOLE_WILDCARD_CONFIG_FILE}"
"${WEB_SERVER_CONFIG_FILE}"
"${WEB_SERVER_CUSTOM_CONFIG_FILE}"
"${PIHOLE_INSTALL_LOG_FILE}"
@@ -178,10 +185,7 @@ REQUIRED_FILES=("${PIHOLE_CRON_FILE}"
"${PIHOLE_DEBUG_LOG}" "${PIHOLE_DEBUG_LOG}"
"${PIHOLE_FTL_LOG}" "${PIHOLE_FTL_LOG}"
"${PIHOLE_WEB_SERVER_ACCESS_LOG_FILE}" "${PIHOLE_WEB_SERVER_ACCESS_LOG_FILE}"
"${PIHOLE_WEB_SERVER_ERROR_LOG_FILE}" "${PIHOLE_WEB_SERVER_ERROR_LOG_FILE}")
"${RESOLVCONF}"
"${DNSMASQ_CONF}"
"${PIHOLE_CUSTOM_HOSTS_FILE}")
DISCLAIMER="This process collects information from your Pi-hole, and optionally uploads it to a unique and random directory on tricorder.pi-hole.net. DISCLAIMER="This process collects information from your Pi-hole, and optionally uploads it to a unique and random directory on tricorder.pi-hole.net.
@@ -194,6 +198,33 @@ show_disclaimer(){
log_write "${DISCLAIMER}" log_write "${DISCLAIMER}"
} }
check_for_ftl(){
echo_current_diagnostic "Checking for pihole-FTL binary"
declare -g FTL_PATH
read -r FTL_PATH < <(which pihole-FTL)
if [ -z "${FTL_PATH}" ]; then
log_write "${CROSS} ${COL_RED} Unable to find pihole-FTL binary.${COL_NC}"
# Non-zero return value
return 2
else
log_write "${TICK} pihole-FTL: ${COL_GREEN}${FTL_PATH}${COL_NC}"
fi
}
check_for_lua(){
echo_current_diagnostic "Checking for lua capabilities"
if ! (${FTL_PATH} lua -v &>/dev/null); then
log_write "${CROSS} ${COL_RED} pihole-FTL binary does not have lua capabilites.${COL_NC}"
# Non-zero return value
return
else
log_write "${TICK} pihole-FTL: ${COL_GREEN}lua found!${COL_NC}"
fi
}
source_setup_variables() {
# Display the current test that is running
log_write "\\n${COL_PURPLE}*** [ INITIALIZING ]${COL_NC} Sourcing setup variables"
@@ -231,7 +262,6 @@ copy_to_debug_log() {
}
initialize_debug() {
local system_uptime
# Clear the screen so the debug log is readable
clear
show_disclaimer
@@ -239,10 +269,6 @@ initialize_debug() {
log_write "${COL_PURPLE}*** [ INITIALIZING ]${COL_NC}" log_write "${COL_PURPLE}*** [ INITIALIZING ]${COL_NC}"
# Timestamp the start of the log # Timestamp the start of the log
log_write "${INFO} $(date "+%Y-%m-%d:%H:%M:%S") debug log has been initialized." log_write "${INFO} $(date "+%Y-%m-%d:%H:%M:%S") debug log has been initialized."
# Uptime of the system
# credits to https://stackoverflow.com/questions/28353409/bash-format-uptime-to-show-days-hours-minutes
system_uptime=$(uptime | awk -F'( |,|:)+' '{if ($7=="min") m=$6; else {if ($7~/^day/){if ($9=="min") {d=$6;m=$8} else {d=$6;h=$8;m=$9}} else {h=$6;m=$7}}} {print d+0,"days,",h+0,"hours,",m+0,"minutes"}')
log_write "${INFO} System has been running for ${system_uptime}"
} }
# This is a function for visually displaying the current test that is being run. # This is a function for visually displaying the current test that is being run.
@@ -411,12 +437,12 @@ os_check() {
# This function gets a list of supported OS versions from a TXT record at versions.pi-hole.net
# and determines whether or not the script is running on one of those systems
local remote_os_domain valid_os valid_version detected_os detected_version cmdResult digReturnCode response
- remote_os_domain=${OS_CHECK_DOMAIN_NAME:-"versions.pi-hole.net"}
+ remote_os_domain="versions.pi-hole.net"
detected_os=$(grep "\bID\b" /etc/os-release | cut -d '=' -f2 | tr -d '"')
detected_version=$(grep VERSION_ID /etc/os-release | cut -d '=' -f2 | tr -d '"')
- cmdResult="$(dig +short -t txt "${remote_os_domain}" @ns1.pi-hole.net 2>&1; echo $?)"
+ cmdResult="$(dig +short -t txt ${remote_os_domain} @ns1.pi-hole.net 2>&1; echo $?)"
#Get the return code of the previous command (last line)
digReturnCode="${cmdResult##*$'\n'}"
@@ -467,9 +493,6 @@ diagnose_operating_system() {
# Display the current test that is running
echo_current_diagnostic "Operating system"
# If the PIHOLE_DOCKER_TAG variable is set, include this information in the debug output
[ -n "${PIHOLE_DOCKER_TAG}" ] && log_write "${INFO} Pi-hole Docker Container: ${PIHOLE_DOCKER_TAG}"
# If there is a /etc/*release file, it's probably a supported operating system, so we can
if ls /etc/*release 1> /dev/null 2>&1; then
# display the attributes to the user from the function made earlier
@@ -590,27 +613,6 @@ processor_check() {
fi
}
disk_usage() {
local file_system
local hide
echo_current_diagnostic "Disk usage"
mapfile -t file_system < <(df -h)
# Some lines of df might contain sensitive information like usernames and passwords.
# E.g. curlftpfs filesystems (https://www.looklinux.com/mount-ftp-share-on-linux-using-curlftps/)
# We are not interested in those lines so we collect keyword, to remove them from the output
# Additinal keywords can be added, separated by "|"
hide="curlftpfs"
# only show those lines not containg a sensitive phrase
for line in "${file_system[@]}"; do
if [[ ! $line =~ $hide ]]; then
log_write " ${line}"
fi
done
}
parse_setup_vars() {
echo_current_diagnostic "Setup variables"
# If the file exists,
@@ -630,6 +632,38 @@ parse_locale() {
parse_file "${pihole_locale}"
}
does_ip_match_setup_vars() {
# Check for IPv4 or 6
local protocol="${1}"
# IP address to check for
local ip_address="${2}"
# See what IP is in the setupVars.conf file
local setup_vars_ip
setup_vars_ip=$(< ${PIHOLE_SETUP_VARS_FILE} grep IPV"${protocol}"_ADDRESS | cut -d '=' -f2)
# If it's an IPv6 address
if [[ "${protocol}" == "6" ]]; then
# Strip off the / (CIDR notation)
if [[ "${ip_address%/*}" == "${setup_vars_ip%/*}" ]]; then
# if it matches, show it in green
log_write " ${COL_GREEN}${ip_address%/*}${COL_NC} matches the IP found in ${PIHOLE_SETUP_VARS_FILE}"
else
# otherwise show it in red with an FAQ URL
log_write " ${COL_RED}${ip_address%/*}${COL_NC} does not match the IP found in ${PIHOLE_SETUP_VARS_FILE} (${FAQ_ULA})"
fi
else
# if the protocol isn't 6, it's 4 so no need to strip the CIDR notation
# since it exists in the setupVars.conf that way
if [[ "${ip_address}" == "${setup_vars_ip}" ]]; then
# show in green if it matches
log_write " ${COL_GREEN}${ip_address}${COL_NC} matches the IP found in ${PIHOLE_SETUP_VARS_FILE}"
else
# otherwise show it in red
log_write " ${COL_RED}${ip_address}${COL_NC} does not match the IP found in ${PIHOLE_SETUP_VARS_FILE} (${FAQ_ULA})"
fi
fi
}
detect_ip_addresses() {
# First argument should be a 4 or a 6
local protocol=${1}
@@ -646,7 +680,8 @@ detect_ip_addresses() {
log_write "${TICK} IPv${protocol} address(es) bound to the ${PIHOLE_INTERFACE} interface:" log_write "${TICK} IPv${protocol} address(es) bound to the ${PIHOLE_INTERFACE} interface:"
# Since there may be more than one IP address, store them in an array # Since there may be more than one IP address, store them in an array
for i in "${!ip_addr_list[@]}"; do for i in "${!ip_addr_list[@]}"; do
log_write " ${ip_addr_list[$i]}" # For each one in the list, print it out
does_ip_match_setup_vars "${protocol}" "${ip_addr_list[$i]}"
done done
# Print a blank line just for formatting # Print a blank line just for formatting
log_write "" log_write ""
@@ -655,6 +690,13 @@ detect_ip_addresses() {
log_write "${CROSS} ${COL_RED}No IPv${protocol} address(es) found on the ${PIHOLE_INTERFACE}${COL_NC} interface.\\n" log_write "${CROSS} ${COL_RED}No IPv${protocol} address(es) found on the ${PIHOLE_INTERFACE}${COL_NC} interface.\\n"
return 1 return 1
fi fi
# If the protocol is v6
if [[ "${protocol}" == "6" ]]; then
# let the user know that as long as there is one green address, things should be ok
log_write " ^ Please note that you may have more than one IP address listed."
log_write " As long as one of them is green, and it matches what is in ${PIHOLE_SETUP_VARS_FILE}, there is no need for concern.\\n"
log_write " The link to the FAQ is for an issue that sometimes occurs when the IPv6 address changes, which is why we check for it.\\n"
fi
} }
ping_ipv4_or_ipv6() { ping_ipv4_or_ipv6() {
@@ -733,11 +775,11 @@ compare_port_to_service_assigned() {
# If the service is a Pi-hole service, highlight it in green # If the service is a Pi-hole service, highlight it in green
if [[ "${service_name}" == "${expected_service}" ]]; then if [[ "${service_name}" == "${expected_service}" ]]; then
log_write "${TICK} ${COL_GREEN}${port}${COL_NC} is in use by ${COL_GREEN}${service_name}${COL_NC}" log_write "[${COL_GREEN}${port}${COL_NC}] is in use by ${COL_GREEN}${service_name}${COL_NC}"
# Otherwise, # Otherwise,
else else
# Show the service name in red since it's non-standard # Show the service name in red since it's non-standard
log_write "${CROSS} ${COL_RED}${port}${COL_NC} is in use by ${COL_RED}${service_name}${COL_NC} (${FAQ_HARDWARE_REQUIREMENTS_PORTS})" log_write "[${COL_RED}${port}${COL_NC}] is in use by ${COL_RED}${service_name}${COL_NC} (${FAQ_HARDWARE_REQUIREMENTS_PORTS})"
fi fi
} }
@@ -753,47 +795,36 @@ check_required_ports() {
# Sort the addresses and remove duplicates # Sort the addresses and remove duplicates
while IFS= read -r line; do while IFS= read -r line; do
ports_in_use+=( "$line" ) ports_in_use+=( "$line" )
done < <( ss --listening --numeric --tcp --udp --processes --no-header ) done < <( lsof -iTCP -sTCP:LISTEN -P -n +c 10 )
# Now that we have the values stored, # Now that we have the values stored,
for i in "${!ports_in_use[@]}"; do for i in "${!ports_in_use[@]}"; do
# loop through them and assign some local variables # loop through them and assign some local variables
local service_name local service_name
service_name=$(echo "${ports_in_use[$i]}" | awk '{gsub(/users:\(\("/,"",$7);gsub(/".*/,"",$7);print $7}') service_name=$(echo "${ports_in_use[$i]}" | awk '{print $1}')
local protocol_type local protocol_type
protocol_type=$(echo "${ports_in_use[$i]}" | awk '{print $1}') protocol_type=$(echo "${ports_in_use[$i]}" | awk '{print $5}')
local port_number local port_number
port_number="$(echo "${ports_in_use[$i]}" | awk '{print $5}')" # | awk '{gsub(/^.*:/,"",$5);print $5}') port_number="$(echo "${ports_in_use[$i]}" | awk '{print $9}')"
# Skip the line if it's the titles of the columns the lsof command produces
if [[ "${service_name}" == COMMAND ]]; then
continue
fi
# Use a case statement to determine if the right services are using the right ports # Use a case statement to determine if the right services are using the right ports
case "$(echo "${port_number}" | rev | cut -d: -f1 | rev)" in case "$(echo "$port_number" | rev | cut -d: -f1 | rev)" in
53) compare_port_to_service_assigned "${resolver}" "${service_name}" "${protocol_type}:${port_number}" 53) compare_port_to_service_assigned "${resolver}" "${service_name}" 53
;; ;;
80) compare_port_to_service_assigned "${web_server}" "${service_name}" "${protocol_type}:${port_number}" 80) compare_port_to_service_assigned "${web_server}" "${service_name}" 80
;; ;;
4711) compare_port_to_service_assigned "${ftl}" "${service_name}" "${protocol_type}:${port_number}" 4711) compare_port_to_service_assigned "${ftl}" "${service_name}" 4711
;; ;;
# If it's not a default port that Pi-hole needs, just print it out for the user to see # If it's not a default port that Pi-hole needs, just print it out for the user to see
*) log_write " ${protocol_type}:${port_number} is in use by ${service_name:=<unknown>}"; *) log_write "${port_number} ${service_name} (${protocol_type})";
esac esac
done done
} }
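For orientation, the awk expressions in the ss-based variant above pick the owning process, protocol and local address out of a single ss line; a hedged worked example (the sample ss output line is an assumption for illustration):

    # sample line as printed by: ss --listening --numeric --tcp --udp --processes --no-header
    line='udp UNCONN 0 0 0.0.0.0:53 0.0.0.0:* users:(("pihole-FTL",pid=1234,fd=10))'
    echo "${line}" | awk '{gsub(/users:\(\("/,"",$7);gsub(/".*/,"",$7);print $7}'    # -> pihole-FTL
    echo "${line}" | awk '{print $1}'                                                # -> udp
    echo "${line}" | awk '{print $5}'                                                # -> 0.0.0.0:53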
ip_command() {
# Obtain and log information from "ip XYZ show" commands
echo_current_diagnostic "${2}"
local entries=()
mapfile -t entries < <(ip "${1}" show)
for line in "${entries[@]}"; do
log_write " ${line}"
done
}
check_ip_command() {
ip_command "addr" "Network interfaces and addresses"
ip_command "route" "Network routing table"
}
check_networking() { check_networking() {
# Runs through several of the functions made earlier; we just clump them # Runs through several of the functions made earlier; we just clump them
# together since they are all related to the networking aspect of things # together since they are all related to the networking aspect of things
@@ -802,9 +833,7 @@ check_networking() {
detect_ip_addresses "6" detect_ip_addresses "6"
ping_gateway "4" ping_gateway "4"
ping_gateway "6" ping_gateway "6"
# Skip the following check if installed in docker container. Unpriv'ed containers do not have access to the information required check_required_ports
# to resolve the service name listening - and the container should not start if there was a port conflict anyway
[ -z "${PIHOLE_DOCKER_TAG}" ] && check_required_ports
} }
check_x_headers() { check_x_headers() {
@@ -857,13 +886,13 @@ dig_at() {
# Store the arguments as variables with names # Store the arguments as variables with names
local protocol="${1}" local protocol="${1}"
local IP="${2}"
echo_current_diagnostic "Name resolution (IPv${protocol}) using a random blocked domain and a known ad-serving domain" echo_current_diagnostic "Name resolution (IPv${protocol}) using a random blocked domain and a known ad-serving domain"
# Set more local variables # Set more local variables
# We need to test name resolution locally, via Pi-hole, and via a public resolver # We need to test name resolution locally, via Pi-hole, and via a public resolver
local local_dig local local_dig
local pihole_dig
local remote_dig local remote_dig
local interfaces
local addresses
# Use a static domain that we know has IPv4 and IPv6 to avoid false positives # Use a static domain that we know has IPv4 and IPv6 to avoid false positives
# Sometimes the randomly chosen domains don't use IPv6, or something else is wrong with them # Sometimes the randomly chosen domains don't use IPv6, or something else is wrong with them
local remote_url="doubleclick.com" local remote_url="doubleclick.com"
@@ -872,15 +901,15 @@ dig_at() {
if [[ ${protocol} == "6" ]]; then if [[ ${protocol} == "6" ]]; then
# Set the IPv6 variables and record type # Set the IPv6 variables and record type
local local_address="::1" local local_address="::1"
local pihole_address="${IP}"
local remote_address="2001:4860:4860::8888" local remote_address="2001:4860:4860::8888"
local sed_selector="inet6"
local record_type="AAAA" local record_type="AAAA"
# Otherwise, it should be 4 # Otherwise, it should be 4
else else
# so use the IPv4 values # so use the IPv4 values
local local_address="127.0.0.1" local local_address="127.0.0.1"
local pihole_address="${IP}"
local remote_address="8.8.8.8" local remote_address="8.8.8.8"
local sed_selector="inet"
local record_type="A" local record_type="A"
fi fi
@@ -888,57 +917,34 @@ dig_at() {
# This helps emulate queries to different domains that a user might query # This helps emulate queries to different domains that a user might query
# It will also give extra assurance that Pi-hole is correctly resolving and blocking domains # It will also give extra assurance that Pi-hole is correctly resolving and blocking domains
local random_url local random_url
random_url=$(pihole-FTL sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" "SELECT domain FROM vw_gravity ORDER BY RANDOM() LIMIT 1") random_url=$(sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" "SELECT domain FROM vw_gravity ORDER BY RANDOM() LIMIT 1")
# First, do a dig on localhost to see if Pi-hole can use itself to block a domain
if local_dig=$(dig +tries=1 +time=2 -"${protocol}" "${random_url}" @${local_address} +short "${record_type}"); then
# If it can, show success
log_write "${TICK} ${random_url} ${COL_GREEN}is ${local_dig}${COL_NC} via ${COL_CYAN}localhost$COL_NC (${local_address})"
else
# Otherwise, show a failure
log_write "${CROSS} ${COL_RED}Failed to resolve${COL_NC} ${random_url} via ${COL_RED}localhost${COL_NC} (${local_address})"
fi
# Next we need to check if Pi-hole can resolve a domain when the query is sent to its IP address # Next we need to check if Pi-hole can resolve a domain when the query is sent to its IP address
# This better emulates how clients will interact with Pi-hole as opposed to above where Pi-hole is # This better emulates how clients will interact with Pi-hole as opposed to above where Pi-hole is
# just asking itself locally # just asking itself locally
# The default timeouts and tries are reduced in case the DNS server isn't working, so the user isn't # The default timeouts and tries are reduced in case the DNS server isn't working, so the user isn't waiting for too long
# waiting for too long
#
# Turn off history expansion such that the "!" in the sed command cannot do silly things
set +H
# Get interfaces
# sed logic breakdown:
# / master /d;
# Removes all interfaces that are slaves of others (e.g. virtual docker interfaces)
# /UP/!d;
# Removes all interfaces which are not UP
# s/^[0-9]*: //g;
# Removes interface index
# s/: <.*//g;
# Removes everything after the interface name
interfaces="$(ip link show | sed "/ master /d;/UP/!d;s/^[0-9]*: //g;s/: <.*//g;")"
while IFS= read -r iface ; do # If Pi-hole can dig itself from its IP (not the loopback address)
# Get addresses of current interface if pihole_dig=$(dig +tries=1 +time=2 -"${protocol}" "${random_url}" @"${pihole_address}" +short "${record_type}"); then
# sed logic breakdown: # show a success
# /inet(|6) /!d; log_write "${TICK} ${random_url} ${COL_GREEN}is ${pihole_dig}${COL_NC} via ${COL_CYAN}Pi-hole${COL_NC} (${pihole_address})"
# Removes all lines from ip a that do not contain either "inet " or "inet6 "
# s/^.*inet(|6) //g;
# Removes all leading whitespace as well as the "inet " or "inet6 " string
# s/\/.*$//g;
# Removes CIDR and everything thereafter (e.g., scope properties)
addresses="$(ip address show dev "${iface}" | sed "/${sed_selector} /!d;s/^.*${sed_selector} //g;s/\/.*$//g;")"
if [ -n "${addresses}" ]; then
while IFS= read -r local_address ; do
# Check if Pi-hole can use itself to block a domain
if local_dig=$(dig +tries=1 +time=2 -"${protocol}" "${random_url}" @"${local_address}" +short "${record_type}"); then
# If it can, show success
log_write "${TICK} ${random_url} ${COL_GREEN}is ${local_dig}${COL_NC} on ${COL_CYAN}${iface}${COL_NC} (${COL_CYAN}${local_address}${COL_NC})"
else else
# Otherwise, show a failure # Otherwise, show a failure
log_write "${CROSS} ${COL_RED}Failed to resolve${COL_NC} ${random_url} on ${COL_RED}${iface}${COL_NC} (${COL_RED}${local_address}${COL_NC})" log_write "${CROSS} ${COL_RED}Failed to resolve${COL_NC} ${random_url} via ${COL_RED}Pi-hole${COL_NC} (${pihole_address})"
fi fi
done <<< "${addresses}"
else
log_write "${TICK} No IPv${protocol} address available on ${COL_CYAN}${iface}${COL_NC}"
fi
done <<< "${interfaces}"
# Finally, we need to make sure legitimate queries can get out to the Internet using an external, public DNS server # Finally, we need to make sure legitimate queries can get out to the Internet using an external, public DNS server
# We are using the static remote_url here instead of a random one because we know it works with IPv4 and IPv6 # We are using the static remote_url here instead of a random one because we know it works with IPv4 and IPv6
if remote_dig=$(dig +tries=1 +time=2 -"${protocol}" "${remote_url}" @"${remote_address}" +short "${record_type}" | head -n1); then if remote_dig=$(dig +tries=1 +time=2 -"${protocol}" "${remote_url}" @${remote_address} +short "${record_type}" | head -n1); then
# If successful, the real IP of the domain will be returned instead of Pi-hole's IP # If successful, the real IP of the domain will be returned instead of Pi-hole's IP
log_write "${TICK} ${remote_url} ${COL_GREEN}is ${remote_dig}${COL_NC} via ${COL_CYAN}a remote, public DNS server${COL_NC} (${remote_address})" log_write "${TICK} ${remote_url} ${COL_GREEN}is ${remote_dig}${COL_NC} via ${COL_CYAN}a remote, public DNS server${COL_NC} (${remote_address})"
else else
@@ -1053,7 +1059,7 @@ parse_file() {
local file_lines local file_lines
# For each line in the file, # For each line in the file,
for file_lines in "${file_info[@]}"; do for file_lines in "${file_info[@]}"; do
if [[ -n "${file_lines}" ]]; then if [[ ! -z "${file_lines}" ]]; then
# don't include the Web password hash # don't include the Web password hash
[[ "${file_lines}" =~ ^\#.*$ || ! "${file_lines}" || "${file_lines}" == "WEBPASSWORD="* ]] && continue [[ "${file_lines}" =~ ^\#.*$ || ! "${file_lines}" || "${file_lines}" == "WEBPASSWORD="* ]] && continue
# otherwise, display the lines of the file # otherwise, display the lines of the file
@@ -1067,8 +1073,12 @@ parse_file() {
check_name_resolution() { check_name_resolution() {
# Check name resolution from localhost, Pi-hole's IP, and Google's name servers # Check name resolution from localhost, Pi-hole's IP, and Google's name servers
# using the function we created earlier # using the function we created earlier
dig_at 4 dig_at 4 "${IPV4_ADDRESS%/*}"
dig_at 6 # If IPv6 enabled,
if [[ "${IPV6_ADDRESS}" ]]; then
# check resolution
dig_at 6 "${IPV6_ADDRESS%/*}"
fi
} }
# This function can check a directory exists # This function can check a directory exists
@@ -1111,17 +1121,13 @@ list_files_in_dir() {
: :
elif [[ "${dir_to_parse}" == "${SHM_DIRECTORY}" ]]; then elif [[ "${dir_to_parse}" == "${SHM_DIRECTORY}" ]]; then
# SHM file - we do not want to see the content, but we want to see the files and their sizes # SHM file - we do not want to see the content, but we want to see the files and their sizes
log_write "$(ls -lhd "${dir_to_parse}"/"${each_file}")" log_write "$(ls -ld "${dir_to_parse}"/"${each_file}")"
elif [[ "${dir_to_parse}" == "${DNSMASQ_D_DIRECTORY}" ]]; then
# in case of the dnsmasq directory, include all files in the debug output
log_write "\\n${COL_GREEN}$(ls -lhd "${dir_to_parse}"/"${each_file}")${COL_NC}"
make_array_from_file "${dir_to_parse}/${each_file}"
else else
# Then, parse the file's content into an array so each line can be analyzed if need be # Then, parse the file's content into an array so each line can be analyzed if need be
for i in "${!REQUIRED_FILES[@]}"; do for i in "${!REQUIRED_FILES[@]}"; do
if [[ "${dir_to_parse}/${each_file}" == "${REQUIRED_FILES[$i]}" ]]; then if [[ "${dir_to_parse}/${each_file}" == "${REQUIRED_FILES[$i]}" ]]; then
# display the filename # display the filename
log_write "\\n${COL_GREEN}$(ls -lhd "${dir_to_parse}"/"${each_file}")${COL_NC}" log_write "\\n${COL_GREEN}$(ls -ld "${dir_to_parse}"/"${each_file}")${COL_NC}"
# Check if the file we want to view has a limit (because sometimes we just need a little bit of info from the file, not the entire thing) # Check if the file we want to view has a limit (because sometimes we just need a little bit of info from the file, not the entire thing)
case "${dir_to_parse}/${each_file}" in case "${dir_to_parse}/${each_file}" in
# If it's Web server error log, give the first and last 25 lines # If it's Web server error log, give the first and last 25 lines
@@ -1160,7 +1166,6 @@ show_content_of_pihole_files() {
show_content_of_files_in_dir "${WEB_SERVER_LOG_DIRECTORY}" show_content_of_files_in_dir "${WEB_SERVER_LOG_DIRECTORY}"
show_content_of_files_in_dir "${LOG_DIRECTORY}" show_content_of_files_in_dir "${LOG_DIRECTORY}"
show_content_of_files_in_dir "${SHM_DIRECTORY}" show_content_of_files_in_dir "${SHM_DIRECTORY}"
show_content_of_files_in_dir "${ETC}"
} }
head_tail_log() { head_tail_log() {
@@ -1202,7 +1207,7 @@ show_db_entries() {
IFS=$'\r\n' IFS=$'\r\n'
local entries=() local entries=()
mapfile -t entries < <(\ mapfile -t entries < <(\
pihole-FTL sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" \ sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" \
-cmd ".headers on" \ -cmd ".headers on" \
-cmd ".mode column" \ -cmd ".mode column" \
-cmd ".width ${widths}" \ -cmd ".width ${widths}" \
@@ -1227,7 +1232,7 @@ show_FTL_db_entries() {
IFS=$'\r\n' IFS=$'\r\n'
local entries=() local entries=()
mapfile -t entries < <(\ mapfile -t entries < <(\
pihole-FTL sqlite3 "${PIHOLE_FTL_DB_FILE}" \ sqlite3 "${PIHOLE_FTL_DB_FILE}" \
-cmd ".headers on" \ -cmd ".headers on" \
-cmd ".mode column" \ -cmd ".mode column" \
-cmd ".width ${widths}" \ -cmd ".width ${widths}" \
@@ -1261,11 +1266,11 @@ show_groups() {
} }
show_adlists() { show_adlists() {
show_db_entries "Adlists" "SELECT id,CASE enabled WHEN '0' THEN ' 0' WHEN '1' THEN ' 1' ELSE enabled END enabled,GROUP_CONCAT(adlist_by_group.group_id) group_ids,address,datetime(date_added,'unixepoch','localtime') date_added,datetime(date_modified,'unixepoch','localtime') date_modified,comment FROM adlist LEFT JOIN adlist_by_group ON adlist.id = adlist_by_group.adlist_id GROUP BY id;" "5 7 12 100 19 19 50" show_db_entries "Adlists" "SELECT id,CASE enabled WHEN '0' THEN ' 0' WHEN '1' THEN ' 1' ELSE enabled END enabled,GROUP_CONCAT(adlist_by_group.group_id) group_ids,address,datetime(date_added,'unixepoch','localtime') date_added,datetime(date_modified,'unixepoch','localtime') date_modified,comment FROM adlist LEFT JOIN adlist_by_group ON adlist.id = adlist_by_group.adlist_id GROUP BY id;" "4 7 12 100 19 19 50"
} }
show_domainlist() { show_domainlist() {
show_db_entries "Domainlist (0/1 = exact white-/blacklist, 2/3 = regex white-/blacklist)" "SELECT id,CASE type WHEN '0' THEN '0 ' WHEN '1' THEN ' 1 ' WHEN '2' THEN ' 2 ' WHEN '3' THEN ' 3' ELSE type END type,CASE enabled WHEN '0' THEN ' 0' WHEN '1' THEN ' 1' ELSE enabled END enabled,GROUP_CONCAT(domainlist_by_group.group_id) group_ids,domain,datetime(date_added,'unixepoch','localtime') date_added,datetime(date_modified,'unixepoch','localtime') date_modified,comment FROM domainlist LEFT JOIN domainlist_by_group ON domainlist.id = domainlist_by_group.domainlist_id GROUP BY id;" "5 4 7 12 100 19 19 50" show_db_entries "Domainlist (0/1 = exact white-/blacklist, 2/3 = regex white-/blacklist)" "SELECT id,CASE type WHEN '0' THEN '0 ' WHEN '1' THEN ' 1 ' WHEN '2' THEN ' 2 ' WHEN '3' THEN ' 3' ELSE type END type,CASE enabled WHEN '0' THEN ' 0' WHEN '1' THEN ' 1' ELSE enabled END enabled,GROUP_CONCAT(domainlist_by_group.group_id) group_ids,domain,datetime(date_added,'unixepoch','localtime') date_added,datetime(date_modified,'unixepoch','localtime') date_modified,comment FROM domainlist LEFT JOIN domainlist_by_group ON domainlist.id = domainlist_by_group.domainlist_id GROUP BY id;" "4 4 7 12 100 19 19 50"
} }
show_clients() { show_clients() {
@@ -1277,14 +1282,14 @@ show_messages() {
} }
analyze_gravity_list() { analyze_gravity_list() {
echo_current_diagnostic "Gravity Database" echo_current_diagnostic "Gravity List and Database"
local gravity_permissions local gravity_permissions
gravity_permissions=$(ls -lhd "${PIHOLE_GRAVITY_DB_FILE}") gravity_permissions=$(ls -ld "${PIHOLE_GRAVITY_DB_FILE}")
log_write "${COL_GREEN}${gravity_permissions}${COL_NC}" log_write "${COL_GREEN}${gravity_permissions}${COL_NC}"
show_db_entries "Info table" "SELECT property,value FROM info" "20 40" show_db_entries "Info table" "SELECT property,value FROM info" "20 40"
gravity_updated_raw="$(pihole-FTL sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" "SELECT value FROM info where property = 'updated'")" gravity_updated_raw="$(sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" "SELECT value FROM info where property = 'updated'")"
gravity_updated="$(date -d @"${gravity_updated_raw}")" gravity_updated="$(date -d @"${gravity_updated_raw}")"
log_write " Last gravity run finished at: ${COL_CYAN}${gravity_updated}${COL_NC}" log_write " Last gravity run finished at: ${COL_CYAN}${gravity_updated}${COL_NC}"
log_write "" log_write ""
@@ -1292,7 +1297,7 @@ analyze_gravity_list() {
OLD_IFS="$IFS" OLD_IFS="$IFS"
IFS=$'\r\n' IFS=$'\r\n'
local gravity_sample=() local gravity_sample=()
mapfile -t gravity_sample < <(pihole-FTL sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" "SELECT domain FROM vw_gravity LIMIT 10") mapfile -t gravity_sample < <(sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" "SELECT domain FROM vw_gravity LIMIT 10")
log_write " ${COL_CYAN}----- First 10 Gravity Domains -----${COL_NC}" log_write " ${COL_CYAN}----- First 10 Gravity Domains -----${COL_NC}"
for line in "${gravity_sample[@]}"; do for line in "${gravity_sample[@]}"; do
@@ -1303,29 +1308,39 @@ analyze_gravity_list() {
IFS="$OLD_IFS" IFS="$OLD_IFS"
} }
obfuscated_pihole_log() { analyze_pihole_log() {
local pihole_log=("$@") echo_current_diagnostic "Pi-hole log"
local line local head_line
# Put the current Internal Field Separator into another variable so it can be restored later
OLD_IFS="$IFS"
# Get the lines that are in the file(s) and store them in an array for parsing later
IFS=$'\r\n'
local pihole_log_permissions
pihole_log_permissions=$(ls -ld "${PIHOLE_LOG}")
log_write "${COL_GREEN}${pihole_log_permissions}${COL_NC}"
local pihole_log_head=()
mapfile -t pihole_log_head < <(head -n 20 ${PIHOLE_LOG})
log_write " ${COL_CYAN}-----head of $(basename ${PIHOLE_LOG})------${COL_NC}"
local error_to_check_for local error_to_check_for
local line_to_obfuscate local line_to_obfuscate
local obfuscated_line local obfuscated_line
for line in "${pihole_log[@]}"; do for head_line in "${pihole_log_head[@]}"; do
# A common error in the pihole.log is when there is a non-hosts formatted file # A common error in the pihole.log is when there is a non-hosts formatted file
# that the DNS server is attempting to read. Since it's not formatted # that the DNS server is attempting to read. Since it's not formatted
# correctly, there will be an entry for "bad address at line n" # correctly, there will be an entry for "bad address at line n"
# So we can check for that here and highlight it in red so the user can see it easily # So we can check for that here and highlight it in red so the user can see it easily
error_to_check_for=$(echo "${line}" | grep 'bad address at') error_to_check_for=$(echo "${head_line}" | grep 'bad address at')
# Some users may not want to have the domains they visit sent to us # Some users may not want to have the domains they visit sent to us
# To that end, we check for lines in the log that would contain a domain name # To that end, we check for lines in the log that would contain a domain name
line_to_obfuscate=$(echo "${line}" | grep ': query\|: forwarded\|: reply') line_to_obfuscate=$(echo "${head_line}" | grep ': query\|: forwarded\|: reply')
# If the variable contains a value, it found an error in the log # If the variable contains a value, it found an error in the log
if [[ -n ${error_to_check_for} ]]; then if [[ -n ${error_to_check_for} ]]; then
# So we can print it in red to make it visible to the user # So we can print it in red to make it visible to the user
log_write " ${CROSS} ${COL_RED}${line}${COL_NC} (${FAQ_BAD_ADDRESS})" log_write " ${CROSS} ${COL_RED}${head_line}${COL_NC} (${FAQ_BAD_ADDRESS})"
else else
# If the variable does not have a value (the current default behavior), do not obfuscate anything # If the variable does not have a value (the current default behavior), do not obfuscate anything
if [[ -z ${OBFUSCATE} ]]; then if [[ -z ${OBFUSCATE} ]]; then
log_write " ${line}" log_write " ${head_line}"
# Otherwise, a flag was passed to this command to obfuscate domains in the log # Otherwise, a flag was passed to this command to obfuscate domains in the log
else else
# So first check if there are domains in the log that should be obfuscated # So first check if there are domains in the log that should be obfuscated
@@ -1335,56 +1350,35 @@ obfuscated_pihole_log() {
obfuscated_line=$(echo "${line_to_obfuscate}" | awk -v placeholder="${OBFUSCATED_PLACEHOLDER}" '{sub($6,placeholder); print $0}') obfuscated_line=$(echo "${line_to_obfuscate}" | awk -v placeholder="${OBFUSCATED_PLACEHOLDER}" '{sub($6,placeholder); print $0}')
log_write " ${obfuscated_line}" log_write " ${obfuscated_line}"
else else
log_write " ${line}" log_write " ${head_line}"
fi fi
fi fi
fi fi
done done
}
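A small illustration of the field-six substitution that obfuscated_pihole_log applies above; the dnsmasq log line and the placeholder value are assumptions for the example only:

    OBFUSCATED_PLACEHOLDER="HIDDEN"
    line='Jun 21 10:49:46 dnsmasq[1234]: query[A] example.com from 192.168.1.2'
    echo "${line}" | awk -v placeholder="${OBFUSCATED_PLACEHOLDER}" '{sub($6,placeholder); print $0}'
    # -> Jun 21 10:49:46 dnsmasq[1234]: query[A] HIDDEN from 192.168.1.2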
analyze_pihole_log() {
echo_current_diagnostic "Pi-hole log"
local pihole_log_head=()
local pihole_log_tail=()
local pihole_log_permissions
local logging_enabled
logging_enabled=$(grep -c "^log-queries" /etc/dnsmasq.d/01-pihole.conf)
if [[ "${logging_enabled}" == "0" ]]; then
# Inform user that logging has been disabled and pihole.log does not contain queries
log_write "${INFO} Query logging is disabled"
log_write ""
fi
# Put the current Internal Field Separator into another variable so it can be restored later
OLD_IFS="$IFS"
# Get the lines that are in the file(s) and store them in an array for parsing later
IFS=$'\r\n'
pihole_log_permissions=$(ls -lhd "${PIHOLE_LOG}")
log_write "${COL_GREEN}${pihole_log_permissions}${COL_NC}"
mapfile -t pihole_log_head < <(head -n 20 ${PIHOLE_LOG})
log_write " ${COL_CYAN}-----head of $(basename ${PIHOLE_LOG})------${COL_NC}"
obfuscated_pihole_log "${pihole_log_head[@]}"
log_write ""
mapfile -t pihole_log_tail < <(tail -n 20 ${PIHOLE_LOG})
log_write " ${COL_CYAN}-----tail of $(basename ${PIHOLE_LOG})------${COL_NC}"
obfuscated_pihole_log "${pihole_log_tail[@]}"
log_write "" log_write ""
# Set the IFS back to what it was # Set the IFS back to what it was
IFS="$OLD_IFS" IFS="$OLD_IFS"
} }
curl_to_tricorder() { tricorder_use_nc_or_curl() {
# Users can submit their debug logs using curl (encrypted) # Users can submit their debug logs using nc (unencrypted) or curl (encrypted) if available
# Check for curl first since encryption is a good thing
if command -v curl &> /dev/null; then
# If the command exists,
log_write " * Using ${COL_GREEN}curl${COL_NC} for transmission." log_write " * Using ${COL_GREEN}curl${COL_NC} for transmission."
# transmit the log via TLS and store the token returned in a variable # transmit the log via TLS and store the token returned in a variable
tricorder_token=$(curl --silent --fail --show-error --upload-file ${PIHOLE_DEBUG_LOG} https://tricorder.pi-hole.net 2>&1) tricorder_token=$(curl --silent --upload-file ${PIHOLE_DEBUG_LOG} https://tricorder.pi-hole.net:${TRICORDER_SSL_PORT_NUMBER})
if [[ "${tricorder_token}" != "https://tricorder.pi-hole.net/"* ]]; then if [ -z "${tricorder_token}" ]; then
log_write " * ${COL_GREEN}curl${COL_NC} failed, contact Pi-hole support for assistance." # curl failed, fallback to nc
# Log curl error (if available) log_write " * ${COL_GREEN}curl${COL_NC} failed, falling back to ${COL_YELLOW}netcat${COL_NC} for transmission."
if [ -n "${tricorder_token}" ]; then tricorder_token=$(< ${PIHOLE_DEBUG_LOG} nc tricorder.pi-hole.net ${TRICORDER_NC_PORT_NUMBER})
log_write " * Error message: ${COL_RED}${tricorder_token}${COL_NC}\\n"
tricorder_token=""
fi fi
# Otherwise,
else
# use net cat
log_write "${INFO} Using ${COL_YELLOW}netcat${COL_NC} for transmission."
# Save the token returned by our server in a variable
tricorder_token=$(< ${PIHOLE_DEBUG_LOG} nc tricorder.pi-hole.net ${TRICORDER_NC_PORT_NUMBER})
fi fi
} }
@@ -1403,55 +1397,48 @@ upload_to_tricorder() {
# Provide information on what they should do with their token # Provide information on what they should do with their token
log_write " * The debug log can be uploaded to tricorder.pi-hole.net for sharing with developers only." log_write " * The debug log can be uploaded to tricorder.pi-hole.net for sharing with developers only."
log_write " * For more information, see: ${TRICORDER_CONTEST}"
# If pihole -d is running automatically log_write " * If available, we'll use openssl to upload the log, otherwise it will fall back to netcat."
# If pihole -d is running automatically (usually through the dashboard)
if [[ "${AUTOMATED}" ]]; then if [[ "${AUTOMATED}" ]]; then
# let the user know # let the user know
log_write "${INFO} Debug script running in automated mode" log_write "${INFO} Debug script running in automated mode"
# and then decide again which tool to use to submit it # and then decide again which tool to use to submit it
curl_to_tricorder tricorder_use_nc_or_curl
# If we're not running in automated mode, # If we're not running in automated mode,
else else
# if not being called from the web interface
if [[ ! "${WEBCALL}" ]]; then
echo "" echo ""
# give the user a choice of uploading it or not # give the user a choice of uploading it or not
# Users can review the log file locally (or the output of the script since they are the same) and try to self-diagnose their problem # Users can review the log file locally (or the output of the script since they are the same) and try to self-diagnose their problem
read -r -p "[?] Would you like to upload the log? [y/N] " response read -r -p "[?] Would you like to upload the log? [y/N] " response
case ${response} in case ${response} in
# If they say yes, run our function for uploading the log # If they say yes, run our function for uploading the log
[yY][eE][sS]|[yY]) curl_to_tricorder;; [yY][eE][sS]|[yY]) tricorder_use_nc_or_curl;;
# If they choose no, just exit out of the script # If they choose no, just exit out of the script
*) log_write " * Log will ${COL_GREEN}NOT${COL_NC} be uploaded to tricorder.\\n * A local copy of the debug log can be found at: ${COL_CYAN}${PIHOLE_DEBUG_LOG}${COL_NC}\\n";exit; *) log_write " * Log will ${COL_GREEN}NOT${COL_NC} be uploaded to tricorder.\\n * A local copy of the debug log can be found at: ${COL_CYAN}${PIHOLE_DEBUG_LOG}${COL_NC}\\n";exit;
esac esac
fi fi
fi
# Check if tricorder.pi-hole.net is reachable and provide token # Check if tricorder.pi-hole.net is reachable and provide token
# along with some additional useful information # along with some additional useful information
if [[ -n "${tricorder_token}" ]]; then if [[ -n "${tricorder_token}" ]]; then
# Again, try to make this visually striking so the user realizes they need to do something with this information # Again, try to make this visually striking so the user realizes they need to do something with this information
# Namely, provide the Pi-hole devs with the token # Namely, provide the Pi-hole devs with the token
log_write "" log_write ""
log_write "${COL_PURPLE}*****************************************************************${COL_NC}" log_write "${COL_PURPLE}***********************************${COL_NC}"
log_write "${COL_PURPLE}*****************************************************************${COL_NC}\\n" log_write "${COL_PURPLE}***********************************${COL_NC}"
log_write "${TICK} Your debug token is: ${COL_GREEN}${tricorder_token}${COL_NC}" log_write "${TICK} Your debug token is: ${COL_GREEN}${tricorder_token}${COL_NC}"
log_write "${INFO}${COL_RED} Logs are deleted 48 hours after upload.${COL_NC}\\n" log_write "${COL_PURPLE}***********************************${COL_NC}"
log_write "${COL_PURPLE}*****************************************************************${COL_NC}" log_write "${COL_PURPLE}***********************************${COL_NC}"
log_write "${COL_PURPLE}*****************************************************************${COL_NC}"
log_write "" log_write ""
log_write " * Provide the token above to the Pi-hole team for assistance at ${FORUMS_URL}" log_write " * Provide the token above to the Pi-hole team for assistance at"
log_write " * ${FORUMS_URL}"
log_write " * Your log will self-destruct on our server after ${COL_RED}48 hours${COL_NC}."
# If no token was generated # If no token was generated
else else
# Show an error and some help instructions # Show an error and some help instructions
# Skip this if being called from the web interface and automatic mode was not chosen (users opted out of uploading)
if [[ "${WEBCALL}" ]] && [[ ! "${AUTOMATED}" ]]; then
:
else
log_write "${CROSS} ${COL_RED}There was an error uploading your debug log.${COL_NC}" log_write "${CROSS} ${COL_RED}There was an error uploading your debug log.${COL_NC}"
log_write " * Please try again or contact the Pi-hole team for assistance." log_write " * Please try again or contact the Pi-hole team for assistance."
fi fi
fi
# Finally, show where the log file is no matter the outcome of the function so users can look at it # Finally, show where the log file is no matter the outcome of the function so users can look at it
log_write " * A local copy of the debug log can be found at: ${COL_CYAN}${PIHOLE_DEBUG_LOG}${COL_NC}\\n" log_write " * A local copy of the debug log can be found at: ${COL_CYAN}${PIHOLE_DEBUG_LOG}${COL_NC}\\n"
} }
@@ -1461,30 +1448,40 @@ make_temporary_log
initialize_debug initialize_debug
# setupVars.conf needs to be sourced before the networking so the values are # setupVars.conf needs to be sourced before the networking so the values are
# available to the other functions # available to the other functions
source_setup_variables if ! check_for_ftl; then
check_component_versions log_write "${COL_RED}Unable to complete debug run. Please contact support for assistance."
check_critical_program_versions log_write "Please note the error that is displayed above.${COL_NC}"
diagnose_operating_system #Non-zero return value
check_selinux exit 2
check_firewalld fi
processor_check if ! check_for_lua; then
disk_usage log_write "${COL_RED}Unable to complete debug run. Please contact support for assistance."
check_ip_command log_write "Please note the error that is displayed above.${COL_NC}"
check_networking #Non-zero return value
check_name_resolution exit 2
check_dhcp_servers fi
process_status # source_setup_variables
ftl_full_status # check_component_versions
parse_setup_vars # check_critical_program_versions
check_x_headers # diagnose_operating_system
analyze_gravity_list # check_selinux
show_groups # check_firewalld
show_domainlist # processor_check
show_clients # check_networking
show_adlists # check_name_resolution
show_content_of_pihole_files # check_dhcp_servers
show_messages # process_status
parse_locale # ftl_full_status
analyze_pihole_log # parse_setup_vars
copy_to_debug_log # check_x_headers
upload_to_tricorder # analyze_gravity_list
# show_groups
# show_domainlist
# show_clients
# show_adlists
# show_content_of_pihole_files
# show_messages
# parse_locale
# analyze_pihole_log
# copy_to_debug_log
# upload_to_tricorder

View File

@@ -11,11 +11,6 @@
colfile="/opt/pihole/COL_TABLE" colfile="/opt/pihole/COL_TABLE"
source ${colfile} source ${colfile}
# In case we're running at the same time as a system logrotate, use a
# separate logrotate state file to prevent stepping on each other's
# toes.
STATEFILE="/var/lib/logrotate/pihole"
# Determine database location # Determine database location
# Obtain DBFILE=... setting from pihole-FTL.db # Obtain DBFILE=... setting from pihole-FTL.db
# Constructed to return nothing when # Constructed to return nothing when
@@ -37,7 +32,7 @@ if [[ "$@" == *"once"* ]]; then
# Nightly logrotation # Nightly logrotation
if command -v /usr/sbin/logrotate >/dev/null; then if command -v /usr/sbin/logrotate >/dev/null; then
# Logrotate once # Logrotate once
/usr/sbin/logrotate --force --state "${STATEFILE}" /etc/pihole/logrotate /usr/sbin/logrotate --force /etc/pihole/logrotate
else else
# Copy pihole.log over to pihole.log.1 # Copy pihole.log over to pihole.log.1
# and empty out pihole.log # and empty out pihole.log
@@ -52,8 +47,8 @@ else
# Manual flushing # Manual flushing
if command -v /usr/sbin/logrotate >/dev/null; then if command -v /usr/sbin/logrotate >/dev/null; then
# Logrotate twice to move all data out of sight of FTL # Logrotate twice to move all data out of sight of FTL
/usr/sbin/logrotate --force --state "${STATEFILE}" /etc/pihole/logrotate; sleep 3 /usr/sbin/logrotate --force /etc/pihole/logrotate; sleep 3
/usr/sbin/logrotate --force --state "${STATEFILE}" /etc/pihole/logrotate /usr/sbin/logrotate --force /etc/pihole/logrotate
else else
# Flush both pihole.log and pihole.log.1 (if existing) # Flush both pihole.log and pihole.log.1 (if existing)
echo " " > /var/log/pihole.log echo " " > /var/log/pihole.log
@@ -63,7 +58,7 @@ else
fi fi
fi fi
# Delete most recent 24 hours from FTL's database, leave even older data intact (don't wipe out all history) # Delete most recent 24 hours from FTL's database, leave even older data intact (don't wipe out all history)
deleted=$(pihole-FTL sqlite3 "${DBFILE}" "DELETE FROM queries WHERE timestamp >= strftime('%s','now')-86400; select changes() from queries limit 1") deleted=$(sqlite3 "${DBFILE}" "DELETE FROM queries WHERE timestamp >= strftime('%s','now')-86400; select changes() from queries limit 1")
# Restart pihole-FTL to force reloading history # Restart pihole-FTL to force reloading history
sudo pihole restartdns sudo pihole restartdns

View File

@@ -121,7 +121,7 @@ scanDatabaseTable() {
fi fi
# Send prepared query to gravity database # Send prepared query to gravity database
result="$(pihole-FTL sqlite3 "${gravityDBfile}" "${querystr}")" 2> /dev/null result="$(sqlite3 "${gravityDBfile}" "${querystr}")" 2> /dev/null
if [[ -z "${result}" ]]; then if [[ -z "${result}" ]]; then
# Return early when there are no matches in this table # Return early when there are no matches in this table
return return
@@ -164,7 +164,7 @@ scanRegexDatabaseTable() {
type="${3:-}" type="${3:-}"
# Query all regex from the corresponding database tables # Query all regex from the corresponding database tables
mapfile -t regexList < <(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT domain FROM domainlist WHERE type = ${type}" 2> /dev/null) mapfile -t regexList < <(sqlite3 "${gravityDBfile}" "SELECT domain FROM domainlist WHERE type = ${type}" 2> /dev/null)
# If we have regexps to process # If we have regexps to process
if [[ "${#regexList[@]}" -ne 0 ]]; then if [[ "${#regexList[@]}" -ne 0 ]]; then

View File

@@ -35,7 +35,6 @@ source "/opt/pihole/COL_TABLE"
GitCheckUpdateAvail() { GitCheckUpdateAvail() {
local directory local directory
local curBranch
directory="${1}" directory="${1}"
curdir=$PWD curdir=$PWD
cd "${directory}" || return cd "${directory}" || return
@@ -43,15 +42,6 @@ GitCheckUpdateAvail() {
# Fetch latest changes in this repo # Fetch latest changes in this repo
git fetch --quiet origin git fetch --quiet origin
# Check current branch. If it is master, then check for the latest available tag instead of latest commit.
curBranch=$(git rev-parse --abbrev-ref HEAD)
if [[ "${curBranch}" == "master" ]]; then
# get the latest local tag
LOCAL=$(git describe --abbrev=0 --tags master)
# get the latest tag from remote
REMOTE=$(git describe --abbrev=0 --tags origin/master)
else
# @ alone is a shortcut for HEAD. Older versions of git # @ alone is a shortcut for HEAD. Older versions of git
# need @{0} # need @{0}
LOCAL="$(git rev-parse "@{0}")" LOCAL="$(git rev-parse "@{0}")"
@@ -64,8 +54,6 @@ GitCheckUpdateAvail() {
# branch.<name>.merge). A missing branchname # branch.<name>.merge). A missing branchname
# defaults to the current one. # defaults to the current one.
REMOTE="$(git rev-parse "@{upstream}")" REMOTE="$(git rev-parse "@{upstream}")"
fi
if [[ "${#LOCAL}" == 0 ]]; then if [[ "${#LOCAL}" == 0 ]]; then
echo -e "\\n ${COL_LIGHT_RED}Error: Local revision could not be obtained, please contact Pi-hole Support" echo -e "\\n ${COL_LIGHT_RED}Error: Local revision could not be obtained, please contact Pi-hole Support"
@@ -107,10 +95,6 @@ main() {
# shellcheck disable=1090,2154 # shellcheck disable=1090,2154
source "${setupVars}" source "${setupVars}"
# Install packages used by this installation script (necessary if users have removed e.g. git from their systems)
package_manager_detect
install_dependent_packages "${INSTALLER_DEPS[@]}"
# This is unlikely # This is unlikely
if ! is_repo "${PI_HOLE_FILES_DIR}" ; then if ! is_repo "${PI_HOLE_FILES_DIR}" ; then
echo -e "\\n ${COL_LIGHT_RED}Error: Core Pi-hole repo is missing from system!" echo -e "\\n ${COL_LIGHT_RED}Error: Core Pi-hole repo is missing from system!"

View File

@@ -1,35 +0,0 @@
#!/usr/bin/env bash
# Pi-hole: A black hole for Internet advertisements
# (c) 2017 Pi-hole, LLC (https://pi-hole.net)
# Network-wide ad blocking via your own hardware.
#
# Script to hold utility functions for use in other scripts
#
# This file is copyright under the latest version of the EUPL.
# Please see LICENSE file for your rights under this license.
# Basic Housekeeping rules
# - Functions must be self contained
# - Functions must be added in alphabetical order
# - Functions must be documented
# - New functions must have a test added for them in test/test_any_utils.py
#######################
# Takes three arguments key, value, and file.
# Checks the target file for the existence of the key
# - If it exists, it changes the value
# - If it does not exist, it adds the value
#
# Example usage:
# addOrEditKeyValuePair "BLOCKING_ENABLED" "true" "/etc/pihole/setupVars.conf"
#######################
addOrEditKeyValPair() {
local key="${1}"
local value="${2}"
local file="${3}"
if grep -q "^${key}=" "${file}"; then
sed -i "/^${key}=/c\\${key}=${value}" "${file}"
else
echo "${key}=${value}" >> "${file}"
fi
}

View File

@@ -13,10 +13,6 @@ DEFAULT="-1"
COREGITDIR="/etc/.pihole/" COREGITDIR="/etc/.pihole/"
WEBGITDIR="/var/www/html/admin/" WEBGITDIR="/var/www/html/admin/"
# Source the setupvars config file
# shellcheck disable=SC1091
source /etc/pihole/setupVars.conf
getLocalVersion() { getLocalVersion() {
# FTL requires a different method # FTL requires a different method
if [[ "$1" == "FTL" ]]; then if [[ "$1" == "FTL" ]]; then
@@ -95,11 +91,10 @@ getRemoteVersion(){
#If the above file exists, then we can read from that. Prevents overuse of GitHub API #If the above file exists, then we can read from that. Prevents overuse of GitHub API
if [[ -f "$cachedVersions" ]]; then if [[ -f "$cachedVersions" ]]; then
IFS=' ' read -r -a arrCache < "$cachedVersions" IFS=' ' read -r -a arrCache < "$cachedVersions"
case $daemon in case $daemon in
"pi-hole" ) echo "${arrCache[0]}";; "pi-hole" ) echo "${arrCache[0]}";;
"AdminLTE" ) [[ "${INSTALL_WEB_INTERFACE}" == true ]] && echo "${arrCache[1]}";; "AdminLTE" ) echo "${arrCache[1]}";;
"FTL" ) [[ "${INSTALL_WEB_INTERFACE}" == true ]] && echo "${arrCache[2]}" || echo "${arrCache[1]}";; "FTL" ) echo "${arrCache[2]}";;
esac esac
return 0 return 0
@@ -145,11 +140,6 @@ getLocalBranch(){
} }
versionOutput() { versionOutput() {
if [[ "$1" == "AdminLTE" && "${INSTALL_WEB_INTERFACE}" != true ]]; then
echo " WebAdmin not installed"
return 1
fi
[[ "$1" == "pi-hole" ]] && GITDIR=$COREGITDIR [[ "$1" == "pi-hole" ]] && GITDIR=$COREGITDIR
[[ "$1" == "AdminLTE" ]] && GITDIR=$WEBGITDIR [[ "$1" == "AdminLTE" ]] && GITDIR=$WEBGITDIR
[[ "$1" == "FTL" ]] && GITDIR="FTL" [[ "$1" == "FTL" ]] && GITDIR="FTL"
@@ -176,7 +166,6 @@ versionOutput() {
output="Latest ${1^} hash is $latHash" output="Latest ${1^} hash is $latHash"
else else
errorOutput errorOutput
return 1
fi fi
[[ -n "$output" ]] && echo " $output" [[ -n "$output" ]] && echo " $output"
@@ -188,6 +177,10 @@ errorOutput() {
} }
defaultOutput() { defaultOutput() {
# Source the setupvars config file
# shellcheck disable=SC1091
source /etc/pihole/setupVars.conf
versionOutput "pi-hole" "$@" versionOutput "pi-hole" "$@"
if [[ "${INSTALL_WEB_INTERFACE}" == true ]]; then if [[ "${INSTALL_WEB_INTERFACE}" == true ]]; then

View File

@@ -44,9 +44,7 @@ Options:
-e, email Set an administrative contact address for the Block Page -e, email Set an administrative contact address for the Block Page
-h, --help Show this help dialog -h, --help Show this help dialog
-i, interface Specify dnsmasq's interface listening behavior -i, interface Specify dnsmasq's interface listening behavior
-l, privacylevel Set privacy level (0 = lowest, 3 = highest) -l, privacylevel Set privacy level (0 = lowest, 3 = highest)"
-t, teleporter Backup configuration as an archive
-t, teleporter myname.tar.gz Backup configuration to archive with name myname.tar.gz as specified"
exit 0 exit 0
} }
@@ -55,7 +53,7 @@ add_setting() {
} }
delete_setting() { delete_setting() {
sed -i "/^${1}/d" "${setupVars}" sed -i "/${1}/d" "${setupVars}"
} }
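The two delete_setting variants above differ only in the leading ^ anchor of the sed address; a quick illustration of the behavioural difference (the sample setupVars lines are made up):

    printf 'PIHOLE_DNS_1=8.8.8.8\n#PIHOLE_DNS_1=1.1.1.1\n' | sed "/PIHOLE_DNS_1/d"     # unanchored: both lines are deleted
    printf 'PIHOLE_DNS_1=8.8.8.8\n#PIHOLE_DNS_1=1.1.1.1\n' | sed "/^PIHOLE_DNS_1/d"    # anchored: only the line that starts with the key is deleted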
change_setting() { change_setting() {
@@ -68,7 +66,7 @@ addFTLsetting() {
} }
deleteFTLsetting() { deleteFTLsetting() {
sed -i "/^${1}/d" "${FTLconf}" sed -i "/${1}/d" "${FTLconf}"
} }
changeFTLsetting() { changeFTLsetting() {
@@ -85,7 +83,7 @@ add_dnsmasq_setting() {
} }
delete_dnsmasq_setting() { delete_dnsmasq_setting() {
sed -i "/^${1}/d" "${dnsmasqconfig}" sed -i "/${1}/d" "${dnsmasqconfig}"
} }
SetTemperatureUnit() { SetTemperatureUnit() {
@@ -200,8 +198,6 @@ trust-anchor=.,20326,8,2,E06D44B80B8F1D39A95C0B0D7C65D08458E880409BBC68345710423
# Setup interface listening behavior of dnsmasq # Setup interface listening behavior of dnsmasq
delete_dnsmasq_setting "interface" delete_dnsmasq_setting "interface"
delete_dnsmasq_setting "local-service" delete_dnsmasq_setting "local-service"
delete_dnsmasq_setting "except-interface"
delete_dnsmasq_setting "bind-interfaces"
if [[ "${DNSMASQ_LISTENING}" == "all" ]]; then if [[ "${DNSMASQ_LISTENING}" == "all" ]]; then
# Listen on all interfaces, permit all origins # Listen on all interfaces, permit all origins
@@ -210,7 +206,6 @@ trust-anchor=.,20326,8,2,E06D44B80B8F1D39A95C0B0D7C65D08458E880409BBC68345710423
# Listen only on all interfaces, but only local subnets # Listen only on all interfaces, but only local subnets
add_dnsmasq_setting "local-service" add_dnsmasq_setting "local-service"
else else
# Options "bind" and "single"
# Listen only on one interface # Listen only on one interface
# Use eth0 as fallback interface if interface is missing in setupVars.conf # Use eth0 as fallback interface if interface is missing in setupVars.conf
if [ -z "${PIHOLE_INTERFACE}" ]; then if [ -z "${PIHOLE_INTERFACE}" ]; then
@@ -218,11 +213,6 @@ trust-anchor=.,20326,8,2,E06D44B80B8F1D39A95C0B0D7C65D08458E880409BBC68345710423
fi fi
add_dnsmasq_setting "interface" "${PIHOLE_INTERFACE}" add_dnsmasq_setting "interface" "${PIHOLE_INTERFACE}"
if [[ "${DNSMASQ_LISTENING}" == "bind" ]]; then
# Really bind to interface
add_dnsmasq_setting "bind-interfaces"
fi
fi fi
if [[ "${CONDITIONAL_FORWARDING}" == true ]]; then if [[ "${CONDITIONAL_FORWARDING}" == true ]]; then
@@ -276,22 +266,17 @@ trust-anchor=.,20326,8,2,E06D44B80B8F1D39A95C0B0D7C65D08458E880409BBC68345710423
delete_setting "CONDITIONAL_FORWARDING_IP" delete_setting "CONDITIONAL_FORWARDING_IP"
fi fi
delete_dnsmasq_setting "rev-server"
if [[ "${REV_SERVER}" == true ]]; then if [[ "${REV_SERVER}" == true ]]; then
add_dnsmasq_setting "rev-server=${REV_SERVER_CIDR},${REV_SERVER_TARGET}" add_dnsmasq_setting "rev-server=${REV_SERVER_CIDR},${REV_SERVER_TARGET}"
if [ -n "${REV_SERVER_DOMAIN}" ]; then if [ -n "${REV_SERVER_DOMAIN}" ]; then
# Forward local domain names to the CF target, too
add_dnsmasq_setting "server=/${REV_SERVER_DOMAIN}/${REV_SERVER_TARGET}" add_dnsmasq_setting "server=/${REV_SERVER_DOMAIN}/${REV_SERVER_TARGET}"
fi fi
if [[ "${DNS_FQDN_REQUIRED}" != true ]]; then
# Forward unqualified names to the CF target only when the "never
# forward non-FQDN" option is unticked
add_dnsmasq_setting "server=//${REV_SERVER_TARGET}"
fi fi
fi # Prevent Firefox from automatically switching over to DNS-over-HTTPS
# This follows https://support.mozilla.org/en-US/kb/configuring-networks-disable-dns-over-https
# (sourced 7th September 2019)
add_dnsmasq_setting "server=/use-application-dns.net/"
# We need to process DHCP settings here as well to account for possible # We need to process DHCP settings here as well to account for possible
# changes in the non-FQDN forwarding. This cannot be done in 01-pihole.conf # changes in the non-FQDN forwarding. This cannot be done in 01-pihole.conf
@@ -441,7 +426,7 @@ dhcp-leasefile=/etc/pihole/dhcp.leases
echo "#quiet-dhcp6 echo "#quiet-dhcp6
#enable-ra #enable-ra
dhcp-option=option6:dns-server,[::] dhcp-option=option6:dns-server,[::]
dhcp-range=::100,::1ff,constructor:${interface},ra-names,slaac,64,3600 dhcp-range=::100,::1ff,constructor:${interface},ra-names,slaac,${leasetime}
ra-param=*,0,0 ra-param=*,0,0
" >> "${dhcpconfig}" " >> "${dhcpconfig}"
fi fi
@@ -524,13 +509,13 @@ CustomizeAdLists() {
if CheckUrl "${address}"; then if CheckUrl "${address}"; then
if [[ "${args[2]}" == "enable" ]]; then if [[ "${args[2]}" == "enable" ]]; then
pihole-FTL sqlite3 "${gravityDBfile}" "UPDATE adlist SET enabled = 1 WHERE address = '${address}'" sqlite3 "${gravityDBfile}" "UPDATE adlist SET enabled = 1 WHERE address = '${address}'"
elif [[ "${args[2]}" == "disable" ]]; then elif [[ "${args[2]}" == "disable" ]]; then
pihole-FTL sqlite3 "${gravityDBfile}" "UPDATE adlist SET enabled = 0 WHERE address = '${address}'" sqlite3 "${gravityDBfile}" "UPDATE adlist SET enabled = 0 WHERE address = '${address}'"
elif [[ "${args[2]}" == "add" ]]; then elif [[ "${args[2]}" == "add" ]]; then
pihole-FTL sqlite3 "${gravityDBfile}" "INSERT OR IGNORE INTO adlist (address, comment) VALUES ('${address}', '${comment}')" sqlite3 "${gravityDBfile}" "INSERT OR IGNORE INTO adlist (address, comment) VALUES ('${address}', '${comment}')"
elif [[ "${args[2]}" == "del" ]]; then elif [[ "${args[2]}" == "del" ]]; then
pihole-FTL sqlite3 "${gravityDBfile}" "DELETE FROM adlist WHERE address = '${address}'" sqlite3 "${gravityDBfile}" "DELETE FROM adlist WHERE address = '${address}'"
else else
echo "Not permitted" echo "Not permitted"
return 1 return 1
@@ -541,6 +526,25 @@ CustomizeAdLists() {
fi fi
} }
SetPrivacyMode() {
if [[ "${args[2]}" == "true" ]]; then
change_setting "API_PRIVACY_MODE" "true"
else
change_setting "API_PRIVACY_MODE" "false"
fi
}
ResolutionSettings() {
typ="${args[2]}"
state="${args[3]}"
if [[ "${typ}" == "forward" ]]; then
change_setting "API_GET_UPSTREAM_DNS_HOSTNAME" "${state}"
elif [[ "${typ}" == "clients" ]]; then
change_setting "API_GET_CLIENT_HOSTNAME" "${state}"
fi
}
AddDHCPStaticAddress() { AddDHCPStaticAddress() {
mac="${args[2]}" mac="${args[2]}"
ip="${args[3]}" ip="${args[3]}"
@@ -609,10 +613,9 @@ Example: 'pihole -a -i local'
Specify dnsmasq's network interface listening behavior Specify dnsmasq's network interface listening behavior
Interfaces: Interfaces:
local Only respond to queries from devices that local Listen on all interfaces, but only allow queries from
are at most one hop away (local devices) devices that are at most one hop away (local devices)
single Respond only on interface ${PIHOLE_INTERFACE} single Listen only on ${PIHOLE_INTERFACE} interface
bind Bind only on interface ${PIHOLE_INTERFACE}
all Listen on all interfaces, permit all origins" all Listen on all interfaces, permit all origins"
exit 0 exit 0
fi fi
@@ -623,9 +626,6 @@ Interfaces:
elif [[ "${args[2]}" == "local" ]]; then elif [[ "${args[2]}" == "local" ]]; then
echo -e " ${INFO} Listening on all interfaces, permitting origins from one hop away (LAN)" echo -e " ${INFO} Listening on all interfaces, permitting origins from one hop away (LAN)"
change_setting "DNSMASQ_LISTENING" "local" change_setting "DNSMASQ_LISTENING" "local"
elif [[ "${args[2]}" == "bind" ]]; then
echo -e " ${INFO} Binding on interface ${PIHOLE_INTERFACE}"
change_setting "DNSMASQ_LISTENING" "bind"
else else
echo -e " ${INFO} Listening only on interface ${PIHOLE_INTERFACE}" echo -e " ${INFO} Listening only on interface ${PIHOLE_INTERFACE}"
change_setting "DNSMASQ_LISTENING" "single" change_setting "DNSMASQ_LISTENING" "single"
@@ -641,17 +641,12 @@ Interfaces:
} }
Teleporter() { Teleporter() {
local filename
filename="${args[2]}"
if [[ -z "${filename}" ]]; then
local datetimestamp local datetimestamp
local host local host
datetimestamp=$(date "+%Y-%m-%d_%H-%M-%S") datetimestamp=$(date "+%Y-%m-%d_%H-%M-%S")
host=$(hostname) host=$(hostname)
host="${host//./_}" host="${host//./_}"
filename="pi-hole-${host:-noname}-teleporter_${datetimestamp}.tar.gz" php /var/www/html/admin/scripts/pi-hole/php/teleporter.php > "pi-hole-${host:-noname}-teleporter_${datetimestamp}.tar.gz"
fi
php /var/www/html/admin/scripts/pi-hole/php/teleporter.php > "${filename}"
} }
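The default Teleporter archive name above is assembled with two bash expansions; a brief sketch with a made-up hostname:

    host="raspberrypi.lan"
    echo "${host//./_}"       # -> raspberrypi_lan (every "." replaced by "_")
    host=""
    echo "${host:-noname}"    # -> noname (fallback used when the hostname is empty)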
checkDomain() checkDomain()
@@ -687,12 +682,12 @@ addAudit()
done done
# Insert only the domain here. The date_added field will be # Insert only the domain here. The date_added field will be
# filled with its default value (date_added = current timestamp) # filled with its default value (date_added = current timestamp)
pihole-FTL sqlite3 "${gravityDBfile}" "INSERT INTO domain_audit (domain) VALUES ${domains};" sqlite3 "${gravityDBfile}" "INSERT INTO domain_audit (domain) VALUES ${domains};"
} }
clearAudit() clearAudit()
{ {
pihole-FTL sqlite3 "${gravityDBfile}" "DELETE FROM domain_audit;" sqlite3 "${gravityDBfile}" "DELETE FROM domain_audit;"
} }
SetPrivacyLevel() { SetPrivacyLevel() {
@@ -708,25 +703,10 @@ AddCustomDNSAddress() {
ip="${args[2]}" ip="${args[2]}"
host="${args[3]}" host="${args[3]}"
reload="${args[4]}" echo "${ip} ${host}" >> "${dnscustomfile}"
validHost="$(checkDomain "${host}")" # Restart dnsmasq to load new custom DNS entries
if [[ -n "${validHost}" ]]; then
if valid_ip "${ip}" || valid_ip6 "${ip}" ; then
echo "${ip} ${validHost}" >> "${dnscustomfile}"
else
echo -e " ${CROSS} Invalid IP has been passed"
exit 1
fi
else
echo " ${CROSS} Invalid Domain passed!"
exit 1
fi
# Restart dnsmasq to load new custom DNS entries only if $reload not false
if [[ ! $reload == "false" ]]; then
RestartDNS RestartDNS
fi
} }
RemoveCustomDNSAddress() { RemoveCustomDNSAddress() {
@@ -734,25 +714,16 @@ RemoveCustomDNSAddress() {
ip="${args[2]}" ip="${args[2]}"
host="${args[3]}" host="${args[3]}"
reload="${args[4]}"
validHost="$(checkDomain "${host}")"
if [[ -n "${validHost}" ]]; then
if valid_ip "${ip}" || valid_ip6 "${ip}" ; then if valid_ip "${ip}" || valid_ip6 "${ip}" ; then
sed -i "/^${ip} ${validHost}$/Id" "${dnscustomfile}" sed -i "/${ip} ${host}/d" "${dnscustomfile}"
else else
echo -e " ${CROSS} Invalid IP has been passed" echo -e " ${CROSS} Invalid IP has been passed"
exit 1 exit 1
fi fi
else
echo " ${CROSS} Invalid Domain passed!"
exit 1
fi
# Restart dnsmasq to load new custom DNS entries only if reload is not false # Restart dnsmasq to update removed custom DNS entries
if [[ ! $reload == "false" ]]; then
RestartDNS RestartDNS
fi
} }
AddCustomCNAMERecord() { AddCustomCNAMERecord() {
@@ -760,25 +731,11 @@ AddCustomCNAMERecord() {
domain="${args[2]}" domain="${args[2]}"
target="${args[3]}" target="${args[3]}"
reload="${args[4]}"
validDomain="$(checkDomain "${domain}")" echo "cname=${domain},${target}" >> "${dnscustomcnamefile}"
if [[ -n "${validDomain}" ]]; then
validTarget="$(checkDomain "${target}")" # Restart dnsmasq to load new custom CNAME records
if [[ -n "${validTarget}" ]]; then
echo "cname=${validDomain},${validTarget}" >> "${dnscustomcnamefile}"
else
echo " ${CROSS} Invalid Target Passed!"
exit 1
fi
else
echo " ${CROSS} Invalid Domain passed!"
exit 1
fi
# Restart dnsmasq to load new custom CNAME records only if reload is not false
if [[ ! $reload == "false" ]]; then
RestartDNS RestartDNS
fi
} }
RemoveCustomCNAMERecord() { RemoveCustomCNAMERecord() {
@@ -786,13 +743,12 @@ RemoveCustomCNAMERecord() {
domain="${args[2]}" domain="${args[2]}"
target="${args[3]}" target="${args[3]}"
reload="${args[4]}"
validDomain="$(checkDomain "${domain}")" validDomain="$(checkDomain "${domain}")"
if [[ -n "${validDomain}" ]]; then if [[ -n "${validDomain}" ]]; then
validTarget="$(checkDomain "${target}")" validTarget="$(checkDomain "${target}")"
if [[ -n "${validTarget}" ]]; then if [[ -n "${validDomain}" ]]; then
sed -i "/cname=${validDomain},${validTarget}$/Id" "${dnscustomcnamefile}" sed -i "/cname=${validDomain},${validTarget}/d" "${dnscustomcnamefile}"
else else
echo " ${CROSS} Invalid Target Passed!" echo " ${CROSS} Invalid Target Passed!"
exit 1 exit 1
@@ -802,10 +758,8 @@ RemoveCustomCNAMERecord() {
exit 1 exit 1
fi fi
# Restart dnsmasq to update removed custom CNAME records only if $reload not false # Restart dnsmasq to update removed custom CNAME records
if [[ ! $reload == "false" ]]; then
RestartDNS RestartDNS
fi
} }
main() { main() {
@@ -828,6 +782,8 @@ main() {
"layout" ) SetWebUILayout;; "layout" ) SetWebUILayout;;
"theme" ) SetWebUITheme;; "theme" ) SetWebUITheme;;
"-h" | "--help" ) helpFunc;; "-h" | "--help" ) helpFunc;;
"privacymode" ) SetPrivacyMode;;
"resolve" ) ResolutionSettings;;
"addstaticdhcp" ) AddDHCPStaticAddress;; "addstaticdhcp" ) AddDHCPStaticAddress;;
"removestaticdhcp" ) RemoveDHCPStaticAddress;; "removestaticdhcp" ) RemoveDHCPStaticAddress;;
"-e" | "email" ) SetAdminEmail "$3";; "-e" | "email" ) SetAdminEmail "$3";;

View File

@@ -0,0 +1,28 @@
#!/usr/bin/env bash
# Pi-hole: A black hole for Internet advertisements
# (c) 2017 Pi-hole, LLC (https://pi-hole.net)
# Network-wide ad blocking via your own hardware.
#
# Provides an automated migration subroutine to convert Pi-hole v3.x wildcard domains to Pi-hole v4.x regex filters
#
# This file is copyright under the latest version of the EUPL.
# Please see LICENSE file for your rights under this license.
# regexFile set in gravity.sh
wildcardFile="/etc/dnsmasq.d/03-pihole-wildcard.conf"
convert_wildcard_to_regex() {
if [ ! -f "${wildcardFile}" ]; then
return
fi
local addrlines domains uniquedomains
# Obtain wildcard domains from old file
addrlines="$(grep -oE "/.*/" ${wildcardFile})"
# Strip "/" from domain names and convert "." to regex-compatible "\."
domains="$(sed 's/\///g;s/\./\\./g' <<< "${addrlines}")"
# Remove repeated domains (may have been inserted two times due to A and AAAA blocking)
uniquedomains="$(uniq <<< "${domains}")"
# Automatically generate regex filters and remove old wildcards file
awk '{print "(^|\\.)"$0"$"}' <<< "${uniquedomains}" >> "${regexFile:?}" && rm "${wildcardFile}"
}
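As a worked example of the pipeline above (the input lines are assumptions), a wildcard entry such as address=/example.com/192.168.1.1 passes through the stages like this:
    # Demonstration of convert_wildcard_to_regex on sample input
    printf 'address=/example.com/192.168.1.1\naddress=/example.com/::\n' > /tmp/03-pihole-wildcard.conf
    addrlines="$(grep -oE "/.*/" /tmp/03-pihole-wildcard.conf)"      # -> /example.com/ (twice)
    domains="$(sed 's/\///g;s/\./\\./g' <<< "${addrlines}")"         # -> example\.com (twice)
    uniquedomains="$(uniq <<< "${domains}")"                         # -> example\.com (once)
    awk '{print "(^|\\.)"$0"$"}' <<< "${uniquedomains}"              # -> (^|\.)example\.com$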

View File

@@ -57,7 +57,7 @@ CREATE TABLE info
value TEXT NOT NULL value TEXT NOT NULL
); );
INSERT INTO "info" VALUES('version','15'); INSERT INTO "info" VALUES('version','14');
CREATE TABLE domain_audit CREATE TABLE domain_audit
( (
@@ -143,10 +143,12 @@ CREATE VIEW vw_gravity AS SELECT domain, adlist_by_group.group_id AS group_id
LEFT JOIN "group" ON "group".id = adlist_by_group.group_id LEFT JOIN "group" ON "group".id = adlist_by_group.group_id
WHERE adlist.enabled = 1 AND (adlist_by_group.group_id IS NULL OR "group".enabled = 1); WHERE adlist.enabled = 1 AND (adlist_by_group.group_id IS NULL OR "group".enabled = 1);
CREATE VIEW vw_adlist AS SELECT DISTINCT address, id CREATE VIEW vw_adlist AS SELECT DISTINCT address, adlist.id AS id
FROM adlist FROM adlist
WHERE enabled = 1 LEFT JOIN adlist_by_group ON adlist_by_group.adlist_id = adlist.id
ORDER BY id; LEFT JOIN "group" ON "group".id = adlist_by_group.group_id
WHERE adlist.enabled = 1 AND (adlist_by_group.group_id IS NULL OR "group".enabled = 1)
ORDER BY adlist.id;
CREATE TRIGGER tr_domainlist_add AFTER INSERT ON domainlist CREATE TRIGGER tr_domainlist_add AFTER INSERT ON domainlist
BEGIN BEGIN

View File

@@ -12,17 +12,14 @@ INSERT OR REPLACE INTO "group" SELECT * FROM OLD."group";
INSERT OR REPLACE INTO domain_audit SELECT * FROM OLD.domain_audit; INSERT OR REPLACE INTO domain_audit SELECT * FROM OLD.domain_audit;
INSERT OR REPLACE INTO domainlist SELECT * FROM OLD.domainlist; INSERT OR REPLACE INTO domainlist SELECT * FROM OLD.domainlist;
DELETE FROM OLD.domainlist_by_group WHERE domainlist_id NOT IN (SELECT id FROM OLD.domainlist);
INSERT OR REPLACE INTO domainlist_by_group SELECT * FROM OLD.domainlist_by_group; INSERT OR REPLACE INTO domainlist_by_group SELECT * FROM OLD.domainlist_by_group;
INSERT OR REPLACE INTO adlist SELECT * FROM OLD.adlist; INSERT OR REPLACE INTO adlist SELECT * FROM OLD.adlist;
DELETE FROM OLD.adlist_by_group WHERE adlist_id NOT IN (SELECT id FROM OLD.adlist);
INSERT OR REPLACE INTO adlist_by_group SELECT * FROM OLD.adlist_by_group; INSERT OR REPLACE INTO adlist_by_group SELECT * FROM OLD.adlist_by_group;
INSERT OR REPLACE INTO info SELECT * FROM OLD.info; INSERT OR REPLACE INTO info SELECT * FROM OLD.info;
INSERT OR REPLACE INTO client SELECT * FROM OLD.client; INSERT OR REPLACE INTO client SELECT * FROM OLD.client;
DELETE FROM OLD.client_by_group WHERE client_id NOT IN (SELECT id FROM OLD.client);
INSERT OR REPLACE INTO client_by_group SELECT * FROM OLD.client_by_group; INSERT OR REPLACE INTO client_by_group SELECT * FROM OLD.client_by_group;

View File

@@ -1,2 +0,0 @@
#; Pi-hole FTL config file
#; Comments should start with #; to avoid issues with PHP and bash reading this file

View File

@@ -1,4 +1,4 @@
#!/usr/bin/env sh #!/usr/bin/env bash
### BEGIN INIT INFO ### BEGIN INIT INFO
# Provides: pihole-FTL # Provides: pihole-FTL
# Required-Start: $remote_fs $syslog $network # Required-Start: $remote_fs $syslog $network
@@ -9,8 +9,11 @@
# Description: Enable service provided by pihole-FTL daemon # Description: Enable service provided by pihole-FTL daemon
### END INIT INFO ### END INIT INFO
FTLUSER=pihole
PIDFILE=/run/pihole-FTL.pid
is_running() { is_running() {
pgrep -xo "pihole-FTL" > /dev/null pgrep -o "pihole-FTL" > /dev/null 2>&1
} }
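One side uses pgrep -xo, which restricts the match to an exact process name; the plain -o form also matches any process whose name merely contains the pattern. A quick comparison (process names are illustrative):
    pgrep -o  "pihole-FTL"    # oldest PID whose name matches the pattern anywhere
    pgrep -xo "pihole-FTL"    # oldest PID whose name is exactly pihole-FTL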
@@ -20,25 +23,27 @@ start() {
echo "pihole-FTL is already running" echo "pihole-FTL is already running"
else else
# Touch files to ensure they exist (create if non-existing, preserve if existing) # Touch files to ensure they exist (create if non-existing, preserve if existing)
mkdir -pm 0755 /run/pihole touch /var/log/pihole-FTL.log /var/log/pihole.log
[ ! -f /run/pihole-FTL.pid ] && install -m 644 -o pihole -g pihole /dev/null /run/pihole-FTL.pid touch /run/pihole-FTL.pid /run/pihole-FTL.port
[ ! -f /run/pihole-FTL.port ] && install -m 644 -o pihole -g pihole /dev/null /run/pihole-FTL.port touch /etc/pihole/dhcp.leases
[ ! -f /var/log/pihole-FTL.log ] && install -m 644 -o pihole -g pihole /dev/null /var/log/pihole-FTL.log mkdir -p /run/pihole
[ ! -f /var/log/pihole.log ] && install -m 644 -o pihole -g pihole /dev/null /var/log/pihole.log mkdir -p /var/log/pihole
[ ! -f /etc/pihole/dhcp.leases ] && install -m 644 -o pihole -g pihole /dev/null /etc/pihole/dhcp.leases chown pihole:pihole /run/pihole /var/log/pihole
# Remove possible leftovers from previous pihole-FTL processes
rm -f /dev/shm/FTL-* 2> /dev/null
rm /run/pihole/FTL.sock 2> /dev/null
# Ensure that permissions are set so that pihole-FTL can edit all necessary files # Ensure that permissions are set so that pihole-FTL can edit all necessary files
chown pihole:pihole /run/pihole /etc/pihole /var/log/pihole.log /etc/pihole/dhcp.leases chown pihole:pihole /run/pihole-FTL.pid /run/pihole-FTL.port
# Ensure that permissions are set so that pihole-FTL can edit the files. We ignore errors as the file may not (yet) exist chown pihole:pihole /etc/pihole /etc/pihole/dhcp.leases 2> /dev/null
chmod -f 0644 /etc/pihole/macvendor.db /etc/pihole/dhcp.leases /var/log/pihole-FTL.log /var/log/pihole.log chown pihole:pihole /var/log/pihole-FTL.log /var/log/pihole.log
chmod 0644 /var/log/pihole-FTL.log /run/pihole-FTL.pid /run/pihole-FTL.port /var/log/pihole.log
# Chown database files to the user FTL runs as. We ignore errors as the files may not (yet) exist # Chown database files to the user FTL runs as. We ignore errors as the files may not (yet) exist
chown -f pihole:pihole /etc/pihole/pihole-FTL.db /etc/pihole/gravity.db /etc/pihole/macvendor.db chown pihole:pihole /etc/pihole/pihole-FTL.db /etc/pihole/gravity.db 2> /dev/null
# Chown database file permissions so that the pihole group (web interface) can edit the file. We ignore errors as the files may not (yet) exist if setcap CAP_NET_BIND_SERVICE,CAP_NET_RAW,CAP_NET_ADMIN,CAP_SYS_NICE+eip "$(which pihole-FTL)"; then
chmod -f 0664 /etc/pihole/pihole-FTL.db su -s /bin/sh -c "/usr/bin/pihole-FTL" "$FTLUSER"
if setcap CAP_NET_BIND_SERVICE,CAP_NET_RAW,CAP_NET_ADMIN,CAP_SYS_NICE,CAP_IPC_LOCK,CAP_CHOWN+eip "/usr/bin/pihole-FTL"; then
su -s /bin/sh -c "/usr/bin/pihole-FTL" pihole
else else
echo "Warning: Starting pihole-FTL as root because setting capabilities is not supported on this system" echo "Warning: Starting pihole-FTL as root because setting capabilities is not supported on this system"
/usr/bin/pihole-FTL pihole-FTL
fi fi
echo echo
fi fi
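When setcap succeeds, the granted capability set can be verified afterwards with getcap; the output shown is illustrative and depends on which capability list was applied:
    getcap /usr/bin/pihole-FTL
    # e.g. /usr/bin/pihole-FTL = cap_net_bind_service,cap_net_raw,cap_net_admin,cap_sys_nice+eip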
@@ -47,20 +52,20 @@ start() {
# Stop the service # Stop the service
stop() { stop() {
if is_running; then if is_running; then
pkill -xo "pihole-FTL" pkill -o pihole-FTL
for i in 1 2 3 4 5; do for i in {1..5}; do
if ! is_running; then if ! is_running; then
break break
fi fi
printf "." echo -n "."
sleep 1 sleep 1
done done
echo echo
if is_running; then if is_running; then
echo "Not stopped; may still be shutting down or shutdown may have failed, killing now" echo "Not stopped; may still be shutting down or shutdown may have failed, killing now"
pkill -xo -9 "pihole-FTL" pkill -o -9 pihole-FTL
exit 1 exit 1
else else
echo "Stopped" echo "Stopped"
@@ -68,8 +73,6 @@ stop() {
else else
echo "Not running" echo "Not running"
fi fi
# Cleanup
rm -f /run/pihole/FTL.sock /dev/shm/FTL-*
echo echo
} }
@@ -98,7 +101,7 @@ case "$1" in
start start
;; ;;
*) *)
echo "Usage: $0 {start|stop|restart|reload|status}" echo $"Usage: $0 {start|stop|restart|reload|status}"
exit 1 exit 1
esac esac

View File

@@ -26,7 +26,7 @@
# parameter "quiet": don't print messages # parameter "quiet": don't print messages
00 00 * * * root PATH="$PATH:/usr/sbin:/usr/local/bin/" pihole flush once quiet 00 00 * * * root PATH="$PATH:/usr/sbin:/usr/local/bin/" pihole flush once quiet
@reboot root /usr/sbin/logrotate --state /var/lib/logrotate/pihole /etc/pihole/logrotate @reboot root /usr/sbin/logrotate /etc/pihole/logrotate
# Pi-hole: Grab local version and branch every 10 minutes # Pi-hole: Grab local version and branch every 10 minutes
*/10 * * * * root PATH="$PATH:/usr/sbin:/usr/local/bin/" pihole updatechecker local */10 * * * * root PATH="$PATH:/usr/sbin:/usr/local/bin/" pihole updatechecker local
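The rotation wired up at @reboot can also be exercised by hand while debugging; -d makes logrotate a dry run that only reports what it would do (using the --state variant shown above):
    /usr/sbin/logrotate -d --state /var/lib/logrotate/pihole /etc/pihole/logrotate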

View File

@@ -73,12 +73,12 @@ if ($serverName === "pi.hole"
<meta charset='utf-8'> <meta charset='utf-8'>
$viewPort $viewPort
<title>● $serverName</title> <title>● $serverName</title>
<link rel='stylesheet' href='/pihole/blockingpage.css'> <link rel='stylesheet' href='pihole/blockingpage.css'>
<link rel='shortcut icon' href='/admin/img/favicons/favicon.ico' type='image/x-icon'> <link rel='shortcut icon' href='admin/img/favicons/favicon.ico' type='image/x-icon'>
</head> </head>
<body id='splashpage'> <body id='splashpage'>
<div id="pihole_card"> <div id="pihole_card">
<img src='/admin/img/logo.svg' alt='Pi-hole logo' id="pihole_logo_splash" /> <img src='admin/img/logo.svg' alt='Pi-hole logo' id="pihole_logo_splash" />
<p>Pi-<strong>hole</strong>: Your black hole for Internet advertisements</p> <p>Pi-<strong>hole</strong>: Your black hole for Internet advertisements</p>
<a href='/admin'>Did you mean to go to the admin panel?</a> <a href='/admin'>Did you mean to go to the admin panel?</a>
</div> </div>

View File

@@ -20,6 +20,7 @@ server.modules = (
"mod_accesslog", "mod_accesslog",
"mod_auth", "mod_auth",
"mod_expire", "mod_expire",
"mod_compress",
"mod_redirect", "mod_redirect",
"mod_setenv", "mod_setenv",
"mod_rewrite" "mod_rewrite"
@@ -40,6 +41,26 @@ index-file.names = ( "index.php", "index.html", "index.lighttpd.html"
url.access-deny = ( "~", ".inc", ".md", ".yml", ".ini" ) url.access-deny = ( "~", ".inc", ".md", ".yml", ".ini" )
static-file.exclude-extensions = ( ".php", ".pl", ".fcgi" ) static-file.exclude-extensions = ( ".php", ".pl", ".fcgi" )
compress.cache-dir = "/var/cache/lighttpd/compress/"
compress.filetype = (
"application/json",
"application/vnd.ms-fontobject",
"application/xml",
"font/eot",
"font/opentype",
"font/otf",
"font/ttf",
"image/bmp",
"image/svg+xml",
"image/vnd.microsoft.icon",
"image/x-icon",
"text/css",
"text/html",
"text/javascript",
"text/plain",
"text/xml"
)
mimetype.assign = ( mimetype.assign = (
".ico" => "image/x-icon", ".ico" => "image/x-icon",
".jpeg" => "image/jpeg", ".jpeg" => "image/jpeg",
@@ -78,6 +99,11 @@ $HTTP["url"] =~ "^/admin/" {
"X-Pi-hole" => "The Pi-hole Web interface is working!", "X-Pi-hole" => "The Pi-hole Web interface is working!",
"X-Frame-Options" => "DENY" "X-Frame-Options" => "DENY"
) )
$HTTP["url"] =~ "\.(eot|otf|tt[cf]|woff2?)$" {
# Allow Block Page access to local fonts
setenv.add-response-header = ( "Access-Control-Allow-Origin" => "*" )
}
} }
# Block . files from being served, such as .git, .github, .gitignore # Block . files from being served, such as .git, .github, .gitignore
@@ -85,12 +111,5 @@ $HTTP["url"] =~ "^/admin/\.(.*)" {
url.access-deny = ("") url.access-deny = ("")
} }
# allow teleporter and API qr code iframe on settings page
$HTTP["url"] =~ "/(teleporter|api_token)\.php$" {
$HTTP["referer"] =~ "/admin/settings\.php" {
setenv.add-response-header = ( "X-Frame-Options" => "SAMEORIGIN" )
}
}
# Default expire header # Default expire header
expire.url = ( "" => "access plus 0 seconds" ) expire.url = ( "" => "access plus 0 seconds" )
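Where the mod_compress block is present, one way to confirm that static assets are actually served compressed (the pi.hole hostname is an assumption) is to request a CSS file with an Accept-Encoding header and look for Content-Encoding in the reply:
    curl -sI -H 'Accept-Encoding: gzip' http://pi.hole/pihole/blockingpage.css | grep -i '^content-encoding'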

View File

@@ -21,6 +21,7 @@ server.modules = (
"mod_expire", "mod_expire",
"mod_fastcgi", "mod_fastcgi",
"mod_accesslog", "mod_accesslog",
"mod_compress",
"mod_redirect", "mod_redirect",
"mod_setenv", "mod_setenv",
"mod_rewrite" "mod_rewrite"
@@ -41,6 +42,26 @@ index-file.names = ( "index.php", "index.html", "index.lighttpd.html"
url.access-deny = ( "~", ".inc", ".md", ".yml", ".ini" ) url.access-deny = ( "~", ".inc", ".md", ".yml", ".ini" )
static-file.exclude-extensions = ( ".php", ".pl", ".fcgi" ) static-file.exclude-extensions = ( ".php", ".pl", ".fcgi" )
compress.cache-dir = "/var/cache/lighttpd/compress/"
compress.filetype = (
"application/json",
"application/vnd.ms-fontobject",
"application/xml",
"font/eot",
"font/opentype",
"font/otf",
"font/ttf",
"image/bmp",
"image/svg+xml",
"image/vnd.microsoft.icon",
"image/x-icon",
"text/css",
"text/html",
"text/javascript",
"text/plain",
"text/xml"
)
mimetype.assign = ( mimetype.assign = (
".ico" => "image/x-icon", ".ico" => "image/x-icon",
".jpeg" => "image/jpeg", ".jpeg" => "image/jpeg",
@@ -86,6 +107,11 @@ $HTTP["url"] =~ "^/admin/" {
"X-Pi-hole" => "The Pi-hole Web interface is working!", "X-Pi-hole" => "The Pi-hole Web interface is working!",
"X-Frame-Options" => "DENY" "X-Frame-Options" => "DENY"
) )
$HTTP["url"] =~ "\.(eot|otf|tt[cf]|woff2?)$" {
# Allow Block Page access to local fonts
setenv.add-response-header = ( "Access-Control-Allow-Origin" => "*" )
}
} }
# Block . files from being served, such as .git, .github, .gitignore # Block . files from being served, such as .git, .github, .gitignore
@@ -93,12 +119,5 @@ $HTTP["url"] =~ "^/admin/\.(.*)" {
url.access-deny = ("") url.access-deny = ("")
} }
# allow teleporter and API qr code iframe on settings page
$HTTP["url"] =~ "/(teleporter|api_token)\.php$" {
$HTTP["referer"] =~ "/admin/settings\.php" {
setenv.add-response-header = ( "X-Frame-Options" => "SAMEORIGIN" )
}
}
# Default expire header # Default expire header
expire.url = ( "" => "access plus 0 seconds" ) expire.url = ( "" => "access plus 0 seconds" )

View File

@@ -2,7 +2,7 @@
# shellcheck disable=SC1090 # shellcheck disable=SC1090
# Pi-hole: A black hole for Internet advertisements # Pi-hole: A black hole for Internet advertisements
# (c) 2017-2021 Pi-hole, LLC (https://pi-hole.net) # (c) 2017-2018 Pi-hole, LLC (https://pi-hole.net)
# Network-wide ad blocking via your own hardware. # Network-wide ad blocking via your own hardware.
# #
# Installs and Updates Pi-hole # Installs and Updates Pi-hole
@@ -34,26 +34,27 @@ export PATH+=':/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'
# List of supported DNS servers # List of supported DNS servers
DNS_SERVERS=$(cat << EOM DNS_SERVERS=$(cat << EOM
Google (ECS, DNSSEC);8.8.8.8;8.8.4.4;2001:4860:4860:0:0:0:0:8888;2001:4860:4860:0:0:0:0:8844 Google (ECS);8.8.8.8;8.8.4.4;2001:4860:4860:0:0:0:0:8888;2001:4860:4860:0:0:0:0:8844
OpenDNS (ECS, DNSSEC);208.67.222.222;208.67.220.220;2620:119:35::35;2620:119:53::53 OpenDNS (ECS, DNSSEC);208.67.222.222;208.67.220.220;2620:119:35::35;2620:119:53::53
Level3;4.2.2.1;4.2.2.2;; Level3;4.2.2.1;4.2.2.2;;
Comodo;8.26.56.26;8.20.247.20;; Comodo;8.26.56.26;8.20.247.20;;
DNS.WATCH (DNSSEC);84.200.69.80;84.200.70.40;2001:1608:10:25:0:0:1c04:b12f;2001:1608:10:25:0:0:9249:d69b DNS.WATCH;84.200.69.80;84.200.70.40;2001:1608:10:25:0:0:1c04:b12f;2001:1608:10:25:0:0:9249:d69b
Quad9 (filtered, DNSSEC);9.9.9.9;149.112.112.112;2620:fe::fe;2620:fe::9 Quad9 (filtered, DNSSEC);9.9.9.9;149.112.112.112;2620:fe::fe;2620:fe::9
Quad9 (unfiltered, no DNSSEC);9.9.9.10;149.112.112.10;2620:fe::10;2620:fe::fe:10 Quad9 (unfiltered, no DNSSEC);9.9.9.10;149.112.112.10;2620:fe::10;2620:fe::fe:10
Quad9 (filtered, ECS, DNSSEC);9.9.9.11;149.112.112.11;2620:fe::11;2620:fe::fe:11 Quad9 (filtered + ECS);9.9.9.11;149.112.112.11;2620:fe::11;2620:fe::fe:11
Cloudflare (DNSSEC);1.1.1.1;1.0.0.1;2606:4700:4700::1111;2606:4700:4700::1001 Cloudflare;1.1.1.1;1.0.0.1;2606:4700:4700::1111;2606:4700:4700::1001
EOM EOM
) )
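Each DNS_SERVERS entry is a single semicolon-separated record: name, two IPv4 addresses, two IPv6 addresses (fields may be empty). A minimal sketch of splitting one entry, not the installer's own parsing code:
    IFS=';' read -r name v4_1 v4_2 v6_1 v6_2 <<< "Quad9 (filtered, DNSSEC);9.9.9.9;149.112.112.112;2620:fe::fe;2620:fe::9"
    echo "${name}: primary ${v4_1}, secondary ${v4_2}"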
# Location for final installation log storage # Location for final installation log storage
installLogLoc="/etc/pihole/install.log" installLogLoc=/etc/pihole/install.log
# This is an important file as it contains information specific to the machine it's being installed on # This is an important file as it contains information specific to the machine it's being installed on
setupVars="/etc/pihole/setupVars.conf" setupVars=/etc/pihole/setupVars.conf
# Pi-hole uses lighttpd as a Web server, and this is the config file for it # Pi-hole uses lighttpd as a Web server, and this is the config file for it
lighttpdConfig="/etc/lighttpd/lighttpd.conf" # shellcheck disable=SC2034
lighttpdConfig=/etc/lighttpd/lighttpd.conf
# This is a file used for the colorized output # This is a file used for the colorized output
coltable="/opt/pihole/COL_TABLE" coltable=/opt/pihole/COL_TABLE
# Root of the web server # Root of the web server
webroot="/var/www/html" webroot="/var/www/html"
@@ -93,9 +94,24 @@ if [ -z "${USER}" ]; then
USER="$(id -un)" USER="$(id -un)"
fi fi
# whiptail dialog dimensions: 20 rows and 70 chars width assures to fit on small screens and is known to hold all content.
r=20 # Check if we are running on a real terminal and find the rows and columns
c=70 # If there is no real terminal, we will default to 80x24
if [ -t 0 ] ; then
screen_size=$(stty size)
else
screen_size="24 80"
fi
# Determine terminal rows and columns by parsing screen_size
printf -v rows '%d' "${screen_size%% *}"
printf -v columns '%d' "${screen_size##* }"
# Divide by two so the dialogs take up half of the screen, which looks nice.
r=$(( rows / 2 ))
c=$(( columns / 2 ))
# Unless the screen is tiny
r=$(( r < 20 ? 20 : r ))
c=$(( c < 70 ? 70 : c ))
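Worked through, the stty-based sizing yields r=20 and c=70 on a standard 24x80 console (the clamped minimums, matching the fixed values on the other side) and r=25, c=80 on a 50x160 terminal.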
######## Undocumented Flags. Shhh ######## ######## Undocumented Flags. Shhh ########
# These are undocumented flags; some of which we can use when repairing an installation # These are undocumented flags; some of which we can use when repairing an installation
@@ -170,12 +186,12 @@ os_check() {
# This function gets a list of supported OS versions from a TXT record at versions.pi-hole.net # This function gets a list of supported OS versions from a TXT record at versions.pi-hole.net
# and determines whether or not the script is running on one of those systems # and determines whether or not the script is running on one of those systems
local remote_os_domain valid_os valid_version valid_response detected_os detected_version display_warning cmdResult digReturnCode response local remote_os_domain valid_os valid_version valid_response detected_os detected_version display_warning cmdResult digReturnCode response
remote_os_domain=${OS_CHECK_DOMAIN_NAME:-"versions.pi-hole.net"} remote_os_domain="versions.pi-hole.net"
detected_os=$(grep '^ID=' /etc/os-release | cut -d '=' -f2 | tr -d '"') detected_os=$(grep "\bID\b" /etc/os-release | cut -d '=' -f2 | tr -d '"')
detected_version=$(grep VERSION_ID /etc/os-release | cut -d '=' -f2 | tr -d '"') detected_version=$(grep VERSION_ID /etc/os-release | cut -d '=' -f2 | tr -d '"')
cmdResult="$(dig +short -t txt "${remote_os_domain}" @ns1.pi-hole.net 2>&1; echo $?)" cmdResult="$(dig +short -t txt ${remote_os_domain} @ns1.pi-hole.net 2>&1; echo $?)"
# Gets the return code of the previous command (last line) # Gets the return code of the previous command (last line)
digReturnCode="${cmdResult##*$'\n'}" digReturnCode="${cmdResult##*$'\n'}"
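Because cmdResult captures both dig's output and its exit status, the last line is the status and everything before it is the TXT response. A sketch of that split (the response handling here is illustrative, not necessarily the script's exact code):
    cmdResult="$(dig +short -t txt versions.pi-hole.net @ns1.pi-hole.net 2>&1; echo $?)"
    digReturnCode="${cmdResult##*$'\n'}"    # last line: dig's exit status, e.g. 0
    response="${cmdResult%$'\n'*}"          # everything before it: the quoted TXT record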
@@ -260,11 +276,11 @@ os_check() {
} }
# Compatibility # Compatibility
package_manager_detect() { distro_check() {
# First check to see if apt-get is installed. # If apt-get is installed, then we know it's part of the Debian family
if is_command apt-get ; then if is_command apt-get ; then
# Set some global variables here # Set some global variables here
# We don't set them earlier since the installed package manager might be rpm, so these values would be different # We don't set them earlier since the family might be Red Hat, so these values would be different
PKG_MANAGER="apt-get" PKG_MANAGER="apt-get"
# A variable to store the command used to update the package cache # A variable to store the command used to update the package cache
UPDATE_PKG_CACHE="${PKG_MANAGER} update" UPDATE_PKG_CACHE="${PKG_MANAGER} update"
@@ -272,27 +288,81 @@ package_manager_detect() {
PKG_INSTALL=("${PKG_MANAGER}" -qq --no-install-recommends install) PKG_INSTALL=("${PKG_MANAGER}" -qq --no-install-recommends install)
# grep -c will return 1 if there are no matches. This is an acceptable condition, so we OR TRUE to prevent set -e exiting the script. # grep -c will return 1 if there are no matches. This is an acceptable condition, so we OR TRUE to prevent set -e exiting the script.
PKG_COUNT="${PKG_MANAGER} -s -o Debug::NoLocking=true upgrade | grep -c ^Inst || true" PKG_COUNT="${PKG_MANAGER} -s -o Debug::NoLocking=true upgrade | grep -c ^Inst || true"
# Update package cache # Some distros vary slightly so these fixes for dependencies may apply
# on Ubuntu 18.04.1 LTS we need to add the universe repository to gain access to dhcpcd5
APT_SOURCES="/etc/apt/sources.list"
if awk 'BEGIN{a=1;b=0}/bionic main/{a=0}/bionic.*universe/{b=1}END{exit a + b}' ${APT_SOURCES}; then
if ! whiptail --defaultno --title "Dependencies Require Update to Allowed Repositories" --yesno "Would you like to enable 'universe' repository?\\n\\nThis repository is required by the following packages:\\n\\n- dhcpcd5" "${r}" "${c}"; then
printf " %b Aborting installation: Dependencies could not be installed.\\n" "${CROSS}"
exit 1
else
printf " %b Enabling universe package repository for Ubuntu Bionic\\n" "${INFO}"
cp -p ${APT_SOURCES} ${APT_SOURCES}.backup # Backup current repo list
printf " %b Backed up current configuration to %s\\n" "${TICK}" "${APT_SOURCES}.backup"
add-apt-repository universe
printf " %b Enabled %s\\n" "${TICK}" "'universe' repository"
fi
fi
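Read closely, the awk guard exits 0 only when sources.list contains a "bionic main" line (a becomes 0) and no "bionic ... universe" line (b stays 0), i.e. exactly the situation in which the prompt to enable the universe repository is needed.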
# Update package cache. This is required already here to assure apt-cache calls have package lists available.
update_package_cache || exit 1 update_package_cache || exit 1
# Debian 7 doesn't have iproute2 so check if it's available first
if apt-cache show iproute2 > /dev/null 2>&1; then
iproute_pkg="iproute2"
# Otherwise, check if iproute is available
elif apt-cache show iproute > /dev/null 2>&1; then
iproute_pkg="iproute"
# Else print error and exit
else
printf " %b Aborting installation: iproute2 and iproute packages were not found in APT repository.\\n" "${CROSS}"
exit 1
fi
# Check for and determine version number (major and minor) of current php install # Check for and determine version number (major and minor) of current php install
local phpVer="php"
if is_command php ; then if is_command php ; then
printf " %b Existing PHP installation detected : PHP version %s\\n" "${INFO}" "$(php <<< "<?php echo PHP_VERSION ?>")" printf " %b Existing PHP installation detected : PHP version %s\\n" "${INFO}" "$(php <<< "<?php echo PHP_VERSION ?>")"
printf -v phpInsMajor "%d" "$(php <<< "<?php echo PHP_MAJOR_VERSION ?>")" printf -v phpInsMajor "%d" "$(php <<< "<?php echo PHP_MAJOR_VERSION ?>")"
printf -v phpInsMinor "%d" "$(php <<< "<?php echo PHP_MINOR_VERSION ?>")" printf -v phpInsMinor "%d" "$(php <<< "<?php echo PHP_MINOR_VERSION ?>")"
# Is installed php version 7.0 or greater
if [ "${phpInsMajor}" -ge 7 ]; then
phpInsNewer=true
fi
fi
# Several other packages depend on the version of PHP. If PHP is not installed, or an insufficient version,
# those packages should fall back to the default (latest?)
if [[ "$phpInsNewer" != true ]]; then
# Prefer the php metapackage if it's there
if apt-cache show php > /dev/null 2>&1; then
phpVer="php"
# Else fall back on the php5 package if it's there
elif apt-cache show php5 > /dev/null 2>&1; then
phpVer="php5"
# Else print error and exit
else
printf " %b Aborting installation: No PHP packages were found in APT repository.\\n" "${CROSS}"
exit 1
fi
else
# Else, PHP is already installed at a version beyond v7.0, so the additional packages
# should match version with the current PHP version.
phpVer="php$phpInsMajor.$phpInsMinor" phpVer="php$phpInsMajor.$phpInsMinor"
fi fi
# Packages required to perform the os_check (stored as an array) # We also need the correct version for `php-sqlite` (which differs across distros)
OS_CHECK_DEPS=(grep dnsutils) if apt-cache show "${phpVer}-sqlite3" > /dev/null 2>&1; then
phpSqlite="sqlite3"
elif apt-cache show "${phpVer}-sqlite" > /dev/null 2>&1; then
phpSqlite="sqlite"
else
printf " %b Aborting installation: No SQLite PHP module was found in APT repository.\\n" "${CROSS}"
exit 1
fi
# Packages required to run this install script (stored as an array) # Packages required to run this install script (stored as an array)
INSTALLER_DEPS=(git iproute2 whiptail ca-certificates) INSTALLER_DEPS=(dhcpcd5 git "${iproute_pkg}" whiptail dnsutils)
# Packages required to run Pi-hole (stored as an array) # Packages required to run Pi-hole (stored as an array)
PIHOLE_DEPS=(cron curl iputils-ping psmisc sudo unzip idn2 libcap2-bin dns-root-data libcap2 netcat-openbsd) PIHOLE_DEPS=(cron curl iputils-ping lsof netcat psmisc sudo unzip idn2 sqlite3 libcap2-bin dns-root-data libcap2)
# Packages required for the Web admin interface (stored as an array) # Packages required for the Web admin interface (stored as an array)
# It's useful to separate this from Pi-hole, since the two repos are also setup separately # It's useful to separate this from Pi-hole, since the two repos are also setup separately
PIHOLE_WEB_DEPS=(lighttpd "${phpVer}-common" "${phpVer}-cgi" "${phpVer}-sqlite3" "${phpVer}-xml" "${phpVer}-intl") PIHOLE_WEB_DEPS=(lighttpd "${phpVer}-common" "${phpVer}-cgi" "${phpVer}-${phpSqlite}" "${phpVer}-xml" "${phpVer}-intl")
# Prior to PHP8.0, JSON functionality is provided as dedicated module, required by Pi-hole AdminLTE: https://www.php.net/manual/json.installation.php # Prior to PHP8.0, JSON functionality is provided as dedicated module, required by Pi-hole AdminLTE: https://www.php.net/manual/json.installation.php
if [[ -z "${phpInsMajor}" || "${phpInsMajor}" -lt 8 ]]; then if [[ "${phpInsNewer}" != true || "${phpInsMajor}" -lt 8 ]]; then
PIHOLE_WEB_DEPS+=("${phpVer}-json") PIHOLE_WEB_DEPS+=("${phpVer}-json")
fi fi
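As a concrete illustration (assuming PHP 7.4 is already present), the version probing above produces:
    printf -v phpInsMajor "%d" "$(php <<< "<?php echo PHP_MAJOR_VERSION ?>")"   # 7
    printf -v phpInsMinor "%d" "$(php <<< "<?php echo PHP_MINOR_VERSION ?>")"   # 4
    phpVer="php${phpInsMajor}.${phpInsMinor}"    # php7.4 -> php7.4-cgi, php7.4-sqlite3, php7.4-xml, ...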
# The Web server user, # The Web server user,
@@ -318,7 +388,7 @@ package_manager_detect() {
return 0 return 0
} }
# If apt-get is not found, check for rpm. # If apt-get is not found, check for rpm to see if it's a Red Hat family OS
elif is_command rpm ; then elif is_command rpm ; then
# Then check if dnf or yum is the package manager # Then check if dnf or yum is the package manager
if is_command dnf ; then if is_command dnf ; then
@@ -327,27 +397,15 @@ package_manager_detect() {
PKG_MANAGER="yum" PKG_MANAGER="yum"
fi fi
# These variable names match the ones for apt-get. See above for an explanation of what they are for. # These variable names match the ones in the Debian family. See above for an explanation of what they are for.
PKG_INSTALL=("${PKG_MANAGER}" install -y) PKG_INSTALL=("${PKG_MANAGER}" install -y)
PKG_COUNT="${PKG_MANAGER} check-update | egrep '(.i686|.x86|.noarch|.arm|.src)' | wc -l" PKG_COUNT="${PKG_MANAGER} check-update | egrep '(.i686|.x86|.noarch|.arm|.src)' | wc -l"
OS_CHECK_DEPS=(grep bind-utils) INSTALLER_DEPS=(git iproute newt procps-ng which chkconfig bind-utils)
INSTALLER_DEPS=(git iproute newt procps-ng which chkconfig ca-certificates) PIHOLE_DEPS=(cronie curl findutils nmap-ncat sudo unzip libidn2 psmisc sqlite libcap lsof)
PIHOLE_DEPS=(cronie curl findutils sudo unzip libidn2 psmisc libcap nmap-ncat)
PIHOLE_WEB_DEPS=(lighttpd lighttpd-fastcgi php-common php-cli php-pdo php-xml php-json php-intl) PIHOLE_WEB_DEPS=(lighttpd lighttpd-fastcgi php-common php-cli php-pdo php-xml php-json php-intl)
LIGHTTPD_USER="lighttpd" LIGHTTPD_USER="lighttpd"
LIGHTTPD_GROUP="lighttpd" LIGHTTPD_GROUP="lighttpd"
LIGHTTPD_CFG="lighttpd.conf.fedora" LIGHTTPD_CFG="lighttpd.conf.fedora"
# If neither apt-get or yum/dnf package managers were found
else
# we cannot install required packages
printf " %b No supported package manager found\\n" "${CROSS}"
# so exit the installer
exit
fi
}
select_rpm_php(){
# If the host OS is Fedora, # If the host OS is Fedora,
if grep -qiE 'fedora|fedberry' /etc/redhat-release; then if grep -qiE 'fedora|fedberry' /etc/redhat-release; then
# all required packages should be available by default with the latest fedora release # all required packages should be available by default with the latest fedora release
@@ -420,7 +478,10 @@ select_rpm_php(){
exit 1 exit 1
fi fi
fi fi
fi # Warn user of unsupported version of Fedora or CentOS fi
fi
else
# Warn user of unsupported version of Fedora or CentOS
if ! whiptail --defaultno --title "Unsupported RPM based distribution" --yesno "Would you like to continue installation on an unsupported RPM based distribution?\\n\\nPlease ensure the following packages have been installed manually:\\n\\n- lighttpd\\n- lighttpd-fastcgi\\n- PHP version 7+" "${r}" "${c}"; then if ! whiptail --defaultno --title "Unsupported RPM based distribution" --yesno "Would you like to continue installation on an unsupported RPM based distribution?\\n\\nPlease ensure the following packages have been installed manually:\\n\\n- lighttpd\\n- lighttpd-fastcgi\\n- PHP version 7+" "${r}" "${c}"; then
printf " %b Aborting installation due to unsupported RPM based distribution\\n" "${CROSS}" printf " %b Aborting installation due to unsupported RPM based distribution\\n" "${CROSS}"
exit exit
@@ -428,6 +489,13 @@ select_rpm_php(){
printf " %b Continuing installation with unsupported RPM based distribution\\n" "${INFO}" printf " %b Continuing installation with unsupported RPM based distribution\\n" "${INFO}"
fi fi
fi fi
# If neither apt-get or yum/dnf package managers were found
else
# it's not an OS we can support,
printf " %b OS distribution not supported\\n" "${CROSS}"
# so exit the installer
exit
fi fi
} }
@@ -514,7 +582,7 @@ update_repo() {
git stash --all --quiet &> /dev/null || true # Okay for stash failure git stash --all --quiet &> /dev/null || true # Okay for stash failure
git clean --quiet --force -d || true # Okay for already clean directory git clean --quiet --force -d || true # Okay for already clean directory
# Pull the latest commits # Pull the latest commits
git pull --no-rebase --quiet &> /dev/null || return $? git pull --quiet &> /dev/null || return $?
# Check current branch. If it is master, then reset to the latest available tag. # Check current branch. If it is master, then reset to the latest available tag.
# In case extra commits have been added after tagging/release (i.e in case of metadata updates/README.MD tweaks) # In case extra commits have been added after tagging/release (i.e in case of metadata updates/README.MD tweaks)
curBranch=$(git rev-parse --abbrev-ref HEAD) curBranch=$(git rev-parse --abbrev-ref HEAD)
@@ -624,17 +692,9 @@ welcomeDialogs() {
whiptail --msgbox --backtitle "Plea" --title "Free and open source" "\\n\\nThe Pi-hole is free, but powered by your donations: https://pi-hole.net/donate/" "${r}" "${c}" whiptail --msgbox --backtitle "Plea" --title "Free and open source" "\\n\\nThe Pi-hole is free, but powered by your donations: https://pi-hole.net/donate/" "${r}" "${c}"
# Explain the need for a static address # Explain the need for a static address
if whiptail --defaultno --backtitle "Initiating network interface" --title "Static IP Needed" --yesno "\\n\\nThe Pi-hole is a SERVER so it needs a STATIC IP ADDRESS to function properly. whiptail --msgbox --backtitle "Initiating network interface" --title "Static IP Needed" "\\n\\nThe Pi-hole is a SERVER so it needs a STATIC IP ADDRESS to function properly.
IMPORTANT: If you have not already done so, you must ensure that this device has a static IP. Either through DHCP reservation, or by manually assigning one. Depending on your operating system, there are many ways to achieve this. In the next section, you can choose to use your current network settings (DHCP) or to manually edit them." "${r}" "${c}"
Choose yes to indicate that you have understood this message, and wish to continue" "${r}" "${c}"; then
#Nothing to do, continue
echo
else
printf " %b Installer exited at static IP message.\\n" "${INFO}"
exit 1
fi
} }
# A function that lets the user pick an interface to use with Pi-hole # A function that lets the user pick an interface to use with Pi-hole
@@ -674,7 +734,7 @@ chooseInterface() {
# Feed the available interfaces into this while loop # Feed the available interfaces into this while loop
done <<< "${availableInterfaces}" done <<< "${availableInterfaces}"
# The whiptail command that will be run, stored in a variable # The whiptail command that will be run, stored in a variable
chooseInterfaceCmd=(whiptail --separate-output --radiolist "Choose An Interface (press space to toggle selection)" "${r}" "${c}" 6) chooseInterfaceCmd=(whiptail --separate-output --radiolist "Choose An Interface (press space to toggle selection)" "${r}" "${c}" "${interfaceCount}")
# Now run the command using the interfaces saved into the array # Now run the command using the interfaces saved into the array
chooseInterfaceOptions=$("${chooseInterfaceCmd[@]}" "${interfacesArray[@]}" 2>&1 >/dev/tty) || \ chooseInterfaceOptions=$("${chooseInterfaceCmd[@]}" "${interfacesArray[@]}" 2>&1 >/dev/tty) || \
# If the user chooses Cancel, exit # If the user chooses Cancel, exit
@@ -716,8 +776,9 @@ testIPv6() {
fi fi
} }
find_IPv6_information() { # A dialog for showing the user about IPv6 blocking
# Detects IPv6 address used for communication to WAN addresses. useIPv6dialog() {
# Determine the IPv6 address used for blocking
IPV6_ADDRESSES=($(ip -6 address | grep 'scope global' | awk '{print $2}')) IPV6_ADDRESSES=($(ip -6 address | grep 'scope global' | awk '{print $2}'))
# For each address in the array above, determine the type of IPv6 address it is # For each address in the array above, determine the type of IPv6 address it is
@@ -737,60 +798,89 @@ find_IPv6_information() {
# set the IPv6 address to the ULA address # set the IPv6 address to the ULA address
IPV6_ADDRESS="${ULA_ADDRESS}" IPV6_ADDRESS="${ULA_ADDRESS}"
# Show this info to the user # Show this info to the user
printf " %b Found IPv6 ULA address\\n" "${INFO}" printf " %b Found IPv6 ULA address, using it for blocking IPv6 ads\\n" "${INFO}"
# Otherwise, if the GUA_ADDRESS has a value, # Otherwise, if the GUA_ADDRESS has a value,
elif [[ ! -z "${GUA_ADDRESS}" ]]; then elif [[ ! -z "${GUA_ADDRESS}" ]]; then
# Let the user know # Let the user know
printf " %b Found IPv6 GUA address\\n" "${INFO}" printf " %b Found IPv6 GUA address, using it for blocking IPv6 ads\\n" "${INFO}"
# And assign it to the global variable # And assign it to the global variable
IPV6_ADDRESS="${GUA_ADDRESS}" IPV6_ADDRESS="${GUA_ADDRESS}"
# If none of those work, # If none of those work,
else else
printf " %b Unable to find IPv6 ULA/GUA address\\n" "${INFO}" # explain that IPv6 blocking will not be used
printf " %b Unable to find IPv6 ULA/GUA address, IPv6 adblocking will not be enabled\\n" "${INFO}"
# So set the variable to be empty # So set the variable to be empty
IPV6_ADDRESS="" IPV6_ADDRESS=""
fi fi
# If the IPV6_ADDRESS contains a value
if [[ ! -z "${IPV6_ADDRESS}" ]]; then
# Display that IPv6 is supported and will be used
whiptail --msgbox --backtitle "IPv6..." --title "IPv6 Supported" "$IPV6_ADDRESS will be used to block ads." "${r}" "${c}"
fi
} }
# A function to collect IPv4 and IPv6 information of the device # A function to check if we should use IPv4 and/or IPv6 for blocking ads
collect_v4andv6_information() { use4andor6() {
# Named local variables
local useIPv4
local useIPv6
# Let user choose IPv4 and/or IPv6 via a checklist
cmd=(whiptail --separate-output --checklist "Select Protocols (press space to toggle selection)" "${r}" "${c}" 2)
# In an array, show the options available:
# IPv4 (on by default)
options=(IPv4 "Block ads over IPv4" on
# or IPv6 (on by default if available)
IPv6 "Block ads over IPv6" on)
# In a variable, show the choices available; exit if Cancel is selected
choices=$("${cmd[@]}" "${options[@]}" 2>&1 >/dev/tty) || { printf " %bCancel was selected, exiting installer%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"; exit 1; }
# For each choice available,
for choice in ${choices}
do
# Set the values to true
case ${choice} in
IPv4 ) useIPv4=true;;
IPv6 ) useIPv6=true;;
esac
done
# If IPv4 is to be used,
if [[ "${useIPv4}" ]]; then
# Run our function to get the information we need
find_IPv4_information find_IPv4_information
getStaticIPv4Settings
setStaticIPv4
fi
# If IPv6 is to be used,
if [[ "${useIPv6}" ]]; then
# Run our function to get this information
useIPv6dialog
fi
# Echo the information to the user # Echo the information to the user
printf " %b IPv4 address: %s\\n" "${INFO}" "${IPV4_ADDRESS}" printf " %b IPv4 address: %s\\n" "${INFO}" "${IPV4_ADDRESS}"
# if `dhcpcd` is used offer to set this as static IP for the device
if [[ -f "/etc/dhcpcd.conf" ]]; then
# configure networking via dhcpcd
getStaticIPv4Settings
fi
find_IPv6_information
printf " %b IPv6 address: %s\\n" "${INFO}" "${IPV6_ADDRESS}" printf " %b IPv6 address: %s\\n" "${INFO}" "${IPV6_ADDRESS}"
# If neither protocol is selected,
if [[ ! "${useIPv4}" ]] && [[ ! "${useIPv6}" ]]; then
# Show an error in red
printf " %bError: Neither IPv4 or IPv6 selected%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"
# and exit with an error
exit 1
fi
} }
getStaticIPv4Settings() { getStaticIPv4Settings() {
# Local, named variables # Local, named variables
local ipSettingsCorrect local ipSettingsCorrect
local DHCPChoice
# Ask if the user wants to use DHCP settings as their static IP # Ask if the user wants to use DHCP settings as their static IP
# This is useful for users that are using DHCP reservations; then we can just use the information gathered via our functions # This is useful for users that are using DHCP reservations; then we can just use the information gathered via our functions
DHCPChoice=$(whiptail --backtitle "Calibrating network interface" --title "Static IP Address" --menu --separate-output "Do you want to use your current network settings as a static address? \\n if whiptail --backtitle "Calibrating network interface" --title "Static IP Address" --yesno "Do you want to use your current network settings as a static address?
IP address: ${IPV4_ADDRESS} \\n IP address: ${IPV4_ADDRESS}
Gateway: ${IPv4gw} \\n" "${r}" "${c}" 3\ Gateway: ${IPv4gw}" "${r}" "${c}"; then
"Yes" "Set static IP using current values" \
"No" "Set static IP using custom values" \
"Skip" "I will set a static IP later, or have already done so" 3>&2 2>&1 1>&3) || \
{ printf " %bCancel was selected, exiting installer%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"; exit 1; }
case ${DHCPChoice} in
"Yes")
# If they choose yes, let the user know that the IP address will not be available via DHCP and may cause a conflict. # If they choose yes, let the user know that the IP address will not be available via DHCP and may cause a conflict.
whiptail --msgbox --backtitle "IP information" --title "FYI: IP Conflict" "It is possible your router could still try to assign this IP to a device, which would cause a conflict. But in most cases the router is smart enough to not do that. whiptail --msgbox --backtitle "IP information" --title "FYI: IP Conflict" "It is possible your router could still try to assign this IP to a device, which would cause a conflict. But in most cases the router is smart enough to not do that.
If you are worried, either manually set the address, or modify the DHCP reservation pool so it does not include the IP you want. If you are worried, either manually set the address, or modify the DHCP reservation pool so it does not include the IP you want.
It is also possible to use a DHCP reservation, but if you are going to do that, you might as well set a static address." "${r}" "${c}" It is also possible to use a DHCP reservation, but if you are going to do that, you might as well set a static address." "${r}" "${c}"
# Nothing else to do since the variables are already set above # Nothing else to do since the variables are already set above
setDHCPCD else
;;
"No")
# Otherwise, we need to ask the user to input their desired settings. # Otherwise, we need to ask the user to input their desired settings.
# Start by getting the IPv4 address (pre-filling it with info gathered from DHCP) # Start by getting the IPv4 address (pre-filling it with info gathered from DHCP)
# Start a loop to let the user enter their information with the chance to go back and edit it if necessary # Start a loop to let the user enter their information with the chance to go back and edit it if necessary
@@ -819,9 +909,8 @@ getStaticIPv4Settings() {
ipSettingsCorrect=False ipSettingsCorrect=False
fi fi
done done
setDHCPCD # End the if statement for DHCP vs. static
;; fi
esac
} }
# Configure networking via dhcpcd # Configure networking via dhcpcd
@@ -844,6 +933,93 @@ setDHCPCD() {
fi fi
} }
# Configure networking ifcfg-xxxx file found at /etc/sysconfig/network-scripts/
# This function requires the full path of an ifcfg file passed as an argument
setIFCFG() {
# Local, named variables
local IFCFG_FILE
local IPADDR
local CIDR
IFCFG_FILE=$1
printf -v IPADDR "%s" "${IPV4_ADDRESS%%/*}"
# Check if the desired IP is already set
if grep -Eq "${IPADDR}(\\b|\\/)" "${IFCFG_FILE}"; then
printf " %b Static IP already configured\\n" "${INFO}"
else
# Otherwise, put the IP in variables without the CIDR notation
printf -v CIDR "%s" "${IPV4_ADDRESS##*/}"
# Backup existing interface configuration:
cp -p "${IFCFG_FILE}" "${IFCFG_FILE}".pihole.orig
# Build Interface configuration file using the GLOBAL variables we have
{
echo "# Configured via Pi-hole installer"
echo "DEVICE=$PIHOLE_INTERFACE"
echo "BOOTPROTO=none"
echo "ONBOOT=yes"
echo "IPADDR=$IPADDR"
echo "PREFIX=$CIDR"
echo "GATEWAY=$IPv4gw"
echo "DNS1=$PIHOLE_DNS_1"
echo "DNS2=$PIHOLE_DNS_2"
echo "USERCTL=no"
}> "${IFCFG_FILE}"
chmod 644 "${IFCFG_FILE}"
chown root:root "${IFCFG_FILE}"
# Use ip to immediately set the new address
ip addr replace dev "${PIHOLE_INTERFACE}" "${IPV4_ADDRESS}"
# If NetworkMangler command line interface exists and ready to mangle,
if is_command nmcli && nmcli general status &> /dev/null; then
# Tell NetworkManager to read our new sysconfig file
nmcli con load "${IFCFG_FILE}" > /dev/null
fi
# Show a warning that the user may need to restart
printf " %b Set IP address to %s\\n You may need to restart after the install is complete\\n" "${TICK}" "${IPV4_ADDRESS%%/*}"
fi
}
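With illustrative values (interface eth0, address 192.168.1.10/24, gateway 192.168.1.1, upstream DNS 8.8.8.8 and 8.8.4.4), the ifcfg file written by setIFCFG would look like:
    # Configured via Pi-hole installer
    DEVICE=eth0
    BOOTPROTO=none
    ONBOOT=yes
    IPADDR=192.168.1.10
    PREFIX=24
    GATEWAY=192.168.1.1
    DNS1=8.8.8.8
    DNS2=8.8.4.4
    USERCTL=no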
setStaticIPv4() {
# Local, named variables
local IFCFG_FILE
local CONNECTION_NAME
# If a static interface is already configured, we are done.
if [[ -r "/etc/sysconfig/network/ifcfg-${PIHOLE_INTERFACE}" ]]; then
if grep -q '^BOOTPROTO=.static.' "/etc/sysconfig/network/ifcfg-${PIHOLE_INTERFACE}"; then
return 0
fi
fi
# For the Debian family, if dhcpcd.conf exists then we can just configure using DHCPD.
if [[ -f "/etc/dhcpcd.conf" ]]; then
setDHCPCD
return 0
fi
# If a DHCPCD config file was not found, check for an ifcfg config file based on the interface name
if [[ -f "/etc/sysconfig/network-scripts/ifcfg-${PIHOLE_INTERFACE}" ]];then
# If it exists, then we can configure using IFCFG
IFCFG_FILE=/etc/sysconfig/network-scripts/ifcfg-${PIHOLE_INTERFACE}
setIFCFG "${IFCFG_FILE}"
return 0
fi
# If an ifcfg config does not exists for the interface name, search for one based on the connection name via network manager
if is_command nmcli && nmcli general status &> /dev/null; then
CONNECTION_NAME=$(nmcli dev show "${PIHOLE_INTERFACE}" | grep 'GENERAL.CONNECTION' | cut -d: -f2 | sed 's/^System//' | xargs | tr ' ' '_')
IFCFG_FILE=/etc/sysconfig/network-scripts/ifcfg-${CONNECTION_NAME}
if [[ -f "${IFCFG_FILE}" ]];then
# If it exists,
setIFCFG "${IFCFG_FILE}"
return 0
else
printf " %b Warning: sysconfig network script not found. Creating ${IFCFG_FILE}\\n" "${INFO}"
touch "${IFCFG_FILE}"
setIFCFG "${IFCFG_FILE}"
return 0
fi
fi
# If previous conditions failed, show an error and exit
printf " %b Warning: Unable to locate configuration file to set static IPv4 address\\n" "${INFO}"
exit 1
}
# Check an IP address to see if it is a valid one # Check an IP address to see if it is a valid one
valid_ip() { valid_ip() {
# Local, named variables # Local, named variables
@@ -856,7 +1032,7 @@ valid_ip() {
# Regex matching an optional port (starting with '#') range of 1-65536 # Regex matching an optional port (starting with '#') range of 1-65536
local portelem="(#(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|6[0-4][0-9]{3}|[1-5][0-9]{4}|[1-9][0-9]{0,3}|0))?"; local portelem="(#(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|6[0-4][0-9]{3}|[1-5][0-9]{4}|[1-9][0-9]{0,3}|0))?";
# Build a full IPv4 regex from the above subexpressions # Build a full IPv4 regex from the above subexpressions
local regex="^${ipv4elem}\\.${ipv4elem}\\.${ipv4elem}\\.${ipv4elem}${portelem}$" local regex="^${ipv4elem}\.${ipv4elem}\.${ipv4elem}\.${ipv4elem}${portelem}$"
# Evaluate the regex, and return the result # Evaluate the regex, and return the result
[[ $ip =~ ${regex} ]] [[ $ip =~ ${regex} ]]
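A few illustrative calls against that pattern:
    valid_ip "192.168.1.10"       && echo "accepted"   # plain dotted-quad
    valid_ip "192.168.1.10#5335"  && echo "accepted"   # optional #port suffix
    valid_ip "192.168.1.256"      || echo "rejected"   # octet out of range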
@@ -1128,11 +1304,8 @@ chooseBlocklists() {
appendToListsFile "${choice}" appendToListsFile "${choice}"
done done
# Create an empty adList file with appropriate permissions. # Create an empty adList file with appropriate permissions.
if [ ! -f "${adlistFile}" ]; then touch "${adlistFile}"
install -m 644 /dev/null "${adlistFile}"
else
chmod 644 "${adlistFile}" chmod 644 "${adlistFile}"
fi
} }
# Accept a string parameter, it must be one of the default lists # Accept a string parameter, it must be one of the default lists
@@ -1162,10 +1335,8 @@ version_check_dnsmasq() {
local dnsmasq_pihole_id_string="addn-hosts=/etc/pihole/gravity.list" local dnsmasq_pihole_id_string="addn-hosts=/etc/pihole/gravity.list"
local dnsmasq_pihole_id_string2="# Dnsmasq config for Pi-hole's FTLDNS" local dnsmasq_pihole_id_string2="# Dnsmasq config for Pi-hole's FTLDNS"
local dnsmasq_original_config="${PI_HOLE_LOCAL_REPO}/advanced/dnsmasq.conf.original" local dnsmasq_original_config="${PI_HOLE_LOCAL_REPO}/advanced/dnsmasq.conf.original"
local dnsmasq_pihole_01_source="${PI_HOLE_LOCAL_REPO}/advanced/01-pihole.conf" local dnsmasq_pihole_01_snippet="${PI_HOLE_LOCAL_REPO}/advanced/01-pihole.conf"
local dnsmasq_pihole_01_target="/etc/dnsmasq.d/01-pihole.conf" local dnsmasq_pihole_01_location="/etc/dnsmasq.d/01-pihole.conf"
local dnsmasq_rfc6761_06_source="${PI_HOLE_LOCAL_REPO}/advanced/06-rfc6761.conf"
local dnsmasq_rfc6761_06_target="/etc/dnsmasq.d/06-rfc6761.conf"
# If the dnsmasq config file exists # If the dnsmasq config file exists
if [[ -f "${dnsmasq_conf}" ]]; then if [[ -f "${dnsmasq_conf}" ]]; then
@@ -1194,48 +1365,44 @@ version_check_dnsmasq() {
printf "%b %b No dnsmasq.conf found... restoring default dnsmasq.conf...\\n" "${OVER}" "${TICK}" printf "%b %b No dnsmasq.conf found... restoring default dnsmasq.conf...\\n" "${OVER}" "${TICK}"
fi fi
printf " %b Installing %s..." "${INFO}" "${dnsmasq_pihole_01_target}" printf " %b Copying 01-pihole.conf to /etc/dnsmasq.d/01-pihole.conf..." "${INFO}"
# Check to see if dnsmasq directory exists (it may not due to being a fresh install and dnsmasq no longer being a dependency) # Check to see if dnsmasq directory exists (it may not due to being a fresh install and dnsmasq no longer being a dependency)
if [[ ! -d "/etc/dnsmasq.d" ]];then if [[ ! -d "/etc/dnsmasq.d" ]];then
install -d -m 755 "/etc/dnsmasq.d" install -d -m 755 "/etc/dnsmasq.d"
fi fi
# Copy the new Pi-hole DNS config file into the dnsmasq.d directory # Copy the new Pi-hole DNS config file into the dnsmasq.d directory
install -D -m 644 -T "${dnsmasq_pihole_01_source}" "${dnsmasq_pihole_01_target}" install -D -m 644 -T "${dnsmasq_pihole_01_snippet}" "${dnsmasq_pihole_01_location}"
printf "%b %b Installed %s\n" "${OVER}" "${TICK}" "${dnsmasq_pihole_01_target}" printf "%b %b Copying 01-pihole.conf to /etc/dnsmasq.d/01-pihole.conf\\n" "${OVER}" "${TICK}"
# Replace our placeholder values with the GLOBAL DNS variables that we populated earlier # Replace our placeholder values with the GLOBAL DNS variables that we populated earlier
# First, swap in the interface to listen on, # First, swap in the interface to listen on,
sed -i "s/@INT@/$PIHOLE_INTERFACE/" "${dnsmasq_pihole_01_target}" sed -i "s/@INT@/$PIHOLE_INTERFACE/" "${dnsmasq_pihole_01_location}"
if [[ "${PIHOLE_DNS_1}" != "" ]]; then if [[ "${PIHOLE_DNS_1}" != "" ]]; then
# then swap in the primary DNS server. # then swap in the primary DNS server.
sed -i "s/@DNS1@/$PIHOLE_DNS_1/" "${dnsmasq_pihole_01_target}" sed -i "s/@DNS1@/$PIHOLE_DNS_1/" "${dnsmasq_pihole_01_location}"
else else
# Otherwise, remove the line which sets DNS1. # Otherwise, remove the line which sets DNS1.
sed -i '/^server=@DNS1@/d' "${dnsmasq_pihole_01_target}" sed -i '/^server=@DNS1@/d' "${dnsmasq_pihole_01_location}"
fi fi
# Ditto if DNS2 is not empty # Ditto if DNS2 is not empty
if [[ "${PIHOLE_DNS_2}" != "" ]]; then if [[ "${PIHOLE_DNS_2}" != "" ]]; then
sed -i "s/@DNS2@/$PIHOLE_DNS_2/" "${dnsmasq_pihole_01_target}" sed -i "s/@DNS2@/$PIHOLE_DNS_2/" "${dnsmasq_pihole_01_location}"
else else
sed -i '/^server=@DNS2@/d' "${dnsmasq_pihole_01_target}" sed -i '/^server=@DNS2@/d' "${dnsmasq_pihole_01_location}"
fi fi
# Set the cache size # Set the cache size
sed -i "s/@CACHE_SIZE@/$CACHE_SIZE/" "${dnsmasq_pihole_01_target}" sed -i "s/@CACHE_SIZE@/$CACHE_SIZE/" ${dnsmasq_pihole_01_location}
sed -i 's/^#conf-dir=\/etc\/dnsmasq.d$/conf-dir=\/etc\/dnsmasq.d/' "${dnsmasq_conf}" sed -i 's/^#conf-dir=\/etc\/dnsmasq.d$/conf-dir=\/etc\/dnsmasq.d/' "${dnsmasq_conf}"
# If the user does not want to enable logging, # If the user does not want to enable logging,
if [[ "${QUERY_LOGGING}" == false ]] ; then if [[ "${QUERY_LOGGING}" == false ]] ; then
# disable it by commenting out the directive in the DNS config file # disable it by commenting out the directive in the DNS config file
sed -i 's/^log-queries/#log-queries/' "${dnsmasq_pihole_01_target}" sed -i 's/^log-queries/#log-queries/' "${dnsmasq_pihole_01_location}"
else else
# Otherwise, enable it by uncommenting the directive in the DNS config file # Otherwise, enable it by uncommenting the directive in the DNS config file
sed -i 's/^#log-queries/log-queries/' "${dnsmasq_pihole_01_target}" sed -i 's/^#log-queries/log-queries/' "${dnsmasq_pihole_01_location}"
fi fi
printf " %b Installing %s..." "${INFO}" "${dnsmasq_rfc6761_06_source}"
install -D -m 644 -T "${dnsmasq_rfc6761_06_source}" "${dnsmasq_rfc6761_06_target}"
printf "%b %b Installed %s\n" "${OVER}" "${TICK}" "${dnsmasq_rfc6761_06_target}"
} }
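For orientation, the placeholder substitution above turns template lines like these into concrete dnsmasq settings; the directive names other than server= are assumptions based on the sed patterns, and the values are illustrative:
    # server=@DNS1@            ->    server=8.8.8.8
    # server=@DNS2@            ->    server=8.8.4.4
    # interface=@INT@          ->    interface=eth0
    # cache-size=@CACHE_SIZE@  ->    cache-size=10000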
# Clean an existing installation to prepare for upgrade/reinstall # Clean an existing installation to prepare for upgrade/reinstall
@@ -1302,10 +1469,10 @@ installConfigs() {
echo "${DNS_SERVERS}" > "${PI_HOLE_CONFIG_DIR}/dns-servers.conf" echo "${DNS_SERVERS}" > "${PI_HOLE_CONFIG_DIR}/dns-servers.conf"
chmod 644 "${PI_HOLE_CONFIG_DIR}/dns-servers.conf" chmod 644 "${PI_HOLE_CONFIG_DIR}/dns-servers.conf"
# Install template file if it does not exist # Install empty file if it does not exist
if [[ ! -r "${PI_HOLE_CONFIG_DIR}/pihole-FTL.conf" ]]; then if [[ ! -r "${PI_HOLE_CONFIG_DIR}/pihole-FTL.conf" ]]; then
install -d -m 0755 ${PI_HOLE_CONFIG_DIR} install -d -m 0755 ${PI_HOLE_CONFIG_DIR}
if ! install -T -o pihole -m 664 "${PI_HOLE_LOCAL_REPO}/advanced/Templates/pihole-FTL.conf" "${PI_HOLE_CONFIG_DIR}/pihole-FTL.conf" &>/dev/null; then if ! install -o pihole -m 664 /dev/null "${PI_HOLE_CONFIG_DIR}/pihole-FTL.conf" &>/dev/null; then
printf " %bError: Unable to initialize configuration file %s/pihole-FTL.conf\\n" "${COL_LIGHT_RED}" "${PI_HOLE_CONFIG_DIR}" printf " %bError: Unable to initialize configuration file %s/pihole-FTL.conf\\n" "${COL_LIGHT_RED}" "${PI_HOLE_CONFIG_DIR}"
return 1 return 1
fi fi
@@ -1326,19 +1493,18 @@ installConfigs() {
# make it and set the owners # make it and set the owners
install -d -m 755 -o "${USER}" -g root /etc/lighttpd install -d -m 755 -o "${USER}" -g root /etc/lighttpd
# Otherwise, if the config file already exists # Otherwise, if the config file already exists
elif [[ -f "${lighttpdConfig}" ]]; then elif [[ -f "/etc/lighttpd/lighttpd.conf" ]]; then
# back up the original # back up the original
mv "${lighttpdConfig}"{,.orig} mv /etc/lighttpd/lighttpd.conf /etc/lighttpd/lighttpd.conf.orig
fi fi
# and copy in the config file Pi-hole needs # and copy in the config file Pi-hole needs
install -D -m 644 -T ${PI_HOLE_LOCAL_REPO}/advanced/${LIGHTTPD_CFG} "${lighttpdConfig}" install -D -m 644 -T ${PI_HOLE_LOCAL_REPO}/advanced/${LIGHTTPD_CFG} /etc/lighttpd/lighttpd.conf
# Make sure the external.conf file exists, as lighttpd v1.4.50 crashes without it # Make sure the external.conf file exists, as lighttpd v1.4.50 crashes without it
if [ ! -f /etc/lighttpd/external.conf ]; then touch /etc/lighttpd/external.conf
install -m 644 /dev/null /etc/lighttpd/external.conf chmod 644 /etc/lighttpd/external.conf
fi
# If there is a custom block page in the html/pihole directory, replace 404 handler in lighttpd config # If there is a custom block page in the html/pihole directory, replace 404 handler in lighttpd config
if [[ -f "${PI_HOLE_BLOCKPAGE_DIR}/custom.php" ]]; then if [[ -f "${PI_HOLE_BLOCKPAGE_DIR}/custom.php" ]]; then
sed -i 's/^\(server\.error-handler-404\s*=\s*\).*$/\1"\/pihole\/custom\.php"/' "${lighttpdConfig}" sed -i 's/^\(server\.error-handler-404\s*=\s*\).*$/\1"pihole\/custom\.php"/' /etc/lighttpd/lighttpd.conf
fi fi
# Make the directories if they do not exist and set the owners # Make the directories if they do not exist and set the owners
mkdir -p /run/lighttpd mkdir -p /run/lighttpd
@@ -1375,12 +1541,7 @@ install_manpage() {
# Testing complete, copy the files & update the man db # Testing complete, copy the files & update the man db
install -D -m 644 -T ${PI_HOLE_LOCAL_REPO}/manpages/pihole.8 /usr/local/share/man/man8/pihole.8 install -D -m 644 -T ${PI_HOLE_LOCAL_REPO}/manpages/pihole.8 /usr/local/share/man/man8/pihole.8
install -D -m 644 -T ${PI_HOLE_LOCAL_REPO}/manpages/pihole-FTL.8 /usr/local/share/man/man8/pihole-FTL.8 install -D -m 644 -T ${PI_HOLE_LOCAL_REPO}/manpages/pihole-FTL.8 /usr/local/share/man/man8/pihole-FTL.8
install -D -m 644 -T ${PI_HOLE_LOCAL_REPO}/manpages/pihole-FTL.conf.5 /usr/local/share/man/man5/pihole-FTL.conf.5
# remove previously installed "pihole-FTL.conf.5" man page
if [[ -f "/usr/local/share/man/man5/pihole-FTL.conf.5" ]]; then
rm /usr/local/share/man/man5/pihole-FTL.conf.5
fi
if mandb -q &>/dev/null; then if mandb -q &>/dev/null; then
# Updated successfully # Updated successfully
printf "%b %b man pages installed and database updated\\n" "${OVER}" "${TICK}" printf "%b %b man pages installed and database updated\\n" "${OVER}" "${TICK}"
@@ -1388,7 +1549,7 @@ install_manpage() {
else else
# Something is wrong with the system's man installation, clean up # Something is wrong with the system's man installation, clean up
# our files, (leave everything how we found it). # our files, (leave everything how we found it).
rm /usr/local/share/man/man8/pihole.8 /usr/local/share/man/man8/pihole-FTL.8 rm /usr/local/share/man/man8/pihole.8 /usr/local/share/man/man8/pihole-FTL.8 /usr/local/share/man/man5/pihole-FTL.conf.5
printf "%b %b man page db not updated, man pages not installed\\n" "${OVER}" "${CROSS}" printf "%b %b man page db not updated, man pages not installed\\n" "${OVER}" "${CROSS}"
fi fi
} }
@@ -1490,6 +1651,9 @@ disable_resolved_stublistener() {
} }
update_package_cache() { update_package_cache() {
# Running apt-get update/upgrade with minimal output can cause some issues with
# requiring user input (e.g password for phpmyadmin see #218)
# Update package cache on apt based OSes. Do this every time since # Update package cache on apt based OSes. Do this every time since
# it's quick and packages can be updated at any time. # it's quick and packages can be updated at any time.
@@ -1501,14 +1665,8 @@ update_package_cache() {
printf "%b %b %s\\n" "${OVER}" "${TICK}" "${str}" printf "%b %b %s\\n" "${OVER}" "${TICK}" "${str}"
else else
# Otherwise, show an error and exit # Otherwise, show an error and exit
# In case we used apt-get and apt is also available, we use this as recommendation as we have seen it
# gives more user-friendly (interactive) advice
if [[ ${PKG_MANAGER} == "apt-get" ]] && is_command apt ; then
UPDATE_PKG_CACHE="apt update"
fi
printf "%b %b %s\\n" "${OVER}" "${CROSS}" "${str}" printf "%b %b %s\\n" "${OVER}" "${CROSS}" "${str}"
printf " %bError: Unable to update package cache. Please try \"%s\"%b\\n" "${COL_LIGHT_RED}" "sudo ${UPDATE_PKG_CACHE}" "${COL_NC}" printf " %bError: Unable to update package cache. Please try \"%s\"%b" "${COL_LIGHT_RED}" "${UPDATE_PKG_CACHE}" "${COL_NC}"
return 1 return 1
fi fi
} }
@@ -1535,7 +1693,20 @@ notify_package_updates_available() {
fi fi
} }
# This counter is outside of install_dependent_packages so that it can count the number of times the function is called.
counter=0
install_dependent_packages() { install_dependent_packages() {
# Local, named variables should be used here, especially for an iterator
# Add one to the counter
counter=$((counter+1))
if [[ "${counter}" == 1 ]]; then
# On the first loop, print a special message
printf " %b Installer Dependency checks...\\n" "${INFO}"
else
# On all subsequent loops, print a generic message.
printf " %b Main Dependency checks...\\n" "${INFO}"
fi
# Install packages passed in via argument array # Install packages passed in via argument array
# No spinner - conflicts with set -e # No spinner - conflicts with set -e
@@ -1560,8 +1731,6 @@ install_dependent_packages() {
# If there's anything to install, install everything in the list. # If there's anything to install, install everything in the list.
if [[ "${#installArray[@]}" -gt 0 ]]; then if [[ "${#installArray[@]}" -gt 0 ]]; then
test_dpkg_lock test_dpkg_lock
# Running apt-get install with minimal output can cause some issues with
# requiring user input (e.g password for phpmyadmin see #218)
printf " %b Processing %s install(s) for: %s, please wait...\\n" "${INFO}" "${PKG_MANAGER}" "${installArray[*]}" printf " %b Processing %s install(s) for: %s, please wait...\\n" "${INFO}" "${PKG_MANAGER}" "${installArray[*]}"
printf '%*s\n' "$columns" '' | tr " " -; printf '%*s\n' "$columns" '' | tr " " -;
"${PKG_INSTALL[@]}" "${installArray[@]}" "${PKG_INSTALL[@]}" "${installArray[@]}"
@@ -1740,7 +1909,7 @@ finalExports() {
# If the setup variable file exists, # If the setup variable file exists,
if [[ -e "${setupVars}" ]]; then if [[ -e "${setupVars}" ]]; then
# update the variables in the file # update the variables in the file
sed -i.update.bak '/PIHOLE_INTERFACE/d;/IPV4_ADDRESS/d;/IPV6_ADDRESS/d;/PIHOLE_DNS_1\b/d;/PIHOLE_DNS_2\b/d;/QUERY_LOGGING/d;/INSTALL_WEB_SERVER/d;/INSTALL_WEB_INTERFACE/d;/LIGHTTPD_ENABLED/d;/CACHE_SIZE/d;/DNS_FQDN_REQUIRED/d;/DNS_BOGUS_PRIV/d;/DNSMASQ_LISTENING/d;' "${setupVars}" sed -i.update.bak '/PIHOLE_INTERFACE/d;/IPV4_ADDRESS/d;/IPV6_ADDRESS/d;/PIHOLE_DNS_1\b/d;/PIHOLE_DNS_2\b/d;/QUERY_LOGGING/d;/INSTALL_WEB_SERVER/d;/INSTALL_WEB_INTERFACE/d;/LIGHTTPD_ENABLED/d;/CACHE_SIZE/d;' "${setupVars}"
fi fi
# echo the information to the user # echo the information to the user
{ {
@@ -1754,9 +1923,6 @@ finalExports() {
echo "INSTALL_WEB_INTERFACE=${INSTALL_WEB_INTERFACE}" echo "INSTALL_WEB_INTERFACE=${INSTALL_WEB_INTERFACE}"
echo "LIGHTTPD_ENABLED=${LIGHTTPD_ENABLED}" echo "LIGHTTPD_ENABLED=${LIGHTTPD_ENABLED}"
echo "CACHE_SIZE=${CACHE_SIZE}" echo "CACHE_SIZE=${CACHE_SIZE}"
echo "DNS_FQDN_REQUIRED=${DNS_FQDN_REQUIRED:-true}"
echo "DNS_BOGUS_PRIV=${DNS_BOGUS_PRIV:-true}"
echo "DNSMASQ_LISTENING=${DNSMASQ_LISTENING:-local}"
}>> "${setupVars}" }>> "${setupVars}"
chmod 644 "${setupVars}" chmod 644 "${setupVars}"
@@ -1778,17 +1944,9 @@ finalExports() {
# Install the logrotate script # Install the logrotate script
installLogrotate() { installLogrotate() {
local str="Installing latest logrotate script" local str="Installing latest logrotate script"
local target=/etc/pihole/logrotate
printf "\\n %b %s..." "${INFO}" "${str}" printf "\\n %b %s..." "${INFO}" "${str}"
if [[ -f ${target} ]]; then
printf "\\n\\t%b Existing logrotate file found. No changes made.\\n" "${INFO}"
# Return value isn't that important, using 2 to indicate that it's not a fatal error but
# the function did not complete.
return 2
fi
# Copy the file over from the local repo # Copy the file over from the local repo
install -D -m 644 -T "${PI_HOLE_LOCAL_REPO}"/advanced/Templates/logrotate ${target} install -D -m 644 -T ${PI_HOLE_LOCAL_REPO}/advanced/Templates/logrotate /etc/pihole/logrotate
# Different operating systems have different user / group # Different operating systems have different user / group
# settings for logrotate that makes it impossible to create # settings for logrotate that makes it impossible to create
# a static logrotate file that will work with e.g. # a static logrotate file that will work with e.g.
@@ -1797,13 +1955,34 @@ installLogrotate() {
# the local properties of the /var/log directory # the local properties of the /var/log directory
logusergroup="$(stat -c '%U %G' /var/log)" logusergroup="$(stat -c '%U %G' /var/log)"
# If there is a usergroup for log rotation, # If there is a usergroup for log rotation,
if [[ -n "${logusergroup}" ]]; then if [[ ! -z "${logusergroup}" ]]; then
# replace the line in the logrotate script with that usergroup. # replace the line in the logrotate script with that usergroup.
sed -i "s/# su #/su ${logusergroup}/g;" ${target} sed -i "s/# su #/su ${logusergroup}/g;" /etc/pihole/logrotate
fi fi
printf "%b %b %s\\n" "${OVER}" "${TICK}" "${str}" printf "%b %b %s\\n" "${OVER}" "${TICK}" "${str}"
} }
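To illustrate the sed substitution above (the ownership shown is hypothetical and distro-dependent): if "stat -c '%U %G' /var/log" prints "root syslog", the template's "# su #" placeholder becomes the logrotate directive

    su root syslog

so rotation of the Pi-hole logs runs with the same user and group that own /var/log.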
# At some point in the future this list can be pruned, for now we'll need it to ensure updates don't break.
# Refactoring of install script has changed the name of a couple of variables. Sort them out here.
accountForRefactor() {
sed -i 's/piholeInterface/PIHOLE_INTERFACE/g' "${setupVars}"
sed -i 's/IPv4_address/IPV4_ADDRESS/g' "${setupVars}"
sed -i 's/IPv4addr/IPV4_ADDRESS/g' "${setupVars}"
sed -i 's/IPv6_address/IPV6_ADDRESS/g' "${setupVars}"
sed -i 's/piholeIPv6/IPV6_ADDRESS/g' "${setupVars}"
sed -i 's/piholeDNS1/PIHOLE_DNS_1/g' "${setupVars}"
sed -i 's/piholeDNS2/PIHOLE_DNS_2/g' "${setupVars}"
sed -i 's/^INSTALL_WEB=/INSTALL_WEB_INTERFACE=/' "${setupVars}"
# Add 'INSTALL_WEB_SERVER', if its not been applied already: https://github.com/pi-hole/pi-hole/pull/2115
if ! grep -q '^INSTALL_WEB_SERVER=' ${setupVars}; then
local webserver_installed=false
if grep -q '^INSTALL_WEB_INTERFACE=true' ${setupVars}; then
webserver_installed=true
fi
echo -e "INSTALL_WEB_SERVER=$webserver_installed" >> "${setupVars}"
fi
}
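As a concrete illustration of the renames above (values hypothetical): a legacy setupVars.conf entry piholeInterface=eth0 is rewritten in place to PIHOLE_INTERFACE=eth0, and piholeDNS1=8.8.8.8 becomes PIHOLE_DNS_1=8.8.8.8, so code that sources the file afterwards only ever sees the new variable names.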
# Install base files and web interface # Install base files and web interface
installPihole() { installPihole() {
# If the user wants to install the Web interface, # If the user wants to install the Web interface,
@@ -1834,6 +2013,10 @@ installPihole() {
fi fi
fi fi
fi fi
# For updates and unattended install.
if [[ "${useUpdateVars}" == true ]]; then
accountForRefactor
fi
# Install base files and web interface # Install base files and web interface
if ! installScripts; then if ! installScripts; then
printf " %b Failure in dependent script copy function.\\n" "${CROSS}" printf " %b Failure in dependent script copy function.\\n" "${CROSS}"
@@ -1851,10 +2034,8 @@ installPihole() {
fi fi
# Install the cron file # Install the cron file
installCron installCron
# Install the logrotate file # Install the logrotate file
installLogrotate || true installLogrotate
# Check if dnsmasq is present. If so, disable it and back up any possible # Check if dnsmasq is present. If so, disable it and back up any possible
# config file # config file
disable_dnsmasq disable_dnsmasq
@@ -1872,7 +2053,7 @@ checkSelinux() {
local CURRENT_SELINUX local CURRENT_SELINUX
local SELINUX_ENFORCING=0 local SELINUX_ENFORCING=0
# Check for SELinux configuration file and getenforce command # Check for SELinux configuration file and getenforce command
if [[ -f /etc/selinux/config ]] && is_command getenforce; then if [[ -f /etc/selinux/config ]] && command -v getenforce &> /dev/null; then
# Check the default SELinux mode # Check the default SELinux mode
DEFAULT_SELINUX=$(awk -F= '/^SELINUX=/ {print $2}' /etc/selinux/config) DEFAULT_SELINUX=$(awk -F= '/^SELINUX=/ {print $2}' /etc/selinux/config)
case "${DEFAULT_SELINUX,,}" in case "${DEFAULT_SELINUX,,}" in
@@ -1918,7 +2099,7 @@ displayFinalMessage() {
if [[ "${#1}" -gt 0 ]] ; then if [[ "${#1}" -gt 0 ]] ; then
# set the password to the first argument. # set the password to the first argument.
pwstring="$1" pwstring="$1"
elif [[ $(grep 'WEBPASSWORD' -c "${setupVars}") -gt 0 ]]; then elif [[ $(grep 'WEBPASSWORD' -c /etc/pihole/setupVars.conf) -gt 0 ]]; then
# Else if the password exists from previous setup, we'll load it later # Else if the password exists from previous setup, we'll load it later
pwstring="unchanged" pwstring="unchanged"
else else
@@ -1939,7 +2120,9 @@ Your Admin Webpage login password is ${pwstring}"
IPv4: ${IPV4_ADDRESS%/*} IPv4: ${IPV4_ADDRESS%/*}
IPv6: ${IPV6_ADDRESS:-"Not Configured"} IPv6: ${IPV6_ADDRESS:-"Not Configured"}
If you have not done so already, the above IP should be set to static. If you set a new IP address, you should restart the Pi.
The install log is in /etc/pihole.
${additional}" "${r}" "${c}" ${additional}" "${r}" "${c}"
} }
@@ -1958,7 +2141,7 @@ update_dialogs() {
strAdd="You will be updated to the latest version." strAdd="You will be updated to the latest version."
fi fi
opt2a="Reconfigure" opt2a="Reconfigure"
opt2b="Resets Pi-hole and allows re-selecting settings." opt2b="This will reset your Pi-hole and allow you to enter new settings."
# Display the information to the user # Display the information to the user
UpdateCmd=$(whiptail --title "Existing Install Detected!" --menu "\\n\\nWe have detected an existing install.\\n\\nPlease choose from the following options: \\n($strAdd)" "${r}" "${c}" 2 \ UpdateCmd=$(whiptail --title "Existing Install Detected!" --menu "\\n\\nWe have detected an existing install.\\n\\nPlease choose from the following options: \\n($strAdd)" "${r}" "${c}" 2 \
@@ -2053,7 +2236,7 @@ checkout_pull_branch() {
# Data in the repositories is public anyway so we can make it readable by everyone (+r to keep executable permission if already set by git) # Data in the repositories is public anyway so we can make it readable by everyone (+r to keep executable permission if already set by git)
chmod -R a+rX "${directory}" chmod -R a+rX "${directory}"
git_pull=$(git pull --no-rebase || return 1) git_pull=$(git pull || return 1)
if [[ "$git_pull" == *"up-to-date"* ]]; then if [[ "$git_pull" == *"up-to-date"* ]]; then
printf " %b %s\\n" "${INFO}" "${git_pull}" printf " %b %s\\n" "${INFO}" "${git_pull}"
@@ -2104,6 +2287,7 @@ clone_or_update_repos() {
# shellcheck disable=SC2120 # shellcheck disable=SC2120
FTLinstall() { FTLinstall() {
# Local, named variables # Local, named variables
local latesttag
local str="Downloading and Installing FTL" local str="Downloading and Installing FTL"
printf " %b %s..." "${INFO}" "${str}" printf " %b %s..." "${INFO}" "${str}"
@@ -2142,6 +2326,8 @@ FTLinstall() {
# Before stopping FTL, we download the macvendor database # Before stopping FTL, we download the macvendor database
curl -sSL "https://ftl.pi-hole.net/macvendor.db" -o "${PI_HOLE_CONFIG_DIR}/macvendor.db" || true curl -sSL "https://ftl.pi-hole.net/macvendor.db" -o "${PI_HOLE_CONFIG_DIR}/macvendor.db" || true
chmod 644 "${PI_HOLE_CONFIG_DIR}/macvendor.db"
chown pihole:pihole "${PI_HOLE_CONFIG_DIR}/macvendor.db"
# Stop pihole-FTL service if available # Stop pihole-FTL service if available
stop_service pihole-FTL &> /dev/null stop_service pihole-FTL &> /dev/null
@@ -2174,7 +2360,7 @@ FTLinstall() {
disable_dnsmasq() { disable_dnsmasq() {
# dnsmasq can now be stopped and disabled if it exists # dnsmasq can now be stopped and disabled if it exists
if is_command dnsmasq; then if which dnsmasq &> /dev/null; then
if check_service_active "dnsmasq";then if check_service_active "dnsmasq";then
printf " %b FTL can now resolve DNS Queries without dnsmasq running separately\\n" "${INFO}" printf " %b FTL can now resolve DNS Queries without dnsmasq running separately\\n" "${INFO}"
stop_service dnsmasq stop_service dnsmasq
@@ -2287,7 +2473,7 @@ FTLcheckUpdate() {
printf " %b Checking for existing FTL binary...\\n" "${INFO}" printf " %b Checking for existing FTL binary...\\n" "${INFO}"
local ftlLoc local ftlLoc
ftlLoc=$(command -v pihole-FTL 2>/dev/null) ftlLoc=$(which pihole-FTL 2>/dev/null)
local ftlBranch local ftlBranch
@@ -2304,7 +2490,7 @@ FTLcheckUpdate() {
local localSha1 local localSha1
# if dnsmasq exists and is running at this point, force reinstall of FTL Binary # if dnsmasq exists and is running at this point, force reinstall of FTL Binary
if is_command dnsmasq; then if which dnsmasq &> /dev/null; then
if check_service_active "dnsmasq";then if check_service_active "dnsmasq";then
return 0 return 0
fi fi
@@ -2325,7 +2511,7 @@ FTLcheckUpdate() {
# We already have a pihole-FTL binary downloaded. # We already have a pihole-FTL binary downloaded.
# Alt branches don't have a tagged version against them, so just confirm the checksum of the local vs remote to decide whether we download or not # Alt branches don't have a tagged version against them, so just confirm the checksum of the local vs remote to decide whether we download or not
remoteSha1=$(curl -sSL --fail "https://ftl.pi-hole.net/${ftlBranch}/${binary}.sha1" | cut -d ' ' -f 1) remoteSha1=$(curl -sSL --fail "https://ftl.pi-hole.net/${ftlBranch}/${binary}.sha1" | cut -d ' ' -f 1)
localSha1=$(sha1sum "$(command -v pihole-FTL)" | cut -d ' ' -f 1) localSha1=$(sha1sum "$(which pihole-FTL)" | cut -d ' ' -f 1)
if [[ "${remoteSha1}" != "${localSha1}" ]]; then if [[ "${remoteSha1}" != "${localSha1}" ]]; then
printf " %b Checksums do not match, downloading from ftl.pi-hole.net.\\n" "${INFO}" printf " %b Checksums do not match, downloading from ftl.pi-hole.net.\\n" "${INFO}"
@@ -2355,7 +2541,7 @@ FTLcheckUpdate() {
printf " %b Latest FTL Binary already installed (%s). Confirming Checksum...\\n" "${INFO}" "${FTLlatesttag}" printf " %b Latest FTL Binary already installed (%s). Confirming Checksum...\\n" "${INFO}" "${FTLlatesttag}"
remoteSha1=$(curl -sSL --fail "https://github.com/pi-hole/FTL/releases/download/${FTLversion%$'\r'}/${binary}.sha1" | cut -d ' ' -f 1) remoteSha1=$(curl -sSL --fail "https://github.com/pi-hole/FTL/releases/download/${FTLversion%$'\r'}/${binary}.sha1" | cut -d ' ' -f 1)
localSha1=$(sha1sum "$(command -v pihole-FTL)" | cut -d ' ' -f 1) localSha1=$(sha1sum "$(which pihole-FTL)" | cut -d ' ' -f 1)
if [[ "${remoteSha1}" != "${localSha1}" ]]; then if [[ "${remoteSha1}" != "${localSha1}" ]]; then
printf " %b Corruption detected...\\n" "${INFO}" printf " %b Corruption detected...\\n" "${INFO}"
@@ -2446,30 +2632,8 @@ main() {
fi fi
fi fi
# Check for supported package managers so that we may install dependencies # Check for supported distribution
package_manager_detect distro_check
# Notify user of package availability
notify_package_updates_available
# Install packages necessary to perform os_check
printf " %b Checking for / installing Required dependencies for OS Check...\\n" "${INFO}"
install_dependent_packages "${OS_CHECK_DEPS[@]}"
# Check that the installed OS is officially supported - display warning if not
os_check
# Install packages used by this installation script
printf " %b Checking for / installing Required dependencies for this install script...\\n" "${INFO}"
install_dependent_packages "${INSTALLER_DEPS[@]}"
#In case of RPM based distro, select the proper PHP version
if [[ "$PKG_MANAGER" == "yum" || "$PKG_MANAGER" == "dnf" ]] ; then
select_rpm_php
fi
# Check if SELinux is Enforcing
checkSelinux
# If the setup variable file exists, # If the setup variable file exists,
if [[ -f "${setupVars}" ]]; then if [[ -f "${setupVars}" ]]; then
@@ -2486,6 +2650,19 @@ main() {
fi fi
fi fi
# Start the installer
# Notify user of package availability
notify_package_updates_available
# Install packages used by this installation script
install_dependent_packages "${INSTALLER_DEPS[@]}"
# Check that the installed OS is officially supported - display warning if not
os_check
# Check if SELinux is Enforcing
checkSelinux
if [[ "${useUpdateVars}" == false ]]; then if [[ "${useUpdateVars}" == false ]]; then
# Display welcome dialogs # Display welcome dialogs
welcomeDialogs welcomeDialogs
@@ -2495,12 +2672,12 @@ main() {
get_available_interfaces get_available_interfaces
# Find interfaces and let the user choose one # Find interfaces and let the user choose one
chooseInterface chooseInterface
# find IPv4 and IPv6 information of the device
collect_v4andv6_information
# Decide what upstream DNS Servers to use # Decide what upstream DNS Servers to use
setDNS setDNS
# Give the user a choice of blocklists to include in their install. Or not. # Give the user a choice of blocklists to include in their install. Or not.
chooseBlocklists chooseBlocklists
# Let the user decide if they want to block ads over IPv4 and/or IPv6
use4andor6
# Let the user decide if they want the web interface to be installed automatically # Let the user decide if they want the web interface to be installed automatically
setAdminFlag setAdminFlag
# Let the user decide if they want query logging enabled... # Let the user decide if they want query logging enabled...
@@ -2532,8 +2709,6 @@ main() {
dep_install_list+=("${PIHOLE_WEB_DEPS[@]}") dep_install_list+=("${PIHOLE_WEB_DEPS[@]}")
fi fi
# Install packages used by the actual software
printf " %b Checking for / installing Required dependencies for Pi-hole software...\\n" "${INFO}"
install_dependent_packages "${dep_install_list[@]}" install_dependent_packages "${dep_install_list[@]}"
unset dep_install_list unset dep_install_list
@@ -2573,7 +2748,7 @@ main() {
# Add password to web UI if there is none # Add password to web UI if there is none
pw="" pw=""
# If no password is set, # If no password is set,
if [[ $(grep 'WEBPASSWORD' -c "${setupVars}") == 0 ]] ; then if [[ $(grep 'WEBPASSWORD' -c /etc/pihole/setupVars.conf) == 0 ]] ; then
# generate a random password # generate a random password
pw=$(tr -dc _A-Z-a-z-0-9 < /dev/urandom | head -c 8) pw=$(tr -dc _A-Z-a-z-0-9 < /dev/urandom | head -c 8)
# shellcheck disable=SC1091 # shellcheck disable=SC1091
@@ -2638,7 +2813,7 @@ main() {
printf " %b You may now configure your devices to use the Pi-hole as their DNS server\\n" "${INFO}" printf " %b You may now configure your devices to use the Pi-hole as their DNS server\\n" "${INFO}"
[[ -n "${IPV4_ADDRESS%/*}" ]] && printf " %b Pi-hole DNS (IPv4): %s\\n" "${INFO}" "${IPV4_ADDRESS%/*}" [[ -n "${IPV4_ADDRESS%/*}" ]] && printf " %b Pi-hole DNS (IPv4): %s\\n" "${INFO}" "${IPV4_ADDRESS%/*}"
[[ -n "${IPV6_ADDRESS}" ]] && printf " %b Pi-hole DNS (IPv6): %s\\n" "${INFO}" "${IPV6_ADDRESS}" [[ -n "${IPV6_ADDRESS}" ]] && printf " %b Pi-hole DNS (IPv6): %s\\n" "${INFO}" "${IPV6_ADDRESS}"
printf " %b If you have not done so already, the above IP should be set to static.\\n" "${INFO}" printf " %b If you set a new IP address, please restart the server running the Pi-hole\\n" "${INFO}"
INSTALL_TYPE="Installation" INSTALL_TYPE="Installation"
else else
INSTALL_TYPE="Update" INSTALL_TYPE="Update"


@@ -42,8 +42,8 @@ source "${PI_HOLE_FILES_DIR}/automated install/basic-install.sh"
# setupVars set in basic-install.sh # setupVars set in basic-install.sh
source "${setupVars}" source "${setupVars}"
# package_manager_detect() sourced from basic-install.sh # distro_check() sourced from basic-install.sh
package_manager_detect distro_check
# Install packages used by the Pi-hole # Install packages used by the Pi-hole
DEPS=("${INSTALLER_DEPS[@]}" "${PIHOLE_DEPS[@]}") DEPS=("${INSTALLER_DEPS[@]}" "${PIHOLE_DEPS[@]}")
@@ -145,7 +145,6 @@ removeNoPurge() {
${SUDO} rm -f /etc/dnsmasq.d/adList.conf &> /dev/null ${SUDO} rm -f /etc/dnsmasq.d/adList.conf &> /dev/null
${SUDO} rm -f /etc/dnsmasq.d/01-pihole.conf &> /dev/null ${SUDO} rm -f /etc/dnsmasq.d/01-pihole.conf &> /dev/null
${SUDO} rm -f /etc/dnsmasq.d/06-rfc6761.conf &> /dev/null
${SUDO} rm -rf /var/log/*pihole* &> /dev/null ${SUDO} rm -rf /var/log/*pihole* &> /dev/null
${SUDO} rm -rf /etc/pihole/ &> /dev/null ${SUDO} rm -rf /etc/pihole/ &> /dev/null
${SUDO} rm -rf /etc/.pihole/ &> /dev/null ${SUDO} rm -rf /etc/.pihole/ &> /dev/null
@@ -207,7 +206,11 @@ removeNoPurge() {
} }
######### SCRIPT ########### ######### SCRIPT ###########
if command -v vcgencmd &> /dev/null; then
echo -e " ${INFO} All dependencies are safe to remove on Raspbian"
else
echo -e " ${INFO} Be sure to confirm if any dependencies should not be removed" echo -e " ${INFO} Be sure to confirm if any dependencies should not be removed"
fi
while true; do while true; do
echo -e " ${INFO} ${COL_YELLOW}The following dependencies may have been added by the Pi-hole install:" echo -e " ${INFO} ${COL_YELLOW}The following dependencies may have been added by the Pi-hole install:"
echo -n " " echo -n " "


@@ -15,6 +15,8 @@ export LC_ALL=C
coltable="/opt/pihole/COL_TABLE" coltable="/opt/pihole/COL_TABLE"
source "${coltable}" source "${coltable}"
regexconverter="/opt/pihole/wildcard_regex_converter.sh"
source "${regexconverter}"
# shellcheck disable=SC1091 # shellcheck disable=SC1091
source "/etc/.pihole/advanced/Scripts/database_migration/gravity-db.sh" source "/etc/.pihole/advanced/Scripts/database_migration/gravity-db.sh"
@@ -45,6 +47,16 @@ domainsExtension="domains"
setupVars="${piholeDir}/setupVars.conf" setupVars="${piholeDir}/setupVars.conf"
if [[ -f "${setupVars}" ]];then if [[ -f "${setupVars}" ]];then
source "${setupVars}" source "${setupVars}"
# Remove CIDR mask from IPv4/6 addresses
IPV4_ADDRESS="${IPV4_ADDRESS%/*}"
IPV6_ADDRESS="${IPV6_ADDRESS%/*}"
# Determine if IPv4/6 addresses exist
if [[ -z "${IPV4_ADDRESS}" ]] && [[ -z "${IPV6_ADDRESS}" ]]; then
echo -e " ${COL_LIGHT_RED}No IP addresses found! Please run 'pihole -r' to reconfigure${COL_NC}"
exit 1
fi
else else
echo -e " ${COL_LIGHT_RED}Installation Failure: ${setupVars} does not exist! ${COL_NC} echo -e " ${COL_LIGHT_RED}Installation Failure: ${setupVars} does not exist! ${COL_NC}
Please run 'pihole -r', and choose the 'reconfigure' option to fix." Please run 'pihole -r', and choose the 'reconfigure' option to fix."
@@ -61,8 +73,6 @@ fi
# have changed # have changed
gravityDBfile="${GRAVITYDB}" gravityDBfile="${GRAVITYDB}"
gravityTEMPfile="${GRAVITYDB}_temp" gravityTEMPfile="${GRAVITYDB}_temp"
gravityDIR="$(dirname -- "${gravityDBfile}")"
gravityOLDfile="${gravityDIR}/gravity_old.db"
if [[ -z "${BLOCKINGMODE}" ]] ; then if [[ -z "${BLOCKINGMODE}" ]] ; then
BLOCKINGMODE="NULL" BLOCKINGMODE="NULL"
@@ -73,24 +83,19 @@ if [[ -r "${piholeDir}/pihole.conf" ]]; then
echo -e " ${COL_LIGHT_RED}Ignoring overrides specified within pihole.conf! ${COL_NC}" echo -e " ${COL_LIGHT_RED}Ignoring overrides specified within pihole.conf! ${COL_NC}"
fi fi
-# Generate new SQLite3 file from schema template
+# Generate new sqlite3 file from schema template
 generate_gravity_database() {
-    if ! pihole-FTL sqlite3 "${gravityDBfile}" < "${gravityDBschema}"; then
-        echo -e "   ${CROSS} Unable to create ${gravityDBfile}"
-        return 1
-    fi
-    chown pihole:pihole "${gravityDBfile}"
-    chmod g+w "${piholeDir}" "${gravityDBfile}"
+    sqlite3 "${1}" < "${gravityDBschema}"
 }
# Copy data from old to new database file and swap them # Copy data from old to new database file and swap them
gravity_swap_databases() { gravity_swap_databases() {
local str copyGravity oldAvail local str copyGravity
str="Building tree" str="Building tree"
echo -ne " ${INFO} ${str}..." echo -ne " ${INFO} ${str}..."
# The index is intentionally not UNIQUE as poor quality adlists may contain domains more than once # The index is intentionally not UNIQUE as poor quality adlists may contain domains more than once
output=$( { pihole-FTL sqlite3 "${gravityTEMPfile}" "CREATE INDEX idx_gravity ON gravity (domain, adlist_id);"; } 2>&1 ) output=$( { sqlite3 "${gravityTEMPfile}" "CREATE INDEX idx_gravity ON gravity (domain, adlist_id);"; } 2>&1 )
status="$?" status="$?"
if [[ "${status}" -ne 0 ]]; then if [[ "${status}" -ne 0 ]]; then
@@ -102,31 +107,30 @@ gravity_swap_databases() {
str="Swapping databases" str="Swapping databases"
echo -ne " ${INFO} ${str}..." echo -ne " ${INFO} ${str}..."
# Swap databases and remove or conditionally rename old database # Gravity copying SQL script
# Number of available blocks on disk copyGravity="$(cat "${gravityDBcopy}")"
availableBlocks=$(stat -f --format "%a" "${gravityDIR}") if [[ "${gravityDBfile}" != "${gravityDBfile_default}" ]]; then
# Number of blocks, used by gravity.db # Replace default gravity script location by custom location
gravityBlocks=$(stat --format "%b" ${gravityDBfile}) copyGravity="${copyGravity//"${gravityDBfile_default}"/"${gravityDBfile}"}"
# Only keep the old database if available disk space is at least twice the size of the existing gravity.db. fi
# Better be safe than sorry...
oldAvail=false output=$( { sqlite3 "${gravityTEMPfile}" <<< "${copyGravity}"; } 2>&1 )
if [ "${availableBlocks}" -gt "$((gravityBlocks * 2))" ] && [ -f "${gravityDBfile}" ]; then status="$?"
oldAvail=true
mv "${gravityDBfile}" "${gravityOLDfile}" if [[ "${status}" -ne 0 ]]; then
else echo -e "\\n ${CROSS} Unable to copy data from ${gravityDBfile} to ${gravityTEMPfile}\\n ${output}"
rm "${gravityDBfile}" return 1
fi fi
mv "${gravityTEMPfile}" "${gravityDBfile}"
echo -e "${OVER} ${TICK} ${str}" echo -e "${OVER} ${TICK} ${str}"
if $oldAvail; then # Swap databases and remove old database
echo -e " ${TICK} The old database remains available." rm "${gravityDBfile}"
fi mv "${gravityTEMPfile}" "${gravityDBfile}"
} }
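A worked example of the free-space guard on the "-" side (numbers hypothetical): if gravity.db occupies 20,000 blocks and stat reports 100,000 available blocks for the directory, 100,000 is greater than 2 x 20,000, so the old database is renamed to gravity_old.db and kept; with only 30,000 free blocks it would simply be removed before the temporary database is moved into place.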
# Update timestamp when the gravity table was last updated successfully # Update timestamp when the gravity table was last updated successfully
update_gravity_timestamp() { update_gravity_timestamp() {
output=$( { printf ".timeout 30000\\nINSERT OR REPLACE INTO info (property,value) values ('updated',cast(strftime('%%s', 'now') as int));" | pihole-FTL sqlite3 "${gravityDBfile}"; } 2>&1 ) output=$( { printf ".timeout 30000\\nINSERT OR REPLACE INTO info (property,value) values ('updated',cast(strftime('%%s', 'now') as int));" | sqlite3 "${gravityDBfile}"; } 2>&1 )
status="$?" status="$?"
if [[ "${status}" -ne 0 ]]; then if [[ "${status}" -ne 0 ]]; then
@@ -167,7 +171,7 @@ database_table_from_file() {
# Get MAX(id) from domainlist when INSERTing into this table # Get MAX(id) from domainlist when INSERTing into this table
if [[ "${table}" == "domainlist" ]]; then if [[ "${table}" == "domainlist" ]]; then
rowid="$(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT MAX(id) FROM domainlist;")" rowid="$(sqlite3 "${gravityDBfile}" "SELECT MAX(id) FROM domainlist;")"
if [[ -z "$rowid" ]]; then if [[ -z "$rowid" ]]; then
rowid=0 rowid=0
fi fi
@@ -197,7 +201,7 @@ database_table_from_file() {
# Store domains in database table specified by ${table} # Store domains in database table specified by ${table}
# Use printf as .mode and .import need to be on separate lines # Use printf as .mode and .import need to be on separate lines
# see https://unix.stackexchange.com/a/445615/83260 # see https://unix.stackexchange.com/a/445615/83260
output=$( { printf ".timeout 30000\\n.mode csv\\n.import \"%s\" %s\\n" "${tmpFile}" "${table}" | pihole-FTL sqlite3 "${gravityDBfile}"; } 2>&1 ) output=$( { printf ".timeout 30000\\n.mode csv\\n.import \"%s\" %s\\n" "${tmpFile}" "${table}" | sqlite3 "${gravityDBfile}"; } 2>&1 )
status="$?" status="$?"
if [[ "${status}" -ne 0 ]]; then if [[ "${status}" -ne 0 ]]; then
@@ -217,7 +221,7 @@ database_table_from_file() {
# Update timestamp of last update of this list. We store this in the "old" database as all values in the new database will later be overwritten # Update timestamp of last update of this list. We store this in the "old" database as all values in the new database will later be overwritten
database_adlist_updated() { database_adlist_updated() {
output=$( { printf ".timeout 30000\\nUPDATE adlist SET date_updated = (cast(strftime('%%s', 'now') as int)) WHERE id = %i;\\n" "${1}" | pihole-FTL sqlite3 "${gravityDBfile}"; } 2>&1 ) output=$( { printf ".timeout 30000\\nUPDATE adlist SET date_updated = (cast(strftime('%%s', 'now') as int)) WHERE id = %i;\\n" "${1}" | sqlite3 "${gravityDBfile}"; } 2>&1 )
status="$?" status="$?"
if [[ "${status}" -ne 0 ]]; then if [[ "${status}" -ne 0 ]]; then
@@ -228,7 +232,7 @@ database_adlist_updated() {
# Check if a column with name ${2} exists in gravity table with name ${1} # Check if a column with name ${2} exists in gravity table with name ${1}
gravity_column_exists() { gravity_column_exists() {
output=$( { printf ".timeout 30000\\nSELECT EXISTS(SELECT * FROM pragma_table_info('%s') WHERE name='%s');\\n" "${1}" "${2}" | pihole-FTL sqlite3 "${gravityDBfile}"; } 2>&1 ) output=$( { printf ".timeout 30000\\nSELECT EXISTS(SELECT * FROM pragma_table_info('%s') WHERE name='%s');\\n" "${1}" "${2}" | sqlite3 "${gravityDBfile}"; } 2>&1 )
if [[ "${output}" == "1" ]]; then if [[ "${output}" == "1" ]]; then
return 0 # Bash 0 is success return 0 # Bash 0 is success
fi fi
@@ -243,7 +247,7 @@ database_adlist_number() {
return; return;
fi fi
output=$( { printf ".timeout 30000\\nUPDATE adlist SET number = %i, invalid_domains = %i WHERE id = %i;\\n" "${num_source_lines}" "${num_invalid}" "${1}" | pihole-FTL sqlite3 "${gravityDBfile}"; } 2>&1 ) output=$( { printf ".timeout 30000\\nUPDATE adlist SET number = %i, invalid_domains = %i WHERE id = %i;\\n" "${num_lines}" "${num_invalid}" "${1}" | sqlite3 "${gravityDBfile}"; } 2>&1 )
status="$?" status="$?"
if [[ "${status}" -ne 0 ]]; then if [[ "${status}" -ne 0 ]]; then
@@ -259,7 +263,7 @@ database_adlist_status() {
return; return;
fi fi
output=$( { printf ".timeout 30000\\nUPDATE adlist SET status = %i WHERE id = %i;\\n" "${2}" "${1}" | pihole-FTL sqlite3 "${gravityDBfile}"; } 2>&1 ) output=$( { printf ".timeout 30000\\nUPDATE adlist SET status = %i WHERE id = %i;\\n" "${2}" "${1}" | sqlite3 "${gravityDBfile}"; } 2>&1 )
status="$?" status="$?"
if [[ "${status}" -ne 0 ]]; then if [[ "${status}" -ne 0 ]]; then
@@ -274,10 +278,7 @@ migrate_to_database() {
     if [ ! -e "${gravityDBfile}" ]; then
         # Create new database file - note that this will be created in version 1
         echo -e "  ${INFO} Creating new gravity database"
-        if ! generate_gravity_database; then
-            echo -e "   ${CROSS} Error creating new gravity database. Please contact support."
-            return 1
-        fi
+        generate_gravity_database "${gravityDBfile}"
# Check if gravity database needs to be updated # Check if gravity database needs to be updated
upgrade_gravityDB "${gravityDBfile}" "${piholeDir}" upgrade_gravityDB "${gravityDBfile}" "${piholeDir}"
@@ -376,9 +377,9 @@ gravity_DownloadBlocklists() {
fi fi
# Retrieve source URLs from gravity database # Retrieve source URLs from gravity database
# We source only enabled adlists, SQLite3 stores boolean values as 0 (false) or 1 (true) # We source only enabled adlists, sqlite3 stores boolean values as 0 (false) or 1 (true)
mapfile -t sources <<< "$(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT address FROM vw_adlist;" 2> /dev/null)" mapfile -t sources <<< "$(sqlite3 "${gravityDBfile}" "SELECT address FROM vw_adlist;" 2> /dev/null)"
mapfile -t sourceIDs <<< "$(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT id FROM vw_adlist;" 2> /dev/null)" mapfile -t sourceIDs <<< "$(sqlite3 "${gravityDBfile}" "SELECT id FROM vw_adlist;" 2> /dev/null)"
# Parse source domains from $sources # Parse source domains from $sources
mapfile -t sourceDomains <<< "$( mapfile -t sourceDomains <<< "$(
@@ -392,12 +393,14 @@ gravity_DownloadBlocklists() {
)" )"
local str="Pulling blocklist source list into range" local str="Pulling blocklist source list into range"
echo -e "${OVER} ${TICK} ${str}"
if [[ -z "${sources[*]}" ]] || [[ -z "${sourceDomains[*]}" ]]; then if [[ -n "${sources[*]}" ]] && [[ -n "${sourceDomains[*]}" ]]; then
echo -e "${OVER} ${TICK} ${str}"
else
echo -e "${OVER} ${CROSS} ${str}"
echo -e " ${INFO} No source list found, or it is empty" echo -e " ${INFO} No source list found, or it is empty"
echo "" echo ""
unset sources return 1
fi fi
local url domain agent cmd_ext str target compression local url domain agent cmd_ext str target compression
@@ -407,7 +410,7 @@ gravity_DownloadBlocklists() {
str="Preparing new gravity database" str="Preparing new gravity database"
echo -ne " ${INFO} ${str}..." echo -ne " ${INFO} ${str}..."
rm "${gravityTEMPfile}" > /dev/null 2>&1 rm "${gravityTEMPfile}" > /dev/null 2>&1
output=$( { pihole-FTL sqlite3 "${gravityTEMPfile}" < "${gravityDBschema}"; } 2>&1 ) output=$( { sqlite3 "${gravityTEMPfile}" < "${gravityDBschema}"; } 2>&1 )
status="$?" status="$?"
if [[ "${status}" -ne 0 ]]; then if [[ "${status}" -ne 0 ]]; then
@@ -465,28 +468,9 @@ gravity_DownloadBlocklists() {
echo "" echo ""
done done
str="Creating new gravity databases"
echo -ne " ${INFO} ${str}..."
# Gravity copying SQL script
copyGravity="$(cat "${gravityDBcopy}")"
if [[ "${gravityDBfile}" != "${gravityDBfile_default}" ]]; then
# Replace default gravity script location by custom location
copyGravity="${copyGravity//"${gravityDBfile_default}"/"${gravityDBfile}"}"
fi
output=$( { pihole-FTL sqlite3 "${gravityTEMPfile}" <<< "${copyGravity}"; } 2>&1 )
status="$?"
if [[ "${status}" -ne 0 ]]; then
echo -e "\\n ${CROSS} Unable to copy data from ${gravityDBfile} to ${gravityTEMPfile}\\n ${output}"
return 1
fi
echo -e "${OVER} ${TICK} ${str}"
str="Storing downloaded domains in new gravity database" str="Storing downloaded domains in new gravity database"
echo -ne " ${INFO} ${str}..." echo -ne " ${INFO} ${str}..."
output=$( { printf ".timeout 30000\\n.mode csv\\n.import \"%s\" gravity\\n" "${target}" | pihole-FTL sqlite3 "${gravityTEMPfile}"; } 2>&1 ) output=$( { printf ".timeout 30000\\n.mode csv\\n.import \"%s\" gravity\\n" "${target}" | sqlite3 "${gravityTEMPfile}"; } 2>&1 )
status="$?" status="$?"
if [[ "${status}" -ne 0 ]]; then if [[ "${status}" -ne 0 ]]; then
@@ -518,9 +502,8 @@ gravity_DownloadBlocklists() {
gravity_Blackbody=true gravity_Blackbody=true
} }
-# num_target_lines does increase for every correctly added domain in parseList()
-num_target_lines=0
-num_source_lines=0
+total_num=0
+num_lines=0
 num_invalid=0
parseList() { parseList() {
local adlistID="${1}" src="${2}" target="${3}" incorrect_lines local adlistID="${1}" src="${2}" target="${3}" incorrect_lines
@@ -532,20 +515,18 @@ parseList() {
     # Find (up to) five domains containing invalid characters (see above)
     incorrect_lines="$(sed -e "/[^a-zA-Z0-9.\_-]/!d" "${src}" | head -n 5)"
-    local num_target_lines_new num_correct_lines
+    local num_target_lines num_correct_lines num_invalid
     # Get number of lines in source file
-    num_source_lines="$(grep -c "^" "${src}")"
-    # Get the new number of lines in destination file
-    num_target_lines_new="$(grep -c "^" "${target}")"
-    # Number of new correctly added lines
-    num_correct_lines="$(( num_target_lines_new-num_target_lines ))"
-    # Update number of lines in target file
-    num_target_lines="$num_target_lines_new"
-    num_invalid="$(( num_source_lines-num_correct_lines ))"
+    num_lines="$(grep -c "^" "${src}")"
+    # Get number of lines in destination file
+    num_target_lines="$(grep -c "^" "${target}")"
+    num_correct_lines="$(( num_target_lines-total_num ))"
+    total_num="$num_target_lines"
+    num_invalid="$(( num_lines-num_correct_lines ))"
     if [[ "${num_invalid}" -eq 0 ]]; then
-        echo "  ${INFO} Analyzed ${num_source_lines} domains"
+        echo "  ${INFO} Analyzed ${num_lines} domains"
     else
-        echo "  ${INFO} Analyzed ${num_source_lines} domains, ${num_invalid} domains invalid!"
+        echo "  ${INFO} Analyzed ${num_lines} domains, ${num_invalid} domains invalid!"
     fi
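A worked example of the counting above (numbers hypothetical): if the downloaded source file has 1,000 lines and 990 of them make it into the target file, the number of correctly added lines is 990 and the number of invalid domains is 1,000 - 990 = 10, so the status line reads "Analyzed 1000 domains, 10 domains invalid!".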
# Display sample of invalid lines if we found some # Display sample of invalid lines if we found some
@@ -583,7 +564,7 @@ compareLists() {
# Download specified URL and perform checks on HTTP status and file content # Download specified URL and perform checks on HTTP status and file content
gravity_DownloadBlocklistFromUrl() { gravity_DownloadBlocklistFromUrl() {
local url="${1}" cmd_ext="${2}" agent="${3}" adlistID="${4}" saveLocation="${5}" target="${6}" compression="${7}" local url="${1}" cmd_ext="${2}" agent="${3}" adlistID="${4}" saveLocation="${5}" target="${6}" compression="${7}"
local heisenbergCompensator="" patternBuffer str httpCode success="" ip local heisenbergCompensator="" patternBuffer str httpCode success=""
# Create temp file to store content on disk instead of RAM # Create temp file to store content on disk instead of RAM
patternBuffer=$(mktemp -p "/tmp" --suffix=".phgpb") patternBuffer=$(mktemp -p "/tmp" --suffix=".phgpb")
@@ -601,20 +582,13 @@ gravity_DownloadBlocklistFromUrl() {
blocked=false blocked=false
case $BLOCKINGMODE in case $BLOCKINGMODE in
"IP-NODATA-AAAA"|"IP") "IP-NODATA-AAAA"|"IP")
-            # Get IP address of this domain
-            ip="$(dig "${domain}" +short)"
-            # Check if this IP matches any IP of the system
-            if [[ -n "${ip}" && $(grep -Ec "inet(|6) ${ip}" <<< "$(ip a)") -gt 0 ]]; then
+            if [[ $(dig "${domain}" +short | grep "${IPV4_ADDRESS}" -c) -ge 1 ]]; then
                 blocked=true
             fi;;
"NXDOMAIN") "NXDOMAIN")
if [[ $(dig "${domain}" | grep "NXDOMAIN" -c) -ge 1 ]]; then if [[ $(dig "${domain}" | grep "NXDOMAIN" -c) -ge 1 ]]; then
blocked=true blocked=true
fi;; fi;;
"NODATA")
if [[ $(dig "${domain}" | grep "NOERROR" -c) -ge 1 ]] && [[ -z $(dig +short "${domain}") ]]; then
blocked=true
fi;;
"NULL"|*) "NULL"|*)
if [[ $(dig "${domain}" +short | grep "0.0.0.0" -c) -ge 1 ]]; then if [[ $(dig "${domain}" +short | grep "0.0.0.0" -c) -ge 1 ]]; then
blocked=true blocked=true
@@ -708,7 +682,7 @@ gravity_DownloadBlocklistFromUrl() {
else else
echo -e " ${CROSS} List download failed: ${COL_LIGHT_RED}no cached list available${COL_NC}" echo -e " ${CROSS} List download failed: ${COL_LIGHT_RED}no cached list available${COL_NC}"
# Manually reset these two numbers because we do not call parseList here # Manually reset these two numbers because we do not call parseList here
num_source_lines=0 num_lines=0
num_invalid=0 num_invalid=0
database_adlist_number "${adlistID}" database_adlist_number "${adlistID}"
database_adlist_status "${adlistID}" "4" database_adlist_status "${adlistID}" "4"
@@ -791,12 +765,12 @@ gravity_Table_Count() {
local table="${1}" local table="${1}"
local str="${2}" local str="${2}"
local num local num
num="$(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT COUNT(*) FROM ${table};")" num="$(sqlite3 "${gravityDBfile}" "SELECT COUNT(*) FROM ${table};")"
if [[ "${table}" == "vw_gravity" ]]; then if [[ "${table}" == "vw_gravity" ]]; then
local unique local unique
unique="$(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT COUNT(DISTINCT domain) FROM ${table};")" unique="$(sqlite3 "${gravityDBfile}" "SELECT COUNT(DISTINCT domain) FROM ${table};")"
echo -e " ${INFO} Number of ${str}: ${num} (${COL_BOLD}${unique} unique domains${COL_NC})" echo -e " ${INFO} Number of ${str}: ${num} (${COL_BOLD}${unique} unique domains${COL_NC})"
pihole-FTL sqlite3 "${gravityDBfile}" "INSERT OR REPLACE INTO info (property,value) VALUES ('gravity_count',${unique});" sqlite3 "${gravityDBfile}" "INSERT OR REPLACE INTO info (property,value) VALUES ('gravity_count',${unique});"
else else
echo -e " ${INFO} Number of ${str}: ${num}" echo -e " ${INFO} Number of ${str}: ${num}"
fi fi
@@ -811,12 +785,43 @@ gravity_ShowCount() {
gravity_Table_Count "vw_regex_whitelist" "regex whitelist filters" gravity_Table_Count "vw_regex_whitelist" "regex whitelist filters"
} }
# Parse list of domains into hosts format
gravity_ParseDomainsIntoHosts() {
awk -v ipv4="$IPV4_ADDRESS" -v ipv6="$IPV6_ADDRESS" '{
# Remove windows CR line endings
sub(/\r$/, "")
# Parse each line as "ipaddr domain"
if(ipv6 && ipv4) {
print ipv4" "$0"\n"ipv6" "$0
} else if(!ipv6) {
print ipv4" "$0
} else {
print ipv6" "$0
}
}' >> "${2}" < "${1}"
}
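For example (addresses hypothetical): with IPV4_ADDRESS=192.168.1.2 and no IPV6_ADDRESS, the input line "pi.hole" is emitted as the hosts entry "192.168.1.2 pi.hole"; if both address families are configured, two lines are produced, one per address.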
# Create "localhost" entries into hosts format # Create "localhost" entries into hosts format
gravity_generateLocalList() { gravity_generateLocalList() {
local hostname
if [[ -s "/etc/hostname" ]]; then
hostname=$(< "/etc/hostname")
elif command -v hostname &> /dev/null; then
hostname=$(hostname -f)
else
echo -e " ${CROSS} Unable to determine fully qualified domain name of host"
return 0
fi
echo -e "${hostname}\\npi.hole" > "${localList}.tmp"
# Empty $localList if it already exists, otherwise, create it # Empty $localList if it already exists, otherwise, create it
echo "### Do not modify this file, it will be overwritten by pihole -g" > "${localList}" : > "${localList}"
chmod 644 "${localList}" chmod 644 "${localList}"
gravity_ParseDomainsIntoHosts "${localList}.tmp" "${localList}"
# Add additional LAN hosts provided by OpenVPN (if available) # Add additional LAN hosts provided by OpenVPN (if available)
if [[ -f "${VPNList}" ]]; then if [[ -f "${VPNList}" ]]; then
awk -F, '{printf $2"\t"$1".vpn\n"}' "${VPNList}" >> "${localList}" awk -F, '{printf $2"\t"$1".vpn\n"}' "${VPNList}" >> "${localList}"
@@ -867,49 +872,6 @@ gravity_Cleanup() {
fi fi
} }
database_recovery() {
local result
local str="Checking integrity of existing gravity database"
local option="${1}"
echo -ne " ${INFO} ${str}..."
if result="$(pihole-FTL sqlite3 "${gravityDBfile}" "PRAGMA integrity_check" 2>&1)"; then
echo -e "${OVER} ${TICK} ${str} - no errors found"
str="Checking foreign keys of existing gravity database"
echo -ne " ${INFO} ${str}..."
if result="$(pihole-FTL sqlite3 "${gravityDBfile}" "PRAGMA foreign_key_check" 2>&1)"; then
echo -e "${OVER} ${TICK} ${str} - no errors found"
if [[ "${option}" != "force" ]]; then
return
fi
else
echo -e "${OVER} ${CROSS} ${str} - errors found:"
while IFS= read -r line ; do echo " - $line"; done <<< "$result"
fi
else
echo -e "${OVER} ${CROSS} ${str} - errors found:"
while IFS= read -r line ; do echo " - $line"; done <<< "$result"
fi
str="Trying to recover existing gravity database"
echo -ne " ${INFO} ${str}..."
# We have to remove any possibly existing recovery database or this will fail
rm -f "${gravityDBfile}.recovered" > /dev/null 2>&1
if result="$(pihole-FTL sqlite3 "${gravityDBfile}" ".recover" | pihole-FTL sqlite3 "${gravityDBfile}.recovered" 2>&1)"; then
echo -e "${OVER} ${TICK} ${str} - success"
mv "${gravityDBfile}" "${gravityDBfile}.old"
mv "${gravityDBfile}.recovered" "${gravityDBfile}"
echo -ne " ${INFO} ${gravityDBfile} has been recovered"
echo -ne " ${INFO} The old ${gravityDBfile} has been moved to ${gravityDBfile}.old"
else
echo -e "${OVER} ${CROSS} ${str} - the following errors happened:"
while IFS= read -r line ; do echo " - $line"; done <<< "$result"
echo -e " ${CROSS} Recovery failed. Try \"pihole -r recreate\" instead."
exit 1
fi
echo ""
}
helpFunc() { helpFunc() {
echo "Usage: pihole -g echo "Usage: pihole -g
Update domains from blocklists specified in adlists.list Update domains from blocklists specified in adlists.list
@@ -920,51 +882,19 @@ Options:
exit 0 exit 0
} }
repairSelector() {
case "$1" in
"recover") recover_database=true;;
"recreate") recreate_database=true;;
*) echo "Usage: pihole -g -r {recover,recreate}
Attempt to repair gravity database
Available options:
pihole -g -r recover Try to recover a damaged gravity database file.
Pi-hole tries to restore as much as possible
from a corrupted gravity database.
pihole -g -r recover force Pi-hole will run the recovery process even when
no damage is detected. This option is meant to be
a last resort. Recovery is a fragile task
consuming a lot of resources and shouldn't be
performed unnecessarily.
pihole -g -r recreate Create a new gravity database file from scratch.
This will remove your existing gravity database
and create a new file from scratch. If you still
have the migration backup created when migrating
to Pi-hole v5.0, Pi-hole will import these files."
exit 0;;
esac
}
 for var in "$@"; do
     case "${var}" in
         "-f" | "--force" ) forceDelete=true;;
-        "-r" | "--repair" ) repairSelector "$3";;
+        "-r" | "--recreate" ) recreate_database=true;;
         "-h" | "--help" ) helpFunc;;
     esac
 done
# Remove OLD (backup) gravity file, if it exists
if [[ -f "${gravityOLDfile}" ]]; then
rm "${gravityOLDfile}"
fi
# Trap Ctrl-C # Trap Ctrl-C
gravity_Trap gravity_Trap
if [[ "${recreate_database:-}" == true ]]; then if [[ "${recreate_database:-}" == true ]]; then
str="Recreating gravity database from migration backup" str="Restoring from migration backup"
echo -ne "${INFO} ${str}..." echo -ne "${INFO} ${str}..."
rm "${gravityDBfile}" rm "${gravityDBfile}"
pushd "${piholeDir}" > /dev/null || exit pushd "${piholeDir}" > /dev/null || exit
@@ -973,15 +903,8 @@ if [[ "${recreate_database:-}" == true ]]; then
echo -e "${OVER} ${TICK} ${str}" echo -e "${OVER} ${TICK} ${str}"
fi fi
if [[ "${recover_database:-}" == true ]]; then
database_recovery "$4"
fi
# Move possibly existing legacy files to the gravity database # Move possibly existing legacy files to the gravity database
if ! migrate_to_database; then migrate_to_database
echo -e " ${CROSS} Unable to migrate to database. Please contact support."
exit 1
fi
if [[ "${forceDelete:-}" == true ]]; then if [[ "${forceDelete:-}" == true ]]; then
str="Deleting existing list cache" str="Deleting existing list cache"
@@ -992,21 +915,14 @@ if [[ "${forceDelete:-}" == true ]]; then
fi fi
# Gravity downloads blocklists next # Gravity downloads blocklists next
if ! gravity_CheckDNSResolutionAvailable; then gravity_CheckDNSResolutionAvailable
echo -e " ${CROSS} Can not complete gravity update, no DNS is available. Please contact support."
exit 1
fi
gravity_DownloadBlocklists gravity_DownloadBlocklists
# Create local.list # Create local.list
gravity_generateLocalList gravity_generateLocalList
# Migrate rest of the data from old to new database # Migrate rest of the data from old to new database
if ! gravity_swap_databases; then gravity_swap_databases
echo -e " ${CROSS} Unable to create database. Please contact support."
exit 1
fi
# Update gravity timestamp # Update gravity timestamp
update_gravity_timestamp update_gravity_timestamp


@@ -144,9 +144,7 @@ Command line arguments can be arbitrarily combined, e.g:
 Start ftl in foreground with more verbose logging, process everything and shutdown immediately
 .br
 .SH "SEE ALSO"
-\fBpihole\fR(8)
-.br
-\fBFor FTL's config options please see https://docs.pi-hole.net/ftldns/configfile/\fR
+\fBpihole\fR(8), \fBpihole-FTL.conf\fR(5)
 .br
 .SH "COLOPHON"

manpages/pihole-FTL.conf.5 Normal file

@@ -0,0 +1,313 @@
.TH "pihole-FTL.conf" "5" "pihole-FTL.conf" "pihole-FTL.conf" "November 2020"
.SH "NAME"
pihole-FTL.conf - FTL's config file
.br
.SH "DESCRIPTION"
/etc/pihole/pihole-FTL.conf will be read by \fBpihole-FTL(8)\fR on startup.
.br
For each setting the option shown first is the default.
.br
\fBBLOCKINGMODE=NULL|IP-NODATA-AAAA|IP|NXDOMAIN|NODATA\fR
.br
How should FTL reply to blocked queries?
NULL - Null IPs for blocked domains
IP-NODATA-AAAA - Pi-hole's IP + NODATA-IPv6 for blocked domains
IP - Pi-hole's IPs for blocked domains
NXDOMAIN - NXDOMAIN for blocked domains
NODATA - Using NODATA for blocked domains
.br
\fBCNAME_DEEP_INSPECT=true|false\fR
.br
Use this option to disable deep CNAME inspection. This might be beneficial for very low-end devices.
.br
\fBBLOCK_ESNI=true|false\fR
.br
Block requests to _esni.* sub-domains.
.br
\fBMAXLOGAGE=24.0\fR
.br
Up to how many hours of queries should be imported from the database and logs?
.br
Maximum is 744 (31 days)
.br
\fBPRIVACYLEVEL=0|1|2|3|4\fR
.br
Privacy level used to collect Pi-hole statistics.
.br
0 - show everything
.br
1 - hide domains
.br
2 - hide domains and clients
.br
3 - anonymous mode (hide everything)
.br
4 - disable all statistics
.br
\fBIGNORE_LOCALHOST=no|yes\fR
.br
Should FTL ignore queries coming from the local machine?
.br
\fBAAAA_QUERY_ANALYSIS=yes|no\fR
.br
Should FTL analyze AAAA queries?
.br
\fBANALYZE_ONLY_A_AND_AAAA=false|true\fR
.br
Should FTL only analyze A and AAAA queries?
.br
\fBSOCKET_LISTENING=localonly|all\fR
.br
Listen only for local socket connections on the API port or permit all connections.
.br
\fBFTLPORT=4711\fR
.br
On which port should FTL be listening?
.br
\fBRESOLVE_IPV6=yes|no\fR
.br
Should FTL try to resolve IPv6 addresses to hostnames?
.br
\fBRESOLVE_IPV4=yes|no\fR
.br
Should FTL try to resolve IPv4 addresses to hostnames?
.br
\fBDELAY_STARTUP=0\fR
.br
Time in seconds (between 0 and 300) to delay FTL startup.
.br
\fBNICE=-10\fR
.br
Set the niceness of the Pi-hole FTL process.
.br
Can be disabled altogether by setting a value of -999.
.br
\fBNAMES_FROM_NETDB=true|false\fR
.br
Control whether FTL should use a fallback option and try to obtain client names from checking the network table.
.br
E.g. IPv6 clients without a hostname will be compared via MAC address to known clients.
.br
\fBREFRESH_HOSTNAMES=IPV4|ALL|NONE\fR
.br
Change how (and if) hourly PTR requests are made to check for changes in client and upstream server hostnames:
.br
IPV4 - Do the hourly PTR lookups only for IPv4 addresses resolving issues in networks with many short-lived PE IPv6 addresses.
.br
ALL - Do the hourly PTR lookups for all addresses. This can create a lot of PTR queries in networks with many IPv6 addresses.
.br
NONE - Don't do hourly PTR lookups. Look up hostnames once (when first seeing a client) and never again. Future hostname changes may be missed.
.br
\fBMAXNETAGE=365\fR
.br
IP addresses (and associated host names) older than the specified number of days are removed.
.br
This avoids dead entries in the network overview table.
.br
\fBEDNS0_ECS=true|false\fR
.br
Should we overwrite the query source when client information is provided through EDNS0 client subnet (ECS) information?
.br
\fBPARSE_ARP_CACHE=true|false\fR
.br
Parse ARP cache to fill network overview table.
.br
\fBDBIMPORT=yes|no\fR
.br
Should FTL load information from the database on startup to be aware of the most recent history?
.br
\fBMAXDBDAYS=365\fR
.br
How long should queries be stored in the database? Setting this to 0 disables the database
.br
\fBDBINTERVAL=1.0\fR
.br
How often do we store queries in FTL's database [minutes]?
.br
Accepts value between 0.1 (6 sec) and 1440 (1 day)
.br
\fBDBFILE=/etc/pihole/pihole-FTL.db\fR
.br
Specify path and filename of FTL's SQLite long-term database.
.br
Setting this to DBFILE= disables the database altogether
.br
\fBLOGFILE=/var/log/pihole-FTL.log\fR
.br
The location of FTL's log file.
.br
\fBPIDFILE=/run/pihole-FTL.pid\fR
.br
The file which contains the PID of FTL's main process.
.br
\fBPORTFILE=/run/pihole-FTL.port\fR
.br
Specify path and filename where the FTL process will write its API port number.
.br
\fBSOCKETFILE=/run/pihole/FTL.sock\fR
.br
The file containing the socket FTL's API is listening on.
.br
\fBSETUPVARSFILE=/etc/pihole/setupVars.conf\fR
.br
The config file of Pi-hole containing, e.g., the current blocking status (do not change).
.br
\fBMACVENDORDB=/etc/pihole/macvendor.db\fR
.br
The database containing MAC -> Vendor information for the network table.
.br
\fBGRAVITYDB=/etc/pihole/gravity.db\fR
.br
Specify path and filename of FTL's SQLite3 gravity database. This database contains all domains relevant for Pi-hole's DNS blocking.
.br
\fBDEBUG_ALL=false|true\fR
.br
Enable all debug flags. If this is set to true, all other debug config options are ignored.
.br
\fBDEBUG_DATABASE=false|true\fR
.br
Print debugging information about database actions such as SQL statements and performance.
.br
\fBDEBUG_NETWORKING=false|true\fR
.br
Prints a list of the detected network interfaces on the startup of FTL.
.br
\fBDEBUG_LOCKS=false|true\fR
.br
Print information about shared memory locks.
.br
Messages will be generated when waiting, obtaining, and releasing a lock.
.br
\fBDEBUG_QUERIES=false|true\fR
.br
Print extensive DNS query information (domains, types, replies, etc.).
.br
\fBDEBUG_FLAGS=false|true\fR
.br
Print flags of queries received by the DNS hooks.
.br
Only effective when \fBDEBUG_QUERIES\fR is enabled as well.
.br
\fBDEBUG_SHMEM=false|true\fR
.br
Print information about shared memory buffers.
.br
Messages are either about creating or enlarging shmem objects or string injections.
.br
\fBDEBUG_GC=false|true\fR
.br
Print information about garbage collection (GC):
.br
What is to be removed, how many have been removed and how long did GC take.
.br
\fBDEBUG_ARP=false|true\fR
.br
Print information about ARP table processing:
.br
How long did parsing take, whether read MAC addresses are valid, and if the macvendor.db file exists.
.br
\fBDEBUG_REGEX=false|true\fR
.br
Controls if FTL should print extended details about regex matching.
.br
\fBDEBUG_API=false|true\fR
.br
Print extra debugging information during telnet API calls.
.br
Currently only used to send extra information when getting all queries.
.br
\fBDEBUG_OVERTIME=false|true\fR
.br
Print information about overTime memory operations, such as initializing or moving overTime slots.
.br
\fBDEBUG_EXTBLOCKED=false|true\fR
.br
Print information about why FTL decided that certain queries were recognized as being externally blocked.
.br
\fBDEBUG_CAPS=false|true\fR
.br
Print information about POSIX capabilities granted to the FTL process.
.br
The current capabilities are printed on receipt of SIGHUP i.e. after executing `killall -HUP pihole-FTL`.
.br
\fBDEBUG_DNSMASQ_LINES=false|true\fR
.br
Print file and line causing a dnsmasq event into FTL's log files.
.br
This is handy to implement additional hooks missing from FTL.
.br
\fBDEBUG_VECTORS=false|true\fR
.br
FTL uses dynamically allocated vectors for various tasks.
.br
This config option enables extensive debugging information such as information about allocation, referencing, deletion, and appending.
.br
\fBDEBUG_RESOLVER=false|true\fR
.br
Extensive information about hostname resolution like which DNS servers are used in the first and second hostname resolving tries.
.br
.SH "SEE ALSO"
\fBpihole\fR(8), \fBpihole-FTL\fR(8)
.br
.SH "COLOPHON"
Pi-hole : The Faster-Than-Light (FTL) Engine is a lightweight, purpose-built daemon used to provide statistics needed for the Pi-hole Web Interface, and its API can be easily integrated into your own projects. Although it is an optional component of the Pi-hole ecosystem, it will be installed by default to provide statistics. As the name implies, FTL does its work \fIvery quickly\fR!
.br
Get sucked into the latest news and community activity by entering Pi-hole's orbit. Information about Pi-hole, and the latest version of the software can be found at https://pi-hole.net
.br


@@ -56,7 +56,7 @@ Available commands and options:
 \fB-w, whitelist\fR [options] [<domain1> <domain2 ...>]
 .br
-Adds or removes specified domain or domains to the Whitelist
+Adds or removes specified domain or domains tho the Whitelist
 .br
 \fB-b, blacklist\fR [options] [<domain1> <domain2 ...>]
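.br
(For illustration only, with a hypothetical domain: \fBpihole -w example.com\fR adds example.com to the Whitelist and \fBpihole -b example.com\fR adds it to the Blacklist; several domains may be passed in one call.)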

pihole

@@ -21,9 +21,6 @@ readonly FTL_PID_FILE="/run/pihole-FTL.pid"
readonly colfile="${PI_HOLE_SCRIPT_DIR}/COL_TABLE" readonly colfile="${PI_HOLE_SCRIPT_DIR}/COL_TABLE"
source "${colfile}" source "${colfile}"
readonly utilsfile="${PI_HOLE_SCRIPT_DIR}/utils.sh"
source "${utilsfile}"
webpageFunc() { webpageFunc() {
source "${PI_HOLE_SCRIPT_DIR}/webpage.sh" source "${PI_HOLE_SCRIPT_DIR}/webpage.sh"
main "$@" main "$@"
@@ -74,7 +71,8 @@ reconfigurePiholeFunc() {
} }
updateGravityFunc() { updateGravityFunc() {
exec "${PI_HOLE_SCRIPT_DIR}"/gravity.sh "$@" "${PI_HOLE_SCRIPT_DIR}"/gravity.sh "$@"
exit $?
} }
queryFunc() { queryFunc() {
@@ -97,7 +95,8 @@ uninstallFunc() {
versionFunc() { versionFunc() {
shift shift
exec "${PI_HOLE_SCRIPT_DIR}"/version.sh "$@" "${PI_HOLE_SCRIPT_DIR}"/version.sh "$@"
exit 0
} }
# Get PID of main pihole-FTL process # Get PID of main pihole-FTL process
@@ -226,7 +225,8 @@ Time:
fi fi
local str="Pi-hole Disabled" local str="Pi-hole Disabled"
addOrEditKeyValPair "BLOCKING_ENABLED" "false" "${setupVars}" sed -i "/BLOCKING_ENABLED=/d" "${setupVars}"
echo "BLOCKING_ENABLED=false" >> "${setupVars}"
fi fi
else else
# Enable Pi-hole # Enable Pi-hole
@@ -238,10 +238,11 @@ Time:
echo -e " ${INFO} Enabling blocking" echo -e " ${INFO} Enabling blocking"
local str="Pi-hole Enabled" local str="Pi-hole Enabled"
addOrEditKeyValPair "BLOCKING_ENABLED" "true" "${setupVars}" sed -i "/BLOCKING_ENABLED=/d" "${setupVars}"
echo "BLOCKING_ENABLED=true" >> "${setupVars}"
fi fi
restartDNS reload-lists restartDNS reload
echo -e "${OVER} ${TICK} ${str}" echo -e "${OVER} ${TICK} ${str}"
} }
@@ -261,7 +262,7 @@ Options:
elif [[ "${1}" == "off" ]]; then elif [[ "${1}" == "off" ]]; then
# Disable logging # Disable logging
sed -i 's/^log-queries/#log-queries/' /etc/dnsmasq.d/01-pihole.conf sed -i 's/^log-queries/#log-queries/' /etc/dnsmasq.d/01-pihole.conf
addOrEditKeyValPair "QUERY_LOGGING" "false" "${setupVars}" sed -i 's/^QUERY_LOGGING=true/QUERY_LOGGING=false/' /etc/pihole/setupVars.conf
if [[ "${2}" != "noflush" ]]; then if [[ "${2}" != "noflush" ]]; then
# Flush logs # Flush logs
"${PI_HOLE_BIN_DIR}"/pihole -f "${PI_HOLE_BIN_DIR}"/pihole -f
@@ -271,7 +272,7 @@ Options:
elif [[ "${1}" == "on" ]]; then elif [[ "${1}" == "on" ]]; then
# Enable logging # Enable logging
sed -i 's/^#log-queries/log-queries/' /etc/dnsmasq.d/01-pihole.conf sed -i 's/^#log-queries/log-queries/' /etc/dnsmasq.d/01-pihole.conf
addOrEditKeyValPair "QUERY_LOGGING" "true" "${setupVars}" sed -i 's/^QUERY_LOGGING=false/QUERY_LOGGING=true/' /etc/pihole/setupVars.conf
echo -e " ${INFO} Enabling logging..." echo -e " ${INFO} Enabling logging..."
local str="Logging has been enabled!" local str="Logging has been enabled!"
else else
@@ -284,29 +285,27 @@ Options:
} }
analyze_ports() { analyze_ports() {
local lv4 lv6 port=${1}
# FTL is listening at least on at least one port when this # FTL is listening at least on at least one port when this
# function is getting called # function is getting called
echo -e " ${TICK} DNS service is listening"
# Check individual address family/protocol combinations # Check individual address family/protocol combinations
# For a healthy Pi-hole, they should all be up (nothing printed) # For a healthy Pi-hole, they should all be up (nothing printed)
lv4="$(ss --ipv4 --listening --numeric --tcp --udp src :${port})" if grep -q "IPv4.*UDP" <<< "${1}"; then
if grep -q "udp " <<< "${lv4}"; then
echo -e " ${TICK} UDP (IPv4)" echo -e " ${TICK} UDP (IPv4)"
else else
echo -e " ${CROSS} UDP (IPv4)" echo -e " ${CROSS} UDP (IPv4)"
fi fi
if grep -q "tcp " <<< "${lv4}"; then if grep -q "IPv4.*TCP" <<< "${1}"; then
echo -e " ${TICK} TCP (IPv4)" echo -e " ${TICK} TCP (IPv4)"
else else
echo -e " ${CROSS} TCP (IPv4)" echo -e " ${CROSS} TCP (IPv4)"
fi fi
lv6="$(ss --ipv6 --listening --numeric --tcp --udp src :${port})" if grep -q "IPv6.*UDP" <<< "${1}"; then
if grep -q "udp " <<< "${lv6}"; then
echo -e " ${TICK} UDP (IPv6)" echo -e " ${TICK} UDP (IPv6)"
else else
echo -e " ${CROSS} UDP (IPv6)" echo -e " ${CROSS} UDP (IPv6)"
fi fi
if grep -q "tcp " <<< "${lv6}"; then if grep -q "IPv6.*TCP" <<< "${1}"; then
echo -e " ${TICK} TCP (IPv6)" echo -e " ${TICK} TCP (IPv6)"
else else
echo -e " ${CROSS} TCP (IPv6)" echo -e " ${CROSS} TCP (IPv6)"
@@ -315,31 +314,19 @@ analyze_ports() {
} }
statusFunc() { statusFunc() {
# Determine if there is pihole-FTL service is listening # Determine if there is a pihole service is listening on port 53
local listening pid port local listening
listening="$(lsof -Pni:53)"
pid="$(getFTLPID)" if grep -q "pihole" <<< "${listening}"; then
if [[ "$pid" -eq "-1" ]]; then if [[ "${1}" != "web" ]]; then
case "${1}" in analyze_ports "${listening}"
"web") echo "-1";; fi
*) echo -e " ${CROSS} DNS service is NOT running";;
esac
return 0
else else
#get the port pihole-FTL is listening on by using FTL's telnet API
port="$(echo ">dns-port >quit" | nc 127.0.0.1 4711)"
if [[ "${port}" == "0" ]]; then
case "${1}" in case "${1}" in
"web") echo "-1";; "web") echo "-1";;
*) echo -e " ${CROSS} DNS service is NOT listening";; *) echo -e " ${CROSS} DNS service is NOT listening";;
esac esac
return 0 return 0
else
if [[ "${1}" != "web" ]]; then
echo -e " ${TICK} FTL is listening on port ${port}"
analyze_ports "${port}"
fi
fi
fi fi
# Determine if Pi-hole's blocking is enabled # Determine if Pi-hole's blocking is enabled
@@ -352,19 +339,18 @@ statusFunc() {
elif grep -q "BLOCKING_ENABLED=true" /etc/pihole/setupVars.conf; then elif grep -q "BLOCKING_ENABLED=true" /etc/pihole/setupVars.conf; then
# Configs are set # Configs are set
case "${1}" in case "${1}" in
"web") echo "$port";; "web") echo 1;;
*) echo -e " ${TICK} Pi-hole blocking is enabled";; *) echo -e " ${TICK} Pi-hole blocking is enabled";;
esac esac
else else
# No configs were found # No configs were found
case "${1}" in case "${1}" in
"web") echo -2;; "web") echo 99;;
*) echo -e " ${INFO} Pi-hole blocking will be enabled";; *) echo -e " ${INFO} Pi-hole blocking will be enabled";;
esac esac
# Enable blocking # Enable blocking
"${PI_HOLE_BIN_DIR}"/pihole enable "${PI_HOLE_BIN_DIR}"/pihole enable
fi fi
} }
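One side of the statusFunc hunk above determines FTL's listening port through FTL's local telnet API. The same query can be reproduced from a shell, assuming pihole-FTL is running and its API is reachable on 127.0.0.1:4711 as in the script:

    echo ">dns-port >quit" | nc 127.0.0.1 4711

It prints the DNS port FTL is currently bound to, or 0 if FTL is not listening.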
tailFunc() { tailFunc() {
@@ -377,13 +363,16 @@ tailFunc() {
fi fi
echo -e " ${INFO} Press Ctrl-C to exit" echo -e " ${INFO} Press Ctrl-C to exit"
# Retrieve IPv4/6 addresses
source /etc/pihole/setupVars.conf
# Strip date from each line # Strip date from each line
# Color blocklist/blacklist/wildcard entries as red # Color blocklist/blacklist/wildcard entries as red
# Color A/AAAA/DHCP strings as white # Color A/AAAA/DHCP strings as white
# Color everything else as gray # Color everything else as gray
tail -f /var/log/pihole.log | grep --line-buffered "${1}" | sed -E \ tail -f /var/log/pihole.log | sed -E \
-e "s,($(date +'%b %d ')| dnsmasq\[[0-9]*\]),,g" \ -e "s,($(date +'%b %d ')| dnsmasq\[[0-9]*\]),,g" \
-e "s,(.*(blacklisted |gravity blocked ).*),${COL_RED}&${COL_NC}," \ -e "s,(.*(blacklisted |gravity blocked ).* is (0.0.0.0|::|NXDOMAIN|${IPV4_ADDRESS%/*}|${IPV6_ADDRESS:-NULL}).*),${COL_RED}&${COL_NC}," \
-e "s,.*(query\\[A|DHCP).*,${COL_NC}&${COL_NC}," \ -e "s,.*(query\\[A|DHCP).*,${COL_NC}&${COL_NC}," \
-e "s,.*,${COL_GRAY}&${COL_NC}," -e "s,.*,${COL_GRAY}&${COL_NC},"
exit 0 exit 0
@@ -413,24 +402,34 @@ Branches:
} }
tricorderFunc() { tricorderFunc() {
local tricorder_token
if [[ ! -p "/dev/stdin" ]]; then if [[ ! -p "/dev/stdin" ]]; then
echo -e " ${INFO} Please do not call Tricorder directly" echo -e " ${INFO} Please do not call Tricorder directly"
exit 1 exit 1
fi fi
tricorder_token=$(curl --silent --fail --show-error --upload-file "-" https://tricorder.pi-hole.net/upload < /dev/stdin 2>&1) if ! (echo > /dev/tcp/tricorder.pi-hole.net/9998) >/dev/null 2>&1; then
if [[ "${tricorder_token}" != "https://tricorder.pi-hole.net/"* ]]; then echo -e " ${CROSS} Unable to connect to Pi-hole's Tricorder server"
echo -e "${CROSS} uploading failed, contact Pi-hole support for assistance."
# Log curl error (if available)
if [ -n "${tricorder_token}" ]; then
echo -e "${INFO} Error message: ${COL_RED}${tricorder_token}${COL_NC}\\n"
tricorder_token=""
fi
exit 1 exit 1
fi fi
echo "Upload successful, your token is: ${COL_GREEN}${tricorder_token}${COL_NC}"
exit 0 if command -v openssl &> /dev/null; then
openssl s_client -quiet -connect tricorder.pi-hole.net:9998 2> /dev/null < /dev/stdin
exit "$?"
else
echo -e " ${INFO} ${COL_YELLOW}Security Notice${COL_NC}: ${COL_WHITE}openssl${COL_NC} is not installed
Your debug log will be transmitted unencrypted via plain-text
There is a possibility that this could be intercepted by a third party
If you wish to cancel, press Ctrl-C to exit within 10 seconds"
secs="10"
while [[ "$secs" -gt "0" ]]; do
echo -ne "."
sleep 1
: $((secs--))
done
echo " "
nc tricorder.pi-hole.net 9999 < /dev/stdin
exit "$?"
fi
} }
updateCheckFunc() { updateCheckFunc() {
@@ -457,10 +456,7 @@ Debugging Options:
Add '-a' to automatically upload the log to tricorder.pi-hole.net Add '-a' to automatically upload the log to tricorder.pi-hole.net
-f, flush Flush the Pi-hole log -f, flush Flush the Pi-hole log
-r, reconfigure Reconfigure or Repair Pi-hole subsystems -r, reconfigure Reconfigure or Repair Pi-hole subsystems
-t, tail [arg] View the live output of the Pi-hole log. -t, tail View the live output of the Pi-hole log
Add an optional argument to filter the log
(regular expressions are supported)
Options: Options:
-a, admin Web interface options -a, admin Web interface options
@@ -534,7 +530,7 @@ case "${1}" in
"status" ) statusFunc "$2";; "status" ) statusFunc "$2";;
"restartdns" ) restartDNS "$2";; "restartdns" ) restartDNS "$2";;
"-a" | "admin" ) webpageFunc "$@";; "-a" | "admin" ) webpageFunc "$@";;
"-t" | "tail" ) tailFunc "$2";; "-t" | "tail" ) tailFunc;;
"checkout" ) piholeCheckoutFunc "$@";; "checkout" ) piholeCheckoutFunc "$@";;
"tricorder" ) tricorderFunc;; "tricorder" ) tricorderFunc;;
"updatechecker" ) updateCheckFunc "$@";; "updatechecker" ) updateCheckFunc "$@";;
supportedos.txt (new file)
@@ -0,0 +1,5 @@
Raspbian=9,10
Ubuntu=16,18,20
Debian=9,10
Fedora=32,33
CentOS=7,8
@@ -1,5 +1,4 @@
FROM centos:7 FROM centos:7
RUN yum install -y git
ENV GITDIR /etc/.pihole ENV GITDIR /etc/.pihole
ENV SCRIPTDIR /opt/pihole ENV SCRIPTDIR /opt/pihole
@@ -13,6 +12,5 @@ RUN true && \
chmod +x $SCRIPTDIR/* chmod +x $SCRIPTDIR/*
ENV PH_TEST true ENV PH_TEST true
ENV OS_CHECK_DOMAIN_NAME dev-supportedos.pi-hole.net
#sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \ #sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \
@@ -1,5 +1,4 @@
FROM quay.io/centos/centos:stream8 FROM centos:8
RUN yum install -y git
ENV GITDIR /etc/.pihole ENV GITDIR /etc/.pihole
ENV SCRIPTDIR /opt/pihole ENV SCRIPTDIR /opt/pihole
@@ -13,6 +12,5 @@ RUN true && \
chmod +x $SCRIPTDIR/* chmod +x $SCRIPTDIR/*
ENV PH_TEST true ENV PH_TEST true
ENV OS_CHECK_DOMAIN_NAME dev-supportedos.pi-hole.net
#sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \ #sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \
@@ -12,6 +12,5 @@ RUN true && \
chmod +x $SCRIPTDIR/* chmod +x $SCRIPTDIR/*
ENV PH_TEST true ENV PH_TEST true
ENV OS_CHECK_DOMAIN_NAME dev-supportedos.pi-hole.net
#sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \ #sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \
@@ -12,6 +12,5 @@ RUN true && \
chmod +x $SCRIPTDIR/* chmod +x $SCRIPTDIR/*
ENV PH_TEST true ENV PH_TEST true
ENV OS_CHECK_DOMAIN_NAME dev-supportedos.pi-hole.net
#sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \ #sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \
@@ -1,4 +1,4 @@
FROM buildpack-deps:bullseye-scm FROM fedora:32
ENV GITDIR /etc/.pihole ENV GITDIR /etc/.pihole
ENV SCRIPTDIR /opt/pihole ENV SCRIPTDIR /opt/pihole
@@ -12,6 +12,5 @@ RUN true && \
chmod +x $SCRIPTDIR/* chmod +x $SCRIPTDIR/*
ENV PH_TEST true ENV PH_TEST true
ENV OS_CHECK_DOMAIN_NAME dev-supportedos.pi-hole.net
#sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \ #sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \
@@ -1,5 +1,4 @@
FROM fedora:33 FROM fedora:33
RUN dnf install -y git
ENV GITDIR /etc/.pihole ENV GITDIR /etc/.pihole
ENV SCRIPTDIR /opt/pihole ENV SCRIPTDIR /opt/pihole
@@ -13,6 +12,5 @@ RUN true && \
chmod +x $SCRIPTDIR/* chmod +x $SCRIPTDIR/*
ENV PH_TEST true ENV PH_TEST true
ENV OS_CHECK_DOMAIN_NAME dev-supportedos.pi-hole.net
#sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \ #sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \
@@ -1,18 +0,0 @@
FROM fedora:34
RUN dnf install -y git
ENV GITDIR /etc/.pihole
ENV SCRIPTDIR /opt/pihole
RUN mkdir -p $GITDIR $SCRIPTDIR /etc/pihole
ADD . $GITDIR
RUN cp $GITDIR/advanced/Scripts/*.sh $GITDIR/gravity.sh $GITDIR/pihole $GITDIR/automated\ install/*.sh $SCRIPTDIR/
ENV PATH /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$SCRIPTDIR
RUN true && \
chmod +x $SCRIPTDIR/*
ENV PH_TEST true
ENV OS_CHECK_DOMAIN_NAME dev-supportedos.pi-hole.net
#sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \
@@ -12,6 +12,5 @@ RUN true && \
chmod +x $SCRIPTDIR/* chmod +x $SCRIPTDIR/*
ENV PH_TEST true ENV PH_TEST true
ENV OS_CHECK_DOMAIN_NAME dev-supportedos.pi-hole.net
#sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \ #sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \
@@ -12,6 +12,5 @@ RUN true && \
chmod +x $SCRIPTDIR/* chmod +x $SCRIPTDIR/*
ENV PH_TEST true ENV PH_TEST true
ENV OS_CHECK_DOMAIN_NAME dev-supportedos.pi-hole.net
#sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \ #sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \
@@ -13,6 +13,5 @@ RUN true && \
chmod +x $SCRIPTDIR/* chmod +x $SCRIPTDIR/*
ENV PH_TEST true ENV PH_TEST true
ENV OS_CHECK_DOMAIN_NAME dev-supportedos.pi-hole.net
#sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \ #sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \
@@ -1,18 +0,0 @@
FROM buildpack-deps:hirsute-scm
ENV GITDIR /etc/.pihole
ENV SCRIPTDIR /opt/pihole
RUN mkdir -p $GITDIR $SCRIPTDIR /etc/pihole
ADD . $GITDIR
RUN cp $GITDIR/advanced/Scripts/*.sh $GITDIR/gravity.sh $GITDIR/pihole $GITDIR/automated\ install/*.sh $SCRIPTDIR/
ENV PATH /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$SCRIPTDIR
ENV DEBIAN_FRONTEND=noninteractive
RUN true && \
chmod +x $SCRIPTDIR/*
ENV PH_TEST true
ENV OS_CHECK_DOMAIN_NAME dev-supportedos.pi-hole.net
#sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \
@@ -1,52 +1,98 @@
import pytest import pytest
import testinfra import testinfra
import testinfra.backend.docker
import subprocess
from textwrap import dedent from textwrap import dedent
check_output = testinfra.get_backend(
"local://"
).get_module("Command").check_output
SETUPVARS = { SETUPVARS = {
'PIHOLE_INTERFACE': 'eth99', 'PIHOLE_INTERFACE': 'eth99',
'IPV4_ADDRESS': '1.1.1.1',
'IPV6_ADDRESS': 'FE80::240:D0FF:FE48:4672',
'PIHOLE_DNS_1': '4.2.2.1', 'PIHOLE_DNS_1': '4.2.2.1',
'PIHOLE_DNS_2': '4.2.2.2' 'PIHOLE_DNS_2': '4.2.2.2'
} }
IMAGE = 'pytest_pihole:test_container'
tick_box = "[\x1b[1;32m\u2713\x1b[0m]" tick_box = "[\x1b[1;32m\u2713\x1b[0m]"
cross_box = "[\x1b[1;31m\u2717\x1b[0m]" cross_box = "[\x1b[1;31m\u2717\x1b[0m]"
info_box = "[i]" info_box = "[i]"
# Monkeypatch sh to bash, if they ever support non hard code /bin/sh this can go away @pytest.fixture
# https://github.com/pytest-dev/pytest-testinfra/blob/master/testinfra/backend/docker.py def Pihole(Docker):
'''
used to contain some script stubbing, now pretty much an alias.
Also provides bash as the default run function shell
'''
def run_bash(self, command, *args, **kwargs): def run_bash(self, command, *args, **kwargs):
cmd = self.get_command(command, *args) cmd = self.get_command(command, *args)
if self.user is not None: if self.user is not None:
out = self.run_local( out = self.run_local(
"docker exec -u %s %s /bin/bash -c %s", self.user, self.name, cmd "docker exec -u %s %s /bin/bash -c %s",
) self.user, self.name, cmd)
else: else:
out = self.run_local("docker exec %s /bin/bash -c %s", self.name, cmd) out = self.run_local(
"docker exec %s /bin/bash -c %s", self.name, cmd)
out.command = self.encode(cmd) out.command = self.encode(cmd)
return out return out
funcType = type(Docker.run)
testinfra.backend.docker.DockerBackend.run = run_bash Docker.run = funcType(run_bash, Docker)
return Docker
@pytest.fixture @pytest.fixture
def host(): def Docker(request, args, image, cmd):
# run a container '''
docker_id = subprocess.check_output( combine our fixtures into a docker run command and setup finalizer to
['docker', 'run', '-t', '-d', '--cap-add=ALL', IMAGE]).decode().strip() cleanup
'''
assert 'docker' in check_output('id'), "Are you in the docker group?"
docker_run = "docker run {} {} {}".format(args, image, cmd)
docker_id = check_output(docker_run)
# return a testinfra connection to the container def teardown():
docker_host = testinfra.get_host("docker://" + docker_id) check_output("docker rm -f %s", docker_id)
request.addfinalizer(teardown)
yield docker_host docker_container = testinfra.get_backend("docker://" + docker_id)
# at the end of the test suite, destroy the container docker_container.id = docker_id
subprocess.check_call(['docker', 'rm', '-f', docker_id]) return docker_container
@pytest.fixture
def args(request):
'''
-t became required when tput began being used
'''
return '-t -d'
@pytest.fixture(params=[
'test_container'
])
def tag(request):
'''
consumed by image to make the test matrix
'''
return request.param
@pytest.fixture()
def image(request, tag):
'''
built by test_000_build_containers.py
'''
return 'pytest_pihole:{}'.format(tag)
@pytest.fixture()
def cmd(request):
'''
default to doing nothing by tailing null, but don't exit
'''
return 'tail -f /dev/null'
# Helper functions # Helper functions
@@ -56,7 +102,7 @@ def mock_command(script, args, container):
in unit tests in unit tests
''' '''
full_script_path = '/usr/local/bin/{}'.format(script) full_script_path = '/usr/local/bin/{}'.format(script)
mock_script = dedent(r'''\ mock_script = dedent('''\
#!/bin/bash -e #!/bin/bash -e
echo "\$0 \$@" >> /var/log/{script} echo "\$0 \$@" >> /var/log/{script}
case "\$1" in'''.format(script=script)) case "\$1" in'''.format(script=script))
@@ -77,75 +123,13 @@ def mock_command(script, args, container):
scriptlog=script)) scriptlog=script))
def mock_command_passthrough(script, args, container):
'''
Per other mock_command* functions, allows intercepting of commands we don't want to run for real
in unit tests, however also allows only specific arguments to be mocked. Anything not defined will
be passed through to the actual command.
Example use-case: mocking `git pull` but still allowing `git clone` to work as intended
'''
orig_script_path = container.check_output('command -v {}'.format(script))
full_script_path = '/usr/local/bin/{}'.format(script)
mock_script = dedent(r'''\
#!/bin/bash -e
echo "\$0 \$@" >> /var/log/{script}
case "\$1" in'''.format(script=script))
for k, v in args.items():
case = dedent('''
{arg})
echo {res}
exit {retcode}
;;'''.format(arg=k, res=v[0], retcode=v[1]))
mock_script += case
mock_script += dedent(r'''
*)
{orig_script_path} "\$@"
;;'''.format(orig_script_path=orig_script_path))
mock_script += dedent('''
esac''')
container.run('''
cat <<EOF> {script}\n{content}\nEOF
chmod +x {script}
rm -f /var/log/{scriptlog}'''.format(script=full_script_path,
content=mock_script,
scriptlog=script))
def mock_command_run(script, args, container):
'''
Allows for setup of commands we don't really want to have to run for real
in unit tests
'''
full_script_path = '/usr/local/bin/{}'.format(script)
mock_script = dedent(r'''\
#!/bin/bash -e
echo "\$0 \$@" >> /var/log/{script}
case "\$1 \$2" in'''.format(script=script))
for k, v in args.items():
case = dedent('''
\"{arg}\")
echo {res}
exit {retcode}
;;'''.format(arg=k, res=v[0], retcode=v[1]))
mock_script += case
mock_script += dedent('''
esac''')
container.run('''
cat <<EOF> {script}\n{content}\nEOF
chmod +x {script}
rm -f /var/log/{scriptlog}'''.format(script=full_script_path,
content=mock_script,
scriptlog=script))
def mock_command_2(script, args, container): def mock_command_2(script, args, container):
''' '''
Allows for setup of commands we don't really want to have to run for real Allows for setup of commands we don't really want to have to run for real
in unit tests in unit tests
''' '''
full_script_path = '/usr/local/bin/{}'.format(script) full_script_path = '/usr/local/bin/{}'.format(script)
mock_script = dedent(r'''\ mock_script = dedent('''\
#!/bin/bash -e #!/bin/bash -e
echo "\$0 \$@" >> /var/log/{script} echo "\$0 \$@" >> /var/log/{script}
case "\$1 \$2" in'''.format(script=script)) case "\$1 \$2" in'''.format(script=script))
@@ -1,6 +1,6 @@
docker-compose docker-compose==1.23.2
pytest pytest==4.3.0
pytest-xdist pytest-xdist==1.26.1
pytest-cov pytest-cov==2.6.1
pytest-testinfra testinfra==1.19.0
tox tox==3.7.0
File diff suppressed because it is too large.
@@ -1,16 +0,0 @@
def test_key_val_replacement_works(host):
''' Confirms addOrEditKeyValPair provides the expected output '''
host.run('''
setupvars=./testoutput
source /opt/pihole/utils.sh
addOrEditKeyValPair "KEY_ONE" "value1" "./testoutput"
addOrEditKeyValPair "KEY_TWO" "value2" "./testoutput"
addOrEditKeyValPair "KEY_ONE" "value3" "./testoutput"
addOrEditKeyValPair "KEY_FOUR" "value4" "./testoutput"
cat ./testoutput
''')
output = host.run('''
cat ./testoutput
''')
expected_stdout = 'KEY_ONE=value3\nKEY_TWO=value2\nKEY_FOUR=value4\n'
assert expected_stdout == output.stdout
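The utils.sh implementation itself is not part of this compare, but a minimal bash sketch consistent with what this test expects (update a KEY=value line in place when the key exists, append it otherwise) might look like this, as an illustration only and not the real utils.sh:

    addOrEditKeyValPair() {
        local key="${1}"
        local value="${2}"
        local file="${3}"
        if grep -q "^${key}=" "${file}" 2>/dev/null; then
            # Key already present: replace its value, keeping the line in place
            sed -i "s|^${key}=.*|${key}=${value}|" "${file}"
        else
            # Key not present yet: append a new KEY=value line
            echo "${key}=${value}" >> "${file}"
        fi
    }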
@@ -0,0 +1,599 @@
from textwrap import dedent
import re
from .conftest import (
SETUPVARS,
tick_box,
info_box,
cross_box,
mock_command,
mock_command_2,
run_script
)
def test_supported_operating_system(Pihole):
'''
confirm installer exists on unsupported distribution
'''
# break supported package managers to emulate an unsupported distribution
Pihole.run('rm -rf /usr/bin/apt-get')
Pihole.run('rm -rf /usr/bin/rpm')
distro_check = Pihole.run('''
source /opt/pihole/basic-install.sh
distro_check
''')
expected_stdout = cross_box + ' OS distribution not supported'
assert expected_stdout in distro_check.stdout
# assert distro_check.rc == 1
def test_setupVars_are_sourced_to_global_scope(Pihole):
'''
currently update_dialogs sources setupVars with a dot,
then various other functions use the variables.
This confirms the sourced variables are in scope between functions
'''
setup_var_file = 'cat <<EOF> /etc/pihole/setupVars.conf\n'
for k, v in SETUPVARS.items():
setup_var_file += "{}={}\n".format(k, v)
setup_var_file += "EOF\n"
Pihole.run(setup_var_file)
script = dedent('''\
set -e
printSetupVars() {
# Currently debug test function only
echo "Outputting sourced variables"
echo "PIHOLE_INTERFACE=${PIHOLE_INTERFACE}"
echo "IPV4_ADDRESS=${IPV4_ADDRESS}"
echo "IPV6_ADDRESS=${IPV6_ADDRESS}"
echo "PIHOLE_DNS_1=${PIHOLE_DNS_1}"
echo "PIHOLE_DNS_2=${PIHOLE_DNS_2}"
}
update_dialogs() {
. /etc/pihole/setupVars.conf
}
update_dialogs
printSetupVars
''')
output = run_script(Pihole, script).stdout
for k, v in SETUPVARS.items():
assert "{}={}".format(k, v) in output
def test_setupVars_saved_to_file(Pihole):
'''
confirm saved settings are written to a file for future updates to re-use
'''
# dedent works better with this and padding matching script below
set_setup_vars = '\n'
for k, v in SETUPVARS.items():
set_setup_vars += " {}={}\n".format(k, v)
Pihole.run(set_setup_vars).stdout
script = dedent('''\
set -e
echo start
TERM=xterm
source /opt/pihole/basic-install.sh
{}
mkdir -p /etc/dnsmasq.d
version_check_dnsmasq
echo "" > /etc/pihole/pihole-FTL.conf
finalExports
cat /etc/pihole/setupVars.conf
'''.format(set_setup_vars))
output = run_script(Pihole, script).stdout
for k, v in SETUPVARS.items():
assert "{}={}".format(k, v) in output
def test_selinux_not_detected(Pihole):
'''
confirms installer continues when SELinux configuration file does not exist
'''
check_selinux = Pihole.run('''
rm -f /etc/selinux/config
source /opt/pihole/basic-install.sh
checkSelinux
''')
expected_stdout = info_box + ' SELinux not detected'
assert expected_stdout in check_selinux.stdout
assert check_selinux.rc == 0
def test_installPiholeWeb_fresh_install_no_errors(Pihole):
'''
confirms all web page assets from Core repo are installed on a fresh build
'''
installWeb = Pihole.run('''
source /opt/pihole/basic-install.sh
installPiholeWeb
''')
expected_stdout = info_box + ' Installing blocking page...'
assert expected_stdout in installWeb.stdout
expected_stdout = tick_box + (' Creating directory for blocking page, '
'and copying files')
assert expected_stdout in installWeb.stdout
expected_stdout = info_box + ' Backing up index.lighttpd.html'
assert expected_stdout in installWeb.stdout
expected_stdout = ('No default index.lighttpd.html file found... '
'not backing up')
assert expected_stdout in installWeb.stdout
expected_stdout = tick_box + ' Installing sudoer file'
assert expected_stdout in installWeb.stdout
web_directory = Pihole.run('ls -r /var/www/html/pihole').stdout
assert 'index.php' in web_directory
assert 'blockingpage.css' in web_directory
def test_update_package_cache_success_no_errors(Pihole):
'''
confirms package cache was updated without any errors
'''
updateCache = Pihole.run('''
source /opt/pihole/basic-install.sh
distro_check
update_package_cache
''')
expected_stdout = tick_box + ' Update local cache of available packages'
assert expected_stdout in updateCache.stdout
assert 'error' not in updateCache.stdout.lower()
def test_update_package_cache_failure_no_errors(Pihole):
'''
confirms package cache was not updated
'''
mock_command('apt-get', {'update': ('', '1')}, Pihole)
updateCache = Pihole.run('''
source /opt/pihole/basic-install.sh
distro_check
update_package_cache
''')
expected_stdout = cross_box + ' Update local cache of available packages'
assert expected_stdout in updateCache.stdout
assert 'Error: Unable to update package cache.' in updateCache.stdout
def test_FTL_detect_aarch64_no_errors(Pihole):
'''
confirms only aarch64 package is downloaded for FTL engine
'''
# mock uname to return aarch64 platform
mock_command('uname', {'-m': ('aarch64', '0')}, Pihole)
# mock ldd to respond with aarch64 shared library
mock_command(
'ldd',
{
'/bin/ls': (
'/lib/ld-linux-aarch64.so.1',
'0'
)
},
Pihole
)
detectPlatform = Pihole.run('''
source /opt/pihole/basic-install.sh
create_pihole_user
funcOutput=$(get_binary_name)
binary="pihole-FTL${funcOutput##*pihole-FTL}"
theRest="${funcOutput%pihole-FTL*}"
FTLdetect "${binary}" "${theRest}"
''')
expected_stdout = info_box + ' FTL Checks...'
assert expected_stdout in detectPlatform.stdout
expected_stdout = tick_box + ' Detected AArch64 (64 Bit ARM) processor'
assert expected_stdout in detectPlatform.stdout
expected_stdout = tick_box + ' Downloading and Installing FTL'
assert expected_stdout in detectPlatform.stdout
def test_FTL_detect_armv4t_no_errors(Pihole):
'''
confirms only armv4t package is downloaded for FTL engine
'''
# mock uname to return armv4t platform
mock_command('uname', {'-m': ('armv4t', '0')}, Pihole)
# mock ldd to respond with ld-linux shared library
mock_command('ldd', {'/bin/ls': ('/lib/ld-linux.so.3', '0')}, Pihole)
detectPlatform = Pihole.run('''
source /opt/pihole/basic-install.sh
create_pihole_user
funcOutput=$(get_binary_name)
binary="pihole-FTL${funcOutput##*pihole-FTL}"
theRest="${funcOutput%pihole-FTL*}"
FTLdetect "${binary}" "${theRest}"
''')
expected_stdout = info_box + ' FTL Checks...'
assert expected_stdout in detectPlatform.stdout
expected_stdout = tick_box + (' Detected ARMv4 processor')
assert expected_stdout in detectPlatform.stdout
expected_stdout = tick_box + ' Downloading and Installing FTL'
assert expected_stdout in detectPlatform.stdout
def test_FTL_detect_armv5te_no_errors(Pihole):
'''
confirms only armv5te package is downloaded for FTL engine
'''
# mock uname to return armv5te platform
mock_command('uname', {'-m': ('armv5te', '0')}, Pihole)
# mock ldd to respond with ld-linux shared library
mock_command('ldd', {'/bin/ls': ('/lib/ld-linux.so.3', '0')}, Pihole)
detectPlatform = Pihole.run('''
source /opt/pihole/basic-install.sh
create_pihole_user
funcOutput=$(get_binary_name)
binary="pihole-FTL${funcOutput##*pihole-FTL}"
theRest="${funcOutput%pihole-FTL*}"
FTLdetect "${binary}" "${theRest}"
''')
expected_stdout = info_box + ' FTL Checks...'
assert expected_stdout in detectPlatform.stdout
expected_stdout = tick_box + (' Detected ARMv5 (or newer) processor')
assert expected_stdout in detectPlatform.stdout
expected_stdout = tick_box + ' Downloading and Installing FTL'
assert expected_stdout in detectPlatform.stdout
def test_FTL_detect_armv6l_no_errors(Pihole):
'''
confirms only armv6l package is downloaded for FTL engine
'''
# mock uname to return armv6l platform
mock_command('uname', {'-m': ('armv6l', '0')}, Pihole)
# mock ldd to respond with ld-linux-armhf shared library
mock_command('ldd', {'/bin/ls': ('/lib/ld-linux-armhf.so.3', '0')}, Pihole)
detectPlatform = Pihole.run('''
source /opt/pihole/basic-install.sh
create_pihole_user
funcOutput=$(get_binary_name)
binary="pihole-FTL${funcOutput##*pihole-FTL}"
theRest="${funcOutput%pihole-FTL*}"
FTLdetect "${binary}" "${theRest}"
''')
expected_stdout = info_box + ' FTL Checks...'
assert expected_stdout in detectPlatform.stdout
expected_stdout = tick_box + (' Detected ARMv6 processor '
'(with hard-float support)')
assert expected_stdout in detectPlatform.stdout
expected_stdout = tick_box + ' Downloading and Installing FTL'
assert expected_stdout in detectPlatform.stdout
def test_FTL_detect_armv7l_no_errors(Pihole):
'''
confirms only armv7l package is downloaded for FTL engine
'''
# mock uname to return armv7l platform
mock_command('uname', {'-m': ('armv7l', '0')}, Pihole)
# mock ldd to respond with ld-linux-armhf shared library
mock_command('ldd', {'/bin/ls': ('/lib/ld-linux-armhf.so.3', '0')}, Pihole)
detectPlatform = Pihole.run('''
source /opt/pihole/basic-install.sh
create_pihole_user
funcOutput=$(get_binary_name)
binary="pihole-FTL${funcOutput##*pihole-FTL}"
theRest="${funcOutput%pihole-FTL*}"
FTLdetect "${binary}" "${theRest}"
''')
expected_stdout = info_box + ' FTL Checks...'
assert expected_stdout in detectPlatform.stdout
expected_stdout = tick_box + (' Detected ARMv7 processor '
'(with hard-float support)')
assert expected_stdout in detectPlatform.stdout
expected_stdout = tick_box + ' Downloading and Installing FTL'
assert expected_stdout in detectPlatform.stdout
def test_FTL_detect_armv8a_no_errors(Pihole):
'''
confirms only armv8a package is downloaded for FTL engine
'''
# mock uname to return armv8a platform
mock_command('uname', {'-m': ('armv8a', '0')}, Pihole)
# mock ldd to respond with ld-linux-armhf shared library
mock_command('ldd', {'/bin/ls': ('/lib/ld-linux-armhf.so.3', '0')}, Pihole)
detectPlatform = Pihole.run('''
source /opt/pihole/basic-install.sh
create_pihole_user
funcOutput=$(get_binary_name)
binary="pihole-FTL${funcOutput##*pihole-FTL}"
theRest="${funcOutput%pihole-FTL*}"
FTLdetect "${binary}" "${theRest}"
''')
expected_stdout = info_box + ' FTL Checks...'
assert expected_stdout in detectPlatform.stdout
expected_stdout = tick_box + ' Detected ARMv8 (or newer) processor'
assert expected_stdout in detectPlatform.stdout
expected_stdout = tick_box + ' Downloading and Installing FTL'
assert expected_stdout in detectPlatform.stdout
def test_FTL_detect_x86_64_no_errors(Pihole):
'''
confirms only x86_64 package is downloaded for FTL engine
'''
detectPlatform = Pihole.run('''
source /opt/pihole/basic-install.sh
create_pihole_user
funcOutput=$(get_binary_name)
binary="pihole-FTL${funcOutput##*pihole-FTL}"
theRest="${funcOutput%pihole-FTL*}"
FTLdetect "${binary}" "${theRest}"
''')
expected_stdout = info_box + ' FTL Checks...'
assert expected_stdout in detectPlatform.stdout
expected_stdout = tick_box + ' Detected x86_64 processor'
assert expected_stdout in detectPlatform.stdout
expected_stdout = tick_box + ' Downloading and Installing FTL'
assert expected_stdout in detectPlatform.stdout
def test_FTL_detect_unknown_no_errors(Pihole):
''' confirms only generic package is downloaded for FTL engine '''
# mock uname to return generic platform
mock_command('uname', {'-m': ('mips', '0')}, Pihole)
detectPlatform = Pihole.run('''
source /opt/pihole/basic-install.sh
create_pihole_user
funcOutput=$(get_binary_name)
binary="pihole-FTL${funcOutput##*pihole-FTL}"
theRest="${funcOutput%pihole-FTL*}"
FTLdetect "${binary}" "${theRest}"
''')
expected_stdout = 'Not able to detect processor (unknown: mips)'
assert expected_stdout in detectPlatform.stdout
def test_FTL_download_aarch64_no_errors(Pihole):
'''
confirms only aarch64 package is downloaded for FTL engine
'''
# mock whiptail answers and ensure installer dependencies
mock_command('whiptail', {'*': ('', '0')}, Pihole)
Pihole.run('''
source /opt/pihole/basic-install.sh
distro_check
install_dependent_packages ${INSTALLER_DEPS[@]}
''')
download_binary = Pihole.run('''
source /opt/pihole/basic-install.sh
create_pihole_user
FTLinstall "pihole-FTL-aarch64-linux-gnu"
''')
expected_stdout = tick_box + ' Downloading and Installing FTL'
assert expected_stdout in download_binary.stdout
assert 'error' not in download_binary.stdout.lower()
def test_FTL_binary_installed_and_responsive_no_errors(Pihole):
'''
confirms FTL binary is copied and functional in installed location
'''
installed_binary = Pihole.run('''
source /opt/pihole/basic-install.sh
create_pihole_user
funcOutput=$(get_binary_name)
binary="pihole-FTL${funcOutput##*pihole-FTL}"
theRest="${funcOutput%pihole-FTL*}"
FTLdetect "${binary}" "${theRest}"
pihole-FTL version
''')
expected_stdout = 'v'
assert expected_stdout in installed_binary.stdout
# def test_FTL_support_files_installed(Pihole):
# '''
# confirms FTL support files are installed
# '''
# support_files = Pihole.run('''
# source /opt/pihole/basic-install.sh
# FTLdetect
# stat -c '%a %n' /var/log/pihole-FTL.log
# stat -c '%a %n' /run/pihole-FTL.port
# stat -c '%a %n' /run/pihole-FTL.pid
# ls -lac /run
# ''')
# assert '644 /run/pihole-FTL.port' in support_files.stdout
# assert '644 /run/pihole-FTL.pid' in support_files.stdout
# assert '644 /var/log/pihole-FTL.log' in support_files.stdout
def test_IPv6_only_link_local(Pihole):
'''
confirms IPv6 blocking is disabled for Link-local address
'''
# mock ip -6 address to return Link-local address
mock_command_2(
'ip',
{
'-6 address': (
'inet6 fe80::d210:52fa:fe00:7ad7/64 scope link',
'0'
)
},
Pihole
)
detectPlatform = Pihole.run('''
source /opt/pihole/basic-install.sh
useIPv6dialog
''')
expected_stdout = ('Unable to find IPv6 ULA/GUA address, '
'IPv6 adblocking will not be enabled')
assert expected_stdout in detectPlatform.stdout
def test_IPv6_only_ULA(Pihole):
'''
confirms IPv6 blocking is enabled for ULA addresses
'''
# mock ip -6 address to return ULA address
mock_command_2(
'ip',
{
'-6 address': (
'inet6 fda2:2001:5555:0:d210:52fa:fe00:7ad7/64 scope global',
'0'
)
},
Pihole
)
detectPlatform = Pihole.run('''
source /opt/pihole/basic-install.sh
useIPv6dialog
''')
expected_stdout = 'Found IPv6 ULA address, using it for blocking IPv6 ads'
assert expected_stdout in detectPlatform.stdout
def test_IPv6_only_GUA(Pihole):
'''
confirms IPv6 blocking is enabled for GUA addresses
'''
# mock ip -6 address to return GUA address
mock_command_2(
'ip',
{
'-6 address': (
'inet6 2003:12:1e43:301:d210:52fa:fe00:7ad7/64 scope global',
'0'
)
},
Pihole
)
detectPlatform = Pihole.run('''
source /opt/pihole/basic-install.sh
useIPv6dialog
''')
expected_stdout = 'Found IPv6 GUA address, using it for blocking IPv6 ads'
assert expected_stdout in detectPlatform.stdout
def test_IPv6_GUA_ULA_test(Pihole):
'''
confirms IPv6 blocking is enabled for GUA and ULA addresses
'''
# mock ip -6 address to return GUA and ULA addresses
mock_command_2(
'ip',
{
'-6 address': (
'inet6 2003:12:1e43:301:d210:52fa:fe00:7ad7/64 scope global\n'
'inet6 fda2:2001:5555:0:d210:52fa:fe00:7ad7/64 scope global',
'0'
)
},
Pihole
)
detectPlatform = Pihole.run('''
source /opt/pihole/basic-install.sh
useIPv6dialog
''')
expected_stdout = 'Found IPv6 ULA address, using it for blocking IPv6 ads'
assert expected_stdout in detectPlatform.stdout
def test_IPv6_ULA_GUA_test(Pihole):
'''
confirms IPv6 blocking is enabled for GUA and ULA addresses
'''
# mock ip -6 address to return ULA and GUA addresses
mock_command_2(
'ip',
{
'-6 address': (
'inet6 fda2:2001:5555:0:d210:52fa:fe00:7ad7/64 scope global\n'
'inet6 2003:12:1e43:301:d210:52fa:fe00:7ad7/64 scope global',
'0'
)
},
Pihole
)
detectPlatform = Pihole.run('''
source /opt/pihole/basic-install.sh
useIPv6dialog
''')
expected_stdout = 'Found IPv6 ULA address, using it for blocking IPv6 ads'
assert expected_stdout in detectPlatform.stdout
def test_validate_ip(Pihole):
'''
Tests valid_ip for various IP addresses
'''
def test_address(addr, success=True):
output = Pihole.run('''
source /opt/pihole/basic-install.sh
valid_ip "{addr}"
'''.format(addr=addr))
assert output.rc == 0 if success else 1
test_address('192.168.1.1')
test_address('127.0.0.1')
test_address('255.255.255.255')
test_address('255.255.255.256', False)
test_address('255.255.256.255', False)
test_address('255.256.255.255', False)
test_address('256.255.255.255', False)
test_address('1092.168.1.1', False)
test_address('not an IP', False)
test_address('8.8.8.8#', False)
test_address('8.8.8.8#0')
test_address('8.8.8.8#1')
test_address('8.8.8.8#42')
test_address('8.8.8.8#888')
test_address('8.8.8.8#1337')
test_address('8.8.8.8#65535')
test_address('8.8.8.8#65536', False)
test_address('8.8.8.8#-1', False)
test_address('00.0.0.0', False)
test_address('010.0.0.0', False)
test_address('001.0.0.0', False)
test_address('0.0.0.0#00', False)
test_address('0.0.0.0#01', False)
test_address('0.0.0.0#001', False)
test_address('0.0.0.0#0001', False)
test_address('0.0.0.0#00001', False)
def test_os_check_fails(Pihole):
''' Confirms install fails on unsupported OS '''
Pihole.run('''
source /opt/pihole/basic-install.sh
distro_check
install_dependent_packages ${INSTALLER_DEPS[@]}
cat <<EOT > /etc/os-release
ID=UnsupportedOS
VERSION_ID="2"
EOT
''')
detectOS = Pihole.run('''t
source /opt/pihole/basic-install.sh
os_check
''')
expected_stdout = 'Unsupported OS detected: UnsupportedOS'
assert expected_stdout in detectOS.stdout
def test_os_check_passes(Pihole):
''' Confirms OS meets the requirements '''
Pihole.run('''
source /opt/pihole/basic-install.sh
distro_check
install_dependent_packages ${INSTALLER_DEPS[@]}
''')
detectOS = Pihole.run('''
source /opt/pihole/basic-install.sh
os_check
''')
expected_stdout = 'Supported OS detected'
assert expected_stdout in detectOS.stdout
@@ -5,59 +5,56 @@ from .conftest import (
) )
def test_php_upgrade_default_optout_centos_eq_7(host): def test_php_upgrade_default_optout_centos_eq_7(Pihole):
''' '''
confirms the default behavior to opt-out of installing PHP7 from REMI confirms the default behavior to opt-out of installing PHP7 from REMI
''' '''
package_manager_detect = host.run(''' distro_check = Pihole.run('''
source /opt/pihole/basic-install.sh source /opt/pihole/basic-install.sh
package_manager_detect distro_check
select_rpm_php
''') ''')
expected_stdout = info_box + (' User opt-out of PHP 7 upgrade on CentOS. ' expected_stdout = info_box + (' User opt-out of PHP 7 upgrade on CentOS. '
'Deprecated PHP may be in use.') 'Deprecated PHP may be in use.')
assert expected_stdout in package_manager_detect.stdout assert expected_stdout in distro_check.stdout
remi_package = host.package('remi-release') remi_package = Pihole.package('remi-release')
assert not remi_package.is_installed assert not remi_package.is_installed
def test_php_upgrade_user_optout_centos_eq_7(host): def test_php_upgrade_user_optout_centos_eq_7(Pihole):
''' '''
confirms installer behavior when user opt-out of installing PHP7 from REMI confirms installer behavior when user opt-out of installing PHP7 from REMI
(php not currently installed) (php not currently installed)
''' '''
# Whiptail dialog returns Cancel for user prompt # Whiptail dialog returns Cancel for user prompt
mock_command('whiptail', {'*': ('', '1')}, host) mock_command('whiptail', {'*': ('', '1')}, Pihole)
package_manager_detect = host.run(''' distro_check = Pihole.run('''
source /opt/pihole/basic-install.sh source /opt/pihole/basic-install.sh
package_manager_detect distro_check
select_rpm_php
''') ''')
expected_stdout = info_box + (' User opt-out of PHP 7 upgrade on CentOS. ' expected_stdout = info_box + (' User opt-out of PHP 7 upgrade on CentOS. '
'Deprecated PHP may be in use.') 'Deprecated PHP may be in use.')
assert expected_stdout in package_manager_detect.stdout assert expected_stdout in distro_check.stdout
remi_package = host.package('remi-release') remi_package = Pihole.package('remi-release')
assert not remi_package.is_installed assert not remi_package.is_installed
def test_php_upgrade_user_optin_centos_eq_7(host): def test_php_upgrade_user_optin_centos_eq_7(Pihole):
''' '''
confirms installer behavior when user opt-in to installing PHP7 from REMI confirms installer behavior when user opt-in to installing PHP7 from REMI
(php not currently installed) (php not currently installed)
''' '''
# Whiptail dialog returns Continue for user prompt # Whiptail dialog returns Continue for user prompt
mock_command('whiptail', {'*': ('', '0')}, host) mock_command('whiptail', {'*': ('', '0')}, Pihole)
package_manager_detect = host.run(''' distro_check = Pihole.run('''
source /opt/pihole/basic-install.sh source /opt/pihole/basic-install.sh
package_manager_detect distro_check
select_rpm_php
''') ''')
assert 'opt-out' not in package_manager_detect.stdout assert 'opt-out' not in distro_check.stdout
expected_stdout = info_box + (' Enabling Remi\'s RPM repository ' expected_stdout = info_box + (' Enabling Remi\'s RPM repository '
'(https://rpms.remirepo.net)') '(https://rpms.remirepo.net)')
assert expected_stdout in package_manager_detect.stdout assert expected_stdout in distro_check.stdout
expected_stdout = tick_box + (' Remi\'s RPM repository has ' expected_stdout = tick_box + (' Remi\'s RPM repository has '
'been enabled for PHP7') 'been enabled for PHP7')
assert expected_stdout in package_manager_detect.stdout assert expected_stdout in distro_check.stdout
remi_package = host.package('remi-release') remi_package = Pihole.package('remi-release')
assert remi_package.is_installed assert remi_package.is_installed
@@ -5,64 +5,61 @@ from .conftest import (
) )
def test_php_upgrade_default_continue_centos_gte_8(host): def test_php_upgrade_default_continue_centos_gte_8(Pihole):
''' '''
confirms the latest version of CentOS continues / does not optout confirms the latest version of CentOS continues / does not optout
(should trigger on CentOS7 only) (should trigger on CentOS7 only)
''' '''
package_manager_detect = host.run(''' distro_check = Pihole.run('''
source /opt/pihole/basic-install.sh source /opt/pihole/basic-install.sh
package_manager_detect distro_check
select_rpm_php
''') ''')
unexpected_stdout = info_box + (' User opt-out of PHP 7 upgrade on CentOS.' unexpected_stdout = info_box + (' User opt-out of PHP 7 upgrade on CentOS.'
' Deprecated PHP may be in use.') ' Deprecated PHP may be in use.')
assert unexpected_stdout not in package_manager_detect.stdout assert unexpected_stdout not in distro_check.stdout
# ensure remi was not installed on latest CentOS # ensure remi was not installed on latest CentOS
remi_package = host.package('remi-release') remi_package = Pihole.package('remi-release')
assert not remi_package.is_installed assert not remi_package.is_installed
def test_php_upgrade_user_optout_skipped_centos_gte_8(host): def test_php_upgrade_user_optout_skipped_centos_gte_8(Pihole):
''' '''
confirms installer skips user opt-out of installing PHP7 from REMI on confirms installer skips user opt-out of installing PHP7 from REMI on
latest CentOS (should trigger on CentOS7 only) latest CentOS (should trigger on CentOS7 only)
(php not currently installed) (php not currently installed)
''' '''
# Whiptail dialog returns Cancel for user prompt # Whiptail dialog returns Cancel for user prompt
mock_command('whiptail', {'*': ('', '1')}, host) mock_command('whiptail', {'*': ('', '1')}, Pihole)
package_manager_detect = host.run(''' distro_check = Pihole.run('''
source /opt/pihole/basic-install.sh source /opt/pihole/basic-install.sh
package_manager_detect distro_check
select_rpm_php
''') ''')
unexpected_stdout = info_box + (' User opt-out of PHP 7 upgrade on CentOS.' unexpected_stdout = info_box + (' User opt-out of PHP 7 upgrade on CentOS.'
' Deprecated PHP may be in use.') ' Deprecated PHP may be in use.')
assert unexpected_stdout not in package_manager_detect.stdout assert unexpected_stdout not in distro_check.stdout
# ensure remi was not installed on latest CentOS # ensure remi was not installed on latest CentOS
remi_package = host.package('remi-release') remi_package = Pihole.package('remi-release')
assert not remi_package.is_installed assert not remi_package.is_installed
def test_php_upgrade_user_optin_skipped_centos_gte_8(host): def test_php_upgrade_user_optin_skipped_centos_gte_8(Pihole):
''' '''
confirms installer skips user opt-in to installing PHP7 from REMI on confirms installer skips user opt-in to installing PHP7 from REMI on
latest CentOS (should trigger on CentOS7 only) latest CentOS (should trigger on CentOS7 only)
(php not currently installed) (php not currently installed)
''' '''
# Whiptail dialog returns Continue for user prompt # Whiptail dialog returns Continue for user prompt
mock_command('whiptail', {'*': ('', '0')}, host) mock_command('whiptail', {'*': ('', '0')}, Pihole)
package_manager_detect = host.run(''' distro_check = Pihole.run('''
source /opt/pihole/basic-install.sh source /opt/pihole/basic-install.sh
package_manager_detect distro_check
select_rpm_php
''') ''')
assert 'opt-out' not in package_manager_detect.stdout assert 'opt-out' not in distro_check.stdout
unexpected_stdout = info_box + (' Enabling Remi\'s RPM repository ' unexpected_stdout = info_box + (' Enabling Remi\'s RPM repository '
'(https://rpms.remirepo.net)') '(https://rpms.remirepo.net)')
assert unexpected_stdout not in package_manager_detect.stdout assert unexpected_stdout not in distro_check.stdout
unexpected_stdout = tick_box + (' Remi\'s RPM repository has ' unexpected_stdout = tick_box + (' Remi\'s RPM repository has '
'been enabled for PHP7') 'been enabled for PHP7')
assert unexpected_stdout not in package_manager_detect.stdout assert unexpected_stdout not in distro_check.stdout
remi_package = host.package('remi-release') remi_package = Pihole.package('remi-release')
assert not remi_package.is_installed assert not remi_package.is_installed
@@ -7,119 +7,114 @@ from .conftest import (
) )
def test_release_supported_version_check_centos(host): def test_release_supported_version_check_centos(Pihole):
''' '''
confirms installer exits on unsupported releases of CentOS confirms installer exits on unsupported releases of CentOS
''' '''
# modify /etc/redhat-release to mock an unsupported CentOS release # modify /etc/redhat-release to mock an unsupported CentOS release
host.run('echo "CentOS Linux release 6.9" > /etc/redhat-release') Pihole.run('echo "CentOS Linux release 6.9" > /etc/redhat-release')
package_manager_detect = host.run(''' distro_check = Pihole.run('''
source /opt/pihole/basic-install.sh source /opt/pihole/basic-install.sh
package_manager_detect distro_check
select_rpm_php
''') ''')
expected_stdout = cross_box + (' CentOS 6 is not supported.') expected_stdout = cross_box + (' CentOS 6 is not supported.')
assert expected_stdout in package_manager_detect.stdout assert expected_stdout in distro_check.stdout
expected_stdout = 'Please update to CentOS release 7 or later' expected_stdout = 'Please update to CentOS release 7 or later'
assert expected_stdout in package_manager_detect.stdout assert expected_stdout in distro_check.stdout
def test_enable_epel_repository_centos(host): def test_enable_epel_repository_centos(Pihole):
''' '''
confirms the EPEL package repository is enabled when installed on CentOS confirms the EPEL package repository is enabled when installed on CentOS
''' '''
package_manager_detect = host.run(''' distro_check = Pihole.run('''
source /opt/pihole/basic-install.sh source /opt/pihole/basic-install.sh
package_manager_detect distro_check
select_rpm_php
''') ''')
expected_stdout = info_box + (' Enabling EPEL package repository ' expected_stdout = info_box + (' Enabling EPEL package repository '
'(https://fedoraproject.org/wiki/EPEL)') '(https://fedoraproject.org/wiki/EPEL)')
assert expected_stdout in package_manager_detect.stdout assert expected_stdout in distro_check.stdout
expected_stdout = tick_box + ' Installed epel-release' expected_stdout = tick_box + ' Installed epel-release'
assert expected_stdout in package_manager_detect.stdout assert expected_stdout in distro_check.stdout
epel_package = host.package('epel-release') epel_package = Pihole.package('epel-release')
assert epel_package.is_installed assert epel_package.is_installed
def test_php_version_lt_7_detected_upgrade_default_optout_centos(host): def test_php_version_lt_7_detected_upgrade_default_optout_centos(Pihole):
''' '''
confirms the default behavior to opt-out of upgrading to PHP7 from REMI confirms the default behavior to opt-out of upgrading to PHP7 from REMI
''' '''
# first we will install the default php version to test installer behavior # first we will install the default php version to test installer behavior
php_install = host.run('yum install -y php') php_install = Pihole.run('yum install -y php')
assert php_install.rc == 0 assert php_install.rc == 0
php_package = host.package('php') php_package = Pihole.package('php')
default_centos_php_version = php_package.version.split('.')[0] default_centos_php_version = php_package.version.split('.')[0]
if int(default_centos_php_version) >= 7: # PHP7 is supported/recommended if int(default_centos_php_version) >= 7: # PHP7 is supported/recommended
pytest.skip("Test deprecated . Detected default PHP version >= 7") pytest.skip("Test deprecated . Detected default PHP version >= 7")
package_manager_detect = host.run(''' distro_check = Pihole.run('''
source /opt/pihole/basic-install.sh source /opt/pihole/basic-install.sh
package_manager_detect distro_check
select_rpm_php
''') ''')
expected_stdout = info_box + (' User opt-out of PHP 7 upgrade on CentOS. ' expected_stdout = info_box + (' User opt-out of PHP 7 upgrade on CentOS. '
'Deprecated PHP may be in use.') 'Deprecated PHP may be in use.')
assert expected_stdout in package_manager_detect.stdout assert expected_stdout in distro_check.stdout
remi_package = host.package('remi-release') remi_package = Pihole.package('remi-release')
assert not remi_package.is_installed assert not remi_package.is_installed
def test_php_version_lt_7_detected_upgrade_user_optout_centos(host): def test_php_version_lt_7_detected_upgrade_user_optout_centos(Pihole):
''' '''
confirms installer behavior when user opt-out to upgrade to PHP7 via REMI confirms installer behavior when user opt-out to upgrade to PHP7 via REMI
''' '''
# first we will install the default php version to test installer behavior # first we will install the default php version to test installer behavior
php_install = host.run('yum install -y php') php_install = Pihole.run('yum install -y php')
assert php_install.rc == 0 assert php_install.rc == 0
php_package = host.package('php') php_package = Pihole.package('php')
default_centos_php_version = php_package.version.split('.')[0] default_centos_php_version = php_package.version.split('.')[0]
if int(default_centos_php_version) >= 7: # PHP7 is supported/recommended if int(default_centos_php_version) >= 7: # PHP7 is supported/recommended
pytest.skip("Test deprecated . Detected default PHP version >= 7") pytest.skip("Test deprecated . Detected default PHP version >= 7")
# Whiptail dialog returns Cancel for user prompt # Whiptail dialog returns Cancel for user prompt
mock_command('whiptail', {'*': ('', '1')}, host) mock_command('whiptail', {'*': ('', '1')}, Pihole)
package_manager_detect = host.run(''' distro_check = Pihole.run('''
source /opt/pihole/basic-install.sh source /opt/pihole/basic-install.sh
package_manager_detect distro_check
select_rpm_php
''') ''')
expected_stdout = info_box + (' User opt-out of PHP 7 upgrade on CentOS. ' expected_stdout = info_box + (' User opt-out of PHP 7 upgrade on CentOS. '
'Deprecated PHP may be in use.') 'Deprecated PHP may be in use.')
assert expected_stdout in package_manager_detect.stdout assert expected_stdout in distro_check.stdout
remi_package = host.package('remi-release') remi_package = Pihole.package('remi-release')
assert not remi_package.is_installed assert not remi_package.is_installed
def test_php_version_lt_7_detected_upgrade_user_optin_centos(host): def test_php_version_lt_7_detected_upgrade_user_optin_centos(Pihole):
''' '''
confirms installer behavior when user opt-in to upgrade to PHP7 via REMI confirms installer behavior when user opt-in to upgrade to PHP7 via REMI
''' '''
# first we will install the default php version to test installer behavior # first we will install the default php version to test installer behavior
php_install = host.run('yum install -y php') php_install = Pihole.run('yum install -y php')
assert php_install.rc == 0 assert php_install.rc == 0
php_package = host.package('php') php_package = Pihole.package('php')
default_centos_php_version = php_package.version.split('.')[0] default_centos_php_version = php_package.version.split('.')[0]
if int(default_centos_php_version) >= 7: # PHP7 is supported/recommended if int(default_centos_php_version) >= 7: # PHP7 is supported/recommended
pytest.skip("Test deprecated . Detected default PHP version >= 7") pytest.skip("Test deprecated . Detected default PHP version >= 7")
# Whiptail dialog returns Continue for user prompt # Whiptail dialog returns Continue for user prompt
mock_command('whiptail', {'*': ('', '0')}, host) mock_command('whiptail', {'*': ('', '0')}, Pihole)
package_manager_detect = host.run(''' distro_check = Pihole.run('''
source /opt/pihole/basic-install.sh source /opt/pihole/basic-install.sh
package_manager_detect distro_check
select_rpm_php
install_dependent_packages PIHOLE_WEB_DEPS[@] install_dependent_packages PIHOLE_WEB_DEPS[@]
''') ''')
expected_stdout = info_box + (' User opt-out of PHP 7 upgrade on CentOS. ' expected_stdout = info_box + (' User opt-out of PHP 7 upgrade on CentOS. '
'Deprecated PHP may be in use.') 'Deprecated PHP may be in use.')
assert expected_stdout not in package_manager_detect.stdout assert expected_stdout not in distro_check.stdout
expected_stdout = info_box + (' Enabling Remi\'s RPM repository ' expected_stdout = info_box + (' Enabling Remi\'s RPM repository '
'(https://rpms.remirepo.net)') '(https://rpms.remirepo.net)')
assert expected_stdout in package_manager_detect.stdout assert expected_stdout in distro_check.stdout
expected_stdout = tick_box + (' Remi\'s RPM repository has ' expected_stdout = tick_box + (' Remi\'s RPM repository has '
'been enabled for PHP7') 'been enabled for PHP7')
assert expected_stdout in package_manager_detect.stdout assert expected_stdout in distro_check.stdout
remi_package = host.package('remi-release') remi_package = Pihole.package('remi-release')
assert remi_package.is_installed assert remi_package.is_installed
updated_php_package = host.package('php') updated_php_package = Pihole.package('php')
updated_php_version = updated_php_package.version.split('.')[0] updated_php_version = updated_php_package.version.split('.')[0]
assert int(updated_php_version) == 7 assert int(updated_php_version) == 7
@@ -5,7 +5,7 @@ from .conftest import (
)
-def mock_selinux_config(state, host):
+def mock_selinux_config(state, Pihole):
'''
Creates a mock SELinux config file with expected content
'''
@@ -13,20 +13,20 @@ def mock_selinux_config(state, host):
valid_states = ['enforcing', 'permissive', 'disabled']
assert state in valid_states
# getenforce returns the running state of SELinux
-mock_command('getenforce', {'*': (state.capitalize(), '0')}, host)
+mock_command('getenforce', {'*': (state.capitalize(), '0')}, Pihole)
# create mock configuration with desired content
-host.run('''
+Pihole.run('''
mkdir /etc/selinux
echo "SELINUX={state}" > /etc/selinux/config
'''.format(state=state.lower()))
-def test_selinux_enforcing_exit(host):
+def test_selinux_enforcing_exit(Pihole):
'''
confirms installer prompts to exit when SELinux is Enforcing by default
'''
-mock_selinux_config("enforcing", host)
+mock_selinux_config("enforcing", Pihole)
-check_selinux = host.run('''
+check_selinux = Pihole.run('''
source /opt/pihole/basic-install.sh
checkSelinux
''')
@@ -37,12 +37,12 @@ def test_selinux_enforcing_exit(host):
assert check_selinux.rc == 1
-def test_selinux_permissive(host):
+def test_selinux_permissive(Pihole):
'''
confirms installer continues when SELinux is Permissive
'''
-mock_selinux_config("permissive", host)
+mock_selinux_config("permissive", Pihole)
-check_selinux = host.run('''
+check_selinux = Pihole.run('''
source /opt/pihole/basic-install.sh
checkSelinux
''')
@@ -51,12 +51,12 @@ def test_selinux_permissive(host):
assert check_selinux.rc == 0
-def test_selinux_disabled(host):
+def test_selinux_disabled(Pihole):
'''
confirms installer continues when SELinux is Disabled
'''
-mock_selinux_config("disabled", host)
+mock_selinux_config("disabled", Pihole)
-check_selinux = host.run('''
+check_selinux = Pihole.run('''
source /opt/pihole/basic-install.sh
checkSelinux
''')
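The SELinux tests above rely on two things working together: the stubbed getenforce binary and the generated /etc/selinux/config. A quick consistency check of that fixture could look like the sketch below; this is a hypothetical companion test, not part of the repository's suite, and it assumes the `host` fixture spelling and testinfra's host.run() API.

# Hypothetical companion test -- illustrative only.
def test_selinux_mock_is_consistent(host):
    mock_selinux_config("permissive", host)
    # the stubbed binary reports the requested state ...
    assert host.run('getenforce').stdout.strip() == 'Permissive'
    # ... and the generated config file records the same state
    assert 'SELINUX=permissive' in host.run('cat /etc/selinux/config').stdout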


@@ -1,16 +1,15 @@
-def test_epel_and_remi_not_installed_fedora(host):
+def test_epel_and_remi_not_installed_fedora(Pihole):
'''
confirms installer does not attempt to install EPEL/REMI repositories
on Fedora
'''
-package_manager_detect = host.run('''
+distro_check = Pihole.run('''
source /opt/pihole/basic-install.sh
-package_manager_detect
+distro_check
-select_rpm_php
''')
-assert package_manager_detect.stdout == ''
+assert distro_check.stdout == ''
-epel_package = host.package('epel-release')
+epel_package = Pihole.package('epel-release')
assert not epel_package.is_installed
-remi_package = host.package('remi-release')
+remi_package = Pihole.package('remi-release')
assert not remi_package.is_installed


@@ -1,8 +1,8 @@
[tox]
-envlist = py38
+envlist = py37
[testenv]
whitelist_externals = docker
deps = -rrequirements.txt
commands = docker build -f _centos_7.Dockerfile -t pytest_pihole:test_container ../
-    pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py ./test_centos_fedora_common_support.py ./test_centos_common_support.py ./test_centos_7_support.py
+    pytest {posargs:-vv -n auto} ./test_automated_install.py ./test_centos_fedora_common_support.py ./test_centos_common_support.py ./test_centos_7_support.py
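The commands above are what tox runs for the CentOS 7 environment: build the distro image from its Dockerfile, then run the selected test modules against it. Outside of tox, roughly the same steps can be reproduced from the test/ directory with a small driver like the sketch below; this is an illustration only, using the file list from the "+" side, and it assumes Docker plus the Python test requirements (pytest, pytest-xdist) are already installed.

# Rough local equivalent of the tox 'commands' lines above (illustrative only).
import subprocess

subprocess.run(
    ["docker", "build", "-f", "_centos_7.Dockerfile",
     "-t", "pytest_pihole:test_container", "../"],
    check=True,
)
subprocess.run(
    ["pytest", "-vv", "-n", "auto",
     "./test_automated_install.py",
     "./test_centos_fedora_common_support.py",
     "./test_centos_common_support.py",
     "./test_centos_7_support.py"],
    check=True,
)

The remaining tox files below follow the same pattern, differing only in the Dockerfile and the list of test modules passed to pytest.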


@@ -1,8 +1,8 @@
[tox]
-envlist = py38
+envlist = py37
[testenv]
whitelist_externals = docker
deps = -rrequirements.txt
commands = docker build -f _centos_8.Dockerfile -t pytest_pihole:test_container ../
-    pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py ./test_centos_fedora_common_support.py ./test_centos_common_support.py ./test_centos_8_support.py
+    pytest {posargs:-vv -n auto} ./test_automated_install.py ./test_centos_fedora_common_support.py ./test_centos_common_support.py ./test_centos_8_support.py


@@ -1,8 +1,8 @@
[tox]
-envlist = py38
+envlist = py37
[testenv]
whitelist_externals = docker
deps = -rrequirements.txt
commands = docker build -f _debian_10.Dockerfile -t pytest_pihole:test_container ../
-    pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py
+    pytest {posargs:-vv -n auto} ./test_automated_install.py


@@ -1,8 +0,0 @@
[tox]
envlist = py38
[testenv]
whitelist_externals = docker
deps = -rrequirements.txt
commands = docker build -f _debian_11.Dockerfile -t pytest_pihole:test_container ../
pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py


@@ -1,8 +1,8 @@
[tox]
-envlist = py38
+envlist = py37
[testenv]
whitelist_externals = docker
deps = -rrequirements.txt
commands = docker build -f _debian_9.Dockerfile -t pytest_pihole:test_container ../
-    pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py
+    pytest {posargs:-vv -n auto} ./test_automated_install.py

test/tox.fedora_32.ini (new file, 8 lines)

@@ -0,0 +1,8 @@
[tox]
envlist = py37
[testenv]
whitelist_externals = docker
deps = -rrequirements.txt
commands = docker build -f _fedora_32.Dockerfile -t pytest_pihole:test_container ../
pytest {posargs:-vv -n auto} ./test_automated_install.py ./test_centos_fedora_common_support.py ./test_fedora_support.py


@@ -1,8 +1,8 @@
[tox]
-envlist = py38
+envlist = py37
[testenv]
whitelist_externals = docker
deps = -rrequirements.txt
commands = docker build -f _fedora_33.Dockerfile -t pytest_pihole:test_container ../
-    pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py ./test_centos_fedora_common_support.py ./test_fedora_support.py
+    pytest {posargs:-vv -n auto} ./test_automated_install.py ./test_centos_fedora_common_support.py ./test_fedora_support.py


@@ -1,8 +0,0 @@
[tox]
envlist = py38
[testenv]
whitelist_externals = docker
deps = -rrequirements.txt
commands = docker build -f _fedora_34.Dockerfile -t pytest_pihole:test_container ../
pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py ./test_centos_fedora_common_support.py ./test_fedora_support.py


@@ -1,8 +1,8 @@
[tox]
-envlist = py38
+envlist = py37
[testenv]
whitelist_externals = docker
deps = -rrequirements.txt
commands = docker build -f _ubuntu_16.Dockerfile -t pytest_pihole:test_container ../
-    pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py
+    pytest {posargs:-vv -n auto} ./test_automated_install.py


@@ -1,8 +1,8 @@
[tox]
-envlist = py38
+envlist = py37
[testenv]
whitelist_externals = docker
deps = -rrequirements.txt
commands = docker build -f _ubuntu_18.Dockerfile -t pytest_pihole:test_container ../
-    pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py
+    pytest {posargs:-vv -n auto} ./test_automated_install.py


@@ -1,8 +1,8 @@
[tox]
-envlist = py38
+envlist = py37
[testenv]
whitelist_externals = docker
deps = -rrequirements.txt
commands = docker build -f _ubuntu_20.Dockerfile -t pytest_pihole:test_container ../
-    pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py
+    pytest {posargs:-vv -n auto} ./test_automated_install.py


@@ -1,8 +0,0 @@
[tox]
envlist = py38
[testenv]
whitelist_externals = docker
deps = -rrequirements.txt
commands = docker build -f _ubuntu_21.Dockerfile -t pytest_pihole:test_container ../
pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py