Compare commits
239 Commits
v5.4...development
9356d7bbb1 … b710e107d6 (first and last of the 239 commits listed)
.github/dependabot.yml (vendored, new file, 10 lines)
@@ -0,0 +1,10 @@
version: 2
updates:
  - package-ecosystem: github-actions
    directory: "/"
    schedule:
      interval: weekly
      day: saturday
      time: "10:00"
    open-pull-requests-limit: 10
    target-branch: developement
.github/release.yml (vendored, new file, 7 lines)
@@ -0,0 +1,7 @@
changelog:
  exclude:
    labels:
      - internal
    authors:
      - dependabot
      - github-actions
.github/workflows/codeql-analysis.yml (vendored, new file, 40 lines)
@@ -0,0 +1,40 @@
name: "CodeQL"

on:
  push:
    branches:
      - master
      - development
  pull_request:
    branches:
      - master
      - development
  schedule:
    - cron: '32 11 * * 6'

jobs:
  analyze:
    name: Analyze
    runs-on: ubuntu-latest

    permissions:
      actions: read
      contents: read
      security-events: write

    steps:
    -
      name: Checkout repository
      uses: actions/checkout@v2
    # Initializes the CodeQL tools for scanning.
    -
      name: Initialize CodeQL
      uses: github/codeql-action/init@v1
      with:
        languages: 'python'
    -
      name: Autobuild
      uses: github/codeql-action/autobuild@v1
    -
      name: Perform CodeQL Analysis
      uses: github/codeql-action/analyze@v1
.github/workflows/stale.yml (vendored, new file, 25 lines)
@@ -0,0 +1,25 @@
name: Mark stale issues

on:
  schedule:
    - cron: '0 * * * *'
  workflow_dispatch:

jobs:
  stale:

    runs-on: ubuntu-latest
    permissions:
      issues: write

    steps:
    - uses: actions/stale@v4
      with:
        repo-token: ${{ secrets.GITHUB_TOKEN }}
        days-before-stale: 30
        days-before-close: 5
        stale-issue-message: 'This issue is stale because it has been open 30 days with no activity. Please comment or update this issue or it will be closed in 5 days.'
        stale-issue-label: 'stale'
        exempt-issue-labels: 'Internal, Fixed in next release, Bug: Confirmed, Documentation Needed'
        exempt-all-issue-assignees: true
        operations-per-run: 300
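Because the workflow above also declares a `workflow_dispatch` trigger, it can be started by hand rather than waiting for the hourly cron. A minimal sketch, assuming the GitHub CLI (`gh`) is installed and authenticated against the repository:

```bash
# Trigger the stale-issue workflow manually (workflow_dispatch is defined above).
gh workflow run stale.yml

# Optionally watch the most recent run of that workflow.
gh run list --workflow=stale.yml --limit 1
```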
.github/workflows/sync-back-to-dev.yml (vendored, new file, 27 lines)
@@ -0,0 +1,27 @@
name: Sync Back to Development

on:
  push:
    branches:
      - master

jobs:
  sync-branches:
    runs-on: ubuntu-latest
    name: Syncing branches
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Opening pull request
        id: pull
        uses: tretuna/sync-branches@1.4.0
        with:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          FROM_BRANCH: 'master'
          TO_BRANCH: 'development'
      - name: Label the pull request to ignore for release note generation
        uses: actions-ecosystem/action-add-labels@v1
        with:
          labels: internal
          repo: ${{ github.repository }}
          number: ${{ steps.pull.outputs.PULL_REQUEST_NUMBER }}
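The last step labels the sync pull request `internal`, which is the same label that `.github/release.yml` above excludes from generated release notes. If the label ever needs to be applied or checked by hand, a sketch with the GitHub CLI (the PR number is only a placeholder):

```bash
# Add the "internal" label manually; 1234 is a placeholder PR number.
gh pr edit 1234 --add-label internal

# List pull requests that will be excluded from release note generation.
gh pr list --label internal --state all
```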
.github/workflows/test.yml (vendored, 37 lines changed)
@@ -5,21 +5,44 @@ on:
types: [opened, synchronize, reopened, ready_for_review]

jobs:
smoke-test:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
-
name: Checkout repository
uses: actions/checkout@v2
-
name: Run Smoke Tests
run: |
# Ensure scripts in repository are executable
IFS=$'\n';
for f in $(find . -name '*.sh'); do if [[ ! -x $f ]]; then echo "$f is not executable" && FAIL=1; fi ;done
unset IFS;
# If FAIL is 1 then we fail.
[[ $FAIL == 1 ]] && exit 1 || echo "Smoke Tests Passed"

distro-test:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
needs: smoke-test
strategy:
matrix:
distro: [debian_9, debian_10, debian_11, ubuntu_16, ubuntu_18, ubuntu_20, ubuntu_21, centos_7, centos_8, fedora_32, fedora_33]
distro: [debian_9, debian_10, debian_11, ubuntu_16, ubuntu_18, ubuntu_20, ubuntu_21, centos_7, centos_8, fedora_33, fedora_34]
env:
DISTRO: ${{matrix.distro}}
steps:
- uses: actions/checkout@v1
- name: Set up Python 3.7
uses: actions/setup-python@v2
-
name: Checkout repository
uses: actions/checkout@v2
-
name: Set up Python 3.8
uses: actions/setup-python@v3
with:
python-version: 3.7
- name: Install dependencies
python-version: 3.8
-
name: Install dependencies
run: pip install -r test/requirements.txt
- name: Test with tox
-
name: Test with tox
run: tox -c test/tox.${DISTRO}.ini
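The smoke test above only verifies that every `*.sh` file in the repository carries the executable bit. Roughly the same check can be run locally before pushing; this sketch mirrors the workflow's loop:

```bash
#!/usr/bin/env bash
# Fail if any shell script in the working tree is not executable.
FAIL=0
while IFS= read -r f; do
    if [[ ! -x "${f}" ]]; then
        echo "${f} is not executable"
        FAIL=1
    fi
done < <(find . -name '*.sh')
[[ ${FAIL} -eq 1 ]] && exit 1
echo "Smoke Tests Passed"
```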
.gitignore (vendored, 68 lines changed)
@@ -7,70 +7,6 @@ __pycache__
|
||||
.tox
|
||||
.eggs
|
||||
*.egg-info
|
||||
|
||||
|
||||
# Created by https://www.gitignore.io/api/jetbrains+iml
|
||||
|
||||
### JetBrains+iml ###
|
||||
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm
|
||||
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
|
||||
|
||||
# All idea files, with exceptions
|
||||
.idea
|
||||
!.idea/codeStyles/*
|
||||
!.idea/codeStyleSettings.xml
|
||||
|
||||
|
||||
# Sensitive or high-churn files:
|
||||
.idea/**/dataSources/
|
||||
.idea/**/dataSources.ids
|
||||
.idea/**/dataSources.xml
|
||||
.idea/**/dataSources.local.xml
|
||||
.idea/**/sqlDataSources.xml
|
||||
.idea/**/dynamic.xml
|
||||
.idea/**/uiDesigner.xml
|
||||
|
||||
# Gradle:
|
||||
.idea/**/gradle.xml
|
||||
.idea/**/libraries
|
||||
|
||||
# CMake
|
||||
cmake-build-debug/
|
||||
|
||||
# Mongo Explorer plugin:
|
||||
.idea/**/mongoSettings.xml
|
||||
|
||||
## File-based project format:
|
||||
*.iws
|
||||
|
||||
## Plugin-specific files:
|
||||
|
||||
# IntelliJ
|
||||
/out/
|
||||
|
||||
# mpeltonen/sbt-idea plugin
|
||||
.idea_modules/
|
||||
|
||||
# JIRA plugin
|
||||
atlassian-ide-plugin.xml
|
||||
|
||||
# Cursive Clojure plugin
|
||||
.idea/replstate.xml
|
||||
|
||||
# Ruby plugin and RubyMine
|
||||
/.rakeTasks
|
||||
|
||||
# Crashlytics plugin (for Android Studio and IntelliJ)
|
||||
com_crashlytics_export_strings.xml
|
||||
crashlytics.properties
|
||||
crashlytics-build.properties
|
||||
fabric.properties
|
||||
|
||||
### JetBrains+iml Patch ###
|
||||
# Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-249601023
|
||||
|
||||
.idea/
|
||||
*.iml
|
||||
.idea/misc.xml
|
||||
*.ipr
|
||||
|
||||
# End of https://www.gitignore.io/api/jetbrains+iml
|
||||
.vscode/
|
||||
|
.idea/codeStyleSettings.xml (generated, 25 lines changed)
@@ -1,25 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project version="4">
|
||||
<component name="ProjectCodeStyleSettingsManager">
|
||||
<option name="PER_PROJECT_SETTINGS">
|
||||
<value>
|
||||
<option name="OTHER_INDENT_OPTIONS">
|
||||
<value>
|
||||
<option name="INDENT_SIZE" value="2" />
|
||||
<option name="CONTINUATION_INDENT_SIZE" value="8" />
|
||||
<option name="TAB_SIZE" value="2" />
|
||||
<option name="USE_TAB_CHARACTER" value="false" />
|
||||
<option name="SMART_TABS" value="false" />
|
||||
<option name="LABEL_INDENT_SIZE" value="0" />
|
||||
<option name="LABEL_INDENT_ABSOLUTE" value="false" />
|
||||
<option name="USE_RELATIVE_INDENTS" value="false" />
|
||||
</value>
|
||||
</option>
|
||||
<MarkdownNavigatorCodeStyleSettings>
|
||||
<option name="RIGHT_MARGIN" value="72" />
|
||||
</MarkdownNavigatorCodeStyleSettings>
|
||||
</value>
|
||||
</option>
|
||||
<option name="USE_PER_PROJECT_SETTINGS" value="true" />
|
||||
</component>
|
||||
</project>
|
.idea/codeStyles/Project.xml (generated, 7 lines changed)
@@ -1,7 +0,0 @@
|
||||
<component name="ProjectCodeStyleConfiguration">
|
||||
<code_scheme name="Project" version="173">
|
||||
<MarkdownNavigatorCodeStyleSettings>
|
||||
<option name="RIGHT_MARGIN" value="72" />
|
||||
</MarkdownNavigatorCodeStyleSettings>
|
||||
</code_scheme>
|
||||
</component>
|
.idea/codeStyles/codeStyleConfig.xml (generated, 5 lines changed)
@@ -1,5 +0,0 @@
|
||||
<component name="ProjectCodeStyleConfiguration">
|
||||
<state>
|
||||
<option name="USE_PER_PROJECT_SETTINGS" value="true" />
|
||||
</state>
|
||||
</component>
|
CONTRIBUTING.md (107 lines changed)
@@ -2,111 +2,6 @@
|
||||
|
||||
Please read and understand the contribution guide before creating an issue or pull request.
|
||||
|
||||
## Etiquette
|
||||
The guide can be found here: [https://docs.pi-hole.net/guides/github/contributing/](https://docs.pi-hole.net/guides/github/contributing/)
|
||||
|
||||
- Our goal for Pi-hole is **stability before features**. This means we focus on squashing critical bugs before adding new features. Often, we can do both in tandem, but bugs will take priority over a new feature.
|
||||
- Pi-hole is open source and [powered by donations](https://pi-hole.net/donate/), and as such, we give our **free time** to build, maintain, and **provide user support** for this project. It would be extremely unfair for us to suffer abuse or anger for our hard work, so please take a moment to consider that.
|
||||
- Please be considerate towards the developers and other users when raising issues or presenting pull requests.
|
||||
- Respect our decision(s), and do not be upset or abusive if your submission is not used.
|
||||
|
||||
## Viability
|
||||
|
||||
When requesting or submitting new features, first consider whether it might be useful to others. Open source projects are used by many people, who may have entirely different needs to your own. Think about whether or not your feature is likely to be used by other users of the project.
|
||||
|
||||
## Procedure
|
||||
|
||||
**Before filing an issue:**
|
||||
|
||||
- Attempt to replicate and **document** the problem, to ensure that it wasn't a coincidental incident.
|
||||
- Check to make sure your feature suggestion isn't already present within the project.
|
||||
- Check the pull requests tab to ensure that the bug doesn't have a fix in progress.
|
||||
- Check the pull requests tab to ensure that the feature isn't already in progress.
|
||||
|
||||
**Before submitting a pull request:**
|
||||
|
||||
- Check the codebase to ensure that your feature doesn't already exist.
|
||||
- Check the pull requests to ensure that another person hasn't already submitted the feature or fix.
|
||||
- Read and understand the [DCO guidelines](https://docs.pi-hole.net/guides/github/contributing/) for the project.
|
||||
|
||||
## Technical Requirements
|
||||
|
||||
- Submit Pull Requests to the **development branch only**.
|
||||
- Before Submitting your Pull Request, merge `development` with your new branch and fix any conflicts. (Make sure you don't break anything in development!)
|
||||
- Please use the [Google Style Guide for Shell](https://google.github.io/styleguide/shell.xml) for your code submission styles.
|
||||
- Commit Unix line endings.
|
||||
- Please use the Pi-hole brand: **Pi-hole** (Take a special look at the capitalized 'P' and a low 'h' with a hyphen)
|
||||
- (Optional fun) keep to the theme of Star Trek/black holes/gravity.
|
||||
|
||||
## Forking and Cloning from GitHub to GitHub
|
||||
|
||||
1. Fork <https://github.com/pi-hole/pi-hole/> to a repo under a namespace you control, or have permission to use, for example: `https://github.com/<your_namespace>/<your_repo_name>/`. You can do this from the github.com website.
|
||||
2. Clone `https://github.com/<your_namespace>/<your_repo_name>/` with the tool of you choice.
|
||||
3. To keep your fork in sync with our repo, add an upstream remote for pi-hole/pi-hole to your repo.
|
||||
|
||||
```bash
|
||||
git remote add upstream https://github.com/pi-hole/pi-hole.git
|
||||
```
|
||||
|
||||
4. Checkout the `development` branch from your fork `https://github.com/<your_namespace>/<your_repo_name>/`.
|
||||
5. Create a topic/branch, based on the `development` branch code. *Bonus fun to keep to the theme of Star Trek/black holes/gravity.*
|
||||
6. Make your changes and commit to your topic branch in your repo.
|
||||
7. Rebase your commits and squash any insignificant commits. See the notes below for an example.
|
||||
8. Merge `development` your branch and fix any conflicts.
|
||||
9. Open a Pull Request to merge your topic branch into our repo's `development` branch.
|
||||
|
||||
- Keep in mind the technical requirements from above.
|
||||
|
||||
## Forking and Cloning from GitHub to other code hosting sites
|
||||
|
||||
- Forking is a GitHub concept and cannot be done from GitHub to other git-based code hosting sites. However, those sites may be able to mirror a GitHub repo.
|
||||
|
||||
1. To contribute from another code hosting site, you must first complete the steps above to fork our repo to a GitHub namespace you have permission to use, for example: `https://github.com/<your_namespace>/<your_repo_name>/`.
|
||||
2. Create a repo in your code hosting site, for example: `https://gitlab.com/<your_namespace>/<your_repo_name>/`
|
||||
3. Follow the instructions from your code hosting site to create a mirror between `https://github.com/<your_namespace>/<your_repo_name>/` and `https://gitlab.com/<your_namespace>/<your_repo_name>/`.
|
||||
4. When you are ready to create a Pull Request (PR), follow the steps `(starting at step #6)` from [Forking and Cloning from GitHub to GitHub](#forking-and-cloning-from-github-to-github) and create the PR from `https://github.com/<your_namespace>/<your_repo_name>/`.
|
||||
|
||||
## Notes for squashing commits with rebase
|
||||
|
||||
- To rebase your commits and squash previous commits, you can use:
|
||||
|
||||
```bash
|
||||
git rebase -i your_topic_branch~(number of commits to combine)
|
||||
```
|
||||
|
||||
- For more details visit [gitready.com](http://gitready.com/advanced/2009/02/10/squashing-commits-with-rebase.html)
|
||||
|
||||
1. The following would combine the last four commits in the branch `mytopic`.
|
||||
|
||||
```bash
|
||||
git rebase -i mytopic~4
|
||||
```
|
||||
|
||||
2. An editor window opens with the most recent commits indicated: (edit the commands to the left of the commit ID)
|
||||
|
||||
```gitattributes
|
||||
pick 9dff55b2 existing commit comments
|
||||
squash ebb1a730 existing commit comments
|
||||
squash 07cc5b50 existing commit comments
|
||||
reword 9dff55b2 existing commit comments
|
||||
```
|
||||
|
||||
3. Save and close the editor. The next editor window opens: (edit the new commit message). *If you select reword for a commit, an additional editor window will open for you to edit the comment.*
|
||||
|
||||
```bash
|
||||
new commit comments
|
||||
Signed-off-by: yourname <your email address>
|
||||
```
|
||||
|
||||
4. Save and close the editor for the rebase process to execute. The terminal output should say something like the following:
|
||||
|
||||
```bash
|
||||
Successfully rebased and updated refs/heads/mytopic.
|
||||
```
|
||||
|
||||
5. Once you have a successful rebase, and before you sync your local clone, you have to force push origin to update your repo:
|
||||
|
||||
```bash
|
||||
git push -f origin
|
||||
```
|
||||
|
||||
6. Continue on from step #7 from [Forking and Cloning from GitHub to GitHub](#forking-and-cloning-from-github-to-github)
|
||||
|
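Condensed into one runnable sequence, the fork-and-rebase flow described above looks roughly like this; the branch name is only an example and the rebase target is one reasonable choice:

```bash
# One possible end-to-end flow for a topic branch (branch name is illustrative).
git remote add upstream https://github.com/pi-hole/pi-hole.git
git fetch upstream
git checkout -b my-topic-branch upstream/development

# ...edit files, then commit with -s so the Signed-off-by line is added...

git rebase -i upstream/development   # squash/reword commits as shown above
git push -f origin my-topic-branch
```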
README.md (34 lines changed)
@@ -11,9 +11,9 @@
|
||||
</p>
|
||||
<!-- markdownlint-enable MD033 -->
|
||||
|
||||
The Pi-hole® is a [DNS sinkhole](https://en.wikipedia.org/wiki/DNS_Sinkhole) that protects your devices from unwanted content, without installing any client-side software.
|
||||
The Pi-hole® is a [DNS sinkhole](https://en.wikipedia.org/wiki/DNS_Sinkhole) that protects your devices from unwanted content without installing any client-side software.
|
||||
|
||||
- **Easy-to-install**: our versatile installer walks you through the process, and takes less than ten minutes
|
||||
- **Easy-to-install**: our versatile installer walks you through the process and takes less than ten minutes
|
||||
- **Resolute**: content is blocked in _non-browser locations_, such as ad-laden mobile apps and smart TVs
|
||||
- **Responsive**: seamlessly speeds up the feel of everyday browsing by caching DNS queries
|
||||
- **Lightweight**: runs smoothly with [minimal hardware and software requirements](https://docs.pi-hole.net/main/prerequisites/)
|
||||
@@ -22,7 +22,7 @@ The Pi-hole® is a [DNS sinkhole](https://en.wikipedia.org/wiki/DNS_Sinkhole) th
|
||||
- **Versatile**: can optionally function as a [DHCP server](https://discourse.pi-hole.net/t/how-do-i-use-pi-holes-built-in-dhcp-server-and-why-would-i-want-to/3026), ensuring *all* your devices are protected automatically
|
||||
- **Scalable**: [capable of handling hundreds of millions of queries](https://pi-hole.net/2017/05/24/how-much-traffic-can-pi-hole-handle/) when installed on server-grade hardware
|
||||
- **Modern**: blocks ads over both IPv4 and IPv6
|
||||
- **Free**: open source software which helps ensure _you_ are the sole person in control of your privacy
|
||||
- **Free**: open source software that helps ensure _you_ are the sole person in control of your privacy
|
||||
|
||||
-----
|
||||
|
||||
@@ -57,21 +57,21 @@ Please refer to the [Pi-hole docker repo](https://github.com/pi-hole/docker-pi-h
|
||||
|
||||
Once the installer has been run, you will need to [configure your router to have **DHCP clients use Pi-hole as their DNS server**](https://discourse.pi-hole.net/t/how-do-i-configure-my-devices-to-use-pi-hole-as-their-dns-server/245) which ensures that all devices connecting to your network will have content blocked without any further intervention.
|
||||
|
||||
If your router does not support setting the DNS server, you can [use Pi-hole's built-in DHCP server](https://discourse.pi-hole.net/t/how-do-i-use-pi-holes-built-in-dhcp-server-and-why-would-i-want-to/3026); just be sure to disable DHCP on your router first (if it has that feature available).
|
||||
If your router does not support setting the DNS server, you can [use Pi-hole's built-in DHCP server](https://discourse.pi-hole.net/t/how-do-i-use-pi-holes-built-in-dhcp-server-and-why-would-i-want-to/3026); be sure to disable DHCP on your router first (if it has that feature available).
|
||||
|
||||
As a last resort, you can always manually set each device to use Pi-hole as their DNS server.
|
||||
As a last resort, you can manually set each device to use Pi-hole as their DNS server.
|
||||
|
||||
-----
|
||||
|
||||
## Pi-hole is free, but powered by your support
|
||||
## Pi-hole is free but powered by your support
|
||||
|
||||
There are many reoccurring costs involved with maintaining free, open source, and privacy-respecting software; expenses which [our volunteer developers](https://github.com/orgs/pi-hole/people) pitch in to cover out-of-pocket. This is just one example of how strongly we feel about our software, as well as the importance of keeping it maintained.
|
||||
There are many reoccurring costs involved with maintaining free, open source, and privacy-respecting software; expenses which [our volunteer developers](https://github.com/orgs/pi-hole/people) pitch in to cover out-of-pocket. This is just one example of how strongly we feel about our software and the importance of keeping it maintained.
|
||||
|
||||
Make no mistake: **your support is absolutely vital to help keep us innovating!**
|
||||
|
||||
### [Donations](https://pi-hole.net/donate)
|
||||
|
||||
Sending a donation using our Sponsor Button is **extremely helpful** in offsetting a portion of our monthly expenses and rewarding our dedicated development team:
|
||||
Donating using our Sponsor Button is **extremely helpful** in offsetting a portion of our monthly expenses:
|
||||
|
||||
### Alternative support
|
||||
|
||||
@@ -83,13 +83,13 @@ If you'd rather not donate (_which is okay!_), there are other ways you can help
|
||||
- [Digital Ocean](https://www.digitalocean.com/?refcode=344d234950e1) _affiliate link_
|
||||
- [Stickermule](https://www.stickermule.com/unlock?ref_id=9127301701&utm_medium=link&utm_source=invite) _earn a $10 credit after your first purchase_
|
||||
- [Amazon US](http://www.amazon.com/exec/obidos/redirect-home/pihole09-20) _affiliate link_
|
||||
- Spreading the word about our software, and how you have benefited from it
|
||||
- Spreading the word about our software and how you have benefited from it
|
||||
|
||||
### Contributing via GitHub
|
||||
|
||||
We welcome _everyone_ to contribute to issue reports, suggest new features, and create pull requests.
|
||||
|
||||
If you have something to add - anything from a typo through to a whole new feature, we're happy to check it out! Just make sure to fill out our template when submitting your request; the questions that it asks will help the volunteers quickly understand what you're aiming to achieve.
|
||||
If you have something to add - anything from a typo through to a whole new feature, we're happy to check it out! Just make sure to fill out our template when submitting your request; the questions it asks will help the volunteers quickly understand what you're aiming to achieve.
|
||||
|
||||
You'll find that the [install script](https://github.com/pi-hole/pi-hole/blob/master/automated%20install/basic-install.sh) and the [debug script](https://github.com/pi-hole/pi-hole/blob/master/advanced/Scripts/piholeDebug.sh) have an abundance of comments, which will help you better understand how Pi-hole works. They're also a valuable resource to those who want to learn how to write scripts or code a program! We encourage anyone who likes to tinker to read through it and submit a pull request for us to review.
|
||||
|
||||
@@ -97,9 +97,9 @@ You'll find that the [install script](https://github.com/pi-hole/pi-hole/blob/ma
|
||||
|
||||
## Getting in touch with us
|
||||
|
||||
While we are primarily reachable on our [Discourse User Forum](https://discourse.pi-hole.net/), we can also be found on a variety of social media outlets.
|
||||
While we are primarily reachable on our [Discourse User Forum](https://discourse.pi-hole.net/), we can also be found on various social media outlets.
|
||||
|
||||
**Please be sure to check the FAQ's** before starting a new discussion. Many user questions already have answers and can be solved without any additional assistance.
|
||||
**Please be sure to check the FAQs** before starting a new discussion, as we do not have the spare time to reply to every request for assistance.
|
||||
|
||||
- [Frequently Asked Questions](https://discourse.pi-hole.net/c/faqs)
|
||||
- [Feature Requests](https://discourse.pi-hole.net/c/feature-requests?order=votes)
|
||||
@@ -125,15 +125,15 @@ Some of the statistics you can integrate include:
|
||||
- Queries cached
|
||||
- Unique clients
|
||||
|
||||
The API can be accessed via [`telnet`](https://github.com/pi-hole/FTL), the Web (`admin/api.php`) and Command Line (`pihole -c -j`). You can find out [more details over here](https://discourse.pi-hole.net/t/pi-hole-api/1863).
|
||||
Access the API via [`telnet`](https://github.com/pi-hole/FTL), the Web (`admin/api.php`) and Command Line (`pihole -c -j`). You can find out [more details over here](https://discourse.pi-hole.net/t/pi-hole-api/1863).
|
||||
|
||||
### The Command Line Interface
|
||||
|
||||
The [pihole](https://docs.pi-hole.net/core/pihole-command/) command has all the functionality necessary to be able to fully administer the Pi-hole, without the need of the Web Interface. It's fast, user-friendly, and auditable by anyone with an understanding of `bash`.
|
||||
The [pihole](https://docs.pi-hole.net/core/pihole-command/) command has all the functionality necessary to fully administer the Pi-hole, without the need of the Web Interface. It's fast, user-friendly, and auditable by anyone with an understanding of `bash`.
|
||||
|
||||
Some notable features include:
|
||||
|
||||
- [Whitelisting, Blacklisting and Regex](https://docs.pi-hole.net/core/pihole-command/#whitelisting-blacklisting-and-regex)
|
||||
- [Whitelisting, Blacklisting, and Regex](https://docs.pi-hole.net/core/pihole-command/#whitelisting-blacklisting-and-regex)
|
||||
- [Debugging utility](https://docs.pi-hole.net/core/pihole-command/#debugger)
|
||||
- [Viewing the live log file](https://docs.pi-hole.net/core/pihole-command/#tail)
|
||||
- [Updating Ad Lists](https://docs.pi-hole.net/core/pihole-command/#gravity)
|
||||
@@ -149,7 +149,7 @@ This [optional dashboard](https://github.com/pi-hole/AdminLTE) allows you to vie
|
||||
|
||||
Some notable features include:
|
||||
|
||||
- Mobile friendly interface
|
||||
- Mobile-friendly interface
|
||||
- Password protection
|
||||
- Detailed graphs and doughnut charts
|
||||
- Top lists of domains and clients
|
||||
@@ -161,4 +161,4 @@ Some notable features include:
|
||||
There are several ways to [access the dashboard](https://discourse.pi-hole.net/t/how-do-i-access-pi-holes-dashboard-admin-interface/3168):
|
||||
|
||||
1. `http://pi.hole/admin/` (when using Pi-hole as your DNS server)
|
||||
2. `http://<IP_ADDPRESS_OF_YOUR_PI_HOLE>/admin/`
|
||||
2. `http://<IP_ADDRESS_OF_YOUR_PI_HOLE>/admin/`
|
||||
|
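The README changes above keep the same three ways of reading statistics: telnet, the web endpoint `admin/api.php`, and `pihole -c -j`. A small sketch of the latter two; the `summaryRaw` query parameter is an assumption, only `admin/api.php` itself is named in the text:

```bash
# Chronometer statistics as JSON, as referenced in the README.
pihole -c -j

# The same data over HTTP; the summaryRaw parameter is an assumed example.
curl -s "http://pi.hole/admin/api.php?summaryRaw"
```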
@@ -39,6 +39,4 @@ cache-size=@CACHE_SIZE@
|
||||
log-queries
|
||||
log-facility=/var/log/pihole.log
|
||||
|
||||
local-ttl=2
|
||||
|
||||
log-async
|
||||
|
@@ -25,11 +25,12 @@ server=/localhost/
|
||||
server=/invalid/
|
||||
|
||||
# The same RFC requests something similar for
|
||||
# 16.172.in-addr.arpa. 22.172.in-addr.arpa. 27.172.in-addr.arpa.
|
||||
# 17.172.in-addr.arpa. 30.172.in-addr.arpa. 28.172.in-addr.arpa.
|
||||
# 18.172.in-addr.arpa. 23.172.in-addr.arpa. 29.172.in-addr.arpa.
|
||||
# 19.172.in-addr.arpa. 24.172.in-addr.arpa. 31.172.in-addr.arpa.
|
||||
# 20.172.in-addr.arpa. 25.172.in-addr.arpa. 168.192.in-addr.arpa.
|
||||
# 10.in-addr.arpa. 21.172.in-addr.arpa. 27.172.in-addr.arpa.
|
||||
# 16.172.in-addr.arpa. 22.172.in-addr.arpa. 28.172.in-addr.arpa.
|
||||
# 17.172.in-addr.arpa. 23.172.in-addr.arpa. 29.172.in-addr.arpa.
|
||||
# 18.172.in-addr.arpa. 24.172.in-addr.arpa. 30.172.in-addr.arpa.
|
||||
# 19.172.in-addr.arpa. 25.172.in-addr.arpa. 31.172.in-addr.arpa.
|
||||
# 20.172.in-addr.arpa. 26.172.in-addr.arpa. 168.192.in-addr.arpa.
|
||||
# Pi-hole implements this via the dnsmasq option "bogus-priv" (see
|
||||
# 01-pihole.conf) because this also covers IPv6.
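A quick way to observe the `bogus-priv` behaviour this comment block describes: a reverse lookup for an unused RFC 1918 address should be answered locally with NXDOMAIN instead of being forwarded upstream. The address below is just an example, and `pi.hole` can be replaced with the Pi-hole's IP address:

```bash
# Expect "status: NXDOMAIN" in the reply header when bogus-priv is in effect.
dig -x 192.168.1.123 @pi.hole
```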
|
||||
|
||||
|
@@ -1,5 +1,5 @@
|
||||
# Determine if terminal is capable of showing colors
|
||||
if [[ -t 1 ]] && [[ $(tput colors) -ge 8 ]]; then
|
||||
if ([[ -t 1 ]] && [[ $(tput colors) -ge 8 ]]) || [[ "${WEBCALL}" ]]; then
|
||||
# Bold and underline may not show up on all clients
|
||||
# If something MUST be emphasized, use both
|
||||
COL_BOLD='[1m'
|
||||
|
@@ -329,8 +329,8 @@ get_sys_stats() {
|
||||
*) cpu_col="$COL_URG_RED";;
|
||||
esac
|
||||
|
||||
# $COL_NC$COL_DARK_GRAY is needed for $COL_URG_RED
|
||||
cpu_temp_str=" @ $cpu_col$cpu_temp$COL_NC$COL_DARK_GRAY"
|
||||
# $COL_NC$COL_DARK_GRAY is needed for $COL_URG_RED
|
||||
cpu_temp_str=" @ $cpu_col$cpu_temp$COL_NC$COL_DARK_GRAY"
|
||||
|
||||
elif [[ "$temp_unit" == "F" ]]; then
|
||||
cpu_temp=$(printf "%.0ff\\n" "$(calcFunc "($(< $temp_file) / 1000) * 9 / 5 + 32")")
|
||||
@@ -357,7 +357,7 @@ get_sys_stats() {
|
||||
ram_used="${ram_raw[1]}"
|
||||
ram_total="${ram_raw[2]}"
|
||||
|
||||
if [[ "$(pihole status web 2> /dev/null)" == "1" ]]; then
|
||||
if [[ "$(pihole status web 2> /dev/null)" -ge "1" ]]; then
|
||||
ph_status="${COL_LIGHT_GREEN}Active"
|
||||
else
|
||||
ph_status="${COL_LIGHT_RED}Offline"
|
||||
@@ -445,7 +445,7 @@ get_strings() {
|
||||
lan_info="Gateway: $net_gateway"
|
||||
dhcp_info="$leased_str$ph_dhcp_num of $ph_dhcp_max"
|
||||
|
||||
ads_info="$total_str$ads_blocked_today of $dns_queries_today"
|
||||
ads_info="$total_str$ads_blocked_today of $dns_queries_today"
|
||||
dns_info="$dns_count DNS servers"
|
||||
|
||||
[[ "$recent_blocked" == "0" ]] && recent_blocked="${COL_LIGHT_RED}FTL offline${COL_NC}"
|
||||
@@ -488,7 +488,7 @@ chronoFunc() {
|
||||
${COL_LIGHT_RED}Press Ctrl-C to exit${COL_NC}
|
||||
${COL_DARK_GRAY}$scr_line_str${COL_NC}"
|
||||
else
|
||||
echo -e "[0;1;31;91m|¯[0;1;33;93m¯[0;1;32;92m¯[0;1;32;92m(¯[0;1;36;96m)[0;1;34;94m_[0;1;35;95m|[0;1;33;93m¯[0;1;31;91m|_ [0;1;32;92m__[0;1;36;96m_|[0;1;31;91m¯[0;1;34;94m|[0;1;35;95m__[0;1;31;91m_[0m$phc_ver_str\\n[0;1;33;93m| ¯[0;1;32;92m_[0;1;36;96m/¯[0;1;34;94m|[0;1;35;95m_[0;1;31;91m| [0;1;33;93m' [0;1;32;92m\\/ [0;1;36;96m_ [0;1;34;94m\\ [0;1;35;95m/ [0;1;31;91m-[0;1;33;93m_)[0m$lte_ver_str\\n[0;1;32;92m|_[0;1;36;96m| [0;1;34;94m|_[0;1;35;95m| [0;1;33;93m|_[0;1;32;92m||[0;1;36;96m_\\[0;1;34;94m__[0;1;35;95m_/[0;1;31;91m_\\[0;1;33;93m__[0;1;32;92m_|[0m$ftl_ver_str\\n ${COL_DARK_GRAY}$scr_line_str${COL_NC}"
|
||||
echo -e "[0;1;31;91m|¯[0;1;33;93m¯[0;1;32;92m¯[0;1;32;92m(¯[0;1;36;96m)[0;1;34;94m_[0;1;35;95m|[0;1;33;93m¯[0;1;31;91m|_ [0;1;32;92m__[0;1;36;96m_|[0;1;31;91m¯[0;1;34;94m|[0;1;35;95m__[0;1;31;91m_[0m$phc_ver_str\\n[0;1;33;93m| ¯[0;1;32;92m_[0;1;36;96m/¯[0;1;34;94m|[0;1;35;95m_[0;1;31;91m| [0;1;33;93m' [0;1;32;92m\\/ [0;1;36;96m_ [0;1;34;94m\\ [0;1;35;95m/ [0;1;31;91m-[0;1;33;93m_)[0m$lte_ver_str\\n[0;1;32;92m|_[0;1;36;96m| [0;1;34;94m|_[0;1;35;95m| [0;1;33;93m|_[0;1;32;92m||[0;1;36;96m_\\[0;1;34;94m__[0;1;35;95m_/[0;1;31;91m_\\[0;1;33;93m__[0;1;32;92m_|[0m$ftl_ver_str\\n ${COL_DARK_GRAY}$scr_line_str${COL_NC}"
|
||||
fi
|
||||
|
||||
printFunc " Hostname: " "$sys_name" "$host_info"
|
||||
|
advanced/Scripts/database_migration/gravity-db.sh (34 lines changed, Normal file → Executable file)
@@ -19,13 +19,13 @@ upgrade_gravityDB(){
|
||||
auditFile="${piholeDir}/auditlog.list"
|
||||
|
||||
# Get database version
|
||||
version="$(sqlite3 "${database}" "SELECT \"value\" FROM \"info\" WHERE \"property\" = 'version';")"
|
||||
version="$(pihole-FTL sqlite3 "${database}" "SELECT \"value\" FROM \"info\" WHERE \"property\" = 'version';")"
|
||||
|
||||
if [[ "$version" == "1" ]]; then
|
||||
# This migration script upgrades the gravity.db file by
|
||||
# adding the domain_audit table
|
||||
echo -e " ${INFO} Upgrading gravity database from version 1 to 2"
|
||||
sqlite3 "${database}" < "${scriptPath}/1_to_2.sql"
|
||||
pihole-FTL sqlite3 "${database}" < "${scriptPath}/1_to_2.sql"
|
||||
version=2
|
||||
|
||||
# Store audit domains in database table
|
||||
@@ -40,28 +40,28 @@ upgrade_gravityDB(){
|
||||
# renaming the regex table to regex_blacklist, and
|
||||
# creating a new regex_whitelist table + corresponding linking table and views
|
||||
echo -e " ${INFO} Upgrading gravity database from version 2 to 3"
|
||||
sqlite3 "${database}" < "${scriptPath}/2_to_3.sql"
|
||||
pihole-FTL sqlite3 "${database}" < "${scriptPath}/2_to_3.sql"
|
||||
version=3
|
||||
fi
|
||||
if [[ "$version" == "3" ]]; then
|
||||
# This migration script unifies the formally separated domain
|
||||
# lists into a single table with a UNIQUE domain constraint
|
||||
echo -e " ${INFO} Upgrading gravity database from version 3 to 4"
|
||||
sqlite3 "${database}" < "${scriptPath}/3_to_4.sql"
|
||||
pihole-FTL sqlite3 "${database}" < "${scriptPath}/3_to_4.sql"
|
||||
version=4
|
||||
fi
|
||||
if [[ "$version" == "4" ]]; then
|
||||
# This migration script upgrades the gravity and list views
|
||||
# implementing necessary changes for per-client blocking
|
||||
echo -e " ${INFO} Upgrading gravity database from version 4 to 5"
|
||||
sqlite3 "${database}" < "${scriptPath}/4_to_5.sql"
|
||||
pihole-FTL sqlite3 "${database}" < "${scriptPath}/4_to_5.sql"
|
||||
version=5
|
||||
fi
|
||||
if [[ "$version" == "5" ]]; then
|
||||
# This migration script upgrades the adlist view
|
||||
# to return an ID used in gravity.sh
|
||||
echo -e " ${INFO} Upgrading gravity database from version 5 to 6"
|
||||
sqlite3 "${database}" < "${scriptPath}/5_to_6.sql"
|
||||
pihole-FTL sqlite3 "${database}" < "${scriptPath}/5_to_6.sql"
|
||||
version=6
|
||||
fi
|
||||
if [[ "$version" == "6" ]]; then
|
||||
@@ -69,7 +69,7 @@ upgrade_gravityDB(){
|
||||
# which is automatically associated to all clients not
|
||||
# having their own group assignments
|
||||
echo -e " ${INFO} Upgrading gravity database from version 6 to 7"
|
||||
sqlite3 "${database}" < "${scriptPath}/6_to_7.sql"
|
||||
pihole-FTL sqlite3 "${database}" < "${scriptPath}/6_to_7.sql"
|
||||
version=7
|
||||
fi
|
||||
if [[ "$version" == "7" ]]; then
|
||||
@@ -77,21 +77,21 @@ upgrade_gravityDB(){
|
||||
# to ensure uniqueness on the group name
|
||||
# We also add date_added and date_modified columns
|
||||
echo -e " ${INFO} Upgrading gravity database from version 7 to 8"
|
||||
sqlite3 "${database}" < "${scriptPath}/7_to_8.sql"
|
||||
pihole-FTL sqlite3 "${database}" < "${scriptPath}/7_to_8.sql"
|
||||
version=8
|
||||
fi
|
||||
if [[ "$version" == "8" ]]; then
|
||||
# This migration fixes some issues that were introduced
|
||||
# in the previous migration script.
|
||||
echo -e " ${INFO} Upgrading gravity database from version 8 to 9"
|
||||
sqlite3 "${database}" < "${scriptPath}/8_to_9.sql"
|
||||
pihole-FTL sqlite3 "${database}" < "${scriptPath}/8_to_9.sql"
|
||||
version=9
|
||||
fi
|
||||
if [[ "$version" == "9" ]]; then
|
||||
# This migration drops unused tables and creates triggers to remove
|
||||
# obsolete groups assignments when the linked items are deleted
|
||||
echo -e " ${INFO} Upgrading gravity database from version 9 to 10"
|
||||
sqlite3 "${database}" < "${scriptPath}/9_to_10.sql"
|
||||
pihole-FTL sqlite3 "${database}" < "${scriptPath}/9_to_10.sql"
|
||||
version=10
|
||||
fi
|
||||
if [[ "$version" == "10" ]]; then
|
||||
@@ -101,25 +101,31 @@ upgrade_gravityDB(){
|
||||
# to keep the copying process generic (needs the same columns in both the
|
||||
# source and the destination databases).
|
||||
echo -e " ${INFO} Upgrading gravity database from version 10 to 11"
|
||||
sqlite3 "${database}" < "${scriptPath}/10_to_11.sql"
|
||||
pihole-FTL sqlite3 "${database}" < "${scriptPath}/10_to_11.sql"
|
||||
version=11
|
||||
fi
|
||||
if [[ "$version" == "11" ]]; then
|
||||
# Rename group 0 from "Unassociated" to "Default"
|
||||
echo -e " ${INFO} Upgrading gravity database from version 11 to 12"
|
||||
sqlite3 "${database}" < "${scriptPath}/11_to_12.sql"
|
||||
pihole-FTL sqlite3 "${database}" < "${scriptPath}/11_to_12.sql"
|
||||
version=12
|
||||
fi
|
||||
if [[ "$version" == "12" ]]; then
|
||||
# Add column date_updated to adlist table
|
||||
echo -e " ${INFO} Upgrading gravity database from version 12 to 13"
|
||||
sqlite3 "${database}" < "${scriptPath}/12_to_13.sql"
|
||||
pihole-FTL sqlite3 "${database}" < "${scriptPath}/12_to_13.sql"
|
||||
version=13
|
||||
fi
|
||||
if [[ "$version" == "13" ]]; then
|
||||
# Add columns number and status to adlist table
|
||||
echo -e " ${INFO} Upgrading gravity database from version 13 to 14"
|
||||
sqlite3 "${database}" < "${scriptPath}/13_to_14.sql"
|
||||
pihole-FTL sqlite3 "${database}" < "${scriptPath}/13_to_14.sql"
|
||||
version=14
|
||||
fi
|
||||
if [[ "$version" == "14" ]]; then
|
||||
# Changes the vw_adlist created in 5_to_6
|
||||
echo -e " ${INFO} Upgrading gravity database from version 14 to 15"
|
||||
pihole-FTL sqlite3 "${database}" < "${scriptPath}/14_to_15.sql"
|
||||
version=15
|
||||
fi
|
||||
}
|
||||
|
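Every step in the migration chain above follows the same pattern: read the schema version from the `info` table, apply the next `N_to_N+1.sql` file through the `sqlite3` shell bundled in `pihole-FTL`, and bump the version. A minimal sketch of one such step; the paths are assumptions based on a default install (`/etc/pihole/gravity.db` for the database, `/etc/.pihole/...` for the checked-out scripts):

```bash
# Sketch of a single migration step; paths are assumed defaults, not taken from the diff.
database="/etc/pihole/gravity.db"
scriptPath="/etc/.pihole/advanced/Scripts/database_migration/gravity"

version="$(pihole-FTL sqlite3 "${database}" "SELECT value FROM info WHERE property = 'version';")"
if [[ "${version}" == "14" ]]; then
    echo "Upgrading gravity database from version 14 to 15"
    pihole-FTL sqlite3 "${database}" < "${scriptPath}/14_to_15.sql"
fi
```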
@@ -10,4 +10,4 @@ ALTER TABLE adlist ADD COLUMN status INTEGER NOT NULL DEFAULT 0;

UPDATE info SET value = 14 WHERE property = 'version';

COMMIT;
COMMIT;
advanced/Scripts/database_migration/gravity/14_to_15.sql (new file, 15 lines)
@@ -0,0 +1,15 @@
.timeout 30000

PRAGMA FOREIGN_KEYS=OFF;

BEGIN TRANSACTION;
DROP VIEW vw_adlist;

CREATE VIEW vw_adlist AS SELECT DISTINCT address, id
    FROM adlist
    WHERE enabled = 1
    ORDER BY id;

UPDATE info SET value = 15 WHERE property = 'version';

COMMIT;
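After the migration has run, the rebuilt view can be inspected directly. A small check, assuming the default database location:

```bash
# Confirm the schema version and peek at the recreated view (path is an assumed default).
pihole-FTL sqlite3 /etc/pihole/gravity.db "SELECT value FROM info WHERE property = 'version';"
pihole-FTL sqlite3 /etc/pihole/gravity.db "SELECT * FROM vw_adlist LIMIT 5;"
```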
@@ -16,14 +16,14 @@ GRAVITYDB="${piholeDir}/gravity.db"
|
||||
# Source pihole-FTL from install script
|
||||
pihole_FTL="${piholeDir}/pihole-FTL.conf"
|
||||
if [[ -f "${pihole_FTL}" ]]; then
|
||||
source "${pihole_FTL}"
|
||||
source "${pihole_FTL}"
|
||||
fi
|
||||
|
||||
# Set this only after sourcing pihole-FTL.conf as the gravity database path may
|
||||
# have changed
|
||||
gravityDBfile="${GRAVITYDB}"
|
||||
|
||||
reload=false
|
||||
noReloadRequested=false
|
||||
addmode=true
|
||||
verbose=true
|
||||
wildcard=false
|
||||
@@ -35,6 +35,7 @@ typeId=""
|
||||
comment=""
|
||||
declare -i domaincount
|
||||
domaincount=0
|
||||
reload=false
|
||||
|
||||
colfile="/opt/pihole/COL_TABLE"
|
||||
source ${colfile}
|
||||
@@ -90,7 +91,8 @@ Options:
|
||||
-q, --quiet Make output less verbose
|
||||
-h, --help Show this help dialog
|
||||
-l, --list Display all your ${listname}listed domains
|
||||
--nuke Removes all entries in a list"
|
||||
--nuke Removes all entries in a list
|
||||
--comment \"text\" Add a comment to the domain. If adding multiple domains the same comment will be used for all"
|
||||
|
||||
exit 0
|
||||
}
|
||||
@@ -132,7 +134,7 @@ ProcessDomainList() {
|
||||
else
|
||||
RemoveDomain "${dom}"
|
||||
fi
|
||||
done
|
||||
done
|
||||
}
|
||||
|
||||
AddDomain() {
|
||||
@@ -140,23 +142,23 @@ AddDomain() {
|
||||
domain="$1"
|
||||
|
||||
# Is the domain in the list we want to add it to?
|
||||
num="$(sqlite3 "${gravityDBfile}" "SELECT COUNT(*) FROM domainlist WHERE domain = '${domain}';")"
|
||||
num="$(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT COUNT(*) FROM domainlist WHERE domain = '${domain}';")"
|
||||
requestedListname="$(GetListnameFromTypeId "${typeId}")"
|
||||
|
||||
if [[ "${num}" -ne 0 ]]; then
|
||||
existingTypeId="$(sqlite3 "${gravityDBfile}" "SELECT type FROM domainlist WHERE domain = '${domain}';")"
|
||||
if [[ "${existingTypeId}" == "${typeId}" ]]; then
|
||||
if [[ "${verbose}" == true ]]; then
|
||||
echo -e " ${INFO} ${1} already exists in ${requestedListname}, no need to add!"
|
||||
existingTypeId="$(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT type FROM domainlist WHERE domain = '${domain}';")"
|
||||
if [[ "${existingTypeId}" == "${typeId}" ]]; then
|
||||
if [[ "${verbose}" == true ]]; then
|
||||
echo -e " ${INFO} ${1} already exists in ${requestedListname}, no need to add!"
|
||||
fi
|
||||
else
|
||||
existingListname="$(GetListnameFromTypeId "${existingTypeId}")"
|
||||
pihole-FTL sqlite3 "${gravityDBfile}" "UPDATE domainlist SET type = ${typeId} WHERE domain='${domain}';"
|
||||
if [[ "${verbose}" == true ]]; then
|
||||
echo -e " ${INFO} ${1} already exists in ${existingListname}, it has been moved to ${requestedListname}!"
|
||||
fi
|
||||
fi
|
||||
else
|
||||
existingListname="$(GetListnameFromTypeId "${existingTypeId}")"
|
||||
sqlite3 "${gravityDBfile}" "UPDATE domainlist SET type = ${typeId} WHERE domain='${domain}';"
|
||||
if [[ "${verbose}" == true ]]; then
|
||||
echo -e " ${INFO} ${1} already exists in ${existingListname}, it has been moved to ${requestedListname}!"
|
||||
fi
|
||||
fi
|
||||
return
|
||||
return
|
||||
fi
|
||||
|
||||
# Domain not found in the table, add it!
|
||||
@@ -167,10 +169,10 @@ AddDomain() {
|
||||
# Insert only the domain here. The enabled and date_added fields will be filled
|
||||
# with their default values (enabled = true, date_added = current timestamp)
|
||||
if [[ -z "${comment}" ]]; then
|
||||
sqlite3 "${gravityDBfile}" "INSERT INTO domainlist (domain,type) VALUES ('${domain}',${typeId});"
|
||||
pihole-FTL sqlite3 "${gravityDBfile}" "INSERT INTO domainlist (domain,type) VALUES ('${domain}',${typeId});"
|
||||
else
|
||||
# also add comment when variable has been set through the "--comment" option
|
||||
sqlite3 "${gravityDBfile}" "INSERT INTO domainlist (domain,type,comment) VALUES ('${domain}',${typeId},'${comment}');"
|
||||
pihole-FTL sqlite3 "${gravityDBfile}" "INSERT INTO domainlist (domain,type,comment) VALUES ('${domain}',${typeId},'${comment}');"
|
||||
fi
|
||||
}
|
||||
|
||||
@@ -179,15 +181,15 @@ RemoveDomain() {
|
||||
domain="$1"
|
||||
|
||||
# Is the domain in the list we want to remove it from?
|
||||
num="$(sqlite3 "${gravityDBfile}" "SELECT COUNT(*) FROM domainlist WHERE domain = '${domain}' AND type = ${typeId};")"
|
||||
num="$(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT COUNT(*) FROM domainlist WHERE domain = '${domain}' AND type = ${typeId};")"
|
||||
|
||||
requestedListname="$(GetListnameFromTypeId "${typeId}")"
|
||||
|
||||
if [[ "${num}" -eq 0 ]]; then
|
||||
if [[ "${verbose}" == true ]]; then
|
||||
echo -e " ${INFO} ${domain} does not exist in ${requestedListname}, no need to remove!"
|
||||
fi
|
||||
return
|
||||
if [[ "${verbose}" == true ]]; then
|
||||
echo -e " ${INFO} ${domain} does not exist in ${requestedListname}, no need to remove!"
|
||||
fi
|
||||
return
|
||||
fi
|
||||
|
||||
# Domain found in the table, remove it!
|
||||
@@ -196,14 +198,14 @@ RemoveDomain() {
|
||||
fi
|
||||
reload=true
|
||||
# Remove it from the current list
|
||||
sqlite3 "${gravityDBfile}" "DELETE FROM domainlist WHERE domain = '${domain}' AND type = ${typeId};"
|
||||
pihole-FTL sqlite3 "${gravityDBfile}" "DELETE FROM domainlist WHERE domain = '${domain}' AND type = ${typeId};"
|
||||
}
|
||||
|
||||
Displaylist() {
|
||||
local count num_pipes domain enabled status nicedate requestedListname
|
||||
|
||||
requestedListname="$(GetListnameFromTypeId "${typeId}")"
|
||||
data="$(sqlite3 "${gravityDBfile}" "SELECT domain,enabled,date_modified FROM domainlist WHERE type = ${typeId};" 2> /dev/null)"
|
||||
data="$(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT domain,enabled,date_modified FROM domainlist WHERE type = ${typeId};" 2> /dev/null)"
|
||||
|
||||
if [[ -z $data ]]; then
|
||||
echo -e "Not showing empty list"
|
||||
@@ -241,22 +243,22 @@ Displaylist() {
|
||||
}
|
||||
|
||||
NukeList() {
|
||||
count=$(sqlite3 "${gravityDBfile}" "SELECT COUNT(1) FROM domainlist WHERE type = ${typeId};")
|
||||
listname="$(GetListnameFromTypeId "${typeId}")"
|
||||
count=$(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT COUNT(1) FROM domainlist WHERE type = ${typeId};")
|
||||
listname="$(GetListnameFromTypeId "${typeId}")"
|
||||
if [ "$count" -gt 0 ];then
|
||||
sqlite3 "${gravityDBfile}" "DELETE FROM domainlist WHERE type = ${typeId};"
|
||||
pihole-FTL sqlite3 "${gravityDBfile}" "DELETE FROM domainlist WHERE type = ${typeId};"
|
||||
echo " ${TICK} Removed ${count} domain(s) from the ${listname}"
|
||||
else
|
||||
echo " ${INFO} ${listname} already empty. Nothing to do!"
|
||||
fi
|
||||
fi
|
||||
exit 0;
|
||||
}
|
||||
|
||||
GetComment() {
|
||||
comment="$1"
|
||||
if [[ "${comment}" =~ [^a-zA-Z0-9_\#:/\.,\ -] ]]; then
|
||||
echo " ${CROSS} Found invalid characters in domain comment!"
|
||||
exit
|
||||
echo " ${CROSS} Found invalid characters in domain comment!"
|
||||
exit
|
||||
fi
|
||||
}
|
||||
|
||||
@@ -268,7 +270,7 @@ while (( "$#" )); do
|
||||
"--white-wild" | "white-wild" ) typeId=2; wildcard=true;;
|
||||
"--wild" | "wildcard" ) typeId=3; wildcard=true;;
|
||||
"--regex" | "regex" ) typeId=3;;
|
||||
"-nr"| "--noreload" ) reload=false;;
|
||||
"-nr"| "--noreload" ) noReloadRequested=true;;
|
||||
"-d" | "--delmode" ) addmode=false;;
|
||||
"-q" | "--quiet" ) verbose=false;;
|
||||
"-h" | "--help" ) helpFunc;;
|
||||
@@ -291,9 +293,9 @@ ProcessDomainList
|
||||
|
||||
# Used on web interface
|
||||
if $web; then
|
||||
echo "DONE"
|
||||
echo "DONE"
|
||||
fi
|
||||
|
||||
if [[ "${reload}" != false ]]; then
|
||||
if [[ ${reload} == true && ${noReloadRequested} == false ]]; then
|
||||
pihole restartdns reload-lists
|
||||
fi
|
||||
|
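The help text above documents the new `--comment` option, and `-nr`/`--noreload` now merely requests that the list reload be skipped. Typical invocations through the `pihole` wrapper would look roughly like this; the domains are placeholders:

```bash
# Whitelist a domain and attach a comment (domain is a placeholder).
pihole -w example.com --comment "allowed for testing"

# Add a regex blacklist entry without reloading the DNS lists afterwards.
pihole --regex '(^|\.)example\.org$' --comment "example regex" -nr
```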
@@ -39,7 +39,7 @@ flushARP(){
|
||||
# Truncate network_addresses table in pihole-FTL.db
|
||||
# This needs to be done before we can truncate the network table due to
|
||||
# foreign key constraints
|
||||
if ! output=$(sqlite3 "${DBFILE}" "DELETE FROM network_addresses" 2>&1); then
|
||||
if ! output=$(pihole-FTL sqlite3 "${DBFILE}" "DELETE FROM network_addresses" 2>&1); then
|
||||
echo -e "${OVER} ${CROSS} Failed to truncate network_addresses table"
|
||||
echo " Database location: ${DBFILE}"
|
||||
echo " Output: ${output}"
|
||||
@@ -47,7 +47,7 @@ flushARP(){
|
||||
fi
|
||||
|
||||
# Truncate network table in pihole-FTL.db
|
||||
if ! output=$(sqlite3 "${DBFILE}" "DELETE FROM network" 2>&1); then
|
||||
if ! output=$(pihole-FTL sqlite3 "${DBFILE}" "DELETE FROM network" 2>&1); then
|
||||
echo -e "${OVER} ${CROSS} Failed to truncate network table"
|
||||
echo " Database location: ${DBFILE}"
|
||||
echo " Output: ${output}"
|
||||
|
advanced/Scripts/piholeCheckout.sh (0 lines changed, Normal file → Executable file)
@@ -27,7 +27,7 @@ PIHOLE_COLTABLE_FILE="${PIHOLE_SCRIPTS_DIRECTORY}/COL_TABLE"
|
||||
|
||||
# These provide the colors we need for making the log more readable
|
||||
if [[ -f ${PIHOLE_COLTABLE_FILE} ]]; then
|
||||
source ${PIHOLE_COLTABLE_FILE}
|
||||
source ${PIHOLE_COLTABLE_FILE}
|
||||
else
|
||||
COL_NC='\e[0m' # No Color
|
||||
COL_RED='\e[1;91m'
|
||||
@@ -88,6 +88,7 @@ PIHOLE_LOCAL_HOSTS_FILE="${PIHOLE_DIRECTORY}/local.list"
|
||||
PIHOLE_LOGROTATE_FILE="${PIHOLE_DIRECTORY}/logrotate"
|
||||
PIHOLE_SETUP_VARS_FILE="${PIHOLE_DIRECTORY}/setupVars.conf"
|
||||
PIHOLE_FTL_CONF_FILE="${PIHOLE_DIRECTORY}/pihole-FTL.conf"
|
||||
PIHOLE_CUSTOM_HOSTS_FILE="${PIHOLE_DIRECTORY}/custom.list"
|
||||
|
||||
# Read the value of an FTL config key. The value is printed to stdout.
|
||||
#
|
||||
@@ -179,7 +180,8 @@ REQUIRED_FILES=("${PIHOLE_CRON_FILE}"
|
||||
"${PIHOLE_WEB_SERVER_ACCESS_LOG_FILE}"
|
||||
"${PIHOLE_WEB_SERVER_ERROR_LOG_FILE}"
|
||||
"${RESOLVCONF}"
|
||||
"${DNSMASQ_CONF}")
|
||||
"${DNSMASQ_CONF}"
|
||||
"${PIHOLE_CUSTOM_HOSTS_FILE}")
|
||||
|
||||
DISCLAIMER="This process collects information from your Pi-hole, and optionally uploads it to a unique and random directory on tricorder.pi-hole.net.
|
||||
|
||||
@@ -465,6 +467,9 @@ diagnose_operating_system() {
|
||||
# Display the current test that is running
|
||||
echo_current_diagnostic "Operating system"
|
||||
|
||||
# If the PIHOLE_DOCKER_TAG variable is set, include this information in the debug output
|
||||
[ -n "${PIHOLE_DOCKER_TAG}" ] && log_write "${INFO} Pi-hole Docker Container: ${PIHOLE_DOCKER_TAG}"
|
||||
|
||||
# If there is a /etc/*release file, it's probably a supported operating system, so we can
|
||||
if ls /etc/*release 1> /dev/null 2>&1; then
|
||||
# display the attributes to the user from the function made earlier
|
||||
@@ -585,6 +590,27 @@ processor_check() {
|
||||
fi
|
||||
}
|
||||
|
||||
disk_usage() {
|
||||
local file_system
|
||||
local hide
|
||||
|
||||
echo_current_diagnostic "Disk usage"
|
||||
mapfile -t file_system < <(df -h)
|
||||
|
||||
# Some lines of df might contain sensitive information like usernames and passwords.
|
||||
# E.g. curlftpfs filesystems (https://www.looklinux.com/mount-ftp-share-on-linux-using-curlftps/)
|
||||
# We are not interested in those lines so we collect keyword, to remove them from the output
|
||||
# Additinal keywords can be added, separated by "|"
|
||||
hide="curlftpfs"
|
||||
|
||||
# only show those lines not containg a sensitive phrase
|
||||
for line in "${file_system[@]}"; do
|
||||
if [[ ! $line =~ $hide ]]; then
|
||||
log_write " ${line}"
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
parse_setup_vars() {
|
||||
echo_current_diagnostic "Setup variables"
|
||||
# If the file exists,
|
||||
@@ -707,11 +733,11 @@ compare_port_to_service_assigned() {
|
||||
|
||||
# If the service is a Pi-hole service, highlight it in green
|
||||
if [[ "${service_name}" == "${expected_service}" ]]; then
|
||||
log_write "[${COL_GREEN}${port}${COL_NC}] is in use by ${COL_GREEN}${service_name}${COL_NC}"
|
||||
log_write "${TICK} ${COL_GREEN}${port}${COL_NC} is in use by ${COL_GREEN}${service_name}${COL_NC}"
|
||||
# Otherwise,
|
||||
else
|
||||
# Show the service name in red since it's non-standard
|
||||
log_write "[${COL_RED}${port}${COL_NC}] is in use by ${COL_RED}${service_name}${COL_NC} (${FAQ_HARDWARE_REQUIREMENTS_PORTS})"
|
||||
log_write "${CROSS} ${COL_RED}${port}${COL_NC} is in use by ${COL_RED}${service_name}${COL_NC} (${FAQ_HARDWARE_REQUIREMENTS_PORTS})"
|
||||
fi
|
||||
}
|
||||
|
||||
@@ -727,36 +753,47 @@ check_required_ports() {
|
||||
# Sort the addresses and remove duplicates
|
||||
while IFS= read -r line; do
|
||||
ports_in_use+=( "$line" )
|
||||
done < <( lsof -iTCP -sTCP:LISTEN -P -n +c 10 )
|
||||
done < <( ss --listening --numeric --tcp --udp --processes --no-header )
|
||||
|
||||
# Now that we have the values stored,
|
||||
for i in "${!ports_in_use[@]}"; do
|
||||
# loop through them and assign some local variables
|
||||
local service_name
|
||||
service_name=$(echo "${ports_in_use[$i]}" | awk '{print $1}')
|
||||
service_name=$(echo "${ports_in_use[$i]}" | awk '{gsub(/users:\(\("/,"",$7);gsub(/".*/,"",$7);print $7}')
|
||||
local protocol_type
|
||||
protocol_type=$(echo "${ports_in_use[$i]}" | awk '{print $5}')
|
||||
protocol_type=$(echo "${ports_in_use[$i]}" | awk '{print $1}')
|
||||
local port_number
|
||||
port_number="$(echo "${ports_in_use[$i]}" | awk '{print $9}')"
|
||||
port_number="$(echo "${ports_in_use[$i]}" | awk '{print $5}')" # | awk '{gsub(/^.*:/,"",$5);print $5}')
|
||||
|
||||
# Skip the line if it's the titles of the columns the lsof command produces
|
||||
if [[ "${service_name}" == COMMAND ]]; then
|
||||
continue
|
||||
fi
|
||||
# Use a case statement to determine if the right services are using the right ports
|
||||
case "$(echo "$port_number" | rev | cut -d: -f1 | rev)" in
|
||||
53) compare_port_to_service_assigned "${resolver}" "${service_name}" 53
|
||||
case "$(echo "${port_number}" | rev | cut -d: -f1 | rev)" in
|
||||
53) compare_port_to_service_assigned "${resolver}" "${service_name}" "${protocol_type}:${port_number}"
|
||||
;;
|
||||
80) compare_port_to_service_assigned "${web_server}" "${service_name}" 80
|
||||
80) compare_port_to_service_assigned "${web_server}" "${service_name}" "${protocol_type}:${port_number}"
|
||||
;;
|
||||
4711) compare_port_to_service_assigned "${ftl}" "${service_name}" 4711
|
||||
4711) compare_port_to_service_assigned "${ftl}" "${service_name}" "${protocol_type}:${port_number}"
|
||||
;;
|
||||
# If it's not a default port that Pi-hole needs, just print it out for the user to see
|
||||
*) log_write "${port_number} ${service_name} (${protocol_type})";
|
||||
*) log_write " ${protocol_type}:${port_number} is in use by ${service_name:=<unknown>}";
|
||||
esac
|
||||
done
|
||||
}
|
||||
|
||||
ip_command() {
|
||||
# Obtain and log information from "ip XYZ show" commands
|
||||
echo_current_diagnostic "${2}"
|
||||
local entries=()
|
||||
mapfile -t entries < <(ip "${1}" show)
|
||||
for line in "${entries[@]}"; do
|
||||
log_write " ${line}"
|
||||
done
|
||||
}
|
||||
|
||||
check_ip_command() {
|
||||
ip_command "addr" "Network interfaces and addresses"
|
||||
ip_command "route" "Network routing table"
|
||||
}
|
||||
|
||||
check_networking() {
|
||||
# Runs through several of the functions made earlier; we just clump them
|
||||
# together since they are all related to the networking aspect of things
|
||||
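The port check above now parses `ss` output instead of `lsof`. Run on its own (as root, so the process column is populated), the listening-socket listing and the awk extraction of the owning process look roughly like this:

```bash
# List listening sockets and print protocol, local address:port and process name.
# Mirrors the parsing used above; run as root so process information is visible.
ss --listening --numeric --tcp --udp --processes --no-header | \
    awk '{gsub(/users:\(\("/,"",$7); gsub(/".*/,"",$7); print $1, $5, $7}'
```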
@@ -765,7 +802,9 @@ check_networking() {
|
||||
detect_ip_addresses "6"
|
||||
ping_gateway "4"
|
||||
ping_gateway "6"
|
||||
check_required_ports
|
||||
# Skip the following check if installed in docker container. Unpriv'ed containers do not have access to the information required
|
||||
# to resolve the service name listening - and the container should not start if there was a port conflict anyway
|
||||
[ -z "${PIHOLE_DOCKER_TAG}" ] && check_required_ports
|
||||
}
|
||||
|
||||
check_x_headers() {
|
||||
@@ -849,7 +888,7 @@ dig_at() {
|
||||
# This helps emulate queries to different domains that a user might query
|
||||
# It will also give extra assurance that Pi-hole is correctly resolving and blocking domains
|
||||
local random_url
|
||||
random_url=$(sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" "SELECT domain FROM vw_gravity ORDER BY RANDOM() LIMIT 1")
|
||||
random_url=$(pihole-FTL sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" "SELECT domain FROM vw_gravity ORDER BY RANDOM() LIMIT 1")
|
||||
|
||||
# Next we need to check if Pi-hole can resolve a domain when the query is sent to its IP address
|
||||
# This better emulates how clients will interact with Pi-hole as opposed to above where Pi-hole is
|
||||
@@ -867,9 +906,11 @@ dig_at() {
|
||||
# Removes all interfaces which are not UP
|
||||
# s/^[0-9]*: //g;
|
||||
# Removes interface index
|
||||
# s/@.*//g;
|
||||
# Removes everything after @ (if found)
|
||||
# s/: <.*//g;
|
||||
# Removes everything after the interface name
|
||||
interfaces="$(ip link show | sed "/ master /d;/UP/!d;s/^[0-9]*: //g;s/: <.*//g;")"
|
||||
interfaces="$(ip link show | sed "/ master /d;/UP/!d;s/^[0-9]*: //g;s/@.*//g;s/: <.*//g;")"
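To make the sed filter above concrete, here is a small sketch run against hypothetical "ip link show" output; the interface names and flags are invented for the example:

printf '%s\n' \
    '1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default' \
    '2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP mode DEFAULT group default' \
    '3: veth1@if2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master br0 state UP mode DEFAULT' \
    | sed "/ master /d;/UP/!d;s/^[0-9]*: //g;s/@.*//g;s/: <.*//g;"
# -> lo
# -> eth0
# (the veth1 line is dropped because it contains " master ")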
|
||||
|
||||
while IFS= read -r iface ; do
|
||||
# Get addresses of current interface
|
||||
@@ -1163,7 +1204,7 @@ show_db_entries() {
|
||||
IFS=$'\r\n'
|
||||
local entries=()
|
||||
mapfile -t entries < <(\
|
||||
sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" \
|
||||
pihole-FTL sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" \
|
||||
-cmd ".headers on" \
|
||||
-cmd ".mode column" \
|
||||
-cmd ".width ${widths}" \
|
||||
@@ -1188,7 +1229,7 @@ show_FTL_db_entries() {
|
||||
IFS=$'\r\n'
|
||||
local entries=()
|
||||
mapfile -t entries < <(\
|
||||
sqlite3 "${PIHOLE_FTL_DB_FILE}" \
|
||||
pihole-FTL sqlite3 "${PIHOLE_FTL_DB_FILE}" \
|
||||
-cmd ".headers on" \
|
||||
-cmd ".mode column" \
|
||||
-cmd ".width ${widths}" \
|
||||
@@ -1234,18 +1275,18 @@ show_clients() {
|
||||
}
|
||||
|
||||
show_messages() {
|
||||
show_FTL_db_entries "Pi-hole diagnosis messages" "SELECT id,datetime(timestamp,'unixepoch','localtime') timestamp,type,message,blob1,blob2,blob3,blob4,blob5 FROM message;" "4 19 20 60 20 20 20 20 20"
|
||||
show_FTL_db_entries "Pi-hole diagnosis messages" "SELECT count (message) as count, datetime(max(timestamp),'unixepoch','localtime') as 'last timestamp', type, message, blob1, blob2, blob3, blob4, blob5 FROM message GROUP BY type, message, blob1, blob2, blob3, blob4, blob5;" "6 19 20 60 20 20 20 20 20"
|
||||
}
|
||||
|
||||
analyze_gravity_list() {
|
||||
echo_current_diagnostic "Gravity List and Database"
|
||||
echo_current_diagnostic "Gravity Database"
|
||||
|
||||
local gravity_permissions
|
||||
gravity_permissions=$(ls -ld "${PIHOLE_GRAVITY_DB_FILE}")
|
||||
gravity_permissions=$(ls -lhd "${PIHOLE_GRAVITY_DB_FILE}")
|
||||
log_write "${COL_GREEN}${gravity_permissions}${COL_NC}"
|
||||
|
||||
show_db_entries "Info table" "SELECT property,value FROM info" "20 40"
|
||||
gravity_updated_raw="$(sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" "SELECT value FROM info where property = 'updated'")"
|
||||
gravity_updated_raw="$(pihole-FTL sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" "SELECT value FROM info where property = 'updated'")"
|
||||
gravity_updated="$(date -d @"${gravity_updated_raw}")"
|
||||
log_write " Last gravity run finished at: ${COL_CYAN}${gravity_updated}${COL_NC}"
|
||||
log_write ""
|
||||
@@ -1253,7 +1294,7 @@ analyze_gravity_list() {
|
||||
OLD_IFS="$IFS"
|
||||
IFS=$'\r\n'
|
||||
local gravity_sample=()
|
||||
mapfile -t gravity_sample < <(sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" "SELECT domain FROM vw_gravity LIMIT 10")
|
||||
mapfile -t gravity_sample < <(pihole-FTL sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" "SELECT domain FROM vw_gravity LIMIT 10")
|
||||
log_write " ${COL_CYAN}----- First 10 Gravity Domains -----${COL_NC}"
|
||||
|
||||
for line in "${gravity_sample[@]}"; do
|
||||
@@ -1320,7 +1361,7 @@ analyze_pihole_log() {
|
||||
OLD_IFS="$IFS"
|
||||
# Get the lines that are in the file(s) and store them in an array for parsing later
|
||||
IFS=$'\r\n'
|
||||
pihole_log_permissions=$(ls -ld "${PIHOLE_LOG}")
|
||||
pihole_log_permissions=$(ls -lhd "${PIHOLE_LOG}")
|
||||
log_write "${COL_GREEN}${pihole_log_permissions}${COL_NC}"
|
||||
mapfile -t pihole_log_head < <(head -n 20 ${PIHOLE_LOG})
|
||||
log_write " ${COL_CYAN}-----head of $(basename ${PIHOLE_LOG})------${COL_NC}"
|
||||
@@ -1363,9 +1404,9 @@ upload_to_tricorder() {
|
||||
log_write "${TICK} ${COL_GREEN}** FINISHED DEBUGGING! **${COL_NC}\\n"
|
||||
|
||||
# Provide information on what they should do with their token
|
||||
log_write " * The debug log can be uploaded to tricorder.pi-hole.net for sharing with developers only."
|
||||
log_write " * The debug log can be uploaded to tricorder.pi-hole.net for sharing with developers only."
|
||||
|
||||
# If pihole -d is running automatically (usually through the dashboard)
|
||||
# If pihole -d is running automatically
|
||||
if [[ "${AUTOMATED}" ]]; then
|
||||
# let the user know
|
||||
log_write "${INFO} Debug script running in automated mode"
|
||||
@@ -1373,16 +1414,19 @@ upload_to_tricorder() {
|
||||
curl_to_tricorder
|
||||
# If we're not running in automated mode,
|
||||
else
|
||||
echo ""
|
||||
# give the user a choice of uploading it or not
|
||||
# Users can review the log file locally (or the output of the script since they are the same) and try to self-diagnose their problem
|
||||
read -r -p "[?] Would you like to upload the log? [y/N] " response
|
||||
case ${response} in
|
||||
# If they say yes, run our function for uploading the log
|
||||
[yY][eE][sS]|[yY]) curl_to_tricorder;;
|
||||
# If they choose no, just exit out of the script
|
||||
*) log_write " * Log will ${COL_GREEN}NOT${COL_NC} be uploaded to tricorder.\\n * A local copy of the debug log can be found at: ${COL_CYAN}${PIHOLE_DEBUG_LOG}${COL_NC}\\n";exit;
|
||||
esac
|
||||
# if not being called from the web interface
|
||||
if [[ ! "${WEBCALL}" ]]; then
|
||||
echo ""
|
||||
# give the user a choice of uploading it or not
|
||||
# Users can review the log file locally (or the output of the script since they are the same) and try to self-diagnose their problem
|
||||
read -r -p "[?] Would you like to upload the log? [y/N] " response
|
||||
case ${response} in
|
||||
# If they say yes, run our function for uploading the log
|
||||
[yY][eE][sS]|[yY]) curl_to_tricorder;;
|
||||
# If they choose no, just exit out of the script
|
||||
*) log_write " * Log will ${COL_GREEN}NOT${COL_NC} be uploaded to tricorder.\\n * A local copy of the debug log can be found at: ${COL_CYAN}${PIHOLE_DEBUG_LOG}${COL_NC}\\n";exit;
|
||||
esac
|
||||
fi
|
||||
fi
|
||||
# Check if tricorder.pi-hole.net is reachable and provide token
|
||||
# along with some additional useful information
|
||||
@@ -1402,8 +1446,13 @@ upload_to_tricorder() {
|
||||
# If no token was generated
|
||||
else
|
||||
# Show an error and some help instructions
|
||||
log_write "${CROSS} ${COL_RED}There was an error uploading your debug log.${COL_NC}"
|
||||
log_write " * Please try again or contact the Pi-hole team for assistance."
|
||||
# Skip this if called from the web interface and automatic mode was not chosen (the user opted out of uploading)
|
||||
if [[ "${WEBCALL}" ]] && [[ ! "${AUTOMATED}" ]]; then
|
||||
:
|
||||
else
|
||||
log_write "${CROSS} ${COL_RED}There was an error uploading your debug log.${COL_NC}"
|
||||
log_write " * Please try again or contact the Pi-hole team for assistance."
|
||||
fi
|
||||
fi
|
||||
# Finally, show where the log file is no matter the outcome of the function so users can look at it
|
||||
log_write " * A local copy of the debug log can be found at: ${COL_CYAN}${PIHOLE_DEBUG_LOG}${COL_NC}\\n"
|
||||
@@ -1421,6 +1470,8 @@ diagnose_operating_system
|
||||
check_selinux
|
||||
check_firewalld
|
||||
processor_check
|
||||
disk_usage
|
||||
check_ip_command
|
||||
check_networking
|
||||
check_name_resolution
|
||||
check_dhcp_servers
|
||||
|
@@ -63,7 +63,7 @@ else
|
||||
fi
|
||||
fi
|
||||
# Delete most recent 24 hours from FTL's database, leave even older data intact (don't wipe out all history)
|
||||
deleted=$(sqlite3 "${DBFILE}" "DELETE FROM queries WHERE timestamp >= strftime('%s','now')-86400; select changes() from queries limit 1")
|
||||
deleted=$(pihole-FTL sqlite3 "${DBFILE}" "DELETE FROM query_storage WHERE timestamp >= strftime('%s','now')-86400; select changes() from query_storage limit 1")
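A hedged sketch of this flush step in isolation, assuming FTL's long-term database lives at the usual /etc/pihole/pihole-FTL.db path; changes() reports how many rows the DELETE removed:

DBFILE="/etc/pihole/pihole-FTL.db"   # assumed default location
deleted=$(pihole-FTL sqlite3 "${DBFILE}" "DELETE FROM query_storage WHERE timestamp >= strftime('%s','now')-86400; select changes() from query_storage limit 1")
echo "  Deleted ${deleted} queries from the last 24 hours"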
|
||||
|
||||
# Restart pihole-FTL to force reloading history
|
||||
sudo pihole restartdns
|
||||
|
@@ -21,7 +21,7 @@ matchType="match"
|
||||
# Source pihole-FTL from install script
|
||||
pihole_FTL="${piholeDir}/pihole-FTL.conf"
|
||||
if [[ -f "${pihole_FTL}" ]]; then
|
||||
source "${pihole_FTL}"
|
||||
source "${pihole_FTL}"
|
||||
fi
|
||||
|
||||
# Set this only after sourcing pihole-FTL.conf as the gravity database path may
|
||||
@@ -48,7 +48,7 @@ scanList(){
|
||||
# Iterate through each regexp and check whether it matches the domainQuery
|
||||
# If it does, print the matching regexp and continue looping
|
||||
# Input 1 - regexps | Input 2 - domainQuery
|
||||
"regex" )
|
||||
"regex" )
|
||||
for list in ${lists}; do
|
||||
if [[ "${domain}" =~ ${list} ]]; then
|
||||
printf "%b\n" "${list}";
|
||||
@@ -64,8 +64,8 @@ Example: 'pihole -q -exact domain.com'
|
||||
Query the adlists for a specified domain
|
||||
|
||||
Options:
|
||||
-exact Search the block lists for exact domain matches
|
||||
-all Return all query matches within a block list
|
||||
-exact Search the adlists for exact domain matches
|
||||
-all Return all query matches within the adlists
|
||||
-h, --help Show this help dialog"
|
||||
exit 0
|
||||
fi
|
||||
@@ -109,27 +109,27 @@ scanDatabaseTable() {
|
||||
# behavior. The "ESCAPE '\'" clause specifies that an underscore preceded by an '\' should be matched
|
||||
# as a literal underscore character. We pretreat the $domain variable accordingly to escape underscores.
|
||||
if [[ "${table}" == "gravity" ]]; then
|
||||
case "${exact}" in
|
||||
"exact" ) querystr="SELECT gravity.domain,adlist.address,adlist.enabled FROM gravity LEFT JOIN adlist ON adlist.id = gravity.adlist_id WHERE domain = '${domain}'";;
|
||||
* ) querystr="SELECT gravity.domain,adlist.address,adlist.enabled FROM gravity LEFT JOIN adlist ON adlist.id = gravity.adlist_id WHERE domain LIKE '%${domain//_/\\_}%' ESCAPE '\\'";;
|
||||
esac
|
||||
case "${exact}" in
|
||||
"exact" ) querystr="SELECT gravity.domain,adlist.address,adlist.enabled FROM gravity LEFT JOIN adlist ON adlist.id = gravity.adlist_id WHERE domain = '${domain}'";;
|
||||
* ) querystr="SELECT gravity.domain,adlist.address,adlist.enabled FROM gravity LEFT JOIN adlist ON adlist.id = gravity.adlist_id WHERE domain LIKE '%${domain//_/\\_}%' ESCAPE '\\'";;
|
||||
esac
|
||||
else
|
||||
case "${exact}" in
|
||||
"exact" ) querystr="SELECT domain,enabled FROM domainlist WHERE type = '${type}' AND domain = '${domain}'";;
|
||||
* ) querystr="SELECT domain,enabled FROM domainlist WHERE type = '${type}' AND domain LIKE '%${domain//_/\\_}%' ESCAPE '\\'";;
|
||||
esac
|
||||
case "${exact}" in
|
||||
"exact" ) querystr="SELECT domain,enabled FROM domainlist WHERE type = '${type}' AND domain = '${domain}'";;
|
||||
* ) querystr="SELECT domain,enabled FROM domainlist WHERE type = '${type}' AND domain LIKE '%${domain//_/\\_}%' ESCAPE '\\'";;
|
||||
esac
|
||||
fi
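For clarity, a short sketch of the underscore escaping described in the comment above; the domain and the type value are examples only:

domain="ad_server.example.com"
escaped="${domain//_/\\_}"            # -> ad\_server.example.com
querystr="SELECT domain,enabled FROM domainlist WHERE type = '1' AND domain LIKE '%${escaped}%' ESCAPE '\\'"
echo "${querystr}"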
|
||||
|
||||
# Send prepared query to gravity database
|
||||
result="$(sqlite3 "${gravityDBfile}" "${querystr}")" 2> /dev/null
|
||||
result="$(pihole-FTL sqlite3 "${gravityDBfile}" "${querystr}")" 2> /dev/null
|
||||
if [[ -z "${result}" ]]; then
|
||||
# Return early when there are no matches in this table
|
||||
return
|
||||
fi
|
||||
|
||||
if [[ "${table}" == "gravity" ]]; then
|
||||
echo "${result}"
|
||||
return
|
||||
echo "${result}"
|
||||
return
|
||||
fi
|
||||
|
||||
# Mark domain as having been white-/blacklist matched (global variable)
|
||||
@@ -164,7 +164,7 @@ scanRegexDatabaseTable() {
|
||||
type="${3:-}"
|
||||
|
||||
# Query all regex from the corresponding database tables
|
||||
mapfile -t regexList < <(sqlite3 "${gravityDBfile}" "SELECT domain FROM domainlist WHERE type = ${type}" 2> /dev/null)
|
||||
mapfile -t regexList < <(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT domain FROM domainlist WHERE type = ${type}" 2> /dev/null)
|
||||
|
||||
# If we have regexps to process
|
||||
if [[ "${#regexList[@]}" -ne 0 ]]; then
|
||||
@@ -210,7 +210,7 @@ mapfile -t results <<< "$(scanDatabaseTable "${domainQuery}" "gravity")"
|
||||
|
||||
# Handle notices
|
||||
if [[ -z "${wbMatch:-}" ]] && [[ -z "${wcMatch:-}" ]] && [[ -z "${results[*]}" ]]; then
|
||||
echo -e " ${INFO} No ${exact/t/t }results found for ${COL_BOLD}${domainQuery}${COL_NC} within the block lists"
|
||||
echo -e " ${INFO} No ${exact/t/t }results found for ${COL_BOLD}${domainQuery}${COL_NC} within the adlists"
|
||||
exit 0
|
||||
elif [[ -z "${results[*]}" ]]; then
|
||||
# Result found in WL/BL/Wildcards
|
||||
@@ -233,15 +233,15 @@ for result in "${results[@]}"; do
|
||||
adlistAddress="${extra/|*/}"
|
||||
extra="${extra#*|}"
|
||||
if [[ "${extra}" == "0" ]]; then
|
||||
extra="(disabled)"
|
||||
extra=" (disabled)"
|
||||
else
|
||||
extra=""
|
||||
extra=""
|
||||
fi
|
||||
|
||||
if [[ -n "${blockpage}" ]]; then
|
||||
echo "0 ${adlistAddress}"
|
||||
elif [[ -n "${exact}" ]]; then
|
||||
echo " - ${adlistAddress} ${extra}"
|
||||
echo " - ${adlistAddress}${extra}"
|
||||
else
|
||||
if [[ ! "${adlistAddress}" == "${adlistAddress_prev:-}" ]]; then
|
||||
count=""
|
||||
@@ -256,7 +256,7 @@ for result in "${results[@]}"; do
|
||||
[[ "${count}" -gt "${max_count}" ]] && continue
|
||||
echo " ${COL_GRAY}Over ${count} results found, skipping rest of file${COL_NC}"
|
||||
else
|
||||
echo " ${match} ${extra}"
|
||||
echo " ${match}${extra}"
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
@@ -35,6 +35,7 @@ source "/opt/pihole/COL_TABLE"
|
||||
|
||||
GitCheckUpdateAvail() {
|
||||
local directory
|
||||
local curBranch
|
||||
directory="${1}"
|
||||
curdir=$PWD
|
||||
cd "${directory}" || return
|
||||
@@ -42,18 +43,29 @@ GitCheckUpdateAvail() {
|
||||
# Fetch latest changes in this repo
|
||||
git fetch --quiet origin
|
||||
|
||||
# @ alone is a shortcut for HEAD. Older versions of git
|
||||
# need @{0}
|
||||
LOCAL="$(git rev-parse "@{0}")"
|
||||
# Check current branch. If it is master, then check for the latest available tag instead of latest commit.
|
||||
curBranch=$(git rev-parse --abbrev-ref HEAD)
|
||||
if [[ "${curBranch}" == "master" ]]; then
|
||||
# get the latest local tag
|
||||
LOCAL=$(git describe --abbrev=0 --tags master)
|
||||
# get the latest tag from remote
|
||||
REMOTE=$(git describe --abbrev=0 --tags origin/master)
|
||||
|
||||
else
|
||||
# @ alone is a shortcut for HEAD. Older versions of git
|
||||
# need @{0}
|
||||
LOCAL="$(git rev-parse "@{0}")"
|
||||
|
||||
# The suffix @{upstream} to a branchname
|
||||
# (short form <branchname>@{u}) refers
|
||||
# to the branch that the branch specified
|
||||
# by branchname is set to build on top of
|
||||
# (configured with branch.<name>.remote and
|
||||
# branch.<name>.merge). A missing branchname
|
||||
# defaults to the current one.
|
||||
REMOTE="$(git rev-parse "@{upstream}")"
|
||||
fi
|
||||
|
||||
# The suffix @{upstream} to a branchname
|
||||
# (short form <branchname>@{u}) refers
|
||||
# to the branch that the branch specified
|
||||
# by branchname is set to build on top of
|
||||
# (configured with branch.<name>.remote and
|
||||
# branch.<name>.merge). A missing branchname
|
||||
# defaults to the current one.
|
||||
REMOTE="$(git rev-parse "@{upstream}")"
|
||||
|
||||
if [[ "${#LOCAL}" == 0 ]]; then
|
||||
echo -e "\\n ${COL_LIGHT_RED}Error: Local revision could not be obtained, please contact Pi-hole Support"
|
||||
@@ -95,6 +107,10 @@ main() {
|
||||
# shellcheck disable=1090,2154
|
||||
source "${setupVars}"
|
||||
|
||||
# Install packages used by this installation script (necessary if users have removed e.g. git from their systems)
|
||||
package_manager_detect
|
||||
install_dependent_packages "${INSTALLER_DEPS[@]}"
|
||||
|
||||
# This is unlikely
|
||||
if ! is_repo "${PI_HOLE_FILES_DIR}" ; then
|
||||
echo -e "\\n ${COL_LIGHT_RED}Error: Core Pi-hole repo is missing from system!"
|
||||
@@ -196,7 +212,7 @@ main() {
|
||||
|
||||
if [[ "${FTL_update}" == true || "${core_update}" == true ]]; then
|
||||
${PI_HOLE_FILES_DIR}/automated\ install/basic-install.sh --reconfigure --unattended || \
|
||||
echo -e "${basicError}" && exit 1
|
||||
echo -e "${basicError}" && exit 1
|
||||
fi
|
||||
|
||||
if [[ "${FTL_update}" == true || "${core_update}" == true || "${web_update}" == true ]]; then
|
||||
|
92
advanced/Scripts/utils.sh
Executable file
@@ -0,0 +1,92 @@
|
||||
#!/usr/bin/env sh
|
||||
# shellcheck disable=SC3043 #https://github.com/koalaman/shellcheck/wiki/SC3043#exceptions
|
||||
|
||||
# Pi-hole: A black hole for Internet advertisements
|
||||
# (c) 2017 Pi-hole, LLC (https://pi-hole.net)
|
||||
# Network-wide ad blocking via your own hardware.
|
||||
#
|
||||
# Script to hold utility functions for use in other scripts
|
||||
#
|
||||
# This file is copyright under the latest version of the EUPL.
|
||||
# Please see LICENSE file for your rights under this license.
|
||||
|
||||
# Basic Housekeeping rules
|
||||
# - Functions must be self contained
|
||||
# - Functions must be added in alphabetical order
|
||||
# - Functions must be documented
|
||||
# - New functions must have a test added for them in test/test_any_utils.py
|
||||
|
||||
#######################
|
||||
# Takes either
|
||||
# - Three arguments: file, key, and value.
|
||||
# - Two arguments: file, and key.
|
||||
#
|
||||
# Checks the target file for the existence of the key
|
||||
# - If it exists, it changes the value
|
||||
# - If it does not exist, it adds the value
|
||||
#
|
||||
# Example usage:
|
||||
# addOrEditKeyValuePair "/etc/pihole/setupVars.conf" "BLOCKING_ENABLED" "true"
|
||||
#######################
|
||||
addOrEditKeyValPair() {
|
||||
local file="${1}"
|
||||
local key="${2}"
|
||||
local value="${3}"
|
||||
|
||||
if [ "${value}" != "" ]; then
|
||||
# value has a value, so it is a key-value pair
|
||||
if grep -q "^${key}=" "${file}"; then
|
||||
# Key already exists in file, modify the value
|
||||
sed -i "/^${key}=/c\\${key}=${value}" "${file}"
|
||||
else
|
||||
# Key does not already exist, add it and its value
|
||||
echo "${key}=${value}" >> "${file}"
|
||||
fi
|
||||
else
|
||||
# value has no value, so it is just a key. Add it if it does not already exist
|
||||
if ! grep -q "^${key}" "${file}"; then
|
||||
# Key does not exist, add it.
|
||||
echo "${key}" >> "${file}"
|
||||
fi
|
||||
fi
|
||||
}
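Illustrative usage of addOrEditKeyValPair against a scratch file (the keys are examples, not a prescription for setupVars.conf):

# after sourcing /opt/pihole/utils.sh
conf="$(mktemp)"
addOrEditKeyValPair "${conf}" "BLOCKING_ENABLED" "true"    # key absent: appends BLOCKING_ENABLED=true
addOrEditKeyValPair "${conf}" "BLOCKING_ENABLED" "false"   # key present: rewrites the line in place
addOrEditKeyValPair "${conf}" "DNSMASQ_LISTENING"          # no value: appends the bare key
cat "${conf}"
# BLOCKING_ENABLED=false
# DNSMASQ_LISTENING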
|
||||
|
||||
#######################
|
||||
# Takes two arguments file, and key.
|
||||
# Deletes a key from target file
|
||||
#
|
||||
# Example usage:
|
||||
# removeKey "/etc/pihole/setupVars.conf" "PIHOLE_DNS_1"
|
||||
#######################
|
||||
removeKey() {
|
||||
local file="${1}"
|
||||
local key="${2}"
|
||||
sed -i "/^${key}/d" "${file}"
|
||||
}
|
||||
|
||||
#######################
|
||||
# returns FTL's current telnet API port
|
||||
#######################
|
||||
getFTLAPIPort(){
|
||||
local FTLCONFFILE="/etc/pihole/pihole-FTL.conf"
|
||||
local DEFAULT_PORT_FILE="/run/pihole-FTL.port"
|
||||
local DEFAULT_FTL_PORT=4711
|
||||
local PORTFILE
|
||||
local ftl_api_port
|
||||
|
||||
if [ -f "$FTLCONFFILE" ]; then
|
||||
# if PORTFILE is not set in pihole-FTL.conf, use the default path
|
||||
PORTFILE="$( (grep "^PORTFILE=" $FTLCONFFILE || echo "$DEFAULT_PORT_FILE") | cut -d"=" -f2-)"
|
||||
fi
|
||||
|
||||
if [ -s "$PORTFILE" ]; then
|
||||
# -s: FILE exists and has a size greater than zero
|
||||
ftl_api_port=$(cat "${PORTFILE}")
|
||||
# Exploit prevention: unset the variable if there is malicious content
|
||||
# Verify that the value read from the file is numeric
|
||||
expr "$ftl_api_port" : "[^[:digit:]]" > /dev/null && unset ftl_api_port
|
||||
fi
|
||||
|
||||
# echo the port found in the portfile or default to the default port
|
||||
echo "${ftl_api_port:=$DEFAULT_FTL_PORT}"
|
||||
}
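A hedged usage sketch; it assumes FTL's telnet API is reachable on localhost, that nc is installed, and that ">stats" is a valid API command:

ftl_port="$(getFTLAPIPort)"
echo ">stats" | nc 127.0.0.1 "${ftl_port}"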
|
@@ -13,6 +13,10 @@ DEFAULT="-1"
|
||||
COREGITDIR="/etc/.pihole/"
|
||||
WEBGITDIR="/var/www/html/admin/"
|
||||
|
||||
# Source the setupvars config file
|
||||
# shellcheck disable=SC1091
|
||||
source /etc/pihole/setupVars.conf
|
||||
|
||||
getLocalVersion() {
|
||||
# FTL requires a different method
|
||||
if [[ "$1" == "FTL" ]]; then
|
||||
@@ -91,10 +95,11 @@ getRemoteVersion(){
|
||||
#If the above file exists, then we can read from that. Prevents overuse of GitHub API
|
||||
if [[ -f "$cachedVersions" ]]; then
|
||||
IFS=' ' read -r -a arrCache < "$cachedVersions"
|
||||
|
||||
case $daemon in
|
||||
"pi-hole" ) echo "${arrCache[0]}";;
|
||||
"AdminLTE" ) echo "${arrCache[1]}";;
|
||||
"FTL" ) echo "${arrCache[2]}";;
|
||||
"pi-hole" ) echo "${arrCache[0]}";;
|
||||
"AdminLTE" ) [[ "${INSTALL_WEB_INTERFACE}" == true ]] && echo "${arrCache[1]}";;
|
||||
"FTL" ) [[ "${INSTALL_WEB_INTERFACE}" == true ]] && echo "${arrCache[2]}" || echo "${arrCache[1]}";;
|
||||
esac
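The index shift exists because the cached file holds three space-separated versions when the web admin is installed and only two otherwise; a sketch with made-up version strings and a scratch file:

printf 'v5.4 v5.6 v5.9\n' > /tmp/cachedVersions.example   # core, web, FTL
IFS=' ' read -r -a arrCache < /tmp/cachedVersions.example
echo "${arrCache[2]}"   # -> v5.9 (FTL, when INSTALL_WEB_INTERFACE=true)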
|
||||
|
||||
return 0
|
||||
@@ -117,7 +122,7 @@ getLocalBranch(){
|
||||
local directory="${1}"
|
||||
local branch
|
||||
|
||||
# Local FTL branch is stored in /etc/pihole/ftlbranch
|
||||
# Local FTL branch is stored in /etc/pihole/ftlbranch
|
||||
if [[ "$1" == "FTL" ]]; then
|
||||
branch="$(pihole-FTL branch)"
|
||||
else
|
||||
@@ -140,6 +145,11 @@ getLocalBranch(){
|
||||
}
|
||||
|
||||
versionOutput() {
|
||||
if [[ "$1" == "AdminLTE" && "${INSTALL_WEB_INTERFACE}" != true ]]; then
|
||||
echo " WebAdmin not installed"
|
||||
return 1
|
||||
fi
|
||||
|
||||
[[ "$1" == "pi-hole" ]] && GITDIR=$COREGITDIR
|
||||
[[ "$1" == "AdminLTE" ]] && GITDIR=$WEBGITDIR
|
||||
[[ "$1" == "FTL" ]] && GITDIR="FTL"
|
||||
@@ -166,6 +176,7 @@ versionOutput() {
|
||||
output="Latest ${1^} hash is $latHash"
|
||||
else
|
||||
errorOutput
|
||||
return 1
|
||||
fi
|
||||
|
||||
[[ -n "$output" ]] && echo " $output"
|
||||
@@ -177,10 +188,6 @@ errorOutput() {
|
||||
}
|
||||
|
||||
defaultOutput() {
|
||||
# Source the setupvars config file
|
||||
# shellcheck disable=SC1091
|
||||
source /etc/pihole/setupVars.conf
|
||||
|
||||
versionOutput "pi-hole" "$@"
|
||||
|
||||
if [[ "${INSTALL_WEB_INTERFACE}" == true ]]; then
|
||||
|
@@ -1,5 +1,7 @@
|
||||
#!/usr/bin/env bash
|
||||
# shellcheck disable=SC1090
|
||||
# shellcheck disable=SC2154
|
||||
|
||||
|
||||
# Pi-hole: A black hole for Internet advertisements
|
||||
# (c) 2017 Pi-hole, LLC (https://pi-hole.net)
|
||||
@@ -26,6 +28,9 @@ readonly PI_HOLE_FILES_DIR="/etc/.pihole"
|
||||
PH_TEST="true"
|
||||
source "${PI_HOLE_FILES_DIR}/automated install/basic-install.sh"
|
||||
|
||||
utilsfile="/opt/pihole/utils.sh"
|
||||
source "${utilsfile}"
|
||||
|
||||
coltable="/opt/pihole/COL_TABLE"
|
||||
if [[ -f ${coltable} ]]; then
|
||||
source ${coltable}
|
||||
@@ -37,54 +42,49 @@ Example: pihole -a -p password
|
||||
Set options for the Admin Console
|
||||
|
||||
Options:
|
||||
-p, password Set Admin Console password
|
||||
-c, celsius Set Celsius as preferred temperature unit
|
||||
-f, fahrenheit Set Fahrenheit as preferred temperature unit
|
||||
-k, kelvin Set Kelvin as preferred temperature unit
|
||||
-e, email Set an administrative contact address for the Block Page
|
||||
-h, --help Show this help dialog
|
||||
-i, interface Specify dnsmasq's interface listening behavior
|
||||
-l, privacylevel Set privacy level (0 = lowest, 3 = highest)
|
||||
-t, teleporter Backup configuration as an archive"
|
||||
-p, password Set Admin Console password
|
||||
-c, celsius Set Celsius as preferred temperature unit
|
||||
-f, fahrenheit Set Fahrenheit as preferred temperature unit
|
||||
-k, kelvin Set Kelvin as preferred temperature unit
|
||||
-e, email Set an administrative contact address for the Block Page
|
||||
-h, --help Show this help dialog
|
||||
-i, interface Specify dnsmasq's interface listening behavior
|
||||
-l, privacylevel Set privacy level (0 = lowest, 3 = highest)
|
||||
-t, teleporter Backup configuration as an archive
|
||||
-t, teleporter myname.tar.gz Backup configuration to archive with name myname.tar.gz as specified"
|
||||
exit 0
|
||||
}
|
||||
|
||||
add_setting() {
|
||||
echo "${1}=${2}" >> "${setupVars}"
|
||||
addOrEditKeyValPair "${setupVars}" "${1}" "${2}"
|
||||
}
|
||||
|
||||
delete_setting() {
|
||||
sed -i "/^${1}/d" "${setupVars}"
|
||||
removeKey "${setupVars}" "${1}"
|
||||
}
|
||||
|
||||
change_setting() {
|
||||
delete_setting "${1}"
|
||||
add_setting "${1}" "${2}"
|
||||
addOrEditKeyValPair "${setupVars}" "${1}" "${2}"
|
||||
}
|
||||
|
||||
addFTLsetting() {
|
||||
echo "${1}=${2}" >> "${FTLconf}"
|
||||
addOrEditKeyValPair "${FTLconf}" "${1}" "${2}"
|
||||
}
|
||||
|
||||
deleteFTLsetting() {
|
||||
sed -i "/^${1}/d" "${FTLconf}"
|
||||
removeKey "${FTLconf}" "${1}"
|
||||
}
|
||||
|
||||
changeFTLsetting() {
|
||||
deleteFTLsetting "${1}"
|
||||
addFTLsetting "${1}" "${2}"
|
||||
addOrEditKeyValPair "${FTLconf}" "${1}" "${2}"
|
||||
}
|
||||
|
||||
add_dnsmasq_setting() {
|
||||
if [[ "${2}" != "" ]]; then
|
||||
echo "${1}=${2}" >> "${dnsmasqconfig}"
|
||||
else
|
||||
echo "${1}" >> "${dnsmasqconfig}"
|
||||
fi
|
||||
addOrEditKeyValPair "${dnsmasqconfig}" "${1}" "${2}"
|
||||
}
|
||||
|
||||
delete_dnsmasq_setting() {
|
||||
sed -i "/^${1}/d" "${dnsmasqconfig}"
|
||||
removeKey "${dnsmasqconfig}" "${1}"
|
||||
}
|
||||
|
||||
SetTemperatureUnit() {
|
||||
@@ -122,14 +122,14 @@ SetWebPassword() {
|
||||
read -s -r -p "Enter New Password (Blank for no password): " PASSWORD
|
||||
echo ""
|
||||
|
||||
if [ "${PASSWORD}" == "" ]; then
|
||||
change_setting "WEBPASSWORD" ""
|
||||
echo -e " ${TICK} Password Removed"
|
||||
exit 0
|
||||
fi
|
||||
if [ "${PASSWORD}" == "" ]; then
|
||||
change_setting "WEBPASSWORD" ""
|
||||
echo -e " ${TICK} Password Removed"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
read -s -r -p "Confirm Password: " CONFIRM
|
||||
echo ""
|
||||
read -s -r -p "Confirm Password: " CONFIRM
|
||||
echo ""
|
||||
fi
|
||||
|
||||
if [ "${PASSWORD}" == "${CONFIRM}" ] ; then
|
||||
@@ -182,7 +182,7 @@ ProcessDNSSettings() {
|
||||
fi
|
||||
|
||||
delete_dnsmasq_setting "dnssec"
|
||||
delete_dnsmasq_setting "trust-anchor="
|
||||
delete_dnsmasq_setting "trust-anchor"
|
||||
|
||||
if [[ "${DNSSEC}" == true ]]; then
|
||||
echo "dnssec
|
||||
@@ -199,6 +199,8 @@ trust-anchor=.,20326,8,2,E06D44B80B8F1D39A95C0B0D7C65D08458E880409BBC68345710423
|
||||
# Setup interface listening behavior of dnsmasq
|
||||
delete_dnsmasq_setting "interface"
|
||||
delete_dnsmasq_setting "local-service"
|
||||
delete_dnsmasq_setting "except-interface"
|
||||
delete_dnsmasq_setting "bind-interfaces"
|
||||
|
||||
if [[ "${DNSMASQ_LISTENING}" == "all" ]]; then
|
||||
# Listen on all interfaces, permit all origins
|
||||
@@ -207,6 +209,7 @@ trust-anchor=.,20326,8,2,E06D44B80B8F1D39A95C0B0D7C65D08458E880409BBC68345710423
|
||||
# Listen only on all interfaces, but only local subnets
|
||||
add_dnsmasq_setting "local-service"
|
||||
else
|
||||
# Options "bind" and "single"
|
||||
# Listen only on one interface
|
||||
# Use eth0 as fallback interface if interface is missing in setupVars.conf
|
||||
if [ -z "${PIHOLE_INTERFACE}" ]; then
|
||||
@@ -214,6 +217,11 @@ trust-anchor=.,20326,8,2,E06D44B80B8F1D39A95C0B0D7C65D08458E880409BBC68345710423
|
||||
fi
|
||||
|
||||
add_dnsmasq_setting "interface" "${PIHOLE_INTERFACE}"
|
||||
|
||||
if [[ "${DNSMASQ_LISTENING}" == "bind" ]]; then
|
||||
# Really bind to interface
|
||||
add_dnsmasq_setting "bind-interfaces"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ "${CONDITIONAL_FORWARDING}" == true ]]; then
|
||||
@@ -247,8 +255,8 @@ trust-anchor=.,20326,8,2,E06D44B80B8F1D39A95C0B0D7C65D08458E880409BBC68345710423
|
||||
3 ) REV_SERVER_CIDR="${arrRev[0]}.0.0.0/8";;
|
||||
esac
|
||||
else
|
||||
# Set REV_SERVER_CIDR to whatever value it was set to
|
||||
REV_SERVER_CIDR="${CONDITIONAL_FORWARDING_REVERSE}"
|
||||
# Set REV_SERVER_CIDR to whatever value it was set to
|
||||
REV_SERVER_CIDR="${CONDITIONAL_FORWARDING_REVERSE}"
|
||||
fi
|
||||
|
||||
# If REV_SERVER_CIDR is not converted by the above, then use the REV_SERVER_TARGET variable to derive it
|
||||
@@ -371,34 +379,34 @@ ProcessDHCPSettings() {
|
||||
source "${setupVars}"
|
||||
|
||||
if [[ "${DHCP_ACTIVE}" == "true" ]]; then
|
||||
interface="${PIHOLE_INTERFACE}"
|
||||
interface="${PIHOLE_INTERFACE}"
|
||||
|
||||
# Use eth0 as fallback interface
|
||||
if [ -z ${interface} ]; then
|
||||
interface="eth0"
|
||||
fi
|
||||
# Use eth0 as fallback interface
|
||||
if [ -z ${interface} ]; then
|
||||
interface="eth0"
|
||||
fi
|
||||
|
||||
if [[ "${PIHOLE_DOMAIN}" == "" ]]; then
|
||||
PIHOLE_DOMAIN="lan"
|
||||
change_setting "PIHOLE_DOMAIN" "${PIHOLE_DOMAIN}"
|
||||
fi
|
||||
if [[ "${PIHOLE_DOMAIN}" == "" ]]; then
|
||||
PIHOLE_DOMAIN="lan"
|
||||
change_setting "PIHOLE_DOMAIN" "${PIHOLE_DOMAIN}"
|
||||
fi
|
||||
|
||||
if [[ "${DHCP_LEASETIME}" == "0" ]]; then
|
||||
leasetime="infinite"
|
||||
elif [[ "${DHCP_LEASETIME}" == "" ]]; then
|
||||
leasetime="24"
|
||||
change_setting "DHCP_LEASETIME" "${leasetime}"
|
||||
elif [[ "${DHCP_LEASETIME}" == "24h" ]]; then
|
||||
#Installation is affected by a known bug introduced in a previous version.
|
||||
#This will automatically clean up setupVars.conf and remove the unnecessary "h"
|
||||
leasetime="24"
|
||||
change_setting "DHCP_LEASETIME" "${leasetime}"
|
||||
else
|
||||
leasetime="${DHCP_LEASETIME}h"
|
||||
fi
|
||||
if [[ "${DHCP_LEASETIME}" == "0" ]]; then
|
||||
leasetime="infinite"
|
||||
elif [[ "${DHCP_LEASETIME}" == "" ]]; then
|
||||
leasetime="24"
|
||||
change_setting "DHCP_LEASETIME" "${leasetime}"
|
||||
elif [[ "${DHCP_LEASETIME}" == "24h" ]]; then
|
||||
#Installation is affected by a known bug introduced in a previous version.
|
||||
#This will automatically clean up setupVars.conf and remove the unnecessary "h"
|
||||
leasetime="24"
|
||||
change_setting "DHCP_LEASETIME" "${leasetime}"
|
||||
else
|
||||
leasetime="${DHCP_LEASETIME}h"
|
||||
fi
|
||||
|
||||
# Write settings to file
|
||||
echo "###############################################################################
|
||||
# Write settings to file
|
||||
echo "###############################################################################
|
||||
# DHCP SERVER CONFIG FILE AUTOMATICALLY POPULATED BY PI-HOLE WEB INTERFACE. #
|
||||
# ANY CHANGES MADE TO THIS FILE WILL BE LOST ON CHANGE #
|
||||
###############################################################################
|
||||
@@ -408,34 +416,34 @@ dhcp-option=option:router,${DHCP_ROUTER}
|
||||
dhcp-leasefile=/etc/pihole/dhcp.leases
|
||||
#quiet-dhcp
|
||||
" > "${dhcpconfig}"
|
||||
chmod 644 "${dhcpconfig}"
|
||||
chmod 644 "${dhcpconfig}"
|
||||
|
||||
if [[ "${PIHOLE_DOMAIN}" != "none" ]]; then
|
||||
echo "domain=${PIHOLE_DOMAIN}" >> "${dhcpconfig}"
|
||||
if [[ "${PIHOLE_DOMAIN}" != "none" ]]; then
|
||||
echo "domain=${PIHOLE_DOMAIN}" >> "${dhcpconfig}"
|
||||
|
||||
# When there is a Pi-hole domain set and "Never forward non-FQDNs" is
|
||||
# ticked, we add `local=/domain/` to tell FTL that this domain is purely
|
||||
# local and FTL may answer queries from /etc/hosts or DHCP but should
|
||||
# never forward queries on that domain to any upstream servers
|
||||
if [[ "${DNS_FQDN_REQUIRED}" == true ]]; then
|
||||
echo "local=/${PIHOLE_DOMAIN}/" >> "${dhcpconfig}"
|
||||
# When there is a Pi-hole domain set and "Never forward non-FQDNs" is
|
||||
# ticked, we add `local=/domain/` to tell FTL that this domain is purely
|
||||
# local and FTL may answer queries from /etc/hosts or DHCP but should
|
||||
# never forward queries on that domain to any upstream servers
|
||||
if [[ "${DNS_FQDN_REQUIRED}" == true ]]; then
|
||||
echo "local=/${PIHOLE_DOMAIN}/" >> "${dhcpconfig}"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
# Sourced from setupVars
|
||||
# shellcheck disable=SC2154
|
||||
if [[ "${DHCP_rapid_commit}" == "true" ]]; then
|
||||
echo "dhcp-rapid-commit" >> "${dhcpconfig}"
|
||||
fi
|
||||
# Sourced from setupVars
|
||||
# shellcheck disable=SC2154
|
||||
if [[ "${DHCP_rapid_commit}" == "true" ]]; then
|
||||
echo "dhcp-rapid-commit" >> "${dhcpconfig}"
|
||||
fi
|
||||
|
||||
if [[ "${DHCP_IPv6}" == "true" ]]; then
|
||||
echo "#quiet-dhcp6
|
||||
if [[ "${DHCP_IPv6}" == "true" ]]; then
|
||||
echo "#quiet-dhcp6
|
||||
#enable-ra
|
||||
dhcp-option=option6:dns-server,[::]
|
||||
dhcp-range=::100,::1ff,constructor:${interface},ra-names,slaac,64,3600
|
||||
ra-param=*,0,0
|
||||
" >> "${dhcpconfig}"
|
||||
fi
|
||||
fi
|
||||
|
||||
else
|
||||
if [[ -f "${dhcpconfig}" ]]; then
|
||||
@@ -515,13 +523,13 @@ CustomizeAdLists() {
|
||||
|
||||
if CheckUrl "${address}"; then
|
||||
if [[ "${args[2]}" == "enable" ]]; then
|
||||
sqlite3 "${gravityDBfile}" "UPDATE adlist SET enabled = 1 WHERE address = '${address}'"
|
||||
pihole-FTL sqlite3 "${gravityDBfile}" "UPDATE adlist SET enabled = 1 WHERE address = '${address}'"
|
||||
elif [[ "${args[2]}" == "disable" ]]; then
|
||||
sqlite3 "${gravityDBfile}" "UPDATE adlist SET enabled = 0 WHERE address = '${address}'"
|
||||
pihole-FTL sqlite3 "${gravityDBfile}" "UPDATE adlist SET enabled = 0 WHERE address = '${address}'"
|
||||
elif [[ "${args[2]}" == "add" ]]; then
|
||||
sqlite3 "${gravityDBfile}" "INSERT OR IGNORE INTO adlist (address, comment) VALUES ('${address}', '${comment}')"
|
||||
pihole-FTL sqlite3 "${gravityDBfile}" "INSERT OR IGNORE INTO adlist (address, comment) VALUES ('${address}', '${comment}')"
|
||||
elif [[ "${args[2]}" == "del" ]]; then
|
||||
sqlite3 "${gravityDBfile}" "DELETE FROM adlist WHERE address = '${address}'"
|
||||
pihole-FTL sqlite3 "${gravityDBfile}" "DELETE FROM adlist WHERE address = '${address}'"
|
||||
else
|
||||
echo "Not permitted"
|
||||
return 1
|
||||
@@ -532,25 +540,6 @@ CustomizeAdLists() {
|
||||
fi
|
||||
}
|
||||
|
||||
SetPrivacyMode() {
|
||||
if [[ "${args[2]}" == "true" ]]; then
|
||||
change_setting "API_PRIVACY_MODE" "true"
|
||||
else
|
||||
change_setting "API_PRIVACY_MODE" "false"
|
||||
fi
|
||||
}
|
||||
|
||||
ResolutionSettings() {
|
||||
typ="${args[2]}"
|
||||
state="${args[3]}"
|
||||
|
||||
if [[ "${typ}" == "forward" ]]; then
|
||||
change_setting "API_GET_UPSTREAM_DNS_HOSTNAME" "${state}"
|
||||
elif [[ "${typ}" == "clients" ]]; then
|
||||
change_setting "API_GET_CLIENT_HOSTNAME" "${state}"
|
||||
fi
|
||||
}
|
||||
|
||||
AddDHCPStaticAddress() {
|
||||
mac="${args[2]}"
|
||||
ip="${args[3]}"
|
||||
@@ -619,12 +608,13 @@ Example: 'pihole -a -i local'
|
||||
Specify dnsmasq's network interface listening behavior
|
||||
|
||||
Interfaces:
|
||||
local Listen on all interfaces, but only allow queries from
|
||||
devices that are at most one hop away (local devices)
|
||||
single Listen only on ${PIHOLE_INTERFACE} interface
|
||||
local Only respond to queries from devices that
|
||||
are at most one hop away (local devices)
|
||||
single Respond only on interface ${PIHOLE_INTERFACE}
|
||||
bind Bind only on interface ${PIHOLE_INTERFACE}
|
||||
all Listen on all interfaces, permit all origins"
|
||||
exit 0
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ "${args[2]}" == "all" ]]; then
|
||||
echo -e " ${INFO} Listening on all interfaces, permitting all origins. Please use a firewall!"
|
||||
@@ -632,6 +622,9 @@ Interfaces:
|
||||
elif [[ "${args[2]}" == "local" ]]; then
|
||||
echo -e " ${INFO} Listening on all interfaces, permitting origins from one hop away (LAN)"
|
||||
change_setting "DNSMASQ_LISTENING" "local"
|
||||
elif [[ "${args[2]}" == "bind" ]]; then
|
||||
echo -e " ${INFO} Binding on interface ${PIHOLE_INTERFACE}"
|
||||
change_setting "DNSMASQ_LISTENING" "bind"
|
||||
else
|
||||
echo -e " ${INFO} Listening only on interface ${PIHOLE_INTERFACE}"
|
||||
change_setting "DNSMASQ_LISTENING" "single"
|
||||
@@ -647,12 +640,17 @@ Interfaces:
|
||||
}
|
||||
|
||||
Teleporter() {
|
||||
local datetimestamp
|
||||
local host
|
||||
datetimestamp=$(date "+%Y-%m-%d_%H-%M-%S")
|
||||
host=$(hostname)
|
||||
host="${host//./_}"
|
||||
php /var/www/html/admin/scripts/pi-hole/php/teleporter.php > "pi-hole-${host:-noname}-teleporter_${datetimestamp}.tar.gz"
|
||||
local filename
|
||||
filename="${args[2]}"
|
||||
if [[ -z "${filename}" ]]; then
|
||||
local datetimestamp
|
||||
local host
|
||||
datetimestamp=$(date "+%Y-%m-%d_%H-%M-%S")
|
||||
host=$(hostname)
|
||||
host="${host//./_}"
|
||||
filename="pi-hole-${host:-noname}-teleporter_${datetimestamp}.tar.gz"
|
||||
fi
|
||||
php /var/www/html/admin/scripts/pi-hole/php/teleporter.php > "${filename}"
|
||||
}
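Usage of the new optional archive name (file names are examples):

pihole -a -t                     # auto-named pi-hole-<host>-teleporter_<timestamp>.tar.gz
pihole -a -t mybackup.tar.gz     # written to mybackup.tar.gz instead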
|
||||
|
||||
checkDomain()
|
||||
@@ -673,27 +671,27 @@ addAudit()
|
||||
domains=""
|
||||
for domain in "$@"
|
||||
do
|
||||
# Check domain to be added. Only continue if it is valid
|
||||
validDomain="$(checkDomain "${domain}")"
|
||||
if [[ -n "${validDomain}" ]]; then
|
||||
# Put comma in between domains when there is
|
||||
# more than one domain to be added
|
||||
# SQL INSERT allows adding multiple rows at once using the format
|
||||
## INSERT INTO table (domain) VALUES ('abc.de'),('fgh.ij'),('klm.no'),('pqr.st');
|
||||
if [[ -n "${domains}" ]]; then
|
||||
domains="${domains},"
|
||||
# Check domain to be added. Only continue if it is valid
|
||||
validDomain="$(checkDomain "${domain}")"
|
||||
if [[ -n "${validDomain}" ]]; then
|
||||
# Put comma in between domains when there is
|
||||
# more than one domain to be added
|
||||
# SQL INSERT allows adding multiple rows at once using the format
|
||||
## INSERT INTO table (domain) VALUES ('abc.de'),('fgh.ij'),('klm.no'),('pqr.st');
|
||||
if [[ -n "${domains}" ]]; then
|
||||
domains="${domains},"
|
||||
fi
|
||||
domains="${domains}('${domain}')"
|
||||
fi
|
||||
domains="${domains}('${domain}')"
|
||||
fi
|
||||
done
|
||||
# Insert only the domain here. The date_added field will be
|
||||
# filled with its default value (date_added = current timestamp)
|
||||
sqlite3 "${gravityDBfile}" "INSERT INTO domain_audit (domain) VALUES ${domains};"
|
||||
pihole-FTL sqlite3 "${gravityDBfile}" "INSERT INTO domain_audit (domain) VALUES ${domains};"
|
||||
}
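How the VALUES list ends up formatted, shown with two example domains:

domains=""
for domain in example.com ads.example.net; do
    if [[ -n "${domains}" ]]; then
        domains="${domains},"
    fi
    domains="${domains}('${domain}')"
done
echo "INSERT INTO domain_audit (domain) VALUES ${domains};"
# -> INSERT INTO domain_audit (domain) VALUES ('example.com'),('ads.example.net');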
|
||||
|
||||
clearAudit()
|
||||
{
|
||||
sqlite3 "${gravityDBfile}" "DELETE FROM domain_audit;"
|
||||
pihole-FTL sqlite3 "${gravityDBfile}" "DELETE FROM domain_audit;"
|
||||
}
|
||||
|
||||
SetPrivacyLevel() {
|
||||
@@ -709,10 +707,25 @@ AddCustomDNSAddress() {
|
||||
|
||||
ip="${args[2]}"
|
||||
host="${args[3]}"
|
||||
echo "${ip} ${host}" >> "${dnscustomfile}"
|
||||
reload="${args[4]}"
|
||||
|
||||
# Restart dnsmasq to load new custom DNS entries
|
||||
RestartDNS
|
||||
validHost="$(checkDomain "${host}")"
|
||||
if [[ -n "${validHost}" ]]; then
|
||||
if valid_ip "${ip}" || valid_ip6 "${ip}" ; then
|
||||
echo "${ip} ${validHost}" >> "${dnscustomfile}"
|
||||
else
|
||||
echo -e " ${CROSS} Invalid IP has been passed"
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
echo " ${CROSS} Invalid Domain passed!"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Restart dnsmasq to load new custom DNS entries only if $reload not false
|
||||
if [[ ! $reload == "false" ]]; then
|
||||
RestartDNS
|
||||
fi
|
||||
}
|
||||
|
||||
RemoveCustomDNSAddress() {
|
||||
@@ -720,16 +733,25 @@ RemoveCustomDNSAddress() {
|
||||
|
||||
ip="${args[2]}"
|
||||
host="${args[3]}"
|
||||
reload="${args[4]}"
|
||||
|
||||
if valid_ip "${ip}" || valid_ip6 "${ip}" ; then
|
||||
sed -i "/^${ip} ${host}$/d" "${dnscustomfile}"
|
||||
validHost="$(checkDomain "${host}")"
|
||||
if [[ -n "${validHost}" ]]; then
|
||||
if valid_ip "${ip}" || valid_ip6 "${ip}" ; then
|
||||
sed -i "/^${ip} ${validHost}$/Id" "${dnscustomfile}"
|
||||
else
|
||||
echo -e " ${CROSS} Invalid IP has been passed"
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
echo -e " ${CROSS} Invalid IP has been passed"
|
||||
echo " ${CROSS} Invalid Domain passed!"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Restart dnsmasq to update removed custom DNS entries
|
||||
RestartDNS
|
||||
# Restart dnsmasq to update removed custom DNS entries only if reload is not false
|
||||
if [[ ! $reload == "false" ]]; then
|
||||
RestartDNS
|
||||
fi
|
||||
}
|
||||
|
||||
AddCustomCNAMERecord() {
|
||||
@@ -737,11 +759,25 @@ AddCustomCNAMERecord() {
|
||||
|
||||
domain="${args[2]}"
|
||||
target="${args[3]}"
|
||||
reload="${args[4]}"
|
||||
|
||||
echo "cname=${domain},${target}" >> "${dnscustomcnamefile}"
|
||||
|
||||
# Restart dnsmasq to load new custom CNAME records
|
||||
RestartDNS
|
||||
validDomain="$(checkDomain "${domain}")"
|
||||
if [[ -n "${validDomain}" ]]; then
|
||||
validTarget="$(checkDomain "${target}")"
|
||||
if [[ -n "${validTarget}" ]]; then
|
||||
echo "cname=${validDomain},${validTarget}" >> "${dnscustomcnamefile}"
|
||||
else
|
||||
echo " ${CROSS} Invalid Target Passed!"
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
echo " ${CROSS} Invalid Domain passed!"
|
||||
exit 1
|
||||
fi
|
||||
# Restart dnsmasq to load new custom CNAME records only if reload is not false
|
||||
if [[ ! $reload == "false" ]]; then
|
||||
RestartDNS
|
||||
fi
|
||||
}
|
||||
|
||||
RemoveCustomCNAMERecord() {
|
||||
@@ -749,12 +785,13 @@ RemoveCustomCNAMERecord() {
|
||||
|
||||
domain="${args[2]}"
|
||||
target="${args[3]}"
|
||||
reload="${args[4]}"
|
||||
|
||||
validDomain="$(checkDomain "${domain}")"
|
||||
if [[ -n "${validDomain}" ]]; then
|
||||
validTarget="$(checkDomain "${target}")"
|
||||
if [[ -n "${validDomain}" ]]; then
|
||||
sed -i "/cname=${validDomain},${validTarget}$/d" "${dnscustomcnamefile}"
|
||||
if [[ -n "${validTarget}" ]]; then
|
||||
sed -i "/cname=${validDomain},${validTarget}$/Id" "${dnscustomcnamefile}"
|
||||
else
|
||||
echo " ${CROSS} Invalid Target Passed!"
|
||||
exit 1
|
||||
@@ -764,8 +801,27 @@ RemoveCustomCNAMERecord() {
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Restart dnsmasq to update removed custom CNAME records
|
||||
RestartDNS
|
||||
# Restart dnsmasq to update removed custom CNAME records only if $reload not false
|
||||
if [[ ! $reload == "false" ]]; then
|
||||
RestartDNS
|
||||
fi
|
||||
}
|
||||
|
||||
SetRateLimit() {
|
||||
local rate_limit_count rate_limit_interval reload
|
||||
rate_limit_count="${args[2]}"
|
||||
rate_limit_interval="${args[3]}"
|
||||
reload="${args[4]}"
|
||||
|
||||
# Set rate-limit setting if valid
|
||||
if [ "${rate_limit_count}" -ge 0 ] && [ "${rate_limit_interval}" -ge 0 ]; then
|
||||
changeFTLsetting "RATE_LIMIT" "${rate_limit_count}/${rate_limit_interval}"
|
||||
fi
|
||||
|
||||
# Restart FTL to update rate-limit settings only if $reload not false
|
||||
if [[ ! $reload == "false" ]]; then
|
||||
RestartDNS
|
||||
fi
|
||||
}
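Example invocation (the numbers are illustrative); the setting lands in pihole-FTL.conf as RATE_LIMIT=<count>/<interval>:

pihole -a ratelimit 1000 60
grep '^RATE_LIMIT=' /etc/pihole/pihole-FTL.conf   # -> RATE_LIMIT=1000/60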
|
||||
|
||||
main() {
|
||||
@@ -788,8 +844,6 @@ main() {
|
||||
"layout" ) SetWebUILayout;;
|
||||
"theme" ) SetWebUITheme;;
|
||||
"-h" | "--help" ) helpFunc;;
|
||||
"privacymode" ) SetPrivacyMode;;
|
||||
"resolve" ) ResolutionSettings;;
|
||||
"addstaticdhcp" ) AddDHCPStaticAddress;;
|
||||
"removestaticdhcp" ) RemoveDHCPStaticAddress;;
|
||||
"-e" | "email" ) SetAdminEmail "$3";;
|
||||
@@ -803,6 +857,7 @@ main() {
|
||||
"removecustomdns" ) RemoveCustomDNSAddress;;
|
||||
"addcustomcname" ) AddCustomCNAMERecord;;
|
||||
"removecustomcname" ) RemoveCustomCNAMERecord;;
|
||||
"ratelimit" ) SetRateLimit;;
|
||||
* ) helpFunc;;
|
||||
esac
|
||||
|
||||
|
@@ -1,28 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
# Pi-hole: A black hole for Internet advertisements
|
||||
# (c) 2017 Pi-hole, LLC (https://pi-hole.net)
|
||||
# Network-wide ad blocking via your own hardware.
|
||||
#
|
||||
# Provides an automated migration subroutine to convert Pi-hole v3.x wildcard domains to Pi-hole v4.x regex filters
|
||||
#
|
||||
# This file is copyright under the latest version of the EUPL.
|
||||
# Please see LICENSE file for your rights under this license.
|
||||
|
||||
# regexFile set in gravity.sh
|
||||
|
||||
wildcardFile="/etc/dnsmasq.d/03-pihole-wildcard.conf"
|
||||
|
||||
convert_wildcard_to_regex() {
|
||||
if [ ! -f "${wildcardFile}" ]; then
|
||||
return
|
||||
fi
|
||||
local addrlines domains uniquedomains
|
||||
# Obtain wildcard domains from old file
|
||||
addrlines="$(grep -oE "/.*/" ${wildcardFile})"
|
||||
# Strip "/" from domain names and convert "." to regex-compatible "\."
|
||||
domains="$(sed 's/\///g;s/\./\\./g' <<< "${addrlines}")"
|
||||
# Remove repeated domains (may have been inserted two times due to A and AAAA blocking)
|
||||
uniquedomains="$(uniq <<< "${domains}")"
|
||||
# Automatically generate regex filters and remove old wildcards file
|
||||
awk '{print "(^|\\.)"$0"$"}' <<< "${uniquedomains}" >> "${regexFile:?}" && rm "${wildcardFile}"
|
||||
}
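For reference, what the removed converter produced for a sample wildcard entry (the domain is an example):

printf 'address=/doubleclick.net/192.168.1.2\naddress=/doubleclick.net/::1\n' \
    | grep -oE "/.*/" \
    | sed 's/\///g;s/\./\\./g' \
    | uniq \
    | awk '{print "(^|\\.)"$0"$"}'
# -> (^|\.)doubleclick\.net$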
|
@@ -57,7 +57,7 @@ CREATE TABLE info
|
||||
value TEXT NOT NULL
|
||||
);
|
||||
|
||||
INSERT INTO "info" VALUES('version','14');
|
||||
INSERT INTO "info" VALUES('version','15');
|
||||
|
||||
CREATE TABLE domain_audit
|
||||
(
|
||||
@@ -143,12 +143,10 @@ CREATE VIEW vw_gravity AS SELECT domain, adlist_by_group.group_id AS group_id
|
||||
LEFT JOIN "group" ON "group".id = adlist_by_group.group_id
|
||||
WHERE adlist.enabled = 1 AND (adlist_by_group.group_id IS NULL OR "group".enabled = 1);
|
||||
|
||||
CREATE VIEW vw_adlist AS SELECT DISTINCT address, adlist.id AS id
|
||||
CREATE VIEW vw_adlist AS SELECT DISTINCT address, id
|
||||
FROM adlist
|
||||
LEFT JOIN adlist_by_group ON adlist_by_group.adlist_id = adlist.id
|
||||
LEFT JOIN "group" ON "group".id = adlist_by_group.group_id
|
||||
WHERE adlist.enabled = 1 AND (adlist_by_group.group_id IS NULL OR "group".enabled = 1)
|
||||
ORDER BY adlist.id;
|
||||
WHERE enabled = 1
|
||||
ORDER BY id;
|
||||
|
||||
CREATE TRIGGER tr_domainlist_add AFTER INSERT ON domainlist
|
||||
BEGIN
|
||||
|
@@ -12,14 +12,17 @@ INSERT OR REPLACE INTO "group" SELECT * FROM OLD."group";
|
||||
INSERT OR REPLACE INTO domain_audit SELECT * FROM OLD.domain_audit;
|
||||
|
||||
INSERT OR REPLACE INTO domainlist SELECT * FROM OLD.domainlist;
|
||||
DELETE FROM OLD.domainlist_by_group WHERE domainlist_id NOT IN (SELECT id FROM OLD.domainlist);
|
||||
INSERT OR REPLACE INTO domainlist_by_group SELECT * FROM OLD.domainlist_by_group;
|
||||
|
||||
INSERT OR REPLACE INTO adlist SELECT * FROM OLD.adlist;
|
||||
DELETE FROM OLD.adlist_by_group WHERE adlist_id NOT IN (SELECT id FROM OLD.adlist);
|
||||
INSERT OR REPLACE INTO adlist_by_group SELECT * FROM OLD.adlist_by_group;
|
||||
|
||||
INSERT OR REPLACE INTO info SELECT * FROM OLD.info;
|
||||
|
||||
INSERT OR REPLACE INTO client SELECT * FROM OLD.client;
|
||||
DELETE FROM OLD.client_by_group WHERE client_id NOT IN (SELECT id FROM OLD.client);
|
||||
INSERT OR REPLACE INTO client_by_group SELECT * FROM OLD.client_by_group;
|
||||
|
||||
|
||||
|
2
advanced/Templates/pihole-FTL.conf
Normal file
@@ -0,0 +1,2 @@
|
||||
#; Pi-hole FTL config file
|
||||
#; Comments should start with #; to avoid issues with PHP and bash reading this file
|
@@ -21,12 +21,19 @@ start() {
|
||||
else
|
||||
# Touch files to ensure they exist (create if non-existing, preserve if existing)
|
||||
mkdir -pm 0755 /run/pihole
|
||||
touch /run/pihole-FTL.pid /run/pihole-FTL.port /var/log/pihole-FTL.log /var/log/pihole.log /etc/pihole/dhcp.leases
|
||||
[ ! -f /run/pihole-FTL.pid ] && install -m 644 -o pihole -g pihole /dev/null /run/pihole-FTL.pid
|
||||
[ ! -f /run/pihole-FTL.port ] && install -m 644 -o pihole -g pihole /dev/null /run/pihole-FTL.port
|
||||
[ ! -f /var/log/pihole-FTL.log ] && install -m 644 -o pihole -g pihole /dev/null /var/log/pihole-FTL.log
|
||||
[ ! -f /var/log/pihole.log ] && install -m 644 -o pihole -g pihole /dev/null /var/log/pihole.log
|
||||
[ ! -f /etc/pihole/dhcp.leases ] && install -m 644 -o pihole -g pihole /dev/null /etc/pihole/dhcp.leases
|
||||
# Ensure that permissions are set so that pihole-FTL can edit all necessary files
|
||||
chown pihole:pihole /run/pihole-FTL.pid /run/pihole-FTL.port /var/log/pihole-FTL.log /var/log/pihole.log /etc/pihole/dhcp.leases /run/pihole /etc/pihole
|
||||
chmod 0644 /run/pihole-FTL.pid /run/pihole-FTL.port /var/log/pihole-FTL.log /var/log/pihole.log /etc/pihole/dhcp.leases /etc/pihole/macvendor.db
|
||||
chown pihole:pihole /run/pihole /etc/pihole /var/log/pihole.log /var/log/pihole.log /etc/pihole/dhcp.leases
|
||||
# Ensure that permissions are set so that pihole-FTL can edit the files. We ignore errors as the files may not (yet) exist
|
||||
chmod -f 0644 /etc/pihole/macvendor.db /etc/pihole/dhcp.leases /var/log/pihole-FTL.log /var/log/pihole.log
|
||||
# Chown database files to the user FTL runs as. We ignore errors as the files may not (yet) exist
|
||||
chown -f pihole:pihole /etc/pihole/pihole-FTL.db /etc/pihole/gravity.db /etc/pihole/macvendor.db
|
||||
# Chown database file permissions so that the pihole group (web interface) can edit the file. We ignore errors as the files may not (yet) exist
|
||||
chmod -f 0664 /etc/pihole/pihole-FTL.db
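The install calls above create each file with the desired mode and owner in a single step; roughly equivalent to the older touch/chown/chmod sequence (the path is shown as an example):

install -m 644 -o pihole -g pihole /dev/null /run/pihole-FTL.port
# is roughly:
touch /run/pihole-FTL.port
chown pihole:pihole /run/pihole-FTL.port
chmod 644 /run/pihole-FTL.port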
|
||||
if setcap CAP_NET_BIND_SERVICE,CAP_NET_RAW,CAP_NET_ADMIN,CAP_SYS_NICE,CAP_IPC_LOCK,CAP_CHOWN+eip "/usr/bin/pihole-FTL"; then
|
||||
su -s /bin/sh -c "/usr/bin/pihole-FTL" pihole
|
||||
else
|
||||
|
@@ -164,13 +164,35 @@ ini_set("default_socket_timeout", 3);
|
||||
function queryAds($serverName) {
|
||||
// Determine the time it takes while querying adlists
|
||||
$preQueryTime = microtime(true)-$_SERVER["REQUEST_TIME_FLOAT"];
|
||||
|
||||
// Determine which protocol should be used
|
||||
$protocol = "http";
|
||||
if ((isset($_SERVER['HTTPS']) && $_SERVER['HTTPS'] === 'on') ||
|
||||
(isset($_SERVER['REQUEST_SCHEME']) && $_SERVER['REQUEST_SCHEME'] === 'https') ||
|
||||
(isset($_SERVER['HTTP_X_FORWARDED_PROTO']) && $_SERVER['HTTP_X_FORWARDED_PROTO'] === 'https')
|
||||
) {
|
||||
$protocol = "https";
|
||||
}
|
||||
|
||||
// Format the URL
|
||||
$queryAdsURL = sprintf(
|
||||
"http://127.0.0.1:%s/admin/scripts/pi-hole/php/queryads.php?domain=%s&bp",
|
||||
"%s://127.0.0.1:%s/admin/scripts/pi-hole/php/queryads.php?domain=%s&bp",
|
||||
$protocol,
|
||||
$_SERVER["SERVER_PORT"],
|
||||
$serverName
|
||||
);
|
||||
$queryAds = file($queryAdsURL, FILE_IGNORE_NEW_LINES);
|
||||
$queryAds = array_values(array_filter(preg_replace("/data:\s+/", "", $queryAds)));
|
||||
|
||||
// Request the file and receive the response
|
||||
$queryAdsFile = file($queryAdsURL, FILE_IGNORE_NEW_LINES);
|
||||
|
||||
// $queryAdsFile must be an array (to avoid PHP 8.0+ error)
|
||||
if (is_array($queryAdsFile)) {
|
||||
$queryAds = array_values(array_filter(preg_replace("/data:\s+/", "", $queryAdsFile)));
|
||||
} else {
|
||||
// if not an array, return an error message
|
||||
return array("0" => "error", "1" => "<br>(".gettype($queryAdsFile).")<br>".print_r($queryAdsFile, true));
|
||||
}
|
||||
|
||||
$queryTime = sprintf("%.0f", (microtime(true)-$_SERVER["REQUEST_TIME_FLOAT"]) - $preQueryTime);
|
||||
|
||||
// Exception Handling
|
||||
|
@@ -36,6 +36,11 @@ server.port = 80
|
||||
accesslog.filename = "/var/log/lighttpd/access.log"
|
||||
accesslog.format = "%{%s}t|%V|%r|%s|%b"
|
||||
|
||||
# Allow streaming response
|
||||
# reference: https://redmine.lighttpd.net/projects/lighttpd/wiki/Server_stream-response-bodyDetails
|
||||
server.stream-response-body = 1
|
||||
#ssl.read-ahead = "disable"
|
||||
|
||||
index-file.names = ( "index.php", "index.html", "index.lighttpd.html" )
|
||||
url.access-deny = ( "~", ".inc", ".md", ".yml", ".ini" )
|
||||
static-file.exclude-extensions = ( ".php", ".pl", ".fcgi" )
|
||||
@@ -85,5 +90,12 @@ $HTTP["url"] =~ "^/admin/\.(.*)" {
|
||||
url.access-deny = ("")
|
||||
}
|
||||
|
||||
# allow teleporter and API qr code iframe on settings page
|
||||
$HTTP["url"] =~ "/(teleporter|api_token)\.php$" {
|
||||
$HTTP["referer"] =~ "/admin/settings\.php" {
|
||||
setenv.add-response-header = ( "X-Frame-Options" => "SAMEORIGIN" )
|
||||
}
|
||||
}
|
||||
|
||||
# Default expire header
|
||||
expire.url = ( "" => "access plus 0 seconds" )
|
||||
|
@@ -37,6 +37,11 @@ server.port = 80
|
||||
accesslog.filename = "/var/log/lighttpd/access.log"
|
||||
accesslog.format = "%{%s}t|%V|%r|%s|%b"
|
||||
|
||||
# Allow streaming response
|
||||
# reference: https://redmine.lighttpd.net/projects/lighttpd/wiki/Server_stream-response-bodyDetails
|
||||
server.stream-response-body = 1
|
||||
#ssl.read-ahead = "disable"
|
||||
|
||||
index-file.names = ( "index.php", "index.html", "index.lighttpd.html" )
|
||||
url.access-deny = ( "~", ".inc", ".md", ".yml", ".ini" )
|
||||
static-file.exclude-extensions = ( ".php", ".pl", ".fcgi" )
|
||||
@@ -93,5 +98,12 @@ $HTTP["url"] =~ "^/admin/\.(.*)" {
|
||||
url.access-deny = ("")
|
||||
}
|
||||
|
||||
# allow teleporter and API qr code iframe on settings page
|
||||
$HTTP["url"] =~ "/(teleporter|api_token)\.php$" {
|
||||
$HTTP["referer"] =~ "/admin/settings\.php" {
|
||||
setenv.add-response-header = ( "X-Frame-Options" => "SAMEORIGIN" )
|
||||
}
|
||||
}
|
||||
|
||||
# Default expire header
|
||||
expire.url = ( "" => "access plus 0 seconds" )
|
||||
|
@@ -2,7 +2,7 @@
|
||||
# shellcheck disable=SC1090
|
||||
|
||||
# Pi-hole: A black hole for Internet advertisements
|
||||
# (c) 2017-2018 Pi-hole, LLC (https://pi-hole.net)
|
||||
# (c) 2017-2021 Pi-hole, LLC (https://pi-hole.net)
|
||||
# Network-wide ad blocking via your own hardware.
|
||||
#
|
||||
# Installs and Updates Pi-hole
|
||||
@@ -34,27 +34,26 @@ export PATH+=':/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'
|
||||
|
||||
# List of supported DNS servers
|
||||
DNS_SERVERS=$(cat << EOM
|
||||
Google (ECS);8.8.8.8;8.8.4.4;2001:4860:4860:0:0:0:0:8888;2001:4860:4860:0:0:0:0:8844
|
||||
Google (ECS, DNSSEC);8.8.8.8;8.8.4.4;2001:4860:4860:0:0:0:0:8888;2001:4860:4860:0:0:0:0:8844
|
||||
OpenDNS (ECS, DNSSEC);208.67.222.222;208.67.220.220;2620:119:35::35;2620:119:53::53
|
||||
Level3;4.2.2.1;4.2.2.2;;
|
||||
Comodo;8.26.56.26;8.20.247.20;;
|
||||
DNS.WATCH;84.200.69.80;84.200.70.40;2001:1608:10:25:0:0:1c04:b12f;2001:1608:10:25:0:0:9249:d69b
|
||||
DNS.WATCH (DNSSEC);84.200.69.80;84.200.70.40;2001:1608:10:25:0:0:1c04:b12f;2001:1608:10:25:0:0:9249:d69b
|
||||
Quad9 (filtered, DNSSEC);9.9.9.9;149.112.112.112;2620:fe::fe;2620:fe::9
|
||||
Quad9 (unfiltered, no DNSSEC);9.9.9.10;149.112.112.10;2620:fe::10;2620:fe::fe:10
|
||||
Quad9 (filtered + ECS);9.9.9.11;149.112.112.11;2620:fe::11;2620:fe::fe:11
|
||||
Cloudflare;1.1.1.1;1.0.0.1;2606:4700:4700::1111;2606:4700:4700::1001
|
||||
Quad9 (filtered, ECS, DNSSEC);9.9.9.11;149.112.112.11;2620:fe::11;2620:fe::fe:11
|
||||
Cloudflare (DNSSEC);1.1.1.1;1.0.0.1;2606:4700:4700::1111;2606:4700:4700::1001
|
||||
EOM
|
||||
)
|
||||
|
||||
# Location for final installation log storage
|
||||
installLogLoc=/etc/pihole/install.log
|
||||
installLogLoc="/etc/pihole/install.log"
|
||||
# This is an important file as it contains information specific to the machine it's being installed on
|
||||
setupVars=/etc/pihole/setupVars.conf
|
||||
setupVars="/etc/pihole/setupVars.conf"
|
||||
# Pi-hole uses lighttpd as a Web server, and this is the config file for it
|
||||
# shellcheck disable=SC2034
|
||||
lighttpdConfig=/etc/lighttpd/lighttpd.conf
|
||||
lighttpdConfig="/etc/lighttpd/lighttpd.conf"
|
||||
# This is a file used for the colorized output
|
||||
coltable=/opt/pihole/COL_TABLE
|
||||
coltable="/opt/pihole/COL_TABLE"
|
||||
|
||||
# Root of the web server
|
||||
webroot="/var/www/html"
|
||||
@@ -77,7 +76,7 @@ PI_HOLE_CONFIG_DIR="/etc/pihole"
|
||||
PI_HOLE_BIN_DIR="/usr/local/bin"
|
||||
PI_HOLE_BLOCKPAGE_DIR="${webroot}/pihole"
|
||||
if [ -z "$useUpdateVars" ]; then
|
||||
useUpdateVars=false
|
||||
useUpdateVars=false
|
||||
fi
|
||||
|
||||
adlistFile="/etc/pihole/adlists.list"
|
||||
@@ -91,7 +90,7 @@ PRIVACY_LEVEL=0
|
||||
CACHE_SIZE=10000
|
||||
|
||||
if [ -z "${USER}" ]; then
|
||||
USER="$(id -un)"
|
||||
USER="$(id -un)"
|
||||
fi
|
||||
|
||||
# whiptail dialog dimensions: 20 rows and 70 chars width assures to fit on small screens and is known to hold all content.
|
||||
@@ -134,7 +133,7 @@ fi
|
||||
# A simple function that just echoes out our logo in ASCII format
|
||||
# This lets users know that it is a Pi-hole, LLC product
|
||||
show_ascii_berry() {
|
||||
echo -e "
|
||||
echo -e "
|
||||
${COL_LIGHT_GREEN}.;;,.
|
||||
.ccccc:,.
|
||||
:cccclll:. ..,,
|
||||
@@ -173,7 +172,7 @@ os_check() {
|
||||
local remote_os_domain valid_os valid_version valid_response detected_os detected_version display_warning cmdResult digReturnCode response
|
||||
remote_os_domain=${OS_CHECK_DOMAIN_NAME:-"versions.pi-hole.net"}
|
||||
|
||||
detected_os=$(grep "\bID\b" /etc/os-release | cut -d '=' -f2 | tr -d '"')
|
||||
detected_os=$(grep '^ID=' /etc/os-release | cut -d '=' -f2 | tr -d '"')
|
||||
detected_version=$(grep VERSION_ID /etc/os-release | cut -d '=' -f2 | tr -d '"')
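# Illustration (not from the diff above): an /etc/os-release containing ID=raspbian and
# VERSION_ID="10" yields detected_os=raspbian and detected_version=10; anchoring on '^ID='
# matches only the key at the start of the line rather than any occurrence of the word "ID".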
|
||||
|
||||
cmdResult="$(dig +short -t txt "${remote_os_domain}" @ns1.pi-hole.net 2>&1; echo $?)"
|
||||
@@ -260,125 +259,102 @@ os_check() {
|
||||
fi
|
||||
}
|
||||
|
||||
# This function waits for dpkg to unlock, which signals that the previous apt-get command has finished.
|
||||
test_dpkg_lock() {
|
||||
i=0
|
||||
printf " %b Waiting for package manager to finish (up to 30 seconds)\\n" "${INFO}"
|
||||
# fuser is a program to show which processes use the named files, sockets, or filesystems
|
||||
# So while the lock is held,
|
||||
while fuser /var/lib/dpkg/lock >/dev/null 2>&1
|
||||
do
|
||||
# we wait half a second,
|
||||
sleep 0.5
|
||||
# increase the iterator,
|
||||
((i=i+1))
|
||||
# exit if waiting for more than 30 seconds
|
||||
if [[ $i -gt 60 ]]; then
|
||||
printf " %b %bError: Could not verify package manager finished and released lock. %b\\n" "${CROSS}" "${COL_LIGHT_RED}" "${COL_NC}"
|
||||
printf " Attempt to install packages manually and retry.\\n"
|
||||
exit 1;
|
||||
fi
|
||||
done
|
||||
# and then report success once dpkg is unlocked.
|
||||
return 0
|
||||
}
|
||||
|
||||
# Compatibility
|
||||
package_manager_detect() {
|
||||
# If apt-get is installed, then we know it's part of the Debian family
|
||||
if is_command apt-get ; then
|
||||
# Set some global variables here
|
||||
# We don't set them earlier since the family might be Red Hat, so these values would be different
|
||||
PKG_MANAGER="apt-get"
|
||||
# A variable to store the command used to update the package cache
|
||||
UPDATE_PKG_CACHE="${PKG_MANAGER} update"
|
||||
# The command we will use to actually install packages
|
||||
PKG_INSTALL=("${PKG_MANAGER}" -qq --no-install-recommends install)
|
||||
# grep -c will return 1 if there are no matches. This is an acceptable condition, so we OR TRUE to prevent set -e exiting the script.
|
||||
PKG_COUNT="${PKG_MANAGER} -s -o Debug::NoLocking=true upgrade | grep -c ^Inst || true"
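# Illustration (not from the diff above) of the '|| true' guard described in the comment:
# under 'set -e', a grep -c with zero matches (exit status 1) would abort the script, e.g.
#   count=$(printf 'nothing\n' | grep -c '^Inst' || true)   # count is 0, script keeps running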
|
||||
# Update package cache. This is already required here to ensure apt-cache calls have package lists available.
|
||||
update_package_cache || exit 1
|
||||
# Debian 7 doesn't have iproute2 so check if it's available first
|
||||
if apt-cache show iproute2 > /dev/null 2>&1; then
|
||||
iproute_pkg="iproute2"
|
||||
# Otherwise, check if iproute is available
|
||||
elif apt-cache show iproute > /dev/null 2>&1; then
|
||||
iproute_pkg="iproute"
|
||||
# Else print error and exit
|
||||
else
|
||||
printf " %b Aborting installation: iproute2 and iproute packages were not found in APT repository.\\n" "${CROSS}"
|
||||
exit 1
|
||||
fi
|
||||
# Check for and determine version number (major and minor) of current php install
|
||||
if is_command php ; then
|
||||
printf " %b Existing PHP installation detected : PHP version %s\\n" "${INFO}" "$(php <<< "<?php echo PHP_VERSION ?>")"
|
||||
printf -v phpInsMajor "%d" "$(php <<< "<?php echo PHP_MAJOR_VERSION ?>")"
|
||||
printf -v phpInsMinor "%d" "$(php <<< "<?php echo PHP_MINOR_VERSION ?>")"
|
||||
# Is installed php version 7.0 or greater
|
||||
if [ "${phpInsMajor}" -ge 7 ]; then
|
||||
phpInsNewer=true
|
||||
# First check to see if apt-get is installed.
|
||||
if is_command apt-get ; then
|
||||
# Set some global variables here
|
||||
# We don't set them earlier since the installed package manager might be rpm, so these values would be different
|
||||
PKG_MANAGER="apt-get"
|
||||
# A variable to store the command used to update the package cache
|
||||
UPDATE_PKG_CACHE="${PKG_MANAGER} update"
|
||||
# The command we will use to actually install packages
|
||||
PKG_INSTALL=("${PKG_MANAGER}" -qq --no-install-recommends install)
|
||||
# grep -c will return 1 if there are no matches. This is an acceptable condition, so we OR TRUE to prevent set -e exiting the script.
|
||||
PKG_COUNT="${PKG_MANAGER} -s -o Debug::NoLocking=true upgrade | grep -c ^Inst || true"
|
||||
# Update package cache
|
||||
update_package_cache || exit 1
|
||||
# Check for and determine version number (major and minor) of current php install
|
||||
local phpVer="php"
|
||||
if is_command php ; then
|
||||
printf " %b Existing PHP installation detected : PHP version %s\\n" "${INFO}" "$(php <<< "<?php echo PHP_VERSION ?>")"
|
||||
printf -v phpInsMajor "%d" "$(php <<< "<?php echo PHP_MAJOR_VERSION ?>")"
|
||||
printf -v phpInsMinor "%d" "$(php <<< "<?php echo PHP_MINOR_VERSION ?>")"
|
||||
phpVer="php$phpInsMajor.$phpInsMinor"
|
||||
fi
|
||||
fi
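# Illustration (not from the diff above), worked example of the naming assuming PHP 7.4 is the
# installed version: phpVer becomes "php7.4", so PIHOLE_WEB_DEPS below expands to packages such
# as php7.4-common, php7.4-cgi, php7.4-sqlite3, php7.4-xml and php7.4-intl.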
|
||||
# Several other packages depend on the version of PHP. If PHP is not installed, or an insufficient version,
|
||||
# those packages should fall back to the default (latest?)
|
||||
if [[ "$phpInsNewer" != true ]]; then
|
||||
# Prefer the php metapackage if it's there
|
||||
if apt-cache show php > /dev/null 2>&1; then
|
||||
phpVer="php"
|
||||
# Else fall back on the php5 package if it's there
|
||||
elif apt-cache show php5 > /dev/null 2>&1; then
|
||||
phpVer="php5"
|
||||
# Else print error and exit
|
||||
# Packages required to perform the os_check (stored as an array)
|
||||
OS_CHECK_DEPS=(grep dnsutils)
|
||||
# Packages required to run this install script (stored as an array)
|
||||
INSTALLER_DEPS=(git iproute2 whiptail ca-certificates)
|
||||
# Packages required to run Pi-hole (stored as an array)
|
||||
PIHOLE_DEPS=(cron curl iputils-ping psmisc sudo unzip idn2 libcap2-bin dns-root-data libcap2 netcat-openbsd procps)
|
||||
# Packages required for the Web admin interface (stored as an array)
|
||||
# It's useful to separate this from Pi-hole, since the two repos are also setup separately
|
||||
PIHOLE_WEB_DEPS=(lighttpd "${phpVer}-common" "${phpVer}-cgi" "${phpVer}-sqlite3" "${phpVer}-xml" "${phpVer}-intl")
|
||||
# Prior to PHP8.0, JSON functionality is provided as dedicated module, required by Pi-hole AdminLTE: https://www.php.net/manual/json.installation.php
|
||||
if [[ -z "${phpInsMajor}" || "${phpInsMajor}" -lt 8 ]]; then
|
||||
PIHOLE_WEB_DEPS+=("${phpVer}-json")
|
||||
fi
|
||||
# The Web server user,
|
||||
LIGHTTPD_USER="www-data"
|
||||
# group,
|
||||
LIGHTTPD_GROUP="www-data"
|
||||
# and config file
|
||||
LIGHTTPD_CFG="lighttpd.conf.debian"
|
||||
|
||||
# If apt-get is not found, check for rpm.
|
||||
elif is_command rpm ; then
|
||||
# Then check if dnf or yum is the package manager
|
||||
if is_command dnf ; then
|
||||
PKG_MANAGER="dnf"
|
||||
else
|
||||
printf " %b Aborting installation: No PHP packages were found in APT repository.\\n" "${CROSS}"
|
||||
exit 1
|
||||
PKG_MANAGER="yum"
|
||||
fi
|
||||
else
|
||||
# Else, PHP is already installed at a version beyond v7.0, so the additional packages
|
||||
# should match version with the current PHP version.
|
||||
phpVer="php$phpInsMajor.$phpInsMinor"
|
||||
fi
|
||||
# We also need the correct version for `php-sqlite` (which differs across distros)
|
||||
if apt-cache show "${phpVer}-sqlite3" > /dev/null 2>&1; then
|
||||
phpSqlite="sqlite3"
|
||||
elif apt-cache show "${phpVer}-sqlite" > /dev/null 2>&1; then
|
||||
phpSqlite="sqlite"
|
||||
else
|
||||
printf " %b Aborting installation: No SQLite PHP module was found in APT repository.\\n" "${CROSS}"
|
||||
exit 1
|
||||
fi
|
||||
# Packages required to perform the os_check (stored as an array)
|
||||
OS_CHECK_DEPS=(grep dnsutils)
|
||||
# Packages required to run this install script (stored as an array)
|
||||
INSTALLER_DEPS=(git "${iproute_pkg}" whiptail)
|
||||
# Packages required to run Pi-hole (stored as an array)
|
||||
PIHOLE_DEPS=(cron curl iputils-ping lsof netcat psmisc sudo unzip idn2 sqlite3 libcap2-bin dns-root-data libcap2)
|
||||
# Packages required for the Web admin interface (stored as an array)
|
||||
# It's useful to separate this from Pi-hole, since the two repos are also setup separately
|
||||
PIHOLE_WEB_DEPS=(lighttpd "${phpVer}-common" "${phpVer}-cgi" "${phpVer}-${phpSqlite}" "${phpVer}-xml" "${phpVer}-intl")
|
||||
# Prior to PHP8.0, JSON functionality is provided as dedicated module, required by Pi-hole AdminLTE: https://www.php.net/manual/json.installation.php
|
||||
if [[ "${phpInsNewer}" != true || "${phpInsMajor}" -lt 8 ]]; then
|
||||
PIHOLE_WEB_DEPS+=("${phpVer}-json")
|
||||
fi
|
||||
# The Web server user,
|
||||
LIGHTTPD_USER="www-data"
|
||||
# group,
|
||||
LIGHTTPD_GROUP="www-data"
|
||||
# and config file
|
||||
LIGHTTPD_CFG="lighttpd.conf.debian"
|
||||
|
||||
# This function waits for dpkg to unlock, which signals that the previous apt-get command has finished.
|
||||
test_dpkg_lock() {
|
||||
i=0
|
||||
# fuser is a program to show which processes use the named files, sockets, or filesystems
|
||||
# So while the lock is held,
|
||||
while fuser /var/lib/dpkg/lock >/dev/null 2>&1
|
||||
do
|
||||
# we wait half a second,
|
||||
sleep 0.5
|
||||
# increase the iterator,
|
||||
((i=i+1))
|
||||
done
|
||||
# and then report success once dpkg is unlocked.
|
||||
return 0
|
||||
}
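# Illustration (not from the diff above): fuser exits 0 while some process still holds the dpkg
# lock, so the loop above keeps polling until apt/dpkg has finished, e.g.
#   fuser /var/lib/dpkg/lock >/dev/null 2>&1 && echo "a package operation is still running"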
|
||||
# These variable names match the ones for apt-get. See above for an explanation of what they are for.
|
||||
PKG_INSTALL=("${PKG_MANAGER}" install -y)
|
||||
PKG_COUNT="${PKG_MANAGER} check-update | egrep '(.i686|.x86|.noarch|.arm|.src)' | wc -l"
|
||||
OS_CHECK_DEPS=(grep bind-utils)
|
||||
INSTALLER_DEPS=(git iproute newt procps-ng which chkconfig ca-certificates)
|
||||
PIHOLE_DEPS=(cronie curl findutils sudo unzip libidn2 psmisc libcap nmap-ncat)
|
||||
PIHOLE_WEB_DEPS=(lighttpd lighttpd-fastcgi php-common php-cli php-pdo php-xml php-json php-intl)
|
||||
LIGHTTPD_USER="lighttpd"
|
||||
LIGHTTPD_GROUP="lighttpd"
|
||||
LIGHTTPD_CFG="lighttpd.conf.fedora"
|
||||
|
||||
# If apt-get is not found, check for rpm to see if it's a Red Hat family OS
|
||||
elif is_command rpm ; then
|
||||
# Then check if dnf or yum is the package manager
|
||||
if is_command dnf ; then
|
||||
PKG_MANAGER="dnf"
|
||||
# If neither apt-get nor yum/dnf package managers were found
|
||||
else
|
||||
PKG_MANAGER="yum"
|
||||
# we cannot install required packages
|
||||
printf " %b No supported package manager found\\n" "${CROSS}"
|
||||
# so exit the installer
|
||||
exit
|
||||
fi
|
||||
}
|
||||
|
||||
# These variable names match the ones in the Debian family. See above for an explanation of what they are for.
|
||||
PKG_INSTALL=("${PKG_MANAGER}" install -y)
|
||||
PKG_COUNT="${PKG_MANAGER} check-update | egrep '(.i686|.x86|.noarch|.arm|.src)' | wc -l"
|
||||
OS_CHECK_DEPS=(grep bind-utils)
|
||||
INSTALLER_DEPS=(git iproute newt procps-ng which chkconfig)
|
||||
PIHOLE_DEPS=(cronie curl findutils nmap-ncat sudo unzip libidn2 psmisc sqlite libcap lsof)
|
||||
PIHOLE_WEB_DEPS=(lighttpd lighttpd-fastcgi php-common php-cli php-pdo php-xml php-json php-intl)
|
||||
LIGHTTPD_USER="lighttpd"
|
||||
LIGHTTPD_GROUP="lighttpd"
|
||||
LIGHTTPD_CFG="lighttpd.conf.fedora"
|
||||
select_rpm_php(){
|
||||
# If the host OS is Fedora,
|
||||
if grep -qiE 'fedora|fedberry' /etc/redhat-release; then
|
||||
# all required packages should be available by default with the latest fedora release
|
||||
@@ -430,46 +406,36 @@ elif is_command rpm ; then
|
||||
REMI_PKG="remi-release"
|
||||
REMI_REPO="remi-php72"
|
||||
rpm -q ${REMI_PKG} &> /dev/null || rc=$?
|
||||
if [[ $rc -ne 0 ]]; then
|
||||
# The PHP version available via default repositories is older than version 7
|
||||
if ! whiptail --defaultno --title "PHP 7 Update (recommended)" --yesno "PHP 7.x is recommended for both security and language features.\\nWould you like to install PHP7 via Remi's RPM repository?\\n\\nSee: https://rpms.remirepo.net for more information" "${r}" "${c}"; then
|
||||
# User decided to NOT update PHP from REMI, attempt to install the default available PHP version
|
||||
printf " %b User opt-out of PHP 7 upgrade on CentOS. Deprecated PHP may be in use.\\n" "${INFO}"
|
||||
: # continue with unsupported php version
|
||||
else
|
||||
printf " %b Enabling Remi's RPM repository (https://rpms.remirepo.net)\\n" "${INFO}"
|
||||
"${PKG_INSTALL[@]}" "https://rpms.remirepo.net/enterprise/${REMI_PKG}-$(rpm -E '%{rhel}').rpm" &> /dev/null
|
||||
# enable the PHP 7 repository via yum-config-manager (provided by yum-utils)
|
||||
"${PKG_INSTALL[@]}" "yum-utils" &> /dev/null
|
||||
yum-config-manager --enable ${REMI_REPO} &> /dev/null
|
||||
printf " %b Remi's RPM repository has been enabled for PHP7\\n" "${TICK}"
|
||||
# trigger an install/update of PHP to ensure previous version of PHP is updated from REMI
|
||||
if "${PKG_INSTALL[@]}" "php-cli" &> /dev/null; then
|
||||
printf " %b PHP7 installed/updated via Remi's RPM repository\\n" "${TICK}"
|
||||
if [[ $rc -ne 0 ]]; then
|
||||
# The PHP version available via default repositories is older than version 7
|
||||
if ! whiptail --defaultno --title "PHP 7 Update (recommended)" --yesno "PHP 7.x is recommended for both security and language features.\\nWould you like to install PHP7 via Remi's RPM repository?\\n\\nSee: https://rpms.remirepo.net for more information" "${r}" "${c}"; then
|
||||
# User decided to NOT update PHP from REMI, attempt to install the default available PHP version
|
||||
printf " %b User opt-out of PHP 7 upgrade on CentOS. Deprecated PHP may be in use.\\n" "${INFO}"
|
||||
: # continue with unsupported php version
|
||||
else
|
||||
printf " %b There was a problem updating to PHP7 via Remi's RPM repository\\n" "${CROSS}"
|
||||
exit 1
|
||||
printf " %b Enabling Remi's RPM repository (https://rpms.remirepo.net)\\n" "${INFO}"
|
||||
"${PKG_INSTALL[@]}" "https://rpms.remirepo.net/enterprise/${REMI_PKG}-$(rpm -E '%{rhel}').rpm" &> /dev/null
|
||||
# enable the PHP 7 repository via yum-config-manager (provided by yum-utils)
|
||||
"${PKG_INSTALL[@]}" "yum-utils" &> /dev/null
|
||||
yum-config-manager --enable ${REMI_REPO} &> /dev/null
|
||||
printf " %b Remi's RPM repository has been enabled for PHP7\\n" "${TICK}"
|
||||
# trigger an install/update of PHP to ensure previous version of PHP is updated from REMI
|
||||
if "${PKG_INSTALL[@]}" "php-cli" &> /dev/null; then
|
||||
printf " %b PHP7 installed/updated via Remi's RPM repository\\n" "${TICK}"
|
||||
else
|
||||
printf " %b There was a problem updating to PHP7 via Remi's RPM repository\\n" "${CROSS}"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
fi # Warn user of unsupported version of Fedora or CentOS
|
||||
if ! whiptail --defaultno --title "Unsupported RPM based distribution" --yesno "Would you like to continue installation on an unsupported RPM based distribution?\\n\\nPlease ensure the following packages have been installed manually:\\n\\n- lighttpd\\n- lighttpd-fastcgi\\n- PHP version 7+" "${r}" "${c}"; then
|
||||
printf " %b Aborting installation due to unsupported RPM based distribution\\n" "${CROSS}"
|
||||
exit
|
||||
else
|
||||
printf " %b Continuing installation with unsupported RPM based distribution\\n" "${INFO}"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
else
|
||||
# Warn user of unsupported version of Fedora or CentOS
|
||||
if ! whiptail --defaultno --title "Unsupported RPM based distribution" --yesno "Would you like to continue installation on an unsupported RPM based distribution?\\n\\nPlease ensure the following packages have been installed manually:\\n\\n- lighttpd\\n- lighttpd-fastcgi\\n- PHP version 7+" "${r}" "${c}"; then
|
||||
printf " %b Aborting installation due to unsupported RPM based distribution\\n" "${CROSS}"
|
||||
exit
|
||||
else
|
||||
printf " %b Continuing installation with unsupported RPM based distribution\\n" "${INFO}"
|
||||
fi
|
||||
fi
|
||||
|
||||
# If neither apt-get nor yum/dnf package managers were found
|
||||
else
|
||||
# it's not an OS we can support,
|
||||
printf " %b OS distribution not supported\\n" "${CROSS}"
|
||||
# so exit the installer
|
||||
exit
|
||||
fi
|
||||
}
|
||||
|
||||
# A function for checking if a directory is a git repository
|
||||
@@ -555,12 +521,12 @@ update_repo() {
|
||||
git stash --all --quiet &> /dev/null || true # Okay for stash failure
|
||||
git clean --quiet --force -d || true # Okay for already clean directory
|
||||
# Pull the latest commits
|
||||
git pull --quiet &> /dev/null || return $?
|
||||
git pull --no-rebase --quiet &> /dev/null || return $?
|
||||
# Check current branch. If it is master, then reset to the latest available tag.
|
||||
# In case extra commits have been added after tagging/release (i.e in case of metadata updates/README.MD tweaks)
|
||||
curBranch=$(git rev-parse --abbrev-ref HEAD)
|
||||
if [[ "${curBranch}" == "master" ]]; then
|
||||
git reset --hard "$(git describe --abbrev=0 --tags)" || return $?
|
||||
git reset --hard "$(git describe --abbrev=0 --tags)" || return $?
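# Illustration (not from the diff above): 'git describe --abbrev=0 --tags' prints the most recent
# tag reachable from HEAD, so on master the working tree is pinned back to the latest release tag
# even if extra commits (README tweaks, metadata) landed after tagging.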
|
||||
fi
|
||||
# Show a completion message
|
||||
printf "%b %b %s\\n" "${OVER}" "${TICK}" "${str}"
|
||||
@@ -670,12 +636,12 @@ welcomeDialogs() {
|
||||
IMPORTANT: If you have not already done so, you must ensure that this device has a static IP. Either through DHCP reservation, or by manually assigning one. Depending on your operating system, there are many ways to achieve this.
|
||||
|
||||
Choose yes to indicate that you have understood this message, and wish to continue" "${r}" "${c}"; then
|
||||
#Nothing to do, continue
|
||||
echo
|
||||
else
|
||||
printf " %b Installer exited at static IP message.\\n" "${INFO}"
|
||||
exit 1
|
||||
fi
|
||||
#Nothing to do, continue
|
||||
echo
|
||||
else
|
||||
printf " %b Installer exited at static IP message.\\n" "${INFO}"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
# A function that lets the user pick an interface to use with Pi-hole
|
||||
@@ -715,7 +681,7 @@ chooseInterface() {
|
||||
# Feed the available interfaces into this while loop
|
||||
done <<< "${availableInterfaces}"
|
||||
# The whiptail command that will be run, stored in a variable
|
||||
chooseInterfaceCmd=(whiptail --separate-output --radiolist "Choose An Interface (press space to toggle selection)" "${r}" "${c}" "${interfaceCount}")
|
||||
chooseInterfaceCmd=(whiptail --separate-output --radiolist "Choose An Interface (press space to toggle selection)" "${r}" "${c}" 6)
|
||||
# Now run the command using the interfaces saved into the array
|
||||
chooseInterfaceOptions=$("${chooseInterfaceCmd[@]}" "${interfacesArray[@]}" 2>&1 >/dev/tty) || \
|
||||
# If the user chooses Cancel, exit
|
||||
@@ -757,9 +723,8 @@ testIPv6() {
|
||||
fi
|
||||
}
|
||||
|
||||
# A dialog for showing the user about IPv6 blocking
|
||||
useIPv6dialog() {
|
||||
# Determine the IPv6 address used for blocking
|
||||
find_IPv6_information() {
|
||||
# Detects IPv6 address used for communication to WAN addresses.
|
||||
IPV6_ADDRESSES=($(ip -6 address | grep 'scope global' | awk '{print $2}'))
|
||||
|
||||
# For each address in the array above, determine the type of IPv6 address it is
|
||||
@@ -779,122 +744,91 @@ useIPv6dialog() {
|
||||
# set the IPv6 address to the ULA address
|
||||
IPV6_ADDRESS="${ULA_ADDRESS}"
|
||||
# Show this info to the user
|
||||
printf " %b Found IPv6 ULA address, using it for blocking IPv6 ads\\n" "${INFO}"
|
||||
printf " %b Found IPv6 ULA address\\n" "${INFO}"
|
||||
# Otherwise, if the GUA_ADDRESS has a value,
|
||||
elif [[ ! -z "${GUA_ADDRESS}" ]]; then
|
||||
# Let the user know
|
||||
printf " %b Found IPv6 GUA address, using it for blocking IPv6 ads\\n" "${INFO}"
|
||||
printf " %b Found IPv6 GUA address\\n" "${INFO}"
|
||||
# And assign it to the global variable
|
||||
IPV6_ADDRESS="${GUA_ADDRESS}"
|
||||
# If none of those work,
|
||||
else
|
||||
# explain that IPv6 blocking will not be used
|
||||
printf " %b Unable to find IPv6 ULA/GUA address, IPv6 adblocking will not be enabled\\n" "${INFO}"
|
||||
printf " %b Unable to find IPv6 ULA/GUA address\\n" "${INFO}"
|
||||
# So set the variable to be empty
|
||||
IPV6_ADDRESS=""
|
||||
fi
|
||||
|
||||
# If the IPV6_ADDRESS contains a value
|
||||
if [[ ! -z "${IPV6_ADDRESS}" ]]; then
|
||||
# Display that IPv6 is supported and will be used
|
||||
whiptail --msgbox --backtitle "IPv6..." --title "IPv6 Supported" "$IPV6_ADDRESS will be used to block ads." "${r}" "${c}"
|
||||
fi
|
||||
}
|
||||
|
||||
# A function to check if we should use IPv4 and/or IPv6 for blocking ads
|
||||
use4andor6() {
|
||||
# Named local variables
|
||||
local useIPv4
|
||||
local useIPv6
|
||||
# Let user choose IPv4 and/or IPv6 via a checklist
|
||||
cmd=(whiptail --separate-output --checklist "Select Protocols (press space to toggle selection)" "${r}" "${c}" 2)
|
||||
# In an array, show the options available:
|
||||
# IPv4 (on by default)
|
||||
options=(IPv4 "Block ads over IPv4" on
|
||||
# or IPv6 (on by default if available)
|
||||
IPv6 "Block ads over IPv6" on)
|
||||
# In a variable, show the choices available; exit if Cancel is selected
|
||||
choices=$("${cmd[@]}" "${options[@]}" 2>&1 >/dev/tty) || { printf " %bCancel was selected, exiting installer%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"; exit 1; }
|
||||
# For each choice available,
|
||||
for choice in ${choices}
|
||||
do
|
||||
# Set the values to true
|
||||
case ${choice} in
|
||||
IPv4 ) useIPv4=true;;
|
||||
IPv6 ) useIPv6=true;;
|
||||
esac
|
||||
done
|
||||
# If IPv4 is to be used,
|
||||
if [[ "${useIPv4}" ]]; then
|
||||
# Run our function to get the information we need
|
||||
find_IPv4_information
|
||||
if [[ -f "/etc/dhcpcd.conf" ]]; then
|
||||
# configure networking via dhcpcd
|
||||
getStaticIPv4Settings
|
||||
setDHCPCD
|
||||
fi
|
||||
fi
|
||||
# If IPv6 is to be used,
|
||||
if [[ "${useIPv6}" ]]; then
|
||||
# Run our function to get this information
|
||||
useIPv6dialog
|
||||
fi
|
||||
# A function to collect IPv4 and IPv6 information of the device
|
||||
collect_v4andv6_information() {
|
||||
find_IPv4_information
|
||||
# Echo the information to the user
|
||||
printf " %b IPv4 address: %s\\n" "${INFO}" "${IPV4_ADDRESS}"
|
||||
printf " %b IPv6 address: %s\\n" "${INFO}" "${IPV6_ADDRESS}"
|
||||
# If neither protocol is selected,
|
||||
if [[ ! "${useIPv4}" ]] && [[ ! "${useIPv6}" ]]; then
|
||||
# Show an error in red
|
||||
printf " %bError: Neither IPv4 or IPv6 selected%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"
|
||||
# and exit with an error
|
||||
exit 1
|
||||
# if `dhcpcd` is used offer to set this as static IP for the device
|
||||
if [[ -f "/etc/dhcpcd.conf" ]]; then
|
||||
# configure networking via dhcpcd
|
||||
getStaticIPv4Settings
|
||||
fi
|
||||
find_IPv6_information
|
||||
printf " %b IPv6 address: %s\\n" "${INFO}" "${IPV6_ADDRESS}"
|
||||
}
|
||||
|
||||
getStaticIPv4Settings() {
|
||||
# Local, named variables
|
||||
local ipSettingsCorrect
|
||||
local DHCPChoice
|
||||
# Ask if the user wants to use DHCP settings as their static IP
|
||||
# This is useful for users that are using DHCP reservations; then we can just use the information gathered via our functions
|
||||
if whiptail --backtitle "Calibrating network interface" --title "Static IP Address" --yesno "Do you want to use your current network settings as a static address?
|
||||
IP address: ${IPV4_ADDRESS}
|
||||
Gateway: ${IPv4gw}" "${r}" "${c}"; then
|
||||
DHCPChoice=$(whiptail --backtitle "Calibrating network interface" --title "Static IP Address" --menu --separate-output "Do you want to use your current network settings as a static address? \\n
|
||||
IP address: ${IPV4_ADDRESS} \\n
|
||||
Gateway: ${IPv4gw} \\n" "${r}" "${c}" 3\
|
||||
"Yes" "Set static IP using current values" \
|
||||
"No" "Set static IP using custom values" \
|
||||
"Skip" "I will set a static IP later, or have already done so" 3>&2 2>&1 1>&3) || \
|
||||
{ printf " %bCancel was selected, exiting installer%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"; exit 1; }
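# Illustration (not from the diff above): the '3>&2 2>&1 1>&3' redirection swaps stdout and
# stderr so the whiptail menu can be drawn on the terminal while the selected tag (which
# whiptail writes to stderr) is captured into DHCPChoice by the command substitution.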
|
||||
|
||||
case ${DHCPChoice} in
|
||||
"Yes")
|
||||
# If they choose yes, let the user know that the IP address will not be available via DHCP and may cause a conflict.
|
||||
whiptail --msgbox --backtitle "IP information" --title "FYI: IP Conflict" "It is possible your router could still try to assign this IP to a device, which would cause a conflict. But in most cases the router is smart enough to not do that.
|
||||
If you are worried, either manually set the address, or modify the DHCP reservation pool so it does not include the IP you want.
|
||||
It is also possible to use a DHCP reservation, but if you are going to do that, you might as well set a static address." "${r}" "${c}"
|
||||
# Nothing else to do since the variables are already set above
|
||||
else
|
||||
# Otherwise, we need to ask the user to input their desired settings.
|
||||
# Start by getting the IPv4 address (pre-filling it with info gathered from DHCP)
|
||||
# Start a loop to let the user enter their information with the chance to go back and edit it if necessary
|
||||
until [[ "${ipSettingsCorrect}" = True ]]; do
|
||||
If you are worried, either manually set the address, or modify the DHCP reservation pool so it does not include the IP you want.
|
||||
It is also possible to use a DHCP reservation, but if you are going to do that, you might as well set a static address." "${r}" "${c}"
|
||||
# Nothing else to do since the variables are already set above
|
||||
setDHCPCD
|
||||
;;
|
||||
|
||||
# Ask for the IPv4 address
|
||||
IPV4_ADDRESS=$(whiptail --backtitle "Calibrating network interface" --title "IPv4 address" --inputbox "Enter your desired IPv4 address" "${r}" "${c}" "${IPV4_ADDRESS}" 3>&1 1>&2 2>&3) || \
|
||||
# Canceling IPv4 settings window
|
||||
{ ipSettingsCorrect=False; echo -e " ${COL_LIGHT_RED}Cancel was selected, exiting installer${COL_NC}"; exit 1; }
|
||||
printf " %b Your static IPv4 address: %s\\n" "${INFO}" "${IPV4_ADDRESS}"
|
||||
"No")
|
||||
# Otherwise, we need to ask the user to input their desired settings.
|
||||
# Start by getting the IPv4 address (pre-filling it with info gathered from DHCP)
|
||||
# Start a loop to let the user enter their information with the chance to go back and edit it if necessary
|
||||
until [[ "${ipSettingsCorrect}" = True ]]; do
|
||||
|
||||
# Ask for the gateway
|
||||
IPv4gw=$(whiptail --backtitle "Calibrating network interface" --title "IPv4 gateway (router)" --inputbox "Enter your desired IPv4 default gateway" "${r}" "${c}" "${IPv4gw}" 3>&1 1>&2 2>&3) || \
|
||||
# Canceling gateway settings window
|
||||
{ ipSettingsCorrect=False; echo -e " ${COL_LIGHT_RED}Cancel was selected, exiting installer${COL_NC}"; exit 1; }
|
||||
printf " %b Your static IPv4 gateway: %s\\n" "${INFO}" "${IPv4gw}"
|
||||
# Ask for the IPv4 address
|
||||
IPV4_ADDRESS=$(whiptail --backtitle "Calibrating network interface" --title "IPv4 address" --inputbox "Enter your desired IPv4 address" "${r}" "${c}" "${IPV4_ADDRESS}" 3>&1 1>&2 2>&3) || \
|
||||
# Canceling IPv4 settings window
|
||||
{ ipSettingsCorrect=False; echo -e " ${COL_LIGHT_RED}Cancel was selected, exiting installer${COL_NC}"; exit 1; }
|
||||
printf " %b Your static IPv4 address: %s\\n" "${INFO}" "${IPV4_ADDRESS}"
|
||||
|
||||
# Give the user a chance to review their settings before moving on
|
||||
if whiptail --backtitle "Calibrating network interface" --title "Static IP Address" --yesno "Are these settings correct?
|
||||
IP address: ${IPV4_ADDRESS}
|
||||
Gateway: ${IPv4gw}" "${r}" "${c}"; then
|
||||
# After that's done, the loop ends and we move on
|
||||
ipSettingsCorrect=True
|
||||
else
|
||||
# If the settings are wrong, the loop continues
|
||||
ipSettingsCorrect=False
|
||||
fi
|
||||
done
|
||||
# End the if statement for DHCP vs. static
|
||||
fi
|
||||
# Ask for the gateway
|
||||
IPv4gw=$(whiptail --backtitle "Calibrating network interface" --title "IPv4 gateway (router)" --inputbox "Enter your desired IPv4 default gateway" "${r}" "${c}" "${IPv4gw}" 3>&1 1>&2 2>&3) || \
|
||||
# Canceling gateway settings window
|
||||
{ ipSettingsCorrect=False; echo -e " ${COL_LIGHT_RED}Cancel was selected, exiting installer${COL_NC}"; exit 1; }
|
||||
printf " %b Your static IPv4 gateway: %s\\n" "${INFO}" "${IPv4gw}"
|
||||
|
||||
# Give the user a chance to review their settings before moving on
|
||||
if whiptail --backtitle "Calibrating network interface" --title "Static IP Address" --yesno "Are these settings correct?
|
||||
IP address: ${IPV4_ADDRESS}
|
||||
Gateway: ${IPv4gw}" "${r}" "${c}"; then
|
||||
# After that's done, the loop ends and we move on
|
||||
ipSettingsCorrect=True
|
||||
else
|
||||
# If the settings are wrong, the loop continues
|
||||
ipSettingsCorrect=False
|
||||
fi
|
||||
done
|
||||
setDHCPCD
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
# Configure networking via dhcpcd
|
||||
@@ -929,7 +863,7 @@ valid_ip() {
|
||||
# Regex matching an optional port (starting with '#') in the range 0-65535
|
||||
local portelem="(#(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|6[0-4][0-9]{3}|[1-5][0-9]{4}|[1-9][0-9]{0,3}|0))?";
|
||||
# Build a full IPv4 regex from the above subexpressions
|
||||
local regex="^${ipv4elem}\.${ipv4elem}\.${ipv4elem}\.${ipv4elem}${portelem}$"
|
||||
local regex="^${ipv4elem}\\.${ipv4elem}\\.${ipv4elem}\\.${ipv4elem}${portelem}$"
|
||||
|
||||
# Evaluate the regex, and return the result
|
||||
[[ $ip =~ ${regex} ]]
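# Illustration (not from the diff above), assuming the ipv4elem defined earlier in this function
# restricts each octet to 0-255:
#   valid_ip "192.168.2.10"       # returns 0 (plain IPv4 address)
#   valid_ip "192.168.2.10#5335"  # returns 0 (optional '#port' suffix, 0-65535)
#   valid_ip "300.1.1.1"          # returns 1 (octet out of range)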
|
||||
@@ -986,8 +920,8 @@ setDNS() {
|
||||
IFS=${OIFS}
|
||||
# In a whiptail dialog, show the options
|
||||
DNSchoices=$(whiptail --separate-output --menu "Select Upstream DNS Provider. To use your own, select Custom." "${r}" "${c}" 7 \
|
||||
"${DNSChooseOptions[@]}" 2>&1 >/dev/tty) || \
|
||||
# Exit if the user selects "Cancel"
|
||||
"${DNSChooseOptions[@]}" 2>&1 >/dev/tty) || \
|
||||
# Exit if the user selects "Cancel"
|
||||
{ printf " %bCancel was selected, exiting installer%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"; exit 1; }
|
||||
|
||||
# Depending on the user's choice, set the GLOBAL variables to the IP of the respective provider
|
||||
@@ -1201,8 +1135,11 @@ chooseBlocklists() {
|
||||
appendToListsFile "${choice}"
|
||||
done
|
||||
# Create an empty adList file with appropriate permissions.
|
||||
touch "${adlistFile}"
|
||||
chmod 644 "${adlistFile}"
|
||||
if [ ! -f "${adlistFile}" ]; then
|
||||
install -m 644 /dev/null "${adlistFile}"
|
||||
else
|
||||
chmod 644 "${adlistFile}"
|
||||
fi
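# Illustration (not from the diff above): 'install -m 644 /dev/null "${adlistFile}"' creates the
# empty list and applies mode 644 in a single step, so a fresh file never briefly exists with
# umask-default permissions; an already existing file is only chmod'ed and its contents are kept.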
|
||||
}
|
||||
|
||||
# Accept a string parameter, it must be one of the default lists
|
||||
@@ -1253,8 +1190,8 @@ version_check_dnsmasq() {
|
||||
install -D -m 644 -T "${dnsmasq_original_config}" "${dnsmasq_conf}"
|
||||
printf "%b %b Restoring default dnsmasq.conf...\\n" "${OVER}" "${TICK}"
|
||||
else
|
||||
# Otherwise, don't do anything
|
||||
printf " it is not a Pi-hole file, leaving alone!\\n"
|
||||
# Otherwise, don't do anything
|
||||
printf " it is not a Pi-hole file, leaving alone!\\n"
|
||||
fi
|
||||
else
|
||||
# If a file cannot be found,
|
||||
@@ -1289,8 +1226,8 @@ version_check_dnsmasq() {
|
||||
sed -i '/^server=@DNS2@/d' "${dnsmasq_pihole_01_target}"
|
||||
fi
|
||||
|
||||
# Set the cache size
|
||||
sed -i "s/@CACHE_SIZE@/$CACHE_SIZE/" "${dnsmasq_pihole_01_target}"
|
||||
# Set the cache size
|
||||
sed -i "s/@CACHE_SIZE@/$CACHE_SIZE/" "${dnsmasq_pihole_01_target}"
|
||||
|
||||
sed -i 's/^#conf-dir=\/etc\/dnsmasq.d$/conf-dir=\/etc\/dnsmasq.d/' "${dnsmasq_conf}"
|
||||
|
||||
@@ -1372,10 +1309,10 @@ installConfigs() {
|
||||
echo "${DNS_SERVERS}" > "${PI_HOLE_CONFIG_DIR}/dns-servers.conf"
|
||||
chmod 644 "${PI_HOLE_CONFIG_DIR}/dns-servers.conf"
|
||||
|
||||
# Install empty file if it does not exist
|
||||
# Install template file if it does not exist
|
||||
if [[ ! -r "${PI_HOLE_CONFIG_DIR}/pihole-FTL.conf" ]]; then
|
||||
install -d -m 0755 ${PI_HOLE_CONFIG_DIR}
|
||||
if ! install -o pihole -m 664 /dev/null "${PI_HOLE_CONFIG_DIR}/pihole-FTL.conf" &>/dev/null; then
|
||||
if ! install -T -o pihole -m 664 "${PI_HOLE_LOCAL_REPO}/advanced/Templates/pihole-FTL.conf" "${PI_HOLE_CONFIG_DIR}/pihole-FTL.conf" &>/dev/null; then
|
||||
printf " %bError: Unable to initialize configuration file %s/pihole-FTL.conf\\n" "${COL_LIGHT_RED}" "${PI_HOLE_CONFIG_DIR}"
|
||||
return 1
|
||||
fi
|
||||
@@ -1396,18 +1333,19 @@ installConfigs() {
|
||||
# make it and set the owners
|
||||
install -d -m 755 -o "${USER}" -g root /etc/lighttpd
|
||||
# Otherwise, if the config file already exists
|
||||
elif [[ -f "/etc/lighttpd/lighttpd.conf" ]]; then
|
||||
elif [[ -f "${lighttpdConfig}" ]]; then
|
||||
# back up the original
|
||||
mv /etc/lighttpd/lighttpd.conf /etc/lighttpd/lighttpd.conf.orig
|
||||
mv "${lighttpdConfig}"{,.orig}
|
||||
fi
|
||||
# and copy in the config file Pi-hole needs
|
||||
install -D -m 644 -T ${PI_HOLE_LOCAL_REPO}/advanced/${LIGHTTPD_CFG} /etc/lighttpd/lighttpd.conf
|
||||
install -D -m 644 -T ${PI_HOLE_LOCAL_REPO}/advanced/${LIGHTTPD_CFG} "${lighttpdConfig}"
|
||||
# Make sure the external.conf file exists, as lighttpd v1.4.50 crashes without it
|
||||
touch /etc/lighttpd/external.conf
|
||||
chmod 644 /etc/lighttpd/external.conf
|
||||
if [ ! -f /etc/lighttpd/external.conf ]; then
|
||||
install -m 644 /dev/null /etc/lighttpd/external.conf
|
||||
fi
|
||||
# If there is a custom block page in the html/pihole directory, replace 404 handler in lighttpd config
|
||||
if [[ -f "${PI_HOLE_BLOCKPAGE_DIR}/custom.php" ]]; then
|
||||
sed -i 's/^\(server\.error-handler-404\s*=\s*\).*$/\1"pihole\/custom\.php"/' /etc/lighttpd/lighttpd.conf
|
||||
sed -i 's/^\(server\.error-handler-404\s*=\s*\).*$/\1"\/pihole\/custom\.php"/' "${lighttpdConfig}"
|
||||
fi
|
||||
# Make the directories if they do not exist and set the owners
|
||||
mkdir -p /run/lighttpd
|
||||
@@ -1444,7 +1382,12 @@ install_manpage() {
|
||||
# Testing complete, copy the files & update the man db
|
||||
install -D -m 644 -T ${PI_HOLE_LOCAL_REPO}/manpages/pihole.8 /usr/local/share/man/man8/pihole.8
|
||||
install -D -m 644 -T ${PI_HOLE_LOCAL_REPO}/manpages/pihole-FTL.8 /usr/local/share/man/man8/pihole-FTL.8
|
||||
install -D -m 644 -T ${PI_HOLE_LOCAL_REPO}/manpages/pihole-FTL.conf.5 /usr/local/share/man/man5/pihole-FTL.conf.5
|
||||
|
||||
# remove previously installed "pihole-FTL.conf.5" man page
|
||||
if [[ -f "/usr/local/share/man/man5/pihole-FTL.conf.5" ]]; then
|
||||
rm /usr/local/share/man/man5/pihole-FTL.conf.5
|
||||
fi
|
||||
|
||||
if mandb -q &>/dev/null; then
|
||||
# Updated successfully
|
||||
printf "%b %b man pages installed and database updated\\n" "${OVER}" "${TICK}"
|
||||
@@ -1452,7 +1395,7 @@ install_manpage() {
|
||||
else
|
||||
# Something is wrong with the system's man installation, clean up
|
||||
# our files (leave everything how we found it).
|
||||
rm /usr/local/share/man/man8/pihole.8 /usr/local/share/man/man8/pihole-FTL.8 /usr/local/share/man/man5/pihole-FTL.conf.5
|
||||
rm /usr/local/share/man/man8/pihole.8 /usr/local/share/man/man8/pihole-FTL.8
|
||||
printf "%b %b man page db not updated, man pages not installed\\n" "${OVER}" "${CROSS}"
|
||||
fi
|
||||
}
|
||||
@@ -1554,9 +1497,6 @@ disable_resolved_stublistener() {
|
||||
}
|
||||
|
||||
update_package_cache() {
|
||||
# Running apt-get update/upgrade with minimal output can cause some issues with
|
||||
# requiring user input (e.g password for phpmyadmin see #218)
|
||||
|
||||
# Update package cache on apt based OSes. Do this every time since
|
||||
# it's quick and packages can be updated at any time.
|
||||
|
||||
@@ -1568,8 +1508,14 @@ update_package_cache() {
|
||||
printf "%b %b %s\\n" "${OVER}" "${TICK}" "${str}"
|
||||
else
|
||||
# Otherwise, show an error and exit
|
||||
|
||||
# In case we used apt-get and apt is also available, we recommend apt instead, as we have seen that it
# gives more user-friendly (interactive) advice
|
||||
if [[ ${PKG_MANAGER} == "apt-get" ]] && is_command apt ; then
|
||||
UPDATE_PKG_CACHE="apt update"
|
||||
fi
|
||||
printf "%b %b %s\\n" "${OVER}" "${CROSS}" "${str}"
|
||||
printf " %bError: Unable to update package cache. Please try \"%s\"%b" "${COL_LIGHT_RED}" "sudo ${UPDATE_PKG_CACHE}" "${COL_NC}"
|
||||
printf " %bError: Unable to update package cache. Please try \"%s\"%b\\n" "${COL_LIGHT_RED}" "sudo ${UPDATE_PKG_CACHE}" "${COL_NC}"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
@@ -1621,6 +1567,8 @@ install_dependent_packages() {
|
||||
# If there's anything to install, install everything in the list.
|
||||
if [[ "${#installArray[@]}" -gt 0 ]]; then
|
||||
test_dpkg_lock
|
||||
# Running apt-get install with minimal output can cause some issues with
|
||||
# requiring user input (e.g password for phpmyadmin see #218)
|
||||
printf " %b Processing %s install(s) for: %s, please wait...\\n" "${INFO}" "${PKG_MANAGER}" "${installArray[*]}"
|
||||
printf '%*s\n' "$columns" '' | tr " " -;
|
||||
"${PKG_INSTALL[@]}" "${installArray[@]}"
|
||||
@@ -1633,7 +1581,7 @@ install_dependent_packages() {
|
||||
|
||||
# Install Fedora/CentOS packages
|
||||
for i in "$@"; do
|
||||
# For each package, check if it's already installed (and if so, don't add it to the installArray)
|
||||
# For each package, check if it's already installed (and if so, don't add it to the installArray)
|
||||
printf " %b Checking for %s..." "${INFO}" "${i}"
|
||||
if "${PKG_MANAGER}" -q list installed "${i}" &> /dev/null; then
|
||||
printf "%b %b Checking for %s\\n" "${OVER}" "${TICK}" "${i}"
|
||||
@@ -1799,20 +1747,23 @@ finalExports() {
|
||||
# If the setup variable file exists,
|
||||
if [[ -e "${setupVars}" ]]; then
|
||||
# update the variables in the file
|
||||
sed -i.update.bak '/PIHOLE_INTERFACE/d;/IPV4_ADDRESS/d;/IPV6_ADDRESS/d;/PIHOLE_DNS_1\b/d;/PIHOLE_DNS_2\b/d;/QUERY_LOGGING/d;/INSTALL_WEB_SERVER/d;/INSTALL_WEB_INTERFACE/d;/LIGHTTPD_ENABLED/d;/CACHE_SIZE/d;' "${setupVars}"
|
||||
sed -i.update.bak '/PIHOLE_INTERFACE/d;/IPV4_ADDRESS/d;/IPV6_ADDRESS/d;/PIHOLE_DNS_1\b/d;/PIHOLE_DNS_2\b/d;/QUERY_LOGGING/d;/INSTALL_WEB_SERVER/d;/INSTALL_WEB_INTERFACE/d;/LIGHTTPD_ENABLED/d;/CACHE_SIZE/d;/DNS_FQDN_REQUIRED/d;/DNS_BOGUS_PRIV/d;/DNSMASQ_LISTENING/d;' "${setupVars}"
|
||||
fi
|
||||
# echo the information to the user
|
||||
{
|
||||
echo "PIHOLE_INTERFACE=${PIHOLE_INTERFACE}"
|
||||
echo "IPV4_ADDRESS=${IPV4_ADDRESS}"
|
||||
echo "IPV6_ADDRESS=${IPV6_ADDRESS}"
|
||||
echo "PIHOLE_DNS_1=${PIHOLE_DNS_1}"
|
||||
echo "PIHOLE_DNS_2=${PIHOLE_DNS_2}"
|
||||
echo "QUERY_LOGGING=${QUERY_LOGGING}"
|
||||
echo "INSTALL_WEB_SERVER=${INSTALL_WEB_SERVER}"
|
||||
echo "INSTALL_WEB_INTERFACE=${INSTALL_WEB_INTERFACE}"
|
||||
echo "LIGHTTPD_ENABLED=${LIGHTTPD_ENABLED}"
|
||||
echo "CACHE_SIZE=${CACHE_SIZE}"
|
||||
echo "PIHOLE_INTERFACE=${PIHOLE_INTERFACE}"
|
||||
echo "IPV4_ADDRESS=${IPV4_ADDRESS}"
|
||||
echo "IPV6_ADDRESS=${IPV6_ADDRESS}"
|
||||
echo "PIHOLE_DNS_1=${PIHOLE_DNS_1}"
|
||||
echo "PIHOLE_DNS_2=${PIHOLE_DNS_2}"
|
||||
echo "QUERY_LOGGING=${QUERY_LOGGING}"
|
||||
echo "INSTALL_WEB_SERVER=${INSTALL_WEB_SERVER}"
|
||||
echo "INSTALL_WEB_INTERFACE=${INSTALL_WEB_INTERFACE}"
|
||||
echo "LIGHTTPD_ENABLED=${LIGHTTPD_ENABLED}"
|
||||
echo "CACHE_SIZE=${CACHE_SIZE}"
|
||||
echo "DNS_FQDN_REQUIRED=${DNS_FQDN_REQUIRED:-true}"
|
||||
echo "DNS_BOGUS_PRIV=${DNS_BOGUS_PRIV:-true}"
|
||||
echo "DNSMASQ_LISTENING=${DNSMASQ_LISTENING:-local}"
|
||||
}>> "${setupVars}"
|
||||
chmod 644 "${setupVars}"
|
||||
|
||||
@@ -1860,27 +1811,6 @@ installLogrotate() {
|
||||
printf "%b %b %s\\n" "${OVER}" "${TICK}" "${str}"
|
||||
}
|
||||
|
||||
# At some point in the future this list can be pruned, for now we'll need it to ensure updates don't break.
|
||||
# Refactoring of install script has changed the name of a couple of variables. Sort them out here.
|
||||
accountForRefactor() {
|
||||
sed -i 's/piholeInterface/PIHOLE_INTERFACE/g' "${setupVars}"
|
||||
sed -i 's/IPv4_address/IPV4_ADDRESS/g' "${setupVars}"
|
||||
sed -i 's/IPv4addr/IPV4_ADDRESS/g' "${setupVars}"
|
||||
sed -i 's/IPv6_address/IPV6_ADDRESS/g' "${setupVars}"
|
||||
sed -i 's/piholeIPv6/IPV6_ADDRESS/g' "${setupVars}"
|
||||
sed -i 's/piholeDNS1/PIHOLE_DNS_1/g' "${setupVars}"
|
||||
sed -i 's/piholeDNS2/PIHOLE_DNS_2/g' "${setupVars}"
|
||||
sed -i 's/^INSTALL_WEB=/INSTALL_WEB_INTERFACE=/' "${setupVars}"
|
||||
# Add 'INSTALL_WEB_SERVER', if its not been applied already: https://github.com/pi-hole/pi-hole/pull/2115
|
||||
if ! grep -q '^INSTALL_WEB_SERVER=' ${setupVars}; then
|
||||
local webserver_installed=false
|
||||
if grep -q '^INSTALL_WEB_INTERFACE=true' ${setupVars}; then
|
||||
webserver_installed=true
|
||||
fi
|
||||
echo -e "INSTALL_WEB_SERVER=$webserver_installed" >> "${setupVars}"
|
||||
fi
|
||||
}
|
||||
|
||||
# Install base files and web interface
|
||||
installPihole() {
|
||||
# If the user wants to install the Web interface,
|
||||
@@ -1911,10 +1841,6 @@ installPihole() {
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
# For updates and unattended install.
|
||||
if [[ "${useUpdateVars}" == true ]]; then
|
||||
accountForRefactor
|
||||
fi
|
||||
# Install base files and web interface
|
||||
if ! installScripts; then
|
||||
printf " %b Failure in dependent script copy function.\\n" "${CROSS}"
|
||||
@@ -1953,7 +1879,7 @@ checkSelinux() {
|
||||
local CURRENT_SELINUX
|
||||
local SELINUX_ENFORCING=0
|
||||
# Check for SELinux configuration file and getenforce command
|
||||
if [[ -f /etc/selinux/config ]] && command -v getenforce &> /dev/null; then
|
||||
if [[ -f /etc/selinux/config ]] && is_command getenforce; then
|
||||
# Check the default SELinux mode
|
||||
DEFAULT_SELINUX=$(awk -F= '/^SELINUX=/ {print $2}' /etc/selinux/config)
|
||||
case "${DEFAULT_SELINUX,,}" in
|
||||
@@ -1999,7 +1925,7 @@ displayFinalMessage() {
|
||||
if [[ "${#1}" -gt 0 ]] ; then
|
||||
# set the password to the first argument.
|
||||
pwstring="$1"
|
||||
elif [[ $(grep 'WEBPASSWORD' -c /etc/pihole/setupVars.conf) -gt 0 ]]; then
|
||||
elif [[ $(grep 'WEBPASSWORD' -c "${setupVars}") -gt 0 ]]; then
|
||||
# Else if the password exists from previous setup, we'll load it later
|
||||
pwstring="unchanged"
|
||||
else
|
||||
@@ -2012,7 +1938,7 @@ displayFinalMessage() {
|
||||
additional="View the web interface at http://pi.hole/admin or http://${IPV4_ADDRESS%/*}/admin
|
||||
|
||||
Your Admin Webpage login password is ${pwstring}"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Final completion message to user
|
||||
whiptail --msgbox --backtitle "Make it so." --title "Installation Complete!" "Configure your devices to use the Pi-hole as their DNS server using:
|
||||
@@ -2022,8 +1948,6 @@ IPv6: ${IPV6_ADDRESS:-"Not Configured"}
|
||||
|
||||
If you have not done so already, the above IP should be set to static.
|
||||
|
||||
The install log is in /etc/pihole.
|
||||
|
||||
${additional}" "${r}" "${c}"
|
||||
}
|
||||
|
||||
@@ -2136,7 +2060,7 @@ checkout_pull_branch() {
|
||||
# Data in the repositories is public anyway so we can make it readable by everyone (+r to keep executable permission if already set by git)
|
||||
chmod -R a+rX "${directory}"
|
||||
|
||||
git_pull=$(git pull || return 1)
|
||||
git_pull=$(git pull --no-rebase || return 1)
|
||||
|
||||
if [[ "$git_pull" == *"up-to-date"* ]]; then
|
||||
printf " %b %s\\n" "${INFO}" "${git_pull}"
|
||||
@@ -2187,7 +2111,6 @@ clone_or_update_repos() {
|
||||
# shellcheck disable=SC2120
|
||||
FTLinstall() {
|
||||
# Local, named variables
|
||||
local latesttag
|
||||
local str="Downloading and Installing FTL"
|
||||
printf " %b %s..." "${INFO}" "${str}"
|
||||
|
||||
@@ -2258,7 +2181,7 @@ FTLinstall() {
|
||||
|
||||
disable_dnsmasq() {
|
||||
# dnsmasq can now be stopped and disabled if it exists
|
||||
if which dnsmasq &> /dev/null; then
|
||||
if is_command dnsmasq; then
|
||||
if check_service_active "dnsmasq";then
|
||||
printf " %b FTL can now resolve DNS Queries without dnsmasq running separately\\n" "${INFO}"
|
||||
stop_service dnsmasq
|
||||
@@ -2371,7 +2294,7 @@ FTLcheckUpdate() {
|
||||
printf " %b Checking for existing FTL binary...\\n" "${INFO}"
|
||||
|
||||
local ftlLoc
|
||||
ftlLoc=$(which pihole-FTL 2>/dev/null)
|
||||
ftlLoc=$(command -v pihole-FTL 2>/dev/null)
|
||||
|
||||
local ftlBranch
|
||||
|
||||
@@ -2388,7 +2311,7 @@ FTLcheckUpdate() {
|
||||
local localSha1
|
||||
|
||||
# if dnsmasq exists and is running at this point, force reinstall of FTL Binary
|
||||
if which dnsmasq &> /dev/null; then
|
||||
if is_command dnsmasq; then
|
||||
if check_service_active "dnsmasq";then
|
||||
return 0
|
||||
fi
|
||||
@@ -2409,7 +2332,7 @@ FTLcheckUpdate() {
|
||||
# We already have a pihole-FTL binary downloaded.
|
||||
# Alt branches don't have a tagged version against them, so just confirm the checksum of the local vs remote to decide whether we download or not
|
||||
remoteSha1=$(curl -sSL --fail "https://ftl.pi-hole.net/${ftlBranch}/${binary}.sha1" | cut -d ' ' -f 1)
|
||||
localSha1=$(sha1sum "$(which pihole-FTL)" | cut -d ' ' -f 1)
|
||||
localSha1=$(sha1sum "$(command -v pihole-FTL)" | cut -d ' ' -f 1)
|
||||
|
||||
if [[ "${remoteSha1}" != "${localSha1}" ]]; then
|
||||
printf " %b Checksums do not match, downloading from ftl.pi-hole.net.\\n" "${INFO}"
|
||||
@@ -2439,7 +2362,7 @@ FTLcheckUpdate() {
|
||||
printf " %b Latest FTL Binary already installed (%s). Confirming Checksum...\\n" "${INFO}" "${FTLlatesttag}"
|
||||
|
||||
remoteSha1=$(curl -sSL --fail "https://github.com/pi-hole/FTL/releases/download/${FTLversion%$'\r'}/${binary}.sha1" | cut -d ' ' -f 1)
|
||||
localSha1=$(sha1sum "$(which pihole-FTL)" | cut -d ' ' -f 1)
|
||||
localSha1=$(sha1sum "$(command -v pihole-FTL)" | cut -d ' ' -f 1)
|
||||
|
||||
if [[ "${remoteSha1}" != "${localSha1}" ]]; then
|
||||
printf " %b Corruption detected...\\n" "${INFO}"
|
||||
@@ -2547,6 +2470,11 @@ main() {
|
||||
printf " %b Checking for / installing Required dependencies for this install script...\\n" "${INFO}"
|
||||
install_dependent_packages "${INSTALLER_DEPS[@]}"
|
||||
|
||||
#In case of RPM based distro, select the proper PHP version
|
||||
if [[ "$PKG_MANAGER" == "yum" || "$PKG_MANAGER" == "dnf" ]] ; then
|
||||
select_rpm_php
|
||||
fi
|
||||
|
||||
# Check if SELinux is Enforcing
|
||||
checkSelinux
|
||||
|
||||
@@ -2574,12 +2502,12 @@ main() {
|
||||
get_available_interfaces
|
||||
# Find interfaces and let the user choose one
|
||||
chooseInterface
|
||||
# find IPv4 and IPv6 information of the device
|
||||
collect_v4andv6_information
|
||||
# Decide what upstream DNS Servers to use
|
||||
setDNS
|
||||
# Give the user a choice of blocklists to include in their install. Or not.
|
||||
chooseBlocklists
|
||||
# Let the user decide if they want to block ads over IPv4 and/or IPv6
|
||||
use4andor6
|
||||
# Let the user decide if they want the web interface to be installed automatically
|
||||
setAdminFlag
|
||||
# Let the user decide if they want query logging enabled...
|
||||
@@ -2652,7 +2580,7 @@ main() {
|
||||
# Add password to web UI if there is none
|
||||
pw=""
|
||||
# If no password is set,
|
||||
if [[ $(grep 'WEBPASSWORD' -c /etc/pihole/setupVars.conf) == 0 ]] ; then
|
||||
if [[ $(grep 'WEBPASSWORD' -c "${setupVars}") == 0 ]] ; then
|
||||
# generate a random password
|
||||
pw=$(tr -dc _A-Z-a-z-0-9 < /dev/urandom | head -c 8)
|
||||
# shellcheck disable=SC1091
|
||||
|
@@ -11,10 +11,9 @@
|
||||
source "/opt/pihole/COL_TABLE"
|
||||
|
||||
while true; do
|
||||
read -rp " ${QST} Are you sure you would like to remove ${COL_WHITE}Pi-hole${COL_NC}? [y/N] " yn
|
||||
case ${yn} in
|
||||
read -rp " ${QST} Are you sure you would like to remove ${COL_WHITE}Pi-hole${COL_NC}? [y/N] " answer
|
||||
case ${answer} in
|
||||
[Yy]* ) break;;
|
||||
[Nn]* ) echo -e "${OVER} ${COL_LIGHT_GREEN}Uninstall has been canceled${COL_NC}"; exit 0;;
|
||||
* ) echo -e "${OVER} ${COL_LIGHT_GREEN}Uninstall has been canceled${COL_NC}"; exit 0;;
|
||||
esac
|
||||
done
|
||||
@@ -76,8 +75,8 @@ removeAndPurge() {
|
||||
for i in "${DEPS[@]}"; do
|
||||
if package_check "${i}" > /dev/null; then
|
||||
while true; do
|
||||
read -rp " ${QST} Do you wish to remove ${COL_WHITE}${i}${COL_NC} from your system? [Y/N] " yn
|
||||
case ${yn} in
|
||||
read -rp " ${QST} Do you wish to remove ${COL_WHITE}${i}${COL_NC} from your system? [Y/N] " answer
|
||||
case ${answer} in
|
||||
[Yy]* )
|
||||
echo -ne " ${INFO} Removing ${i}...";
|
||||
${SUDO} "${PKG_REMOVE[@]}" "${i}" &> /dev/null;
|
||||
@@ -215,8 +214,8 @@ while true; do
|
||||
echo -n "${i} "
|
||||
done
|
||||
echo "${COL_NC}"
|
||||
read -rp " ${QST} Do you wish to go through each dependency for removal? (Choosing No will leave all dependencies installed) [Y/n] " yn
|
||||
case ${yn} in
|
||||
read -rp " ${QST} Do you wish to go through each dependency for removal? (Choosing No will leave all dependencies installed) [Y/n] " answer
|
||||
case ${answer} in
|
||||
[Yy]* ) removeAndPurge; break;;
|
||||
[Nn]* ) removeNoPurge; break;;
|
||||
* ) removeAndPurge; break;;
|
||||
|
gravity.sh (280 changed lines)
@@ -15,8 +15,6 @@ export LC_ALL=C
|
||||
|
||||
coltable="/opt/pihole/COL_TABLE"
|
||||
source "${coltable}"
|
||||
regexconverter="/opt/pihole/wildcard_regex_converter.sh"
|
||||
source "${regexconverter}"
|
||||
# shellcheck disable=SC1091
|
||||
source "/etc/.pihole/advanced/Scripts/database_migration/gravity-db.sh"
|
||||
|
||||
@@ -75,19 +73,24 @@ if [[ -r "${piholeDir}/pihole.conf" ]]; then
|
||||
echo -e " ${COL_LIGHT_RED}Ignoring overrides specified within pihole.conf! ${COL_NC}"
|
||||
fi
|
||||
|
||||
# Generate new sqlite3 file from schema template
|
||||
# Generate new SQLite3 file from schema template
|
||||
generate_gravity_database() {
|
||||
sqlite3 "${1}" < "${gravityDBschema}"
|
||||
if ! pihole-FTL sqlite3 "${gravityDBfile}" < "${gravityDBschema}"; then
|
||||
echo -e " ${CROSS} Unable to create ${gravityDBfile}"
|
||||
return 1
|
||||
fi
|
||||
chown pihole:pihole "${gravityDBfile}"
|
||||
chmod g+w "${piholeDir}" "${gravityDBfile}"
|
||||
}
|
||||
|
||||
# Copy data from old to new database file and swap them
|
||||
gravity_swap_databases() {
|
||||
local str copyGravity
|
||||
local str copyGravity oldAvail
|
||||
str="Building tree"
|
||||
echo -ne " ${INFO} ${str}..."
|
||||
|
||||
# The index is intentionally not UNIQUE as poor quality adlists may contain domains more than once
|
||||
output=$( { sqlite3 "${gravityTEMPfile}" "CREATE INDEX idx_gravity ON gravity (domain, adlist_id);"; } 2>&1 )
|
||||
output=$( { pihole-FTL sqlite3 "${gravityTEMPfile}" "CREATE INDEX idx_gravity ON gravity (domain, adlist_id);"; } 2>&1 )
|
||||
status="$?"
|
||||
|
||||
if [[ "${status}" -ne 0 ]]; then
|
||||
@@ -99,22 +102,6 @@ gravity_swap_databases() {
|
||||
str="Swapping databases"
|
||||
echo -ne " ${INFO} ${str}..."
|
||||
|
||||
# Gravity copying SQL script
|
||||
copyGravity="$(cat "${gravityDBcopy}")"
|
||||
if [[ "${gravityDBfile}" != "${gravityDBfile_default}" ]]; then
|
||||
# Replace default gravity script location by custom location
|
||||
copyGravity="${copyGravity//"${gravityDBfile_default}"/"${gravityDBfile}"}"
|
||||
fi
|
||||
|
||||
output=$( { sqlite3 "${gravityTEMPfile}" <<< "${copyGravity}"; } 2>&1 )
|
||||
status="$?"
|
||||
|
||||
if [[ "${status}" -ne 0 ]]; then
|
||||
echo -e "\\n ${CROSS} Unable to copy data from ${gravityDBfile} to ${gravityTEMPfile}\\n ${output}"
|
||||
return 1
|
||||
fi
|
||||
echo -e "${OVER} ${TICK} ${str}"
|
||||
|
||||
# Swap databases and remove or conditionally rename old database
|
||||
# Number of available blocks on disk
|
||||
availableBlocks=$(stat -f --format "%a" "${gravityDIR}")
|
||||
@@ -122,18 +109,24 @@ gravity_swap_databases() {
|
||||
gravityBlocks=$(stat --format "%b" ${gravityDBfile})
|
||||
# Only keep the old database if available disk space is at least twice the size of the existing gravity.db.
|
||||
# Better be safe than sorry...
|
||||
if [ "${availableBlocks}" -gt "$(("${gravityBlocks}" * 2))" ] && [ -f "${gravityDBfile}" ]; then
|
||||
echo -e " ${TICK} The old database remains available."
|
||||
oldAvail=false
|
||||
if [ "${availableBlocks}" -gt "$((gravityBlocks * 2))" ] && [ -f "${gravityDBfile}" ]; then
|
||||
oldAvail=true
|
||||
mv "${gravityDBfile}" "${gravityOLDfile}"
|
||||
else
|
||||
rm "${gravityDBfile}"
|
||||
fi
|
||||
mv "${gravityTEMPfile}" "${gravityDBfile}"
|
||||
echo -e "${OVER} ${TICK} ${str}"
|
||||
|
||||
if $oldAvail; then
|
||||
echo -e " ${TICK} The old database remains available."
|
||||
fi
|
||||
}
|
||||
|
||||
# Update timestamp when the gravity table was last updated successfully
|
||||
update_gravity_timestamp() {
|
||||
output=$( { printf ".timeout 30000\\nINSERT OR REPLACE INTO info (property,value) values ('updated',cast(strftime('%%s', 'now') as int));" | sqlite3 "${gravityDBfile}"; } 2>&1 )
|
||||
output=$( { printf ".timeout 30000\\nINSERT OR REPLACE INTO info (property,value) values ('updated',cast(strftime('%%s', 'now') as int));" | pihole-FTL sqlite3 "${gravityDBfile}"; } 2>&1 )
|
||||
status="$?"
|
||||
|
||||
if [[ "${status}" -ne 0 ]]; then
|
||||
@@ -174,7 +167,7 @@ database_table_from_file() {
|
||||
|
||||
# Get MAX(id) from domainlist when INSERTing into this table
|
||||
if [[ "${table}" == "domainlist" ]]; then
|
||||
rowid="$(sqlite3 "${gravityDBfile}" "SELECT MAX(id) FROM domainlist;")"
|
||||
rowid="$(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT MAX(id) FROM domainlist;")"
|
||||
if [[ -z "$rowid" ]]; then
|
||||
rowid=0
|
||||
fi
|
||||
@@ -204,7 +197,7 @@ database_table_from_file() {
|
||||
# Store domains in database table specified by ${table}
|
||||
# Use printf as .mode and .import need to be on separate lines
|
||||
# see https://unix.stackexchange.com/a/445615/83260
|
||||
output=$( { printf ".timeout 30000\\n.mode csv\\n.import \"%s\" %s\\n" "${tmpFile}" "${table}" | sqlite3 "${gravityDBfile}"; } 2>&1 )
|
||||
output=$( { printf ".timeout 30000\\n.mode csv\\n.import \"%s\" %s\\n" "${tmpFile}" "${table}" | pihole-FTL sqlite3 "${gravityDBfile}"; } 2>&1 )
|
||||
status="$?"
|
||||
|
||||
if [[ "${status}" -ne 0 ]]; then
|
||||
@@ -215,7 +208,7 @@ database_table_from_file() {
|
||||
# Move source file to backup directory, create directory if not existing
|
||||
mkdir -p "${backup_path}"
|
||||
mv "${source}" "${backup_file}" 2> /dev/null || \
|
||||
echo -e " ${CROSS} Unable to backup ${source} to ${backup_path}"
|
||||
echo -e " ${CROSS} Unable to backup ${source} to ${backup_path}"
|
||||
|
||||
# Delete tmpFile
|
||||
rm "${tmpFile}" > /dev/null 2>&1 || \
|
||||
@@ -224,7 +217,7 @@ database_table_from_file() {
|
||||
|
||||
# Update timestamp of last update of this list. We store this in the "old" database as all values in the new database will later be overwritten
|
||||
database_adlist_updated() {
|
||||
output=$( { printf ".timeout 30000\\nUPDATE adlist SET date_updated = (cast(strftime('%%s', 'now') as int)) WHERE id = %i;\\n" "${1}" | sqlite3 "${gravityDBfile}"; } 2>&1 )
|
||||
output=$( { printf ".timeout 30000\\nUPDATE adlist SET date_updated = (cast(strftime('%%s', 'now') as int)) WHERE id = %i;\\n" "${1}" | pihole-FTL sqlite3 "${gravityDBfile}"; } 2>&1 )
|
||||
status="$?"
|
||||
|
||||
if [[ "${status}" -ne 0 ]]; then
|
||||
@@ -235,7 +228,7 @@ database_adlist_updated() {
|
||||
|
||||
# Check if a column with name ${2} exists in gravity table with name ${1}
|
||||
gravity_column_exists() {
|
||||
output=$( { printf ".timeout 30000\\nSELECT EXISTS(SELECT * FROM pragma_table_info('%s') WHERE name='%s');\\n" "${1}" "${2}" | sqlite3 "${gravityDBfile}"; } 2>&1 )
|
||||
output=$( { printf ".timeout 30000\\nSELECT EXISTS(SELECT * FROM pragma_table_info('%s') WHERE name='%s');\\n" "${1}" "${2}" | pihole-FTL sqlite3 "${gravityDBfile}"; } 2>&1 )
|
||||
if [[ "${output}" == "1" ]]; then
|
||||
return 0 # Bash 0 is success
|
||||
fi
|
||||
@@ -250,7 +243,7 @@ database_adlist_number() {
|
||||
return;
|
||||
fi
|
||||
|
||||
output=$( { printf ".timeout 30000\\nUPDATE adlist SET number = %i, invalid_domains = %i WHERE id = %i;\\n" "${num_lines}" "${num_invalid}" "${1}" | sqlite3 "${gravityDBfile}"; } 2>&1 )
|
||||
output=$( { printf ".timeout 30000\\nUPDATE adlist SET number = %i, invalid_domains = %i WHERE id = %i;\\n" "${num_source_lines}" "${num_invalid}" "${1}" | pihole-FTL sqlite3 "${gravityDBfile}"; } 2>&1 )
|
||||
status="$?"
|
||||
|
||||
if [[ "${status}" -ne 0 ]]; then
|
||||
@@ -266,7 +259,7 @@ database_adlist_status() {
|
||||
return;
|
||||
fi
|
||||
|
||||
output=$( { printf ".timeout 30000\\nUPDATE adlist SET status = %i WHERE id = %i;\\n" "${2}" "${1}" | sqlite3 "${gravityDBfile}"; } 2>&1 )
|
||||
output=$( { printf ".timeout 30000\\nUPDATE adlist SET status = %i WHERE id = %i;\\n" "${2}" "${1}" | pihole-FTL sqlite3 "${gravityDBfile}"; } 2>&1 )
|
||||
status="$?"
|
||||
|
||||
if [[ "${status}" -ne 0 ]]; then
|
||||
@@ -281,7 +274,10 @@ migrate_to_database() {
|
||||
if [ ! -e "${gravityDBfile}" ]; then
|
||||
# Create new database file - note that this will be created in version 1
|
||||
echo -e " ${INFO} Creating new gravity database"
|
||||
generate_gravity_database "${gravityDBfile}"
|
||||
if ! generate_gravity_database; then
|
||||
echo -e " ${CROSS} Error creating new gravity database. Please contact support."
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Check if gravity database needs to be updated
|
||||
upgrade_gravityDB "${gravityDBfile}" "${piholeDir}"
|
||||
@@ -380,9 +376,9 @@ gravity_DownloadBlocklists() {
|
||||
fi
|
||||
|
||||
# Retrieve source URLs from gravity database
|
||||
# We source only enabled adlists, sqlite3 stores boolean values as 0 (false) or 1 (true)
|
||||
mapfile -t sources <<< "$(sqlite3 "${gravityDBfile}" "SELECT address FROM vw_adlist;" 2> /dev/null)"
|
||||
mapfile -t sourceIDs <<< "$(sqlite3 "${gravityDBfile}" "SELECT id FROM vw_adlist;" 2> /dev/null)"
|
||||
# We source only enabled adlists, SQLite3 stores boolean values as 0 (false) or 1 (true)
|
||||
mapfile -t sources <<< "$(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT address FROM vw_adlist;" 2> /dev/null)"
|
||||
mapfile -t sourceIDs <<< "$(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT id FROM vw_adlist;" 2> /dev/null)"
|
||||
|
||||
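The `mapfile` calls above turn one column of query output into a bash array, one element per line. A standalone sketch with a placeholder database, querying the base table directly instead of the vw_adlist view and relying on the 0/1 boolean storage mentioned above:

```
# Read the enabled adlist URLs into an array and iterate over them.
mapfile -t sources <<< "$(sqlite3 /tmp/gravity.db "SELECT address FROM adlist WHERE enabled = 1;")"
echo "Found ${#sources[@]} enabled adlists"
for url in "${sources[@]}"; do
    echo "  - ${url}"
done
```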
# Parse source domains from $sources
|
||||
mapfile -t sourceDomains <<< "$(
|
||||
@@ -396,14 +392,12 @@ gravity_DownloadBlocklists() {
|
||||
)"
|
||||
|
||||
local str="Pulling blocklist source list into range"
|
||||
echo -e "${OVER} ${TICK} ${str}"
|
||||
|
||||
if [[ -n "${sources[*]}" ]] && [[ -n "${sourceDomains[*]}" ]]; then
|
||||
echo -e "${OVER} ${TICK} ${str}"
|
||||
else
|
||||
echo -e "${OVER} ${CROSS} ${str}"
|
||||
if [[ -z "${sources[*]}" ]] || [[ -z "${sourceDomains[*]}" ]]; then
|
||||
echo -e " ${INFO} No source list found, or it is empty"
|
||||
echo ""
|
||||
return 1
|
||||
unset sources
|
||||
fi
|
||||
|
||||
local url domain agent cmd_ext str target compression
|
||||
@@ -413,7 +407,7 @@ gravity_DownloadBlocklists() {
|
||||
str="Preparing new gravity database"
|
||||
echo -ne " ${INFO} ${str}..."
|
||||
rm "${gravityTEMPfile}" > /dev/null 2>&1
|
||||
output=$( { sqlite3 "${gravityTEMPfile}" < "${gravityDBschema}"; } 2>&1 )
|
||||
output=$( { pihole-FTL sqlite3 "${gravityTEMPfile}" < "${gravityDBschema}"; } 2>&1 )
|
||||
status="$?"
|
||||
|
||||
if [[ "${status}" -ne 0 ]]; then
|
||||
@@ -432,9 +426,9 @@ gravity_DownloadBlocklists() {
|
||||
compression="--compressed"
|
||||
echo -e " ${INFO} Using libz compression\n"
|
||||
else
|
||||
compression=""
|
||||
echo -e " ${INFO} Libz compression not available\n"
|
||||
fi
|
||||
compression=""
|
||||
echo -e " ${INFO} Libz compression not available\n"
|
||||
fi
|
||||
# Loop through $sources and download each one
|
||||
for ((i = 0; i < "${#sources[@]}"; i++)); do
|
||||
url="${sources[$i]}"
|
||||
@@ -464,16 +458,35 @@ gravity_DownloadBlocklists() {
|
||||
check_url="$( sed -re 's#([^:/]*://)?([^/]+)@#\1\2#' <<< "$url" )"
|
||||
|
||||
if [[ "${check_url}" =~ ${regex} ]]; then
|
||||
echo -e " ${CROSS} Invalid Target"
|
||||
echo -e " ${CROSS} Invalid Target"
|
||||
else
|
||||
gravity_DownloadBlocklistFromUrl "${url}" "${cmd_ext}" "${agent}" "${sourceIDs[$i]}" "${saveLocation}" "${target}" "${compression}"
|
||||
gravity_DownloadBlocklistFromUrl "${url}" "${cmd_ext}" "${agent}" "${sourceIDs[$i]}" "${saveLocation}" "${target}" "${compression}"
|
||||
fi
|
||||
echo ""
|
||||
done
|
||||
|
||||
str="Creating new gravity databases"
|
||||
echo -ne " ${INFO} ${str}..."
|
||||
|
||||
# Gravity copying SQL script
|
||||
copyGravity="$(cat "${gravityDBcopy}")"
|
||||
if [[ "${gravityDBfile}" != "${gravityDBfile_default}" ]]; then
|
||||
# Replace default gravity script location by custom location
|
||||
copyGravity="${copyGravity//"${gravityDBfile_default}"/"${gravityDBfile}"}"
|
||||
fi
|
||||
|
||||
output=$( { pihole-FTL sqlite3 "${gravityTEMPfile}" <<< "${copyGravity}"; } 2>&1 )
|
||||
status="$?"
|
||||
|
||||
if [[ "${status}" -ne 0 ]]; then
|
||||
echo -e "\\n ${CROSS} Unable to copy data from ${gravityDBfile} to ${gravityTEMPfile}\\n ${output}"
|
||||
return 1
|
||||
fi
|
||||
echo -e "${OVER} ${TICK} ${str}"
|
||||
|
||||
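The `${var//pattern/replacement}` expansion above rewrites every occurrence of the default database path inside the loaded SQL script. A small illustration (the SQL line and the custom location are made up; the expansion is the point):

```
gravityDBfile_default="/etc/pihole/gravity.db"
gravityDBfile="/opt/custom/gravity.db"                     # hypothetical custom location
copyGravity="ATTACH DATABASE '/etc/pihole/gravity.db' AS old;"
# Replace every occurrence of the default path with the configured one.
copyGravity="${copyGravity//"${gravityDBfile_default}"/"${gravityDBfile}"}"
echo "${copyGravity}"    # ATTACH DATABASE '/opt/custom/gravity.db' AS old;
```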
str="Storing downloaded domains in new gravity database"
|
||||
echo -ne " ${INFO} ${str}..."
|
||||
output=$( { printf ".timeout 30000\\n.mode csv\\n.import \"%s\" gravity\\n" "${target}" | sqlite3 "${gravityTEMPfile}"; } 2>&1 )
|
||||
output=$( { printf ".timeout 30000\\n.mode csv\\n.import \"%s\" gravity\\n" "${target}" | pihole-FTL sqlite3 "${gravityTEMPfile}"; } 2>&1 )
|
||||
status="$?"
|
||||
|
||||
if [[ "${status}" -ne 0 ]]; then
|
||||
@@ -505,8 +518,9 @@ gravity_DownloadBlocklists() {
|
||||
gravity_Blackbody=true
|
||||
}
|
||||
|
||||
total_num=0
|
||||
num_lines=0
|
||||
# num_target_lines increases for every correctly added domain in parseList()
|
||||
num_target_lines=0
|
||||
num_source_lines=0
|
||||
num_invalid=0
|
||||
parseList() {
|
||||
local adlistID="${1}" src="${2}" target="${3}" incorrect_lines
|
||||
@@ -518,18 +532,20 @@ parseList() {
|
||||
# Find (up to) five domains containing invalid characters (see above)
|
||||
incorrect_lines="$(sed -e "/[^a-zA-Z0-9.\_-]/!d" "${src}" | head -n 5)"
|
||||
|
||||
local num_target_lines num_correct_lines num_invalid
|
||||
local num_target_lines_new num_correct_lines
|
||||
# Get number of lines in source file
|
||||
num_lines="$(grep -c "^" "${src}")"
|
||||
# Get number of lines in destination file
|
||||
num_target_lines="$(grep -c "^" "${target}")"
|
||||
num_correct_lines="$(( num_target_lines-total_num ))"
|
||||
total_num="$num_target_lines"
|
||||
num_invalid="$(( num_lines-num_correct_lines ))"
|
||||
num_source_lines="$(grep -c "^" "${src}")"
|
||||
# Get the new number of lines in destination file
|
||||
num_target_lines_new="$(grep -c "^" "${target}")"
|
||||
# Number of new correctly added lines
|
||||
num_correct_lines="$(( num_target_lines_new-num_target_lines ))"
|
||||
# Update the number of lines in the target file
|
||||
num_target_lines="$num_target_lines_new"
|
||||
num_invalid="$(( num_source_lines-num_correct_lines ))"
|
||||
if [[ "${num_invalid}" -eq 0 ]]; then
|
||||
echo " ${INFO} Analyzed ${num_lines} domains"
|
||||
echo " ${INFO} Analyzed ${num_source_lines} domains"
|
||||
else
|
||||
echo " ${INFO} Analyzed ${num_lines} domains, ${num_invalid} domains invalid!"
|
||||
echo " ${INFO} Analyzed ${num_source_lines} domains, ${num_invalid} domains invalid!"
|
||||
fi
|
||||
|
||||
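A worked example of the bookkeeping above, with made-up numbers; since the counters only compare line counts, "invalid" here also covers duplicates that were dropped:

```
num_source_lines=1000      # lines in the downloaded list
num_target_lines=5000      # lines already in the target before this list
num_target_lines_new=5950  # lines in the target after parsing this list
num_correct_lines=$(( num_target_lines_new - num_target_lines ))   # 950 newly added
num_invalid=$(( num_source_lines - num_correct_lines ))            # 50 invalid or duplicate
echo "Analyzed ${num_source_lines} domains, ${num_invalid} domains invalid!"
```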
# Display sample of invalid lines if we found some
|
||||
@@ -585,28 +601,32 @@ gravity_DownloadBlocklistFromUrl() {
|
||||
blocked=false
|
||||
case $BLOCKINGMODE in
|
||||
"IP-NODATA-AAAA"|"IP")
|
||||
# Get IP address of this domain
|
||||
ip="$(dig "${domain}" +short)"
|
||||
# Check if this IP matches any IP of the system
|
||||
if [[ -n "${ip}" && $(grep -Ec "inet(|6) ${ip}" <<< "$(ip a)") -gt 0 ]]; then
|
||||
blocked=true
|
||||
fi;;
|
||||
# Get IP address of this domain
|
||||
ip="$(dig "${domain}" +short)"
|
||||
# Check if this IP matches any IP of the system
|
||||
if [[ -n "${ip}" && $(grep -Ec "inet(|6) ${ip}" <<< "$(ip a)") -gt 0 ]]; then
|
||||
blocked=true
|
||||
fi;;
|
||||
"NXDOMAIN")
|
||||
if [[ $(dig "${domain}" | grep "NXDOMAIN" -c) -ge 1 ]]; then
|
||||
blocked=true
|
||||
fi;;
|
||||
if [[ $(dig "${domain}" | grep "NXDOMAIN" -c) -ge 1 ]]; then
|
||||
blocked=true
|
||||
fi;;
|
||||
"NODATA")
|
||||
if [[ $(dig "${domain}" | grep "NOERROR" -c) -ge 1 ]] && [[ -z $(dig +short "${domain}") ]]; then
|
||||
blocked=true
|
||||
fi;;
|
||||
"NULL"|*)
|
||||
if [[ $(dig "${domain}" +short | grep "0.0.0.0" -c) -ge 1 ]]; then
|
||||
blocked=true
|
||||
fi;;
|
||||
esac
|
||||
if [[ $(dig "${domain}" +short | grep "0.0.0.0" -c) -ge 1 ]]; then
|
||||
blocked=true
|
||||
fi;;
|
||||
esac
|
||||
|
||||
if [[ "${blocked}" == true ]]; then
|
||||
printf -v ip_addr "%s" "${PIHOLE_DNS_1%#*}"
|
||||
if [[ ${PIHOLE_DNS_1} != *"#"* ]]; then
|
||||
port=53
|
||||
port=53
|
||||
else
|
||||
printf -v port "%s" "${PIHOLE_DNS_1#*#}"
|
||||
printf -v port "%s" "${PIHOLE_DNS_1#*#}"
|
||||
fi
|
||||
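The port handling above relies on bash prefix/suffix stripping of an `address#port` value. A sketch with a made-up upstream:

```
PIHOLE_DNS_1="127.0.0.1#5335"
ip_addr="${PIHOLE_DNS_1%#*}"            # drop the shortest '#...' suffix -> 127.0.0.1
if [[ ${PIHOLE_DNS_1} != *"#"* ]]; then
    port=53                             # no '#' present, fall back to the default DNS port
else
    port="${PIHOLE_DNS_1#*#}"           # drop the shortest '...#' prefix -> 5335
fi
echo "querying ${ip_addr} on port ${port}"
```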
ip=$(dig "@${ip_addr}" -p "${port}" +short "${domain}" | tail -1)
|
||||
if [[ $(echo "${url}" | awk -F '://' '{print $1}') = "https" ]]; then
|
||||
@@ -625,11 +645,11 @@ gravity_DownloadBlocklistFromUrl() {
|
||||
case $url in
|
||||
# Did we "download" a local file?
|
||||
"file"*)
|
||||
if [[ -s "${patternBuffer}" ]]; then
|
||||
echo -e "${OVER} ${TICK} ${str} Retrieval successful"; success=true
|
||||
else
|
||||
echo -e "${OVER} ${CROSS} ${str} Not found / empty list"
|
||||
fi;;
|
||||
if [[ -s "${patternBuffer}" ]]; then
|
||||
echo -e "${OVER} ${TICK} ${str} Retrieval successful"; success=true
|
||||
else
|
||||
echo -e "${OVER} ${CROSS} ${str} Not found / empty list"
|
||||
fi;;
|
||||
# Did we "download" a remote file?
|
||||
*)
|
||||
# Determine "Status:" output based on HTTP response
|
||||
@@ -688,7 +708,7 @@ gravity_DownloadBlocklistFromUrl() {
|
||||
else
|
||||
echo -e " ${CROSS} List download failed: ${COL_LIGHT_RED}no cached list available${COL_NC}"
|
||||
# Manually reset these two numbers because we do not call parseList here
|
||||
num_lines=0
|
||||
num_source_lines=0
|
||||
num_invalid=0
|
||||
database_adlist_number "${adlistID}"
|
||||
database_adlist_status "${adlistID}" "4"
|
||||
@@ -771,12 +791,12 @@ gravity_Table_Count() {
|
||||
local table="${1}"
|
||||
local str="${2}"
|
||||
local num
|
||||
num="$(sqlite3 "${gravityDBfile}" "SELECT COUNT(*) FROM ${table};")"
|
||||
num="$(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT COUNT(*) FROM ${table};")"
|
||||
if [[ "${table}" == "vw_gravity" ]]; then
|
||||
local unique
|
||||
unique="$(sqlite3 "${gravityDBfile}" "SELECT COUNT(DISTINCT domain) FROM ${table};")"
|
||||
unique="$(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT COUNT(DISTINCT domain) FROM ${table};")"
|
||||
echo -e " ${INFO} Number of ${str}: ${num} (${COL_BOLD}${unique} unique domains${COL_NC})"
|
||||
sqlite3 "${gravityDBfile}" "INSERT OR REPLACE INTO info (property,value) VALUES ('gravity_count',${unique});"
|
||||
pihole-FTL sqlite3 "${gravityDBfile}" "INSERT OR REPLACE INTO info (property,value) VALUES ('gravity_count',${unique});"
|
||||
else
|
||||
echo -e " ${INFO} Number of ${str}: ${num}"
|
||||
fi
|
||||
@@ -847,6 +867,49 @@ gravity_Cleanup() {
|
||||
fi
|
||||
}
|
||||
|
||||
database_recovery() {
|
||||
local result
|
||||
local str="Checking integrity of existing gravity database"
|
||||
local option="${1}"
|
||||
echo -ne " ${INFO} ${str}..."
|
||||
if result="$(pihole-FTL sqlite3 "${gravityDBfile}" "PRAGMA integrity_check" 2>&1)"; then
|
||||
echo -e "${OVER} ${TICK} ${str} - no errors found"
|
||||
|
||||
str="Checking foreign keys of existing gravity database"
|
||||
echo -ne " ${INFO} ${str}..."
|
||||
if result="$(pihole-FTL sqlite3 "${gravityDBfile}" "PRAGMA foreign_key_check" 2>&1)"; then
|
||||
echo -e "${OVER} ${TICK} ${str} - no errors found"
|
||||
if [[ "${option}" != "force" ]]; then
|
||||
return
|
||||
fi
|
||||
else
|
||||
echo -e "${OVER} ${CROSS} ${str} - errors found:"
|
||||
while IFS= read -r line ; do echo " - $line"; done <<< "$result"
|
||||
fi
|
||||
else
|
||||
echo -e "${OVER} ${CROSS} ${str} - errors found:"
|
||||
while IFS= read -r line ; do echo " - $line"; done <<< "$result"
|
||||
fi
|
||||
|
||||
str="Trying to recover existing gravity database"
|
||||
echo -ne " ${INFO} ${str}..."
|
||||
# We have to remove any possibly existing recovery database or this will fail
|
||||
rm -f "${gravityDBfile}.recovered" > /dev/null 2>&1
|
||||
if result="$(pihole-FTL sqlite3 "${gravityDBfile}" ".recover" | pihole-FTL sqlite3 "${gravityDBfile}.recovered" 2>&1)"; then
|
||||
echo -e "${OVER} ${TICK} ${str} - success"
|
||||
mv "${gravityDBfile}" "${gravityDBfile}.old"
|
||||
mv "${gravityDBfile}.recovered" "${gravityDBfile}"
|
||||
echo -ne " ${INFO} ${gravityDBfile} has been recovered"
|
||||
echo -ne " ${INFO} The old ${gravityDBfile} has been moved to ${gravityDBfile}.old"
|
||||
else
|
||||
echo -e "${OVER} ${CROSS} ${str} - the following errors happened:"
|
||||
while IFS= read -r line ; do echo " - $line"; done <<< "$result"
|
||||
echo -e " ${CROSS} Recovery failed. Try \"pihole -r recreate\" instead."
|
||||
exit 1
|
||||
fi
|
||||
echo ""
|
||||
}
|
||||
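The recovery flow boils down to an integrity check followed by `.recover` piped into a fresh file. A condensed sketch using plain `sqlite3` and a placeholder path (the real script goes through the bundled `pihole-FTL sqlite3`, and `.recover` needs a reasonably recent CLI build):

```
db="/tmp/gravity.db"
if ! sqlite3 "${db}" "PRAGMA integrity_check;" | grep -q "^ok$"; then
    rm -f "${db}.recovered"
    # Dump everything salvageable as SQL and replay it into a new database file.
    sqlite3 "${db}" ".recover" | sqlite3 "${db}.recovered"
    mv "${db}" "${db}.old"
    mv "${db}.recovered" "${db}"
fi
```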
|
||||
helpFunc() {
|
||||
echo "Usage: pihole -g
|
||||
Update domains from blocklists specified in adlists.list
|
||||
@@ -857,10 +920,37 @@ Options:
|
||||
exit 0
|
||||
}
|
||||
|
||||
repairSelector() {
|
||||
case "$1" in
|
||||
"recover") recover_database=true;;
|
||||
"recreate") recreate_database=true;;
|
||||
*) echo "Usage: pihole -g -r {recover,recreate}
|
||||
Attempt to repair gravity database
|
||||
|
||||
Available options:
|
||||
pihole -g -r recover Try to recover a damaged gravity database file.
|
||||
Pi-hole tries to restore as much as possible
|
||||
from a corrupted gravity database.
|
||||
|
||||
pihole -g -r recover force Pi-hole will run the recovery process even when
|
||||
no damage is detected. This option is meant to be
|
||||
a last resort. Recovery is a fragile task
|
||||
consuming a lot of resources and shouldn't be
|
||||
performed unnecessarily.
|
||||
|
||||
pihole -g -r recreate Create a new gravity database file from scratch.
|
||||
This will remove your existing gravity database
|
||||
and create a new file from scratch. If you still
|
||||
have the migration backup created when migrating
|
||||
to Pi-hole v5.0, Pi-hole will import these files."
|
||||
exit 0;;
|
||||
esac
|
||||
}
|
||||
|
||||
for var in "$@"; do
|
||||
case "${var}" in
|
||||
"-f" | "--force" ) forceDelete=true;;
|
||||
"-r" | "--recreate" ) recreate_database=true;;
|
||||
"-r" | "--repair" ) repairSelector "$3";;
|
||||
"-h" | "--help" ) helpFunc;;
|
||||
esac
|
||||
done
|
||||
@@ -874,7 +964,7 @@ fi
|
||||
gravity_Trap
|
||||
|
||||
if [[ "${recreate_database:-}" == true ]]; then
|
||||
str="Restoring from migration backup"
|
||||
str="Recreating gravity database from migration backup"
|
||||
echo -ne "${INFO} ${str}..."
|
||||
rm "${gravityDBfile}"
|
||||
pushd "${piholeDir}" > /dev/null || exit
|
||||
@@ -883,8 +973,15 @@ if [[ "${recreate_database:-}" == true ]]; then
|
||||
echo -e "${OVER} ${TICK} ${str}"
|
||||
fi
|
||||
|
||||
if [[ "${recover_database:-}" == true ]]; then
|
||||
database_recovery "$4"
|
||||
fi
|
||||
|
||||
# Move possibly existing legacy files to the gravity database
|
||||
migrate_to_database
|
||||
if ! migrate_to_database; then
|
||||
echo -e " ${CROSS} Unable to migrate to database. Please contact support."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "${forceDelete:-}" == true ]]; then
|
||||
str="Deleting existing list cache"
|
||||
@@ -895,14 +992,21 @@ if [[ "${forceDelete:-}" == true ]]; then
|
||||
fi
|
||||
|
||||
# Gravity downloads blocklists next
|
||||
gravity_CheckDNSResolutionAvailable
|
||||
if ! gravity_CheckDNSResolutionAvailable; then
|
||||
echo -e " ${CROSS} Can not complete gravity update, no DNS is available. Please contact support."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
gravity_DownloadBlocklists
|
||||
|
||||
# Create local.list
|
||||
gravity_generateLocalList
|
||||
|
||||
# Migrate rest of the data from old to new database
|
||||
gravity_swap_databases
|
||||
if ! gravity_swap_databases; then
|
||||
echo -e " ${CROSS} Unable to create database. Please contact support."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Update gravity timestamp
|
||||
update_gravity_timestamp
|
||||
|
@@ -144,7 +144,9 @@ Command line arguments can be arbitrarily combined, e.g:
|
||||
Start ftl in foreground with more verbose logging, process everything and shutdown immediately
|
||||
.br
|
||||
.SH "SEE ALSO"
|
||||
\fBpihole\fR(8), \fBpihole-FTL.conf\fR(5)
|
||||
\fBpihole\fR(8)
|
||||
.br
|
||||
\fBFor FTL's config options please see https://docs.pi-hole.net/ftldns/configfile/\fR
|
||||
.br
|
||||
.SH "COLOPHON"
|
||||
|
||||
|
@@ -1,313 +0,0 @@
|
||||
.TH "pihole-FTL.conf" "5" "pihole-FTL.conf" "pihole-FTL.conf" "November 2020"
|
||||
.SH "NAME"
|
||||
|
||||
pihole-FTL.conf - FTL's config file
|
||||
.br
|
||||
.SH "DESCRIPTION"
|
||||
|
||||
/etc/pihole/pihole-FTL.conf will be read by \fBpihole-FTL(8)\fR on startup.
|
||||
.br
|
||||
For each setting the option shown first is the default.
|
||||
.br
|
||||
|
||||
\fBBLOCKINGMODE=IP|IP-AAAA-NODATA|NODATA|NXDOMAIN|NULL\fR
|
||||
.br
|
||||
How should FTL reply to blocked queries?
|
||||
|
||||
IP - Pi-hole's IPs for blocked domains
|
||||
|
||||
IP-AAAA-NODATA - Pi-hole's IP + NODATA-IPv6 for blocked domains
|
||||
|
||||
NODATA - Using NODATA for blocked domains
|
||||
|
||||
NXDOMAIN - NXDOMAIN for blocked domains
|
||||
|
||||
NULL - Null IPs for blocked domains
|
||||
.br
|
||||
|
||||
\fBCNAME_DEEP_INSPECT=true|false\fR
|
||||
.br
|
||||
Use this option to disable deep CNAME inspection. This might be beneficial for very low-end devices.
|
||||
.br
|
||||
|
||||
\fBBLOCK_ESNI=true|false\fR
|
||||
.br
|
||||
Block requests to _esni.* sub-domains.
|
||||
.br
|
||||
|
||||
\fBMAXLOGAGE=24.0\fR
|
||||
.br
|
||||
Up to how many hours of queries should be imported from the database and logs?
|
||||
.br
|
||||
Maximum is 744 (31 days)
|
||||
.br
|
||||
|
||||
\fBPRIVACYLEVEL=0|1|2|3|4\fR
|
||||
.br
|
||||
Privacy level used to collect Pi-hole statistics.
|
||||
.br
|
||||
0 - show everything
|
||||
.br
|
||||
1 - hide domains
|
||||
.br
|
||||
2 - hide domains and clients
|
||||
.br
|
||||
3 - anonymous mode (hide everything)
|
||||
.br
|
||||
4 - disable all statistics
|
||||
.br
|
||||
|
||||
\fBIGNORE_LOCALHOST=no|yes\fR
|
||||
.br
|
||||
Should FTL ignore queries coming from the local machine?
|
||||
.br
|
||||
|
||||
\fBAAAA_QUERY_ANALYSIS=yes|no\fR
|
||||
.br
|
||||
Should FTL analyze AAAA queries?
|
||||
.br
|
||||
|
||||
\fBANALYZE_ONLY_A_AND_AAAA=false|true\fR
|
||||
.br
|
||||
Should FTL only analyze A and AAAA queries?
|
||||
.br
|
||||
|
||||
\fBSOCKET_LISTENING=localonly|all\fR
|
||||
.br
|
||||
Listen only for local socket connections on the API port or permit all connections.
|
||||
.br
|
||||
|
||||
\fBFTLPORT=4711\fR
|
||||
.br
|
||||
On which port should FTL be listening?
|
||||
.br
|
||||
|
||||
\fBRESOLVE_IPV6=yes|no\fR
|
||||
.br
|
||||
Should FTL try to resolve IPv6 addresses to hostnames?
|
||||
.br
|
||||
|
||||
\fBRESOLVE_IPV4=yes|no\fR
|
||||
.br
|
||||
Should FTL try to resolve IPv4 addresses to hostnames?
|
||||
.br
|
||||
|
||||
\fBDELAY_STARTUP=0\fR
|
||||
.br
|
||||
Time in seconds (between 0 and 300) to delay FTL startup.
|
||||
.br
|
||||
|
||||
\fBNICE=-10\fR
|
||||
.br
|
||||
Set the niceness of the Pi-hole FTL process.
|
||||
.br
|
||||
Can be disabled altogether by setting a value of -999.
|
||||
.br
|
||||
|
||||
\fBNAMES_FROM_NETDB=true|false\fR
|
||||
.br
|
||||
Control whether FTL should use a fallback option and try to obtain client names from checking the network table.
|
||||
.br
|
||||
E.g. IPv6 clients without a hostname will be compared via MAC address to known clients.
|
||||
.br
|
||||
|
||||
\fBREFRESH_HOSTNAMES=IPV4|ALL|NONE\fR
|
||||
.br
|
||||
Change how (and if) hourly PTR requests are made to check for changes in client and upstream server hostnames:
|
||||
.br
|
||||
IPV4 - Do the hourly PTR lookups only for IPv4 addresses. This resolves issues in networks with many short-lived PE IPv6 addresses.
|
||||
.br
|
||||
ALL - Do the hourly PTR lookups for all addresses. This can create a lot of PTR queries in networks with many IPv6 addresses.
|
||||
.br
|
||||
NONE - Don't do hourly PTR lookups. Look up hostnames once (when first seeing a client) and never again. Future hostname changes may be missed.
|
||||
.br
|
||||
|
||||
\fBMAXNETAGE=365\fR
|
||||
.br
|
||||
IP addresses (and associated host names) older than the specified number of days are removed.
|
||||
.br
|
||||
This avoids dead entries in the network overview table.
|
||||
.br
|
||||
|
||||
\fBEDNS0_ECS=true|false\fR
|
||||
.br
|
||||
Should we overwrite the query source when client information is provided through EDNS0 client subnet (ECS) information?
|
||||
.br
|
||||
|
||||
\fBPARSE_ARP_CACHE=true|false\fR
|
||||
.br
|
||||
Parse ARP cache to fill network overview table.
|
||||
.br
|
||||
|
||||
\fBDBIMPORT=yes|no\fR
|
||||
.br
|
||||
Should FTL load information from the database on startup to be aware of the most recent history?
|
||||
.br
|
||||
|
||||
\fBMAXDBDAYS=365\fR
|
||||
.br
|
||||
How long should queries be stored in the database? Setting this to 0 disables the database
|
||||
.br
|
||||
|
||||
\fBDBINTERVAL=1.0\fR
|
||||
.br
|
||||
How often do we store queries in FTL's database [minutes]?
|
||||
.br
|
||||
Accepts value between 0.1 (6 sec) and 1440 (1 day)
|
||||
.br
|
||||
|
||||
\fBDBFILE=/etc/pihole/pihole-FTL.db\fR
|
||||
.br
|
||||
Specify path and filename of FTL's SQLite long-term database.
|
||||
.br
|
||||
Setting this to DBFILE= disables the database altogether
|
||||
.br
|
||||
|
||||
\fBLOGFILE=/var/log/pihole-FTL.log\fR
|
||||
.br
|
||||
The location of FTL's log file.
|
||||
.br
|
||||
|
||||
\fBPIDFILE=/run/pihole-FTL.pid\fR
|
||||
.br
|
||||
The file which contains the PID of FTL's main process.
|
||||
.br
|
||||
|
||||
\fBPORTFILE=/run/pihole-FTL.port\fR
|
||||
.br
|
||||
Specify path and filename where the FTL process will write its API port number.
|
||||
.br
|
||||
|
||||
\fBSOCKETFILE=/run/pihole/FTL.sock\fR
|
||||
.br
|
||||
The file containing the socket FTL's API is listening on.
|
||||
.br
|
||||
|
||||
\fBSETUPVARSFILE=/etc/pihole/setupVars.conf\fR
|
||||
.br
|
||||
The config file of Pi-hole containing, e.g., the current blocking status (do not change).
|
||||
.br
|
||||
|
||||
\fBMACVENDORDB=/etc/pihole/macvendor.db\fR
|
||||
.br
|
||||
The database containing MAC -> Vendor information for the network table.
|
||||
.br
|
||||
|
||||
\fBGRAVITYDB=/etc/pihole/gravity.db\fR
|
||||
.br
|
||||
Specify path and filename of FTL's SQLite3 gravity database. This database contains all domains relevant for Pi-hole's DNS blocking.
|
||||
.br
|
||||
|
||||
\fBDEBUG_ALL=false|true\fR
|
||||
.br
|
||||
Enable all debug flags. If this is set to true, all other debug config options are ignored.
|
||||
.br
|
||||
|
||||
\fBDEBUG_DATABASE=false|true\fR
|
||||
.br
|
||||
Print debugging information about database actions such as SQL statements and performance.
|
||||
.br
|
||||
|
||||
\fBDEBUG_NETWORKING=false|true\fR
|
||||
.br
|
||||
Prints a list of the detected network interfaces on the startup of FTL.
|
||||
.br
|
||||
|
||||
\fBDEBUG_LOCKS=false|true\fR
|
||||
.br
|
||||
Print information about shared memory locks.
|
||||
.br
|
||||
Messages will be generated when waiting, obtaining, and releasing a lock.
|
||||
.br
|
||||
|
||||
\fBDEBUG_QUERIES=false|true\fR
|
||||
.br
|
||||
Print extensive DNS query information (domains, types, replies, etc.).
|
||||
.br
|
||||
|
||||
\fBDEBUG_FLAGS=false|true\fR
|
||||
.br
|
||||
Print flags of queries received by the DNS hooks.
|
||||
.br
|
||||
Only effective when \fBDEBUG_QUERIES\fR is enabled as well.
|
||||
|
||||
\fBDEBUG_SHMEM=false|true\fR
|
||||
.br
|
||||
Print information about shared memory buffers.
|
||||
.br
|
||||
Messages are either about creating or enlarging shmem objects or string injections.
|
||||
.br
|
||||
|
||||
\fBDEBUG_GC=false|true\fR
|
||||
.br
|
||||
Print information about garbage collection (GC):
|
||||
.br
|
||||
What is to be removed, how many have been removed and how long did GC take.
|
||||
.br
|
||||
|
||||
\fBDEBUG_ARP=false|true\fR
|
||||
.br
|
||||
Print information about ARP table processing:
|
||||
.br
|
||||
How long did parsing take, whether read MAC addresses are valid, and if the macvendor.db file exists.
|
||||
.br
|
||||
|
||||
\fBDEBUG_REGEX=false|true\fR
|
||||
.br
|
||||
Controls if FTL should print extended details about regex matching.
|
||||
.br
|
||||
|
||||
\fBDEBUG_API=false|true\fR
|
||||
.br
|
||||
Print extra debugging information during telnet API calls.
|
||||
.br
|
||||
Currently only used to send extra information when getting all queries.
|
||||
.br
|
||||
|
||||
\fBDEBUG_OVERTIME=false|true\fR
|
||||
.br
|
||||
Print information about overTime memory operations, such as initializing or moving overTime slots.
|
||||
.br
|
||||
|
||||
\fBDEBUG_EXTBLOCKED=false|true\fR
|
||||
.br
|
||||
Print information about why FTL decided that certain queries were recognized as being externally blocked.
|
||||
.br
|
||||
|
||||
\fBDEBUG_CAPS=false|true\fR
|
||||
.br
|
||||
Print information about POSIX capabilities granted to the FTL process.
|
||||
.br
|
||||
The current capabilities are printed on receipt of SIGHUP i.e. after executing `killall -HUP pihole-FTL`.
|
||||
.br
|
||||
|
||||
\fBDEBUG_DNSMASQ_LINES=false|true\fR
|
||||
.br
|
||||
Print file and line causing a dnsmasq event into FTL's log files.
|
||||
.br
|
||||
This is handy to implement additional hooks missing from FTL.
|
||||
.br
|
||||
|
||||
\fBDEBUG_VECTORS=false|true\fR
|
||||
.br
|
||||
FTL uses dynamically allocated vectors for various tasks.
|
||||
.br
|
||||
This config option enables extensive debugging information such as information about allocation, referencing, deletion, and appending.
|
||||
.br
|
||||
|
||||
\fBDEBUG_RESOLVER=false|true\fR
|
||||
.br
|
||||
Extensive information about hostname resolution like which DNS servers are used in the first and second hostname resolving tries.
|
||||
.br
|
||||
|
||||
.SH "SEE ALSO"
|
||||
|
||||
\fBpihole\fR(8), \fBpihole-FTL\fR(8)
|
||||
.br
|
||||
.SH "COLOPHON"
|
||||
|
||||
Pi-hole : The Faster-Than-Light (FTL) Engine is a lightweight, purpose-built daemon used to provide statistics needed for the Pi-hole Web Interface, and its API can be easily integrated into your own projects. Although it is an optional component of the Pi-hole ecosystem, it will be installed by default to provide statistics. As the name implies, FTL does its work \fIvery quickly\fR!
|
||||
.br
|
||||
|
||||
Get sucked into the latest news and community activity by entering Pi-hole's orbit. Information about Pi-hole, and the latest version of the software can be found at https://pi-hole.net
|
||||
.br
|
pihole (109 lines changed)
@@ -21,6 +21,9 @@ readonly FTL_PID_FILE="/run/pihole-FTL.pid"
|
||||
readonly colfile="${PI_HOLE_SCRIPT_DIR}/COL_TABLE"
|
||||
source "${colfile}"
|
||||
|
||||
utilsfile="${PI_HOLE_SCRIPT_DIR}/utils.sh"
|
||||
source "${utilsfile}"
|
||||
|
||||
webpageFunc() {
|
||||
source "${PI_HOLE_SCRIPT_DIR}/webpage.sh"
|
||||
main "$@"
|
||||
@@ -71,8 +74,7 @@ reconfigurePiholeFunc() {
|
||||
}
|
||||
|
||||
updateGravityFunc() {
|
||||
"${PI_HOLE_SCRIPT_DIR}"/gravity.sh "$@"
|
||||
exit $?
|
||||
exec "${PI_HOLE_SCRIPT_DIR}"/gravity.sh "$@"
|
||||
}
|
||||
|
||||
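The switch from calling the script and re-exiting to `exec` is equivalent for the exit status, but `exec` replaces the wrapper process instead of keeping it around to forward the result. A side-by-side sketch (paths are illustrative):

```
run_then_exit() {
    /opt/pihole/gravity.sh "$@"   # wrapper waits for the child ...
    exit $?                       # ... then forwards its exit status
}

run_via_exec() {
    exec /opt/pihole/gravity.sh "$@"   # wrapper is replaced; never returns on success
}
```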
queryFunc() {
|
||||
@@ -95,8 +97,7 @@ uninstallFunc() {
|
||||
|
||||
versionFunc() {
|
||||
shift
|
||||
"${PI_HOLE_SCRIPT_DIR}"/version.sh "$@"
|
||||
exit 0
|
||||
exec "${PI_HOLE_SCRIPT_DIR}"/version.sh "$@"
|
||||
}
|
||||
|
||||
# Get PID of main pihole-FTL process
|
||||
@@ -225,8 +226,7 @@ Time:
|
||||
fi
|
||||
|
||||
local str="Pi-hole Disabled"
|
||||
sed -i "/BLOCKING_ENABLED=/d" "${setupVars}"
|
||||
echo "BLOCKING_ENABLED=false" >> "${setupVars}"
|
||||
addOrEditKeyValPair "${setupVars}" "BLOCKING_ENABLED" "false"
|
||||
fi
|
||||
else
|
||||
# Enable Pi-hole
|
||||
@@ -238,8 +238,7 @@ Time:
|
||||
echo -e " ${INFO} Enabling blocking"
|
||||
local str="Pi-hole Enabled"
|
||||
|
||||
sed -i "/BLOCKING_ENABLED=/d" "${setupVars}"
|
||||
echo "BLOCKING_ENABLED=true" >> "${setupVars}"
|
||||
addOrEditKeyValPair "${setupVars}" "BLOCKING_ENABLED" "true"
|
||||
fi
|
||||
|
||||
restartDNS reload-lists
|
||||
@@ -261,8 +260,8 @@ Options:
|
||||
exit 0
|
||||
elif [[ "${1}" == "off" ]]; then
|
||||
# Disable logging
|
||||
sed -i 's/^log-queries/#log-queries/' /etc/dnsmasq.d/01-pihole.conf
|
||||
sed -i 's/^QUERY_LOGGING=true/QUERY_LOGGING=false/' /etc/pihole/setupVars.conf
|
||||
addOrEditKeyValPair /etc/dnsmasq.d/01-pihole.conf "log-queries"
|
||||
addOrEditKeyValPair "${setupVars}" "QUERY_LOGGING" "false"
|
||||
if [[ "${2}" != "noflush" ]]; then
|
||||
# Flush logs
|
||||
"${PI_HOLE_BIN_DIR}"/pihole -f
|
||||
@@ -271,8 +270,8 @@ Options:
|
||||
local str="Logging has been disabled!"
|
||||
elif [[ "${1}" == "on" ]]; then
|
||||
# Enable logging
|
||||
sed -i 's/^#log-queries/log-queries/' /etc/dnsmasq.d/01-pihole.conf
|
||||
sed -i 's/^QUERY_LOGGING=false/QUERY_LOGGING=true/' /etc/pihole/setupVars.conf
|
||||
removeKey /etc/dnsmasq.d/01-pihole.conf "log-queries"
|
||||
addOrEditKeyValPair "${setupVars}" "QUERY_LOGGING" "true"
|
||||
echo -e " ${INFO} Enabling logging..."
|
||||
local str="Logging has been enabled!"
|
||||
else
|
||||
@@ -285,27 +284,29 @@ Options:
|
||||
}
|
||||
|
||||
analyze_ports() {
|
||||
local lv4 lv6 port=${1}
|
||||
# FTL is listening on at least one port when this
|
||||
# function is getting called
|
||||
echo -e " ${TICK} DNS service is listening"
|
||||
# Check individual address family/protocol combinations
|
||||
# For a healthy Pi-hole, they should all be up (nothing printed)
|
||||
if grep -q "IPv4.*UDP" <<< "${1}"; then
|
||||
lv4="$(ss --ipv4 --listening --numeric --tcp --udp src :${port})"
|
||||
if grep -q "udp " <<< "${lv4}"; then
|
||||
echo -e " ${TICK} UDP (IPv4)"
|
||||
else
|
||||
echo -e " ${CROSS} UDP (IPv4)"
|
||||
fi
|
||||
if grep -q "IPv4.*TCP" <<< "${1}"; then
|
||||
if grep -q "tcp " <<< "${lv4}"; then
|
||||
echo -e " ${TICK} TCP (IPv4)"
|
||||
else
|
||||
echo -e " ${CROSS} TCP (IPv4)"
|
||||
fi
|
||||
if grep -q "IPv6.*UDP" <<< "${1}"; then
|
||||
lv6="$(ss --ipv6 --listening --numeric --tcp --udp src :${port})"
|
||||
if grep -q "udp " <<< "${lv6}"; then
|
||||
echo -e " ${TICK} UDP (IPv6)"
|
||||
else
|
||||
echo -e " ${CROSS} UDP (IPv6)"
|
||||
fi
|
||||
if grep -q "IPv6.*TCP" <<< "${1}"; then
|
||||
if grep -q "tcp " <<< "${lv6}"; then
|
||||
echo -e " ${TICK} TCP (IPv6)"
|
||||
else
|
||||
echo -e " ${CROSS} TCP (IPv6)"
|
||||
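Each branch above filters the output of `ss` for one address family and protocol. A minimal standalone check for UDP over IPv4, assuming the DNS port 53:

```
port=53
lv4="$(ss --ipv4 --listening --numeric --tcp --udp src :${port})"
if grep -q "udp " <<< "${lv4}"; then
    echo "UDP (IPv4) listener found on port ${port}"
else
    echo "no UDP (IPv4) listener on port ${port}"
fi
```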
@@ -314,19 +315,32 @@ analyze_ports() {
|
||||
}
|
||||
|
||||
statusFunc() {
|
||||
# Determine if a pihole service is listening on port 53
|
||||
local listening
|
||||
listening="$(lsof -Pni:53)"
|
||||
if grep -q "pihole" <<< "${listening}"; then
|
||||
if [[ "${1}" != "web" ]]; then
|
||||
analyze_ports "${listening}"
|
||||
fi
|
||||
else
|
||||
# Determine if the pihole-FTL service is listening
|
||||
local pid port ftl_api_port
|
||||
|
||||
pid="$(getFTLPID)"
|
||||
ftl_api_port="$(getFTLAPIPort)"
|
||||
if [[ "$pid" -eq "-1" ]]; then
|
||||
case "${1}" in
|
||||
"web") echo "-1";;
|
||||
*) echo -e " ${CROSS} DNS service is NOT listening";;
|
||||
*) echo -e " ${CROSS} DNS service is NOT running";;
|
||||
esac
|
||||
return 0
|
||||
else
|
||||
# Get the DNS port pihole-FTL is listening on by using FTL's telnet API
|
||||
port="$(echo ">dns-port >quit" | nc 127.0.0.1 "$ftl_api_port")"
|
||||
if [[ "${port}" == "0" ]]; then
|
||||
case "${1}" in
|
||||
"web") echo "-1";;
|
||||
*) echo -e " ${CROSS} DNS service is NOT listening";;
|
||||
esac
|
||||
return 0
|
||||
else
|
||||
if [[ "${1}" != "web" ]]; then
|
||||
echo -e " ${TICK} FTL is listening on port ${port}"
|
||||
analyze_ports "${port}"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
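Querying the DNS port through FTL's telnet API can be reproduced on its own; 4711 is the default API port, and a reply of `0` means the DNS server is not listening:

```
ftl_api_port=4711
port="$(echo ">dns-port >quit" | nc 127.0.0.1 "${ftl_api_port}")"
if [[ "${port}" == "0" ]]; then
    echo "FTL is running but its DNS server is not listening"
else
    echo "FTL answers DNS on port ${port}"
fi
```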
# Determine if Pi-hole's blocking is enabled
|
||||
@@ -339,18 +353,19 @@ statusFunc() {
|
||||
elif grep -q "BLOCKING_ENABLED=true" /etc/pihole/setupVars.conf; then
|
||||
# Configs are set
|
||||
case "${1}" in
|
||||
"web") echo 1;;
|
||||
"web") echo "$port";;
|
||||
*) echo -e " ${TICK} Pi-hole blocking is enabled";;
|
||||
esac
|
||||
else
|
||||
# No configs were found
|
||||
case "${1}" in
|
||||
"web") echo 99;;
|
||||
"web") echo -2;;
|
||||
*) echo -e " ${INFO} Pi-hole blocking will be enabled";;
|
||||
esac
|
||||
# Enable blocking
|
||||
"${PI_HOLE_BIN_DIR}"/pihole enable
|
||||
fi
|
||||
exit 0
|
||||
}
|
||||
|
||||
tailFunc() {
|
||||
@@ -481,8 +496,38 @@ if [[ $# = 0 ]]; then
|
||||
helpFunc
|
||||
fi
|
||||
|
||||
# functions that do not require sudo power
|
||||
case "${1}" in
|
||||
"-h" | "help" | "--help" ) helpFunc;;
|
||||
"-v" | "version" ) versionFunc "$@";;
|
||||
"-c" | "chronometer" ) chronometerFunc "$@";;
|
||||
"-q" | "query" ) queryFunc "$@";;
|
||||
"status" ) statusFunc "$2";;
|
||||
"-t" | "tail" ) tailFunc "$2";;
|
||||
"tricorder" ) tricorderFunc;;
|
||||
|
||||
# we need to add all arguments that require sudo power to not trigger the * argument
|
||||
"-w" | "whitelist" ) ;;
|
||||
"-b" | "blacklist" ) ;;
|
||||
"--wild" | "wildcard" ) ;;
|
||||
"--regex" | "regex" ) ;;
|
||||
"--white-regex" | "white-regex" ) ;;
|
||||
"--white-wild" | "white-wild" ) ;;
|
||||
"-f" | "flush" ) ;;
|
||||
"-up" | "updatePihole" ) ;;
|
||||
"-r" | "reconfigure" ) ;;
|
||||
"-g" | "updateGravity" ) ;;
|
||||
"-l" | "logging" ) ;;
|
||||
"uninstall" ) ;;
|
||||
"enable" ) ;;
|
||||
"disable" ) ;;
|
||||
"-d" | "debug" ) ;;
|
||||
"restartdns" ) ;;
|
||||
"-a" | "admin" ) ;;
|
||||
"checkout" ) ;;
|
||||
"updatechecker" ) ;;
|
||||
"arpflush" ) ;;
|
||||
* ) helpFunc;;
|
||||
esac
|
||||
|
||||
# Must be root to use this tool
|
||||
@@ -509,21 +554,13 @@ case "${1}" in
|
||||
"-up" | "updatePihole" ) updatePiholeFunc "$@";;
|
||||
"-r" | "reconfigure" ) reconfigurePiholeFunc;;
|
||||
"-g" | "updateGravity" ) updateGravityFunc "$@";;
|
||||
"-c" | "chronometer" ) chronometerFunc "$@";;
|
||||
"-h" | "help" ) helpFunc;;
|
||||
"-v" | "version" ) versionFunc "$@";;
|
||||
"-q" | "query" ) queryFunc "$@";;
|
||||
"-l" | "logging" ) piholeLogging "$@";;
|
||||
"uninstall" ) uninstallFunc;;
|
||||
"enable" ) piholeEnable 1;;
|
||||
"disable" ) piholeEnable 0 "$2";;
|
||||
"status" ) statusFunc "$2";;
|
||||
"restartdns" ) restartDNS "$2";;
|
||||
"-a" | "admin" ) webpageFunc "$@";;
|
||||
"-t" | "tail" ) tailFunc "$2";;
|
||||
"checkout" ) piholeCheckoutFunc "$@";;
|
||||
"tricorder" ) tricorderFunc;;
|
||||
"updatechecker" ) updateCheckFunc "$@";;
|
||||
"arpflush" ) arpFunc "$@";;
|
||||
* ) helpFunc;;
|
||||
esac
|
||||
|
@@ -18,8 +18,8 @@ py.test -vv -n auto -m "build_stage"
|
||||
py.test -vv -n auto -m "not build_stage"
|
||||
```
|
||||
|
||||
The build_stage tests have to run first to create the docker images, followed by the actual tests which utilize said images. Unless you're changing your dockerfiles you shouldn't have to run the build_stage every time - but it's a good idea to rebuild at least once a day in case the base Docker images or packages change.
|
||||
The build_stage tests have to run first to create the docker images, followed by the actual tests which utilize said images. Unless you're changing your dockerfiles you shouldn't have to run the build_stage every time - but it's a good idea to rebuild at least once a day in case the base Docker images or packages change.
|
||||
|
||||
# How do I debug python?
|
||||
|
||||
Highly recommended: Setup PyCharm on a **Docker enabled** machine. Having a python debugger like PyCharm changes your life if you've never used it :)
|
||||
Highly recommended: Setup PyCharm on a **Docker enabled** machine. Having a python debugger like PyCharm changes your life if you've never used it :)
|
||||
|
@@ -1,4 +1,5 @@
|
||||
FROM centos:7
|
||||
RUN yum install -y git
|
||||
|
||||
ENV GITDIR /etc/.pihole
|
||||
ENV SCRIPTDIR /opt/pihole
|
||||
|
@@ -1,4 +1,5 @@
|
||||
FROM centos:8
|
||||
FROM quay.io/centos/centos:stream8
|
||||
RUN yum install -y git
|
||||
|
||||
ENV GITDIR /etc/.pihole
|
||||
ENV SCRIPTDIR /opt/pihole
|
||||
|
@@ -1,4 +1,5 @@
|
||||
FROM fedora:33
|
||||
RUN dnf install -y git
|
||||
|
||||
ENV GITDIR /etc/.pihole
|
||||
ENV SCRIPTDIR /opt/pihole
|
||||
|
@@ -1,4 +1,5 @@
|
||||
FROM fedora:32
|
||||
FROM fedora:34
|
||||
RUN dnf install -y git
|
||||
|
||||
ENV GITDIR /etc/.pihole
|
||||
ENV SCRIPTDIR /opt/pihole
|
@@ -1,4 +1,4 @@
|
||||
FROM buildpack-deps:hirsute-scm
|
||||
FROM buildpack-deps:impish-scm
|
||||
|
||||
ENV GITDIR /etc/.pihole
|
||||
ENV SCRIPTDIR /opt/pihole
|
||||
|
test/conftest.py (164 lines changed)
@@ -1,10 +1,9 @@
|
||||
import pytest
|
||||
import testinfra
|
||||
import testinfra.backend.docker
|
||||
import subprocess
|
||||
from textwrap import dedent
|
||||
|
||||
check_output = testinfra.get_backend(
|
||||
"local://"
|
||||
).get_module("Command").check_output
|
||||
|
||||
SETUPVARS = {
|
||||
'PIHOLE_INTERFACE': 'eth99',
|
||||
@@ -12,85 +11,42 @@ SETUPVARS = {
|
||||
'PIHOLE_DNS_2': '4.2.2.2'
|
||||
}
|
||||
|
||||
IMAGE = 'pytest_pihole:test_container'
|
||||
|
||||
tick_box = "[\x1b[1;32m\u2713\x1b[0m]"
|
||||
cross_box = "[\x1b[1;31m\u2717\x1b[0m]"
|
||||
info_box = "[i]"
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def Pihole(Docker):
|
||||
'''
|
||||
used to contain some script stubbing, now pretty much an alias.
|
||||
Also provides bash as the default run function shell
|
||||
'''
|
||||
def run_bash(self, command, *args, **kwargs):
|
||||
cmd = self.get_command(command, *args)
|
||||
if self.user is not None:
|
||||
out = self.run_local(
|
||||
"docker exec -u %s %s /bin/bash -c %s",
|
||||
self.user, self.name, cmd)
|
||||
else:
|
||||
out = self.run_local(
|
||||
"docker exec %s /bin/bash -c %s", self.name, cmd)
|
||||
out.command = self.encode(cmd)
|
||||
return out
|
||||
# Monkeypatch sh to bash; if they ever support a non-hard-coded /bin/sh this can go away
|
||||
# https://github.com/pytest-dev/pytest-testinfra/blob/master/testinfra/backend/docker.py
|
||||
def run_bash(self, command, *args, **kwargs):
|
||||
cmd = self.get_command(command, *args)
|
||||
if self.user is not None:
|
||||
out = self.run_local(
|
||||
"docker exec -u %s %s /bin/bash -c %s", self.user, self.name, cmd
|
||||
)
|
||||
else:
|
||||
out = self.run_local("docker exec %s /bin/bash -c %s", self.name, cmd)
|
||||
out.command = self.encode(cmd)
|
||||
return out
|
||||
|
||||
funcType = type(Docker.run)
|
||||
Docker.run = funcType(run_bash, Docker)
|
||||
return Docker
|
||||
|
||||
testinfra.backend.docker.DockerBackend.run = run_bash
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def Docker(request, args, image, cmd):
|
||||
'''
|
||||
combine our fixtures into a docker run command and setup finalizer to
|
||||
cleanup
|
||||
'''
|
||||
assert 'docker' in check_output('id'), "Are you in the docker group?"
|
||||
docker_run = "docker run {} {} {}".format(args, image, cmd)
|
||||
docker_id = check_output(docker_run)
|
||||
def host():
|
||||
# run a container
|
||||
docker_id = subprocess.check_output(
|
||||
['docker', 'run', '-t', '-d', '--cap-add=ALL', IMAGE]).decode().strip()
|
||||
|
||||
def teardown():
|
||||
check_output("docker rm -f %s", docker_id)
|
||||
request.addfinalizer(teardown)
|
||||
# return a testinfra connection to the container
|
||||
docker_host = testinfra.get_host("docker://" + docker_id)
|
||||
|
||||
docker_container = testinfra.get_backend("docker://" + docker_id)
|
||||
docker_container.id = docker_id
|
||||
return docker_container
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def args(request):
|
||||
'''
|
||||
-t became required when tput began being used
|
||||
'''
|
||||
return '-t -d'
|
||||
|
||||
|
||||
@pytest.fixture(params=[
|
||||
'test_container'
|
||||
])
|
||||
def tag(request):
|
||||
'''
|
||||
consumed by image to make the test matrix
|
||||
'''
|
||||
return request.param
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
def image(request, tag):
|
||||
'''
|
||||
built by test_000_build_containers.py
|
||||
'''
|
||||
return 'pytest_pihole:{}'.format(tag)
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
def cmd(request):
|
||||
'''
|
||||
default to doing nothing by tailing null, but don't exit
|
||||
'''
|
||||
return 'tail -f /dev/null'
|
||||
yield docker_host
|
||||
# at the end of the test suite, destroy the container
|
||||
subprocess.check_call(['docker', 'rm', '-f', docker_id])
|
||||
|
||||
|
||||
# Helper functions
|
||||
@@ -100,7 +56,7 @@ def mock_command(script, args, container):
|
||||
in unit tests
|
||||
'''
|
||||
full_script_path = '/usr/local/bin/{}'.format(script)
|
||||
mock_script = dedent('''\
|
||||
mock_script = dedent(r'''\
|
||||
#!/bin/bash -e
|
||||
echo "\$0 \$@" >> /var/log/{script}
|
||||
case "\$1" in'''.format(script=script))
|
||||
@@ -121,13 +77,75 @@ def mock_command(script, args, container):
|
||||
scriptlog=script))
|
||||
|
||||
|
||||
def mock_command_passthrough(script, args, container):
|
||||
'''
|
||||
Per other mock_command* functions, allows intercepting of commands we don't want to run for real
|
||||
in unit tests, however also allows only specific arguments to be mocked. Anything not defined will
|
||||
be passed through to the actual command.
|
||||
|
||||
Example use-case: mocking `git pull` but still allowing `git clone` to work as intended
|
||||
'''
|
||||
orig_script_path = container.check_output('command -v {}'.format(script))
|
||||
full_script_path = '/usr/local/bin/{}'.format(script)
|
||||
mock_script = dedent(r'''\
|
||||
#!/bin/bash -e
|
||||
echo "\$0 \$@" >> /var/log/{script}
|
||||
case "\$1" in'''.format(script=script))
|
||||
for k, v in args.items():
|
||||
case = dedent('''
|
||||
{arg})
|
||||
echo {res}
|
||||
exit {retcode}
|
||||
;;'''.format(arg=k, res=v[0], retcode=v[1]))
|
||||
mock_script += case
|
||||
mock_script += dedent(r'''
|
||||
*)
|
||||
{orig_script_path} "\$@"
|
||||
;;'''.format(orig_script_path=orig_script_path))
|
||||
mock_script += dedent('''
|
||||
esac''')
|
||||
container.run('''
|
||||
cat <<EOF> {script}\n{content}\nEOF
|
||||
chmod +x {script}
|
||||
rm -f /var/log/{scriptlog}'''.format(script=full_script_path,
|
||||
content=mock_script,
|
||||
scriptlog=script))
|
||||
|
||||
|
||||
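For illustration, this is roughly the kind of wrapper mock_command_passthrough writes to /usr/local/bin when only `git pull` is mocked; the echoed reply and the real-git path are made up, and the actual content is assembled by the Python above:

```
#!/bin/bash -e
echo "$0 $@" >> /var/log/git          # record every invocation for later assertions
case "$1" in
    pull)
        echo "Already up to date."    # mocked result for the intercepted argument
        exit 0
        ;;
    *)
        /usr/bin/git "$@"             # anything else is passed through to the real git
        ;;
esac
```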
def mock_command_run(script, args, container):
|
||||
'''
|
||||
Allows for setup of commands we don't really want to have to run for real
|
||||
in unit tests
|
||||
'''
|
||||
full_script_path = '/usr/local/bin/{}'.format(script)
|
||||
mock_script = dedent(r'''\
|
||||
#!/bin/bash -e
|
||||
echo "\$0 \$@" >> /var/log/{script}
|
||||
case "\$1 \$2" in'''.format(script=script))
|
||||
for k, v in args.items():
|
||||
case = dedent('''
|
||||
\"{arg}\")
|
||||
echo {res}
|
||||
exit {retcode}
|
||||
;;'''.format(arg=k, res=v[0], retcode=v[1]))
|
||||
mock_script += case
|
||||
mock_script += dedent('''
|
||||
esac''')
|
||||
container.run('''
|
||||
cat <<EOF> {script}\n{content}\nEOF
|
||||
chmod +x {script}
|
||||
rm -f /var/log/{scriptlog}'''.format(script=full_script_path,
|
||||
content=mock_script,
|
||||
scriptlog=script))
|
||||
|
||||
|
||||
def mock_command_2(script, args, container):
|
||||
'''
|
||||
Allows for setup of commands we don't really want to have to run for real
|
||||
in unit tests
|
||||
'''
|
||||
full_script_path = '/usr/local/bin/{}'.format(script)
|
||||
mock_script = dedent('''\
|
||||
mock_script = dedent(r'''\
|
||||
#!/bin/bash -e
|
||||
echo "\$0 \$@" >> /var/log/{script}
|
||||
case "\$1 \$2" in'''.format(script=script))
|
||||
|
@@ -1,6 +1,6 @@
|
||||
docker-compose==1.23.2
|
||||
pytest==4.3.0
|
||||
pytest-xdist==1.26.1
|
||||
pytest-cov==2.6.1
|
||||
testinfra==1.19.0
|
||||
tox==3.7.0
|
||||
docker-compose
|
||||
pytest
|
||||
pytest-xdist
|
||||
pytest-cov
|
||||
pytest-testinfra
|
||||
tox
|
||||
|
test/test_any_automated_install.py (new file, 1150 lines; diff suppressed because it is too large)
test/test_any_utils.py (new file, 56 lines)
@@ -0,0 +1,56 @@
|
||||
def test_key_val_replacement_works(host):
|
||||
''' Confirms addOrEditKeyValPair provides the expected output '''
|
||||
host.run('''
|
||||
source /opt/pihole/utils.sh
|
||||
addOrEditKeyValPair "./testoutput" "KEY_ONE" "value1"
|
||||
addOrEditKeyValPair "./testoutput" "KEY_TWO" "value2"
|
||||
addOrEditKeyValPair "./testoutput" "KEY_ONE" "value3"
|
||||
addOrEditKeyValPair "./testoutput" "KEY_FOUR" "value4"
|
||||
addOrEditKeyValPair "./testoutput" "KEY_FIVE_NO_VALUE"
|
||||
addOrEditKeyValPair "./testoutput" "KEY_FIVE_NO_VALUE"
|
||||
''')
|
||||
output = host.run('''
|
||||
cat ./testoutput
|
||||
''')
|
||||
expected_stdout = 'KEY_ONE=value3\nKEY_TWO=value2\nKEY_FOUR=value4\nKEY_FIVE_NO_VALUE\n'
|
||||
assert expected_stdout == output.stdout
|
||||
|
||||
|
||||
def test_key_val_removal_works(host):
|
||||
''' Confirms removeKey provides the expected output '''
|
||||
host.run('''
|
||||
source /opt/pihole/utils.sh
|
||||
addOrEditKeyValPair "./testoutput" "KEY_ONE" "value1"
|
||||
addOrEditKeyValPair "./testoutput" "KEY_TWO" "value2"
|
||||
addOrEditKeyValPair "./testoutput" "KEY_THREE" "value3"
|
||||
removeKey "./testoutput" "KEY_TWO"
|
||||
''')
|
||||
output = host.run('''
|
||||
cat ./testoutput
|
||||
''')
|
||||
expected_stdout = 'KEY_ONE=value1\nKEY_THREE=value3\n'
|
||||
assert expected_stdout == output.stdout
|
||||
|
||||
|
||||
def test_getFTLAPIPort_default(host):
|
||||
''' Confirms getFTLAPIPort returns the default API port '''
|
||||
output = host.run('''
|
||||
source /opt/pihole/utils.sh
|
||||
getFTLAPIPort
|
||||
''')
|
||||
expected_stdout = '4711\n'
|
||||
assert expected_stdout == output.stdout
|
||||
|
||||
|
||||
def test_getFTLAPIPort_custom(host):
|
||||
''' Confirms getFTLAPIPort returns a custom API port in a custom PORTFILE location '''
|
||||
host.run('''
|
||||
echo "PORTFILE=/tmp/port.file" > /etc/pihole/pihole-FTL.conf
|
||||
echo "1234" > /tmp/port.file
|
||||
''')
|
||||
output = host.run('''
|
||||
source /opt/pihole/utils.sh
|
||||
getFTLAPIPort
|
||||
''')
|
||||
expected_stdout = '1234\n'
|
||||
assert expected_stdout == output.stdout
|
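For readers without the repo at hand, a hypothetical re-implementation that satisfies the behaviour the tests above assert; the real helper lives in /opt/pihole/utils.sh and may be written differently:

```
addOrEditKeyValPair() {
    local file="${1}" key="${2}" value="${3-}"
    if [[ -n "${value}" ]]; then
        if grep -q "^${key}=" "${file}" 2>/dev/null; then
            sed -i "s|^${key}=.*|${key}=${value}|" "${file}"   # update an existing KEY=...
        else
            echo "${key}=${value}" >> "${file}"                # append a new KEY=value
        fi
    else
        # flag-style key without a value: add it only once
        grep -qx "${key}" "${file}" 2>/dev/null || echo "${key}" >> "${file}"
    fi
}
```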
@@ -1,638 +0,0 @@
|
||||
from textwrap import dedent
|
||||
import re
|
||||
from .conftest import (
|
||||
SETUPVARS,
|
||||
tick_box,
|
||||
info_box,
|
||||
cross_box,
|
||||
mock_command,
|
||||
mock_command_2,
|
||||
run_script
|
||||
)
|
||||
|
||||
|
||||
def test_supported_operating_system(Pihole):
|
||||
'''
|
||||
confirm installer exits on unsupported distribution
|
||||
'''
|
||||
# break supported package managers to emulate an unsupported distribution
|
||||
Pihole.run('rm -rf /usr/bin/apt-get')
|
||||
Pihole.run('rm -rf /usr/bin/rpm')
|
||||
package_manager_detect = Pihole.run('''
|
||||
source /opt/pihole/basic-install.sh
|
||||
package_manager_detect
|
||||
''')
|
||||
expected_stdout = cross_box + ' OS distribution not supported'
|
||||
assert expected_stdout in package_manager_detect.stdout
|
||||
# assert package_manager_detect.rc == 1
|
||||
|
||||
|
||||
def test_setupVars_are_sourced_to_global_scope(Pihole):
|
||||
'''
|
||||
currently update_dialogs sources setupVars with a dot,
|
||||
then various other functions use the variables.
|
||||
This confirms the sourced variables are in scope between functions
|
||||
'''
|
||||
setup_var_file = 'cat <<EOF> /etc/pihole/setupVars.conf\n'
|
||||
for k, v in SETUPVARS.items():
|
||||
setup_var_file += "{}={}\n".format(k, v)
|
||||
setup_var_file += "EOF\n"
|
||||
Pihole.run(setup_var_file)
|
||||
|
||||
script = dedent('''\
|
||||
set -e
|
||||
printSetupVars() {
|
||||
# Currently debug test function only
|
||||
echo "Outputting sourced variables"
|
||||
echo "PIHOLE_INTERFACE=${PIHOLE_INTERFACE}"
|
||||
echo "PIHOLE_DNS_1=${PIHOLE_DNS_1}"
|
||||
echo "PIHOLE_DNS_2=${PIHOLE_DNS_2}"
|
||||
}
|
||||
update_dialogs() {
|
||||
. /etc/pihole/setupVars.conf
|
||||
}
|
||||
update_dialogs
|
||||
printSetupVars
|
||||
''')
|
||||
|
||||
output = run_script(Pihole, script).stdout
|
||||
|
||||
for k, v in SETUPVARS.items():
|
||||
assert "{}={}".format(k, v) in output
|
||||
|
||||
|
||||
def test_setupVars_saved_to_file(Pihole):
|
||||
'''
|
||||
confirm saved settings are written to a file for future updates to re-use
|
||||
'''
|
||||
# dedent works better with this and padding matching script below
|
||||
set_setup_vars = '\n'
|
||||
for k, v in SETUPVARS.items():
|
||||
set_setup_vars += " {}={}\n".format(k, v)
|
||||
Pihole.run(set_setup_vars).stdout
|
||||
|
||||
script = dedent('''\
|
||||
set -e
|
||||
echo start
|
||||
TERM=xterm
|
||||
source /opt/pihole/basic-install.sh
|
||||
{}
|
||||
mkdir -p /etc/dnsmasq.d
|
||||
version_check_dnsmasq
|
||||
echo "" > /etc/pihole/pihole-FTL.conf
|
||||
finalExports
|
||||
cat /etc/pihole/setupVars.conf
|
||||
'''.format(set_setup_vars))
|
||||
|
||||
output = run_script(Pihole, script).stdout
|
||||
|
||||
for k, v in SETUPVARS.items():
|
||||
assert "{}={}".format(k, v) in output
|
||||
|
||||
|
||||
def test_selinux_not_detected(Pihole):
|
||||
'''
|
||||
confirms installer continues when SELinux configuration file does not exist
|
||||
'''
|
||||
check_selinux = Pihole.run('''
|
||||
rm -f /etc/selinux/config
|
||||
source /opt/pihole/basic-install.sh
|
||||
checkSelinux
|
||||
''')
|
||||
expected_stdout = info_box + ' SELinux not detected'
|
||||
assert expected_stdout in check_selinux.stdout
|
||||
assert check_selinux.rc == 0
|
||||
|
||||
|
||||
def test_installPiholeWeb_fresh_install_no_errors(Pihole):
|
||||
'''
|
||||
confirms all web page assets from Core repo are installed on a fresh build
|
||||
'''
|
||||
installWeb = Pihole.run('''
|
||||
source /opt/pihole/basic-install.sh
|
||||
installPiholeWeb
|
||||
''')
|
||||
expected_stdout = info_box + ' Installing blocking page...'
|
||||
assert expected_stdout in installWeb.stdout
|
||||
expected_stdout = tick_box + (' Creating directory for blocking page, '
|
||||
'and copying files')
|
||||
assert expected_stdout in installWeb.stdout
|
||||
expected_stdout = info_box + ' Backing up index.lighttpd.html'
|
||||
assert expected_stdout in installWeb.stdout
|
||||
expected_stdout = ('No default index.lighttpd.html file found... '
|
||||
'not backing up')
|
||||
assert expected_stdout in installWeb.stdout
|
||||
expected_stdout = tick_box + ' Installing sudoer file'
|
||||
assert expected_stdout in installWeb.stdout
|
||||
web_directory = Pihole.run('ls -r /var/www/html/pihole').stdout
|
||||
assert 'index.php' in web_directory
|
||||
assert 'blockingpage.css' in web_directory
|
||||
|
||||
|
||||
def test_update_package_cache_success_no_errors(Pihole):
|
||||
'''
|
||||
confirms package cache was updated without any errors
|
||||
'''
|
||||
updateCache = Pihole.run('''
|
||||
source /opt/pihole/basic-install.sh
|
||||
package_manager_detect
|
||||
update_package_cache
|
||||
''')
|
||||
expected_stdout = tick_box + ' Update local cache of available packages'
|
||||
assert expected_stdout in updateCache.stdout
|
||||
assert 'error' not in updateCache.stdout.lower()
|
||||
|
||||
|
||||
def test_update_package_cache_failure_no_errors(Pihole):
|
||||
'''
|
||||
confirms package cache was not updated
|
||||
'''
|
||||
mock_command('apt-get', {'update': ('', '1')}, Pihole)
|
||||
updateCache = Pihole.run('''
|
||||
source /opt/pihole/basic-install.sh
|
||||
package_manager_detect
|
||||
update_package_cache
|
||||
''')
|
||||
expected_stdout = cross_box + ' Update local cache of available packages'
|
||||
assert expected_stdout in updateCache.stdout
|
||||
assert 'Error: Unable to update package cache.' in updateCache.stdout
|
||||
|
||||
|
||||
def test_FTL_detect_aarch64_no_errors(Pihole):
|
||||
'''
|
||||
confirms only aarch64 package is downloaded for FTL engine
|
||||
'''
|
||||
# mock uname to return aarch64 platform
|
||||
mock_command('uname', {'-m': ('aarch64', '0')}, Pihole)
|
||||
# mock ldd to respond with aarch64 shared library
|
||||
mock_command(
|
||||
'ldd',
|
||||
{
|
||||
'/bin/ls': (
|
||||
'/lib/ld-linux-aarch64.so.1',
|
||||
'0'
|
||||
)
|
||||
},
|
||||
Pihole
|
||||
)
|
||||
detectPlatform = Pihole.run('''
|
||||
source /opt/pihole/basic-install.sh
|
||||
create_pihole_user
|
||||
funcOutput=$(get_binary_name)
|
||||
binary="pihole-FTL${funcOutput##*pihole-FTL}"
|
||||
theRest="${funcOutput%pihole-FTL*}"
|
||||
FTLdetect "${binary}" "${theRest}"
|
||||
''')
|
||||
expected_stdout = info_box + ' FTL Checks...'
|
||||
assert expected_stdout in detectPlatform.stdout
|
||||
expected_stdout = tick_box + ' Detected AArch64 (64 Bit ARM) processor'
|
||||
assert expected_stdout in detectPlatform.stdout
|
||||
expected_stdout = tick_box + ' Downloading and Installing FTL'
|
||||
assert expected_stdout in detectPlatform.stdout
|
||||
|
||||
|
||||
def test_FTL_detect_armv4t_no_errors(Pihole):
|
||||
'''
|
||||
confirms only armv4t package is downloaded for FTL engine
|
||||
'''
|
||||
# mock uname to return armv4t platform
|
||||
mock_command('uname', {'-m': ('armv4t', '0')}, Pihole)
|
||||
# mock ldd to respond with ld-linux shared library
|
||||
mock_command('ldd', {'/bin/ls': ('/lib/ld-linux.so.3', '0')}, Pihole)
|
||||
detectPlatform = Pihole.run('''
|
||||
source /opt/pihole/basic-install.sh
|
||||
create_pihole_user
|
||||
funcOutput=$(get_binary_name)
|
||||
binary="pihole-FTL${funcOutput##*pihole-FTL}"
|
||||
theRest="${funcOutput%pihole-FTL*}"
|
||||
FTLdetect "${binary}" "${theRest}"
|
||||
''')
|
||||
expected_stdout = info_box + ' FTL Checks...'
|
||||
assert expected_stdout in detectPlatform.stdout
|
||||
expected_stdout = tick_box + (' Detected ARMv4 processor')
|
||||
assert expected_stdout in detectPlatform.stdout
|
||||
expected_stdout = tick_box + ' Downloading and Installing FTL'
|
||||
assert expected_stdout in detectPlatform.stdout
|
||||
|
||||
|
||||
def test_FTL_detect_armv5te_no_errors(Pihole):
|
||||
'''
|
||||
confirms only armv5te package is downloaded for FTL engine
|
||||
'''
|
||||
# mock uname to return armv5te platform
|
||||
mock_command('uname', {'-m': ('armv5te', '0')}, Pihole)
|
||||
# mock ldd to respond with ld-linux shared library
|
||||
mock_command('ldd', {'/bin/ls': ('/lib/ld-linux.so.3', '0')}, Pihole)
|
||||
detectPlatform = Pihole.run('''
|
||||
source /opt/pihole/basic-install.sh
|
||||
create_pihole_user
|
||||
funcOutput=$(get_binary_name)
|
||||
binary="pihole-FTL${funcOutput##*pihole-FTL}"
|
||||
theRest="${funcOutput%pihole-FTL*}"
|
||||
FTLdetect "${binary}" "${theRest}"
|
||||
''')
|
||||
expected_stdout = info_box + ' FTL Checks...'
|
||||
assert expected_stdout in detectPlatform.stdout
|
||||
expected_stdout = tick_box + (' Detected ARMv5 (or newer) processor')
|
||||
assert expected_stdout in detectPlatform.stdout
|
||||
expected_stdout = tick_box + ' Downloading and Installing FTL'
|
||||
assert expected_stdout in detectPlatform.stdout
|
||||
|
||||
|
||||
def test_FTL_detect_armv6l_no_errors(Pihole):
|
||||
'''
|
||||
confirms only armv6l package is downloaded for FTL engine
|
||||
'''
|
||||
# mock uname to return armv6l platform
|
||||
mock_command('uname', {'-m': ('armv6l', '0')}, Pihole)
|
||||
# mock ldd to respond with ld-linux-armhf shared library
|
||||
mock_command('ldd', {'/bin/ls': ('/lib/ld-linux-armhf.so.3', '0')}, Pihole)
|
||||
detectPlatform = Pihole.run('''
|
||||
source /opt/pihole/basic-install.sh
|
||||
create_pihole_user
|
||||
funcOutput=$(get_binary_name)
|
||||
binary="pihole-FTL${funcOutput##*pihole-FTL}"
|
||||
theRest="${funcOutput%pihole-FTL*}"
|
||||
FTLdetect "${binary}" "${theRest}"
|
||||
''')
|
||||
expected_stdout = info_box + ' FTL Checks...'
|
||||
assert expected_stdout in detectPlatform.stdout
|
||||
expected_stdout = tick_box + (' Detected ARMv6 processor '
|
||||
'(with hard-float support)')
|
||||
assert expected_stdout in detectPlatform.stdout
|
||||
expected_stdout = tick_box + ' Downloading and Installing FTL'
|
||||
assert expected_stdout in detectPlatform.stdout
|
||||
|
||||
|
||||
def test_FTL_detect_armv7l_no_errors(Pihole):
|
||||
'''
|
||||
confirms only armv7l package is downloaded for FTL engine
|
||||
'''
|
||||
# mock uname to return armv7l platform
|
||||
mock_command('uname', {'-m': ('armv7l', '0')}, Pihole)
|
||||
# mock ldd to respond with ld-linux-armhf shared library
|
||||
mock_command('ldd', {'/bin/ls': ('/lib/ld-linux-armhf.so.3', '0')}, Pihole)
|
||||
detectPlatform = Pihole.run('''
|
||||
source /opt/pihole/basic-install.sh
|
||||
create_pihole_user
|
||||
funcOutput=$(get_binary_name)
|
||||
binary="pihole-FTL${funcOutput##*pihole-FTL}"
|
||||
theRest="${funcOutput%pihole-FTL*}"
|
||||
FTLdetect "${binary}" "${theRest}"
|
||||
''')
|
||||
expected_stdout = info_box + ' FTL Checks...'
|
||||
assert expected_stdout in detectPlatform.stdout
|
||||
expected_stdout = tick_box + (' Detected ARMv7 processor '
|
||||
'(with hard-float support)')
|
||||
assert expected_stdout in detectPlatform.stdout
|
||||
expected_stdout = tick_box + ' Downloading and Installing FTL'
|
||||
assert expected_stdout in detectPlatform.stdout
|
||||
|
||||
|
||||
def test_FTL_detect_armv8a_no_errors(Pihole):
    '''
    confirms only armv8a package is downloaded for FTL engine
    '''
    # mock uname to return armv8a platform
    mock_command('uname', {'-m': ('armv8a', '0')}, Pihole)
    # mock ldd to respond with ld-linux-armhf shared library
    mock_command('ldd', {'/bin/ls': ('/lib/ld-linux-armhf.so.3', '0')}, Pihole)
    detectPlatform = Pihole.run('''
    source /opt/pihole/basic-install.sh
    create_pihole_user
    funcOutput=$(get_binary_name)
    binary="pihole-FTL${funcOutput##*pihole-FTL}"
    theRest="${funcOutput%pihole-FTL*}"
    FTLdetect "${binary}" "${theRest}"
    ''')
    expected_stdout = info_box + ' FTL Checks...'
    assert expected_stdout in detectPlatform.stdout
    expected_stdout = tick_box + ' Detected ARMv8 (or newer) processor'
    assert expected_stdout in detectPlatform.stdout
    expected_stdout = tick_box + ' Downloading and Installing FTL'
    assert expected_stdout in detectPlatform.stdout

def test_FTL_detect_x86_64_no_errors(Pihole):
    '''
    confirms only x86_64 package is downloaded for FTL engine
    '''
    detectPlatform = Pihole.run('''
    source /opt/pihole/basic-install.sh
    create_pihole_user
    funcOutput=$(get_binary_name)
    binary="pihole-FTL${funcOutput##*pihole-FTL}"
    theRest="${funcOutput%pihole-FTL*}"
    FTLdetect "${binary}" "${theRest}"
    ''')
    expected_stdout = info_box + ' FTL Checks...'
    assert expected_stdout in detectPlatform.stdout
    expected_stdout = tick_box + ' Detected x86_64 processor'
    assert expected_stdout in detectPlatform.stdout
    expected_stdout = tick_box + ' Downloading and Installing FTL'
    assert expected_stdout in detectPlatform.stdout

def test_FTL_detect_unknown_no_errors(Pihole):
    ''' confirms only generic package is downloaded for FTL engine '''
    # mock uname to return generic platform
    mock_command('uname', {'-m': ('mips', '0')}, Pihole)
    detectPlatform = Pihole.run('''
    source /opt/pihole/basic-install.sh
    create_pihole_user
    funcOutput=$(get_binary_name)
    binary="pihole-FTL${funcOutput##*pihole-FTL}"
    theRest="${funcOutput%pihole-FTL*}"
    FTLdetect "${binary}" "${theRest}"
    ''')
    expected_stdout = 'Not able to detect processor (unknown: mips)'
    assert expected_stdout in detectPlatform.stdout

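
# The two bash parameter expansions used throughout these tests split the
# output of get_binary_name into "everything before" and "the final binary
# name". Expressed in Python for clarity (illustrative only; the tests run
# the bash shown above, and the sample value below is hypothetical):
funcOutput = 'some installer chatter...\npihole-FTL-armv5te-linux-gnueabi'
theRest, sep, tail = funcOutput.rpartition('pihole-FTL')
# binary mirrors "pihole-FTL${funcOutput##*pihole-FTL}": the text after the
# last occurrence of 'pihole-FTL', with the prefix re-attached.
binary = sep + tail
# theRest mirrors "${funcOutput%pihole-FTL*}": everything that was printed
# before that final binary name.
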
def test_FTL_download_aarch64_no_errors(Pihole):
    '''
    confirms only aarch64 package is downloaded for FTL engine
    '''
    # mock whiptail answers and ensure installer dependencies
    mock_command('whiptail', {'*': ('', '0')}, Pihole)
    Pihole.run('''
    source /opt/pihole/basic-install.sh
    package_manager_detect
    install_dependent_packages ${INSTALLER_DEPS[@]}
    ''')
    download_binary = Pihole.run('''
    source /opt/pihole/basic-install.sh
    create_pihole_user
    FTLinstall "pihole-FTL-aarch64-linux-gnu"
    ''')
    expected_stdout = tick_box + ' Downloading and Installing FTL'
    assert expected_stdout in download_binary.stdout
    assert 'error' not in download_binary.stdout.lower()

def test_FTL_binary_installed_and_responsive_no_errors(Pihole):
    '''
    confirms FTL binary is copied and functional in installed location
    '''
    installed_binary = Pihole.run('''
    source /opt/pihole/basic-install.sh
    create_pihole_user
    funcOutput=$(get_binary_name)
    binary="pihole-FTL${funcOutput##*pihole-FTL}"
    theRest="${funcOutput%pihole-FTL*}"
    FTLdetect "${binary}" "${theRest}"
    pihole-FTL version
    ''')
    expected_stdout = 'v'
    assert expected_stdout in installed_binary.stdout


# def test_FTL_support_files_installed(Pihole):
#     '''
#     confirms FTL support files are installed
#     '''
#     support_files = Pihole.run('''
#     source /opt/pihole/basic-install.sh
#     FTLdetect
#     stat -c '%a %n' /var/log/pihole-FTL.log
#     stat -c '%a %n' /run/pihole-FTL.port
#     stat -c '%a %n' /run/pihole-FTL.pid
#     ls -lac /run
#     ''')
#     assert '644 /run/pihole-FTL.port' in support_files.stdout
#     assert '644 /run/pihole-FTL.pid' in support_files.stdout
#     assert '644 /var/log/pihole-FTL.log' in support_files.stdout

def test_IPv6_only_link_local(Pihole):
    '''
    confirms IPv6 blocking is disabled for Link-local address
    '''
    # mock ip -6 address to return Link-local address
    mock_command_2(
        'ip',
        {
            '-6 address': (
                'inet6 fe80::d210:52fa:fe00:7ad7/64 scope link',
                '0'
            )
        },
        Pihole
    )
    detectPlatform = Pihole.run('''
    source /opt/pihole/basic-install.sh
    useIPv6dialog
    ''')
    expected_stdout = ('Unable to find IPv6 ULA/GUA address, '
                       'IPv6 adblocking will not be enabled')
    assert expected_stdout in detectPlatform.stdout


def test_IPv6_only_ULA(Pihole):
    '''
    confirms IPv6 blocking is enabled for ULA addresses
    '''
    # mock ip -6 address to return ULA address
    mock_command_2(
        'ip',
        {
            '-6 address': (
                'inet6 fda2:2001:5555:0:d210:52fa:fe00:7ad7/64 scope global',
                '0'
            )
        },
        Pihole
    )
    detectPlatform = Pihole.run('''
    source /opt/pihole/basic-install.sh
    useIPv6dialog
    ''')
    expected_stdout = 'Found IPv6 ULA address, using it for blocking IPv6 ads'
    assert expected_stdout in detectPlatform.stdout

def test_IPv6_only_GUA(Pihole):
    '''
    confirms IPv6 blocking is enabled for GUA addresses
    '''
    # mock ip -6 address to return GUA address
    mock_command_2(
        'ip',
        {
            '-6 address': (
                'inet6 2003:12:1e43:301:d210:52fa:fe00:7ad7/64 scope global',
                '0'
            )
        },
        Pihole
    )
    detectPlatform = Pihole.run('''
    source /opt/pihole/basic-install.sh
    useIPv6dialog
    ''')
    expected_stdout = 'Found IPv6 GUA address, using it for blocking IPv6 ads'
    assert expected_stdout in detectPlatform.stdout


def test_IPv6_GUA_ULA_test(Pihole):
    '''
    confirms IPv6 blocking is enabled for GUA and ULA addresses
    '''
    # mock ip -6 address to return GUA and ULA addresses
    mock_command_2(
        'ip',
        {
            '-6 address': (
                'inet6 2003:12:1e43:301:d210:52fa:fe00:7ad7/64 scope global\n'
                'inet6 fda2:2001:5555:0:d210:52fa:fe00:7ad7/64 scope global',
                '0'
            )
        },
        Pihole
    )
    detectPlatform = Pihole.run('''
    source /opt/pihole/basic-install.sh
    useIPv6dialog
    ''')
    expected_stdout = 'Found IPv6 ULA address, using it for blocking IPv6 ads'
    assert expected_stdout in detectPlatform.stdout


def test_IPv6_ULA_GUA_test(Pihole):
    '''
    confirms IPv6 blocking is enabled for GUA and ULA addresses
    '''
    # mock ip -6 address to return ULA and GUA addresses
    mock_command_2(
        'ip',
        {
            '-6 address': (
                'inet6 fda2:2001:5555:0:d210:52fa:fe00:7ad7/64 scope global\n'
                'inet6 2003:12:1e43:301:d210:52fa:fe00:7ad7/64 scope global',
                '0'
            )
        },
        Pihole
    )
    detectPlatform = Pihole.run('''
    source /opt/pihole/basic-install.sh
    useIPv6dialog
    ''')
    expected_stdout = 'Found IPv6 ULA address, using it for blocking IPv6 ads'
    assert expected_stdout in detectPlatform.stdout

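
# The five IPv6 tests above pin down the selection behaviour: a ULA wins over
# a GUA, and a link-local-only host gets no IPv6 blocking address at all.
# A rough, illustrative approximation of that behaviour in Python (not the
# installer's bash in useIPv6dialog):
import ipaddress

def pick_ipv6_blocking_address(ip_6_output):
    '''Prefer a ULA (fc00::/7), fall back to a GUA (2000::/3), else None.'''
    ula = gua = None
    for line in ip_6_output.splitlines():
        fields = line.split()
        if not fields or fields[0] != 'inet6' or 'global' not in fields:
            continue  # skip link-local and non-inet6 lines
        addr = ipaddress.ip_interface(fields[1]).ip
        if addr in ipaddress.ip_network('fc00::/7'):
            ula = ula or addr
        elif addr in ipaddress.ip_network('2000::/3'):
            gua = gua or addr
    return ula or gua
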
def test_validate_ip(Pihole):
    '''
    Tests valid_ip for various IP addresses
    '''

    def test_address(addr, success=True):
        output = Pihole.run('''
        source /opt/pihole/basic-install.sh
        valid_ip "{addr}"
        '''.format(addr=addr))

        # valid_ip should exit zero only for the addresses marked valid here
        assert (output.rc == 0) == success

    test_address('192.168.1.1')
    test_address('127.0.0.1')
    test_address('255.255.255.255')
    test_address('255.255.255.256', False)
    test_address('255.255.256.255', False)
    test_address('255.256.255.255', False)
    test_address('256.255.255.255', False)
    test_address('1092.168.1.1', False)
    test_address('not an IP', False)
    test_address('8.8.8.8#', False)
    test_address('8.8.8.8#0')
    test_address('8.8.8.8#1')
    test_address('8.8.8.8#42')
    test_address('8.8.8.8#888')
    test_address('8.8.8.8#1337')
    test_address('8.8.8.8#65535')
    test_address('8.8.8.8#65536', False)
    test_address('8.8.8.8#-1', False)
    test_address('00.0.0.0', False)
    test_address('010.0.0.0', False)
    test_address('001.0.0.0', False)
    test_address('0.0.0.0#00', False)
    test_address('0.0.0.0#01', False)
    test_address('0.0.0.0#001', False)
    test_address('0.0.0.0#0001', False)
    test_address('0.0.0.0#00001', False)

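
# valid_ip itself is a bash regex in basic-install.sh; a minimal Python
# approximation consistent with the cases exercised above (dotted-quad IPv4
# with no leading zeros, optionally followed by '#' and a 0-65535 port, also
# with no leading zeros) could look like this -- a sketch, not the installer:
import re

_OCTET = r'(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)'
_PORT = r'(6553[0-5]|655[0-2]\d|65[0-4]\d{2}|6[0-4]\d{3}|[1-5]\d{4}|[1-9]\d{0,3}|0)'
_VALID_IP = re.compile(r'{o}\.{o}\.{o}\.{o}(#{p})?'.format(o=_OCTET, p=_PORT))

def valid_ip_py(addr):
    '''Return True for the addresses the tests above mark as valid.'''
    return _VALID_IP.fullmatch(addr) is not None
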
def test_os_check_fails(Pihole):
    ''' Confirms install fails on unsupported OS '''
    Pihole.run('''
source /opt/pihole/basic-install.sh
package_manager_detect
install_dependent_packages ${OS_CHECK_DEPS[@]}
install_dependent_packages ${INSTALLER_DEPS[@]}
cat <<EOT > /etc/os-release
ID=UnsupportedOS
VERSION_ID="2"
EOT
    ''')
    detectOS = Pihole.run('''
    source /opt/pihole/basic-install.sh
    os_check
    ''')
    expected_stdout = 'Unsupported OS detected: UnsupportedOS'
    assert expected_stdout in detectOS.stdout


def test_os_check_passes(Pihole):
    ''' Confirms OS meets the requirements '''
    Pihole.run('''
    source /opt/pihole/basic-install.sh
    package_manager_detect
    install_dependent_packages ${OS_CHECK_DEPS[@]}
    install_dependent_packages ${INSTALLER_DEPS[@]}
    ''')
    detectOS = Pihole.run('''
    source /opt/pihole/basic-install.sh
    os_check
    ''')
    expected_stdout = 'Supported OS detected'
    assert expected_stdout in detectOS.stdout

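
# os_check reads ID and VERSION_ID from /etc/os-release (which is why the
# failing case above fakes that file) and compares them against the list of
# supported distributions. A rough Python parallel of the parsing step only,
# as a sketch -- the real check is bash in basic-install.sh:
def parse_os_release(text):
    '''Turn KEY=VALUE lines (values possibly quoted) into a dict.'''
    info = {}
    for line in text.splitlines():
        if '=' in line and not line.startswith('#'):
            key, _, value = line.partition('=')
            info[key.strip()] = value.strip().strip('"')
    return info

# parse_os_release('ID=UnsupportedOS\nVERSION_ID="2"')
# -> {'ID': 'UnsupportedOS', 'VERSION_ID': '2'}
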
def test_package_manager_has_installer_deps(Pihole):
    ''' Confirms OS is able to install the required packages for the installer'''
    mock_command('whiptail', {'*': ('', '0')}, Pihole)
    output = Pihole.run('''
    source /opt/pihole/basic-install.sh
    package_manager_detect
    install_dependent_packages ${INSTALLER_DEPS[@]}
    ''')

    assert 'No package' not in output.stdout  # centos7 still exits 0...
    assert output.rc == 0


def test_package_manager_has_pihole_deps(Pihole):
    ''' Confirms OS is able to install the required packages for Pi-hole '''
    mock_command('whiptail', {'*': ('', '0')}, Pihole)
    output = Pihole.run('''
    source /opt/pihole/basic-install.sh
    package_manager_detect
    install_dependent_packages ${PIHOLE_DEPS[@]}
    ''')

    assert 'No package' not in output.stdout  # centos7 still exits 0...
    assert output.rc == 0


def test_package_manager_has_web_deps(Pihole):
    ''' Confirms OS is able to install the required packages for web '''
    mock_command('whiptail', {'*': ('', '0')}, Pihole)
    output = Pihole.run('''
    source /opt/pihole/basic-install.sh
    package_manager_detect
    install_dependent_packages ${PIHOLE_WEB_DEPS[@]}
    ''')

    assert 'No package' not in output.stdout  # centos7 still exits 0...
    assert output.rc == 0

@@ -5,49 +5,52 @@ from .conftest import (
)


-def test_php_upgrade_default_optout_centos_eq_7(Pihole):
+def test_php_upgrade_default_optout_centos_eq_7(host):
    '''
    confirms the default behavior to opt-out of installing PHP7 from REMI
    '''
-    package_manager_detect = Pihole.run('''
+    package_manager_detect = host.run('''
    source /opt/pihole/basic-install.sh
    package_manager_detect
    select_rpm_php
    ''')
    expected_stdout = info_box + (' User opt-out of PHP 7 upgrade on CentOS. '
                                  'Deprecated PHP may be in use.')
    assert expected_stdout in package_manager_detect.stdout
-    remi_package = Pihole.package('remi-release')
+    remi_package = host.package('remi-release')
    assert not remi_package.is_installed


-def test_php_upgrade_user_optout_centos_eq_7(Pihole):
+def test_php_upgrade_user_optout_centos_eq_7(host):
    '''
    confirms installer behavior when user opt-out of installing PHP7 from REMI
    (php not currently installed)
    '''
    # Whiptail dialog returns Cancel for user prompt
-    mock_command('whiptail', {'*': ('', '1')}, Pihole)
-    package_manager_detect = Pihole.run('''
+    mock_command('whiptail', {'*': ('', '1')}, host)
+    package_manager_detect = host.run('''
    source /opt/pihole/basic-install.sh
    package_manager_detect
    select_rpm_php
    ''')
    expected_stdout = info_box + (' User opt-out of PHP 7 upgrade on CentOS. '
                                  'Deprecated PHP may be in use.')
    assert expected_stdout in package_manager_detect.stdout
-    remi_package = Pihole.package('remi-release')
+    remi_package = host.package('remi-release')
    assert not remi_package.is_installed


-def test_php_upgrade_user_optin_centos_eq_7(Pihole):
+def test_php_upgrade_user_optin_centos_eq_7(host):
    '''
    confirms installer behavior when user opt-in to installing PHP7 from REMI
    (php not currently installed)
    '''
    # Whiptail dialog returns Continue for user prompt
-    mock_command('whiptail', {'*': ('', '0')}, Pihole)
-    package_manager_detect = Pihole.run('''
+    mock_command('whiptail', {'*': ('', '0')}, host)
+    package_manager_detect = host.run('''
    source /opt/pihole/basic-install.sh
    package_manager_detect
    select_rpm_php
    ''')
    assert 'opt-out' not in package_manager_detect.stdout
    expected_stdout = info_box + (' Enabling Remi\'s RPM repository '
@@ -56,5 +59,5 @@ def test_php_upgrade_user_optin_centos_eq_7(Pihole):
    expected_stdout = tick_box + (' Remi\'s RPM repository has '
                                  'been enabled for PHP7')
    assert expected_stdout in package_manager_detect.stdout
-    remi_package = Pihole.package('remi-release')
+    remi_package = host.package('remi-release')
    assert remi_package.is_installed
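
# The hunks in this file (and the ones below) rename the custom `Pihole`
# fixture to `host`, the fixture name that pytest-testinfra provides.
# A minimal usage sketch of that API, assuming testinfra is supplying the
# fixture (illustrative only, not part of the suite):
def test_host_fixture_sketch(host):
    result = host.run('echo hello')   # CommandResult: .rc, .stdout, .stderr
    assert result.rc == 0
    pkg = host.package('bash')        # Package: .is_installed, .version
    assert pkg.is_installed
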
@@ -5,54 +5,57 @@ from .conftest import (
)


-def test_php_upgrade_default_continue_centos_gte_8(Pihole):
+def test_php_upgrade_default_continue_centos_gte_8(host):
    '''
    confirms the latest version of CentOS continues / does not optout
    (should trigger on CentOS7 only)
    '''
-    package_manager_detect = Pihole.run('''
+    package_manager_detect = host.run('''
    source /opt/pihole/basic-install.sh
    package_manager_detect
    select_rpm_php
    ''')
    unexpected_stdout = info_box + (' User opt-out of PHP 7 upgrade on CentOS.'
                                    ' Deprecated PHP may be in use.')
    assert unexpected_stdout not in package_manager_detect.stdout
    # ensure remi was not installed on latest CentOS
-    remi_package = Pihole.package('remi-release')
+    remi_package = host.package('remi-release')
    assert not remi_package.is_installed


-def test_php_upgrade_user_optout_skipped_centos_gte_8(Pihole):
+def test_php_upgrade_user_optout_skipped_centos_gte_8(host):
    '''
    confirms installer skips user opt-out of installing PHP7 from REMI on
    latest CentOS (should trigger on CentOS7 only)
    (php not currently installed)
    '''
    # Whiptail dialog returns Cancel for user prompt
-    mock_command('whiptail', {'*': ('', '1')}, Pihole)
-    package_manager_detect = Pihole.run('''
+    mock_command('whiptail', {'*': ('', '1')}, host)
+    package_manager_detect = host.run('''
    source /opt/pihole/basic-install.sh
    package_manager_detect
    select_rpm_php
    ''')
    unexpected_stdout = info_box + (' User opt-out of PHP 7 upgrade on CentOS.'
                                    ' Deprecated PHP may be in use.')
    assert unexpected_stdout not in package_manager_detect.stdout
    # ensure remi was not installed on latest CentOS
-    remi_package = Pihole.package('remi-release')
+    remi_package = host.package('remi-release')
    assert not remi_package.is_installed


-def test_php_upgrade_user_optin_skipped_centos_gte_8(Pihole):
+def test_php_upgrade_user_optin_skipped_centos_gte_8(host):
    '''
    confirms installer skips user opt-in to installing PHP7 from REMI on
    latest CentOS (should trigger on CentOS7 only)
    (php not currently installed)
    '''
    # Whiptail dialog returns Continue for user prompt
-    mock_command('whiptail', {'*': ('', '0')}, Pihole)
-    package_manager_detect = Pihole.run('''
+    mock_command('whiptail', {'*': ('', '0')}, host)
+    package_manager_detect = host.run('''
    source /opt/pihole/basic-install.sh
    package_manager_detect
    select_rpm_php
    ''')
    assert 'opt-out' not in package_manager_detect.stdout
    unexpected_stdout = info_box + (' Enabling Remi\'s RPM repository '
@@ -61,5 +64,5 @@ def test_php_upgrade_user_optin_skipped_centos_gte_8(Pihole):
    unexpected_stdout = tick_box + (' Remi\'s RPM repository has '
                                    'been enabled for PHP7')
    assert unexpected_stdout not in package_manager_detect.stdout
-    remi_package = Pihole.package('remi-release')
+    remi_package = host.package('remi-release')
    assert not remi_package.is_installed
@@ -7,15 +7,16 @@ from .conftest import (
)


-def test_release_supported_version_check_centos(Pihole):
+def test_release_supported_version_check_centos(host):
    '''
    confirms installer exits on unsupported releases of CentOS
    '''
    # modify /etc/redhat-release to mock an unsupported CentOS release
-    Pihole.run('echo "CentOS Linux release 6.9" > /etc/redhat-release')
-    package_manager_detect = Pihole.run('''
+    host.run('echo "CentOS Linux release 6.9" > /etc/redhat-release')
+    package_manager_detect = host.run('''
    source /opt/pihole/basic-install.sh
    package_manager_detect
    select_rpm_php
    ''')
    expected_stdout = cross_box + (' CentOS 6 is not supported.')
    assert expected_stdout in package_manager_detect.stdout
@@ -23,85 +24,89 @@ def test_release_supported_version_check_centos(Pihole):
    assert expected_stdout in package_manager_detect.stdout


-def test_enable_epel_repository_centos(Pihole):
+def test_enable_epel_repository_centos(host):
    '''
    confirms the EPEL package repository is enabled when installed on CentOS
    '''
-    package_manager_detect = Pihole.run('''
+    package_manager_detect = host.run('''
    source /opt/pihole/basic-install.sh
    package_manager_detect
    select_rpm_php
    ''')
    expected_stdout = info_box + (' Enabling EPEL package repository '
                                  '(https://fedoraproject.org/wiki/EPEL)')
    assert expected_stdout in package_manager_detect.stdout
    expected_stdout = tick_box + ' Installed epel-release'
    assert expected_stdout in package_manager_detect.stdout
-    epel_package = Pihole.package('epel-release')
+    epel_package = host.package('epel-release')
    assert epel_package.is_installed

-def test_php_version_lt_7_detected_upgrade_default_optout_centos(Pihole):
+def test_php_version_lt_7_detected_upgrade_default_optout_centos(host):
    '''
    confirms the default behavior to opt-out of upgrading to PHP7 from REMI
    '''
    # first we will install the default php version to test installer behavior
-    php_install = Pihole.run('yum install -y php')
+    php_install = host.run('yum install -y php')
    assert php_install.rc == 0
-    php_package = Pihole.package('php')
+    php_package = host.package('php')
    default_centos_php_version = php_package.version.split('.')[0]
    if int(default_centos_php_version) >= 7:  # PHP7 is supported/recommended
        pytest.skip("Test deprecated . Detected default PHP version >= 7")
-    package_manager_detect = Pihole.run('''
+    package_manager_detect = host.run('''
    source /opt/pihole/basic-install.sh
    package_manager_detect
    select_rpm_php
    ''')
    expected_stdout = info_box + (' User opt-out of PHP 7 upgrade on CentOS. '
                                  'Deprecated PHP may be in use.')
    assert expected_stdout in package_manager_detect.stdout
-    remi_package = Pihole.package('remi-release')
+    remi_package = host.package('remi-release')
    assert not remi_package.is_installed

-def test_php_version_lt_7_detected_upgrade_user_optout_centos(Pihole):
+def test_php_version_lt_7_detected_upgrade_user_optout_centos(host):
    '''
    confirms installer behavior when user opt-out to upgrade to PHP7 via REMI
    '''
    # first we will install the default php version to test installer behavior
-    php_install = Pihole.run('yum install -y php')
+    php_install = host.run('yum install -y php')
    assert php_install.rc == 0
-    php_package = Pihole.package('php')
+    php_package = host.package('php')
    default_centos_php_version = php_package.version.split('.')[0]
    if int(default_centos_php_version) >= 7:  # PHP7 is supported/recommended
        pytest.skip("Test deprecated . Detected default PHP version >= 7")
    # Whiptail dialog returns Cancel for user prompt
-    mock_command('whiptail', {'*': ('', '1')}, Pihole)
-    package_manager_detect = Pihole.run('''
+    mock_command('whiptail', {'*': ('', '1')}, host)
+    package_manager_detect = host.run('''
    source /opt/pihole/basic-install.sh
    package_manager_detect
    select_rpm_php
    ''')
    expected_stdout = info_box + (' User opt-out of PHP 7 upgrade on CentOS. '
                                  'Deprecated PHP may be in use.')
    assert expected_stdout in package_manager_detect.stdout
-    remi_package = Pihole.package('remi-release')
+    remi_package = host.package('remi-release')
    assert not remi_package.is_installed


-def test_php_version_lt_7_detected_upgrade_user_optin_centos(Pihole):
+def test_php_version_lt_7_detected_upgrade_user_optin_centos(host):
    '''
    confirms installer behavior when user opt-in to upgrade to PHP7 via REMI
    '''
    # first we will install the default php version to test installer behavior
-    php_install = Pihole.run('yum install -y php')
+    php_install = host.run('yum install -y php')
    assert php_install.rc == 0
-    php_package = Pihole.package('php')
+    php_package = host.package('php')
    default_centos_php_version = php_package.version.split('.')[0]
    if int(default_centos_php_version) >= 7:  # PHP7 is supported/recommended
        pytest.skip("Test deprecated . Detected default PHP version >= 7")
    # Whiptail dialog returns Continue for user prompt
-    mock_command('whiptail', {'*': ('', '0')}, Pihole)
-    package_manager_detect = Pihole.run('''
+    mock_command('whiptail', {'*': ('', '0')}, host)
+    package_manager_detect = host.run('''
    source /opt/pihole/basic-install.sh
    package_manager_detect
    select_rpm_php
    install_dependent_packages PIHOLE_WEB_DEPS[@]
    ''')
    expected_stdout = info_box + (' User opt-out of PHP 7 upgrade on CentOS. '
@@ -113,8 +118,8 @@ def test_php_version_lt_7_detected_upgrade_user_optin_centos(Pihole):
    expected_stdout = tick_box + (' Remi\'s RPM repository has '
                                  'been enabled for PHP7')
    assert expected_stdout in package_manager_detect.stdout
-    remi_package = Pihole.package('remi-release')
+    remi_package = host.package('remi-release')
    assert remi_package.is_installed
-    updated_php_package = Pihole.package('php')
+    updated_php_package = host.package('php')
    updated_php_version = updated_php_package.version.split('.')[0]
    assert int(updated_php_version) == 7

@@ -5,7 +5,7 @@ from .conftest import (
)


-def mock_selinux_config(state, Pihole):
+def mock_selinux_config(state, host):
    '''
    Creates a mock SELinux config file with expected content
    '''
@@ -13,20 +13,20 @@ def mock_selinux_config(state, Pihole):
    valid_states = ['enforcing', 'permissive', 'disabled']
    assert state in valid_states
    # getenforce returns the running state of SELinux
-    mock_command('getenforce', {'*': (state.capitalize(), '0')}, Pihole)
+    mock_command('getenforce', {'*': (state.capitalize(), '0')}, host)
    # create mock configuration with desired content
-    Pihole.run('''
+    host.run('''
    mkdir /etc/selinux
    echo "SELINUX={state}" > /etc/selinux/config
    '''.format(state=state.lower()))


-def test_selinux_enforcing_exit(Pihole):
+def test_selinux_enforcing_exit(host):
    '''
    confirms installer prompts to exit when SELinux is Enforcing by default
    '''
-    mock_selinux_config("enforcing", Pihole)
-    check_selinux = Pihole.run('''
+    mock_selinux_config("enforcing", host)
+    check_selinux = host.run('''
    source /opt/pihole/basic-install.sh
    checkSelinux
    ''')
@@ -37,12 +37,12 @@ def test_selinux_enforcing_exit(Pihole):
    assert check_selinux.rc == 1


-def test_selinux_permissive(Pihole):
+def test_selinux_permissive(host):
    '''
    confirms installer continues when SELinux is Permissive
    '''
-    mock_selinux_config("permissive", Pihole)
-    check_selinux = Pihole.run('''
+    mock_selinux_config("permissive", host)
+    check_selinux = host.run('''
    source /opt/pihole/basic-install.sh
    checkSelinux
    ''')
@@ -51,12 +51,12 @@ def test_selinux_permissive(Pihole):
    assert check_selinux.rc == 0


-def test_selinux_disabled(Pihole):
+def test_selinux_disabled(host):
    '''
    confirms installer continues when SELinux is Disabled
    '''
-    mock_selinux_config("disabled", Pihole)
-    check_selinux = Pihole.run('''
+    mock_selinux_config("disabled", host)
+    check_selinux = host.run('''
    source /opt/pihole/basic-install.sh
    checkSelinux
    ''')

@@ -1,15 +1,16 @@
-def test_epel_and_remi_not_installed_fedora(Pihole):
+def test_epel_and_remi_not_installed_fedora(host):
    '''
    confirms installer does not attempt to install EPEL/REMI repositories
    on Fedora
    '''
-    package_manager_detect = Pihole.run('''
+    package_manager_detect = host.run('''
    source /opt/pihole/basic-install.sh
    package_manager_detect
    select_rpm_php
    ''')
    assert package_manager_detect.stdout == ''

-    epel_package = Pihole.package('epel-release')
+    epel_package = host.package('epel-release')
    assert not epel_package.is_installed
-    remi_package = Pihole.package('remi-release')
+    remi_package = host.package('remi-release')
    assert not remi_package.is_installed

@@ -1,8 +1,8 @@
[tox]
-envlist = py37
+envlist = py38

[testenv]
whitelist_externals = docker
deps = -rrequirements.txt
commands = docker build -f _centos_7.Dockerfile -t pytest_pihole:test_container ../
-    pytest {posargs:-vv -n auto} ./test_automated_install.py ./test_centos_fedora_common_support.py ./test_centos_common_support.py ./test_centos_7_support.py
+    pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py ./test_centos_fedora_common_support.py ./test_centos_common_support.py ./test_centos_7_support.py

@@ -1,8 +1,8 @@
[tox]
-envlist = py37
+envlist = py38

[testenv]
whitelist_externals = docker
deps = -rrequirements.txt
commands = docker build -f _centos_8.Dockerfile -t pytest_pihole:test_container ../
-    pytest {posargs:-vv -n auto} ./test_automated_install.py ./test_centos_fedora_common_support.py ./test_centos_common_support.py ./test_centos_8_support.py
+    pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py ./test_centos_fedora_common_support.py ./test_centos_common_support.py ./test_centos_8_support.py

@@ -1,8 +1,8 @@
[tox]
-envlist = py37
+envlist = py38

[testenv]
whitelist_externals = docker
deps = -rrequirements.txt
commands = docker build -f _debian_10.Dockerfile -t pytest_pihole:test_container ../
-    pytest {posargs:-vv -n auto} ./test_automated_install.py
+    pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py

@@ -1,8 +1,8 @@
[tox]
-envlist = py37
+envlist = py38

[testenv]
whitelist_externals = docker
deps = -rrequirements.txt
commands = docker build -f _debian_11.Dockerfile -t pytest_pihole:test_container ../
-    pytest {posargs:-vv -n auto} ./test_automated_install.py
+    pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py

@@ -1,8 +1,8 @@
[tox]
-envlist = py37
+envlist = py38

[testenv]
whitelist_externals = docker
deps = -rrequirements.txt
commands = docker build -f _debian_9.Dockerfile -t pytest_pihole:test_container ../
-    pytest {posargs:-vv -n auto} ./test_automated_install.py
+    pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py

@@ -1,8 +0,0 @@
-[tox]
-envlist = py37
-
-[testenv]
-whitelist_externals = docker
-deps = -rrequirements.txt
-commands = docker build -f _fedora_32.Dockerfile -t pytest_pihole:test_container ../
-    pytest {posargs:-vv -n auto} ./test_automated_install.py ./test_centos_fedora_common_support.py ./test_fedora_support.py
@@ -1,8 +1,8 @@
[tox]
-envlist = py37
+envlist = py38

[testenv]
whitelist_externals = docker
deps = -rrequirements.txt
commands = docker build -f _fedora_33.Dockerfile -t pytest_pihole:test_container ../
-    pytest {posargs:-vv -n auto} ./test_automated_install.py ./test_centos_fedora_common_support.py ./test_fedora_support.py
+    pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py ./test_centos_fedora_common_support.py ./test_fedora_support.py

test/tox.fedora_34.ini (new file, 8 lines)
@@ -0,0 +1,8 @@
+[tox]
+envlist = py38
+
+[testenv]
+whitelist_externals = docker
+deps = -rrequirements.txt
+commands = docker build -f _fedora_34.Dockerfile -t pytest_pihole:test_container ../
+    pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py ./test_centos_fedora_common_support.py ./test_fedora_support.py

@@ -1,8 +1,8 @@
[tox]
-envlist = py37
+envlist = py38

[testenv]
whitelist_externals = docker
deps = -rrequirements.txt
commands = docker build -f _ubuntu_16.Dockerfile -t pytest_pihole:test_container ../
-    pytest {posargs:-vv -n auto} ./test_automated_install.py
+    pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py

@@ -1,8 +1,8 @@
[tox]
-envlist = py37
+envlist = py38

[testenv]
whitelist_externals = docker
deps = -rrequirements.txt
commands = docker build -f _ubuntu_18.Dockerfile -t pytest_pihole:test_container ../
-    pytest {posargs:-vv -n auto} ./test_automated_install.py
+    pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py

@@ -1,8 +1,8 @@
[tox]
-envlist = py37
+envlist = py38

[testenv]
whitelist_externals = docker
deps = -rrequirements.txt
commands = docker build -f _ubuntu_20.Dockerfile -t pytest_pihole:test_container ../
-    pytest {posargs:-vv -n auto} ./test_automated_install.py
+    pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py

@@ -1,8 +1,8 @@
[tox]
-envlist = py37
+envlist = py38

[testenv]
whitelist_externals = docker
deps = -rrequirements.txt
commands = docker build -f _ubuntu_21.Dockerfile -t pytest_pihole:test_container ../
-    pytest {posargs:-vv -n auto} ./test_automated_install.py
+    pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py