Compare commits: development...fix/gravit (240 commits)

.github/dependabot.yml (new file, +10 lines)
@@ -0,0 +1,10 @@
+version: 2
+updates:
+  - package-ecosystem: github-actions
+    directory: "/"
+    schedule:
+      interval: weekly
+      day: saturday
+      time: "10:00"
+    open-pull-requests-limit: 10
+    target-branch: developement

.github/release.yml (new file, +7 lines)
@@ -0,0 +1,7 @@
+changelog:
+  exclude:
+    labels:
+      - internal
+    authors:
+      - dependabot
+      - github-actions

.github/workflows/codeql-analysis.yml (new file, +40 lines)
@@ -0,0 +1,40 @@
+name: "CodeQL"
+
+on:
+  push:
+    branches:
+      - master
+      - development
+  pull_request:
+    branches:
+      - master
+      - development
+  schedule:
+    - cron: '32 11 * * 6'
+
+jobs:
+  analyze:
+    name: Analyze
+    runs-on: ubuntu-latest
+
+    permissions:
+      actions: read
+      contents: read
+      security-events: write
+
+    steps:
+      -
+        name: Checkout repository
+        uses: actions/checkout@v2
+      # Initializes the CodeQL tools for scanning.
+      -
+        name: Initialize CodeQL
+        uses: github/codeql-action/init@v1
+        with:
+          languages: 'python'
+      -
+        name: Autobuild
+        uses: github/codeql-action/autobuild@v1
+      -
+        name: Perform CodeQL Analysis
+        uses: github/codeql-action/analyze@v1

.github/workflows/stale.yml (new file, +25 lines)
@@ -0,0 +1,25 @@
+name: Mark stale issues
+
+on:
+  schedule:
+    - cron: '0 * * * *'
+  workflow_dispatch:
+
+jobs:
+  stale:
+
+    runs-on: ubuntu-latest
+    permissions:
+      issues: write
+
+    steps:
+      - uses: actions/stale@v4
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
+          days-before-stale: 30
+          days-before-close: 5
+          stale-issue-message: 'This issue is stale because it has been open 30 days with no activity. Please comment or update this issue or it will be closed in 5 days.'
+          stale-issue-label: 'Submitter Attention Required'
+          exempt-issue-labels: 'Internal, Fixed in next release, Bug: Confirmed'
+          exempt-all-issue-assignees: true
+          operations-per-run: 300

.github/workflows/sync-back-to-dev.yml (new file, +28 lines)
@@ -0,0 +1,28 @@
+name: Sync Back to Development
+
+on:
+  push:
+    branches:
+      - master
+
+jobs:
+  sync-branches:
+    runs-on: ubuntu-latest
+    name: Syncing branches
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+      - name: Opening pull request
+        id: pull
+        uses: tretuna/sync-branches@1.4.0
+        with:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          FROM_BRANCH: 'master'
+          TO_BRANCH: 'development'
+          CONTENT_COMPARISON: true
+      - name: Label the pull request to ignore for release note generation
+        uses: actions-ecosystem/action-add-labels@v1
+        with:
+          labels: internal
+          repo: ${{ github.repository }}
+          number: ${{ steps.pull.outputs.PULL_REQUEST_NUMBER }}

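For reference, the workflow above amounts to opening a pull request from master into development and labelling it internal so the release-note configuration excludes it. A rough manual equivalent using the GitHub CLI (a sketch only: it assumes `gh` is installed and authenticated in a clone of the repository, and the title and body strings are illustrative, not taken from the workflow):

```bash
# Open a PR from master into development and label it "internal" so the
# changelog config in .github/release.yml skips it in release notes.
gh pr create \
  --base development \
  --head master \
  --title "Sync master back into development" \
  --body "Automated branch sync" \
  --label internal
```
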
.github/workflows/test.yml (35 lines changed)
@@ -5,21 +5,44 @@ on:
     types: [opened, synchronize, reopened, ready_for_review]
 
 jobs:
+  smoke-test:
+    if: github.event.pull_request.draft == false
+    runs-on: ubuntu-latest
+    steps:
+      -
+        name: Checkout repository
+        uses: actions/checkout@v2
+      -
+        name: Run Smoke Tests
+        run: |
+          # Ensure scripts in repository are executable
+          IFS=$'\n';
+          for f in $(find . -name '*.sh'); do if [[ ! -x $f ]]; then echo "$f is not executable" && FAIL=1; fi ;done
+          unset IFS;
+          # If FAIL is 1 then we fail.
+          [[ $FAIL == 1 ]] && exit 1 || echo "Smoke Tests Passed"
+
   distro-test:
     if: github.event.pull_request.draft == false
     runs-on: ubuntu-latest
+    needs: smoke-test
     strategy:
       matrix:
-        distro: [debian_9, debian_10, ubuntu_16, ubuntu_18, ubuntu_20, centos_7, centos_8, fedora_32, fedora_33]
+        distro: [debian_9, debian_10, debian_11, ubuntu_16, ubuntu_18, ubuntu_20, ubuntu_21, centos_7, centos_8, fedora_33, fedora_34]
     env:
       DISTRO: ${{matrix.distro}}
     steps:
-      - uses: actions/checkout@v1
-      - name: Set up Python 3.7
+      -
+        name: Checkout repository
+        uses: actions/checkout@v2
+      -
+        name: Set up Python 3.8
         uses: actions/setup-python@v2
         with:
-          python-version: 3.7
+          python-version: 3.8
-      - name: Install dependencies
+      -
+        name: Install dependencies
         run: pip install -r test/requirements.txt
-      - name: Test with tox
+      -
+        name: Test with tox
         run: tox -c test/tox.${DISTRO}.ini

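The smoke test above only reports shell scripts that are missing the executable bit; it does not fix them. A local pre-push check along the same lines (a sketch assuming GNU find, run from the repository root):

```bash
# List shell scripts whose owner-execute bit is not set, then set it.
find . -name '*.sh' ! -perm -u+x -print
find . -name '*.sh' ! -perm -u+x -exec chmod +x {} +
```
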
.gitignore (68 lines changed)
@@ -7,70 +7,6 @@ __pycache__
 .tox
 .eggs
 *.egg-info
+.idea/
-
-# Created by https://www.gitignore.io/api/jetbrains+iml
-
-### JetBrains+iml ###
-# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm
-# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
-
-# All idea files, with exceptions
-.idea
-!.idea/codeStyles/*
-!.idea/codeStyleSettings.xml
-
-# Sensitive or high-churn files:
-.idea/**/dataSources/
-.idea/**/dataSources.ids
-.idea/**/dataSources.xml
-.idea/**/dataSources.local.xml
-.idea/**/sqlDataSources.xml
-.idea/**/dynamic.xml
-.idea/**/uiDesigner.xml
-
-# Gradle:
-.idea/**/gradle.xml
-.idea/**/libraries
-
-# CMake
-cmake-build-debug/
-
-# Mongo Explorer plugin:
-.idea/**/mongoSettings.xml
-
-## File-based project format:
-*.iws
-
-## Plugin-specific files:
-
-# IntelliJ
-/out/
-
-# mpeltonen/sbt-idea plugin
-.idea_modules/
-
-# JIRA plugin
-atlassian-ide-plugin.xml
-
-# Cursive Clojure plugin
-.idea/replstate.xml
-
-# Ruby plugin and RubyMine
-/.rakeTasks
-
-# Crashlytics plugin (for Android Studio and IntelliJ)
-com_crashlytics_export_strings.xml
-crashlytics.properties
-crashlytics-build.properties
-fabric.properties
-
-### JetBrains+iml Patch ###
-# Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-249601023
-
 *.iml
-.idea/misc.xml
+.vscode/
-*.ipr
-
-# End of https://www.gitignore.io/api/jetbrains+iml
-

.idea/codeStyleSettings.xml (deleted file, -25 lines)
@@ -1,25 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project version="4">
-  <component name="ProjectCodeStyleSettingsManager">
-    <option name="PER_PROJECT_SETTINGS">
-      <value>
-        <option name="OTHER_INDENT_OPTIONS">
-          <value>
-            <option name="INDENT_SIZE" value="2" />
-            <option name="CONTINUATION_INDENT_SIZE" value="8" />
-            <option name="TAB_SIZE" value="2" />
-            <option name="USE_TAB_CHARACTER" value="false" />
-            <option name="SMART_TABS" value="false" />
-            <option name="LABEL_INDENT_SIZE" value="0" />
-            <option name="LABEL_INDENT_ABSOLUTE" value="false" />
-            <option name="USE_RELATIVE_INDENTS" value="false" />
-          </value>
-        </option>
-        <MarkdownNavigatorCodeStyleSettings>
-          <option name="RIGHT_MARGIN" value="72" />
-        </MarkdownNavigatorCodeStyleSettings>
-      </value>
-    </option>
-    <option name="USE_PER_PROJECT_SETTINGS" value="true" />
-  </component>
-</project>

.idea/codeStyles/Project.xml (deleted file, -7 lines)
@@ -1,7 +0,0 @@
-<component name="ProjectCodeStyleConfiguration">
-  <code_scheme name="Project" version="173">
-    <MarkdownNavigatorCodeStyleSettings>
-      <option name="RIGHT_MARGIN" value="72" />
-    </MarkdownNavigatorCodeStyleSettings>
-  </code_scheme>
-</component>

.idea/codeStyles/codeStyleConfig.xml (deleted file, -5 lines)
@@ -1,5 +0,0 @@
-<component name="ProjectCodeStyleConfiguration">
-  <state>
-    <option name="USE_PER_PROJECT_SETTINGS" value="true" />
-  </state>
-</component>

CONTRIBUTING.md (107 lines changed)
@@ -2,111 +2,6 @@
 
 Please read and understand the contribution guide before creating an issue or pull request.
 
-## Etiquette
+The guide can be found here: [https://docs.pi-hole.net/guides/github/contributing/](https://docs.pi-hole.net/guides/github/contributing/)
-
-- Our goal for Pi-hole is **stability before features**. This means we focus on squashing critical bugs before adding new features. Often, we can do both in tandem, but bugs will take priority over a new feature.
-- Pi-hole is open source and [powered by donations](https://pi-hole.net/donate/), and as such, we give our **free time** to build, maintain, and **provide user support** for this project. It would be extremely unfair for us to suffer abuse or anger for our hard work, so please take a moment to consider that.
-- Please be considerate towards the developers and other users when raising issues or presenting pull requests.
-- Respect our decision(s), and do not be upset or abusive if your submission is not used.
-
-## Viability
-
-When requesting or submitting new features, first consider whether it might be useful to others. Open source projects are used by many people, who may have entirely different needs to your own. Think about whether or not your feature is likely to be used by other users of the project.
-
-## Procedure
-
-**Before filing an issue:**
-
-- Attempt to replicate and **document** the problem, to ensure that it wasn't a coincidental incident.
-- Check to make sure your feature suggestion isn't already present within the project.
-- Check the pull requests tab to ensure that the bug doesn't have a fix in progress.
-- Check the pull requests tab to ensure that the feature isn't already in progress.
-
-**Before submitting a pull request:**
-
-- Check the codebase to ensure that your feature doesn't already exist.
-- Check the pull requests to ensure that another person hasn't already submitted the feature or fix.
-- Read and understand the [DCO guidelines](https://docs.pi-hole.net/guides/github/contributing/) for the project.
-
-## Technical Requirements
-
-- Submit Pull Requests to the **development branch only**.
-- Before Submitting your Pull Request, merge `development` with your new branch and fix any conflicts. (Make sure you don't break anything in development!)
-- Please use the [Google Style Guide for Shell](https://google.github.io/styleguide/shell.xml) for your code submission styles.
-- Commit Unix line endings.
-- Please use the Pi-hole brand: **Pi-hole** (Take a special look at the capitalized 'P' and a low 'h' with a hyphen)
-- (Optional fun) keep to the theme of Star Trek/black holes/gravity.
-
-## Forking and Cloning from GitHub to GitHub
-
-1. Fork <https://github.com/pi-hole/pi-hole/> to a repo under a namespace you control, or have permission to use, for example: `https://github.com/<your_namespace>/<your_repo_name>/`. You can do this from the github.com website.
-2. Clone `https://github.com/<your_namespace>/<your_repo_name>/` with the tool of you choice.
-3. To keep your fork in sync with our repo, add an upstream remote for pi-hole/pi-hole to your repo.
-
-```bash
-git remote add upstream https://github.com/pi-hole/pi-hole.git
-```
-
-4. Checkout the `development` branch from your fork `https://github.com/<your_namespace>/<your_repo_name>/`.
-5. Create a topic/branch, based on the `development` branch code. *Bonus fun to keep to the theme of Star Trek/black holes/gravity.*
-6. Make your changes and commit to your topic branch in your repo.
-7. Rebase your commits and squash any insignificant commits. See the notes below for an example.
-8. Merge `development` your branch and fix any conflicts.
-9. Open a Pull Request to merge your topic branch into our repo's `development` branch.
-
-- Keep in mind the technical requirements from above.
-
-## Forking and Cloning from GitHub to other code hosting sites
-
-- Forking is a GitHub concept and cannot be done from GitHub to other git-based code hosting sites. However, those sites may be able to mirror a GitHub repo.
-
-1. To contribute from another code hosting site, you must first complete the steps above to fork our repo to a GitHub namespace you have permission to use, for example: `https://github.com/<your_namespace>/<your_repo_name>/`.
-2. Create a repo in your code hosting site, for example: `https://gitlab.com/<your_namespace>/<your_repo_name>/`
-3. Follow the instructions from your code hosting site to create a mirror between `https://github.com/<your_namespace>/<your_repo_name>/` and `https://gitlab.com/<your_namespace>/<your_repo_name>/`.
-4. When you are ready to create a Pull Request (PR), follow the steps `(starting at step #6)` from [Forking and Cloning from GitHub to GitHub](#forking-and-cloning-from-github-to-github) and create the PR from `https://github.com/<your_namespace>/<your_repo_name>/`.
-
-## Notes for squashing commits with rebase
-
-- To rebase your commits and squash previous commits, you can use:
-
-```bash
-git rebase -i your_topic_branch~(number of commits to combine)
-```
-
-- For more details visit [gitready.com](http://gitready.com/advanced/2009/02/10/squashing-commits-with-rebase.html)
-
-1. The following would combine the last four commits in the branch `mytopic`.
-
-```bash
-git rebase -i mytopic~4
-```
-
-2. An editor window opens with the most recent commits indicated: (edit the commands to the left of the commit ID)
-
-```gitattributes
-pick 9dff55b2 existing commit comments
-squash ebb1a730 existing commit comments
-squash 07cc5b50 existing commit comments
-reword 9dff55b2 existing commit comments
-```
-
-3. Save and close the editor. The next editor window opens: (edit the new commit message). *If you select reword for a commit, an additional editor window will open for you to edit the comment.*
-
-```bash
-new commit comments
-Signed-off-by: yourname <your email address>
-```
-
-4. Save and close the editor for the rebase process to execute. The terminal output should say something like the following:
-
-```bash
-Successfully rebased and updated refs/heads/mytopic.
-```
-
-5. Once you have a successful rebase, and before you sync your local clone, you have to force push origin to update your repo:
-
-```bash
-git push -f origin
-```
-
-6. Continue on from step #7 from [Forking and Cloning from GitHub to GitHub](#forking-and-cloning-from-github-to-github)

README.md (32 lines changed)
@@ -11,9 +11,9 @@
 </p>
 <!-- markdownlint-enable MD033 -->
 
-The Pi-hole® is a [DNS sinkhole](https://en.wikipedia.org/wiki/DNS_Sinkhole) that protects your devices from unwanted content, without installing any client-side software.
+The Pi-hole® is a [DNS sinkhole](https://en.wikipedia.org/wiki/DNS_Sinkhole) that protects your devices from unwanted content without installing any client-side software.
 
-- **Easy-to-install**: our versatile installer walks you through the process, and takes less than ten minutes
+- **Easy-to-install**: our versatile installer walks you through the process and takes less than ten minutes
 - **Resolute**: content is blocked in _non-browser locations_, such as ad-laden mobile apps and smart TVs
 - **Responsive**: seamlessly speeds up the feel of everyday browsing by caching DNS queries
 - **Lightweight**: runs smoothly with [minimal hardware and software requirements](https://docs.pi-hole.net/main/prerequisites/)
@@ -22,7 +22,7 @@ The Pi-hole® is a [DNS sinkhole](https://en.wikipedia.org/wiki/DNS_Sinkhole) th
 - **Versatile**: can optionally function as a [DHCP server](https://discourse.pi-hole.net/t/how-do-i-use-pi-holes-built-in-dhcp-server-and-why-would-i-want-to/3026), ensuring *all* your devices are protected automatically
 - **Scalable**: [capable of handling hundreds of millions of queries](https://pi-hole.net/2017/05/24/how-much-traffic-can-pi-hole-handle/) when installed on server-grade hardware
 - **Modern**: blocks ads over both IPv4 and IPv6
-- **Free**: open source software which helps ensure _you_ are the sole person in control of your privacy
+- **Free**: open source software that helps ensure _you_ are the sole person in control of your privacy
 
 -----
 
@@ -57,21 +57,21 @@ Please refer to the [Pi-hole docker repo](https://github.com/pi-hole/docker-pi-h
 
 Once the installer has been run, you will need to [configure your router to have **DHCP clients use Pi-hole as their DNS server**](https://discourse.pi-hole.net/t/how-do-i-configure-my-devices-to-use-pi-hole-as-their-dns-server/245) which ensures that all devices connecting to your network will have content blocked without any further intervention.
 
-If your router does not support setting the DNS server, you can [use Pi-hole's built-in DHCP server](https://discourse.pi-hole.net/t/how-do-i-use-pi-holes-built-in-dhcp-server-and-why-would-i-want-to/3026); just be sure to disable DHCP on your router first (if it has that feature available).
+If your router does not support setting the DNS server, you can [use Pi-hole's built-in DHCP server](https://discourse.pi-hole.net/t/how-do-i-use-pi-holes-built-in-dhcp-server-and-why-would-i-want-to/3026); be sure to disable DHCP on your router first (if it has that feature available).
 
-As a last resort, you can always manually set each device to use Pi-hole as their DNS server.
+As a last resort, you can manually set each device to use Pi-hole as their DNS server.
 
 -----
 
-## Pi-hole is free, but powered by your support
+## Pi-hole is free but powered by your support
 
-There are many reoccurring costs involved with maintaining free, open source, and privacy-respecting software; expenses which [our volunteer developers](https://github.com/orgs/pi-hole/people) pitch in to cover out-of-pocket. This is just one example of how strongly we feel about our software, as well as the importance of keeping it maintained.
+There are many reoccurring costs involved with maintaining free, open source, and privacy-respecting software; expenses which [our volunteer developers](https://github.com/orgs/pi-hole/people) pitch in to cover out-of-pocket. This is just one example of how strongly we feel about our software and the importance of keeping it maintained.
 
 Make no mistake: **your support is absolutely vital to help keep us innovating!**
 
 ### [Donations](https://pi-hole.net/donate)
 
-Sending a donation using our Sponsor Button is **extremely helpful** in offsetting a portion of our monthly expenses and rewarding our dedicated development team:
+Donating using our Sponsor Button is **extremely helpful** in offsetting a portion of our monthly expenses:
 
 ### Alternative support
 
@@ -83,13 +83,13 @@ If you'd rather not donate (_which is okay!_), there are other ways you can help
 - [Digital Ocean](https://www.digitalocean.com/?refcode=344d234950e1) _affiliate link_
 - [Stickermule](https://www.stickermule.com/unlock?ref_id=9127301701&utm_medium=link&utm_source=invite) _earn a $10 credit after your first purchase_
 - [Amazon US](http://www.amazon.com/exec/obidos/redirect-home/pihole09-20) _affiliate link_
-- Spreading the word about our software, and how you have benefited from it
+- Spreading the word about our software and how you have benefited from it
 
 ### Contributing via GitHub
 
 We welcome _everyone_ to contribute to issue reports, suggest new features, and create pull requests.
 
-If you have something to add - anything from a typo through to a whole new feature, we're happy to check it out! Just make sure to fill out our template when submitting your request; the questions that it asks will help the volunteers quickly understand what you're aiming to achieve.
+If you have something to add - anything from a typo through to a whole new feature, we're happy to check it out! Just make sure to fill out our template when submitting your request; the questions it asks will help the volunteers quickly understand what you're aiming to achieve.
 
 You'll find that the [install script](https://github.com/pi-hole/pi-hole/blob/master/automated%20install/basic-install.sh) and the [debug script](https://github.com/pi-hole/pi-hole/blob/master/advanced/Scripts/piholeDebug.sh) have an abundance of comments, which will help you better understand how Pi-hole works. They're also a valuable resource to those who want to learn how to write scripts or code a program! We encourage anyone who likes to tinker to read through it and submit a pull request for us to review.
 
@@ -97,9 +97,9 @@ You'll find that the [install script](https://github.com/pi-hole/pi-hole/blob/ma
 
 ## Getting in touch with us
 
-While we are primarily reachable on our [Discourse User Forum](https://discourse.pi-hole.net/), we can also be found on a variety of social media outlets.
+While we are primarily reachable on our [Discourse User Forum](https://discourse.pi-hole.net/), we can also be found on various social media outlets.
 
-**Please be sure to check the FAQ's** before starting a new discussion. Many user questions already have answers and can be solved without any additional assistance.
+**Please be sure to check the FAQs** before starting a new discussion, as we do not have the spare time to reply to every request for assistance.
 
 - [Frequently Asked Questions](https://discourse.pi-hole.net/c/faqs)
 - [Feature Requests](https://discourse.pi-hole.net/c/feature-requests?order=votes)
@@ -125,15 +125,15 @@ Some of the statistics you can integrate include:
 - Queries cached
 - Unique clients
 
-The API can be accessed via [`telnet`](https://github.com/pi-hole/FTL), the Web (`admin/api.php`) and Command Line (`pihole -c -j`). You can find out [more details over here](https://discourse.pi-hole.net/t/pi-hole-api/1863).
+Access the API via [`telnet`](https://github.com/pi-hole/FTL), the Web (`admin/api.php`) and Command Line (`pihole -c -j`). You can find out [more details over here](https://discourse.pi-hole.net/t/pi-hole-api/1863).
 
 ### The Command Line Interface
 
-The [pihole](https://docs.pi-hole.net/core/pihole-command/) command has all the functionality necessary to be able to fully administer the Pi-hole, without the need of the Web Interface. It's fast, user-friendly, and auditable by anyone with an understanding of `bash`.
+The [pihole](https://docs.pi-hole.net/core/pihole-command/) command has all the functionality necessary to fully administer the Pi-hole, without the need of the Web Interface. It's fast, user-friendly, and auditable by anyone with an understanding of `bash`.
 
 Some notable features include:
 
-- [Whitelisting, Blacklisting and Regex](https://docs.pi-hole.net/core/pihole-command/#whitelisting-blacklisting-and-regex)
+- [Whitelisting, Blacklisting, and Regex](https://docs.pi-hole.net/core/pihole-command/#whitelisting-blacklisting-and-regex)
 - [Debugging utility](https://docs.pi-hole.net/core/pihole-command/#debugger)
 - [Viewing the live log file](https://docs.pi-hole.net/core/pihole-command/#tail)
 - [Updating Ad Lists](https://docs.pi-hole.net/core/pihole-command/#gravity)
@@ -149,7 +149,7 @@ This [optional dashboard](https://github.com/pi-hole/AdminLTE) allows you to vie
 
 Some notable features include:
 
-- Mobile friendly interface
+- Mobile-friendly interface
 - Password protection
 - Detailed graphs and doughnut charts
 - Top lists of domains and clients

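The API paragraph in the README hunk above names the command-line entry point. A minimal illustration, assuming a working install with the `pihole` command on PATH:

```bash
# Print the chronometer statistics once, as JSON (the "Command Line" API
# access mentioned in the README).
pihole -c -j
```
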
@@ -39,6 +39,4 @@ cache-size=@CACHE_SIZE@
 log-queries
 log-facility=/var/log/pihole.log
 
-local-ttl=2
-
 log-async

advanced/06-rfc6761.conf (new file, +42 lines)
@@ -0,0 +1,42 @@
+# Pi-hole: A black hole for Internet advertisements
+# (c) 2021 Pi-hole, LLC (https://pi-hole.net)
+# Network-wide ad blocking via your own hardware.
+#
+# RFC 6761 config file for Pi-hole
+#
+# This file is copyright under the latest version of the EUPL.
+# Please see LICENSE file for your rights under this license.
+
+###############################################################################
+# FILE AUTOMATICALLY POPULATED BY PI-HOLE INSTALL/UPDATE PROCEDURE.           #
+# ANY CHANGES MADE TO THIS FILE AFTER INSTALL WILL BE LOST ON THE NEXT UPDATE #
+#                                                                             #
+#                 CHANGES SHOULD BE MADE IN A SEPARATE CONFIG FILE            #
+#                 WITHIN /etc/dnsmasq.d/yourname.conf                         #
+###############################################################################
+
+# RFC 6761: Caching DNS servers SHOULD recognize
+#     test, localhost, invalid
+# names as special and SHOULD NOT attempt to look up NS records for them, or
+# otherwise query authoritative DNS servers in an attempt to resolve these
+# names.
+server=/test/
+server=/localhost/
+server=/invalid/
+
+# The same RFC requests something similar for
+#     10.in-addr.arpa.      21.172.in-addr.arpa.  27.172.in-addr.arpa.
+#     16.172.in-addr.arpa.  22.172.in-addr.arpa.  28.172.in-addr.arpa.
+#     17.172.in-addr.arpa.  23.172.in-addr.arpa.  29.172.in-addr.arpa.
+#     18.172.in-addr.arpa.  24.172.in-addr.arpa.  30.172.in-addr.arpa.
+#     19.172.in-addr.arpa.  25.172.in-addr.arpa.  31.172.in-addr.arpa.
+#     20.172.in-addr.arpa.  26.172.in-addr.arpa.  168.192.in-addr.arpa.
+# Pi-hole implements this via the dnsmasq option "bogus-priv" (see
+# 01-pihole.conf) because this also covers IPv6.
+
+# OpenWRT furthermore blocks bind, local, onion domains
+# see https://git.openwrt.org/?p=openwrt/openwrt.git;a=blob_plain;f=package/network/services/dnsmasq/files/rfc6761.conf;hb=HEAD
+# and https://www.iana.org/assignments/special-use-domain-names/special-use-domain-names.xhtml
+# We do not include the ".local" rule ourselves, see https://github.com/pi-hole/pi-hole/pull/4282#discussion_r689112972
+server=/bind/
+server=/onion/

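The banner in this file tells users to put local changes into a separate drop-in rather than editing generated files. As a hedged illustration (the file name 99-local-overrides.conf and the example domain and router IP are invented for this sketch; the /etc/dnsmasq.d/ location and `pihole restartdns` come from the diff itself):

```bash
# Hypothetical example: keep lookups for an internal domain on a local
# resolver instead of forwarding them upstream.
sudo tee /etc/dnsmasq.d/99-local-overrides.conf >/dev/null <<'EOF'
# Resolve *.home.lan via the router at 192.168.1.1
server=/home.lan/192.168.1.1
EOF

# Reload the resolver so the new drop-in takes effect.
pihole restartdns
```
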
@@ -329,8 +329,8 @@ get_sys_stats() {
       *) cpu_col="$COL_URG_RED";;
     esac
 
     # $COL_NC$COL_DARK_GRAY is needed for $COL_URG_RED
     cpu_temp_str=" @ $cpu_col$cpu_temp$COL_NC$COL_DARK_GRAY"
 
   elif [[ "$temp_unit" == "F" ]]; then
     cpu_temp=$(printf "%.0ff\\n" "$(calcFunc "($(< $temp_file) / 1000) * 9 / 5 + 32")")
@@ -357,7 +357,7 @@ get_sys_stats() {
   ram_used="${ram_raw[1]}"
   ram_total="${ram_raw[2]}"
 
-  if [[ "$(pihole status web 2> /dev/null)" == "1" ]]; then
+  if [[ "$(pihole status web 2> /dev/null)" -ge "1" ]]; then
     ph_status="${COL_LIGHT_GREEN}Active"
   else
     ph_status="${COL_LIGHT_RED}Offline"
@@ -445,7 +445,7 @@ get_strings() {
   lan_info="Gateway: $net_gateway"
   dhcp_info="$leased_str$ph_dhcp_num of $ph_dhcp_max"
 
  ads_info="$total_str$ads_blocked_today of $dns_queries_today"
  dns_info="$dns_count DNS servers"
 
  [[ "$recent_blocked" == "0" ]] && recent_blocked="${COL_LIGHT_RED}FTL offline${COL_NC}"
@@ -488,7 +488,7 @@ chronoFunc() {
    ${COL_LIGHT_RED}Press Ctrl-C to exit${COL_NC}
    ${COL_DARK_GRAY}$scr_line_str${COL_NC}"
   else
echo -e "[0;1;31;91m|¯[0;1;33;93m¯[0;1;32;92m¯[0;1;32;92m(¯[0;1;36;96m)[0;1;34;94m_[0;1;35;95m|[0;1;33;93m¯[0;1;31;91m|_ [0;1;32;92m__[0;1;36;96m_|[0;1;31;91m¯[0;1;34;94m|[0;1;35;95m__[0;1;31;91m_[0m$phc_ver_str\\n[0;1;33;93m| ¯[0;1;32;92m_[0;1;36;96m/¯[0;1;34;94m|[0;1;35;95m_[0;1;31;91m| [0;1;33;93m' [0;1;32;92m\\/ [0;1;36;96m_ [0;1;34;94m\\ [0;1;35;95m/ [0;1;31;91m-[0;1;33;93m_)[0m$lte_ver_str\\n[0;1;32;92m|_[0;1;36;96m| [0;1;34;94m|_[0;1;35;95m| [0;1;33;93m|_[0;1;32;92m||[0;1;36;96m_\\[0;1;34;94m__[0;1;35;95m_/[0;1;31;91m_\\[0;1;33;93m__[0;1;32;92m_|[0m$ftl_ver_str\\n ${COL_DARK_GRAY}$scr_line_str${COL_NC}"
  fi
 
  printFunc " Hostname: " "$sys_name" "$host_info"

advanced/Scripts/database_migration/gravity-db.sh (mode changed from normal to executable; +6 lines)
@@ -122,4 +122,10 @@ upgrade_gravityDB(){
     sqlite3 "${database}" < "${scriptPath}/13_to_14.sql"
     version=14
   fi
+  if [[ "$version" == "14" ]]; then
+    # Changes the vw_adlist created in 5_to_6
+    echo -e " ${INFO} Upgrading gravity database from version 14 to 15"
+    sqlite3 "${database}" < "${scriptPath}/14_to_15.sql"
+    version=15
+  fi
 }

@@ -10,4 +10,4 @@ ALTER TABLE adlist ADD COLUMN status INTEGER NOT NULL DEFAULT 0;
 
 UPDATE info SET value = 14 WHERE property = 'version';
 
 COMMIT;

advanced/Scripts/database_migration/gravity/14_to_15.sql (new file, +15 lines)
@@ -0,0 +1,15 @@
+.timeout 30000
+
+PRAGMA FOREIGN_KEYS=OFF;
+
+BEGIN TRANSACTION;
+DROP VIEW vw_adlist;
+
+CREATE VIEW vw_adlist AS SELECT DISTINCT address, id
+    FROM adlist
+    WHERE enabled = 1
+    ORDER BY id;
+
+UPDATE info SET value = 15 WHERE property = 'version';
+
+COMMIT;

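The migration above bumps the schema version recorded in the gravity database's info table and rebuilds vw_adlist. A quick way to check the result on a live install (a sketch assuming the default database location of /etc/pihole/gravity.db; the path can differ if GRAVITYDB is overridden via pihole-FTL.conf, as the list script below notes):

```bash
# Print the gravity database schema version (15 after this migration).
sudo sqlite3 /etc/pihole/gravity.db "SELECT value FROM info WHERE property = 'version';"

# Inspect the rebuilt adlist view.
sudo sqlite3 /etc/pihole/gravity.db "SELECT * FROM vw_adlist LIMIT 5;"
```
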
@@ -16,14 +16,14 @@ GRAVITYDB="${piholeDir}/gravity.db"
 # Source pihole-FTL from install script
 pihole_FTL="${piholeDir}/pihole-FTL.conf"
 if [[ -f "${pihole_FTL}" ]]; then
   source "${pihole_FTL}"
 fi
 
 # Set this only after sourcing pihole-FTL.conf as the gravity database path may
 # have changed
 gravityDBfile="${GRAVITYDB}"
 
-reload=false
+noReloadRequested=false
 addmode=true
 verbose=true
 wildcard=false
@@ -35,6 +35,7 @@ typeId=""
 comment=""
 declare -i domaincount
 domaincount=0
+reload=false
 
 colfile="/opt/pihole/COL_TABLE"
 source ${colfile}
@@ -90,7 +91,8 @@ Options:
   -q, --quiet         Make output less verbose
   -h, --help          Show this help dialog
   -l, --list          Display all your ${listname}listed domains
-  --nuke              Removes all entries in a list"
+  --nuke              Removes all entries in a list
+  --comment \"text\"    Add a comment to the domain. If adding multiple domains the same comment will be used for all"
 
   exit 0
 }
@@ -132,7 +134,7 @@ ProcessDomainList() {
     else
       RemoveDomain "${dom}"
     fi
   done
 }
 
 AddDomain() {
@@ -144,19 +146,19 @@ AddDomain() {
   requestedListname="$(GetListnameFromTypeId "${typeId}")"
 
   if [[ "${num}" -ne 0 ]]; then
     existingTypeId="$(sqlite3 "${gravityDBfile}" "SELECT type FROM domainlist WHERE domain = '${domain}';")"
     if [[ "${existingTypeId}" == "${typeId}" ]]; then
       if [[ "${verbose}" == true ]]; then
         echo -e " ${INFO} ${1} already exists in ${requestedListname}, no need to add!"
+      fi
+    else
+      existingListname="$(GetListnameFromTypeId "${existingTypeId}")"
+      sqlite3 "${gravityDBfile}" "UPDATE domainlist SET type = ${typeId} WHERE domain='${domain}';"
+      if [[ "${verbose}" == true ]]; then
+        echo -e " ${INFO} ${1} already exists in ${existingListname}, it has been moved to ${requestedListname}!"
       fi
-    else
+    return
-      existingListname="$(GetListnameFromTypeId "${existingTypeId}")"
-      sqlite3 "${gravityDBfile}" "UPDATE domainlist SET type = ${typeId} WHERE domain='${domain}';"
-      if [[ "${verbose}" == true ]]; then
-        echo -e " ${INFO} ${1} already exists in ${existingListname}, it has been moved to ${requestedListname}!"
-      fi
-    fi
-    return
   fi
 
   # Domain not found in the table, add it!
@@ -184,10 +186,10 @@ RemoveDomain() {
   requestedListname="$(GetListnameFromTypeId "${typeId}")"
 
   if [[ "${num}" -eq 0 ]]; then
    if [[ "${verbose}" == true ]]; then
      echo -e " ${INFO} ${domain} does not exist in ${requestedListname}, no need to remove!"
    fi
    return
   fi
 
   # Domain found in the table, remove it!
@@ -242,21 +244,21 @@ Displaylist() {
 
 NukeList() {
   count=$(sqlite3 "${gravityDBfile}" "SELECT COUNT(1) FROM domainlist WHERE type = ${typeId};")
   listname="$(GetListnameFromTypeId "${typeId}")"
   if [ "$count" -gt 0 ];then
     sqlite3 "${gravityDBfile}" "DELETE FROM domainlist WHERE type = ${typeId};"
     echo " ${TICK} Removed ${count} domain(s) from the ${listname}"
   else
     echo " ${INFO} ${listname} already empty. Nothing to do!"
   fi
   exit 0;
 }
 
 GetComment() {
   comment="$1"
   if [[ "${comment}" =~ [^a-zA-Z0-9_\#:/\.,\ -] ]]; then
     echo " ${CROSS} Found invalid characters in domain comment!"
     exit
   fi
 }
 
@@ -268,7 +270,7 @@ while (( "$#" )); do
     "--white-wild" | "white-wild" ) typeId=2; wildcard=true;;
     "--wild" | "wildcard" ) typeId=3; wildcard=true;;
     "--regex" | "regex" ) typeId=3;;
-    "-nr"| "--noreload" ) reload=false;;
+    "-nr"| "--noreload" ) noReloadRequested=true;;
     "-d" | "--delmode" ) addmode=false;;
     "-q" | "--quiet" ) verbose=false;;
     "-h" | "--help" ) helpFunc;;
@@ -291,9 +293,9 @@ ProcessDomainList
 
 # Used on web interface
 if $web; then
   echo "DONE"
 fi
 
-if [[ "${reload}" != false ]]; then
+if [[ ${reload} == true && ${noReloadRequested} == false ]]; then
   pihole restartdns reload-lists
 fi

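The changes above separate "a reload is needed" (reload) from the user's -nr/--noreload request (noReloadRequested), and document the --comment option in the help text. A hypothetical invocation of the whitelist front-end that exercises both options (pihole -w is assumed to route to this list script; the domains and comment text are made up for the example):

```bash
# Add two domains to the whitelist with a shared comment, skipping the
# automatic DNS reload.
pihole -w example.com www.example.com --comment "allowed for testing" --noreload

# Reload the lists later, the same way the script does when a reload is due.
pihole restartdns reload-lists
```
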
advanced/Scripts/piholeCheckout.sh (mode changed from normal to executable; +3 lines)
@@ -166,12 +166,15 @@ checkout() {
     checkout_pull_branch "${webInterfaceDir}" "${2}"
   elif [[ "${1}" == "ftl" ]] ; then
     local path
+    local oldbranch
     path="${2}/${binary}"
+    oldbranch="$(pihole-FTL -b)"
 
     if check_download_exists "$path"; then
       echo " ${TICK} Branch ${2} exists"
       echo "${2}" > /etc/pihole/ftlbranch
       chmod 644 /etc/pihole/ftlbranch
+      echo -e " ${INFO} Switching to branch: \"${2}\" from \"${oldbranch}\""
       FTLinstall "${binary}"
       restart_service pihole-FTL
       enable_service pihole-FTL

@@ -27,7 +27,7 @@ PIHOLE_COLTABLE_FILE="${PIHOLE_SCRIPTS_DIRECTORY}/COL_TABLE"
 
 # These provide the colors we need for making the log more readable
 if [[ -f ${PIHOLE_COLTABLE_FILE} ]]; then
   source ${PIHOLE_COLTABLE_FILE}
 else
   COL_NC='\e[0m' # No Color
   COL_RED='\e[1;91m'
@@ -56,11 +56,6 @@ FAQ_BAD_ADDRESS="${COL_CYAN}https://discourse.pi-hole.net/t/why-do-i-see-bad-add
 
 # Other URLs we may use
 FORUMS_URL="${COL_CYAN}https://discourse.pi-hole.net${COL_NC}"
-TRICORDER_CONTEST="${COL_CYAN}https://pi-hole.net/2016/11/07/crack-our-medical-tricorder-win-a-raspberry-pi-3/${COL_NC}"
-
-# Port numbers used for uploading the debug log
-TRICORDER_NC_PORT_NUMBER=9999
-TRICORDER_SSL_PORT_NUMBER=9998
 
 # Directories required by Pi-hole
 # https://discourse.pi-hole.net/t/what-files-does-pi-hole-use/1684
@@ -78,15 +73,12 @@ HTML_DIRECTORY="/var/www/html"
 WEB_GIT_DIRECTORY="${HTML_DIRECTORY}/admin"
 #BLOCK_PAGE_DIRECTORY="${HTML_DIRECTORY}/pihole"
 SHM_DIRECTORY="/dev/shm"
+ETC="/etc"
 
 # Files required by Pi-hole
 # https://discourse.pi-hole.net/t/what-files-does-pi-hole-use/1684
 PIHOLE_CRON_FILE="${CRON_D_DIRECTORY}/pihole"
 
-PIHOLE_DNS_CONFIG_FILE="${DNSMASQ_D_DIRECTORY}/01-pihole.conf"
-PIHOLE_DHCP_CONFIG_FILE="${DNSMASQ_D_DIRECTORY}/02-pihole-dhcp.conf"
-PIHOLE_WILDCARD_CONFIG_FILE="${DNSMASQ_D_DIRECTORY}/03-wildcard.conf"
-
 WEB_SERVER_CONFIG_FILE="${WEB_SERVER_CONFIG_DIRECTORY}/lighttpd.conf"
 WEB_SERVER_CUSTOM_CONFIG_FILE="${WEB_SERVER_CONFIG_DIRECTORY}/external.conf"
 
@@ -96,6 +88,7 @@ PIHOLE_LOCAL_HOSTS_FILE="${PIHOLE_DIRECTORY}/local.list"
 PIHOLE_LOGROTATE_FILE="${PIHOLE_DIRECTORY}/logrotate"
 PIHOLE_SETUP_VARS_FILE="${PIHOLE_DIRECTORY}/setupVars.conf"
 PIHOLE_FTL_CONF_FILE="${PIHOLE_DIRECTORY}/pihole-FTL.conf"
+PIHOLE_CUSTOM_HOSTS_FILE="${PIHOLE_DIRECTORY}/custom.list"
 
 # Read the value of an FTL config key. The value is printed to stdout.
 #
@@ -141,6 +134,9 @@ PIHOLE_FTL_LOG="$(get_ftl_conf_value "LOGFILE" "${LOG_DIRECTORY}/pihole-FTL.log"
 PIHOLE_WEB_SERVER_ACCESS_LOG_FILE="${WEB_SERVER_LOG_DIRECTORY}/access.log"
 PIHOLE_WEB_SERVER_ERROR_LOG_FILE="${WEB_SERVER_LOG_DIRECTORY}/error.log"
 
+RESOLVCONF="${ETC}/resolv.conf"
+DNSMASQ_CONF="${ETC}/dnsmasq.conf"
+
 # An array of operating system "pretty names" that we officially support
 # We can loop through the array at any time to see if it matches a value
 #SUPPORTED_OS=("Raspbian" "Ubuntu" "Fedora" "Debian" "CentOS")
@@ -165,9 +161,6 @@ PIHOLE_PROCESSES=( "lighttpd" "pihole-FTL" )
 
 # Store the required directories in an array so it can be parsed through
 REQUIRED_FILES=("${PIHOLE_CRON_FILE}"
-"${PIHOLE_DNS_CONFIG_FILE}"
-"${PIHOLE_DHCP_CONFIG_FILE}"
-"${PIHOLE_WILDCARD_CONFIG_FILE}"
 "${WEB_SERVER_CONFIG_FILE}"
 "${WEB_SERVER_CUSTOM_CONFIG_FILE}"
 "${PIHOLE_INSTALL_LOG_FILE}"
@@ -185,7 +178,10 @@ REQUIRED_FILES=("${PIHOLE_CRON_FILE}"
 "${PIHOLE_DEBUG_LOG}"
 "${PIHOLE_FTL_LOG}"
 "${PIHOLE_WEB_SERVER_ACCESS_LOG_FILE}"
-"${PIHOLE_WEB_SERVER_ERROR_LOG_FILE}")
+"${PIHOLE_WEB_SERVER_ERROR_LOG_FILE}"
+"${RESOLVCONF}"
+"${DNSMASQ_CONF}"
+"${PIHOLE_CUSTOM_HOSTS_FILE}")
 
 DISCLAIMER="This process collects information from your Pi-hole, and optionally uploads it to a unique and random directory on tricorder.pi-hole.net.
 
@@ -235,6 +231,7 @@ copy_to_debug_log() {
 }
 
 initialize_debug() {
+  local system_uptime
  # Clear the screen so the debug log is readable
  clear
  show_disclaimer
@@ -242,6 +239,10 @@ initialize_debug() {
  log_write "${COL_PURPLE}*** [ INITIALIZING ]${COL_NC}"
  # Timestamp the start of the log
  log_write "${INFO} $(date "+%Y-%m-%d:%H:%M:%S") debug log has been initialized."
+
+  # Uptime of the system
+  # credits to https://stackoverflow.com/questions/28353409/bash-format-uptime-to-show-days-hours-minutes
system_uptime=$(uptime | awk -F'( |,|:)+' '{if ($7=="min") m=$6; else {if ($7~/^day/){if ($9=="min") {d=$6;m=$8} else {d=$6;h=$8;m=$9}} else {h=$6;m=$7}}} {print d+0,"days,",h+0,"hours,",m+0,"minutes"}')
|
||||||
|
log_write "${INFO} System has been running for ${system_uptime}"
|
||||||
}
|
}
|
||||||
|
|
||||||
# This is a function for visually displaying the current test that is being run.
|
# This is a function for visually displaying the current test that is being run.
|
||||||
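
Note on the uptime formatting added in the hunk above: the awk expression splits the uptime output on runs of spaces, commas and colons, then normalizes whichever of the "X min" / "N days, h:mm" / "h:mm" forms it finds into a fixed days/hours/minutes string. A minimal sketch of the behaviour with a hypothetical uptime line (the field numbering assumes the leading space uptime normally prints):

    # Hypothetical input: " 14:32:10 up 3 days,  2:41,  1 user,  load average: 0.05, 0.03, 0.01"
    # Split on '( |,|:)+' this yields $6="3", $7="days", $8="2", $9="41",
    # so the days branch fires and the command prints: 3 days, 2 hours, 41 minutes
    uptime | awk -F'( |,|:)+' '{if ($7=="min") m=$6; else {if ($7~/^day/){if ($9=="min") {d=$6;m=$8} else {d=$6;h=$8;m=$9}} else {h=$6;m=$7}}} {print d+0,"days,",h+0,"hours,",m+0,"minutes"}'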
@@ -410,12 +411,12 @@ os_check() {
 # This function gets a list of supported OS versions from a TXT record at versions.pi-hole.net
 # and determines whether or not the script is running on one of those systems
 local remote_os_domain valid_os valid_version detected_os detected_version cmdResult digReturnCode response
-remote_os_domain="versions.pi-hole.net"
+remote_os_domain=${OS_CHECK_DOMAIN_NAME:-"versions.pi-hole.net"}
 
 detected_os=$(grep "\bID\b" /etc/os-release | cut -d '=' -f2 | tr -d '"')
 detected_version=$(grep VERSION_ID /etc/os-release | cut -d '=' -f2 | tr -d '"')
 
-cmdResult="$(dig +short -t txt ${remote_os_domain} @ns1.pi-hole.net 2>&1; echo $?)"
+cmdResult="$(dig +short -t txt "${remote_os_domain}" @ns1.pi-hole.net 2>&1; echo $?)"
 #Get the return code of the previous command (last line)
 digReturnCode="${cmdResult##*$'\n'}"
 
@@ -466,6 +467,9 @@ diagnose_operating_system() {
 # Display the current test that is running
 echo_current_diagnostic "Operating system"
 
+# If the PIHOLE_DOCKER_TAG variable is set, include this information in the debug output
+[ -n "${PIHOLE_DOCKER_TAG}" ] && log_write "${INFO} Pi-hole Docker Container: ${PIHOLE_DOCKER_TAG}"
+
 # If there is a /etc/*release file, it's probably a supported operating system, so we can
 if ls /etc/*release 1> /dev/null 2>&1; then
 # display the attributes to the user from the function made earlier
@@ -586,6 +590,27 @@ processor_check() {
 fi
 }
 
+disk_usage() {
+local file_system
+local hide
+
+echo_current_diagnostic "Disk usage"
+mapfile -t file_system < <(df -h)
+
+# Some lines of df might contain sensitive information like usernames and passwords.
+# E.g. curlftpfs filesystems (https://www.looklinux.com/mount-ftp-share-on-linux-using-curlftps/)
+# We are not interested in those lines so we collect keywords to remove them from the output
+# Additional keywords can be added, separated by "|"
+hide="curlftpfs"
+
+# only show those lines not containing a sensitive phrase
+for line in "${file_system[@]}"; do
+if [[ ! $line =~ $hide ]]; then
+log_write "   ${line}"
+fi
+done
+}
+
 parse_setup_vars() {
 echo_current_diagnostic "Setup variables"
 # If the file exists,
@@ -605,38 +630,6 @@ parse_locale() {
 parse_file "${pihole_locale}"
 }
 
-does_ip_match_setup_vars() {
-# Check for IPv4 or 6
-local protocol="${1}"
-# IP address to check for
-local ip_address="${2}"
-# See what IP is in the setupVars.conf file
-local setup_vars_ip
-setup_vars_ip=$(< ${PIHOLE_SETUP_VARS_FILE} grep IPV"${protocol}"_ADDRESS | cut -d '=' -f2)
-# If it's an IPv6 address
-if [[ "${protocol}" == "6" ]]; then
-# Strip off the / (CIDR notation)
-if [[ "${ip_address%/*}" == "${setup_vars_ip%/*}" ]]; then
-# if it matches, show it in green
-log_write "   ${COL_GREEN}${ip_address%/*}${COL_NC} matches the IP found in ${PIHOLE_SETUP_VARS_FILE}"
-else
-# otherwise show it in red with an FAQ URL
-log_write "   ${COL_RED}${ip_address%/*}${COL_NC} does not match the IP found in ${PIHOLE_SETUP_VARS_FILE} (${FAQ_ULA})"
-fi
-
-else
-# if the protocol isn't 6, it's 4 so no need to strip the CIDR notation
-# since it exists in the setupVars.conf that way
-if [[ "${ip_address}" == "${setup_vars_ip}" ]]; then
-# show in green if it matches
-log_write "   ${COL_GREEN}${ip_address}${COL_NC} matches the IP found in ${PIHOLE_SETUP_VARS_FILE}"
-else
-# otherwise show it in red
-log_write "   ${COL_RED}${ip_address}${COL_NC} does not match the IP found in ${PIHOLE_SETUP_VARS_FILE} (${FAQ_ULA})"
-fi
-fi
-}
-
 detect_ip_addresses() {
 # First argument should be a 4 or a 6
 local protocol=${1}
@@ -653,8 +646,7 @@ detect_ip_addresses() {
 log_write "${TICK} IPv${protocol} address(es) bound to the ${PIHOLE_INTERFACE} interface:"
 # Since there may be more than one IP address, store them in an array
 for i in "${!ip_addr_list[@]}"; do
-# For each one in the list, print it out
-does_ip_match_setup_vars "${protocol}" "${ip_addr_list[$i]}"
+log_write "   ${ip_addr_list[$i]}"
 done
 # Print a blank line just for formatting
 log_write ""
@@ -663,13 +655,6 @@ detect_ip_addresses() {
 log_write "${CROSS} ${COL_RED}No IPv${protocol} address(es) found on the ${PIHOLE_INTERFACE}${COL_NC} interface.\\n"
 return 1
 fi
-# If the protocol is v6
-if [[ "${protocol}" == "6" ]]; then
-# let the user know that as long as there is one green address, things should be ok
-log_write "   ^ Please note that you may have more than one IP address listed."
-log_write "   As long as one of them is green, and it matches what is in ${PIHOLE_SETUP_VARS_FILE}, there is no need for concern.\\n"
-log_write "   The link to the FAQ is for an issue that sometimes occurs when the IPv6 address changes, which is why we check for it.\\n"
-fi
 }
 
 ping_ipv4_or_ipv6() {
@@ -748,11 +733,11 @@ compare_port_to_service_assigned() {
 
 # If the service is a Pi-hole service, highlight it in green
 if [[ "${service_name}" == "${expected_service}" ]]; then
-log_write "[${COL_GREEN}${port}${COL_NC}] is in use by ${COL_GREEN}${service_name}${COL_NC}"
+log_write "${TICK} ${COL_GREEN}${port}${COL_NC} is in use by ${COL_GREEN}${service_name}${COL_NC}"
 # Otherwise,
 else
 # Show the service name in red since it's non-standard
-log_write "[${COL_RED}${port}${COL_NC}] is in use by ${COL_RED}${service_name}${COL_NC} (${FAQ_HARDWARE_REQUIREMENTS_PORTS})"
+log_write "${CROSS} ${COL_RED}${port}${COL_NC} is in use by ${COL_RED}${service_name}${COL_NC} (${FAQ_HARDWARE_REQUIREMENTS_PORTS})"
 fi
 }
 
@@ -768,36 +753,47 @@ check_required_ports() {
 # Sort the addresses and remove duplicates
 while IFS= read -r line; do
 ports_in_use+=( "$line" )
-done < <( lsof -iTCP -sTCP:LISTEN -P -n +c 10 )
+done < <( ss --listening --numeric --tcp --udp --processes --no-header )
 
 # Now that we have the values stored,
 for i in "${!ports_in_use[@]}"; do
 # loop through them and assign some local variables
 local service_name
-service_name=$(echo "${ports_in_use[$i]}" | awk '{print $1}')
+service_name=$(echo "${ports_in_use[$i]}" | awk '{gsub(/users:\(\("/,"",$7);gsub(/".*/,"",$7);print $7}')
 local protocol_type
-protocol_type=$(echo "${ports_in_use[$i]}" | awk '{print $5}')
+protocol_type=$(echo "${ports_in_use[$i]}" | awk '{print $1}')
 local port_number
-port_number="$(echo "${ports_in_use[$i]}" | awk '{print $9}')"
+port_number="$(echo "${ports_in_use[$i]}" | awk '{print $5}')" # | awk '{gsub(/^.*:/,"",$5);print $5}')
 
-# Skip the line if it's the titles of the columns the lsof command produces
-if [[ "${service_name}" == COMMAND ]]; then
-continue
-fi
 # Use a case statement to determine if the right services are using the right ports
-case "$(echo "$port_number" | rev | cut -d: -f1 | rev)" in
+case "$(echo "${port_number}" | rev | cut -d: -f1 | rev)" in
-53) compare_port_to_service_assigned "${resolver}" "${service_name}" 53
+53) compare_port_to_service_assigned "${resolver}" "${service_name}" "${protocol_type}:${port_number}"
 ;;
-80) compare_port_to_service_assigned "${web_server}" "${service_name}" 80
+80) compare_port_to_service_assigned "${web_server}" "${service_name}" "${protocol_type}:${port_number}"
 ;;
-4711) compare_port_to_service_assigned "${ftl}" "${service_name}" 4711
+4711) compare_port_to_service_assigned "${ftl}" "${service_name}" "${protocol_type}:${port_number}"
 ;;
 # If it's not a default port that Pi-hole needs, just print it out for the user to see
-*) log_write "${port_number} ${service_name} (${protocol_type})";
+*) log_write "   ${protocol_type}:${port_number} is in use by ${service_name:=<unknown>}";
 esac
 done
 }
 
+ip_command() {
+# Obtain and log information from "ip XYZ show" commands
+echo_current_diagnostic "${2}"
+local entries=()
+mapfile -t entries < <(ip "${1}" show)
+for line in "${entries[@]}"; do
+log_write "   ${line}"
+done
+}
+
+check_ip_command() {
+ip_command "addr" "Network interfaces and addresses"
+ip_command "route" "Network routing table"
+}
+
 check_networking() {
 # Runs through several of the functions made earlier; we just clump them
 # together since they are all related to the networking aspect of things
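
For context on the lsof-to-ss switch in the hunk above: with --no-header each ss line starts with the protocol, carries the local address:port in field 5 and the owning process in field 7, which is what the new awk expressions pick apart. A minimal sketch against a hypothetical output line:

    # Hypothetical line from: ss --listening --numeric --tcp --udp --processes --no-header
    line='udp   UNCONN  0  0  0.0.0.0:53  0.0.0.0:*  users:(("pihole-FTL",pid=1234,fd=10))'
    echo "${line}" | awk '{print $1}'   # protocol      -> udp
    echo "${line}" | awk '{print $5}'   # address:port  -> 0.0.0.0:53
    echo "${line}" | awk '{gsub(/users:\(\("/,"",$7);gsub(/".*/,"",$7);print $7}'   # service -> pihole-FTL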
@@ -806,7 +802,9 @@ check_networking() {
 detect_ip_addresses "6"
 ping_gateway "4"
 ping_gateway "6"
-check_required_ports
+# Skip the following check if installed in docker container. Unpriv'ed containers do not have access to the information required
+# to resolve the service name listening - and the container should not start if there was a port conflict anyway
+[ -z "${PIHOLE_DOCKER_TAG}" ] && check_required_ports
 }
 
 check_x_headers() {
@@ -922,16 +920,20 @@ dig_at() {
 # s/\/.*$//g;
 # Removes CIDR and everything thereafter (e.g., scope properties)
 addresses="$(ip address show dev "${iface}" | sed "/${sed_selector} /!d;s/^.*${sed_selector} //g;s/\/.*$//g;")"
-while IFS= read -r local_address ; do
-# Check if Pi-hole can use itself to block a domain
-if local_dig=$(dig +tries=1 +time=2 -"${protocol}" "${random_url}" @"${local_address}" +short "${record_type}"); then
-# If it can, show success
-log_write "${TICK} ${random_url} ${COL_GREEN}is ${local_dig}${COL_NC} on ${COL_CYAN}${iface}${COL_NC} (${COL_CYAN}${local_address}${COL_NC})"
-else
-# Otherwise, show a failure
-log_write "${CROSS} ${COL_RED}Failed to resolve${COL_NC} ${random_url} on ${COL_RED}${iface}${COL_NC} (${COL_RED}${local_address}${COL_NC})"
-fi
-done <<< "${addresses}"
+if [ -n "${addresses}" ]; then
+while IFS= read -r local_address ; do
+# Check if Pi-hole can use itself to block a domain
+if local_dig=$(dig +tries=1 +time=2 -"${protocol}" "${random_url}" @"${local_address}" +short "${record_type}"); then
+# If it can, show success
+log_write "${TICK} ${random_url} ${COL_GREEN}is ${local_dig}${COL_NC} on ${COL_CYAN}${iface}${COL_NC} (${COL_CYAN}${local_address}${COL_NC})"
+else
+# Otherwise, show a failure
+log_write "${CROSS} ${COL_RED}Failed to resolve${COL_NC} ${random_url} on ${COL_RED}${iface}${COL_NC} (${COL_RED}${local_address}${COL_NC})"
+fi
+done <<< "${addresses}"
+else
+log_write "${TICK} No IPv${protocol} address available on ${COL_CYAN}${iface}${COL_NC}"
+fi
 done <<< "${interfaces}"
 
 # Finally, we need to make sure legitimate queries can get out to the Internet using an external, public DNS server
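
A short illustration of the self-resolution check performed by the loop above, with hypothetical values for the interface address and the randomly chosen blocked domain:

    random_url="doubleclick.net"   # illustrative; the script picks a random gravity-blocked domain
    local_address="192.168.1.2"    # illustrative address bound to the interface under test
    dig +tries=1 +time=2 -4 "${random_url}" @"${local_address}" +short A
    # A Pi-hole answering on that address is expected to return its blocking response
    # (0.0.0.0 in the default blocking mode); an empty or timed-out reply is logged as a failure.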
@@ -1109,13 +1111,17 @@ list_files_in_dir() {
 :
 elif [[ "${dir_to_parse}" == "${SHM_DIRECTORY}" ]]; then
 # SHM file - we do not want to see the content, but we want to see the files and their sizes
-log_write "$(ls -ld "${dir_to_parse}"/"${each_file}")"
+log_write "$(ls -lhd "${dir_to_parse}"/"${each_file}")"
+elif [[ "${dir_to_parse}" == "${DNSMASQ_D_DIRECTORY}" ]]; then
+# in case of the dnsmasq directory include all files in the debug output
+log_write "\\n${COL_GREEN}$(ls -lhd "${dir_to_parse}"/"${each_file}")${COL_NC}"
+make_array_from_file "${dir_to_parse}/${each_file}"
 else
 # Then, parse the file's content into an array so each line can be analyzed if need be
 for i in "${!REQUIRED_FILES[@]}"; do
 if [[ "${dir_to_parse}/${each_file}" == "${REQUIRED_FILES[$i]}" ]]; then
 # display the filename
-log_write "\\n${COL_GREEN}$(ls -ld "${dir_to_parse}"/"${each_file}")${COL_NC}"
+log_write "\\n${COL_GREEN}$(ls -lhd "${dir_to_parse}"/"${each_file}")${COL_NC}"
 # Check if the file we want to view has a limit (because sometimes we just need a little bit of info from the file, not the entire thing)
 case "${dir_to_parse}/${each_file}" in
 # If it's Web server error log, give the first and last 25 lines
@@ -1154,6 +1160,7 @@ show_content_of_pihole_files() {
 show_content_of_files_in_dir "${WEB_SERVER_LOG_DIRECTORY}"
 show_content_of_files_in_dir "${LOG_DIRECTORY}"
 show_content_of_files_in_dir "${SHM_DIRECTORY}"
+show_content_of_files_in_dir "${ETC}"
 }
 
 head_tail_log() {
@@ -1254,11 +1261,11 @@ show_groups() {
 }
 
 show_adlists() {
-show_db_entries "Adlists" "SELECT id,CASE enabled WHEN '0' THEN ' 0' WHEN '1' THEN ' 1' ELSE enabled END enabled,GROUP_CONCAT(adlist_by_group.group_id) group_ids,address,datetime(date_added,'unixepoch','localtime') date_added,datetime(date_modified,'unixepoch','localtime') date_modified,comment FROM adlist LEFT JOIN adlist_by_group ON adlist.id = adlist_by_group.adlist_id GROUP BY id;" "4 7 12 100 19 19 50"
+show_db_entries "Adlists" "SELECT id,CASE enabled WHEN '0' THEN ' 0' WHEN '1' THEN ' 1' ELSE enabled END enabled,GROUP_CONCAT(adlist_by_group.group_id) group_ids,address,datetime(date_added,'unixepoch','localtime') date_added,datetime(date_modified,'unixepoch','localtime') date_modified,comment FROM adlist LEFT JOIN adlist_by_group ON adlist.id = adlist_by_group.adlist_id GROUP BY id;" "5 7 12 100 19 19 50"
 }
 
 show_domainlist() {
-show_db_entries "Domainlist (0/1 = exact white-/blacklist, 2/3 = regex white-/blacklist)" "SELECT id,CASE type WHEN '0' THEN '0 ' WHEN '1' THEN ' 1 ' WHEN '2' THEN ' 2 ' WHEN '3' THEN ' 3' ELSE type END type,CASE enabled WHEN '0' THEN ' 0' WHEN '1' THEN ' 1' ELSE enabled END enabled,GROUP_CONCAT(domainlist_by_group.group_id) group_ids,domain,datetime(date_added,'unixepoch','localtime') date_added,datetime(date_modified,'unixepoch','localtime') date_modified,comment FROM domainlist LEFT JOIN domainlist_by_group ON domainlist.id = domainlist_by_group.domainlist_id GROUP BY id;" "4 4 7 12 100 19 19 50"
+show_db_entries "Domainlist (0/1 = exact white-/blacklist, 2/3 = regex white-/blacklist)" "SELECT id,CASE type WHEN '0' THEN '0 ' WHEN '1' THEN ' 1 ' WHEN '2' THEN ' 2 ' WHEN '3' THEN ' 3' ELSE type END type,CASE enabled WHEN '0' THEN ' 0' WHEN '1' THEN ' 1' ELSE enabled END enabled,GROUP_CONCAT(domainlist_by_group.group_id) group_ids,domain,datetime(date_added,'unixepoch','localtime') date_added,datetime(date_modified,'unixepoch','localtime') date_modified,comment FROM domainlist LEFT JOIN domainlist_by_group ON domainlist.id = domainlist_by_group.domainlist_id GROUP BY id;" "5 4 7 12 100 19 19 50"
 }
 
 show_clients() {
@@ -1270,10 +1277,10 @@ show_messages() {
 }
 
 analyze_gravity_list() {
-echo_current_diagnostic "Gravity List and Database"
+echo_current_diagnostic "Gravity Database"
 
 local gravity_permissions
-gravity_permissions=$(ls -ld "${PIHOLE_GRAVITY_DB_FILE}")
+gravity_permissions=$(ls -lhd "${PIHOLE_GRAVITY_DB_FILE}")
 log_write "${COL_GREEN}${gravity_permissions}${COL_NC}"
 
 show_db_entries "Info table" "SELECT property,value FROM info" "20 40"
@@ -1352,7 +1359,7 @@ analyze_pihole_log() {
 OLD_IFS="$IFS"
 # Get the lines that are in the file(s) and store them in an array for parsing later
 IFS=$'\r\n'
-pihole_log_permissions=$(ls -ld "${PIHOLE_LOG}")
+pihole_log_permissions=$(ls -lhd "${PIHOLE_LOG}")
 log_write "${COL_GREEN}${pihole_log_permissions}${COL_NC}"
 mapfile -t pihole_log_head < <(head -n 20 ${PIHOLE_LOG})
 log_write "   ${COL_CYAN}-----head of $(basename ${PIHOLE_LOG})------${COL_NC}"
@@ -1366,25 +1373,18 @@ analyze_pihole_log() {
 IFS="$OLD_IFS"
 }
 
-tricorder_use_nc_or_curl() {
-# Users can submit their debug logs using nc (unencrypted) or curl (encrypted) if available
-# Check for curl first since encryption is a good thing
-if command -v curl &> /dev/null; then
-# If the command exists,
-log_write "   * Using ${COL_GREEN}curl${COL_NC} for transmission."
-# transmit the log via TLS and store the token returned in a variable
-tricorder_token=$(curl --silent --upload-file ${PIHOLE_DEBUG_LOG} https://tricorder.pi-hole.net:${TRICORDER_SSL_PORT_NUMBER})
-if [ -z "${tricorder_token}" ]; then
-# curl failed, fallback to nc
-log_write "   * ${COL_GREEN}curl${COL_NC} failed, falling back to ${COL_YELLOW}netcat${COL_NC} for transmission."
-tricorder_token=$(< ${PIHOLE_DEBUG_LOG} nc tricorder.pi-hole.net ${TRICORDER_NC_PORT_NUMBER})
+curl_to_tricorder() {
+# Users can submit their debug logs using curl (encrypted)
+log_write "   * Using ${COL_GREEN}curl${COL_NC} for transmission."
+# transmit the log via TLS and store the token returned in a variable
+tricorder_token=$(curl --silent --fail --show-error --upload-file ${PIHOLE_DEBUG_LOG} https://tricorder.pi-hole.net 2>&1)
+if [[ "${tricorder_token}" != "https://tricorder.pi-hole.net/"* ]]; then
+log_write "   * ${COL_GREEN}curl${COL_NC} failed, contact Pi-hole support for assistance."
+# Log curl error (if available)
+if [ -n "${tricorder_token}" ]; then
+log_write "   * Error message: ${COL_RED}${tricorder_token}${COL_NC}\\n"
+tricorder_token=""
 fi
-# Otherwise,
-else
-# use net cat
-log_write "${INFO} Using ${COL_YELLOW}netcat${COL_NC} for transmission."
-# Save the token returned by our server in a variable
-tricorder_token=$(< ${PIHOLE_DEBUG_LOG} nc tricorder.pi-hole.net ${TRICORDER_NC_PORT_NUMBER})
 fi
 }
 
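
A hedged sketch of the new upload path: the script treats a response that begins with https://tricorder.pi-hole.net/ as the returned token URL, and anything else as curl's error text (captured via 2>&1):

    # Upload the debug log; on success the response body is the tricorder URL/token
    tricorder_token=$(curl --silent --fail --show-error \
        --upload-file "${PIHOLE_DEBUG_LOG}" https://tricorder.pi-hole.net 2>&1)
    if [[ "${tricorder_token}" == "https://tricorder.pi-hole.net/"* ]]; then
        echo "Uploaded, token: ${tricorder_token}"
    else
        echo "Upload failed: ${tricorder_token}"
    fi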
@@ -1402,27 +1402,29 @@ upload_to_tricorder() {
 log_write "${TICK} ${COL_GREEN}** FINISHED DEBUGGING! **${COL_NC}\\n"
 
 # Provide information on what they should do with their token
 log_write "   * The debug log can be uploaded to tricorder.pi-hole.net for sharing with developers only."
-log_write "   * For more information, see: ${TRICORDER_CONTEST}"
-log_write "   * If available, we'll use openssl to upload the log, otherwise it will fall back to netcat."
-# If pihole -d is running automatically (usually through the dashboard)
+# If pihole -d is running automatically
 if [[ "${AUTOMATED}" ]]; then
 # let the user know
 log_write "${INFO} Debug script running in automated mode"
 # and then decide again which tool to use to submit it
-tricorder_use_nc_or_curl
+curl_to_tricorder
 # If we're not running in automated mode,
 else
-echo ""
-# give the user a choice of uploading it or not
-# Users can review the log file locally (or the output of the script since they are the same) and try to self-diagnose their problem
-read -r -p "[?] Would you like to upload the log? [y/N] " response
-case ${response} in
-# If they say yes, run our function for uploading the log
-[yY][eE][sS]|[yY]) tricorder_use_nc_or_curl;;
-# If they choose no, just exit out of the script
-*) log_write "   * Log will ${COL_GREEN}NOT${COL_NC} be uploaded to tricorder.\\n   * A local copy of the debug log can be found at: ${COL_CYAN}${PIHOLE_DEBUG_LOG}${COL_NC}\\n";exit;
-esac
+# if not being called from the web interface
+if [[ ! "${WEBCALL}" ]]; then
+echo ""
+# give the user a choice of uploading it or not
+# Users can review the log file locally (or the output of the script since they are the same) and try to self-diagnose their problem
+read -r -p "[?] Would you like to upload the log? [y/N] " response
+case ${response} in
+# If they say yes, run our function for uploading the log
+[yY][eE][sS]|[yY]) curl_to_tricorder;;
+# If they choose no, just exit out of the script
+*) log_write "   * Log will ${COL_GREEN}NOT${COL_NC} be uploaded to tricorder.\\n   * A local copy of the debug log can be found at: ${COL_CYAN}${PIHOLE_DEBUG_LOG}${COL_NC}\\n";exit;
+esac
+fi
 fi
 # Check if tricorder.pi-hole.net is reachable and provide token
 # along with some additional useful information
@@ -1430,20 +1432,25 @@ upload_to_tricorder() {
 # Again, try to make this visually striking so the user realizes they need to do something with this information
 # Namely, provide the Pi-hole devs with the token
 log_write ""
-log_write "${COL_PURPLE}***********************************${COL_NC}"
-log_write "${COL_PURPLE}***********************************${COL_NC}"
+log_write "${COL_PURPLE}*****************************************************************${COL_NC}"
+log_write "${COL_PURPLE}*****************************************************************${COL_NC}\\n"
 log_write "${TICK} Your debug token is: ${COL_GREEN}${tricorder_token}${COL_NC}"
-log_write "${COL_PURPLE}***********************************${COL_NC}"
-log_write "${COL_PURPLE}***********************************${COL_NC}"
+log_write "${INFO}${COL_RED} Logs are deleted 48 hours after upload.${COL_NC}\\n"
+log_write "${COL_PURPLE}*****************************************************************${COL_NC}"
+log_write "${COL_PURPLE}*****************************************************************${COL_NC}"
 log_write ""
-log_write "   * Provide the token above to the Pi-hole team for assistance at"
-log_write "   * ${FORUMS_URL}"
-log_write "   * Your log will self-destruct on our server after ${COL_RED}48 hours${COL_NC}."
+log_write "   * Provide the token above to the Pi-hole team for assistance at ${FORUMS_URL}"
 # If no token was generated
 else
 # Show an error and some help instructions
-log_write "${CROSS} ${COL_RED}There was an error uploading your debug log.${COL_NC}"
-log_write "   * Please try again or contact the Pi-hole team for assistance."
+# Skip this if being called from web interface and automatic mode was not chosen (users opt-out to upload)
+if [[ "${WEBCALL}" ]] && [[ ! "${AUTOMATED}" ]]; then
+:
+else
+log_write "${CROSS} ${COL_RED}There was an error uploading your debug log.${COL_NC}"
+log_write "   * Please try again or contact the Pi-hole team for assistance."
+fi
 fi
 # Finally, show where the log file is no matter the outcome of the function so users can look at it
 log_write "   * A local copy of the debug log can be found at: ${COL_CYAN}${PIHOLE_DEBUG_LOG}${COL_NC}\\n"
@@ -1461,6 +1468,8 @@ diagnose_operating_system
 check_selinux
 check_firewalld
 processor_check
+disk_usage
+check_ip_command
 check_networking
 check_name_resolution
 check_dhcp_servers
@@ -11,6 +11,11 @@
 colfile="/opt/pihole/COL_TABLE"
 source ${colfile}
 
+# In case we're running at the same time as a system logrotate, use a
+# separate logrotate state file to prevent stepping on each other's
+# toes.
+STATEFILE="/var/lib/logrotate/pihole"
+
 # Determine database location
 # Obtain DBFILE=... setting from pihole-FTL.db
 # Constructed to return nothing when
@@ -32,7 +37,7 @@ if [[ "$@" == *"once"* ]]; then
 # Nightly logrotation
 if command -v /usr/sbin/logrotate >/dev/null; then
 # Logrotate once
-/usr/sbin/logrotate --force /etc/pihole/logrotate
+/usr/sbin/logrotate --force --state "${STATEFILE}" /etc/pihole/logrotate
 else
 # Copy pihole.log over to pihole.log.1
 # and empty out pihole.log
@@ -47,8 +52,8 @@ else
 # Manual flushing
 if command -v /usr/sbin/logrotate >/dev/null; then
 # Logrotate twice to move all data out of sight of FTL
-/usr/sbin/logrotate --force /etc/pihole/logrotate; sleep 3
-/usr/sbin/logrotate --force /etc/pihole/logrotate
+/usr/sbin/logrotate --force --state "${STATEFILE}" /etc/pihole/logrotate; sleep 3
+/usr/sbin/logrotate --force --state "${STATEFILE}" /etc/pihole/logrotate
 else
 # Flush both pihole.log and pihole.log.1 (if existing)
 echo " " > /var/log/pihole.log
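
For reference on the --state flag added above: logrotate records when each log was last rotated in a state file, so pointing Pi-hole's forced rotations at their own state file keeps them from interfering with the distribution's nightly logrotate run (which typically uses a system-wide state file such as /var/lib/logrotate/status). A minimal usage sketch:

    # Force a rotation of Pi-hole's logs while keeping the rotation bookkeeping
    # separate from the system-wide logrotate state
    /usr/sbin/logrotate --force --state /var/lib/logrotate/pihole /etc/pihole/logrotate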
@@ -21,7 +21,7 @@ matchType="match"
 # Source pihole-FTL from install script
 pihole_FTL="${piholeDir}/pihole-FTL.conf"
 if [[ -f "${pihole_FTL}" ]]; then
 source "${pihole_FTL}"
 fi
 
 # Set this only after sourcing pihole-FTL.conf as the gravity database path may
@@ -48,7 +48,7 @@ scanList(){
 # Iterate through each regexp and check whether it matches the domainQuery
 # If it does, print the matching regexp and continue looping
 # Input 1 - regexps | Input 2 - domainQuery
 "regex" )
 for list in ${lists}; do
 if [[ "${domain}" =~ ${list} ]]; then
 printf "%b\n" "${list}";
@@ -109,15 +109,15 @@ scanDatabaseTable() {
 # behavior. The "ESCAPE '\'" clause specifies that an underscore preceded by an '\' should be matched
 # as a literal underscore character. We pretreat the $domain variable accordingly to escape underscores.
 if [[ "${table}" == "gravity" ]]; then
 case "${exact}" in
 "exact" ) querystr="SELECT gravity.domain,adlist.address,adlist.enabled FROM gravity LEFT JOIN adlist ON adlist.id = gravity.adlist_id WHERE domain = '${domain}'";;
 * ) querystr="SELECT gravity.domain,adlist.address,adlist.enabled FROM gravity LEFT JOIN adlist ON adlist.id = gravity.adlist_id WHERE domain LIKE '%${domain//_/\\_}%' ESCAPE '\\'";;
 esac
 else
 case "${exact}" in
 "exact" ) querystr="SELECT domain,enabled FROM domainlist WHERE type = '${type}' AND domain = '${domain}'";;
 * ) querystr="SELECT domain,enabled FROM domainlist WHERE type = '${type}' AND domain LIKE '%${domain//_/\\_}%' ESCAPE '\\'";;
 esac
 fi
 
 # Send prepared query to gravity database
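
The non-exact queries above rely on SQLite's LIKE ... ESCAPE so that underscores in the looked-up domain match literally instead of acting as single-character wildcards. A small illustration with a hypothetical domain (the gravity.db path shown is the usual default, not taken from this diff):

    # For domain="ad_server.example.com" the expansion ${domain//_/\\_} escapes the underscore,
    # so the prepared query contains:  domain LIKE '%ad\_server.example.com%' ESCAPE '\'
    # Without ESCAPE, the '_' would also match e.g. 'adXserver.example.com'.
    sqlite3 /etc/pihole/gravity.db \
        "SELECT domain,enabled FROM domainlist WHERE domain LIKE '%ad\_server.example.com%' ESCAPE '\';"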
@@ -128,8 +128,8 @@ scanDatabaseTable() {
 fi
 
 if [[ "${table}" == "gravity" ]]; then
 echo "${result}"
 return
 fi
 
 # Mark domain as having been white-/blacklist matched (global variable)
@@ -233,15 +233,15 @@ for result in "${results[@]}"; do
 adlistAddress="${extra/|*/}"
 extra="${extra#*|}"
 if [[ "${extra}" == "0" ]]; then
-extra="(disabled)"
+extra=" (disabled)"
 else
 extra=""
 fi
 
 if [[ -n "${blockpage}" ]]; then
 echo "0 ${adlistAddress}"
 elif [[ -n "${exact}" ]]; then
-echo "   - ${adlistAddress} ${extra}"
+echo "   - ${adlistAddress}${extra}"
 else
 if [[ ! "${adlistAddress}" == "${adlistAddress_prev:-}" ]]; then
 count=""
@@ -256,7 +256,7 @@ for result in "${results[@]}"; do
 [[ "${count}" -gt "${max_count}" ]] && continue
 echo "   ${COL_GRAY}Over ${count} results found, skipping rest of file${COL_NC}"
 else
-echo "   ${match} ${extra}"
+echo "   ${match}${extra}"
 fi
 fi
 done
@@ -35,25 +35,37 @@ source "/opt/pihole/COL_TABLE"
 
 GitCheckUpdateAvail() {
 local directory
+local curBranch
 directory="${1}"
 curdir=$PWD
 cd "${directory}" || return
 
 # Fetch latest changes in this repo
-git fetch --quiet origin
+git fetch --tags --quiet origin
 
-# @ alone is a shortcut for HEAD. Older versions of git
-# need @{0}
-LOCAL="$(git rev-parse "@{0}")"
+# Check current branch. If it is master, then check for the latest available tag instead of latest commit.
+curBranch=$(git rev-parse --abbrev-ref HEAD)
+if [[ "${curBranch}" == "master" ]]; then
+# get the latest local tag
+LOCAL=$(git describe --abbrev=0 --tags master)
+# get the latest tag from remote
+REMOTE=$(git describe --abbrev=0 --tags origin/master)
+
+else
+# @ alone is a shortcut for HEAD. Older versions of git
+# need @{0}
+LOCAL="$(git rev-parse "@{0}")"
+
+# The suffix @{upstream} to a branchname
+# (short form <branchname>@{u}) refers
+# to the branch that the branch specified
+# by branchname is set to build on top of
+# (configured with branch.<name>.remote and
+# branch.<name>.merge). A missing branchname
+# defaults to the current one.
+REMOTE="$(git rev-parse "@{upstream}")"
+fi
 
-# The suffix @{upstream} to a branchname
-# (short form <branchname>@{u}) refers
-# to the branch that the branch specified
-# by branchname is set to build on top of
-# (configured with branch.<name>.remote and
-# branch.<name>.merge). A missing branchname
-# defaults to the current one.
-REMOTE="$(git rev-parse "@{upstream}")"
 
 if [[ "${#LOCAL}" == 0 ]]; then
 echo -e "\\n ${COL_LIGHT_RED}Error: Local revision could not be obtained, please contact Pi-hole Support"
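
A hedged illustration of the tag-based comparison the master branch now uses (the repository path and the tag names are hypothetical):

    cd /etc/.pihole || exit 1                               # hypothetical checkout of the core repo
    git fetch --tags --quiet origin
    LOCAL=$(git describe --abbrev=0 --tags master)          # latest tag reachable locally, e.g. v5.3.1
    REMOTE=$(git describe --abbrev=0 --tags origin/master)  # latest tag on the remote, e.g. v5.4
    if [[ "${LOCAL}" != "${REMOTE}" ]]; then
        echo "Update available: ${LOCAL} -> ${REMOTE}"
    fi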
@@ -95,6 +107,10 @@ main() {
 # shellcheck disable=1090,2154
 source "${setupVars}"
 
+# Install packages used by this installation script (necessary if users have removed e.g. git from their systems)
+package_manager_detect
+install_dependent_packages "${INSTALLER_DEPS[@]}"
+
 # This is unlikely
 if ! is_repo "${PI_HOLE_FILES_DIR}" ; then
 echo -e "\\n ${COL_LIGHT_RED}Error: Core Pi-hole repo is missing from system!"
@@ -196,7 +212,7 @@ main() {
 
 if [[ "${FTL_update}" == true || "${core_update}" == true ]]; then
 ${PI_HOLE_FILES_DIR}/automated\ install/basic-install.sh --reconfigure --unattended || \
 echo -e "${basicError}" && exit 1
 fi
 
 if [[ "${FTL_update}" == true || "${core_update}" == true || "${web_update}" == true ]]; then
@@ -13,6 +13,10 @@ DEFAULT="-1"
 COREGITDIR="/etc/.pihole/"
 WEBGITDIR="/var/www/html/admin/"
 
+# Source the setupvars config file
+# shellcheck disable=SC1091
+source /etc/pihole/setupVars.conf
+
 getLocalVersion() {
 # FTL requires a different method
 if [[ "$1" == "FTL" ]]; then
@@ -91,10 +95,11 @@ getRemoteVersion(){
 #If the above file exists, then we can read from that. Prevents overuse of GitHub API
 if [[ -f "$cachedVersions" ]]; then
 IFS=' ' read -r -a arrCache < "$cachedVersions"
+
 case $daemon in
 "pi-hole" ) echo "${arrCache[0]}";;
-"AdminLTE" ) echo "${arrCache[1]}";;
-"FTL" ) echo "${arrCache[2]}";;
+"AdminLTE" ) [[ "${INSTALL_WEB_INTERFACE}" == true ]] && echo "${arrCache[1]}";;
+"FTL" ) [[ "${INSTALL_WEB_INTERFACE}" == true ]] && echo "${arrCache[2]}" || echo "${arrCache[1]}";;
 esac
 
 return 0
@@ -117,7 +122,7 @@ getLocalBranch(){
 local directory="${1}"
 local branch
 
 # Local FTL branch is stored in /etc/pihole/ftlbranch
 if [[ "$1" == "FTL" ]]; then
 branch="$(pihole-FTL branch)"
 else
@@ -140,6 +145,11 @@ getLocalBranch(){
 }
 
 versionOutput() {
+if [[ "$1" == "AdminLTE" && "${INSTALL_WEB_INTERFACE}" != true ]]; then
+echo "  WebAdmin not installed"
+return 1
+fi
+
 [[ "$1" == "pi-hole" ]] && GITDIR=$COREGITDIR
 [[ "$1" == "AdminLTE" ]] && GITDIR=$WEBGITDIR
 [[ "$1" == "FTL" ]] && GITDIR="FTL"
@@ -166,6 +176,7 @@ versionOutput() {
 output="Latest ${1^} hash is $latHash"
 else
 errorOutput
+return 1
 fi
 
 [[ -n "$output" ]] && echo "  $output"
@@ -177,10 +188,6 @@ errorOutput() {
 }
 
 defaultOutput() {
-# Source the setupvars config file
-# shellcheck disable=SC1091
-source /etc/pihole/setupVars.conf
-
 versionOutput "pi-hole" "$@"
 
 if [[ "${INSTALL_WEB_INTERFACE}" == true ]]; then
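
For orientation on the cache handling above: $cachedVersions is read as one space-separated line into arrCache and indexed positionally, core first, then the web interface, then FTL. A sketch with hypothetical version strings:

    # Hypothetical cache line on a system with the web interface installed
    IFS=' ' read -r -a arrCache <<< "v5.8.1 v5.10.1 v5.13"
    echo "${arrCache[0]}"   # pi-hole  -> v5.8.1
    echo "${arrCache[2]}"   # FTL      -> v5.13
    # Without the web interface the AdminLTE entry is absent from the cache,
    # which is why the FTL case now falls back to ${arrCache[1]}.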
@@ -44,7 +44,8 @@ Options:
 -e, email Set an administrative contact address for the Block Page
 -h, --help Show this help dialog
 -i, interface Specify dnsmasq's interface listening behavior
--l, privacylevel Set privacy level (0 = lowest, 3 = highest)"
+-l, privacylevel Set privacy level (0 = lowest, 3 = highest)
+-t, teleporter Backup configuration as an archive"
 exit 0
 }
 
@@ -53,7 +54,7 @@ add_setting() {
 }
 
 delete_setting() {
-sed -i "/${1}/d" "${setupVars}"
+sed -i "/^${1}/d" "${setupVars}"
 }
 
 change_setting() {
@@ -66,7 +67,7 @@ addFTLsetting() {
 }
 
 deleteFTLsetting() {
-sed -i "/${1}/d" "${FTLconf}"
+sed -i "/^${1}/d" "${FTLconf}"
 }
 
 changeFTLsetting() {
@@ -83,7 +84,7 @@ add_dnsmasq_setting() {
 }
 
 delete_dnsmasq_setting() {
-sed -i "/${1}/d" "${dnsmasqconfig}"
+sed -i "/^${1}/d" "${dnsmasqconfig}"
 }
 
 SetTemperatureUnit() {
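
A short illustration of why the ^ anchors added to these delete helpers matter: unanchored, the sed pattern removes every line that merely contains the key, which can take out related settings by accident (the file path and contents below are hypothetical):

    # /etc/dnsmasq.d/01-pihole.conf (hypothetical excerpt)
    #   interface=eth0
    #   except-interface=wlan0
    #   bind-interfaces
    sed -i "/interface/d"  /etc/dnsmasq.d/01-pihole.conf   # deletes all three lines
    sed -i "/^interface/d" /etc/dnsmasq.d/01-pihole.conf   # deletes only the line starting with "interface"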
@@ -121,14 +122,14 @@ SetWebPassword() {
 read -s -r -p "Enter New Password (Blank for no password): " PASSWORD
 echo ""
 
 if [ "${PASSWORD}" == "" ]; then
 change_setting "WEBPASSWORD" ""
 echo -e "  ${TICK} Password Removed"
 exit 0
 fi
 
 read -s -r -p "Confirm Password: " CONFIRM
 echo ""
 fi
 
 if [ "${PASSWORD}" == "${CONFIRM}" ] ; then
@@ -198,6 +199,8 @@ trust-anchor=.,20326,8,2,E06D44B80B8F1D39A95C0B0D7C65D08458E880409BBC68345710423
 # Setup interface listening behavior of dnsmasq
 delete_dnsmasq_setting "interface"
 delete_dnsmasq_setting "local-service"
+delete_dnsmasq_setting "except-interface"
+delete_dnsmasq_setting "bind-interfaces"
 
 if [[ "${DNSMASQ_LISTENING}" == "all" ]]; then
 # Listen on all interfaces, permit all origins
@@ -206,6 +209,7 @@ trust-anchor=.,20326,8,2,E06D44B80B8F1D39A95C0B0D7C65D08458E880409BBC68345710423
 # Listen only on all interfaces, but only local subnets
 add_dnsmasq_setting "local-service"
 else
+# Options "bind" and "single"
 # Listen only on one interface
 # Use eth0 as fallback interface if interface is missing in setupVars.conf
 if [ -z "${PIHOLE_INTERFACE}" ]; then
@@ -213,6 +217,11 @@ trust-anchor=.,20326,8,2,E06D44B80B8F1D39A95C0B0D7C65D08458E880409BBC68345710423
 fi
 
 add_dnsmasq_setting "interface" "${PIHOLE_INTERFACE}"
+
+if [[ "${DNSMASQ_LISTENING}" == "bind" ]]; then
+# Really bind to interface
+add_dnsmasq_setting "bind-interfaces"
+fi
 fi
 
 if [[ "${CONDITIONAL_FORWARDING}" == true ]]; then
@@ -246,8 +255,8 @@ trust-anchor=.,20326,8,2,E06D44B80B8F1D39A95C0B0D7C65D08458E880409BBC68345710423
 3 ) REV_SERVER_CIDR="${arrRev[0]}.0.0.0/8";;
 esac
 else
 # Set REV_SERVER_CIDR to whatever value it was set to
 REV_SERVER_CIDR="${CONDITIONAL_FORWARDING_REVERSE}"
 fi
 
 # If REV_SERVER_CIDR is not converted by the above, then use the REV_SERVER_TARGET variable to derive it
@@ -266,11 +275,21 @@ trust-anchor=.,20326,8,2,E06D44B80B8F1D39A95C0B0D7C65D08458E880409BBC68345710423
 delete_setting "CONDITIONAL_FORWARDING_IP"
 fi
 
+delete_dnsmasq_setting "rev-server"
+
 if [[ "${REV_SERVER}" == true ]]; then
 add_dnsmasq_setting "rev-server=${REV_SERVER_CIDR},${REV_SERVER_TARGET}"
 if [ -n "${REV_SERVER_DOMAIN}" ]; then
+# Forward local domain names to the CF target, too
 add_dnsmasq_setting "server=/${REV_SERVER_DOMAIN}/${REV_SERVER_TARGET}"
 fi
+
+if [[ "${DNS_FQDN_REQUIRED}" != true ]]; then
+# Forward unqualified names to the CF target only when the "never
+# forward non-FQDN" option is unticked
+add_dnsmasq_setting "server=//${REV_SERVER_TARGET}"
+fi
+
 fi
 
 # We need to process DHCP settings here as well to account for possible
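
A hedged sketch of the dnsmasq lines this block appends when conditional forwarding (REV_SERVER) is enabled, with hypothetical values for the CIDR, target, and local domain:

    # With REV_SERVER_CIDR=192.168.0.0/16, REV_SERVER_TARGET=192.168.0.1,
    # REV_SERVER_DOMAIN=lan and DNS_FQDN_REQUIRED unset, the settings added are:
    rev-server=192.168.0.0/16,192.168.0.1   # reverse (PTR) lookups for the LAN go to the local router
    server=/lan/192.168.0.1                 # names under the local domain go to the router
    server=//192.168.0.1                    # unqualified single-label names go to the router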
@@ -360,34 +379,34 @@ ProcessDHCPSettings() {
 source "${setupVars}"
 
 if [[ "${DHCP_ACTIVE}" == "true" ]]; then
 interface="${PIHOLE_INTERFACE}"
 
 # Use eth0 as fallback interface
 if [ -z ${interface} ]; then
 interface="eth0"
 fi
 
 if [[ "${PIHOLE_DOMAIN}" == "" ]]; then
 PIHOLE_DOMAIN="lan"
 change_setting "PIHOLE_DOMAIN" "${PIHOLE_DOMAIN}"
 fi
 
 if [[ "${DHCP_LEASETIME}" == "0" ]]; then
 leasetime="infinite"
 elif [[ "${DHCP_LEASETIME}" == "" ]]; then
 leasetime="24"
 change_setting "DHCP_LEASETIME" "${leasetime}"
 elif [[ "${DHCP_LEASETIME}" == "24h" ]]; then
 #Installation is affected by known bug, introduced in a previous version.
 #This will automatically clean up setupVars.conf and remove the unnecessary "h"
 leasetime="24"
 change_setting "DHCP_LEASETIME" "${leasetime}"
 else
 leasetime="${DHCP_LEASETIME}h"
 fi
 
 # Write settings to file
 echo "###############################################################################
 # DHCP SERVER CONFIG FILE AUTOMATICALLY POPULATED BY PI-HOLE WEB INTERFACE. #
 # ANY CHANGES MADE TO THIS FILE WILL BE LOST ON CHANGE #
 ###############################################################################
@@ -397,34 +416,34 @@ dhcp-option=option:router,${DHCP_ROUTER}
 dhcp-leasefile=/etc/pihole/dhcp.leases
 #quiet-dhcp
 " > "${dhcpconfig}"
 chmod 644 "${dhcpconfig}"
 
 if [[ "${PIHOLE_DOMAIN}" != "none" ]]; then
 echo "domain=${PIHOLE_DOMAIN}" >> "${dhcpconfig}"
 
 # When there is a Pi-hole domain set and "Never forward non-FQDNs" is
 # ticked, we add `local=/domain/` to tell FTL that this domain is purely
 # local and FTL may answer queries from /etc/hosts or DHCP but should
 # never forward queries on that domain to any upstream servers
 if [[ "${DNS_FQDN_REQUIRED}" == true ]]; then
 echo "local=/${PIHOLE_DOMAIN}/" >> "${dhcpconfig}"
+fi
 fi
-fi
 
 # Sourced from setupVars
 # shellcheck disable=SC2154
 if [[ "${DHCP_rapid_commit}" == "true" ]]; then
 echo "dhcp-rapid-commit" >> "${dhcpconfig}"
 fi
 
 if [[ "${DHCP_IPv6}" == "true" ]]; then
 echo "#quiet-dhcp6
 #enable-ra
 dhcp-option=option6:dns-server,[::]
 dhcp-range=::100,::1ff,constructor:${interface},ra-names,slaac,64,3600
 ra-param=*,0,0
 " >> "${dhcpconfig}"
 fi
 
 else
 if [[ -f "${dhcpconfig}" ]]; then
@@ -521,25 +540,6 @@ CustomizeAdLists() {
 fi
 }
 
-SetPrivacyMode() {
-if [[ "${args[2]}" == "true" ]]; then
-change_setting "API_PRIVACY_MODE" "true"
-else
-change_setting "API_PRIVACY_MODE" "false"
-fi
-}
-
-ResolutionSettings() {
-typ="${args[2]}"
-state="${args[3]}"
-
-if [[ "${typ}" == "forward" ]]; then
-change_setting "API_GET_UPSTREAM_DNS_HOSTNAME" "${state}"
-elif [[ "${typ}" == "clients" ]]; then
-change_setting "API_GET_CLIENT_HOSTNAME" "${state}"
-fi
-}
-
 AddDHCPStaticAddress() {
 mac="${args[2]}"
 ip="${args[3]}"
@@ -608,12 +608,13 @@ Example: 'pihole -a -i local'
|
|||||||
Specify dnsmasq's network interface listening behavior
|
Specify dnsmasq's network interface listening behavior
|
||||||
|
|
||||||
Interfaces:
|
Interfaces:
|
||||||
local Listen on all interfaces, but only allow queries from
|
local Only respond to queries from devices that
|
||||||
devices that are at most one hop away (local devices)
|
are at most one hop away (local devices)
|
||||||
single Listen only on ${PIHOLE_INTERFACE} interface
|
single Respond only on interface ${PIHOLE_INTERFACE}
|
||||||
|
bind Bind only on interface ${PIHOLE_INTERFACE}
|
||||||
all Listen on all interfaces, permit all origins"
|
all Listen on all interfaces, permit all origins"
|
||||||
exit 0
|
exit 0
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [[ "${args[2]}" == "all" ]]; then
|
if [[ "${args[2]}" == "all" ]]; then
|
||||||
echo -e " ${INFO} Listening on all interfaces, permitting all origins. Please use a firewall!"
|
echo -e " ${INFO} Listening on all interfaces, permitting all origins. Please use a firewall!"
|
||||||
@@ -621,6 +622,9 @@ Interfaces:
|
|||||||
elif [[ "${args[2]}" == "local" ]]; then
|
elif [[ "${args[2]}" == "local" ]]; then
|
||||||
echo -e " ${INFO} Listening on all interfaces, permitting origins from one hop away (LAN)"
|
echo -e " ${INFO} Listening on all interfaces, permitting origins from one hop away (LAN)"
|
||||||
change_setting "DNSMASQ_LISTENING" "local"
|
change_setting "DNSMASQ_LISTENING" "local"
|
||||||
|
elif [[ "${args[2]}" == "bind" ]]; then
|
||||||
|
echo -e " ${INFO} Binding on interface ${PIHOLE_INTERFACE}"
|
||||||
|
change_setting "DNSMASQ_LISTENING" "bind"
|
||||||
else
|
else
|
||||||
echo -e " ${INFO} Listening only on interface ${PIHOLE_INTERFACE}"
|
echo -e " ${INFO} Listening only on interface ${PIHOLE_INTERFACE}"
|
||||||
change_setting "DNSMASQ_LISTENING" "single"
|
change_setting "DNSMASQ_LISTENING" "single"
|
||||||
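The four listening modes only toggle DNSMASQ_LISTENING in setupVars; how that maps onto dnsmasq directives happens elsewhere and is not part of this hunk. A rough sketch of a plausible mapping, using standard dnsmasq options (the directive choices are an assumption here):
```
# Hypothetical mapping, for illustration only
case "${DNSMASQ_LISTENING}" in
    all)      : ;;                                     # listen everywhere, no restriction
    local)    echo "local-service" ;;                  # answer only clients one routing hop away
    bind)     echo "bind-interfaces"                   # bind the socket to one interface
              echo "interface=${PIHOLE_INTERFACE}" ;;
    single|*) echo "interface=${PIHOLE_INTERFACE}" ;;  # listen on one interface only
esac
```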
@@ -662,18 +666,18 @@ addAudit()
|
|||||||
domains=""
|
domains=""
|
||||||
for domain in "$@"
|
for domain in "$@"
|
||||||
do
|
do
|
||||||
# Check domain to be added. Only continue if it is valid
|
# Check domain to be added. Only continue if it is valid
|
||||||
validDomain="$(checkDomain "${domain}")"
|
validDomain="$(checkDomain "${domain}")"
|
||||||
if [[ -n "${validDomain}" ]]; then
|
if [[ -n "${validDomain}" ]]; then
|
||||||
# Put a comma between domains when there is
|
# Put a comma between domains when there is
|
||||||
# more than one domain to be added
|
# more than one domain to be added
|
||||||
# SQL INSERT allows adding multiple rows at once using the format
|
# SQL INSERT allows adding multiple rows at once using the format
|
||||||
## INSERT INTO table (domain) VALUES ('abc.de'),('fgh.ij'),('klm.no'),('pqr.st');
|
## INSERT INTO table (domain) VALUES ('abc.de'),('fgh.ij'),('klm.no'),('pqr.st');
|
||||||
if [[ -n "${domains}" ]]; then
|
if [[ -n "${domains}" ]]; then
|
||||||
domains="${domains},"
|
domains="${domains},"
|
||||||
|
fi
|
||||||
|
domains="${domains}('${domain}')"
|
||||||
fi
|
fi
|
||||||
domains="${domains}('${domain}')"
|
|
||||||
fi
|
|
||||||
done
|
done
|
||||||
# Insert only the domain here. The date_added field will be
|
# Insert only the domain here. The date_added field will be
|
||||||
# filled with its default value (date_added = current timestamp)
|
# filled with its default value (date_added = current timestamp)
|
||||||
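The loop above only assembles the VALUES list; a minimal sketch of the resulting multi-row INSERT, run directly with sqlite3 (the database path is an assumption, the domain_audit table appears in the schema shown further down):
```
# Two audited domains inserted in one statement; date_added falls back to its
# default value (current timestamp) because it is not listed explicitly.
domains="('abc.de'),('fgh.ij')"
sqlite3 /etc/pihole/gravity.db "INSERT INTO domain_audit (domain) VALUES ${domains};"
```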
@@ -698,10 +702,25 @@ AddCustomDNSAddress() {
|
|||||||
|
|
||||||
ip="${args[2]}"
|
ip="${args[2]}"
|
||||||
host="${args[3]}"
|
host="${args[3]}"
|
||||||
echo "${ip} ${host}" >> "${dnscustomfile}"
|
reload="${args[4]}"
|
||||||
|
|
||||||
# Restart dnsmasq to load new custom DNS entries
|
validHost="$(checkDomain "${host}")"
|
||||||
RestartDNS
|
if [[ -n "${validHost}" ]]; then
|
||||||
|
if valid_ip "${ip}" || valid_ip6 "${ip}" ; then
|
||||||
|
echo "${ip} ${validHost}" >> "${dnscustomfile}"
|
||||||
|
else
|
||||||
|
echo -e " ${CROSS} Invalid IP has been passed"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
echo " ${CROSS} Invalid Domain passed!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Restart dnsmasq to load new custom DNS entries only if $reload is not false
|
||||||
|
if [[ ! $reload == "false" ]]; then
|
||||||
|
RestartDNS
|
||||||
|
fi
|
||||||
}
|
}
|
||||||
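A hedged usage sketch for the validation added above, assuming the usual pihole -a entry point and an addcustomdns subcommand that maps onto args[2..4] (the subcommand name and the example values are assumptions, not shown in this hunk):
```
# Add an A record and skip the dnsmasq restart (reload flag "false")
pihole -a addcustomdns 192.168.1.10 nas.lan false
# Add another record and let FTL reload immediately
pihole -a addcustomdns 192.168.1.11 printer.lan true
```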
|
|
||||||
RemoveCustomDNSAddress() {
|
RemoveCustomDNSAddress() {
|
||||||
@@ -709,16 +728,25 @@ RemoveCustomDNSAddress() {
|
|||||||
|
|
||||||
ip="${args[2]}"
|
ip="${args[2]}"
|
||||||
host="${args[3]}"
|
host="${args[3]}"
|
||||||
|
reload="${args[4]}"
|
||||||
|
|
||||||
if valid_ip "${ip}" || valid_ip6 "${ip}" ; then
|
validHost="$(checkDomain "${host}")"
|
||||||
sed -i "/^${ip} ${host}$/d" "${dnscustomfile}"
|
if [[ -n "${validHost}" ]]; then
|
||||||
|
if valid_ip "${ip}" || valid_ip6 "${ip}" ; then
|
||||||
|
sed -i "/^${ip} ${validHost}$/d" "${dnscustomfile}"
|
||||||
|
else
|
||||||
|
echo -e " ${CROSS} Invalid IP has been passed"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
else
|
else
|
||||||
echo -e " ${CROSS} Invalid IP has been passed"
|
echo " ${CROSS} Invalid Domain passed!"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Restart dnsmasq to update removed custom DNS entries
|
# Restart dnsmasq to update removed custom DNS entries only if reload is not false
|
||||||
RestartDNS
|
if [[ ! $reload == "false" ]]; then
|
||||||
|
RestartDNS
|
||||||
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
AddCustomCNAMERecord() {
|
AddCustomCNAMERecord() {
|
||||||
@@ -726,11 +754,25 @@ AddCustomCNAMERecord() {
|
|||||||
|
|
||||||
domain="${args[2]}"
|
domain="${args[2]}"
|
||||||
target="${args[3]}"
|
target="${args[3]}"
|
||||||
|
reload="${args[4]}"
|
||||||
|
|
||||||
echo "cname=${domain},${target}" >> "${dnscustomcnamefile}"
|
validDomain="$(checkDomain "${domain}")"
|
||||||
|
if [[ -n "${validDomain}" ]]; then
|
||||||
# Restart dnsmasq to load new custom CNAME records
|
validTarget="$(checkDomain "${target}")"
|
||||||
RestartDNS
|
if [[ -n "${validTarget}" ]]; then
|
||||||
|
echo "cname=${validDomain},${validTarget}" >> "${dnscustomcnamefile}"
|
||||||
|
else
|
||||||
|
echo " ${CROSS} Invalid Target Passed!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
echo " ${CROSS} Invalid Domain passed!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
# Restart dnsmasq to load new custom CNAME records only if reload is not false
|
||||||
|
if [[ ! $reload == "false" ]]; then
|
||||||
|
RestartDNS
|
||||||
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
RemoveCustomCNAMERecord() {
|
RemoveCustomCNAMERecord() {
|
||||||
@@ -738,11 +780,12 @@ RemoveCustomCNAMERecord() {
|
|||||||
|
|
||||||
domain="${args[2]}"
|
domain="${args[2]}"
|
||||||
target="${args[3]}"
|
target="${args[3]}"
|
||||||
|
reload="${args[4]}"
|
||||||
|
|
||||||
validDomain="$(checkDomain "${domain}")"
|
validDomain="$(checkDomain "${domain}")"
|
||||||
if [[ -n "${validDomain}" ]]; then
|
if [[ -n "${validDomain}" ]]; then
|
||||||
validTarget="$(checkDomain "${target}")"
|
validTarget="$(checkDomain "${target}")"
|
||||||
if [[ -n "${validDomain}" ]]; then
|
if [[ -n "${validTarget}" ]]; then
|
||||||
sed -i "/cname=${validDomain},${validTarget}$/d" "${dnscustomcnamefile}"
|
sed -i "/cname=${validDomain},${validTarget}$/d" "${dnscustomcnamefile}"
|
||||||
else
|
else
|
||||||
echo " ${CROSS} Invalid Target Passed!"
|
echo " ${CROSS} Invalid Target Passed!"
|
||||||
@@ -753,8 +796,10 @@ RemoveCustomCNAMERecord() {
|
|||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Restart dnsmasq to update removed custom CNAME records
|
# Restart dnsmasq to update removed custom CNAME records only if $reload is not false
|
||||||
RestartDNS
|
if [[ ! $reload == "false" ]]; then
|
||||||
|
RestartDNS
|
||||||
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
main() {
|
main() {
|
||||||
@@ -777,8 +822,6 @@ main() {
|
|||||||
"layout" ) SetWebUILayout;;
|
"layout" ) SetWebUILayout;;
|
||||||
"theme" ) SetWebUITheme;;
|
"theme" ) SetWebUITheme;;
|
||||||
"-h" | "--help" ) helpFunc;;
|
"-h" | "--help" ) helpFunc;;
|
||||||
"privacymode" ) SetPrivacyMode;;
|
|
||||||
"resolve" ) ResolutionSettings;;
|
|
||||||
"addstaticdhcp" ) AddDHCPStaticAddress;;
|
"addstaticdhcp" ) AddDHCPStaticAddress;;
|
||||||
"removestaticdhcp" ) RemoveDHCPStaticAddress;;
|
"removestaticdhcp" ) RemoveDHCPStaticAddress;;
|
||||||
"-e" | "email" ) SetAdminEmail "$3";;
|
"-e" | "email" ) SetAdminEmail "$3";;
|
||||||
|
@@ -1,28 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
# Pi-hole: A black hole for Internet advertisements
|
|
||||||
# (c) 2017 Pi-hole, LLC (https://pi-hole.net)
|
|
||||||
# Network-wide ad blocking via your own hardware.
|
|
||||||
#
|
|
||||||
# Provides an automated migration subroutine to convert Pi-hole v3.x wildcard domains to Pi-hole v4.x regex filters
|
|
||||||
#
|
|
||||||
# This file is copyright under the latest version of the EUPL.
|
|
||||||
# Please see LICENSE file for your rights under this license.
|
|
||||||
|
|
||||||
# regexFile set in gravity.sh
|
|
||||||
|
|
||||||
wildcardFile="/etc/dnsmasq.d/03-pihole-wildcard.conf"
|
|
||||||
|
|
||||||
convert_wildcard_to_regex() {
|
|
||||||
if [ ! -f "${wildcardFile}" ]; then
|
|
||||||
return
|
|
||||||
fi
|
|
||||||
local addrlines domains uniquedomains
|
|
||||||
# Obtain wildcard domains from old file
|
|
||||||
addrlines="$(grep -oE "/.*/" ${wildcardFile})"
|
|
||||||
# Strip "/" from domain names and convert "." to regex-compatible "\."
|
|
||||||
domains="$(sed 's/\///g;s/\./\\./g' <<< "${addrlines}")"
|
|
||||||
# Remove repeated domains (may have been inserted two times due to A and AAAA blocking)
|
|
||||||
uniquedomains="$(uniq <<< "${domains}")"
|
|
||||||
# Automatically generate regex filters and remove old wildcards file
|
|
||||||
awk '{print "(^|\\.)"$0"$"}' <<< "${uniquedomains}" >> "${regexFile:?}" && rm "${wildcardFile}"
|
|
||||||
}
|
|
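Since this migration helper (referenced later in the gravity.sh hunk as /opt/pihole/wildcard_regex_converter.sh) is deleted on this branch, a short worked example of what it used to do; the concrete domain is illustrative:
```
# Old 03-pihole-wildcard.conf entries (one per address family, hence the uniq step):
#   address=/example.com/0.0.0.0
#   address=/example.com/::
# After stripping the slashes and escaping the dots, the awk step produces:
echo 'example\.com' | awk '{print "(^|\\.)"$0"$"}'
# -> (^|\.)example\.com$   i.e. a regex matching example.com and all of its subdomains
```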
@@ -57,7 +57,7 @@ CREATE TABLE info
|
|||||||
value TEXT NOT NULL
|
value TEXT NOT NULL
|
||||||
);
|
);
|
||||||
|
|
||||||
INSERT INTO "info" VALUES('version','14');
|
INSERT INTO "info" VALUES('version','15');
|
||||||
|
|
||||||
CREATE TABLE domain_audit
|
CREATE TABLE domain_audit
|
||||||
(
|
(
|
||||||
@@ -143,12 +143,10 @@ CREATE VIEW vw_gravity AS SELECT domain, adlist_by_group.group_id AS group_id
|
|||||||
LEFT JOIN "group" ON "group".id = adlist_by_group.group_id
|
LEFT JOIN "group" ON "group".id = adlist_by_group.group_id
|
||||||
WHERE adlist.enabled = 1 AND (adlist_by_group.group_id IS NULL OR "group".enabled = 1);
|
WHERE adlist.enabled = 1 AND (adlist_by_group.group_id IS NULL OR "group".enabled = 1);
|
||||||
|
|
||||||
CREATE VIEW vw_adlist AS SELECT DISTINCT address, adlist.id AS id
|
CREATE VIEW vw_adlist AS SELECT DISTINCT address, id
|
||||||
FROM adlist
|
FROM adlist
|
||||||
LEFT JOIN adlist_by_group ON adlist_by_group.adlist_id = adlist.id
|
WHERE enabled = 1
|
||||||
LEFT JOIN "group" ON "group".id = adlist_by_group.group_id
|
ORDER BY id;
|
||||||
WHERE adlist.enabled = 1 AND (adlist_by_group.group_id IS NULL OR "group".enabled = 1)
|
|
||||||
ORDER BY adlist.id;
|
|
||||||
|
|
||||||
CREATE TRIGGER tr_domainlist_add AFTER INSERT ON domainlist
|
CREATE TRIGGER tr_domainlist_add AFTER INSERT ON domainlist
|
||||||
BEGIN
|
BEGIN
|
||||||
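Assembled from the right-hand side of this hunk, the simplified vw_adlist definition no longer joins against the group tables. A small sketch of reading it back (the sqlite3 call and database path are assumptions for illustration):
```
# New definition from the schema template on this branch:
#   CREATE VIEW vw_adlist AS SELECT DISTINCT address, id
#       FROM adlist
#       WHERE enabled = 1
#       ORDER BY id;
# Consumers simply enumerate enabled adlists:
sqlite3 /etc/pihole/gravity.db "SELECT address, id FROM vw_adlist;"
```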
|
advanced/Templates/pihole-FTL.conf (new file, 2 lines)
@@ -0,0 +1,2 @@
|
|||||||
|
#; Pi-hole FTL config file
|
||||||
|
#; Comments should start with #; to avoid issues with PHP and bash reading this file
|
@@ -1,4 +1,4 @@
|
|||||||
#!/usr/bin/env bash
|
#!/usr/bin/env sh
|
||||||
### BEGIN INIT INFO
|
### BEGIN INIT INFO
|
||||||
# Provides: pihole-FTL
|
# Provides: pihole-FTL
|
||||||
# Required-Start: $remote_fs $syslog $network
|
# Required-Start: $remote_fs $syslog $network
|
||||||
@@ -9,11 +9,8 @@
|
|||||||
# Description: Enable service provided by pihole-FTL daemon
|
# Description: Enable service provided by pihole-FTL daemon
|
||||||
### END INIT INFO
|
### END INIT INFO
|
||||||
|
|
||||||
FTLUSER=pihole
|
|
||||||
PIDFILE=/run/pihole-FTL.pid
|
|
||||||
|
|
||||||
is_running() {
|
is_running() {
|
||||||
pgrep -o "pihole-FTL" > /dev/null 2>&1
|
pgrep -xo "pihole-FTL" > /dev/null
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@@ -23,27 +20,22 @@ start() {
|
|||||||
echo "pihole-FTL is already running"
|
echo "pihole-FTL is already running"
|
||||||
else
|
else
|
||||||
# Touch files to ensure they exist (create if non-existing, preserve if existing)
|
# Touch files to ensure they exist (create if non-existing, preserve if existing)
|
||||||
touch /var/log/pihole-FTL.log /var/log/pihole.log
|
mkdir -pm 0755 /run/pihole
|
||||||
touch /run/pihole-FTL.pid /run/pihole-FTL.port
|
touch /run/pihole-FTL.pid /run/pihole-FTL.port /var/log/pihole-FTL.log /var/log/pihole.log /etc/pihole/dhcp.leases
|
||||||
touch /etc/pihole/dhcp.leases
|
|
||||||
mkdir -p /run/pihole
|
|
||||||
mkdir -p /var/log/pihole
|
|
||||||
chown pihole:pihole /run/pihole /var/log/pihole
|
|
||||||
# Remove possible leftovers from previous pihole-FTL processes
|
|
||||||
rm -f /dev/shm/FTL-* 2> /dev/null
|
|
||||||
rm /run/pihole/FTL.sock 2> /dev/null
|
|
||||||
# Ensure that permissions are set so that pihole-FTL can edit all necessary files
|
# Ensure that permissions are set so that pihole-FTL can edit all necessary files
|
||||||
chown pihole:pihole /run/pihole-FTL.pid /run/pihole-FTL.port
|
chown pihole:pihole /run/pihole-FTL.pid /run/pihole-FTL.port /var/log/pihole-FTL.log /var/log/pihole.log /etc/pihole/dhcp.leases /run/pihole /etc/pihole
|
||||||
chown pihole:pihole /etc/pihole /etc/pihole/dhcp.leases 2> /dev/null
|
chmod 0644 /run/pihole-FTL.pid /run/pihole-FTL.port /var/log/pihole-FTL.log /var/log/pihole.log /etc/pihole/dhcp.leases
|
||||||
chown pihole:pihole /var/log/pihole-FTL.log /var/log/pihole.log
|
# Ensure that permissions are set so that pihole-FTL can edit the files. We ignore errors as the file may not (yet) exist
|
||||||
chmod 0644 /var/log/pihole-FTL.log /run/pihole-FTL.pid /run/pihole-FTL.port /var/log/pihole.log
|
chmod -f 0644 /etc/pihole/macvendor.db
|
||||||
# Chown database files to the user FTL runs as. We ignore errors as the files may not (yet) exist
|
# Chown database files to the user FTL runs as. We ignore errors as the files may not (yet) exist
|
||||||
chown pihole:pihole /etc/pihole/pihole-FTL.db /etc/pihole/gravity.db 2> /dev/null
|
chown -f pihole:pihole /etc/pihole/pihole-FTL.db /etc/pihole/gravity.db /etc/pihole/macvendor.db
|
||||||
if setcap CAP_NET_BIND_SERVICE,CAP_NET_RAW,CAP_NET_ADMIN,CAP_SYS_NICE+eip "$(which pihole-FTL)"; then
|
# Change database file permissions so that the pihole group (web interface) can edit the file. We ignore errors as the files may not (yet) exist
|
||||||
su -s /bin/sh -c "/usr/bin/pihole-FTL" "$FTLUSER"
|
chmod -f 0664 /etc/pihole/pihole-FTL.db
|
||||||
|
if setcap CAP_NET_BIND_SERVICE,CAP_NET_RAW,CAP_NET_ADMIN,CAP_SYS_NICE,CAP_IPC_LOCK,CAP_CHOWN+eip "/usr/bin/pihole-FTL"; then
|
||||||
|
su -s /bin/sh -c "/usr/bin/pihole-FTL" pihole
|
||||||
else
|
else
|
||||||
echo "Warning: Starting pihole-FTL as root because setting capabilities is not supported on this system"
|
echo "Warning: Starting pihole-FTL as root because setting capabilities is not supported on this system"
|
||||||
pihole-FTL
|
/usr/bin/pihole-FTL
|
||||||
fi
|
fi
|
||||||
echo
|
echo
|
||||||
fi
|
fi
|
||||||
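To sanity-check the capability set granted by the setcap call above, a small sketch; getcap ships with libcap and its output formatting differs slightly between versions, so the expected line is approximate:
```
getcap /usr/bin/pihole-FTL
# Roughly expected:
#   /usr/bin/pihole-FTL cap_chown,cap_net_bind_service,cap_net_admin,cap_net_raw,cap_ipc_lock,cap_sys_nice=eip
```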
@@ -52,20 +44,20 @@ start() {
|
|||||||
# Stop the service
|
# Stop the service
|
||||||
stop() {
|
stop() {
|
||||||
if is_running; then
|
if is_running; then
|
||||||
pkill -o pihole-FTL
|
pkill -xo "pihole-FTL"
|
||||||
for i in {1..5}; do
|
for i in 1 2 3 4 5; do
|
||||||
if ! is_running; then
|
if ! is_running; then
|
||||||
break
|
break
|
||||||
fi
|
fi
|
||||||
|
|
||||||
echo -n "."
|
printf "."
|
||||||
sleep 1
|
sleep 1
|
||||||
done
|
done
|
||||||
echo
|
echo
|
||||||
|
|
||||||
if is_running; then
|
if is_running; then
|
||||||
echo "Not stopped; may still be shutting down or shutdown may have failed, killing now"
|
echo "Not stopped; may still be shutting down or shutdown may have failed, killing now"
|
||||||
pkill -o -9 pihole-FTL
|
pkill -xo -9 "pihole-FTL"
|
||||||
exit 1
|
exit 1
|
||||||
else
|
else
|
||||||
echo "Stopped"
|
echo "Stopped"
|
||||||
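The move to pgrep/pkill -x matters because without it both commands match by substring; a short sketch of the difference (the helper name is made up):
```
pgrep -xo "pihole-FTL"   # -x: exact name match, -o: oldest (main) process only
pgrep -o  "pihole-FTL"   # would also match e.g. a hypothetical "pihole-FTL-wrapper"
# stop() polls is_running once per second, up to five times, before pkill -xo -9.
```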
@@ -73,6 +65,8 @@ stop() {
|
|||||||
else
|
else
|
||||||
echo "Not running"
|
echo "Not running"
|
||||||
fi
|
fi
|
||||||
|
# Cleanup
|
||||||
|
rm -f /run/pihole/FTL.sock /dev/shm/FTL-*
|
||||||
echo
|
echo
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -101,7 +95,7 @@ case "$1" in
|
|||||||
start
|
start
|
||||||
;;
|
;;
|
||||||
*)
|
*)
|
||||||
echo $"Usage: $0 {start|stop|restart|reload|status}"
|
echo "Usage: $0 {start|stop|restart|reload|status}"
|
||||||
exit 1
|
exit 1
|
||||||
esac
|
esac
|
||||||
|
|
||||||
|
@@ -26,7 +26,7 @@
|
|||||||
# parameter "quiet": don't print messages
|
# parameter "quiet": don't print messages
|
||||||
00 00 * * * root PATH="$PATH:/usr/sbin:/usr/local/bin/" pihole flush once quiet
|
00 00 * * * root PATH="$PATH:/usr/sbin:/usr/local/bin/" pihole flush once quiet
|
||||||
|
|
||||||
@reboot root /usr/sbin/logrotate /etc/pihole/logrotate
|
@reboot root /usr/sbin/logrotate --state /var/lib/logrotate/pihole /etc/pihole/logrotate
|
||||||
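The added --state flag gives the @reboot run its own logrotate status file instead of the system-wide one; a sketch of invoking the same config by hand (paths as in the crontab line above):
```
# Dry run, showing what would be rotated without touching anything
/usr/sbin/logrotate --debug --state /var/lib/logrotate/pihole /etc/pihole/logrotate
# Real run, using the same dedicated state file as the cron entry
/usr/sbin/logrotate --state /var/lib/logrotate/pihole /etc/pihole/logrotate
```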
|
|
||||||
# Pi-hole: Grab local version and branch every 10 minutes
|
# Pi-hole: Grab local version and branch every 10 minutes
|
||||||
*/10 * * * * root PATH="$PATH:/usr/sbin:/usr/local/bin/" pihole updatechecker local
|
*/10 * * * * root PATH="$PATH:/usr/sbin:/usr/local/bin/" pihole updatechecker local
|
||||||
|
@@ -73,12 +73,12 @@ if ($serverName === "pi.hole"
|
|||||||
<meta charset='utf-8'>
|
<meta charset='utf-8'>
|
||||||
$viewPort
|
$viewPort
|
||||||
<title>● $serverName</title>
|
<title>● $serverName</title>
|
||||||
<link rel='stylesheet' href='pihole/blockingpage.css'>
|
<link rel='stylesheet' href='/pihole/blockingpage.css'>
|
||||||
<link rel='shortcut icon' href='admin/img/favicons/favicon.ico' type='image/x-icon'>
|
<link rel='shortcut icon' href='/admin/img/favicons/favicon.ico' type='image/x-icon'>
|
||||||
</head>
|
</head>
|
||||||
<body id='splashpage'>
|
<body id='splashpage'>
|
||||||
<div id="pihole_card">
|
<div id="pihole_card">
|
||||||
<img src='admin/img/logo.svg' alt='Pi-hole logo' id="pihole_logo_splash" />
|
<img src='/admin/img/logo.svg' alt='Pi-hole logo' id="pihole_logo_splash" />
|
||||||
<p>Pi-<strong>hole</strong>: Your black hole for Internet advertisements</p>
|
<p>Pi-<strong>hole</strong>: Your black hole for Internet advertisements</p>
|
||||||
<a href='/admin'>Did you mean to go to the admin panel?</a>
|
<a href='/admin'>Did you mean to go to the admin panel?</a>
|
||||||
</div>
|
</div>
|
||||||
|
@@ -20,7 +20,6 @@ server.modules = (
|
|||||||
"mod_accesslog",
|
"mod_accesslog",
|
||||||
"mod_auth",
|
"mod_auth",
|
||||||
"mod_expire",
|
"mod_expire",
|
||||||
"mod_compress",
|
|
||||||
"mod_redirect",
|
"mod_redirect",
|
||||||
"mod_setenv",
|
"mod_setenv",
|
||||||
"mod_rewrite"
|
"mod_rewrite"
|
||||||
@@ -41,26 +40,6 @@ index-file.names = ( "index.php", "index.html", "index.lighttpd.html"
|
|||||||
url.access-deny = ( "~", ".inc", ".md", ".yml", ".ini" )
|
url.access-deny = ( "~", ".inc", ".md", ".yml", ".ini" )
|
||||||
static-file.exclude-extensions = ( ".php", ".pl", ".fcgi" )
|
static-file.exclude-extensions = ( ".php", ".pl", ".fcgi" )
|
||||||
|
|
||||||
compress.cache-dir = "/var/cache/lighttpd/compress/"
|
|
||||||
compress.filetype = (
|
|
||||||
"application/json",
|
|
||||||
"application/vnd.ms-fontobject",
|
|
||||||
"application/xml",
|
|
||||||
"font/eot",
|
|
||||||
"font/opentype",
|
|
||||||
"font/otf",
|
|
||||||
"font/ttf",
|
|
||||||
"image/bmp",
|
|
||||||
"image/svg+xml",
|
|
||||||
"image/vnd.microsoft.icon",
|
|
||||||
"image/x-icon",
|
|
||||||
"text/css",
|
|
||||||
"text/html",
|
|
||||||
"text/javascript",
|
|
||||||
"text/plain",
|
|
||||||
"text/xml"
|
|
||||||
)
|
|
||||||
|
|
||||||
mimetype.assign = (
|
mimetype.assign = (
|
||||||
".ico" => "image/x-icon",
|
".ico" => "image/x-icon",
|
||||||
".jpeg" => "image/jpeg",
|
".jpeg" => "image/jpeg",
|
||||||
@@ -99,11 +78,6 @@ $HTTP["url"] =~ "^/admin/" {
|
|||||||
"X-Pi-hole" => "The Pi-hole Web interface is working!",
|
"X-Pi-hole" => "The Pi-hole Web interface is working!",
|
||||||
"X-Frame-Options" => "DENY"
|
"X-Frame-Options" => "DENY"
|
||||||
)
|
)
|
||||||
|
|
||||||
$HTTP["url"] =~ "\.(eot|otf|tt[cf]|woff2?)$" {
|
|
||||||
# Allow Block Page access to local fonts
|
|
||||||
setenv.add-response-header = ( "Access-Control-Allow-Origin" => "*" )
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
# Block . files from being served, such as .git, .github, .gitignore
|
# Block . files from being served, such as .git, .github, .gitignore
|
||||||
@@ -111,5 +85,12 @@ $HTTP["url"] =~ "^/admin/\.(.*)" {
|
|||||||
url.access-deny = ("")
|
url.access-deny = ("")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# allow teleporter and API qr code iframe on settings page
|
||||||
|
$HTTP["url"] =~ "/(teleporter|api_token)\.php$" {
|
||||||
|
$HTTP["referer"] =~ "/admin/settings\.php" {
|
||||||
|
setenv.add-response-header = ( "X-Frame-Options" => "SAMEORIGIN" )
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
# Default expire header
|
# Default expire header
|
||||||
expire.url = ( "" => "access plus 0 seconds" )
|
expire.url = ( "" => "access plus 0 seconds" )
|
||||||
|
@@ -21,7 +21,6 @@ server.modules = (
|
|||||||
"mod_expire",
|
"mod_expire",
|
||||||
"mod_fastcgi",
|
"mod_fastcgi",
|
||||||
"mod_accesslog",
|
"mod_accesslog",
|
||||||
"mod_compress",
|
|
||||||
"mod_redirect",
|
"mod_redirect",
|
||||||
"mod_setenv",
|
"mod_setenv",
|
||||||
"mod_rewrite"
|
"mod_rewrite"
|
||||||
@@ -42,26 +41,6 @@ index-file.names = ( "index.php", "index.html", "index.lighttpd.html"
|
|||||||
url.access-deny = ( "~", ".inc", ".md", ".yml", ".ini" )
|
url.access-deny = ( "~", ".inc", ".md", ".yml", ".ini" )
|
||||||
static-file.exclude-extensions = ( ".php", ".pl", ".fcgi" )
|
static-file.exclude-extensions = ( ".php", ".pl", ".fcgi" )
|
||||||
|
|
||||||
compress.cache-dir = "/var/cache/lighttpd/compress/"
|
|
||||||
compress.filetype = (
|
|
||||||
"application/json",
|
|
||||||
"application/vnd.ms-fontobject",
|
|
||||||
"application/xml",
|
|
||||||
"font/eot",
|
|
||||||
"font/opentype",
|
|
||||||
"font/otf",
|
|
||||||
"font/ttf",
|
|
||||||
"image/bmp",
|
|
||||||
"image/svg+xml",
|
|
||||||
"image/vnd.microsoft.icon",
|
|
||||||
"image/x-icon",
|
|
||||||
"text/css",
|
|
||||||
"text/html",
|
|
||||||
"text/javascript",
|
|
||||||
"text/plain",
|
|
||||||
"text/xml"
|
|
||||||
)
|
|
||||||
|
|
||||||
mimetype.assign = (
|
mimetype.assign = (
|
||||||
".ico" => "image/x-icon",
|
".ico" => "image/x-icon",
|
||||||
".jpeg" => "image/jpeg",
|
".jpeg" => "image/jpeg",
|
||||||
@@ -107,11 +86,6 @@ $HTTP["url"] =~ "^/admin/" {
|
|||||||
"X-Pi-hole" => "The Pi-hole Web interface is working!",
|
"X-Pi-hole" => "The Pi-hole Web interface is working!",
|
||||||
"X-Frame-Options" => "DENY"
|
"X-Frame-Options" => "DENY"
|
||||||
)
|
)
|
||||||
|
|
||||||
$HTTP["url"] =~ "\.(eot|otf|tt[cf]|woff2?)$" {
|
|
||||||
# Allow Block Page access to local fonts
|
|
||||||
setenv.add-response-header = ( "Access-Control-Allow-Origin" => "*" )
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
# Block . files from being served, such as .git, .github, .gitignore
|
# Block . files from being served, such as .git, .github, .gitignore
|
||||||
@@ -119,5 +93,12 @@ $HTTP["url"] =~ "^/admin/\.(.*)" {
|
|||||||
url.access-deny = ("")
|
url.access-deny = ("")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# allow teleporter and API qr code iframe on settings page
|
||||||
|
$HTTP["url"] =~ "/(teleporter|api_token)\.php$" {
|
||||||
|
$HTTP["referer"] =~ "/admin/settings\.php" {
|
||||||
|
setenv.add-response-header = ( "X-Frame-Options" => "SAMEORIGIN" )
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
# Default expire header
|
# Default expire header
|
||||||
expire.url = ( "" => "access plus 0 seconds" )
|
expire.url = ( "" => "access plus 0 seconds" )
|
||||||
|
File diff suppressed because it is too large
@@ -42,8 +42,8 @@ source "${PI_HOLE_FILES_DIR}/automated install/basic-install.sh"
|
|||||||
# setupVars set in basic-install.sh
|
# setupVars set in basic-install.sh
|
||||||
source "${setupVars}"
|
source "${setupVars}"
|
||||||
|
|
||||||
# distro_check() sourced from basic-install.sh
|
# package_manager_detect() sourced from basic-install.sh
|
||||||
distro_check
|
package_manager_detect
|
||||||
|
|
||||||
# Install packages used by the Pi-hole
|
# Install packages used by the Pi-hole
|
||||||
DEPS=("${INSTALLER_DEPS[@]}" "${PIHOLE_DEPS[@]}")
|
DEPS=("${INSTALLER_DEPS[@]}" "${PIHOLE_DEPS[@]}")
|
||||||
@@ -113,7 +113,7 @@ removeNoPurge() {
|
|||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
echo -e "${OVER} ${TICK} Removed Web Interface"
|
echo -e "${OVER} ${TICK} Removed Web Interface"
|
||||||
|
|
||||||
# Attempt to preserve backwards compatibility with older versions
|
# Attempt to preserve backwards compatibility with older versions
|
||||||
# to guarantee no additional changes were made to /etc/crontab after
|
# to guarantee no additional changes were made to /etc/crontab after
|
||||||
# the installation of pihole, /etc/crontab.pihole should be permanently
|
# the installation of pihole, /etc/crontab.pihole should be permanently
|
||||||
@@ -145,6 +145,7 @@ removeNoPurge() {
|
|||||||
|
|
||||||
${SUDO} rm -f /etc/dnsmasq.d/adList.conf &> /dev/null
|
${SUDO} rm -f /etc/dnsmasq.d/adList.conf &> /dev/null
|
||||||
${SUDO} rm -f /etc/dnsmasq.d/01-pihole.conf &> /dev/null
|
${SUDO} rm -f /etc/dnsmasq.d/01-pihole.conf &> /dev/null
|
||||||
|
${SUDO} rm -f /etc/dnsmasq.d/06-rfc6761.conf &> /dev/null
|
||||||
${SUDO} rm -rf /var/log/*pihole* &> /dev/null
|
${SUDO} rm -rf /var/log/*pihole* &> /dev/null
|
||||||
${SUDO} rm -rf /etc/pihole/ &> /dev/null
|
${SUDO} rm -rf /etc/pihole/ &> /dev/null
|
||||||
${SUDO} rm -rf /etc/.pihole/ &> /dev/null
|
${SUDO} rm -rf /etc/.pihole/ &> /dev/null
|
||||||
|
gravity.sh (206 lines changed)
@@ -15,8 +15,6 @@ export LC_ALL=C
|
|||||||
|
|
||||||
coltable="/opt/pihole/COL_TABLE"
|
coltable="/opt/pihole/COL_TABLE"
|
||||||
source "${coltable}"
|
source "${coltable}"
|
||||||
regexconverter="/opt/pihole/wildcard_regex_converter.sh"
|
|
||||||
source "${regexconverter}"
|
|
||||||
# shellcheck disable=SC1091
|
# shellcheck disable=SC1091
|
||||||
source "/etc/.pihole/advanced/Scripts/database_migration/gravity-db.sh"
|
source "/etc/.pihole/advanced/Scripts/database_migration/gravity-db.sh"
|
||||||
|
|
||||||
@@ -77,7 +75,12 @@ fi
|
|||||||
|
|
||||||
# Generate new sqlite3 file from schema template
|
# Generate new sqlite3 file from schema template
|
||||||
generate_gravity_database() {
|
generate_gravity_database() {
|
||||||
sqlite3 "${1}" < "${gravityDBschema}"
|
if ! sqlite3 "${gravityDBfile}" < "${gravityDBschema}"; then
|
||||||
|
echo -e " ${CROSS} Unable to create ${gravityDBfile}"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
chown pihole:pihole "${gravityDBfile}"
|
||||||
|
chmod g+w "${piholeDir}" "${gravityDBfile}"
|
||||||
}
|
}
|
||||||
|
|
||||||
# Copy data from old to new database file and swap them
|
# Copy data from old to new database file and swap them
|
||||||
@@ -122,7 +125,7 @@ gravity_swap_databases() {
|
|||||||
gravityBlocks=$(stat --format "%b" ${gravityDBfile})
|
gravityBlocks=$(stat --format "%b" ${gravityDBfile})
|
||||||
# Only keep the old database if available disk space is at least twice the size of the existing gravity.db.
|
# Only keep the old database if available disk space is at least twice the size of the existing gravity.db.
|
||||||
# Better be safe than sorry...
|
# Better be safe than sorry...
|
||||||
if [ "${availableBlocks}" -gt "$(("${gravityBlocks}" * 2))" ] && [ -f "${gravityDBfile}" ]; then
|
if [ "${availableBlocks}" -gt "$((gravityBlocks * 2))" ] && [ -f "${gravityDBfile}" ]; then
|
||||||
echo -e " ${TICK} The old database remains available."
|
echo -e " ${TICK} The old database remains available."
|
||||||
mv "${gravityDBfile}" "${gravityOLDfile}"
|
mv "${gravityDBfile}" "${gravityOLDfile}"
|
||||||
else
|
else
|
||||||
@@ -215,7 +218,7 @@ database_table_from_file() {
|
|||||||
# Move source file to backup directory, create directory if not existing
|
# Move source file to backup directory, create directory if not existing
|
||||||
mkdir -p "${backup_path}"
|
mkdir -p "${backup_path}"
|
||||||
mv "${source}" "${backup_file}" 2> /dev/null || \
|
mv "${source}" "${backup_file}" 2> /dev/null || \
|
||||||
echo -e " ${CROSS} Unable to backup ${source} to ${backup_path}"
|
echo -e " ${CROSS} Unable to backup ${source} to ${backup_path}"
|
||||||
|
|
||||||
# Delete tmpFile
|
# Delete tmpFile
|
||||||
rm "${tmpFile}" > /dev/null 2>&1 || \
|
rm "${tmpFile}" > /dev/null 2>&1 || \
|
||||||
@@ -250,7 +253,7 @@ database_adlist_number() {
|
|||||||
return;
|
return;
|
||||||
fi
|
fi
|
||||||
|
|
||||||
output=$( { printf ".timeout 30000\\nUPDATE adlist SET number = %i, invalid_domains = %i WHERE id = %i;\\n" "${num_lines}" "${num_invalid}" "${1}" | sqlite3 "${gravityDBfile}"; } 2>&1 )
|
output=$( { printf ".timeout 30000\\nUPDATE adlist SET number = %i, invalid_domains = %i WHERE id = %i;\\n" "${num_source_lines}" "${num_invalid}" "${1}" | sqlite3 "${gravityDBfile}"; } 2>&1 )
|
||||||
status="$?"
|
status="$?"
|
||||||
|
|
||||||
if [[ "${status}" -ne 0 ]]; then
|
if [[ "${status}" -ne 0 ]]; then
|
||||||
@@ -263,9 +266,12 @@ database_adlist_number() {
|
|||||||
database_adlist_status() {
|
database_adlist_status() {
|
||||||
# Only try to set the status when this field exists in the gravity database
|
# Only try to set the status when this field exists in the gravity database
|
||||||
if ! gravity_column_exists "adlist" "status"; then
|
if ! gravity_column_exists "adlist" "status"; then
|
||||||
|
echo "Gravity status column is not found!"
|
||||||
return;
|
return;
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
echo "Gravity status column found and set to ${2} (ID ${1})"
|
||||||
|
|
||||||
output=$( { printf ".timeout 30000\\nUPDATE adlist SET status = %i WHERE id = %i;\\n" "${2}" "${1}" | sqlite3 "${gravityDBfile}"; } 2>&1 )
|
output=$( { printf ".timeout 30000\\nUPDATE adlist SET status = %i WHERE id = %i;\\n" "${2}" "${1}" | sqlite3 "${gravityDBfile}"; } 2>&1 )
|
||||||
status="$?"
|
status="$?"
|
||||||
|
|
||||||
@@ -281,7 +287,10 @@ migrate_to_database() {
|
|||||||
if [ ! -e "${gravityDBfile}" ]; then
|
if [ ! -e "${gravityDBfile}" ]; then
|
||||||
# Create new database file - note that this will be created in version 1
|
# Create new database file - note that this will be created in version 1
|
||||||
echo -e " ${INFO} Creating new gravity database"
|
echo -e " ${INFO} Creating new gravity database"
|
||||||
generate_gravity_database "${gravityDBfile}"
|
if ! generate_gravity_database; then
|
||||||
|
echo -e " ${CROSS} Error creating new gravity database. Please contact support."
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
# Check if gravity database needs to be updated
|
# Check if gravity database needs to be updated
|
||||||
upgrade_gravityDB "${gravityDBfile}" "${piholeDir}"
|
upgrade_gravityDB "${gravityDBfile}" "${piholeDir}"
|
||||||
@@ -396,14 +405,12 @@ gravity_DownloadBlocklists() {
|
|||||||
)"
|
)"
|
||||||
|
|
||||||
local str="Pulling blocklist source list into range"
|
local str="Pulling blocklist source list into range"
|
||||||
|
echo -e "${OVER} ${TICK} ${str}"
|
||||||
|
|
||||||
if [[ -n "${sources[*]}" ]] && [[ -n "${sourceDomains[*]}" ]]; then
|
if [[ -z "${sources[*]}" ]] || [[ -z "${sourceDomains[*]}" ]]; then
|
||||||
echo -e "${OVER} ${TICK} ${str}"
|
|
||||||
else
|
|
||||||
echo -e "${OVER} ${CROSS} ${str}"
|
|
||||||
echo -e " ${INFO} No source list found, or it is empty"
|
echo -e " ${INFO} No source list found, or it is empty"
|
||||||
echo ""
|
echo ""
|
||||||
return 1
|
unset sources
|
||||||
fi
|
fi
|
||||||
|
|
||||||
local url domain agent cmd_ext str target compression
|
local url domain agent cmd_ext str target compression
|
||||||
@@ -432,9 +439,9 @@ gravity_DownloadBlocklists() {
|
|||||||
compression="--compressed"
|
compression="--compressed"
|
||||||
echo -e " ${INFO} Using libz compression\n"
|
echo -e " ${INFO} Using libz compression\n"
|
||||||
else
|
else
|
||||||
compression=""
|
compression=""
|
||||||
echo -e " ${INFO} Libz compression not available\n"
|
echo -e " ${INFO} Libz compression not available\n"
|
||||||
fi
|
fi
|
||||||
# Loop through $sources and download each one
|
# Loop through $sources and download each one
|
||||||
for ((i = 0; i < "${#sources[@]}"; i++)); do
|
for ((i = 0; i < "${#sources[@]}"; i++)); do
|
||||||
url="${sources[$i]}"
|
url="${sources[$i]}"
|
||||||
@@ -464,9 +471,9 @@ gravity_DownloadBlocklists() {
|
|||||||
check_url="$( sed -re 's#([^:/]*://)?([^/]+)@#\1\2#' <<< "$url" )"
|
check_url="$( sed -re 's#([^:/]*://)?([^/]+)@#\1\2#' <<< "$url" )"
|
||||||
|
|
||||||
if [[ "${check_url}" =~ ${regex} ]]; then
|
if [[ "${check_url}" =~ ${regex} ]]; then
|
||||||
echo -e " ${CROSS} Invalid Target"
|
echo -e " ${CROSS} Invalid Target"
|
||||||
else
|
else
|
||||||
gravity_DownloadBlocklistFromUrl "${url}" "${cmd_ext}" "${agent}" "${sourceIDs[$i]}" "${saveLocation}" "${target}" "${compression}"
|
gravity_DownloadBlocklistFromUrl "${url}" "${cmd_ext}" "${agent}" "${sourceIDs[$i]}" "${saveLocation}" "${target}" "${compression}"
|
||||||
fi
|
fi
|
||||||
echo ""
|
echo ""
|
||||||
done
|
done
|
||||||
@@ -505,8 +512,9 @@ gravity_DownloadBlocklists() {
|
|||||||
gravity_Blackbody=true
|
gravity_Blackbody=true
|
||||||
}
|
}
|
||||||
|
|
||||||
total_num=0
|
# num_target_lines increases for every correctly added domain in parseList()
|
||||||
num_lines=0
|
num_target_lines=0
|
||||||
|
num_source_lines=0
|
||||||
num_invalid=0
|
num_invalid=0
|
||||||
parseList() {
|
parseList() {
|
||||||
local adlistID="${1}" src="${2}" target="${3}" incorrect_lines
|
local adlistID="${1}" src="${2}" target="${3}" incorrect_lines
|
||||||
@@ -518,18 +526,20 @@ parseList() {
|
|||||||
# Find (up to) five domains containing invalid characters (see above)
|
# Find (up to) five domains containing invalid characters (see above)
|
||||||
incorrect_lines="$(sed -e "/[^a-zA-Z0-9.\_-]/!d" "${src}" | head -n 5)"
|
incorrect_lines="$(sed -e "/[^a-zA-Z0-9.\_-]/!d" "${src}" | head -n 5)"
|
||||||
|
|
||||||
local num_target_lines num_correct_lines num_invalid
|
local num_target_lines_new num_correct_lines
|
||||||
# Get number of lines in source file
|
# Get number of lines in source file
|
||||||
num_lines="$(grep -c "^" "${src}")"
|
num_source_lines="$(grep -c "^" "${src}")"
|
||||||
# Get number of lines in destination file
|
# Get the new number of lines in destination file
|
||||||
num_target_lines="$(grep -c "^" "${target}")"
|
num_target_lines_new="$(grep -c "^" "${target}")"
|
||||||
num_correct_lines="$(( num_target_lines-total_num ))"
|
# Number of new correctly added lines
|
||||||
total_num="$num_target_lines"
|
num_correct_lines="$(( num_target_lines_new-num_target_lines ))"
|
||||||
num_invalid="$(( num_lines-num_correct_lines ))"
|
# Update number of lines in target file
|
||||||
|
num_target_lines="$num_target_lines_new"
|
||||||
|
num_invalid="$(( num_source_lines-num_correct_lines ))"
|
||||||
if [[ "${num_invalid}" -eq 0 ]]; then
|
if [[ "${num_invalid}" -eq 0 ]]; then
|
||||||
echo " ${INFO} Analyzed ${num_lines} domains"
|
echo " ${INFO} Analyzed ${num_source_lines} domains"
|
||||||
else
|
else
|
||||||
echo " ${INFO} Analyzed ${num_lines} domains, ${num_invalid} domains invalid!"
|
echo " ${INFO} Analyzed ${num_source_lines} domains, ${num_invalid} domains invalid!"
|
||||||
fi
|
fi
|
||||||
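A worked example of the counters above, with assumed numbers:
```
# Illustrative values only
num_source_lines=1000        # lines in the downloaded source list
num_target_lines=250000      # gravity target size before parsing this list
num_target_lines_new=250990  # gravity target size after parsing this list
num_correct_lines=$(( num_target_lines_new - num_target_lines ))  # 990 added
num_invalid=$(( num_source_lines - num_correct_lines ))           # 10 rejected
# -> " Analyzed 1000 domains, 10 domains invalid!"
```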
|
|
||||||
# Display sample of invalid lines if we found some
|
# Display sample of invalid lines if we found some
|
||||||
@@ -585,28 +595,32 @@ gravity_DownloadBlocklistFromUrl() {
|
|||||||
blocked=false
|
blocked=false
|
||||||
case $BLOCKINGMODE in
|
case $BLOCKINGMODE in
|
||||||
"IP-NODATA-AAAA"|"IP")
|
"IP-NODATA-AAAA"|"IP")
|
||||||
# Get IP address of this domain
|
# Get IP address of this domain
|
||||||
ip="$(dig "${domain}" +short)"
|
ip="$(dig "${domain}" +short)"
|
||||||
# Check if this IP matches any IP of the system
|
# Check if this IP matches any IP of the system
|
||||||
if [[ -n "${ip}" && $(grep -Ec "inet(|6) ${ip}" <<< "$(ip a)") -gt 0 ]]; then
|
if [[ -n "${ip}" && $(grep -Ec "inet(|6) ${ip}" <<< "$(ip a)") -gt 0 ]]; then
|
||||||
blocked=true
|
blocked=true
|
||||||
fi;;
|
fi;;
|
||||||
"NXDOMAIN")
|
"NXDOMAIN")
|
||||||
if [[ $(dig "${domain}" | grep "NXDOMAIN" -c) -ge 1 ]]; then
|
if [[ $(dig "${domain}" | grep "NXDOMAIN" -c) -ge 1 ]]; then
|
||||||
blocked=true
|
blocked=true
|
||||||
fi;;
|
fi;;
|
||||||
|
"NODATA")
|
||||||
|
if [[ $(dig "${domain}" | grep "NOERROR" -c) -ge 1 ]] && [[ -z $(dig +short "${domain}") ]]; then
|
||||||
|
blocked=true
|
||||||
|
fi;;
|
||||||
"NULL"|*)
|
"NULL"|*)
|
||||||
if [[ $(dig "${domain}" +short | grep "0.0.0.0" -c) -ge 1 ]]; then
|
if [[ $(dig "${domain}" +short | grep "0.0.0.0" -c) -ge 1 ]]; then
|
||||||
blocked=true
|
blocked=true
|
||||||
fi;;
|
fi;;
|
||||||
esac
|
esac
|
||||||
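A hedged sketch of exercising each detection branch by hand against a domain expected to be blocked (the domain is only an example):
```
domain="doubleclick.net"              # assumed to be on a blocklist
dig "${domain}" +short                # IP / NULL modes: expect 0.0.0.0 or one of the host's own IPs
dig "${domain}" | grep -c "NXDOMAIN"  # NXDOMAIN mode: expect a count >= 1
dig "${domain}" | grep -c "NOERROR"   # NODATA mode: expect NOERROR >= 1 ...
dig "${domain}" +short                # ... combined with an empty answer section
```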
|
|
||||||
if [[ "${blocked}" == true ]]; then
|
if [[ "${blocked}" == true ]]; then
|
||||||
printf -v ip_addr "%s" "${PIHOLE_DNS_1%#*}"
|
printf -v ip_addr "%s" "${PIHOLE_DNS_1%#*}"
|
||||||
if [[ ${PIHOLE_DNS_1} != *"#"* ]]; then
|
if [[ ${PIHOLE_DNS_1} != *"#"* ]]; then
|
||||||
port=53
|
port=53
|
||||||
else
|
else
|
||||||
printf -v port "%s" "${PIHOLE_DNS_1#*#}"
|
printf -v port "%s" "${PIHOLE_DNS_1#*#}"
|
||||||
fi
|
fi
|
||||||
ip=$(dig "@${ip_addr}" -p "${port}" +short "${domain}" | tail -1)
|
ip=$(dig "@${ip_addr}" -p "${port}" +short "${domain}" | tail -1)
|
||||||
if [[ $(echo "${url}" | awk -F '://' '{print $1}') = "https" ]]; then
|
if [[ $(echo "${url}" | awk -F '://' '{print $1}') = "https" ]]; then
|
||||||
@@ -625,11 +639,11 @@ gravity_DownloadBlocklistFromUrl() {
|
|||||||
case $url in
|
case $url in
|
||||||
# Did we "download" a local file?
|
# Did we "download" a local file?
|
||||||
"file"*)
|
"file"*)
|
||||||
if [[ -s "${patternBuffer}" ]]; then
|
if [[ -s "${patternBuffer}" ]]; then
|
||||||
echo -e "${OVER} ${TICK} ${str} Retrieval successful"; success=true
|
echo -e "${OVER} ${TICK} ${str} Retrieval successful"; success=true
|
||||||
else
|
else
|
||||||
echo -e "${OVER} ${CROSS} ${str} Not found / empty list"
|
echo -e "${OVER} ${CROSS} ${str} Not found / empty list"
|
||||||
fi;;
|
fi;;
|
||||||
# Did we "download" a remote file?
|
# Did we "download" a remote file?
|
||||||
*)
|
*)
|
||||||
# Determine "Status:" output based on HTTP response
|
# Determine "Status:" output based on HTTP response
|
||||||
@@ -688,7 +702,7 @@ gravity_DownloadBlocklistFromUrl() {
|
|||||||
else
|
else
|
||||||
echo -e " ${CROSS} List download failed: ${COL_LIGHT_RED}no cached list available${COL_NC}"
|
echo -e " ${CROSS} List download failed: ${COL_LIGHT_RED}no cached list available${COL_NC}"
|
||||||
# Manually reset these two numbers because we do not call parseList here
|
# Manually reset these two numbers because we do not call parseList here
|
||||||
num_lines=0
|
num_source_lines=0
|
||||||
num_invalid=0
|
num_invalid=0
|
||||||
database_adlist_number "${adlistID}"
|
database_adlist_number "${adlistID}"
|
||||||
database_adlist_status "${adlistID}" "4"
|
database_adlist_status "${adlistID}" "4"
|
||||||
@@ -847,6 +861,49 @@ gravity_Cleanup() {
|
|||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
|
database_recovery() {
|
||||||
|
local result
|
||||||
|
local str="Checking integrity of existing gravity database"
|
||||||
|
local option="${1}"
|
||||||
|
echo -ne " ${INFO} ${str}..."
|
||||||
|
if result="$(pihole-FTL sqlite3 "${gravityDBfile}" "PRAGMA integrity_check" 2>&1)"; then
|
||||||
|
echo -e "${OVER} ${TICK} ${str} - no errors found"
|
||||||
|
|
||||||
|
str="Checking foreign keys of existing gravity database"
|
||||||
|
echo -ne " ${INFO} ${str}..."
|
||||||
|
if result="$(pihole-FTL sqlite3 "${gravityDBfile}" "PRAGMA foreign_key_check" 2>&1)"; then
|
||||||
|
echo -e "${OVER} ${TICK} ${str} - no errors found"
|
||||||
|
if [[ "${option}" != "force" ]]; then
|
||||||
|
return
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
echo -e "${OVER} ${CROSS} ${str} - errors found:"
|
||||||
|
while IFS= read -r line ; do echo " - $line"; done <<< "$result"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
echo -e "${OVER} ${CROSS} ${str} - errors found:"
|
||||||
|
while IFS= read -r line ; do echo " - $line"; done <<< "$result"
|
||||||
|
fi
|
||||||
|
|
||||||
|
str="Trying to recover existing gravity database"
|
||||||
|
echo -ne " ${INFO} ${str}..."
|
||||||
|
# We have to remove any possibly existing recovery database or this will fail
|
||||||
|
rm -f "${gravityDBfile}.recovered" > /dev/null 2>&1
|
||||||
|
if result="$(pihole-FTL sqlite3 "${gravityDBfile}" ".recover" | pihole-FTL sqlite3 "${gravityDBfile}.recovered" 2>&1)"; then
|
||||||
|
echo -e "${OVER} ${TICK} ${str} - success"
|
||||||
|
mv "${gravityDBfile}" "${gravityDBfile}.old"
|
||||||
|
mv "${gravityDBfile}.recovered" "${gravityDBfile}"
|
||||||
|
echo -ne " ${INFO} ${gravityDBfile} has been recovered"
|
||||||
|
echo -ne " ${INFO} The old ${gravityDBfile} has been moved to ${gravityDBfile}.old"
|
||||||
|
else
|
||||||
|
echo -e "${OVER} ${CROSS} ${str} - the following errors happened:"
|
||||||
|
while IFS= read -r line ; do echo " - $line"; done <<< "$result"
|
||||||
|
echo -e " ${CROSS} Recovery failed. Try \"pihole -r recreate\" instead."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
echo ""
|
||||||
|
}
|
||||||
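The core of the recovery path, pulled out as a standalone sketch; the database path matches gravityDBfile's usual value and this assumes the bundled pihole-FTL sqlite3 understands the .recover dot-command:
```
gravityDBfile="/etc/pihole/gravity.db"   # assumed path
rm -f "${gravityDBfile}.recovered"
# Dump whatever sqlite3 can still read and replay it into a fresh database
pihole-FTL sqlite3 "${gravityDBfile}" ".recover" \
    | pihole-FTL sqlite3 "${gravityDBfile}.recovered"
mv "${gravityDBfile}" "${gravityDBfile}.old"
mv "${gravityDBfile}.recovered" "${gravityDBfile}"
```
From the command line this path is reached as `pihole -g -r recover` (or `recover force`), per the repairSelector help text further down.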
|
|
||||||
helpFunc() {
|
helpFunc() {
|
||||||
echo "Usage: pihole -g
|
echo "Usage: pihole -g
|
||||||
Update domains from blocklists specified in adlists.list
|
Update domains from blocklists specified in adlists.list
|
||||||
@@ -857,10 +914,37 @@ Options:
|
|||||||
exit 0
|
exit 0
|
||||||
}
|
}
|
||||||
|
|
||||||
|
repairSelector() {
|
||||||
|
case "$1" in
|
||||||
|
"recover") recover_database=true;;
|
||||||
|
"recreate") recreate_database=true;;
|
||||||
|
*) echo "Usage: pihole -g -r {recover,recreate}
|
||||||
|
Attempt to repair gravity database
|
||||||
|
|
||||||
|
Available options:
|
||||||
|
pihole -g -r recover Try to recover a damaged gravity database file.
|
||||||
|
Pi-hole tries to restore as much as possible
|
||||||
|
from a corrupted gravity database.
|
||||||
|
|
||||||
|
pihole -g -r recover force Pi-hole will run the recovery process even when
|
||||||
|
no damage is detected. This option is meant to be
|
||||||
|
a last resort. Recovery is a fragile task
|
||||||
|
consuming a lot of resources and shouldn't be
|
||||||
|
performed unnecessarily.
|
||||||
|
|
||||||
|
pihole -g -r recreate Create a new gravity database file from scratch.
|
||||||
|
This will remove your existing gravity database
|
||||||
|
and create a new file from scratch. If you still
|
||||||
|
have the migration backup created when migrating
|
||||||
|
to Pi-hole v5.0, Pi-hole will import these files."
|
||||||
|
exit 0;;
|
||||||
|
esac
|
||||||
|
}
|
||||||
|
|
||||||
for var in "$@"; do
|
for var in "$@"; do
|
||||||
case "${var}" in
|
case "${var}" in
|
||||||
"-f" | "--force" ) forceDelete=true;;
|
"-f" | "--force" ) forceDelete=true;;
|
||||||
"-r" | "--recreate" ) recreate_database=true;;
|
"-r" | "--repair" ) repairSelector "$3";;
|
||||||
"-h" | "--help" ) helpFunc;;
|
"-h" | "--help" ) helpFunc;;
|
||||||
esac
|
esac
|
||||||
done
|
done
|
||||||
@@ -874,7 +958,7 @@ fi
|
|||||||
gravity_Trap
|
gravity_Trap
|
||||||
|
|
||||||
if [[ "${recreate_database:-}" == true ]]; then
|
if [[ "${recreate_database:-}" == true ]]; then
|
||||||
str="Restoring from migration backup"
|
str="Recreating gravity database from migration backup"
|
||||||
echo -ne "${INFO} ${str}..."
|
echo -ne "${INFO} ${str}..."
|
||||||
rm "${gravityDBfile}"
|
rm "${gravityDBfile}"
|
||||||
pushd "${piholeDir}" > /dev/null || exit
|
pushd "${piholeDir}" > /dev/null || exit
|
||||||
@@ -883,8 +967,15 @@ if [[ "${recreate_database:-}" == true ]]; then
|
|||||||
echo -e "${OVER} ${TICK} ${str}"
|
echo -e "${OVER} ${TICK} ${str}"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
if [[ "${recover_database:-}" == true ]]; then
|
||||||
|
database_recovery "$4"
|
||||||
|
fi
|
||||||
|
|
||||||
# Move possibly existing legacy files to the gravity database
|
# Move possibly existing legacy files to the gravity database
|
||||||
migrate_to_database
|
if ! migrate_to_database; then
|
||||||
|
echo -e " ${CROSS} Unable to migrate to database. Please contact support."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
if [[ "${forceDelete:-}" == true ]]; then
|
if [[ "${forceDelete:-}" == true ]]; then
|
||||||
str="Deleting existing list cache"
|
str="Deleting existing list cache"
|
||||||
@@ -895,14 +986,21 @@ if [[ "${forceDelete:-}" == true ]]; then
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
# Gravity downloads blocklists next
|
# Gravity downloads blocklists next
|
||||||
gravity_CheckDNSResolutionAvailable
|
if ! gravity_CheckDNSResolutionAvailable; then
|
||||||
|
echo -e " ${CROSS} Can not complete gravity update, no DNS is available. Please contact support."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
gravity_DownloadBlocklists
|
gravity_DownloadBlocklists
|
||||||
|
|
||||||
# Create local.list
|
# Create local.list
|
||||||
gravity_generateLocalList
|
gravity_generateLocalList
|
||||||
|
|
||||||
# Migrate rest of the data from old to new database
|
# Migrate rest of the data from old to new database
|
||||||
gravity_swap_databases
|
if ! gravity_swap_databases; then
|
||||||
|
echo -e " ${CROSS} Unable to create database. Please contact support."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
# Update gravity timestamp
|
# Update gravity timestamp
|
||||||
update_gravity_timestamp
|
update_gravity_timestamp
|
||||||
|
pihole (97 lines changed)
@@ -71,8 +71,7 @@ reconfigurePiholeFunc() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
updateGravityFunc() {
|
updateGravityFunc() {
|
||||||
"${PI_HOLE_SCRIPT_DIR}"/gravity.sh "$@"
|
exec "${PI_HOLE_SCRIPT_DIR}"/gravity.sh "$@"
|
||||||
exit $?
|
|
||||||
}
|
}
|
||||||
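Switching from call-then-exit to exec hands the whole process over to gravity.sh, so its exit status reaches the caller without an explicit exit $?; a tiny illustration with made-up script names:
```
# Before: run the child, then exit with its status
./child.sh "$@"
exit $?

# After: replace the current shell image; nothing after this line ever runs
exec ./child.sh "$@"
```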
|
|
||||||
queryFunc() {
|
queryFunc() {
|
||||||
@@ -95,8 +94,7 @@ uninstallFunc() {
|
|||||||
|
|
||||||
versionFunc() {
|
versionFunc() {
|
||||||
shift
|
shift
|
||||||
"${PI_HOLE_SCRIPT_DIR}"/version.sh "$@"
|
exec "${PI_HOLE_SCRIPT_DIR}"/version.sh "$@"
|
||||||
exit 0
|
|
||||||
}
|
}
|
||||||
|
|
||||||
# Get PID of main pihole-FTL process
|
# Get PID of main pihole-FTL process
|
||||||
@@ -225,8 +223,7 @@ Time:
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
local str="Pi-hole Disabled"
|
local str="Pi-hole Disabled"
|
||||||
sed -i "/BLOCKING_ENABLED=/d" "${setupVars}"
|
sed -i "s/^BLOCKING_ENABLED=true/BLOCKING_ENABLED=false/" "${setupVars}"
|
||||||
echo "BLOCKING_ENABLED=false" >> "${setupVars}"
|
|
||||||
fi
|
fi
|
||||||
else
|
else
|
||||||
# Enable Pi-hole
|
# Enable Pi-hole
|
||||||
@@ -238,11 +235,10 @@ Time:
|
|||||||
echo -e " ${INFO} Enabling blocking"
|
echo -e " ${INFO} Enabling blocking"
|
||||||
local str="Pi-hole Enabled"
|
local str="Pi-hole Enabled"
|
||||||
|
|
||||||
sed -i "/BLOCKING_ENABLED=/d" "${setupVars}"
|
sed -i "s/^BLOCKING_ENABLED=false/BLOCKING_ENABLED=true/" "${setupVars}"
|
||||||
echo "BLOCKING_ENABLED=true" >> "${setupVars}"
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
restartDNS reload
|
restartDNS reload-lists
|
||||||
|
|
||||||
echo -e "${OVER} ${TICK} ${str}"
|
echo -e "${OVER} ${TICK} ${str}"
|
||||||
}
|
}
|
||||||
@@ -285,27 +281,29 @@ Options:
|
|||||||
}
|
}
|
||||||
|
|
||||||
analyze_ports() {
|
analyze_ports() {
|
||||||
|
local lv4 lv6 port=${1}
|
||||||
# FTL is listening on at least one port when this
|
# FTL is listening on at least one port when this
|
||||||
# function is getting called
|
# function is getting called
|
||||||
echo -e " ${TICK} DNS service is listening"
|
|
||||||
# Check individual address family/protocol combinations
|
# Check individual address family/protocol combinations
|
||||||
# For a healthy Pi-hole, they should all be up (nothing printed)
|
# For a healthy Pi-hole, they should all be up (nothing printed)
|
||||||
if grep -q "IPv4.*UDP" <<< "${1}"; then
|
lv4="$(ss --ipv4 --listening --numeric --tcp --udp src :${port})"
|
||||||
|
if grep -q "udp " <<< "${lv4}"; then
|
||||||
echo -e " ${TICK} UDP (IPv4)"
|
echo -e " ${TICK} UDP (IPv4)"
|
||||||
else
|
else
|
||||||
echo -e " ${CROSS} UDP (IPv4)"
|
echo -e " ${CROSS} UDP (IPv4)"
|
||||||
fi
|
fi
|
||||||
if grep -q "IPv4.*TCP" <<< "${1}"; then
|
if grep -q "tcp " <<< "${lv4}"; then
|
||||||
echo -e " ${TICK} TCP (IPv4)"
|
echo -e " ${TICK} TCP (IPv4)"
|
||||||
else
|
else
|
||||||
echo -e " ${CROSS} TCP (IPv4)"
|
echo -e " ${CROSS} TCP (IPv4)"
|
||||||
fi
|
fi
|
||||||
if grep -q "IPv6.*UDP" <<< "${1}"; then
|
lv6="$(ss --ipv6 --listening --numeric --tcp --udp src :${port})"
|
||||||
|
if grep -q "udp " <<< "${lv6}"; then
|
||||||
echo -e " ${TICK} UDP (IPv6)"
|
echo -e " ${TICK} UDP (IPv6)"
|
||||||
else
|
else
|
||||||
echo -e " ${CROSS} UDP (IPv6)"
|
echo -e " ${CROSS} UDP (IPv6)"
|
||||||
fi
|
fi
|
||||||
if grep -q "IPv6.*TCP" <<< "${1}"; then
|
if grep -q "tcp " <<< "${lv6}"; then
|
||||||
echo -e " ${TICK} TCP (IPv6)"
|
echo -e " ${TICK} TCP (IPv6)"
|
||||||
else
|
else
|
||||||
echo -e " ${CROSS} TCP (IPv6)"
|
echo -e " ${CROSS} TCP (IPv6)"
|
||||||
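For reference, a hedged sketch of what the ss query above returns on a healthy host (addresses and exact column layout vary between systems):
```
ss --ipv4 --listening --numeric --tcp --udp src :53
# Netid  State   Recv-Q  Send-Q  Local Address:Port   Peer Address:Port
# udp    UNCONN  0       0       0.0.0.0:53           0.0.0.0:*
# tcp    LISTEN  0       32      0.0.0.0:53           0.0.0.0:*
# The checks above only look for the leading "udp " / "tcp " tokens.
```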
@@ -314,19 +312,31 @@ analyze_ports() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
statusFunc() {
|
statusFunc() {
|
||||||
# Determine if a pihole service is listening on port 53
|
# Determine if the pihole-FTL service is listening
|
||||||
local listening
|
local listening pid port
|
||||||
listening="$(lsof -Pni:53)"
|
|
||||||
if grep -q "pihole" <<< "${listening}"; then
|
pid="$(getFTLPID)"
|
||||||
if [[ "${1}" != "web" ]]; then
|
if [[ "$pid" -eq "-1" ]]; then
|
||||||
analyze_ports "${listening}"
|
|
||||||
fi
|
|
||||||
else
|
|
||||||
case "${1}" in
|
case "${1}" in
|
||||||
"web") echo "-1";;
|
"web") echo "-1";;
|
||||||
*) echo -e " ${CROSS} DNS service is NOT listening";;
|
*) echo -e " ${CROSS} DNS service is NOT running";;
|
||||||
esac
|
esac
|
||||||
return 0
|
return 0
|
||||||
|
else
|
||||||
|
#get the port pihole-FTL is listening on by using FTL's telnet API
|
||||||
|
port="$(echo ">dns-port >quit" | nc 127.0.0.1 4711)"
|
||||||
|
if [[ "${port}" == "0" ]]; then
|
||||||
|
case "${1}" in
|
||||||
|
"web") echo "-1";;
|
||||||
|
*) echo -e " ${CROSS} DNS service is NOT listening";;
|
||||||
|
esac
|
||||||
|
return 0
|
||||||
|
else
|
||||||
|
if [[ "${1}" != "web" ]]; then
|
||||||
|
echo -e " ${TICK} FTL is listening on port ${port}"
|
||||||
|
analyze_ports "${port}"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
fi
|
fi
|
||||||
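The port lookup talks to FTL's telnet-style API on 127.0.0.1:4711; a minimal sketch of querying it directly (the reply shown is an example):
```
# Ask FTL which port its DNS server is bound to, then close the session
echo ">dns-port >quit" | nc 127.0.0.1 4711
# Typical reply when FTL serves DNS: 53
# A reply of 0 means FTL runs but is not listening for DNS queries.
```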
|
|
||||||
# Determine if Pi-hole's blocking is enabled
|
# Determine if Pi-hole's blocking is enabled
|
||||||
@@ -339,18 +349,19 @@ statusFunc() {
|
|||||||
elif grep -q "BLOCKING_ENABLED=true" /etc/pihole/setupVars.conf; then
|
elif grep -q "BLOCKING_ENABLED=true" /etc/pihole/setupVars.conf; then
|
||||||
# Configs are set
|
# Configs are set
|
||||||
case "${1}" in
|
case "${1}" in
|
||||||
"web") echo 1;;
|
"web") echo "$port";;
|
||||||
*) echo -e " ${TICK} Pi-hole blocking is enabled";;
|
*) echo -e " ${TICK} Pi-hole blocking is enabled";;
|
||||||
esac
|
esac
|
||||||
else
|
else
|
||||||
# No configs were found
|
# No configs were found
|
||||||
case "${1}" in
|
case "${1}" in
|
||||||
"web") echo 99;;
|
"web") echo -2;;
|
||||||
*) echo -e " ${INFO} Pi-hole blocking will be enabled";;
|
*) echo -e " ${INFO} Pi-hole blocking will be enabled";;
|
||||||
esac
|
esac
|
||||||
# Enable blocking
|
# Enable blocking
|
||||||
"${PI_HOLE_BIN_DIR}"/pihole enable
|
"${PI_HOLE_BIN_DIR}"/pihole enable
|
||||||
fi
|
fi
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
tailFunc() {
|
tailFunc() {
|
||||||
@@ -369,7 +380,7 @@ tailFunc() {
|
|||||||
# Color everything else as gray
|
# Color everything else as gray
|
||||||
tail -f /var/log/pihole.log | grep --line-buffered "${1}" | sed -E \
|
tail -f /var/log/pihole.log | grep --line-buffered "${1}" | sed -E \
|
||||||
-e "s,($(date +'%b %d ')| dnsmasq\[[0-9]*\]),,g" \
|
-e "s,($(date +'%b %d ')| dnsmasq\[[0-9]*\]),,g" \
|
||||||
-e "s,(.*(blacklisted |gravity blocked ).* is (0.0.0.0|::|NXDOMAIN).*),${COL_RED}&${COL_NC}," \
|
-e "s,(.*(blacklisted |gravity blocked ).*),${COL_RED}&${COL_NC}," \
|
||||||
-e "s,.*(query\\[A|DHCP).*,${COL_NC}&${COL_NC}," \
|
-e "s,.*(query\\[A|DHCP).*,${COL_NC}&${COL_NC}," \
|
||||||
-e "s,.*,${COL_GRAY}&${COL_NC},"
|
-e "s,.*,${COL_GRAY}&${COL_NC},"
|
||||||
exit 0
|
exit 0
|
||||||
@@ -399,34 +410,24 @@ Branches:
|
|||||||
}
|
}
|
||||||
|
|
||||||
tricorderFunc() {
|
tricorderFunc() {
|
||||||
|
local tricorder_token
|
||||||
if [[ ! -p "/dev/stdin" ]]; then
|
if [[ ! -p "/dev/stdin" ]]; then
|
||||||
echo -e " ${INFO} Please do not call Tricorder directly"
|
echo -e " ${INFO} Please do not call Tricorder directly"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if ! (echo > /dev/tcp/tricorder.pi-hole.net/9998) >/dev/null 2>&1; then
|
tricorder_token=$(curl --silent --fail --show-error --upload-file "-" https://tricorder.pi-hole.net/upload < /dev/stdin 2>&1)
|
||||||
echo -e " ${CROSS} Unable to connect to Pi-hole's Tricorder server"
|
if [[ "${tricorder_token}" != "https://tricorder.pi-hole.net/"* ]]; then
|
||||||
exit 1
|
echo -e "${CROSS} uploading failed, contact Pi-hole support for assistance."
|
||||||
fi
|
# Log curl error (if available)
|
||||||
|
if [ -n "${tricorder_token}" ]; then
|
||||||
if command -v openssl &> /dev/null; then
|
echo -e "${INFO} Error message: ${COL_RED}${tricorder_token}${COL_NC}\\n"
|
||||||
openssl s_client -quiet -connect tricorder.pi-hole.net:9998 2> /dev/null < /dev/stdin
|
tricorder_token=""
|
||||||
exit "$?"
|
fi
|
||||||
else
|
exit 1
|
||||||
echo -e " ${INFO} ${COL_YELLOW}Security Notice${COL_NC}: ${COL_WHITE}openssl${COL_NC} is not installed
|
|
||||||
Your debug log will be transmitted unencrypted via plain-text
|
|
||||||
There is a possibility that this could be intercepted by a third party
|
|
||||||
If you wish to cancel, press Ctrl-C to exit within 10 seconds"
|
|
||||||
secs="10"
|
|
||||||
while [[ "$secs" -gt "0" ]]; do
|
|
||||||
echo -ne "."
|
|
||||||
sleep 1
|
|
||||||
: $((secs--))
|
|
||||||
done
|
|
||||||
echo " "
|
|
||||||
nc tricorder.pi-hole.net 9999 < /dev/stdin
|
|
||||||
exit "$?"
|
|
||||||
fi
|
fi
|
||||||
|
echo "Upload successful, your token is: ${COL_GREEN}${tricorder_token}${COL_NC}"
|
||||||
|
exit 0
|
||||||
}
|
}
|
||||||
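An equivalent manual upload, mirroring the curl call introduced above; the log path is an assumption, since the debug script normally pipes the log in via stdin:
```
curl --silent --fail --show-error --upload-file - \
    https://tricorder.pi-hole.net/upload < /var/log/pihole_debug.log
# On success the service replies with a retrieval URL of the form
#   https://tricorder.pi-hole.net/<token>
```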
|
|
||||||
updateCheckFunc() {
|
updateCheckFunc() {
|
||||||
|
@@ -1,5 +0,0 @@
|
|||||||
Raspbian=9,10
|
|
||||||
Ubuntu=16,18,20
|
|
||||||
Debian=9,10
|
|
||||||
Fedora=32,33
|
|
||||||
CentOS=7,8
|
|
@@ -18,8 +18,8 @@ py.test -vv -n auto -m "build_stage"
|
|||||||
py.test -vv -n auto -m "not build_stage"
|
py.test -vv -n auto -m "not build_stage"
|
||||||
```
|
```
|
||||||
|
|
||||||
The build_stage tests have to run first to create the docker images, followed by the actual tests which utilize said images. Unless you're changing your dockerfiles you shouldn't have to run the build_stage every time - but it's a good idea to rebuild at least once a day in case the base Docker images or packages change.
|
The build_stage tests have to run first to create the docker images, followed by the actual tests which utilize said images. Unless you're changing your dockerfiles you shouldn't have to run the build_stage every time - but it's a good idea to rebuild at least once a day in case the base Docker images or packages change.
|
||||||
|
|
||||||
# How do I debug python?
|
# How do I debug python?
|
||||||
|
|
||||||
Highly recommended: Set up PyCharm on a **Docker enabled** machine. Having a Python debugger like PyCharm changes your life if you've never used one :)
|
Highly recommended: Set up PyCharm on a **Docker enabled** machine. Having a Python debugger like PyCharm changes your life if you've never used one :)
|
||||||
|
@@ -1,4 +1,5 @@
 FROM centos:7
+RUN yum install -y git

 ENV GITDIR /etc/.pihole
 ENV SCRIPTDIR /opt/pihole
@@ -12,5 +13,6 @@ RUN true && \
     chmod +x $SCRIPTDIR/*

 ENV PH_TEST true
+ENV OS_CHECK_DOMAIN_NAME dev-supportedos.pi-hole.net

 #sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \
@@ -1,4 +1,5 @@
 FROM centos:8
+RUN yum install -y git

 ENV GITDIR /etc/.pihole
 ENV SCRIPTDIR /opt/pihole
@@ -12,5 +13,6 @@ RUN true && \
     chmod +x $SCRIPTDIR/*

 ENV PH_TEST true
+ENV OS_CHECK_DOMAIN_NAME dev-supportedos.pi-hole.net

 #sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \
@@ -12,5 +12,6 @@ RUN true && \
     chmod +x $SCRIPTDIR/*

 ENV PH_TEST true
+ENV OS_CHECK_DOMAIN_NAME dev-supportedos.pi-hole.net

 #sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \
@@ -1,4 +1,4 @@
-FROM fedora:32
+FROM buildpack-deps:bullseye-scm

 ENV GITDIR /etc/.pihole
 ENV SCRIPTDIR /opt/pihole
@@ -12,5 +12,6 @@ RUN true && \
     chmod +x $SCRIPTDIR/*

 ENV PH_TEST true
+ENV OS_CHECK_DOMAIN_NAME dev-supportedos.pi-hole.net

 #sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \
@@ -12,5 +12,6 @@ RUN true && \
     chmod +x $SCRIPTDIR/*

 ENV PH_TEST true
+ENV OS_CHECK_DOMAIN_NAME dev-supportedos.pi-hole.net

 #sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \
@@ -1,4 +1,5 @@
 FROM fedora:33
+RUN dnf install -y git

 ENV GITDIR /etc/.pihole
 ENV SCRIPTDIR /opt/pihole
@@ -12,5 +13,6 @@ RUN true && \
     chmod +x $SCRIPTDIR/*

 ENV PH_TEST true
+ENV OS_CHECK_DOMAIN_NAME dev-supportedos.pi-hole.net

 #sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \
test/_fedora_34.Dockerfile (new file)
@@ -0,0 +1,18 @@
+FROM fedora:34
+RUN dnf install -y git
+
+ENV GITDIR /etc/.pihole
+ENV SCRIPTDIR /opt/pihole
+
+RUN mkdir -p $GITDIR $SCRIPTDIR /etc/pihole
+ADD . $GITDIR
+RUN cp $GITDIR/advanced/Scripts/*.sh $GITDIR/gravity.sh $GITDIR/pihole $GITDIR/automated\ install/*.sh $SCRIPTDIR/
+ENV PATH /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$SCRIPTDIR
+
+RUN true && \
+    chmod +x $SCRIPTDIR/*
+
+ENV PH_TEST true
+ENV OS_CHECK_DOMAIN_NAME dev-supportedos.pi-hole.net
+
+#sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \
@@ -12,5 +12,6 @@ RUN true && \
     chmod +x $SCRIPTDIR/*

 ENV PH_TEST true
+ENV OS_CHECK_DOMAIN_NAME dev-supportedos.pi-hole.net

 #sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \
@@ -12,5 +12,6 @@ RUN true && \
     chmod +x $SCRIPTDIR/*

 ENV PH_TEST true
+ENV OS_CHECK_DOMAIN_NAME dev-supportedos.pi-hole.net

 #sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \
@@ -13,5 +13,6 @@ RUN true && \
     chmod +x $SCRIPTDIR/*

 ENV PH_TEST true
+ENV OS_CHECK_DOMAIN_NAME dev-supportedos.pi-hole.net

 #sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \
test/_ubuntu_21.Dockerfile (new file)
@@ -0,0 +1,18 @@
+FROM buildpack-deps:hirsute-scm
+
+ENV GITDIR /etc/.pihole
+ENV SCRIPTDIR /opt/pihole
+
+RUN mkdir -p $GITDIR $SCRIPTDIR /etc/pihole
+ADD . $GITDIR
+RUN cp $GITDIR/advanced/Scripts/*.sh $GITDIR/gravity.sh $GITDIR/pihole $GITDIR/automated\ install/*.sh $SCRIPTDIR/
+ENV PATH /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$SCRIPTDIR
+ENV DEBIAN_FRONTEND=noninteractive
+
+RUN true && \
+    chmod +x $SCRIPTDIR/*
+
+ENV PH_TEST true
+ENV OS_CHECK_DOMAIN_NAME dev-supportedos.pi-hole.net
+
+#sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \
test/conftest.py
@@ -1,10 +1,9 @@
 import pytest
 import testinfra
+import testinfra.backend.docker
+import subprocess
 from textwrap import dedent

-check_output = testinfra.get_backend(
-    "local://"
-).get_module("Command").check_output

 SETUPVARS = {
     'PIHOLE_INTERFACE': 'eth99',
@@ -12,85 +11,42 @@ SETUPVARS = {
     'PIHOLE_DNS_2': '4.2.2.2'
 }

+IMAGE = 'pytest_pihole:test_container'
+
 tick_box = "[\x1b[1;32m\u2713\x1b[0m]"
 cross_box = "[\x1b[1;31m\u2717\x1b[0m]"
 info_box = "[i]"


-@pytest.fixture
-def Pihole(Docker):
-    '''
-    used to contain some script stubbing, now pretty much an alias.
-    Also provides bash as the default run function shell
-    '''
-    def run_bash(self, command, *args, **kwargs):
-        cmd = self.get_command(command, *args)
-        if self.user is not None:
-            out = self.run_local(
-                "docker exec -u %s %s /bin/bash -c %s",
-                self.user, self.name, cmd)
-        else:
-            out = self.run_local(
-                "docker exec %s /bin/bash -c %s", self.name, cmd)
-        out.command = self.encode(cmd)
-        return out
+# Monkeypatch sh to bash, if they ever support non hard code /bin/sh this can go away
+# https://github.com/pytest-dev/pytest-testinfra/blob/master/testinfra/backend/docker.py
+def run_bash(self, command, *args, **kwargs):
+    cmd = self.get_command(command, *args)
+    if self.user is not None:
+        out = self.run_local(
+            "docker exec -u %s %s /bin/bash -c %s", self.user, self.name, cmd
+        )
+    else:
+        out = self.run_local("docker exec %s /bin/bash -c %s", self.name, cmd)
+    out.command = self.encode(cmd)
+    return out

-    funcType = type(Docker.run)
-    Docker.run = funcType(run_bash, Docker)
-    return Docker
+testinfra.backend.docker.DockerBackend.run = run_bash


 @pytest.fixture
-def Docker(request, args, image, cmd):
-    '''
-    combine our fixtures into a docker run command and setup finalizer to
-    cleanup
-    '''
-    assert 'docker' in check_output('id'), "Are you in the docker group?"
-    docker_run = "docker run {} {} {}".format(args, image, cmd)
-    docker_id = check_output(docker_run)
+def host():
+    # run a container
+    docker_id = subprocess.check_output(
+        ['docker', 'run', '-t', '-d', '--cap-add=ALL', IMAGE]).decode().strip()

-    def teardown():
-        check_output("docker rm -f %s", docker_id)
-    request.addfinalizer(teardown)
+    # return a testinfra connection to the container
+    docker_host = testinfra.get_host("docker://" + docker_id)

-    docker_container = testinfra.get_backend("docker://" + docker_id)
-    docker_container.id = docker_id
-    return docker_container
+    yield docker_host
+    # at the end of the test suite, destroy the container
+    subprocess.check_call(['docker', 'rm', '-f', docker_id])


-@pytest.fixture
-def args(request):
-    '''
-    -t became required when tput began being used
-    '''
-    return '-t -d'
-
-
-@pytest.fixture(params=[
-    'test_container'
-])
-def tag(request):
-    '''
-    consumed by image to make the test matrix
-    '''
-    return request.param
-
-
-@pytest.fixture()
-def image(request, tag):
-    '''
-    built by test_000_build_containers.py
-    '''
-    return 'pytest_pihole:{}'.format(tag)
-
-
-@pytest.fixture()
-def cmd(request):
-    '''
-    default to doing nothing by tailing null, but don't exit
-    '''
-    return 'tail -f /dev/null'


 # Helper functions
@@ -100,7 +56,7 @@ def mock_command(script, args, container):
     in unit tests
     '''
     full_script_path = '/usr/local/bin/{}'.format(script)
-    mock_script = dedent('''\
+    mock_script = dedent(r'''\
     #!/bin/bash -e
     echo "\$0 \$@" >> /var/log/{script}
     case "\$1" in'''.format(script=script))
@@ -121,13 +77,75 @@ def mock_command(script, args, container):
                                          scriptlog=script))


+def mock_command_passthrough(script, args, container):
+    '''
+    Per other mock_command* functions, allows intercepting of commands we don't want to run for real
+    in unit tests, however also allows only specific arguments to be mocked. Anything not defined will
+    be passed through to the actual command.
+
+    Example use-case: mocking `git pull` but still allowing `git clone` to work as intended
+    '''
+    orig_script_path = container.check_output('command -v {}'.format(script))
+    full_script_path = '/usr/local/bin/{}'.format(script)
+    mock_script = dedent(r'''\
+    #!/bin/bash -e
+    echo "\$0 \$@" >> /var/log/{script}
+    case "\$1" in'''.format(script=script))
+    for k, v in args.items():
+        case = dedent('''
+        {arg})
+        echo {res}
+        exit {retcode}
+        ;;'''.format(arg=k, res=v[0], retcode=v[1]))
+        mock_script += case
+    mock_script += dedent(r'''
+        *)
+        {orig_script_path} "\$@"
+        ;;'''.format(orig_script_path=orig_script_path))
+    mock_script += dedent('''
+    esac''')
+    container.run('''
+    cat <<EOF> {script}\n{content}\nEOF
+    chmod +x {script}
+    rm -f /var/log/{scriptlog}'''.format(script=full_script_path,
+                                         content=mock_script,
+                                         scriptlog=script))
+
+
+def mock_command_run(script, args, container):
+    '''
+    Allows for setup of commands we don't really want to have to run for real
+    in unit tests
+    '''
+    full_script_path = '/usr/local/bin/{}'.format(script)
+    mock_script = dedent(r'''\
+    #!/bin/bash -e
+    echo "\$0 \$@" >> /var/log/{script}
+    case "\$1 \$2" in'''.format(script=script))
+    for k, v in args.items():
+        case = dedent('''
+        \"{arg}\")
+        echo {res}
+        exit {retcode}
+        ;;'''.format(arg=k, res=v[0], retcode=v[1]))
+        mock_script += case
+    mock_script += dedent('''
+    esac''')
+    container.run('''
+    cat <<EOF> {script}\n{content}\nEOF
+    chmod +x {script}
+    rm -f /var/log/{scriptlog}'''.format(script=full_script_path,
+                                         content=mock_script,
+                                         scriptlog=script))
+
+
 def mock_command_2(script, args, container):
     '''
     Allows for setup of commands we don't really want to have to run for real
     in unit tests
     '''
     full_script_path = '/usr/local/bin/{}'.format(script)
-    mock_script = dedent('''\
+    mock_script = dedent(r'''\
     #!/bin/bash -e
     echo "\$0 \$@" >> /var/log/{script}
     case "\$1 \$2" in'''.format(script=script))
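A usage sketch of the passthrough mock added above: `host` and `mock_command_passthrough` are the fixture and helper defined in test/conftest.py in this diff, but the test function itself and the `git pull` mapping are illustrative only, lifted from the helper's own docstring rather than from a test in this changeset:

```
from .conftest import mock_command_passthrough


def test_git_pull_is_intercepted(host):
    # Replace `git` with a wrapper that intercepts only `pull` (empty output,
    # exit code 0); any other subcommand falls through to the real binary.
    mock_command_passthrough('git', {'pull': ('', '0')}, host)

    pull = host.run('git pull')            # handled by the mock's "pull" case
    version = host.run('git --version')    # passed through to the real git

    assert pull.rc == 0
    assert 'git version' in version.stdout
```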
@@ -1,6 +1,6 @@
-docker-compose==1.23.2
-pytest==4.3.0
-pytest-xdist==1.26.1
-pytest-cov==2.6.1
-testinfra==1.19.0
-tox==3.7.0
+docker-compose
+pytest
+pytest-xdist
+pytest-cov
+pytest-testinfra
+tox
File diff suppressed because it is too large
@@ -5,56 +5,59 @@ from .conftest import (
 )


-def test_php_upgrade_default_optout_centos_eq_7(Pihole):
+def test_php_upgrade_default_optout_centos_eq_7(host):
     '''
     confirms the default behavior to opt-out of installing PHP7 from REMI
     '''
-    distro_check = Pihole.run('''
+    package_manager_detect = host.run('''
     source /opt/pihole/basic-install.sh
-    distro_check
+    package_manager_detect
+    select_rpm_php
     ''')
     expected_stdout = info_box + (' User opt-out of PHP 7 upgrade on CentOS. '
                                   'Deprecated PHP may be in use.')
-    assert expected_stdout in distro_check.stdout
-    remi_package = Pihole.package('remi-release')
+    assert expected_stdout in package_manager_detect.stdout
+    remi_package = host.package('remi-release')
     assert not remi_package.is_installed


-def test_php_upgrade_user_optout_centos_eq_7(Pihole):
+def test_php_upgrade_user_optout_centos_eq_7(host):
     '''
     confirms installer behavior when user opt-out of installing PHP7 from REMI
     (php not currently installed)
     '''
     # Whiptail dialog returns Cancel for user prompt
-    mock_command('whiptail', {'*': ('', '1')}, Pihole)
-    distro_check = Pihole.run('''
+    mock_command('whiptail', {'*': ('', '1')}, host)
+    package_manager_detect = host.run('''
     source /opt/pihole/basic-install.sh
-    distro_check
+    package_manager_detect
+    select_rpm_php
     ''')
     expected_stdout = info_box + (' User opt-out of PHP 7 upgrade on CentOS. '
                                   'Deprecated PHP may be in use.')
-    assert expected_stdout in distro_check.stdout
-    remi_package = Pihole.package('remi-release')
+    assert expected_stdout in package_manager_detect.stdout
+    remi_package = host.package('remi-release')
     assert not remi_package.is_installed


-def test_php_upgrade_user_optin_centos_eq_7(Pihole):
+def test_php_upgrade_user_optin_centos_eq_7(host):
     '''
     confirms installer behavior when user opt-in to installing PHP7 from REMI
     (php not currently installed)
     '''
     # Whiptail dialog returns Continue for user prompt
-    mock_command('whiptail', {'*': ('', '0')}, Pihole)
-    distro_check = Pihole.run('''
+    mock_command('whiptail', {'*': ('', '0')}, host)
+    package_manager_detect = host.run('''
     source /opt/pihole/basic-install.sh
-    distro_check
+    package_manager_detect
+    select_rpm_php
     ''')
-    assert 'opt-out' not in distro_check.stdout
+    assert 'opt-out' not in package_manager_detect.stdout
     expected_stdout = info_box + (' Enabling Remi\'s RPM repository '
                                   '(https://rpms.remirepo.net)')
-    assert expected_stdout in distro_check.stdout
+    assert expected_stdout in package_manager_detect.stdout
     expected_stdout = tick_box + (' Remi\'s RPM repository has '
                                   'been enabled for PHP7')
-    assert expected_stdout in distro_check.stdout
-    remi_package = Pihole.package('remi-release')
+    assert expected_stdout in package_manager_detect.stdout
+    remi_package = host.package('remi-release')
     assert remi_package.is_installed
@@ -5,61 +5,64 @@ from .conftest import (
 )


-def test_php_upgrade_default_continue_centos_gte_8(Pihole):
+def test_php_upgrade_default_continue_centos_gte_8(host):
     '''
     confirms the latest version of CentOS continues / does not optout
     (should trigger on CentOS7 only)
     '''
-    distro_check = Pihole.run('''
+    package_manager_detect = host.run('''
     source /opt/pihole/basic-install.sh
-    distro_check
+    package_manager_detect
+    select_rpm_php
     ''')
     unexpected_stdout = info_box + (' User opt-out of PHP 7 upgrade on CentOS.'
                                     ' Deprecated PHP may be in use.')
-    assert unexpected_stdout not in distro_check.stdout
+    assert unexpected_stdout not in package_manager_detect.stdout
     # ensure remi was not installed on latest CentOS
-    remi_package = Pihole.package('remi-release')
+    remi_package = host.package('remi-release')
     assert not remi_package.is_installed


-def test_php_upgrade_user_optout_skipped_centos_gte_8(Pihole):
+def test_php_upgrade_user_optout_skipped_centos_gte_8(host):
     '''
     confirms installer skips user opt-out of installing PHP7 from REMI on
     latest CentOS (should trigger on CentOS7 only)
     (php not currently installed)
     '''
     # Whiptail dialog returns Cancel for user prompt
-    mock_command('whiptail', {'*': ('', '1')}, Pihole)
-    distro_check = Pihole.run('''
+    mock_command('whiptail', {'*': ('', '1')}, host)
+    package_manager_detect = host.run('''
     source /opt/pihole/basic-install.sh
-    distro_check
+    package_manager_detect
+    select_rpm_php
     ''')
     unexpected_stdout = info_box + (' User opt-out of PHP 7 upgrade on CentOS.'
                                     ' Deprecated PHP may be in use.')
-    assert unexpected_stdout not in distro_check.stdout
+    assert unexpected_stdout not in package_manager_detect.stdout
     # ensure remi was not installed on latest CentOS
-    remi_package = Pihole.package('remi-release')
+    remi_package = host.package('remi-release')
     assert not remi_package.is_installed


-def test_php_upgrade_user_optin_skipped_centos_gte_8(Pihole):
+def test_php_upgrade_user_optin_skipped_centos_gte_8(host):
     '''
     confirms installer skips user opt-in to installing PHP7 from REMI on
     latest CentOS (should trigger on CentOS7 only)
     (php not currently installed)
     '''
     # Whiptail dialog returns Continue for user prompt
-    mock_command('whiptail', {'*': ('', '0')}, Pihole)
-    distro_check = Pihole.run('''
+    mock_command('whiptail', {'*': ('', '0')}, host)
+    package_manager_detect = host.run('''
     source /opt/pihole/basic-install.sh
-    distro_check
+    package_manager_detect
+    select_rpm_php
     ''')
-    assert 'opt-out' not in distro_check.stdout
+    assert 'opt-out' not in package_manager_detect.stdout
     unexpected_stdout = info_box + (' Enabling Remi\'s RPM repository '
                                     '(https://rpms.remirepo.net)')
-    assert unexpected_stdout not in distro_check.stdout
+    assert unexpected_stdout not in package_manager_detect.stdout
     unexpected_stdout = tick_box + (' Remi\'s RPM repository has '
                                     'been enabled for PHP7')
-    assert unexpected_stdout not in distro_check.stdout
-    remi_package = Pihole.package('remi-release')
+    assert unexpected_stdout not in package_manager_detect.stdout
+    remi_package = host.package('remi-release')
     assert not remi_package.is_installed
@@ -7,114 +7,119 @@ from .conftest import (
 )


-def test_release_supported_version_check_centos(Pihole):
+def test_release_supported_version_check_centos(host):
     '''
     confirms installer exits on unsupported releases of CentOS
     '''
     # modify /etc/redhat-release to mock an unsupported CentOS release
-    Pihole.run('echo "CentOS Linux release 6.9" > /etc/redhat-release')
-    distro_check = Pihole.run('''
+    host.run('echo "CentOS Linux release 6.9" > /etc/redhat-release')
+    package_manager_detect = host.run('''
     source /opt/pihole/basic-install.sh
-    distro_check
+    package_manager_detect
+    select_rpm_php
     ''')
     expected_stdout = cross_box + (' CentOS 6 is not supported.')
-    assert expected_stdout in distro_check.stdout
+    assert expected_stdout in package_manager_detect.stdout
     expected_stdout = 'Please update to CentOS release 7 or later'
-    assert expected_stdout in distro_check.stdout
+    assert expected_stdout in package_manager_detect.stdout


-def test_enable_epel_repository_centos(Pihole):
+def test_enable_epel_repository_centos(host):
     '''
     confirms the EPEL package repository is enabled when installed on CentOS
     '''
-    distro_check = Pihole.run('''
+    package_manager_detect = host.run('''
     source /opt/pihole/basic-install.sh
-    distro_check
+    package_manager_detect
+    select_rpm_php
     ''')
     expected_stdout = info_box + (' Enabling EPEL package repository '
                                   '(https://fedoraproject.org/wiki/EPEL)')
-    assert expected_stdout in distro_check.stdout
+    assert expected_stdout in package_manager_detect.stdout
     expected_stdout = tick_box + ' Installed epel-release'
-    assert expected_stdout in distro_check.stdout
-    epel_package = Pihole.package('epel-release')
+    assert expected_stdout in package_manager_detect.stdout
+    epel_package = host.package('epel-release')
     assert epel_package.is_installed


-def test_php_version_lt_7_detected_upgrade_default_optout_centos(Pihole):
+def test_php_version_lt_7_detected_upgrade_default_optout_centos(host):
     '''
     confirms the default behavior to opt-out of upgrading to PHP7 from REMI
     '''
     # first we will install the default php version to test installer behavior
-    php_install = Pihole.run('yum install -y php')
+    php_install = host.run('yum install -y php')
     assert php_install.rc == 0
-    php_package = Pihole.package('php')
+    php_package = host.package('php')
     default_centos_php_version = php_package.version.split('.')[0]
     if int(default_centos_php_version) >= 7:  # PHP7 is supported/recommended
         pytest.skip("Test deprecated . Detected default PHP version >= 7")
-    distro_check = Pihole.run('''
+    package_manager_detect = host.run('''
     source /opt/pihole/basic-install.sh
-    distro_check
+    package_manager_detect
+    select_rpm_php
     ''')
     expected_stdout = info_box + (' User opt-out of PHP 7 upgrade on CentOS. '
                                   'Deprecated PHP may be in use.')
-    assert expected_stdout in distro_check.stdout
-    remi_package = Pihole.package('remi-release')
+    assert expected_stdout in package_manager_detect.stdout
+    remi_package = host.package('remi-release')
     assert not remi_package.is_installed


-def test_php_version_lt_7_detected_upgrade_user_optout_centos(Pihole):
+def test_php_version_lt_7_detected_upgrade_user_optout_centos(host):
     '''
     confirms installer behavior when user opt-out to upgrade to PHP7 via REMI
     '''
     # first we will install the default php version to test installer behavior
-    php_install = Pihole.run('yum install -y php')
+    php_install = host.run('yum install -y php')
     assert php_install.rc == 0
-    php_package = Pihole.package('php')
+    php_package = host.package('php')
     default_centos_php_version = php_package.version.split('.')[0]
     if int(default_centos_php_version) >= 7:  # PHP7 is supported/recommended
         pytest.skip("Test deprecated . Detected default PHP version >= 7")
     # Whiptail dialog returns Cancel for user prompt
-    mock_command('whiptail', {'*': ('', '1')}, Pihole)
-    distro_check = Pihole.run('''
+    mock_command('whiptail', {'*': ('', '1')}, host)
+    package_manager_detect = host.run('''
     source /opt/pihole/basic-install.sh
-    distro_check
+    package_manager_detect
+    select_rpm_php
     ''')
     expected_stdout = info_box + (' User opt-out of PHP 7 upgrade on CentOS. '
                                   'Deprecated PHP may be in use.')
-    assert expected_stdout in distro_check.stdout
-    remi_package = Pihole.package('remi-release')
+    assert expected_stdout in package_manager_detect.stdout
+    remi_package = host.package('remi-release')
     assert not remi_package.is_installed


-def test_php_version_lt_7_detected_upgrade_user_optin_centos(Pihole):
+def test_php_version_lt_7_detected_upgrade_user_optin_centos(host):
     '''
     confirms installer behavior when user opt-in to upgrade to PHP7 via REMI
     '''
     # first we will install the default php version to test installer behavior
-    php_install = Pihole.run('yum install -y php')
+    php_install = host.run('yum install -y php')
     assert php_install.rc == 0
-    php_package = Pihole.package('php')
+    php_package = host.package('php')
     default_centos_php_version = php_package.version.split('.')[0]
     if int(default_centos_php_version) >= 7:  # PHP7 is supported/recommended
         pytest.skip("Test deprecated . Detected default PHP version >= 7")
     # Whiptail dialog returns Continue for user prompt
-    mock_command('whiptail', {'*': ('', '0')}, Pihole)
-    distro_check = Pihole.run('''
+    mock_command('whiptail', {'*': ('', '0')}, host)
+    package_manager_detect = host.run('''
     source /opt/pihole/basic-install.sh
-    distro_check
+    package_manager_detect
+    select_rpm_php
     install_dependent_packages PIHOLE_WEB_DEPS[@]
     ''')
     expected_stdout = info_box + (' User opt-out of PHP 7 upgrade on CentOS. '
                                   'Deprecated PHP may be in use.')
-    assert expected_stdout not in distro_check.stdout
+    assert expected_stdout not in package_manager_detect.stdout
     expected_stdout = info_box + (' Enabling Remi\'s RPM repository '
                                   '(https://rpms.remirepo.net)')
-    assert expected_stdout in distro_check.stdout
+    assert expected_stdout in package_manager_detect.stdout
     expected_stdout = tick_box + (' Remi\'s RPM repository has '
                                   'been enabled for PHP7')
-    assert expected_stdout in distro_check.stdout
-    remi_package = Pihole.package('remi-release')
+    assert expected_stdout in package_manager_detect.stdout
+    remi_package = host.package('remi-release')
     assert remi_package.is_installed
-    updated_php_package = Pihole.package('php')
+    updated_php_package = host.package('php')
     updated_php_version = updated_php_package.version.split('.')[0]
     assert int(updated_php_version) == 7
@@ -5,7 +5,7 @@ from .conftest import (
 )


-def mock_selinux_config(state, Pihole):
+def mock_selinux_config(state, host):
     '''
     Creates a mock SELinux config file with expected content
     '''
@@ -13,20 +13,20 @@ def mock_selinux_config(state, Pihole):
     valid_states = ['enforcing', 'permissive', 'disabled']
     assert state in valid_states
     # getenforce returns the running state of SELinux
-    mock_command('getenforce', {'*': (state.capitalize(), '0')}, Pihole)
+    mock_command('getenforce', {'*': (state.capitalize(), '0')}, host)
     # create mock configuration with desired content
-    Pihole.run('''
+    host.run('''
     mkdir /etc/selinux
     echo "SELINUX={state}" > /etc/selinux/config
     '''.format(state=state.lower()))


-def test_selinux_enforcing_exit(Pihole):
+def test_selinux_enforcing_exit(host):
     '''
     confirms installer prompts to exit when SELinux is Enforcing by default
     '''
-    mock_selinux_config("enforcing", Pihole)
-    check_selinux = Pihole.run('''
+    mock_selinux_config("enforcing", host)
+    check_selinux = host.run('''
     source /opt/pihole/basic-install.sh
     checkSelinux
     ''')
@@ -37,12 +37,12 @@ def test_selinux_enforcing_exit(Pihole):
     assert check_selinux.rc == 1


-def test_selinux_permissive(Pihole):
+def test_selinux_permissive(host):
     '''
     confirms installer continues when SELinux is Permissive
     '''
-    mock_selinux_config("permissive", Pihole)
-    check_selinux = Pihole.run('''
+    mock_selinux_config("permissive", host)
+    check_selinux = host.run('''
     source /opt/pihole/basic-install.sh
     checkSelinux
     ''')
@@ -51,12 +51,12 @@ def test_selinux_permissive(Pihole):
     assert check_selinux.rc == 0


-def test_selinux_disabled(Pihole):
+def test_selinux_disabled(host):
     '''
     confirms installer continues when SELinux is Disabled
     '''
-    mock_selinux_config("disabled", Pihole)
-    check_selinux = Pihole.run('''
+    mock_selinux_config("disabled", host)
+    check_selinux = host.run('''
     source /opt/pihole/basic-install.sh
     checkSelinux
     ''')
@@ -1,15 +1,16 @@
-def test_epel_and_remi_not_installed_fedora(Pihole):
+def test_epel_and_remi_not_installed_fedora(host):
     '''
     confirms installer does not attempt to install EPEL/REMI repositories
     on Fedora
     '''
-    distro_check = Pihole.run('''
+    package_manager_detect = host.run('''
     source /opt/pihole/basic-install.sh
-    distro_check
+    package_manager_detect
+    select_rpm_php
     ''')
-    assert distro_check.stdout == ''
+    assert package_manager_detect.stdout == ''

-    epel_package = Pihole.package('epel-release')
+    epel_package = host.package('epel-release')
     assert not epel_package.is_installed
-    remi_package = Pihole.package('remi-release')
+    remi_package = host.package('remi-release')
     assert not remi_package.is_installed
@@ -1,5 +1,5 @@
 [tox]
-envlist = py37
+envlist = py38

 [testenv]
 whitelist_externals = docker
@@ -1,5 +1,5 @@
 [tox]
-envlist = py37
+envlist = py38

 [testenv]
 whitelist_externals = docker
@@ -1,5 +1,5 @@
 [tox]
-envlist = py37
+envlist = py38

 [testenv]
 whitelist_externals = docker
test/tox.debian_11.ini (new file)
@@ -0,0 +1,8 @@
+[tox]
+envlist = py38
+
+[testenv]
+whitelist_externals = docker
+deps = -rrequirements.txt
+commands = docker build -f _debian_11.Dockerfile -t pytest_pihole:test_container ../
+    pytest {posargs:-vv -n auto} ./test_automated_install.py
@@ -1,5 +1,5 @@
 [tox]
-envlist = py37
+envlist = py38

 [testenv]
 whitelist_externals = docker
@@ -1,5 +1,5 @@
 [tox]
-envlist = py37
+envlist = py38

 [testenv]
 whitelist_externals = docker
@@ -1,8 +1,8 @@
 [tox]
-envlist = py37
+envlist = py38

 [testenv]
 whitelist_externals = docker
 deps = -rrequirements.txt
-commands = docker build -f _fedora_32.Dockerfile -t pytest_pihole:test_container ../
+commands = docker build -f _fedora_34.Dockerfile -t pytest_pihole:test_container ../
     pytest {posargs:-vv -n auto} ./test_automated_install.py ./test_centos_fedora_common_support.py ./test_fedora_support.py
@@ -1,5 +1,5 @@
 [tox]
-envlist = py37
+envlist = py38

 [testenv]
 whitelist_externals = docker
@@ -1,5 +1,5 @@
 [tox]
-envlist = py37
+envlist = py38

 [testenv]
 whitelist_externals = docker
@@ -1,5 +1,5 @@
 [tox]
-envlist = py37
+envlist = py38

 [testenv]
 whitelist_externals = docker
test/tox.ubuntu_21.ini (new file)
@@ -0,0 +1,8 @@
+[tox]
+envlist = py38
+
+[testenv]
+whitelist_externals = docker
+deps = -rrequirements.txt
+commands = docker build -f _ubuntu_21.Dockerfile -t pytest_pihole:test_container ../
+    pytest {posargs:-vv -n auto} ./test_automated_install.py