Compare commits
15 Commits
bf4b045aa0
89e84772f0
54e2ec7e8a
09159f4fb5
f834f328c1
afa5b3af48
f7657a39a1
bb91f2a1bc
759d3edd4e
8d3e77803c
aa3f5d04ee
7bbc9b23e1
eb956f8a36
8de7d5deea
99330ac416
.github/workflows/container.yaml (vendored, new file, 32 lines added)
@@ -0,0 +1,32 @@
---
name: Container Build

on:
  push:
    tags: ['[0-9]+.[0-9]+.[0-9]+']

jobs:
  build:
    name: build
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write
    steps:
      - uses: actions/checkout@v2
      - uses: docker/setup-qemu-action@v1
      - uses: docker/setup-buildx-action@v1
      - uses: docker/login-action@v1
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - id: version
        run: echo ::set-output name=VERSION::${GITHUB_REF/refs\/tags\//}
      - uses: docker/build-push-action@v2
        with:
          platforms: linux/amd64, linux/arm64
          push: true
          tags: |-
            ghcr.io/${{ github.repository }}:latest
            ghcr.io/${{ github.repository }}:${{ steps.version.outputs.VERSION }}
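The `version` step above derives the image tag by stripping the `refs/tags/` prefix from `GITHUB_REF` with bash parameter substitution. A quick local sketch, assuming a pushed tag such as `0.6.0`:

```console
$ GITHUB_REF="refs/tags/0.6.0"
$ echo "${GITHUB_REF/refs\/tags\//}"
0.6.0
```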
CHANGELOG.md (27 lines changed)
@@ -1,3 +1,30 @@
# 0.6.0
* New image ghcr.io/storax/kubedoom:0.6.0
* Latest image available as ghcr.io/storax/kubedoom:latest.
* Add support for building on different architectures.
* Update kubernetes to 1.23.2
* Update to Ubuntu 21.10
* Github Actions for building the image.
* VNC password can be configured during build via the `VNCPASSWORD` build argument.

# 0.5.0

* New image storaxdev/kubedoom:1.0.0
* New default VNC password is `idbehold`.
* Update kubernetes to 1.19.1
* Update to Ubuntu 20.10

# 0.4.0

* New image storadev/kubedoom:0.4.0
* New `-mode` flag to switch between killing pods or namespaces.
* Update kubernetes to 1.18.2

# 0.3.0

* New image storadev/kubedoom:0.3.0
* Update kubernetes to 1.18.1

# 0.2.0

* New image storadev/kubedoom:0.2.0
Dockerfile (64 lines changed)
@@ -1,56 +1,58 @@
FROM golang:1.13-alpine AS gobuild

FROM golang:1.17-alpine AS build-kubedoom
WORKDIR /go/src/kubedoom
ADD go.mod .
ADD kubedoom.go .
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o kubedoom .

FROM ubuntu:19.10 AS ubuntu
# make sure the package repository is up to date
RUN apt-get update

FROM ubuntu AS ubuntu-deps
# Install dependencies
RUN apt-get install -y \
FROM ubuntu:21.10 AS build-essentials
ARG TARGETARCH
ARG KUBECTL_VERSION=1.23.2
RUN apt-get update && apt-get install -y \
  -o APT::Install-Suggests=0 \
  --no-install-recommends \
  wget ca-certificates
RUN wget http://distro.ibiblio.org/pub/linux/distributions/slitaz/sources/packages/d/doom1.wad
RUN wget -O /usr/bin/kubectl https://storage.googleapis.com/kubernetes-release/release/$(wget -O- https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl \
RUN echo "TARGETARCH is $TARGETARCH"
RUN echo "KUBECTL_VERSION is $KUBECTL_VERSION"
RUN wget -O /usr/bin/kubectl "https://storage.googleapis.com/kubernetes-release/release/v${KUBECTL_VERSION}/bin/linux/${TARGETARCH}/kubectl" \
  && chmod +x /usr/bin/kubectl

FROM ubuntu AS ubuntu-build

FROM ubuntu:21.10 AS build-doom
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get install -y \
RUN apt-get update && apt-get install -y \
  -o APT::Install-Suggests=0 \
  --no-install-recommends \
  build-essential \
  libsdl-mixer1.2-dev \
  libsdl-net1.2-dev \
  gcc

# Setup doom
ADD /dockerdoom /dockerdoom
RUN cd /dockerdoom/trunk && ./configure && make && make install
WORKDIR /dockerdoom/trunk
RUN ./configure && make && make install

FROM ubuntu
RUN apt-get install -y \
FROM ubuntu:21.10 as build-converge
WORKDIR /build
RUN mkdir -p \
  /build/root \
  /build/usr/bin \
  /build/usr/local/games
COPY --from=build-essentials /doom1.wad /build/root
COPY --from=build-essentials /usr/bin/kubectl /build/usr/bin
COPY --from=build-kubedoom /go/src/kubedoom/kubedoom /build/usr/bin
COPY --from=build-doom /usr/local/games/psdoom /build/usr/local/games

FROM ubuntu:21.10
ARG VNCPASSWORD=idbehold
RUN apt-get update && apt-get install -y \
  -o APT::Install-Suggests=0 \
  --no-install-recommends \
  libsdl-mixer1.2 \
  libsdl-net1.2 \
  x11vnc \
  xvfb \
  netcat-openbsd

WORKDIR /root/

# Setup a password
RUN mkdir ~/.vnc && x11vnc -storepasswd 1234 ~/.vnc/passwd

COPY --from=ubuntu-deps /doom1.wad .
COPY --from=ubuntu-deps /usr/bin/kubectl /usr/bin/
COPY --from=ubuntu-build /usr/local/games/psdoom /usr/local/games/
COPY --from=gobuild /go/src/kubedoom/kubedoom .

ENTRYPOINT ["/root/kubedoom"]
  netcat-openbsd \
  && rm -rf /var/lib/apt/lists/*
RUN mkdir /root/.vnc && x11vnc -storepasswd "${VNCPASSWORD}" /root/.vnc/passwd
COPY --from=build-converge /build /
WORKDIR /root
ENTRYPOINT ["/usr/bin/kubedoom"]
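The new build arguments (`TARGETARCH`, `KUBECTL_VERSION`, `VNCPASSWORD`) can be exercised with a plain `docker build`. A minimal sketch, where the architecture and password values are illustrative and the kubectl version matches the Dockerfile default:

```console
$ docker build \
    --build-arg=TARGETARCH=arm64 \
    --build-arg=KUBECTL_VERSION=1.23.2 \
    --build-arg=VNCPASSWORD=differentpw \
    -t kubedoom .
```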
README.md (75 lines changed)
@@ -11,20 +11,43 @@ which was forked from psdoom.

![DOOM](assets/doom.gif)

## Usage
## Running Locally

Run `storaxdev/kubedoom:0.2.0` locally:
In order to run locally you will need to

1. Run the kubedoom container
2. Attach a VNC client to the appropriate port (5901)

### With Docker

Run `ghcr.io/storax/kubedoom:latest` with docker locally:

```console
$ docker run -p5900:5900 \
$ docker run -p5901:5900 \
  --net=host \
  -v ~/.kube:/root/.kube \
  --rm -it --name kubedoom \
  storaxdev/kubedoom:0.2.0
  ghcr.io/storax/kubedoom:latest
```

Now start a VNC viewer and connect to `localhost:5900`. The password is `1234`:
Optionally, if you set `-e NAMESPACE={your namespace}` you can limit Kubedoom to deleting pods in a single namespace

### With Podman

Run `ghcr.io/storax/kubedoom:latest` with podman locally:

```console
$ vncviewer viewer localhost
$ podman run -it -p5901:5900/tcp \
  -v ~/.kube:/tmp/.kube --security-opt label=disable \
  --env "KUBECONFIG=/tmp/.kube/config" --name kubedoom
  ghcr.io/storax/kubedoom:latest
```

### Attaching a VNC Client

Now start a VNC viewer and connect to `localhost:5901`. The password is `idbehold`:
```console
$ vncviewer viewer localhost:5901
```
You should now see DOOM! Now if you want to get the job done quickly enter the
cheat `idspispopd` and walk through the wall on your right. You should be
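The `NAMESPACE` note in this hunk can be combined with the docker invocation shown above. A minimal sketch, assuming you want to confine kills to the `default` namespace:

```console
$ docker run -p5901:5900 \
  --net=host \
  -v ~/.kube:/root/.kube \
  -e NAMESPACE=default \
  --rm -it --name kubedoom \
  ghcr.io/storax/kubedoom:latest
```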
@@ -32,6 +55,21 @@ greeted by your pods as little pink monsters. Press `CTRL` to fire. If the
pistol is not your thing, cheat with `idkfa` and press `5` for a nice surprise.
Pause the game with `ESC`.

### Killing namespaces

Kubedoom now also supports killing namespaces [in case you have too many of
them](https://github.com/storax/kubedoom/issues/5). Simply set the `-mode` flag
to `namespaces`:

```console
$ docker run -p5901:5900 \
  --net=host \
  -v ~/.kube:/root/.kube \
  --rm -it --name kubedoom \
  ghcr.io/storax/kubedoom:latest \
  -mode namespaces
```

### Running Kubedoom inside Kubernetes

See the example in the `/manifest` directory. You can quickly test it using
@@ -41,7 +79,7 @@ example config from this repository:
```console
$ kind create cluster --config kind-config.yaml
Creating cluster "kind" ...
 ✓ Ensuring node image (kindest/node:v1.17.0) 🖼
 ✓ Ensuring node image (kindest/node:v1.23.0) 🖼
 ✓ Preparing nodes 📦 📦
 ✓ Writing configuration 📜
 ✓ Starting control-plane 🕹️
@@ -61,12 +99,29 @@ the worker node. Then run kubedoom inside the cluster by applying the manifest
provided in this repository:

```console
$ export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"
$ kubectl apply -f manifest/
$ kubectl apply -k manifest/
namespace/kubedoom created
deployment.apps/kubedoom created
serviceaccount/kubedoom created
clusterrolebinding.rbac.authorization.k8s.io/kubedoom created
```

To connect run:
```console
$ vncviewer viewer localhost:5900
```

Kubedoom requires a service account with permissions to list all pods and delete
them and uses kubectl 1.17.3.
them and uses kubectl 1.23.2.

## Building Kubedoom

The repository contains a Dockerfile to build the kubedoom image. You have to
specify your systems architecture as the `TARGETARCH` build argument. For
example `amd64` or `arm64`.

```console
$ docker build --build-arg=TARGETARCH=amd64 -t kubedoom .
```

To change the default VNC password, use `--build-arg=VNCPASSWORD=differentpw`.
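The workflow builds for `linux/amd64` and `linux/arm64` through buildx, and with buildx the `TARGETARCH` build argument is populated automatically per platform. A rough local sketch, assuming buildx and QEMU emulation are set up (add `--push` to publish a multi-arch image; it cannot be `--load`ed into the local daemon):

```console
$ docker buildx create --use
$ docker buildx build --platform linux/amd64,linux/arm64 -t kubedoom .
```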
@@ -512,7 +512,7 @@ static void LoadDefaultCollection(default_collection_t *collection)

    while (!feof(f))
    {
        if (fscanf (f, "%79s %[^\n]\n", defname, strparm) != 2)
        if (fscanf (f, "%79s %99[^\n]\n", defname, strparm) != 2)
        {
            // This line doesn't match

@@ -1312,7 +1312,7 @@ static void LoadDefaultCollection(default_collection_t *collection)

    while (!feof(f))
    {
        if (fscanf (f, "%79s %[^\n]\n", defname, strparm) != 2)
        if (fscanf (f, "%79s %99[^\n]\n", defname, strparm) != 2)
        {
            // This line doesn't match

@@ -2,9 +2,9 @@ kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
  image: kindest/node:v1.17.0
  image: kindest/node:v1.23.0@sha256:49824ab1727c04e56a21a5d8372a402fcd32ea51ac96a2706a12af38934f81ac
- role: worker
  image: kindest/node:v1.17.0
  image: kindest/node:v1.23.0@sha256:49824ab1727c04e56a21a5d8372a402fcd32ea51ac96a2706a12af38934f81ac
  extraPortMappings:
  - containerPort: 5900
    hostPort: 5900
kubedoom.go (92 lines changed)
@@ -1,13 +1,14 @@
package main

import (
	"flag"
	"log"
	"net"
	"os"
	"os/exec"
	"strconv"
	"strings"
	"time"
	"strconv"
)

func hash(input string) int32 {
@@ -16,7 +17,7 @@ func hash(input string) int32 {
	for _, char := range input {
		hash = ((hash << 5) + hash + int32(char))
	}
	if (hash < 0) {
	if hash < 0 {
		hash = 0 - hash
	}
	return hash
@@ -24,7 +25,7 @@ func hash(input string) int32 {

func runCmd(cmdstring string) {
	parts := strings.Split(cmdstring, " ")
	cmd := exec.Command(parts[0], parts[1:len(parts)]...)
	cmd := exec.Command(parts[0], parts[1:]...)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	err := cmd.Run()
@@ -34,7 +35,7 @@ func runCmd(cmdstring string) {
}

func outputCmd(argv []string) string {
	cmd := exec.Command(argv[0], argv[1:len(argv)]...)
	cmd := exec.Command(argv[0], argv[1:]...)
	cmd.Stderr = os.Stderr
	output, err := cmd.Output()
	if err != nil {
@@ -45,7 +46,7 @@ func outputCmd(argv []string) string {

func startCmd(cmdstring string) {
	parts := strings.Split(cmdstring, " ")
	cmd := exec.Command(parts[0], parts[1:len(parts)]...)
	cmd := exec.Command(parts[0], parts[1:]...)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	cmd.Stdin = os.Stdin
@@ -55,16 +56,53 @@ func startCmd(cmdstring string) {
	}
}

func getPods() []string {
	args := []string{"kubectl", "get", "pods", "-A", "-o", "go-template", "--template={{range .items}}{{.metadata.namespace}}/{{.metadata.name}} {{end}}"}
type Mode interface {
	getEntities() []string
	deleteEntity(string)
}

type podmode struct {
}

func (m podmode) getEntities() []string {
	var args []string
	if namespace, exists := os.LookupEnv("NAMESPACE"); exists {
		args = []string{"kubectl", "get", "pods", "--namespace", namespace, "-o", "go-template", "--template={{range .items}}{{.metadata.namespace}}/{{.metadata.name}} {{end}}"}
	} else {
		args = []string{"kubectl", "get", "pods", "-A", "-o", "go-template", "--template={{range .items}}{{.metadata.namespace}}/{{.metadata.name}} {{end}}"}
	}
	output := outputCmd(args)
	outputstr := strings.TrimSpace(output)
	pods := strings.Split(outputstr, " ")
	return pods
}

func socketLoop(listener net.Listener) {
	for true {
func (m podmode) deleteEntity(entity string) {
	log.Printf("Pod to kill: %v", entity)
	podparts := strings.Split(entity, "/")
	cmd := exec.Command("/usr/bin/kubectl", "delete", "pod", "-n", podparts[0], podparts[1])
	go cmd.Run()
}

type nsmode struct {
}

func (m nsmode) getEntities() []string {
	args := []string{"kubectl", "get", "namespaces", "-o", "go-template", "--template={{range .items}}{{.metadata.name}} {{end}}"}
	output := outputCmd(args)
	outputstr := strings.TrimSpace(output)
	namespaces := strings.Split(outputstr, " ")
	return namespaces
}

func (m nsmode) deleteEntity(entity string) {
	log.Printf("Namespace to kill: %v", entity)
	cmd := exec.Command("/usr/bin/kubectl", "delete", "namespace", entity)
	go cmd.Run()
}

func socketLoop(listener net.Listener, mode Mode) {
	for {
		conn, err := listener.Accept()
		if err != nil {
			panic(err)
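For reference, the pod query that `getEntities` shells out to boils down to the following kubectl invocation (quoting added for an interactive shell); it prints space-separated `namespace/name` pairs:

```console
$ kubectl get pods -A -o go-template \
    --template='{{range .items}}{{.metadata.namespace}}/{{.metadata.name}} {{end}}'
```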
@@ -78,11 +116,11 @@ func socketLoop(listener net.Listener) {
		}
		bytes = bytes[0:n]
		strbytes := strings.TrimSpace(string(bytes))
		pods := getPods()
		entities := mode.getEntities()
		if strbytes == "list" {
			for _, pod := range pods {
				padding := strings.Repeat("\n", 255 - len(pod))
				_, err = conn.Write([]byte(pod + padding))
			for _, entity := range entities {
				padding := strings.Repeat("\n", 255-len(entity))
				_, err = conn.Write([]byte(entity + padding))
				if err != nil {
					log.Fatal("Could not write to socker file")
				}
@@ -95,12 +133,9 @@ func socketLoop(listener net.Listener) {
			if err != nil {
				log.Fatal("Could not parse kill hash")
			}
			for _, pod := range pods {
				if (hash(pod) == int32(killhash)) {
					log.Printf("Pod to kill: %v", pod)
					podparts := strings.Split(pod, "/")
					cmd := exec.Command("/usr/bin/kubectl", "delete", "pod", "-n", podparts[0], podparts[1])
					go cmd.Run()
			for _, entity := range entities {
				if hash(entity) == int32(killhash) {
					mode.deleteEntity(entity)
					break
				}
			}
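The game and kubedoom talk over the unix socket `/dockerdoom.socket` using the `list` and kill-hash messages handled above. As a rough sketch, assuming a shell inside the running container (where `netcat-openbsd` is installed by the Dockerfile), you can send the list command yourself:

```console
$ echo list | nc -U /dockerdoom.socket
```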
@@ -112,6 +147,21 @@ func socketLoop(listener net.Listener) {
}

func main() {
	var modeFlag string
	flag.StringVar(&modeFlag, "mode", "pods", "What to kill pods|namespaces")

	flag.Parse()

	var mode Mode
	switch modeFlag {
	case "pods":
		mode = podmode{}
	case "namespaces":
		mode = nsmode{}
	default:
		log.Fatalf("Mode should be pods or namespaces")
	}

	listener, err := net.Listen("unix", "/dockerdoom.socket")
	if err != nil {
		log.Fatalf("Could not create socket file")
@@ -124,6 +174,6 @@ func main() {
	log.Print("You can now connect to it with a VNC viewer at port 5900")

	log.Print("Trying to start DOOM ...")
	startCmd("/usr/bin/env DISPLAY=:99 /usr/local/games/psdoom -warp -E1M1")
	socketLoop(listener)
	startCmd("/usr/bin/env DISPLAY=:99 /usr/local/games/psdoom -warp -E1M1 -skill 1 -nomouse")
	socketLoop(listener, mode)
}
@@ -1,3 +1,4 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
@@ -18,7 +19,10 @@ spec:
      hostNetwork: true
      serviceAccountName: kubedoom
      containers:
      - image: storaxdev/kubedoom:0.2.0
      - image: ghcr.io/storax/kubedoom:latest
        env:
        - name: NAMESPACE
          value: default
        name: kubedoom
        ports:
        - containerPort: 5900
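The deployment above pins `NAMESPACE` to `default`. A rough sketch of pointing it at another namespace after deploying (the target namespace here is purely illustrative):

```console
$ kubectl -n kubedoom set env deployment/kubedoom NAMESPACE=kube-system
```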
manifest/kustomization.yaml (new file, 5 lines added)
@@ -0,0 +1,5 @@
---
resources:
- namespace.yaml
- deployment.yaml
- rbac.yaml
@@ -1,3 +1,4 @@
---
apiVersion: v1
kind: Namespace
metadata:
@@ -1,3 +1,4 @@
---
apiVersion: v1
kind: ServiceAccount
metadata: