6 Commits
0.3.0 ... 0.5.0

Author | SHA1 | Message | Date
David Zuber | 759d3edd4e | Change password and update to k8s 1.19 | 2020-10-09 18:48:37 +01:00
David Zuber | 8d3e77803c | Merge pull request #10 from welshstew/master: Added instructions to run locally with podman | 2020-10-09 18:06:08 +01:00
Stuart Winchester | aa3f5d04ee | Update README.md | 2020-10-09 16:57:10 +01:00
David Zuber | 7bbc9b23e1 | Merge pull request #6 from AXDOOMER/master: Fix buffer overflow in LoadDefaultCollection | 2020-06-26 10:40:50 +01:00
Alexandre-Xavier Labonté-Lamoureux | eb956f8a36 | Fix buffer overflow in LoadDefaultCollection (CVE-2020-15007: https://nvd.nist.gov/vuln/detail/CVE-2020-15007) | 2020-06-25 23:25:43 -04:00
David Zuber | 8de7d5deea | Add -mode flag and update to 1.18.2 | 2020-05-17 09:31:41 +01:00
8 changed files with 131 additions and 32 deletions

View File

@@ -1,3 +1,16 @@
# 0.5.0
* New image storaxdev/kubedoom:0.5.0
* New default VNC password is `idbehold`.
* Update kubernetes to 1.19.1
* Update to Ubuntu 20.10
# 0.4.0
* New image storaxdev/kubedoom:0.4.0
* New `-mode` flag to switch between killing pods or namespaces.
* Update kubernetes to 1.18.2
# 0.3.0
* New image storaxdev/kubedoom:0.3.0

View File

@@ -4,7 +4,7 @@ WORKDIR /go/src/kubedoom
ADD kubedoom.go .
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o kubedoom .
FROM ubuntu:19.10 AS ubuntu
FROM ubuntu:20.10 AS ubuntu
# make sure the package repository is up to date
RUN apt-get update
@@ -46,7 +46,7 @@ RUN apt-get install -y \
WORKDIR /root/
# Setup a password
RUN mkdir ~/.vnc && x11vnc -storepasswd 1234 ~/.vnc/passwd
RUN mkdir ~/.vnc && x11vnc -storepasswd idbehold ~/.vnc/passwd
COPY --from=ubuntu-deps /doom1.wad .
COPY --from=ubuntu-deps /usr/bin/kubectl /usr/bin/

View File

@@ -11,20 +11,41 @@ which was forked from psdoom.
![DOOM](assets/doom.jpg)
## Usage
## Running Locally
Run `storaxdev/kubedoom:0.3.0` locally:
In order to run locally you will need to
1. Run the kubedoom container
2. Attach a VNC client to the appropriate port (5901)
### With Docker
Run `storaxdev/kubedoom:0.5.0` with docker locally:
```console
$ docker run -p5900:5900 \
$ docker run -p5901:5900 \
--net=host \
-v ~/.kube:/root/.kube \
--rm -it --name kubedoom \
storaxdev/kubedoom:0.3.0
storaxdev/kubedoom:0.5.0
```
Now start a VNC viewer and connect to `localhost:5900`. The password is `1234`:
### With Podman
Run `storaxdev/kubedoom:0.5.0` with podman locally:
```console
$ vncviewer viewer localhost
$ podman run -it -p5901:5900/tcp \
-v ~/.kube:/tmp/.kube --security-opt label=disable \
--env "KUBECONFIG=/tmp/.kube/config" --name kubedoom
storaxdev/kubedoom:0.5.0
```
### Attaching a VNC Client
Now start a VNC viewer and connect to `localhost:5901`. The password is `idbehold`:
```console
$ vncviewer viewer localhost:5901
```
You should now see DOOM! Now if you want to get the job done quickly enter the
cheat `idspispopd` and walk through the wall on your right. You should be
@@ -32,6 +53,21 @@ greeted by your pods as little pink monsters. Press `CTRL` to fire. If the
pistol is not your thing, cheat with `idkfa` and press `5` for a nice surprise.
Pause the game with `ESC`.
### Killing namespaces
Kubedoom now also supports killing namespaces [in case you have too many of
them](https://github.com/storax/kubedoom/issues/5). Simply set the `-mode` flag
to `namespaces`:
```console
$ docker run -p5901:5900 \
--net=host \
-v ~/.kube:/root/.kube \
--rm -it --name kubedoom \
storaxdev/kubedoom:0.5.0 \
-mode namespaces
```
### Running Kubedoom inside Kubernetes
See the example in the `/manifest` directory. You can quickly test it using
@@ -41,7 +77,7 @@ example config from this repository:
```console
$ kind create cluster --config kind-config.yaml
Creating cluster "kind" ...
✓ Ensuring node image (kindest/node:v1.18.0) 🖼
✓ Ensuring node image (kindest/node:v1.19.1) 🖼
✓ Preparing nodes 📦 📦
✓ Writing configuration 📜
✓ Starting control-plane 🕹️
@@ -53,7 +89,7 @@ You can now use your cluster with:
kubectl cluster-info --context kind-kind
Not sure what to do next? 😅 Check out https://kind.sigs.k8s.io/docs/user/quick-start/
Not sure what to do next? 😅 Check out https://kind.sigs.k8s.io/docs/user/quick-start/
```
This will spin up a 2 node cluster inside docker, with port 5900 exposed from
@@ -61,12 +97,17 @@ the worker node. Then run kubedoom inside the cluster by applying the manifest
provided in this repository:
```console
$ export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"
$ kubectl apply -f manifest/
namespace/kubedoom created
deployment.apps/kubedoom created
serviceaccount/kubedoom created
clusterrolebinding.rbac.authorization.k8s.io/kubedoom created
```
To connect run:
```console
$ vncviewer viewer localhost:5900
```
Kubedoom requires a service account with permissions to list all pods and delete
them and uses kubectl 1.18.1.
them and uses kubectl 1.19.2.

View File

@@ -512,7 +512,7 @@ static void LoadDefaultCollection(default_collection_t *collection)
while (!feof(f))
{
if (fscanf (f, "%79s %[^\n]\n", defname, strparm) != 2)
if (fscanf (f, "%79s %99[^\n]\n", defname, strparm) != 2)
{
// This line doesn't match

View File

@@ -1312,7 +1312,7 @@ static void LoadDefaultCollection(default_collection_t *collection)
while (!feof(f))
{
if (fscanf (f, "%79s %[^\n]\n", defname, strparm) != 2)
if (fscanf (f, "%79s %99[^\n]\n", defname, strparm) != 2)
{
// This line doesn't match

View File

@@ -2,9 +2,9 @@ kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
image: kindest/node:v1.18.0@sha256:0e20578828edd939d25eb98496a685c76c98d54084932f76069f886ec315d694
image: kindest/node:v1.19.1@sha256:98cf5288864662e37115e362b23e4369c8c4a408f99cbc06e58ac30ddc721600
- role: worker
image: kindest/node:v1.18.0@sha256:0e20578828edd939d25eb98496a685c76c98d54084932f76069f886ec315d694
image: kindest/node:v1.19.1@sha256:98cf5288864662e37115e362b23e4369c8c4a408f99cbc06e58ac30ddc721600
extraPortMappings:
- containerPort: 5900
hostPort: 5900

View File

@@ -1,13 +1,14 @@
package main
import (
"flag"
"log"
"net"
"os"
"os/exec"
"strconv"
"strings"
"time"
"strconv"
)
func hash(input string) int32 {
@@ -16,7 +17,7 @@ func hash(input string) int32 {
for _, char := range input {
hash = ((hash << 5) + hash + int32(char))
}
if (hash < 0) {
if hash < 0 {
hash = 0 - hash
}
return hash
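The hunk above only drops the redundant parentheses; the function itself is kubedoom's djb2-style hash, which folds an entity name into a non-negative int32 that psdoom later sends back as the kill target. A minimal standalone sketch of that behaviour is below — the 5381 seed is assumed from the djb2 convention (it is not visible in this hunk) and the pod name is made up:

```go
package main

import "fmt"

// hash mirrors kubedoom's djb2-style fold: shift-and-add each rune into an
// int32, then flip the sign if the accumulator went negative.
func hash(input string) int32 {
	var h int32 = 5381 // assumed seed; not shown in the hunk above
	for _, char := range input {
		h = (h << 5) + h + int32(char)
	}
	if h < 0 {
		h = 0 - h
	}
	return h
}

func main() {
	// psdoom identifies a monster's target by this number, e.g. "kill 12345".
	fmt.Println(hash("monitoring/prometheus-0"))
}
```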
@@ -55,7 +56,15 @@ func startCmd(cmdstring string) {
}
}
func getPods() []string {
type Mode interface {
getEntities() []string
deleteEntity(string)
}
type podmode struct {
}
func (m podmode) getEntities() []string {
args := []string{"kubectl", "get", "pods", "-A", "-o", "go-template", "--template={{range .items}}{{.metadata.namespace}}/{{.metadata.name}} {{end}}"}
output := outputCmd(args)
outputstr := strings.TrimSpace(output)
@@ -63,7 +72,31 @@ func getPods() []string {
return pods
}
func socketLoop(listener net.Listener) {
func (m podmode) deleteEntity(entity string) {
log.Printf("Pod to kill: %v", entity)
podparts := strings.Split(entity, "/")
cmd := exec.Command("/usr/bin/kubectl", "delete", "pod", "-n", podparts[0], podparts[1])
go cmd.Run()
}
type nsmode struct {
}
func (m nsmode) getEntities() []string {
args := []string{"kubectl", "get", "namespaces", "-o", "go-template", "--template={{range .items}}{{.metadata.name}} {{end}}"}
output := outputCmd(args)
outputstr := strings.TrimSpace(output)
namespaces := strings.Split(outputstr, " ")
return namespaces
}
func (m nsmode) deleteEntity(entity string) {
log.Printf("Namespace to kill: %v", entity)
cmd := exec.Command("/usr/bin/kubectl", "delete", "namespace", entity)
go cmd.Run()
}
func socketLoop(listener net.Listener, mode Mode) {
for true {
conn, err := listener.Accept()
if err != nil {
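This refactor is what enables the new `-mode` flag: everything that was hard-coded to pods now goes through the two-method `Mode` interface, and `socketLoop` receives whichever implementation was selected. Purely to illustrate how another target type could plug in — this is not part of the commit, and the `deploymentmode` type and its kubectl invocation are hypothetical — a third implementation reusing the file's existing `outputCmd` helper might look like:

```go
// Hypothetical extra Mode implementation (illustration only, not in the diff).
// It relies on outputCmd, log, exec and strings already present in kubedoom.go.
type deploymentmode struct {
}

func (m deploymentmode) getEntities() []string {
	args := []string{"kubectl", "get", "deployments", "-A", "-o", "go-template",
		"--template={{range .items}}{{.metadata.namespace}}/{{.metadata.name}} {{end}}"}
	output := outputCmd(args)
	return strings.Split(strings.TrimSpace(output), " ")
}

func (m deploymentmode) deleteEntity(entity string) {
	log.Printf("Deployment to kill: %v", entity)
	parts := strings.Split(entity, "/")
	cmd := exec.Command("/usr/bin/kubectl", "delete", "deployment", "-n", parts[0], parts[1])
	go cmd.Run()
}
```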
@@ -78,11 +111,11 @@ func socketLoop(listener net.Listener) {
}
bytes = bytes[0:n]
strbytes := strings.TrimSpace(string(bytes))
pods := getPods()
entities := mode.getEntities()
if strbytes == "list" {
for _, pod := range pods {
padding := strings.Repeat("\n", 255 - len(pod))
_, err = conn.Write([]byte(pod + padding))
for _, entity := range entities {
padding := strings.Repeat("\n", 255-len(entity))
_, err = conn.Write([]byte(entity + padding))
if err != nil {
log.Fatal("Could not write to socker file")
}
@@ -95,12 +128,9 @@ func socketLoop(listener net.Listener) {
if err != nil {
log.Fatal("Could not parse kill hash")
}
for _, pod := range pods {
if (hash(pod) == int32(killhash)) {
log.Printf("Pod to kill: %v", pod)
podparts := strings.Split(pod, "/")
cmd := exec.Command("/usr/bin/kubectl", "delete", "pod", "-n", podparts[0], podparts[1])
go cmd.Run()
for _, entity := range entities {
if hash(entity) == int32(killhash) {
mode.deleteEntity(entity)
break
}
}
@@ -112,6 +142,21 @@ func socketLoop(listener net.Listener) {
}
func main() {
var modeFlag string
flag.StringVar(&modeFlag, "mode", "pods", "What to kill pods|namespaces")
flag.Parse()
var mode Mode
switch modeFlag {
case "pods":
mode = podmode{}
case "namespaces":
mode = nsmode{}
default:
log.Fatalf("Mode should be pods or namespaces")
}
listener, err := net.Listen("unix", "/dockerdoom.socket")
if err != nil {
log.Fatalf("Could not create socket file")
@@ -125,5 +170,5 @@ func main() {
log.Print("Trying to start DOOM ...")
startCmd("/usr/bin/env DISPLAY=:99 /usr/local/games/psdoom -warp -E1M1")
socketLoop(listener)
socketLoop(listener, mode)
}
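Taken together, the changes above keep kubedoom's small line protocol on `/dockerdoom.socket` intact while making it mode-agnostic: psdoom sends `list` and gets each entity back padded with newlines to a 255-byte record, or sends `kill <hash>` and kubedoom deletes whichever entity hashes to that number. A rough client sketch under those assumptions — only the socket path, the two commands, and the 255-byte padding come from the diff; the rest is illustrative:

```go
package main

import (
	"fmt"
	"io"
	"log"
	"net"
	"strings"
)

func main() {
	// Connect to the unix socket kubedoom listens on (path from the diff).
	conn, err := net.Dial("unix", "/dockerdoom.socket")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Ask for the current entities (pods or namespaces, depending on -mode).
	if _, err := conn.Write([]byte("list")); err != nil {
		log.Fatal(err)
	}

	// Entities come back as fixed 255-byte records padded with newlines;
	// read a few of them for the demo.
	record := make([]byte, 255)
	for i := 0; i < 5; i++ {
		if _, err := io.ReadFull(conn, record); err != nil {
			break
		}
		fmt.Println(strings.TrimSpace(string(record)))
	}

	// To delete one, psdoom would instead send "kill <n>", where <n> is the
	// hash of the entity name as computed in kubedoom.go.
}
```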

View File

@@ -18,7 +18,7 @@ spec:
hostNetwork: true
serviceAccountName: kubedoom
containers:
- image: storaxdev/kubedoom:0.3.0
- image: storaxdev/kubedoom:0.5.0
name: kubedoom
ports:
- containerPort: 5900