Add auto-scaling to relevant services (non-databases) and run the load tester to simulate load

This commit is contained in:
Will James
2019-05-19 14:11:46 +02:00
parent 44f572ebba
commit 8e1889b906
11 changed files with 199 additions and 2 deletions

View File

@@ -0,0 +1,18 @@
---
# HorizontalPodAutoscaler for the cart service: scales the cart Deployment
# between 1 and 2 replicas to keep average CPU utilization near 50%.
# NOTE(review): autoscaling/v2beta2 was removed in Kubernetes 1.26; on newer
# clusters use autoscaling/v2 (same field schema) — confirm cluster version.
apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
metadata:
  name: cart
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: cart
  minReplicas: 1
  maxReplicas: 2
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          # Target average CPU across pods, as a percentage of requests.
          averageUtilization: 50

View File

@@ -0,0 +1,18 @@
---
# HorizontalPodAutoscaler for the catalogue service: scales the catalogue
# Deployment between 1 and 2 replicas at 50% average CPU utilization.
# NOTE(review): autoscaling/v2beta2 was removed in Kubernetes 1.26; on newer
# clusters use autoscaling/v2 (same field schema) — confirm cluster version.
apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
metadata:
  name: catalogue
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: catalogue
  minReplicas: 1
  maxReplicas: 2
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 50

View File

@@ -0,0 +1,18 @@
---
# HorizontalPodAutoscaler for the dispatch service: scales the dispatch
# Deployment between 1 and 2 replicas at 50% average CPU utilization.
# NOTE(review): autoscaling/v2beta2 was removed in Kubernetes 1.26; on newer
# clusters use autoscaling/v2 (same field schema) — confirm cluster version.
apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
metadata:
  name: dispatch
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: dispatch
  minReplicas: 1
  maxReplicas: 2
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 50

View File

@@ -0,0 +1,35 @@
---
# Load-generator Deployment: runs a single robotshop/rs-load pod that
# drives synthetic traffic at the web front-end to exercise the autoscalers.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: load
  labels:
    service: load
spec:
  replicas: 1
  selector:
    matchLabels:
      service: load
  template:
    metadata:
      labels:
        service: load
    spec:
      containers:
        - name: load
          env:
            # In-cluster URL of the web service to hit.
            - name: HOST
              value: "http://web:8080"
            # Number of concurrent simulated clients.
            - name: NUM_CLIENTS
              value: "15"
            - name: SILENT
              value: "0"
            - name: ERROR
              value: "1"
          # NOTE(review): :latest is not reproducible; consider pinning a
          # specific image tag.
          image: robotshop/rs-load:latest
          resources:
            limits:
              cpu: 200m
              memory: 200Mi
            requests:
              cpu: 100m
              memory: 100Mi

View File

@@ -0,0 +1,18 @@
---
# HorizontalPodAutoscaler for the payment service: scales the payment
# Deployment between 1 and 2 replicas at 50% average CPU utilization.
# NOTE(review): autoscaling/v2beta2 was removed in Kubernetes 1.26; on newer
# clusters use autoscaling/v2 (same field schema) — confirm cluster version.
apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
metadata:
  name: payment
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: payment
  minReplicas: 1
  maxReplicas: 2
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 50

View File

@@ -0,0 +1,18 @@
---
# HorizontalPodAutoscaler for the ratings service: scales the ratings
# Deployment between 1 and 2 replicas at 50% average CPU utilization.
# NOTE(review): autoscaling/v2beta2 was removed in Kubernetes 1.26; on newer
# clusters use autoscaling/v2 (same field schema) — confirm cluster version.
apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
metadata:
  name: ratings
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: ratings
  minReplicas: 1
  maxReplicas: 2
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 50

View File

@@ -0,0 +1,18 @@
---
# HorizontalPodAutoscaler for the shipping service: scales the shipping
# Deployment between 1 and 2 replicas at 50% average CPU utilization.
# NOTE(review): autoscaling/v2beta2 was removed in Kubernetes 1.26; on newer
# clusters use autoscaling/v2 (same field schema) — confirm cluster version.
apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
metadata:
  name: shipping
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: shipping
  minReplicas: 1
  maxReplicas: 2
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 50

View File

@@ -0,0 +1,18 @@
---
# HorizontalPodAutoscaler for the user service: scales the user
# Deployment between 1 and 2 replicas at 50% average CPU utilization.
# NOTE(review): autoscaling/v2beta2 was removed in Kubernetes 1.26; on newer
# clusters use autoscaling/v2 (same field schema) — confirm cluster version.
apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
metadata:
  name: user
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: user
  minReplicas: 1
  maxReplicas: 2
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 50

View File

@@ -0,0 +1,18 @@
---
# HorizontalPodAutoscaler for the web front-end: scales the web
# Deployment between 1 and 2 replicas at 50% average CPU utilization.
# NOTE(review): autoscaling/v2beta2 was removed in Kubernetes 1.26; on newer
# clusters use autoscaling/v2 (same field schema) — confirm cluster version.
apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
metadata:
  name: web
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: web
  minReplicas: 1
  maxReplicas: 2
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 50

View File

@@ -0,0 +1,11 @@
---
# Namespace ResourceQuota: caps aggregate CPU/memory requests and limits
# and the total pod count, so autoscaling cannot exhaust the cluster.
apiVersion: v1
kind: ResourceQuota
metadata:
  name: robot-shop-quota
spec:
  hard:
    # Quantities quoted as strings to avoid YAML integer retyping.
    limits.cpu: "4"
    requests.cpu: "2"
    limits.memory: 5Gi
    requests.memory: 3Gi
    pods: "20"

View File

@@ -14,6 +14,13 @@ Alternatively, you can run the Container from Dockerhub directly on one of the n
`docker run -e 'HOST=$webnodeIP:8080' -e 'NUM_CLIENTS=3' -d --rm --name="loadgen" robotshop/rs-load`
## To Do
## Kubernetes
To run the load test in Kubernetes, apply the `K8s/autoscaling/load-deployment.yaml` configuration in your Kubernetes cluster. This will run a replica of the above load test.
kubectl -n robot-shop apply -f K8s/autoscaling/load-deployment.yaml
If you want to enable auto-scaling on the relevant components (non-databases), you can apply everything in that directory. However, you will first need to run a `metrics-server` in your cluster so the Horizontal Pod Autoscaler can observe the CPU usage of the pods.
kubectl -n robot-shop apply -f K8s/autoscaling/
Kubernetes deployment