k3d/stress/deployment2.yaml
Normal file
@@ -0,0 +1,31 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: web
  name: web
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: web
    spec:
      containers:
      - image: bee42/whoami:2.2.0
        name: whoami
        ports:
        - containerPort: 80
        resources:
          requests:
            memory: "64Mi"
            cpu: "250m"
          limits:
            memory: "128Mi"
            cpu: "500m"
k3d/stress/doku
Normal file
@@ -0,0 +1,274 @@
Deployment with Resource Constraints

https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/

It's a good practice to assign a CPU request and a CPU limit to a container. Containers cannot use more CPU than the configured limit. Provided the system has CPU time free, a container is guaranteed to be allocated as much CPU as it requests.

mkdir ~/stress && cd ~/stress
kubectl create namespace stress
kubectl config set-context --current --namespace stress
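
To double-check that kubectl now targets the stress namespace (an optional check, not part of the original walkthrough):

kubectl config view --minify --output 'jsonpath={..namespace}'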

cat >deployment2.yaml <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: web
  name: web
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: web
    spec:
      containers:
      - image: bee42/whoami:2.2.0
        name: whoami
        ports:
        - containerPort: 80
        resources:
          requests:
            memory: "64Mi"
            cpu: "250m"
          limits:
            memory: "128Mi"
            cpu: "500m"
EOF

kubectl apply -f deployment2.yaml
kubectl get pods
kubectl get all
kubectl describe deployment web
kubectl rollout history deployment web

# set change cause
kubectl annotate deployment web kubernetes.io/change-cause='set cpu limit 500'
kubectl rollout history deployment web
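
To see why the change-cause annotation pays off, a second revision can be produced; kubectl set resources triggers a new rollout (the 750m value is just an illustrative bump, not from the original walkthrough):

kubectl set resources deployment web --limits=cpu=750m
kubectl annotate deployment web kubernetes.io/change-cause='set cpu limit 750'
kubectl rollout history deployment web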

QoS:
https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/
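
The web pod requests less than its limits, so it should land in the Burstable QoS class; this can be read straight from the pod status (standard kubectl jsonpath, the label selector matches the deployment above):

kubectl get pod -l app=web -o jsonpath='{.items[0].status.qosClass}'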

ResourceQuota:
https://kubernetes.io/docs/tasks/administer-cluster/manage-resources/quota-pod-namespace/
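
A minimal ResourceQuota sketch in the same heredoc style, capping the pod count in the stress namespace (the name pod-quota and the limit of 8 pods are illustrative assumptions):

cat >quota.yaml <<EOF
apiVersion: v1
kind: ResourceQuota
metadata:
  name: pod-quota
  namespace: stress
spec:
  hard:
    pods: "8"
EOF
kubectl apply -f quota.yaml
kubectl describe resourcequota pod-quota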

Generate Stress

https://linux.die.net/man/1/stress

create your own stress tool:

mkdir -p ~/stress && cd ~/stress
cat >Dockerfile <<EOF
FROM ubuntu

RUN apt-get update && apt-get install -y stress && rm -rf /var/lib/apt/lists/*

ENTRYPOINT ["/usr/bin/stress", "--verbose"]
CMD []
EOF
docker build -t 127.0.0.1:5000/bee42/stress .
docker push 127.0.0.1:5000/bee42/stress
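
Before using the image in the cluster, it can be smoke-tested locally; --cpu and --timeout are standard stress options (10s is an arbitrary duration):

docker run --rm 127.0.0.1:5000/bee42/stress --cpu 1 --timeout 10s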

Todo:

apply the pod
check with kubectl top pods

cat >pod.yaml <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: cpu-demo
spec:
  containers:
  - name: cpu
    image: cnbc-registry:5000/bee42/stress
    resources:
      limits:
        cpu: "1"
      requests:
        cpu: "0.5"
    args:
    - --cpu
    - "2"
EOF
kubectl apply -f pod.yaml
kubectl describe pod cpu-demo
# wait a minute
kubectl top pods

NAME                   CPU(cores)   MEMORY(bytes)
cpu-demo               1962m        1Mi
hey                    0m           0Mi
load                   0m           1Mi
web-7cdc98f947-2n2dp   0m           3Mi
web-7cdc98f947-gn2cg   0m           4Mi

# Consumes more CPU than the configured limit!

# PLEASE KILL this pod
kubectl delete pod cpu-demo --force --grace-period=0

stress-deploy.yaml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: cpu-demo
  labels:
    app: cpu-demo
spec:
  replicas: 4
  selector:
    matchLabels:
      app: cpu-demo
  template:
    metadata:
      labels:
        app: cpu-demo
    spec:
      containers:
      - name: stress
        image: cnbc-registry:5000/bee42/stress
        resources:
          limits:
            cpu: "1"
          requests:
            cpu: "0.5"
        args:
        - --cpu
        - "2"
      nodeName: k3d-cnbc-agent-1

kubectl apply -f stress-deploy.yaml
kubectl top nodes
kubectl top pods

NAME                       CPU(cores)   MEMORY(bytes)
cpu-demo-595664dd8-lschw   1022m        1Mi
cpu-demo-595664dd8-npx4w   975m         1Mi
cpu-demo-595664dd8-p249m   993m         1Mi
cpu-demo-595664dd8-twn84   940m         1Mi
web-588896757f-52gx9       0m           1Mi
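
Because all four replicas are pinned to k3d-cnbc-agent-1 via nodeName, their CPU requests (4 x 500m = 2000m) are all allocated on that one node; the node-level totals can be inspected like this (the grep window size is arbitrary):

kubectl describe node k3d-cnbc-agent-1 | grep -A 8 'Allocated resources'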

kubectl scale deploy cpu-demo --replicas=3

sleep 45

kubectl top nodes
kubectl top pods

NAME                       CPU(cores)   MEMORY(bytes)
cpu-demo-595664dd8-npx4w   1017m        1Mi
cpu-demo-595664dd8-p249m   988m         1Mi
cpu-demo-595664dd8-twn84   980m         1Mi
web-588896757f-52gx9       1m           1Mi

Kubelet parameter:
https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/
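
The static CPU manager policy, for example, gives Guaranteed pods exclusive cores. A hedged sketch of passing that kubelet flag through k3d at cluster-create time (k3d v5 --k3s-arg syntax; the node filter and flag values are assumptions, and the static policy additionally needs a CPU reservation):

k3d cluster create cnbc \
  --k3s-arg "--kubelet-arg=cpu-manager-policy=static@agent:*" \
  --k3s-arg "--kubelet-arg=kube-reserved=cpu=500m@agent:*"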

Tip:

Use imagePullSecrets to authenticate against container registries.

https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
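
A minimal sketch (the secret name regcred and the credentials are placeholders):

kubectl create secret docker-registry regcred \
  --docker-server=cnbc-registry:5000 \
  --docker-username=<user> \
  --docker-password=<password>

Then reference it in the pod spec:

  imagePullSecrets:
  - name: regcred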

Memory Overload

https://www.cyberciti.biz/faq/stress-test-linux-unix-server-with-stress-ng/
https://sysdig.com/blog/troubleshoot-kubernetes-oom/

k3d node create --cluster cnbc --role agent --memory 2G k3d-cnbc-agent-2
kubectl get nodes
# wait for ready
# mem-deploy.yaml is listed below
kubectl apply -f mem-deploy.yaml

kubectl top nodes
# OK!
kubectl scale deploy mem-demo --replicas=4

# Oops: the requests say 128Mi, but each replica actually touches ~490M,
# so five replicas need ~2.4G on a 2G node...
kubectl scale deploy mem-demo --replicas=5
# wait 60 secs
kubectl get pods -w
NAME                        READY   STATUS             RESTARTS      AGE
web-588896757f-52gx9        1/1     Running            0             60m
mem-demo-67998d7456-4ntqw   1/1     Running            0             4m20s
mem-demo-67998d7456-htvnn   1/1     Running            0             4m20s
mem-demo-67998d7456-4f2wd   1/1     Running            0             2m2s
mem-demo-67998d7456-kx4xv   1/1     Running            2 (49s ago)   4m20s
mem-demo-67998d7456-d86cs   0/1     OOMKilled          2 (25s ago)   58s
mem-demo-67998d7456-d86cs   0/1     CrashLoopBackOff   2 (12s ago)   65s
mem-demo-67998d7456-d86cs   1/1     Running            3 (28s ago)   81s
mem-demo-67998d7456-4ntqw   0/1     OOMKilled          0             4m44s
mem-demo-67998d7456-4ntqw   1/1     Running            1 (2s ago)    4m45s
mem-demo-67998d7456-4f2wd   0/1     OOMKilled          0             2m29s
mem-demo-67998d7456-4f2wd   1/1     Running            1 (2s ago)    2m30s
mem-demo-67998d7456-d86cs   0/1     OOMKilled          3 (35s ago)   88s
mem-demo-67998d7456-d86cs   0/1     CrashLoopBackOff   3 (12s ago)   99s
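
OOMKilled also shows up in each container's lastState after a restart; a quick way to list it for all mem-demo pods (standard kubectl jsonpath, no extra tooling assumed):

kubectl get pods -l app=mem-demo \
  -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.status.containerStatuses[0].lastState.terminated.reason}{"\n"}{end}'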

# fix: scale down
kubectl scale deploy mem-demo --replicas=4

# cleanup deploy
kubectl delete -f mem-deploy.yaml

# drain node (--ignore-daemonsets is needed while k3s svclb daemonset pods run on the node)
kubectl drain k3d-k3d-cnbc-agent-2-0 --ignore-daemonsets
k3d node delete k3d-k3d-cnbc-agent-2-0
kubectl delete node k3d-k3d-cnbc-agent-2-0

mem-deploy.yaml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: mem-demo
  labels:
    app: mem-demo
spec:
  replicas: 3
  selector:
    matchLabels:
      app: mem-demo
  template:
    metadata:
      labels:
        app: mem-demo
    spec:
      containers:
      - name: stress
        image: cnbc-registry:5000/bee42/stress
        resources:
          limits:
            memory: "500Mi"
          requests:
            memory: "128Mi"
        args:
        - "-m"
        - "1"
        - "--vm-bytes"
        - "490M"
      nodeName: k3d-k3d-cnbc-agent-2-0

Kubernetes Failures

https://codeberg.org/hjacobs/kubernetes-failure-stories
https://medium.com/omio-engineering/cpu-limits-and-aggressive-throttling-in-kubernetes-c5b20bd8a718
https://home.robusta.dev/blog/stop-using-cpu-limits

Best practices for CPU limits and requests on Kubernetes:

Use CPU requests for everything
Make sure they are accurate
Do not use CPU limits.

What about memory limits and requests?

Always use memory limits
Always use memory requests
Always set your memory requests equal to your limits
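
Put together as a container resources block, those rules look like this sketch (the concrete values are illustrative):

resources:
  requests:
    cpu: "250m"       # accurate CPU request, but no CPU limit
    memory: "256Mi"   # memory request ...
  limits:
    memory: "256Mi"   # ... set equal to the memory limit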