k3d/blue-green/doku

Blue Green Deployment

Create version 1 and version 2 of the deployment, plus the service.

# prepare namespace
mkdir ~/blue-green && cd ~/blue-green
kubectl create namespace blue-green
kubectl config set-context --current --namespace blue-green

# create manifests
cat >nginx-v1.yaml <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: web
    version: V1.0.0
  name: webv1
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web
      version: V1.0.0
  template:
    metadata:
      labels:
        app: web
        version: V1.0.0
    spec:
      containers:
      - image: nginx:1.19.3
        name: nginx
        ports:
        - name: http
          containerPort: 80
EOF

cat >nginx-v2.yaml <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: web
    version: V2.0.0
  name: webv2
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web
      version: V2.0.0
  template:
    metadata:
      labels:
        app: web
        version: V2.0.0
    spec:
      containers:
      - image: nginx:1.19.8
        name: nginx
        ports:
        - name: http
          containerPort: 80
EOF

cat >nginx-svc.yaml <<EOF
apiVersion: v1
kind: Service
metadata:
  labels:
    app: web
  name: nginx
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: web
    version: V1.0.0
EOF

Deployment from blue to green

# deploy blue
kubectl apply -f nginx-v1.yaml
kubectl apply -f nginx-svc.yaml
kubectl describe deployment webv1
kubectl describe svc nginx
kubectl get ep nginx

# deploy green
kubectl apply -f nginx-v2.yaml

# traffic shift
kubectl patch service nginx --patch '{"spec":{"selector":{"version":"V2.0.0"}}}'
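
To confirm the selector flip took effect, inspect the Service (a quick sanity check, not part of the original steps):

kubectl get svc nginx -o jsonpath='{.spec.selector.version}{"\n"}'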

# Test if the second deployment was successful
kubectl describe deployment webv2
kubectl describe svc nginx
kubectl get ep nginx

# cleanup v1
kubectl delete deploy webv1

WARNING: The traffic shift does not touch established connections!

kubectl run nc -it --rm --image curlimages/curl -- /bin/sh
nc nginx.blue-green.svc.cluster.local 80
GET /index.html HTTP/1.1
Host: localhost

...

# in another terminal, switch the traffic back with the patch
#kubectl patch service nginx --patch '{"spec":{"selector":{"version":"V1.0.0"}}}'

GET /index.html HTTP/1.1
Host: localhost

# the same server/pod answers ...

k3d/blue-green/nginx-svc.yaml

apiVersion: v1
kind: Service
metadata:
  labels:
    app: web
  name: nginx
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: web
    version: V1.0.0

k3d/blue-green/nginx-v1.yaml

apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: web
    version: V1.0.0
  name: webv1
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web
      version: V1.0.0
  template:
    metadata:
      labels:
        app: web
        version: V1.0.0
    spec:
      containers:
      - image: nginx:1.19.3
        name: nginx
        ports:
        - name: http
          containerPort: 80

k3d/blue-green/nginx-v2.yaml

apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: web
    version: V2.0.0
  name: webv2
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web
      version: V2.0.0
  template:
    metadata:
      labels:
        app: web
        version: V2.0.0
    spec:
      containers:
      - image: nginx:1.19.8
        name: nginx
        ports:
        - name: http
          containerPort: 80

k3d/install_k3d

mkdir ~/k3d

curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash
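
A quick sanity check that the install worked:

k3d version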
CLUSTER=cnbc
IP4=$(/sbin/ip -o -4 addr list br2 | awk '{print $4}' | cut -d/ -f1)

# Get the mirror IP from the TRAINER :)
MIRROR_IP=192.168.1.246
cat >registries.yml <<EOF
mirrors:
  docker.io:
    endpoint:
      - "http://$MIRROR_IP:5001"
      - "https://mirror.gcr.io"
      - "https://docker.io"
EOF

# WARNING: check the IP
k3d cluster create $CLUSTER \
  --api-port 8545 \
  -p "8580:80@loadbalancer" \
  -p "8543:443@loadbalancer" \
  --agents=2 \
  --k3s-arg "--tls-san=$IP4@server:0" \
  --k3s-arg "--disable=traefik@server:0" \
  --registry-create $CLUSTER-registry:0.0.0.0:5000 \
  --registry-config=./registries.yml

echo | \
  openssl s_client -connect $IP4:8545 2>/dev/null | \
  openssl x509 -text

PORT=$(docker container inspect \
  --format '{{ (index (index .NetworkSettings.Ports "5000/tcp") 0).HostPort }}' \
  $CLUSTER-registry)
sudo sh -c "echo 127.0.0.1 $CLUSTER-registry >>/etc/hosts"
curl cnbc-registry:5000/v2/_catalog
source <(k3d completion bash)
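
To verify the cluster-local registry end to end, tag and push an image to it (a sketch; nginx:1.19.3 is just an example image):

docker pull nginx:1.19.3
docker tag nginx:1.19.3 cnbc-registry:5000/nginx:1.19.3
docker push cnbc-registry:5000/nginx:1.19.3
curl cnbc-registry:5000/v2/_catalog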

cloud:

curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash
CLUSTER=cnbc
# cloud instance
IP4=$(/sbin/ip -o -4 addr list eth0 | awk '{print $4}' | cut -d/ -f1)

# WARNING: check the IP
k3d cluster create $CLUSTER \
  --api-port 8545 \
  -p "8580:80@loadbalancer" \
  -p "8543:443@loadbalancer" \
  --agents=2 \
  --k3s-arg "--tls-san=$IP4@server:0" \
  --k3s-arg "--disable=traefik@server:0" \
  --registry-create $CLUSTER-registry:0.0.0.0:5000

echo | \
  openssl s_client -connect $IP4:8545 2>/dev/null | \
  openssl x509 -text

PORT=$(docker container inspect \
  --format '{{ (index (index .NetworkSettings.Ports "5000/tcp") 0).HostPort }}' \
  $CLUSTER-registry)
sudo sh -c "echo 127.0.0.1 $CLUSTER-registry >>/etc/hosts"
curl cnbc-registry:5000/v2/_catalog
source <(k3d completion bash)

sudo apt-get update && sudo apt-get install -y apt-transport-https
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg \
  | sudo apt-key add -
echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" \
  | sudo tee -a /etc/apt/sources.list.d/kubernetes.list

sudo apt-get update
sudo apt-get install -y kubectl

source <(kubectl completion bash)
source <(k3d completion bash)

curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3
chmod 700 get_helm.sh
./get_helm.sh
source <(helm completion bash)

# set the default namespace for the next commands
kubectl config set-context --current --namespace whoami

k3d/pod_init/doku

Pod init container

https://kubernetes.io/docs/concepts/workloads/pods/init-containers/

A Pod can have multiple containers running apps within it, but it can also have one or more init containers, which are run before the app containers are started.

Init containers are exactly like regular containers, except:

Init containers always run to completion.
Each init container must complete successfully before the next one starts.

If a Pod's init container fails, the kubelet repeatedly restarts that init container until it succeeds. However, if the Pod has a restartPolicy of Never, and an init container fails during startup of that Pod, Kubernetes treats the overall Pod as failed.

kubectl delete service whoami
cat >myapp-pod.yaml <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: myapp-pod
  labels:
    app: myapp
spec:
  containers:
  - name: myapp-container
    image: busybox:1.28
    command: ['sh', '-c', 'echo The app is running! && sleep 30']
  initContainers:
  - name: init-whoami
    image: busybox:1.28
    command: ['sh', '-c', "until nslookup whoami.\$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo waiting for whoami; sleep 2; done"]
EOF
kubectl apply -f myapp-pod.yaml
kubectl describe pod myapp-pod
# initContainer not ready yet
kubectl apply -f service.yaml
kubectl describe pod myapp-pod

https://github.com/groundnuty/k8s-wait-for

cat >myapp-pod-exists.yaml <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: myapp-pod-exists
  labels:
    app: myapp
spec:
  containers:
  - name: myapp-container
    image: busybox:1.28
    command: ['sh', '-c', 'echo The app is running! && sleep 30']
  initContainers:
  - name: init-whoami
    image: busybox:1.28
    command: ['sh', '-c', "until nslookup whoami.\$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo waiting for whoami; sleep 2; done"]
  - name: wait-for-whoami-pods
    image: ghcr.io/groundnuty/k8s-wait-for:v1.7
    imagePullPolicy: IfNotPresent
    args:
    - "pod-wr"
    - "-lapp=whoami"
EOF

kubectl scale deployment whoami --replicas=0
kubectl apply -f myapp-pod-exists.yaml

kubectl create role pod-reader \
  --verb=get --verb=list --verb=watch \
  --resource=pods,services,deployments

kubectl get role pod-reader -o yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  creationTimestamp: "2022-10-27T13:11:32Z"
  name: pod-reader
  namespace: whoami-1
  resourceVersion: "48787"
  uid: 2aadcee7-66bc-4ebe-8acc-5f76e2065480
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - services
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - apps
  resources:
  - deployments
  verbs:
  - get
  - list
  - watch

kubectl create rolebinding default-pod-reader \
  --role=pod-reader \
  --serviceaccount=whoami-1:default \
  --namespace=whoami-1

kubectl get rolebindings.rbac.authorization.k8s.io default-pod-reader -o yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  creationTimestamp: "2022-10-27T13:15:02Z"
  name: default-pod-reader
  namespace: whoami-1
  resourceVersion: "48886"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: pod-reader
subjects:
- kind: ServiceAccount
  name: default
  namespace: whoami-1

kubectl auth can-i list deployments.apps \
  --as system:serviceaccount:whoami-1:default
yes
kubectl auth can-i create deployments.apps \
  --as system:serviceaccount:whoami-1:default
no

kubectl scale deployment whoami --replicas=1
kubectl logs myapp-pod-exists -c wait-for-whoami-pods
[2022-10-27 13:34:14] Waiting for pod -lapp=whoami...
[2022-10-27 13:34:16] pod -lapp=whoami is ready.
kubectl get pods
NAME               READY   STATUS      RESTARTS      AGE
...
myapp-pod-exists   0/1     Completed   1 (36s ago)   31m

Postgres initContainer

apiVersion: apps/v1
kind: Deployment
metadata:
  name: webapp
spec:
  selector:
    matchLabels:
      app: webapp
  template:
    metadata:
      labels:
        app: webapp
    spec:
      volumes:
      - name: config-volume
        configMap:
          name: app-config
      initContainers:
      - name: check-db-ready
        image: postgres:15
        command: ['sh', '-c',
          'until pg_isready -h postgres -p 5432;
          do echo waiting for database; sleep 2; done;']

Git Sync

https://github.com/kubernetes/git-sync
https://github.com/kubernetes/git-sync/blob/master/docs/ssh.md
https://github.com/kubernetes/git-sync/blob/master/docs/kubernetes.md
https://www.heise.de/tipps-tricks/SSH-Key-erstellen-so-geht-s-4400280.html

Challenges:

Create an SSH key
Create a local git repo
Create a secret with the SSH key (see the sketch below)
Create a pod with a git-sync initContainer syncing from the local repo

Create SSH Key

ssh-keygen -t rsa -b 4096 -f ~/.ssh/id_cnbc-rsa
# without password...
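
One of the challenge steps is to store this key in a Secret. Following the git-sync ssh docs, a sketch (the secret name git-creds and the key name ssh are assumptions, not from the original notes):

kubectl create secret generic git-creds \
  --from-file=ssh=$HOME/.ssh/id_cnbc-rsa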

Example to use Git Sync

# make a directory (owned by you) for the volume
export DIR="/tmp/git-data"
mkdir -p $DIR

# run the git-sync container (as your own UID)
docker container run -d \
  -v $DIR:/tmp/git \
  -u$(id -u):$(id -g) \
  k8s.gcr.io/git-sync/git-sync:v4.0.0 \
  --repo=https://github.com/kubernetes/git-sync \
  --root=/tmp/git/root \
  --period=30s
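
After the first sync period a checkout should show up below the root directory (a quick check):

ls -l $DIR/root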

# run an nginx container to serve the content
docker container run -d \
  -p 8080:80 \
  -v $DIR:/usr/share/nginx/html \
  nginx

k3d/stress/deployment2.yaml

apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: web
  name: web
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: web
    spec:
      containers:
      - image: bee42/whoami:2.2.0
        name: whoami
        ports:
        - containerPort: 80
        resources:
          requests:
            memory: "64Mi"
            cpu: "250m"
          limits:
            memory: "128Mi"
            cpu: "500m"

k3d/stress/doku

Deployment with Resource Constraints

https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/

It's a good practice to assign a CPU request and a CPU limit to a container. Containers cannot use more CPU than the configured limit. Provided the system has CPU time free, a container is guaranteed to be allocated as much CPU as it requests.

mkdir ~/stress && cd ~/stress
kubectl create namespace stress
kubectl config set-context --current --namespace stress

cat >deployment2.yaml <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: web
  name: web
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: web
    spec:
      containers:
      - image: bee42/whoami:2.2.0
        name: whoami
        ports:
        - containerPort: 80
        resources:
          requests:
            memory: "64Mi"
            cpu: "250m"
          limits:
            memory: "128Mi"
            cpu: "500m"
EOF

kubectl apply -f deployment2.yaml
kubectl get pods
kubectl get all
kubectl describe deployment web
kubectl rollout history deployment web
# set change cause
kubectl annotate deployment web kubernetes.io/change-cause='set cpu limit 500'
kubectl rollout history deployment web
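
The second history call should now show the annotated change cause; the output looks roughly like this (shape from kubectl, revision number assumed):

REVISION  CHANGE-CAUSE
1         set cpu limit 500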

https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/
QoS
https://kubernetes.io/docs/tasks/administer-cluster/manage-resources/quota-pod-namespace/
ResourceQuota
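
A minimal ResourceQuota for the stress namespace could look like this sketch (name and numbers are examples, not from the original notes):

cat >quota.yaml <<EOF
apiVersion: v1
kind: ResourceQuota
metadata:
  name: stress-quota
spec:
  hard:
    pods: "10"
    requests.cpu: "2"
    limits.cpu: "4"
EOF
kubectl apply -f quota.yaml
kubectl describe resourcequota stress-quota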

Generate Stress

https://linux.die.net/man/1/stress

Create your own stress tool:

mkdir -p ~/stress && cd ~/stress
cat >Dockerfile <<EOF
FROM ubuntu

RUN apt-get update && apt-get install -y stress && rm -rf /var/lib/apt/lists/*

ENTRYPOINT ["/usr/bin/stress", "--verbose"]
CMD []
EOF
docker build -t 127.0.0.1:5000/bee42/stress .
docker push 127.0.0.1:5000/bee42/stress
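
Before using the image in the cluster you can exercise it locally (a sketch; --timeout just keeps the run short):

docker run --rm 127.0.0.1:5000/bee42/stress --cpu 1 --timeout 10s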

Todo:

Apply the pod.
Check with kubectl top pods.

cat >pod.yaml <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: cpu-demo
spec:
  containers:
  - name: cpu
    image: cnbc-registry:5000/bee42/stress
    resources:
      limits:
        cpu: "1"
      requests:
        cpu: "0.5"
    args:
    - --cpu
    - "2"
EOF
kubectl apply -f pod.yaml
kubectl describe pod cpu-demo
# wait a minute
kubectl top pods
NAME                   CPU(cores)   MEMORY(bytes)
cpu-demo               1962m        1Mi
hey                    0m           0Mi
load                   0m           1Mi
web-7cdc98f947-2n2dp   0m           3Mi
web-7cdc98f947-gn2cg   0m           4Mi

# The pod consumes more CPU than the configured limit!

# PLEASE KILL this pod
kubectl delete pod cpu-demo --force=true --grace-period=0

stress-deploy.yaml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: cpu-demo
  labels:
    app: cpu-demo
spec:
  replicas: 4
  selector:
    matchLabels:
      app: cpu-demo
  template:
    metadata:
      labels:
        app: cpu-demo
    spec:
      containers:
      - name: stress
        image: cnbc-registry:5000/bee42/stress
        resources:
          limits:
            cpu: "1"
          requests:
            cpu: "0.5"
        args:
        - --cpu
        - "2"
      nodeName: k3d-cnbc-agent-1

kubectl apply -f stress-deploy.yaml
kubectl top pods
kubectl top nodes
NAME                       CPU(cores)   MEMORY(bytes)
cpu-demo-595664dd8-lschw   1022m        1Mi
cpu-demo-595664dd8-npx4w   975m         1Mi
cpu-demo-595664dd8-p249m   993m         1Mi
cpu-demo-595664dd8-twn84   940m         1Mi
web-588896757f-52gx9       0m           1Mi

kubectl scale deploy cpu-demo --replicas=3

sleep 45

kubectl top nodes
kubectl top pods
NAME                       CPU(cores)   MEMORY(bytes)
cpu-demo-595664dd8-npx4w   1017m        1Mi
cpu-demo-595664dd8-p249m   988m         1Mi
cpu-demo-595664dd8-twn84   980m         1Mi
web-588896757f-52gx9       1m           1Mi

https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/
Kubelet parameter

Tip:

Use imagePullSecrets to authenticate against container registries.

https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
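
A sketch of creating such a secret and referencing it, following the linked docs page (values are placeholders, not real credentials):

kubectl create secret docker-registry regcred \
  --docker-server=<your-registry-server> \
  --docker-username=<your-name> \
  --docker-password=<your-password> \
  --docker-email=<your-email>

# then reference it in the pod spec:
#   spec:
#     imagePullSecrets:
#     - name: regcred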

Memory Overload

https://www.cyberciti.biz/faq/stress-test-linux-unix-server-with-stress-ng/
https://sysdig.com/blog/troubleshoot-kubernetes-oom/

k3d node create --cluster cnbc --role agent --memory 2G k3d-cnbc-agent-2
kubectl get nodes
# wait for ready
kubectl apply -f mem-deploy.yaml

kubectl top nodes
# OK!
kubectl scale deploy mem-demo --replicas=4

# Oops...
kubectl scale deploy mem-demo --replicas=5
# wait 60 secs
kubectl get pods -w
NAME                        READY   STATUS             RESTARTS      AGE
web-588896757f-52gx9        1/1     Running            0             60m
mem-demo-67998d7456-4ntqw   1/1     Running            0             4m20s
mem-demo-67998d7456-htvnn   1/1     Running            0             4m20s
mem-demo-67998d7456-4f2wd   1/1     Running            0             2m2s
mem-demo-67998d7456-kx4xv   1/1     Running            2 (49s ago)   4m20s
mem-demo-67998d7456-d86cs   0/1     OOMKilled          2 (25s ago)   58s
mem-demo-67998d7456-d86cs   0/1     CrashLoopBackOff   2 (12s ago)   65s
mem-demo-67998d7456-d86cs   1/1     Running            3 (28s ago)   81s
mem-demo-67998d7456-4ntqw   0/1     OOMKilled          0             4m44s
mem-demo-67998d7456-4ntqw   1/1     Running            1 (2s ago)    4m45s
mem-demo-67998d7456-4f2wd   0/1     OOMKilled          0             2m29s
mem-demo-67998d7456-4f2wd   1/1     Running            1 (2s ago)    2m30s
mem-demo-67998d7456-d86cs   0/1     OOMKilled          3 (35s ago)   88s
mem-demo-67998d7456-d86cs   0/1     CrashLoopBackOff   3 (12s ago)   99s

# fix: scale down
kubectl scale deploy mem-demo --replicas=4

# cleanup deploy
kubectl delete -f mem-deploy.yaml

# drain node
kubectl drain k3d-k3d-cnbc-agent-2-0
k3d node delete k3d-k3d-cnbc-agent-2-0
kubectl delete node k3d-k3d-cnbc-agent-2-0

mem-deploy.yaml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: mem-demo
  labels:
    app: mem-demo
spec:
  replicas: 3
  selector:
    matchLabels:
      app: mem-demo
  template:
    metadata:
      labels:
        app: mem-demo
    spec:
      containers:
      - name: stress
        image: cnbc-registry:5000/bee42/stress
        resources:
          limits:
            memory: "500Mi"
          requests:
            memory: "128Mi"
        args:
        - "-m"
        - "1"
        - "--vm-bytes"
        - "490M"
      nodeName: k3d-k3d-cnbc-agent-2-0

Kubernetes Failures

https://codeberg.org/hjacobs/kubernetes-failure-stories
https://medium.com/omio-engineering/cpu-limits-and-aggressive-throttling-in-kubernetes-c5b20bd8a718
https://home.robusta.dev/blog/stop-using-cpu-limits

Best practices for CPU limits and requests on Kubernetes:

Use CPU requests for everything.
Make sure they are accurate.
Do not use CPU limits.

What about memory limits and requests?

Always use memory limits.
Always use memory requests.
Always set your memory requests equal to your limits (see the sketch below).
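
In a container spec that last rule looks like the fragment below (sizes are just examples). With requests equal to limits the scheduler reserves exactly what the container may actually consume, so the node cannot be memory-overcommitted by this pod:

resources:
  requests:
    memory: "256Mi"
  limits:
    memory: "256Mi"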

k3d/volumes/pv

apiVersion: v1
kind: PersistentVolume
metadata:
  name: task-pv-volume
  labels:
    type: local
spec:
  storageClassName: manual
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/mnt/data"

k3d/volumes/pvc

(empty in this commit)
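
A claim matching the PersistentVolume above, following the upstream "Configure a Pod to Use a PersistentVolume" docs task, might look like this sketch (not part of the commit):

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: task-pv-claim
spec:
  storageClassName: manual
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 3Gi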