# Carpool examples

```bash
mkdir ~/carpool && cd ~/carpool
# create app
vi python_rest.py
vi requirements.txt
vi Dockerfile
```

## python_rest.py

```python
from flask import Flask, request, jsonify
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow, fields
from flask_cors import CORS

# Init app
app = Flask(__name__)

# enable CORS
CORS(app)
# connect to an already existing and running database
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:cnbc@postgres:5432/esentricar'
# not important, but otherwise we get a warning on every start
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False

# Init db
db = SQLAlchemy(app)
# Init ma
ma = Marshmallow(app)


# Pool car class/model
class Pool_Car(db.Model):
    # define the table, in our case already existing
    __tablename__ = 'cars'
    car_id = db.Column(db.Integer, primary_key=True)
    license_plate = db.Column(db.String(30), unique=True)
    car_type = db.Column(db.String(20))
    fuel = db.Column(db.String(20))
    number_of_seats = db.Column(db.Integer)

    # set instance attributes
    def __init__(self, license_plate, car_type, fuel, number_of_seats):
        self.license_plate = license_plate
        self.car_type = car_type
        self.fuel = fuel
        self.number_of_seats = number_of_seats


class Pool_CarSchema(ma.Schema):
    class Meta:
        fields = ('car_id', 'license_plate', 'car_type', 'fuel', 'number_of_seats')


# Init schemas
pool_car_schema = Pool_CarSchema()
pool_cars_schema = Pool_CarSchema(many=True)


# Create a pool car
@app.route('/car', methods=['POST'])
def add_pool_car():
    # Get request data as json
    car_entity = request.get_json()
    license_plate = car_entity.get('license_plate')
    car_type = car_entity.get('car_type')
    fuel = car_entity.get('fuel')
    number_of_seats = car_entity.get('number_of_seats')

    new_pool_car = Pool_Car(license_plate, car_type, fuel, number_of_seats)

    db.session.add(new_pool_car)
    db.session.commit()

    return pool_car_schema.jsonify(new_pool_car)


# Get car_id, license_plate and car_type of all pool cars in the table
@app.route('/car', methods=['GET'])
def get_pool_cars():
    all_pool_cars = Pool_Car.query.all()
    result = pool_cars_schema.dump(all_pool_cars)
    car_list = []
    for item in result:
        car_details = {"car_id": None, "license_plate": None, "car_type": None}
        car_details['car_id'] = item['car_id']
        car_details['license_plate'] = item['license_plate']
        car_details['car_type'] = item['car_type']
        car_list.append(car_details)
    return jsonify(car_list)


# Get a single pool car
@app.route('/car/<int:car_id>', methods=['GET'])
def get_pool_car(car_id):
    pool_car = Pool_Car.query.get(car_id)
    return pool_car_schema.jsonify(pool_car)


# Delete a pool car
@app.route('/car/<int:car_id>', methods=['DELETE'])
def delete_pool_car(car_id):
    pool_car = Pool_Car.query.get(car_id)
    db.session.delete(pool_car)
    db.session.commit()

    return pool_car_schema.jsonify(pool_car)


if __name__ == '__main__':
    app.run(debug=True)  # port='5002'
```

## requirements.txt

```
Flask==1.1.1
Flask-Cors==3.0.8
flask-marshmallow==0.10.1
Flask-SQLAlchemy==2.4.1
werkzeug==2.0.2
marshmallow==3.2.2
SQLAlchemy==1.3.11
marshmallow-sqlalchemy==0.19.0
psycopg2-binary==2.8.4
itsdangerous==2.0.1
Jinja2==3.0.3
```

## Dockerfile

```dockerfile
# syntax=docker/dockerfile:1
FROM python:3.8-slim-buster
WORKDIR /app
COPY requirements.txt requirements.txt
RUN pip3 install --disable-pip-version-check -r requirements.txt
COPY . .
ENV FLASK_APP=python_rest
EXPOSE 5000
STOPSIGNAL SIGINT
CMD [ "python3", "-m", "flask", "run", "--host=0.0.0.0" ]
```

## Build app

```bash
docker build -t bee42/carpool:0.1.0 .
```
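Before pushing the image you can smoke-test it locally against a throwaway Postgres. This is only a sketch and not part of the original walkthrough: the network and container names (`carpool-smoke`, `carpool-smoke-app`) are arbitrary, but the database container must be called `postgres`, because the 0.1.0 image hard-codes that hostname in its connection URI.

```bash
# optional local smoke test (sketch); names are arbitrary except "postgres"
docker network create carpool-smoke
docker run -d --name postgres --network carpool-smoke -e POSTGRES_PASSWORD=cnbc postgres:10.4
sleep 5   # give Postgres a moment to initialize
# create the database and table the app expects (same schema as the ConfigMap used later)
docker exec postgres psql -U postgres -c 'CREATE DATABASE esentricar;'
docker exec postgres psql -U postgres -d esentricar -c 'CREATE TABLE cars (
  car_id SERIAL PRIMARY KEY,
  license_plate varchar(30) NOT NULL,
  car_type varchar(30) NOT NULL,
  fuel varchar(30) NOT NULL,
  number_of_seats integer NOT NULL);'
docker run -d --name carpool-smoke-app --network carpool-smoke -p 5002:5000 bee42/carpool:0.1.0
curl http://127.0.0.1:5002/car    # expect an empty list: []
# clean up
docker rm -f carpool-smoke-app postgres
docker network rm carpool-smoke
```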
## Push the image to the local registry

```
curl http://127.0.0.1:5000/v2/_catalog
{"repositories":[]}
docker tag bee42/carpool:0.1.0 127.0.0.1:5000/bee42/carpool:0.1.0
docker push 127.0.0.1:5000/bee42/carpool:0.1.0
curl http://127.0.0.1:5000/v2/_catalog
{"repositories":["bee42/carpool"]}
curl http://127.0.0.1:5000/v2/bee42/carpool/tags/list
{"name":"bee42/carpool","tags":["0.1.0"]}
```

# CarPool Deploy Postgres

New Kubernetes manifests:

- Persistent Volume
- Config Map

```bash
mkdir ~/carpool/postgres && cd ~/carpool/postgres
# define kubernetes manifests
```

## postgres-deploy.yaml

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: postgres
spec:
  replicas: 1
  selector:
    matchLabels:
      app: postgres
  template:
    metadata:
      labels:
        app: postgres
    spec:
      containers:
        - name: postgres
          image: postgres:10.4
          imagePullPolicy: "IfNotPresent"
          ports:
            - containerPort: 5432
          envFrom:
            - configMapRef:
                name: postgres-config
          volumeMounts:
            - mountPath: /var/lib/postgresql/data
              name: postgredb
            - name: config
              mountPath: "/docker-entrypoint-initdb.d"
              readOnly: true
      volumes:
        - name: config
          configMap:
            name: carpool
            items:
              - key: "esentricar.sql"
                path: "esentricar.sql"
        - name: postgredb
          persistentVolumeClaim:
            claimName: postgres-pv-claim
```

## postgres-schema-cm.yaml

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: carpool
data:
  esentricar.sql: |
    CREATE DATABASE esentricar;
    \c esentricar;
    CREATE TABLE cars (
      car_id SERIAL PRIMARY KEY,
      license_plate varchar(30) NOT NULL,
      car_type varchar(30) NOT NULL,
      fuel varchar(30) NOT NULL,
      number_of_seats integer NOT NULL
    );
    CREATE UNIQUE INDEX cars_license_plate ON cars (license_plate);
```

## postgres-config.yaml

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: postgres-config
  labels:
    app: postgres
data:
  POSTGRES_DB: postgresdb
  POSTGRES_USER: postgres
  POSTGRES_PASSWORD: cnbc
```

## postgres-service.yaml

```yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    app: postgres
  name: postgres
spec:
  ports:
    - name: postgres
      port: 5432
      protocol: TCP
      targetPort: 5432
  selector:
    app: postgres
  type: ClusterIP
```

## postgres-pv-claim.yaml

```yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: postgres-pv-claim
  labels:
    app: postgres
spec:
  storageClassName: local-path
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
```

## Create database

```bash
kubectl create namespace carpool
kubectl config set-context --current --namespace=carpool
# apply all manifests
kubectl apply -f .
```
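Once the postgres pod reports `Running`, a quick way to confirm that the init script really created the `esentricar` database and the `cars` table is to run `psql` inside the pod. This is an optional check, not part of the original walkthrough:

```bash
# optional: verify the schema created by /docker-entrypoint-initdb.d/esentricar.sql
kubectl exec -it deploy/postgres -- psql -U postgres -d esentricar -c '\d cars'
```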
```
kubectl get all
kubectl get configmaps
NAME               DATA   AGE
kube-root-ca.crt   1      21m
postgres-config    3      42s
carpool            1      42s

kubectl get pvc,pv
NAME                                      STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
persistentvolumeclaim/postgres-pv-claim   Bound    pvc-e6e7ba0f-ff3e-4bfa-be1b-4db6629d027e   1Gi        RWO            local-path     84s

NAME                                                        CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                       STORAGECLASS   REASON   AGE
persistentvolume/pvc-e6e7ba0f-ff3e-4bfa-be1b-4db6629d027e   1Gi        RWO            Delete           Bound    carpool/postgres-pv-claim   local-path              79s

kubectl describe pv
Name:              pvc-e6e7ba0f-ff3e-4bfa-be1b-4db6629d027e
Labels:            <none>
Annotations:       pv.kubernetes.io/provisioned-by: rancher.io/local-path
Finalizers:        [kubernetes.io/pv-protection]
StorageClass:      local-path
Status:            Bound
Claim:             carpool/postgres-pv-claim
Reclaim Policy:    Delete
Access Modes:      RWO
VolumeMode:        Filesystem
Capacity:          1Gi
Node Affinity:
  Required Terms:
    Term 0:        kubernetes.io/hostname in [k3d-cnbc-agent-1]
Message:
Source:
    Type:          HostPath (bare host directory volume)
    Path:          /var/lib/rancher/k3s/storage/pvc-e6e7ba0f-ff3e-4bfa-be1b-4db6629d027e_carpool_postgres-pv-claim
    HostPathType:  DirectoryOrCreate
Events:            <none>

kubectl logs postgres-65974d6d58-72vk5
The files belonging to this database system will be owned by user "postgres".
This user must also own the server process.
The database cluster will be initialized with locale "en_US.utf8".
The default database encoding has accordingly been set to "UTF8".
The default text search configuration will be set to "english".
Data page checksums are disabled.
fixing permissions on existing directory /var/lib/postgresql/data ... ok
creating subdirectories ... ok
selecting default max_connections ... 100
selecting default shared_buffers ... 128MB
selecting dynamic shared memory implementation ... posix
creating configuration files ... ok
running bootstrap script ... ok
performing post-bootstrap initialization ... ok
syncing data to disk ... ok
Success. You can now start the database server using:
    pg_ctl -D /var/lib/postgresql/data -l logfile start
WARNING: enabling "trust" authentication for local connections
You can change this by editing pg_hba.conf or using the option -A, or
--auth-local and --auth-host, the next time you run initdb.
waiting for server to start....2022-03-08 12:30:20.872 UTC [42] LOG:  listening on Unix socket "/var/run/postgresql/.s.PGSQL.5432"
2022-03-08 12:30:20.901 UTC [43] LOG:  database system was shut down at 2022-03-08 12:30:20 UTC
2022-03-08 12:30:20.911 UTC [42] LOG:  database system is ready to accept connections
 done
server started
CREATE DATABASE
ALTER ROLE
/usr/local/bin/docker-entrypoint.sh: running /docker-entrypoint-initdb.d/esentricar.sql
CREATE DATABASE
You are now connected to database "esentricar" as user "postgres".
CREATE TABLE
CREATE INDEX
2022-03-08 12:30:21.781 UTC [42] LOG:  received fast shutdown request
waiting for server to shut down....2022-03-08 12:30:21.783 UTC [42] LOG:  aborting any active transactions
2022-03-08 12:30:21.787 UTC [42] LOG:  worker process: logical replication launcher (PID 49) exited with exit code 1
2022-03-08 12:30:21.790 UTC [44] LOG:  shutting down
2022-03-08 12:30:21.820 UTC [42] LOG:  database system is shut down
 done
server stopped
PostgreSQL init process complete; ready for start up.
2022-03-08 12:30:21.904 UTC [1] LOG:  listening on IPv4 address "0.0.0.0", port 5432
2022-03-08 12:30:21.904 UTC [1] LOG:  listening on IPv6 address "::", port 5432
2022-03-08 12:30:21.909 UTC [1] LOG:  listening on Unix socket "/var/run/postgresql/.s.PGSQL.5432"
2022-03-08 12:30:21.929 UTC [79] LOG:  database system was shut down at 2022-03-08 12:30:21 UTC
2022-03-08 12:30:21.938 UTC [1] LOG:  database system is ready to accept connections
```

# Carpool Deploy App

- deploy
- service
- create first car

```bash
mkdir ~/carpool/app && cd ~/carpool/app
vi deployment.yaml
vi service.yaml
```

## Find the name of your local registry: cnbc-registry

```
docker container ls
CONTAINER ID   IMAGE                      COMMAND                  CREATED        STATUS        PORTS                                                                                                      NAMES
6f83432dbef0   rancher/k3d-proxy:5.3.0    "/bin/sh -c nginx-pr…"   29 hours ago   Up 29 hours   0.0.0.0:8580->80/tcp, :::8580->80/tcp, 0.0.0.0:8543->443/tcp, :::8543->443/tcp, 0.0.0.0:8545->6443/tcp   k3d-cnbc-serverlb
5f26dbd03241   rancher/k3s:v1.22.6-k3s1   "/bin/k3s agent"         29 hours ago   Up 29 hours                                                                                                              k3d-cnbc-agent-1
24d5876390bd   rancher/k3s:v1.22.6-k3s1   "/bin/k3s agent"         29 hours ago   Up 29 hours                                                                                                              k3d-cnbc-agent-0
b5a50e960917   rancher/k3s:v1.22.6-k3s1   "/bin/k3s server --t…"   29 hours ago   Up 29 hours                                                                                                              k3d-cnbc-server-0
8f73928c8294   registry:2                 "/entrypoint.sh /etc…"   29 hours ago   Up 29 hours   0.0.0.0:5000->5000/tcp                                                                                     cnbc-registry
```

## deployment.yaml

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: carpool
  labels:
    app: carpool
spec:
  replicas: 1
  selector:
    matchLabels:
      app: carpool
  template:
    metadata:
      labels:
        app: carpool
    spec:
      containers:
        - name: carpool
          image: cnbc-registry:5000/bee42/carpool:0.1.0
          imagePullPolicy: Always
          env:
            - name: FLASK_APP
              value: python_rest
```

## service.yaml

```yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    app: carpool
  name: carpool
spec:
  ports:
    - name: http
      port: 5000
      protocol: TCP
      targetPort: 5000
  selector:
    app: carpool
  type: ClusterIP
```

```
cd ..
sudo apt install -y tree
tree .
.
├── app
│   ├── deployment.yaml
│   └── service.yaml
├── Dockerfile
├── postgres
│   ├── postgres-config.yaml
│   ├── postgres-deploy.yaml
│   ├── postgres-pv-claim.yaml
│   ├── postgres-schema-cm.yaml
│   └── postgres-service.yaml
├── python_rest.py
└── requirements.txt
```

## Deploy App carpool

```
# create deployment and service
kubectl apply -f app
kubectl get all
NAME                            READY   STATUS    RESTARTS   AGE
pod/postgres-65974d6d58-72vk5   1/1     Running   0          26m
pod/carpool-67d9fcf5f4-8cwdg    1/1     Running   0          11s

NAME               TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)    AGE
service/postgres   ClusterIP   10.43.178.140   <none>        5432/TCP   26m
service/carpool    ClusterIP   10.43.223.216   <none>        5000/TCP   11s

NAME                       READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/postgres   1/1     1            1           26m
deployment.apps/carpool    1/1     1            1           11s

NAME                                  DESIRED   CURRENT   READY   AGE
replicaset.apps/postgres-65974d6d58   1         1         1       26m
replicaset.apps/carpool-67d9fcf5f4    1         1         1       11s
```

## Access app

```
kubectl run curl --tty -i --image curlimages/curl -- /bin/sh
# check registry access
curl cnbc-registry:5000/v2/
curl cnbc-registry:5000/v2/_catalog
{"repositories":["bee42/carpool"]}
curl http://cnbc-registry:5000/v2/bee42/carpool/tags/list
{"name":"bee42/carpool","tags":["0.1.0"]}

# create data
cd /tmp
# note: fuel and number_of_seats are assumed example values
cat >car_1.json <<EOF
{
  "license_plate": "BO-PR-72",
  "car_type": "mini clubman SD",
  "fuel": "diesel",
  "number_of_seats": 4
}
EOF
curl -X POST -H "Content-Type: application/json" -d @car_1.json http://carpool:5000/car
```

## Make the app configurable

Patch `python_rest.py` so the database connection is read from environment variables, and build version 0.2.0:

```bash
# back on the host, in ~/carpool
cat >python_rest.py.patch <<EOF
4a5,6
> import os
>
8a11,14
> user = os.environ.get('POSTGRES_USER', 'postgres')
> db = os.environ.get('POSTGRES_DB', 'esentricar' )
> passwd = os.environ.get('POSTGRES_PASSWORD', 'cnbc')
> host = os.environ.get('POSTGRES_HOST', 'postgres')
12c18
< app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:cnbc@postgres:5432/esentricar'
---
> app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://' + user + ':' + passwd + '@' + host + ':5432/' + db
EOF
patch -p0 < python_rest.py.patch python_rest.py
docker build -t bee42/carpool:0.2.0 .
```

```
curl http://127.0.0.1:5000/v2/
docker tag bee42/carpool:0.2.0 127.0.0.1:5000/bee42/carpool:0.2.0
docker push 127.0.0.1:5000/bee42/carpool:0.2.0
curl http://127.0.0.1:5000/v2/bee42/carpool/tags/list
{"name":"bee42/carpool","tags":["0.1.0","0.2.0"]}
```

## app/app-config.yaml

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: app-config
  labels:
    app: carpool
data:
  POSTGRES_DB: esentricar
  POSTGRES_PASSWORD: cnbc
  POSTGRES_USER: postgres
  POSTGRES_HOST: postgres
```

## app/deployment.yaml

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: carpool
  labels:
    app: carpool
spec:
  replicas: 1
  selector:
    matchLabels:
      app: carpool
  template:
    metadata:
      labels:
        app: carpool
    spec:
      containers:
        - name: carpool
          image: cnbc-registry:5000/bee42/carpool:0.2.0
          imagePullPolicy: Always
          env:
            - name: FLASK_APP
              value: python_rest
          envFrom:
            - configMapRef:
                name: app-config
```

```
kubectl apply -f app

# later: attach to the curl pod again and query the API
kubectl attach -ti curl
curl http://carpool:5000/car
[{"car_id":1,"car_type":"mini clubman SD","license_plate":"BO-PR-72"}]
```

```
# remove all
kubectl get pv
# patch the pv if you need the data after a redeploy :)
PV=$(kubectl get pvc -o json -lapp=postgres | jq -r .items[].spec.volumeName)
kubectl patch pv $PV -p "{\"spec\":{\"persistentVolumeReclaimPolicy\":\"Retain\"}}"
kubectl delete namespace carpool
kubectl create namespace carpool
kubectl apply -f postgres
kubectl apply -f app

# Problem: automatic re-association of the PV doesn't work
kubectl get pv
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS     CLAIM                       STORAGECLASS   REASON   AGE
pvc-ab3ea061-9180-4e34-95d5-0100a149747d   1Gi        RWO            Retain           Released   carpool/postgres-pv-claim   local-path              24m
pvc-57cae9ed-3ebe-4181-819a-7a9cfd745606   1Gi        RWO            Delete           Bound      carpool/postgres-pv-claim   local-path              28s

kubectl delete namespace carpool
kubectl patch pv $PV --type json -p '[{"op": "remove", "path": "/spec/claimRef/uid"}]'
```
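Removing the stale `claimRef.uid` makes the retained volume claimable again, but binding is still first come, first served. If you want the re-created claim to land on exactly this volume, you can additionally pin it from the claim side with `spec.volumeName`. This is only a sketch, not part of the original walkthrough; the file name `postgres-pv-claim-reuse.yaml` is made up here, and `$PV` is the variable set above:

```bash
cat >postgres-pv-claim-reuse.yaml <<EOF
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: postgres-pv-claim
  labels:
    app: postgres
spec:
  storageClassName: local-path
  volumeName: $PV          # pin the claim to the retained volume
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
EOF
```

Use this file in place of `postgres/postgres-pv-claim.yaml` when re-deploying to make the binding deterministic.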
"/spec/claimRef/uid"}]' kubectl create namespace carpool kubectl apply -f postgres kubectl apply -f app Options define a node port services cat >node-service.yaml < K3s dev 5.4.0-109-generic containerd://1.5.9-k3s1 k3d-cnbc-agent-1 Ready worker 30h v1.22.7+k3s1 172.18.0.3 K3s dev 5.4.0-109-generic containerd://1.5.9-k3s1 k3d-cnbc-server-0 Ready control-plane,master 30h v1.22.7+k3s1 172.18.0.2 K3s dev 5.4.0-109-generic containerd://1.5.9-k3s1 kubectl apply -f ~/carpool/app/node-service.yaml kubectl get service NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE postgres ClusterIP 10.43.173.36 5432/TCP 5m37s carpool ClusterIP 10.43.41.38 5000/TCP 5m11s carpool-node NodePort 10.43.44.24 5000:31231/TCP 7s curl 172.18.0.3:31231/car [{"car_id":1,"car_type":"mini clubman SD","license_plate":"BO-PR-72"}]