By default, containers are stateless: aside from the image layers themselves, nothing they write to disk survives a restart.
This can be quite unfortunate for workloads that need to keep their data around, such as databases. One of the easiest solutions is to mount an NFS share into the container(s) and let them store everything there, so here's an example.
First, prepare the NFS share on the server:
[root@nfssrv ~]# echo '/var/nfs/test 172.31.10.0/24(rw,secure,sync,no_root_squash)' >> /etc/exports
[root@nfssrv ~]# exportfs -rav
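Optionally, you can confirm the export is now visible by querying the server's export list with showmount (part of nfs-utils):
[root@nfssrv ~]# showmount -e localhost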
Create the manifest for the Persistent Volume:
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: var-nfs-test
spec:
  capacity:
    storage: 512Mi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Recycle
  storageClassName: nfs
  mountOptions:
    - vers=4.2
    - namlen=255
    - proto=tcp
    - timeo=30
    - retrans=3
  nfs:
    path: /var/nfs/test
    server: nfssrv.archyslife.lan
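Apply the manifest; assuming you saved it as pv.yml (the filename referenced during cleanup below), the new volume should show up as 'Available':
[archy@workstation ~]$ kubectl apply -f pv.yml
[archy@workstation ~]$ kubectl get pv var-nfs-test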
Create the manifest for the Persistent Volume Claim:
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-mount
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: nfs
  resources:
    requests:
      storage: 512Mi
  volumeName: var-nfs-test
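Apply this one as well; assuming it's saved as pvc.yml, the claim should bind to the volume created above since volumeName pins it explicitly:
[archy@workstation ~]$ kubectl -n default apply -f pvc.yml
[archy@workstation ~]$ kubectl -n default get pvc nfs-mount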
Create the manifest for the Deployment:
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfspods
  labels:
    app: nfstest
  namespace: default
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nfstest
  strategy:
    rollingUpdate:
      maxSurge: 50%
      maxUnavailable: 50%
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: nfstest
    spec:
      containers:
        - name: nfstest
          image: alpine:latest
          command: ["/bin/sh"]
          args: ["-c", "while true; do echo $(date +%H:%M:%S) $(hostname -f) >> /mnt/$(hostname -f); sleep 1; done"]
          volumeMounts:
            - mountPath: /mnt
              name: nfs-volume
      volumes:
        - name: nfs-volume
          persistentVolumeClaim:
            claimName: nfs-mount
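Apply the Deployment; assuming it's saved as deployment.yml, two pods should come up, each appending a timestamp and its hostname to its own file on the share every second:
[archy@workstation ~]$ kubectl -n default apply -f deployment.yml
[archy@workstation ~]$ kubectl -n default get pods -l app=nfstest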
Log in to the NFS server and check the contents of the files:
[root@nfssrv ~]# tail -n 10 /var/nfs/test/*
==> /var/nfs/test/nfspods-5d6ffbcc87-chw8h <==
17:32:08 nfspods-5d6ffbcc87-chw8h
17:32:09 nfspods-5d6ffbcc87-chw8h
17:32:10 nfspods-5d6ffbcc87-chw8h
17:32:11 nfspods-5d6ffbcc87-chw8h
17:32:12 nfspods-5d6ffbcc87-chw8h
17:32:13 nfspods-5d6ffbcc87-chw8h
17:32:14 nfspods-5d6ffbcc87-chw8h
17:32:15 nfspods-5d6ffbcc87-chw8h
17:32:16 nfspods-5d6ffbcc87-chw8h
17:32:17 nfspods-5d6ffbcc87-chw8h
==> /var/nfs/test/nfspods-5d6ffbcc87-vd8rm <==
17:32:08 nfspods-5d6ffbcc87-vd8rm
17:32:09 nfspods-5d6ffbcc87-vd8rm
17:32:10 nfspods-5d6ffbcc87-vd8rm
17:32:11 nfspods-5d6ffbcc87-vd8rm
17:32:12 nfspods-5d6ffbcc87-vd8rm
17:32:13 nfspods-5d6ffbcc87-vd8rm
17:32:14 nfspods-5d6ffbcc87-vd8rm
17:32:15 nfspods-5d6ffbcc87-vd8rm
17:32:16 nfspods-5d6ffbcc87-vd8rm
17:32:17 nfspods-5d6ffbcc87-vd8rm
Both pods are writing to the share, so everything worked as expected.
You can delete the objects you created by referencing the manifest files:
[archy@workstation ~]$ kubectl -n default delete -f deployment.yml
deployment.apps "nfspods" deleted
[archy@workstation ~]$ kubectl -n default delete -f pvc.yml
persistentvolumeclaim "nfs-mount" deleted
[archy@workstation ~]$ kubectl -n default delete -f pv.yml
warning: deleting cluster-scoped resources, not scoped to the provided namespace
persistentvolume "var-nfs-test" deleted
The warning above is perfectly normal: Persistent Volumes are cluster-scoped rather than namespaced, so they can be accessed by the whole cluster.
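If you want to double-check which resources are cluster-scoped, kubectl can list them (a quick sanity check, not specific to this setup):
[archy@workstation ~]$ kubectl api-resources --namespaced=false | grep persistentvolumes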
Feel free to comment and / or suggest a topic.