We'll be working on the servers that are surrounded by the continuous lines in this drawing:
If you've arrived here, the cluster is in the process of building. However, there are still a few steps left to do:
- remove the bootstrap node from haproxy
- approve the signing requests for the worker nodes
- monitor the cluster state and cluster operators
- create an initial htpasswd auth provider for a fallback admin account
- create a storage class for the openshift internal image registry
Let's start with removing the bootstrap node once the bootstrap monitoring command tells us it's safe to do so.
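To check this, assuming the install directory from the earlier parts is ~/openshift-installer (the same directory that holds auth/kubeconfig), the installer's built-in wait command can be used:
[root@helper01 ~]# openshift-install wait-for bootstrap-complete --dir ${HOME}/openshift-installer --log-level=info
Once it reports that it is safe to remove the bootstrap resources, drop the bootstrap entries from the haproxy configuration on both helpers: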
[archy@helper01 ~]# sudo vim /etc/haproxy/haproxy.cfg
[archy@helper02 ~]# sudo vim /etc/haproxy/haproxy.cfg
global
    log 127.0.0.1 local2
    pidfile /var/run/haproxy.pid
    maxconn 4000
    daemon

defaults
    mode http
    log global
    option dontlognull
    option http-server-close
    option redispatch
    retries 3
    timeout http-request 10s
    timeout queue 1m
    timeout connect 10s
    timeout client 1m
    timeout server 1m
    timeout http-keep-alive 10s
    timeout check 10s
    maxconn 3000

frontend stats
    bind 0.0.0.0:9000
    mode http
    log global
    maxconn 10
    stats enable
    stats hide-version
    stats refresh 30s
    stats show-node
    stats show-desc Stats for ha service
    stats uri /stats

listen api-server-6443
    bind 0.0.0.0:6443
    mode tcp
    server master01 master01.okd.archyslife.lan:6443 check inter 1s
    server master02 master02.okd.archyslife.lan:6443 check inter 1s
    server master03 master03.okd.archyslife.lan:6443 check inter 1s

listen machine-config-server-22623
    bind 0.0.0.0:22623
    mode tcp
    server master01 master01.okd.archyslife.lan:22623 check inter 1s
    server master02 master02.okd.archyslife.lan:22623 check inter 1s
    server master03 master03.okd.archyslife.lan:22623 check inter 1s

listen http-ingress-80
    bind 0.0.0.0:80
    mode tcp
    server worker01 worker01.okd.archyslife.lan:80 check inter 1s
    server worker02 worker02.okd.archyslife.lan:80 check inter 1s

listen https-ingress-443
    bind 0.0.0.0:443
    mode tcp
    server worker01 worker01.okd.archyslife.lan:443 check inter 1s
    server worker02 worker02.okd.archyslife.lan:443 check inter 1s
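After saving the changes, validate the configuration and reload haproxy on both helpers so the new backend list takes effect (assuming haproxy runs as a regular systemd service):
[archy@helper01 ~]# sudo haproxy -c -f /etc/haproxy/haproxy.cfg
[archy@helper01 ~]# sudo systemctl reload haproxy
[archy@helper02 ~]# sudo haproxy -c -f /etc/haproxy/haproxy.cfg
[archy@helper02 ~]# sudo systemctl reload haproxy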
With the bootstrap node removed, you can shut it down, but I'd recommend not deleting it just yet.
The next step is approving the CSRs generated by the worker nodes:
[root@helper01 ~]# export KUBECONFIG=/root/openshift-installer/auth/kubeconfig
[root@helper01 ~]# oc get nodes -o wide --show-kind
[root@helper01 ~]# oc get csr -ojson | jq -r '.items[] | select(.status == {} ) | .metadata.name' | xargs oc adm certificate approve
These commands might need to be repeated multiple times until all nodes have joined.
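To keep an eye on incoming signing requests while the workers join, a simple watch helps, for example:
[root@helper01 ~]# watch -cn 10 oc get csr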
Once all nodes are part of the cluster, you can continue with monitoring the setup process by using these commands:
[root@helper01 ~]# watch -cn 1 oc get clusteroperators -o wide --show-kind
[root@helper01 ~]# watch -cn 1 oc get pods -o wide --show-kind --all-namespaces
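Alternatively, the installer itself can wait until the installation reports completion, again assuming ~/openshift-installer as the install directory:
[root@helper01 ~]# openshift-install wait-for install-complete --dir ${HOME}/openshift-installer --log-level=info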
Once all the cluster operators are available and no longer progressing, we can start customizing the installation. I'll begin by adding an htpasswd authentication source as a fallback admin account (adding other authentication sources is not part of this tutorial) and a default storage class using the nfs-subdir-external-provisioner. Setting up an htpasswd auth provider is fairly simple:
[root@helper01 ~]# htpasswd -c -B ~/openshift-installer/htpasswd-initial admin
[root@helper01 ~]# oc -n openshift-config create secret generic htpasswd-initial --from-file htpasswd=${HOME}/openshift-installer/htpasswd-initial
[root@helper01 ~]# cat << EOF >> ~/openshift-installer/htpasswd-initial.yaml
---
apiVersion: config.openshift.io/v1
kind: OAuth
metadata:
  name: cluster
spec:
  identityProviders:
    - name: htpasswd-initial
      mappingMethod: claim
      type: HTPasswd
      htpasswd:
        fileData:
          name: htpasswd-initial
...
EOF
[root@helper01 ~]# oc apply -f ${HOME}/openshift-installer/htpasswd-initial.yaml
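To double-check that the identity provider landed in the cluster's OAuth configuration:
[root@helper01 ~]# oc get oauth cluster -o yaml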
Monitor the rollout process using this command:
[root@helper01 ~]# watch -cn 1 oc -n openshift-authentication get pods -o wide --show-kind
Once all the pods have restarted, associate the 'cluster-admin' cluster role with the 'admin' user and log in with it:
[root@helper01 ~]# CLUSTERNAME=okd
[root@helper01 ~]# BASEDOMAIN=archyslife.lan
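While still authenticated through the installer's kubeconfig, bind the cluster role to the user:
[root@helper01 ~]# oc adm policy add-cluster-role-to-user cluster-admin admin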
[root@helper01 ~]# oc login -u 'admin' -p '' --insecure-skip-tls-verify "https://api.${CLUSTERNAME}.${BASEDOMAIN}:6443"
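A quick check confirms the login worked and the account can see cluster-scoped resources:
[root@helper01 ~]# oc whoami
[root@helper01 ~]# oc get nodes -o wide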
The last step in finalizing the initial setup is creating a storage class. I'll be using a NAS in my local network along with the nfs-subdir-external-provisioner. First, some preparations:
[root@helper01 ~]# git clone https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner.git
[root@helper01 ~]# cd nfs-subdir-external-provisioner
[root@helper01 ~]# NAMESPACE=default
[root@helper01 ~]# sed -i'' "s/namespace:.*/namespace: ${NAMESPACE}/g" ./deploy/rbac.yaml ./deploy/deployment.yaml
[root@helper01 ~]# oc create -f deploy/rbac.yaml
[root@helper01 ~]# oc adm policy add-scc-to-user hostmount-anyuid system:serviceaccount:${NAMESPACE}:nfs-client-provisioner
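A quick sanity check that the RBAC objects ended up in the chosen namespace:
[root@helper01 ~]# oc -n ${NAMESPACE} get sa nfs-client-provisioner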
Next, edit and apply the deployment. Here's mine as a reference:
[root@helper01 ~]# cat deploy/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: default
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
spec:
  replicas: 1
  revisionHistoryLimit: 5
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: registry.k8s.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: k8s-sigs.io/nfs-subdir-external-provisioner
            - name: NFS_SERVER
              value: strgnas01.archyslife.lan
            - name: NFS_PATH
              value: /volume1/openshift-data
      volumes:
        - name: nfs-client-root
          nfs:
            server: strgnas01.archyslife.lan
            path: /volume1/openshift-data
[root@helper01 ~]# oc -n default apply -f deploy/deployment.yaml
[root@helper01 ~]# oc -n default get pods -o wide
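If the pod does not come up, or provisioning fails later on, the provisioner's logs are the first place to look:
[root@helper01 ~]# oc -n default logs deployment/nfs-client-provisioner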
Once the pod is running, create the storage class. Note that the heredoc delimiter is quoted here so the shell does not expand the ${.PVC.*} placeholders, and the repo's sample class.yaml is simply overwritten:
[root@helper01 ~]# cat << 'EOF' > deploy/class.yaml
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: k8s-sigs.io/nfs-subdir-external-provisioner
parameters:
  pathPattern: "${.PVC.namespace}-${.PVC.name}"
  archiveOnDelete: "false"
...
EOF
[root@helper01 ~]# oc apply -f deploy/class.yaml
That's all there is to deploying the storage class in OKD / OpenShift. PVCs will now use this storage class by default due to its annotation.
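To verify the whole chain, a throwaway PVC (the claim name and size here are just examples) should become 'Bound' within a few seconds without specifying a storageClassName:
[root@helper01 ~]# cat << EOF > ~/test-claim.yaml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-claim
  namespace: default
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
...
EOF
[root@helper01 ~]# oc apply -f ~/test-claim.yaml
[root@helper01 ~]# oc -n default get pvc test-claim
[root@helper01 ~]# oc delete -f ~/test-claim.yaml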
Feel free to comment and / or suggest a topic.