# kubectl get nodes
Unable to connect to the server: x509: certificate has expired or is not yet valid
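Note: As an optional first check from a workstation, you can inspect the certificate the API server presents on port 6443 before logging in to any node. This is a minimal sketch only; ENDPOINT-IP is a placeholder for your cluster endpoint or Control Plane VIP.

echo | openssl s_client -connect ENDPOINT-IP:6443 2>/dev/null | openssl x509 -noout -dates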
ssh capv@CONTROL-PLANE-IP
sudo -i
kubeadm alpha certs check-expiration

Note: For TKGm 1.5.x, you can remove the "alpha" from the command above.
ssh capv@CONTROL-PLANE-IP
sudo -i
openssl x509 -in /var/lib/kubelet/pki/kubelet-client-current.pem -noout -dates
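Note: The same checks can optionally be scripted across every Control Plane node from a workstation. This is a sketch only; the IP list is an assumption you would replace with your own node addresses, and it relies on passwordless sudo for the capv user.

for ip in CONTROL-PLANE-IP-1 CONTROL-PLANE-IP-2 CONTROL-PLANE-IP-3; do
  echo "=== $ip ==="
  ssh capv@$ip "sudo kubeadm alpha certs check-expiration"      # remove "alpha" on TKGm 1.5.x
  ssh capv@$ip "sudo openssl x509 -in /var/lib/kubelet/pki/kubelet-client-current.pem -noout -dates"
done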
1. SSH to the Control Plane node and rotate the cluster component certs:
ssh capv@CONTROL-PLANE-IP
sudo -i
kubeadm alpha certs check-expiration
kubeadm alpha certs renew all -v 6
kubeadm alpha certs check-expiration

Note: For TKGm 1.5.x, you can remove the "alpha" from the 3 commands above.
2. Restart the cluster components (etcd, kube-apiserver, kube-controller-manager, kube-scheduler, and kube-vip if present):
crictl ps
ps -fe | grep -e etcd -e kube-api -e kube-controller-manager -e kube-scheduler -e kube-vip
kill PID
crictl ps
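Note: If you prefer not to pick out each PID by hand, the same restart can be done through crictl, since kubelet recreates stopped static pod containers automatically. This is a sketch only, assuming a containerd-based node where crictl is available (as used above).

for comp in etcd kube-apiserver kube-controller-manager kube-scheduler kube-vip; do
  ids=$(crictl ps --name "$comp" -q)
  [ -n "$ids" ] && crictl stop $ids
done
crictl ps   # confirm the containers come back up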
3. Repeat the above steps on all remaining Control Plane nodes.
ssh capv@CONTROL-PLANE-IP
sudo -i
grep client-certificate-data /etc/kubernetes/admin.conf | awk '{print $2}' | base64 -d | openssl x509 -noout -dates
export KUBECONFIG=/etc/kubernetes/admin.conf
kubectl get nodes
ssh capv@CONTROL-PLANE-IP
vi /etc/kubernetes/admin.conf
vi ~/.kube/config

users:
- name: MGMT-CLUSTER-admin
  user:
    client-certificate-data: YYYYYY
    client-key-data: ZZZZZZ
vi ~/.kube-tkg/config

users:
- name: MGMT-CLUSTER-admin
  user:
    client-certificate-data: YYYYYY
    client-key-data: ZZZZZZ
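Note: The YYYYYY and ZZZZZZ values are the renewed client-certificate-data and client-key-data from admin.conf on a Control Plane node. A minimal sketch for extracting them, assuming passwordless sudo for the capv user:

ssh capv@CONTROL-PLANE-IP "sudo grep client-certificate-data /etc/kubernetes/admin.conf" | awk '{print $2}'
ssh capv@CONTROL-PLANE-IP "sudo grep client-key-data /etc/kubernetes/admin.conf" | awk '{print $2}'

Paste the two base64 strings into the matching user entry in both files above.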
kubectl get nodes
kubectl get secret -n tkg-system MGMT-CLUSTER-kubeconfig -o jsonpath='{.data.value}' | base64 -d > mgmt-kubeconfig-value
vi mgmt-kubeconfig-value
base64 mgmt-kubeconfig-value -w 0
kubectl edit secret -n tkg-system MGMT-CLUSTER-kubeconfig
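Note: As an alternative to pasting the new base64 string inside "kubectl edit", the secret value can be replaced non-interactively. This is a sketch only; mgmt-kubeconfig-value is the edited file from the previous step.

NEW_VALUE=$(base64 -w 0 mgmt-kubeconfig-value)
kubectl patch secret MGMT-CLUSTER-kubeconfig -n tkg-system --type merge -p "{\"data\":{\"value\":\"$NEW_VALUE\"}}"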
tkg get management-cluster
kubectl config use-context MGMT-CONTEXT
kubectl get nodes
kubectl config use-context MGMT-CONTEXT
kubectl get secrets -A | grep kubeconfig
kubectl delete secret CLUSTER-NAME-kubeconfig -n NAMESPACE
kubectl get cluster <cluster-name> --namespace <namespace-name> -o yaml | grep -i paused
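Note: If the command above shows "paused: true", the kubeconfig secret will not be regenerated until reconciliation is resumed. A sketch for clearing the Cluster API spec.paused flag:

kubectl patch cluster <cluster-name> --namespace <namespace-name> --type merge -p '{"spec":{"paused":false}}'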
kubectl get secrets -A | grep kubeconfig
tkg get credentials CLUSTER-NAME
kubectl config use-context WORKLOAD-CONTEXT
kubectl get nodes

Note: For TKGm 1.4.x and higher versions, replace the "tkg" CLI with the "tanzu" CLI and use "tanzu cluster kubeconfig get <cluster-name> -n <namespace-name> --admin" to retrieve the kubeconfig of a cluster.
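For example, with the tanzu CLI the retrieval looks like the sketch below; the "CLUSTER-NAME-admin@CLUSTER-NAME" context name follows the usual TKG admin kubeconfig convention and may differ in your environment.

tanzu cluster kubeconfig get CLUSTER-NAME -n NAMESPACE --admin
kubectl config use-context CLUSTER-NAME-admin@CLUSTER-NAME
kubectl get nodes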
# openssl x509 -in /var/lib/kubelet/pki/kubelet-client-current.pem -noout -dates
ssh capv@CONTROL-PLANE-IP
sudo -i
mkdir /home/capv/backup
mv /etc/kubernetes/kubelet.conf /home/capv/backup
mv /var/lib/kubelet/pki/kubelet-client* /home/capv/backup
# check kubeadm version
kubeadm version

# if kubeadm version is v1.19.* or lower,
kubeadm alpha kubeconfig user --org system:nodes --client-name system:node:NODE > /home/capv/backup/kubelet-NODE.conf

# if kubeadm version is v1.20.* or v1.21.*,
kubeadm config --kubeconfig /etc/kubernetes/admin.conf view > kubeadm.config
kubeadm alpha kubeconfig user --org system:nodes --client-name system:node:NODE --config kubeadm.config > /home/capv/backup/kubelet-NODE.conf

# if kubeadm version is v1.22.*,
kubectl get cm -n kube-system kubeadm-config -o=jsonpath="{.data.ClusterConfiguration}" --kubeconfig /etc/kubernetes/admin.conf > kubeadm.config
kubeadm kubeconfig user --org system:nodes --client-name system:node:NODE --config kubeadm.config > /home/capv/backup/kubelet-NODE.conf

vi /home/capv/backup/kubelet-NODE.conf

apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: XXXXXX
    server: https://ENDPOINT-IP:6443
  name: CLUSTER-NAME
contexts:
- context:
    cluster: CLUSTER-NAME
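Note: If the generated kubelet-NODE.conf needs its certificate-authority-data or server endpoint corrected, both values can be copied from the working admin.conf on the same node. A minimal sketch, assuming admin.conf already points at the correct endpoint:

grep certificate-authority-data /etc/kubernetes/admin.conf | awk '{print $2}'
grep 'server:' /etc/kubernetes/admin.conf | awk '{print $2}'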
cp /home/capv/backup/kubelet-NODE.conf /etc/kubernetes/kubelet.conf
systemctl restart kubelet
systemctl status kubelet
ls -l /var/lib/kubelet/pki/
kubeadm init phase kubelet-finalize all
ls -l /var/lib/kubelet/pki/
openssl x509 -in /var/lib/kubelet/pki/kubelet-client-current.pem -noout -dates
kubectl get nodes
# openssl x509 -in /var/lib/kubelet/pki/kubelet-client-current.pem -noout -dates
ssh capv@WORKER-IP
sudo -i
mkdir /home/capv/backup
mv /etc/kubernetes/kubelet.conf /home/capv/backup
mv /var/lib/kubelet/pki/kubelet-client* /home/capv/backup
ssh capv@CONTROL-PLANE-IP

# check kubeadm version
kubeadm version

# if kubeadm version is v1.19.* or lower,
kubeadm alpha kubeconfig user --org system:nodes --client-name system:node:NODE > /home/capv/backup/kubelet-NODE.conf

# if kubeadm version is v1.20.* or v1.21.*,
kubeadm config --kubeconfig /etc/kubernetes/admin.conf view > kubeadm.config
kubeadm alpha kubeconfig user --org system:nodes --client-name system:node:NODE --config kubeadm.config > /home/capv/backup/kubelet-NODE.conf

# if kubeadm version is v1.22.*,
kubectl get cm -n kube-system kubeadm-config -o=jsonpath="{.data.ClusterConfiguration}" --kubeconfig /etc/kubernetes/admin.conf > kubeadm.config
kubeadm kubeconfig user --org system:nodes --client-name system:node:NODE --config kubeadm.config > /home/capv/backup/kubelet-NODE.conf

vi /home/capv/backup/kubelet-NODE.conf

apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: XXXXXX
    server: https://ENDPOINT-IP:6443
  name: CLUSTER-NAME
contexts:
- context:
    cluster: CLUSTER-NAME
scp capv@CONTROL-PLANE-IP:/home/capv/backup/kubelet-NODE.conf .
scp kubelet-NODE.conf capv@WORKER-IP:/home/capv/backup/kubelet-NODE.conf
4. SSH to the worker node, copy the kubelet config into place, and restart kubelet:
cp /home/capv/backup/kubelet-NODE.conf /etc/kubernetes/kubelet.conf
systemctl restart kubelet
systemctl status kubelet
vi /etc/kubernetes/kubelet.conf

  user:
    client-certificate: /var/lib/kubelet/pki/kubelet-client-current.pem
    client-key: /var/lib/kubelet/pki/kubelet-client-current.pem

systemctl restart kubelet
systemctl status kubelet
ls -l /var/lib/kubelet/pki/
openssl x509 -in /var/lib/kubelet/pki/kubelet-client-<DATE>.pem -text -noout | grep -A2 Validity
kubectl get nodes