TKC upgrade completes successfully, but application pods are not running.
Log in to the control plane of the affected TKC and run "kubectl get pods -n <application namespace>"
Example:
Application XYZ installed in namespace xyz
root@xxxxxxxxxxxxxx [ ~ ]# kubectl get pods -n xyz
Note: no pods are running
root@xxxxxxxxxxxxxx [ ~ ]# kubectl get ns
NAME                           STATUS   AGE
default Active 374d
kube-node-lease Active 374d
kube-public Active 374d
kube-system Active 374d
xyz Active 374d
velero Active 363d
vmware-system-auth Active 374d
vmware-system-cloud-provider Active 374d
vmware-system-csi Active 374d
root@xxxxxxxxxxxxxx [ ~ ]#
root@xxxxxxxxxxxxxx [ ~ ]# kubectl get all -n xyz
NAME          TYPE           CLUSTER-IP       EXTERNAL-IP      PORT(S)                                      AGE
service/xyz   ClusterIP      None             <none>           3306/TCP                                     374d
service/xyz   LoadBalancer   xxxxxxxxxxxxxx   xxxxxxxxxxxxxx   80:32296/TCP,443:30935/TCP,18443:30971/TCP   374d
NAME                  READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/xyz   0/1     0            0           374d
deployment.apps/xyz   0/1     0            0           374d

NAME                             DESIRED   CURRENT   READY   AGE
replicaset.apps/xyz-5bf4599d9b   1         0         0       374d
replicaset.apps/xyz-5c98c5886c   1         0         0       374d
replicaset.apps/xyz-f876d69fc    0         0         0       374d
root@xxxxxxxxxxxxxx [ ~ ]#
root@xxxxxxxxxxxxxx [ ~ ]# kubectl describe replicaset.apps/xyz-5bf4599d9b -n xyz
Name:       xyz-5bf4599d9b
Namespace:  xyz
Selector:   app=xyz,pod-template-hash=5bf4599d9b,tier=mariadb
Labels:     app=xyz
            pod-template-hash=5bf4599d9b
            tier=mariadb
Annotations: deployment.kubernetes.io/desired-replicas: 1
deployment.kubernetes.io/max-replicas: 1
deployment.kubernetes.io/revision: 1
Controlled By: Deployment/xyz
Replicas: 0 current / 1 desired
Pods Status: 0 Running / 0 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=xyz
           pod-template-hash=5bf4599d9b
           tier=mariadb
  Containers:
    xyz-db:
      Image:     mariadb:10.4
      Port:      3306/TCP
      Host Port: 0/TCP
      Environment:
        MYSQL_ROOT_PASSWORD: <set to the key 'password' in secret 'xyz-xxxxxx'> Optional: false
        MYSQL_DATABASE:      xyz
        MYSQL_USER:          xyz
        MYSQL_PASSWORD:      <set to the key 'password' in secret 'xyz-xxxx'> Optional: false
        TZ:                  Europe/Berlin
Mounts:
/var/lib/mysql from db-persistent-storage (rw)
Volumes:
db-persistent-storage:
Type: PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
ClaimName: db-files
ReadOnly: false
Conditions:
Type Status Reason
---- ------ ------
ReplicaFailure True FailedCreate
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Warning FailedCreate 67m replicaset-controller Error creating: pods "xyz-5bf4599d9b-qrxcc" is forbidden: violates PodSecurity "restricted:latest": allowPrivilegeEscalation != false (container "xyz-db" must set securityContext.allowPrivilegeEscalation=false), unrestricted capabilities (container "xyz-db" must set securityContext.capabilities.drop=["ALL"]), runAsNonRoot != true (pod or container "xyz-db" must set securityContext.runAsNonRoot=true), seccompProfile (pod or container "xyz-db" must set securityContext.seccompProfile.type to "RuntimeDefault" or "Localhost")
Note: the application namespace does not have any pod-security labels:
root@xxxxxxxxxxxxxx [ ~ ]# kubectl get ns xyz -o yaml
apiVersion: v1
kind: Namespace
metadata:
creationTimestamp: "2024-01-11T14:17:21Z"
  labels:
    kubernetes.io/metadata.name: xyz
  name: xyz
  resourceVersion: "90233872"
  uid: xxxxxxx
spec:
finalizers:
- kubernetes
status:
phase: Active
Resolution: add the following pod-security labels to the affected namespace:
pod-security.kubernetes.io/audit: privileged
pod-security.kubernetes.io/audit-version: latest
pod-security.kubernetes.io/enforce: privileged
pod-security.kubernetes.io/enforce-version: latest
pod-security.kubernetes.io/warn: privileged
pod-security.kubernetes.io/warn-version: latest
kubectl edit ns <affected namespace>
root@xxxxxxxxxxxxxx [ ~ ]# kubectl get ns xyz -o yaml
apiVersion: v1
kind: Namespace
metadata:
creationTimestamp: "2024-01-11T14:17:21Z"
  labels:
    kubernetes.io/metadata.name: xyz
    pod-security.kubernetes.io/audit: privileged
    pod-security.kubernetes.io/audit-version: latest
    pod-security.kubernetes.io/enforce: privileged
    pod-security.kubernetes.io/enforce-version: latest
    pod-security.kubernetes.io/warn: privileged
    pod-security.kubernetes.io/warn-version: latest
  name: xyz
  resourceVersion: "90233872"
  uid: xxxxxxx
spec:
finalizers:
- kubernetes
status:
phase: Active
root@xxxxxxxxxxxxxx [ ~ ]# kubectl describe rs xyz-5bf4599d9b -n xyz
Name:       xyz-5bf4599d9b
Namespace:  xyz
Selector:   app=xyz,pod-template-hash=xxxxxxx,tier=mariadb
Labels:     app=xyz
            pod-template-hash=5bf4599d9b
            tier=mariadb
Annotations: deployment.kubernetes.io/desired-replicas: 1
deployment.kubernetes.io/max-replicas: 1
deployment.kubernetes.io/revision: 1
Controlled By: Deployment/xyz
Replicas: 1 current / 1 desired
Pods Status: 1 Running / 0 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=xyz
           pod-template-hash=5bf4599d9b
           tier=mariadb
  Containers:
    xyz-db:
      Image:     mariadb:10.4
      Port:      3306/TCP
      Host Port: 0/TCP
      Environment:
        MYSQL_ROOT_PASSWORD: <set to the key 'password' in secret 'xyz-xxxxxxx'> Optional: false
        MYSQL_DATABASE:      xyz
        MYSQL_USER:          xyz
        MYSQL_PASSWORD:      <set to the key 'password' in secret 'xyz-xxxxxxx'> Optional: false
        TZ:                  Europe/Berlin
Mounts:
/var/lib/mysql from db-persistent-storage (rw)
Volumes:
db-persistent-storage:
Type: PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
ClaimName: db-files
ReadOnly: false
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Warning FailedCreate 17m replicaset-controller Error creating: pods "xyz-5bf4599d9b-rxwhl" is forbidden: violates PodSecurity "restricted:latest": allowPrivilegeEscalation != false (container "xyz-db" must set securityContext.allowPrivilegeEscalation=false), unrestricted capabilities (container "xyz-db" must set securityContext.capabilities.drop=["ALL"]), runAsNonRoot != true (pod or container "xyz-db" must set securityContext.runAsNonRoot=true), seccompProfile (pod or container "xyz-db" must set securityContext.seccompProfile.type to "RuntimeDefault" or "Localhost")
Normal SuccessfulCreate 79s replicaset-controller Created pod: xyz-5bf4599d9b-jnh4g
Expected result: the application pods should come up now.
root@xxxxxxxxxxxxxxxxxx [ ~ ]# kubectl get pods -n xyz
NAME READY STATUS RESTARTS AGE
xyz-5bf4599d9b-jnh4g   1/1   Running   0   85s
xyz-5c98c5886c-l22jg   1/1   Running   0   85s