[root@cert-rhosp-02 ~]# oc get clusterversion
NAME      VERSION                              AVAILABLE   PROGRESSING   SINCE   STATUS
version   4.21.0-0.nightly-2026-02-11-143507   True        False         20h     Cluster version is 4.21.0-0.nightly-2026-02-11-143507
[root@cert-rhosp-02 ~]# oc project openshift-workload-availability
Now using project "openshift-workload-availability" on server "https://api.ocp-edge-cluster-0.qe.lab.redhat.com:6443".
[root@cert-rhosp-02 ~]# oc get csv
NAME                                  DISPLAY                                 VERSION   REPLACES                              PHASE
fence-agents-remediation.v0.7.0       Fence Agents Remediation Operator       0.7.0     fence-agents-remediation.v0.6.0       Succeeded
machine-deletion-remediation.v0.6.0   Machine Deletion Remediation operator   0.6.0     machine-deletion-remediation.v0.5.0   Succeeded
node-healthcheck-operator.v0.11.0     Node Health Check Operator              0.11.0    node-healthcheck-operator.v0.10.1     Succeeded
node-maintenance-operator.v5.6.0      Node Maintenance Operator               5.6.0     node-maintenance-operator.v5.5.0      Succeeded
self-node-remediation.v0.12.0         Self Node Remediation Operator          0.12.0    self-node-remediation.v0.11.0         Succeeded
[root@cert-rhosp-02 ~]# oc get nodes
NAME         STATUS   ROLES                  AGE   VERSION
master-0-0   Ready    control-plane,master   21h   v1.34.2
master-0-1   Ready    control-plane,master   21h   v1.34.2
master-0-2   Ready    control-plane,master   21h   v1.34.2
worker-0-0   Ready    worker                 20h   v1.34.2
worker-0-1   Ready    worker                 20h   v1.34.2
worker-0-2   Ready    worker                 20h   v1.34.2
[root@cert-rhosp-02 ~]# PODS=$(oc get pods -o name -n openshift-workload-availability | grep fence-agents-remediation-controller-manager)
[root@cert-rhosp-02 ~]# echo $PODS
pod/fence-agents-remediation-controller-manager-6d67fccd55-lzjsv pod/fence-agents-remediation-controller-manager-6d67fccd55-vswjb
[root@cert-rhosp-02 ~]# for p in $PODS; do
> echo "== $p"
> oc get "$p" -n openshift-workload-availability -o json | jq .spec.nodeName
> done
== pod/fence-agents-remediation-controller-manager-6d67fccd55-lzjsv
"worker-0-0"
== pod/fence-agents-remediation-controller-manager-6d67fccd55-vswjb
"worker-0-2"
[root@cert-rhosp-02 ~]# PODS=$(oc get pods -o name -n openshift-workload-availability | grep node-healthcheck-controller-manager)
[root@cert-rhosp-02 ~]# echo $PODS
pod/node-healthcheck-controller-manager-5ccc7f9869-7zm4m pod/node-healthcheck-controller-manager-5ccc7f9869-lnk2p
[root@cert-rhosp-02 ~]# for p in $PODS; do
> echo "== $p"
> oc get "$p" -n openshift-workload-availability -o json | jq .spec.nodeName
> done
== pod/node-healthcheck-controller-manager-5ccc7f9869-7zm4m
"master-0-1"
== pod/node-healthcheck-controller-manager-5ccc7f9869-lnk2p
"master-0-2"
[root@cert-rhosp-02 ~]# PODS=$(oc get pods -o name -n openshift-workload-availability | grep self-node-remediation-controller-manager)
[root@cert-rhosp-02 ~]# echo $PODS
pod/self-node-remediation-controller-manager-7444ddf558-rhl8f pod/self-node-remediation-controller-manager-7444ddf558-xrqzt
[root@cert-rhosp-02 ~]# for p in $PODS; do
> echo "== $p"
> oc get "$p" -n openshift-workload-availability -o json | jq .spec.nodeName
> done
== pod/self-node-remediation-controller-manager-7444ddf558-rhl8f
"worker-0-2"
== pod/self-node-remediation-controller-manager-7444ddf558-xrqzt
"worker-0-0"
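Note: the per-operator grep-and-loop above can be collapsed into a single query. A minimal sketch, not run in this session, assuming every controller-manager pod carries the app.kubernetes.io/component=controller-manager label that appears in the topology spread constraints shown later:

    # one line per pod: pod name plus the node it was scheduled on
    oc get pods -n openshift-workload-availability \
      -l app.kubernetes.io/component=controller-manager \
      -o custom-columns=POD:.metadata.name,NODE:.spec.nodeName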
[root@cert-rhosp-02 ~]# PODS=$(oc get pods -o name -n openshift-workload-availability | grep machine-deletion-remediation-controller)
[root@cert-rhosp-02 ~]# echo $PODS
pod/machine-deletion-remediation-controller-manager-74d4bcf77-k6hth
[root@cert-rhosp-02 ~]# for p in $PODS; do
> echo "== $p"
> oc get "$p" -n openshift-workload-availability -o json | jq .spec.nodeName
> done
== pod/machine-deletion-remediation-controller-manager-74d4bcf77-k6hth
"worker-0-2"
[root@cert-rhosp-02 ~]# PODS=$(oc get pods -o name -n openshift-workload-availability | grep node-maintenance-operator-controller-manager)
[root@cert-rhosp-02 ~]# echo $PODS
pod/node-maintenance-operator-controller-manager-66f47c5457-g5wg6
[root@cert-rhosp-02 ~]# for p in $PODS; do
> echo "== $p"
> oc get "$p" -n openshift-workload-availability -o json | jq .spec.nodeName
> done
== pod/node-maintenance-operator-controller-manager-66f47c5457-g5wg6
"master-0-1"
[root@cert-rhosp-02 ~]# oc get csv node-healthcheck-operator.v0.11.0 -o yaml | yq .spec.install.spec.deployments[0].spec.template.spec.topologySpreadConstraints
- labelSelector:
    matchLabels:
      app.kubernetes.io/component: controller-manager
      app.kubernetes.io/name: node-healthcheck-operator
  maxSkew: 1
  topologyKey: kubernetes.io/hostname
  whenUnsatisfiable: DoNotSchedule
[root@cert-rhosp-02 ~]# oc delete pod/node-healthcheck-controller-manager-5ccc7f9869-7zm4m
pod "node-healthcheck-controller-manager-5ccc7f9869-7zm4m" deleted
[root@cert-rhosp-02 ~]# for p in $PODS; do
> echo "== $p"
> oc get "$p" -n openshift-workload-availability -o json | jq .spec.nodeName
> done
== pod/node-healthcheck-controller-manager-5ccc7f9869-lnk2p
"master-0-2"
== pod/node-healthcheck-controller-manager-5ccc7f9869-v2xch
"master-0-1"
[root@cert-rhosp-02 ~]# oc delete pod/node-healthcheck-controller-manager-5ccc7f9869-lnk2p pod/node-healthcheck-controller-manager-5ccc7f9869-v2xch
pod "node-healthcheck-controller-manager-5ccc7f9869-lnk2p" deleted
pod "node-healthcheck-controller-manager-5ccc7f9869-v2xch" deleted
[root@cert-rhosp-02 ~]# oc get pods
NAME                                                              READY   STATUS    RESTARTS        AGE
fence-agents-remediation-controller-manager-6d67fccd55-lzjsv      2/2     Running   0               3h40m
fence-agents-remediation-controller-manager-6d67fccd55-vswjb      2/2     Running   0               3h40m
machine-deletion-remediation-controller-manager-74d4bcf77-k6hth   2/2     Running   0               3h40m
node-healthcheck-controller-manager-5ccc7f9869-5w6w6              2/2     Running   0               91m
node-healthcheck-controller-manager-5ccc7f9869-z69tz              2/2     Running   0               85m
node-healthcheck-node-remediation-console-plugin-8556fb7f564rpj   1/1     Running   0               3h40m
node-maintenance-operator-controller-manager-66f47c5457-g5wg6     1/1     Running   0               3h40m
self-node-remediation-controller-manager-7444ddf558-rhl8f         2/2     Running   1 (3h39m ago)   3h40m
self-node-remediation-controller-manager-7444ddf558-xrqzt         2/2     Running   0               3h40m
self-node-remediation-ds-22f2g                                    1/1     Running   0               3h39m
self-node-remediation-ds-4htc6                                    1/1     Running   0               3h39m
self-node-remediation-ds-f2ldg                                    1/1     Running   0               3h39m
self-node-remediation-ds-qmv2z                                    1/1     Running   0               3h39m
self-node-remediation-ds-sq65s                                    1/1     Running   0               3h39m
self-node-remediation-ds-trlhb                                    1/1     Running   0               3h39m
[root@cert-rhosp-02 ~]# PODS=$(oc get pods -o name -n openshift-workload-availability | grep node-healthcheck-controller-manager)
[root@cert-rhosp-02 ~]# echo $PODS
pod/node-healthcheck-controller-manager-5ccc7f9869-5w6w6 pod/node-healthcheck-controller-manager-5ccc7f9869-z69tz
[root@cert-rhosp-02 ~]# for p in $PODS; do
> echo "== $p"
> oc get "$p" -n openshift-workload-availability -o json | jq .spec.nodeName
> done
== pod/node-healthcheck-controller-manager-5ccc7f9869-5w6w6
"master-0-1"
== pod/node-healthcheck-controller-manager-5ccc7f9869-z69tz
"worker-0-0"
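Note: with maxSkew: 1, topologyKey: kubernetes.io/hostname, and whenUnsatisfiable: DoNotSchedule, the two NHC replicas should never be co-located on one node; a replica that cannot satisfy the constraint stays Pending instead. A minimal sketch for reading the same constraints from the live Deployment rather than the CSV, assuming the Deployment is named node-healthcheck-controller-manager, matching its pods' name prefix:

    oc get deployment node-healthcheck-controller-manager \
      -n openshift-workload-availability \
      -o jsonpath='{.spec.template.spec.topologySpreadConstraints}'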
[root@cert-rhosp-02 ~]# oc adm taint nodes master-0-0 dedicated=special-workload:NoSchedule
node/master-0-0 tainted
[root@cert-rhosp-02 ~]# oc adm taint nodes master-0-2 dedicated=special-workload:NoSchedule
node/master-0-2 tainted
[root@cert-rhosp-02 ~]# oc adm taint nodes worker-0-0 dedicated=special-workload:NoSchedule
node/worker-0-0 tainted
[root@cert-rhosp-02 ~]# oc adm taint nodes worker-0-1 dedicated=special-workload:NoSchedule
node/worker-0-1 tainted
[root@cert-rhosp-02 ~]# oc adm taint nodes worker-0-2 dedicated=special-workload:NoSchedule
node/worker-0-2 tainted
[root@cert-rhosp-02 ~]# oc delete pod/node-healthcheck-controller-manager-5ccc7f9869-z69tz pod/node-healthcheck-controller-manager-5ccc7f9869-5w6w6
pod "node-healthcheck-controller-manager-5ccc7f9869-z69tz" deleted
pod "node-healthcheck-controller-manager-5ccc7f9869-5w6w6" deleted
[root@cert-rhosp-02 ~]# oc get pods
NAME                                                              READY   STATUS    RESTARTS        AGE
fence-agents-remediation-controller-manager-6d67fccd55-lzjsv      2/2     Running   0               3h44m
fence-agents-remediation-controller-manager-6d67fccd55-vswjb      2/2     Running   0               3h44m
machine-deletion-remediation-controller-manager-74d4bcf77-k6hth   2/2     Running   0               3h43m
node-healthcheck-controller-manager-5ccc7f9869-thsr4              1/2     Running   0               8s
node-healthcheck-controller-manager-5ccc7f9869-vk558              0/2     Pending   0               8s
node-healthcheck-node-remediation-console-plugin-8556fb7f564rpj   1/1     Running   0               3h43m
node-maintenance-operator-controller-manager-66f47c5457-g5wg6     1/1     Running   0               3h44m
self-node-remediation-controller-manager-7444ddf558-rhl8f         2/2     Running   1 (3h43m ago)   3h43m
self-node-remediation-controller-manager-7444ddf558-xrqzt         2/2     Running   0               3h43m
self-node-remediation-ds-22f2g                                    1/1     Running   0               3h43m
self-node-remediation-ds-4htc6                                    1/1     Running   0               3h43m
self-node-remediation-ds-f2ldg                                    1/1     Running   0               3h43m
self-node-remediation-ds-qmv2z                                    1/1     Running   0               3h43m
self-node-remediation-ds-sq65s                                    1/1     Running   0               3h43m
self-node-remediation-ds-trlhb                                    1/1     Running   0               3h43m
[root@cert-rhosp-02 ~]# oc adm taint nodes master-0-0 dedicated-
node/master-0-0 untainted
[root@cert-rhosp-02 ~]# oc adm taint nodes master-0-2 dedicated-
node/master-0-2 untainted
[root@cert-rhosp-02 ~]# oc adm taint nodes worker-0-0 dedicated-
node/worker-0-0 untainted
[root@cert-rhosp-02 ~]# oc adm taint nodes worker-0-1 dedicated-
node/worker-0-1 untainted
[root@cert-rhosp-02 ~]# oc adm taint nodes worker-0-2 dedicated-
node/worker-0-2 untainted
[root@cert-rhosp-02 ~]# oc get pods
NAME                                                              READY   STATUS    RESTARTS        AGE
fence-agents-remediation-controller-manager-6d67fccd55-lzjsv      2/2     Running   0               3h45m
fence-agents-remediation-controller-manager-6d67fccd55-vswjb      2/2     Running   0               3h45m
machine-deletion-remediation-controller-manager-74d4bcf77-k6hth   2/2     Running   0               3h45m
node-healthcheck-controller-manager-5ccc7f9869-thsr4              2/2     Running   0               94s
node-healthcheck-controller-manager-5ccc7f9869-vk558              1/2     Running   0               94s
node-healthcheck-node-remediation-console-plugin-8556fb7f564rpj   1/1     Running   0               3h45m
node-maintenance-operator-controller-manager-66f47c5457-g5wg6     1/1     Running   0               3h45m
self-node-remediation-controller-manager-7444ddf558-rhl8f         2/2     Running   1 (3h44m ago)   3h45m
self-node-remediation-controller-manager-7444ddf558-xrqzt         2/2     Running   0               3h45m
self-node-remediation-ds-22f2g                                    1/1     Running   0               3h44m
self-node-remediation-ds-4htc6                                    1/1     Running   0               3h44m
self-node-remediation-ds-f2ldg                                    1/1     Running   0               3h44m
self-node-remediation-ds-qmv2z                                    1/1     Running   0               3h44m
self-node-remediation-ds-sq65s                                    1/1     Running   0               3h44m
self-node-remediation-ds-trlhb                                    1/1     Running   0               3h44m
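Note: the result above is consistent with the DoNotSchedule constraint: with every node except master-0-1 tainted, one replacement replica could start (presumably on master-0-1, the only untainted node) while the other stayed Pending, since joining it on the same host would violate maxSkew: 1; after the taints were removed it scheduled and came up. A minimal sketch for batching the taint and untaint steps, using the same key and effect as in this session:

    NODES="master-0-0 master-0-2 worker-0-0 worker-0-1 worker-0-2"
    # apply the NoSchedule taint to every node except master-0-1
    for n in $NODES; do oc adm taint nodes "$n" dedicated=special-workload:NoSchedule; done
    # ...and later remove it again
    for n in $NODES; do oc adm taint nodes "$n" dedicated-; done

To see why a replica is Pending, the scheduler's reasoning (taint and topology-spread messages) could be checked with a command such as: oc describe pod node-healthcheck-controller-manager-5ccc7f9869-vk558 -n openshift-workload-availability (Events section); that output was not captured in this session.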