[kni@titan45 ~]$ oc get pod node-healthcheck-controller-manager-7d56844489-tv9vp -o yaml
apiVersion: v1
kind: Pod
metadata:
  annotations:
    alm-examples: |-
      [
        {
          "apiVersion": "remediation.medik8s.io/v1alpha1",
          "kind": "NodeHealthCheck",
          "metadata": {
            "name": "nodehealthcheck-sample"
          },
          "spec": {
            "minHealthy": "51%",
            "remediationTemplate": {
              "apiVersion": "self-node-remediation.medik8s.io/v1alpha1",
              "kind": "SelfNodeRemediationTemplate",
              "name": "self-node-remediation-automatic-strategy-template",
              "namespace": "openshift-operators"
            },
            "selector": {
              "matchExpressions": [
                {
                  "key": "node-role.kubernetes.io/worker",
                  "operator": "Exists"
                }
              ]
            },
            "unhealthyConditions": [
              {
                "duration": "300s",
                "status": "False",
                "type": "Ready"
              },
              {
                "duration": "300s",
                "status": "Unknown",
                "type": "Ready"
              }
            ]
          }
        }
      ]
    capabilities: Basic Install
    categories: OpenShift Optional
    console.openshift.io/plugins: '["node-remediation-console-plugin"]'
    containerImage: registry.redhat.io/workload-availability/node-healthcheck-rhel9-operator@sha256:2160f2688e5b098c0bae7597cebcd380019b82ae02f4506d07c2ed85f0b3664d
    createdAt: "2025-09-01 18:07:56"
    description: Detect failed Nodes and trigger remediation with a remediation operator.
    features.operators.openshift.io/cnf: "false"
    features.operators.openshift.io/cni: "false"
    features.operators.openshift.io/csi: "false"
    features.operators.openshift.io/disconnected: "true"
    features.operators.openshift.io/fips-compliant: "true"
    features.operators.openshift.io/proxy-aware: "false"
    features.operators.openshift.io/tls-profiles: "false"
    features.operators.openshift.io/token-auth-aws: "false"
    features.operators.openshift.io/token-auth-azure: "false"
    features.operators.openshift.io/token-auth-gcp: "false"
    k8s.ovn.org/pod-networks: '{"default":{"ip_addresses":["10.128.0.82/23"],"mac_address":"0a:58:0a:80:00:52","gateway_ips":["10.128.0.1"],"routes":[{"dest":"10.128.0.0/14","nextHop":"10.128.0.1"},{"dest":"172.30.0.0/16","nextHop":"10.128.0.1"},{"dest":"169.254.0.5/32","nextHop":"10.128.0.1"},{"dest":"100.64.0.0/16","nextHop":"10.128.0.1"}],"ip_address":"10.128.0.82/23","gateway_ip":"10.128.0.1","role":"primary"}}'
    k8s.v1.cni.cncf.io/network-status: |-
      [{
          "name": "ovn-kubernetes",
          "interface": "eth0",
          "ips": [
              "10.128.0.82"
          ],
          "mac": "0a:58:0a:80:00:52",
          "default": true,
          "dns": {}
      }]
    kubectl.kubernetes.io/default-container: manager
    olm.operatorGroup: node-healthcheck-operator-operatorgroup
    olm.operatorNamespace: openshift-workload-availability
    olm.skipRange: '>=0.9.0 <0.10.0'
    olm.targetNamespaces: ""
    olmcahash: da13a451ef475cd57ffac7dc59904daa3d07895cca39dc2cdc1755770e214124
    openshift.io/scc: restricted-v2
    operatorframework.io/properties: '{"properties":[{"type":"olm.gvk","value":{"group":"remediation.medik8s.io","kind":"NodeHealthCheck","version":"v1alpha1"}},{"type":"olm.package","value":{"packageName":"node-healthcheck-operator","version":"0.10.0"}}]}'
    operatorframework.io/suggested-namespace: openshift-workload-availability
    operatorframework.io/suggested-namespace-template: '{"kind":"Namespace","apiVersion":"v1","metadata":{"name":"openshift-workload-availability","annotations":{"openshift.io/node-selector":""}}}'
    operators.openshift.io/valid-subscription: '["OpenShift Kubernetes Engine", "OpenShift Container Platform", "OpenShift Platform Plus"]'
    operators.operatorframework.io/builder: operator-sdk-v1.33.0
    operators.operatorframework.io/project_layout: go.kubebuilder.io/v3
    repository: https://github.com/medik8s/node-healthcheck-operator
    seccomp.security.alpha.kubernetes.io/pod: runtime/default
    support: Red Hat
  creationTimestamp: "2025-09-12T09:53:58Z"
  generateName: node-healthcheck-controller-manager-7d56844489-
  labels:
    app.kubernetes.io/component: controller-manager
    app.kubernetes.io/name: node-healthcheck-operator
    pod-template-hash: 7d56844489
  name: node-healthcheck-controller-manager-7d56844489-tv9vp
  namespace: openshift-workload-availability
  ownerReferences:
  - apiVersion: apps/v1
    blockOwnerDeletion: true
    controller: true
    kind: ReplicaSet
    name: node-healthcheck-controller-manager-7d56844489
    uid: ab87b05d-4f0b-4569-9ba6-38f444076bdf
  resourceVersion: "50890"
  uid: 3b3f0451-4829-4c27-8e92-6512f6852e19
spec:
  affinity:
    nodeAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
      - preference:
          matchExpressions:
          - key: node-role.kubernetes.io/infra
            operator: Exists
        weight: 3
      - preference:
          matchExpressions:
          - key: node-role.kubernetes.io/master
            operator: Exists
        weight: 1
      - preference:
          matchExpressions:
          - key: node-role.kubernetes.io/control-plane
            operator: Exists
        weight: 1
  containers:
  - args:
    - --secure-listen-address=0.0.0.0:8443
    - --http2-disable
    - --upstream=http://127.0.0.1:8080/
    - --logtostderr=true
    - --v=0
    - --tls-cert-file=/etc/tls/private/tls.crt
    - --tls-private-key-file=/etc/tls/private/tls.key
    env:
    - name: OPERATOR_CONDITION_NAME
      value: node-healthcheck-operator.v0.10.0
    image: registry.redhat.io/openshift4/ose-kube-rbac-proxy-rhel9@sha256:3d3333285fd6736d11ea830fb7fe1f2b8d3e304d682a876458e18eb1173f271d
    imagePullPolicy: IfNotPresent
    name: kube-rbac-proxy
    ports:
    - containerPort: 8443
      name: https
      protocol: TCP
    resources:
      limits:
        cpu: 500m
        memory: 128Mi
      requests:
        cpu: 5m
        memory: 64Mi
    securityContext:
      allowPrivilegeEscalation: false
      capabilities:
        drop:
        - ALL
      runAsUser: 1000740000
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: File
    volumeMounts:
    - mountPath: /etc/tls/private
      name: tls-config
      readOnly: true
    - mountPath: /apiserver.local.config/certificates
      name: apiservice-cert
    - mountPath: /tmp/k8s-webhook-server/serving-certs
      name: webhook-cert
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-6qkch
      readOnly: true
  - args:
    - --health-probe-bind-address=:8081
    - --metrics-bind-address=127.0.0.1:8080
    - --leader-elect
    command:
    - /manager
    env:
    - name: DEPLOYMENT_NAMESPACE
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: metadata.namespace
    - name: OPERATOR_CONDITION_NAME
      value: node-healthcheck-operator.v0.10.0
    image: registry.redhat.io/workload-availability/node-healthcheck-rhel9-operator@sha256:2160f2688e5b098c0bae7597cebcd380019b82ae02f4506d07c2ed85f0b3664d
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 3
      httpGet:
        path: /healthz
        port: 8081
        scheme: HTTP
      initialDelaySeconds: 15
      periodSeconds: 20
      successThreshold: 1
      timeoutSeconds: 1
    name: manager
    readinessProbe:
      failureThreshold: 3
      httpGet:
        path: /readyz
        port: 8081
        scheme: HTTP
      initialDelaySeconds: 5
      periodSeconds: 10
      successThreshold: 1
      timeoutSeconds: 1
    resources:
      requests:
        cpu: 100m
        memory: 20Mi
    securityContext:
      allowPrivilegeEscalation: false
      capabilities:
        drop:
        - ALL
      runAsUser: 1000740000
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: File
    volumeMounts:
    - mountPath: /apiserver.local.config/certificates
      name: apiservice-cert
    - mountPath: /tmp/k8s-webhook-server/serving-certs
      name: webhook-cert
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-6qkch
      readOnly: true
  dnsPolicy: ClusterFirst
  enableServiceLinks: true
  imagePullSecrets:
  - name: node-healthcheck-controller-manager-dockercfg-rf5rc
  nodeName: master-0-0
  preemptionPolicy: PreemptLowerPriority
  priority: 2000000000
  priorityClassName: system-cluster-critical
  restartPolicy: Always
  schedulerName: default-scheduler
  securityContext:
    fsGroup: 1000740000
    runAsNonRoot: true
    seLinuxOptions:
      level: s0:c27,c19
    seccompProfile:
      type: RuntimeDefault
  serviceAccount: node-healthcheck-controller-manager
  serviceAccountName: node-healthcheck-controller-manager
  terminationGracePeriodSeconds: 10
  tolerations:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
    operator: Exists
  - effect: NoSchedule
    key: node-role.kubernetes.io/control-plane
    operator: Exists
  - effect: NoSchedule
    key: node-role.kubernetes.io/infra
    operator: Exists
  - effect: NoExecute
    key: node-role.kubernetes.io/infra
    operator: Exists
  - effect: NoExecute
    key: node.kubernetes.io/not-ready
    operator: Exists
    tolerationSeconds: 300
  - effect: NoExecute
    key: node.kubernetes.io/unreachable
    operator: Exists
    tolerationSeconds: 300
  - effect: NoSchedule
    key: node.kubernetes.io/memory-pressure
    operator: Exists
  volumes:
  - name: tls-config
    secret:
      defaultMode: 420
      secretName: node-healthcheck-tls
  - name: apiservice-cert
    secret:
      defaultMode: 420
      items:
      - key: tls.crt
        path: apiserver.crt
      - key: tls.key
        path: apiserver.key
      secretName: node-healthcheck-controller-manager-service-cert
  - name: webhook-cert
    secret:
      defaultMode: 420
      items:
      - key: tls.crt
        path: tls.crt
      - key: tls.key
        path: tls.key
      secretName: node-healthcheck-controller-manager-service-cert
  - name: kube-api-access-6qkch
    projected:
      defaultMode: 420
      sources:
      - serviceAccountToken:
          expirationSeconds: 3607
          path: token
      - configMap:
          items:
          - key: ca.crt
            path: ca.crt
          name: kube-root-ca.crt
      - downwardAPI:
          items:
          - fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
            path: namespace
      - configMap:
          items:
          - key: service-ca.crt
            path: service-ca.crt
          name: openshift-service-ca.crt
status:
  conditions:
  - lastProbeTime: null
    lastTransitionTime: "2025-09-12T09:54:27Z"
    status: "True"
    type: PodReadyToStartContainers
  - lastProbeTime: null
    lastTransitionTime: "2025-09-12T09:53:58Z"
    status: "True"
    type: Initialized
  - lastProbeTime: null
    lastTransitionTime: "2025-09-12T09:54:38Z"
    status: "True"
    type: Ready
  - lastProbeTime: null
    lastTransitionTime: "2025-09-12T09:54:38Z"
    status: "True"
    type: ContainersReady
  - lastProbeTime: null
    lastTransitionTime: "2025-09-12T09:53:58Z"
    status: "True"
    type: PodScheduled
  containerStatuses:
  - containerID: cri-o://9ee3ae9d3fb7ad319e72fb24274656a73c0d1e16e6fc763a393cd790f6cc9fa9
    image: registry.redhat.io/openshift4/ose-kube-rbac-proxy-rhel9@sha256:3d3333285fd6736d11ea830fb7fe1f2b8d3e304d682a876458e18eb1173f271d
    imageID: registry.redhat.io/openshift4/ose-kube-rbac-proxy-rhel9@sha256:3d3333285fd6736d11ea830fb7fe1f2b8d3e304d682a876458e18eb1173f271d
    lastState: {}
    name: kube-rbac-proxy
    ready: true
    restartCount: 0
    started: true
    state:
      running:
        startedAt: "2025-09-12T09:54:20Z"
    volumeMounts:
    - mountPath: /etc/tls/private
      name: tls-config
      readOnly: true
      recursiveReadOnly: Disabled
    - mountPath: /apiserver.local.config/certificates
      name: apiservice-cert
    - mountPath: /tmp/k8s-webhook-server/serving-certs
      name: webhook-cert
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-6qkch
      readOnly: true
      recursiveReadOnly: Disabled
  - containerID: cri-o://bcad27f98fbafafe71e824e03c5c0f66dc1881e19e7bca75d52b81c4eb39345f
    image: registry.redhat.io/workload-availability/node-healthcheck-rhel9-operator@sha256:2160f2688e5b098c0bae7597cebcd380019b82ae02f4506d07c2ed85f0b3664d
    imageID: registry.redhat.io/workload-availability/node-healthcheck-rhel9-operator@sha256:2160f2688e5b098c0bae7597cebcd380019b82ae02f4506d07c2ed85f0b3664d
    lastState: {}
    name: manager
    ready: true
    restartCount: 0
    started: true
    state:
      running:
        startedAt: "2025-09-12T09:54:26Z"
    volumeMounts:
    - mountPath: /apiserver.local.config/certificates
      name: apiservice-cert
    - mountPath: /tmp/k8s-webhook-server/serving-certs
      name: webhook-cert
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-6qkch
      readOnly: true
      recursiveReadOnly: Disabled
  hostIP: 192.168.123.56
  hostIPs:
  - ip: 192.168.123.56
  phase: Running
  podIP: 10.128.0.82
  podIPs:
  - ip: 10.128.0.82
  qosClass: Burstable
  startTime: "2025-09-12T09:53:58Z"
[kni@titan45 ~]$ oc get csv
NAME                                DISPLAY                             VERSION   REPLACES                           PHASE
fence-agents-remediation.v0.6.0     Fence Agents Remediation Operator   0.6.0     fence-agents-remediation.v0.5.1    Succeeded
node-healthcheck-operator.v0.10.0   Node Health Check Operator          0.10.0    node-healthcheck-operator.v0.9.1   Succeeded
self-node-remediation.v0.10.2       Self Node Remediation Operator      0.10.2    self-node-remediation.v0.10.1      Succeeded
[kni@titan45 ~]$ PODS=$(oc get pods -o name -n openshift-workload-availability | grep health)
[kni@titan45 ~]$ for p in $PODS; do echo "== $p"; oc get "$p" -n openshift-workload-availability -o json | jq .spec.nodeName; done
== pod/node-healthcheck-controller-manager-7d56844489-g2v44
"master-0-1"
== pod/node-healthcheck-controller-manager-7d56844489-tv9vp
"master-0-0"
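Note: the same pod-to-node mapping can also be printed in a single call; a minimal sketch, assuming the controller pods carry the app.kubernetes.io/name=node-healthcheck-operator label shown in the pod YAML above (the POD/NODE column names are just illustrative):

# Select the NHC controller pods by label and print their names alongside the node each runs on
oc get pods -n openshift-workload-availability \
  -l app.kubernetes.io/name=node-healthcheck-operator \
  -o custom-columns=POD:.metadata.name,NODE:.spec.nodeName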