Red Hat Advanced Cluster Management / ACM-11452

Find and resolve the PDB ConstraintTemplate issue


    • Type: Task
    • Resolution: Done
    • Priority: Critical
    • Component: GRC
    • Sprint: GRC Sprint 2024-09

       

      Hello yikim@redhat.com, I finally got the requested feedback from the customer:

      Please find the "Gatekeeper CR + ConstraintTemplate CR + Constraint CR + Config CR" in the linked file:

      https://drive.google.com/file/d/1ppyNIBFbFIz7VGYy_KvTWhzsh3u-JLkl/view?usp=sharing

       

      The customer also shared the following details for the sake of completeness:

      All of our policies are working fine except the PDB ConstraintTemplate called k8spdb.

      To be more precise: we have a variable, namely data.inventory.namespace[input.review.object.metadata.namespace]["apps/v1"]["Deployment"][_]. This variable should give us the Deployment definition so that we can check the current replicas, but it is empty as long as we have caching disabled in our audit configuration.

      The rule works on a dev cluster with the cache enabled, but we cannot use it in production because we have too many namespaces there.
      We have also tried using the sync-only filter and excluding all openshift namespaces, but the memory usage of the audit pod blows up enormously and we get a lot of timeouts between the controller and the audit pod.
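
      For context, a Gatekeeper Config along these lines is the usual way to keep the audit cache small by replicating only the kinds the template looks up. This is a minimal sketch, not the customer's actual Config CR (that one is in the linked file); the namespace and the excluded-namespace globs below are assumptions that have to match the real installation:

      ```
      apiVersion: config.gatekeeper.sh/v1alpha1
      kind: Config
      metadata:
        # Gatekeeper only reads a Config named "config" in its own install namespace
        # (e.g. gatekeeper-system, or openshift-gatekeeper-system with the operator).
        name: config
        namespace: gatekeeper-system
      spec:
        sync:
          # Replicate only the kinds the k8spdb Rego dereferences from data.inventory.
          syncOnly:
            - group: "apps"
              version: "v1"
              kind: "Deployment"
            - group: "apps"
              version: "v1"
              kind: "StatefulSet"
            - group: "policy"
              version: "v1"
              kind: "PodDisruptionBudget"
        match:
          # Keep platform namespaces out of the cache entirely; adjust the globs as needed.
          - excludedNamespaces: ["kube-*", "openshift-*"]
            processes: ["audit", "webhook", "sync"]
      ```

      Excluding a namespace from the sync process also removes its objects from data.inventory, so the template will not see Deployments there; the trade-off the customer describes is about how many namespaces and kinds end up replicated into the cache.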

      Here is the Rego code they would like to use for the PDBs:

       

      ```

      package k8spdb
      
      # see https://open-policy-agent.github.io/gatekeeper-library/website/validation/poddisruptionbudget/
      
      
      violation[{"msg": msg}] {
        pdb := input.review.object
        input.review.kind.kind == "PodDisruptionBudget"
      
        not valid_pdb_max_unavailable(pdb)
        msg := sprintf(
        "PodDisruptionBudget <%v> has maxUnavailable of 0, only positive integers are allowed for maxUnavailable",
        [pdb.metadata.name],
        )
      }
      
      violation[{"msg": msg}] {
        pdb := input.review.object
        input.review.kind.kind == "PodDisruptionBudget"
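        # data.inventory is populated only for kinds replicated by the Config sync settings;
        # with the cache disabled this lookup is empty and the rule can never fire.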
        obj := data.inventory.namespace[input.review.object.metadata.namespace]["apps/v1"]["Deployment"][_]
      
        matchLabels := { [label, value] | some label; value := pdb.spec.selector.matchLabels[label] }
        labels := { [label, value] | some label; value := obj.spec.selector.matchLabels[label] }
        count(matchLabels - labels) == 0
        
      
        not valid_pdb_min_available(obj, pdb)
        msg := sprintf(
        "%v <%v> has %v replica(s) but PodDisruptionBudget <%v> has minAvailable of %v, PodDisruptionBudget count should always be lower than replica(s), and not used when replica(s) is set to 1    - %v=%v",
        [obj.kind, obj.metadata.name, obj.spec.replicas, pdb.metadata.name, pdb.spec.minAvailable, matchLabels, labels],
        )
      }
      violation[{"msg": msg}] {
        pdb := input.review.object
        input.review.kind.kind == "PodDisruptionBudget"
        obj := data.inventory.namespace[input.review.object.metadata.namespace]["apps/v1"]["StatefulSet"][_]
      
        matchLabels := { [label, value] | some label; value := pdb.spec.selector.matchLabels[label] }
        labels := { [label, value] | some label; value := obj.spec.selector.matchLabels[label] }
        count(matchLabels - labels) == 0
        
      
        not valid_pdb_min_available(obj, pdb)
        msg := sprintf(
        "%v <%v> has %v replica(s) but PodDisruptionBudget <%v> has minAvailable of %v, PodDisruptionBudget count should always be lower than replica(s), and not used when replica(s) is set to 1    - %v=%v",
        [obj.kind, obj.metadata.name, obj.spec.replicas, pdb.metadata.name, pdb.spec.minAvailable, matchLabels, labels],
        )
      }
      
      violation[{"msg": msg}] {
        obj := input.review.object
        pdb := data.inventory.namespace[input.review.object.metadata.namespace]["policy/v1"]["PodDisruptionBudget"][_]
        
        matchLabels := { [label, value] | some label; value := pdb.spec.selector.matchLabels[label] }
        labels := { [label, value] | some label; value := obj.spec.selector.matchLabels[label] }
        count(matchLabels - labels) == 0
      
        not valid_pdb_min_available(obj, pdb)
        msg := sprintf(
          "%v <%v> has %v replica(s) but PodDisruptionBudget <%v> has minAvailable of %v, PodDisruptionBudget count should always be lower than replica(s), and not used when replica(s) is set to 1",
          [obj.kind, obj.metadata.name, obj.spec.replicas, pdb.metadata.name, pdb.spec.minAvailable],
        )
      }
      valid_pdb_min_available(obj, pdb) {
        # default to -1 if minAvailable is not set so valid_pdb_min_available is always true
        # for objects with >= 0 replicas. If minAvailable defaults to >= 0, objects with
        # replicas field might violate this constraint if they are equal to the default set here
        min_available := object.get(pdb.spec, "minAvailable", -1)
        obj.spec.replicas > min_available
      }
      
      valid_pdb_max_unavailable(pdb) {
        # default to 1 if maxUnavailable is not set so valid_pdb_max_unavailable always returns true.
        # If maxUnavailable defaults to 0, it violates this constraint because all pods needs to be
        # available and no pods can be evicted voluntarily
        max_unavailable := object.get(pdb.spec, "maxUnavailable", 1)
        max_unavailable > 0
      }
      ```
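
      For completeness, this is roughly how Rego like the above is typically packaged. It is a generic sketch, not the customer's actual manifests (those are in the linked file); the constraint kind K8sPDB and the constraint name pdb-replica-check are assumptions, and the Rego body is not repeated here:

      ```
      apiVersion: templates.gatekeeper.sh/v1
      kind: ConstraintTemplate
      metadata:
        # The template name must be the lowercased form of the CRD kind below.
        name: k8spdb
      spec:
        crd:
          spec:
            names:
              kind: K8sPDB
        targets:
          - target: admission.k8s.gatekeeper.sh
            rego: |
              package k8spdb
              # ... Rego from above ...
      ---
      apiVersion: constraints.gatekeeper.sh/v1beta1
      kind: K8sPDB
      metadata:
        name: pdb-replica-check
      spec:
        enforcementAction: deny
        match:
          kinds:
            # The Rego inspects both the PDB being admitted and Deployments/StatefulSets,
            # so the constraint has to match all of these kinds at admission time.
            - apiGroups: ["policy"]
              kinds: ["PodDisruptionBudget"]
            - apiGroups: ["apps"]
              kinds: ["Deployment", "StatefulSet"]
      ```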

       
       

            yikim@redhat.com Yi Rae Kim
            Dale Haiducek
            Derek Ho
            ACM GRC & Gatekeeper