Uploaded image for project: 'OpenShift Virtualization'
  1. OpenShift Virtualization
  2. CNV-31896

[2230274] Test authentication policy from mesh fails from VM

XMLWordPrintable

    • CNV-net-QE-257, CNV-net-QE-258
    • Low
    • No

      **
      This bug was originally open for the service-mesh team:
      https://issues.redhat.com/browse/OSSM-4551
      **

      Unable to perform curl request from a VM to a service running in the same service-mesh-member namespace. The same request sent from the VM's virt-launcher pod is successful.

      Version:
      Seen on a BM cluster (bm02-ibm) with OpenShift Virtualization (CNV) version v4.11.5, and OpenShiftSDN CNI:

      oc get clusterversion
      NAME      VERSION   AVAILABLE   PROGRESSING   SINCE   STATUS
      version   4.11.45   True        False         38h     Cluster version is 4.11.45

      oc get csv -A | grep Virtualization
      openshift-cnv                                      kubevirt-hyperconverged-operator.v4.11.5          OpenShift Virtualization                         4.11.5                    kubevirt-hyperconverged-operator.v4.11.4   Succeeded

      oc get csv -n istio-system       
      NAME                                       DISPLAY                                          VERSION                   REPLACES                     PHASE
      jaeger-operator.v1.42.0-5-0.1687199951.p   Red Hat OpenShift distributed tracing platform   1.42.0-5+0.1687199951.p   jaeger-operator.v1.34.1-5    Succeeded
      kiali-operator.v1.65.7                     Kiali Operator                                   1.65.7                    kiali-operator.v1.65.6       Succeeded
      servicemeshoperator.v2.4.1                 Red Hat OpenShift Service Mesh                   2.4.1-0                   servicemeshoperator.v2.4.0   Succeeded

      Steps:
      1. Create namespace service-mesh-test-service-mesh

      2. Create a ServiceMeshMemberRoll default:
      cat << EOF | oc create -f -
      apiVersion: maistra.io/v1
      kind: ServiceMeshMemberRoll
      metadata:
        name: default
        namespace: istio-system
      spec:
        members:
        - service-mesh-test-service-mesh
      EOF

      3. Create a VM service-mesh-vm:
      cat << EOF | oc create -f -
      apiVersion: kubevirt.io/v1
      kind: VirtualMachine
      metadata:
        name: service-mesh-vm
        namespace: service-mesh-test-service-mesh
      spec:
        running: true
        template:
          metadata:
            annotations:
              sidecar.istio.io/inject: 'true'
            labels:
              debugLogs: 'true'
              kubevirt.io/domain: service-mesh-vm
              kubevirt.io/vm: service-mesh-vm
          spec:
            domain:
              devices:
                disks:
                - disk:
                    bus: virtio
                  name: containerdisk
                rng: {}
              resources:
                requests:
                  memory: 128M
            volumes:
            - containerDisk:
                image: kubevirt/cirros-container-disk-demo:latest
              name: containerdisk
      EOF

      4. Create a Service service-mesh-vm-service-mesh-vm:
      cat << EOF | oc create -f -
      apiVersion: v1
      kind: Service
      metadata:
        name: service-mesh-vm-service-mesh-vm
        namespace: service-mesh-test-service-mesh
      spec:
        ipFamilyPolicy: null
        ports:
        - port: 8000
          protocol: TCP
        selector:
          kubevirt.io/domain: service-mesh-vm
        sessionAffinity: None
        type: null
      EOF

      5. Create Deployment httpbin-v1-dp:
      cat << EOF | oc create -f -
      apiVersion: apps/v1
      kind: Deployment
      metadata:
        name: httpbin-v1-dp
        namespace: service-mesh-test-service-mesh
      spec:
        replicas: 1
        selector:
          matchLabels:
            app: httpbin
            version: v1
        template:
          metadata:
            annotations:
              sidecar.istio.io/inject: 'true'
            labels:
              app: httpbin
              version: v1
          spec:
            containers:
            - command:
              - gunicorn
              - -b
              - 0.0.0.0:8000
              - httpbin:app
              - -k
              - gevent
              image: quay.io/verygoodsecurity/httpbin:0.1.80
              imagePullPolicy: Always
              name: httpbin-v1-dp
              ports:
              - containerPort: 8080
            restartPolicy: Always
            serviceAccountName: httpbin
      EOF

      6. Create ServiceAccount httpbin:
      cat << EOF | oc create -f -
      apiVersion: v1
      kind: ServiceAccount
      metadata:
        name: httpbin
        namespace: service-mesh-test-service-mesh

      EOF

      7. Create Service httpbin:
      cat << EOF | oc create -f -
      apiVersion: v1
      kind: Service
      metadata:
        name: httpbin
        namespace: service-mesh-test-service-mesh
      spec:
        ports:
        - port: 8000
          protocol: TCP
        selector:
          app: httpbin
      EOF

      8. Create PeerAuthentication default-pa:
      cat << EOF | oc create -f -
      apiVersion: security.istio.io/v1beta1
      kind: PeerAuthentication
      metadata:
        name: default-pa
        namespace: service-mesh-test-service-mesh
      spec:
        mtls:
          mode: STRICT
      EOF

      9. Connect to the VM:
      virtctl console service-mesh-vm

      10. Send a curl request to the httpbin service:
      curl http://httpbin:8000/ip

      Expected output:

      {   "origin": "127.0.0.6" }

      Actual output:
      curl: (6) Couldn't resolve host 'httpbin'

      If, instead of steps 9-10, I connect to the VM's virt-launcher pod and run the same curl command:
      oc exec -it virt-launcher-service-mesh-vm -- bash

      I get the expected output containing the 127.0.0.6 IP address.

      I'm including the virt-launcher pod's logs.

      RHEL VM network information:

      ip link
      1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000
          link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
      2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 7850 qdisc fq_codel state UP mode DEFAULT group default qlen 1000
          link/ether 0a:58:0a:81:03:88 brd ff:ff:ff:ff:ff:ff
          altname enp1s0
      
      ip addr
      1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
          link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
          inet 127.0.0.1/8 scope host lo
             valid_lft forever preferred_lft forever
          inet6 ::1/128 scope host 
             valid_lft forever preferred_lft forever
      2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 7850 qdisc fq_codel state UP group default qlen 1000
          link/ether 0a:58:0a:81:03:88 brd ff:ff:ff:ff:ff:ff
          altname enp1s0
          inet 10.0.2.2/24 brd 10.0.2.255 scope global dynamic noprefixroute eth0
             valid_lft 86300795sec preferred_lft 86300795sec
          inet6 fe80::858:aff:fe81:388/64 scope link noprefixroute 
             valid_lft forever preferred_lft forever
      
      ip route
      default via 10.0.2.1 dev eth0 proto dhcp src 10.0.2.2 metric 100 
      10.0.2.0/24 dev eth0 proto kernel scope link src 10.0.2.2 metric 100
      cat /etc/resolv.conf 
      # Generated by NetworkManager
      search service-mesh-test-service-mesh.svc.cluster.local svc.cluster.local cluster.local net-sb1.rhos-psi.cnv-qe.rhood.us
      nameserver 172.30.0.10
       

              ehaas1@redhat.com Edward Haas
              rh-ee-awax Anat Wax
              Anat Wax Anat Wax
              Votes:
              0 Vote for this issue
              Watchers:
              5 Start watching this issue

                Created:
                Updated: