-
Bug
-
Resolution: Done
-
Critical
-
openshift-4.16
Description of problem:
The same test case passed in an ostree-installed cluster, but still failed in an RPM-installed cluster. Version-Release number of selected component (if applicable):{code:none} $ oc get configmap -n kube-public microshift-version -o yaml apiVersion: v1 data: major: "4" minor: "16" patch: "0" version: 4.16.0~ec.5 kind: ConfigMap metadata: creationTimestamp: "2024-04-16T22:11:41Z" name: microshift-version namespace: kube-public resourceVersion: "492" uid: 3265f8d9-367e-455f-ae28-ab81c4876284 How reproducible:{code:none} Always Steps to Reproduce:{code:none} Test log from an ostree-installed cluster: {code:java} $ oc get pod -n e2e-ushift-sdn-73085-q8y9khcg NAME READY STATUS RESTARTS AGE macvlan-vepa-host-local-pod1 1/1 Running 0 2m $ oc exec -n e2e-ushift-sdn-73085-q8y9khcg macvlan-vepa-host-local-pod1 -- ip a 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 127.0.0.1/8 scope host lo valid_lft forever preferred_lft forever inet6 ::1/128 scope host valid_lft forever preferred_lft forever 2: eth0@if188: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default link/ether 0a:58:0a:2a:00:90 brd ff:ff:ff:ff:ff:ff link-netnsid 0 inet 10.42.0.144/24 brd 10.42.0.255 scope global eth0 valid_lft forever preferred_lft forever inet6 fe80::858:aff:fe2a:90/64 scope link valid_lft forever preferred_lft forever 3: net1@if2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default link/ether 22:bc:a3:8e:ea:d0 brd ff:ff:ff:ff:ff:ff link-netnsid 0 inet 192.168.10.8/24 brd 192.168.10.255 scope global net1 valid_lft forever preferred_lft forever inet6 2620:52:0:1eb:20bc:a3ff:fe8e:ead0/64 scope global dynamic mngtmpaddr valid_lft 2591857sec preferred_lft 604657sec inet6 fd00:dead:beef:10::8/64 scope global valid_lft forever preferred_lft forever inet6 fe80::20bc:a3ff:fe8e:ead0/64 scope link valid_lft forever preferred_lft forever Test log from an RPM 
install cluster: {code:java} $ oc config set-context --current --namespace e2e-ushift-sdn-73085-xlyin4x6 Context "microshift" modified. $ oc get pod NAME READY STATUS RESTARTS AGE macvlan-vepa-host-local-pod1 1/1 Running 0 3m32s $ oc exec macvlan-vepa-host-local-pod1 -- ip a 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 127.0.0.1/8 scope host lo valid_lft forever preferred_lft forever inet6 ::1/128 scope host valid_lft forever preferred_lft forever 2: eth0@if33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default link/ether 0a:58:0a:2a:00:1c brd ff:ff:ff:ff:ff:ff link-netnsid 0 inet 10.42.0.28/24 brd 10.42.0.255 scope global eth0 valid_lft forever preferred_lft forever inet6 fe80::858:aff:fe2a:1c/64 scope link valid_lft forever preferred_lft forever $ oc get pod macvlan-vepa-host-local-pod1 -o yaml apiVersion: v1 kind: Pod metadata: annotations: k8s.ovn.org/pod-networks: '{"default":{"ip_addresses":["10.42.0.28/24"],"mac_address":"0a:58:0a:2a:00:1c","gateway_ips":["10.42.0.1"],"routes":[{"dest":"10.42.0.0/16","nextHop":"10.42.0.1"},{"dest":"10.43.0.0/16","nextHop":"10.42.0.1"},{"dest":"100.64.0.0/16","nextHop":"10.42.0.1"}],"ip_address":"10.42.0.28/24","gateway_ip":"10.42.0.1"}}' k8s.v1.cni.cncf.io/networks: macvlan-vepa-host-local openshift.io/scc: restricted-v2 seccomp.security.alpha.kubernetes.io/pod: runtime/default creationTimestamp: "2024-04-17T17:45:21Z" labels: name: macvlan-vepa-host-local-pod1 name: macvlan-vepa-host-local-pod1 namespace: e2e-ushift-sdn-73085-xlyin4x6 resourceVersion: "61659" uid: f7bfa351-6529-4324-9b19-c9b3f4513ac6 spec: containers: - env: - name: RESPONSE value: macvlan-vepa-host-local-pod1 image: quay.io/openshifttest/hello-sdn@sha256:c89445416459e7adea9a5a416b3365ed3d74f2491beb904d61dc8d1eb89a72a4 imagePullPolicy: IfNotPresent name: macvlan-vepa-host-local-pod1 resources: {} securityContext: 
allowPrivilegeEscalation: false capabilities: drop: - ALL runAsUser: 1000180000 terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-xkxhv readOnly: true dnsPolicy: ClusterFirst enableServiceLinks: true nodeName: microshift-dev.local preemptionPolicy: PreemptLowerPriority priority: 0 restartPolicy: Always schedulerName: default-scheduler securityContext: fsGroup: 1000180000 runAsNonRoot: true seLinuxOptions: level: s0:c13,c12 seccompProfile: type: RuntimeDefault serviceAccount: default serviceAccountName: default terminationGracePeriodSeconds: 30 tolerations: - effect: NoExecute key: node.kubernetes.io/not-ready operator: Exists tolerationSeconds: 300 - effect: NoExecute key: node.kubernetes.io/unreachable operator: Exists tolerationSeconds: 300 volumes: - name: kube-api-access-xkxhv projected: defaultMode: 420 sources: - serviceAccountToken: expirationSeconds: 3607 path: token - configMap: items: - key: ca.crt path: ca.crt name: kube-root-ca.crt - downwardAPI: items: - fieldRef: apiVersion: v1 fieldPath: metadata.namespace path: namespace - configMap: items: - key: service-ca.crt path: service-ca.crt name: openshift-service-ca.crt status: conditions: - lastProbeTime: null lastTransitionTime: "2024-04-17T17:45:22Z" status: "True" type: PodReadyToStartContainers - lastProbeTime: null lastTransitionTime: "2024-04-17T17:45:21Z" status: "True" type: Initialized - lastProbeTime: null lastTransitionTime: "2024-04-17T17:45:22Z" status: "True" type: Ready - lastProbeTime: null lastTransitionTime: "2024-04-17T17:45:22Z" status: "True" type: ContainersReady - lastProbeTime: null lastTransitionTime: "2024-04-17T17:45:21Z" status: "True" type: PodScheduled containerStatuses: - containerID: cri-o://8d07252bc7a8bffaaf6a2d6cb83b5037e09863b64effbd5bc066d7b364fcabff image: 
quay.io/openshifttest/hello-sdn@sha256:c89445416459e7adea9a5a416b3365ed3d74f2491beb904d61dc8d1eb89a72a4 imageID: quay.io/openshifttest/hello-sdn@sha256:aa4e5b6448e5b38c66505216324ce247fbd14e0a4e8ab3b8c1746c0e49e70234 lastState: {} name: macvlan-vepa-host-local-pod1 ready: true restartCount: 0 started: true state: running: startedAt: "2024-04-17T17:45:21Z" hostIP: 10.1.235.5 hostIPs: - ip: 10.1.235.5 phase: Running podIP: 10.42.0.28 podIPs: - ip: 10.42.0.28 qosClass: BestEffort startTime: "2024-04-17T17:45:21Z" $ oc get net-attach-def NAME AGE macvlan-vepa-host-local 4m17s $ oc get net-attach-def macvlan-vepa-host-local -o yaml apiVersion: k8s.cni.cncf.io/v1 kind: NetworkAttachmentDefinition metadata: creationTimestamp: "2024-04-17T17:45:20Z" generation: 1 name: macvlan-vepa-host-local namespace: e2e-ushift-sdn-73085-xlyin4x6 resourceVersion: "61648" uid: 5ad1c0f4-7000-4cea-a43e-9646def58380 spec: config: | { "cniVersion": "0.3.1", "name": "macvlan-vepa-host-local", "type": "macvlan", "mode": "vepa", "ipam": { "type": "host-local", "ranges": [ [ { "subnet": "192.168.10.0/24", "rangeStart": "192.168.10.1", "rangeEnd": "192.168.10.9" } ], [ { "subnet": "fd00:dead:beef:10::/64", "rangeStart": "fd00:dead:beef:10::1", "rangeEnd": "fd00:dead:beef:10::9" } ] ] } } $ oc get configmap -n kube-public microshift-version -o yaml apiVersion: v1 data: major: "4" minor: "16" patch: "0" version: 4.16.0~ec.5 kind: ConfigMap metadata: creationTimestamp: "2024-04-16T22:11:41Z" name: microshift-version namespace: kube-public resourceVersion: "492" uid: 3265f8d9-367e-455f-ae28-ab81c4876284 Actual results:{code:none} Test failed in RPM installed cluster Expected results:{code:none} Test should pass in RPM installed cluster Additional info:{code:none}