-
Bug
-
Resolution: Done-Errata
-
Major
-
CNV v4.19.0
-
None
-
Quality / Stability / Reliability
-
0.42
-
False
-
-
False
-
CNV v4.19.0.rhel9-153
-
-
None
Description of problem:
Created a VM from the high-performance template on an arm64 cluster, but the VM cannot run normally.
Version-Release number of selected component (if applicable):
cnv-4.19
How reproducible:
100%
Steps to Reproduce:
1. Create an arm64 VM using the high-performance templates. 2. Ensure dedicatedCpuPlacement is present in the VM spec.
Actual results:
The VM enters the CrashLoopBackOff state.
Expected results:
The VM reaches the Running state.
Additional info:
VM spec:
$ oc get vm fedora-coral-lobster-11 -o yaml
apiVersion: kubevirt.io/v1
kind: VirtualMachine
metadata:
annotations:
kubevirt.io/latest-observed-api-version: v1
kubevirt.io/storage-observed-api-version: v1
vm.kubevirt.io/validations: |
[
{
"name": "minimal-required-memory",
"path": "jsonpath::.spec.domain.memory.guest",
"rule": "integer",
"message": "This VM requires more memory.",
"min": 2147483648
}
]
creationTimestamp: "2025-04-23T15:27:20Z"
finalizers:
- kubevirt.io/virtualMachineControllerFinalize
generation: 1
labels:
app: fedora-coral-lobster-11
kubevirt.io/dynamic-credentials-support: "true"
vm.kubevirt.io/template: fedora-highperformance-medium-arm64
vm.kubevirt.io/template.namespace: openshift
vm.kubevirt.io/template.revision: "1"
vm.kubevirt.io/template.version: v0.34.0
name: fedora-coral-lobster-11
namespace: kbidarka
resourceVersion: "195846"
uid: b8b5aa55-5104-4a48-ba13-07e6ffec5b5b
spec:
dataVolumeTemplates:
- apiVersion: cdi.kubevirt.io/v1beta1
kind: DataVolume
metadata:
creationTimestamp: null
name: fedora-coral-lobster-11
spec:
source:
http:
url: https://download.fedoraproject.org/pub/fedora/linux/releases/40/Cloud/aarch64/images/Fedora-Cloud-Base-Generic.aarch64-40-1.14.qcow2
storage:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 30Gi
storageClassName: trident-csi-nfs
volumeMode: Filesystem
runStrategy: RerunOnFailure
template:
metadata:
annotations:
vm.kubevirt.io/flavor: medium
vm.kubevirt.io/os: fedora
vm.kubevirt.io/workload: highperformance
creationTimestamp: null
labels:
kubevirt.io/domain: fedora-coral-lobster-11
kubevirt.io/size: medium
network.kubevirt.io/headlessService: headless
spec:
architecture: arm64
domain:
cpu:
cores: 1
dedicatedCpuPlacement: true
isolateEmulatorThread: true
sockets: 1
threads: 1
devices:
disks:
- dedicatedIOThread: true
disk:
bus: virtio
name: rootdisk
- disk:
bus: virtio
name: cloudinitdisk
interfaces:
- macAddress: 02:10:9f:00:00:00
masquerade: {}
model: virtio
name: default
rng: {}
firmware:
bootloader:
efi:
secureBoot: false
ioThreadsPolicy: shared
machine:
type: virt
memory:
guest: 4Gi
resources: {}
networks:
- name: default
pod: {}
terminationGracePeriodSeconds: 180
volumes:
- dataVolume:
name: fedora-coral-lobster-11
name: rootdisk
- cloudInitNoCloud:
userData: |-
#cloud-config
user: fedora
password: o671-0xp0-77ki
chpasswd: { expire: False }
name: cloudinitdisk
status:
conditions:
- lastProbeTime: "2025-04-23T15:46:42Z"
lastTransitionTime: "2025-04-23T15:46:42Z"
message: VMI does not exist
reason: VMINotExists
status: "False"
type: Ready
- lastProbeTime: null
lastTransitionTime: null
message: All of the VMI's DVs are bound and not running
reason: AllDVsReady
status: "True"
type: DataVolumesReady
- lastProbeTime: null
lastTransitionTime: null
status: "True"
type: LiveMigratable
- lastProbeTime: null
lastTransitionTime: null
status: "True"
type: StorageLiveMigratable
desiredGeneration: 1
observedGeneration: 1
printableStatus: CrashLoopBackOff
runStrategy: RerunOnFailure
startFailure:
consecutiveFailCount: 7
lastFailedVMIUID: 4048bce9-46b7-4ac1-bb58-1e0a26ba89df
retryAfterTimestamp: "2025-04-23T15:51:42Z"
stateChangeRequests:
- action: Start
volumeSnapshotStatuses:
- enabled: true
name: rootdisk
- enabled: false
name: cloudinitdisk
reason: Snapshot is not supported for this volumeSource type [cloudinitdisk]
Pod spec (virt-launcher):
$ oc get pod virt-launcher-fedora-coral-lobster-11-v5w99 -o yaml apiVersion: v1 kind: Pod metadata: annotations: descheduler.alpha.kubernetes.io/request-evict-only: "" k8s.ovn.org/pod-networks: '{"default":{"ip_addresses":["10.128.2.72/23"],"mac_address":"0a:58:0a:80:02:48","gateway_ips":["10.128.2.1"],"routes":[{"dest":"10.128.0.0/14","nextHop":"10.128.2.1"},{"dest":"172.30.0.0/16","nextHop":"10.128.2.1"},{"dest":"169.254.0.5/32","nextHop":"10.128.2.1"},{"dest":"100.64.0.0/16","nextHop":"10.128.2.1"}],"ip_address":"10.128.2.72/23","gateway_ip":"10.128.2.1","role":"primary"}}' k8s.v1.cni.cncf.io/network-status: |- [{ "name": "ovn-kubernetes", "interface": "eth0", "ips": [ "10.128.2.72" ], "mac": "0a:58:0a:80:02:48", "default": true, "dns": {} }] kubectl.kubernetes.io/default-container: compute kubevirt.io/domain: fedora-coral-lobster-11 kubevirt.io/migrationTransportUnix: "true" kubevirt.io/vm-generation: "1" openshift.io/scc: kubevirt-controller post.hook.backup.velero.io/command: '["/usr/bin/virt-freezer", "--unfreeze", "--name", "fedora-coral-lobster-11", "--namespace", "kbidarka"]' post.hook.backup.velero.io/container: compute pre.hook.backup.velero.io/command: '["/usr/bin/virt-freezer", "--freeze", "--name", "fedora-coral-lobster-11", "--namespace", "kbidarka"]' pre.hook.backup.velero.io/container: compute seccomp.security.alpha.kubernetes.io/pod: localhost/kubevirt/kubevirt.json traffic.sidecar.istio.io/kubevirtInterfaces: k6t-eth0 vm.kubevirt.io/flavor: medium vm.kubevirt.io/os: fedora vm.kubevirt.io/workload: highperformance creationTimestamp: "2025-04-23T15:41:04Z" generateName: virt-launcher-fedora-coral-lobster-11- labels: kubevirt.io: virt-launcher kubevirt.io/created-by: 8c566826-0295-47e0-b330-707b326fb17f kubevirt.io/domain: fedora-coral-lobster-11 kubevirt.io/nodeName: ip-10-0-22-97.us-east-2.compute.internal kubevirt.io/size: medium network.kubevirt.io/headlessService: headless vm.kubevirt.io/name: fedora-coral-lobster-11 name: 
virt-launcher-fedora-coral-lobster-11-v5w99 namespace: kbidarka ownerReferences: - apiVersion: kubevirt.io/v1 blockOwnerDeletion: true controller: true kind: VirtualMachineInstance name: fedora-coral-lobster-11 uid: 8c566826-0295-47e0-b330-707b326fb17f resourceVersion: "192697" uid: 01aff7f9-e4b8-47eb-9254-06df37aed824 spec: automountServiceAccountToken: false containers: - command: - /usr/bin/virt-launcher-monitor - --qemu-timeout - 287s - --name - fedora-coral-lobster-11 - --uid - 8c566826-0295-47e0-b330-707b326fb17f - --namespace - kbidarka - --kubevirt-share-dir - /var/run/kubevirt - --ephemeral-disk-dir - /var/run/kubevirt-ephemeral-disks - --container-disk-dir - /var/run/kubevirt/container-disks - --grace-period-seconds - "195" - --hook-sidecars - "0" - --ovmf-path - /usr/share/AAVMF - --run-as-nonroot env: - name: XDG_CACHE_HOME value: /var/run/kubevirt-private - name: XDG_CONFIG_HOME value: /var/run/kubevirt-private - name: XDG_RUNTIME_DIR value: /var/run - name: SHARED_FILESYSTEM_PATHS value: /var/run/kubevirt-private/vmi-disks/rootdisk - name: POD_NAME valueFrom: fieldRef: apiVersion: v1 fieldPath: metadata.name image: registry.redhat.io/container-native-virtualization/virt-launcher-rhel9@sha256:f0d3d86368759c6afdeec428bc668829bd3cbed1bc64e5f5df8f2d4a98dee603 imagePullPolicy: IfNotPresent name: compute resources: limits: cpu: "2" devices.kubevirt.io/kvm: "1" devices.kubevirt.io/tun: "1" devices.kubevirt.io/vhost-net: "1" memory: "4806672385" requests: cpu: "2" devices.kubevirt.io/kvm: "1" devices.kubevirt.io/tun: "1" devices.kubevirt.io/vhost-net: "1" ephemeral-storage: 50M memory: "4806672385" securityContext: allowPrivilegeEscalation: false capabilities: add: - NET_BIND_SERVICE drop: - ALL privileged: false runAsGroup: 107 runAsNonRoot: true runAsUser: 107 terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /var/run/kubevirt-private name: private - mountPath: /var/run/kubevirt name: public - mountPath: 
/var/run/kubevirt-ephemeral-disks name: ephemeral-disks - mountPath: /var/run/kubevirt/container-disks mountPropagation: HostToContainer name: container-disks - mountPath: /var/run/libvirt name: libvirt-runtime - mountPath: /var/run/kubevirt/sockets name: sockets - mountPath: /var/run/kubevirt-private/vmi-disks/rootdisk name: rootdisk - mountPath: /var/run/kubevirt/hotplug-disks mountPropagation: HostToContainer name: hotplug-disks - args: - --logfile - /var/run/kubevirt-private/8c566826-0295-47e0-b330-707b326fb17f/virt-serial0-log - --socket-timeout - 287s command: - /usr/bin/virt-tail env: - name: VIRT_LAUNCHER_LOG_VERBOSITY value: "2" image: registry.redhat.io/container-native-virtualization/virt-launcher-rhel9@sha256:f0d3d86368759c6afdeec428bc668829bd3cbed1bc64e5f5df8f2d4a98dee603 imagePullPolicy: IfNotPresent name: guest-console-log resources: limits: cpu: 15m memory: 60M requests: cpu: 15m memory: 60M securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL runAsNonRoot: true runAsUser: 107 terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /var/run/kubevirt-private name: private readOnly: true dnsPolicy: ClusterFirst enableServiceLinks: false hostname: fedora-coral-lobster-11 imagePullSecrets: - name: default-dockercfg-z48cx nodeName: ip-10-0-22-97.us-east-2.compute.internal nodeSelector: cpumanager: "true" kubernetes.io/arch: arm64 kubevirt.io/schedulable: "true" machine-type.node.kubevirt.io/virt: "true" preemptionPolicy: PreemptLowerPriority priority: 0 readinessGates: - conditionType: kubevirt.io/virtual-machine-unpaused restartPolicy: Never schedulerName: default-scheduler securityContext: fsGroup: 107 runAsGroup: 107 runAsNonRoot: true runAsUser: 107 seccompProfile: localhostProfile: kubevirt/kubevirt.json type: Localhost serviceAccount: default serviceAccountName: default terminationGracePeriodSeconds: 210 tolerations: - effect: NoExecute key: node.kubernetes.io/not-ready operator: 
Exists tolerationSeconds: 300 - effect: NoExecute key: node.kubernetes.io/unreachable operator: Exists tolerationSeconds: 300 - effect: NoSchedule key: node.kubernetes.io/memory-pressure operator: Exists volumes: - emptyDir: {} name: private - emptyDir: {} name: public - emptyDir: {} name: sockets - emptyDir: {} name: virt-bin-share-dir - emptyDir: {} name: libvirt-runtime - emptyDir: {} name: ephemeral-disks - emptyDir: {} name: container-disks - name: rootdisk persistentVolumeClaim: claimName: fedora-coral-lobster-11 - emptyDir: {} name: hotplug-disks status: conditions: - lastProbeTime: "2025-04-23T15:41:04Z" lastTransitionTime: "2025-04-23T15:41:04Z" message: the virtual machine is not paused reason: NotPaused status: "True" type: kubevirt.io/virtual-machine-unpaused - lastProbeTime: null lastTransitionTime: "2025-04-23T15:41:14Z" status: "True" type: PodReadyToStartContainers - lastProbeTime: null lastTransitionTime: "2025-04-23T15:41:04Z" status: "True" type: Initialized - lastProbeTime: null lastTransitionTime: "2025-04-23T15:41:17Z" reason: PodFailed status: "False" type: Ready - lastProbeTime: null lastTransitionTime: "2025-04-23T15:41:17Z" reason: PodFailed status: "False" type: ContainersReady - lastProbeTime: null lastTransitionTime: "2025-04-23T15:41:04Z" status: "True" type: PodScheduled containerStatuses: - containerID: cri-o://2e35a22eb5d57de20a916ee1a5bb91cc88972e4c273694f343db96fccab697ac image: registry.redhat.io/container-native-virtualization/virt-launcher-rhel9@sha256:f0d3d86368759c6afdeec428bc668829bd3cbed1bc64e5f5df8f2d4a98dee603 imageID: registry.redhat.io/container-native-virtualization/virt-launcher-rhel9@sha256:c92ef2262f7d0f08840606de5a84fa65232e7148ed3c4a250eb32941453e52b9 lastState: {} name: compute ready: false restartCount: 0 started: false state: terminated: containerID: cri-o://2e35a22eb5d57de20a916ee1a5bb91cc88972e4c273694f343db96fccab697ac exitCode: 2 finishedAt: "2025-04-23T15:41:17Z" reason: Error startedAt: 
"2025-04-23T15:41:13Z" volumeMounts: - mountPath: /var/run/kubevirt-private name: private - mountPath: /var/run/kubevirt name: public - mountPath: /var/run/kubevirt-ephemeral-disks name: ephemeral-disks - mountPath: /var/run/kubevirt/container-disks name: container-disks - mountPath: /var/run/libvirt name: libvirt-runtime - mountPath: /var/run/kubevirt/sockets name: sockets - mountPath: /var/run/kubevirt-private/vmi-disks/rootdisk name: rootdisk - mountPath: /var/run/kubevirt/hotplug-disks name: hotplug-disks - containerID: cri-o://81e94f645e8927d02503aac6a4cba1c1230392f9a08009ddedcb275025359991 image: registry.redhat.io/container-native-virtualization/virt-launcher-rhel9@sha256:f0d3d86368759c6afdeec428bc668829bd3cbed1bc64e5f5df8f2d4a98dee603 imageID: registry.redhat.io/container-native-virtualization/virt-launcher-rhel9@sha256:c92ef2262f7d0f08840606de5a84fa65232e7148ed3c4a250eb32941453e52b9 lastState: {} name: guest-console-log ready: false restartCount: 0 started: false state: terminated: containerID: cri-o://81e94f645e8927d02503aac6a4cba1c1230392f9a08009ddedcb275025359991 exitCode: 0 finishedAt: "2025-04-23T15:41:17Z" reason: Completed startedAt: "2025-04-23T15:41:13Z" volumeMounts: - mountPath: /var/run/kubevirt-private name: private readOnly: true recursiveReadOnly: Disabled hostIP: 10.0.22.97 hostIPs: - ip: 10.0.22.97 phase: Running podIP: 10.128.2.72 podIPs: - ip: 10.128.2.72 qosClass: Guaranteed startTime: "2025-04-23T15:41:04Z"
- links to
-
RHEA-2025:145122
OpenShift Virtualization 4.19.0 Images