oc get vmi test-vm-for-migration -n storage-mig-test -o yaml
apiVersion: kubevirt.io/v1
kind: VirtualMachineInstance
metadata:
  annotations:
    kubevirt.io/latest-observed-api-version: v1
    kubevirt.io/nonroot: "true"
    kubevirt.io/storage-observed-api-version: v1
    kubevirt.io/vm-generation: "1"
  creationTimestamp: "2026-02-09T13:46:24Z"
  finalizers:
  - kubevirt.io/virtualMachineControllerFinalize
  - kubevirt.io/foregroundDeleteVirtualMachine
  generation: 82
  labels:
    kubevirt.io/nodeName: test-gcnv6-2wl6d-worker-c-6xj8f.c.ocpstrat-1278.internal
  name: test-vm-for-migration
  namespace: storage-mig-test
  ownerReferences:
  - apiVersion: kubevirt.io/v1
    blockOwnerDeletion: true
    controller: true
    kind: VirtualMachine
    name: test-vm-for-migration
    uid: 7bf47ec0-744c-4fd1-8153-f728dada3a77
  resourceVersion: "6405405"
  uid: fc52affc-6be4-45f3-ade7-9745ebdb54df
spec:
  architecture: amd64
  domain:
    cpu:
      cores: 1
      model: host-model
    devices:
      disks:
      - disk:
          bus: virtio
        name: rootdisk
      interfaces:
      - macAddress: 02:5a:3e:ec:1e:78
        masquerade: {}
        name: default
    features:
      acpi:
        enabled: true
    firmware:
      serial: f2488199-a57f-45a5-ab03-6df07a3f6182
      uuid: 94518208-1c07-4458-b35d-f13e30a7f624
    machine:
      type: pc-q35-rhel9.6.0
    memory:
      guest: 2Gi
      maxGuest: 8Gi
    resources: {}
  evictionStrategy: LiveMigrate
  networks:
  - name: default
    pod: {}
  volumes:
  - hostDisk:
      capacity: "42226155520"
      path: /var/run/kubevirt-private/vmi-disks/rootdisk/disk.img
      shared: true
      type: DiskOrCreate
    name: rootdisk
status:
  activePods:
    7e10cbb0-9298-4c4b-922d-e9076dd59e67: test-gcnv6-2wl6d-worker-c-hbcr9.c.ocpstrat-1278.internal
    9a7b166f-1e2b-40db-83c4-7e7ae7583b7f: test-gcnv6-2wl6d-worker-c-hbcr9.c.ocpstrat-1278.internal
    8114380f-f1fc-43e4-95d7-51e07bd38dc9: test-gcnv6-2wl6d-worker-c-hbcr9.c.ocpstrat-1278.internal
    81c3af7c-16bb-4c94-9238-93b9c9a7b706: test-gcnv6-2wl6d-worker-c-qtkj5.c.ocpstrat-1278.internal
    89a9664b-b22a-4747-9e5d-e1e60192112e: test-gcnv6-2wl6d-worker-c-hbcr9.c.ocpstrat-1278.internal
    191f1b18-ffbf-403d-8a3e-9212d98624bd: test-gcnv6-2wl6d-worker-c-hbcr9.c.ocpstrat-1278.internal
    366429c6-ccc7-4c0c-8a2d-67ada81987a5: test-gcnv6-2wl6d-worker-c-hbcr9.c.ocpstrat-1278.internal
    0838966b-d5a8-43a4-9621-9344ac35be72: test-gcnv6-2wl6d-worker-c-qtkj5.c.ocpstrat-1278.internal
    b40d8bb8-eaad-4dd4-8b39-06640c9520b8: test-gcnv6-2wl6d-worker-c-qtkj5.c.ocpstrat-1278.internal
    b5633e6a-ff55-4535-b602-98854f2d692b: test-gcnv6-2wl6d-worker-c-6xj8f.c.ocpstrat-1278.internal
    bc40a3cb-e844-44e8-9816-f84a98ebe780: test-gcnv6-2wl6d-worker-c-hbcr9.c.ocpstrat-1278.internal
    f5a3acf3-a135-44b3-956c-a2a5298eb2e7: test-gcnv6-2wl6d-worker-c-qtkj5.c.ocpstrat-1278.internal
  conditions:
  - lastProbeTime: null
    lastTransitionTime: "2026-02-09T13:46:38Z"
    status: "True"
    type: Ready
  - lastProbeTime: null
    lastTransitionTime: null
    message: All of the VMI's DVs are bound and ready
    reason: AllDVsReady
    status: "True"
    type: DataVolumesReady
  - lastProbeTime: null
    lastTransitionTime: null
    status: "True"
    type: LiveMigratable
  - lastProbeTime: null
    lastTransitionTime: null
    status: "True"
    type: StorageLiveMigratable
  - lastProbeTime: "2026-02-09T13:46:52Z"
    lastTransitionTime: null
    status: "True"
    type: AgentConnected
  - lastProbeTime: null
    lastTransitionTime: "2026-02-09T13:55:37Z"
    message: migrate volumes
    status: "True"
    type: VolumesChange
  currentCPUTopology:
    cores: 1
  guestOSInfo:
    id: fedora
    kernelRelease: 6.17.1-300.fc43.x86_64
    kernelVersion: '#1 SMP PREEMPT_DYNAMIC Mon Oct 6 15:37:21 UTC 2025'
    machine: x86_64
    name: Fedora Linux
    prettyName: Fedora Linux 43 (Cloud Edition)
    version: 43 (Cloud Edition)
    versionId: "43"
  interfaces:
  - infoSource: domain, guest-agent
    interfaceName: enp1s0
    ipAddress: 10.129.2.140
    ipAddresses:
    - 10.129.2.140
    linkState: up
    mac: 02:5a:3e:ec:1e:78
    name: default
    podInterfaceName: eth0
    queueCount: 1
  launcherContainerImageVersion: registry.redhat.io/container-native-virtualization/virt-launcher-rhel9@sha256:06cc6973f14a840250752c68aa733d7cc536fe385079bfdb1a8758699fc806da
  machine:
    type: pc-q35-rhel9.6.0
  memory:
    guestAtBoot: 2Gi
    guestCurrent: 2Gi
    guestRequested: 2Gi
  migratedVolumes:
  - destinationPVCInfo:
      claimName: test-vm-dv-mig
      volumeMode: Filesystem
    sourcePVCInfo:
      claimName: test-vm-dv
      volumeMode: Filesystem
    volumeName: rootdisk
  migrationMethod: BlockMigration
  migrationState:
    endTimestamp: "2026-02-09T14:41:10Z"
    failed: true
    failureReason: 'virError(Code=1, Domain=10, Message=''internal error: process
      exited while connecting to monitor: 2026-02-09T14:41:10.136724Z qemu-kvm: -blockdev
      {"driver":"file","filename":"/var/run/kubevirt-private/vmi-disks/rootdisk/disk.img","node-name":"libvirt-1-storage","auto-read-only":true,"discard":"unmap","cache":{"direct":true,"no-flush":false}}:
      Could not open ''/var/run/kubevirt-private/vmi-disks/rootdisk/disk.img'': Permission
      denied'')'
    migrationConfiguration:
      allowAutoConverge: false
      allowPostCopy: false
      allowWorkloadDisruption: false
      bandwidthPerMigration: "0"
      completionTimeoutPerGiB: 150
      nodeDrainTaintKey: kubevirt.io/drain
      parallelMigrationsPerCluster: 5
      parallelOutboundMigrationsPerNode: 2
      progressTimeout: 150
      unsafeMigrationOverride: false
    migrationUid: 547d467f-4e88-4124-a558-e24551197c00
    mode: PreCopy
    sourceNode: test-gcnv6-2wl6d-worker-c-6xj8f.c.ocpstrat-1278.internal
    sourcePod: virt-launcher-test-vm-for-migration-z6n24
    startTimestamp: "2026-02-09T14:41:09Z"
    targetDirectMigrationNodePorts:
      "36169": 49152
      "40539": 49153
      "44095": 0
    targetNode: test-gcnv6-2wl6d-worker-c-qtkj5.c.ocpstrat-1278.internal
    targetNodeAddress: 10.128.3.150
    targetPod: virt-launcher-test-vm-for-migration-fvwww
  migrationTransport: Unix
  nodeName: test-gcnv6-2wl6d-worker-c-6xj8f.c.ocpstrat-1278.internal
  phase: Running
  phaseTransitionTimestamps:
  - phase: Pending
    phaseTransitionTimestamp: "2026-02-09T13:46:24Z"
  - phase: Scheduling
    phaseTransitionTimestamp: "2026-02-09T13:46:25Z"
  - phase: Scheduled
    phaseTransitionTimestamp: "2026-02-09T13:46:38Z"
  - phase: Running
    phaseTransitionTimestamp: "2026-02-09T13:46:39Z"
  qosClass: Burstable
  runtimeUser: 107
  selinuxContext: system_u:object_r:container_file_t:s0:c253,c852
  virtualMachineRevisionName: revision-start-vm-7bf47ec0-744c-4fd1-8153-f728dada3a77-1
  volumeStatus:
  - name: rootdisk
    persistentVolumeClaimInfo:
      accessModes:
      - ReadWriteMany
      capacity:
        storage: "42226637865"
      claimName: test-vm-dv-mig
      filesystemOverhead: "0.06"
      requests:
        storage: "42226637865"
      volumeMode: Filesystem
    target: vda
[cloud-user@ocp-psi-executor-xl ahmad]$ oc get vmi -A
NAMESPACE          NAME                    AGE   PHASE     IP             NODENAME                                                   READY
storage-mig-test   test-vm-for-migration   58m   Running   10.129.2.140   test-gcnv6-2wl6d-worker-c-6xj8f.c.ocpstrat-1278.internal   True
[cloud-user@ocp-psi-executor-xl ahmad]$
[cloud-user@ocp-psi-executor-xl ahmad]$
[cloud-user@ocp-psi-executor-xl ahmad]$ # Get the VMI yaml
oc get vmi test-vm-for-migration -n storage-mig-test -o yaml
apiVersion: kubevirt.io/v1
kind: VirtualMachineInstance
metadata:
  annotations:
    kubevirt.io/latest-observed-api-version: v1
    kubevirt.io/nonroot: "true"
    kubevirt.io/storage-observed-api-version: v1
    kubevirt.io/vm-generation: "1"
  creationTimestamp: "2026-02-09T13:46:24Z"
  finalizers:
  - kubevirt.io/virtualMachineControllerFinalize
  - kubevirt.io/foregroundDeleteVirtualMachine
  generation: 82
  labels:
    kubevirt.io/nodeName: test-gcnv6-2wl6d-worker-c-6xj8f.c.ocpstrat-1278.internal
  name: test-vm-for-migration
  namespace: storage-mig-test
  ownerReferences:
  - apiVersion: kubevirt.io/v1
    blockOwnerDeletion: true
    controller: true
    kind: VirtualMachine
    name: test-vm-for-migration
    uid: 7bf47ec0-744c-4fd1-8153-f728dada3a77
  resourceVersion: "6405405"
  uid: fc52affc-6be4-45f3-ade7-9745ebdb54df
spec:
  architecture: amd64
  domain:
    cpu:
      cores: 1
      model: host-model
    devices:
      disks:
      - disk:
          bus: virtio
        name: rootdisk
      interfaces:
      - macAddress: 02:5a:3e:ec:1e:78
        masquerade: {}
        name: default
    features:
      acpi:
        enabled: true
    firmware:
      serial: f2488199-a57f-45a5-ab03-6df07a3f6182
      uuid: 94518208-1c07-4458-b35d-f13e30a7f624
    machine:
      type: pc-q35-rhel9.6.0
    memory:
      guest: 2Gi
      maxGuest: 8Gi
    resources: {}
  evictionStrategy: LiveMigrate
  networks:
  - name: default
    pod: {}
  volumes:
  - hostDisk:
      capacity: "42226155520"
      path: /var/run/kubevirt-private/vmi-disks/rootdisk/disk.img
      shared: true
      type: DiskOrCreate
    name: rootdisk
status:
  activePods:
    7e10cbb0-9298-4c4b-922d-e9076dd59e67: test-gcnv6-2wl6d-worker-c-hbcr9.c.ocpstrat-1278.internal
    9a7b166f-1e2b-40db-83c4-7e7ae7583b7f: test-gcnv6-2wl6d-worker-c-hbcr9.c.ocpstrat-1278.internal
    8114380f-f1fc-43e4-95d7-51e07bd38dc9: test-gcnv6-2wl6d-worker-c-hbcr9.c.ocpstrat-1278.internal
    81c3af7c-16bb-4c94-9238-93b9c9a7b706: test-gcnv6-2wl6d-worker-c-qtkj5.c.ocpstrat-1278.internal
    89a9664b-b22a-4747-9e5d-e1e60192112e: test-gcnv6-2wl6d-worker-c-hbcr9.c.ocpstrat-1278.internal
    191f1b18-ffbf-403d-8a3e-9212d98624bd: test-gcnv6-2wl6d-worker-c-hbcr9.c.ocpstrat-1278.internal
    366429c6-ccc7-4c0c-8a2d-67ada81987a5: test-gcnv6-2wl6d-worker-c-hbcr9.c.ocpstrat-1278.internal
    0838966b-d5a8-43a4-9621-9344ac35be72: test-gcnv6-2wl6d-worker-c-qtkj5.c.ocpstrat-1278.internal
    b40d8bb8-eaad-4dd4-8b39-06640c9520b8: test-gcnv6-2wl6d-worker-c-qtkj5.c.ocpstrat-1278.internal
    b5633e6a-ff55-4535-b602-98854f2d692b: test-gcnv6-2wl6d-worker-c-6xj8f.c.ocpstrat-1278.internal
    bc40a3cb-e844-44e8-9816-f84a98ebe780: test-gcnv6-2wl6d-worker-c-hbcr9.c.ocpstrat-1278.internal
    f5a3acf3-a135-44b3-956c-a2a5298eb2e7: test-gcnv6-2wl6d-worker-c-qtkj5.c.ocpstrat-1278.internal
  conditions:
  - lastProbeTime: null
    lastTransitionTime: "2026-02-09T13:46:38Z"
    status: "True"
    type: Ready
  - lastProbeTime: null
    lastTransitionTime: null
    message: All of the VMI's DVs are bound and ready
    reason: AllDVsReady
    status: "True"
    type: DataVolumesReady
  - lastProbeTime: null
    lastTransitionTime: null
    status: "True"
    type: LiveMigratable
  - lastProbeTime: null
    lastTransitionTime: null
    status: "True"
    type: StorageLiveMigratable
  - lastProbeTime: "2026-02-09T13:46:52Z"
    lastTransitionTime: null
    status: "True"
    type: AgentConnected
  - lastProbeTime: null
    lastTransitionTime: "2026-02-09T13:55:37Z"
    message: migrate volumes
    status: "True"
    type: VolumesChange
  currentCPUTopology:
    cores: 1
  guestOSInfo:
    id: fedora
    kernelRelease: 6.17.1-300.fc43.x86_64
    kernelVersion: '#1 SMP PREEMPT_DYNAMIC Mon Oct 6 15:37:21 UTC 2025'
    machine: x86_64
    name: Fedora Linux
    prettyName: Fedora Linux 43 (Cloud Edition)
    version: 43 (Cloud Edition)
    versionId: "43"
  interfaces:
  - infoSource: domain, guest-agent
    interfaceName: enp1s0
    ipAddress: 10.129.2.140
    ipAddresses:
    - 10.129.2.140
    linkState: up
    mac: 02:5a:3e:ec:1e:78
    name: default
    podInterfaceName: eth0
    queueCount: 1
  launcherContainerImageVersion: registry.redhat.io/container-native-virtualization/virt-launcher-rhel9@sha256:06cc6973f14a840250752c68aa733d7cc536fe385079bfdb1a8758699fc806da
  machine:
    type: pc-q35-rhel9.6.0
  memory:
    guestAtBoot: 2Gi
    guestCurrent: 2Gi
    guestRequested: 2Gi
  migratedVolumes:
  - destinationPVCInfo:
      claimName: test-vm-dv-mig
      volumeMode: Filesystem
    sourcePVCInfo:
      claimName: test-vm-dv
      volumeMode: Filesystem
    volumeName: rootdisk
  migrationMethod: BlockMigration
  migrationState:
    endTimestamp: "2026-02-09T14:41:10Z"
    failed: true
    failureReason: 'virError(Code=1, Domain=10, Message=''internal error: process
      exited while connecting to monitor: 2026-02-09T14:41:10.136724Z qemu-kvm: -blockdev
      {"driver":"file","filename":"/var/run/kubevirt-private/vmi-disks/rootdisk/disk.img","node-name":"libvirt-1-storage","auto-read-only":true,"discard":"unmap","cache":{"direct":true,"no-flush":false}}:
      Could not open ''/var/run/kubevirt-private/vmi-disks/rootdisk/disk.img'': Permission
      denied'')'
    migrationConfiguration:
      allowAutoConverge: false
      allowPostCopy: false
      allowWorkloadDisruption: false
      bandwidthPerMigration: "0"
      completionTimeoutPerGiB: 150
      nodeDrainTaintKey: kubevirt.io/drain
      parallelMigrationsPerCluster: 5
      parallelOutboundMigrationsPerNode: 2
      progressTimeout: 150
      unsafeMigrationOverride: false
    migrationUid: 547d467f-4e88-4124-a558-e24551197c00
    mode: PreCopy
    sourceNode: test-gcnv6-2wl6d-worker-c-6xj8f.c.ocpstrat-1278.internal
    sourcePod: virt-launcher-test-vm-for-migration-z6n24
    startTimestamp: "2026-02-09T14:41:09Z"
    targetDirectMigrationNodePorts:
      "36169": 49152
      "40539": 49153
      "44095": 0
    targetNode: test-gcnv6-2wl6d-worker-c-qtkj5.c.ocpstrat-1278.internal
    targetNodeAddress: 10.128.3.150
    targetPod: virt-launcher-test-vm-for-migration-fvwww
  migrationTransport: Unix
  nodeName: test-gcnv6-2wl6d-worker-c-6xj8f.c.ocpstrat-1278.internal
  phase: Running
  phaseTransitionTimestamps:
  - phase: Pending
    phaseTransitionTimestamp: "2026-02-09T13:46:24Z"
  - phase: Scheduling
    phaseTransitionTimestamp: "2026-02-09T13:46:25Z"
  - phase: Scheduled
    phaseTransitionTimestamp: "2026-02-09T13:46:38Z"
  - phase: Running
    phaseTransitionTimestamp: "2026-02-09T13:46:39Z"
  qosClass: Burstable
  runtimeUser: 107
  selinuxContext: system_u:object_r:container_file_t:s0:c253,c852
  virtualMachineRevisionName: revision-start-vm-7bf47ec0-744c-4fd1-8153-f728dada3a77-1
  volumeStatus:
  - name: rootdisk
    persistentVolumeClaimInfo:
      accessModes:
      - ReadWriteMany
      capacity:
        storage: "42226637865"
      claimName: test-vm-dv-mig
      filesystemOverhead: "0.06"
      requests:
        storage: "42226637865"
      volumeMode: Filesystem
    target: vda