RHEL-15394

virtio-blk: qemu hang ("no response on QMP query-status") when writing data to a disk without enough space

    • qemu-kvm-8.2.0-6.el9
    • Important
    • Regression, CustomerScenariosInitiative
    • rhel-sst-virtualization-storage
    • ssg_virtualization
    • QE ack
    • Red Hat Enterprise Linux
    • x86_64
    • Linux

      What were you trying to do that didn't work?
      When writing data to a disk without enough space, QEMU hangs on the QMP
      command "query-status".

      Please provide the package NVR for which the bug is seen

      Red Hat Enterprise Linux release 9.4 Beta (Plow)
      5.14.0-376.el9.x86_64
      qemu-kvm-8.1.0-3.el9.x86_64
      seabios-bin-1.16.1-1.el9.noarch
      edk2-ovmf-20230524-3.el9.noarch
      libvirt-9.5.0-7.el9_3.x86_64
      virtio-win-prewhql-0.1-240.iso

      How reproducible:
      50%+

      Steps to reproduce
      1. Create a loop device whose backing file is too small for the qcow2 image it will hold:
      /usr/bin/qemu-img create -f raw /tmp/xtmpfs/stg0.raw 50M

      losetup /dev/loop0 /tmp/xtmpfs/stg0.raw && chmod 666 /dev/loop0

      /usr/bin/qemu-img create -f qcow2 /dev/loop0 500M
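
      A quick sanity check of the mismatch (using the same paths as above): the
      qcow2 layer advertises 500M while the raw file backing the loop device is
      only 50M, so host-side allocation will eventually fail with ENOSPC.

      /usr/bin/qemu-img info /dev/loop0
      ls -lh /tmp/xtmpfs/stg0.raw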

      2. Boot a VM with the loop device attached as a virtio-blk data disk:
      /usr/libexec/qemu-kvm \
      -name 'avocado-vt-vm1' \
      -sandbox on \
      -machine q35,memory-backend=mem-machine_mem \
      -device '{"id": "pcie-root-port-0", "driver": "pcie-root-port", "multifunction": true, "bus": "pcie.0", "addr": "0x1", "chassis": 1}' \
      -device '{"id": "pcie-pci-bridge-0", "driver": "pcie-pci-bridge", "addr": "0x0", "bus": "pcie-root-port-0"}' \
      -nodefaults \
      -device '{"driver": "VGA", "bus": "pcie.0", "addr": "0x2"}' \
      -m 30720 \
      -object '{"size": 32212254720, "id": "mem-machine_mem", "qom-type": "memory-backend-ram"}' \
      -smp 8,maxcpus=8,cores=4,threads=1,dies=1,sockets=2 \
      -cpu 'Skylake-Server-IBRS',ss=on,vmx=on,pdcm=on,hypervisor=on,tsc-adjust=on,clflushopt=on,umip=on,pku=on,md-clear=on,stibp=on,flush-l1d=on,arch-capabilities=on,ssbd=on,xsaves=on,ibpb=on,ibrs=on,amd-stibp=on,amd-ssbd=on,rsba=on,skip-l1dfl-vmentry=on,pschange-mc-no=on,kvm_pv_unhalt=on \
      -device '{"id": "pcie-root-port-1", "port": 1, "driver": "pcie-root-port", "addr": "0x1.0x1", "bus": "pcie.0", "chassis": 2}' \
      -device '{"driver": "qemu-xhci", "id": "usb1", "bus": "pcie-root-port-1", "addr": "0x0"}' \
      -device '{"driver": "usb-tablet", "id": "usb-tablet1", "bus": "usb1.0", "port": "1"}' \
      -object '{"qom-type": "iothread", "id": "iothread0"}' \
      -object '{"qom-type": "iothread", "id": "iothread1"}' \
      -device '{"id": "pcie-root-port-2", "port": 2, "driver": "pcie-root-port", "addr": "0x1.0x2", "bus": "pcie.0", "chassis": 3}' \
      -device '{"id": "virtio_scsi_pci0", "driver": "virtio-scsi-pci", "bus": "pcie-root-port-2", "addr": "0x0", "iothread": "iothread0"}' \
      -blockdev '{"node-name": "file_image1", "driver": "file", "auto-read-only": true, "discard": "unmap", "aio": "threads", "filename": "/home/kvm_autotest_root/images/rhel940-64-virtio-scsi.qcow2", "cache": {"direct": true, "no-flush": false}}' \
      -blockdev '{"node-name": "drive_image1", "driver": "qcow2", "read-only": false, "cache": {"direct": true, "no-flush": false}, "file": "file_image1"}' \
      -device '{"driver": "scsi-hd", "id": "image1", "drive": "drive_image1", "write-cache": "on"}' \
      -blockdev '{"node-name": "file_stg1", "driver": "host_device", "auto-read-only": true, "discard": "unmap", "aio": "native", "filename": "/dev/loop0", "cache": {"direct": true, "no-flush": false}}' \
      -blockdev '{"node-name": "drive_stg1", "driver": "qcow2", "read-only": false, "cache": {"direct": true, "no-flush": false}, "file": "file_stg1"}' \
      -device '{"id": "pcie-root-port-3", "port": 3, "driver": "pcie-root-port", "addr": "0x1.0x3", "bus": "pcie.0", "chassis": 4}' \
      -device '{"driver": "virtio-blk-pci", "id": "stg1", "drive": "drive_stg1", "write-cache": "on", "rerror": "stop", "werror": "stop", "serial": "TARGET_DISK0", "bus": "pcie-root-port-3", "addr": "0x0", "iothread": "iothread1"}' \
      -device '{"id": "pcie-root-port-4", "port": 4, "driver": "pcie-root-port", "addr": "0x1.0x4", "bus": "pcie.0", "chassis": 5}' \
      -device '{"driver": "virtio-net-pci", "mac": "9a:14:7f:bc:e5:53", "id": "id4OwnnQ", "netdev": "idr2ph7R", "bus": "pcie-root-port-4", "addr": "0x0"}' \
      -netdev tap,id=idr2ph7R,vhost=on \
      -vnc :0 \
      -rtc base=utc,clock=host,driftfix=slew \
      -boot menu=off,order=cdn,once=c,strict=off \
      -enable-kvm \
      -device '{"id": "pcie_extra_root_port_0", "driver": "pcie-root-port", "multifunction": true, "bus": "pcie.0", "addr": "0x3", "chassis": 6}'

      3. Log in to the guest and write more data than the backing file can hold
      (the write hits ENOSPC on the host, and werror=stop should pause the guest):

      dd if=/dev/urandom of=/dev/vda oflag=direct bs=500M

      4. Query the VM status and wait for it to enter the paused state:

      {"execute": "query-status", "id": "Ph5qzOVx"}

      Expected results
      The VM enters the paused state and the QMP command returns.
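
      Since werror=stop pauses the VM in the io-error run state on ENOSPC, the
      reply should look roughly like the following (illustrative, not captured
      from this run):

      {"return": {"status": "io-error", "running": false}, "id": "Ph5qzOVx"}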

      Actual results
      No response to QMP "query-status".

      This issue is not hit on:

      Red Hat Enterprise Linux release 9.3 Beta (Plow)
      5.14.0-362.8.1.el9_3.x86_64
      qemu-kvm-8.0.0-16.el9_3.x86_64

      GDB BT

      #0 0x00007f160549c560 in __lll_lock_wait () at /lib64/libc.so.6
      #1 0x00007f16054a2c7d in pthread_mutex_lock@@GLIBC_2.2.5 ()
      at /lib64/libc.so.6
      #2 0x000055c6d28c0676 in qemu_mutex_lock_impl
      (mutex=0x55c6d340adc0, file=0x80 <error: Cannot access memory at address 0x80>, line=2) at ../util/qemu-thread-posix.c:94
      #3 0x000055c6d26fde06 in aio_context_acquire (ctx=0x55c6d340ad60)
      at ../util/async.c:728
      #4 bdrv_do_drained_begin (bs=0x55c6d3437900, parent=0x0, poll=<optimized out>)
      at ../block/io.c:381
      #5 0x000055c6d26e9854 in bdrv_drained_begin (bs=0x55c6d3437900)
      at ../block/io.c:393
      #6 blk_drain (blk=0x55c6d46dbe60) at ../block/block-backend.c:2077
      #7 0x000055c6d22e6f9d in virtio_blk_data_plane_stop (vdev=<optimized out>)
      at ../hw/block/dataplane/virtio-blk.c:363
      #8 0x000055c6d23d6fc5 in virtio_bus_stop_ioeventfd (bus=<optimized out>)
      at ../hw/virtio/virtio-bus.c:259
      #9 virtio_pci_stop_ioeventfd (proxy=0x55c6d46cf300)
      at ../hw/virtio/virtio-pci.c:391
      #10 virtio_pci_vmstate_change (d=0x55c6d46cf300, running=<optimized out>)
      at ../hw/virtio/virtio-pci.c:1374
      #11 0x000055c6d25c1ca4 in virtio_vmstate_change
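
      For reference, a backtrace like the one above can be captured from the
      hung process (assuming qemu-kvm debuginfo is installed):

      gdb -p "$(pidof qemu-kvm)" -batch -ex 'thread apply all bt'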

              shajnocz@redhat.com Stefan Hajnoczi
              qingwangrh qing wang
              virt-maint virt-maint
              qing wang qing wang