Uploaded image for project: 'RHEL'
  1. RHEL
  2. RHEL-19069

[virtio-win][virtiofs] Memory leak in guest during disk IO on multiple virtiofs shared volume simultaneously

    • virtio-win-prewhql-0.1-247
    • Normal
    • CustomerScenariosInitiative
    • sst_virtualization_windows
    • ssg_virtualization
    • 22
    • 25
    • QE ack
    • False
    • Hide

      None

      Show
      None
    • Red Hat Enterprise Linux
    • x86_64
    • Windows

      Hit a non-paged pool leak under Windows 11 when there is heavy disk IO on multiple virtiofs shares at the same time, and the memory is never released until the guest is rebooted.

      The poolmon output shows the kernel memory pool usage after starting disk IO. The nonpaged pool tag Mmdi grows continuously. The growth is accelerated when there is a lot of disk read/write activity on the virtiofs shares.

      Version-Release number of selected component (if applicable):
      qemu-kvm-8.1.0-2.el9.x86_64
      edk2-ovmf-20230524-3.el9.noarch
      kernel-5.14.0-367.el9.x86_64
      virtiofsd-1.7.2-1.el9.x86_64
      virtio-win-prewhql-0.1-242

      How reproducible:
      always

      Steps to Reproduce:
      1. Boot up Win11 guest with 5 virtiofs devices

      virtiofsd command line (one instance per share, with ${virtiofs_num} = 1..5):
      /usr/libexec/virtiofsd --socket-path=/var/tmp/avocado_syyrigyy/avocado-vt-vm1-fs${virtiofs_num}-virtiofsd.sock -o source=/tmp/virtio_fs${virtiofs_num}_test -o cache=auto

      2. Config WinFsp.Launcher for multifs inside guest.
      Sending command: "C:\Program Files (x86)\WinFsp\bin\fsreg.bat" virtiofs "C:\virtiofs.exe" "-t %1 -m %2"

      3. Start winfsp.launcher instance in guest.
      Sending command: "C:\Program Files (x86)\WinFsp\bin\launchctl-x64.exe" start virtiofs viofsmyfs1 myfs1 V:
      Sending command: "C:\Program Files (x86)\WinFsp\bin\launchctl-x64.exe" start virtiofs viofsmyfs2 myfs2 W:
      ....

      4. Create 5 workloads with the IOmeter software, reading/writing on the five virtiofs disks at the same time.

      5. Open poolmon.exe to check the memory usage.

      Actual results:
      The nonpaged pool tag Mmdi grows continuously. The growth is accelerated when there is a lot of disk read/write activity on the virtiofs shares.
      After stopping the IO test, the memory is not released.

      Expected results:
      No memory leak.

      Additional info:
      qemu cmd line:
      /usr/libexec/qemu-kvm -S -name avocado-vt-vm1 -sandbox on -blockdev

      {"node-name": "file_ovmf_code", "driver": "file", "filename": "/usr/share/OVMF/OVMF_CODE.secboot.fd", "auto-read-only": true, "discard": "unmap"}

      -blockdev

      {"node-name": "drive_ovmf_code", "driver": "raw", "read-only": true, "file": "file_ovmf_code"}

      -blockdev

      {"node-name": "file_ovmf_vars", "driver": "file", "filename": "/root/avocado/data/avocado-vt/avocado-vt-vm1_win11-64-virtio-scsi-ovmf_avocado-vt-vm1_qcow2_filesystem_VARS.raw", "auto-read-only": true, "discard": "unmap"}

      -blockdev

      {"node-name": "drive_ovmf_vars", "driver": "raw", "read-only": false, "file": "file_ovmf_vars"}

      -machine q35,pflash0=drive_ovmf_code,pflash1=drive_ovmf_vars -device

      {"id": "pcie-root-port-0", "driver": "pcie-root-port", "multifunction": true, "bus": "pcie.0", "addr": "0x1", "chassis": 1}

      -device

      {"id": "pcie-pci-bridge-0", "driver": "pcie-pci-bridge", "addr": "0x0", "bus": "pcie-root-port-0"}

      -nodefaults -device

      {"driver": "VGA", "bus": "pcie.0", "addr": "0x2"}

      -m 29696 -object

      {"size": 31138512896, "mem-path": "/dev/shm", "share": true, "id": "mem-mem1", "qom-type": "memory-backend-file"}

      -smp 40,maxcpus=40,cores=20,threads=1,dies=1,sockets=2 -numa node,memdev=mem-mem1,nodeid=0 -cpu Icelake-Server,ds=on,ss=on,dtes64=on,vmx=on,pdcm=on,hypervisor=on,tsc-adjust=on,avx512ifma=on,sha-ni=on,rdpid=on,fsrm=on,md-clear=on,stibp=on,flush-l1d=on,arch-capabilities=on,xsaves=on,ibpb=on,ibrs=on,amd-stibp=on,amd-ssbd=on,rdctl-no=on,ibrs-all=on,skip-l1dfl-vmentry=on,mds-no=on,pschange-mc-no=on,tsx-ctrl=on,fb-clear=on,hle=off,rtm=off,mpx=off,intel-pt=off,hv_stimer,hv_synic,hv_vpindex,hv_relaxed,hv_spinlocks=0x1fff,hv_vapic,hv_time,hv_frequencies,hv_runtime,hv_tlbflush,hv_reenlightenment,hv_stimer_direct,hv_ipi,hv_tlbflush_ext,kvm_pv_unhalt=on -chardev socket,wait=off,server=on,path=/var/tmp/avocado_syyrigyy/monitor-qmpmonitor1-20231206-040546-BrTRYjkN,id=qmp_id_qmpmonitor1 -mon chardev=qmp_id_qmpmonitor1,mode=control -chardev socket,wait=off,server=on,path=/var/tmp/avocado_syyrigyy/monitor-catch_monitor-20231206-040546-BrTRYjkN,id=qmp_id_catch_monitor -mon chardev=qmp_id_catch_monitor,mode=control -device

      {"ioport": 1285, "driver": "pvpanic", "id": "idlgX4go"}

      -chardev socket,wait=off,server=on,path=/var/tmp/avocado_syyrigyy/serial-serial0-20231206-040546-BrTRYjkN,id=chardev_serial0 -device

      {"id": "serial0", "driver": "isa-serial", "chardev": "chardev_serial0"}

      -chardev socket,id=seabioslog_id_20231206-040546-BrTRYjkN,path=/var/tmp/avocado_syyrigyy/seabios-20231206-040546-BrTRYjkN,server=on,wait=off -device isa-debugcon,chardev=seabioslog_id_20231206-040546-BrTRYjkN,iobase=0x402 -device

      {"id": "pcie-root-port-1", "port": 1, "driver": "pcie-root-port", "addr": "0x1.0x1", "bus": "pcie.0", "chassis": 2}

      -device

      {"driver": "qemu-xhci", "id": "usb1", "bus": "pcie-root-port-1", "addr": "0x0"}

      -device

      {"driver": "usb-tablet", "id": "usb-tablet1", "bus": "usb1.0", "port": "1"}

      -device

      {"id": "pcie-root-port-2", "port": 2, "driver": "pcie-root-port", "addr": "0x1.0x2", "bus": "pcie.0", "chassis": 3}

      -device

      {"id": "virtio_scsi_pci0", "driver": "virtio-scsi-pci", "bus": "pcie-root-port-2", "addr": "0x0"}

      -blockdev {"node-name": "file_image1", "driver": "file", "auto-read-only": true, "discard": "unmap", "aio": "threads", "filename": "/home/kvm_autotest_root/images/win11-64-virtio-scsi-ovmf_avocado-vt-vm1.qcow2", "cache": {"direct": true, "no-flush": false}} -blockdev {"node-name": "drive_image1", "driver": "qcow2", "read-only": false, "cache":

      {"direct": true, "no-flush": false}

      , "file": "file_image1"} -device

      {"driver": "scsi-hd", "id": "image1", "drive": "drive_image1", "write-cache": "on"}

      -chardev socket,id=char_virtiofs_fs1,path=/var/tmp/avocado_syyrigyy/avocado-vt-vm1-fs1-virtiofsd.sock -device

      {"id": "pcie-root-port-3", "port": 3, "driver": "pcie-root-port", "addr": "0x1.0x3", "bus": "pcie.0", "chassis": 4}

      -device

      {"id": "vufs_virtiofs_fs1", "chardev": "char_virtiofs_fs1", "tag": "myfs1", "queue-size": 1024, "driver": "vhost-user-fs-pci", "bus": "pcie-root-port-3", "addr": "0x0"}

      -chardev socket,id=char_virtiofs_fs2,path=/var/tmp/avocado_syyrigyy/avocado-vt-vm1-fs2-virtiofsd.sock -device

      {"id": "pcie-root-port-4", "port": 4, "driver": "pcie-root-port", "addr": "0x1.0x4", "bus": "pcie.0", "chassis": 5}

      -device

      {"id": "vufs_virtiofs_fs2", "chardev": "char_virtiofs_fs2", "tag": "myfs2", "queue-size": 1024, "driver": "vhost-user-fs-pci", "bus": "pcie-root-port-4", "addr": "0x0"}

      -chardev socket,id=char_virtiofs_fs3,path=/var/tmp/avocado_syyrigyy/avocado-vt-vm1-fs3-virtiofsd.sock -device

      {"id": "pcie-root-port-5", "port": 5, "driver": "pcie-root-port", "addr": "0x1.0x5", "bus": "pcie.0", "chassis": 6}

      -device

      {"id": "vufs_virtiofs_fs3", "chardev": "char_virtiofs_fs3", "tag": "myfs3", "queue-size": 1024, "driver": "vhost-user-fs-pci", "bus": "pcie-root-port-5", "addr": "0x0"}

      -chardev socket,id=char_virtiofs_fs4,path=/var/tmp/avocado_syyrigyy/avocado-vt-vm1-fs4-virtiofsd.sock -device

      {"id": "pcie-root-port-6", "port": 6, "driver": "pcie-root-port", "addr": "0x1.0x6", "bus": "pcie.0", "chassis": 7}

      -device

      {"id": "vufs_virtiofs_fs4", "chardev": "char_virtiofs_fs4", "tag": "myfs4", "queue-size": 1024, "driver": "vhost-user-fs-pci", "bus": "pcie-root-port-6", "addr": "0x0"}

      -chardev socket,id=char_virtiofs_fs5,path=/var/tmp/avocado_syyrigyy/avocado-vt-vm1-fs5-virtiofsd.sock -device

      {"id": "pcie-root-port-7", "port": 7, "driver": "pcie-root-port", "addr": "0x1.0x7", "bus": "pcie.0", "chassis": 8}

      -device

      {"id": "vufs_virtiofs_fs5", "chardev": "char_virtiofs_fs5", "tag": "myfs5", "queue-size": 1024, "driver": "vhost-user-fs-pci", "bus": "pcie-root-port-7", "addr": "0x0"}

      -device

      {"id": "pcie-root-port-8", "port": 8, "driver": "pcie-root-port", "multifunction": true, "bus": "pcie.0", "addr": "0x3", "chassis": 9}

      -device

      {"driver": "virtio-net-pci", "mac": "9a:ee:52:61:01:0b", "id": "idwbD6Yn", "netdev": "idD17iFX", "bus": "pcie-root-port-8", "addr": "0x0"}

      -netdev tap,id=idD17iFX,vhost=on,vhostfd=16,fd=13 -blockdev {"node-name": "file_cd1", "driver": "file", "auto-read-only": true, "discard": "unmap", "aio": "threads", "filename": "/home/kvm_autotest_root/iso/windows/winutils.iso", "cache": {"direct": true, "no-flush": false}} -blockdev {"node-name": "drive_cd1", "driver": "raw", "read-only": true, "cache":

      {"direct": true, "no-flush": false}

      , "file": "file_cd1"} -device

      {"driver": "scsi-cd", "id": "cd1", "drive": "drive_cd1", "write-cache": "on"}

      -blockdev {"node-name": "file_virtio", "driver": "file", "auto-read-only": true, "discard": "unmap", "aio": "threads", "filename": "/home/kvm_autotest_root/iso/windows/virtio-win-prewhql-0.1-242.iso", "cache": {"direct": true, "no-flush": false}} -blockdev {"node-name": "drive_virtio", "driver": "raw", "read-only": true, "cache":

      {"direct": true, "no-flush": false}

      , "file": "file_virtio"} -device

      {"driver": "scsi-cd", "id": "virtio", "drive": "drive_virtio", "write-cache": "on"}

      -vnc :0 -rtc base=localtime,clock=host,driftfix=slew -boot menu=off,order=cdn,once=c,strict=off -chardev socket,id=char_vtpm_avocado-vt-vm1_tpm0,path=/root/avocado/data/avocado-vt/swtpm/avocado-vt-vm1_tpm0_swtpm.sock -tpmdev emulator,chardev=char_vtpm_avocado-vt-vm1_tpm0,id=emulator_vtpm_avocado-vt-vm1_tpm0 -device

      {"id": "tpm-crb_vtpm_avocado-vt-vm1_tpm0", "tpmdev": "emulator_vtpm_avocado-vt-vm1_tpm0", "driver": "tpm-crb"}

      -enable-kvm -device

      {"id": "pcie_extra_root_port_0", "driver": "pcie-root-port", "multifunction": true, "bus": "pcie.0", "addr": "0x4", "chassis": 10}

            kkostiuk Konstantin Kostiuk
            rhn-support-xiagao Xiaoling Gao
            Meirav Dean Meirav Dean
            Xiaoling Gao Xiaoling Gao
            Votes:
            0 Vote for this issue
            Watchers:
            7 Start watching this issue

              Created:
              Updated:
              Resolved: