Bug
Resolution: Unresolved
rhel-sst-network-fastdatapath
ssg_networking
Description of problem:
Running the OVS-DPDK vhostuser PVP case on a ConnectX-7 (mlx5_core) NIC, with mtu_request=9218 set on the dpdk and vhostuser ports, gives 0 throughput when the traffic uses 9200-byte packets, while 9196-byte packets reach the normal ~1.8 Mpps.
Version-Release number of selected component (if applicable):
5.14.0-284.57.1.el9_2.x86_64
openvswitch3.2-3.2.0-40.el9fdp.x86_64
[root@netqe03 ~]# ethtool -i enp13s0np0
driver: mlx5_core
version: 5.14.0-284.57.1.el9_2.x86_64
firmware-version: 28.37.1014 (MT_0000000909)
expansion-rom-version:
bus-info: 0000:0d:00.0
supports-statistics: yes
supports-test: yes
supports-eeprom-access: no
supports-register-dump: no
supports-priv-flags: yes
[root@netqe03 ~]# lspci|grep ConnectX-7
0d:00.0 Ethernet controller: Mellanox Technologies MT2910 Family [ConnectX-7]
How reproducible:
Steps to Reproduce:
Run the OVS-DPDK vhostuser PVP case on the DUT server:
1. Build the OVS-DPDK bridge and add the DPDK and vhostuser ports to it (a sketch of the creation commands follows the MTU settings below).
[root@netqe03 ~]# ovs-vsctl show
ccfa4996-fe36-4403-a098-07d9f6c62e6f
Bridge ovsbr0
datapath_type: netdev
Port vhost1
Interface vhost1
type: dpdkvhostuserclient
options: {vhost-server-path="/tmp/vhostuser/vhost1"}
Port dpdk0
Interface dpdk0
type: dpdk
options: {dpdk-devargs="0000:0d:00.0", n_rxq="4", n_rxq_desc="1024", n_txq_desc="1024"}
Port ovsbr0
Interface ovsbr0
type: internal
Port vhost0
Interface vhost0
type: dpdkvhostuserclient
options: {vhost-server-path="/tmp/vhostuser/vhost0"}
ovs_version: "3.2.2"
[root@netqe03 ~]# ovs-vsctl set interface dpdk0 mtu_request=9218
[root@netqe03 ~]# ovs-vsctl set interface vhost0 mtu_request=9218
[root@netqe03 ~]# ovs-vsctl set interface vhost1 mtu_request=9218
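For reference, the bridge and ports above can be recreated with commands along these lines; the dpdk-init and dpdk-socket-mem values are assumptions and may differ from what was actually used on this DUT:
# enable DPDK in OVS (socket-mem value is illustrative)
ovs-vsctl set Open_vSwitch . other_config:dpdk-init=true
ovs-vsctl set Open_vSwitch . other_config:dpdk-socket-mem=4096
# userspace (netdev) bridge
ovs-vsctl add-br ovsbr0 -- set bridge ovsbr0 datapath_type=netdev
# physical DPDK port (ConnectX-7 PF at 0000:0d:00.0)
ovs-vsctl add-port ovsbr0 dpdk0 -- set Interface dpdk0 type=dpdk \
    options:dpdk-devargs=0000:0d:00.0 options:n_rxq=4 \
    options:n_rxq_desc=1024 options:n_txq_desc=1024
# vhost-user client ports consumed by the guest
ovs-vsctl add-port ovsbr0 vhost0 -- set Interface vhost0 type=dpdkvhostuserclient \
    options:vhost-server-path=/tmp/vhostuser/vhost0
ovs-vsctl add-port ovsbr0 vhost1 -- set Interface vhost1 type=dpdkvhostuserclient \
    options:vhost-server-path=/tmp/vhostuser/vhost1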
2. Start the guest with the following guest XML:
root@netqe03 ~]# virsh dumpxml g1 <domain type='kvm' id='1'> <name>g1</name> <uuid>7c31b8ea-9d35-4292-a1fe-70258deea922</uuid> <memory unit='KiB'>8388608</memory> <currentMemory unit='KiB'>8388608</currentMemory> <memoryBacking> <hugepages> <page size='1048576' unit='KiB'/> </hugepages> <locked/> <access mode='shared'/> </memoryBacking> <vcpu placement='static'>9</vcpu> <cputune> <vcpupin vcpu='0' cpuset='10'/> <vcpupin vcpu='1' cpuset='72'/> <vcpupin vcpu='2' cpuset='8'/> <vcpupin vcpu='3' cpuset='70'/> <vcpupin vcpu='4' cpuset='6'/> <vcpupin vcpu='5' cpuset='68'/> <vcpupin vcpu='6' cpuset='4'/> <vcpupin vcpu='7' cpuset='66'/> <vcpupin vcpu='8' cpuset='2'/> <emulatorpin cpuset='0,64'/> </cputune> <numatune> <memory mode='strict' nodeset='0'/> </numatune> <resource> <partition>/machine</partition> </resource> <os> <type arch='x86_64' machine='pc-q35-rhel9.2.0'>hvm</type> <boot dev='hd'/> </os> <features> <acpi/> <apic/> <pmu state='off'/> <vmport state='off'/> <ioapic driver='qemu'/> </features> <cpu mode='host-passthrough' check='none' migratable='on'> <feature policy='require' name='tsc-deadline'/> <numa> <cell id='0' cpus='0-8' memory='8388608' unit='KiB' memAccess='shared'/> </numa> </cpu> <clock offset='utc'> <timer name='rtc' tickpolicy='catchup'/> <timer name='pit' tickpolicy='delay'/> <timer name='hpet' present='no'/> </clock> <on_poweroff>destroy</on_poweroff> <on_reboot>restart</on_reboot> <on_crash>restart</on_crash> <pm> <suspend-to-mem enabled='no'/> <suspend-to-disk enabled='no'/> </pm> <devices> <emulator>/usr/libexec/qemu-kvm</emulator> <disk type='file' device='disk'> <driver name='qemu' type='qcow2'/> <source file='/var/lib/libvirt/images/g1.qcow2' index='1'/> <backingStore/> <target dev='vda' bus='virtio'/> <alias name='virtio-disk0'/> <address type='pci' domain='0x0000' bus='0x01' slot='0x00' function='0x0'/> </disk> <controller type='usb' index='0' model='none'> <alias name='usb'/> </controller> <controller type='pci' index='0' model='pcie-root'> <alias name='pcie.0'/> </controller> <controller type='pci' index='1' model='pcie-root-port'> <model name='pcie-root-port'/> <target chassis='1' port='0x10'/> <alias name='pci.1'/> <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/> </controller> <controller type='pci' index='2' model='pcie-root-port'> <model name='pcie-root-port'/> <target chassis='2' port='0x11'/> <alias name='pci.2'/> <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/> </controller> <controller type='pci' index='3' model='pcie-root-port'> <model name='pcie-root-port'/> <target chassis='3' port='0x8'/> <alias name='pci.3'/> <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/> </controller> <controller type='pci' index='4' model='pcie-root-port'> <model name='pcie-root-port'/> <target chassis='4' port='0x9'/> <alias name='pci.4'/> <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/> </controller> <controller type='pci' index='5' model='pcie-root-port'> <model name='pcie-root-port'/> <target chassis='5' port='0xa'/> <alias name='pci.5'/> <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/> </controller> <controller type='pci' index='6' model='pcie-root-port'> <model name='pcie-root-port'/> <target chassis='6' port='0xb'/> <alias name='pci.6'/> <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/> </controller> <controller type='sata' index='0'> <alias name='ide'/> <address type='pci' domain='0x0000' bus='0x00' slot='0x1f' 
function='0x2'/> </controller> <interface type='bridge'> <mac address='52:54:00:01:02:03'/> <source bridge='virbr0'/> <target dev='vnet0'/> <model type='virtio'/> <alias name='net0'/> <address type='pci' domain='0x0000' bus='0x02' slot='0x00' function='0x0'/> </interface> <interface type='vhostuser'> <mac address='00:de:ad:00:00:01'/> <source type='unix' path='/tmp/vhostuser/vhost0' mode='server'/> <target dev='vhost0'/> <model type='virtio'/> <driver name='vhost' queues='4' rx_queue_size='1024' tx_queue_size='1024' iommu='off' ats='off'> <host mrg_rxbuf='off'/> </driver> <alias name='net1'/> <address type='pci' domain='0x0000' bus='0x03' slot='0x00' function='0x0'/> </interface> <interface type='vhostuser'> <mac address='00:de:ad:00:00:02'/> <source type='unix' path='/tmp/vhostuser/vhost1' mode='server'/> <target dev='vhost1'/> <model type='virtio'/> <driver name='vhost' queues='4' rx_queue_size='1024' tx_queue_size='1024' iommu='off' ats='off'> <host mrg_rxbuf='off'/> </driver> <alias name='net2'/> <address type='pci' domain='0x0000' bus='0x04' slot='0x00' function='0x0'/> </interface> <serial type='pty'> <source path='/dev/pts/2'/> <target type='isa-serial' port='0'> <model name='isa-serial'/> </target> <alias name='serial0'/> </serial> <console type='pty' tty='/dev/pts/2'> <source path='/dev/pts/2'/> <target type='serial' port='0'/> <alias name='serial0'/> </console> <input type='mouse' bus='ps2'> <alias name='input0'/> </input> <input type='keyboard' bus='ps2'> <alias name='input1'/> </input> <graphics type='vnc' port='5900' autoport='yes' listen='0.0.0.0'> <listen type='address' address='0.0.0.0'/> </graphics> <audio id='1' type='none'/> <video> <model type='cirrus' vram='16384' heads='1' primary='yes'/> <alias name='video0'/> <address type='pci' domain='0x0000' bus='0x05' slot='0x00' function='0x0'/> </video> <memballoon model='virtio'> <alias name='balloon0'/> <address type='pci' domain='0x0000' bus='0x06' slot='0x00' function='0x0'/> </memballoon> <iommu model='intel'> <driver intremap='on' caching_mode='on' iotlb='on'/> <alias name='iommu0'/> </iommu> </devices> <seclabel type='dynamic' model='selinux' relabel='yes'> <label>system_u:system_r:svirt_t:s0:c30,c570</label> <imagelabel>system_u:object_r:svirt_image_t:s0:c30,c570</imagelabel> </seclabel> <seclabel type='dynamic' model='dac' relabel='yes'> <label>+107:+985</label> <imagelabel>+107:+985</imagelabel> </seclabel> </domain>
3. Start testpmd forwarding inside the guest (a port-info check is sketched after the testpmd output below):
[root@localhost ~]# dpdk-testpmd -l 0-8 -n 1 --socket-mem 1024 -- -i --forward-mode=io --burst=32 --rxd=8192 --txd=8192 --max-pkt-len=9600 --mbuf-size=9728 --nb-cores=8 --rxq=4 --txq=4 --mbcache=512 --auto-start
EAL: Detected CPU lcores: 98
EAL: Detected NUMA nodes: 1
EAL: Detected shared linkage of DPDK
EAL: Multi-process socket /var/run/dpdk/rte/mp_socket
EAL: Selected IOVA mode 'PA'
EAL: VFIO support initialized
EAL: Probe PCI driver: net_virtio (1af4:1041) device: 0000:02:00.0 (socket -1)
eth_virtio_pci_init(): Failed to init PCI device
EAL: Requested device 0000:02:00.0 cannot be used
EAL: Probe PCI driver: net_virtio (1af4:1041) device: 0000:03:00.0 (socket -1)
EAL: Using IOMMU type 1 (Type 1)
EAL: Probe PCI driver: net_virtio (1af4:1041) device: 0000:04:00.0 (socket -1)
TELEMETRY: No legacy callbacks, legacy socket not created
Interactive-mode selected
Set io packet forwarding mode
Auto-start selected
Warning: NUMA should be configured manually by using --port-numa-config and --ring-numa-config parameters along with --numa.
testpmd: create a new mbuf pool <mb_pool_0>: n=278528, size=9728, socket=0
testpmd: preferred mempool ops selected: ring_mp_mc
Configuring Port 0 (socket 0)
EAL: Error disabling MSI-X interrupts for fd 49
Port 0: 00:DE:AD:00:00:01
Configuring Port 1 (socket 0)
EAL: Error disabling MSI-X interrupts for fd 53
Port 1: 00:DE:AD:00:00:02
Checking link statuses...
Done
Start automatic packet forwarding
io packet forwarding - ports=2 - cores=8 - streams=8 - NUMA support enabled, MP allocation mode: native
Logical Core 1 (socket 0) forwards packets on 1 streams:
RX P=0/Q=0 (socket 0) -> TX P=1/Q=0 (socket 0) peer=02:00:00:00:00:01
Logical Core 2 (socket 0) forwards packets on 1 streams:
RX P=1/Q=0 (socket 0) -> TX P=0/Q=0 (socket 0) peer=02:00:00:00:00:00
Logical Core 3 (socket 0) forwards packets on 1 streams:
RX P=0/Q=1 (socket 0) -> TX P=1/Q=1 (socket 0) peer=02:00:00:00:00:01
Logical Core 4 (socket 0) forwards packets on 1 streams:
RX P=1/Q=1 (socket 0) -> TX P=0/Q=1 (socket 0) peer=02:00:00:00:00:00
Logical Core 5 (socket 0) forwards packets on 1 streams:
RX P=0/Q=2 (socket 0) -> TX P=1/Q=2 (socket 0) peer=02:00:00:00:00:01
Logical Core 6 (socket 0) forwards packets on 1 streams:
RX P=1/Q=2 (socket 0) -> TX P=0/Q=2 (socket 0) peer=02:00:00:00:00:00
Logical Core 7 (socket 0) forwards packets on 1 streams:
RX P=0/Q=3 (socket 0) -> TX P=1/Q=3 (socket 0) peer=02:00:00:00:00:01
Logical Core 8 (socket 0) forwards packets on 1 streams:
RX P=1/Q=3 (socket 0) -> TX P=0/Q=3 (socket 0) peer=02:00:00:00:00:00
io packet forwarding packets/burst=32
nb forwarding cores=8 - nb forwarding ports=2
port 0: RX queue number: 4 Tx queue number: 4
Rx offloads=0x0 Tx offloads=0x0
RX queue: 0
RX desc=8192 - RX free threshold=0
RX threshold registers: pthresh=0 hthresh=0 wthresh=0
RX Offloads=0x0
TX queue: 0
TX desc=8192 - TX free threshold=0
TX threshold registers: pthresh=0 hthresh=0 wthresh=0
TX offloads=0x0 - TX RS bit threshold=0
port 1: RX queue number: 4 Tx queue number: 4
Rx offloads=0x0 Tx offloads=0x0
RX queue: 0
RX desc=8192 - RX free threshold=0
RX threshold registers: pthresh=0 hthresh=0 wthresh=0
RX Offloads=0x0
TX queue: 0
TX desc=8192 - TX free threshold=0
TX threshold registers: pthresh=0 hthresh=0 wthresh=0
TX offloads=0x0 - TX RS bit threshold=0
testpmd>
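From the same testpmd prompt, the MTU and maximum RX packet length negotiated on the virtio ports can be confirmed (a minimal check, not part of the original run):
testpmd> show port info 0
testpmd> show port info 1
testpmd> show port stats all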
4. On the TRex server, send traffic with 9200-byte packets (a frame-size sweep sketch follows the command below).
./binary-search.py --traffic-generator=trex-txrx --frame-size=9200 --num-flows=1024 --max-loss-pct=0 --search-runtime=10 --validation-runtime=10 --rate-tolerance=10 --runtime-tolerance=10 --rate=25 --rate-unit=% --duplicate-packet-failure=retry-to-fail --negative-packet-loss=retry-to-fail --warmup-trial --warmup-trial-runtime=10 --rate=25 --rate-unit=% --one-shot=0 --src-macs=10:00:00:00:00:00 --dst-macs=20:00:00:00:00:00 --use-src-ip-flows=0 --use-dst-ip-flows=0 --use-src-mac-flows=1 --use-dst-mac-flows=0 --warmup-trial --warmup-trial-runtime=30 --use-src-ip-flows=1 --use-dst-ip-flows=1 --use-src-mac-flows=1 --use-dst-mac-flows=0 --traffic-direction=unidirectional --device-pairs=0:0
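To narrow down the failing size boundary, the same script can be looped over nearby frame sizes; the sketch below reuses only a subset of the options shown above, and the sizes are chosen for illustration:
for size in 9188 9192 9196 9200 9204; do
    ./binary-search.py --traffic-generator=trex-txrx --frame-size=$size \
        --num-flows=1024 --max-loss-pct=0 --search-runtime=10 --validation-runtime=10 \
        --rate=25 --rate-unit=% --traffic-direction=unidirectional --device-pairs=0:0
done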
Actual results:
It gets 0 throughput when the traffic is sent with 9200-byte packets.
It gets the normal throughput (1.8 Mpps) when the traffic is sent with 9196-byte packets.
Detailed testpmd output:
testpmd> set verbose 1
src=10:00:00:00:ED:00 - dst=20:00:00:00:00:00 - pool=mb_pool_0 - type=0x0800 - length=9196 - nb_segs=1 - sw ptype: L2_ETHER L3_IPV4 L4_UDP - l2_len=14 - l3_len=20 - l4_len=8 - Receive queue=0x0
ol_flags: RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN RTE_MBUF_F_RX_OUTER_L4_CKSUM_UNKNOWN
src=10:00:00:00:F0:00 - dst=20:00:00:00:00:00 - pool=mb_pool_0 - type=0x0800 - length=9196 - nb_segs=1 - sw ptype: L2_ETHER L3_IPV4 L4_UDP - l2_len=14 - l3_len=20 - l4_len=8 - Receive queue=0x0
ol_flags: RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN RTE_MBUF_F_RX_OUTER_L4_CKSUM_UNKNOWN
src=10:00:00:00:F1:00 - dst=20:00:00:00:00:00 - pool=mb_pool_0 - type=0x0800 - length=9196 - nb_segs=1 - sw ptype: L2_ETHER L3_IPV4 L4_UDP - l2_len=14 - l3_len=20 - l4_len=8 - Receive queue=0x0
testpmd> stop
Telling cores to stop...
Waiting for lcores to finish...
------- Forward Stats for RX Port= 0/Queue= 0 -> TX Port= 1/Queue= 0 -------
RX-packets: 30174 TX-packets: 30174 TX-dropped: 0
------- Forward Stats for RX Port= 0/Queue= 1 -> TX Port= 1/Queue= 1 -------
RX-packets: 8576 TX-packets: 8576 TX-dropped: 0
------- Forward Stats for RX Port= 0/Queue= 2 -> TX Port= 1/Queue= 2 -------
RX-packets: 8932 TX-packets: 8932 TX-dropped: 0
------- Forward Stats for RX Port= 0/Queue= 3 -> TX Port= 1/Queue= 3 -------
RX-packets: 6148 TX-packets: 6148 TX-dropped: 0
---------------------- Forward statistics for port 0 ----------------------
RX-packets: 53830 RX-dropped: 0 RX-total: 53830
TX-packets: 0 TX-dropped: 0 TX-total: 0
----------------------------------------------------------------------------
---------------------- Forward statistics for port 1 ----------------------
RX-packets: 0 RX-dropped: 0 RX-total: 0
TX-packets: 53830 TX-dropped: 0 TX-total: 53830
----------------------------------------------------------------------------
+++++++++++++++ Accumulated forward statistics for all ports+++++++++++++++
RX-packets: 53830 RX-dropped: 0 RX-total: 53830
TX-packets: 53830 TX-dropped: 0 TX-total: 53830
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Done.
[root@netqe03 ~]# ovs-ofctl dump-flows ovsbr0
cookie=0x0, duration=15340.408s, table=0, n_packets=384729169, n_bytes=3536697745612, in_port=dpdk0 actions=output:vhost0
cookie=0x0, duration=15340.398s, table=0, n_packets=350755787, n_bytes=3224278361624, in_port=vhost1 actions=output:dpdk0
For the trex binary-search log, see the attachment.
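If additional data is needed, OVS-side counters for the zero-throughput case can be gathered on the DUT with standard commands such as the following (a sketch; run while the 9200-byte traffic is flowing):
# per-port datapath statistics, including rx/tx errors and drops
ovs-appctl dpctl/show -s
# PMD thread processing statistics
ovs-appctl dpif-netdev/pmd-stats-show
# per-interface counters stored in the OVSDB
ovs-vsctl get Interface dpdk0 statistics
ovs-vsctl get Interface vhost0 statistics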
Expected results:
It should get normal throughput when 9200-byte traffic is sent.