./attach_usb_devices.sh cifmw-compute-12nt3v30-0 10 ./attach_usb_devices.sh cifmw-compute-12nt3v30-1 10 [zuul@compute-12nt3v30-0 ~]$ lspci -nn | grep -i usb 00:06.0 USB controller [0c03]: Intel Corporation 82801I (ICH9 Family) USB UHCI Controller #1 [8086:2934] (rev 03) 00:06.1 USB controller [0c03]: Intel Corporation 82801I (ICH9 Family) USB UHCI Controller #2 [8086:2935] (rev 03) 00:06.2 USB controller [0c03]: Intel Corporation 82801I (ICH9 Family) USB UHCI Controller #3 [8086:2936] (rev 03) 00:06.7 USB controller [0c03]: Intel Corporation 82801I (ICH9 Family) USB2 EHCI Controller #1 [8086:293a] (rev 03) 09:00.0 USB controller [0c03]: Red Hat, Inc. QEMU XHCI Host Controller [1b36:000d] (rev 01) 0a:00.0 USB controller [0c03]: Red Hat, Inc. QEMU XHCI Host Controller [1b36:000d] (rev 01) 0b:00.0 USB controller [0c03]: Red Hat, Inc. QEMU XHCI Host Controller [1b36:000d] (rev 01) 0c:00.0 USB controller [0c03]: Red Hat, Inc. QEMU XHCI Host Controller [1b36:000d] (rev 01) 0d:00.0 USB controller [0c03]: Red Hat, Inc. QEMU XHCI Host Controller [1b36:000d] (rev 01) 0e:00.0 USB controller [0c03]: Red Hat, Inc. QEMU XHCI Host Controller [1b36:000d] (rev 01) 0f:00.0 USB controller [0c03]: Red Hat, Inc. QEMU XHCI Host Controller [1b36:000d] (rev 01) 10:00.0 USB controller [0c03]: Red Hat, Inc. QEMU XHCI Host Controller [1b36:000d] (rev 01) 11:00.0 USB controller [0c03]: Red Hat, Inc. QEMU XHCI Host Controller [1b36:000d] (rev 01) 12:00.0 USB controller [0c03]: Red Hat, Inc. QEMU XHCI Host Controller [1b36:000d] (rev 01) 13:00.0 USB controller [0c03]: Red Hat, Inc. QEMU XHCI Host Controller [1b36:000d] (rev 01) 14:00.0 USB controller [0c03]: Red Hat, Inc. QEMU XHCI Host Controller [1b36:000d] (rev 01) 15:00.0 USB controller [0c03]: Red Hat, Inc. 
QEMU XHCI Host Controller [1b36:000d] (rev 01) [zuul@compute-12nt3v30-1 ~]$ lspci -nn | grep -i usb 00:06.0 USB controller [0c03]: Intel Corporation 82801I (ICH9 Family) USB UHCI Controller #1 [8086:2934] (rev 03) 00:06.1 USB controller [0c03]: Intel Corporation 82801I (ICH9 Family) USB UHCI Controller #2 [8086:2935] (rev 03) 00:06.2 USB controller [0c03]: Intel Corporation 82801I (ICH9 Family) USB UHCI Controller #3 [8086:2936] (rev 03) 00:06.7 USB controller [0c03]: Intel Corporation 82801I (ICH9 Family) USB2 EHCI Controller #1 [8086:293a] (rev 03) 09:00.0 USB controller [0c03]: Red Hat, Inc. QEMU XHCI Host Controller [1b36:000d] (rev 01) 0a:00.0 USB controller [0c03]: Red Hat, Inc. QEMU XHCI Host Controller [1b36:000d] (rev 01) 0b:00.0 USB controller [0c03]: Red Hat, Inc. QEMU XHCI Host Controller [1b36:000d] (rev 01) 0c:00.0 USB controller [0c03]: Red Hat, Inc. QEMU XHCI Host Controller [1b36:000d] (rev 01) 0d:00.0 USB controller [0c03]: Red Hat, Inc. QEMU XHCI Host Controller [1b36:000d] (rev 01) 0e:00.0 USB controller [0c03]: Red Hat, Inc. QEMU XHCI Host Controller [1b36:000d] (rev 01) 0f:00.0 USB controller [0c03]: Red Hat, Inc. QEMU XHCI Host Controller [1b36:000d] (rev 01) 10:00.0 USB controller [0c03]: Red Hat, Inc. QEMU XHCI Host Controller [1b36:000d] (rev 01) 11:00.0 USB controller [0c03]: Red Hat, Inc. 
QEMU XHCI Host Controller [1b36:000d] (rev 01) [pci] alias = {"name": "usb_passthrough", "vendor_id":"1b36", "product_id":"000d"} [filter_scheduler] pci_in_placement = True [pci] device_spec = {"address": "0000:0a:00.0", "vendor_id":"1b36", "product_id":"000d"} device_spec = {"address": "0000:0b:00.0", "vendor_id":"1b36", "product_id":"000d"} device_spec = {"address": "0000:0c:00.0", "vendor_id":"1b36", "product_id":"000d"} device_spec = {"address": "0000:0d:00.0", "vendor_id":"1b36", "product_id":"000d"} device_spec = {"address": "0000:0e:00.0", "vendor_id":"1b36", "product_id":"000d"} device_spec = {"address": "0000:0f:00.0", "vendor_id":"1b36", "product_id":"000d"} device_spec = {"address": "0000:09:00.0", "vendor_id":"1b36", "product_id":"000d"} device_spec = {"address": "0000:10:00.0", "vendor_id":"1b36", "product_id":"000d"} device_spec = {"address": "0000:11:00.0", "vendor_id":"1b36", "product_id":"000d"} device_spec = {"address": "0000:12:00.0", "vendor_id":"1b36", "product_id":"000d"} device_spec = {"address": "0000:13:00.0", "vendor_id":"1b36", "product_id":"000d"} device_spec = {"address": "0000:14:00.0", "vendor_id":"1b36", "product_id":"000d"} device_spec = {"address": "0000:15:00.0", "vendor_id":"1b36", "product_id":"000d"} alias = {"name": "usb_passthrough", "vendor_id": "1b36", "product_id": "000d"} report_in_placement = True [pci] device_spec = {"address": "0000:0a:00.0", "vendor_id":"1b36", "product_id":"000d"} device_spec = {"address": "0000:0b:00.0", "vendor_id":"1b36", "product_id":"000d"} device_spec = {"address": "0000:0c:00.0", "vendor_id":"1b36", "product_id":"000d"} device_spec = {"address": "0000:0d:00.0", "vendor_id":"1b36", "product_id":"000d"} device_spec = {"address": "0000:0e:00.0", "vendor_id":"1b36", "product_id":"000d"} device_spec = {"address": "0000:0f:00.0", "vendor_id":"1b36", "product_id":"000d"} device_spec = {"address": "0000:09:00.0", "vendor_id":"1b36", "product_id":"000d"} device_spec = {"address": "0000:10:00.0", 
"vendor_id":"1b36", "product_id":"000d"} device_spec = {"address": "0000:11:00.0", "vendor_id":"1b36", "product_id":"000d"} alias = {"name": "usb_passthrough", "vendor_id": "1b36", "product_id": "000d"} report_in_placement = True [zuul@controller-0 ~]$ oc get osctlplane controlplane -o json | jq -r .spec.nova.template.apiServiceTemplate.customServiceConfig [pci] alias = {"name": "usb_passthrough", "vendor_id":"1b36", "product_id":"000d"} [filter_scheduler] pci_in_placement = True [zuul@controller-0 ~]$ oc get osctlplane controlplane -o json | jq -r .spec.nova.template.cellTemplates.cell0.conductorServiceTemplate.customServiceConfig [filter_scheduler] pci_in_placement = True [zuul@controller-0 ~]$ oc get osctlplane controlplane -o json | jq -r .spec.nova.template.cellTemplates.cell1.conductorServiceTemplate.customServiceConfig [filter_scheduler] pci_in_placement = True [zuul@controller-0 ~]$ oc get osctlplane controlplane -o json | jq -r .spec.nova.template.schedulerServiceTemplate.customServiceConfig [filter_scheduler] pci_in_placement = True sh-5.1$ openstack flavor list +--------------------------------------+-------------------+-----+------+-----------+-------+-----------+ | ID | Name | RAM | Disk | Ephemeral | VCPUs | Is Public | +--------------------------------------+-------------------+-----+------+-----------+-------+-----------+ | 06703acc-30a8-415e-b973-82a21ed9d0e1 | m1.nano | 128 | 1 | 0 | 1 | True | | 100 | customized_flavor | 512 | 10 | 0 | 1 | True | | 210fbdcc-94cf-428a-a7ad-0d22bf4996a3 | m1.micro | 192 | 1 | 0 | 1 | True | | 57b0ce71-8a41-441f-a703-23a7cf157552 | aC7HLfAGVvam6PJN | 128 | 0 | 0 | 1 | True | | e3837a6e-6942-4bb3-b161-ed30f05f6815 | gxpdjXIuKnIrJ8Aw | 256 | 0 | 0 | 1 | True | +--------------------------------------+-------------------+-----+------+-----------+-------+-----------+ sh-5.1$ openstack flavor create flav-osprh-20355 --ram 256 --disk 1 --vcpu 2 --property pci_passthrough:alias='usb_passthrough:8' 
+----------------------------+-------------------------------------------+ | Field | Value | +----------------------------+-------------------------------------------+ | OS-FLV-DISABLED:disabled | False | | OS-FLV-EXT-DATA:ephemeral | 0 | | description | None | | disk | 1 | | id | 7ce4e960-f8b3-418b-81d5-d9005a086c07 | | name | flav-osprh-20355 | | os-flavor-access:is_public | True | | properties | pci_passthrough:alias='usb_passthrough:8' | | ram | 256 | | rxtx_factor | 1.0 | | swap | | | vcpus | 2 | +----------------------------+-------------------------------------------+ sh-5.1$ openstack image list +--------------------------------------+-----------------------------------------------------------+--------+ | ID | Name | Status | +--------------------------------------+-----------------------------------------------------------+--------+ | 59133330-e80c-4573-af3d-bdc70a9dc518 | tobiko.openstack.stacks._advanced_vm.AdvancedImageFixture | active | | 0386b298-83a8-4f02-8deb-b15d746dd64a | tobiko.openstack.stacks._cirros.CirrosImageFixture | active | +--------------------------------------+-----------------------------------------------------------+--------+ sh-5.1$ openstack network list +--------------------------------------+-------------------------------------------------------------------------------------------+----------------------------------------------------------------------------+ | ID | Name | Subnets | +--------------------------------------+-------------------------------------------------------------------------------------------+----------------------------------------------------------------------------+ | 07102d04-4296-4050-860a-b5838c894986 | tobiko.openstack.stacks._neutron.NetworkWithNetMtuWriteStackFixture-_network-njj4tx6l7ghk | a536eb8d-e39d-46b5-b55a-e733e50701d8, f62f2740-2448-4546-9cf8-05154c3b5c74 | | 267dd86e-321e-44f4-959a-06997df0ef97 | tobiko.openstack.stacks._vlan.VlanNetworkStackFixture-_network-4lonb6mkpv4l | 
3fbffbb3-ae4c-4ebe-9293-9566375ade08, 47ad51b3-5d12-459f-8ca2-636c98d96d73 | | 72dd5237-1717-4b30-aef1-de5a23249d86 | tobiko.openstack.stacks._neutron.NetworkStackFixture-_network-6h4zvlrad3sq | 1b9f63c8-788a-43cd-a053-49e492f02560, d588bfe4-3992-4899-8325-2c7659897048 | | 9f1fa133-acb9-4205-a029-062caaf594a6 | heat_tempestconf_network | 36d0f751-1371-4342-8b1e-1510cdaf1e86 | | d3803d5f-c5d2-4f13-afc1-138d0fd82f2e | tobiko.openstack.stacks._qos.QosNetworkStackFixture-_network-wm7cox4cdo3o | 5296dba3-0367-4249-984a-e8255cd16680, 7fffb8d3-6ad9-4762-bdb5-ca716c8e53b6 | | d8e40424-ebdd-4765-8110-a565f76a0c95 | public | 13a235ed-3c1d-4167-936c-daf3a4c65a31 | +--------------------------------------+-------------------------------------------------------------------------------------------+----------------------------------------------------------------------------+ sh-5.1$ openstack server create --image 59133330-e80c-4573-af3d-bdc70a9dc518 --flavor flav-osprh-20355 --network public guest-osprh-20355 --wait Error creating server: guest-osprh-20355 Error creating server 2025-10-27 15:00:52.592 1 DEBUG nova.scheduler.manager [None req-4dd3ead2-fc79-44ef-9b2a-4900e6f07180 86abb11a873243f89bc996748fddaebf 7a56f755038f4ac6a7452b9b983ffda3 - - default default] Starting to schedule for instances: ['eaf0cd07-158d-4f83-bb9a-9ec64fea1e58'] select_destinations /usr/lib/python3.9/site-packages/nova/scheduler/manager.py:142^[[00m 2025-10-27 15:00:52.630 1 DEBUG nova.scheduler.request_filter [None req-4dd3ead2-fc79-44ef-9b2a-4900e6f07180 86abb11a873243f89bc996748fddaebf 7a56f755038f4ac6a7452b9b983ffda3 - - default default] Request filter 'require_tenant_aggregate' took 0.0 seconds wrapper /usr/lib/python3.9/site-packages/nova/scheduler/request_filter.py:46^[[00m 2025-10-27 15:00:52.631 1 DEBUG nova.scheduler.request_filter [None req-4dd3ead2-fc79-44ef-9b2a-4900e6f07180 86abb11a873243f89bc996748fddaebf 7a56f755038f4ac6a7452b9b983ffda3 - - default default] require_image_type_support 
request filter added required trait COMPUTE_IMAGE_TYPE_QCOW2 require_image_type_support /usr/lib/python3.9/site-packages/nova/scheduler/request_filter.py:194^[[00m 2025-10-27 15:00:52.632 1 DEBUG nova.scheduler.request_filter [None req-4dd3ead2-fc79-44ef-9b2a-4900e6f07180 86abb11a873243f89bc996748fddaebf 7a56f755038f4ac6a7452b9b983ffda3 - - default default] Request filter 'require_image_type_support' took 0.0 seconds wrapper /usr/lib/python3.9/site-packages/nova/scheduler/request_filter.py:46^[[00m 2025-10-27 15:00:52.633 1 DEBUG nova.scheduler.request_filter [None req-4dd3ead2-fc79-44ef-9b2a-4900e6f07180 86abb11a873243f89bc996748fddaebf 7a56f755038f4ac6a7452b9b983ffda3 - - default default] compute_status_filter request filter added forbidden trait COMPUTE_STATUS_DISABLED compute_status_filter /usr/lib/python3.9/site-packages/nova/scheduler/request_filter.py:255^[[00m 2025-10-27 15:00:52.633 1 DEBUG nova.scheduler.request_filter [None req-4dd3ead2-fc79-44ef-9b2a-4900e6f07180 86abb11a873243f89bc996748fddaebf 7a56f755038f4ac6a7452b9b983ffda3 - - default default] Request filter 'compute_status_filter' took 0.0 seconds wrapper /usr/lib/python3.9/site-packages/nova/scheduler/request_filter.py:46^[[00m 2025-10-27 15:00:52.675 1 DEBUG nova.scheduler.request_filter [None req-4dd3ead2-fc79-44ef-9b2a-4900e6f07180 86abb11a873243f89bc996748fddaebf 7a56f755038f4ac6a7452b9b983ffda3 - - default default] Request filter 'isolate_aggregates' took 0.0 seconds wrapper /usr/lib/python3.9/site-packages/nova/scheduler/request_filter.py:46^[[00m 2025-10-27 15:00:52.676 1 DEBUG nova.scheduler.request_filter [None req-4dd3ead2-fc79-44ef-9b2a-4900e6f07180 86abb11a873243f89bc996748fddaebf 7a56f755038f4ac6a7452b9b983ffda3 - - default default] Request filter 'transform_image_metadata' took 0.0 seconds wrapper /usr/lib/python3.9/site-packages/nova/scheduler/request_filter.py:46^[[00m 2025-10-27 15:00:52.677 1 DEBUG nova.scheduler.request_filter [None req-4dd3ead2-fc79-44ef-9b2a-4900e6f07180 
86abb11a873243f89bc996748fddaebf 7a56f755038f4ac6a7452b9b983ffda3 - - default default] Request filter 'accelerators_filter' took 0.0 seconds wrapper /usr/lib/python3.9/site-packages/nova/scheduler/request_filter.py:46^[[00m 2025-10-27 15:00:54.697 1 DEBUG nova.scheduler.request_filter [None req-4dd3ead2-fc79-44ef-9b2a-4900e6f07180 86abb11a873243f89bc996748fddaebf 7a56f755038f4ac6a7452b9b983ffda3 - - default default] Request filter 'routed_networks_filter' took 2.0 seconds wrapper /usr/lib/python3.9/site-packages/nova/scheduler/request_filter.py:46^[[00m 2025-10-27 15:00:54.698 1 DEBUG nova.scheduler.request_filter [None req-4dd3ead2-fc79-44ef-9b2a-4900e6f07180 86abb11a873243f89bc996748fddaebf 7a56f755038f4ac6a7452b9b983ffda3 - - default default] Request filter 'remote_managed_ports_filter' took 0.0 seconds wrapper /usr/lib/python3.9/site-packages/nova/scheduler/request_filter.py:46^[[00m 2025-10-27 15:00:54.699 1 DEBUG nova.scheduler.request_filter [None req-4dd3ead2-fc79-44ef-9b2a-4900e6f07180 86abb11a873243f89bc996748fddaebf 7a56f755038f4ac6a7452b9b983ffda3 - - default default] ephemeral_encryption_filter skipped ephemeral_encryption_filter /usr/lib/python3.9/site-packages/nova/scheduler/request_filter.py:412^[[00m 2025-10-27 15:00:54.707 1 WARNING nova.scheduler.utils [None req-4dd3ead2-fc79-44ef-9b2a-4900e6f07180 86abb11a873243f89bc996748fddaebf 7a56f755038f4ac6a7452b9b983ffda3 - - default default] There is more than one numbered request group in the allocation candidate query but the flavor did not specify any group policy. This query would fail in placement due to the missing group policy. If you specified more than one numbered request group in the flavor extra_spec then you need to specify the group policy in the flavor extra_spec. If it is OK to let these groups be satisfied by overlapping resource providers then use 'group_policy': 'none'. 
If you want each group to be satisfied from a separate resource provider then use 'group_policy': 'isolate'.^[[00m 2025-10-27 15:00:54.708 1 INFO nova.scheduler.utils [None req-4dd3ead2-fc79-44ef-9b2a-4900e6f07180 86abb11a873243f89bc996748fddaebf 7a56f755038f4ac6a7452b9b983ffda3 - - default default] At least one numbered request group is defined outside of the flavor (e.g. in a port that has a QoS minimum bandwidth policy rule attached) but the flavor did not specify any group policy. To avoid the placement failure nova defaults the group policy to 'none'.^[[00m 2025-10-27 15:01:54.883 1 ERROR nova.scheduler.client.report [None req-4dd3ead2-fc79-44ef-9b2a-4900e6f07180 86abb11a873243f89bc996748fddaebf 7a56f755038f4ac6a7452b9b983ffda3 - - default default] Failed to retrieve allocation candidates from placement API for filters: RequestGroup(aggregates=[],forbidden_aggregates=set([]),forbidden_traits=set([]),in_tree=None,provider_uuids=[],requester_id='f834f7b8-368f-4cf2-a5b4-713eb9b6b5b4-0',required_traits=set([]),resources={CUSTOM_PCI_1B36_000D=1},use_same_provider=True), RequestGroup(aggregates=[],forbidden_aggregates=set([]),forbidden_traits=set([]),in_tree=None,provider_uuids=[],requester_id='f834f7b8-368f-4cf2-a5b4-713eb9b6b5b4-1',required_traits=set([]),resources={CUSTOM_PCI_1B36_000D=1},use_same_provider=True), RequestGroup(aggregates=[],forbidden_aggregates=set([]),forbidden_traits=set([]),in_tree=None,provider_uuids=[],requester_id='f834f7b8-368f-4cf2-a5b4-713eb9b6b5b4-2',required_traits=set([]),resources={CUSTOM_PCI_1B36_000D=1},use_same_provider=True), RequestGroup(aggregates=[],forbidden_aggregates=set([]),forbidden_traits=set([]),in_tree=None,provider_uuids=[],requester_id='f834f7b8-368f-4cf2-a5b4-713eb9b6b5b4-3',required_traits=set([]),resources={CUSTOM_PCI_1B36_000D=1},use_same_provider=True), 
RequestGroup(aggregates=[],forbidden_aggregates=set([]),forbidden_traits=set([]),in_tree=None,provider_uuids=[],requester_id='f834f7b8-368f-4cf2-a5b4-713eb9b6b5b4-4',required_traits=set([]),resources={CUSTOM_PCI_1B36_000D=1},use_same_provider=True), RequestGroup(aggregates=[],forbidden_aggregates=set([]),forbidden_traits=set([]),in_tree=None,provider_uuids=[],requester_id='f834f7b8-368f-4cf2-a5b4-713eb9b6b5b4-5',required_traits=set([]),resources={CUSTOM_PCI_1B36_000D=1},use_same_provider=True), RequestGroup(aggregates=[],forbidden_aggregates=set([]),forbidden_traits=set([]),in_tree=None,provider_uuids=[],requester_id='f834f7b8-368f-4cf2-a5b4-713eb9b6b5b4-6',required_traits=set([]),resources={CUSTOM_PCI_1B36_000D=1},use_same_provider=True), RequestGroup(aggregates=[],forbidden_aggregates=set([]),forbidden_traits=set([]),in_tree=None,provider_uuids=[],requester_id='f834f7b8-368f-4cf2-a5b4-713eb9b6b5b4-7',required_traits=set([]),resources={CUSTOM_PCI_1B36_000D=1},use_same_provider=True), RequestGroup(aggregates=[],forbidden_aggregates=set([]),forbidden_traits=set([]),in_tree=None,provider_uuids=[],requester_id=None,required_traits=set([]),resources={DISK_GB=1,MEMORY_MB=256,VCPU=2},use_same_provider=False) Got 504:
The gateway did not receive a timely response from the upstream server or application.
.^[[00m 2025-10-27 15:01:54.884 1 INFO nova.scheduler.manager [None req-4dd3ead2-fc79-44ef-9b2a-4900e6f07180 86abb11a873243f89bc996748fddaebf 7a56f755038f4ac6a7452b9b983ffda3 - - default default] Got no allocation candidates from the Placement API. This could be due to insufficient resources or a temporary occurrence as compute nodes start up.^[[00m ####### With Fix # Confirm commit is present [zuul@controller-0 ~]$ oc rsh placement-56f44bf97d-42hdl sh-5.1$ grep -B3 'self.resource_provider.uuid, self.resource_class, self.amount' /usr/lib/python3.9/site-packages/placement/objects/allocation_candidate.py def __repr__(self): return str( (self.resource_provider.uuid, self.resource_class, self.amount)) [zuul@titan96 ~]$ cat add_usb_controller.sh #!/usr/bin/env bash # Attach N USB qcow2 disks to a libvirt/KVM guest. # Usage: ./attach-usb-disks.sh
The gateway did not receive a timely response from the upstream server or application.
.^[[00m 2025-11-20 16:18:41.817 1 INFO nova.scheduler.manager [None req-ce18ce82-16e5-42e4-ace8-115bef61fa7e 751315ae22f142d6aa309ff53f2ec735 96aee25d1b564f44b182ec74a5ceea49 - - default default] Got no allocation candidates from the Placement API. This could be due to insufficient resources or a temporary occurrence as compute nodes start up.^[[00m # Enable workaround and try again [zuul@controller-0 ~]$ oc get osctlplane controlplane -o json | jq -r .spec.placement.template.customServiceConfig [workarounds] optimize_for_wide_provider_trees = True [placement] max_allocation_candidates = 1000 allocation_candidates_generation_strategy = breadth-first sh-5.1$ openstack server create --image 08efdb67-f7a7-41e3-8301-b54ccfcfca39 --flavor eight_pci --network public guest-osprh-20355 --wait +-------------------------------------+--------------------------------------------------------------------------------------------------+ | Field | Value | +-------------------------------------+--------------------------------------------------------------------------------------------------+ | OS-DCF:diskConfig | MANUAL | | OS-EXT-AZ:availability_zone | nova | | OS-EXT-SRV-ATTR:host | compute-t3ljdzd4-1.ctlplane.example.com | | OS-EXT-SRV-ATTR:hypervisor_hostname | compute-t3ljdzd4-1.ctlplane.example.com | | OS-EXT-SRV-ATTR:instance_name | instance-0000032c | | OS-EXT-STS:power_state | Running | | OS-EXT-STS:task_state | None | | OS-EXT-STS:vm_state | active | | OS-SRV-USG:launched_at | 2025-11-20T21:07:19.000000 | | OS-SRV-USG:terminated_at | None | | accessIPv4 | | | accessIPv6 | | | addresses | public=192.168.122.202 | | adminPass | vN5a9oMGkzzm | | config_drive | True | | created | 2025-11-20T21:06:59Z | | flavor | eight_pci (ed39c9cc-e5dd-4410-b5fb-b18ee9187443) | | hostId | 425b523b7ae22d801110a36b88baa5b6da9ddacb66066a11e945a280 | | id | efdec7bb-303e-4b3f-9e11-08de25995238 | | image | tobiko.openstack.stacks._advanced_vm.AdvancedImageFixture 
(08efdb67-f7a7-41e3-8301-b54ccfcfca39) | | key_name | None | | name | guest-osprh-20355 | | progress | 0 | | project_id | 96aee25d1b564f44b182ec74a5ceea49 | | properties | | | security_groups | name='default' | | status | ACTIVE | | updated | 2025-11-20T21:07:19Z | | user_id | 751315ae22f142d6aa309ff53f2ec735 | | volumes_attached | | +-------------------------------------+--------------------------------------------------------------------------------------------------+