[root@m4221001 ~]# oc -n openshift-storage logs rook-ceph-osd-prepare-1171e64c16853699e649f82fac8e13bd-v5wz9
Defaulted container "provision" out of: provision, copy-bins (init), blkdevmapper (init)
2025/07/03 12:09:21 maxprocs: Leaving GOMAXPROCS=16: CPU quota undefined
2025-07-03 12:09:21.121225 I | cephcmd: desired devices to configure osds: [{Name:/mnt/ocs-deviceset-localblock-0-data-1xgd2w OSDsPerDevice:1 MetadataDevice: DatabaseSizeMB:0 DeviceClass: InitialWeight: IsFilter:false IsDevicePathFilter:false}]
2025-07-03 12:09:21.122382 I | rookcmd: starting Rook v4.18.7-0.73c94c18237524a3fe266314a9e779ff5c972194 with arguments '/rook/rook ceph osd provision'
2025-07-03 12:09:21.122391 I | rookcmd: flag values: --cluster-id=8afc80c8-9e41-4ebe-b4c8-8221bcca6b96, --cluster-name=ocs-storagecluster-cephcluster, --data-device-filter=, --data-device-path-filter=, --data-devices=[{"id":"/mnt/ocs-deviceset-localblock-0-data-1xgd2w","storeConfig":{"osdsPerDevice":1}}], --encrypted-device=false, --force-format=false, --help=false, --location=, --log-level=DEBUG, --metadata-device=, --node-name=ocs-deviceset-localblock-0-data-1xgd2w, --osd-crush-device-class=ssd, --osd-crush-initial-weight=, --osd-database-size=0, --osd-store-type=bluestore, --osd-wal-size=576, --osds-per-device=1, --pvc-backed-osd=true, --replace-osd=0
2025-07-03 12:09:21.122404 I | ceph-spec: parsing mon endpoints: c=172.30.254.248:3300,a=172.30.45.112:3300,b=172.30.176.224:3300
2025-07-03 12:09:21.144050 I | op-osd: CRUSH location=root=default host=worker-1-odf-ci-2-test-ocs
2025-07-03 12:09:21.144082 I | cephcmd: crush location of osd: root=default host=worker-1-odf-ci-2-test-ocs
2025-07-03 12:09:21.147768 I | cephclient: writing config file /var/lib/rook/openshift-storage/openshift-storage.config
2025-07-03 12:09:21.148062 I | cephclient: generated admin config in /var/lib/rook/openshift-storage
2025-07-03 12:09:21.148249 D | cephclient: config file @ /etc/ceph/ceph.conf:
[global]
fsid = bc41b8f3-a8df-4b00-92fa-fe9663c60aea
mon initial members = b c a
mon host = [v2:172.30.176.224:3300],[v2:172.30.254.248:3300],[v2:172.30.45.112:3300]
bdev_flock_retry = 20
mon_osd_full_ratio = .85
mon_osd_backfillfull_ratio = .8
mon_osd_nearfull_ratio = .75
mon_max_pg_per_osd = 600
mon_pg_warn_max_object_skew = 0
mon_data_avail_warn = 15
mon_warn_on_pool_no_redundancy = false
bluestore_prefer_deferred_size_hdd = 0
bluestore_slow_ops_warn_lifetime = 0

[osd]
osd_memory_target_cgroup_limit_ratio = 0.8

[client.rbd-mirror.a]
debug_ms = 1
debug_rbd = 15
debug_rbd_mirror = 30
log_file = /var/log/ceph/$cluster-$name.log

[client.rbd-mirror-peer]
debug_ms = 1
debug_rbd = 15
debug_rbd_mirror = 30
log_file = /var/log/ceph/$cluster-$name.log

[client.admin]
keyring = /var/lib/rook/openshift-storage/client.admin.keyring

2025-07-03 12:09:21.148255 I | cephcmd: destroying osd.0 and cleaning its backing device
2025-07-03 12:09:21.148543 D | exec: Running command: stdbuf -oL ceph-volume --log-path /tmp/ceph-log lvm list --format json
2025-07-03 12:09:21.617845 D | cephosd: {}
2025-07-03 12:09:21.617910 I | cephosd: 0 ceph-volume lvm osd devices configured on this node
2025-07-03 12:09:21.617960 D | exec: Running command: stdbuf -oL ceph-volume --log-path /tmp/ceph-log raw list --format json
2025-07-03 12:09:31.380293 D | cephosd: {
    "c338bb76-35f1-44e7-bd0e-cc1adf98d2bc": {
        "ceph_fsid": "bc41b8f3-a8df-4b00-92fa-fe9663c60aea",
        "device": "/dev/sdd",
        "osd_id": 0,
        "osd_uuid": "c338bb76-35f1-44e7-bd0e-cc1adf98d2bc",
        "type": "bluestore-rdr"
    }
}
2025-07-03 12:09:31.380466 I | cephosd: 1 ceph-volume raw osd devices configured on this node
2025-07-03 12:09:31.380473 I | cephosd: destroying osd.0
2025-07-03 12:09:31.380499 D | exec: Running command: ceph osd destroy osd.0 --yes-i-really-mean-it --connect-timeout=15 --cluster=openshift-storage --conf=/var/lib/rook/openshift-storage/openshift-storage.config --name=client.admin --keyring=/var/lib/rook/openshift-storage/client.admin.keyring --format json
2025-07-03 12:09:31.903165 I | cephosd: successfully destroyed osd.0
2025-07-03 12:09:31.903231 D | exec: Running command: lsblk /mnt/ocs-deviceset-localblock-0-data-1xgd2w --bytes --nodeps --pairs --paths --output SIZE,ROTA,RO,TYPE,PKNAME,NAME,KNAME,MOUNTPOINT,FSTYPE
2025-07-03 12:09:31.906077 D | sys: lsblk output: "SIZE=\"536870912000\" ROTA=\"1\" RO=\"0\" TYPE=\"mpath\" PKNAME=\"\" NAME=\"/dev/mapper/mpathb\" KNAME=\"/dev/dm-3\" MOUNTPOINT=\"\" FSTYPE=\"ceph_bluestore\""
2025-07-03 12:09:31.906102 I | cephosd: zap OSD.0 path "/dev/mapper/mpathb"
2025-07-03 12:09:31.906108 D | exec: Running command: stdbuf -oL ceph-volume lvm zap /dev/mapper/mpathb --destroy
2025-07-03 12:09:33.536157 I | cephosd: --> Zapping: /dev/mapper/mpathb
--> Removing all BlueStore signature on /dev/mapper/mpathb if any...
Running command: /usr/bin/ceph-bluestore-tool zap-device --dev /dev/mapper/mpathb --yes-i-really-really-mean-it
Running command: /usr/bin/dd if=/dev/zero of=/dev/mapper/mpathb bs=1M count=10 conv=fsync
 stderr: 10+0 records in
10+0 records out
 stderr: 10485760 bytes (10 MB, 10 MiB) copied, 0.021504 s, 488 MB/s
--> Zapping successful for: <Raw Device: /dev/mapper/mpathb>
2025-07-03 12:09:33.536205 I | cephosd: successfully zapped osd.0 path "/dev/mapper/mpathb"
2025-07-03 12:09:33.536221 D | exec: Running command: dmsetup version
2025-07-03 12:09:33.538509 I | cephosd: Library version: 1.02.202-RHEL9 (2024-11-04)
Driver version: 4.48.0
2025-07-03 12:09:33.562257 I | cephosd: discovering hardware
2025-07-03 12:09:33.562346 D | exec: Running command: lsblk /mnt/ocs-deviceset-localblock-0-data-1xgd2w --bytes --nodeps --pairs --paths --output SIZE,ROTA,RO,TYPE,PKNAME,NAME,KNAME,MOUNTPOINT,FSTYPE
2025-07-03 12:09:33.565772 D | sys: lsblk output: "SIZE=\"536870912000\" ROTA=\"1\" RO=\"0\" TYPE=\"mpath\" PKNAME=\"\" NAME=\"/dev/mapper/mpathb\" KNAME=\"/dev/dm-3\" MOUNTPOINT=\"\" FSTYPE=\"\""
2025-07-03 12:09:33.565809 D | exec: Running command: udevadm info --query=property /dev/dm-3
2025-07-03 12:09:33.574920 D | sys: udevadm info output: "DEVPATH=/devices/virtual/block/dm-3\nDEVNAME=/dev/dm-3\nDEVTYPE=disk\nDISKSEQ=8\nMAJOR=253\nMINOR=3\nSUBSYSTEM=block\nUSEC_INITIALIZED=14782517\nDM_UDEV_DISABLE_LIBRARY_FALLBACK_FLAG=1\nDM_UDEV_PRIMARY_SOURCE_FLAG=1\nDM_UDEV_RULES_VSN=2\nDM_NAME=mpathb\nDM_UUID=mpath-360050763808104bc2800000000000075\nDM_SUSPENDED=0\nMPATH_DEVICE_READY=1\nMPATH_SBIN_PATH=/sbin\nDM_TYPE=scsi\nDM_WWN=0x60050763808104bc2800000000000075\nDM_SERIAL=360050763808104bc2800000000000075\nNVME_HOST_IFACE=none\nSYSTEMD_READY=1\nDEVLINKS=/dev/disk/by-id/dm-name-mpathb /dev/disk/by-id/wwn-0x60050763808104bc2800000000000075 /dev/disk/by-id/scsi-360050763808104bc2800000000000075 /dev/disk/by-id/dm-uuid-mpath-360050763808104bc2800000000000075 /dev/mapper/mpathb\nTAGS=:systemd:\nCURRENT_TAGS=:systemd:"
2025-07-03 12:09:33.574951 I | cephosd: creating and starting the osds
2025-07-03 12:09:33.574996 D | cephosd: desiredDevices are [{Name:/mnt/ocs-deviceset-localblock-0-data-1xgd2w OSDsPerDevice:1 MetadataDevice: DatabaseSizeMB:0 DeviceClass: InitialWeight: IsFilter:false IsDevicePathFilter:false}]
2025-07-03 12:09:33.574998 D | cephosd: context.Devices are:
2025-07-03 12:09:33.575024 D | cephosd: &{Name:/mnt/ocs-deviceset-localblock-0-data-1xgd2w Parent: HasChildren:false DevLinks:/dev/disk/by-id/dm-name-mpathb /dev/disk/by-id/wwn-0x60050763808104bc2800000000000075 /dev/disk/by-id/scsi-360050763808104bc2800000000000075 /dev/disk/by-id/dm-uuid-mpath-360050763808104bc2800000000000075 /dev/mapper/mpathb Size:536870912000 UUID: Serial: Type:data Rotational:true Readonly:false Partitions:[] Filesystem: Mountpoint: Vendor: Model: WWN: WWNVendorExtension: Empty:false CephVolumeData: RealPath:/dev/mapper/mpathb KernelName:dm-3 Encrypted:false}
2025-07-03 12:09:33.575029 I | cephosd: old lsblk can't detect bluestore signature, so try to detect here
2025-07-03 12:09:33.575069 D | exec: Running command: cryptsetup luksDump /mnt/ocs-deviceset-localblock-0-data-1xgd2w
2025-07-03 12:09:33.584252 E | cephosd: failed to determine if the encrypted block "/mnt/ocs-deviceset-localblock-0-data-1xgd2w" is from our cluster. failed to dump LUKS header for disk "/mnt/ocs-deviceset-localblock-0-data-1xgd2w". Device /mnt/ocs-deviceset-localblock-0-data-1xgd2w is not a valid LUKS device.: exit status 1
2025-07-03 12:09:33.584291 D | exec: Running command: stdbuf -oL ceph-volume --log-path /tmp/ceph-log raw list /mnt/ocs-deviceset-localblock-0-data-1xgd2w --format json
2025-07-03 12:09:33.919373 D | cephosd: {}
2025-07-03 12:09:33.919434 I | cephosd: 0 ceph-volume raw osd devices configured on this node
2025-07-03 12:09:33.919443 I | cephosd: device "/mnt/ocs-deviceset-localblock-0-data-1xgd2w" is available.
2025-07-03 12:09:33.919449 I | cephosd: "/mnt/ocs-deviceset-localblock-0-data-1xgd2w" found in the desired devices
2025-07-03 12:09:33.919461 I | cephosd: device "/mnt/ocs-deviceset-localblock-0-data-1xgd2w" is selected by the device filter/name "/mnt/ocs-deviceset-localblock-0-data-1xgd2w"
2025-07-03 12:09:33.937036 I | cephosd: configuring osd devices: {"Entries":{"data":{"Data":-1,"Metadata":null,"Config":{"Name":"/mnt/ocs-deviceset-localblock-0-data-1xgd2w","OSDsPerDevice":1,"MetadataDevice":"","DatabaseSizeMB":0,"DeviceClass":"ssd","InitialWeight":"","IsFilter":false,"IsDevicePathFilter":false},"PersistentDevicePaths":["/dev/disk/by-id/dm-name-mpathb","/dev/disk/by-id/wwn-0x60050763808104bc2800000000000075","/dev/disk/by-id/scsi-360050763808104bc2800000000000075","/dev/disk/by-id/dm-uuid-mpath-360050763808104bc2800000000000075","/dev/mapper/mpathb"],"DeviceInfo":{"name":"/mnt/ocs-deviceset-localblock-0-data-1xgd2w","parent":"","hasChildren":false,"devLinks":"/dev/disk/by-id/dm-name-mpathb /dev/disk/by-id/wwn-0x60050763808104bc2800000000000075 /dev/disk/by-id/scsi-360050763808104bc2800000000000075 /dev/disk/by-id/dm-uuid-mpath-360050763808104bc2800000000000075 /dev/mapper/mpathb","size":536870912000,"uuid":"","serial":"","type":"data","rotational":true,"readOnly":false,"Partitions":null,"filesystem":"","mountpoint":"","vendor":"","model":"","wwn":"","wwnVendorExtension":"","empty":false,"real-path":"/dev/mapper/mpathb","kernel-name":"dm-3"},"RestoreOSD":false}}}
2025-07-03 12:09:33.937175 I | cephclient: getting or creating ceph auth key "client.bootstrap-osd"
2025-07-03 12:09:33.937195 D | exec: Running command: ceph auth get-or-create-key client.bootstrap-osd mon allow profile bootstrap-osd --connect-timeout=15 --cluster=openshift-storage --conf=/var/lib/rook/openshift-storage/openshift-storage.config --name=client.admin --keyring=/var/lib/rook/openshift-storage/client.admin.keyring --format json
2025-07-03 12:09:34.437389 D | exec: Running command: lsblk /mnt/ocs-deviceset-localblock-0-data-1xgd2w --bytes --nodeps --pairs --paths --output SIZE,ROTA,RO,TYPE,PKNAME,NAME,KNAME,MOUNTPOINT,FSTYPE
2025-07-03 12:09:34.444828 D | sys: lsblk output: "SIZE=\"536870912000\" ROTA=\"1\" RO=\"0\" TYPE=\"mpath\" PKNAME=\"\" NAME=\"/dev/mapper/mpathb\" KNAME=\"/dev/dm-3\" MOUNTPOINT=\"\" FSTYPE=\"\""
2025-07-03 12:09:34.444873 I | cephosd: configuring new device "/mnt/ocs-deviceset-localblock-0-data-1xgd2w"
2025-07-03 12:09:34.444887 D | exec: Running command: stdbuf -oL ceph-volume --log-path /var/log/ceph/ocs-deviceset-localblock-0-data-1xgd2w raw prepare --bluestore --data /mnt/ocs-deviceset-localblock-0-data-1xgd2w --osd-id 0 --crush-device-class ssd
2025-07-03 12:09:42.625802 I | cephosd: stderr: Unknown device "/mnt/ocs-deviceset-localblock-0-data-1xgd2w": No such device
Running command: /usr/bin/ceph-authtool --gen-print-key
Running command: /usr/bin/ceph-authtool --gen-print-key
Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring osd tree -f json
Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 185a5813-7647-4f8c-a62a-1ff2ce12be29 0
Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-0
Running command: /usr/bin/chown -R ceph:ceph /mnt/ocs-deviceset-localblock-0-data-1xgd2w
Running command: /usr/bin/ln -s /mnt/ocs-deviceset-localblock-0-data-1xgd2w /var/lib/ceph/osd/ceph-0/block
Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-0/activate.monmap
 stderr: got monmap epoch 4
--> Creating keyring file for osd.0
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/keyring
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/
Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 0 --monmap /var/lib/ceph/osd/ceph-0/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-0/ --osd-uuid 185a5813-7647-4f8c-a62a-1ff2ce12be29 --setuser ceph --setgroup ceph
 stderr: 2025-07-03T12:09:37.616+0000 3ffaade5900 -1 bluestore(/var/lib/ceph/osd/ceph-0//block) No valid bdev label found
 stderr: 2025-07-03T12:09:37.996+0000 3ffaade5900 -1 bluestore(/var/lib/ceph/osd/ceph-0/) _read_fsid unparsable uuid
--> ceph-volume raw dmcrypt prepare successful for: /mnt/ocs-deviceset-localblock-0-data-1xgd2w
2025-07-03 12:09:42.625903 D | exec: Running command: stdbuf -oL ceph-volume --log-path /tmp/ceph-log lvm list /mnt/ocs-deviceset-localblock-0-data-1xgd2w --format json
2025-07-03 12:09:43.717866 D | cephosd: {}
2025-07-03 12:09:43.717927 I | cephosd: 0 ceph-volume lvm osd devices configured on this node
2025-07-03 12:09:43.717942 D | exec: Running command: cryptsetup luksDump /mnt/ocs-deviceset-localblock-0-data-1xgd2w
2025-07-03 12:09:43.737594 E | cephosd: failed to determine if the encrypted block "/mnt/ocs-deviceset-localblock-0-data-1xgd2w" is from our cluster. failed to dump LUKS header for disk "/mnt/ocs-deviceset-localblock-0-data-1xgd2w". Device /mnt/ocs-deviceset-localblock-0-data-1xgd2w is not a valid LUKS device.: exit status 1
2025-07-03 12:09:43.737669 D | exec: Running command: stdbuf -oL ceph-volume --log-path /tmp/ceph-log raw list /mnt/ocs-deviceset-localblock-0-data-1xgd2w --format json
2025-07-03 12:09:44.401423 D | cephosd: {
    "185a5813-7647-4f8c-a62a-1ff2ce12be29": {
        "ceph_fsid": "bc41b8f3-a8df-4b00-92fa-fe9663c60aea",
        "device": "/mnt/ocs-deviceset-localblock-0-data-1xgd2w",
        "osd_id": 0,
        "osd_uuid": "185a5813-7647-4f8c-a62a-1ff2ce12be29",
        "type": "bluestore"
    }
}
2025-07-03 12:09:44.401604 D | exec: Running command: lsblk /mnt/ocs-deviceset-localblock-0-data-1xgd2w --bytes --nodeps --pairs --paths --output SIZE,ROTA,RO,TYPE,PKNAME,NAME,KNAME,MOUNTPOINT,FSTYPE
2025-07-03 12:09:44.414660 D | sys: lsblk output: "SIZE=\"536870912000\" ROTA=\"1\" RO=\"0\" TYPE=\"mpath\" PKNAME=\"\" NAME=\"/dev/mapper/mpathb\" KNAME=\"/dev/dm-3\" MOUNTPOINT=\"\" FSTYPE=\"\""
2025-07-03 12:09:44.414716 I | cephosd: setting device class "hdd" for device "/mnt/ocs-deviceset-localblock-0-data-1xgd2w"
2025-07-03 12:09:44.414972 I | cephosd: 1 ceph-volume raw osd devices configured on this node
2025-07-03 12:09:44.415551 I | cephosd: devices = [{ID:0 Cluster:ceph UUID:185a5813-7647-4f8c-a62a-1ff2ce12be29 DevicePartUUID: DeviceClass:hdd BlockPath:/mnt/ocs-deviceset-localblock-0-data-1xgd2w MetadataPath: WalPath: SkipLVRelease:true Location:root=default host=worker-1-odf-ci-2-test-ocs LVBackedPV:false CVMode:raw Store:bluestore TopologyAffinity: Encrypted:false ExportService:false NodeName: PVCName:}]
[root@m4221001 ~]#
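Note on the device class visible in this log: the prepare pod is started with --osd-crush-device-class=ssd and ceph-volume is invoked with --crush-device-class ssd, yet the final inventory records DeviceClass:hdd, because lsblk reports the multipath device as rotational (ROTA="1"). If ssd is the intended class for osd.0, it can be checked and reapplied afterwards with the standard Ceph CLI, e.g. from the rook-ceph toolbox pod. A minimal sketch, assuming osd.0 as above and that overriding the auto-detected class is actually desired for this hardware:

# show the CLASS column currently assigned to each OSD in the CRUSH map
ceph osd tree
# an existing class must be removed before a new one can be set
ceph osd crush rm-device-class osd.0
ceph osd crush set-device-class ssd osd.0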