-
Bug
-
Resolution: Unresolved
-
Normal
-
rhos-18.0.z
-
False
-
-
False
-
?
-
rhos-storage-manila
-
None
-
-
-
Low
I have created a 10GB NFS share with manila and mounted it on a system. Unfortunately, the mount on the system reports a size that greatly exceeds the available space: 10GB vs 1.3TB.
It seems that the quota is still in place, and I cannot write more than 10GB to the NFS share — only the reported size is wrong. Run log:
cjanisze@fedora-vm ~/D/p/o/manila-share> openstack share list --insecure --project chrisj +--------------------------------------+-------+------+-------------+-----------+-----------+-----------------+----------------------------+-------------------+ | ID | Name | Size | Share Proto | Status | Is Public | Share Type Name | Host | Availability Zone | +--------------------------------------+-------+------+-------------+-----------+-----------+-----------------+----------------------------+-------------------+ | 99b2f52b-3454-4966-8164-9cc8f34c547f | test1 | 10 | NFS | available | False | nfs | hostgroup@cephfsnfs#cephfs | nova | +--------------------------------------+-------+------+-------------+-----------+-----------+-----------------+----------------------------+-------------------+ cjanisze@fedora-vm ~/D/p/o/manila-share [2]> openstack share show 99b2f52b-3454-4966-8164-9cc8f34c547f --insecure +---------------------------------------+-------------------------------------------------------------------------------------------------------------------+ | Field | Value | +---------------------------------------+-------------------------------------------------------------------------------------------------------------------+ | access_rules_status | active | | availability_zone | nova | | create_share_from_snapshot_support | False | | created_at | 2025-01-23T15:05:37.658014 | | description | None | | export_locations | | | | id = e2c3278c-8aee-4d47-8568-1951da2f0447 | | | path = 172.20.138.250:/volumes/_nogroup/d78c21fa-dba3-4c08-9f3b-505eeebf84ac/73ad23f3-300a-426e-b7cf-6ffa53010cff | | | preferred = True | | | share_instance_id = d78c21fa-dba3-4c08-9f3b-505eeebf84ac | | | is_admin_only = False | | has_replicas | False | | host | hostgroup@cephfsnfs#cephfs | | id | 99b2f52b-3454-4966-8164-9cc8f34c547f | | is_public | False | | is_soft_deleted | False | | mount_snapshot_support | False | | name | test1 | | progress | 100% | | project_id | 
1e7b48f0eadd4dcd951e87bb961e15e8 | | properties | | | replication_type | None | | revert_to_snapshot_support | False | | scheduled_to_be_deleted_at | None | | share_group_id | None | | share_network_id | None | | share_proto | NFS | | share_server_id | None | | share_type | a8e2fb0e-d59e-4b2d-8c7f-fa79e1618c38 | | share_type_name | nfs | | size | 10 | | snapshot_id | None | | snapshot_support | False | | source_share_group_snapshot_member_id | None | | status | available | | task_state | None | | user_id | b41657d7d4134f8c907d1f6bda32e934 | | volume_type | nfs | +---------------------------------------+-------------------------------------------------------------------------------------------------------------------+ [root@rhel94-vm1 ~]# mount -t nfs 172.20.138.250:/volumes/_nogroup/d78c21fa-dba3-4c08-9f3b-505eeebf84ac/73ad23f3-300a-426e-b7cf-6ffa53010cff /mnt [root@rhel94-vm1 ~]# df -h Filesystem Size Used Avail Use% Mounted on devtmpfs 4.0M 0 4.0M 0% /dev tmpfs 888M 0 888M 0% /dev/shm tmpfs 356M 9.7M 346M 3% /run /dev/vda4 8.8G 1.5G 7.3G 17% / /dev/vda3 960M 170M 791M 18% /boot /dev/vda2 200M 7.1M 193M 4% /boot/efi tmpfs 178M 0 178M 0% /run/user/0 tmpfs 178M 0 178M 0% /run/user/1000 172.20.138.250:/volumes/_nogroup/d78c21fa-dba3-4c08-9f3b-505eeebf84ac/73ad23f3-300a-426e-b7cf-6ffa53010cff 1.3T 0 1.3T 0% /mnt [root@rhel94-vm1 mnt]# for i in {1..11}; do dd if=/dev/zero of=/mnt/test$i.img bs=1G count=1 oflag=dsync; done 1+0 records in 1+0 records out 1073741824 bytes (1.1 GB, 1.0 GiB) copied, 2.75783 s, 389 MB/s 1+0 records in 1+0 records out 1073741824 bytes (1.1 GB, 1.0 GiB) copied, 3.64177 s, 295 MB/s 1+0 records in 1+0 records out 1073741824 bytes (1.1 GB, 1.0 GiB) copied, 2.87453 s, 374 MB/s 1+0 records in 1+0 records out 1073741824 bytes (1.1 GB, 1.0 GiB) copied, 3.04324 s, 353 MB/s 1+0 records in 1+0 records out 1073741824 bytes (1.1 GB, 1.0 GiB) copied, 2.58057 s, 416 MB/s 1+0 records in 1+0 records out 1073741824 bytes (1.1 GB, 1.0 GiB) copied, 2.83895 s, 
378 MB/s 1+0 records in 1+0 records out 1073741824 bytes (1.1 GB, 1.0 GiB) copied, 2.57254 s, 417 MB/s 1+0 records in 1+0 records out 1073741824 bytes (1.1 GB, 1.0 GiB) copied, 2.63475 s, 408 MB/s 1+0 records in 1+0 records out 1073741824 bytes (1.1 GB, 1.0 GiB) copied, 2.66892 s, 402 MB/s 1+0 records in 1+0 records out 1073741824 bytes (1.1 GB, 1.0 GiB) copied, 2.64982 s, 405 MB/s dd: error writing '/mnt/test11.img': Disk quota exceeded 1+0 records in 0+0 records out 0 bytes copied, 1.57725 s, 0.0 kB/s [root@rhel94-vm1 mnt]# df -h Filesystem Size Used Avail Use% Mounted on devtmpfs 4.0M 0 4.0M 0% /dev tmpfs 888M 0 888M 0% /dev/shm tmpfs 356M 9.7M 346M 3% /run /dev/vda4 8.8G 1.5G 7.3G 17% / /dev/vda3 960M 170M 791M 18% /boot /dev/vda2 200M 7.1M 193M 4% /boot/efi tmpfs 178M 0 178M 0% /run/user/0 tmpfs 178M 0 178M 0% /run/user/1000 172.20.138.250:/volumes/_nogroup/d78c21fa-dba3-4c08-9f3b-505eeebf84ac/73ad23f3-300a-426e-b7cf-6ffa53010cff 1.3T 10G 1.3T 1% /mnt
[root@edpm-compute-0 ceph]# ceph fs subvolume info cephfs d78c21fa-dba3-4c08-9f3b-505eeebf84ac
{
"atime": "2025-01-23 15:05:38",
"bytes_pcent": "0.00",
"bytes_quota": 10737418240,
"bytes_used": 15,
"created_at": "2025-01-23 15:05:38",
"ctime": "2025-01-23 15:09:45",
"data_pool": "cephfs.cephfs.data",
"features": [
"snapshot-clone",
"snapshot-autoprotect",
"snapshot-retention"
],
"flavor": 2,
"gid": 0,
"mode": 16895,
"mon_addrs": [
"172.20.142.100:6789",
"172.20.142.102:6789",
"172.20.142.101:6789"
],
"mtime": "2025-01-23 15:09:45",
"path": "/volumes/_nogroup/d78c21fa-dba3-4c08-9f3b-505eeebf84ac/73ad23f3-300a-426e-b7cf-6ffa53010cff",
"pool_namespace": "fsvolumens_d78c21fa-dba3-4c08-9f3b-505eeebf84ac",
"state": "complete",
"type": "subvolume",
"uid": 0
}
- links to