706 ceph pool create block-pool 64 64
707 ceph osd pool create block-pool 64 64
708 ceph osd pool application enable block-pool rbd
709 rbd create vdisk1 --size 4G --pool block-pool --image-format 2 --image-feature layering
710 rbd map block-pool/vdisk1
711 mkdir /mnt/vdisk1
712 mount /dev/rbd1 /mnt/vdisk1    # 此处挂载失败:块设备上尚未创建文件系统,应先执行 mkfs(见下一步)
713 mkfs.xfs /dev/rbd1
714 mount /dev/rbd1 /mnt/vdisk1
[root@ceph-client mnt]# rbd map block-pool/vdisk1
unable to get monitor info from DNS SRV with service name: ceph-mon
In some cases useful info is found in syslog - try "dmesg | tail".
rbd: map failed: 2023-11-14 15:51:13.809 ffff8d4e7010 -1 failed for service _ceph-mon._tcp
(2) No such file or directory
报错:连接不上 mon。原因是客户端本地没有 /etc/ceph/ceph.conf(没有 mon 地址),rbd 退而尝试通过 DNS SRV 记录 _ceph-mon._tcp 查找 mon,故报 "No such file or directory"
[root@ceph-client mnt]# rbd map block-pool/vdisk1 -m 172.17.163.105,172.17.112.206
rbd: sysfs write failed
2023-11-14 15:55:34.873 ffff79be78c0 -1 monclient(hunting): handle_auth_bad_method server allowed_methods [2] but i only support [2,1]
rbd: couldn't connect to the cluster!
In some cases useful info is found in syslog - try "dmesg | tail".
rbd: map failed: (22) Invalid argument
[root@ceph-client mnt]# rbd map block-pool/vdisk1 -m 172.17.163.105
rbd: sysfs write failed
2023-11-14 15:58:45.753 ffffb09918c0 -1 monclient(hunting): handle_auth_bad_method server allowed_methods [2] but i only support [2,1]
rbd: couldn't connect to the cluster!
In some cases useful info is found in syslog - try "dmesg | tail".
rbd: map failed: (22) Invalid argument
[91158.067305] libceph: no secret set (for auth_x protocol)
[91158.068206] libceph: error -22 on auth protocol 2 init
[root@ceph-0 ~]# ceph auth get-or-create client.blockuser mon 'allow r' osd 'allow * pool=block-pool'
[client.blockuser]
key = AQDNLFNlZXSwERAA9uYYz7UdIKmuO1bSiSmEVg==
[root@ceph-0 ~]# ceph auth get client.blockuser
exported keyring for client.blockuser
[client.blockuser]
key = AQDNLFNlZXSwERAA9uYYz7UdIKmuO1bSiSmEVg==
caps mon = "allow r"
caps osd = "allow * pool=block-pool"
[root@ceph-0 ~]# ceph auth get client.blockuser -o /etc/ceph/ceph.client.blockuser.keyring
exported keyring for client.blockuser
[root@ceph-0 ~]# ceph --user blockuser -s
cluster:
id: ff72b496-d036-4f1b-b2ad-55358f3c16cb
health: HEALTH_ERR
mon ceph-0 is very low on available space
services:
mon: 4 daemons, quorum ceph-3,ceph-1,ceph-0,ceph-2 (age 30h)
mgr: ceph-0(active, since 3d), standbys: ceph-1, ceph-3, ceph-2
mds: 4 up:standby
osd: 4 osds: 3 up (since 2d), 3 in (since 2d)
rgw: 4 daemons active (ceph-0, ceph-1, ceph-2, ceph-3)
task status:
data:
pools: 5 pools, 192 pgs
objects: 201 objects, 6.4 MiB
usage: 3.2 GiB used, 297 GiB / 300 GiB avail
pgs: 192 active+clean
[root@ceph-0 ~]# scp /etc/ceph/ceph.client.blockuser.keyring root@ceph-client:/etc/ceph/
[root@ceph-client ceph]# ceph --user blockuser -s -m ceph-0
cluster:
id: ff72b496-d036-4f1b-b2ad-55358f3c16cb
health: HEALTH_ERR
mon ceph-0 is very low on available space
services:
mon: 4 daemons, quorum ceph-3,ceph-1,ceph-0,ceph-2 (age 30h)
mgr: ceph-0(active, since 3d), standbys: ceph-1, ceph-3, ceph-2
mds: 4 up:standby
osd: 4 osds: 3 up (since 2d), 3 in (since 2d)
rgw: 4 daemons active (ceph-0, ceph-1, ceph-2, ceph-3)
task status:
data:
pools: 5 pools, 192 pgs
objects: 201 objects, 6.4 MiB
usage: 3.2 GiB used, 297 GiB / 300 GiB avail
pgs: 192 active+clean
[root@ceph-client ceph]# rbd map block-pool/vdisk1 --user blockuser -m ceph-0,ceph-1,ceph-2,ceph-3
/dev/rbd0
可以看到成功映射出/dev/rbd0 块设备
mkfs.xfs /dev/rbd0 -f
参考
更多【java-ceph 14.2.10 aarch64 非集群内 客户端 挂载块设备】相关视频教程:www.yxfzedu.com