Debian 11 Bullseye

Ceph Nautilus : Use Block Device
2021/08/26

 
Configure a Client Host [dlp] to use Ceph Storage as follows.
                                         |
        +--------------------+           |
        |   [dlp.srv.world]  |10.0.0.30  |
        |     Ceph Client    +-----------+
        |                    |           |
        +--------------------+           |
            +----------------------------+----------------------------+
            |                            |                            |
            |10.0.0.51                   |10.0.0.52                   |10.0.0.53 
+-----------+-----------+    +-----------+-----------+    +-----------+-----------+
|   [node01.srv.world]  |    |   [node02.srv.world]  |    |   [node03.srv.world]  |
|     Object Storage    +----+     Object Storage    +----+     Object Storage    |
|     Monitor Daemon    |    |                       |    |                       |
|     Manager Daemon    |    |                       |    |                       |
+-----------------------+    +-----------------------+    +-----------------------+

 
For example, create a block device and mount it on a Client Host.
[1] Transfer the SSH public key to the Client Host and configure it from the Admin Node.
# transfer public key

root@node01:~#
ssh-copy-id dlp

# install required packages

root@node01:~#
ssh dlp "apt -y install ceph-common"
# transfer required files to Client Host

root@node01:~#
scp /etc/ceph/ceph.conf dlp:/etc/ceph/

ceph.conf                                     100%  195    98.1KB/s   00:00
root@node01:~#
scp /etc/ceph/ceph.client.admin.keyring dlp:/etc/ceph/

ceph.client.admin.keyring                     100%  151    71.5KB/s   00:00
root@node01:~#
ssh dlp "chown ceph. /etc/ceph/ceph.*"

[2] Create a block device and mount it on the Client Host.
# create default RBD pool [rbd]

root@dlp:~#
ceph osd pool create rbd 64

pool 'rbd' created
# enable the Placement Group autoscaler module

root@dlp:~#
ceph mgr module enable pg_autoscaler

root@dlp:~#
ceph osd pool set rbd pg_autoscale_mode on

set pool 2 pg_autoscale_mode to on
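
# (optional) read the setting back to confirm it was applied

root@dlp:~#
ceph osd pool get rbd pg_autoscale_mode
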
# initialize the pool

root@dlp:~#
rbd pool init rbd

root@dlp:~#
ceph osd pool autoscale-status

POOL   SIZE TARGET SIZE RATE RAW CAPACITY  RATIO TARGET RATIO EFFECTIVE RATIO BIAS PG_NUM NEW PG_NUM AUTOSCALE
rbd     19               3.0       239.9G 0.0000                               1.0     64            on

# create a 10G block device

root@dlp:~#
rbd create --size 10G --pool rbd rbd01
# confirm

root@dlp:~#
rbd ls -l

NAME   SIZE    PARENT  FMT  PROT  LOCK
rbd01  10 GiB            2
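
# (optional) show detailed information for a single image

root@dlp:~#
rbd info rbd01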

# map the block device

root@dlp:~#
rbd map rbd01

/dev/rbd0
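
# note: if [rbd map] fails with an unsupported feature error on an older
# kernel, the unsupported image features can be disabled first, for example:
#   rbd feature disable rbd01 object-map fast-diff deep-flatten
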
# confirm

root@dlp:~#
rbd showmapped

id pool namespace image snap device
0  rbd            rbd01 -    /dev/rbd0

# format with XFS

root@dlp:~#
mkfs.xfs /dev/rbd0

meta-data=/dev/rbd0              isize=512    agcount=16, agsize=163840 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=1, sparse=1, rmapbt=0
         =                       reflink=1    bigtime=0
data     =                       bsize=4096   blocks=2621440, imaxpct=25
         =                       sunit=16     swidth=16 blks
naming   =version 2              bsize=4096   ascii-ci=0, ftype=1
log      =internal log           bsize=4096   blocks=2560, version=2
         =                       sectsz=512   sunit=16 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0

root@dlp:~#
mount /dev/rbd0 /mnt

root@dlp:~#
df -hT

Filesystem                  Type      Size  Used Avail Use% Mounted on
udev                        devtmpfs  2.0G     0  2.0G   0% /dev
tmpfs                       tmpfs     394M  516K  393M   1% /run
/dev/mapper/debian--vg-root ext4       28G  1.2G   26G   5% /
tmpfs                       tmpfs     2.0G     0  2.0G   0% /dev/shm
tmpfs                       tmpfs     5.0M     0  5.0M   0% /run/lock
/dev/vda1                   ext2      470M   48M  398M  11% /boot
tmpfs                       tmpfs     394M     0  394M   0% /run/user/0
/dev/rbd0                   xfs        10G  105M  9.9G   2% /mnt
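
# (optional) the image can be grown online later: resize the RBD image,
# then expand the XFS filesystem to match (an example growing to 20G)

root@dlp:~#
rbd resize --size 20G rbd01

root@dlp:~#
xfs_growfs /mnt
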
[3] To delete block devices or pools you created, run the following commands.
To delete pools, [mon allow pool delete = true] must be set in the [Monitor Daemon] configuration, for example as shown below.
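# for example, the setting can be applied at runtime via the centralized
# config database (alternatively, add [mon allow pool delete = true] to
# ceph.conf on the Monitor nodes and restart them)

root@dlp:~#
ceph config set mon mon_allow_pool_delete true
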
# unmap

root@dlp:~#
rbd unmap /dev/rbd/rbd/rbd01
# delete a block device

root@dlp:~#
rbd rm rbd01 -p rbd

Removing image: 100% complete...done.
# delete a pool

# ceph osd pool delete [Pool Name] [Pool Name] ***

root@dlp:~#
ceph osd pool delete rbd rbd --yes-i-really-really-mean-it

pool 'rbd' removed
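
# (optional) revert the Monitor setting after deleting the pool

root@dlp:~#
ceph config set mon mon_allow_pool_delete false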