CentOS Stream 9

Ceph Quincy : Use File System (2022/06/14)

 
Configure a Client Host [dlp] to use Ceph Storage as follows.
                                         |
        +--------------------+           |
        |   [dlp.srv.world]  |10.0.0.30  |
        |     Ceph Client    +-----------+
        |                    |           |
        +--------------------+           |
            +----------------------------+----------------------------+
            |                            |                            |
            |10.0.0.51                   |10.0.0.52                   |10.0.0.53 
+-----------+-----------+    +-----------+-----------+    +-----------+-----------+
|   [node01.srv.world]  |    |   [node02.srv.world]  |    |   [node03.srv.world]  |
|     Object Storage    +----+     Object Storage    +----+     Object Storage    |
|     Monitor Daemon    |    |                       |    |                       |
|     Manager Daemon    |    |                       |    |                       |
+-----------------------+    +-----------------------+    +-----------------------+

 
For example, mount CephFS as a filesystem on a Client Host.
[1] Transfer the SSH public key to the Client Host and configure it from the Admin Node.
# transfer public key

[root@node01 ~]#
ssh-copy-id dlp

# install required packages

[root@node01 ~]#
ssh dlp "dnf -y install centos-release-ceph-quincy; dnf -y install ceph-fuse"
# transfer required files to Client Host

[root@node01 ~]#
scp /etc/ceph/ceph.conf dlp:/etc/ceph/

ceph.conf                                     100%  195    98.1KB/s   00:00
[root@node01 ~]#
scp /etc/ceph/ceph.client.admin.keyring dlp:/etc/ceph/

ceph.client.admin.keyring                     100%  151    71.5KB/s   00:00
[root@node01 ~]#
ssh dlp "chown ceph. /etc/ceph/ceph.*"

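Optionally, verify from the Admin Node that the Client Host can reach the cluster with the copied keyring. This is a minimal sanity check, assuming the [ceph] command is available on [dlp] (it is provided by the [ceph-common] package; install it there first if it is missing).
# check cluster status from the Client Host

[root@node01 ~]#
ssh dlp "ceph -s"
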
[2] Configure the MDS (Metadata Server) on a Node.
It is configured on the [node01] Node in this example.
# create directory
# directory name ⇒ (Cluster Name)-(Node Name)

[root@node01 ~]#
mkdir -p /var/lib/ceph/mds/ceph-node01

[root@node01 ~]#
ceph-authtool --create-keyring /var/lib/ceph/mds/ceph-node01/keyring --gen-key -n mds.node01

creating /var/lib/ceph/mds/ceph-node01/keyring
[root@node01 ~]#
chown -R ceph. /var/lib/ceph/mds/ceph-node01

[root@node01 ~]#
ceph auth add mds.node01 osd "allow rwx" mds "allow" mon "allow profile mds" -i /var/lib/ceph/mds/ceph-node01/keyring

added key for mds.node01
[root@node01 ~]#
systemctl enable --now ceph-mds@node01

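Optionally, confirm the MDS daemon started before proceeding; a simple check on the same Node:
# verify the MDS service is active

[root@node01 ~]#
systemctl status ceph-mds@node01 --no-pager
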
[3] Create 2 RADOS pools for Data and Metadata on the MDS Node.
Refer to the official documents to specify the placement group count at the end of the command (32 in the example below)
⇒ http://docs.ceph.com/docs/master/rados/operations/placement-groups/
[root@node01 ~]#
ceph osd pool create cephfs_data 32

pool 'cephfs_data' created
[root@node01 ~]#
ceph osd pool create cephfs_metadata 32

pool 'cephfs_metadata' created
[root@node01 ~]#
ceph fs new cephfs cephfs_metadata cephfs_data

new fs with metadata pool 4 and data pool 3
[root@node01 ~]#
ceph fs ls

name: cephfs, metadata pool: cephfs_metadata, data pools: [cephfs_data ]
[root@node01 ~]#
ceph mds stat

cephfs:1 {0=node01=up:active}
[root@node01 ~]#
ceph fs status cephfs

cephfs - 0 clients
======
RANK  STATE    MDS       ACTIVITY     DNS    INOS   DIRS   CAPS
 0    active  node01  Reqs:    0 /s    10     13     12      0
      POOL         TYPE     USED  AVAIL
cephfs_metadata  metadata  96.0k   151G
  cephfs_data      data       0    151G
MDS version: ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)
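If you would rather not pick placement group counts by hand, the [pg_autoscaler] manager module (enabled by default on recent releases) can manage them. A hedged sketch, using the pool names created above:
# show autoscaler recommendations for all pools

[root@node01 ~]#
ceph osd pool autoscale-status
# let the autoscaler manage pg_num for a pool

[root@node01 ~]#
ceph osd pool set cephfs_data pg_autoscale_mode on
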
[4] Mount CephFS on a Client Host.
# extract the Base64-encoded client key

[root@dlp ~]#
ceph-authtool -p /etc/ceph/ceph.client.admin.keyring > admin.key

[root@dlp ~]#
chmod 600 admin.key
[root@dlp ~]#
mount -t ceph node01.srv.world:6789:/ /mnt -o name=admin,secretfile=admin.key

[root@dlp ~]#
df -hT

Filesystem          Type      Size  Used Avail Use% Mounted on
devtmpfs            devtmpfs  1.8G     0  1.8G   0% /dev
tmpfs               tmpfs     1.9G     0  1.9G   0% /dev/shm
tmpfs               tmpfs     744M  8.6M  736M   2% /run
/dev/mapper/cs-root xfs        26G  2.1G   24G   8% /
/dev/vda1           xfs      1014M  370M  645M  37% /boot
tmpfs               tmpfs     372M     0  372M   0% /run/user/0
10.0.0.51:6789:/    ceph      152G     0  152G   0% /mnt
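To mount the filesystem automatically at boot, an [/etc/fstab] entry can be added on the Client Host. This is a minimal sketch, assuming [admin.key] was left under [/root] as created above:
# /etc/fstab (example entry)
node01.srv.world:6789:/    /mnt    ceph    name=admin,secretfile=/root/admin.key,noatime,_netdev    0 0

Alternatively, because [ceph-fuse] was installed in [1], the FUSE client can mount the same filesystem with [ceph-fuse /mnt], reading the configuration and keyring under [/etc/ceph] by default.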
[5] To delete the CephFS or Pools you created, run commands as follows.
To delete Pools, [mon allow pool delete = true] must be set in the [Monitor Daemon] configuration, as shown in the example below.
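A hedged example of enabling it at runtime (the same setting can also be written to [ceph.conf] under the [mon] section and the Monitor Daemons restarted):
# allow pool deletion on the Monitor Daemons

[root@node01 ~]#
ceph config set mon mon_allow_pool_delete true
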
# stop MDS

[root@node01 ~]#
systemctl stop ceph-mds@node01
# delete CephFS

[root@node01 ~]#
ceph fs rm cephfs --yes-i-really-mean-it
# delete pools
# ceph osd pool delete [Pool Name] [Pool Name] ***

[root@node01 ~]#
ceph osd pool delete cephfs_data cephfs_data --yes-i-really-really-mean-it

[root@node01 ~]#
ceph osd pool delete cephfs_metadata cephfs_metadata --yes-i-really-really-mean-it

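To confirm the pools were removed, list the remaining pools; a quick check:
# list remaining pools

[root@node01 ~]#
ceph osd pool ls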