CentOS Stream 8
Sponsored Link

Ceph Quincy : Configure Ceph Cluster #1 (2022/06/14)

Install Distributed File System Ceph to Configure Storage Cluster.
For example, here we configure a Ceph Cluster with 3 Nodes as follows.
Furthermore, each Storage Node has a free block device to use on Ceph Nodes.
(use [/dev/sdb] on this example)
            |                            |                            |
            |                   |                   | 
+-----------+-----------+    +-----------+-----------+    +-----------+-----------+
|   [node01.srv.world]  |    |   [node02.srv.world]  |    |   [node03.srv.world]  |
|     Object Storage    +----+     Object Storage    +----+     Object Storage    |
|     Monitor Daemon    |    |                       |    |                       |
|     Manager Daemon    |    |                       |    |                       |
+-----------------------+    +-----------------------+    +-----------------------+

[1] Generate SSH key-pair on [Monitor Daemon] Node (call it Admin Node on here) and set it to each Node.
Configure key-pair with no-passphrase as [root] account on here.
If you use a common account, it also needs to configure Sudo.
If you set a passphrase on the SSH key-pair, you also need to set up an SSH Agent.
[root@node01 ~]#
ssh-keygen -q -N ""

Enter file in which to save the key (/root/.ssh/id_rsa):
[root@node01 ~]#
vi ~/.ssh/config
# create new (define each Node and SSH user)

Host node01
    Hostname node01.srv.world
    User root
Host node02
    Hostname node02.srv.world
    User root
Host node03
    Hostname node03.srv.world
    User root

[root@node01 ~]#
chmod 600 ~/.ssh/config
# transfer public key

[root@node01 ~]#
ssh-copy-id node01

root@node01.srv.world's password:

Number of key(s) added: 1

Now try logging into the machine, with:   "ssh 'node01'"
and check to make sure that only the key(s) you wanted were added.

[root@node01 ~]#
ssh-copy-id node02

[root@node01 ~]#
ssh-copy-id node03

[2] Install Ceph to each Node from Admin Node.
[root@node01 ~]# for NODE in node01 node02 node03
    ssh $NODE "dnf -y install centos-release-ceph-quincy epel-release; dnf -y install ceph"
[3] Configure [Monitor Daemon], [Manager Daemon] on Admin Node.
[root@node01 ~]#

# create new config
# file name ⇒ (any Cluster Name).conf
# set Cluster Name [ceph] (default) on this example ⇒ [ceph.conf]

[root@node01 ~]#
vi /etc/ceph/ceph.conf
[global]
# specify cluster network for monitoring
# (example value — replace with your own network; TODO confirm against your environment)
cluster network = 10.0.0.0/24
# specify public network
public network = 10.0.0.0/24
# specify UUID generated above (e.g. with [uuidgen])
fsid = 8df33401-6131-4c08-9ceb-3894ab029257
# specify IP address of Monitor Daemon (example value — use your Admin Node's IP)
mon host = 10.0.0.51
# specify Hostname of Monitor Daemon
mon initial members = node01
osd pool default crush rule = -1

# per-monitor section: [mon.(Node name)]
[mon.node01]
# specify Hostname of Monitor Daemon
host = node01
# specify IP address of Monitor Daemon (example value — use your Admin Node's IP)
mon addr = 10.0.0.51
# allow to delete pools
mon allow pool delete = true

# generate secret key for Cluster monitoring

[root@node01 ~]#
ceph-authtool --create-keyring /etc/ceph/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'

creating /etc/ceph/ceph.mon.keyring
# generate secret key for Cluster admin

[root@node01 ~]#
ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'

creating /etc/ceph/ceph.client.admin.keyring
# generate key for bootstrap

[root@node01 ~]#
ceph-authtool --create-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring --gen-key -n client.bootstrap-osd --cap mon 'profile bootstrap-osd' --cap mgr 'allow r'

creating /var/lib/ceph/bootstrap-osd/ceph.keyring
# import generated key

[root@node01 ~]#
ceph-authtool /etc/ceph/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring

importing contents of /etc/ceph/ceph.client.admin.keyring into /etc/ceph/ceph.mon.keyring
[root@node01 ~]#
ceph-authtool /etc/ceph/ceph.mon.keyring --import-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring

importing contents of /var/lib/ceph/bootstrap-osd/ceph.keyring into /etc/ceph/ceph.mon.keyring
# generate monitor map

[root@node01 ~]#
FSID=$(grep "^fsid" /etc/ceph/ceph.conf | awk {'print $NF'})

[root@node01 ~]#
NODENAME=$(grep "^mon initial" /etc/ceph/ceph.conf | awk {'print $NF'})

[root@node01 ~]#
NODEIP=$(grep "^mon host" /etc/ceph/ceph.conf | awk {'print $NF'})

[root@node01 ~]#
monmaptool --create --add $NODENAME $NODEIP --fsid $FSID /etc/ceph/monmap

monmaptool: monmap file /etc/ceph/monmap
setting min_mon_release = octopus
monmaptool: set fsid to 8df33401-6131-4c08-9ceb-3894ab029257
monmaptool: writing epoch 0 to /etc/ceph/monmap (1 monitors)
# create a directory for Monitor Daemon
# directory name ⇒ (Cluster Name)-(Node Name)

[root@node01 ~]#
mkdir /var/lib/ceph/mon/ceph-node01
# associate the key and monmap with the Monitor Daemon
# --cluster (Cluster Name)

[root@node01 ~]#
ceph-mon --cluster ceph --mkfs -i $NODENAME --monmap /etc/ceph/monmap --keyring /etc/ceph/ceph.mon.keyring

[root@node01 ~]#
chown ceph. /etc/ceph/ceph.*

[root@node01 ~]#
chown -R ceph. /var/lib/ceph/mon/ceph-node01 /var/lib/ceph/bootstrap-osd

[root@node01 ~]#
systemctl enable --now ceph-mon@$NODENAME
# enable Messenger v2 Protocol

[root@node01 ~]#
ceph mon enable-msgr2
[root@node01 ~]#
ceph config set mon auth_allow_insecure_global_id_reclaim false
# enable Placement Groups auto scale module

[root@node01 ~]#
ceph mgr module enable pg_autoscaler
# create a directory for Manager Daemon
# directory name ⇒ (Cluster Name)-(Node Name)

[root@node01 ~]#
mkdir /var/lib/ceph/mgr/ceph-node01
# create auth key

[root@node01 ~]#
ceph auth get-or-create mgr.$NODENAME mon 'allow profile mgr' osd 'allow *' mds 'allow *'

        key = AQBf0qdiqefTBBAApu8JHt62+pQDhtnEFYKOXw==

[root@node01 ~]#
ceph auth get-or-create mgr.node01 > /etc/ceph/ceph.mgr.admin.keyring

[root@node01 ~]#
cp /etc/ceph/ceph.mgr.admin.keyring /var/lib/ceph/mgr/ceph-node01/keyring

[root@node01 ~]#
chown ceph. /etc/ceph/ceph.mgr.admin.keyring

[root@node01 ~]#
chown -R ceph. /var/lib/ceph/mgr/ceph-node01

[root@node01 ~]#
systemctl enable --now ceph-mgr@$NODENAME

[4] On Admin Node, If SELinux is enabled, change policy.
[root@node01 ~]#
vi cephmon.te
# create new

module cephmon 1.0;

require {
        type ceph_t;
        type initrc_var_run_t;
        type sudo_exec_t;
        class file { execute execute_no_trans lock map open read };
        class capability { audit_write sys_resource };
        class process setrlimit;
        class netlink_audit_socket { create nlmsg_relay };
}

#============= ceph_t ==============
allow ceph_t initrc_var_run_t:file { lock open read };
allow ceph_t self:capability { audit_write sys_resource };
allow ceph_t self:netlink_audit_socket { create nlmsg_relay };
allow ceph_t self:process setrlimit;
allow ceph_t sudo_exec_t:file map;
allow ceph_t sudo_exec_t:file { execute execute_no_trans open read };

[root@node01 ~]#
checkmodule -m -M -o cephmon.mod cephmon.te

[root@node01 ~]#
semodule_package --outfile cephmon.pp --module cephmon.mod

[root@node01 ~]#
semodule -i cephmon.pp

[5] On Admin Node, If Firewalld is running, allow service ports.
[root@node01 ~]#
firewall-cmd --add-service=ceph-mon

[root@node01 ~]#
firewall-cmd --runtime-to-permanent

[6] Confirm Cluster status. It's OK if [Monitor Daemon] and [Manager Daemon] are enabled as follows.
For OSD (Object Storage Device), Configure them on next section, so it's no problem if [HEALTH_WARN] at this point.
[root@node01 ~]#
ceph -s

    id:     8df33401-6131-4c08-9ceb-3894ab029257
    health: HEALTH_WARN
            OSD count 0 < osd_pool_default_size 3

    mon: 1 daemons, quorum node01 (age 2m)
    mgr: node01(active, since 22s)
    osd: 0 osds: 0 up, 0 in

    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
Matched Content