CentOS Stream 8

Ceph Nautilus : Configure Ceph Cluster #2 (2021/03/31)

 
Install the distributed storage system Ceph and configure a storage cluster.
In this example, the Ceph cluster is configured with the 3 nodes shown below.
Furthermore, each storage node has a free block device to use for the Ceph OSDs
([/dev/sdb] in this example; an optional check of this device is sketched just after the diagram).
                                         |
            +----------------------------+----------------------------+
            |                            |                            |
            |10.0.0.51                   |10.0.0.52                   |10.0.0.53 
+-----------+-----------+    +-----------+-----------+    +-----------+-----------+
|   [node01.srv.world]  |    |   [node02.srv.world]  |    |   [node03.srv.world]  |
|     Object Storage    +----+     Object Storage    +----+     Object Storage    |
|     Monitor Daemon    |    |                       |    |                       |
|     Manager Daemon    |    |                       |    |                       |
+-----------------------+    +-----------------------+    +-----------------------+
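As an optional check (not part of the original steps), you can confirm that [/dev/sdb] exists and carries no data on every node before creating OSDs. This sketch assumes passwordless SSH from node01 to the other nodes is already configured, as it is for the loops used below.

[root@node01 ~]# for NODE in node01 node02 node03
do
    ssh $NODE "lsblk /dev/sdb"
done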

[1]
[2] Configure the OSD (Object Storage Device) on each Node from the Admin Node.
The block device ([/dev/sdb] in this example) is formatted for the OSD, so be careful if it contains any existing data.
# if Firewalld is running on each Node, allow the Ceph service ports

[root@node01 ~]# for NODE in node01 node02 node03
do
    ssh $NODE "firewall-cmd --add-service=ceph --permanent; firewall-cmd --reload"
done 
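To verify that the [ceph] service was actually opened (an optional check, not in the original steps), you can list the allowed services on each node:

[root@node01 ~]# for NODE in node01 node02 node03
do
    ssh $NODE "firewall-cmd --list-services"
done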

# configure settings for the OSD on each Node

[root@node01 ~]# for NODE in node01 node02 node03
do
    if [ ! ${NODE} = "node01" ]
    then
        scp /etc/ceph/ceph.conf ${NODE}:/etc/ceph/ceph.conf
        scp /etc/ceph/ceph.client.admin.keyring ${NODE}:/etc/ceph
        scp /var/lib/ceph/bootstrap-osd/ceph.keyring ${NODE}:/var/lib/ceph/bootstrap-osd
    fi
    ssh $NODE \
    "chown ceph. /etc/ceph/ceph.* /var/lib/ceph/bootstrap-osd/*; \
    parted --script /dev/sdb 'mklabel gpt'; \
    parted --script /dev/sdb "mkpart primary 0% 100%"; \
    ceph-volume lvm create --data /dev/sdb1"
done 

Running command: /usr/bin/ceph-authtool --gen-print-key
Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new a6afcb9a-7f30-462c-99b3-198cc558c9a5
Running command: /usr/sbin/vgcreate --force --yes ceph-43348ac0-e899-409c-b39d-e81f8fd451f5 /dev/sdb1
 stdout: Wiping xfs signature on /dev/sdb1.
 stdout: Physical volume "/dev/sdb1" successfully created.
 stdout: Volume group "ceph-43348ac0-e899-409c-b39d-e81f8fd451f5" successfully created
Running command: /usr/sbin/lvcreate --yes -l 20479 -n osd-block-a6afcb9a-7f30-462c-99b3-198cc558c9a5 ceph-43348ac0-e899-409c-b39d-e81f8fd451f5
 stdout: Logical volume "osd-block-a6afcb9a-7f30-462c-99b3-198cc558c9a5" created.
Running command: /usr/bin/ceph-authtool --gen-print-key
Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-0
Running command: /usr/sbin/restorecon /var/lib/ceph/osd/ceph-0
Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-43348ac0-e899-409c-b39d-e81f8fd451f5/osd-block-a6afcb9a-7f30-462c-99b3-198cc558c9a5
Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2
Running command: /usr/bin/ln -s /dev/ceph-43348ac0-e899-409c-b39d-e81f8fd451f5/osd-block-a6afcb9a-7f30-462c-99b3-198cc558c9a5 /var/lib/ceph/osd/ceph-0/block
Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-0/activate.monmap
 stderr: got monmap epoch 2
Running command: /usr/bin/ceph-authtool /var/lib/ceph/osd/ceph-0/keyring --create-keyring --name osd.0 --add-key AQA6x2NgP/1JFRAASYRoKTRN3Ii3RTN6+pbs7A==
 stdout: creating /var/lib/ceph/osd/ceph-0/keyring
added entity osd.0 auth(key=AQA6x2NgP/1JFRAASYRoKTRN3Ii3RTN6+pbs7A==)
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/keyring
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/
Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 0 --monmap /var/lib/ceph/osd/ceph-0/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-0/ --osd-uuid a6afcb9a-7f30-462c-99b3-198cc558c9a5 --setuser ceph --setgroup ceph
 stderr: 2021-03-31 09:50:03.719 7f951092ddc0 -1 bluestore(/var/lib/ceph/osd/ceph-0/) _read_fsid unparsable uuid
--> ceph-volume lvm prepare successful for: /dev/sdb1
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0
Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-43348ac0-e899-409c-b39d-e81f8fd451f5/osd-block-a6afcb9a-7f30-462c-99b3-198cc558c9a5 --path /var/lib/ceph/osd/ceph-0 --no-mon-config
Running command: /usr/bin/ln -snf /dev/ceph-43348ac0-e899-409c-b39d-e81f8fd451f5/osd-block-a6afcb9a-7f30-462c-99b3-198cc558c9a5 /var/lib/ceph/osd/ceph-0/block
Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-0/block
Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0
Running command: /usr/bin/systemctl enable ceph-volume@lvm-0-a6afcb9a-7f30-462c-99b3-198cc558c9a5
 stderr: Created symlink /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-0-a6afcb9a-7f30-462c-99b3-198cc558c9a5.service -> /usr/lib/systemd/system/ceph-volume@.service.
Running command: /usr/bin/systemctl enable --runtime ceph-osd@0
 stderr: Created symlink /run/systemd/system/ceph-osd.target.wants/ceph-osd@0.service -> /usr/lib/systemd/system/ceph-osd@.service.
Running command: /usr/bin/systemctl start ceph-osd@0
--> ceph-volume lvm activate successful for osd ID: 0
--> ceph-volume lvm create successful for: /dev/sdb1
ceph.conf                                     100%  273    60.0KB/s   00:00
ceph.client.admin.keyring                     100%  151   177.3KB/s   00:00
ceph.keyring                                  100%  129    69.6KB/s   00:00
.....
.....
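As an optional verification (not part of the original steps), [ceph-volume lvm list] on each node shows the logical volume and OSD ID that were just created from [/dev/sdb1]:

[root@node01 ~]# for NODE in node01 node02 node03
do
    ssh $NODE "ceph-volume lvm list"
done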

# confirm cluster status

# it's OK if the health status shows [HEALTH_OK]

[root@node01 ~]# ceph -s

  cluster:
    id:     ea021e0d-d3ac-45b0-b4b6-c1174aa3966b
    health: HEALTH_OK

  services:
    mon: 1 daemons, quorum node01 (age 12m)
    mgr: node01(active, since 8m)
    osd: 3 osds: 3 up (since 2m), 3 in (since 2m)

  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   3.0 GiB used, 237 GiB / 240 GiB avail
    pgs:
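If the health status is not [HEALTH_OK], [ceph health detail] prints the reason for each warning or error (shown here only as a possible troubleshooting step, not as output from this cluster):

[root@node01 ~]# ceph health detail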

# confirm OSD tree

[root@node01 ~]# ceph osd tree

ID CLASS WEIGHT  TYPE NAME       STATUS REWEIGHT PRI-AFF
-1       0.23428 root default
-3       0.07809     host node01
 0   hdd 0.07809         osd.0       up  1.00000 1.00000
-5       0.07809     host node02
 1   hdd 0.07809         osd.1       up  1.00000 1.00000
-7       0.07809     host node03
 2   hdd 0.07809         osd.2       up  1.00000 1.00000

[root@node01 ~]# ceph df 
RAW STORAGE:
    CLASS     SIZE        AVAIL       USED        RAW USED     %RAW USED
    hdd       240 GiB     237 GiB     4.7 MiB      3.0 GiB          1.25
    TOTAL     240 GiB     237 GiB     4.7 MiB      3.0 GiB          1.25

POOLS:
    POOL     ID     PGS     STORED     OBJECTS     USED     %USED     MAX AVAIL
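To see the same usage broken down per OSD (an optional extra, not in the original steps), [ceph osd df] also works:

[root@node01 ~]# ceph osd df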