CentOS Stream 9

Ceph Reef : Add or Remove OSDs
2023/08/21

 
This is how to add or remove OSDs from an existing cluster.
                                         |
        +--------------------+           |           +----------------------+
        |   [dlp.srv.world]  |10.0.0.30  |  10.0.0.31|    [www.srv.world]   |
        |     Ceph Client    +-----------+-----------+        RADOSGW       |
        |                    |           |           |                      |
        +--------------------+           |           +----------------------+
            +----------------------------+----------------------------+
            |                            |                            |
            |10.0.0.51                   |10.0.0.52                   |10.0.0.53 
+-----------+-----------+    +-----------+-----------+    +-----------+-----------+
|   [node01.srv.world]  |    |   [node02.srv.world]  |    |   [node03.srv.world]  |
|     Object Storage    +----+     Object Storage    +----+     Object Storage    |
|     Monitor Daemon    |    |                       |    |                       |
|     Manager Daemon    |    |                       |    |                       |
+-----------------------+    +-----------------------+    +-----------------------+

[1] For example, add a new [node04] node as an OSD node, working from the Admin Node.
In this example, the new [node04] node uses [/dev/sdb] as the block device for the OSD.
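Optionally, first confirm that the new block device is visible on [node04] (a hedged check; [/dev/sdb] is the device assumed in this example and may differ in your environment).
# confirm the new block device on [node04]

[root@node01 ~]#
ssh node04 "lsblk /dev/sdb"
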
# transfer public key

[root@node01 ~]#
ssh-copy-id node04

# if Firewalld is running, allow service

[root@node01 ~]#
ssh node04 "firewall-cmd --add-service=ceph; firewall-cmd --runtime-to-permanent"

# install required packages

[root@node01 ~]#
ssh node04 "dnf -y install centos-release-ceph-reef epel-release; dnf -y install ceph"
# transfer required files

[root@node01 ~]#
scp /etc/ceph/ceph.conf node04:/etc/ceph/ceph.conf

[root@node01 ~]#
scp /etc/ceph/ceph.client.admin.keyring node04:/etc/ceph

[root@node01 ~]#
scp /var/lib/ceph/bootstrap-osd/ceph.keyring node04:/var/lib/ceph/bootstrap-osd
# configure OSD

[root@node01 ~]# ssh node04 \
"chown ceph:ceph /etc/ceph/ceph.* /var/lib/ceph/bootstrap-osd/*; \
parted --script /dev/sdb 'mklabel gpt'; \
parted --script /dev/sdb 'mkpart primary 0% 100%'; \
ceph-volume lvm create --data /dev/sdb1"

Running command: /usr/bin/ceph-authtool --gen-print-key
Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 854bf810-16f3-4d21-9369-4074f3e450f7
Running command: vgcreate --force --yes ceph-ef7cc8c5-43af-4cef-80b7-e4bfa8e43902 /dev/sdb1
 stdout: Physical volume "/dev/sdb1" successfully created.
 stdout: Volume group "ceph-ef7cc8c5-43af-4cef-80b7-e4bfa8e43902" successfully created
Running command: lvcreate --yes -l 40959 -n osd-block-854bf810-16f3-4d21-9369-4074f3e450f7 ceph-ef7cc8c5-43af-4cef-80b7-e4bfa8e43902
 stdout: Logical volume "osd-block-854bf810-16f3-4d21-9369-4074f3e450f7" created.

.....
.....

Running command: /usr/bin/systemctl enable --runtime ceph-osd@3
 stderr: Created symlink /run/systemd/system/ceph-osd.target.wants/ceph-osd@3.service → /usr/lib/systemd/system/ceph-osd@.service.
Running command: /usr/bin/systemctl start ceph-osd@3
--> ceph-volume lvm activate successful for osd ID: 3
--> ceph-volume lvm create successful for: /dev/sdb1

[root@node01 ~]# ceph -s 
  cluster:
    id:     f2e52449-e87b-4786-981e-1f1f58186a7c
    health: HEALTH_OK

  services:
    mon: 1 daemons, quorum node01 (age 91m)
    mgr: node01(active, since 28m)
    mds: 1/1 daemons up
    osd: 4 osds: 4 up (since 69s), 4 in (since 79s)
    rgw: 1 daemon active (1 hosts, 1 zones)

  data:
    volumes: 1/1 healthy
    pools:   8 pools, 225 pgs
    objects: 220 objects, 459 KiB
    usage:   259 MiB used, 640 GiB / 640 GiB avail
    pgs:     225 active+clean
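
You can also check how the new OSD is weighted and how much data it holds (a hedged check; the figures depend on your disk sizes).
# show per-OSD capacity and usage

[root@node01 ~]#
ceph osd df
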
[2] To remove an OSD node from an existing cluster, run the following commands.
For example, remove the [node04] node.
[root@node01 ~]#
ceph -s

  cluster:
    id:     f2e52449-e87b-4786-981e-1f1f58186a7c
    health: HEALTH_OK

  services:
    mon: 1 daemons, quorum node01 (age 91m)
    mgr: node01(active, since 28m)
    mds: 1/1 daemons up
    osd: 4 osds: 4 up (since 69s), 4 in (since 79s)
    rgw: 1 daemon active (1 hosts, 1 zones)

  data:
    volumes: 1/1 healthy
    pools:   8 pools, 225 pgs
    objects: 220 objects, 459 KiB
    usage:   259 MiB used, 640 GiB / 640 GiB avail
    pgs:     225 active+clean

[root@node01 ~]#
ceph osd tree

ID  CLASS  WEIGHT   TYPE NAME        STATUS  REWEIGHT  PRI-AFF
-1         0.62476  root default
-3         0.15619      host node01
 0    hdd  0.15619          osd.0        up   1.00000  1.00000
-5         0.15619      host node02
 1    hdd  0.15619          osd.1        up   1.00000  1.00000
-7         0.15619      host node03
 2    hdd  0.15619          osd.2        up   1.00000  1.00000
-9         0.15619      host node04
 3    hdd  0.15619          osd.3        up   1.00000  1.00000
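
If you prefer not to read the tree by hand, the OSDs under a host bucket can also be listed directly (a hedged alternative; available in recent Ceph releases).
# list the OSD IDs that belong to [node04]

[root@node01 ~]#
ceph osd crush ls node04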

# specify the OSD ID of the node you'd like to remove

[root@node01 ~]#
ceph osd out 3

marked out osd.3.
# watch the cluster status live
# after running [ceph osd out ***], rebalancing starts automatically
# to quit the live watch, press [Ctrl + c]

[root@node01 ~]#
ceph -w

  cluster:
    id:     f2e52449-e87b-4786-981e-1f1f58186a7c
    health: HEALTH_OK

  services:
    mon: 1 daemons, quorum node01 (age 92m)
    mgr: node01(active, since 29m)
    mds: 1/1 daemons up
    osd: 4 osds: 4 up (since 2m), 3 in (since 7s)
    rgw: 1 daemon active (1 hosts, 1 zones)

  data:
    volumes: 1/1 healthy
    pools:   8 pools, 225 pgs
    objects: 221 objects, 459 KiB
    usage:   226 MiB used, 480 GiB / 480 GiB avail
    pgs:     225 active+clean

  io:
    client:   539 B/s rd, 179 B/s wr, 0 op/s rd, 0 op/s wr
    recovery: 1.1 KiB/s, 1 keys/s, 39 objects/s


2023-08-21T11:45:25.777580+0900 mon.node01 [INF] Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 20 pgs peering)
2023-08-21T11:45:25.777595+0900 mon.node01 [INF] Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 16/663 objects degraded (2.413%), 16 pgs degraded)
2023-08-21T11:45:25.777599+0900 mon.node01 [INF] Cluster is now healthy
.....
.....
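
Before stopping the daemon, you can also ask the cluster whether the OSD can be removed without risking data durability (a hedged check; [safe-to-destroy] is available in recent Ceph releases).
# confirm [osd.3] is safe to remove

[root@node01 ~]#
ceph osd safe-to-destroy 3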

# after the status returns to [HEALTH_OK], disable the OSD service on the target node

[root@node01 ~]#
ssh node04 "systemctl disable --now ceph-osd@3.service"

Removed /run/systemd/system/ceph-osd.target.wants/ceph-osd@3.service.
# remove the OSD from the cluster by specifying its OSD ID

[root@node01 ~]#
ceph osd purge 3 --yes-i-really-mean-it

purged osd.3
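
After the purge, the disk on the removed node still carries the old LVM data, and the empty [node04] host bucket remains in the CRUSH map. Both can be cleaned up if the node is retired (hedged examples; the device name [/dev/sdb1] is assumed from this tutorial).
# wipe the OSD data on the removed node so the disk can be reused

[root@node01 ~]#
ssh node04 "ceph-volume lvm zap --destroy /dev/sdb1"

# remove the now-empty [node04] bucket from the CRUSH map

[root@node01 ~]#
ceph osd crush remove node04
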
[root@node01 ~]#
ceph -s

  cluster:
    id:     f2e52449-e87b-4786-981e-1f1f58186a7c
    health: HEALTH_OK

  services:
    mon: 1 daemons, quorum node01 (age 94m)
    mgr: node01(active, since 31m)
    mds: 1/1 daemons up
    osd: 3 osds: 3 up (since 8s), 3 in (since 91s)
    rgw: 1 daemon active (1 hosts, 1 zones)

  data:
    volumes: 1/1 healthy
    pools:   8 pools, 225 pgs
    objects: 220 objects, 459 KiB
    usage:   226 MiB used, 480 GiB / 480 GiB avail
    pgs:     225 active+clean