CentOS Stream 8

Ceph Octopus : Add or Remove OSDs (2021/04/01)

 
This is how to add or remove OSDs from an existing Cluster.
                                         |
        +--------------------+           |           +----------------------+
        |   [dlp.srv.world]  |10.0.0.30  |  10.0.0.31|    [www.srv.world]   |
        |     Ceph Client    +-----------+-----------+        RADOSGW       |
        |                    |           |           |                      |
        +--------------------+           |           +----------------------+
            +----------------------------+----------------------------+
            |                            |                            |
            |10.0.0.51                   |10.0.0.52                   |10.0.0.53 
+-----------+-----------+    +-----------+-----------+    +-----------+-----------+
|   [node01.srv.world]  |    |   [node02.srv.world]  |    |   [node03.srv.world]  |
|     Object Storage    +----+     Object Storage    +----+     Object Storage    |
|     Monitor Daemon    |    |                       |    |                       |
|     Manager Daemon    |    |                       |    |                       |
+-----------------------+    +-----------------------+    +-----------------------+

[1] For example, add a new [node04] node as an OSD from the Admin Node.
This example uses [/dev/sdb] as the block device on the new [node04] Node.
# transfer public key

[root@node01 ~]#
ssh-copy-id node04

# if Firewalld is running, allow service

[root@node01 ~]#
ssh node04 "firewall-cmd --add-service=ceph --permanent; firewall-cmd --reload"

# install required packages

[root@node01 ~]#
ssh node04 "dnf -y install centos-release-ceph-octopus epel-release; dnf -y install ceph"
# transfer required files

[root@node01 ~]#
scp /etc/ceph/ceph.conf node04:/etc/ceph/ceph.conf

[root@node01 ~]#
scp /etc/ceph/ceph.client.admin.keyring node04:/etc/ceph

[root@node01 ~]#
scp /var/lib/ceph/bootstrap-osd/ceph.keyring node04:/var/lib/ceph/bootstrap-osd
# configure OSD

[root@node01 ~]# ssh node04 \
"chown ceph. /etc/ceph/ceph.* /var/lib/ceph/bootstrap-osd/*; \
parted --script /dev/sdb 'mklabel gpt'; \
parted --script /dev/sdb "mkpart primary 0% 100%"; \
ceph-volume lvm create --data /dev/sdb1" 

Running command: /usr/bin/ceph-authtool --gen-print-key
Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 6a55c7be-d8b8-4e12-b4d1-193a3c521c43
Running command: /usr/sbin/vgcreate --force --yes ceph-7f4c909a-11d9-4648-9725-3b28f6c83f28 /dev/sdb1
 stdout: Physical volume "/dev/sdb1" successfully created.
 stdout: Volume group "ceph-7f4c909a-11d9-4648-9725-3b28f6c83f28" successfully created
Running command: /usr/sbin/lvcreate --yes -l 20479 -n osd-block-6a55c7be-d8b8-4e12-b4d1-193a3c521c43 ceph-7f4c909a-11d9-4648-9725-3b28f6c83f28
 stdout: Logical volume "osd-block-6a55c7be-d8b8-4e12-b4d1-193a3c521c43" created.

.....
.....

Running command: /usr/bin/systemctl start ceph-osd@3
--> ceph-volume lvm activate successful for osd ID: 3
--> ceph-volume lvm create successful for: /dev/sdb1

[root@node01 ~]# ceph -s 
  cluster:
    id:     7def6ab9-42d6-4385-af46-79ba8ccefdcd
    health: HEALTH_OK

  services:
    mon: 1 daemons, quorum node01 (age 97m)
    mgr: node01(active, since 26m)
    mds: cephfs:1 {0=node01=up:active}
    osd: 4 osds: 4 up (since 90s), 4 in (since 90s)
    rgw: 1 daemon active (www)

  task status:

  data:
    pools:   4 pools, 113 pgs
    objects: 26 objects, 51 KiB
    usage:   4.3 GiB used, 316 GiB / 320 GiB avail
    pgs:     113 active+clean
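
# (optional) if you'd like to confirm where the new OSD landed in the CRUSH map, [ceph osd tree] should show [osd.3]
# under a new [host node04] bucket (the same output is shown at the beginning of the next step)

[root@node01 ~]#
ceph osd tree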
[2] To remove an OSD Node from an existing Cluster, run the commands as follows.
For example, remove the [node04] node.
[root@node01 ~]#
ceph -s

  cluster:
    id:     7def6ab9-42d6-4385-af46-79ba8ccefdcd
    health: HEALTH_OK

  services:
    mon: 1 daemons, quorum node01 (age 98m)
    mgr: node01(active, since 27m)
    mds: cephfs:1 {0=node01=up:active}
    osd: 4 osds: 4 up (since 2m), 4 in (since 2m)
    rgw: 1 daemon active (www)

  task status:

  data:
    pools:   4 pools, 113 pgs
    objects: 26 objects, 51 KiB
    usage:   4.3 GiB used, 316 GiB / 320 GiB avail
    pgs:     113 active+clean

[root@node01 ~]#
ceph osd tree

ID  CLASS  WEIGHT   TYPE NAME        STATUS  REWEIGHT  PRI-AFF
-1         0.31238  root default
-3         0.07809      host node01
 0    hdd  0.07809          osd.0        up   1.00000  1.00000
-5         0.07809      host node02
 1    hdd  0.07809          osd.1        up   1.00000  1.00000
-7         0.07809      host node03
 2    hdd  0.07809          osd.2        up   1.00000  1.00000
-9         0.07809      host node04
 3    hdd  0.07809          osd.3        up   1.00000  1.00000

# specify the OSD ID of the node you'd like to remove

[root@node01 ~]#
ceph osd out 3

marked out osd.3.
# live watch cluster status

# after running [ceph osd out ***], rebalancing is executed automatically

# to quit live watch, press [Ctrl + c]

[root@node01 ~]#
ceph -w

  cluster:
    id:     7def6ab9-42d6-4385-af46-79ba8ccefdcd
    health: HEALTH_OK

  services:
    mon: 1 daemons, quorum node01 (age 99m)
    mgr: node01(active, since 28m)
    mds: cephfs:1 {0=node01=up:active}
    osd: 4 osds: 4 up (since 2m), 3 in (since 4s); 8 remapped pgs
    rgw: 1 daemon active (www)

  task status:

  data:
    pools:   4 pools, 113 pgs
    objects: 26 objects, 51 KiB
    usage:   3.2 GiB used, 237 GiB / 240 GiB avail
    pgs:     7.080% pgs not active
             16/78 objects misplaced (20.513%)
             104 active+clean
             8   activating+remapped
             1   active+recovering

  io:
    recovery: 127 B/s, 0 objects/s

  progress:
    Rebalancing after osd.3 marked out (0s)
      [............................]

# after the status returns to [HEALTH_OK], disable the OSD service on the target node

[root@node01 ~]#
ssh node04 "systemctl disable --now ceph-osd@3.service"

Removed /run/systemd/system/ceph-osd.target.wants/ceph-osd@3.service.
# remove the OSD by specifying the target OSD ID

[root@node01 ~]#
ceph osd purge 3 --yes-i-really-mean-it

purged osd.3
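
# (optional) if you plan to reuse the disk on [node04], you can wipe the LVM volume that [ceph-volume] created on it
# this is destructive and assumes [/dev/sdb] from this example; double-check the device name first

[root@node01 ~]#
ssh node04 "ceph-volume lvm zap --destroy /dev/sdb"
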
[root@node01 ~]#
ceph -s

  cluster:
    id:     7def6ab9-42d6-4385-af46-79ba8ccefdcd
    health: HEALTH_OK

  services:
    mon: 1 daemons, quorum node01 (age 101m)
    mgr: node01(active, since 30m)
    mds: cephfs:1 {0=node01=up:active}
    osd: 3 osds: 3 up (since 46s), 3 in (since 2m)
    rgw: 1 daemon active (www)

  task status:

  data:
    pools:   4 pools, 113 pgs
    objects: 26 objects, 51 KiB
    usage:   3.2 GiB used, 237 GiB / 240 GiB avail
    pgs:     113 active+clean
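
# (optional) [ceph osd purge] removes the OSD itself, but the now-empty [node04] host bucket may still appear in the CRUSH map
# if you'd like to remove it as well, the following should do it

[root@node01 ~]#
ceph osd crush remove node04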