CentOS Stream 9

Ceph Tentacle : Add or Remove OSDs (2025/09/25)

 

To add or remove OSDs on an existing Ceph cluster, configure it as follows.

                                         |
        +--------------------+           |           +----------------------+
        |   [dlp.srv.world]  |10.0.0.30  |  10.0.0.31|    [www.srv.world]   |
        |     Ceph Client    +-----------+-----------+        RADOSGW       |
        |                    |           |           |                      |
        +--------------------+           |           +----------------------+
            +----------------------------+----------------------------+
            |                            |                            |
            |10.0.0.51                   |10.0.0.52                   |10.0.0.53 
+-----------+-----------+    +-----------+-----------+    +-----------+-----------+
|   [node01.srv.world]  |    |   [node02.srv.world]  |    |   [node03.srv.world]  |
|     Object Storage    +----+     Object Storage    +----+     Object Storage    |
|     Monitor Daemon    |    |                       |    |                       |
|     Manager Daemon    |    |                       |    |                       |
+-----------------------+    +-----------------------+    +-----------------------+

[1] As an example, add a new OSD on the [node04] node from the admin node.
The block device configured for Ceph on the [node04] node is [/dev/sdb].
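Before starting, it is worth confirming that [/dev/sdb] on [node04] is an unused disk with no existing partitions or filesystem signatures. This is an optional check run directly on the [node04] node; adjust the device name to your environment.

[root@node04 ~]#
lsblk -f /dev/sdb

# wipefs with no options only lists signatures, it does not erase anything
[root@node04 ~]#
wipefs /dev/sdb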
# transfer the SSH public key

[root@node01 ~]#
ssh-copy-id node04

# if Firewalld is running, allow the Ceph service

[root@node01 ~]#
ssh node04 "firewall-cmd --add-service=ceph; firewall-cmd --runtime-to-permanent"

# install the required packages

[root@node01 ~]#
ssh node04 "dnf -y install centos-release-ceph-tentacle epel-release; dnf --enablerepo=crb -y install ceph"
# transfer the required files

[root@node01 ~]#
scp /etc/ceph/ceph.conf node04:/etc/ceph/ceph.conf

[root@node01 ~]#
scp /etc/ceph/ceph.client.admin.keyring node04:/etc/ceph

[root@node01 ~]#
scp /var/lib/ceph/bootstrap-osd/ceph.keyring node04:/var/lib/ceph/bootstrap-osd
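If you want to verify that the configuration file and keyrings arrived on [node04] at the expected paths, list them (optional):

[root@node01 ~]#
ssh node04 "ls -l /etc/ceph /var/lib/ceph/bootstrap-osd"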
# configure the OSD

[root@node01 ~]# ssh node04 \
"chown -R ceph:ceph /etc/ceph/ceph.* /var/lib/ceph; \
parted --script /dev/sdb 'mklabel gpt'; \
parted --script /dev/sdb "mkpart primary 0% 100%"; \
ceph-volume lvm create --data /dev/sdb1" 

Running command: /usr/bin/ceph-authtool --gen-print-key
Running command: /usr/bin/ceph-authtool --gen-print-key
Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 30c601fb-642f-491d-b4d1-de14b5b69a4c
Running command: vgcreate --force --yes ceph-2672e5b1-7951-4b81-8471-66182e74d79e /dev/sdb1
 stdout: Physical volume "/dev/sdb1" successfully created.
 stdout: Volume group "ceph-2672e5b1-7951-4b81-8471-66182e74d79e" successfully created
Running command: lvcreate --yes -l 40959 -n osd-block-30c601fb-642f-491d-b4d1-de14b5b69a4c ceph-2672e5b1-7951-4b81-8471-66182e74d79e
 stdout: Logical volume "osd-block-30c601fb-642f-491d-b4d1-de14b5b69a4c" created.
Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-3
Running command: /usr/sbin/restorecon /var/lib/ceph/osd/ceph-3
Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-2672e5b1-7951-4b81-8471-66182e74d79e/osd-block-30c601fb-642f-491d-b4d1-de14b5b69a4c
Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2
Running command: /usr/bin/ln -s /dev/ceph-2672e5b1-7951-4b81-8471-66182e74d79e/osd-block-30c601fb-642f-491d-b4d1-de14b5b69a4c /var/lib/ceph/osd/ceph-3/block
Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-3/activate.monmap
 stderr: got monmap epoch 2
--> Creating keyring file for osd.3
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3/keyring
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3/
Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 3 --monmap /var/lib/ceph/osd/ceph-3/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-3/ --osd-uuid 30c601fb-642f-491d-b4d1-de14b5b69a4c --setuser ceph --setgroup ceph
 stderr: 2025-09-25T11:05:36.100+0900 7f8fef5d4900 -1 bluestore(/var/lib/ceph/osd/ceph-3//block) No valid bdev label found
 stderr: 2025-09-25T11:05:36.116+0900 7f8fef5d4900 -1 bluestore(/var/lib/ceph/osd/ceph-3/) _read_fsid unparsable uuid
--> ceph-volume lvm prepare successful for: /dev/sdb1
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3
Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-2672e5b1-7951-4b81-8471-66182e74d79e/osd-block-30c601fb-642f-491d-b4d1-de14b5b69a4c --path /var/lib/ceph/osd/ceph-3 --no-mon-config
Running command: /usr/bin/ln -snf /dev/ceph-2672e5b1-7951-4b81-8471-66182e74d79e/osd-block-30c601fb-642f-491d-b4d1-de14b5b69a4c /var/lib/ceph/osd/ceph-3/block
Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-3/block
Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3
Running command: /usr/bin/systemctl enable ceph-volume@lvm-3-30c601fb-642f-491d-b4d1-de14b5b69a4c
 stderr: Created symlink /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-3-30c601fb-642f-491d-b4d1-de14b5b69a4c.service → /usr/lib/systemd/system/ceph-volume@.service.
Running command: /usr/bin/systemctl enable --runtime ceph-osd@3
 stderr: Created symlink /run/systemd/system/ceph-osd.target.wants/ceph-osd@3.service → /usr/lib/systemd/system/ceph-osd@.service.
Running command: /usr/bin/systemctl start ceph-osd@3
--> ceph-volume lvm activate successful for osd ID: 3
--> ceph-volume lvm create successful for: /dev/sdb1

[root@node01 ~]# ceph -s 
  cluster:
    id:     4b46d240-7b97-412e-b5b6-8c58e8d25835
    health: HEALTH_OK

  services:
    mon: 1 daemons, quorum node01 (age 68m) [leader: node01]
    mgr: node01(active, since 21m)
    mds: 1/1 daemons up
    osd: 4 osds: 4 up (since 78s), 4 in (since 85s)
    rgw: 1 daemon active (1 hosts, 1 zones)

  data:
    volumes: 1/1 healthy
    pools:   8 pools, 321 pgs
    objects: 252 objects, 464 KiB
    usage:   236 MiB used, 640 GiB / 640 GiB avail
    pgs:     321 active+clean
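To confirm where the new OSD landed in the CRUSH hierarchy and how much capacity it contributes, [ceph osd tree] and [ceph osd df] are useful as well (output omitted here):

[root@node01 ~]#
ceph osd tree

[root@node01 ~]#
ceph osd df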
[2] To remove an OSD from an existing cluster, run the following.
As an example, remove the [node04] node from the admin node.
[root@node01 ~]#
ceph -s

  cluster:
    id:     4b46d240-7b97-412e-b5b6-8c58e8d25835
    health: HEALTH_OK

  services:
    mon: 1 daemons, quorum node01 (age 68m) [leader: node01]
    mgr: node01(active, since 21m)
    mds: 1/1 daemons up
    osd: 4 osds: 4 up (since 78s), 4 in (since 85s)
    rgw: 1 daemon active (1 hosts, 1 zones)

  data:
    volumes: 1/1 healthy
    pools:   8 pools, 321 pgs
    objects: 252 objects, 464 KiB
    usage:   236 MiB used, 640 GiB / 640 GiB avail
    pgs:     321 active+clean

[root@node01 ~]#
ceph osd tree

ID  CLASS  WEIGHT   TYPE NAME        STATUS  REWEIGHT  PRI-AFF
-1         0.62476  root default
-3         0.15619      host node01
 0    hdd  0.15619          osd.0        up   1.00000  1.00000
-5         0.15619      host node02
 1    hdd  0.15619          osd.1        up   1.00000  1.00000
-7         0.15619      host node03
 2    hdd  0.15619          osd.2        up   1.00000  1.00000
-9         0.15619      host node04
 3    hdd  0.15619          osd.3        up   1.00000  1.00000

# mark the OSD out of the cluster by specifying the ID of the OSD you want to remove

[root@node01 ~]#
ceph osd out 3

marked out osd.3.
# watch the cluster status in real time
# after running [ceph osd out ***], rebalancing starts and data is relocated
# press [Ctrl + c] to quit the real-time watch

[root@node01 ~]#
ceph -w

  cluster:
    id:     4b46d240-7b97-412e-b5b6-8c58e8d25835
    health: HEALTH_WARN
            too many PGs per OSD (321 > max 250)

  services:
    mon: 1 daemons, quorum node01 (age 69m) [leader: node01]
    mgr: node01(active, since 23m)
    mds: 1/1 daemons up
    osd: 4 osds: 4 up (since 2m), 3 in (since 7s)
    rgw: 1 daemon active (1 hosts, 1 zones)

  data:
    volumes: 1/1 healthy
    pools:   8 pools, 321 pgs
    objects: 255 objects, 464 KiB
    usage:   191 MiB used, 480 GiB / 480 GiB avail
    pgs:     321 active+clean

  io:
    recovery: 96 KiB/s, 2 keys/s, 44 objects/s
.....
.....
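
Instead of watching interactively, you can also poll [ceph osd safe-to-destroy] until it reports that osd.3 can be removed without risking data (an optional alternative to [ceph -w]):

[root@node01 ~]#
while ! ceph osd safe-to-destroy 3; do sleep 10; done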

# after the cluster status is back to [HEALTH_OK], disable the OSD service on the target node

[root@node01 ~]#
ssh node04 "systemctl disable --now ceph-osd@3.service"

Removed /run/systemd/system/ceph-osd.target.wants/ceph-osd@3.service.
# remove it from the cluster by specifying the OSD ID on the target node

[root@node01 ~]#
ceph osd purge 3 --yes-i-really-mean-it

purged osd.3
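If the disk on [node04] will be reused, also remove the LVM volume group and logical volume that ceph-volume created on it and wipe the device. Note that [ceph-volume lvm zap --destroy] erases everything on [/dev/sdb]:

[root@node01 ~]#
ssh node04 "ceph-volume lvm zap /dev/sdb --destroy"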
[root@node01 ~]#
ceph -s

  cluster:
    id:     4b46d240-7b97-412e-b5b6-8c58e8d25835
    health: HEALTH_OK

  services:
    mon: 1 daemons, quorum node01 (age 73m) [leader: node01]
    mgr: node01(active, since 27m)
    osd: 3 osds: 3 up (since 71s), 3 in (since 4m)
    rgw: 1 daemon active (1 hosts, 1 zones)

  data:
    pools:   6 pools, 161 pgs
    objects: 231 objects, 459 KiB
    usage:   191 MiB used, 480 GiB / 480 GiB avail
    pgs:     161 active+clean
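Because osd.3 was the only OSD on [node04], an empty [node04] host bucket may still remain in the CRUSH map. If it is no longer needed, it can be removed as well (optional; confirm with [ceph osd tree] first):

[root@node01 ~]#
ceph osd tree

[root@node01 ~]#
ceph osd crush remove node04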