CentOS 8

Ceph Octopus : Add or Remove OSDs (2020/07/02)

 
To add or remove OSDs on an existing Ceph cluster, follow the steps below.
                                         |
        +--------------------+           |           +----------------------+
        |   [dlp.srv.world]  |10.0.0.30  |  10.0.0.31|    [www.srv.world]   |
        |     Ceph Client    +-----------+-----------+        RADOSGW       |
        |                    |           |           |                      |
        +--------------------+           |           +----------------------+
            +----------------------------+----------------------------+
            |                            |                            |
            |10.0.0.51                   |10.0.0.52                   |10.0.0.53 
+-----------+-----------+    +-----------+-----------+    +-----------+-----------+
|   [node01.srv.world]  |    |   [node02.srv.world]  |    |   [node03.srv.world]  |
|     Object Storage    +----+     Object Storage    +----+     Object Storage    |
|     Monitor Daemon    |    |                       |    |                       |
|     Manager Daemon    |    |                       |    |                       |
+-----------------------+    +-----------------------+    +-----------------------+

[1] As an example, add a new [node04] node as an OSD from the Admin Node.
The block device used for Ceph on the [node04] node is [/dev/sdb].
# transfer the public key

[root@node01 ~]# ssh-copy-id node04
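Note that [ssh-copy-id] requires an existing key pair on the admin node; if none exists yet, one can be generated first (a minimal sketch, assuming the default [~/.ssh/id_rsa] path):

# generate a key pair with an empty passphrase if none exists yet
[root@node01 ~]# ssh-keygen -q -N "" -f ~/.ssh/id_rsa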

# if Firewalld is running, allow the service

[root@node01 ~]# ssh node04 "firewall-cmd --add-service=ceph --permanent; firewall-cmd --reload"

# install the required packages

[root@node01 ~]# ssh node04 "dnf -y install centos-release-ceph-octopus epel-release; dnf -y install ceph"
# transfer the required files

[root@node01 ~]# scp /etc/ceph/ceph.conf node04:/etc/ceph/ceph.conf

[root@node01 ~]# scp /etc/ceph/ceph.client.admin.keyring node04:/etc/ceph

[root@node01 ~]# scp /var/lib/ceph/bootstrap-osd/ceph.keyring node04:/var/lib/ceph/bootstrap-osd
# configure the OSD

[root@node01 ~]# ssh node04 \
"chown ceph. /etc/ceph/ceph.* /var/lib/ceph/bootstrap-osd/*; \
parted --script /dev/sdb 'mklabel gpt'; \
parted --script /dev/sdb 'mkpart primary 0% 100%'; \
ceph-volume lvm create --data /dev/sdb1"
Running command: /usr/bin/ceph-authtool --gen-print-key
Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 2c941f07-96f7-4b51-91fd-5296a102eaa9
Running command: /usr/sbin/vgcreate --force --yes ceph-d299b0ff-e127-4de7-a0f4-bf8dffd95308 /dev/sdb1
 stdout: Physical volume "/dev/sdb1" successfully created.
 stdout: Volume group "ceph-d299b0ff-e127-4de7-a0f4-bf8dffd95308" successfully created
Running command: /usr/sbin/lvcreate --yes -l 100%FREE -n osd-block-2c941f07-96f7-4b51-91fd-5296a102eaa9 ceph-d299b0ff-e127-4de7-a0f4-bf8dffd95308
 stdout: Logical volume "osd-block-2c941f07-96f7-4b51-91fd-5296a102eaa9" created.
.....
.....
Running command: /usr/bin/systemctl start ceph-osd@3
--> ceph-volume lvm activate successful for osd ID: 3
--> ceph-volume lvm create successful for: /dev/sdb1
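
The LVM volumes that [ceph-volume] created for the new OSD can be inspected on the node (an optional check):

# list the logical volumes backing the OSDs on node04
[root@node01 ~]# ssh node04 "ceph-volume lvm list"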

[root@node01 ~]# ceph -s 
  cluster:
    id:     38bc3fbb-1752-4cb1-b16c-2e9d5d402891
    health: HEALTH_OK

  services:
    mon: 1 daemons, quorum node01 (age 62m)
    mgr: node01(active, since 23m)
    mds:  1 up:standby
    osd: 4 osds: 4 up (since 57s), 4 in (since 57s)
    rgw: 1 daemon active (www)

  task status:

  data:
    pools:   8 pools, 193 pgs
    objects: 215 objects, 34 KiB
    usage:   4.3 GiB used, 316 GiB / 320 GiB avail
    pgs:     193 active+clean

  io:
    recovery: 9 B/s, 0 objects/s
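
To see how data and placement groups are distributed across all OSDs, including the new [osd.3], per-OSD utilization can be displayed (an optional check):

# show usage, weight, and PG count per OSD
[root@node01 ~]# ceph osd df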
[2] To remove an OSD from an existing cluster, run the following.
As an example, remove the [node04] node from the Admin Node.
[root@node01 ~]# ceph -s

  cluster:
    id:     38bc3fbb-1752-4cb1-b16c-2e9d5d402891
    health: HEALTH_OK

  services:
    mon: 1 daemons, quorum node01 (age 63m)
    mgr: node01(active, since 24m)
    mds:  1 up:standby
    osd: 4 osds: 4 up (since 101s), 4 in (since 101s)
    rgw: 1 daemon active (www)

  task status:

  data:
    pools:   8 pools, 193 pgs
    objects: 215 objects, 34 KiB
    usage:   4.3 GiB used, 316 GiB / 320 GiB avail
    pgs:     193 active+clean

  io:
    client:   7.5 KiB/s rd, 0 B/s wr, 7 op/s rd, 4 op/s wr

[root@node01 ~]# ceph osd tree

ID  CLASS  WEIGHT   TYPE NAME        STATUS  REWEIGHT  PRI-AFF
-1         0.31238  root default
-3         0.07809      host node01
 0    hdd  0.07809          osd.0        up   1.00000  1.00000
-5         0.07809      host node02
 1    hdd  0.07809          osd.1        up   1.00000  1.00000
-7         0.07809      host node03
 2    hdd  0.07809          osd.2        up   1.00000  1.00000
-9         0.07809      host node04
 3    hdd  0.07809          osd.3        up   1.00000  1.00000

# specify the ID of the OSD to remove and mark it out of the cluster

[root@node01 ~]# ceph osd out 3

marked out osd.3.
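If the removal needs to be cancelled before purging, the OSD can be marked back in; note that this triggers another rebalance:

# revert the [out] state (only when cancelling the removal)
[root@node01 ~]# ceph osd in 3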
# watch the cluster status in real time

# after running [ceph osd out ***], rebalancing runs and data is relocated

# to quit the real-time watch, press [Ctrl + c]

[root@node01 ~]# ceph -w

  cluster:
    id:     38bc3fbb-1752-4cb1-b16c-2e9d5d402891
    health: HEALTH_WARN
            Reduced data availability: 17 pgs peering
            Degraded data redundancy: 154/645 objects degraded (23.876%), 33 pgs degraded

  services:
    mon: 1 daemons, quorum node01 (age 64m)
    mgr: node01(active, since 24m)
    mds:  1 up:standby
    osd: 4 osds: 4 up (since 2m), 3 in (since 6s); 20 remapped pgs
    rgw: 1 daemon active (www)

  task status:

  data:
    pools:   8 pools, 193 pgs
    objects: 215 objects, 34 KiB
    usage:   3.3 GiB used, 237 GiB / 240 GiB avail
    pgs:     10.363% pgs not active
             154/645 objects degraded (23.876%)
             8/645 objects misplaced (1.240%)
             134 active+clean
             33  active+recovery_wait+degraded
             17  remapped+peering
             5   active+recovery_wait
             3   activating+remapped
             1   active+recovering

  progress:
    Rebalancing after osd.3 marked out (1s)
      [............................]


2020-07-01T23:33:19.797172+0900 mon.node01 [WRN] Health check failed: Reduced data availability: 17 pgs peering (PG_AVAILABILITY)
2020-07-01T23:33:19.797216+0900 mon.node01 [WRN] Health check failed: Degraded data redundancy: 154/645 objects degraded (23.876%), 33 pgs degraded (PG_DEGRADED)
2020-07-01T23:33:23.651836+0900 mon.node01 [INF] Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 17 pgs peering)
2020-07-01T23:33:27.596635+0900 mon.node01 [WRN] Health check update: Degraded data redundancy: 134/645 objects degraded (20.775%), 29 pgs degraded (PG_DEGRADED)
2020-07-01T23:33:32.624610+0900 mon.node01 [WRN] Health check update: Degraded data redundancy: 101/645 objects degraded (15.659%), 22 pgs degraded (PG_DEGRADED)
2020-07-01T23:33:37.708423+0900 mon.node01 [WRN] Health check update: Degraded data redundancy: 74/645 objects degraded (11.473%), 16 pgs degraded (PG_DEGRADED)
2020-07-01T23:33:42.771092+0900 mon.node01 [WRN] Health check update: Degraded data redundancy: 47/645 objects degraded (7.287%), 10 pgs degraded (PG_DEGRADED)
2020-07-01T23:33:47.804469+0900 mon.node01 [WRN] Health check update: Degraded data redundancy: 16/645 objects degraded (2.481%), 5 pgs degraded (PG_DEGRADED)
2020-07-01T23:33:50.040981+0900 mon.node01 [INF] Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 16/645 objects degraded (2.481%), 5 pgs degraded)
2020-07-01T23:33:50.041015+0900 mon.node01 [INF] Cluster is now healthy
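Before stopping the OSD daemon, it is also possible to confirm that the OSD can be removed without reducing data durability (an optional check):

# returns OK once all of osd.3's placement groups have been recovered elsewhere
[root@node01 ~]# ceph osd safe-to-destroy 3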

# after the cluster status returns to [HEALTH_OK], disable the OSD service on the target node

[root@node01 ~]# ssh node04 "systemctl disable --now ceph-osd@3.service"

Removed /run/systemd/system/ceph-osd.target.wants/ceph-osd@3.service.
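Optionally, confirm that the daemon is no longer running on the node:

# should report [inactive]
[root@node01 ~]# ssh node04 "systemctl is-active ceph-osd@3.service"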
# specify the OSD ID of the target node and purge it from the cluster

[root@node01 ~]# ceph osd purge 3 --yes-i-really-mean-it

purged osd.3
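If [/dev/sdb] on [node04] is to be reused later, the LVM structures that [ceph-volume] left on the device can be wiped (an optional cleanup step; this destroys any remaining data on the device):

# remove the VG/LV created by ceph-volume and wipe the device
[root@node01 ~]# ssh node04 "ceph-volume lvm zap /dev/sdb --destroy"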
[root@node01 ~]# ceph -s

  cluster:
    id:     38bc3fbb-1752-4cb1-b16c-2e9d5d402891
    health: HEALTH_OK

  services:
    mon: 1 daemons, quorum node01 (age 66m)
    mgr: node01(active, since 26m)
    mds:  1 up:standby
    osd: 3 osds: 3 up (since 13s), 3 in (since 2m)
    rgw: 1 daemon active (www)

  task status:

  data:
    pools:   8 pools, 193 pgs
    objects: 215 objects, 34 KiB
    usage:   3.3 GiB used, 237 GiB / 240 GiB avail
    pgs:     193 active+clean

  io:
    client:   895 B/s rd, 0 op/s rd, 0 op/s wr
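
Finally, [ceph osd tree] can be run again to confirm that [host node04] and [osd.3] no longer appear in the CRUSH map:

# node04 should be absent from the tree
[root@node01 ~]# ceph osd tree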