Debian 11 Bullseye

Ceph Nautilus : Add or Remove Monitor Nodes
2021/08/26

 
To add or remove Monitor nodes in an existing Ceph cluster, configure it as follows.
                                         |
        +--------------------+           |           +----------------------+
        |   [dlp.srv.world]  |10.0.0.30  |  10.0.0.31|    [www.srv.world]   |
        |     Ceph Client    +-----------+-----------+        RADOSGW       |
        |                    |           |           |                      |
        +--------------------+           |           +----------------------+
            +----------------------------+----------------------------+
            |                            |                            |
            |10.0.0.51                   |10.0.0.52                   |10.0.0.53 
+-----------+-----------+    +-----------+-----------+    +-----------+-----------+
|   [node01.srv.world]  |    |   [node02.srv.world]  |    |   [node03.srv.world]  |
|     Object Storage    +----+     Object Storage    +----+     Object Storage    |
|     Monitor Daemon    |    |                       |    |                       |
|     Manager Daemon    |    |                       |    |                       |
+-----------------------+    +-----------------------+    +-----------------------+

[1] As an example, add a new Monitor Daemon to the [node04] node from the admin node.
# transfer the public key

root@node01:~#
ssh-copy-id node04

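If several Monitor nodes are being added at once, the key can be distributed in a loop instead. This is an optional sketch; [node05] and [node06] are hypothetical hostnames used only for illustration.

# optional: distribute the key to several new nodes in one pass

root@node01:~#
for NODE in node04 node05 node06; do ssh-copy-id $NODE; done
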
# install the required packages

root@node01:~#
ssh node04 "apt update; apt -y install ceph"
# configure the monitor map

root@node01:~#
FSID=$(grep "^fsid" /etc/ceph/ceph.conf | awk '{print $NF}')

root@node01:~#
NODENAME="node04"

root@node01:~#
NODEIP="10.0.0.54"

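Since an empty FSID or a wrong IP address would produce an unusable monitor map, it is worth echoing the variables before writing the map.

# sanity check: all three values should be non-empty and correct

root@node01:~#
echo $FSID $NODENAME $NODEIP
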
root@node01:~#
monmaptool --add $NODENAME $NODEIP --fsid $FSID /etc/ceph/monmap

monmaptool: monmap file /etc/ceph/monmap
monmaptool: set fsid to 92749530-d9af-4226-bfe0-ccc79a689a66
monmaptool: writing epoch 0 to /etc/ceph/monmap (2 monitors)

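To confirm the entry was recorded, the updated map can be printed back with monmaptool; the output should list both [node01] and [node04].

# print the monitor map just written

root@node01:~#
monmaptool --print /etc/ceph/monmap
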
# configure the Monitor Daemon

root@node01:~#
scp /etc/ceph/ceph.conf node04:/etc/ceph/ceph.conf

root@node01:~#
scp /etc/ceph/ceph.mon.keyring node04:/etc/ceph

root@node01:~#
scp /etc/ceph/monmap node04:/etc/ceph

root@node01:~#
ssh node04 "ceph-mon --cluster ceph --mkfs -i node04 --monmap /etc/ceph/monmap --keyring /etc/ceph/ceph.mon.keyring"

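If the mkfs step succeeded, it populated a per-daemon data directory on the new node (Ceph names it {cluster}-{id}, so ceph-node04 here); a quick listing confirms it exists.

# the monitor's data directory should now exist

root@node01:~#
ssh node04 "ls /var/lib/ceph/mon/ceph-node04"
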
root@node01:~#
ssh node04 "chown -R ceph. /etc/ceph /var/lib/ceph/mon"

root@node01:~#
ssh node04 "ceph auth get mon. -o /etc/ceph/ceph.mon.keyring"

root@node01:~#
ssh node04 "systemctl enable --now ceph-mon@node04"

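Before enabling msgr2, it is worth checking that the unit actually started.

# should print "active"

root@node01:~#
ssh node04 "systemctl is-active ceph-mon@node04"
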
root@node01:~#
ssh node04 "ceph mon enable-msgr2"
root@node01:~#
ceph -s

  cluster:
    id:     92749530-d9af-4226-bfe0-ccc79a689a66
    health: HEALTH_OK

  services:
    mon: 2 daemons, quorum node01,node04 (age 7s)
    mgr: node01(active, since 42m)
    mds: cephfs:1 {0=node01=up:active}
    osd: 3 osds: 3 up (since 9m), 3 in (since 10m)
    rgw: 1 daemon active (www)

  task status:
    scrub status:
        mds.node01: idle

  data:
    pools:   8 pools, 248 pgs
    objects: 237 objects, 6.7 MiB
    usage:   3.0 GiB used, 237 GiB / 240 GiB avail
    pgs:     248 active+clean
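
Note that a two-monitor cluster is fragile: quorum requires a strict majority, so losing either monitor halts the cluster. Production clusters normally run an odd number of monitors (3 or 5). Quorum membership can be inspected at any time.

# list the monitors currently in quorum

root@node01:~#
ceph quorum_status --format json-pretty
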
[2] To remove a Monitor node from an existing cluster, run the following commands.
As an example, remove the [node04] node from the admin node.
root@node01:~#
ceph -s

  cluster:
    id:     92749530-d9af-4226-bfe0-ccc79a689a66
    health: HEALTH_OK

  services:
    mon: 2 daemons, quorum node01,node04 (age 7s)
    mgr: node01(active, since 42m)
    mds: cephfs:1 {0=node01=up:active}
    osd: 3 osds: 3 up (since 9m), 3 in (since 10m)
    rgw: 1 daemon active (www)

  task status:
    scrub status:
        mds.node01: idle

  data:
    pools:   8 pools, 248 pgs
    objects: 237 objects, 6.7 MiB
    usage:   3.0 GiB used, 237 GiB / 240 GiB avail
    pgs:     248 active+clean

# remove the Monitor Daemon on [node04] from the cluster

root@node01:~#
ceph mon remove node04

removing mon.node04 at [v2:10.0.0.54:3300/0,v1:10.0.0.54:6789/0], there will be 1 monitors
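
The removal takes effect in the monitor map immediately; [ceph mon stat] should now report a single monitor.

# verify node04 is gone from the map

root@node01:~#
ceph mon stat
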
# disable the Monitor Daemon on the target node

root@node01:~#
ssh node04 "systemctl disable --now ceph-mon@node04.service"

root@node01:~#
ceph -s

  cluster:
    id:     92749530-d9af-4226-bfe0-ccc79a689a66
    health: HEALTH_OK

  services:
    mon: 1 daemons, quorum node01 (age 25s)
    mgr: node01(active, since 43m)
    mds: cephfs:1 {0=node01=up:active}
    osd: 3 osds: 3 up (since 10m), 3 in (since 11m)
    rgw: 1 daemon active (www)

  task status:
    scrub status:
        mds.node01: idle

  data:
    pools:   8 pools, 248 pgs
    objects: 237 objects, 6.7 MiB
    usage:   3.0 GiB used, 237 GiB / 240 GiB avail
    pgs:     248 active+clean
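
The removed monitor's data directory is left behind on [node04]. If the node will not rejoin the cluster as a monitor, it can optionally be deleted (destructive; double-check the hostname and path before running).

# optional cleanup on the removed node

root@node01:~#
ssh node04 "rm -rf /var/lib/ceph/mon/ceph-node04"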