CentOS 8

Ceph Nautilus : Configure Cluster #2
2020/06/30

 
Install the distributed file system Ceph and configure a storage cluster.
In this example, the cluster is configured with three nodes.
It is assumed that each of the three nodes has a free block device.
(This example uses [/dev/sdb].)
                                         |
            +----------------------------+----------------------------+
            |                            |                            |
            |10.0.0.51                   |10.0.0.52                   |10.0.0.53 
+-----------+-----------+    +-----------+-----------+    +-----------+-----------+
|   [node01.srv.world]  |    |   [node02.srv.world]  |    |   [node03.srv.world]  |
|     Object Storage    +----+     Object Storage    +----+     Object Storage    |
|     Monitor Daemon    |    |                       |    |                       |
|     Manager Daemon    |    |                       |    |                       |
+-----------------------+    +-----------------------+    +-----------------------+

[1]
[2] Configure OSD (Object Storage Device) on each node from the Admin Node.
The block device to configure ([/dev/sdb] in this example) will be formatted, so if it contains existing data you need to keep, back it up beforehand.
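
Before formatting, it may be worth confirming that [/dev/sdb] on every node is really unused. This is only an optional sketch; it assumes passwordless SSH from node01 to all nodes, as used in the loops below.

[root@node01 ~]# for NODE in node01 node02 node03
do
    # show partitions and mountpoints of the target device on each node
    ssh $NODE "hostname; lsblk /dev/sdb"
done
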
# if Firewalld is running, allow the required service ports beforehand

[root@node01 ~]# for NODE in node01 node02 node03
do
    ssh $NODE "firewall-cmd --add-service=ceph --permanent; firewall-cmd --reload"
done 
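
To confirm the rule is active everywhere, the allowed services can be listed on each node as well (optional check, same node names as above):

[root@node01 ~]# for NODE in node01 node02 node03
do
    ssh $NODE "hostname; firewall-cmd --list-services"
done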

# run the OSD configuration on each node

[root@node01 ~]# for NODE in node01 node02 node03
do
    if [ ! ${NODE} = "node01" ]
    then
        scp /etc/ceph/ceph.conf ${NODE}:/etc/ceph/ceph.conf
        scp /etc/ceph/ceph.client.admin.keyring ${NODE}:/etc/ceph
        scp /var/lib/ceph/bootstrap-osd/ceph.keyring ${NODE}:/var/lib/ceph/bootstrap-osd
    fi
    ssh $NODE \
    "chown ceph. /etc/ceph/ceph.* /var/lib/ceph/bootstrap-osd/*; \
    parted --script /dev/sdb 'mklabel gpt'; \
    parted --script /dev/sdb 'mkpart primary 0% 100%'; \
    ceph-volume lvm create --data /dev/sdb1"
done 

Running command: /usr/bin/ceph-authtool --gen-print-key
Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 64c31cc1-ca84-471d-adbe-6159ae91b198
Running command: /usr/sbin/vgcreate --force --yes ceph-4ea06a80-9f2a-4e14-a938-626487600868 /dev/sdb1
 stdout: Physical volume "/dev/sdb1" successfully created.
 stdout: Volume group "ceph-4ea06a80-9f2a-4e14-a938-626487600868" successfully created
Running command: /usr/sbin/lvcreate --yes -l 100%FREE -n osd-block-64c31cc1-ca84-471d-adbe-6159ae91b198 ceph-4ea06a80-9f2a-4e14-a938-626487600868
 stdout: Logical volume "osd-block-64c31cc1-ca84-471d-adbe-6159ae91b198" created.
Running command: /usr/bin/ceph-authtool --gen-print-key
Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-0
Running command: /usr/sbin/restorecon /var/lib/ceph/osd/ceph-0
Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-4ea06a80-9f2a-4e14-a938-626487600868/osd-block-64c31cc1-ca84-471d-adbe-6159ae91b198
Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2
Running command: /usr/bin/ln -s /dev/ceph-4ea06a80-9f2a-4e14-a938-626487600868/osd-block-64c31cc1-ca84-471d-adbe-6159ae91b198 /var/lib/ceph/osd/ceph-0/block
Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-0/activate.monmap
 stderr: got monmap epoch 2
Running command: /usr/bin/ceph-authtool /var/lib/ceph/osd/ceph-0/keyring --create-keyring --name osd.0 --add-key AQAXrfVeRoZbKhAA/I+oM7FShkuin9pVaFd5Pg==
 stdout: creating /var/lib/ceph/osd/ceph-0/keyring
added entity osd.0 auth(key=AQAXrfVeRoZbKhAA/I+oM7FShkuin9pVaFd5Pg==)
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/keyring
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/
Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 0 --monmap /var/lib/ceph/osd/ceph-0/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-0/ --osd-uuid 64c31cc1-ca84-471d-adbe-6159ae91b198 --setuser ceph --setgroup ceph
--> ceph-volume lvm prepare successful for: /dev/sdb1
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0
Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-4ea06a80-9f2a-4e14-a938-626487600868/osd-block-64c31cc1-ca84-471d-adbe-6159ae91b198 --path /var/lib/ceph/osd/ceph-0 --no-mon-config
Running command: /usr/bin/ln -snf /dev/ceph-4ea06a80-9f2a-4e14-a938-626487600868/osd-block-64c31cc1-ca84-471d-adbe-6159ae91b198 /var/lib/ceph/osd/ceph-0/block
Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-0/block
Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0
Running command: /usr/bin/systemctl enable ceph-volume@lvm-0-64c31cc1-ca84-471d-adbe-6159ae91b198
 stderr: Created symlink /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-0-64c31cc1-ca84-471d-adbe-6159ae91b198.service → /usr/lib/systemd/system/ceph-volume@.service.
Running command: /usr/bin/systemctl enable --runtime ceph-osd@0
Running command: /usr/bin/systemctl start ceph-osd@0
--> ceph-volume lvm activate successful for osd ID: 0
--> ceph-volume lvm create successful for: /dev/sdb1
.....
.....
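
Optionally, before looking at the cluster as a whole, you can list the LVM-backed OSD that [ceph-volume] created on each node. A minimal sketch using the same SSH loop as above:

[root@node01 ~]# for NODE in node01 node02 node03
do
    ssh $NODE "hostname; ceph-volume lvm list"
done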

# confirm cluster status

# it is OK if [HEALTH_OK] is displayed

[root@node01 ~]# ceph -s

  cluster:
    id:     018c84db-7c76-46bf-8c85-a7520748233b
    health: HEALTH_OK

  services:
    mon: 1 daemons, quorum node01 (age 45m)
    mgr: node01(active, since 45m)
    osd: 3 osds: 3 up (since 29s), 3 in (since 29s)

  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   3.0 GiB used, 237 GiB / 240 GiB avail
    pgs:
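
If the health field shows [HEALTH_WARN] instead of [HEALTH_OK] (for example while OSDs are still coming up), more detail about the warnings can be displayed:

[root@node01 ~]# ceph health detail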

# confirm OSD tree

[root@node01 ~]# ceph osd tree

ID CLASS WEIGHT  TYPE NAME       STATUS REWEIGHT PRI-AFF
-1       0.23428 root default
-3       0.07809     host node01
 0   hdd 0.07809         osd.0       up  1.00000 1.00000
-5       0.07809     host node02
 1   hdd 0.07809         osd.1       up  1.00000 1.00000
-7       0.07809     host node03
 2   hdd 0.07809         osd.2       up  1.00000 1.00000
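
Per-OSD usage and placement group counts can also be shown (optional):

[root@node01 ~]# ceph osd df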

[root@node01 ~]# ceph df 
RAW STORAGE:
    CLASS     SIZE        AVAIL       USED       RAW USED     %RAW USED
    hdd       240 GiB     237 GiB     17 MiB      3.0 GiB          1.26
    TOTAL     240 GiB     237 GiB     17 MiB      3.0 GiB          1.26

POOLS:
    POOL     ID     STORED     OBJECTS     USED     %USED     MAX AVAIL
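
The POOLS list is empty because no pools have been created yet. Purely as an illustration (the pool name [test_pool] and the PG count 32 are arbitrary examples, not part of this setup), creating a pool and running [ceph df] again would make it appear here:

[root@node01 ~]# ceph osd pool create test_pool 32
[root@node01 ~]# ceph df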