Debian 11 Bullseye

Ceph Nautilus : Configure Cluster #2 (2021/08/26)

 
Install the distributed file system Ceph and configure a storage cluster.
In this example, the cluster is configured with 3 nodes.
Each of the 3 nodes must have a free block device.
([/dev/sdb] is used in this example)
                                         |
            +----------------------------+----------------------------+
            |                            |                            |
            |10.0.0.51                   |10.0.0.52                   |10.0.0.53 
+-----------+-----------+    +-----------+-----------+    +-----------+-----------+
|   [node01.srv.world]  |    |   [node02.srv.world]  |    |   [node03.srv.world]  |
|     Object Storage    +----+     Object Storage    +----+     Object Storage    |
|     Monitor Daemon    |    |                       |    |                       |
|     Manager Daemon    |    |                       |    |                       |
+-----------------------+    +-----------------------+    +-----------------------+
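
Before running the steps below, it is worth confirming on every node that [/dev/sdb] actually exists and holds nothing you need, since it will be wiped. A minimal pre-flight sketch (not part of the original procedure; it assumes the same passwordless root SSH between nodes that the OSD setup below relies on):

root@node01:~# for NODE in node01 node02 node03
do
    # [lsblk] shows the device size and any existing partitions;
    # [wipefs -n] only reports filesystem signatures without erasing anything
    ssh $NODE "lsblk /dev/sdb; wipefs -n /dev/sdb"
done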

[1]
[2] From the admin node, configure the OSD (Object Storage Device) on each node.
The target block device ([/dev/sdb] in this example) will be formatted, so if it holds any data you need to keep, back it up beforehand.
# run the OSD setup on each node

root@node01:~# for NODE in node01 node02 node03
do
    # copy the cluster config and keyrings to the other two nodes
    if [ "${NODE}" != "node01" ]
    then
        scp /etc/ceph/ceph.conf ${NODE}:/etc/ceph/ceph.conf
        scp /etc/ceph/ceph.client.admin.keyring ${NODE}:/etc/ceph
        scp /var/lib/ceph/bootstrap-osd/ceph.keyring ${NODE}:/var/lib/ceph/bootstrap-osd
    fi
    # fix ownership, create a GPT partition on [/dev/sdb], and set up the OSD
    ssh $NODE \
    "chown ceph. /etc/ceph/ceph.* /var/lib/ceph/bootstrap-osd/*; \
    parted --script /dev/sdb 'mklabel gpt'; \
    parted --script /dev/sdb 'mkpart primary 0% 100%'; \
    ceph-volume lvm create --data /dev/sdb1"
done

Running command: /usr/bin/ceph-authtool --gen-print-key
Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 106b9a2a-61d5-402f-8bda-de56dd735361
Running command: /usr/sbin/vgcreate --force --yes ceph-73fb83b6-392c-4aff-87d7-02797a2ecbd0 /dev/sdb1
 stdout: Physical volume "/dev/sdb1" successfully created.
 stdout: Volume group "ceph-73fb83b6-392c-4aff-87d7-02797a2ecbd0" successfully created
Running command: /usr/sbin/lvcreate --yes -l 20479 -n osd-block-106b9a2a-61d5-402f-8bda-de56dd735361 ceph-73fb83b6-392c-4aff-87d7-02797a2ecbd0
 stdout: Logical volume "osd-block-106b9a2a-61d5-402f-8bda-de56dd735361" created.
Running command: /usr/bin/ceph-authtool --gen-print-key
Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-0
--> Executable selinuxenabled not in PATH: /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-73fb83b6-392c-4aff-87d7-02797a2ecbd0/osd-block-106b9a2a-61d5-402f-8bda-de56dd735361
Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2
Running command: /usr/bin/ln -s /dev/ceph-73fb83b6-392c-4aff-87d7-02797a2ecbd0/osd-block-106b9a2a-61d5-402f-8bda-de56dd735361 /var/lib/ceph/osd/ceph-0/block
Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-0/activate.monmap
 stderr: got monmap epoch 2
Running command: /usr/bin/ceph-authtool /var/lib/ceph/osd/ceph-0/keyring --create-keyring --name osd.0 --add-key AQC40yZhufXxEBAAhLZdL1CGFmuTLAxi9+12pQ==
 stdout: creating /var/lib/ceph/osd/ceph-0/keyring
 stdout: added entity osd.0 auth(key=AQC40yZhufXxEBAAhLZdL1CGFmuTLAxi9+12pQ==)
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/keyring
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/
Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 0 --monmap /var/lib/ceph/osd/ceph-0/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-0/ --osd-uuid 106b9a2a-61d5-402f-8bda-de56dd735361 --setuser ceph --setgroup ceph
 stderr: 2021-08-26 08:35:21.932 7f2307cefc00 -1 bluestore(/var/lib/ceph/osd/ceph-0/) _read_fsid unparsable uuid
--> ceph-volume lvm prepare successful for: /dev/sdb1
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0
Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-73fb83b6-392c-4aff-87d7-02797a2ecbd0/osd-block-106b9a2a-61d5-402f-8bda-de56dd735361 --path /var/lib/ceph/osd/ceph-0 --no-mon-config
Running command: /usr/bin/ln -snf /dev/ceph-73fb83b6-392c-4aff-87d7-02797a2ecbd0/osd-block-106b9a2a-61d5-402f-8bda-de56dd735361 /var/lib/ceph/osd/ceph-0/block
Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-0/block
Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0
Running command: /usr/bin/systemctl enable ceph-volume@lvm-0-106b9a2a-61d5-402f-8bda-de56dd735361
 stderr: Created symlink /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-0-106b9a2a-61d5-402f-8bda-de56dd735361.service → /lib/systemd/system/ceph-volume@.service.
Running command: /usr/bin/systemctl enable --runtime ceph-osd@0
 stderr: Created symlink /run/systemd/system/ceph-osd.target.wants/ceph-osd@0.service → /lib/systemd/system/ceph-osd@.service.
Running command: /usr/bin/systemctl start ceph-osd@0
--> ceph-volume lvm activate successful for osd ID: 0
--> ceph-volume lvm create successful for: /dev/sdb1
ceph.conf                                     100%  274   134.8KB/s   00:00
ceph.client.admin.keyring                     100%  151    74.3KB/s   00:00
ceph.keyring                                  100%  129    63.1KB/s   00:00
.....
.....
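
To double-check what was created on each host, [ceph-volume lvm list] prints the OSD ID, OSD FSID, and backing device for every OSD prepared on that node. An optional verification step, run the same way over SSH:

root@node01:~# for NODE in node01 node02 node03; do ssh $NODE ceph-volume lvm list; done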

# check the cluster status

# it is fine if it shows [HEALTH_OK]

root@node01:~# ceph -s

  cluster:
    id:     92749530-d9af-4226-bfe0-ccc79a689a66
    health: HEALTH_OK

  services:
    mon: 1 daemons, quorum node01 (age 5m)
    mgr: node01(active, since 5m)
    osd: 3 osds: 3 up (since 83s), 3 in (since 83s)

  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   3.0 GiB used, 237 GiB / 240 GiB avail
    pgs:
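
The same health state is also available in machine-readable form, which is handy for scripts. A short sketch; [ceph health] is standard, and the JSON variant assumes [jq] is installed:

# summary state only
root@node01:~# ceph health
HEALTH_OK

# JSON output for scripting
root@node01:~# ceph -s --format json | jq -r '.health.status'
HEALTH_OK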

# check the OSD tree

root@node01:~# ceph osd tree

ID CLASS WEIGHT  TYPE NAME       STATUS REWEIGHT PRI-AFF
-1       0.23428 root default
-3       0.07809     host node01
 0   hdd 0.07809         osd.0       up  1.00000 1.00000
-5       0.07809     host node02
 1   hdd 0.07809         osd.1       up  1.00000 1.00000
-7       0.07809     host node03
 2   hdd 0.07809         osd.2       up  1.00000 1.00000
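
The WEIGHT column is the CRUSH weight, which by default is the device capacity expressed in TiB: each 80 GiB disk gives roughly 80 / 1024 ≈ 0.078, matching the 0.07809 shown (the small difference is the usable size of the backing logical volume). A quick check:

root@node01:~# echo "scale=5; 80 / 1024" | bc
.07812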

root@node01:~# ceph df 
RAW STORAGE:
    CLASS     SIZE        AVAIL       USED        RAW USED     %RAW USED
    hdd       240 GiB     237 GiB     5.4 MiB      3.0 GiB          1.25
    TOTAL     240 GiB     237 GiB     5.4 MiB      3.0 GiB          1.25

POOLS:
    POOL     ID     PGS     STORED     OBJECTS     USED     %USED     MAX AVAIL

root@node01:~# ceph osd df 
ID CLASS WEIGHT  REWEIGHT SIZE    RAW USE DATA    OMAP META  AVAIL   %USE VAR  PGS STATUS
 0   hdd 0.07809  1.00000  80 GiB 1.0 GiB 1.8 MiB  0 B 1 GiB  79 GiB 1.25 1.00   0     up
 1   hdd 0.07809  1.00000  80 GiB 1.0 GiB 1.8 MiB  0 B 1 GiB  79 GiB 1.25 1.00   0     up
 2   hdd 0.07809  1.00000  80 GiB 1.0 GiB 1.8 MiB  0 B 1 GiB  79 GiB 1.25 1.00   0     up
                    TOTAL 240 GiB 3.0 GiB 5.2 MiB  0 B 3 GiB 237 GiB 1.25
MIN/MAX VAR: 1.00/1.00  STDDEV: 0
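
The roughly 1 GiB RAW USE on each still-empty OSD is BlueStore's internal metadata allocation, which is why a brand-new cluster already reports 3.0 GiB used; the POOLS table is empty and [pgs] is 0 simply because no pool has been created yet. For unattended monitoring, the checks above can be wrapped in a tiny script (a sketch only; the script name and exit-code convention are this example's own, not Ceph's):

root@node01:~# cat > /usr/local/bin/ceph-health-check.sh <<'EOF'
#!/bin/bash
# report the cluster and OSD state, exit non-zero unless HEALTH_OK
STATUS=$(ceph health)
echo "${STATUS} / $(ceph osd stat)"
[ "${STATUS}" = "HEALTH_OK" ] || exit 1
EOF
root@node01:~# chmod +x /usr/local/bin/ceph-health-check.sh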