Ceph Tentacle : Configure Cluster #2 (2025/09/25)
Install the distributed file system Ceph and configure a storage cluster.
This example builds the cluster with 3 nodes.
            +----------------------------+----------------------------+
            |                            |                            |
            |10.0.0.51                   |10.0.0.52                   |10.0.0.53
+-----------+-----------+    +-----------+-----------+    +-----------+-----------+
|   [node01.srv.world]  |    |   [node02.srv.world]  |    |   [node03.srv.world]  |
|     Object Storage    +----+     Object Storage    +----+     Object Storage    |
|     Monitor Daemon    |    |                       |    |                       |
|     Manager Daemon    |    |                       |    |                       |
+-----------------------+    +-----------------------+    +-----------------------+
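The commands in this article reach each node by hostname over SSH, so node01 - node03 must be resolvable from the Admin Node. If the names are not registered in DNS, a minimal [/etc/hosts] sketch for the addresses in the diagram above would look like the following (an assumption; adjust it to your own name resolution setup):

# /etc/hosts on each node (only needed if the hostnames are not resolvable via DNS)
10.0.0.51   node01.srv.world node01
10.0.0.52   node02.srv.world node02
10.0.0.53   node03.srv.world node03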
[1]	Configure the Monitor Daemon and Manager Daemon on the Admin Node (node01) beforehand.
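Before moving on to the OSDs, it is worth confirming that the Monitor Daemon and Manager Daemon are actually up; an optional sanity check on the Admin Node:

# confirm one monitor is in quorum and a manager is active
[root@node01 ~]# ceph mon stat
[root@node01 ~]# ceph mgr stat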
[2]	Configure the OSD (Object Storage Device) on each node from the Admin Node. The block device used here ([/dev/sdb] in this example) is formatted in the process, so back up any existing data you need to keep beforehand.
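Before formatting, it can help to confirm that every node actually has the target disk and that it carries no data you still need; a small pre-check, assuming the data disk is named [/dev/sdb] on all three nodes:

# optional: show the target disk and any existing filesystem / LVM signatures on each node
[root@node01 ~]# for NODE in node01 node02 node03
do
    ssh $NODE "lsblk -f /dev/sdb"
done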
# If Firewalld is running, allow the required service ports beforehand
[root@node01 ~]# for NODE in node01 node02 node03
do
ssh $NODE "firewall-cmd --add-service=ceph; firewall-cmd --runtime-to-permanent"
done
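# (optional sanity check) confirm the [ceph] service is now allowed on every node
[root@node01 ~]# for NODE in node01 node02 node03
do
    ssh $NODE "firewall-cmd --list-services"
done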
# configure OSD on each node
[root@node01 ~]# for NODE in node01 node02 node03
do
if [ ! ${NODE} = "node01" ]
then
scp /etc/ceph/ceph.conf ${NODE}:/etc/ceph/ceph.conf
scp /etc/ceph/ceph.client.admin.keyring ${NODE}:/etc/ceph
scp /var/lib/ceph/bootstrap-osd/ceph.keyring ${NODE}:/var/lib/ceph/bootstrap-osd
fi
ssh $NODE \
"chown -R ceph:ceph /etc/ceph/ceph.* /var/lib/ceph; \
parted --script /dev/sdb 'mklabel gpt'; \
parted --script /dev/sdb "mkpart primary 0% 100%"; \
ceph-volume lvm create --data /dev/sdb1"
done
Running command: /usr/bin/ceph-authtool --gen-print-key
Running command: /usr/bin/ceph-authtool --gen-print-key
Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 2b322ebd-a987-404a-9761-c95cb0da4717
Running command: vgcreate --force --yes ceph-d3f3e94f-c30a-42d6-9661-60529d5cc8e1 /dev/vdb1
 stdout: Physical volume "/dev/vdb1" successfully created.
 stdout: Volume group "ceph-d3f3e94f-c30a-42d6-9661-60529d5cc8e1" successfully created
Running command: lvcreate --yes -l 40959 -n osd-block-2b322ebd-a987-404a-9761-c95cb0da4717 ceph-d3f3e94f-c30a-42d6-9661-60529d5cc8e1
 stdout: Logical volume "osd-block-2b322ebd-a987-404a-9761-c95cb0da4717" created.
Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-0
Running command: /usr/sbin/restorecon /var/lib/ceph/osd/ceph-0
Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-d3f3e94f-c30a-42d6-9661-60529d5cc8e1/osd-block-2b322ebd-a987-404a-9761-c95cb0da4717
Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2
Running command: /usr/bin/ln -s /dev/ceph-d3f3e94f-c30a-42d6-9661-60529d5cc8e1/osd-block-2b322ebd-a987-404a-9761-c95cb0da4717 /var/lib/ceph/osd/ceph-0/block
Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-0/activate.monmap
 stderr: got monmap epoch 2
--> Creating keyring file for osd.0
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/keyring
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/
Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 0 --monmap /var/lib/ceph/osd/ceph-0/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-0/ --osd-uuid 2b322ebd-a987-404a-9761-c95cb0da4717 --setuser ceph --setgroup ceph
 stderr: 2025-09-25T10:04:33.732+0900 7ffb72cba900 -1 bluestore(/var/lib/ceph/osd/ceph-0//block) No valid bdev label found
 stderr: 2025-09-25T10:04:33.744+0900 7ffb72cba900 -1 bluestore(/var/lib/ceph/osd/ceph-0/) _read_fsid unparsable uuid
--> ceph-volume lvm prepare successful for: /dev/vdb1
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0
Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-d3f3e94f-c30a-42d6-9661-60529d5cc8e1/osd-block-2b322ebd-a987-404a-9761-c95cb0da4717 --path /var/lib/ceph/osd/ceph-0 --no-mon-config
Running command: /usr/bin/ln -snf /dev/ceph-d3f3e94f-c30a-42d6-9661-60529d5cc8e1/osd-block-2b322ebd-a987-404a-9761-c95cb0da4717 /var/lib/ceph/osd/ceph-0/block
Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-0/block
Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0
Running command: /usr/bin/systemctl enable ceph-volume@lvm-0-2b322ebd-a987-404a-9761-c95cb0da4717
 stderr: Created symlink /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-0-2b322ebd-a987-404a-9761-c95cb0da4717.service → /usr/lib/systemd/system/ceph-volume@.service.
Running command: /usr/bin/systemctl enable --runtime ceph-osd@0
 stderr: Created symlink /run/systemd/system/ceph-osd.target.wants/ceph-osd@0.service → /usr/lib/systemd/system/ceph-osd@.service.
Running command: /usr/bin/systemctl start ceph-osd@0
--> ceph-volume lvm activate successful for osd ID: 0
--> ceph-volume lvm create successful for: /dev/vdb1
.....
.....

# confirm the cluster status
# if the output shows [HEALTH_OK], the cluster is fine
[root@node01 ~]# ceph -s
  cluster:
    id:     4b46d240-7b97-412e-b5b6-8c58e8d25835
    health: HEALTH_OK

  services:
    mon: 1 daemons, quorum node01 (age 7m) [leader: node01]
    mgr: node01(active, since 5m)
    osd: 3 osds: 3 up (since 99s), 3 in (since 107s)

  data:
    pools:   1 pools, 1 pgs
    objects: 2 objects, 449 KiB
    usage:   81 MiB used, 480 GiB / 480 GiB avail
    pgs:     1 active+clean
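If only the health state is needed, [ceph health] prints it on a single line, which is convenient for scripting or monitoring:

# one-line health check; prints only the status string
[root@node01 ~]# ceph health
HEALTH_OK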
# confirm the OSD tree
[root@node01 ~]# ceph osd tree
ID  CLASS  WEIGHT   TYPE NAME        STATUS  REWEIGHT  PRI-AFF
-1         0.46857  root default
-3         0.15619      host node01
 0    hdd  0.15619          osd.0        up   1.00000  1.00000
-5         0.15619      host node02
 1    hdd  0.15619          osd.1        up   1.00000  1.00000
-7         0.15619      host node03
 2    hdd  0.15619          osd.2        up   1.00000  1.00000

[root@node01 ~]# ceph df
--- RAW STORAGE ---
CLASS     SIZE    AVAIL    USED  RAW USED  %RAW USED
hdd    480 GiB  480 GiB  81 MiB    81 MiB       0.02
TOTAL  480 GiB  480 GiB  81 MiB    81 MiB       0.02

--- POOLS ---
POOL  ID  PGS   STORED  OBJECTS     USED  %USED  MAX AVAIL
.mgr   1    1  449 KiB        2  1.3 MiB      0    152 GiB

[root@node01 ~]# ceph osd df
ID  CLASS  WEIGHT   REWEIGHT  SIZE     RAW USE  DATA     OMAP    META    AVAIL    %USE  VAR   PGS  STATUS
 0    hdd  0.15619   1.00000  160 GiB   27 MiB  564 KiB   7 KiB  26 MiB  160 GiB  0.02  1.00    1      up
 1    hdd  0.15619   1.00000  160 GiB   27 MiB  564 KiB   7 KiB  26 MiB  160 GiB  0.02  1.00    1      up
 2    hdd  0.15619   1.00000  160 GiB   27 MiB  564 KiB   6 KiB  26 MiB  160 GiB  0.02  1.00    1      up
                       TOTAL  480 GiB   81 MiB  1.7 MiB  21 KiB  79 MiB  480 GiB  0.02
MIN/MAX VAR: 1.00/1.00  STDDEV: 0
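If [ceph-volume lvm create] fails on a node because the disk still holds old LVM metadata or filesystem signatures, the device can be wiped and the create step re-run. A minimal sketch (node02 and [/dev/sdb1] are just examples; zapping destroys everything on the partition):

# only when the create step failed due to leftover data on the disk
[root@node02 ~]# ceph-volume lvm zap --destroy /dev/sdb1
[root@node02 ~]# ceph-volume lvm create --data /dev/sdb1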