CentOS Stream 9

Configure RAID 1
2022/06/30

 
Configure RAID 1 with 2 new disks added to a computer.
[1] This example is based on the following environment.
It shows how to install two new disks, [sdb] and [sdc], on this computer and configure them as RAID 1.
[root@dlp ~]#
df -h

Filesystem           Size  Used Avail Use% Mounted on
devtmpfs             7.7G     0  7.7G   0% /dev
tmpfs                7.7G     0  7.7G   0% /dev/shm
tmpfs                3.1G  8.6M  3.1G   1% /run
/dev/mapper/cs-root   71G  2.2G   69G   4% /
/dev/vda1           1014M  370M  645M  37% /boot
tmpfs                1.6G     0  1.6G   0% /run/user/0
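# (optional) the new disks can be confirmed with lsblk before partitioning
# this is a sketch; the device names depend on your hardware

[root@dlp ~]#
lsblk /dev/sdb /dev/sdc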
[2] Create a partition on each new disk and set the RAID flag.
[root@dlp ~]#
parted --script /dev/sdb "mklabel gpt"

[root@dlp ~]#
parted --script /dev/sdc "mklabel gpt"

[root@dlp ~]#
parted --script /dev/sdb "mkpart primary 0% 100%"

[root@dlp ~]#
parted --script /dev/sdc "mkpart primary 0% 100%"

[root@dlp ~]#
parted --script /dev/sdb "set 1 raid on"

[root@dlp ~]#
parted --script /dev/sdc "set 1 raid on"
[3] Configure RAID 1.
# install required tools

[root@dlp ~]#
dnf -y install mdadm
[root@dlp ~]#
mdadm --create /dev/md0 --level=raid1 --raid-devices=2 /dev/sdb1 /dev/sdc1

mdadm: Note: this array has metadata at the start and
    may not be suitable as a boot device.  If you plan to
    store '/boot' on this device please ensure that
    your boot-loader understands md/v1.x metadata, or use
    --metadata=0.90
Continue creating array? y
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md0 started.

# show status

[root@dlp ~]#
cat /proc/mdstat

Personalities : [raid1]
md0 : active raid1 sdc1[1] sdb1[0]
      83817472 blocks super 1.2 [2/2] [UU]
      [=>...................]  resync =  7.4% (6223872/83817472) finish=8.9min speed=144187K/sec

unused devices: <none>

# when syncing has finished, the status looks like follows
# RAID 1 configuration is now complete

[root@dlp ~]#
cat /proc/mdstat

Personalities : [raid1]
md0 : active raid1 sdc1[1] sdb1[0]
      83817472 blocks super 1.2 [2/2] [UU]

unused devices: <none>
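
# (optional) the array definition can be recorded in /etc/mdadm.conf so it keeps a stable name across reboots
# this is a sketch and not strictly required for the example above

[root@dlp ~]#
mdadm --detail --scan | tee -a /etc/mdadm.conf

[root@dlp ~]#
mdadm --detail /dev/md0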
[4] To use the RAID device, create a filesystem on it and mount it as usual.
# for example, format with xfs and mount it on /mnt

[root@dlp ~]#
mkfs.xfs /dev/md0

meta-data=/dev/md0               isize=512    agcount=4, agsize=5238592 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=1, sparse=1, rmapbt=0
         =                       reflink=1    bigtime=1 inobtcount=1
data     =                       bsize=4096   blocks=20954368, imaxpct=25
         =                       sunit=0      swidth=0 blks
naming   =version 2              bsize=4096   ascii-ci=0, ftype=1
log      =internal log           bsize=4096   blocks=10231, version=2
         =                       sectsz=512   sunit=0 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
Discarding blocks...Done.

[root@dlp ~]#
mount /dev/md0 /mnt

[root@dlp ~]#
df -hT

Filesystem          Type      Size  Used Avail Use% Mounted on
devtmpfs            devtmpfs  4.0M     0  4.0M   0% /dev
tmpfs               tmpfs     3.9G     0  3.9G   0% /dev/shm
tmpfs               tmpfs     1.6G  8.6M  1.6G   1% /run
/dev/mapper/cs-root xfs        71G  2.2G   69G   3% /
/dev/vda1           xfs      1014M  240M  775M  24% /boot
tmpfs               tmpfs     794M     0  794M   0% /run/user/0
/dev/md0            xfs        80G  603M   80G   1% /mnt

# to set the entry in fstab
# because the md*** device name sometimes changes when the hardware changes, set it with the UUID

[root@dlp ~]#
blkid | grep md

/dev/md0: UUID="34957f99-1df9-4119-b080-8aaa3a2a80ff" TYPE="xfs"

[root@dlp ~]#
vi /etc/fstab
# set with UUID

/dev/mapper/cs-root     /                       xfs     defaults        0 0
UUID=a182e1b1-d17d-4bb8-89cd-77c87288674e /boot xfs     defaults        0 0
/dev/mapper/cs-swap     none                    swap    defaults        0 0
UUID=34957f99-1df9-4119-b080-8aaa3a2a80ff /mnt  xfs     defaults        0 0

# with the UUID set, the filesystem is mounted normally even if the md*** device name changes

[root@dlp ~]#
df -hT /mnt

Filesystem     Type  Size  Used Avail Use% Mounted on
/dev/md127     xfs    80G  603M   80G   1% /mnt
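
# (optional) validate the new fstab entry before relying on it across reboots
# a sketch assuming the mount point [/mnt] used above

[root@dlp ~]#
findmnt --verify

[root@dlp ~]#
umount /mnt && mount -a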
[5] If a member disk in the RAID array fails, swap in a new disk and re-configure RAID 1 as follows.
# in case of a failure, the status looks like follows

[root@dlp ~]#
cat /proc/mdstat

Personalities : [raid1]
md0 : active (auto-read-only) raid1 sdb1[0]
      83817472 blocks super 1.2 [2/1] [U_]

unused devices: <none>

# after swapping in the new disk, add it back to the array as follows

[root@dlp ~]#
mdadm --manage /dev/md0 --add /dev/sdc1

mdadm: added /dev/sdc1
[root@dlp ~]#
cat /proc/mdstat

Personalities : [raid1]
md0 : active raid1 sdc1[2] sdb1[0]
      83817472 blocks super 1.2 [2/1] [U_]
      [>....................]  recovery =  2.4% (2028736/83817472) finish=8.7min speed=156056K/sec

unused devices: <none>
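
# (optional) if the failed member is still listed in the array, it can be marked as failed and removed
# before physically swapping the disk; a sketch assuming [sdc1] is the failed member
# the replacement disk also needs a RAID-flagged partition, created as in step [2], before it is added

[root@dlp ~]#
mdadm --manage /dev/md0 --fail /dev/sdc1

[root@dlp ~]#
mdadm --manage /dev/md0 --remove /dev/sdc1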