Debian 13 trixie

Configure RAID 1 (2025/08/12)

 

Configure RAID 1 using 2 new disks added to a computer.

[1] This example is based on the following environment.
It shows how to install two new disks [sdb] and [sdc] on this computer and configure RAID 1.
root@dlp:~#
df -h

Filesystem                   Size  Used Avail Use% Mounted on
udev                         7.8G     0  7.8G   0% /dev
tmpfs                        1.6G  592K  1.6G   1% /run
/dev/mapper/debian--vg-root   74G  913M   69G   2% /
tmpfs                        7.9G     0  7.9G   0% /dev/shm
tmpfs                        5.0M     0  5.0M   0% /run/lock
tmpfs                        1.0M     0  1.0M   0% /run/credentials/systemd-journald.service
tmpfs                        7.9G     0  7.9G   0% /tmp
/dev/vda1                    943M   74M  804M   9% /boot
tmpfs                        1.0M     0  1.0M   0% /run/credentials/getty@tty1.service
tmpfs                        1.0M     0  1.0M   0% /run/credentials/getty@ttyS0.service
tmpfs                        1.6G  4.0K  1.6G   1% /run/user/0
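
# as an additional check (not shown above), confirm that the new empty disks are detected
# the device names /dev/sdb and /dev/sdc are assumptions that depend on your hardware

root@dlp:~#
lsblk /dev/sdb /dev/sdc
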
[2] Create a partition on each new disk and set the RAID flag.
root@dlp:~#
apt -y install parted
root@dlp:~#
parted --script /dev/sdb "mklabel gpt"

root@dlp:~#
parted --script /dev/sdc "mklabel gpt"

root@dlp:~#
parted --script /dev/sdb "mkpart primary 0% 100%"

root@dlp:~#
parted --script /dev/sdc "mkpart primary 0% 100%"

root@dlp:~#
parted --script /dev/sdb "set 1 raid on"

root@dlp:~#
parted --script /dev/sdc "set 1 raid on"
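
# optionally, verify the resulting partition table and RAID flag on each disk
# (an extra check, not part of the original steps)

root@dlp:~#
parted --script /dev/sdb "print"

root@dlp:~#
parted --script /dev/sdc "print"
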
[3] Configure RAID 1.
# install required tools

root@dlp:~#
apt -y install mdadm
root@dlp:~#
mdadm --create /dev/md0 --level=raid1 --raid-devices=2 /dev/sdb1 /dev/sdc1

To optimize recovery speed, it is recommended to enable write-intent bitmap, do you want to enable it now? [y/N]? y
mdadm: Note: this array has metadata at the start and
    may not be suitable as a boot device.  If you plan to
    store '/boot' on this device please ensure that
    your boot-loader understands md/v1.x metadata, or use
    --metadata=0.90
Continue creating array [y/N]? y
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md0 started.

# show status

root@dlp:~#
cat /proc/mdstat

Personalities : [raid1]
md0 : active raid1 sdc1[1] sdb1[0]
      83817472 blocks super 1.2 [2/2] [UU]
      [=>...................]  resync =  7.4% (6222912/83817472) finish=6.4min speed=200739K/sec
      bitmap: 1/1 pages [4KB], 65536KB chunk

unused devices: <none>
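
# to follow the initial sync continuously instead of re-running the command,
# one option (not shown in the original) is to watch the status

root@dlp:~#
watch -n 5 cat /proc/mdstat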

# the status looks like the following once syncing has finished
# RAID 1 configuration is then complete

root@dlp:~#
cat /proc/mdstat

Personalities : [raid1]
md0 : active raid1 sdc1[1] sdb1[0]
      83817472 blocks super 1.2 [2/2] [UU]
      bitmap: 1/1 pages [4KB], 65536KB chunk

unused devices: <none>
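
# although not part of the original steps, it is common on Debian to record the array
# in mdadm.conf and update the initramfs so it is assembled with a stable name at boot

root@dlp:~#
mdadm --detail --scan | tee -a /etc/mdadm/mdadm.conf

root@dlp:~#
update-initramfs -u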
[4] If a member disk in the RAID array fails, re-configure RAID 1 as follows after swapping in a new disk.
# the status looks like the following when a disk has failed

root@dlp:~#
cat /proc/mdstat

Personalities : [raid1]
md0 : active raid1 sdc1[1] sdb1[0]
      83817472 blocks super 1.2 [2/1] [U_]
      bitmap: 1/1 pages [4KB], 65536KB chunk

unused devices: <none>
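
# if the failed member is still listed in the array, mark it failed and remove it
# before physically swapping the disk, and partition the replacement as in step [2]
# the device name /dev/sdc1 below is an assumption matching this example

root@dlp:~#
mdadm --manage /dev/md0 --fail /dev/sdc1 --remove /dev/sdc1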

# after swapping in the new disk, re-add it to the array as follows

root@dlp:~#
mdadm --manage /dev/md0 --add /dev/sdc1

mdadm: added /dev/sdc1
root@dlp:~#
cat /proc/mdstat

Personalities : [raid1]
md0 : active raid1 sdc1[1] sdb1[0]
      83817472 blocks super 1.2 [2/2] [UU]
      [====>................]  resync = 24.9% (20902592/83817472) finish=7.4min speed=140632K/sec
      bitmap: 1/1 pages [4KB], 65536KB chunk

unused devices: <none>
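
# once the resync above completes, you can also confirm the detailed array state
# (an optional check, not in the original steps)

root@dlp:~#
mdadm --detail /dev/md0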
[5] To use the RAID array, format it with a filesystem and mount it on a directory as usual.
# for example, format with ext4 and mount on /mnt

root@dlp:~#
mkfs.ext4 /dev/md0

root@dlp:~#
mount /dev/md0 /mnt

root@dlp:~#
df -hT

Filesystem                  Type      Size  Used Avail Use% Mounted on
udev                        devtmpfs  7.8G     0  7.8G   0% /dev
tmpfs                       tmpfs     1.6G  616K  1.6G   1% /run
/dev/mapper/debian--vg-root ext4       74G  924M   69G   2% /
tmpfs                       tmpfs     7.9G     0  7.9G   0% /dev/shm
tmpfs                       tmpfs     5.0M     0  5.0M   0% /run/lock
tmpfs                       tmpfs     1.0M     0  1.0M   0% /run/credentials/systemd-journald.service
tmpfs                       tmpfs     7.9G     0  7.9G   0% /tmp
/dev/vda1                   ext4      943M   75M  804M   9% /boot
tmpfs                       tmpfs     1.0M     0  1.0M   0% /run/credentials/getty@tty1.service
tmpfs                       tmpfs     1.0M     0  1.0M   0% /run/credentials/getty@ttyS0.service
tmpfs                       tmpfs     1.6G  4.0K  1.6G   1% /run/user/0
/dev/md0                    ext4       79G  2.1M   75G   1% /mnt

# to add an entry to fstab
# because the md*** device name sometimes changes when hardware is changed, specify it by UUID

root@dlp:~#
blkid | grep md

/dev/md0: UUID="e1636b3e-b0be-44de-9d06-90dd9dd7e9b8" BLOCK_SIZE="4096" TYPE="ext4"

root@dlp:~#
vi /etc/fstab
# set with UUID

/dev/mapper/debian--vg-root /               ext4    errors=remount-ro 0       1
# /boot was on /dev/vda1 during installation
UUID=b1b9adb0-d4b8-4844-bd8c-fe6d55b0e87c /boot           ext4    defaults        0       2
/dev/mapper/debian--vg-swap_1 none            swap    sw              0       0
/dev/sr0        /media/cdrom0   udf,iso9660 user,noauto     0       0
/dev/disk/by-uuid/e1636b3e-b0be-44de-9d06-90dd9dd7e9b8 /mnt  ext4 defaults 0 0

# even if the md*** device name changes, it is still mounted normally

root@dlp:~#
df -hT /mnt

Filesystem     Type  Size  Used Avail Use% Mounted on
/dev/md127     ext4   79G  2.1M   75G   1% /mnt
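
# to test the fstab entry without rebooting, one option is to unmount the filesystem
# and remount everything defined in fstab

root@dlp:~#
umount /mnt

root@dlp:~#
mount -a

root@dlp:~#
df -hT /mnt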