Difference between revisions of "Mdadm"
Jump to navigation
Jump to search
(3 intermediate revisions by the same user not shown) | |||
Line 5: | Line 5: | ||
for i in $( seq 1 9 ); do |
for i in $( seq 1 9 ); do |
||
dd if=/dev/zero of=${DISK_DIR}/disk${i} bs=100M count=1 |
dd if=/dev/zero of=${DISK_DIR}/disk${i} bs=100M count=1 |
||
[ ! -e /dev/loop${i} -a ! -b /dev/loop${i} ] && mknod -m 660 /dev/loop${i} b 7 ${i} |
[ ! -e /dev/loop${i} -a ! -b /dev/loop${i} ] && mknod -m 660 /dev/loop${i} b 7 ${i} |
||
losetup /dev/loop${i} ${DISK_DIR}/disk${i} |
losetup /dev/loop${i} ${DISK_DIR}/disk${i} |
||
done |
done |
||
Line 12: | Line 12: | ||
Create raid1 with one spare |
Create raid1 with one spare |
||
# mdadm --create /dev/md1 --level raid1 -n 2 -x 1 /dev/loop[3-5] |
# mdadm --create /dev/md1 --level raid1 -n 2 -x 1 /dev/loop[3-5] |
||
or add the spare later |
|||
# mdadm --create /dev/md1 --level raid1 -n 2 /dev/loop[3-4] |
|||
# mdadm --manage /dev/md1 --add /dev/loop5 |
|||
Create raid5 with one spare |
Create raid5 with one spare |
||
# mdadm --create /dev/md5 --level raid5 -n 3 -x 1 /dev/loop[6-9] |
# mdadm --create /dev/md5 --level raid5 -n 3 -x 1 /dev/loop[6-9] |
||
Line 33: | Line 36: | ||
Add disk |
Add disk |
||
# mdadm /dev/md5 --add /dev/loop6 |
# mdadm /dev/md5 --add /dev/loop6 |
||
== Grow == |
|||
Besides growing an array, this command can also convert between raid levels. |
|||
Assuming all 9 loop devices we created earlier are available, we create a <tt>raid4</tt> array |
|||
# <span class="input">mdadm --create /dev/md0 --level raid4 --raid-devices 8 --spare-devices 1 /dev/loop[1-9]</span> |
|||
mdadm: Defaulting to version 1.2 metadata |
|||
mdadm: array /dev/md0 started. |
|||
To confirm |
|||
# <span class="input">cat /proc/mdstat</span> |
|||
Personalities : [linear] [raid6] [raid5] [raid4] [multipath] [raid0] [raid1] [raid10] |
|||
md0 : active <span class="highlight">raid4</span> loop8[9] loop9[8](S) loop7[6] loop6[5] loop5[4] loop4[3] loop3[2] loop2[1] loop1[0] |
|||
713216 blocks super 1.2 level 4, 512k chunk, algorithm 0 [8/8] [UUUUUUUU] |
|||
Then converting to raid5 |
|||
# <span class="input">mdadm --grow /dev/md0 --level raid5 --raid-devices 8</span> |
|||
mdadm: level of /dev/md0 changed to raid5 |
|||
Confirming |
|||
# <span class="input">cat /proc/mdstat</span> |
|||
Personalities : [linear] [raid6] [raid5] [raid4] [multipath] [raid0] [raid1] [raid10] |
|||
md0 : active <span class="highlight">raid5</span> loop8[9] loop9[8](S) loop7[6] loop6[5] loop5[4] loop4[3] loop3[2] loop2[1] loop1[0] |
|||
713216 blocks super 1.2 level 5, 512k chunk, algorithm 5 [8/8] [UUUUUUUU] |
|||
== Destroy == |
|||
# mdadm --stop /dev/md0 |
|||
# mdadm --stop /dev/md1 |
|||
# mdadm --stop /dev/md5 |
|||
# mdadm --remove /dev/md0 |
|||
# mdadm --remove /dev/md1 |
|||
# mdadm --remove /dev/md5 |
|||
== Config file == |
== Config file == |
Latest revision as of 22:12, 10 June 2012
Create
Create environment
#!/usr/bin/env bash
# Create nine 100 MB disk-image files and attach each to a loop device
# (/dev/loop1 … /dev/loop9) so they can be used as fake disks for mdadm tests.
set -euo pipefail

readonly DISK_DIR=/var/tmp/disks
mkdir -p "${DISK_DIR}"

for i in {1..9}; do
  # 100 MB zero-filled backing file for each simulated disk.
  dd if=/dev/zero of="${DISK_DIR}/disk${i}" bs=100M count=1
  # Create the device node only if nothing exists at that path yet
  # (loop devices have major number 7; minor matches the index).
  # NOTE: the original used `[ ! -e X -a ! -b X ]`; `-a` is deprecated and
  # `-b` implies `-e`, so a single existence check is sufficient.
  [[ -e /dev/loop${i} ]] || mknod -m 660 "/dev/loop${i}" b 7 "${i}"
  # Attach the backing file to the loop device (requires root).
  losetup "/dev/loop${i}" "${DISK_DIR}/disk${i}"
done
Create raid0 device
# mdadm --create /dev/md0 --level raid0 -n 2 /dev/loop[1-2]
Create raid1 with one spare
# mdadm --create /dev/md1 --level raid1 -n 2 -x 1 /dev/loop[3-5]
or add the spare later
# mdadm --create /dev/md1 --level raid1 -n 2 /dev/loop[3-4] # mdadm --manage /dev/md1 --add /dev/loop5
Create raid5 with one spare
# mdadm --create /dev/md5 --level raid5 -n 3 -x 1 /dev/loop[6-9]
Check the status
# cat /proc/mdstat Personalities : [raid6] [raid5] [raid4] [raid0] [raid1] md5 : active raid5 loop8[2] loop9[3](S) loop7[1] loop6[0] 204672 blocks level 5, 64k chunk, algorithm 2 [3/3] [UUU] md0 : active raid0 loop2[1] loop1[0] 204672 blocks 64k chunks md1 : active raid1 loop5[2](S) loop4[1] loop3[0] 102336 blocks [2/2] [UU] unused devices: <none>
Manage
Fail and remove disk
# mdadm /dev/md5 --fail /dev/loop6 --remove /dev/loop6
Add disk
# mdadm /dev/md5 --add /dev/loop6
Grow
Besides growing an array, this command can also convert between raid levels.
Assuming all 9 loop devices we created earlier are available, we create a raid4 array
# mdadm --create /dev/md0 --level raid4 --raid-devices 8 --spare-devices 1 /dev/loop[1-9]
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md0 started.
To confirm
# cat /proc/mdstat Personalities : [linear] [raid6] [raid5] [raid4] [multipath] [raid0] [raid1] [raid10] md0 : active raid4 loop8[9] loop9[8](S) loop7[6] loop6[5] loop5[4] loop4[3] loop3[2] loop2[1] loop1[0] 713216 blocks super 1.2 level 4, 512k chunk, algorithm 0 [8/8] [UUUUUUUU]
Then converting to raid5
# mdadm --grow /dev/md0 --level raid5 --raid-devices 8
mdadm: level of /dev/md0 changed to raid5
Confirming
# cat /proc/mdstat Personalities : [linear] [raid6] [raid5] [raid4] [multipath] [raid0] [raid1] [raid10] md0 : active raid5 loop8[9] loop9[8](S) loop7[6] loop6[5] loop5[4] loop4[3] loop3[2] loop2[1] loop1[0] 713216 blocks super 1.2 level 5, 512k chunk, algorithm 5 [8/8] [UUUUUUUU]
Destroy
# mdadm --stop /dev/md0 # mdadm --stop /dev/md1 # mdadm --stop /dev/md5 # mdadm --remove /dev/md0 # mdadm --remove /dev/md1 # mdadm --remove /dev/md5
Config file
# mdadm --detail --scan >> /etc/mdadm.conf # cat /etc/mdadm.conf ARRAY /dev/md1 metadata=0.90 spares=1 UUID=a79ea43f:46f4c907:b40a34a0:8cfeb653 ARRAY /dev/md0 metadata=0.90 UUID=c3d6c5a6:b0d1c032:b40a34a0:8cfeb653 ARRAY /dev/md5 metadata=0.90 spares=1 UUID=b6aeae37:58c17516:b40a34a0:8cfeb653
Sample config file
DEVICE /dev/sd[bcdjkl]1 DEVICE /dev/hda1 /dev/hdb1 # /dev/md0 is known by its UUID. ARRAY /dev/md0 UUID=3aaa0122:29827cfa:5331ad66:ca767371 # /dev/md1 contains all devices with a minor number of # 1 in the superblock. ARRAY /dev/md1 superminor=1 # /dev/md2 is made from precisely these two devices ARRAY /dev/md2 devices=/dev/hda1,/dev/hdb1 # /dev/md4 and /dev/md5 are a spare-group and spares # can be moved between them ARRAY /dev/md4 uuid=b23f3c6d:aec43a9f:fd65db85:369432df spare-group=group1 ARRAY /dev/md5 uuid=19464854:03f71b1b:e0df2edd:246cc977 spare-group=group1 # /dev/md/home is created if need to be a partitionable md array # any spare device number is allocated. ARRAY /dev/md/home UUID=9187a482:5dde19d9:eea3cc4a:d646ab8b auto=part MAILADDR root@mydomain.tld PROGRAM /usr/sbin/handle-mdadm-events CREATE group=system mode=0640 auto=part-8 HOMEHOST <system> AUTO +1.x -all