├── .github ├── dependabot.yml ├── tools │ ├── .checkpatch.conf │ ├── install_ubuntu_packages.sh │ └── run_mdadm_tests.sh └── workflows │ ├── review.yml │ └── tests.yml ├── .gitignore ├── Assemble.c ├── Build.c ├── CHANGELOG.md ├── COPYING ├── Create.c ├── Detail.c ├── Dump.c ├── Examine.c ├── Grow.c ├── Incremental.c ├── Kill.c ├── MAINTAINERS.md ├── Makefile ├── Manage.c ├── Query.c ├── README.md ├── ReadMe.c ├── bitmap.c ├── bitmap.h ├── clustermd_tests ├── 00r10_Create ├── 00r1_Create ├── 01r10_Grow_bitmap-switch ├── 01r10_Grow_resize ├── 01r1_Grow_add ├── 01r1_Grow_bitmap-switch ├── 01r1_Grow_resize ├── 02r10_Manage_add ├── 02r10_Manage_add-spare ├── 02r10_Manage_re-add ├── 02r1_Manage_add ├── 02r1_Manage_add-spare ├── 02r1_Manage_re-add ├── 03r10_switch-recovery ├── 03r10_switch-resync ├── 03r1_switch-recovery ├── 03r1_switch-resync ├── cluster_conf └── func.sh ├── config.c ├── coverity-gcc-hack.h ├── crc32.c ├── crc32.h ├── crc32c.c ├── dlink.c ├── dlink.h ├── documentation ├── HOW_TO_RELEASE.md ├── bitmap.md ├── external-reshape-design.txt ├── mdadm.conf-example └── mdmon-design.txt ├── drive_encryption.c ├── drive_encryption.h ├── lib.c ├── managemon.c ├── mapfile.c ├── maps.c ├── md.4 ├── md5.h ├── md_p.h ├── md_u.h ├── mdadm.8.in ├── mdadm.c ├── mdadm.conf.5.in ├── mdadm.h ├── mdadm_status.h ├── mdmon.8 ├── mdmon.c ├── mdmon.h ├── mdmonitor.c ├── mdopen.c ├── mdstat.c ├── misc ├── mdcheck └── syslog-events ├── monitor.c ├── msg.c ├── msg.h ├── part.h ├── platform-intel.c ├── platform-intel.h ├── policy.c ├── probe_roms.c ├── probe_roms.h ├── pwgr.c ├── raid5extend.c ├── raid6check.8 ├── raid6check.c ├── restripe.c ├── sha1.c ├── sha1.h ├── super-ddf.c ├── super-gpt.c ├── super-intel.c ├── super-mbr.c ├── super0.c ├── super1.c ├── swap_super.c ├── sysfs.c ├── systemd ├── mdadm-grow-continue@.service ├── mdadm-last-resort@.service ├── mdadm-last-resort@.timer ├── mdadm.shutdown ├── mdcheck_continue.service ├── mdcheck_continue.timer ├── mdcheck_start.service ├── mdcheck_start.timer ├── mdmon@.service ├── mdmonitor-oneshot.service ├── mdmonitor-oneshot.timer └── mdmonitor.service ├── test ├── tests ├── 00confnames ├── 00createnames ├── 00linear ├── 00multipath ├── 00names ├── 00raid0 ├── 00raid1 ├── 00raid10 ├── 00raid4 ├── 00raid5 ├── 00raid5-zero ├── 00raid6 ├── 00readonly ├── 01r1fail ├── 01r5fail ├── 01r5integ ├── 01raid6integ ├── 01replace ├── 02lineargrow ├── 02r1add ├── 02r1grow ├── 02r5grow ├── 02r6grow ├── 03assem-incr ├── 03r0assem ├── 03r5assem ├── 03r5assemV1 ├── 04r0update ├── 04r1update ├── 04r5swap ├── 04update-metadata ├── 04update-uuid ├── 05r1-add-badblocks ├── 05r1-add-internalbitmap ├── 05r1-add-internalbitmap-v1a ├── 05r1-add-internalbitmap-v1b ├── 05r1-add-internalbitmap-v1c ├── 05r1-failfast ├── 05r1-grow-internal ├── 05r1-grow-internal-1 ├── 05r1-internalbitmap ├── 05r1-internalbitmap-v1a ├── 05r1-internalbitmap-v1b ├── 05r1-internalbitmap-v1c ├── 05r1-re-add ├── 05r1-re-add-nosuper ├── 05r1-remove-internalbitmap ├── 05r1-remove-internalbitmap-v1a ├── 05r1-remove-internalbitmap-v1b ├── 05r1-remove-internalbitmap-v1c ├── 05r5-internalbitmap ├── 05r6tor0 ├── 05r6tor0.broken ├── 06name ├── 06sysfs ├── 06wrmostly ├── 07autoassemble ├── 07autodetect ├── 07changelevelintr ├── 07changelevels ├── 07changelevels.broken ├── 07layouts ├── 07reshape5intr ├── 07revert-grow ├── 07revert-grow.broken ├── 07revert-inplace ├── 07revert-inplace.broken ├── 07revert-shrink ├── 07revert-shrink.broken ├── 07testreshape5 ├── 09imsm-assemble ├── 09imsm-create-fail-rebuild ├── 
09imsm-create-fail-rebuild.broken ├── 09imsm-overlap.broken ├── 10ddf-assemble-missing ├── 10ddf-assemble-missing.broken ├── 10ddf-create ├── 10ddf-create-fail-rebuild ├── 10ddf-fail-create-race ├── 10ddf-fail-create-race.broken ├── 10ddf-fail-readd ├── 10ddf-fail-readd-readonly ├── 10ddf-fail-spare ├── 10ddf-fail-stop-readd ├── 10ddf-fail-twice ├── 10ddf-fail-two-spares ├── 10ddf-geometry ├── 10ddf-incremental-wrong-order ├── 10ddf-incremental-wrong-order.broken ├── 10ddf-sudden-degraded ├── 11spare-migration ├── 12imsm-r0_2d-grow-r0_3d ├── 12imsm-r0_2d-grow-r0_4d ├── 12imsm-r0_2d-grow-r0_5d ├── 12imsm-r0_3d-grow-r0_4d ├── 12imsm-r5_3d-grow-r5_4d ├── 12imsm-r5_3d-grow-r5_5d ├── 13imsm-r0_r0_2d-grow-r0_r0_4d ├── 13imsm-r0_r0_2d-grow-r0_r0_5d ├── 13imsm-r0_r0_3d-grow-r0_r0_4d ├── 13imsm-r0_r5_3d-grow-r0_r5_4d ├── 13imsm-r0_r5_3d-grow-r0_r5_5d ├── 13imsm-r5_r0_3d-grow-r5_r0_4d ├── 13imsm-r5_r0_3d-grow-r5_r0_5d ├── 14imsm-r0_3d-r5_3d-migrate-r5_4d-r5_4d ├── 14imsm-r0_3d_no_spares-migrate-r5_3d ├── 14imsm-r0_r0_2d-takeover-r10_4d ├── 14imsm-r10_4d-grow-r10_5d ├── 14imsm-r10_r5_4d-takeover-r0_2d ├── 14imsm-r1_2d-grow-r1_3d ├── 14imsm-r1_2d-grow-r1_3d.broken ├── 14imsm-r1_2d-takeover-r0_2d ├── 14imsm-r1_2d-takeover-r0_2d.broken ├── 14imsm-r5_3d-grow-r5_5d-no-spares ├── 14imsm-r5_3d-migrate-r4_3d ├── 15imsm-r0_3d_64k-migrate-r0_3d_256k ├── 15imsm-r5_3d_4k-migrate-r5_3d_256k ├── 15imsm-r5_3d_64k-migrate-r5_3d_256k ├── 15imsm-r5_6d_4k-migrate-r5_6d_256k ├── 15imsm-r5_r0_3d_64k-migrate-r5_r0_3d_256k ├── 16imsm-r0_3d-migrate-r5_4d ├── 16imsm-r0_5d-migrate-r5_6d ├── 16imsm-r5_3d-migrate-r0_3d ├── 16imsm-r5_5d-migrate-r0_5d ├── 18imsm-1d-takeover-r0_1d ├── 18imsm-1d-takeover-r1_2d ├── 18imsm-r0_2d-takeover-r10_4d ├── 18imsm-r10_4d-takeover-r0_2d ├── 18imsm-r10_4d-takeover-r0_2d.broken ├── 18imsm-r1_2d-takeover-r0_1d ├── 18imsm-r1_2d-takeover-r0_1d.broken ├── 19raid6auto-repair ├── 19raid6auto-repair.broken ├── 19raid6check ├── 19raid6repair ├── 19raid6repair.broken ├── 19repair-does-not-destroy ├── 20raid5journal ├── 20raid5journal.broken ├── 21raid5cache ├── 23rdev-lifetime ├── 24raid10deadlock ├── 24raid456deadlock ├── 25raid456-recovery-while-reshape ├── 25raid456-reshape-corrupt-data ├── 25raid456-reshape-deadlock ├── 25raid456-reshape-while-recovery ├── 25raid456-reshape-while-recovery.broken ├── ToTest ├── env-ddf-template ├── env-imsm-template ├── func.sh ├── imsm-grow-template ├── skiptests ├── templates │ └── names_template └── utils ├── udev-md-clustered-confirm-device.rules ├── udev-md-raid-arrays.rules ├── udev-md-raid-assembly.rules ├── udev-md-raid-creating.rules ├── udev-md-raid-safe-timeouts.rules ├── udev.c ├── udev.h ├── util.c ├── uuid.c ├── xmalloc.c └── xmalloc.h /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "github-actions" 4 | directory: "/" 5 | schedule: 6 | interval: "daily" 7 | -------------------------------------------------------------------------------- /.github/tools/.checkpatch.conf: -------------------------------------------------------------------------------- 1 | --no-tree 2 | --show-types 3 | --exclude .github 4 | --exclude clustermd_tests 5 | --exclude documentation 6 | --exclude misc 7 | --exclude systemd 8 | --exclude tests 9 | --ignore FILE_PATH_CHANGES 10 | --ignore EMAIL_SUBJECT 11 | --ignore NEW_TYPEDEFS 12 | -------------------------------------------------------------------------------- /.github/tools/install_ubuntu_packages.sh: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/bash 2 | 3 | VERSION_CODENAME=$(grep -oP '(?<=^VERSION_CODENAME=).+' /etc/os-release | tr -d '"') 4 | echo "Detected VERSION_CODENAME: $VERSION_CODENAME" 5 | 6 | # Add ubuntu repository 7 | sudo add-apt-repository -y "deb [arch=amd64] http://archive.ubuntu.com/ubuntu $VERSION_CODENAME \ 8 | main universe" 9 | # Install gcc 10 | sudo apt-get -y install gcc-$1 --no-upgrade --no-install-recommends --no-install-suggests 11 | # Install dependencies 12 | sudo apt-get -y install make gcc libudev-dev devscripts --no-upgrade --no-install-recommends --no-install-suggests 13 | -------------------------------------------------------------------------------- /.github/tools/run_mdadm_tests.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/bash 2 | 3 | sudo make clean 4 | sudo make -j$(nproc) 5 | sudo make install 6 | sudo mdadm -Ss 7 | sudo ./test setup 8 | 9 | # Uncomment and adjust this to minimalize testing time for CI or test improvements. 10 | # --tests=test1,test2,... Comma separated list of tests to run 11 | 12 | #sudo ./test --tests=00createnames 13 | 14 | sudo ./test --skip-broken --no-error --disable-integrity --disable-multipath --disable-linear --keep-going --skip-bigcase 15 | 16 | ret=$? 17 | sudo ./test cleanup 18 | exit $ret 19 | -------------------------------------------------------------------------------- /.github/workflows/review.yml: -------------------------------------------------------------------------------- 1 | name: review 2 | on: [pull_request] 3 | env: 4 | cflags: -Werror 5 | jobs: 6 | make: 7 | # when gcc is not found, it may be needed to update runner version 8 | runs-on: ubuntu-24.04 9 | name: Compilation test with gcc 10 | strategy: 11 | matrix: 12 | # gcc-versions are used to test up to 5 years old 13 | gcc-version: [9, 10, 11, 12, 13, 14] 14 | steps: 15 | - uses: actions/checkout@v4 16 | - name: 'Add ubuntu repository and install dependencies' 17 | run: .github/tools/install_ubuntu_packages.sh ${{ matrix.gcc-version }} 18 | - name: 'Check if gcc was installed correctly' 19 | run: gcc-${{ matrix.gcc-version }} --version 20 | - name: 'Make with DEBUG flag' 21 | run: V=1 make -j$(nproc) -B CC=gcc-${{ matrix.gcc-version }} CXFLAGS=-DEBUG && make clean 22 | - name: 'Make with DEBIAN flag' 23 | run: V=1 make -j$(nproc) -B CC=gcc-${{ matrix.gcc-version }} CXFLAGS=-DEBIAN && make clean 24 | - name: 'Make with USE_PTHREADS flag' 25 | run: V=1 make -j$(nproc) -B CC=gcc-${{ matrix.gcc-version }} CXFLAGS=-USE_PTHREADS && make clean 26 | - name: 'Make with DNO_LIBUDEV flag' 27 | run: V=1 make -j$(nproc) -B CC=gcc-${{ matrix.gcc-version }} CXFLAGS=-DNO_LIBUDEV && make clean 28 | - name: 'Make' 29 | run: V=1 make -j$(nproc) CC=gcc-${{ matrix.gcc-version }} 30 | - name: hardening-check mdadm 31 | run: hardening-check mdadm 32 | - name: hardening-check mdmon 33 | run: hardening-check mdmon 34 | checkpatch: 35 | runs-on: ubuntu-latest 36 | name: checkpatch review 37 | steps: 38 | - uses: actions/checkout@v4 39 | with: 40 | ref: ${{ github.event.pull_request.head.sha }} 41 | fetch-depth: 0 42 | - name: 'Move prepared .checkpatch.conf file to main directory' 43 | run: mv .github/tools/.checkpatch.conf . 
44 | - name: Run checkpatch review 45 | uses: webispy/checkpatch-action@v9 46 | -------------------------------------------------------------------------------- /.github/workflows/tests.yml: -------------------------------------------------------------------------------- 1 | name: tests 2 | on: 3 | schedule: 4 | - cron: "0 0 * * *" 5 | pull_request: 6 | paths: 7 | - '*.c' 8 | - '*.h' 9 | - 'tests/*' 10 | - 'test' 11 | - '.github/*' 12 | - '.github/workflows/*' 13 | - '.github/tools/*' 14 | jobs: 15 | upstream_tests: 16 | if: ${{ github.repository == 'md-raid-utilities/mdadm' }} 17 | runs-on: self-hosted 18 | timeout-minutes: 150 19 | name: upstream tests 20 | steps: 21 | - uses: actions/checkout@v4 22 | if: ${{ github.event_name == 'pull_request' }} 23 | with: 24 | ref: ${{ github.event.pull_request.head.sha }} 25 | fetch-depth: 0 26 | - uses: actions/checkout@v4 27 | if: ${{ github.event_name == 'schedule' }} 28 | with: 29 | ref: main 30 | fetch-depth: 0 31 | - name: 'Prepare machine' 32 | run: | 33 | cd .. 34 | vagrant halt 35 | vagrant status 36 | vagrant up 37 | 38 | - name: 'Run tests' 39 | id: testing 40 | continue-on-error: true 41 | run: | 42 | cd .. 43 | vagrant ssh -c "cd /home/vagrant/host/mdadm && .github/tools/run_mdadm_tests.sh" 44 | 45 | - name: 'Copy logs to host machine' 46 | if: ${{ steps.testing.outcome == 'failure' }} 47 | run: | 48 | cd .. 49 | vagrant ssh -c "sudo mkdir -p /home/vagrant/host/logs && sudo mv /var/tmp/*.log /home/vagrant/host/logs" 50 | 51 | - name: "Save artifacts" 52 | if: ${{ steps.testing.outcome == 'failure' }} 53 | uses: actions/upload-artifact@v4 54 | with: 55 | name: "Logs from failed tests" 56 | path: /home/ci/actions-runner/_work/mdadm/logs/*.log 57 | 58 | - name: "Clean logs" 59 | if: ${{ steps.testing.outcome == 'failure' }} 60 | run: | 61 | cd .. 62 | sudo rm /home/ci/actions-runner/_work/mdadm/logs/*.log 63 | 64 | - name: "Set failed" 65 | if: ${{ steps.testing.outcome == 'failure' }} 66 | run: exit 1 67 | 68 | cleanup: 69 | runs-on: self-hosted 70 | needs: [upstream_tests] 71 | steps: 72 | - name: Restore clean VM 73 | run: | 74 | cd .. 75 | vagrant up 76 | vagrant ssh -c "sudo mdadm -Ss" 77 | vagrant halt 78 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /*.o 2 | /*.man 3 | /*-stamp 4 | /mdadm 5 | /mdadm.8 6 | /mdadm.conf.5 7 | /mdadm.udeb 8 | /mdassemble 9 | /mdmon 10 | /swap_super 11 | /test_stripe 12 | /TAGS 13 | /mdadm.O2 14 | /mdadm.Os 15 | /mdadm.static 16 | /mdassemble.auto 17 | /mdassemble.static 18 | /mdmon.O2 19 | /raid6check 20 | -------------------------------------------------------------------------------- /MAINTAINERS.md: -------------------------------------------------------------------------------- 1 | If you are sending patch through mailing list, please include everyone listed 2 | in this file. 
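The tests workflow above runs .github/tools/run_mdadm_tests.sh inside a Vagrant VM on a self-hosted runner. For local debugging it can be useful to run a single native test instead of the full suite; the sketch below only re-uses commands already present in that script and its commented-out --tests hint (the test name 00raid1 is just an example taken from tests/, and a disposable machine with root access and the build dependencies installed is assumed):

# build and install the freshly built mdadm
make -j$(nproc)
sudo make install
# stop any running arrays and create the loop-device test environment
sudo mdadm -Ss
sudo ./test setup
# run one selected test instead of the whole suite
sudo ./test --tests=00raid1
# tear the test environment down again
sudo ./test cleanup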
3 | 4 | ## Github maintainers team 5 | 6 | Github Pull Request must be `approved` by at least 2 maintainers team members: 7 | 8 | Name | Github Profile | Email address | 9 | | -- |----------------|---------------| 10 | | Blazej Kucman | [bkucman](https://github.com/bkucman) | | 11 | | Mariusz Tkaczyk | [mtkaczyk](https://github.com/mtkaczyk) | | 12 | | Nigel Croxon | [ncroxon](https://github.com/ncroxon) | | 13 | | Xiao Ni | [XiaoNi87](https://github.com/XiaoNi87) | | 14 | 15 | ## Kernel.org maintainers 16 | Reach this team specifically if you are observing differences 17 | between kernel.org and Github. 18 | 19 | | Name | Email address | 20 | |------|----------------| 21 | | Mariusz Tkaczyk | | 22 | | Song Liu | | 23 | | Yu Kuai | | 24 | -------------------------------------------------------------------------------- /bitmap.h: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0-only 2 | 3 | /* 4 | * Copyright (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003 5 | * Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc. 6 | * Copyright (C) 2005 Neil Brown 7 | */ 8 | 9 | /* See documentation/bitmap.md */ 10 | 11 | #ifndef BITMAP_H 12 | #define BITMAP_H 1 13 | 14 | #define BITMAP_MAJOR_LO 3 15 | #define BITMAP_MAJOR_HI 4 16 | #define BITMAP_MAJOR_CLUSTERED 5 17 | #define BITMAP_MAGIC 0x6d746962 18 | 19 | /* use these for bitmap->flags and bitmap->sb->state bit-fields */ 20 | enum bitmap_state { 21 | BITMAP_ACTIVE = 0x001, /* the bitmap is in use */ 22 | BITMAP_STALE = 0x002 /* the bitmap file is out of date or had -EIO */ 23 | }; 24 | 25 | /* the superblock at the front of the bitmap file -- little endian */ 26 | typedef struct bitmap_super_s { 27 | __u32 magic; /* 0 BITMAP_MAGIC */ 28 | __u32 version; /* 4 the bitmap major for now, could change... */ 29 | __u8 uuid[16]; /* 8 128 bit uuid - must match md device uuid */ 30 | __u64 events; /* 24 event counter for the bitmap (1)*/ 31 | __u64 events_cleared;/*32 event counter when last bit cleared (2) */ 32 | __u64 sync_size; /* 40 the size of the md device's sync range(3) */ 33 | __u32 state; /* 48 bitmap state information */ 34 | __u32 chunksize; /* 52 the bitmap chunk size in bytes */ 35 | __u32 daemon_sleep; /* 56 seconds between disk flushes */ 36 | __u32 write_behind; /* 60 number of outstanding write-behind writes */ 37 | __u32 sectors_reserved; /* 64 number of 512-byte sectors that are 38 | * reserved for the bitmap. */ 39 | __u32 nodes; /* 68 the maximum number of nodes in cluster. 
*/ 40 | __u8 cluster_name[64]; /* 72 cluster name to which this md belongs */ 41 | __u8 pad[256 - 136]; /* set to zero */ 42 | } bitmap_super_t; 43 | 44 | #endif 45 | -------------------------------------------------------------------------------- /clustermd_tests/00r10_Create: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | mdadm -CR $md0 -l10 -b clustered --layout n2 -n2 $dev0 $dev1 4 | ssh $NODE2 mdadm -A $md0 $dev0 $dev1 5 | check $NODE1 resync 6 | check $NODE2 PENDING 7 | check all wait 8 | check all raid10 9 | check all bitmap 10 | check all nosync 11 | check all state UU 12 | check all dmesg 13 | stop_md all $md0 14 | 15 | mdadm -CR $md0 -l10 -b clustered -n3 --layout n3 $dev0 $dev1 $dev2 --assume-clean 16 | ssh $NODE2 mdadm -A $md0 $dev0 $dev1 $dev2 17 | check all nosync 18 | check all raid10 19 | check all bitmap 20 | check all state UUU 21 | check all dmesg 22 | stop_md all $md0 23 | 24 | mdadm -CR $md0 -l10 -b clustered -n2 -x1 --layout n2 $dev0 $dev1 $dev2 --assume-clean 25 | ssh $NODE2 mdadm -A $md0 $dev0 $dev1 $dev2 26 | check all nosync 27 | check all raid10 28 | check all bitmap 29 | check all spares 1 30 | check all state UU 31 | check all dmesg 32 | stop_md all $md0 33 | 34 | name=tstmd 35 | mdadm -CR $md0 -l10 -b clustered -n2 $dev0 $dev1 --layout n2 --name=$name --assume-clean 36 | ssh $NODE2 mdadm -A $md0 $dev0 $dev1 37 | check all nosync 38 | check all raid10 39 | check all bitmap 40 | check all state UU 41 | for ip in $NODE1 $NODE2 42 | do 43 | ssh $ip "mdadm -D $md0 | grep 'Name' | grep -q $name" 44 | [ $? -ne '0' ] && 45 | die "$ip: check --name=$name failed." 46 | done 47 | check all dmesg 48 | stop_md all $md0 49 | 50 | exit 0 51 | -------------------------------------------------------------------------------- /clustermd_tests/00r1_Create: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | mdadm -CR $md0 -l1 -b clustered -n2 $dev0 $dev1 4 | ssh $NODE2 mdadm -A $md0 $dev0 $dev1 5 | check $NODE1 resync 6 | check $NODE2 PENDING 7 | check all wait 8 | check all raid1 9 | check all bitmap 10 | check all nosync 11 | check all state UU 12 | check all dmesg 13 | stop_md all $md0 14 | 15 | mdadm -CR $md0 -l1 -b clustered -n2 $dev0 $dev1 --assume-clean 16 | ssh $NODE2 mdadm -A $md0 $dev0 $dev1 17 | check all nosync 18 | check all raid1 19 | check all bitmap 20 | check all state UU 21 | check all dmesg 22 | stop_md all $md0 23 | 24 | mdadm -CR $md0 -l1 -b clustered -n2 -x1 $dev0 $dev1 $dev2 --assume-clean 25 | ssh $NODE2 mdadm -A $md0 $dev0 $dev1 $dev2 26 | check all nosync 27 | check all raid1 28 | check all bitmap 29 | check all spares 1 30 | check all state UU 31 | check all dmesg 32 | stop_md all $md0 33 | 34 | name=tstmd 35 | mdadm -CR $md0 -l1 -b clustered -n2 $dev0 $dev1 --name=$name --assume-clean 36 | ssh $NODE2 mdadm -A $md0 $dev0 $dev1 37 | check all nosync 38 | check all raid1 39 | check all bitmap 40 | check all state UU 41 | for ip in $NODE1 $NODE2 42 | do 43 | ssh $ip "mdadm -D $md0 | grep 'Name' | grep -q $name" 44 | [ $? -ne '0' ] && 45 | die "$ip: check --name=$name failed." 
46 | done 47 | check all dmesg 48 | stop_md all $md0 49 | 50 | exit 0 51 | -------------------------------------------------------------------------------- /clustermd_tests/01r10_Grow_bitmap-switch: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | mdadm -CR $md0 -l10 -b clustered --layout n2 -n2 $dev0 $dev1 --assume-clean 4 | ssh $NODE2 mdadm -A $md0 $dev0 $dev1 5 | check all nosync 6 | check all raid10 7 | check all bitmap 8 | check all state UU 9 | 10 | # switch 'clustered' bitmap to 'none', and then 'none' to 'internal' 11 | stop_md $NODE2 $md0 12 | mdadm --grow $md0 --bitmap=none 13 | [ $? -eq '0' ] || 14 | die "$NODE1: change bitmap 'clustered' to 'none' failed." 15 | mdadm -X $dev0 $dev1 &> /dev/null 16 | [ $? -eq '0' ] && 17 | die "$NODE1: bitmap still exists in member_disks." 18 | check all nobitmap 19 | mdadm --grow $md0 --bitmap=internal 20 | [ $? -eq '0' ] || 21 | die "$NODE1: change bitmap 'none' to 'internal' failed." 22 | sleep 2 23 | mdadm -X $dev0 $dev1 &> /dev/null 24 | [ $? -eq '0' ] || 25 | die "$NODE1: create 'internal' bitmap failed." 26 | check $NODE1 bitmap 27 | 28 | # switch 'internal' bitmap to 'none', and then 'none' to 'clustered' 29 | mdadm --grow $md0 --bitmap=none 30 | [ $? -eq '0' ] || 31 | die "$NODE1: change bitmap 'internal' to 'none' failed." 32 | mdadm -X $dev0 $dev1 &> /dev/null 33 | [ $? -eq '0' ] && 34 | die "$NODE1: bitmap still exists in member_disks." 35 | check $NODE1 nobitmap 36 | mdadm --grow $md0 --bitmap=clustered 37 | [ $? -eq '0' ] || 38 | die "$NODE1: change bitmap 'none' to 'clustered' failed." 39 | ssh $NODE2 mdadm -A $md0 $dev0 $dev1 40 | sleep 2 41 | for ip in $NODES 42 | do 43 | ssh $ip "mdadm -X $dev0 $dev1 | grep -q 'Cluster name'" || 44 | die "$ip: create 'clustered' bitmap failed." 
45 | done 46 | check all bitmap 47 | check all state UU 48 | check all dmesg 49 | stop_md all $md0 50 | 51 | exit 0 52 | -------------------------------------------------------------------------------- /clustermd_tests/01r10_Grow_resize: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | size=20000 4 | 5 | mdadm -CR $md0 -l10 -b clustered --layout n2 --size $size --chunk=64 -n2 $dev0 $dev1 --assume-clean 6 | ssh $NODE2 mdadm -A $md0 $dev0 $dev1 7 | check all nosync 8 | check all raid10 9 | check all bitmap 10 | check all state UU 11 | 12 | mdadm --grow $md0 --size max 13 | check $NODE1 resync 14 | check $NODE1 wait 15 | check all state UU 16 | 17 | mdadm --grow $md0 --size $size 18 | check all nosync 19 | check all state UU 20 | check all dmesg 21 | stop_md all $md0 22 | 23 | mdadm -CR $md0 -l10 -b clustered --layout n2 --chunk=64 -n2 $dev0 $dev1 --assume-clean 24 | ssh $NODE2 mdadm -A $md0 $dev0 $dev1 25 | check all nosync 26 | check all raid10 27 | check all bitmap 28 | check all state UU 29 | 30 | mdadm --grow $md0 --chunk=128 31 | check $NODE1 reshape 32 | check $NODE1 wait 33 | check all chunk 128 34 | check all state UU 35 | check all dmesg 36 | stop_md all $md0 37 | 38 | exit 0 39 | -------------------------------------------------------------------------------- /clustermd_tests/01r1_Grow_add: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | mdadm -CR $md0 -l1 -b clustered -n2 $dev0 $dev1 --assume-clean 4 | ssh $NODE2 mdadm -A $md0 $dev0 $dev1 5 | check all nosync 6 | check all raid1 7 | check all bitmap 8 | check all state UU 9 | check all dmesg 10 | mdadm --grow $md0 --raid-devices=3 --add $dev2 11 | sleep 0.3 12 | grep recovery /proc/mdstat 13 | if [ $? -eq '0' ] 14 | then 15 | check $NODE1 wait 16 | else 17 | check $NODE2 recovery 18 | check $NODE2 wait 19 | fi 20 | check all state UUU 21 | check all dmesg 22 | stop_md all $md0 23 | 24 | mdadm -CR $md0 -l1 -b clustered -n2 -x1 $dev0 $dev1 $dev2 --assume-clean 25 | ssh $NODE2 mdadm -A $md0 $dev0 $dev1 $dev2 26 | check all nosync 27 | check all raid1 28 | check all bitmap 29 | check all spares 1 30 | check all state UU 31 | check all dmesg 32 | mdadm --grow $md0 --raid-devices=3 --add $dev3 33 | sleep 0.3 34 | grep recovery /proc/mdstat 35 | if [ $? -eq '0' ] 36 | then 37 | check $NODE1 wait 38 | else 39 | check $NODE2 recovery 40 | check $NODE2 wait 41 | fi 42 | check all state UUU 43 | check all dmesg 44 | stop_md all $md0 45 | 46 | mdadm -CR $md0 -l1 -b clustered -n2 -x1 $dev0 $dev1 $dev2 --assume-clean 47 | ssh $NODE2 mdadm -A $md0 $dev0 $dev1 $dev2 48 | check all nosync 49 | check all raid1 50 | check all bitmap 51 | check all spares 1 52 | check all state UU 53 | check all dmesg 54 | mdadm --grow $md0 --raid-devices=3 55 | sleep 0.3 56 | grep recovery /proc/mdstat 57 | if [ $? 
-eq '0' ] 58 | then 59 | check $NODE1 wait 60 | else 61 | check $NODE2 recovery 62 | check $NODE2 wait 63 | fi 64 | check all state UUU 65 | check all dmesg 66 | stop_md all $md0 67 | 68 | exit 0 69 | -------------------------------------------------------------------------------- /clustermd_tests/01r1_Grow_bitmap-switch: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | mdadm -CR $md0 -l1 -b clustered -n2 $dev0 $dev1 --assume-clean 4 | ssh $NODE2 mdadm -A $md0 $dev0 $dev1 5 | check all nosync 6 | check all raid1 7 | check all bitmap 8 | check all state UU 9 | 10 | # switch 'clustered' bitmap to 'none', and then 'none' to 'internal' 11 | stop_md $NODE2 $md0 12 | mdadm --grow $md0 --bitmap=none 13 | [ $? -eq '0' ] || 14 | die "$NODE1: change bitmap 'clustered' to 'none' failed." 15 | mdadm -X $dev0 $dev1 &> /dev/null 16 | [ $? -eq '0' ] && 17 | die "$NODE1: bitmap still exists in member_disks." 18 | check all nobitmap 19 | mdadm --grow $md0 --bitmap=internal 20 | [ $? -eq '0' ] || 21 | die "$NODE1: change bitmap 'none' to 'internal' failed." 22 | sleep 2 23 | mdadm -X $dev0 $dev1 &> /dev/null 24 | [ $? -eq '0' ] || 25 | die "$NODE1: create 'internal' bitmap failed." 26 | check $NODE1 bitmap 27 | 28 | # switch 'internal' bitmap to 'none', and then 'none' to 'clustered' 29 | mdadm --grow $md0 --bitmap=none 30 | [ $? -eq '0' ] || 31 | die "$NODE1: change bitmap 'internal' to 'none' failed." 32 | mdadm -X $dev0 $dev1 &> /dev/null 33 | [ $? -eq '0' ] && 34 | die "$NODE1: bitmap still exists in member_disks." 35 | check $NODE1 nobitmap 36 | mdadm --grow $md0 --bitmap=clustered 37 | [ $? -eq '0' ] || 38 | die "$NODE1: change bitmap 'none' to 'clustered' failed." 39 | ssh $NODE2 mdadm -A $md0 $dev0 $dev1 40 | sleep 2 41 | for ip in $NODES 42 | do 43 | ssh $ip "mdadm -X $dev0 $dev1 | grep -q 'Cluster name'" || 44 | die "$ip: create 'clustered' bitmap failed." 
45 | done 46 | check all bitmap 47 | check all state UU 48 | check all dmesg 49 | stop_md all $md0 50 | 51 | exit 0 52 | -------------------------------------------------------------------------------- /clustermd_tests/01r1_Grow_resize: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | size=10000 4 | 5 | mdadm -CR $md0 -l1 -b clustered --size $size -n2 $dev0 $dev1 --assume-clean 6 | ssh $NODE2 mdadm -A $md0 $dev0 $dev1 7 | check all nosync 8 | check all raid1 9 | check all bitmap 10 | check all state UU 11 | 12 | mdadm --grow $md0 --size max 13 | check $NODE1 resync 14 | check $NODE1 wait 15 | check all state UU 16 | 17 | mdadm --grow $md0 --size $size 18 | check all nosync 19 | check all state UU 20 | check all dmesg 21 | stop_md all $md0 22 | 23 | exit 0 24 | -------------------------------------------------------------------------------- /clustermd_tests/02r10_Manage_add: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | mdadm -CR $md0 -l10 -b clustered --layout n2 -n2 $dev0 $dev1 --assume-clean 4 | ssh $NODE2 mdadm -A $md0 $dev0 $dev1 5 | check all nosync 6 | check all raid10 7 | check all bitmap 8 | check all state UU 9 | check all dmesg 10 | mdadm --manage $md0 --fail $dev0 --remove $dev0 11 | mdadm --zero $dev2 12 | mdadm --manage $md0 --add $dev2 13 | sleep 0.3 14 | check $NODE1 recovery 15 | check $NODE1 wait 16 | check all state UU 17 | check all dmesg 18 | stop_md all $md0 19 | 20 | mdadm -CR $md0 -l10 -b clustered --layout n2 -n2 $dev0 $dev1 --assume-clean 21 | ssh $NODE2 mdadm -A $md0 $dev0 $dev1 22 | check all nosync 23 | check all raid10 24 | check all bitmap 25 | check all state UU 26 | check all dmesg 27 | mdadm --manage $md0 --add $dev2 28 | check all spares 1 29 | check all state UU 30 | check all dmesg 31 | stop_md all $md0 32 | 33 | exit 0 34 | -------------------------------------------------------------------------------- /clustermd_tests/02r10_Manage_add-spare: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | mdadm -CR $md0 -l10 -b clustered --layout n2 -n2 $dev0 $dev1 --assume-clean 4 | ssh $NODE2 mdadm -A $md0 $dev0 $dev1 5 | check all nosync 6 | check all raid10 7 | check all bitmap 8 | check all state UU 9 | check all dmesg 10 | mdadm --manage $md0 --add-spare $dev2 11 | check all spares 1 12 | check all state UU 13 | check all dmesg 14 | stop_md all $md0 15 | 16 | mdadm -CR $md0 -l10 -b clustered --layout n2 -n2 -x1 $dev0 $dev1 $dev2 --assume-clean 17 | ssh $NODE2 mdadm -A $md0 $dev0 $dev1 $dev2 18 | check all nosync 19 | check all raid10 20 | check all bitmap 21 | check all spares 1 22 | check all state UU 23 | check all dmesg 24 | mdadm --manage $md0 --add-spare $dev3 25 | check all spares 2 26 | check all state UU 27 | check all dmesg 28 | stop_md all $md0 29 | 30 | exit 0 31 | -------------------------------------------------------------------------------- /clustermd_tests/02r10_Manage_re-add: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | mdadm -CR $md0 -l10 -b clustered --layout n2 -n2 $dev0 $dev1 --assume-clean 4 | ssh $NODE2 mdadm -A $md0 $dev0 $dev1 5 | check all nosync 6 | check all raid10 7 | check all bitmap 8 | check all state UU 9 | check all dmesg 10 | mdadm --manage $md0 --fail $dev0 --remove $dev0 11 | mdadm --manage $md0 --re-add $dev0 12 | #non-clustered array also doesn't do sync job 13 | #check $NODE1 recovery 14 | check 
all wait 15 | check all state UU 16 | check all dmesg 17 | stop_md all $md0 18 | 19 | exit 0 20 | -------------------------------------------------------------------------------- /clustermd_tests/02r1_Manage_add: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | mdadm -CR $md0 -l1 -b clustered -n2 $dev0 $dev1 --assume-clean 4 | ssh $NODE2 mdadm -A $md0 $dev0 $dev1 5 | check all nosync 6 | check all raid1 7 | check all bitmap 8 | check all state UU 9 | check all dmesg 10 | mdadm --manage $md0 --fail $dev0 --remove $dev0 11 | mdadm --zero $dev2 12 | mdadm --manage $md0 --add $dev2 13 | sleep 0.3 14 | check $NODE1 recovery 15 | check $NODE1 wait 16 | check all state UU 17 | check all dmesg 18 | stop_md all $md0 19 | 20 | mdadm -CR $md0 -l1 -b clustered -n2 $dev0 $dev1 --assume-clean 21 | ssh $NODE2 mdadm -A $md0 $dev0 $dev1 22 | check all nosync 23 | check all raid1 24 | check all bitmap 25 | check all state UU 26 | check all dmesg 27 | mdadm --manage $md0 --add $dev2 28 | check all spares 1 29 | check all state UU 30 | check all dmesg 31 | stop_md all $md0 32 | 33 | exit 0 34 | -------------------------------------------------------------------------------- /clustermd_tests/02r1_Manage_add-spare: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | mdadm -CR $md0 -l1 -b clustered -n2 $dev0 $dev1 --assume-clean 4 | ssh $NODE2 mdadm -A $md0 $dev0 $dev1 5 | check all nosync 6 | check all raid1 7 | check all bitmap 8 | check all state UU 9 | check all dmesg 10 | mdadm --manage $md0 --add-spare $dev2 11 | check all spares 1 12 | check all state UU 13 | check all dmesg 14 | stop_md all $md0 15 | 16 | mdadm -CR $md0 -l1 -b clustered -n2 -x1 $dev0 $dev1 $dev2 --assume-clean 17 | ssh $NODE2 mdadm -A $md0 $dev0 $dev1 $dev2 18 | check all nosync 19 | check all raid1 20 | check all bitmap 21 | check all spares 1 22 | check all state UU 23 | check all dmesg 24 | mdadm --manage $md0 --add-spare $dev3 25 | check all spares 2 26 | check all state UU 27 | check all dmesg 28 | stop_md all $md0 29 | 30 | exit 0 31 | -------------------------------------------------------------------------------- /clustermd_tests/02r1_Manage_re-add: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | mdadm -CR $md0 -l1 -b clustered -n2 $dev0 $dev1 --assume-clean 4 | ssh $NODE2 mdadm -A $md0 $dev0 $dev1 5 | check all nosync 6 | check all raid1 7 | check all bitmap 8 | check all state UU 9 | check all dmesg 10 | mdadm --manage $md0 --fail $dev0 --remove $dev0 11 | mdadm --manage $md0 --re-add $dev0 12 | check all wait 13 | check all state UU 14 | check all dmesg 15 | stop_md all $md0 16 | 17 | exit 0 18 | -------------------------------------------------------------------------------- /clustermd_tests/03r10_switch-recovery: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | mdadm -CR $md0 -l10 -b clustered --layout n2 -n2 -x1 $dev0 $dev1 $dev2 --assume-clean 4 | ssh $NODE2 mdadm -A $md0 $dev0 $dev1 $dev2 5 | check all nosync 6 | check all raid10 7 | check all bitmap 8 | check all spares 1 9 | check all state UU 10 | check all dmesg 11 | mdadm --manage $md0 --fail $dev0 12 | sleep 0.2 13 | check $NODE1 recovery-remote 14 | stop_md $NODE1 $md0 15 | check $NODE2 recovery-remote 16 | check $NODE2 wait 17 | check $NODE2 state UU 18 | check all dmesg 19 | stop_md $NODE2 $md0 20 | 21 | exit 0 22 | 
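The clustermd_tests cases above all follow the same shape: create the clustered array on NODE1, assemble the same member devices on NODE2 over ssh, verify both nodes with the check/stop_md helpers from clustermd_tests/func.sh, and finish with exit 0. A minimal skeleton for a new case, assembled purely from commands already used in the scripts above (the particular checks are only illustrative), might look like this:

#!/bin/bash

# create a clustered RAID1 on the local node (NODE1)
mdadm -CR $md0 -l1 -b clustered -n2 $dev0 $dev1 --assume-clean
# assemble the same array on the second node
ssh $NODE2 mdadm -A $md0 $dev0 $dev1
# verify level, bitmap type and member state on both nodes
check all raid1
check all bitmap
check all state UU
check all dmesg
# stop the array on all nodes before exiting
stop_md all $md0

exit 0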
-------------------------------------------------------------------------------- /clustermd_tests/03r10_switch-resync: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | mdadm -CR $md0 -l10 -b clustered --layout n2 -n2 $dev0 $dev1 4 | ssh $NODE2 mdadm -A $md0 $dev0 $dev1 5 | check $NODE1 resync 6 | check $NODE2 PENDING 7 | stop_md $NODE1 $md0 8 | check $NODE2 resync 9 | check $NODE2 wait 10 | mdadm -A $md0 $dev0 $dev1 11 | check all raid10 12 | check all bitmap 13 | check all nosync 14 | check all state UU 15 | check all dmesg 16 | stop_md all $md0 17 | 18 | exit 0 19 | -------------------------------------------------------------------------------- /clustermd_tests/03r1_switch-recovery: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | mdadm -CR $md0 -l1 -b clustered -n2 -x1 $dev0 $dev1 $dev2 --assume-clean 4 | ssh $NODE2 mdadm -A $md0 $dev0 $dev1 $dev2 5 | check all nosync 6 | check all raid1 7 | check all bitmap 8 | check all spares 1 9 | check all state UU 10 | check all dmesg 11 | mdadm --manage $md0 --fail $dev0 12 | sleep 0.3 13 | check $NODE1 recovery-remote 14 | stop_md $NODE1 $md0 15 | check $NODE2 recovery-remote 16 | check $NODE2 wait 17 | check $NODE2 state UU 18 | check all dmesg 19 | stop_md $NODE2 $md0 20 | 21 | exit 0 22 | -------------------------------------------------------------------------------- /clustermd_tests/03r1_switch-resync: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | mdadm -CR $md0 -l1 -b clustered -n2 $dev0 $dev1 4 | ssh $NODE2 mdadm -A $md0 $dev0 $dev1 5 | check $NODE1 resync 6 | check $NODE2 PENDING 7 | stop_md $NODE1 $md0 8 | check $NODE2 resync 9 | check $NODE2 wait 10 | mdadm -A $md0 $dev0 $dev1 11 | check all raid1 12 | check all bitmap 13 | check all nosync 14 | check all state UU 15 | check all dmesg 16 | stop_md all $md0 17 | 18 | exit 0 19 | -------------------------------------------------------------------------------- /clustermd_tests/cluster_conf: -------------------------------------------------------------------------------- 1 | # Prerequisite: 2 | # 1. The clustermd_tests/ cases only support to test 2-node-cluster, cluster 3 | # requires packages: 'pacemaker+corosync+sbd+crmsh', all packages link at 4 | # "https://github.com/ClusterLabs/", and also requires dlm resource running 5 | # on each node of cluster. 6 | # For quick start HA-cluster with SUSE distributions, refer to the chapter 6-8: 7 | # https://www.suse.com/documentation/sle-ha-12/install-quick/data/install-quick.html 8 | # For Redhat distributions, please refer to: 9 | # https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html-single/high_availability_add-on_administration/index 10 | # 2. Setup ssh-access with no-authorized mode, it should be: 11 | # # 'ssh $node1 -l root ls' and 'ssh $node2 -l root ls' success on any node. 12 | # 3. Fill-up node-ip part and disks part as following. 13 | 14 | # Set node1 as the master node, the cluster-md cases should run on this node, 15 | # and node2 is the slave node. 16 | # For example: 17 | # NODE1=192.168.1.100 (testing run here) 18 | # NODE2=192.168.1.101 19 | NODE1= 20 | NODE2= 21 | 22 | # Provide the devlist for clustermd-testing, alternative: if set the step 1, 23 | # don't set step 2, and vice versa. 24 | # 1. 
Use ISCSI service to provide shared storage, then login ISCSI target via 25 | # to ISCSI_TARGET_ID and ISCSI_TARGET_IP on iscsi clients, commands like: 26 | # Execute on iscsi clients: 27 | # 1) discover the iscsi server. 28 | # # iscsiadm -m discovery -t st -p $ISCSI_TARGET_IP 29 | # 2) login and establish connection. 30 | # # iscsiadm -m node -T $ISCSI_TARGET_ID -p $ISCSI_TARGET_IP -l 31 | # Note: 32 | # On ISCSI server, must create all iscsi-luns in one target_id, recommend more 33 | # than 6 luns/disks for testing, and each disk should be: 100M < disk < 800M. 34 | # 2. If all cluster-nodes mounted the same disks directly, and the devname are 35 | # the same on all nodes, then put them to 'devlist'. 36 | 37 | # For example: (Only set $ISCSI_TARGET_ID is enough if iscsi has already connected) 38 | # ISCSI_TARGET_ID=iqn.2018-01.example.com:clustermd-testing 39 | # ISCSI_TARGET_IP=192.168.1.102 40 | ISCSI_TARGET_ID= 41 | 42 | #devlist=/dev/sda /dev/sdb /dev/sdc /dev/sdd 43 | devlist= 44 | -------------------------------------------------------------------------------- /coverity-gcc-hack.h: -------------------------------------------------------------------------------- 1 | #if !defined(__KERNEL__) && defined(__x86_64__) && defined(__COVERITY_GCC_VERSION_AT_LEAST) 2 | #if __COVERITY_GCC_VERSION_AT_LEAST(7, 0) 3 | typedef float _Float128 __attribute__((__vector_size__(128))); 4 | typedef float _Float64 __attribute__((__vector_size__(64))); 5 | typedef float _Float32 __attribute__((__vector_size__(32))); 6 | typedef float _Float128x __attribute__((__vector_size__(128))); 7 | typedef float _Float64x __attribute__((__vector_size__(64))); 8 | typedef float _Float32x __attribute__((__vector_size__(32))); 9 | #endif 10 | #endif 11 | -------------------------------------------------------------------------------- /dlink.c: -------------------------------------------------------------------------------- 1 | 2 | /* doubly linked lists */ 3 | /* This is free software. No strings attached. No copyright claimed */ 4 | 5 | #include 6 | #include 7 | #include 8 | #ifdef __dietlibc__ 9 | char *strncpy(char *dest, const char *src, size_t n) __THROW; 10 | #endif 11 | void *xcalloc(size_t num, size_t size); 12 | #include "dlink.h" 13 | 14 | void *dl_head() 15 | { 16 | void *h; 17 | h = dl_alloc(0); 18 | dl_next(h) = h; 19 | dl_prev(h) = h; 20 | return h; 21 | } 22 | 23 | void dl_free(void *v) 24 | { 25 | struct __dl_head *vv = v; 26 | free(vv-1); 27 | } 28 | 29 | void dl_free_all(void *head) 30 | { 31 | /* The list head is linked with the list tail so in order to free 32 | * all the elements properly there is a need to keep starting point. 
33 | */ 34 | void *d = dl_next(head), *next; 35 | 36 | while (d != head) { 37 | next = dl_next(d); 38 | dl_free(d); 39 | d = next; 40 | } 41 | dl_free(head); 42 | } 43 | 44 | void dl_init(void *v) 45 | { 46 | dl_next(v) = v; 47 | dl_prev(v) = v; 48 | } 49 | 50 | void dl_insert(void *head, void *val) 51 | { 52 | dl_next(val) = dl_next(head); 53 | dl_prev(val) = head; 54 | dl_next(dl_prev(val)) = val; 55 | dl_prev(dl_next(val)) = val; 56 | } 57 | 58 | void dl_add(void *head, void *val) 59 | { 60 | dl_prev(val) = dl_prev(head); 61 | dl_next(val) = head; 62 | dl_next(dl_prev(val)) = val; 63 | dl_prev(dl_next(val)) = val; 64 | } 65 | 66 | void dl_del(void *val) 67 | { 68 | if (dl_prev(val) == 0 || dl_next(val) == 0) 69 | return; 70 | dl_prev(dl_next(val)) = dl_prev(val); 71 | dl_next(dl_prev(val)) = dl_next(val); 72 | dl_prev(val) = dl_next(val) = 0; 73 | } 74 | 75 | char *dl_strndup(char *s, int l) 76 | { 77 | char *n; 78 | if (s == NULL) 79 | return NULL; 80 | n = dl_newv(char, l+1); 81 | strncpy(n, s, l+1); 82 | n[l] = 0; 83 | return n; 84 | } 85 | 86 | char *dl_strdup(char *s) 87 | { 88 | return dl_strndup(s, (int)strlen(s)); 89 | } 90 | -------------------------------------------------------------------------------- /dlink.h: -------------------------------------------------------------------------------- 1 | 2 | /* doubley linked lists */ 3 | /* This is free software. No strings attached. No copyright claimed */ 4 | 5 | struct __dl_head 6 | { 7 | void * dh_prev; 8 | void * dh_next; 9 | }; 10 | 11 | #define dl_alloc(size) ((void*)(((char*)xcalloc(1,(size)+sizeof(struct __dl_head)))+sizeof(struct __dl_head))) 12 | #define dl_new(t) ((t*)dl_alloc(sizeof(t))) 13 | #define dl_newv(t,n) ((t*)dl_alloc(sizeof(t)*n)) 14 | 15 | #define dl_next(p) *(&(((struct __dl_head*)(p))[-1].dh_next)) 16 | #define dl_prev(p) *(&(((struct __dl_head*)(p))[-1].dh_prev)) 17 | 18 | void *dl_head(void); 19 | char *dl_strdup(char *s); 20 | char *dl_strndup(char *s, int l); 21 | void dl_insert(void *head, void *val); 22 | void dl_add(void *head, void *val); 23 | void dl_del(void *val); 24 | void dl_free(void *v); 25 | void dl_init(void *v); 26 | void dl_free_all(void *head); 27 | -------------------------------------------------------------------------------- /documentation/HOW_TO_RELEASE.md: -------------------------------------------------------------------------------- 1 | # Maintainer tools 2 | 3 | Useful tools for mdadm maintenance: 4 | - [checkpatch](https://docs.kernel.org/dev-tools/checkpatch.html) 5 | - [kup](https://korg.docs.kernel.org/kup.html) 6 | - [Auto-publishing](https://korg.docs.kernel.org/kup.html#auto-publishing-with-git-archive-signer) 7 | - [b4](https://b4.docs.kernel.org/en/latest/) 8 | 9 | # Making a release 10 | 11 | Assuming that maintainer is certain that release is safe, following steps must be done: 12 | 13 | - Make and push release commit: 14 | - Update versions strings, refer to previous releases for examples. 15 | - Update CHANGELOG.md. 16 | 17 | - Create GPG signed tag and push it to both remotes. Use same format as was used previously, 18 | prefixed by **mdadm-**, e.g. **mdadm-3.1.2**, **mdadm-4.1**. 19 | 20 | - Run kernel.org 21 | [Auto-publishing](https://korg.docs.kernel.org/kup.html#auto-publishing-with-git-archive-signer): 22 | 23 | Adopt script to our release tag model. When ready, push signed note to kernel.org repository. If 24 | it is done correctly, then *(sig)* is added to the package automatically generated by 25 | kernel.org automation. 
There is no need to upload archive manually. 26 | 27 | - Add release entry on Github. 28 | 29 | - Write "ANNOUNCE" mail to linux-raid@kernel.org to notify community. 30 | -------------------------------------------------------------------------------- /drive_encryption.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* 3 | * Read encryption information for Opal and ATA devices. 4 | * 5 | * Copyright (C) 2024 Intel Corporation 6 | * Author: Blazej Kucman 7 | */ 8 | 9 | typedef enum encryption_status { 10 | /* The drive is not currently encrypted. */ 11 | ENC_STATUS_UNENCRYPTED = 0, 12 | /* The drive is encrypted and the data is not accessible. */ 13 | ENC_STATUS_LOCKED, 14 | /* The drive is encrypted but the data is accessible in unencrypted form. */ 15 | ENC_STATUS_UNLOCKED 16 | } encryption_status_t; 17 | 18 | typedef enum encryption_ability { 19 | ENC_ABILITY_NONE = 0, 20 | ENC_ABILITY_OTHER, 21 | /* Self encrypted drive */ 22 | ENC_ABILITY_SED 23 | } encryption_ability_t; 24 | 25 | typedef struct encryption_information { 26 | encryption_ability_t ability; 27 | encryption_status_t status; 28 | } encryption_information_t; 29 | 30 | mdadm_status_t 31 | get_nvme_opal_encryption_information(int disk_fd, struct encryption_information *information, 32 | const int verbose); 33 | mdadm_status_t 34 | get_ata_encryption_information(int disk_fd, struct encryption_information *information, 35 | const int verbose); 36 | const char *get_encryption_ability_string(enum encryption_ability ability); 37 | const char *get_encryption_status_string(enum encryption_status status); 38 | -------------------------------------------------------------------------------- /mdadm_status.h: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0-only 2 | 3 | #ifndef MDADM_STATUS_H 4 | #define MDADM_STATUS_H 5 | 6 | typedef enum mdadm_status { 7 | MDADM_STATUS_SUCCESS = 0, 8 | MDADM_STATUS_ERROR, 9 | MDADM_STATUS_UNDEF, 10 | MDADM_STATUS_MEM_FAIL, 11 | MDADM_STATUS_FORKED 12 | } mdadm_status_t; 13 | 14 | #endif 15 | -------------------------------------------------------------------------------- /misc/syslog-events: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # sample event handling script for mdadm 4 | # e.g. mdadm --follow --program=/sbin/syslog-events --scan 5 | # 6 | # License: GPL ver.2 7 | # Copyright (C) 2004 SEKINE Tatsuo 8 | 9 | event="$1" 10 | dev="$2" 11 | disc="$3" 12 | 13 | facility="kern" 14 | tag="mdmonitor" 15 | 16 | case x"${event}" in 17 | xFail*) priority="error" ;; 18 | xTest*) priority="debug" ;; 19 | x*) priority="info" ;; 20 | esac 21 | 22 | msg="${event} event on ${dev}" 23 | if [ x"${disc}" != x ]; then 24 | msg="${msg}, related to disc ${disc}" 25 | fi 26 | 27 | exec logger -t "${tag}" -p "${facility}.${priority}" -- "${msg}" 28 | -------------------------------------------------------------------------------- /msg.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2008 Intel Corporation 3 | * 4 | * mdmon socket / message handling 5 | * 6 | * This program is free software; you can redistribute it and/or modify it 7 | * under the terms and conditions of the GNU General Public License, 8 | * version 2, as published by the Free Software Foundation. 
9 | * 10 | * This program is distributed in the hope it will be useful, but WITHOUT 11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 | * more details. 14 | * 15 | * You should have received a copy of the GNU General Public License along with 16 | * this program; if not, write to the Free Software Foundation, Inc., 17 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 18 | */ 19 | 20 | struct mdinfo; 21 | struct metadata_update; 22 | 23 | extern int receive_message(int fd, struct metadata_update *msg, int tmo); 24 | extern int send_message(int fd, struct metadata_update *msg, int tmo); 25 | extern int ack(int fd, int tmo); 26 | extern int wait_reply(int fd, int tmo); 27 | extern int connect_monitor(char *devname); 28 | extern int ping_monitor(char *devname); 29 | extern int block_subarray(struct mdinfo *sra); 30 | extern int unblock_subarray(struct mdinfo *sra, const int unfreeze); 31 | extern int block_monitor(char *container, const int freeze); 32 | extern void unblock_monitor(char *container, const int unfreeze); 33 | extern int fping_monitor(int sock); 34 | extern int ping_manager(char *devname); 35 | extern void flush_mdmon(char *container); 36 | 37 | #define MSG_MAX_LEN (4*1024*1024) 38 | -------------------------------------------------------------------------------- /part.h: -------------------------------------------------------------------------------- 1 | /* 2 | * mdadm - manage Linux "md" devices aka RAID arrays. 3 | * 4 | * Copyright (C) 2010 Neil Brown 5 | * 6 | * 7 | * This program is free software; you can redistribute it and/or modify 8 | * it under the terms of the GNU General Public License as published by 9 | * the Free Software Foundation; either version 2 of the License, or 10 | * (at your option) any later version. 11 | * 12 | * This program is distributed in the hope that it will be useful, 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 | * GNU General Public License for more details. 
16 | * 17 | * You should have received a copy of the GNU General Public License 18 | * along with this program; if not, write to the Free Software 19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 20 | * 21 | * Author: Neil Brown 22 | * Email: 23 | * 24 | */ 25 | 26 | /* Structure definitions ext for MBR and GPT partition tables 27 | */ 28 | 29 | #define MBR_SIGNATURE_MAGIC __cpu_to_le16(0xAA55) 30 | #define MBR_PARTITIONS 4 31 | 32 | struct MBR_part_record { 33 | __u8 bootable; 34 | __u8 first_head; 35 | __u8 first_sector; 36 | __u8 first_cyl; 37 | __u8 part_type; 38 | __u8 last_head; 39 | __u8 last_sector; 40 | __u8 last_cyl; 41 | __u32 first_sect_lba; 42 | __u32 blocks_num; 43 | } __attribute__((packed)); 44 | 45 | struct MBR { 46 | __u8 pad[446]; 47 | struct MBR_part_record parts[MBR_PARTITIONS]; 48 | __u16 magic; 49 | } __attribute__((packed)); 50 | 51 | #define GPT_SIGNATURE_MAGIC __cpu_to_le64(0x5452415020494645ULL) 52 | #define MBR_GPT_PARTITION_TYPE 0xEE 53 | 54 | struct GPT_part_entry { 55 | unsigned char type_guid[16]; 56 | unsigned char partition_guid[16]; 57 | __u64 starting_lba; 58 | __u64 ending_lba; 59 | unsigned char attr_bits[8]; 60 | unsigned char name[72]; 61 | } __attribute__((packed)); 62 | 63 | struct GPT { 64 | __u64 magic; 65 | __u32 revision; 66 | __u32 header_size; 67 | __u32 crc; 68 | __u32 pad1; 69 | __u64 current_lba; 70 | __u64 backup_lba; 71 | __u64 first_lba; 72 | __u64 last_lba; 73 | __u8 guid[16]; 74 | __u64 part_start; 75 | __u32 part_cnt; 76 | __u32 part_size; 77 | __u32 part_crc; 78 | __u8 pad2[420]; 79 | } __attribute__((packed)); 80 | -------------------------------------------------------------------------------- /probe_roms.h: -------------------------------------------------------------------------------- 1 | /* 2 | * probe_roms - scan for Adapter ROMS 3 | * 4 | * Copyright (C) 2008 Intel Corporation 5 | * 6 | * This program is free software; you can redistribute it and/or modify it 7 | * under the terms and conditions of the GNU General Public License, 8 | * version 2, as published by the Free Software Foundation. 9 | * 10 | * This program is distributed in the hope it will be useful, but WITHOUT 11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 | * more details. 14 | * 15 | * You should have received a copy of the GNU General Public License along with 16 | * this program; if not, write to the Free Software Foundation, Inc., 17 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
18 | */ 19 | 20 | void probe_roms_exit(void); 21 | int probe_roms_init(unsigned long align); 22 | typedef int (*scan_fn)(const void *start, const void *end, const void *data); 23 | int scan_adapter_roms(scan_fn fn); 24 | void probe_roms(void); 25 | -------------------------------------------------------------------------------- /pwgr.c: -------------------------------------------------------------------------------- 1 | 2 | /* 3 | * We cannot link a static binary with passwd/group support, so 4 | * just do without 5 | */ 6 | #include 7 | #include 8 | #include 9 | 10 | struct passwd *getpwnam(const char *name) 11 | { 12 | return NULL; 13 | } 14 | struct group *getgrnam(const char *name) 15 | { 16 | return NULL; 17 | } 18 | -------------------------------------------------------------------------------- /raid5extend.c: -------------------------------------------------------------------------------- 1 | 2 | int phys2log(int phys, int stripe, int n, int layout) 3 | { 4 | /* In an 'n' disk array using 'layout', 5 | * in stripe 'stripe', the physical disc 'phys' 6 | * stores what logical chunk? 7 | * -1 mean parity. 8 | * 9 | */ 10 | switch(layout) { 11 | case ALGORITHM_LEFT_ASYMMETRIC: 12 | pd = (n-1) - (stripe % n); 13 | if (phys < pd) 14 | return phys; 15 | else if (phys == pd) 16 | return -1; 17 | else return phys-1; 18 | 19 | case ALGORITHM_RIGHT_ASYMMETRIC: 20 | pd = stripe % n; 21 | if (phys < pd) 22 | return phys; 23 | else if (phys == pd) 24 | return -1; 25 | else return phys-1; 26 | 27 | case ALGORITHM_LEFT_SYMMETRIC: 28 | pd = (n-1) - (stripe %n); 29 | if (phys < pd) 30 | return phys+ n-1-pd; 31 | else if (phys == pd) 32 | return -1; 33 | else return phys-pd-1; 34 | 35 | case ALGORITHM_RIGHT_SYMMETRIC: 36 | pd = stripe % n; 37 | if (phys < pd) 38 | return phys+ n-1-pd; 39 | else if (phys == pd) 40 | return -1; 41 | else return phys-pd-1; 42 | } 43 | return -2; 44 | } 45 | 46 | raid5_extend(unsigned long len, int chunksize, int layout, int n, int m, int rfds[], int wfds[]) 47 | { 48 | 49 | static char buf[4096]; 50 | 51 | unsigned long blocks = len/4; 52 | unsigned int blocksperchunk= chunksize/4096; 53 | 54 | unsigned long b; 55 | 56 | for (b=0; b 2 | #include 3 | #include 4 | #include 5 | #include 6 | /* 7 | * This is a tiny test program to endian-swap 8 | * the superblock on a given device. 9 | * We simply read 4k from where the superblock should be 10 | * do the swap, and write it back 11 | * Don't use this on a real array, use mdadm. 
12 | */ 13 | 14 | #define MD_RESERVED_BYTES (64 * 1024) 15 | #define MD_RESERVED_SECTORS (MD_RESERVED_BYTES / 512) 16 | 17 | #define MD_NEW_SIZE_SECTORS(x) ((x & ~(MD_RESERVED_SECTORS - 1)) - MD_RESERVED_SECTORS) 18 | 19 | extern long long lseek64(int, long long, int); 20 | 21 | int main(int argc, char *argv[]) 22 | { 23 | int fd, i; 24 | unsigned long size; 25 | unsigned long long offset; 26 | char super[4096]; 27 | if (argc != 2) { 28 | fprintf(stderr, "Usage: swap_super device\n"); 29 | exit(1); 30 | } 31 | fd = open(argv[1], O_RDWR); 32 | if (fd<0) { 33 | perror(argv[1]); 34 | exit(1); 35 | } 36 | if (ioctl(fd, BLKGETSIZE, &size)) { 37 | perror("BLKGETSIZE"); 38 | exit(1); 39 | } 40 | offset = MD_NEW_SIZE_SECTORS(size) * 512LL; 41 | if (lseek64(fd, offset, 0) < 0LL) { 42 | perror("lseek64"); 43 | exit(1); 44 | } 45 | if (read(fd, super, 4096) != 4096) { 46 | perror("read"); 47 | exit(1); 48 | } 49 | 50 | for (i=0; i < 4096 ; i+=4) { 51 | char t = super[i]; 52 | super[i] = super[i+3]; 53 | super[i+3] = t; 54 | t=super[i+1]; 55 | super[i+1]=super[i+2]; 56 | super[i+2]=t; 57 | } 58 | /* swap the u64 events counters */ 59 | for (i=0; i<4; i++) { 60 | /* events_hi and events_lo */ 61 | char t=super[32*4+7*4 +i]; 62 | super[32*4+7*4 +i] = super[32*4+8*4 +i]; 63 | super[32*4+8*4 +i] = t; 64 | 65 | /* cp_events_hi and cp_events_lo */ 66 | t=super[32*4+9*4 +i]; 67 | super[32*4+9*4 +i] = super[32*4+10*4 +i]; 68 | super[32*4+10*4 +i] = t; 69 | } 70 | 71 | if (lseek64(fd, offset, 0) < 0LL) { 72 | perror("lseek64"); 73 | exit(1); 74 | } 75 | if (write(fd, super, 4096) != 4096) { 76 | perror("write"); 77 | exit(1); 78 | } 79 | exit(0); 80 | 81 | } 82 | -------------------------------------------------------------------------------- /systemd/mdadm-grow-continue@.service: -------------------------------------------------------------------------------- 1 | # This file is part of mdadm. 2 | # 3 | # mdadm is free software; you can redistribute it and/or modify it 4 | # under the terms of the GNU General Public License as published by 5 | # the Free Software Foundation; either version 2 of the License, or 6 | # (at your option) any later version. 7 | 8 | [Unit] 9 | Description=Manage MD Reshape on /dev/%I 10 | DefaultDependencies=no 11 | Documentation=man:mdadm(8) 12 | 13 | [Service] 14 | ExecStart=BINDIR/mdadm --grow --continue /dev/%I 15 | StandardInput=null 16 | StandardOutput=null 17 | StandardError=null 18 | -------------------------------------------------------------------------------- /systemd/mdadm-last-resort@.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Activate md array %I even though degraded 3 | DefaultDependencies=no 4 | ConditionPathExists=!/sys/devices/virtual/block/%i/md/sync_action 5 | Documentation=man:mdadm(8) 6 | 7 | [Service] 8 | Type=oneshot 9 | ExecStart=BINDIR/mdadm --run /dev/%i 10 | -------------------------------------------------------------------------------- /systemd/mdadm-last-resort@.timer: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Timer to wait for more drives before activating degraded array %I. 
3 | DefaultDependencies=no 4 | Conflicts=sys-devices-virtual-block-%i.device 5 | 6 | [Timer] 7 | OnActiveSec=30 8 | -------------------------------------------------------------------------------- /systemd/mdadm.shutdown: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # We need to ensure all md arrays with external metadata 3 | # (e.g. IMSM, DDF) are clean before completing the shutdown. 4 | BINDIR/mdadm --wait-clean --scan 5 | -------------------------------------------------------------------------------- /systemd/mdcheck_continue.service: -------------------------------------------------------------------------------- 1 | # This file is part of mdadm. 2 | # 3 | # mdadm is free software; you can redistribute it and/or modify it 4 | # under the terms of the GNU General Public License as published by 5 | # the Free Software Foundation; either version 2 of the License, or 6 | # (at your option) any later version. 7 | 8 | [Unit] 9 | Description=MD array scrubbing - continuation 10 | ConditionPathExistsGlob=/var/lib/mdcheck/MD_UUID_* 11 | Documentation=man:mdadm(8) 12 | 13 | [Service] 14 | Type=oneshot 15 | Environment="MDADM_CHECK_DURATION=6 hours" 16 | ExecStart=/usr/share/mdadm/mdcheck --continue --duration ${MDADM_CHECK_DURATION} 17 | -------------------------------------------------------------------------------- /systemd/mdcheck_continue.timer: -------------------------------------------------------------------------------- 1 | # This file is part of mdadm. 2 | # 3 | # mdadm is free software; you can redistribute it and/or modify it 4 | # under the terms of the GNU General Public License as published by 5 | # the Free Software Foundation; either version 2 of the License, or 6 | # (at your option) any later version. 7 | 8 | [Unit] 9 | Description=MD array scrubbing - continuation 10 | 11 | [Timer] 12 | OnCalendar= 1:05:00 13 | 14 | [Install] 15 | WantedBy= mdmonitor.service 16 | -------------------------------------------------------------------------------- /systemd/mdcheck_start.service: -------------------------------------------------------------------------------- 1 | # This file is part of mdadm. 2 | # 3 | # mdadm is free software; you can redistribute it and/or modify it 4 | # under the terms of the GNU General Public License as published by 5 | # the Free Software Foundation; either version 2 of the License, or 6 | # (at your option) any later version. 7 | 8 | [Unit] 9 | Description=MD array scrubbing 10 | Wants=mdcheck_continue.timer 11 | Documentation=man:mdadm(8) 12 | 13 | [Service] 14 | Type=oneshot 15 | Environment="MDADM_CHECK_DURATION=6 hours" 16 | ExecStart=/usr/share/mdadm/mdcheck --duration ${MDADM_CHECK_DURATION} 17 | -------------------------------------------------------------------------------- /systemd/mdcheck_start.timer: -------------------------------------------------------------------------------- 1 | # This file is part of mdadm. 2 | # 3 | # mdadm is free software; you can redistribute it and/or modify it 4 | # under the terms of the GNU General Public License as published by 5 | # the Free Software Foundation; either version 2 of the License, or 6 | # (at your option) any later version. 
7 | 8 | [Unit] 9 | Description=MD array scrubbing 10 | 11 | [Timer] 12 | OnCalendar=Sun *-*-1..7 1:00:00 13 | 14 | [Install] 15 | WantedBy= mdmonitor.service 16 | Also= mdcheck_continue.timer 17 | -------------------------------------------------------------------------------- /systemd/mdmon@.service: -------------------------------------------------------------------------------- 1 | # This file is part of mdadm. 2 | # 3 | # mdadm is free software; you can redistribute it and/or modify it 4 | # under the terms of the GNU General Public License as published by 5 | # the Free Software Foundation; either version 2 of the License, or 6 | # (at your option) any later version. 7 | 8 | [Unit] 9 | Description=MD Metadata Monitor on %I 10 | DefaultDependencies=no 11 | Before=initrd-switch-root.target 12 | Documentation=man:mdmon(8) 13 | # Allow mdmon to keep running after switchroot, until a new 14 | # instance is started. 15 | IgnoreOnIsolate=true 16 | 17 | [Service] 18 | # The mdmon starting in the initramfs (with dracut at least) 19 | # cannot see sysfs after root is mounted, so we will have to 20 | # 'takeover'. As the '--offroot --takeover' don't hurt when 21 | # not necessary, are are useful with root-on-md in dracut, 22 | # have them always present. 23 | ExecStart=BINDIR/mdmon --foreground --offroot --takeover %I 24 | # Don't set the PIDFile. It isn't necessary (systemd can work 25 | # it out) and systemd will remove it when transitioning from 26 | # initramfs to rootfs. 27 | #PIDFile=/run/mdadm/%I.pid 28 | # The default slice is system-mdmon.slice which Conflicts 29 | # with shutdown, causing mdmon to exit early. So use system.slice. 30 | Slice=system.slice 31 | -------------------------------------------------------------------------------- /systemd/mdmonitor-oneshot.service: -------------------------------------------------------------------------------- 1 | # This file is part of mdadm. 2 | # 3 | # mdadm is free software; you can redistribute it and/or modify it 4 | # under the terms of the GNU General Public License as published by 5 | # the Free Software Foundation; either version 2 of the License, or 6 | # (at your option) any later version. 7 | 8 | [Unit] 9 | Description=Reminder for degraded MD arrays 10 | Documentation=man:mdadm(8) 11 | 12 | [Service] 13 | Environment=MDADM_MONITOR_ARGS=--scan 14 | EnvironmentFile=-/run/sysconfig/mdadm 15 | ExecStartPre=-/usr/lib/mdadm/mdadm_env.sh 16 | ExecStart=BINDIR/mdadm --monitor --oneshot $MDADM_MONITOR_ARGS 17 | -------------------------------------------------------------------------------- /systemd/mdmonitor-oneshot.timer: -------------------------------------------------------------------------------- 1 | # This file is part of mdadm. 2 | # 3 | # mdadm is free software; you can redistribute it and/or modify it 4 | # under the terms of the GNU General Public License as published by 5 | # the Free Software Foundation; either version 2 of the License, or 6 | # (at your option) any later version. 7 | 8 | [Unit] 9 | Description=Reminder for degraded MD arrays 10 | 11 | [Timer] 12 | OnCalendar= 2:00:00 13 | 14 | [Install] 15 | WantedBy= mdmonitor.service 16 | -------------------------------------------------------------------------------- /systemd/mdmonitor.service: -------------------------------------------------------------------------------- 1 | # This file is part of mdadm. 
2 | # 3 | # mdadm is free software; you can redistribute it and/or modify it 4 | # under the terms of the GNU General Public License as published by 5 | # the Free Software Foundation; either version 2 of the License, or 6 | # (at your option) any later version. 7 | 8 | [Unit] 9 | Description=MD array monitor 10 | DefaultDependencies=no 11 | Documentation=man:mdadm(8) 12 | 13 | [Service] 14 | # For Maintainers: 15 | # We need to ensure that the mdmonitor configuration aligns with the guidelines provided 16 | # in the man page for users. 17 | # /etc/sysconfig/mdadm, /etc/sysconfig/mdmonitor, or any other similar configuration file should 18 | # not be supported because non-upstream components are not described in man pages. 19 | 20 | # Parameters designed to be customized by the user should be settable via mdadm.conf: 21 | # - MONITORDELAY (do not set --delay in service) 22 | # - MAILADDR (do not set --mail in service) 23 | # - MAILFROM (not settable from cmdline) 24 | # - PROGRAM (do not set --program or --alert in service) 25 | # 26 | # The following parameters can be customized in the service: 27 | # - --syslog (configure syslog logging) 28 | # - --fork (Type=forking must be added, not recommended and not needed with systemd) 29 | # - --pid-file (allowed only when --fork selected) 30 | 31 | ExecStart=BINDIR/mdadm --monitor --scan 32 | -------------------------------------------------------------------------------- /tests/00confnames: -------------------------------------------------------------------------------- 1 | set -x -e 2 | . tests/templates/names_template 3 | 4 | # Test how the devnode is handled during Incremental assembly with 5 | # config file and ARRAY line specified. 6 | 7 | names_create "/dev/md/name" 8 | local _UUID="$(mdadm -D --export /dev/md127 | grep MD_UUID | cut -d'=' -f2)" 9 | [[ "$_UUID" == "" ]] && echo "Cannot obtain UUID for $DEVNODE_NAME" && exit 1 10 | 11 | 12 | # 1. Devnode definition consistent with metadata name. 13 | names_make_conf $_UUID "/dev/md/name" $config 14 | mdadm -S "/dev/md127" 15 | mdadm -I $dev0 --config=$config 16 | names_verify "/dev/md127" "name" "name" 17 | mdadm -S "/dev/md127" 18 | 19 | # 2. Same as 1, but use the short name form of the devnode. 20 | names_make_conf $_UUID "name" $config 21 | mdadm -I $dev0 --config=$config 22 | names_verify "/dev/md127" "name" "name" 23 | mdadm -S "/dev/md127" 24 | 25 | # 3. Same as 1, but use a different devnode than metadata provides. 26 | names_make_conf $_UUID "/dev/md/other" $config 27 | mdadm -I $dev0 --config=$config 28 | names_verify "/dev/md127" "other" "name" 29 | mdadm -S "/dev/md127" 30 | 31 | # 4. Same as 3, but use the short name form of the devnode. 32 | names_make_conf $_UUID "other" $config 33 | mdadm -I $dev0 --config=$config 34 | names_verify "/dev/md127" "other" "name" 35 | mdadm -S "/dev/md127" 36 | 37 | # 5. Force particular node creation by setting the devnode to /dev/mdX. 38 | # Link is not created in this case. 39 | names_make_conf $_UUID "/dev/md4" $config 40 | mdadm -I $dev0 --config=$config 41 | names_verify "/dev/md4" "empty" "name" 42 | mdadm -S "/dev/md4" 43 | 44 | # 6. Devnode with some special symbols and locales. 45 | # The devnode should be ignored. 46 | names_make_conf $_UUID "tźż-\.,<>st+-" $config 47 | mdadm -I $dev0 --config=$config 48 | names_verify "/dev/md127" "name" "name" 49 | mdadm -S "/dev/md127" 50 | 51 | # 7. No devnode set. 52 | # Metadata name and default node used.
53 | names_make_conf $_UUID "empty" $config 54 | mdadm -I $dev0 --config=$config 55 | names_verify "/dev/md127" "name" "name" 56 | mdadm -S "/dev/md127" 57 | -------------------------------------------------------------------------------- /tests/00createnames: -------------------------------------------------------------------------------- 1 | set -x -e 2 | . tests/templates/names_template 3 | 4 | # Test how the devnode and --name= are handled in create mode. 5 | 6 | # The most trivial case. 7 | names_create "/dev/md/name" 8 | names_verify "/dev/md127" "name" "name" 9 | mdadm -S "/dev/md127" 10 | 11 | names_create "name" 12 | names_verify "/dev/md127" "name" "name" 13 | mdadm -S "/dev/md127" 14 | 15 | # Use 'mdX' as name. 16 | names_create "/dev/md/md0" 17 | names_verify "/dev/md127" "md0" "md0" 18 | mdadm -S "/dev/md127" 19 | 20 | names_create "md0" 21 | names_verify "/dev/md127" "md0" "md0" 22 | mdadm -S "/dev/md127" 23 | 24 | # The devnode is used to create MD_DEVNAME, but name is used to create MD_NAME. 25 | names_create "/dev/md/devnode" "name" 26 | names_verify "/dev/md127" "devnode" "name" 27 | mdadm -S "/dev/md127" 28 | 29 | names_create "devnode" "name" 30 | names_verify "/dev/md127" "devnode" "name" 31 | mdadm -S "/dev/md127" 32 | 33 | # Devnode points to /dev/ directory. MD_DEVNAME doesn't exist. 34 | names_create "/dev/md0" 35 | names_verify "/dev/md0" "empty" "0" 36 | mdadm -S "/dev/md0" 37 | 38 | # Devnode points to /dev/ directory and name is set. 39 | names_create "/dev/md0" "name" 40 | names_verify "/dev/md0" "empty" "name" 41 | mdadm -S "/dev/md0" 42 | 43 | # Devnode is a special ignore keyword. Should be rejected. 44 | names_create "" "name", "true" 45 | -------------------------------------------------------------------------------- /tests/00linear: -------------------------------------------------------------------------------- 1 | 2 | # create a simple linear 3 | 4 | if [ "$LINEAR" != "yes" ]; then 5 | echo -ne 'skipping... ' 6 | exit 0 7 | fi 8 | 9 | mdadm -CR $md0 -l linear -n3 $dev0 $dev1 $dev2 10 | check linear 11 | testdev $md0 3 $mdsize2_l 1 12 | mdadm -S $md0 13 | 14 | # now with version-0.90 superblock 15 | mdadm -CR $md0 -e0.90 --level=linear -n4 $dev0 $dev1 $dev2 $dev3 16 | check linear 17 | testdev $md0 4 $mdsize0 1 18 | mdadm -S $md0 19 | 20 | # now with version-1.0 superblock 21 | mdadm -CR $md0 -e1.0 --level=linear -n4 $dev0 $dev1 $dev2 $dev3 22 | check linear 23 | testdev $md0 4 $mdsize1 1 24 | mdadm -S $md0 25 | 26 | # now with no superblock 27 | mdadm -B $md0 -l linear -n5 $dev0 $dev1 $dev2 $dev3 $dev4 28 | check linear 29 | testdev $md0 5 $size 64 30 | mdadm -S $md0 31 | -------------------------------------------------------------------------------- /tests/00multipath: -------------------------------------------------------------------------------- 1 | 2 | # 3 | # create a multipath, and fail and stuff 4 | 5 | if [ "$MULTIPATH" != "yes" ]; then 6 | echo -ne 'skipping... 
' 7 | exit 0 8 | fi 9 | 10 | mdadm -CR $md1 -l multipath -n2 $path0 $path1 11 | 12 | testdev $md1 1 $mdsize12 1 13 | 14 | mdadm $md1 -f $path0 15 | rotest $md1 16 | testdev $md1 1 $mdsize12 1 17 | 18 | mdadm $md1 -r $path0 19 | mdadm $md1 -a $path0 20 | 21 | rotest $md1 22 | testdev $md1 1 $mdsize12 1 23 | 24 | mdadm $md1 -f $path1 25 | mdadm $md1 -r $path1 26 | rotest $md1 27 | testdev $md1 1 $mdsize12 1 28 | 29 | mdadm -S $md1 30 | -------------------------------------------------------------------------------- /tests/00names: -------------------------------------------------------------------------------- 1 | set -x -e 2 | 3 | # create arrays with non-numeric names 4 | conf=$targetdir/mdadm.conf 5 | echo "CREATE names=yes" > $conf 6 | 7 | levels=(raid0 raid1 raid4 raid5 raid6) 8 | 9 | if [ "$LINEAR" == "yes" ]; then 10 | levels+=( linear ) 11 | fi 12 | 13 | for i in ${levels[@]} 14 | do 15 | mdadm -CR --config $conf /dev/md/$i -l $i -n 4 $dev4 $dev3 $dev2 $dev1 16 | check $i 17 | [ -d /sys/class/block/md_$i/md ] 18 | mdadm -S md_$i 19 | done 20 | -------------------------------------------------------------------------------- /tests/00raid0: -------------------------------------------------------------------------------- 1 | 2 | # create a simple raid0 3 | 4 | mdadm -CR $md0 -l raid0 -n3 $dev0 $dev1 $dev2 5 | check raid0 6 | testdev $md0 3 $mdsize2_l 512 7 | mdadm -S $md0 8 | 9 | # verify raid0 with layouts fail for 0.90 10 | mdadm -CR $md0 -e0.90 -l0 -n4 $dev0 $dev1 $dev2 $dev3 11 | check opposite_result 12 | 13 | # now with no superblock 14 | mdadm -B $md0 -l0 -n5 $dev0 $dev1 $dev2 $dev3 $dev4 15 | check raid0 16 | testdev $md0 5 $size 512 17 | mdadm -S $md0 18 | 19 | if [ "$LINEAR" != "yes" ]; then 20 | echo -ne 'skipping... ' 21 | exit 0 22 | fi 23 | 24 | # now same again with different chunk size 25 | for chunk in 4 32 256 26 | do 27 | mdadm -CR $md0 -e0.90 -l linear --chunk $chunk -n3 $dev0 $dev1 $dev2 28 | check linear 29 | testdev $md0 3 $mdsize0 $chunk 30 | mdadm -S $md0 31 | 32 | # now with version-1 superblock 33 | mdadm -CR $md0 -e1.0 -l0 -c $chunk -n4 $dev0 $dev1 $dev2 $dev3 34 | check raid0 35 | testdev $md0 4 $mdsize1 $chunk 36 | mdadm -S $md0 37 | 38 | # now with no superblock 39 | mdadm -B $md0 -l0 -n5 --chun=$chunk $dev0 $dev1 $dev2 $dev3 $dev4 40 | check raid0 41 | testdev $md0 5 $size $chunk 42 | mdadm -S $md0 43 | 44 | done 45 | exit 0 46 | -------------------------------------------------------------------------------- /tests/00raid1: -------------------------------------------------------------------------------- 1 | 2 | # create a simple mirror 3 | # test version0, version1, and no super 4 | # test resync and recovery. 5 | 6 | # It's just a sanity check. 
This command shouldn't run successfully 7 | mdadm -CR $md0 -l 1 -n2 missing missing 8 | check opposite_result 9 | 10 | mdadm -CR $md0 -l 1 -n2 $dev0 $dev1 11 | check resync 12 | check raid1 13 | testdev $md0 1 $mdsize1a 64 14 | mdadm -S $md0 15 | 16 | # now with version-0.90 superblock, spare 17 | mdadm -CR $md0 -e0.90 --level=raid1 -n3 -x2 $dev0 missing missing $dev1 $dev2 18 | check recovery 19 | check raid1 20 | testdev $md0 1 $mdsize0 64 21 | mdadm -S $md0 22 | 23 | # now with no superblock 24 | mdadm -B $md0 -l mirror -n2 $dev0 $dev1 25 | check resync 26 | check raid1 27 | testdev $md0 1 $size 1 28 | mdadm -S $md0 29 | 30 | # again, but with no resync 31 | mdadm -B $md0 -l 1 --assume-clean -n2 $dev0 $dev1 32 | check raid1 33 | check nosync 34 | testdev $md0 1 $size 1 35 | mdadm -S $md0 36 | 37 | 38 | exit 0 39 | -------------------------------------------------------------------------------- /tests/00raid10: -------------------------------------------------------------------------------- 1 | 2 | # Create some raid10 arrays, all with 6 devices and one spare 3 | devs="$dev0 $dev1 $dev2 $dev3 $dev4 $dev5 $dev6" 4 | 5 | for lo in n2 n3 f2 f3 6 | do 7 | cm=1 8 | case $lo in 9 | f2 ) m=3 cm=2;; 10 | f3 ) m=2 cm=3;; 11 | n2 ) m=3;; 12 | n3 ) m=2;; 13 | esac 14 | mdadm --create --run --level=raid10 --layout $lo --raid-disks 6 -x 1 $md0 $devs 15 | check resync ; check raid10 16 | testdev $md0 $m $mdsize1 $[512*cm] 17 | mdadm -S $md0 18 | done 19 | -------------------------------------------------------------------------------- /tests/00raid4: -------------------------------------------------------------------------------- 1 | 2 | # create a simple raid4 set 3 | 4 | mdadm -CfR $md0 -l 4 -n3 $dev0 $dev1 $dev2 5 | check resync ; check raid[45] 6 | testdev $md0 2 $mdsize1 512 7 | mdadm -S $md0 8 | 9 | # now with version-1 superblock 10 | mdadm -CR $md0 -e1 --level=raid4 -n4 $dev0 $dev1 $dev2 $dev3 11 | check recovery; check raid[45] 12 | testdev $md0 3 $mdsize1 512 13 | mdadm -S $md0 14 | 15 | 16 | exit 0 17 | -------------------------------------------------------------------------------- /tests/00raid5: -------------------------------------------------------------------------------- 1 | 2 | # create a simple raid5 set 3 | 4 | mdadm -CfR $md0 -e 0.90 -l 5 -n3 $dev0 $dev1 $dev2 5 | check resync 6 | testdev $md0 2 $mdsize0 512 7 | mdadm -S $md0 8 | 9 | # now with version-1 superblock 10 | mdadm -CR $md0 -e1 --level=raid5 -n4 $dev0 $dev1 $dev2 $dev3 11 | check recovery 12 | testdev $md0 3 $mdsize1 512 13 | mdadm -S $md0 14 | 15 | # now same again with explicit layout 16 | 17 | for lo in la ra left-symmetric right-symmetric 18 | do 19 | 20 | mdadm -CfR $md0 -l 5 -p $lo -n3 $dev0 $dev1 $dev2 21 | check resync ; check raid5 22 | testdev $md0 2 $mdsize1 512 23 | mdadm -S $md0 24 | 25 | # now with version-1 superblock 26 | mdadm -CR $md0 -e1 --level=raid5 --layout $lo -n4 $dev0 $dev1 $dev2 $dev3 27 | check recovery ; check raid5 28 | testdev $md0 3 $mdsize1 512 29 | mdadm -S $md0 30 | 31 | done 32 | 33 | exit 0 34 | -------------------------------------------------------------------------------- /tests/00raid5-zero: -------------------------------------------------------------------------------- 1 | 2 | if mdadm -CfR $md0 -l 5 -n3 $dev0 $dev1 $dev2 --write-zeroes ; then 3 | check nosync 4 | echo check > /sys/block/md0/md/sync_action; 5 | check wait 6 | elif grep "zeroing [^ ]* failed: Operation not supported" \ 7 | $targetdir/stderr; then 8 | echo "write-zeros not supported, skipping" 9 | else 10 | echo 
>&2 "ERROR: mdadm return failure without not supported message" 11 | exit 1 12 | fi 13 | -------------------------------------------------------------------------------- /tests/00raid6: -------------------------------------------------------------------------------- 1 | 2 | # create a simple raid6 set 3 | 4 | mdadm -CfR $md0 -e0.90 -l 6 -n4 $dev0 $dev1 $dev2 $dev3 5 | check resync ; check raid6 6 | testdev $md0 2 $mdsize0 512 7 | mdadm -S $md0 8 | 9 | # now with version-1 superblock 10 | mdadm -CR $md0 -e1 --level=raid6 -n5 $dev0 $dev1 $dev2 $dev3 $dev4 11 | check resync ; check raid6 12 | testdev $md0 3 $mdsize1 512 13 | mdadm -S $md0 14 | 15 | 16 | exit 0 17 | -------------------------------------------------------------------------------- /tests/00readonly: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | levels=(raid0 raid1 raid4 raid5 raid6 raid10) 4 | 5 | if [ "$LINEAR" == "yes" ]; then 6 | levels+=( linear ) 7 | fi 8 | 9 | for metadata in 0.9 1.0 1.1 1.2 10 | do 11 | for level in ${levels[@]} 12 | do 13 | if [[ $metadata == "0.9" && $level == "raid0" ]]; 14 | then 15 | continue 16 | fi 17 | mdadm -CR $md0 -l $level -n 4 --metadata=$metadata \ 18 | $dev1 $dev2 $dev3 $dev4 --assume-clean 19 | check nosync 20 | check $level 21 | udevadm settle 22 | mdadm -ro $md0 23 | check readonly 24 | state=$(cat /sys/block/md0/md/array_state) 25 | [ "$state" == "readonly" ] || 26 | die "array_state should be 'readonly', but is $state" 27 | mdadm -w $md0 28 | check $level 29 | mdadm -S $md0 30 | done 31 | done 32 | 33 | exit 0 34 | -------------------------------------------------------------------------------- /tests/01r1fail: -------------------------------------------------------------------------------- 1 | 2 | # create a raid1, fail and remove a drive during initial sync 3 | # Add two more, fail and remove one 4 | # wait for sync to complete, fail, remove, re-add 5 | 6 | mdadm -CR $md0 -l1 -n4 $dev0 $dev1 $dev2 missing 7 | check resync 8 | mdadm $md0 --fail $dev2 9 | check resync 10 | mdadm $md0 --fail $dev1 11 | sleep 2 12 | check nosync 13 | check state U___ 14 | mdadm $md0 --add $dev4 $dev3 15 | check recovery 16 | # there could be two separate recoveries, one for each dev 17 | check wait 18 | check wait 19 | mdadm $md0 --remove $dev2 $dev1 20 | check nosync 21 | check state UUU_ 22 | 23 | mdadm --zero-superblock $dev2 24 | mdadm $md0 -a $dev2 25 | check recovery 26 | check wait 27 | check state UUUU 28 | 29 | mdadm -S $md0 30 | -------------------------------------------------------------------------------- /tests/01r5fail: -------------------------------------------------------------------------------- 1 | 2 | 3 | # create a raid5, fail and remove a drive during initial sync 4 | # Add two more, fail and remove one 5 | # wait for sync to complete, fail, remove, re-add 6 | 7 | mdadm -CR $md0 -l5 -n4 $dev0 $dev1 $dev2 $dev3 8 | check recovery 9 | mdadm $md0 --fail $dev3 10 | sleep 2 11 | check nosync 12 | check state UUU_ 13 | 14 | mdadm $md0 --add $dev4 $dev5 15 | check recovery 16 | check wait 17 | mdadm $md0 --fail $dev0 18 | mdadm $md0 --remove $dev3 $dev0 19 | check recovery 20 | check wait 21 | check state UUUU 22 | 23 | mdadm -S $md0 24 | -------------------------------------------------------------------------------- /tests/01r5integ: -------------------------------------------------------------------------------- 1 | 2 | # Check integrity of raid5 in degraded mode 3 | # Create a 4 disk raid5, create a filesystem and 4 | # 
sha1sum it with each device failed 5 | 6 | if [ "$INTEGRITY" != "yes" ]; then 7 | echo -ne 'skipping... ' 8 | exit 0 9 | fi 10 | 11 | for layout in ls rs la ra 12 | do 13 | mdadm -CR $md0 -l5 --layout $layout -n4 $dev0 $dev1 $dev2 $dev3 14 | check wait 15 | tar cf - /etc > $md0 16 | sum=`sha1sum $md0` 17 | 18 | for i in $dev0 $dev1 $dev2 $dev3 19 | do 20 | mdadm $md0 -f $i 21 | mdadm $md0 -r $i 22 | blockdev --flushbufs $md0 23 | sum1=`sha1sum $md0` 24 | if [ "$sum" != "$sum1" ] 25 | then 26 | echo $sum does not match $sum1 with $i missing 27 | exit 1 28 | fi 29 | mdadm $md0 -a $i 30 | while ! (check state 'U*'); do check wait; sleep 0.2; done 31 | done 32 | mdadm -S $md0 33 | done 34 | -------------------------------------------------------------------------------- /tests/01raid6integ: -------------------------------------------------------------------------------- 1 | 2 | # Check integrity of raid6 in degraded modes 3 | # Create a 5 disk raid6, dump some data to it, then 4 | # sha1sum it with different pairs of devices failed 5 | 6 | if [ "$INTEGRITY" != "yes" ]; then 7 | echo -ne 'skipping... ' 8 | exit 0 9 | fi 10 | 11 | layouts='ls rs la ra' 12 | lv=`uname -r` 13 | if expr $lv '>=' 2.6.30 > /dev/null 14 | then 15 | layouts="$layouts parity-first ddf-zero-restart ddf-N-restart ddf-N-continue \ 16 | left-asymmetric-6 right-asymmetric-6 left-symmetric-6 right-symmetric-6 parity-first-6" 17 | fi 18 | 19 | for layout in $layouts 20 | do 21 | mdadm -CR $md0 -l6 --layout $layout -n5 $dev0 $dev1 $dev2 $dev3 $dev4 22 | check wait 23 | tar cf - /etc > $md0 24 | sum=`sha1sum $md0` 25 | 26 | totest= 27 | for second in $dev0 $dev1 $dev2 $dev3 $dev4 28 | do 29 | mdadm $md0 -f $second 30 | mdadm $md0 -r $second 31 | blockdev --flushbufs $md0 32 | sum1=`sha1sum $md0` 33 | if [ "$sum" != "$sum1" ] 34 | then 35 | echo $sum does not match $sum1 with $second missing 36 | exit 1 37 | fi 38 | for first in $totest 39 | do 40 | mdadm $md0 -f $first 41 | mdadm $md0 -r $first 42 | blockdev --flushbufs $md0 43 | sum1=`sha1sum $md0` 44 | if [ "$sum" != "$sum1" ] 45 | then 46 | echo $sum does not match $sum1 with $first and $second missing 47 | exit 1 48 | fi 49 | mdadm $md0 -a $first 50 | while ! (check state 'U*_U*'); do check wait; sleep 0.2; done 51 | done 52 | mdadm $md0 -a $second 53 | while ! 
(check state 'U*'); do check wait; sleep 0.2; done 54 | totest="$totest $second" 55 | done 56 | mdadm -S $md0 57 | done 58 | -------------------------------------------------------------------------------- /tests/01replace: -------------------------------------------------------------------------------- 1 | set -x -e 2 | 3 | ## test --replace for raid5 raid6 raid1 and raid10 4 | #1/ after replace, can remove replaced device 5 | #2/ after --replace-with cannot remove the 'with' device 6 | #3/ preserve integrity with concurrent failure 7 | 8 | for level in 1 5 6 10 9 | do 10 | dd if=/dev/zero of=$dev4 bs=1M || true 11 | dd if=/dev/zero of=$dev5 bs=1M || true 12 | mdadm -CR $md0 -l $level -n4 -x2 $devlist5 13 | dd if=/dev/urandom of=$md0 bs=1M || true 14 | sum=`sha1sum < $md0` 15 | check wait 16 | mdadm $md0 --replace $dev1 17 | check wait 18 | mdadm $md0 --remove $dev1 19 | mdadm $md0 --remove $dev5 && exit 1 20 | mdadm -S $md0 21 | dd if=/dev/zero of=$dev4 bs=1M || true 22 | dd if=/dev/zero of=$dev5 bs=1M || true 23 | mdadm -CR $md0 -l $level -n4 -x2 $devlist5 24 | check wait 25 | sum1=`sha1sum < $md0` 26 | [ "$sum" == "$sum1" ] 27 | 28 | mdadm $md0 --replace $dev1 --with $dev4 29 | check wait 30 | mdadm $md0 --remove $dev1 31 | mdadm $md0 --remove $dev5 32 | mdadm $md0 --remove $dev4 && exit 1 33 | 34 | mdadm $md0 --add $dev1 $dev5 35 | mdadm $md0 --replace $dev0 36 | sleep 2 37 | mdadm $md0 --fail $dev2 38 | check wait 39 | sum2=`sha1sum < $md0` 40 | [ "$sum" == "$sum2" ] 41 | 42 | mdadm $md0 --remove $dev0 $dev2 43 | mdadm $md0 --add $dev0 $dev2 44 | mdadm $md0 --replace $dev3 45 | sleep 2 46 | mdadm $md0 --fail $dev0 $dev2 47 | check wait 48 | sum3=`sha1sum < $md0` 49 | [ "$sum" == "$sum3" ] 50 | 51 | mdadm -S $md0 52 | done 53 | -------------------------------------------------------------------------------- /tests/02lineargrow: -------------------------------------------------------------------------------- 1 | 2 | # create a linear array, and add more drives to it. 3 | 4 | if [ "$LINEAR" != "yes" ]; then 5 | echo -ne 'skipping... ' 6 | exit 0 7 | fi 8 | 9 | for e in 0.90 1 1.1 1.2 10 | do 11 | case $e in 12 | 0.90 ) sz=$mdsize0 ;; 13 | 1 ) sz=$mdsize2_l ;; 14 | 1.0 ) sz=$mdsize1 ;; 15 | 1.1 ) sz=$mdsize1_l ;; 16 | 1.2 ) sz=$mdsize2_l ;; 17 | esac 18 | mdadm -CRf $md0 --level linear -e $e --raid-disks=1 $dev1 19 | testdev $md0 1 $sz 1 20 | 21 | mdadm --grow $md0 --add $dev2 22 | testdev $md0 2 $sz 1 23 | 24 | mdadm --grow $md0 --add $dev3 25 | testdev $md0 3 $sz 1 26 | 27 | mdadm -S $md0 28 | mdadm --zero /dev/loop2 29 | mdadm --zero /dev/loop3 30 | done 31 | -------------------------------------------------------------------------------- /tests/02r1add: -------------------------------------------------------------------------------- 1 | 2 | # Make a raid1, add a device, then remove it again.
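# A rough sketch of what the 'check state' calls below assert (assuming the
# helper defined in tests/func.sh): they match the member-status string at the
# end of the array's /proc/mdstat line, e.g. '[2/2] [UU]' for a healthy
# two-disk mirror, or '[3/2] [_UU]' once the first slot has been failed.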
3 | 4 | mdadm -CR $md0 -l1 -n2 -x1 $dev0 $dev1 $dev2 5 | check resync 6 | check wait 7 | check state UU 8 | 9 | mdadm --grow $md0 -n 3 10 | check recovery 11 | check wait 12 | check state UUU 13 | 14 | mdadm $md0 --fail $dev0 15 | check state _UU 16 | 17 | mdadm --grow $md0 -n 2 18 | check state UU 19 | 20 | mdadm -S $md0 21 | # same again for version-1 22 | 23 | 24 | mdadm -CR $md0 -l1 -n2 -e1.2 -x1 $dev0 $dev1 $dev2 25 | check resync 26 | check wait 27 | check state UU 28 | 29 | mdadm --grow $md0 -n 3 30 | check recovery 31 | check wait 32 | check state UUU 33 | 34 | mdadm $md0 --fail $dev0 35 | check state _UU 36 | 37 | mdadm --grow $md0 -n 2 38 | check state UU 39 | 40 | mdadm -S $md0 41 | -------------------------------------------------------------------------------- /tests/02r1grow: -------------------------------------------------------------------------------- 1 | 2 | 3 | # create a small raid1 array, make it larger. Then make it smaller 4 | 5 | mdadm -CR $md0 -e 0.90 --level raid1 --raid-disks 3 --size $[size/2] $dev1 $dev2 $dev3 6 | check wait 7 | check state UUU 8 | testdev $md0 1 $[size/2] 1 9 | 10 | mdadm --grow $md0 --size max 11 | check resync 12 | check wait 13 | testdev $md0 1 $mdsize0 1 14 | 15 | mdadm --grow $md0 --size $[size/2] 16 | check nosync 17 | testdev $md0 1 $[size/2] 1 18 | 19 | mdadm -S $md0 20 | 21 | # same again with version 1.1 superblock 22 | mdadm -CR $md0 --level raid1 --metadata=1.1 --raid-disks 3 --size $[size/2] $dev1 $dev2 $dev3 23 | check wait 24 | check state UUU 25 | testdev $md0 1 $[size/2] 1 26 | 27 | mdadm --grow $md0 --size max 28 | check resync 29 | check wait 30 | testdev $md0 1 $mdsize1_l 1 31 | 32 | mdadm --grow $md0 --size $[size/2] 33 | check nosync 34 | testdev $md0 1 $[size/2] 1 35 | 36 | mdadm -S $md0 37 | -------------------------------------------------------------------------------- /tests/02r5grow: -------------------------------------------------------------------------------- 1 | 2 | 3 | # create a small raid5 array, make it larger. 
Then make it smaller 4 | 5 | mdadm -CR $md0 -e0.90 --level raid5 --chunk=64 --raid-disks 3 --size $[size/2] $dev1 $dev2 $dev3 6 | check wait 7 | check state UUU 8 | testdev $md0 2 $[size/2] 32 9 | 10 | mdadm --grow $md0 --size max 11 | check resync 12 | check wait 13 | testdev $md0 2 $mdsize0 32 14 | 15 | mdadm --grow $md0 --size $[size/2] 16 | check nosync 17 | testdev $md0 2 $[size/2] 32 18 | 19 | mdadm -S $md0 20 | 21 | # same again with version 1.1 superblock 22 | mdadm -CR $md0 --level raid5 --metadata=1.1 --chunk=128 --raid-disks 4 --size $[size/2] $dev1 $dev2 $dev3 $dev4 23 | check wait 24 | check state UUUU 25 | testdev $md0 3 $[size/2] 128 26 | 27 | mdadm --grow $md0 --size max 28 | check resync 29 | check wait 30 | testdev $md0 3 $[mdsize1_l] 128 31 | 32 | mdadm --grow $md0 --size $[size/2] 33 | check nosync 34 | testdev $md0 3 $[size/2] 128 35 | 36 | mdadm -S $md0 37 | 38 | # create a raid5 array and change the chunk 39 | mdadm -CR $md0 --level raid5 --metadata=1.1 --chunk=32 --raid-disks 3 --size $[size/2] $dev1 $dev2 $dev3 40 | check wait 41 | check state UUU 42 | check chunk 32 43 | 44 | mdadm $md0 --grow --chunk=64 45 | check reshape 46 | check wait 47 | check chunk 64 48 | 49 | mdadm -S $md0 50 | mdadm -A $md0 $dev1 $dev2 $dev3 51 | check state UUU 52 | check chunk 64 53 | mdadm -S $md0 54 | -------------------------------------------------------------------------------- /tests/02r6grow: -------------------------------------------------------------------------------- 1 | 2 | 3 | # create a small raid6 array, make it larger. Then make it smaller 4 | 5 | mdadm -CR $md0 -e 0.90 --level raid6 --chunk=64 --raid-disks 4 --size $[size/2] $dev1 $dev2 $dev3 $dev4 6 | check wait 7 | check state UUUU 8 | testdev $md0 2 $[size/2] 32 9 | 10 | mdadm --grow $md0 --size max 11 | check resync 12 | check wait 13 | testdev $md0 2 $mdsize0 32 14 | 15 | mdadm --grow $md0 --size $[size/2] 16 | check nosync 17 | testdev $md0 2 $[size/2] 32 18 | 19 | mdadm -S $md0 20 | 21 | # same again with version 1.1 superblock 22 | mdadm -CR $md0 --level raid6 --metadata=1.1 --chunk=128 --raid-disks 4 --size $[size/2] $dev1 $dev2 $dev3 $dev4 23 | check wait 24 | check state UUUU 25 | testdev $md0 2 $[size/2] 128 26 | 27 | mdadm --grow $md0 --size max 28 | check resync 29 | check wait 30 | testdev $md0 2 $[mdsize1_l] 128 31 | 32 | mdadm --grow $md0 --size $[size/2] 33 | check nosync 34 | testdev $md0 2 $[size/2] 128 35 | 36 | mdadm -S $md0 37 | -------------------------------------------------------------------------------- /tests/03assem-incr: -------------------------------------------------------------------------------- 1 | set -x -e 2 | 3 | # Test interaction between -I and -A 4 | # there are locking issue too, but those are hard to test for. 
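# (Background, as a sketch: 'mdadm -I' on only some members leaves an inactive,
# partially assembled array visible in /proc/mdstat; the later 'mdadm -A' has
# to adopt those already-claimed members rather than refusing them as busy.)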
5 | # 6 | # Here just test that a partly "-I" assembled array can 7 | # be completed with "-A" 8 | 9 | levels=(raid0 raid1 raid5) 10 | 11 | if [ "$LINEAR" == "yes" ]; then 12 | levels+=( linear ) 13 | fi 14 | 15 | is_raid_foreign $md0 16 | 17 | for l in ${levels[@]} 18 | do 19 | mdadm -CR $md0 -l $l -n5 $dev0 $dev1 $dev2 $dev3 $dev4 --assume-clean 20 | mdadm -S $md0 21 | mdadm -I $dev1 22 | mdadm -I $dev3 23 | mdadm -A $md0 $dev0 $dev1 $dev2 $dev3 $dev4 24 | # If one array is foreign (metadata name doesn't have the machine's 25 | # hostname), mdadm chooses a minor number automatically from 127 26 | if [ $is_foreign == "no" ]; then 27 | mdadm -S $md0 28 | else 29 | mdadm -S $md127 30 | fi 31 | done 32 | -------------------------------------------------------------------------------- /tests/03r5assem: -------------------------------------------------------------------------------- 1 | 2 | # create a raid5 array and assemble it in various ways, 3 | # including with missing devices. 4 | 5 | mdadm -CR -e 0.90 $md1 -l5 -n3 $dev0 $dev1 $dev2 6 | tst="check raid5 ;testdev $md1 2 $mdsize0 512 ; mdadm -S $md1" 7 | uuid=`mdadm -Db $md1 | sed 's/.*UUID=//'` 8 | check wait 9 | eval $tst 10 | 11 | mdadm -A $md1 $dev0 $dev1 $dev2 12 | eval $tst 13 | 14 | mdadm -A $md1 -u $uuid $devlist 15 | eval $tst 16 | 17 | mdadm -A $md1 -m 1 $devlist 18 | eval $tst 19 | 20 | 21 | conf=$targetdir/mdadm.conf 22 | { 23 | echo DEVICE $devlist 24 | echo array $md1 UUID=$uuid 25 | } > $conf 26 | 27 | mdadm -As -c $conf $md1 28 | eval $tst 29 | 30 | { 31 | echo DEVICE $devlist 32 | echo array $md1 super-minor=1 33 | } > $conf 34 | 35 | mdadm -As -c $conf 36 | eval $tst 37 | 38 | { 39 | echo DEVICE $devlist 40 | echo array $md1 devices=$dev0,$dev1,$dev2 41 | } > $conf 42 | 43 | mdadm -As -c $conf 44 | 45 | echo "DEVICE $devlist" > $conf 46 | mdadm -Db $md1 >> $conf 47 | eval $tst 48 | 49 | mdadm --assemble --scan --config=$conf $md1 50 | eval $tst 51 | 52 | echo " metadata=0.90 devices=$dev0,$dev1,$dev2" >> $conf 53 | mdadm --assemble --scan --config=$conf $md1 54 | eval $tst 55 | 56 | ### Now with a missing device 57 | 58 | mdadm -AR $md1 $dev0 $dev2 # 59 | check state U_U 60 | eval $tst 61 | 62 | mdadm -A $md1 -u $uuid $devlist 63 | check state U_U 64 | eval $tst 65 | 66 | mdadm -A $md1 -m 1 $devlist 67 | check state U_U 68 | eval $tst 69 | 70 | 71 | conf=$targetdir/mdadm.conf 72 | { 73 | echo DEVICE $devlist 74 | echo array $md1 UUID=$uuid 75 | } > $conf 76 | 77 | mdadm -As -c $conf $md1 78 | check state U_U 79 | eval $tst 80 | 81 | { 82 | echo DEVICE $devlist 83 | echo array $md1 super-minor=1 84 | } > $conf 85 | 86 | mdadm -As -c $conf 87 | check state U_U 88 | eval $tst 89 | 90 | { 91 | echo DEVICE $devlist 92 | echo array $md1 devices=$dev0,$dev1,$dev2 93 | } > $conf 94 | 95 | mdadm -As -c $conf 96 | 97 | echo "DEVICE $devlist" > $conf 98 | mdadm -Db $md1 >> $conf 99 | check state U_U 100 | eval $tst 101 | 102 | mdadm --assemble --scan --config=$conf $md1 103 | check state U_U 104 | eval $tst 105 | 106 | echo " metadata=0.90 devices=$dev0,$dev1,$dev2" >> $conf 107 | mdadm --assemble --scan --config=$conf $md1 108 | check state U_U 109 | eval $tst 110 | -------------------------------------------------------------------------------- /tests/03r5assemV1: -------------------------------------------------------------------------------- 1 | 2 | # create a v-1 raid5 array and assemble in various ways 3 | 4 | mdadm -CR -e1 --name one $md1 -l5 -n3 -x2 $dev0 $dev1 $dev2 $dev3 $dev4 5 | tst="check raid5 ;testdev $md1 2 $mdsize1 
512 ; mdadm -S $md1" 6 | uuid=`mdadm -Db $md1 | sed 's/.*UUID=//'` 7 | check wait 8 | 9 | eval $tst 10 | 11 | mdadm -A $md1 $dev0 $dev1 $dev2 12 | mdadm $md1 --add $dev3 $dev4 13 | check spares 2 14 | eval $tst 15 | 16 | mdadm -A $md1 -u $uuid $devlist 17 | check spares 2 18 | eval $tst 19 | 20 | mdadm -A $md1 --name one $devlist 21 | check spares 2 22 | eval $tst 23 | 24 | 25 | conf=$targetdir/mdadm.conf 26 | { 27 | echo DEVICE $devlist 28 | echo array $md1 UUID=$uuid 29 | } > $conf 30 | 31 | mdadm -As -c $conf $md1 32 | eval $tst 33 | 34 | { 35 | echo DEVICE $devlist 36 | echo array $md1 devices=$dev0,$dev1,$dev2,$dev3,$dev4 37 | } > $conf 38 | 39 | mdadm -As -c $conf 40 | 41 | echo "DEVICE $devlist" > $conf 42 | mdadm -Db $md1 >> $conf 43 | eval $tst 44 | mdadm --assemble --scan --config=$conf $md1 45 | eval $tst 46 | echo PING >&2 47 | 48 | echo " metadata=1.0 devices=$dev0,$dev1,$dev2,$dev3,$dev4" >> $conf 49 | mdadm --assemble --scan --config=$conf $md1 50 | eval $tst 51 | 52 | ### Now with a missing device 53 | # We don't want the recovery to complete while we are 54 | # messing about here. 55 | echo 100 > /proc/sys/dev/raid/speed_limit_max 56 | echo 100 > /proc/sys/dev/raid/speed_limit_min 57 | 58 | mdadm -AR $md1 $dev0 $dev2 $dev3 $dev4 # 59 | check state U_U 60 | check spares 1 61 | eval $tst 62 | 63 | mdadm -A $md1 -u $uuid $devlist 64 | check state U_U 65 | eval $tst 66 | 67 | mdadm -A $md1 --name=one $devlist 68 | check state U_U 69 | check spares 1 70 | eval $tst 71 | 72 | 73 | conf=$targetdir/mdadm.conf 74 | { 75 | echo DEVICE $devlist 76 | echo array $md1 UUID=$uuid 77 | } > $conf 78 | 79 | mdadm -As -c $conf $md1 80 | check state U_U 81 | eval $tst 82 | 83 | { 84 | echo DEVICE $devlist 85 | echo array $md1 devices=$dev0,$dev1,$dev2 86 | } > $conf 87 | 88 | mdadm -As -c $conf 89 | 90 | echo "DEVICE $devlist" > $conf 91 | mdadm -Db $md1 >> $conf 92 | check state U_U 93 | eval $tst 94 | 95 | mdadm --assemble --scan --config=$conf $md1 96 | check state U_U 97 | eval $tst 98 | 99 | echo " metadata=1.0 devices=$dev0,$dev1,$dev2" >> $conf 100 | mdadm --assemble --scan --config=$conf $md1 101 | check state U_U 102 | eval $tst 103 | 104 | # And now assemble with -I 105 | mdadm -Ss 106 | mdadm -I -c $conf $dev0 107 | mdadm -I -c $conf $dev1 108 | mdadm -I -c $conf $dev2 109 | eval $tst 110 | echo 2000 > /proc/sys/dev/raid/speed_limit_max 111 | echo 1000 > /proc/sys/dev/raid/speed_limit_min 112 | -------------------------------------------------------------------------------- /tests/04r0update: -------------------------------------------------------------------------------- 1 | 2 | # create a raid0, re-assemble with a different super-minor 3 | 4 | if [ "$LINEAR" != "yes" ]; then 5 | echo -ne 'skipping... 
' 6 | exit 0 7 | fi 8 | 9 | mdadm -CR -e 0.90 $md0 -llinear -n3 $dev0 $dev1 $dev2 10 | testdev $md0 3 $mdsize0 1 11 | minor1=`mdadm -E $dev0 | sed -n -e 's/.*Preferred Minor : //p'` 12 | mdadm -S /dev/md0 13 | 14 | mdadm -A $md1 $dev0 $dev1 $dev2 15 | minor2=`mdadm -E $dev0 | sed -n -e 's/.*Preferred Minor : //p'` 16 | mdadm -S /dev/md1 17 | 18 | mdadm -A $md1 --update=super-minor $dev0 $dev1 $dev2 19 | minor3=`mdadm -E $dev0 | sed -n -e 's/.*Preferred Minor : //p'` 20 | mdadm -S /dev/md1 21 | 22 | case "$minor1 $minor2 $minor3" in 23 | "0 0 1" ) ;; 24 | * ) echo >&2 "ERROR minors should be '0 0 1' but are '$minor1 $minor2 $minor3'" 25 | exit 1 26 | esac 27 | -------------------------------------------------------------------------------- /tests/04r1update: -------------------------------------------------------------------------------- 1 | set -i 2 | 3 | # create a raid1 array, let it sync, then re-assemble with a force-sync 4 | 5 | mdadm -CR $md0 -l1 -n2 $dev0 $dev1 6 | check wait 7 | mdadm -S $md0 8 | 9 | mdadm -A $md0 $dev0 $dev1 10 | check nosync 11 | mdadm -S $md0 12 | 13 | mdadm -A $md0 -U resync $dev0 $dev1 14 | check resync 15 | mdadm -S $md0 16 | -------------------------------------------------------------------------------- /tests/04r5swap: -------------------------------------------------------------------------------- 1 | 2 | # make a raid5 array, byte swap the superblocks, then assemble... 3 | 4 | mdadm -CR $md0 -e 0.90 -l5 -n4 $dev0 $dev1 $dev2 $dev3 5 | sleep 6 6 | mdadm -S $md0 7 | 8 | mdadm -E --metadata=0 $dev1 > $targetdir/d1 9 | for d in $dev0 $dev1 $dev2 $dev3 10 | do $dir/swap_super $d 11 | done 12 | mdadm -E --metadata=0.swap $dev1 > $targetdir/d1s 13 | diff -u $targetdir/d1 $targetdir/d1s 14 | 15 | mdadm --assemble --update=byteorder $md0 $dev0 $dev1 $dev2 $dev3 16 | sleep 3 17 | check recovery 18 | mdadm -S $md0 19 | -------------------------------------------------------------------------------- /tests/04update-metadata: -------------------------------------------------------------------------------- 1 | set -xe 2 | 3 | # test converting v0.90 to v1.0 4 | # check for different levels 5 | # check it fails for non-v0.90 6 | # check it fails during reshape or recovery 7 | # check it fails when bitmap is present 8 | 9 | dlist="$dev0 $dev1 $dev2 $dev3" 10 | 11 | if [ $skipping_linear == "yes" ]; then 12 | level_list="raid1/1 raid5/3 raid6/2" 13 | else 14 | level_list="linear/4 raid1/1 raid5/3 raid6/2" 15 | fi 16 | for ls in $level_list 17 | do 18 | s=${ls#*/} l=${ls%/*} 19 | if [[ $l == 'raid1' ]]; then 20 | mdadm -CR --assume-clean -e 0.90 $md0 --level $l -n 4 $dlist 21 | else 22 | mdadm -CR --assume-clean -e 0.90 $md0 --level $l -n 4 -c 64 $dlist 23 | fi 24 | testdev $md0 $s 19904 64 25 | mdadm -S $md0 26 | mdadm -A $md0 --update=metadata $dlist 27 | testdev $md0 $s 19904 64 check 28 | mdadm -S $md0 29 | done 30 | 31 | if mdadm -A $md0 --update=metadata $dlist 32 | then echo >&2 should fail with v1.0 metadata 33 | exit 1 34 | fi 35 | 36 | mdadm -CR -e 0.90 $md0 --level=6 -n4 -c32 $dlist 37 | mdadm -S $md0 38 | 39 | if mdadm -A $md0 --update=metadata $dlist 40 | then echo >&2 should fail during resync 41 | exit 1 42 | fi 43 | mdadm -A $md0 $dlist 44 | mdadm --wait $md0 || true 45 | mdadm -S $md0 46 | 47 | # should succeed now 48 | mdadm -A $md0 --update=metadata $dlist 49 | 50 | mdadm -S /dev/md0 51 | mdadm -CR --assume-clean -e 0.90 $md0 --level=6 -n4 -c32 $dlist --bitmap=internal 52 | mdadm -S $md0 53 | 54 | if mdadm -A $md0 --update=metadata $dlist 55 | then echo 
>&2 should fail when bitmap present 56 | exit 1 57 | fi 58 | -------------------------------------------------------------------------------- /tests/04update-uuid: -------------------------------------------------------------------------------- 1 | set -x 2 | 3 | # create an array, then change the uuid. 4 | 5 | mdadm -CR --assume-clean $md0 -l5 -n3 $dev0 $dev1 $dev2 6 | mdadm -S /dev/md0 7 | mdadm -A /dev/md0 --update=uuid --uuid=0123456789abcdef:fedcba9876543210 $dev0 $dev1 $dev2 8 | no_errors 9 | mdadm -D /dev/md0 | grep -s > /dev/null 01234567:89abcdef:fedcba98:76543210 || { 10 | echo Wrong uuid; mdadm -D /dev/md0 ; exit 2; 11 | } 12 | mdadm -S /dev/md0 13 | 14 | # try v1 superblock 15 | 16 | mdadm -CR --assume-clean -e1 $md0 -l5 -n3 $dev0 $dev1 $dev2 17 | mdadm -S /dev/md0 18 | mdadm -A /dev/md0 --update=uuid --uuid=0123456789abcdef:fedcba9876543210 $dev0 $dev1 $dev2 19 | no_errors 20 | mdadm -D /dev/md0 | grep -s > /dev/null 01234567:89abcdef:fedcba98:76543210 || { 21 | echo Wrong uuid; mdadm -D /dev/md0 ; exit 2; 22 | } 23 | mdadm -S /dev/md0 24 | 25 | # Internal bitmaps too. 26 | mdadm -CR --assume-clean -b internal --bitmap-chunk 4 $md0 -l5 -n3 $dev0 $dev1 $dev2 27 | mdadm -S /dev/md0 28 | mdadm -A /dev/md0 --update=uuid --uuid=0123456789abcdef:fedcba9876543210 $dev0 $dev1 $dev2 29 | no_errors 30 | mdadm -D /dev/md0 | grep -s > /dev/null 01234567:89abcdef:fedcba98:76543210 || { 31 | echo Wrong uuid; mdadm -D /dev/md0 ; exit 2; 32 | } 33 | mdadm -X $dev0 | grep -s > /dev/null 01234567:89abcdef:fedcba98:76543210 || { 34 | echo Wrong uuid; mdadm -X $dev0; exit 2; 35 | } 36 | mdadm -S /dev/md0 37 | 38 | mdadm -CR --assume-clean -e1.2 -b internal --bitmap-chunk=4 $md0 -l5 -n3 $dev0 $dev1 $dev2 39 | mdadm -S /dev/md0 40 | mdadm -A /dev/md0 --update=uuid --uuid=0123456789abcdef:fedcba9876543210 $dev0 $dev1 $dev2 41 | no_errors 42 | mdadm -D /dev/md0 | grep -s > /dev/null 01234567:89abcdef:fedcba98:76543210 || { 43 | echo Wrong uuid; mdadm -D /dev/md0 ; exit 2; 44 | } 45 | mdadm -X $dev0 | grep -s > /dev/null 01234567:89abcdef:fedcba98:76543210 || { 46 | echo Wrong uuid; mdadm -X $dev0; exit 2; 47 | } 48 | mdadm -S /dev/md0 49 | -------------------------------------------------------------------------------- /tests/05r1-add-badblocks: -------------------------------------------------------------------------------- 1 | # 2 | # create a raid1 with a drive and set badblocks for the drive. 3 | # add a new drive does not cause an error. 4 | # 5 | 6 | # create raid1 7 | mdadm -CR $md0 -l1 -n2 -e1.0 $dev1 missing 8 | testdev $md0 1 $mdsize1a 64 9 | sleep 3 10 | 11 | # set badblocks for the drive 12 | dev1_name=$(basename $dev1) 13 | echo "100 100" > /sys/block/md0/md/dev-$dev1_name/bad_blocks 14 | echo "write_error" > /sys/block/md0/md/dev-$dev1_name/state 15 | 16 | # write badblocks to metadata 17 | dd if=/dev/zero of=$md0 bs=512 count=200 oflag=direct 18 | 19 | # re-add and recovery 20 | mdadm $md0 -a $dev2 21 | check recovery 22 | 23 | mdadm -S $md0 24 | 25 | -------------------------------------------------------------------------------- /tests/05r1-add-internalbitmap: -------------------------------------------------------------------------------- 1 | # 2 | # create a raid1 without any bitmap, add the bitmap and then write to 3 | # the device. 
This should catch the case where the bitmap is created 4 | # but not reloaded correctly, such as the case fixed by 5 | # 4474ca42e2577563a919fd3ed782e2ec55bf11a2 6 | # 7 | mdadm --create --run $md0 --metadata=0.9 --level=1 -n2 --delay=1 $dev1 $dev2 8 | check wait 9 | check nobitmap 10 | testdev $md0 1 $mdsize1b 64 11 | mdadm -Gb internal --bitmap-chunk=4 $md0 12 | check bitmap 13 | testdev $md0 1 $mdsize1b 64 14 | mdadm -S $md0 15 | 16 | # Re-assemble the array and verify the bitmap is still present 17 | mdadm --assemble $md0 $dev1 $dev2 18 | check bitmap 19 | testdev $md0 1 $mdsize1b 64 20 | mdadm -S $md0 21 | -------------------------------------------------------------------------------- /tests/05r1-add-internalbitmap-v1a: -------------------------------------------------------------------------------- 1 | # 2 | # create a raid1 without any bitmap, add the bitmap and then write to 3 | # the device. This should catch the case where the bitmap is created 4 | # but not reloaded correctly, such as the case fixed by 5 | # 4474ca42e2577563a919fd3ed782e2ec55bf11a2 6 | # 7 | mdadm --create --run $md0 --metadata=1.0 --level=1 -n2 --delay=1 $dev1 $dev2 8 | check wait 9 | check nobitmap 10 | testdev $md0 1 $mdsize1b 64 11 | mdadm -Gb internal --bitmap-chunk=4 $md0 12 | check bitmap 13 | testdev $md0 1 $mdsize1b 64 14 | mdadm -S $md0 15 | 16 | # Re-assemble the array and verify the bitmap is still present 17 | mdadm --assemble $md0 $dev1 $dev2 18 | check bitmap 19 | testdev $md0 1 $mdsize1b 64 20 | mdadm -S $md0 21 | -------------------------------------------------------------------------------- /tests/05r1-add-internalbitmap-v1b: -------------------------------------------------------------------------------- 1 | # 2 | # create a raid1 without any bitmap, add the bitmap and then write to 3 | # the device. This should catch the case where the bitmap is created 4 | # but not reloaded correctly, such as the case fixed by 5 | # 4474ca42e2577563a919fd3ed782e2ec55bf11a2 6 | # 7 | mdadm --create --run $md0 --metadata=1.1 --level=1 -n2 --delay=1 $dev1 $dev2 8 | check wait 9 | check nobitmap 10 | testdev $md0 1 $mdsize1b 64 11 | mdadm -Gb internal --bitmap-chunk=4 $md0 12 | check bitmap 13 | testdev $md0 1 $mdsize1b 64 14 | mdadm -S $md0 15 | 16 | # Re-assemble the array and verify the bitmap is still present 17 | mdadm --assemble $md0 $dev1 $dev2 18 | check bitmap 19 | testdev $md0 1 $mdsize1b 64 20 | mdadm -S $md0 21 | -------------------------------------------------------------------------------- /tests/05r1-add-internalbitmap-v1c: -------------------------------------------------------------------------------- 1 | # 2 | # create a raid1 without any bitmap, add the bitmap and then write to 3 | # the device. 
This should catch the case where the bitmap is created 4 | # but not reloaded correctly, such as the case fixed by 5 | # 4474ca42e2577563a919fd3ed782e2ec55bf11a2 6 | # 7 | mdadm --create --run $md0 --metadata=1.2 --level=1 -n2 --delay=1 $dev1 $dev2 8 | check wait 9 | check nobitmap 10 | testdev $md0 1 $mdsize1b 64 11 | mdadm -Gb internal --bitmap-chunk=4 $md0 12 | check bitmap 13 | testdev $md0 1 $mdsize1b 64 14 | mdadm -S $md0 15 | 16 | # Re-assemble the array and verify the bitmap is still present 17 | mdadm --assemble $md0 $dev1 $dev2 18 | check bitmap 19 | testdev $md0 1 $mdsize1b 64 20 | mdadm -S $md0 21 | -------------------------------------------------------------------------------- /tests/05r1-failfast: -------------------------------------------------------------------------------- 1 | 2 | # create a simple mirror and check failfast flag works 3 | mdadm -CR $md0 -e1.2 --level=raid1 --failfast -n2 $dev0 $dev1 4 | check raid1 5 | if grep -v failfast /sys/block/md0/md/rd*/state > /dev/null 6 | then 7 | die "failfast missing" 8 | fi 9 | 10 | # Removing works with the failfast flag 11 | mdadm $md0 -f $dev0 12 | mdadm $md0 -r $dev0 13 | if grep -v failfast /sys/block/md0/md/rd1/state > /dev/null 14 | then 15 | die "failfast missing" 16 | fi 17 | 18 | # Adding works with the failfast flag 19 | mdadm $md0 -a --failfast $dev0 20 | check wait 21 | if grep -v failfast /sys/block/md0/md/rd0/state > /dev/null 22 | then 23 | die "failfast missing" 24 | fi 25 | 26 | mdadm -S $md0 27 | 28 | # Assembling works with the failfast flag 29 | mdadm -A $md0 $dev0 $dev1 30 | check raid1 31 | if grep -v failfast /sys/block/md0/md/rd*/state > /dev/null 32 | then 33 | die "failfast missing" 34 | fi 35 | 36 | # Adding works with the nofailfast flag 37 | mdadm $md0 -f $dev0 38 | mdadm $md0 -r $dev0 39 | mdadm $md0 -a --nofailfast $dev0 40 | check wait 41 | if grep failfast /sys/block/md0/md/rd0/state > /dev/null 42 | then 43 | die "failfast should be missing" 44 | fi 45 | 46 | # Assembling with one faulty slave works with the failfast flag 47 | mdadm $md0 -f $dev0 48 | mdadm $md0 -r $dev0 49 | mdadm -S $md0 50 | mdadm -A $md0 $dev0 $dev1 51 | check raid1 52 | mdadm -S $md0 53 | 54 | # Spare works with the failfast flag 55 | mdadm -CR $md0 -e1.2 --level=raid1 --failfast -n2 $dev0 $dev1 56 | check raid1 57 | mdadm $md0 -a --failfast $dev2 58 | check wait 59 | check spares 1 60 | if grep -v failfast /sys/block/md0/md/rd*/state > /dev/null 61 | then 62 | die "failfast missing" 63 | fi 64 | 65 | # Grow works with the failfast flag 66 | mdadm -G $md0 --raid-devices=3 67 | check wait 68 | if grep -v failfast /sys/block/md0/md/rd*/state > /dev/null 69 | then 70 | die "failfast missing" 71 | fi 72 | mdadm -S $md0 73 | 74 | exit 0 75 | -------------------------------------------------------------------------------- /tests/05r1-grow-internal: -------------------------------------------------------------------------------- 1 | 2 | # 3 | # create a raid1 array, add an internal bitmap 4 | # 5 | mdadm --create --run $md0 -l 1 -n 2 $dev1 $dev2 6 | check wait 7 | testdev $md0 1 $mdsize1a 64 8 | 9 | #mdadm -E $dev1 10 | mdadm --grow $md0 --bitmap=internal --bitmap-chunk=4 --delay=1 || { mdadm -X $dev2 ; exit 1; } 11 | sleep 6 12 | dirty1=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'` 13 | 14 | testdev $md0 1 $mdsize1a 64 15 | sleep 6 16 | dirty2=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'` 17 | 18 | if [ $dirty1 -ne 0 -o $dirty2 -ne 0 ] 19 | then echo >&2 "ERROR bad 'dirty' counts: 
dirty1 $dirty1, dirty2 $dirty2" 20 | echo bad dirty counts 21 | exit 1 22 | fi 23 | 24 | # now to remove the bitmap 25 | check bitmap 26 | mdadm --grow $md0 --bitmap=none 27 | check nobitmap 28 | mdadm -S $md0 29 | -------------------------------------------------------------------------------- /tests/05r1-grow-internal-1: -------------------------------------------------------------------------------- 1 | 2 | # 3 | # create a raid1 array, version 1 superblock, add an internal bitmap 4 | # 5 | mdadm --create --run $md0 -e1 -l 1 -n 2 $dev1 $dev2 6 | check wait 7 | testdev $md0 1 $mdsize1b 64 8 | 9 | #mdadm -E $dev1 10 | mdadm --grow $md0 --bitmap=internal --bitmap-chunk=4 --delay=1 11 | sleep 6 12 | dirty1=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'` 13 | 14 | testdev $md0 1 $mdsize1b 64 15 | sleep 6 16 | dirty2=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'` 17 | 18 | if [ $dirty1 -ne 0 -o $dirty2 -ne 0 ] 19 | then echo >&2 "ERROR bad 'dirty' counts: dirty1 $dirty1, dirty2 $dirty2" 20 | exit 1 21 | fi 22 | 23 | # now to remove the bitmap 24 | check bitmap 25 | mdadm --grow $md0 --bitmap=none 26 | check nobitmap 27 | mdadm -S $md0 28 | -------------------------------------------------------------------------------- /tests/05r1-internalbitmap: -------------------------------------------------------------------------------- 1 | 2 | # 3 | # create a raid1 with an internal bitmap 4 | # 5 | mdadm --create -e0.90 --run $md0 --level=1 -n2 --delay=1 --bitmap internal --bitmap-chunk=4 $dev1 $dev2 6 | check wait 7 | testdev $md0 1 $mdsize0 64 8 | mdadm -S $md0 9 | 10 | mdadm --assemble $md0 $dev1 $dev2 11 | testdev $md0 1 $mdsize0 64 12 | sleep 6 13 | dirty1=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'` 14 | 15 | if [ $dirty1 -ne 0 ] 16 | then echo >&2 "ERROR bad 'dirty' counts: $dirty1" 17 | exit 1 18 | fi 19 | mdadm $md0 -f $dev1 20 | testdev $md0 1 $mdsize0 64 21 | sleep 6 22 | total=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) bits.*/\1/p'` 23 | dirty2=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'` 24 | if [ $dirty2 -ne $total ] 25 | then echo >&2 "ERROR bad 'dirty' counts: total $total, dirty2 $dirty2" 26 | exit 2 27 | fi 28 | 29 | mdadm -S $md0 30 | 31 | mdadm --assemble -R $md0 $dev2 32 | mdadm --zero-superblock $dev1 33 | mdadm $md0 --add $dev1 34 | check recovery 35 | 36 | check wait 37 | sleep 6 38 | dirty3=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'` 39 | 40 | if [ $dirty3 -ne 0 ] 41 | then echo >&2 "ERROR bad 'dirty' counts: $dirty3" 42 | exit 1 43 | fi 44 | 45 | mdadm -S $md0 46 | -------------------------------------------------------------------------------- /tests/05r1-internalbitmap-v1a: -------------------------------------------------------------------------------- 1 | 2 | # 3 | # create a raid1 with an internal bitmap 4 | # 5 | mdadm --create --run $md0 --metadata=1.0 --level=1 -n2 --delay=1 --bitmap internal --bitmap-chunk=4 $dev1 $dev2 6 | check wait 7 | check bitmap 8 | testdev $md0 1 $mdsize1b 64 9 | mdadm -S $md0 10 | 11 | mdadm --assemble $md0 $dev1 $dev2 12 | testdev $md0 1 $mdsize1b 64 13 | sleep 6 14 | dirty1=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'` 15 | 16 | if [ $dirty1 -ne 0 ] 17 | then echo >&2 "ERROR bad 'dirty' counts: $dirty1" 18 | exit 1 19 | fi 20 | mdadm $md0 -f $dev1 21 | testdev $md0 1 $mdsize1b 64 22 | sleep 6 23 | total=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) bits.*/\1/p'` 24 | dirty2=`mdadm -X $dev2 | 
sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'` 25 | if [ $dirty2 -ne $total ] 26 | then echo >&2 "ERROR bad 'dirty' counts: total $total, dirty2 $dirty2" 27 | exit 2 28 | fi 29 | 30 | mdadm -S $md0 31 | 32 | mdadm --zero-superblock $dev1 33 | mdadm --assemble -R $md0 $dev2 34 | mdadm $md0 --add $dev1 35 | check recovery 36 | 37 | check wait 38 | sleep 6 39 | dirty3=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'` 40 | 41 | if [ $dirty3 -ne 0 ] 42 | then echo >&2 "ERROR bad 'dirty' counts: $dirty3" 43 | exit 1 44 | fi 45 | 46 | mdadm -S $md0 47 | -------------------------------------------------------------------------------- /tests/05r1-internalbitmap-v1b: -------------------------------------------------------------------------------- 1 | 2 | # 3 | # create a raid1 with an internal bitmap 4 | # 5 | mdadm --create --run $md0 --metadata=1.1 --level=1 -n2 --delay=1 --bitmap internal --bitmap-chunk=4 $dev1 $dev2 6 | check wait 7 | check bitmap 8 | testdev $md0 1 $mdsize11 64 9 | mdadm -S $md0 10 | 11 | mdadm --assemble $md0 $dev1 $dev2 12 | check bitmap 13 | testdev $md0 1 $mdsize11 64 14 | sleep 6 15 | dirty1=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'` 16 | 17 | if [ $dirty1 -ne 0 ] 18 | then echo >&2 "ERROR bad 'dirty' counts: $dirty1" 19 | exit 1 20 | fi 21 | mdadm $md0 -f $dev1 22 | testdev $md0 1 $mdsize11 64 23 | sleep 6 24 | total=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) bits.*/\1/p'` 25 | dirty2=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'` 26 | if [ $dirty2 -ne $total ] 27 | then echo >&2 "ERROR bad 'dirty' counts: total $total, dirty2 $dirty2" 28 | exit 2 29 | fi 30 | 31 | mdadm -S $md0 32 | 33 | mdadm --zero-superblock $dev1 34 | mdadm --assemble -R $md0 $dev2 35 | mdadm $md0 --add $dev1 36 | check recovery 37 | check wait 38 | sleep 6 39 | dirty3=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'` 40 | 41 | if [ $dirty3 -ne 0 ] 42 | then echo >&2 "ERROR bad 'dirty' counts: $dirty3" 43 | exit 1 44 | fi 45 | 46 | mdadm -S $md0 47 | -------------------------------------------------------------------------------- /tests/05r1-internalbitmap-v1c: -------------------------------------------------------------------------------- 1 | 2 | # 3 | # create a raid1 with an internal bitmap 4 | # 5 | mdadm --create --run $md0 --metadata=1.2 --level=1 -n2 --delay=1 --bitmap internal --bitmap-chunk 4 $dev1 $dev2 6 | check wait 7 | check bitmap 8 | testdev $md0 1 $mdsize12 64 9 | mdadm -S $md0 10 | 11 | mdadm --assemble $md0 $dev1 $dev2 12 | testdev $md0 1 $mdsize12 64 13 | sleep 6 14 | dirty1=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'` 15 | 16 | if [ $dirty1 -ne 0 ] 17 | then echo >&2 "ERROR bad 'dirty' counts: $dirty1" 18 | exit 1 19 | fi 20 | mdadm $md0 -f $dev1 21 | testdev $md0 1 $mdsize12 64 22 | sleep 6 23 | total=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) bits.*/\1/p'` 24 | dirty2=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'` 25 | if [ $dirty2 -ne $total ] 26 | then echo >&2 "ERROR bad 'dirty' counts: total $total, dirty2 $dirty2" 27 | exit 2 28 | fi 29 | 30 | mdadm -S $md0 31 | 32 | mdadm --zero-superblock $dev1 33 | mdadm --assemble -R $md0 $dev2 34 | mdadm $md0 --add $dev1 35 | check recovery 36 | 37 | check wait 38 | sleep 6 39 | dirty3=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'` 40 | 41 | if [ $dirty3 -ne 0 ] 42 | then echo >&2 "ERROR bad 'dirty' counts: $dirty3" 43 | exit 1 44 | fi 45 | 46 | mdadm -S $md0 47 | 
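# The dirty-count checks above all scrape 'mdadm -X' (--examine-bitmap) output,
# which contains a line along the lines of
#   Bitmap : 80 bits (chunks), 0 dirty (0.0%)
# so the repeated sed expression
#   mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'
# yields the dirty chunk count: 0 while the mirror is in sync, and equal to the
# total bit count after one half has been failed and the other written to.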
-------------------------------------------------------------------------------- /tests/05r1-re-add: -------------------------------------------------------------------------------- 1 | 2 | # 3 | # create a raid1, remove a drive, and readd it. 4 | # resync should be instant. 5 | # Then do some IO first. Resync should still be very fast 6 | # 7 | 8 | mdadm -CR $md0 -l1 -n2 -binternal --bitmap-chunk=4 -d1 $dev1 $dev2 9 | check resync 10 | check wait 11 | testdev $md0 1 $mdsize1a 64 12 | sleep 6 13 | 14 | mdadm $md0 -f $dev2 15 | sleep 2 16 | mdadm $md0 -r $dev2 17 | mdadm $md0 -a $dev2 18 | #cat /proc/mdstat 19 | check nosync 20 | 21 | mdadm $md0 -f $dev2 22 | sleep 2 23 | mdadm $md0 -r $dev2 24 | testdev $md0 1 $mdsize1a 64 25 | mdadm $md0 -a $dev2 26 | check wait 27 | blockdev --flushbufs $dev1 $dev2 28 | cmp --ignore-initial=$[64*512] --bytes=$[$mdsize0*1024] $dev1 $dev2 29 | 30 | mdadm $md0 -f $dev2; sleep 2 31 | mdadm $md0 -r $dev2 32 | if dd if=/dev/zero of=$md0 ; then : ; fi 33 | blockdev --flushbufs $md0 # ensure writes have been sent. 34 | mdadm $md0 -a $dev2 35 | check recovery 36 | check wait 37 | blockdev --flushbufs $dev1 $dev2 38 | cmp --ignore-initial=$[64*512] --bytes=$[$mdsize0*1024] $dev1 $dev2 39 | mdadm -S $md0 40 | -------------------------------------------------------------------------------- /tests/05r1-re-add-nosuper: -------------------------------------------------------------------------------- 1 | # 2 | # create a raid1 without superblock, remove a drive, and readd it. 3 | # readd should fail. 4 | # 5 | mdadm -B $md0 -l1 -n2 -d1 $dev1 $dev2 6 | sleep 2 7 | check resync 8 | check wait 9 | testdev $md0 1 $size 1 10 | sleep 6 11 | 12 | mdadm $md0 -f $dev2 13 | sleep 2 14 | mdadm $md0 -r $dev2 15 | if mdadm $md0 --re-add $dev2; then 16 | err "re-add should fail" 17 | fi 18 | 19 | mdadm -S $md0 20 | -------------------------------------------------------------------------------- /tests/05r1-remove-internalbitmap: -------------------------------------------------------------------------------- 1 | # 2 | # create a raid1 with bitmap, remove the bitmap and verify it is still 3 | # gone when re-assembling the array 4 | # 5 | mdadm --create --run $md0 --metadata=0.9 --level=1 -n2 --bitmap internal --bitmap-chunk=4 --delay=1 $dev1 $dev2 6 | check wait 7 | check bitmap 8 | testdev $md0 1 $mdsize1b 64 9 | mdadm -Gb none $md0 10 | check nobitmap 11 | testdev $md0 1 $mdsize1b 64 12 | mdadm -S $md0 13 | 14 | # Re-assemble the array and verify the bitmap is still present 15 | mdadm --assemble $md0 $dev1 $dev2 16 | check nobitmap 17 | testdev $md0 1 $mdsize1b 64 18 | mdadm -S $md0 19 | -------------------------------------------------------------------------------- /tests/05r1-remove-internalbitmap-v1a: -------------------------------------------------------------------------------- 1 | # 2 | # create a raid1 with bitmap, remove the bitmap and verify it is still 3 | # gone when re-assembling the array 4 | # 5 | mdadm --create --run $md0 --metadata=1.0 --level=1 -n2 --bitmap internal --bitmap-chunk=4 --delay=1 $dev1 $dev2 6 | check wait 7 | check bitmap 8 | testdev $md0 1 $mdsize1b 64 9 | mdadm -Gb none $md0 10 | check nobitmap 11 | testdev $md0 1 $mdsize1b 64 12 | mdadm -S $md0 13 | 14 | # Re-assemble the array and verify the bitmap is still present 15 | mdadm --assemble $md0 $dev1 $dev2 16 | check nobitmap 17 | testdev $md0 1 $mdsize1b 64 18 | mdadm -S $md0 19 | -------------------------------------------------------------------------------- /tests/05r1-remove-internalbitmap-v1b: 
-------------------------------------------------------------------------------- 1 | # 2 | # create a raid1 with bitmap, remove the bitmap and verify it is still 3 | # gone when re-assembling the array 4 | # 5 | mdadm --create --run $md0 --metadata=1.1 --level=1 -n2 --bitmap internal --bitmap-chunk=4 --delay=1 $dev1 $dev2 6 | check wait 7 | check bitmap 8 | testdev $md0 1 $mdsize1b 64 9 | mdadm -Gb none $md0 10 | check nobitmap 11 | testdev $md0 1 $mdsize1b 64 12 | mdadm -S $md0 13 | 14 | # Re-assemble the array and verify the bitmap is still gone 15 | mdadm --assemble $md0 $dev1 $dev2 16 | check nobitmap 17 | testdev $md0 1 $mdsize1b 64 18 | mdadm -S $md0 19 | -------------------------------------------------------------------------------- /tests/05r1-remove-internalbitmap-v1c: -------------------------------------------------------------------------------- 1 | # 2 | # create a raid1 with bitmap, remove the bitmap and verify it is still 3 | # gone when re-assembling the array 4 | # 5 | mdadm --create --run $md0 --metadata=1.2 --level=1 -n2 --bitmap internal --bitmap-chunk=4 --delay=1 $dev1 $dev2 6 | check wait 7 | check bitmap 8 | testdev $md0 1 $mdsize1b 64 9 | mdadm -Gb none $md0 10 | check nobitmap 11 | testdev $md0 1 $mdsize1b 64 12 | mdadm -S $md0 13 | 14 | # Re-assemble the array and verify the bitmap is still gone 15 | mdadm --assemble $md0 $dev1 $dev2 16 | check nobitmap 17 | testdev $md0 1 $mdsize1b 64 18 | mdadm -S $md0 19 | -------------------------------------------------------------------------------- /tests/05r5-internalbitmap: -------------------------------------------------------------------------------- 1 | 2 | # 3 | # create a raid5 with an internal bitmap 4 | # 5 | mdadm --create --run $md0 --level=5 -n3 --delay=1 --bitmap internal --bitmap-chunk=4 $dev1 $dev2 $dev3 6 | check wait 7 | testdev $md0 2 $mdsize1 512 8 | mdadm -S $md0 9 | 10 | mdadm --assemble $md0 $dev1 $dev2 $dev3 11 | testdev $md0 2 $mdsize1 512 12 | sleep 6 13 | dirty1=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'` 14 | 15 | if [ $dirty1 -ne 0 ] 16 | then echo >&2 "ERROR bad 'dirty' counts: $dirty1" 17 | exit 1 18 | fi 19 | mdadm $md0 -f $dev1 20 | testdev $md0 2 $mdsize1 512 21 | sleep 6 22 | dirty2=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'` 23 | if [ $dirty2 -lt 400 ] 24 | then 25 | echo >&2 "ERROR dirty count $dirty2 is too small" 26 | exit 2 27 | fi 28 | 29 | mdadm -S $md0 30 | 31 | mdadm --assemble -R $md0 $dev2 $dev3 32 | mdadm --zero $dev1 # force --add, not --re-add 33 | mdadm $md0 --add $dev1 34 | check recovery 35 | check wait 36 | sleep 6 37 | dirty3=`mdadm -X $dev2 | sed -n -e 's/.*Bitmap.* \([0-9]*\) dirty.*/\1/p'` 38 | 39 | if [ $dirty3 -ne 0 ] 40 | then echo >&2 "ERROR bad 'dirty' counts at end: $dirty3" 41 | exit 1 42 | fi 43 | 44 | mdadm -S $md0 45 | -------------------------------------------------------------------------------- /tests/05r6tor0: -------------------------------------------------------------------------------- 1 | set -x -e 2 | 3 | # reshape a RAID6 to RAID5 and then RAID0.
4 | # then reshape back up to RAID5 and RAID6 5 | 6 | mdadm -CR $md0 -l6 -n5 $dev0 $dev1 $dev2 $dev3 $dev4 7 | check wait; sleep 2 8 | check raid6 9 | testdev $md0 3 19456 512 10 | mdadm -G $md0 -l5 11 | check wait; sleep 2 12 | check raid5 13 | testdev $md0 3 19456 512 14 | mdadm -G $md0 -l0 15 | check wait; sleep 2 16 | while ps auxf | grep "mdadm -G" | grep -v grep 17 | do 18 | sleep 2 19 | done 20 | check raid0 21 | testdev $md0 3 19456 512 22 | mdadm -G $md0 -l5 --add $dev3 $dev4 23 | check wait; sleep 2 24 | check raid5 25 | check algorithm 2 26 | testdev $md0 3 19456 512 27 | mdadm -G $md0 -l 6 28 | check wait; sleep 2 29 | check raid6 30 | check algorithm 2 31 | testdev $md0 3 19456 512 32 | -------------------------------------------------------------------------------- /tests/05r6tor0.broken: -------------------------------------------------------------------------------- 1 | Sometimes 2 | 3 | +++ pgrep -f 'mdadm --grow --continue' 4 | ++ [[ '' != '' ]] 5 | ++ break 6 | ++ echo 100 7 | ++ echo 500 8 | ++ sleep 2 9 | ++ check raid5 10 | ++ case $1 in 11 | ++ grep -sq 'active raid5 ' /proc/mdstat 12 | ++ die 'active raid5 not found' 13 | ++ echo -e '\n\tERROR: active raid5 not found \n' 14 | 15 | ERROR: active raid5 not found 16 | -------------------------------------------------------------------------------- /tests/06name: -------------------------------------------------------------------------------- 1 | set -x 2 | 3 | # create an array with a name 4 | 5 | is_raid_foreign $md0 6 | 7 | mdadm -CR $md0 -l0 -n2 --metadata=1 --name="Fred" $dev0 $dev1 8 | 9 | if [ $is_foreign == "no" ]; then 10 | mdadm -E $dev0 | grep "Name : $(hostname):Fred" > /dev/null || exit 1 11 | mdadm -D $md0 | grep "Name : $(hostname):Fred" > /dev/null || exit 1 12 | else 13 | mdadm -E $dev0 | grep "Name : Fred" > /dev/null || exit 1 14 | mdadm -D $md0 | grep "Name : Fred" > /dev/null || exit 1 15 | fi 16 | mdadm -S $md0 17 | 18 | mdadm -A $md0 --name="Fred" $devlist 19 | #mdadm -Db $md0 20 | mdadm -S $md0 21 | -------------------------------------------------------------------------------- /tests/06sysfs: -------------------------------------------------------------------------------- 1 | exit 0 2 | mdadm -CR $md0 -l1 -n3 $dev1 $dev2 $dev3 3 | 4 | ls -Rl /sys/block/md0 5 | 6 | cat /sys/block/md0/md/level 7 | cat /sys/block/md0/md/raid_disks 8 | 9 | mdadm -S $md0 10 | 11 | exit 1 12 | -------------------------------------------------------------------------------- /tests/06wrmostly: -------------------------------------------------------------------------------- 1 | 2 | # create a raid1 array with a write-mostly device 3 | 4 | mdadm -CR $md0 -l1 -n3 $dev0 $dev1 --write-mostly $dev2 5 | testdev $md0 1 $mdsize1a 64 6 | 7 | # unfortunately, we cannot measure if any read requests are going to $dev2 8 | 9 | mdadm -S $md0 10 | 11 | mdadm -CR $md0 -l1 -n3 --write-behind --bitmap=internal --bitmap-chunk=4 $dev0 $dev1 --write-mostly $dev2 12 | testdev $md0 1 $mdsize1a 64 13 | mdadm -S $md0 14 | -------------------------------------------------------------------------------- /tests/07autoassemble: -------------------------------------------------------------------------------- 1 | 2 | # create two raid1s, build a raid0 on top, then 3 | # tear it down and get auto-assemble to rebuild it. 4 | 5 | # The lengths of md0/md1/md2 are the same, so use md0 here.
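# (A minimal sketch, not part of the test suite: whether assembly treats an
#  array as local or foreign can be seen from the name recorded in the
#  superblock. $dev0 here is a placeholder for any member device.)
# mdadm -E $dev0 | grep 'Name :'
#   local array   -> "Name : <homehost>:<name>", e.g. "testing:0"
#   foreign array -> bare name only; such arrays are assembled as md127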
6 | is_raid_foreign $md0 7 | 8 | mdadm -CR $md1 -l1 -n2 $dev0 $dev1 --homehost=testing 9 | mdadm -CR $md2 -l1 -n2 $dev2 $dev3 --homehost=testing 10 | mdadm -CR $md0 -l0 -n2 $md1 $md2 --homehost=testing 11 | 12 | mdadm -Ss 13 | mdadm -As -c /dev/null --homehost=testing -vvv 14 | testdev $md1 1 $mdsize1a 64 15 | testdev $md2 1 $mdsize1a 64 16 | # md1 and md2 will be incrementally assembled by udev rules, and 17 | # the test machine's hostname is not "testing", so md0 will be 18 | # treated as a foreign array: its metadata name is just "0" and 19 | # the node md127 will be used 20 | testdev $md127 2 $mdsize11a 512 21 | mdadm --stop $md127 22 | mdadm --zero-superblock $md1 23 | mdadm --zero-superblock $md2 24 | mdadm -Ss 25 | 26 | mdadm --zero-superblock $dev0 $dev1 $dev2 $dev3 27 | ## Now the raid0 uses one stacked and one not 28 | mdadm -CR $md1 -l1 -n2 $dev0 $dev1 --homehost=testing 29 | mdadm -CR $md0 -l0 -n2 $md1 $dev2 --homehost=testing 30 | mdadm -Ss 31 | mdadm -As -c /dev/null --homehost=testing -vvv 32 | testdev $md1 1 $mdsize1a 64 33 | testdev $md127 1 $[mdsize1a+mdsize11a] 512 34 | mdadm --stop $md127 35 | mdadm --zero-superblock $md1 36 | mdadm -Ss 37 | 38 | # Don't specify homehost when creating the arrays; the test 39 | # machine's own hostname is used. For super1.2, if the hostname 40 | # is longer than 32 characters it is not stored in the metadata 41 | # name and the array will be treated as a foreign array 42 | mdadm --zero-superblock $dev0 $dev1 $dev2 $dev3 43 | mdadm -CR $md1 -l1 -n2 $dev0 $dev1 44 | mdadm -CR $md2 -l1 -n2 $dev2 $dev3 45 | mdadm -CR $md0 -l0 -n2 $md1 $md2 46 | mdadm -Ss 47 | mdadm -As -c /dev/null 48 | cat /proc/mdstat # For logs 49 | if [ $is_foreign == "yes" ]; then 50 | # md127 is md1 51 | testdev $md127 1 $mdsize1a 64 52 | # md126 is md0, incrementally assembled by the udev rule 53 | testdev $md126 2 $mdsize11a 512 54 | # md125 is md2 55 | testdev $md125 1 $mdsize1a 64 56 | else 57 | testdev $md1 1 $mdsize1a 64 58 | testdev $md2 1 $mdsize1a 64 59 | testdev $md127 2 $mdsize11a 512 60 | fi 61 | mdadm -Ss 62 | -------------------------------------------------------------------------------- /tests/07autodetect: -------------------------------------------------------------------------------- 1 | 2 | # 3 | # Test in-kernel autodetect. 4 | # Create a partitionable array on each of two devices, 5 | # put a partition on each, create an array, and see if we can 6 | # use autodetect to restart the array. 7 | 8 | if lsmod | grep md_mod > /dev/null 2>&1; then skip "md is a module - cannot test autodetect"; fi 9 | 10 | mdadm -CR -e 0 $mdp0 -l0 -f -n1 $dev0 11 | mdadm -CR -e 0 $mdp1 -l0 -f -n1 $dev1 12 | udevadm settle 13 | sfdisk $mdp0 >&2 << END 14 | ,,FD 15 | END 16 | sfdisk $mdp1 >&2 << END 17 | ,,FD 18 | END 19 | udevadm settle 20 | mdadm -CR -e 0 $md0 -l1 -n2 ${mdp0}p1 ${mdp1}p1 21 | sleep 2 22 | check resync 23 | check raid1 24 | check wait 25 | mdadm -S $md0 26 | mdadm --auto-detect 27 | check raid1 28 | 29 | mdadm -Ss 30 | exit 0 31 | -------------------------------------------------------------------------------- /tests/07changelevelintr: -------------------------------------------------------------------------------- 1 | 2 | # 3 | # test that we can stop and restart a level change. 4 | # just test a few in-place changes, and a few 5 | # size-reducing changes.
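# (Context sketch, not part of the test: the stop/restart cycle exercised by
#  the restart() helper below; device and file names are placeholders.)
# mdadm --grow /dev/md0 --chunk=128 --backup-file=/tmp/md-backup
# grep reshape /proc/mdstat       # reshape is in progress
# mdadm --stop /dev/md0           # interrupt it part-way through
# mdadm --assemble /dev/md0 /dev/loop[0-4] --backup-file=/tmp/md-backup
# grep reshape /proc/mdstat       # it resumes from where it stopped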
6 | 7 | 8 | checkgeo() { 9 | # check the geometry of an array 10 | # level raid_disks chunk_size layout 11 | dev=$1 12 | shift 13 | sleep 0.5 14 | check wait 15 | sleep 2 16 | for attr in level raid_disks chunk_size layout 17 | do 18 | if [ $# -gt 0 ] ; then 19 | val=$1 20 | shift 21 | if [ " `cat /sys/block/$dev/md/$attr`" != " $val" ] 22 | then echo "$attr doesn't match for $dev" 23 | exit 1 24 | fi 25 | fi 26 | done 27 | } 28 | 29 | restart() { 30 | check reshape 31 | mdadm -S $md0 32 | mdadm -A $md0 $devs --backup-file=$bu 33 | check reshape 34 | } 35 | 36 | bu=/tmp/md-backup 37 | rm -f $bu 38 | devs="$dev0 $dev1 $dev2 $dev3 $dev4" 39 | mdadm -CR $md0 -l5 -n5 -c 256 $devs 40 | checkgeo md0 raid5 5 $[256*1024] 2 41 | 42 | mdadm -G $md0 -c 128 --backup-file=$bu 43 | restart 44 | checkgeo md0 raid5 5 $[128*1024] 2 45 | 46 | mdadm -G $md0 --layout rs --backup-file=$bu 47 | restart 48 | checkgeo md0 raid5 5 $[128*1024] 3 49 | 50 | # The array size needs to be shrunk first. Choose a value that 51 | # is a power of 2 for the array size; otherwise the chunk 52 | # size can't be changed. 53 | mdadm -G $md0 --array-size 51200 54 | mdadm -G $md0 --raid-disks 4 -c 64 --backup-file=$bu 55 | restart 56 | checkgeo md0 raid5 4 $[64*1024] 3 57 | 58 | devs="$dev0 $dev1 $dev2 $dev3" 59 | mdadm -G $md0 --array-size 18432 60 | mdadm -G $md0 -n 2 -c 256 --backup-file=$bu 61 | restart 62 | checkgeo md0 raid5 2 $[256*1024] 3 63 | -------------------------------------------------------------------------------- /tests/07changelevels.broken: -------------------------------------------------------------------------------- 1 | Fails in multiple ways. 2 | 3 | There are issues with RAID6: 4 | - R5 -> R6 migration makes md unresponsive 5 | - R6 -> R5 migration fails 6 | 7 | Not worth investigating this now, marking as broken to clear the CI. 8 | -------------------------------------------------------------------------------- /tests/07layouts: -------------------------------------------------------------------------------- 1 | 2 | # check that the kernel and restripe interpret all the different layouts 3 | # the same 4 | # This involves changing the layout to each different possibility 5 | # while MDADM_GROW_VERIFY is set.
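# (Context sketch, not part of the test: the active layout is exposed
#  numerically in sysfs, which is what the checkgeo helper below reads;
#  mdadm reports the same value symbolically. md0 / /dev/md0 are placeholders.)
# cat /sys/block/md0/md/layout             # e.g. 2 for left-symmetric RAID5
# mdadm --detail /dev/md0 | grep Layout    # e.g. "Layout : left-symmetric"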
6 | 7 | testK=$[64*3*6] 8 | dd if=/dev/urandom of=/tmp/RandFile bs=1024 count=$testK 9 | export MDADM_GROW_VERIFY=1 10 | 11 | 12 | dotest() { 13 | sleep 0.5 14 | check wait 15 | testdev $md0 $1 $mdsize1 512 nd 16 | blockdev --flushbufs $md0 17 | cmp -s -n $[testK*1024] $md0 /tmp/RandFile || { echo cmp failed; exit 2; } 18 | # write something new - shift chars by 4 19 | tr ' -~' '$-~ -#' < /tmp/RandFile > /tmp/RandFile2 20 | mv /tmp/RandFile2 /tmp/RandFile 21 | dd if=/tmp/RandFile of=$md0 22 | } 23 | 24 | checkgeo() { 25 | # check the geometry of an array 26 | # level raid_disks chunk_size layout 27 | dev=$1 28 | shift 29 | sleep 0.5 30 | check wait 31 | for attr in level raid_disks chunk_size layout 32 | do 33 | if [ $# -gt 0 ] ; then 34 | val=$1 35 | shift 36 | if [ " `sed 's/ .*//' /sys/block/$dev/md/$attr`" != " $val" ] 37 | then echo "$attr doesn't match for $dev" 38 | exit 1 39 | fi 40 | fi 41 | done 42 | } 43 | 44 | 45 | bu=/tmp/md-test-backup 46 | rm -f $bu 47 | 48 | # first a degraded 5 device raid5 49 | mdadm -CR $md0 -l5 -n5 $dev0 $dev1 missing $dev2 $dev3 50 | dd if=/tmp/RandFile of=$md0 51 | dotest 4 52 | 53 | l5[0]=la 54 | l5[1]=ra 55 | l5[2]=ls 56 | l5[3]=rs 57 | l5[4]=parity-first 58 | l5[5]=parity-last 59 | for layout in 0 1 2 3 4 5 0 60 | do 61 | mdadm -G $md0 --layout=${l5[$layout]} 62 | checkgeo md0 raid5 5 $[512*1024] $layout 63 | dotest 4 64 | done 65 | 66 | mdadm -S $md0 67 | # now a doubly degraded raid6 68 | mdadm -CR $md0 -l6 -n5 $dev0 missing $dev2 missing $dev4 69 | dd if=/tmp/RandFile of=$md0 70 | dotest 3 71 | 72 | l6[0]=la 73 | l6[1]=ra 74 | l6[2]=ls 75 | l6[3]=rs 76 | l6[4]=parity-first 77 | l6[5]=parity-last 78 | l6[8]=ddf-zero-restart 79 | l6[9]=ddf-N-restart 80 | l6[10]=ddf-N-continue 81 | l6[16]=left-asymmetric-6 82 | l6[17]=right-asymmetric-6 83 | l6[18]=left-symmetric-6 84 | l6[19]=right-symmetric-6 85 | l6[20]=parity-first-6 86 | for layout in 0 1 2 3 4 5 8 9 10 16 17 18 19 20 0 87 | do 88 | mdadm -G $md0 --layout=${l6[$layout]} 89 | checkgeo md0 raid6 5 $[512*1024] $layout 90 | dotest 3 91 | done 92 | -------------------------------------------------------------------------------- /tests/07reshape5intr: -------------------------------------------------------------------------------- 1 | 2 | # 3 | # test interrupting and restarting raid5 reshape.
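# (Note, not part of the test: the global sysctls below are what this script
#  uses to slow the reshape down so it can be interrupted mid-flight, and to
#  speed it back up once the array has been re-assembled.)
# echo 20   > /proc/sys/dev/raid/speed_limit_max    # throttle during the interrupt window
# echo 2000 > /proc/sys/dev/raid/speed_limit_max    # restore so the reshape can finish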
4 | set -x 5 | devs="$dev1" 6 | st=UU 7 | for disks in 2 3 4 5 8 | do 9 | eval devs=\"$devs \$dev$disks\" 10 | st=U$st 11 | for d in $devs 12 | do dd if=/dev/urandom of=$d bs=1024 || true 13 | done 14 | 15 | case $disks in 16 | 2 | 3) chunk=1024;; 17 | 4 ) chunk=512;; 18 | 5 ) chunk=256;; 19 | esac 20 | 21 | mdadm -CR $md0 -amd -l5 -c $chunk -n$disks --assume-clean $devs 22 | mdadm $md0 --add $dev6 23 | echo 20 > /proc/sys/dev/raid/speed_limit_min 24 | echo 20 > /proc/sys/dev/raid/speed_limit_max 25 | mdadm --grow $md0 -n $[disks+1] 26 | check reshape 27 | check state $st 28 | mdadm --stop $md0 29 | mdadm --assemble $md0 $devs $dev6 30 | check reshape 31 | echo 1000 > /proc/sys/dev/raid/speed_limit_min 32 | echo 2000 > /proc/sys/dev/raid/speed_limit_max 33 | check wait 34 | 35 | max=5 36 | 37 | for ((i = 0 ; i < max ; i++ )); do 38 | if echo check > /sys/block/md0/md/sync_action; then 39 | break; 40 | fi 41 | sleep 2 42 | done 43 | 44 | if [ $i -eq $max ]; then 45 | echo >&2 "Timeout waiting for check to succeed" 46 | exit 1 47 | fi 48 | 49 | check wait 50 | mm=`cat /sys/block/md0/md/mismatch_cnt` 51 | if [ $mm -gt 0 ] 52 | then echo >&2 "ERROR mismatch_cnt non-zero : $mm" ; exit 1 53 | fi 54 | mdadm -S $md0 55 | done 56 | -------------------------------------------------------------------------------- /tests/07revert-grow: -------------------------------------------------------------------------------- 1 | set -e -x 2 | 3 | # revert a reshape that is increasing the number of devices, 4 | # raid5, raid6, and raid10 5 | 6 | # metadata 0.90 cannot handle RAID10 growth 7 | # metadata 1.0 doesn't get a default headspace, so don't try it either. 8 | 9 | for metadata in 0.90 1.1 1.2 10 | do 11 | # RAID5 12 | mdadm -CR --assume-clean $md0 -l5 -n4 -x1 $devlist4 --metadata=$metadata 13 | check raid5 14 | testdev $md0 3 $mdsize1 512 15 | mdadm -G $md0 -n 5 16 | sleep 3 17 | mdadm -S $md0 18 | mdadm -A $md0 --update=revert-reshape $devlist4 --backup-file=/tmp/md-backup 19 | check wait 20 | check raid5 21 | testdev $md0 3 $mdsize1 512 22 | mdadm -S $md0 23 | 24 | # RAID6 25 | mdadm -CR --assume-clean $md0 -l6 -n4 -x1 $devlist4 --metadata=$metadata 26 | check raid6 27 | testdev $md0 2 $mdsize1 512 28 | mdadm -G $md0 -n 5 29 | sleep 3 30 | mdadm -S $md0 31 | mdadm -A $md0 --update=revert-reshape $devlist4 --backup-file=/tmp/md-backup 32 | check wait 33 | check raid6 34 | testdev $md0 2 $mdsize1 512 35 | mdadm -S $md0 36 | 37 | if [ $metadata = 0.90 ]; then continue; fi 38 | 39 | # RAID10 40 | mdadm -CR --assume-clean $md0 -l10 -n4 -x1 $devlist4 --metadata=$metadata 41 | check raid10 42 | testdev $md0 2 $mdsize1 512 43 | mdadm -G $md0 -n 5 44 | sleep 3 45 | mdadm -S $md0 46 | mdadm -A $md0 --update=revert-reshape $devlist4 47 | check wait 48 | check raid10 49 | testdev $md0 2 $mdsize1 512 50 | mdadm -S $md0 51 | 52 | done 53 | -------------------------------------------------------------------------------- /tests/07revert-grow.broken: -------------------------------------------------------------------------------- 1 | always fails 2 | 3 | This patch, recently added to md-next, causes the test to always fail: 4 | 5 | 7e6ba434cc60 ("md: don't unregister sync_thread with reconfig_mutex held") 6 | 7 | The errors are: 8 | 9 | mdadm: No active reshape to revert on /dev/loop0 10 | ERROR: active raid5 not found 11 | 12 | Before the patch, the error seen is below.
13 | 14 | -- 15 | 16 | fails rarely 17 | 18 | Fails about 1 in every 30 runs with errors: 19 | 20 | mdadm: Merging with already-assembled /dev/md/0 21 | mdadm: backup file /tmp/md-backup inaccessible: No such file or directory 22 | mdadm: failed to add /dev/loop1 to /dev/md/0: Invalid argument 23 | mdadm: failed to add /dev/loop2 to /dev/md/0: Invalid argument 24 | mdadm: failed to add /dev/loop3 to /dev/md/0: Invalid argument 25 | mdadm: failed to add /dev/loop0 to /dev/md/0: Invalid argument 26 | mdadm: /dev/md/0 assembled from 1 drive - need all 5 to start it 27 | (use --run to insist). 28 | 29 | grep: /sys/block/md*/md/sync_action: No such file or directory 30 | 31 | ERROR: active raid5 not found 32 | -------------------------------------------------------------------------------- /tests/07revert-inplace: -------------------------------------------------------------------------------- 1 | set -e -x 2 | 3 | # revert a reshape that is not changing the number of data devices, 4 | # raid5, raid6, and raid10 5 | 6 | # RAID5 -> RAID6 7 | mdadm -CR --assume-clean $md0 -l5 -n4 -x1 $devlist4 8 | check raid5 9 | testdev $md0 3 $mdsize1 512 10 | mdadm -G $md0 -l 6 11 | sleep 2 12 | mdadm -S $md0 13 | mdadm -A $md0 --update=revert-reshape $devlist4 --backup-file=/tmp/md-backup 14 | check wait 15 | check raid6 16 | check algorithm 18 17 | testdev $md0 3 $mdsize1 512 18 | mdadm -S $md0 19 | 20 | # RAID6 -> RAID5 21 | mdadm -CR --assume-clean $md0 -l6 -n5 $devlist4 22 | check raid6 23 | testdev $md0 3 $mdsize1 512 24 | mdadm -G $md0 -l 5 25 | sleep 2 26 | mdadm -S $md0 27 | mdadm -A $md0 --update=revert-reshape $devlist4 --backup-file=/tmp/md-backup 28 | check wait 29 | check raid6 30 | testdev $md0 3 $mdsize1 512 31 | mdadm -S $md0 32 | 33 | # RAID10 - decrease chunk size 34 | mdadm -CR --assume-clean $md0 -l10 -n6 -c 64 $devlist5 35 | check raid10 36 | testdev $md0 3 $mdsize1 64 37 | mdadm -G $md0 -c 32 38 | sleep 2 39 | mdadm -S $md0 40 | mdadm -A $md0 --update=revert-reshape $devlist5 41 | check wait 42 | check raid10 43 | testdev $md0 3 $mdsize1 64 44 | mdadm -S $md0 45 | -------------------------------------------------------------------------------- /tests/07revert-inplace.broken: -------------------------------------------------------------------------------- 1 | always fails 2 | 3 | Fails with errors: 4 | ++ /usr/sbin/mdadm -A /dev/md0 --update=revert-reshape /dev/loop0 /dev/loop1 /dev/loop2 /dev/loop3 /dev/loop4 --backup-file=/tmp/md-backup 5 | ++ rv=1 6 | ++ case $* in 7 | ++ cat /var/tmp/stderr 8 | mdadm: failed to RUN_ARRAY /dev/md0: Invalid argument 9 | -------------------------------------------------------------------------------- /tests/07revert-shrink: -------------------------------------------------------------------------------- 1 | set -e -x 2 | 3 | # revert a reshape that is decreasing the number of devices, 4 | # raid5, raid6, and raid10 5 | 6 | bu=$targetdir/md-backup 7 | rm -f $bu 8 | # RAID5 9 | mdadm -CR --assume-clean $md0 -l5 -n5 $devlist4 10 | check raid5 11 | testdev $md0 4 $mdsize1 512 12 | mdadm --grow $md0 --array-size 56832 13 | testdev $md0 3 $mdsize1 512 14 | mdadm -G $md0 -n 4 --backup=$bu 15 | sleep 3 16 | mdadm -S $md0 17 | mdadm -A $md0 --update=revert-reshape $devlist4 --backup-file=$bu 18 | check wait 19 | check raid5 20 | fsck -f -n $md0 21 | testdev $md0 4 $mdsize1 512 22 | mdadm -S $md0 23 | 24 | #FIXME 25 | rm -f $bu 26 | # RAID6 27 | mdadm -CR --assume-clean $md0 -l6 -n5 $devlist4 28 | check raid6 29 | testdev $md0 3 $mdsize1 512 30 | mdadm --grow $md0 
--array-size 37888 31 | testdev $md0 2 $mdsize1 512 32 | mdadm -G $md0 -n 4 --backup=$bu 33 | sleep 2 34 | mdadm -S $md0 35 | mdadm -A $md0 --update=revert-reshape $devlist4 --backup-file=$bu 36 | check wait 37 | check raid6 38 | fsck -f -n $md0 39 | testdev $md0 3 $mdsize1 512 40 | mdadm -S $md0 41 | 42 | # RAID10 43 | mdadm -CR --assume-clean $md0 -l10 -n6 $devlist5 44 | check raid10 45 | testdev $md0 3 $mdsize1 512 46 | mdadm --grow $md0 --array-size 36864 47 | testdev $md0 2 $mdsize1 512 48 | mdadm -G $md0 -n 4 49 | sleep 3 50 | mdadm -S $md0 51 | mdadm -A $md0 --update=revert-reshape $devlist5 52 | check wait 53 | check raid10 54 | fsck -f -n $md0 55 | testdev $md0 3 $mdsize1 512 56 | mdadm -S $md0 57 | -------------------------------------------------------------------------------- /tests/07revert-shrink.broken: -------------------------------------------------------------------------------- 1 | always fails 2 | 3 | Fails with errors: 4 | 5 | mdadm: this change will reduce the size of the array. 6 | use --grow --array-size first to truncate array. 7 | e.g. mdadm --grow /dev/md0 --array-size 53760 8 | 9 | ERROR: active raid5 not found 10 | -------------------------------------------------------------------------------- /tests/07testreshape5: -------------------------------------------------------------------------------- 1 | 2 | # 3 | # test the reshape code by using test_reshape and the 4 | # kernel md code to move data into and out of variously 5 | # shaped md arrays. 6 | set -x 7 | dir="." 8 | 9 | [ -e $dir/test_stripe ] || skip "test_stripes binary has not been compiled, skipping" 10 | 11 | layouts=(la ra ls rs) 12 | for level in 5 6 13 | do 14 | for chunk in 4 8 16 32 64 128 15 | do 16 | devs="$dev1" 17 | for disks in 2 3 4 5 6 18 | do 19 | eval devs=\"$devs \$dev$disks\" 20 | if [ " $level $disks" = " 6 3" -o " $level $disks" = " 6 2" ] 21 | then continue 22 | fi 23 | for nlayout in 0 1 2 3 24 | do 25 | layout=${layouts[$nlayout]} 26 | 27 | size=$[chunk*(disks-(level-4))*disks] 28 | 29 | # test restore: make a raid5 from a file, then do a compare 30 | dd if=/dev/urandom of=/tmp/RandFile bs=1024 count=$size 31 | $dir/test_stripe restore /tmp/RandFile $disks $[chunk*1024] $level $nlayout 0 $[size*1024] $devs 32 | mdadm -CR -e 1.0 $md0 -amd -l$level -n$disks --assume-clean -c $chunk -p $layout $devs 33 | cmp -s -n $[size*1024] $md0 /tmp/RandFile || { echo cmp failed ; exit 2; } 34 | 35 | # FIXME check parity 36 | 37 | # test save 38 | dd if=/dev/urandom of=$md0 bs=1024 count=$size 39 | blockdev --flushbufs $md0 $devs; sync 40 | > /tmp/NewRand 41 | $dir/test_stripe save /tmp/NewRand $disks $[chunk*1024] $level $nlayout 0 $[size*1024] $devs 42 | cmp -s -n $[size*1024] $md0 /tmp/NewRand || { echo cmp failed ; exit 2; } 43 | mdadm -S $md0 44 | udevadm settle 45 | done 46 | done 47 | done 48 | done 49 | exit 0 50 | -------------------------------------------------------------------------------- /tests/09imsm-assemble: -------------------------------------------------------------------------------- 1 | # validate the prodigal member disk scenario i.e. 
a former container 2 | # member is returned after having been rebuilt on another system 3 | 4 | 5 | imsm_check_hold() { 6 | if mdadm --remove $1 $2; then 7 | echo "$2 removal from $1 should have been blocked" >&2 8 | cat /proc/mdstat >&2 9 | mdadm -E $2 10 | exit 1 11 | fi 12 | } 13 | 14 | imsm_check_removal() { 15 | local ret=5 16 | local success=0 17 | 18 | for ((i=1; i<=ret; i++)); do 19 | if mdadm --remove "$1" "$2"; then 20 | success=1 21 | break 22 | fi 23 | sleep 2 24 | done 25 | 26 | if [ $success -ne 1 ]; then 27 | echo "$2 removal from $1 should have succeeded" >&2 28 | cat /proc/mdstat >&2 29 | mdadm -E "$2" 30 | exit 1 31 | fi 32 | } 33 | 34 | export IMSM_DEVNAME_AS_SERIAL=1 35 | export IMSM_TEST_OROM=1 36 | export IMSM_NO_PLATFORM=1 37 | container=/dev/md/container 38 | member=/dev/md/vol0 39 | 40 | 41 | num_disks=4 42 | size=$((10*1024)) 43 | mdadm -CR $container -e imsm -n $num_disks $dev0 $dev1 $dev2 $dev3 44 | mdadm -CR $member $dev0 $dev2 -n 2 -l 1 -z $size 45 | mdadm --wait $member || true 46 | mdadm -Ss 47 | 48 | # make dev0 and dev1 a new rebuild family 49 | mdadm -A $container $dev0 $dev1 50 | mdadm -IR $container 51 | mdadm --wait ${member}_0 || true 52 | mdadm -Ss 53 | 54 | # make dev2 and dev3 a new rebuild family 55 | mdadm -A $container $dev2 $dev3 56 | mdadm -IR $container 57 | mdadm --wait ${member}_0 || true 58 | mdadm -Ss 59 | 60 | # reassemble and make sure one of the families falls out 61 | mdadm -A $container $dev0 $dev1 $dev2 $dev3 62 | mdadm -IR $container 63 | testdev ${member}_0 1 $size 64 64 | if mdadm --remove $container $dev0 ; then 65 | # the dev[23] family won 66 | imsm_check_removal $container $dev1 67 | imsm_check_hold $container $dev2 68 | imsm_check_hold $container $dev3 69 | else 70 | # the dev[01] family won 71 | imsm_check_hold $container $dev1 72 | imsm_check_removal $container $dev2 73 | imsm_check_removal $container $dev3 74 | fi 75 | mdadm -Ss 76 | 77 | # reassemble with a new id for the dev[23] family 78 | mdadm -A $container $dev0 $dev1 79 | mdadm -IR $container 80 | mdadm -A ${container}2 $dev2 $dev3 --update=uuid 81 | mdadm -IR ${container}2 82 | 83 | testdev ${member}_0 1 $size 64 84 | testdev ${member}_1 1 $size 64 85 | -------------------------------------------------------------------------------- /tests/09imsm-create-fail-rebuild: -------------------------------------------------------------------------------- 1 | # sanity check array creation 2 | 3 | imsm_check_hold() { 4 | if mdadm --remove $1 $2; then 5 | echo "$2 removal from $1 should have been blocked" >&2 6 | cat /proc/mdstat >&2 7 | mdadm -E $2 8 | exit 1 9 | fi 10 | } 11 | 12 | imsm_check_removal() { 13 | if ! mdadm --remove $1 $2 ; then 14 | echo "$2 removal from $1 should have succeeded" >&2 15 | cat /proc/mdstat >&2 16 | mdadm -E $2 17 | exit 1 18 | fi 19 | } 20 | 21 | . 
tests/env-imsm-template 22 | 23 | # IMSM rounds to multiples of one mebibyte - 1024K 24 | DEV_ROUND_K=1024 25 | 26 | num_disks=2 27 | mdadm -CR $container -e imsm -n $num_disks $dev0 $dev1 28 | imsm_check container $num_disks 29 | 30 | # RAID0 + RAID1 31 | size=9000 32 | level=0 33 | chunk=64 34 | offset=0 35 | mdadm -CR $member0 $dev0 $dev1 -n $num_disks -l $level -z $size -c $chunk 36 | imsm_check member $member0 $num_disks $level $size $((size*2)) $offset $chunk 37 | testdev $member0 $num_disks $size $chunk 38 | 39 | offset=$(((size & ~(1024 - 1)) + 4096)) 40 | size=4000 41 | level=1 42 | chunk=0 43 | mdadm -CR $member1 $dev0 $dev1 -n $num_disks -l $level -z $size 44 | imsm_check member $member1 $num_disks $level $size $size $offset $chunk 45 | testdev $member1 1 $size 64 46 | check wait 47 | 48 | mdadm -Ss 49 | 50 | # RAID10 + RAID5 51 | num_disks=4 52 | mdadm -CR $container -e imsm -n $num_disks $dev0 $dev1 $dev2 $dev3 53 | imsm_check container $num_disks 54 | 55 | size=9000 56 | level=10 57 | chunk=64 58 | offset=0 59 | mdadm -CR $member0 $dev0 $dev1 $dev2 $dev3 -n $num_disks -l $level -z $size -c $chunk 60 | imsm_check member $member0 $num_disks $level $size $((size*2)) $offset $chunk 61 | testdev $member0 $((num_disks-2)) $size $chunk 62 | 63 | offset=$(((size & ~(1024 - 1)) + 4096)) 64 | size=4000 65 | level=5 66 | mdadm -CR $member1 $dev0 $dev1 $dev2 $dev3 -n $num_disks -l $level -z $size -c $chunk 67 | imsm_check member $member1 $num_disks $level $size $((size*3)) $offset $chunk 68 | testdev $member1 $((num_disks-1)) $size $chunk 69 | check wait 70 | 71 | # FAIL / REBUILD 72 | imsm_check_hold $container $dev0 73 | mdadm --fail $member0 $dev0 74 | mdadm --wait-clean --scan || true 75 | imsm_check_removal $container $dev0 76 | mdadm --add $container $dev4 77 | check wait 78 | imsm_check_hold $container $dev4 79 | -------------------------------------------------------------------------------- /tests/09imsm-create-fail-rebuild.broken: -------------------------------------------------------------------------------- 1 | always fails 2 | 3 | Fails with error: 4 | 5 | **Error**: Array size mismatch - expected 3072, actual 16384 6 | -------------------------------------------------------------------------------- /tests/09imsm-overlap.broken: -------------------------------------------------------------------------------- 1 | always fails 2 | 3 | Fails with errors: 4 | 5 | **Error**: Offset mismatch - expected 15360, actual 0 6 | **Error**: Offset mismatch - expected 15360, actual 0 7 | /dev/md/vol3 failed check 8 | -------------------------------------------------------------------------------- /tests/10ddf-assemble-missing: -------------------------------------------------------------------------------- 1 | # An array is assembled incompletely. 2 | # Re missing disks get marked as missing and are not allowed back in 3 | 4 | . 
tests/env-ddf-template 5 | tmp=$(mktemp /tmp/mdtest-XXXXXX) 6 | rm -f $tmp /var/tmp/mdmon.log 7 | ret=0 8 | 9 | mdadm -CR $container -e ddf -n 4 $dev8 $dev9 $dev10 $dev11 10 | ddf_check container 4 11 | 12 | mdadm -CR $member1 -n 4 -l 10 $dev8 $dev10 $dev9 $dev11 -z 10000 13 | mdadm -CR $member0 -n 2 -l 1 $dev8 $dev9 -z 10000 14 | 15 | mdadm --wait $member0 || true 16 | mdadm --wait $member1 || true 17 | 18 | mdadm -Ss 19 | sleep 2 20 | 21 | # Add all devices except those for $member0 22 | mdadm -I $dev10 23 | mdadm -I $dev11 24 | 25 | # Start runnable members 26 | mdadm -IRs || true 27 | mdadm -Ss 28 | 29 | #[ -f /var/tmp/mdmon.log ] && cat /var/tmp/mdmon.log 30 | 31 | # Now reassemble 32 | # This should work because BVDs weren't written to 33 | for d in $dev8 $dev9 $dev10 $dev11; do 34 | mdadm -I $d 35 | done 36 | mdadm -Ss 37 | 38 | # Expect consistent state 39 | for d in $dev10 $dev11; do 40 | mdadm -E $d>$tmp 41 | egrep 'state\[0\] : Degraded, Consistent' $tmp || { 42 | ret=1 43 | echo ERROR: $member0 has unexpected state on $d 44 | } 45 | egrep 'state\[1\] : Optimal, Consistent' $tmp || { 46 | ret=1 47 | echo ERROR: $member1 has unexpected state on $d 48 | } 49 | 50 | if [ x$(egrep -c 'active/Online$' $tmp) != x2 ]; then 51 | ret=1 52 | echo ERROR: unexpected number of online disks on $d 53 | fi 54 | done 55 | 56 | if [ $ret -ne 0 ]; then 57 | mdadm -E $dev10 58 | mdadm -E $dev8 59 | fi 60 | rm -f $tmp /var/tmp/mdmon.log 61 | [ $ret -eq 0 ] 62 | -------------------------------------------------------------------------------- /tests/10ddf-assemble-missing.broken: -------------------------------------------------------------------------------- 1 | always fails 2 | 3 | Fails with errors: 4 | 5 | ERROR: /dev/md/vol0 has unexpected state on /dev/loop10 6 | ERROR: unexpected number of online disks on /dev/loop10 7 | -------------------------------------------------------------------------------- /tests/10ddf-create: -------------------------------------------------------------------------------- 1 | # 2 | # Test basic DDF functionality. 3 | # 4 | # Create a container with 5 drives 5 | # create a small raid0 across them all, 6 | # then a small raid10 using 4 drives, then a 2disk raid1 7 | # and a 3disk raid5 using the remaining space 8 | # 9 | # add some data, tear down the array, reassemble 10 | # and make sure it is still there. 11 | set -e 12 | . 
tests/env-ddf-template 13 | sda=$(get_rootdev) || exit 1 14 | 15 | mdadm -CR /dev/md/ddf0 -e ddf -n 5 $dev8 $dev9 $dev10 $dev11 $dev12 16 | mdadm -CR r5 -l5 -n5 /dev/md/ddf0 -z 5000 17 | if mdadm -CR r5 -l1 -n2 /dev/md/ddf0 -z 5000 18 | then echo >&2 create with same name should fail ; exit 1 19 | fi 20 | mdadm -CR r10 -l10 -n4 -pn2 /dev/md/ddf0 -z 5000 21 | mdadm -CR r1 -l1 -n2 /dev/md/ddf0 22 | mdadm -CR r0 -l0 -n3 /dev/md/ddf0 23 | testdev /dev/md/r5 4 5000 512 24 | testdev /dev/md/r10 2 5000 512 25 | # r0/r10 will use 4608 due to chunk size, so that leaves 23552 for the rest 26 | testdev /dev/md/r1 1 23552 64 27 | testdev /dev/md/r0 3 23552 512 28 | dd if=$sda of=/dev/md/r0 || true 29 | dd if=$sda of=/dev/md/r10 || true 30 | dd if=$sda of=/dev/md/r1 || true 31 | dd if=$sda of=/dev/md/r5 || true 32 | 33 | s0=`sha1sum /dev/md/r0` 34 | s10=`sha1sum /dev/md/r10` 35 | s1=`sha1sum /dev/md/r1` 36 | s5=`sha1sum /dev/md/r5` 37 | 38 | 39 | mdadm -Ss 40 | mdadm -A /dev/md/ddf0 $dev8 $dev9 $dev10 $dev11 $dev12 41 | mdadm -I /dev/md/ddf0 42 | 43 | udevadm settle 44 | s0a=`sha1sum /dev/md/r0` 45 | s10a=`sha1sum /dev/md/r10` 46 | s1a=`sha1sum /dev/md/r1` 47 | s5a=`sha1sum /dev/md/r5` 48 | 49 | if [ "$s0" != "$s0a" ]; then 50 | echo r0 did not match ; exit 1; 51 | fi 52 | if [ "$s10" != "$s10a" ]; then 53 | echo r10 did not match ; exit 1; 54 | fi 55 | if [ "$s1" != "$s1a" ]; then 56 | echo r1 did not match ; exit 1; 57 | fi 58 | if [ "$s5" != "$s5a" ]; then 59 | echo r5 did not match ; exit 1; 60 | fi 61 | 62 | # failure status just means it has completed already, so ignore it. 63 | mdadm --wait /dev/md/r1 || true 64 | mdadm --wait /dev/md/r10 || true 65 | mdadm --wait /dev/md/r5 || true 66 | 67 | mdadm -Dbs > /var/tmp/mdadm.conf 68 | 69 | mdadm -Ss 70 | 71 | # Now try to assemble using mdadm.conf 72 | mdadm -Asc /var/tmp/mdadm.conf 73 | check nosync # This failed once. The raid5 was resyncing. 74 | udevadm settle 75 | mdadm -Dbs | sort > /tmp/mdadm.conf 76 | sort /var/tmp/mdadm.conf | diff /tmp/mdadm.conf - 77 | mdadm -Ss 78 | 79 | # and now assemble fully incrementally. 80 | for i in $dev8 $dev9 $dev10 $dev11 $dev12 81 | do 82 | mdadm -I $i -c /var/tmp/mdadm.conf 83 | done 84 | check nosync 85 | udevadm settle 86 | mdadm -Dbs | sort > /tmp/mdadm.conf 87 | sort /var/tmp/mdadm.conf | diff /tmp/mdadm.conf - 88 | mdadm -Ss 89 | rm /tmp/mdadm.conf /var/tmp/mdadm.conf 90 | -------------------------------------------------------------------------------- /tests/10ddf-create-fail-rebuild: -------------------------------------------------------------------------------- 1 | # sanity check array creation 2 | 3 | ddf_check_hold() { 4 | if mdadm --remove $1 $2; then 5 | echo "$2 removal from $1 should have been blocked" >&2 6 | cat /proc/mdstat >&2 7 | mdadm -E $2 8 | exit 1 9 | fi 10 | } 11 | 12 | ddf_check_removal() { 13 | if ! mdadm --remove $1 $2 ; then 14 | echo "$2 removal from $1 should have succeeded" >&2 15 | cat /proc/mdstat >&2 16 | mdadm -E $2 17 | exit 1 18 | fi 19 | } 20 | 21 | . 
tests/env-ddf-template 22 | 23 | num_disks=2 24 | mdadm -CR $container -e ddf -n $num_disks $dev8 $dev9 25 | ddf_check container $num_disks 26 | 27 | # RAID0 + RAID1 28 | size=9000 29 | level=0 30 | chunk=64 31 | offset=0 32 | layout=0 33 | mdadm -CR $member0 $dev8 $dev9 -n $num_disks -l $level -z $size -c $chunk 34 | ddf_check member $member0 $num_disks $level $size $((size*2)) $offset $chunk $layout 35 | testdev $member0 $num_disks $size $chunk 36 | 37 | offset=$(((size & ~(chunk - 1)))) 38 | size=4000 39 | level=1 40 | chunk=0 41 | mdadm -CR $member1 $dev8 $dev9 -n $num_disks -l $level -z $size 42 | ddf_check member $member1 $num_disks $level $size $size $offset $chunk $layout 43 | testdev $member1 1 $size 1 44 | check wait 45 | 46 | mdadm -Ss 47 | 48 | # RAID10 + RAID5 49 | num_disks=4 50 | mdadm -CR $container -e ddf -n $num_disks $dev8 $dev9 $dev10 $dev11 51 | ddf_check container $num_disks 52 | 53 | size=9000 54 | level=10 55 | chunk=64 56 | offset=0 57 | layout=2 58 | mdadm -CR $member0 $dev8 $dev9 $dev10 $dev11 -n $num_disks -l $level -z $size -c $chunk 59 | ddf_check member $member0 $num_disks $level $size $((size*2)) $offset $chunk $layout 60 | testdev $member0 $((num_disks-2)) $size $chunk 61 | 62 | offset=$(((size & ~(chunk - 1)))) 63 | size=4000 64 | level=5 65 | mdadm -CR $member1 $dev8 $dev9 $dev10 $dev11 -n $num_disks -l $level -z $size -c $chunk 66 | ddf_check member $member1 $num_disks $level $size $((size*3)) $offset $chunk $layout 67 | testdev $member1 $((num_disks-1)) $size $chunk 68 | check wait 69 | 70 | # FAIL / REBUILD 71 | ddf_check_hold $container $dev8 72 | mdadm --fail $member0 $dev8 73 | mdadm --wait-clean --scan || true 74 | ddf_check_removal $container $dev8 75 | mdadm --add $container $dev12 76 | check wait 77 | ddf_check_hold $container $dev12 78 | -------------------------------------------------------------------------------- /tests/10ddf-fail-create-race: -------------------------------------------------------------------------------- 1 | # This test creates a RAID1, fails a disk, and immediately 2 | # (simultaneously) creates a new array. This tests for a possible 3 | # race where the meta data reflecting the disk failure may not 4 | # be written when the 2nd array is created. 5 | . tests/env-ddf-template 6 | 7 | mdadm --zero-superblock $dev8 $dev9 $dev10 $dev11 $dev12 $dev13 8 | 9 | mdadm -CR $container -e ddf -l container -n 2 $dev11 $dev12 10 | #$dir/mdadm -CR $member0 -l raid1 -n 2 $container -z 10000 >/tmp/mdmon.txt 2>&1 11 | mdadm -CR $member0 -l raid1 -n 2 $container -z 10000 12 | check wait 13 | fail0=$dev11 14 | mdadm --fail $member0 $fail0 & 15 | 16 | # The test can succeed two ways: 17 | # 1) mdadm -C member1 fails - in this case the meta data 18 | # was already on disk when the create attempt was made 19 | # 2) mdadm -C succeeds in the first place (meta data not on disk yet), 20 | # but mdmon detects the problem and sets the disk faulty. 21 | 22 | if mdadm -CR $member1 -l raid1 -n 2 $container; then 23 | 24 | echo create should have failed / race condition? 
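# (Annotation, not in the original test: reaching this branch means the
#  create raced ahead of the on-disk failure record, which the comment above
#  allows. The checks that follow verify that mdmon caught up afterwards:
#  $fail0 must no longer be a member of either array, both member states must
#  read "Degraded, Consistent", and the physical-disk entry must show
#  "Offline, Failed".)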
25 | 26 | check wait 27 | set -- $(get_raiddisks $member0) 28 | d0=$1 29 | ret=0 30 | if [ $1 = $fail0 -o $2 = $fail0 ]; then 31 | ret=1 32 | else 33 | set -- $(get_raiddisks $member1) 34 | if [ $1 = $fail0 -o $2 = $fail0 ]; then 35 | ret=1 36 | fi 37 | fi 38 | if [ $ret -eq 1 ]; then 39 | echo ERROR: failed disk $fail0 is still a RAID member 40 | echo $member0: $(get_raiddisks $member0) 41 | echo $member1: $(get_raiddisks $member1) 42 | fi 43 | tmp=$(mktemp /tmp/mdest-XXXXXX) 44 | mdadm -E $d0 >$tmp 45 | if [ x$(grep -c 'state\[[01]\] : Degraded' $tmp) != x2 ]; then 46 | echo ERROR: non-degraded array found 47 | mdadm -E $d0 48 | ret=1 49 | fi 50 | if ! grep -q '^ *0 *[0-9a-f]\{8\} .*Offline, Failed' $tmp; then 51 | echo ERROR: disk 0 not marked as failed in meta data 52 | mdadm -E $d0 53 | ret=1 54 | fi 55 | rm -f $tmp 56 | else 57 | ret=0 58 | fi 59 | 60 | [ -f /tmp/mdmon.txt ] && { 61 | cat /tmp/mdmon.txt 62 | rm -f /tmp/mdmon.txt 63 | } 64 | 65 | [ $ret -eq 0 ] 66 | 67 | -------------------------------------------------------------------------------- /tests/10ddf-fail-create-race.broken: -------------------------------------------------------------------------------- 1 | usually fails 2 | 3 | Fails about 9 out of 10 times with many errors: 4 | 5 | mdadm: cannot open MISSING: No such file or directory 6 | ERROR: non-degraded array found 7 | ERROR: disk 0 not marked as failed in meta data 8 | -------------------------------------------------------------------------------- /tests/10ddf-fail-readd: -------------------------------------------------------------------------------- 1 | # Simple fail / re-add test 2 | . tests/env-ddf-template 3 | 4 | tmp=$(mktemp /tmp/mdtest-XXXXXX) 5 | rm -f $tmp 6 | 7 | mdadm --zero-superblock $dev8 $dev9 8 | mdadm -CR $container -e ddf -l container -n 2 $dev8 $dev9 9 | 10 | mdadm -CR $member0 -l raid1 -n 2 $container 11 | #$dir/mdadm -CR $member0 -l raid1 -n 2 $container >/tmp/mdmon.txt 2>&1 12 | 13 | mke2fs -F $member0 14 | check wait 15 | 16 | set -- $(get_raiddisks $member0) 17 | fail0=$1 18 | mdadm $member0 --fail $fail0 19 | 20 | sleep 2 21 | mdadm $container --remove $fail0 22 | 23 | set -- $(get_raiddisks $member0) 24 | case $1 in MISSING) shift;; esac 25 | good0=$1 26 | 27 | # We re-add the disk now 28 | mdadm $container --add $fail0 29 | 30 | sleep 2 31 | mdadm --wait $member0 || true 32 | 33 | ret=0 34 | set -- $(get_raiddisks $member0) 35 | case $1:$2 in 36 | $dev8:$dev9|$dev9:$dev8);; 37 | *) echo ERROR: bad raid disks "$@"; ret=1;; 38 | esac 39 | 40 | mdadm -Ss 41 | for x in $@; do 42 | mdadm -E $x >$tmp 43 | if ! grep -q 'state\[0\] : Optimal, Consistent' $tmp; then 44 | echo ERROR: member 0 should be optimal in meta data on $x 45 | ret=1 46 | fi 47 | done 48 | 49 | rm -f $tmp 50 | if [ $ret -ne 0 ]; then 51 | mdadm -E $dev8 52 | mdadm -E $dev9 53 | fi 54 | 55 | [ $ret -eq 0 ] 56 | -------------------------------------------------------------------------------- /tests/10ddf-fail-readd-readonly: -------------------------------------------------------------------------------- 1 | # Simple fail / re-add test 2 | . 
tests/env-ddf-template 3 | 4 | tmp=$(mktemp /tmp/mdtest-XXXXXX) 5 | rm -f $tmp 6 | 7 | mdadm --zero-superblock $dev8 $dev9 8 | mdadm -CR $container -e ddf -l container -n 2 $dev8 $dev9 9 | 10 | mdadm -CR $member0 -l raid1 -n 2 $container 11 | #$dir/mdadm -CR $member0 -l raid1 -n 2 $container >/tmp/mdmon.txt 2>&1 12 | 13 | check wait 14 | 15 | set -- $(get_raiddisks $member0) 16 | fail0=$1 17 | mdadm $member0 --fail $fail0 18 | 19 | sleep 2 20 | set -- $(get_raiddisks $member0) 21 | case $1 in MISSING) shift;; esac 22 | good0=$1 23 | 24 | # Check that the meta data now show one disk as failed 25 | ret=0 26 | for x in $@; do 27 | mdadm -E $x >$tmp 28 | if ! grep -q 'state\[0\] : Degraded, Consistent' $tmp; then 29 | echo ERROR: member 0 should be degraded in meta data on $x 30 | ret=1 31 | fi 32 | phys=$(grep $x $tmp) 33 | case $x:$phys in 34 | $fail0:*active/Offline,\ Failed);; 35 | $good0:*active/Online);; 36 | *) echo ERROR: wrong phys disk state for $x 37 | ret=1 38 | ;; 39 | esac 40 | done 41 | 42 | mdadm $container --remove $fail0 43 | 44 | # We re-add the disk now 45 | mdadm $container --add $fail0 46 | 47 | sleep 2 48 | mdadm --wait $member0 || true 49 | 50 | set -- $(get_raiddisks $member0) 51 | case $1:$2 in 52 | $dev8:$dev9|$dev9:$dev8);; 53 | *) echo ERROR: bad raid disks "$@"; ret=1;; 54 | esac 55 | 56 | mdadm -Ss 57 | for x in $@; do 58 | mdadm -E $x >$tmp 59 | if ! grep -q 'state\[0\] : Optimal, Consistent' $tmp; then 60 | echo ERROR: member 0 should be optimal in meta data on $x 61 | ret=1 62 | fi 63 | done 64 | 65 | rm -f $tmp 66 | if [ $ret -ne 0 ]; then 67 | mdadm -E $dev8 68 | mdadm -E $dev9 69 | fi 70 | 71 | [ $ret -eq 0 ] 72 | -------------------------------------------------------------------------------- /tests/10ddf-fail-spare: -------------------------------------------------------------------------------- 1 | # Test suggested by Albert Pauw: Create, fail one disk, have mdmon 2 | # activate the spare, 3 | # then run create again. Shouldn't use the failed disk for Create, 4 | . tests/env-ddf-template 5 | 6 | tmp=$(mktemp /tmp/mdtest-XXXXXX) 7 | rm -f $tmp 8 | 9 | mdadm --zero-superblock $dev8 $dev9 $dev10 $dev11 $dev12 $dev13 10 | mdadm -CR $container -e ddf -l container -n 5 $dev8 $dev9 $dev10 $dev11 $dev12 11 | 12 | mdadm -CR $member0 -l raid1 -n 2 $container 13 | #$dir/mdadm -CR $member0 -l raid1 -n 2 $container >/tmp/mdmon.txt 2>&1 14 | 15 | check wait 16 | 17 | set -- $(get_raiddisks $member0) 18 | fail0=$1 19 | mdadm --fail $member0 $fail0 20 | 21 | # To make sure the spare is activated, we may have to sleep 22 | # 2s has always been enough for me 23 | sleep 2 24 | check wait 25 | 26 | # This test can succeed both ways - if spare was activated 27 | # before new array was created, we see only member 0. 28 | # otherwise, we see both, adn member0 is degraded because the 29 | # new array grabbed the spare 30 | # which case occurs depends on the sleep time above. 31 | ret=0 32 | if mdadm -CR $member1 -l raid5 -n 3 $container; then 33 | # Creation successful - must have been quicker than spare activation 34 | 35 | check wait 36 | set -- $(get_raiddisks $member1) 37 | if [ $1 = $fail0 -o $2 = $fail0 -o $3 = $fail0 ]; then 38 | echo ERROR: $member1 must not contain $fail0: $@ 39 | ret=1 40 | fi 41 | d1=$1 42 | mdadm -E $d1 >$tmp 43 | if ! 
grep -q 'state\[1\] : Optimal, Consistent' $tmp; then 44 | echo ERROR: member 1 should be optimal in meta data 45 | ret=1 46 | fi 47 | state0=Degraded 48 | else 49 | # Creation unsuccessful - spare was used for member 0 50 | state0=Optimal 51 | fi 52 | 53 | # need to delay a little bit, sometimes the meta data aren't 54 | # up-to-date yet 55 | sleep 0.5 56 | set -- $(get_raiddisks $member0) 57 | if [ $1 = $fail0 -o $2 = $fail0 ]; then 58 | echo ERROR: $member0 must not contain $fail0: $@ 59 | ret=1 60 | fi 61 | d0=$1 62 | 63 | [ -f $tmp ] || mdadm -E $d0 >$tmp 64 | 65 | if ! grep -q 'state\[0\] : '$state0', Consistent' $tmp; then 66 | echo ERROR: member 0 should be $state0 in meta data 67 | ret=1 68 | fi 69 | if ! grep -q 'Offline, Failed' $tmp; then 70 | echo ERROR: Failed disk expected in meta data 71 | ret=1 72 | fi 73 | if [ $ret -eq 1 ]; then 74 | cat /proc/mdstat 75 | mdadm -E $d0 76 | mdadm -E $d1 77 | mdadm -E $fail0 78 | fi 79 | 80 | [ -f /tmp/mdmon.txt ] && { 81 | cat /tmp/mdmon.txt 82 | rm -f /tmp/mdmon.txt 83 | } 84 | 85 | rm -f $tmp 86 | [ $ret -eq 0 ] 87 | -------------------------------------------------------------------------------- /tests/10ddf-fail-stop-readd: -------------------------------------------------------------------------------- 1 | # Simple fail / re-add test 2 | . tests/env-ddf-template 3 | 4 | tmp=$(mktemp /tmp/mdtest-XXXXXX) 5 | rm -f $tmp 6 | 7 | mdadm --zero-superblock $dev8 $dev9 8 | mdadm -CR $container -e ddf -l container -n 2 $dev8 $dev9 9 | 10 | mdadm -CR $member0 -l raid1 -n 2 $container 11 | #$dir/mdadm -CR $member0 -l raid1 -n 2 $container >/tmp/mdmon.txt 2>&1 12 | 13 | # Write to the array 14 | mke2fs -F $member0 15 | check wait 16 | 17 | set -- $(get_raiddisks $member0) 18 | fail0=$1 19 | mdadm $member0 --fail $fail0 20 | 21 | sleep 2 22 | mdadm $container --remove $fail0 23 | 24 | set -- $(get_raiddisks $member0) 25 | case $1 in MISSING) shift;; esac 26 | good0=$1 27 | 28 | mdadm -Ss 29 | 30 | sleep 2 31 | # Now simulate incremental assembly 32 | mdadm -I $good0 33 | mdadm -IRs || true 34 | 35 | # Write to the array 36 | mke2fs -F $member0 37 | 38 | # We re-add the disk now 39 | mdadm $container --add $fail0 40 | 41 | sleep 2 42 | mdadm --wait $member0 || true 43 | 44 | ret=0 45 | set -- $(get_raiddisks $member0) 46 | case $1:$2 in 47 | $dev8:$dev9|$dev9:$dev8);; 48 | *) echo ERROR: bad raid disks "$@"; ret=1;; 49 | esac 50 | 51 | mdadm -Ss 52 | for x in $@; do 53 | mdadm -E $x >$tmp 54 | if ! grep -q 'state\[0\] : Optimal, Consistent' $tmp; then 55 | echo ERROR: member 0 should be optimal in meta data on $x 56 | ret=1 57 | fi 58 | done 59 | 60 | rm -f $tmp 61 | if [ $ret -ne 0 ]; then 62 | mdadm -E $dev8 63 | mdadm -E $dev9 64 | fi 65 | 66 | [ $ret -eq 0 ] 67 | -------------------------------------------------------------------------------- /tests/10ddf-fail-twice: -------------------------------------------------------------------------------- 1 | . 
tests/env-ddf-template 2 | 3 | num_disks=5 4 | mdadm -CR $container -e ddf -n $num_disks $dev8 $dev9 $dev10 $dev11 $dev12 5 | ddf_check container $num_disks 6 | 7 | mdadm -CR $member0 -n 2 -l 1 $container 8 | mdadm -CR $member1 -n 3 -l 5 $container 9 | 10 | mdadm --wait $member1 $member0 || mdadm --wait $member1 $member0 || true 11 | 12 | set -- $(get_raiddisks $member0) 13 | fail0=$1 14 | mdadm $member0 --fail $fail0 15 | set -- $(get_raiddisks $member1) 16 | fail1=$1 17 | mdadm $member1 --fail $fail1 18 | 19 | mdadm $container --add $dev13 20 | 21 | mdadm --wait $member1 $member0 || mdadm --wait $member1 $member0 || true 22 | 23 | 24 | devs0="$(get_raiddisks $member0)" 25 | devs1="$(get_raiddisks $member1)" 26 | 27 | present=$(($(get_present $member0) + $(get_present $member1))) 28 | [ $present -eq 4 ] || { 29 | echo expected 4 present disks, got $present 30 | devices for $member0: $devs0 31 | devices for $member1: $devs1 32 | exit 1 33 | } 34 | 35 | if echo "$devs0" | grep -q MISSING; then 36 | good=1 37 | bad=0 38 | else 39 | good=0 40 | bad=1 41 | fi 42 | 43 | # find a good device 44 | eval "set -- \$devs$good" 45 | check=$1 46 | 47 | tmp=$(mktemp /tmp/mdtest-XXXXXX) 48 | mdadm -E $check >$tmp 49 | 50 | { grep -q 'state\['$bad'\] : Degraded, Consistent' $tmp && 51 | grep -q 'state\['$good'\] : Optimal, Consistent' $tmp; } || { 52 | echo unexpected meta data state on $check 53 | mdadm -E $check 54 | rm -f $tmp 55 | exit 1 56 | } 57 | 58 | rm -f $tmp 59 | exit 0 60 | -------------------------------------------------------------------------------- /tests/10ddf-fail-two-spares: -------------------------------------------------------------------------------- 1 | # Simulate two disks failing shorty after each other 2 | . tests/env-ddf-template 3 | sda=$(get_rootdev) || exit 1 4 | tmp=$(mktemp /tmp/mdtest-XXXXXX) 5 | 6 | mdadm --zero-superblock $dev8 $dev9 $dev10 $dev11 $dev12 $dev13 7 | mdadm -CR $container -e ddf -l container -n 6 \ 8 | $dev8 $dev9 $dev10 $dev11 $dev12 $dev13 9 | 10 | #fast_sync 11 | 12 | mdadm -CR $member0 -l raid6 -n 4 $dev10 $dev11 $dev12 $dev13 -z 16384 13 | #$dir/mdadm -CR $member0 -l raid6 -n 4 $dev10 $dev11 $dev12 $dev13 -z 16384 \ 14 | # >/tmp/mdmon.txt 2>&1 15 | mdadm -CR $member1 -l raid10 -n 4 $dev10 $dev11 $dev12 $dev13 -z 16384 16 | 17 | dd if=$sda of=$member0 bs=1M count=32 18 | dd if=$sda of=$member1 bs=1M skip=16 count=16 19 | 20 | check wait 21 | 22 | sum0=$(sha1sum $member0) 23 | sum1=$(sha1sum $member1) 24 | 25 | mdadm --fail $member1 $dev11 26 | sleep 2 27 | mdadm --fail $member1 $dev12 28 | 29 | # We will have 4 resync procedures, 2 spares for 2 arrays. 30 | mdadm --wait $member1 $member0 || true 31 | mdadm --wait $member1 $member0 || true 32 | 33 | devs0="$(get_raiddisks $member0)" 34 | devs1="$(get_raiddisks $member1)" 35 | expected="$dev10 36 | $dev13 37 | $dev8 38 | $dev9" 39 | 40 | ret=0 41 | if [ "$(echo "$devs0" | sort)" != "$expected" \ 42 | -o "$(echo "$devs1" | sort)" != "$expected" ]; then 43 | echo ERROR: unexpected members 44 | echo $member0: $devs0 45 | echo $member1: $devs1 46 | ret=1 47 | fi 48 | 49 | mdadm -E $dev10 >$tmp 50 | if ! grep -q 'state\[0\] : Optimal, Consistent' $tmp; then 51 | echo ERROR: $member0 should be optimal in meta data 52 | ret=1 53 | fi 54 | if ! 
grep -q 'state\[1\] : Optimal, Consistent' $tmp; then 55 | echo ERROR: $member1 should be optimal in meta data 56 | ret=1 57 | fi 58 | if [ x"$(grep -c active/Online $tmp)" != x4 ]; then 59 | echo ERROR: expected 4 online disks 60 | ret=1 61 | fi 62 | if [ x"$(grep -c "Offline, Failed" $tmp)" != x2 ]; then 63 | echo ERROR: expected 2 failed disks 64 | ret=1 65 | fi 66 | 67 | sum0a=$(sha1sum $member0) 68 | sum1a=$(sha1sum $member1) 69 | 70 | if [ "$sum0" != "$sum0a" -o "$sum1" != "$sum1a" ]; then 71 | echo ERROR: checksum mismatch 72 | ret=1 73 | fi 74 | 75 | if [ $ret -eq 1 ]; then 76 | cat /proc/mdstat 77 | cat $tmp 78 | fi 79 | 80 | [ -f /tmp/mdmon.txt ] && { 81 | cat /tmp/mdmon.txt 82 | rm -f /tmp/mdmon.txt 83 | } 84 | rm -f $tmp 85 | 86 | [ $ret -eq 0 ] 87 | -------------------------------------------------------------------------------- /tests/10ddf-geometry: -------------------------------------------------------------------------------- 1 | # 2 | # Test various RAID geometries, creation and deletion of subarrays 3 | # 4 | 5 | assert_fail() { 6 | if mdadm "$@"; then 7 | echo mdadm "$@" must fail 8 | return 1 9 | else 10 | return 0 11 | fi 12 | } 13 | 14 | assert_kill() { 15 | local dev=$1 n=$2 16 | mdadm -S $dev 17 | mdadm --kill-subarray=$n /dev/md/ddf0 18 | if mdadm -Dbs | grep -q $dev; then 19 | echo >&2 $dev should be deleted 20 | return 1 21 | fi 22 | return 0 23 | } 24 | 25 | set -e 26 | mdadm -CR /dev/md/ddf0 -e ddf -n 6 $dev8 $dev9 $dev10 $dev11 $dev12 $dev13 27 | 28 | # RAID1 geometries 29 | # Use different sizes to make offset calculation harder 30 | mdadm -CR l1s -l1 -n2 /dev/md/ddf0 -z 8000 31 | mdadm -CR l1m -l1 -n3 $dev8 $dev9 $dev10 -z 10000 32 | assert_fail -CR badl1 -l1 -n4 /dev/md/ddf0 33 | 34 | # RAID10 geometries 35 | mdadm -CR l10_0 -l10 -n3 /dev/md/ddf0 -z 1000 36 | mdadm -CR l10_1 -l10 -n5 /dev/md/ddf0 -z 1000 37 | assert_fail mdadm -CR badl10 -l10 -n4 -pn3 /dev/md/ddf0 38 | mdadm -CR l10_2 -l10 -n6 -pn2 /dev/md/ddf0 -z 4000 39 | mdadm -CR l10_3 -l10 -n6 -pn3 /dev/md/ddf0 -z 4000 40 | 41 | assert_fail -CR l10_2 -l10 -n6 -pn2 /dev/md/ddf0 -z 5000 42 | assert_kill /dev/md/l10_2 4 43 | # gone now, must be able to create it again 44 | mdadm -CR l10_2 -l10 -n6 -pn2 /dev/md/ddf0 -z 5000 45 | 46 | # Now stop and reassemble 47 | mdadm -Ss 48 | mdadm -A /dev/md/ddf0 $dev8 $dev9 $dev10 $dev11 $dev12 $dev13 49 | 50 | # Same as above, on inactive container 51 | assert_fail -CR l10_3 -l10 -n6 -pn2 /dev/md/ddf0 -z 5000 52 | # Kill subarray without having started anything (no mdmon) 53 | mdadm --kill-subarray=5 /dev/md/ddf0 54 | mdadm -I /dev/md/ddf0 55 | mdadm -CR l10_3 -l10 -n6 -pn3 /dev/md/ddf0 -z 5000 56 | 57 | assert_kill /dev/md/l10_2 4 58 | assert_kill /dev/md/l10_3 5 59 | 60 | # RAID5 geometries 61 | mdadm -CR l5la -l5 -n3 --layout=ddf-N-restart /dev/md/ddf0 -z 5000 62 | mdadm -CR l5ra -l5 -n3 --layout=ddf-zero-restart /dev/md/ddf0 -z 5000 63 | mdadm -CR l5ls -l5 -n3 --layout=ddf-N-continue /dev/md/ddf0 -z 5000 64 | assert_fail -CR l5rs -l5 -n3 -prs /dev/md/ddf0 -z 5000 65 | 66 | # Stop and reassemble 67 | mdadm -Ss 68 | mdadm -A /dev/md/ddf0 $dev8 $dev9 $dev10 $dev11 $dev12 $dev13 69 | mdadm -I /dev/md/ddf0 70 | 71 | assert_kill /dev/md/l5la 4 72 | assert_kill /dev/md/l5ls 6 73 | assert_kill /dev/md/l5ra 5 74 | 75 | # RAID6 geometries 76 | assert_fail -CR l6la -l6 -n3 -pla /dev/md/ddf0 -z 5000 77 | assert_fail -CR l6rs -l5 -n4 -prs /dev/md/ddf0 -z 5000 78 | mdadm -CR l6la -l6 -n4 --layout=ddf-N-restart /dev/md/ddf0 -z 5000 79 | mdadm -CR l6ra -l6 -n4 
--layout=ddf-zero-restart $dev8 $dev9 $dev10 $dev11 -z 5000 80 | mdadm -CR l6ls -l6 -n4 --layout=ddf-N-continue $dev13 $dev8 $dev9 $dev12 -z 5000 81 | 82 | mdadm -Ss 83 | -------------------------------------------------------------------------------- /tests/10ddf-incremental-wrong-order.broken: -------------------------------------------------------------------------------- 1 | always fails 2 | 3 | Fails with errors: 4 | ERROR: sha1sum of /dev/md/vol0 has changed 5 | ERROR: /dev/md/vol0 has unexpected state on /dev/loop10 6 | ERROR: unexpected number of online disks on /dev/loop10 7 | ERROR: /dev/md/vol0 has unexpected state on /dev/loop8 8 | ERROR: unexpected number of online disks on /dev/loop8 9 | ERROR: sha1sum of /dev/md/vol0 has changed 10 | -------------------------------------------------------------------------------- /tests/10ddf-sudden-degraded: -------------------------------------------------------------------------------- 1 | # 2 | # An array is assembled with one device missing. 3 | # The other device must be marked as Failed in metadata 4 | 5 | . tests/env-ddf-template 6 | 7 | mdadm -CR $container -e ddf -n 2 $dev8 $dev9 8 | ddf_check container 2 9 | 10 | mdadm -CR $member1 -n 2 -l1 $dev8 $dev9 11 | mdadm --wait $member1 || true 12 | mdadm -Ss 13 | 14 | mdadm -I $dev8 15 | mdadm -R $container 16 | mkfs $member1 17 | # There must be a missing device recorded 18 | mdadm --examine $dev8 | grep 'Raid Devices.*--' || exit 1 19 | -------------------------------------------------------------------------------- /tests/12imsm-r0_2d-grow-r0_3d: -------------------------------------------------------------------------------- 1 | . tests/env-imsm-template 2 | 3 | # RAID 0 volume, 2 disks grow to RAID 0 volume, 3 disks 4 | # POSITIVE test 5 | 6 | num_disks=2 7 | device_list="$dev0 $dev1" 8 | spare_list="$dev2" 9 | 10 | # Before: RAID 0 volume, 2 disks, 64k chunk size 11 | vol0_level=0 12 | vol0_comp_size=$((5 * 1024)) 13 | vol0_chunk=64 14 | vol0_num_comps=$num_disks 15 | vol0_offset=0 16 | 17 | # After: RAID 0 volume, 3 disks, 64k chunk size 18 | vol0_new_num_comps=$((num_disks + 1)) 19 | 20 | . tests/imsm-grow-template 0 0 21 | -------------------------------------------------------------------------------- /tests/12imsm-r0_2d-grow-r0_4d: -------------------------------------------------------------------------------- 1 | . tests/env-imsm-template 2 | 3 | # RAID 0 volume, 2 disks grow to RAID 0 volume, 4 disks 4 | # POSITIVE test 5 | 6 | num_disks=2 7 | device_list="$dev0 $dev1" 8 | spare_list="$dev2 $dev3" 9 | 10 | # Before: RAID 0 volume, 2 disks, 64k chunk size 11 | vol0_level=0 12 | vol0_comp_size=$((5 * 1024)) 13 | vol0_chunk=64 14 | vol0_num_comps=$num_disks 15 | vol0_offset=0 16 | 17 | # After: RAID 0 volume, 4 disks, 64k chunk size 18 | vol0_new_num_comps=$((num_disks + 2)) 19 | 20 | . tests/imsm-grow-template 0 0 21 | -------------------------------------------------------------------------------- /tests/12imsm-r0_2d-grow-r0_5d: -------------------------------------------------------------------------------- 1 | . 
tests/env-imsm-template 2 | 3 | # RAID 0 volume, 2 disks grow to RAID 0 volume, 5 disks 4 | # POSITIVE test 5 | 6 | num_disks=2 7 | device_list="$dev0 $dev1" 8 | spare_list="$dev2 $dev3 $dev4" 9 | 10 | # Before: RAID 0 volume, 2 disks, 64k chunk size 11 | vol0_level=0 12 | vol0_comp_size=$((5 * 1024)) 13 | vol0_chunk=64 14 | vol0_num_comps=$num_disks 15 | vol0_offset=0 16 | 17 | # After: RAID 0 volume, 5 disks, 64k chunk size 18 | vol0_new_num_comps=$((num_disks + 3)) 19 | 20 | . tests/imsm-grow-template 0 0 21 | -------------------------------------------------------------------------------- /tests/12imsm-r0_3d-grow-r0_4d: -------------------------------------------------------------------------------- 1 | . tests/env-imsm-template 2 | 3 | # RAID 0 volume, 3 disks grow to RAID 0 volume, 4 disks 4 | # POSITIVE test 5 | 6 | num_disks=3 7 | device_list="$dev0 $dev1 $dev2" 8 | spare_list="$dev3" 9 | 10 | # Before: RAID 0 volume, 3 disks, 64k chunk size 11 | vol0_level=0 12 | vol0_comp_size=$((5 * 1024)) 13 | vol0_chunk=64 14 | vol0_num_comps=$num_disks 15 | vol0_offset=0 16 | 17 | # After: RAID 0 volume, 4 disks, 64k chunk size 18 | vol0_new_num_comps=$((num_disks + 1)) 19 | 20 | . tests/imsm-grow-template 0 0 21 | -------------------------------------------------------------------------------- /tests/12imsm-r5_3d-grow-r5_4d: -------------------------------------------------------------------------------- 1 | . tests/env-imsm-template 2 | 3 | # RAID 5 volume, 3 disks grow to RAID 5 volume, 4 disks 4 | # POSITIVE test 5 | 6 | num_disks=3 7 | device_list="$dev0 $dev1 $dev2" 8 | spare_list="$dev3" 9 | 10 | # Before: RAID 5 volume, 3 disks, 64k chunk size 11 | vol0_level=5 12 | vol0_comp_size=$((5 * 1024)) 13 | vol0_chunk=64 14 | vol0_num_comps=$((num_disks - 1)) 15 | vol0_offset=0 16 | 17 | # After: RAID 5 volume, 4 disks, 64k chunk size 18 | vol0_new_num_comps=$num_disks 19 | 20 | . tests/imsm-grow-template 0 0 21 | -------------------------------------------------------------------------------- /tests/12imsm-r5_3d-grow-r5_5d: -------------------------------------------------------------------------------- 1 | . tests/env-imsm-template 2 | 3 | # RAID 5 volume, 3 disks grow to RAID 5 volume, 5 disks 4 | # POSITIVE test 5 | 6 | num_disks=3 7 | device_list="$dev0 $dev1 $dev2" 8 | spare_list="$dev3 $dev4" 9 | 10 | # Before: RAID 5 volume, 3 disks, 64k chunk size 11 | vol0_level=5 12 | vol0_comp_size=$((5 * 1024)) 13 | vol0_chunk=64 14 | vol0_num_comps=$((num_disks - 1)) 15 | vol0_offset=0 16 | 17 | # After: RAID 5 volume, 5 disks, 64k chunk size 18 | vol0_new_num_comps=$((num_disks + 1)) 19 | 20 | . tests/imsm-grow-template 0 0 21 | -------------------------------------------------------------------------------- /tests/13imsm-r0_r0_2d-grow-r0_r0_4d: -------------------------------------------------------------------------------- 1 | . 
tests/env-imsm-template 2 | 3 | # Grow the container (arrays inside) from 2 disks to 4 disks 4 | # POSITIVE test 5 | 6 | num_disks=2 7 | device_list="$dev0 $dev1" 8 | spare_list="$dev2 $dev3" 9 | 10 | # Before: RAID 0 volume in slot #0, 2 disks, 128k chunk size 11 | # RAID 0 volume in slot #1, 2 disks, 64k chunk size 12 | vol0_level=0 13 | vol0_comp_size=$((5 * 1024)) 14 | vol0_chunk=128 15 | vol0_num_comps=$num_disks 16 | vol0_offset=0 17 | 18 | vol1_level=0 19 | vol1_comp_size=$((5 * 1024)) 20 | vol1_chunk=64 21 | vol1_num_comps=$num_disks 22 | vol1_offset=$((vol0_comp_size + 4096)) 23 | 24 | # After: RAID 0 volume in slot #0, 4 disks, 128k chunk size 25 | # RAID 0 volume in slot #1, 4 disks, 64k chunk size 26 | vol0_new_num_comps=$((num_disks + 2)) 27 | vol1_new_num_comps=$vol0_new_num_comps 28 | 29 | . tests/imsm-grow-template 0 0 30 | -------------------------------------------------------------------------------- /tests/13imsm-r0_r0_2d-grow-r0_r0_5d: -------------------------------------------------------------------------------- 1 | . tests/env-imsm-template 2 | 3 | # Grow both members from 2 disks to 5 disks 4 | # POSITIVE test 5 | 6 | num_disks=2 7 | device_list="$dev0 $dev1" 8 | spare_list="$dev2 $dev3 $dev4" 9 | 10 | # Before: RAID 0 volume in slot #0, 2 disks, 64k chunk size 11 | # RAID 0 volume in slot #1, 2 disks, 256k chunk size 12 | vol0_level=0 13 | vol0_comp_size=$((4 * 1024)) 14 | vol0_chunk=64 15 | vol0_num_comps=$num_disks 16 | vol0_offset=0 17 | 18 | vol1_level=0 19 | vol1_comp_size=$((6 * 1024)) 20 | vol1_chunk=256 21 | vol1_num_comps=$num_disks 22 | vol1_offset=$((vol0_comp_size + 4096)) 23 | 24 | # After: RAID 0 volume in slot #0, 5 disks, 64k chunk size 25 | # RAID 0 volume in slot #1, 5 disks, 256k chunk size 26 | vol0_new_num_comps=$((num_disks + 3)) 27 | vol1_new_num_comps=$vol0_new_num_comps 28 | 29 | . tests/imsm-grow-template 0 0 30 | -------------------------------------------------------------------------------- /tests/13imsm-r0_r0_3d-grow-r0_r0_4d: -------------------------------------------------------------------------------- 1 | . tests/env-imsm-template 2 | 3 | # Grow a container (arrays inside) from 3 disks to 4 disks 4 | # POSITIVE test 5 | 6 | num_disks=3 7 | device_list="$dev0 $dev1 $dev2" 8 | spare_list="$dev3" 9 | 10 | # Before: RAID 0 volume in slot #0, 3 disks, 128k chunk size 11 | # RAID 0 volume in slot #1, 3 disks, 512k chunk size 12 | vol0_level=0 13 | vol0_comp_size=$((5 * 1024)) 14 | vol0_chunk=128 15 | vol0_num_comps=$num_disks 16 | vol0_offset=0 17 | 18 | vol1_level=0 19 | vol1_comp_size=$((5 * 1024)) 20 | vol1_chunk=128 21 | vol1_num_comps=$num_disks 22 | vol1_offset=$((vol0_comp_size + 4096)) 23 | 24 | # After: RAID0 volume in slot #0, 4 disks, 128k chunk size 25 | # RAID0 volume in slot #1, 4 disks, 512k chunk size 26 | vol0_new_num_comps=$((num_disks + 1)) 27 | vol1_new_num_comps=$vol0_new_num_comps 28 | 29 | . tests/imsm-grow-template 0 0 30 | -------------------------------------------------------------------------------- /tests/13imsm-r0_r5_3d-grow-r0_r5_4d: -------------------------------------------------------------------------------- 1 | . 
tests/env-imsm-template 2 | 3 | # Grow the container (arrays inside) from 3 disks to 4 disks 4 | # POSITIVE test 5 | 6 | num_disks=3 7 | device_list="$dev0 $dev1 $dev2" 8 | spare_list="$dev3" 9 | 10 | # Before: RAID 0 volume in slot #0, 3 disks, 64k chunk size 11 | # RAID 5 volume in slot #1, 3 disks, 128k chunk size 12 | vol0_level=0 13 | vol0_comp_size=$((5 * 1024)) 14 | vol0_chunk=64 15 | vol0_num_comps=$num_disks 16 | vol0_offset=0 17 | 18 | vol1_level=5 19 | vol1_comp_size=$((5 * 1024)) 20 | vol1_chunk=128 21 | vol1_num_comps=$((num_disks - 1)) 22 | vol1_offset=$((vol0_comp_size + 4096)) 23 | 24 | # After: RAID 0 volume in slot #0, 4 disks, 64k chunk size 25 | # RAID 5 volume in slot #1, 4 disks, 128k chunk size 26 | vol1_new_num_comps=$num_disks 27 | vol0_new_num_comps=$((num_disks + 1)) 28 | 29 | . tests/imsm-grow-template 0 0 30 | -------------------------------------------------------------------------------- /tests/13imsm-r0_r5_3d-grow-r0_r5_5d: -------------------------------------------------------------------------------- 1 | . tests/env-imsm-template 2 | 3 | # Grow the container (arrays inside) from 3 disks to 5 disks 4 | # POSITIVE test 5 | 6 | num_disks=3 7 | device_list="$dev0 $dev1 $dev2" 8 | spare_list="$dev3 $dev4" 9 | 10 | # Before: RAID 0 volume in slot #0, 3 disks, 256k chunk size 11 | # RAID 5 volume in slot #1, 3 disks, 512k chunk size 12 | vol0_level=0 13 | vol0_comp_size=$((5 * 1024)) 14 | vol0_chunk=128 15 | vol0_num_comps=$num_disks 16 | vol0_offset=0 17 | 18 | vol1_level=5 19 | vol1_comp_size=$((5 * 1024)) 20 | vol1_chunk=128 21 | vol1_num_comps=$((num_disks - 1)) 22 | vol1_offset=$((vol0_comp_size + 4096)) 23 | 24 | # After: RAID 0 volume in slot #0, 5 disks, 256k chunk size 25 | # RAID 5 volume in slot #1, 5 disks, 512k chunk size 26 | vol0_new_num_comps=$((num_disks + 2)) 27 | vol1_new_num_comps=$((num_disks + 1)) 28 | 29 | . tests/imsm-grow-template 0 0 30 | -------------------------------------------------------------------------------- /tests/13imsm-r5_r0_3d-grow-r5_r0_4d: -------------------------------------------------------------------------------- 1 | . tests/env-imsm-template 2 | 3 | # Grow the container (arrays inside) from 3 disks to 4 disks 4 | # POSITIVE test 5 | 6 | num_disks=3 7 | device_list="$dev0 $dev1 $dev2" 8 | spare_list="$dev3" 9 | 10 | # Before: RAID 5 volume in slot #0, 3 disks, 64k chunk size 11 | # RAID 0 volume in slot #1, 3 disks, 64k chunk size 12 | vol0_level=5 13 | vol0_comp_size=$((5 * 1024)) 14 | vol0_chunk=64 15 | vol0_num_comps=$((num_disks - 1)) 16 | vol0_offset=0 17 | 18 | vol1_level=0 19 | vol1_comp_size=$((5 * 1024)) 20 | vol1_chunk=64 21 | vol1_offset=$((vol0_comp_size + 4096)) 22 | vol1_num_comps=$num_disks 23 | 24 | # After: RAID 5 volume in slot #0, 4 disks, 64k chunk size 25 | # RAID 0 volume in slot #1, 4 disks, 64k chunk size 26 | vol0_new_num_comps=$num_disks 27 | vol1_new_num_comps=$((num_disks + 1)) 28 | 29 | . tests/imsm-grow-template 0 0 30 | -------------------------------------------------------------------------------- /tests/13imsm-r5_r0_3d-grow-r5_r0_5d: -------------------------------------------------------------------------------- 1 | . 
tests/env-imsm-template 2 | 3 | # Grow the container (arrays inside) from 3 disks to 5 disks 4 | # POSITIVE test 5 | 6 | num_disks=3 7 | device_list="$dev0 $dev1 $dev2" 8 | spare_list="$dev3 $dev4" 9 | 10 | # Before: RAID 5 volume in slot #0, 3 disks, 128k chunk size 11 | # RAID 0 volume in slot #1, 3 disks, 256k chunk size 12 | vol0_level=5 13 | vol0_comp_size=$((5 * 1024)) 14 | vol0_chunk=64 15 | vol0_num_comps=$((num_disks - 1)) 16 | vol0_offset=0 17 | 18 | vol1_level=0 19 | vol1_comp_size=$((5 * 1024)) 20 | vol1_chunk=64 21 | vol1_offset=$((vol0_comp_size + 4096)) 22 | vol1_num_comps=$num_disks 23 | 24 | # After: RAID 5 volume in slot #0, 5 disks, 128k chunk size 25 | # RAID 0 volume in slot #1, 5 disks, 256k chunk size 26 | vol0_new_num_comps=$((num_disks + 1)) 27 | vol1_new_num_comps=$((num_disks + 2)) 28 | 29 | . tests/imsm-grow-template 0 0 30 | -------------------------------------------------------------------------------- /tests/14imsm-r0_3d-r5_3d-migrate-r5_4d-r5_4d: -------------------------------------------------------------------------------- 1 | . tests/env-imsm-template 2 | 3 | # RAID 0 and RAID 5 volumes (3 disks) migrate to RAID 5 and RAID 5 volumes (4 disks) 4 | # NEGATIVE test - migration is not allowed if there is more then one array in a container 5 | 6 | num_disks=3 7 | device_list="$dev0 $dev1 $dev2" 8 | spare_list="$dev3" 9 | 10 | # Before: RAID 0 volume, 3 disks, 64k chunk size, as member #0 11 | vol0_level=0 12 | vol0_comp_size=$((5 * 1024)) 13 | vol0_chunk=64 14 | vol0_num_comps=$num_disks 15 | vol0_offset=0 16 | 17 | # Extra: RAID 5 volume, 3 disks, 64k chunk size, as member #1 18 | vol1_level=5 19 | vol1_comp_size=$((5 * 1024)) 20 | vol1_chunk=64 21 | vol1_num_comps=$((num_disks - 1)) 22 | vol1_offset=$((vol0_comp_size + 4096)) 23 | 24 | # After: RAID 5 volume, 4 disks, 64k chunk size (only member #0) 25 | vol0_new_level=5 26 | vol0_new_num_comps=$num_disks 27 | vol0_new_chunk=64 28 | 29 | . tests/imsm-grow-template 1 1 30 | -------------------------------------------------------------------------------- /tests/14imsm-r0_3d_no_spares-migrate-r5_3d: -------------------------------------------------------------------------------- 1 | . tests/env-imsm-template 2 | 3 | # RAID 0 volume (3 disks, no spares) migrate to RAID 5 volume (3 disks) 4 | # NEGATIVE test 5 | 6 | num_disks=3 7 | device_list="$dev0 $dev1 $dev2" 8 | 9 | # Before: RAID 0 volume, 3 disks, 64k chunk size 10 | vol0_level=0 11 | vol0_comp_size=$((5 * 1024)) 12 | vol0_chunk=64 13 | vol0_num_comps=$num_disks 14 | vol0_offset=0 15 | 16 | # After: RAID 5, 3 disks, 64k chunk size 17 | vol0_new_level=5 18 | vol0_new_num_comps=$((num_disks - 1)) 19 | vol0_new_chunk=64 20 | 21 | . tests/imsm-grow-template 1 22 | -------------------------------------------------------------------------------- /tests/14imsm-r0_r0_2d-takeover-r10_4d: -------------------------------------------------------------------------------- 1 | . 
tests/env-imsm-template 2 | 3 | 4 | # Two RAID 0 volumes (2 disks) migrate to RAID 10 volume (4 disks) 5 | # NEGATIVE test 6 | 7 | num_disks=2 8 | device_list="$dev0 $dev1" 9 | 10 | # Before: RAID 0 volume in slot #0, 2 disks, 64k chunk size 11 | # RAID 0 volume in slot #1, 2 disks, 64k chunk size 12 | vol0_level=0 13 | vol0_comp_size=$((5 * 1024)) 14 | vol0_chunk=64 15 | vol0_num_comps=$num_disks 16 | vol0_offset=0 17 | 18 | # Before: RAID 0 volume, disks, 64k chunk size 19 | vol1_level=0 20 | vol1_comp_size=$((5 * 1024)) 21 | vol1_chunk=64 22 | vol1_num_comps=num_disks 23 | vol1_offset=$(( $vol0_comp_size + 4096 )) 24 | 25 | # After: RAID 10, 4 disks, 64k chunk size 26 | vol0_new_level=10 27 | vol0_new_num_comps=$((num_disks - 1)) 28 | vol0_new_chunk=64 29 | 30 | . tests/imsm-grow-template 1 1 31 | -------------------------------------------------------------------------------- /tests/14imsm-r10_4d-grow-r10_5d: -------------------------------------------------------------------------------- 1 | . tests/env-imsm-template 2 | 3 | # RAID 10 volume, 4 disks grow to RAID 10 volume, 5 disks 4 | # NEGATIVE test 5 | 6 | num_disks=4 7 | device_list="$dev0 $dev1 $dev2 $dev3" 8 | spare_list="$dev4" 9 | 10 | # Before: RAID 10 volume, 4 disks, 128k chunk size 11 | vol0_level=10 12 | vol0_comp_size=$((5 * 1024)) 13 | vol0_chunk=128 14 | vol0_num_comps=$((num_disks - 2)) 15 | vol0_offset=0 16 | 17 | # After: RAID 10 volume, 5 disks, 128k chunks size (test should fail) 18 | vol0_new_num_comps=$((num_disks + 1)) 19 | 20 | . tests/imsm-grow-template 1 0 21 | -------------------------------------------------------------------------------- /tests/14imsm-r10_r5_4d-takeover-r0_2d: -------------------------------------------------------------------------------- 1 | . tests/env-imsm-template 2 | 3 | 4 | # Two RAID volumes: RAID10 and RAID5 (4 disks) migrate to RAID 0 volume (2 disks) 5 | # NEGATIVE test 6 | 7 | num_disks=4 8 | device_list="$dev0 $dev1 $dev2 $dev3" 9 | 10 | # Before: RAID 10 volume in slot #0, 4 disks, 64k chunk size 11 | # RAID 5 volume in slot #1, 4 disks, 64k chunk size 12 | vol0_level=10 13 | vol0_comp_size=$((5 * 1024)) 14 | vol0_chunk=64 15 | vol0_num_comps=$(( $num_disks - 2 )) 16 | vol0_offset=0 17 | 18 | # Before: RAID 0 volume, disks, 64k chunk size 19 | vol1_level=5 20 | vol1_comp_size=$((5 * 1024)) 21 | vol1_chunk=64 22 | vol1_num_comps=$(( $num_disks - 1 )) 23 | vol1_offset=$(( $vol0_comp_size + 4096 )) 24 | 25 | # After: RAID 10, 4 disks, 64k chunk size 26 | vol0_new_level=0 27 | vol0_new_num_comps=2 28 | vol0_new_chunk=64 29 | 30 | . tests/imsm-grow-template 1 1 31 | -------------------------------------------------------------------------------- /tests/14imsm-r1_2d-grow-r1_3d: -------------------------------------------------------------------------------- 1 | . tests/env-imsm-template 2 | 3 | # RAID 1 volume, 2 disks grow to RAID 1 volume, 3 disks 4 | # NEGATIVE test 5 | 6 | num_disks=2 7 | device_list="$dev0 $dev1" 8 | spare_list="$dev4" 9 | 10 | # Before: RAID 1 volume, 2 disks, 64k chunk size 11 | vol0_level=1 12 | vol0_comp_size=$((5 * 1024)) 13 | vol0_num_comps=$((num_disks - 1)) 14 | vol0_offset=0 15 | 16 | # After: RAID 1 volume, 3 disks, 64k chunks size (test should fail) 17 | vol0_new_num_comps=$num_disks 18 | 19 | . 
tests/imsm-grow-template 1 0 20 | -------------------------------------------------------------------------------- /tests/14imsm-r1_2d-grow-r1_3d.broken: -------------------------------------------------------------------------------- 1 | always fails 2 | 3 | Fails with error: 4 | 5 | mdadm/tests/func.sh: line 325: dvsize/chunk: division by 0 (error token is "chunk") 6 | -------------------------------------------------------------------------------- /tests/14imsm-r1_2d-takeover-r0_2d: -------------------------------------------------------------------------------- 1 | . tests/env-imsm-template 2 | 3 | # RAID 1 volume, 2 disks change to RAID 0 volume, 2 disks 4 | # 5 | #NEGATIVE test 6 | 7 | num_disks=2 8 | device_list="$dev0 $dev1" 9 | 10 | # Before: RAID 1 volume, 2 disks, 64k chunk size 11 | vol0_level=1 12 | vol0_comp_size=$((5 * 1024)) 13 | vol0_num_comps=$((num_disks - 1)) 14 | vol0_offset=0 15 | 16 | # After: RAID 0 volume, 2 disks, 64k chunk size 17 | vol0_new_level=0 18 | vol0_new_num_comps=$num_disks 19 | vol0_new_chunk=64 20 | 21 | . tests/imsm-grow-template 1 22 | -------------------------------------------------------------------------------- /tests/14imsm-r1_2d-takeover-r0_2d.broken: -------------------------------------------------------------------------------- 1 | always fails 2 | 3 | Fails with error: 4 | 5 | tests/func.sh: line 325: dvsize/chunk: division by 0 (error token 6 | is "chunk") 7 | -------------------------------------------------------------------------------- /tests/14imsm-r5_3d-grow-r5_5d-no-spares: -------------------------------------------------------------------------------- 1 | . tests/env-imsm-template 2 | 3 | # RAID 5 volume, 3 disks grow to RAID 5 volume, 4 disks 4 | # NEGATIVE test 5 | 6 | num_disks=3 7 | device_list="$dev0 $dev1 $dev2" 8 | 9 | # Before: RAID 5 volume, 3 disks, 64k chunk size 10 | vol0_level=5 11 | vol0_comp_size=$((5 * 1024)) 12 | vol0_chunk=64 13 | vol0_num_comps=$((num_disks - 1)) 14 | vol0_offset=0 15 | 16 | # After: RAID 5 volume, 4 disks, 64k chunks size 17 | add_to_num_disks=2 18 | vol0_new_num_comps=$((num_disks + 2)) 19 | 20 | . tests/imsm-grow-template 1 0 21 | -------------------------------------------------------------------------------- /tests/14imsm-r5_3d-migrate-r4_3d: -------------------------------------------------------------------------------- 1 | . tests/env-imsm-template 2 | 3 | # RAID 5 volume (3 disks) migrate to RAID 4 volume (3 disks) 4 | # NEGATIVE test 5 | 6 | num_disks=3 7 | device_list="$dev0 $dev1 $dev2" 8 | 9 | # Before: RAID 5 volume, 3 disks, 64k chunk size 10 | vol0_level=5 11 | vol0_comp_size=$((5 * 1024)) 12 | vol0_chunk=64 13 | vol0_num_comps=$((num_disks - 1)) 14 | vol0_offset=0 15 | 16 | # After: RAID 4, 3 disks, 64k chunk size 17 | vol0_new_level=4 18 | vol0_new_num_comps=$((num_disks - 1)) 19 | vol0_new_chunk=64 20 | 21 | . tests/imsm-grow-template 1 22 | -------------------------------------------------------------------------------- /tests/15imsm-r0_3d_64k-migrate-r0_3d_256k: -------------------------------------------------------------------------------- 1 | . tests/env-imsm-template 2 | 3 | # RAID 0 volume, Migration from 64k to 256k chunk size. 
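# (Editor's aside: illustrative sketch only, not part of this test.) The
# vol0_* variables below parameterize tests/imsm-grow-template, which runs the
# migration and verifies the resulting geometry. Assuming $container, $member0
# and $dev0/$dev1 are provided by tests/env-imsm-template, the equivalent
# manual chunk-size migration would look roughly like:
#
#   mdadm -CR $container -e imsm -n 2 $dev0 $dev1
#   mdadm -CR $member0 -l 0 -n 2 --chunk=64 $dev0 $dev1
#   mdadm --wait $member0 || true
#   mdadm --grow $member0 --chunk=256
#   mdadm --wait $member0 || true
#   mdadm -D $member0 | grep -i 'chunk size'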
4 | # POSITIVE test 5 | 6 | num_disks=2 7 | device_list="$dev0 $dev1" 8 | 9 | # RAID 0, 2 disks, 64k chunk size 10 | vol0_level=0 11 | vol0_comp_size=$((5 * 1024)) 12 | vol0_chunk=64 13 | vol0_num_comps=$num_disks 14 | vol0_offset=0 15 | 16 | # RAID 0, 2 disks, 256k chunk size 17 | vol0_new_level=0 18 | vol0_new_num_comps=$vol0_num_comps 19 | vol0_new_chunk=256 20 | 21 | . tests/imsm-grow-template 0 1 22 | -------------------------------------------------------------------------------- /tests/15imsm-r5_3d_4k-migrate-r5_3d_256k: -------------------------------------------------------------------------------- 1 | . tests/env-imsm-template 2 | 3 | # RAID 5 volume, Migration from 4k to 256 chunk size. 4 | # POSITIVE test 5 | 6 | num_disks=3 7 | device_list="$dev0 $dev1 $dev2" 8 | 9 | # RAID 5, 3 disks, 4k chunk size 10 | vol0_level=5 11 | vol0_comp_size=$((5 * 1024)) 12 | vol0_chunk=4 13 | vol0_num_comps=$((num_disks - 1)) 14 | vol0_offset=0 15 | 16 | # RAID 5, 3 disks, 256k chunk size 17 | vol0_new_level=5 18 | vol0_new_num_comps=$vol0_num_comps 19 | vol0_new_chunk=256 20 | 21 | . tests/imsm-grow-template 0 1 22 | -------------------------------------------------------------------------------- /tests/15imsm-r5_3d_64k-migrate-r5_3d_256k: -------------------------------------------------------------------------------- 1 | . tests/env-imsm-template 2 | 3 | # RAID 5 volume, Migration from 64k to 256k chunk size. 4 | # POSITIVE test 5 | 6 | num_disks=3 7 | device_list="$dev0 $dev1 $dev2" 8 | 9 | # RAID 5, 3 disks, 64k chunk size 10 | vol0_level=5 11 | vol0_comp_size=$((5 * 1024)) 12 | vol0_chunk=64 13 | vol0_num_comps=$((num_disks - 1)) 14 | vol0_offset=0 15 | 16 | # RAID 5, 3 disks, 256k chunk size 17 | vol0_new_level=5 18 | vol0_new_num_comps=$vol0_num_comps 19 | vol0_new_chunk=256 20 | 21 | . tests/imsm-grow-template 0 1 22 | -------------------------------------------------------------------------------- /tests/15imsm-r5_6d_4k-migrate-r5_6d_256k: -------------------------------------------------------------------------------- 1 | . tests/env-imsm-template 2 | 3 | # RAID 5 volume, Migration from 4k to 256k chunk size. 4 | # POSITIVE test 5 | 6 | num_disks=6 7 | device_list="$dev0 $dev1 $dev2 $dev3 $dev4 $dev5" 8 | 9 | # RAID 5, 6 disks, 4k chunk size 10 | vol0_level=5 11 | vol0_comp_size=$((5 * 1024)) 12 | vol0_chunk=4 13 | vol0_num_comps=$((num_disks - 1)) 14 | vol0_offset=0 15 | 16 | # RAID 5, 6 disks, 256k chunk size 17 | vol0_new_level=5 18 | vol0_new_num_comps=$vol0_num_comps 19 | vol0_new_chunk=256 20 | 21 | . tests/imsm-grow-template 0 1 22 | -------------------------------------------------------------------------------- /tests/15imsm-r5_r0_3d_64k-migrate-r5_r0_3d_256k: -------------------------------------------------------------------------------- 1 | . 
tests/env-imsm-template 2 | 3 | # Member 0: RAID 5 volume, Member 1: RAID 0 volume 4 | # Migration from 64k to 256k chunk size (both members) 5 | # POSITIVE test 6 | 7 | num_disks=3 8 | device_list="$dev0 $dev1 $dev2" 9 | 10 | # RAID 5, 3 disks, 64k chunk size 11 | vol0_level=5 12 | vol0_comp_size=$((5 * 1024)) 13 | vol0_chunk=64 14 | vol0_num_comps=$((num_disks - 1)) 15 | vol0_offset=0 16 | 17 | # After migration parameters 18 | vol0_new_level=5 19 | vol0_new_num_comps=$vol0_num_comps 20 | vol0_new_chunk=256 21 | 22 | # RAID 0, 3 disks, 64k chunk size 23 | vol1_level=0 24 | vol1_comp_size=$((5 * 1024)) 25 | vol1_chunk=64 26 | vol1_num_comps=$num_disks 27 | vol1_offset=$((vol0_comp_size + 4096)) 28 | 29 | # After migration paramters 30 | vol1_new_level=0 31 | vol1_new_num_comps=$vol1_num_comps 32 | vol1_new_chunk=256 33 | 34 | . tests/imsm-grow-template 0 1 35 | -------------------------------------------------------------------------------- /tests/16imsm-r0_3d-migrate-r5_4d: -------------------------------------------------------------------------------- 1 | . tests/env-imsm-template 2 | 3 | # RAID 0 volume (3 disks) migrate to RAID 5 volume (4 disks) 4 | # POSITIVE test 5 | 6 | num_disks=3 7 | device_list="$dev0 $dev1 $dev2" 8 | 9 | # Before: RAID 0, 3 disks, 64k chunk size 10 | vol0_level=0 11 | vol0_comp_size=$((5 * 1024)) 12 | vol0_chunk=64 13 | vol0_num_comps=$num_disks 14 | vol0_offset=0 15 | 16 | # After: RAID 5, 4 disks, 64k chunk size 17 | vol0_new_level=5 18 | new_num_disks=4 19 | vol0_new_num_comps=$num_disks 20 | vol0_new_chunk=64 21 | 22 | . tests/imsm-grow-template 0 1 23 | -------------------------------------------------------------------------------- /tests/16imsm-r0_5d-migrate-r5_6d: -------------------------------------------------------------------------------- 1 | . tests/env-imsm-template 2 | 3 | # RAID 0 volume (5 disks) migrate to RAID 5 volume (6 disks) 4 | # POSITIVE test 5 | 6 | num_disks=5 7 | device_list="$dev0 $dev1 $dev2 $dev3 $dev4" 8 | 9 | # Before: RAID 0, 5 disks, 64k chunk size 10 | vol0_level=0 11 | vol0_comp_size=$((5 * 1024)) 12 | vol0_chunk=64 13 | vol0_num_comps=$num_disks 14 | vol0_offset=0 15 | 16 | # After: RAID 5, 6 disks, 64k chunk size 17 | vol0_new_level=5 18 | vol0_new_num_comps=$num_disks 19 | vol0_new_chunk=64 20 | new_num_disks=6 21 | 22 | . tests/imsm-grow-template 0 1 23 | -------------------------------------------------------------------------------- /tests/16imsm-r5_3d-migrate-r0_3d: -------------------------------------------------------------------------------- 1 | . tests/env-imsm-template 2 | 3 | # RAID 5 volume (3 disks) migrate to RAID 0 volume (2 disks) 4 | # NEGATIVE test 5 | 6 | num_disks=3 7 | device_list="$dev0 $dev1 $dev2" 8 | 9 | # Before: RAID 5, 3 disks, 64k chunk size 10 | vol0_level=5 11 | vol0_comp_size=$((5 * 1024)) 12 | vol0_chunk=64 13 | vol0_num_comps=$((num_disks - 1)) 14 | vol0_offset=0 15 | 16 | # After: RAID 0, 3 disks, 64k chunk size 17 | vol0_new_level=0 18 | vol0_new_num_comps=$((num_disks-1)) 19 | vol0_new_chunk=64 20 | 21 | . tests/imsm-grow-template 1 1 22 | -------------------------------------------------------------------------------- /tests/16imsm-r5_5d-migrate-r0_5d: -------------------------------------------------------------------------------- 1 | . 
tests/env-imsm-template 2 | 3 | # RAID 5 volume (5 disks) migration to RAID 0 volume (4 disks) 4 | # NEGATIVE test 5 | 6 | num_disks=5 7 | device_list="$dev0 $dev1 $dev2 $dev3 $dev4" 8 | 9 | # Before: RAID 5 volume, 5 disks, 64k chunk size 10 | vol0_level=5 11 | vol0_comp_size=$((5 * 1024)) 12 | vol0_chunk=64 13 | vol0_num_comps=$((num_disks - 1)) 14 | vol0_offset=0 15 | 16 | # After: RAID 0 volume, 5 disks, 64k chunk size 17 | vol0_new_level=0 18 | vol0_new_num_comps=$((num_disks - 1)) 19 | vol0_new_chunk=64 20 | 21 | . tests/imsm-grow-template 1 1 22 | -------------------------------------------------------------------------------- /tests/18imsm-1d-takeover-r0_1d: -------------------------------------------------------------------------------- 1 | . tests/env-imsm-template 2 | 3 | # Create RAID 0 from a single disk. 4 | # POSITIVE test 5 | 6 | vol0_num_comps=1 7 | vol0_comp_size=$((10 * 1024)) 8 | 9 | # Create container 10 | mdadm --create --run $container --metadata=imsm --force --raid-disks=$vol0_num_comps $dev0 11 | check wait 12 | imsm_check container $vol0_num_comps 13 | 14 | # Create RAID 0 volume 15 | mdadm --create --run $member0 --level=0 --size=$vol0_comp_size --chunk=64 --force --raid-disks=$vol0_num_comps $dev0 16 | check wait 17 | 18 | # Test the member 19 | imsm_check member $member0 $vol0_num_comps 0 $vol0_comp_size $((vol0_num_comps * vol0_comp_size)) 0 64 20 | testdev $member0 $vol0_num_comps $vol0_comp_size 64 21 | 22 | exit 0 23 | -------------------------------------------------------------------------------- /tests/18imsm-1d-takeover-r1_2d: -------------------------------------------------------------------------------- 1 | . tests/env-imsm-template 2 | 3 | # Create RAID 1 from a single disk 4 | # POSITIVE test 5 | 6 | vol0_num_comps=1 7 | vol0_comp_size=$((10 * 1024)) 8 | 9 | # Create container 10 | mdadm --create --run $container --auto=md --metadata=imsm --force --raid-disks=$vol0_num_comps $dev0 11 | check wait 12 | imsm_check container $vol0_num_comps 13 | 14 | # Create RAID 1 volume 15 | mdadm --create --run $member0 --auto=md --level=1 --size=$vol0_comp_size --raid-disks=$((vol0_num_comps + 1)) $dev0 missing 16 | check wait 17 | 18 | # Test the member0 19 | imsm_check member $member0 $((vol_num_comps + 1)) 1 $vol0_comp_size $((vol0_num_comps * vol0_comp_size)) 0 64 20 | testdev $member0 $vol0_num_comps $vol0_comp_size 64 21 | -------------------------------------------------------------------------------- /tests/18imsm-r0_2d-takeover-r10_4d: -------------------------------------------------------------------------------- 1 | . tests/env-imsm-template 2 | 3 | # RAID 0 volume, 2 disks change to RAID 10 volume, 4 disks 4 | # POSITIVE test 5 | 6 | num_disks=2 7 | device_list="$dev0 $dev1" 8 | spare_list="$dev2 $dev3" 9 | 10 | # Before: RAID 0 volume, 2 disks, 256k chunk size 11 | vol0_level=0 12 | vol0_comp_size=$((5 * 1024)) 13 | vol0_chunk=128 14 | vol0_num_comps=$num_disks 15 | vol0_offset=0 16 | 17 | # After: RAID 10 volume, 4 disks, 256k chunk size 18 | vol0_new_level=10 19 | vol0_new_num_comps=$vol0_num_comps 20 | vol0_new_chunk=128 21 | 22 | . tests/imsm-grow-template 0 1 23 | -------------------------------------------------------------------------------- /tests/18imsm-r10_4d-takeover-r0_2d: -------------------------------------------------------------------------------- 1 | . 
tests/env-imsm-template 2 | 3 | # RAID 10 volume, 4 disks change to RAID 0 volume, 2 disks 4 | # POSITIVE test 5 | 6 | num_disks=4 7 | device_list="$dev0 $dev1 $dev2 $dev3" 8 | 9 | # Before: RAID 10 volume, 4 disks, 128k chunk size 10 | vol0_level=10 11 | vol0_comp_size=$((5 * 1024)) 12 | vol0_chunk=128 13 | vol0_num_comps=$((num_disks - 2)) 14 | vol0_offset=0 15 | 16 | # After: RAID 0 volume, 2 disks, 128k chunk size 17 | vol0_new_level=0 18 | vol0_new_num_comps=2 19 | vol0_new_chunk=128 20 | new_num_disks=2 21 | 22 | . tests/imsm-grow-template 0 1 23 | -------------------------------------------------------------------------------- /tests/18imsm-r10_4d-takeover-r0_2d.broken: -------------------------------------------------------------------------------- 1 | fails rarely 2 | 3 | Fails about 1 run in 100 with message: 4 | 5 | ERROR: size is wrong for /dev/md/vol0: 2 * 5120 (chunk=128) = 20480, not 0 6 | -------------------------------------------------------------------------------- /tests/18imsm-r1_2d-takeover-r0_1d: -------------------------------------------------------------------------------- 1 | . tests/env-imsm-template 2 | 3 | # RAID 1 volume, 2 disks change to RAID 0 volume, 1 disks 4 | # POSITIVE test 5 | 6 | num_disks=2 7 | device_list="$dev0 $dev1" 8 | 9 | # Before: RAID 1 volume, 2 disks 10 | vol0_level=1 11 | vol0_comp_size=$((5 * 1024)) 12 | vol0_num_comps=$(( $num_disks - 1 )) 13 | vol0_offset=0 14 | 15 | # After: RAID 0 volume, 1 disks, 64k chunk size 16 | vol0_new_level=0 17 | vol0_new_num_comps=1 18 | vol0_new_chunk=64 19 | new_num_disks=1 20 | 21 | . tests/imsm-grow-template 0 1 22 | -------------------------------------------------------------------------------- /tests/18imsm-r1_2d-takeover-r0_1d.broken: -------------------------------------------------------------------------------- 1 | always fails 2 | 3 | Fails with error: 4 | 5 | tests/func.sh: line 325: dvsize/chunk: division by 0 (error token 6 | is "chunk") 7 | -------------------------------------------------------------------------------- /tests/19raid6auto-repair: -------------------------------------------------------------------------------- 1 | number_of_disks=5 2 | chunksize_in_kib=512 3 | chunksize_in_b=$[chunksize_in_kib*1024] 4 | array_data_size_in_kib=$[chunksize_in_kib*(number_of_disks-2)*number_of_disks] 5 | array_data_size_in_b=$[array_data_size_in_kib*1024] 6 | devs="$dev0 $dev1 $dev2 $dev3 $dev4" 7 | 8 | # default 2048 sectors 9 | data_offset_in_kib=$[2048/2] 10 | 11 | # make a raid5 from a file 12 | dd if=/dev/urandom of=/tmp/RandFile bs=1024 count=$array_data_size_in_kib 13 | 14 | # perform test for every layout 15 | layouts="ls rs la ra parity-first ddf-zero-restart ddf-N-restart ddf-N-continue \ 16 | left-asymmetric-6 right-asymmetric-6 left-symmetric-6 \ 17 | right-symmetric-6 parity-first-6" 18 | 19 | for layout in $layouts 20 | do 21 | mdadm -CR $md0 -l6 --layout=$layout -n$number_of_disks -c $chunksize_in_kib $devs 22 | dd if=/tmp/RandFile of=$md0 bs=1024 count=$array_data_size_in_kib 23 | blockdev --flushbufs $md0; sync 24 | check wait 25 | blockdev --flushbufs $devs; sync 26 | echo 3 > /proc/sys/vm/drop_caches 27 | cmp -s -n $array_data_size_in_b $md0 /tmp/RandFile || { echo sanity cmp failed ; exit 2; } 28 | 29 | # wipe out 5 chunks on each device 30 | dd if=/dev/urandom of=$dev0 bs=1024 count=$[5*chunksize_in_kib] seek=$[data_offset_in_kib+chunksize_in_kib*0] 31 | dd if=/dev/urandom of=$dev1 bs=1024 count=$[5*chunksize_in_kib] seek=$[data_offset_in_kib+chunksize_in_kib*5] 32 | dd 
if=/dev/urandom of=$dev2 bs=1024 count=$[5*chunksize_in_kib] seek=$[data_offset_in_kib+chunksize_in_kib*10] 33 | dd if=/dev/urandom of=$dev3 bs=1024 count=$[5*chunksize_in_kib] seek=$[data_offset_in_kib+chunksize_in_kib*15] 34 | dd if=/dev/urandom of=$dev4 bs=1024 count=$[5*chunksize_in_kib] seek=$[data_offset_in_kib+chunksize_in_kib*20] 35 | 36 | blockdev --flushbufs $devs; sync 37 | echo 3 > /proc/sys/vm/drop_caches 38 | 39 | $dir/raid6check $md0 0 0 2>&1 | grep -qs "Error" || { echo should detect errors; exit 2; } 40 | 41 | $dir/raid6check $md0 0 0 autorepair > /dev/null || { echo repair failed; exit 2; } 42 | blockdev --flushbufs $md0 $devs; sync 43 | echo 3 > /proc/sys/vm/drop_caches 44 | 45 | $dir/raid6check $md0 0 0 2>&1 | grep -qs "Error" && { echo errors detected; exit 2; } 46 | cmp -s -n $array_data_size_in_b $md0 /tmp/RandFile || { echo cmp failed ; exit 2; } 47 | 48 | mdadm -S $md0 49 | done 50 | -------------------------------------------------------------------------------- /tests/19raid6auto-repair.broken: -------------------------------------------------------------------------------- 1 | always fails 2 | 3 | Fails with: 4 | 5 | "should detect errors" 6 | -------------------------------------------------------------------------------- /tests/19raid6check: -------------------------------------------------------------------------------- 1 | # 2 | # Confirm that raid6check handles all RAID6 layouts. 3 | # Try both 4 and 5 devices. 4 | 5 | layouts='ls rs la ra' 6 | lv=`uname -r` 7 | if expr $lv '>=' 2.6.30 > /dev/null 8 | then 9 | layouts="$layouts parity-first ddf-zero-restart ddf-N-restart ddf-N-continue \ 10 | left-asymmetric-6 right-asymmetric-6 left-symmetric-6 right-symmetric-6 parity-first-6" 11 | fi 12 | 13 | for layout in $layouts 14 | do 15 | for devs in 4 5 16 | do 17 | dl="$dev0 $dev1 $dev2 $dev3" 18 | if [ $devs = 5 ]; then dl="$dl $dev4"; fi 19 | 20 | mdadm -CR $md0 -l6 --layout $layout -n$devs $dl 21 | check wait 22 | tar cf - /etc > $md0 23 | ./raid6check $md0 0 0 | grep 'Error detected' && exit 1 24 | mdadm -S $md0 25 | done 26 | done 27 | 28 | -------------------------------------------------------------------------------- /tests/19raid6repair: -------------------------------------------------------------------------------- 1 | number_of_disks=4 2 | chunksize_in_kib=512 3 | chunksize_in_b=$[chunksize_in_kib*1024] 4 | array_data_size_in_kib=$[chunksize_in_kib*(number_of_disks-2)*number_of_disks] 5 | array_data_size_in_b=$[array_data_size_in_kib*1024] 6 | devs="$dev1 $dev2 $dev3 $dev4" 7 | 8 | # default 2048 sectors 9 | data_offset_in_kib=$[2048/2] 10 | 11 | layouts="ls rs la ra parity-first ddf-zero-restart ddf-N-restart ddf-N-continue \ 12 | left-asymmetric-6 right-asymmetric-6 left-symmetric-6 \ 13 | right-symmetric-6 parity-first-6" 14 | 15 | for layout in $layouts 16 | do 17 | for failure in "$dev3 3 3 2" "$dev3 3 2 3" "$dev3 3 2 1" "$dev3 3 2 0" \ 18 | "$dev4 3 3 0" "$dev4 3 3 1" "$dev4 3 3 2" \ 19 | "$dev1 3 0 1" "$dev1 3 0 2" "$dev1 3 0 3" \ 20 | "$dev2 3 1 0" "$dev2 3 1 2" "$dev2 3 1 3" ; do 21 | failure_split=( $failure ) 22 | device_with_error=${failure_split[0]} 23 | stripe_with_error=${failure_split[1]} 24 | repair_params="$stripe_with_error ${failure_split[2]} ${failure_split[3]}" 25 | start_of_errors_in_kib=$[data_offset_in_kib+chunksize_in_kib*stripe_with_error] 26 | 27 | # make a raid5 from a file 28 | dd if=/dev/urandom of=/tmp/RandFile bs=1024 count=$array_data_size_in_kib 29 | mdadm -CR $md0 -l6 --layout=$layout -n$number_of_disks -c 
$chunksize_in_kib $devs 30 | dd if=/tmp/RandFile of=$md0 bs=1024 count=$array_data_size_in_kib 31 | blockdev --flushbufs $md0; sync 32 | 33 | check wait 34 | blockdev --flushbufs $devs; sync 35 | echo 3 > /proc/sys/vm/drop_caches 36 | cmp -s -n $array_data_size_in_b $md0 /tmp/RandFile || { echo sanity cmp failed ; exit 2; } 37 | 38 | dd if=/dev/urandom of=$device_with_error bs=1024 count=$chunksize_in_kib seek=$start_of_errors_in_kib 39 | blockdev --flushbufs $device_with_error; sync 40 | echo 3 > /proc/sys/vm/drop_caches 41 | 42 | $dir/raid6check $md0 0 0 2>&1 | grep -qs "Error" || { echo should detect errors; exit 2; } 43 | 44 | $dir/raid6check $md0 repair $repair_params > /dev/null || { echo repair failed; exit 2; } 45 | blockdev --flushbufs $md0 $devs; sync 46 | echo 3 > /proc/sys/vm/drop_caches 47 | 48 | $dir/raid6check $md0 0 0 2>&1 | grep -qs "Error" && { echo errors detected; exit 2; } 49 | cmp -s -n $array_data_size_in_b $md0 /tmp/RandFile || { echo cmp failed ; exit 2; } 50 | 51 | mdadm -S $md0 52 | udevadm settle 53 | sync 54 | echo 3 > /proc/sys/vm/drop_caches 55 | done 56 | done 57 | -------------------------------------------------------------------------------- /tests/19raid6repair.broken: -------------------------------------------------------------------------------- 1 | always fails 2 | 3 | Fails with: 4 | 5 | "should detect errors" 6 | -------------------------------------------------------------------------------- /tests/19repair-does-not-destroy: -------------------------------------------------------------------------------- 1 | number_of_disks=7 2 | chunksize_in_kib=512 3 | array_data_size_in_kib=$[chunksize_in_kib*(number_of_disks-2)*number_of_disks] 4 | array_data_size_in_b=$[array_data_size_in_kib*1024] 5 | devs="$dev0 $dev1 $dev2 $dev3 $dev4 $dev5 $dev6" 6 | 7 | dd if=/dev/urandom of=/tmp/RandFile bs=1024 count=$array_data_size_in_kib 8 | mdadm -CR $md0 -l6 -n$number_of_disks -c $chunksize_in_kib $devs 9 | dd if=/tmp/RandFile of=$md0 bs=1024 count=$array_data_size_in_kib 10 | blockdev --flushbufs $md0; sync 11 | check wait 12 | blockdev --flushbufs $devs; sync 13 | echo 3 > /proc/sys/vm/drop_caches 14 | $dir/raid6check $md0 repair 1 2 3 > /dev/null # D D 15 | $dir/raid6check $md0 repair 8 2 5 > /dev/null # D P 16 | $dir/raid6check $md0 repair 15 4 6 > /dev/null # D Q 17 | $dir/raid6check $md0 repair 22 5 6 > /dev/null # P Q 18 | $dir/raid6check $md0 repair 3 4 0 > /dev/null # Q D 19 | $dir/raid6check $md0 repair 3 3 1 > /dev/null # P D 20 | $dir/raid6check $md0 repair 6 4 5 > /dev/null # D /dev/null # D>D 22 | blockdev --flushbufs $devs; sync 23 | echo 3 > /proc/sys/vm/drop_caches 24 | $dir/raid6check $md0 0 0 2>&1 | grep -qs "Error" && { echo errors detected; exit 2; } 25 | cmp -s -n $array_data_size_in_b $md0 /tmp/RandFile || { echo should not mess up correct stripe ; exit 2; } 26 | 27 | mdadm -S $md0 28 | udevadm settle 29 | -------------------------------------------------------------------------------- /tests/20raid5journal: -------------------------------------------------------------------------------- 1 | # check write journal of raid456 2 | 3 | # test --detail 4 | test_detail_shows_journal() { 5 | mdadm -D $1 | grep journal || { 6 | echo >&2 "ERROR --detail does show journal device!"; mdadm -D $1 ; exit 1; } 7 | } 8 | 9 | # test --examine 10 | test_examine_shows_journal() { 11 | mdadm -E $1 | grep Journal || { 12 | echo >&2 "ERROR --examine does show Journal device!"; mdadm -E $1 ; exit 1; } 13 | } 14 | 15 | # test --create 16 | 
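# (Editor's aside: illustrative sketch only, never called by this test.) The
# two helpers above grep --detail/--examine output for the journal, and the
# function below creates the journaled array. A manual spot-check of a write
# journal, assuming $md0 and $dev0..$dev4 come from the test harness, might
# look like this:
example_manual_journal_check() {
	mdadm -CR $md0 -l5 -n4 $dev0 $dev1 $dev2 $dev3 --write-journal $dev4
	mdadm --wait $md0 || true
	mdadm -D $md0 | grep -i journal        # journal device listed in --detail
	mdadm -E $dev4 | grep -i journal       # superblock role shows Journal
	cat /sys/block/${md0##*/}/md/journal_mode    # write-through or write-back
	mdadm -S $md0
}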
create_with_journal_and_stop() { 17 | mdadm -CR $md0 -l5 -n4 $dev0 $dev1 $dev2 $dev3 --write-journal $dev4 18 | check wait 19 | tar cf - /etc > $md0 20 | ./raid6check $md0 0 0 | grep 'Error detected' && exit 1 21 | test_detail_shows_journal $md0 22 | test_examine_shows_journal $dev4 23 | mdadm -S $md0 24 | } 25 | 26 | # test --assemble 27 | test_assemble() { 28 | create_with_journal_and_stop 29 | if mdadm -A $md0 $dev0 $dev1 $dev2 $dev3 30 | then 31 | echo >&2 "ERROR should return 1 when journal is missing!"; cat /proc/mdstat ; exit 1; 32 | fi 33 | mdadm -S $md0 34 | 35 | mdadm -A $md0 $dev0 $dev1 $dev2 $dev3 --force 36 | check readonly 37 | mdadm -S $md0 38 | } 39 | 40 | # test --incremental 41 | test_incremental() { 42 | create_with_journal_and_stop 43 | for d in $dev0 $dev1 $dev2 $dev3 44 | do 45 | mdadm -I $d 46 | done 47 | check inactive 48 | mdadm -I $dev4 49 | check raid5 50 | mdadm -S $md0 51 | 52 | # test --incremental with journal missing 53 | for d in $dev0 $dev1 $dev2 $dev3 54 | do 55 | mdadm -I $d 56 | done 57 | mdadm -R $md0 58 | check readonly 59 | mdadm -S $md0 60 | } 61 | 62 | create_with_journal_and_stop 63 | test_assemble 64 | test_incremental 65 | -------------------------------------------------------------------------------- /tests/20raid5journal.broken: -------------------------------------------------------------------------------- 1 | always fail 2 | 3 | ++ /usr/sbin/mdadm -I /dev/loop4 4 | ++ rv=0 5 | ++ case $* in 6 | ++ cat /var/tmp/stderr 7 | mdadm: /dev/loop4 attached to /dev/md/0_0, which has been started. 8 | ++ return 0 9 | ++ check raid5 10 | ++ case $1 in 11 | ++ grep -sq 'active raid5 ' /proc/mdstat 12 | ++ die 'active raid5 not found' 13 | ++ echo -e '\n\tERROR: active raid5 not found \n' 14 | 15 | ERROR: active raid5 not found 16 | 17 | ++ save_log fail 18 | -------------------------------------------------------------------------------- /tests/21raid5cache: -------------------------------------------------------------------------------- 1 | # check data integrity with raid5 write back cache 2 | 3 | # create a 4kB random file and 4 files each with a 1kB chunk of the random file: 4 | # randfile: ABCD randchunk[0-3]: A B C D 5 | # 6 | # then create another random 1kB chunk E, and a new random page with A, B, E, D: 7 | # randchunk4: E newrandfile: ABED 8 | create_random_data() { 9 | dd if=/dev/urandom of=/tmp/randfile bs=4k count=1 10 | for x in {0..3} 11 | do 12 | dd if=/tmp/randfile of=/tmp/randchunk$x bs=1k count=1 skip=$x count=1 13 | done 14 | 15 | dd if=/dev/urandom of=/tmp/randchunk4 bs=1k count=1 16 | 17 | rm /tmp/newrandfile 18 | for x in 0 1 4 3 19 | do 20 | cat /tmp/randchunk$x >> /tmp/newrandfile 21 | done 22 | } 23 | 24 | # create array, $1 could be 5 for raid5 and 6 for raid6 25 | create_array() { 26 | if [ $1 -lt 5 -o $1 -gt 6 ] 27 | then 28 | echo wrong array type $1 29 | exit 2 30 | fi 31 | 32 | mdadm -CR $md0 -c4 -l5 -n10 $dev0 $dev1 $dev2 $dev3 $dev4 $dev5 $dev6 $dev11 $dev8 $dev9 --write-journal $dev10 33 | check wait 34 | echo write-back > /sys/block/md0/md/journal_mode 35 | } 36 | 37 | restart_array_write_back() { 38 | mdadm -S $md0 39 | mdadm -A $md0 $dev0 $dev1 $dev2 $dev3 $dev4 $dev5 $dev6 $dev11 $dev8 $dev9 $dev10 40 | echo write-back > /sys/block/md0/md/journal_mode 41 | } 42 | 43 | # compare the first page of md0 with file in $1 44 | cmp_first_page() { 45 | cmp -n 4096 $1 $md0 || { echo cmp failed ; exit 2 ; } 46 | } 47 | 48 | # write 3 pages after the first page of md0 49 | write_three_pages() { 50 | for x in {1..3} 51 | do 52 
| dd if=/dev/urandom of=$md0 bs=4k count=1 seek=$x count=1 53 | done 54 | } 55 | 56 | # run_test 57 | run_test() { 58 | create_random_data 59 | create_array $1 60 | 61 | if [ $2 == yes ] 62 | then 63 | mdadm --fail $md0 $dev0 64 | fi 65 | 66 | dd if=/tmp/randfile of=$md0 bs=4k count=1 67 | restart_array_write_back 68 | cmp_first_page /tmp/randfile 69 | restart_array_write_back 70 | write_three_pages 71 | cmp_first_page /tmp/randfile 72 | 73 | 74 | dd if=/tmp/randchunk4 of=/dev/md0 bs=1k count=1 seek=2 75 | restart_array_write_back 76 | cmp_first_page /tmp/newrandfile 77 | restart_array_write_back 78 | write_three_pages 79 | cmp_first_page /tmp/newrandfile 80 | 81 | mdadm -S $md0 82 | } 83 | 84 | run_test 5 no 85 | run_test 5 yes 86 | run_test 6 no 87 | run_test 6 yes 88 | -------------------------------------------------------------------------------- /tests/23rdev-lifetime: -------------------------------------------------------------------------------- 1 | devname=${dev0##*/} 2 | devt=`cat /sys/block/$devname/dev` 3 | pid="" 4 | runtime=2 5 | 6 | clean_up_test() { 7 | kill -9 $pid 8 | echo clear > /sys/block/md0/md/array_state 9 | } 10 | 11 | trap 'clean_up_test' EXIT 12 | 13 | add_by_sysfs() { 14 | while true; do 15 | echo $devt > /sys/block/md0/md/new_dev 16 | done 17 | } 18 | 19 | remove_by_sysfs(){ 20 | while true; do 21 | echo remove > /sys/block/md0/md/dev-${devname}/state 22 | done 23 | } 24 | 25 | echo md0 > /sys/module/md_mod/parameters/new_array || die "create md0 failed" 26 | 27 | add_by_sysfs & 28 | pid="$pid $!" 29 | 30 | remove_by_sysfs & 31 | pid="$pid $!" 32 | 33 | sleep $runtime 34 | exit 0 35 | -------------------------------------------------------------------------------- /tests/24raid10deadlock: -------------------------------------------------------------------------------- 1 | devs="$dev0 $dev1 $dev2 $dev3" 2 | runtime=120 3 | pid="" 4 | action_pid="" 5 | 6 | set_up_injection() 7 | { 8 | echo -1 > /sys/kernel/debug/fail_make_request/times 9 | echo 1 > /sys/kernel/debug/fail_make_request/probability 10 | echo 0 > /sys/kernel/debug/fail_make_request/verbose 11 | echo 1 > /sys/block/${1##*/}/make-it-fail 12 | } 13 | 14 | clean_up_injection() 15 | { 16 | echo 0 > /sys/block/${1##*/}/make-it-fail 17 | echo 0 > /sys/kernel/debug/fail_make_request/times 18 | echo 0 > /sys/kernel/debug/fail_make_request/probability 19 | echo 2 > /sys/kernel/debug/fail_make_request/verbose 20 | } 21 | 22 | test_rdev() 23 | { 24 | while true; do 25 | mdadm -f $md0 $1 &> /dev/null 26 | mdadm -r $md0 $1 &> /dev/null 27 | mdadm --zero-superblock $1 &> /dev/null 28 | mdadm -a $md0 $1 &> /dev/null 29 | sleep $2 30 | done 31 | } 32 | 33 | test_write_action() 34 | { 35 | while true; do 36 | echo frozen > /sys/block/md0/md/sync_action 37 | echo idle > /sys/block/md0/md/sync_action 38 | sleep 0.1 39 | done 40 | } 41 | 42 | set_up_test() 43 | { 44 | fio -h &> /dev/null || die "fio not found" 45 | 46 | # create a simple raid10 47 | mdadm -Cv -R -n 4 -l10 $md0 $devs || die "create raid10 failed" 48 | } 49 | 50 | clean_up_test() 51 | { 52 | clean_up_injection $dev0 53 | pkill -9 fio 54 | kill -9 $pid 55 | kill -9 $action_pid 56 | 57 | sleep 2 58 | 59 | if ps $action_pid | tail -1 | awk '{print $3}' | grep D; then 60 | die "thread that is writing sysfs is stuck in D state, deadlock is triggered" 61 | fi 62 | mdadm -S $md0 63 | } 64 | 65 | cat /sys/kernel/debug/fail_make_request/times || skip "fault injection is not enabled" 66 | 67 | trap 'clean_up_test' EXIT 68 | 69 | set_up_test || die "set up test 
failed" 70 | 71 | # backgroup io pressure 72 | fio -filename=$md0 -rw=randwrite -direct=1 -name=test -bs=4k -numjobs=16 -iodepth=16 & 73 | 74 | # trigger add/remove device by io failure 75 | set_up_injection $dev0 76 | test_rdev $dev0 2 & 77 | pid="$pid $!" 78 | 79 | # add/remove device directly 80 | test_rdev $dev3 10 & 81 | pid="$pid $!" 82 | 83 | test_write_action & 84 | action_pid="$!" 85 | 86 | sleep $runtime 87 | 88 | exit 0 89 | -------------------------------------------------------------------------------- /tests/24raid456deadlock: -------------------------------------------------------------------------------- 1 | devs="$dev0 $dev1 $dev2 $dev3 $dev4 $dev5" 2 | runtime=120 3 | pid="" 4 | old=`cat /proc/sys/vm/dirty_background_ratio` 5 | 6 | test_write_action() 7 | { 8 | while true; do 9 | echo check > /sys/block/md0/md/sync_action &> /dev/null 10 | sleep 0.1 11 | echo idle > /sys/block/md0/md/sync_action &> /dev/null 12 | done 13 | } 14 | 15 | test_write_back() 16 | { 17 | fio -filename=$md0 -bs=4k -rw=write -numjobs=1 -name=test \ 18 | -time_based -runtime=$runtime &> /dev/null 19 | } 20 | 21 | set_up_test() 22 | { 23 | fio -h &> /dev/null || die "fio not found" 24 | 25 | # create a simple raid6 26 | mdadm -Cv -R -n 6 -l6 $md0 $devs --assume-clean || die "create raid6 failed" 27 | 28 | # trigger dirty pages write back 29 | echo 0 > /proc/sys/vm/dirty_background_ratio 30 | } 31 | 32 | clean_up_test() 33 | { 34 | echo $old > /proc/sys/vm/dirty_background_ratio 35 | 36 | pkill -9 fio 37 | kill -9 $pid 38 | 39 | sleep 2 40 | 41 | if ps $pid | tail -1 | awk '{print $3}' | grep D; then 42 | die "thread that is writing sysfs is stuck in D state, deadlock is triggered" 43 | fi 44 | mdadm -S $md0 45 | } 46 | 47 | trap 'clean_up_test' EXIT 48 | 49 | set_up_test || die "set up test failed" 50 | 51 | test_write_back & 52 | 53 | test_write_action & 54 | pid="$!" 
55 | 56 | sleep $runtime 57 | 58 | exit 0 59 | -------------------------------------------------------------------------------- /tests/25raid456-recovery-while-reshape: -------------------------------------------------------------------------------- 1 | devs="$dev0 $dev1 $dev2" 2 | 3 | set_up_test() 4 | { 5 | mdadm -Cv -R -n 3 -l5 $md0 $devs --assume-clean --size=10M || die "create array failed" 6 | mdadm -a $md0 $dev3 $dev4 || die "failed to bind new disk to array" 7 | echo 1000 > /sys/block/md0/md/sync_speed_max 8 | } 9 | 10 | clean_up_test() 11 | { 12 | mdadm -S $md0 13 | } 14 | 15 | trap 'clean_up_test' EXIT 16 | 17 | set_up_test || die "set up test failed" 18 | 19 | # trigger reshape 20 | mdadm --grow -l 6 $md0 21 | sleep 2 22 | 23 | # set up replacement 24 | echo frozen > /sys/block/md0/md/sync_action 25 | echo want_replacement > /sys/block/md0/md/rd0/state 26 | echo reshape > /sys/block/md0/md/sync_action 27 | sleep 2 28 | 29 | # reassemble array 30 | mdadm -S $md0 || die "can't stop array" 31 | mdadm --assemble $md0 $devs $dev3 $dev4 || die "can't assemble array" 32 | 33 | exit 0 34 | -------------------------------------------------------------------------------- /tests/25raid456-reshape-corrupt-data: -------------------------------------------------------------------------------- 1 | devs="$dev0 $dev1 $dev2" 2 | 3 | set_up_test() 4 | { 5 | mdadm -Cv -R -n 3 -l5 $md0 $devs --size=10M || die "create array failed" 6 | mdadm -a $md0 $dev3 || die "failed to bind new disk to array" 7 | mkfs.ext4 -F $md0 || die "mkfs failed" 8 | e2fsck -pvf $md0 || die "check fs failed" 9 | } 10 | 11 | clean_up_test() 12 | { 13 | mdadm -S $md0 14 | } 15 | 16 | trap 'clean_up_test' EXIT 17 | 18 | set_up_test || die "set up test failed" 19 | 20 | # trigger reshape 21 | echo 1000 > /sys/block/md0/md/sync_speed_max 22 | mdadm --grow -l 6 $md0 23 | sleep 2 24 | 25 | # stop and start reshape 26 | echo frozen > /sys/block/md0/md/sync_action 27 | echo system > /sys/block/md0/md/sync_speed_max 28 | echo reshape > /sys/block/md0/md/sync_action 29 | 30 | mdadm -W $md0 31 | 32 | # check if data is corrupted 33 | e2fsck -vn $md0 || die "data is corrupted after reshape" 34 | 35 | exit 0 36 | -------------------------------------------------------------------------------- /tests/25raid456-reshape-deadlock: -------------------------------------------------------------------------------- 1 | devs="$dev0 $dev1 $dev2" 2 | 3 | set_up_test() 4 | { 5 | mdadm -Cv -R -n 3 -l5 $md0 $devs --size=10M || die "create array failed" 6 | mdadm -a $md0 $dev3 || die "failed to bind new disk to array" 7 | echo 1000 > /sys/block/md0/md/sync_speed_max 8 | } 9 | 10 | clean_up_test() 11 | { 12 | echo idle > /sys/block/md0/md/sync_action 13 | mdadm -S $md0 14 | } 15 | 16 | trap 'clean_up_test' EXIT 17 | 18 | set_up_test || die "set up test failed" 19 | 20 | # trigger reshape 21 | mdadm --grow -l 6 $md0 22 | sleep 2 23 | 24 | # stop reshape 25 | echo frozen > /sys/block/md0/md/sync_action 26 | 27 | # read across reshape 28 | dd if=$md0 of=/dev/null bs=1M count=100 iflag=direct &> /dev/null & 29 | sleep 2 30 | 31 | # suspend array 32 | echo 1 > /sys/block/md0/md/suspend_lo 33 | 34 | exit 0 35 | -------------------------------------------------------------------------------- /tests/25raid456-reshape-while-recovery: -------------------------------------------------------------------------------- 1 | devs="$dev0 $dev1 $dev2" 2 | 3 | set_up_test() 4 | { 5 | mdadm -Cv -R -n 3 -l5 $md0 $devs --assume-clean --size=10M || die "create array failed" 6
| mdadm -a $md0 $dev3 $dev4 || die "failed to bind new disk to array" 7 | echo 1000 > /sys/block/md0/md/sync_speed_max 8 | } 9 | 10 | clean_up_test() 11 | { 12 | mdadm -S $md0 13 | } 14 | 15 | trap 'clean_up_test' EXIT 16 | 17 | set_up_test || die "set up test failed" 18 | 19 | # set up replacement 20 | echo want_replacement > /sys/block/md0/md/rd0/state 21 | sleep 2 22 | 23 | # trigger reshape 24 | echo frozen > /sys/block/md0/md/sync_action 25 | mdadm --grow -l 6 $md0 26 | sleep 2 27 | 28 | # reassemeble array 29 | mdadm -S $md0 || die "can't stop array" 30 | mdadm --assemble $md0 $devs $dev3 $dev4 || die "can't assemble array" 31 | 32 | exit 0 33 | -------------------------------------------------------------------------------- /tests/25raid456-reshape-while-recovery.broken: -------------------------------------------------------------------------------- 1 | There are multiple issues with this test: 2 | - kernel version dependent 3 | - can fail in multiple ways 4 | 5 | Marking this test as broken, so it's not executed by CI. 6 | -------------------------------------------------------------------------------- /tests/ToTest: -------------------------------------------------------------------------------- 1 | 2 | multipath!! 3 | 4 | add/remove/fail 5 | raid1 DONE 6 | raid5 DONE 7 | raid6/10 needed?? 8 | 9 | assemble 10 | by devices DONE 11 | by uuid DONE 12 | by superminor DONE 13 | by config file DONE 14 | 15 | various --updates DONE (not sparc2.2 or summaries) 16 | 17 | stop 18 | --scan 19 | 20 | readonly/readwrite 21 | 22 | bitmap 23 | separate file 24 | internal 25 | filename in config file 26 | 27 | examine 28 | --scan 29 | --brief 30 | 31 | detail 32 | 33 | grow: 34 | size 35 | raid1/5/6 DONE 36 | devices 37 | raid1 add DONE 38 | raid1 shrink DONE 39 | 40 | '--quiet' option, and remove "" 41 | '--name' option fo v1, and configfile etc... 42 | 43 | faulty 44 | errors in raid1/5/6 45 | -------------------------------------------------------------------------------- /tests/skiptests: -------------------------------------------------------------------------------- 1 | casename:seconds 2 | 01raid6integ:1732 3 | 01replace:396 4 | 07layouts:836 5 | 11spare-migration:1140 6 | 12imsm-r0_2d-grow-r0_5d:218 7 | 13imsm-r0_r0_2d-grow-r0_r0_4d:218 8 | 13imsm-r0_r0_2d-grow-r0_r0_5d:246 9 | 19raid6check:268 10 | -------------------------------------------------------------------------------- /tests/templates/names_template: -------------------------------------------------------------------------------- 1 | # NAME is optional. Testing with native 1.2 superblock. 2 | function names_create() { 3 | local DEVNAME=$1 4 | local NAME=$2 5 | local NEG_TEST=$3 6 | 7 | if [[ -z "$NAME" ]]; then 8 | mdadm -CR "$DEVNAME" -l0 -n 1 $dev0 --force 9 | else 10 | mdadm -CR "$DEVNAME" --name="$NAME" --metadata=1.2 -l0 -n 1 $dev0 --force 11 | fi 12 | 13 | if [[ "$NEG_TEST" == "true" ]]; then 14 | [[ "$?" == "0" ]] && return 0 15 | echo "Negative verification failed" 16 | exit 1 17 | fi 18 | 19 | if [[ "$?" != "0" ]]; then 20 | echo "Cannot create device." 21 | exit 1 22 | fi 23 | } 24 | 25 | # Three properties to check: 26 | # - devnode name 27 | # - link in /dev/md/ (MD_DEVNAME property from --detail --export) 28 | # - name in metadata (MD_NAME property from --detail --export)- that works only with 1.2 sb. 
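# (Editor's aside: illustration only, not used by the template.) names_verify()
# below keys off the MD_DEVNAME and MD_NAME keys printed by
# "mdadm --detail --export". For a non-foreign native 1.2 array the relevant
# part of that output looks roughly like:
#
#   MD_LEVEL=raid0
#   MD_DEVNAME=<link name under /dev/md/>
#   MD_NAME=<hostname>:<name stored in the superblock>
#   MD_UUID=<array uuid>
#
# so a single property can be queried with, for example:
#   mdadm -D --export "$DEVNODE_NAME" | grep '^MD_NAME='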
29 | function names_verify() { 30 | local DEVNODE_NAME="$1" 31 | local WANTED_LINK="$2" 32 | local WANTED_NAME="$3" 33 | local EXPECTED="" 34 | 35 | # We don't know what is saved in metadata, but we know what to expect. Therfore check if 36 | # expecation would be foreign (no hostname information). 37 | is_raid_foreign $WANTED_NAME 38 | 39 | local RES="$(mdadm -D --export $DEVNODE_NAME | grep MD_DEVNAME)" 40 | if [[ "$?" != "0" ]]; then 41 | echo "Cannot get details for $DEVNODE_NAME - unexpected devnode." 42 | exit 1 43 | fi 44 | 45 | if [[ "$WANTED_LINK" != "empty" ]]; then 46 | EXPECTED="MD_DEVNAME=$WANTED_LINK" 47 | 48 | if [ ! -b /dev/md/$WANTED_LINK ]; then 49 | echo "/dev/md/$WANTED_LINK doesn't exit" 50 | exit 1 51 | fi 52 | fi 53 | 54 | if [[ "$RES" != "$EXPECTED" ]]; then 55 | echo "$RES doesn't match $EXPECTED." 56 | exit 1 57 | fi 58 | 59 | local RES="$(mdadm -D --export $DEVNODE_NAME | grep MD_NAME)" 60 | if [[ "$?" != "0" ]]; then 61 | echo "Cannot get metadata from $dev0." 62 | exit 1 63 | fi 64 | 65 | if [ $is_foreign == "no" ]; then 66 | EXPECTED="MD_NAME=$(hostname):$WANTED_NAME" 67 | else 68 | EXPECTED="MD_NAME=$WANTED_NAME" 69 | fi 70 | if [[ "$RES" != "$EXPECTED" ]]; then 71 | echo "$RES doesn't match $EXPECTED." 72 | exit 1 73 | fi 74 | } 75 | 76 | # Generate ARRAYLINE for tested array. 77 | names_make_conf() { 78 | local UUID="$1" 79 | local WANTED_DEVNAME="$2" 80 | local CONF="$3" 81 | 82 | local LINE="ARRAY metadata=1.2 UUID=$UUID" 83 | 84 | if [[ "$WANTED_DEVNAME" != "empty" ]]; then 85 | LINE="$LINE $WANTED_DEVNAME" 86 | fi 87 | 88 | echo $LINE > $CONF 89 | } 90 | -------------------------------------------------------------------------------- /udev-md-clustered-confirm-device.rules: -------------------------------------------------------------------------------- 1 | # do not edit this file, it will be overwritten on update 2 | 3 | SUBSYSTEM!="block", GOTO="clustermd_end" 4 | 5 | # handle md arrays 6 | KERNEL!="md*", GOTO="clustermd_end" 7 | ENV{DEVTYPE}!="disk", GOTO="clustermd_end" 8 | ACTION!="change", GOTO="clustermd_end" 9 | ENV{EVENT}!="ADD_DEVICE", GOTO="clustermd_end" 10 | ENV{DEVICE_UUID}!="?*", GOTO="clustermd_end" 11 | ENV{RAID_DISK}!="?*", GOTO="clustermd_end" 12 | 13 | # Based on the received UUID, node confirms the device if 14 | # it is found by blkid, otherwise the node reports it is 15 | # missing. 16 | PROGRAM="BINDIR/blkid -o device -t UUID_SUB=$env{DEVICE_UUID}", ENV{.md.newdevice} = "$result" 17 | 18 | ENV{.md.newdevice}!="", RUN+="BINDIR/mdadm --manage $env{DEVNAME} --cluster-confirm $env{RAID_DISK}:$env{.md.newdevice}" 19 | ENV{.md.newdevice}=="", RUN+="BINDIR/mdadm --manage $env{DEVNAME} --cluster-confirm $env{RAID_DISK}:missing" 20 | 21 | LABEL="clustermd_end" 22 | -------------------------------------------------------------------------------- /udev-md-raid-arrays.rules: -------------------------------------------------------------------------------- 1 | # do not edit this file, it will be overwritten on update 2 | 3 | SUBSYSTEM!="block", GOTO="md_end" 4 | 5 | # handle md arrays 6 | ACTION=="remove", GOTO="md_end" 7 | KERNEL!="md*", GOTO="md_end" 8 | 9 | # partitions have no md/{array_state,metadata_version}, but should not 10 | # for that reason be ignored. 11 | ENV{DEVTYPE}=="partition", GOTO="md_ignore_state" 12 | 13 | # container devices have a metadata version of e.g. 
'external:ddf' and 14 | # never leave state 'inactive' 15 | ATTR{md/metadata_version}=="external:[A-Za-z]*", ATTR{md/array_state}=="inactive", GOTO="md_ignore_state" 16 | TEST!="md/array_state", ENV{SYSTEMD_READY}="0", GOTO="md_end" 17 | ATTR{md/array_state}=="clear*|inactive", ENV{SYSTEMD_READY}="0", GOTO="md_end" 18 | LABEL="md_ignore_state" 19 | 20 | IMPORT{program}="BINDIR/mdadm --detail --no-devices --export $devnode" 21 | ENV{DEVTYPE}=="disk", ENV{MD_NAME}=="?*", SYMLINK+="disk/by-id/md-name-$env{MD_NAME}", OPTIONS+="string_escape=replace" 22 | ENV{DEVTYPE}=="disk", ENV{MD_UUID}=="?*", SYMLINK+="disk/by-id/md-uuid-$env{MD_UUID}" 23 | ENV{DEVTYPE}=="disk", ENV{MD_DEVNAME}=="?*", SYMLINK+="md/$env{MD_DEVNAME}" 24 | ENV{DEVTYPE}=="partition", ENV{MD_NAME}=="?*", SYMLINK+="disk/by-id/md-name-$env{MD_NAME}-part%n", OPTIONS+="string_escape=replace" 25 | ENV{DEVTYPE}=="partition", ENV{MD_UUID}=="?*", SYMLINK+="disk/by-id/md-uuid-$env{MD_UUID}-part%n" 26 | ENV{DEVTYPE}=="partition", ENV{MD_DEVNAME}=="*[^0-9]", SYMLINK+="md/$env{MD_DEVNAME}%n" 27 | ENV{DEVTYPE}=="partition", ENV{MD_DEVNAME}=="*[0-9]", SYMLINK+="md/$env{MD_DEVNAME}p%n" 28 | 29 | IMPORT{builtin}="blkid" 30 | OPTIONS+="link_priority=100" 31 | OPTIONS+="watch" 32 | OPTIONS+="db_persist" 33 | ENV{ID_FS_USAGE}=="filesystem|other|crypto", ENV{ID_FS_UUID_ENC}=="?*", SYMLINK+="disk/by-uuid/$env{ID_FS_UUID_ENC}" 34 | ENV{ID_FS_USAGE}=="filesystem|other", ENV{ID_PART_ENTRY_UUID}=="?*", SYMLINK+="disk/by-partuuid/$env{ID_PART_ENTRY_UUID}" 35 | ENV{ID_FS_USAGE}=="filesystem|other", ENV{ID_FS_LABEL_ENC}=="?*", SYMLINK+="disk/by-label/$env{ID_FS_LABEL_ENC}" 36 | 37 | ENV{MD_LEVEL}=="raid[1-9]*", ENV{SYSTEMD_WANTS}+="mdmonitor.service" 38 | 39 | # Tell systemd to run mdmon for our container, if we need it. 40 | ENV{MD_LEVEL}=="raid[1-9]*", ENV{MD_CONTAINER}=="?*", PROGRAM="/usr/bin/readlink $env{MD_CONTAINER}", ENV{MD_MON_THIS}="%c" 41 | ENV{MD_MON_THIS}=="?*", TEST=="/etc/initrd-release", PROGRAM="/usr/bin/basename $env{MD_MON_THIS}", ENV{SYSTEMD_WANTS}+="mdmon@initrd-%c.service" 42 | ENV{MD_MON_THIS}=="?*", TEST!="/etc/initrd-release", PROGRAM="/usr/bin/basename $env{MD_MON_THIS}", ENV{SYSTEMD_WANTS}+="mdmon@%c.service" 43 | ENV{MD_RESHAPE_ACTIVE}=="True", PROGRAM="/usr/bin/basename $env{MD_MON_THIS}", ENV{SYSTEMD_WANTS}+="mdadm-grow-continue@%c.service" 44 | 45 | LABEL="md_end" 46 | -------------------------------------------------------------------------------- /udev-md-raid-assembly.rules: -------------------------------------------------------------------------------- 1 | # do not edit this file, it will be overwritten on update 2 | 3 | # Don't process any events if anaconda is running as anaconda brings up 4 | # raid devices manually 5 | ENV{ANACONDA}=="?*", GOTO="md_inc_end" 6 | # assemble md arrays 7 | 8 | SUBSYSTEM!="block", GOTO="md_inc_end" 9 | 10 | # skip non-initialized devices 11 | ENV{SYSTEMD_READY}=="0", GOTO="md_inc_end" 12 | 13 | # handle potential components of arrays (the ones supported by md) 14 | # For member devices which are md/dm devices, we don't need to 15 | # handle add event. Because md/dm devices need to do some init jobs. 16 | # Then the change event happens. 17 | # When adding md/dm devices, ID_FS_TYPE can only be linux_raid_member 18 | # after change event happens. 19 | ENV{ID_FS_TYPE}=="linux_raid_member", GOTO="md_inc" 20 | 21 | # "noiswmd" on kernel command line stops mdadm from handling 22 | # "isw" (aka IMSM - Intel RAID). 
23 | # "nodmraid" on kernel command line stops mdadm from handling 24 | # "isw" or "ddf". 25 | IMPORT{cmdline}="noiswmd" 26 | IMPORT{cmdline}="nodmraid" 27 | 28 | ENV{nodmraid}=="?*", GOTO="md_inc_end" 29 | ENV{ID_FS_TYPE}=="ddf_raid_member", GOTO="md_inc" 30 | ENV{noiswmd}=="?*", GOTO="md_inc_end" 31 | ENV{ID_FS_TYPE}=="isw_raid_member", ACTION!="change", GOTO="md_inc" 32 | GOTO="md_inc_end" 33 | 34 | LABEL="md_inc" 35 | 36 | # Bare disks are ready when add event happens, the raid can be assembled. 37 | ACTION=="change", KERNEL!="dm-*|md*", GOTO="md_inc_end" 38 | 39 | # remember you can limit what gets auto/incrementally assembled by 40 | # mdadm.conf(5)'s 'AUTO' and selectively whitelist using 'ARRAY' 41 | ACTION!="remove", IMPORT{program}="BINDIR/mdadm --incremental --export $devnode --offroot $env{DEVLINKS}" 42 | ACTION!="remove", ENV{MD_STARTED}=="*unsafe*", ENV{MD_FOREIGN}=="no", ENV{SYSTEMD_WANTS}+="mdadm-last-resort@$env{MD_DEVICE}.timer" 43 | 44 | ACTION=="remove", ENV{ID_PATH}=="?*", RUN+="BINDIR/mdadm -If $devnode --path $env{ID_PATH}" 45 | ACTION=="remove", ENV{ID_PATH}!="?*", RUN+="BINDIR/mdadm -If $devnode" 46 | 47 | LABEL="md_inc_end" 48 | -------------------------------------------------------------------------------- /udev-md-raid-creating.rules: -------------------------------------------------------------------------------- 1 | # do not edit this file, it will be overwritten on update 2 | # While mdadm is creating an array, it creates a file 3 | # /run/mdadm/creating-mdXXX. If that file exists, then 4 | # the array is not "ready" and we should make sure the 5 | # content is ignored. 6 | 7 | KERNEL=="md*", TEST=="/run/mdadm/creating-$kernel", ENV{SYSTEMD_READY}="0" 8 | -------------------------------------------------------------------------------- /udev.h: -------------------------------------------------------------------------------- 1 | /* 2 | * mdadm - manage Linux "md" devices aka RAID arrays. 3 | * 4 | * Copyright (C) 2022 Mateusz Grzonka 5 | * 6 | * This program is free software; you can redistribute it and/or modify 7 | * it under the terms of the GNU General Public License as published by 8 | * the Free Software Foundation; either version 2 of the License, or 9 | * (at your option) any later version. 10 | * 11 | * This program is distributed in the hope that it will be useful, 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | * GNU General Public License for more details. 15 | * 16 | * You should have received a copy of the GNU General Public License 17 | * along with this program; if not, write to the Free Software 18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 19 | */ 20 | 21 | #ifndef MONITOR_UDEV_H 22 | #define MONITOR_UDEV_H 23 | 24 | enum udev_status { 25 | UDEV_STATUS_ERROR_NO_UDEV = -2, 26 | UDEV_STATUS_ERROR, 27 | UDEV_STATUS_SUCCESS = 0, 28 | UDEV_STATUS_TIMEOUT 29 | }; 30 | 31 | bool udev_is_available(void); 32 | 33 | #ifndef NO_LIBUDEV 34 | enum udev_status udev_wait_for_events(int seconds); 35 | #endif 36 | 37 | enum udev_status udev_block(char *devnm); 38 | void udev_unblock(void); 39 | 40 | #endif 41 | -------------------------------------------------------------------------------- /xmalloc.c: -------------------------------------------------------------------------------- 1 | /* mdadm - manage Linux "md" devices aka RAID arrays. 
2 | * 3 | * Copyright (C) 2001-2009 Neil Brown 4 | * 5 | * 6 | * This program is free software; you can redistribute it and/or modify 7 | * it under the terms of the GNU General Public License as published by 8 | * the Free Software Foundation; either version 2 of the License, or 9 | * (at your option) any later version. 10 | * 11 | * This program is distributed in the hope that it will be useful, 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | * GNU General Public License for more details. 15 | * 16 | * You should have received a copy of the GNU General Public License 17 | * along with this program; if not, write to the Free Software 18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 19 | * 20 | * Author: Neil Brown 21 | * Email: <neilb@suse.de> 22 | */ 23 | 24 | #include "xmalloc.h" 25 | #include "mdadm_status.h" 26 | 27 | #include <stddef.h> 28 | #include <stdio.h> 29 | #include <stdlib.h> 30 | #include <string.h> 31 | 32 | static void *exit_memory_alloc_failure(void) 33 | { 34 | fprintf(stderr, "Memory allocation failure - aborting\n"); 35 | 36 | exit(MDADM_STATUS_MEM_FAIL); 37 | } 38 | 39 | void *xmalloc(size_t len) 40 | { 41 | void *rv = malloc(len); 42 | 43 | if (rv) 44 | return rv; 45 | 46 | return exit_memory_alloc_failure(); 47 | } 48 | 49 | void *xrealloc(void *ptr, size_t len) 50 | { 51 | void *rv = realloc(ptr, len); 52 | 53 | if (rv) 54 | return rv; 55 | 56 | return exit_memory_alloc_failure(); 57 | } 58 | 59 | void *xcalloc(size_t num, size_t size) 60 | { 61 | void *rv = calloc(num, size); 62 | 63 | if (rv) 64 | return rv; 65 | 66 | return exit_memory_alloc_failure(); 67 | } 68 | 69 | char *xstrdup(const char *str) 70 | { 71 | char *rv = strdup(str); 72 | 73 | if (rv) 74 | return rv; 75 | 76 | return exit_memory_alloc_failure(); 77 | } 78 | 79 | void *xmemalign(size_t alignment, size_t size) 80 | { 81 | void *ptr = NULL; 82 | int result = posix_memalign(&ptr, alignment, size); 83 | 84 | if (result == 0) 85 | return ptr; 86 | 87 | return exit_memory_alloc_failure(); 88 | } 89 | -------------------------------------------------------------------------------- /xmalloc.h: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0-only 2 | 3 | #ifndef XMALLOC_H 4 | #define XMALLOC_H 5 | 6 | #include <stdlib.h> 7 | 8 | void *xmalloc(size_t len); 9 | void *xrealloc(void *ptr, size_t len); 10 | void *xcalloc(size_t num, size_t size); 11 | char *xstrdup(const char *str); 12 | void *xmemalign(size_t alignment, size_t size); 13 | 14 | #endif 15 | --------------------------------------------------------------------------------
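Editor's usage sketch for xmalloc.h. The helpers declared above follow a simple contract: they never return NULL, and on allocation failure they terminate the process with MDADM_STATUS_MEM_FAIL via exit_memory_alloc_failure(). The short program below is an illustration only, not repository code; the file name demo.c, the sizes, and the strings are invented, and it assumes it is built together with xmalloc.c and mdadm_status.h from this tree.

/* demo.c - illustrative use of the fail-fast allocators declared in xmalloc.h.
 * Every helper either returns usable memory or exits the process, so callers
 * deliberately perform no NULL checks. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "xmalloc.h"

int main(void)
{
	/* zero-initialised array; never NULL on return */
	int *counters = xcalloc(16, sizeof(*counters));

	/* duplicate a string without checking for NULL */
	char *name = xstrdup("md127");

	/* grow the buffer; the no-NULL contract still holds */
	counters = xrealloc(counters, 32 * sizeof(*counters));

	/* 4 KiB-aligned buffer, posix_memalign() under the hood */
	unsigned char *page = xmemalign(4096, 4096);
	memset(page, 0, 4096);

	printf("%s: first counter = %d\n", name, counters[0]);

	free(page);
	free(name);
	free(counters);
	return 0;
}

Because the only failure path is the shared exit inside xmalloc.c, none of the call sites above carry error handling, which is the point of routing every allocation through these wrappers.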
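Editor's usage sketch for udev.h. The header shown a little earlier in this dump declares a small coordination interface: udev_is_available(), udev_block(), udev_unblock(), and (when built with libudev) udev_wait_for_events(). The sketch below is likewise illustrative rather than repository code; update_array_example() and the md device name are invented, and the sequence simply follows what the declarations suggest: check availability, block, do the work, unblock.

/* Illustrative call pattern for the interface declared in udev.h.
 * update_array_example() and the device name are made up; the real
 * callers live inside mdadm itself. */
#include <stdio.h>
#include <stdbool.h>

#include "udev.h"

static int update_array_example(char *devnm)
{
	bool blocked = false;

	/* only try to coordinate with udev when it is actually running */
	if (udev_is_available()) {
		if (udev_block(devnm) != UDEV_STATUS_SUCCESS) {
			fprintf(stderr, "cannot block udev for %s\n", devnm);
			return 1;
		}
		blocked = true;
	}

	/* ... perform the device/metadata manipulation here ... */
	printf("working on %s\n", devnm);

	if (blocked)
		udev_unblock();
	return 0;
}

int main(void)
{
	char devnm[] = "md127";

	return update_array_example(devnm);
}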