3 # FIXME - there is no reason to use all of these different return codes,
4 # especially when most of them are mapped to something else anyway.
5 # The tests should use error() to describe the failure more clearly,
6 # and reduce the need to look into the tests to see what failed.
12 # bug number for skipped test: LU-2828
13 ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT 59 64"
14 # UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
18 if [ -r /etc/SuSE-release ]
20 local vers=`grep VERSION /etc/SuSE-release | awk '{print $3}'`
21 local patchlev=`grep PATCHLEVEL /etc/SuSE-release \
23 if [ $vers -eq 11 ] && [ $patchlev -eq 2 ]
31 if is_sles11; then # LU-2181
32 ALWAYS_EXCEPT="$ALWAYS_EXCEPT 23a 34b"
35 if [ "$FAILURE_MODE" = "HARD" ]; then
36 CONFIG_EXCEPTIONS="24a " && \
37 echo "Except the tests: $CONFIG_EXCEPTIONS for FAILURE_MODE=$FAILURE_MODE, bug 23573" && \
38 ALWAYS_EXCEPT="$ALWAYS_EXCEPT $CONFIG_EXCEPTIONS"
41 # bug number for skipped test:
42 # a tool to create lustre filesystem images
43 ALWAYS_EXCEPT="32newtarball $ALWAYS_EXCEPT"
46 PATH=$PWD/$SRCDIR:$SRCDIR:$SRCDIR/../utils:$PATH
48 PTLDEBUG=${PTLDEBUG:--1}
50 LUSTRE=${LUSTRE:-`dirname $0`/..}
51 RLUSTRE=${RLUSTRE:-$LUSTRE}
52 LUSTRE_TESTS_API_DIR=${LUSTRE_TESTS_API_DIR:-${LUSTRE}/tests/clientapi}
53 export MULTIOP=${MULTIOP:-multiop}
55 . $LUSTRE/tests/test-framework.sh
57 . ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
59 # use small MDS + OST size to speed formatting time
60 # do not use too small MDSSIZE/OSTSIZE, which affect the default journal size
61 # STORED_MDSSIZE is used in test_18
62 STORED_MDSSIZE=$MDSSIZE
63 STORED_OSTSIZE=$OSTSIZE
67 if ! combined_mgs_mds; then
68 # bug number for skipped test: 23954
69 ALWAYS_EXCEPT="$ALWAYS_EXCEPT 24b"
72 # pass "-E lazy_itable_init" to mke2fs to speed up the formatting time
73 if [[ "$LDISKFS_MKFS_OPTS" != *lazy_itable_init* ]]; then
74 LDISKFS_MKFS_OPTS=$(csa_add "$LDISKFS_MKFS_OPTS" -E lazy_itable_init)
77 [ $(facet_fstype $SINGLEMDS) = "zfs" ] &&
78 # bug number for skipped test: LU-2778 LU-2059
79 ALWAYS_EXCEPT="$ALWAYS_EXCEPT 57b 50h"
84 require_dsh_mds || exit 0
85 require_dsh_ost || exit 0
87 [ "$SLOW" = "no" ] && EXCEPT_SLOW="30a 31 45 69"
93 # The MGS must be started before the OSTs for a new fs, so start
94 # and stop to generate the startup logs.
97 wait_osc_import_state mds ost FULL
102 reformat_and_config() {
104 if ! combined_mgs_mds ; then
110 writeconf_or_reformat() {
111 # There are at most 2 OSTs for write_conf test
112 # who knows if/where $TUNEFS is installed?
113 # Better reformat if it fails...
114 writeconf_all $MDSCOUNT 2 ||
115 { echo "tunefs failed, reformatting instead" &&
116 reformat_and_config && return 0; }
126 start mgs $(mgsdevname) $MGS_MOUNT_OPTS
132 local dev=$(mdsdevname $num)
135 echo "start mds service on `facet_active_host $facet`"
136 start $facet ${dev} $MDS_MOUNT_OPTS $@ || return 94
142 local dev=$(mdsdevname $num)
145 echo "stop mds service on `facet_active_host $facet`"
146 # These tests all use non-failover stop
147 stop $facet -f || return 97
153 for num in $(seq $MDSCOUNT); do
154 start_mdt $num $@ || return 94
159 if ! combined_mgs_mds ; then
167 for num in $(seq $MDSCOUNT); do
168 stop_mdt $num || return 97
173 echo "stop mgs service on `facet_active_host mgs`"
174 # These tests all use non-failover stop
175 stop mgs -f || return 97
179 echo "start ost1 service on `facet_active_host ost1`"
180 start ost1 `ostdevname 1` $OST_MOUNT_OPTS $@ || return 95
184 echo "stop ost1 service on `facet_active_host ost1`"
185 # These tests all use non-failover stop
186 stop ost1 -f || return 98
190 echo "start ost2 service on `facet_active_host ost2`"
191 start ost2 `ostdevname 2` $OST_MOUNT_OPTS $@ || return 92
195 echo "stop ost2 service on `facet_active_host ost2`"
196 # These tests all use non-failover stop
197 stop ost2 -f || return 93
202 echo "mount $FSNAME on ${MOUNTPATH}....."
203 zconf_mount `hostname` $MOUNTPATH || return 96
207 local mountopt="-o remount,$1"
209 echo "remount '$1' lustre on ${MOUNTPATH}....."
210 zconf_mount `hostname` $MOUNTPATH "$mountopt" || return 96
215 echo "umount lustre on ${MOUNTPATH}....."
216 zconf_umount `hostname` $MOUNTPATH || return 97
219 manual_umount_client(){
222 echo "manual umount lustre on ${MOUNT}...."
223 do_facet client "umount -d ${FORCE} $MOUNT"
229 start_mds || error "MDT start failed"
230 start_ost || error "OST start failed"
231 mount_client $MOUNT || error "client start failed"
232 client_up || error "client_up failed"
236 if ! combined_mgs_mds ; then
245 unload_modules_conf () {
246 if combined_mgs_mds || ! local_mode; then
247 unload_modules || return 1
252 stop_ost || return 202
253 stop_mds || return 201
254 unload_modules_conf || return 203
258 umount_client $MOUNT || return 200
259 cleanup_nocli || return $?
263 do_facet client "cp /etc/passwd $DIR/a" || return 71
264 do_facet client "rm $DIR/a" || return 72
265 # make sure lustre is actually mounted (touch will block,
266 # but grep won't, so do it after)
267 do_facet client "grep $MOUNT' ' /proc/mounts > /dev/null" || return 73
268 echo "setup single mount lustre success"
272 do_facet client "touch $DIR/a" || return 71
273 do_facet client "rm $DIR/a" || return 72
274 do_facet client "touch $DIR2/a" || return 73
275 do_facet client "rm $DIR2/a" || return 74
276 echo "setup double mount lustre success"
281 if [ "$ONLY" == "setup" ]; then
286 if [ "$ONLY" == "cleanup" ]; then
293 #create single point mountpoint
299 check_mount || return 41
302 run_test 0 "single mount setup"
305 start_mds || error "MDT start failed"
307 echo "start ost second time..."
308 start_ost && error "2nd OST start should fail"
309 mount_client $MOUNT || error "client start failed"
310 check_mount || return 42
313 run_test 1 "start up ost twice (should return errors)"
317 echo "start mds second time.."
318 start_mdt 1 && error "2nd MDT start should fail"
321 check_mount || return 43
324 run_test 2 "start up mds twice (should return err)"
328 #mount.lustre returns an error if already in mtab
329 mount_client $MOUNT && error "2nd client mount should fail"
330 check_mount || return 44
333 run_test 3 "mount client twice (should return err)"
337 touch $DIR/$tfile || return 85
341 # ok for ost to fail shutdown
342 if [ 202 -ne $eno ]; then
347 run_test 4 "force cleanup ost, then cleanup"
349 test_5a() { # was test_5
351 touch $DIR/$tfile || return 1
352 fuser -m -v $MOUNT && echo "$MOUNT is in use by user space process."
354 stop_mds -f || return 2
356 # cleanup may return an error from the failed
357 # disconnects; for now I'll consider this successful
358 # if all the modules have unloaded.
362 echo "killing umount"
363 kill -TERM $UMOUNT_PID
364 echo "waiting for umount to finish"
366 if grep " $MOUNT " /proc/mounts; then
367 echo "test 5: /proc/mounts after failed umount"
371 echo "killing umount"
372 kill -TERM $UMOUNT_PID
373 echo "waiting for umount to finish"
375 grep " $MOUNT " /proc/mounts && echo "test 5: /proc/mounts after second umount" && return 11
379 # stop_mds is a no-op here, and should not fail
380 cleanup_nocli || return $?
381 # df may have lingering entry
383 # mtab may have lingering entry
387 while [ "$WAIT" -ne "$MAX_WAIT" ]; do
389 grep -q $MOUNT" " /etc/mtab || break
390 echo "Waiting /etc/mtab updated ... "
391 WAIT=$(( WAIT + sleep))
393 [ "$WAIT" -eq "$MAX_WAIT" ] && error "/etc/mtab is not updated in $WAIT secs"
394 echo "/etc/mtab updated in $WAIT secs"
396 run_test 5a "force cleanup mds, then cleanup"
404 grep " $MOUNT " /etc/mtab && \
405 error false "unexpected entry in mtab before mount" && return 10
409 if ! combined_mgs_mds ; then
410 trap cleanup_5b EXIT ERR
415 [ -d $MOUNT ] || mkdir -p $MOUNT
416 mount_client $MOUNT && rc=1
417 grep " $MOUNT " /etc/mtab && \
418 error "$MOUNT entry in mtab after failed mount" && rc=11
420 # stop_mds is a no-op here, and should not fail
421 cleanup_nocli || rc=$?
422 if ! combined_mgs_mds ; then
427 run_test 5b "Try to start a client with no MGS (should return errs)"
430 grep " $MOUNT " /etc/mtab && \
431 error false "unexpected entry in mtab before mount" && return 10
436 [ -d $MOUNT ] || mkdir -p $MOUNT
437 local oldfs="${FSNAME}"
438 FSNAME="wrong.${FSNAME}"
439 mount_client $MOUNT || :
441 grep " $MOUNT " /etc/mtab && \
442 error "$MOUNT entry in mtab after failed mount" && rc=11
444 cleanup_nocli || rc=$?
447 run_test 5c "cleanup after failed mount (bug 2712) (should return errs)"
450 grep " $MOUNT " /etc/mtab && \
451 error false "unexpected entry in mtab before mount" && return 10
453 [ "$(facet_fstype ost1)" = "zfs" ] &&
454 skip "LU-2059: no local config for ZFS OSTs" && return
460 mount_client $MOUNT || rc=1
462 grep " $MOUNT " /etc/mtab && \
463 error "$MOUNT entry in mtab after unmount" && rc=11
466 run_test 5d "mount with ost down"
469 grep " $MOUNT " /etc/mtab && \
470 error false "unexpected entry in mtab before mount" && return 10
476 #define OBD_FAIL_PTLRPC_DELAY_SEND 0x506
477 do_facet client "lctl set_param fail_loc=0x80000506"
478 mount_client $MOUNT || echo "mount failed (not fatal)"
480 grep " $MOUNT " /etc/mtab && \
481 error "$MOUNT entry in mtab after unmount" && rc=11
484 run_test 5e "delayed connect, don't crash (bug 10268)"
487 if combined_mgs_mds ; then
488 skip "combined mgs and mds"
492 grep " $MOUNT " /etc/mtab && \
493 error false "unexpected entry in mtab before mount" && return 10
497 [ -d $MOUNT ] || mkdir -p $MOUNT
498 mount_client $MOUNT &
500 echo client_mount pid is $pid
504 if ! ps -f -p $pid >/dev/null; then
507 grep " $MOUNT " /etc/mtab && echo "test 5f: mtab after mount"
508 error "mount returns $rc, expected to hang"
517 # mount should succeed after start mds
520 [ $rc -eq 0 ] || error "mount returned $rc"
521 grep " $MOUNT " /etc/mtab && echo "test 5f: mtab after mount"
525 run_test 5f "mds down, cleanup after failed mount (bug 2712)"
530 mount_client ${MOUNT} || return 87
531 touch $DIR/a || return 86
534 run_test 6 "manual umount, then mount again"
539 cleanup_nocli || return $?
541 run_test 7 "manual umount, then cleanup"
546 check_mount2 || return 45
547 umount_client $MOUNT2
550 run_test 8 "double mount setup"
555 do_facet ost1 lctl set_param debug=\'inode trace\' || return 1
556 do_facet ost1 lctl set_param subsystem_debug=\'mds ost\' || return 1
558 CHECK_PTLDEBUG="`do_facet ost1 lctl get_param -n debug`"
559 if [ "$CHECK_PTLDEBUG" ] && { \
560 [ "$CHECK_PTLDEBUG" = "trace inode warning error emerg console" ] ||
561 [ "$CHECK_PTLDEBUG" = "trace inode" ]; }; then
562 echo "lnet.debug success"
564 echo "lnet.debug: want 'trace inode', have '$CHECK_PTLDEBUG'"
567 CHECK_SUBSYS="`do_facet ost1 lctl get_param -n subsystem_debug`"
568 if [ "$CHECK_SUBSYS" ] && [ "$CHECK_SUBSYS" = "mds ost" ]; then
569 echo "lnet.subsystem_debug success"
571 echo "lnet.subsystem_debug: want 'mds ost', have '$CHECK_SUBSYS'"
574 stop_ost || return $?
576 run_test 9 "test ptldebug and subsystem for mkfs"
584 do_facet $facet "test -b $dev" || rc=1
585 if [[ "$size" ]]; then
586 local in=$(do_facet $facet "dd if=$dev of=/dev/null bs=1k count=1 skip=$size 2>&1" |\
587 awk '($3 == "in") { print $1 }')
588 [[ $in = "1+0" ]] || rc=1
594 # Test 16 was to "verify that lustre will correct the mode of OBJECTS".
595 # But with new MDS stack we don't care about the mode of local objects
596 # anymore, so this test is removed. See bug 22944 for more details.
600 if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
601 skip "Only applicable to ldiskfs-based MDTs"
606 check_mount || return 41
609 echo "Remove mds config log"
610 if ! combined_mgs_mds ; then
614 do_facet mgs "$DEBUGFS -w -R 'unlink CONFIGS/$FSNAME-MDT0000' \
615 $(mgsdevname) || return \$?" || return $?
617 if ! combined_mgs_mds ; then
622 start_mds && return 42
625 run_test 17 "Verify failed mds_postsetup won't fail assertion (2936) (should return errs)"
628 if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
629 skip "Only applicable to ldiskfs-based MDTs"
633 local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
638 # check if current MDSSIZE is large enough
639 [ $MDSSIZE -ge $MIN ] && OK=1 && myMDSSIZE=$MDSSIZE && \
640 log "use MDSSIZE=$MDSSIZE"
642 # check if the global config has a large enough MDSSIZE
643 [ -z "$OK" -a ! -z "$STORED_MDSSIZE" ] && [ $STORED_MDSSIZE -ge $MIN ] && \
644 OK=1 && myMDSSIZE=$STORED_MDSSIZE && \
645 log "use STORED_MDSSIZE=$STORED_MDSSIZE"
647 # check if the block device is large enough
648 is_blkdev $SINGLEMDS $MDSDEV $MIN
649 local large_enough=$?
650 if [ -n "$OK" ]; then
651 [ $large_enough -ne 0 ] && OK=""
653 [ $large_enough -eq 0 ] && OK=1 && myMDSSIZE=$MIN &&
654 log "use device $MDSDEV with MIN=$MIN"
657 # check if a loopback device has enough space for fs metadata (5%)
659 if [ -z "$OK" ]; then
660 local SPACE=$(do_facet $SINGLEMDS "[ -f $MDSDEV -o ! -e $MDSDEV ] && df -P \\\$(dirname $MDSDEV)" |
661 awk '($1 != "Filesystem") {print $4}')
662 ! [ -z "$SPACE" ] && [ $SPACE -gt $((MIN / 20)) ] && \
663 OK=1 && myMDSSIZE=$MIN && \
664 log "use file $MDSDEV with MIN=$MIN"
667 [ -z "$OK" ] && skip_env "$MDSDEV too small for ${MIN}kB MDS" && return
670 echo "mount mds with large journal..."
672 local OLD_MDSSIZE=$MDSSIZE
676 echo "mount lustre system..."
678 check_mount || return 41
680 echo "check journal size..."
681 local FOUNDSIZE=$(do_facet $SINGLEMDS "$DEBUGFS -c -R 'stat <8>' $MDSDEV" | awk '/Size: / { print $NF; exit;}')
682 if [ $FOUNDSIZE -gt $((32 * 1024 * 1024)) ]; then
683 log "Success: mkfs creates large journals. Size: $((FOUNDSIZE >> 20))M"
685 error "expected journal size > 32M, found $((FOUNDSIZE >> 20))M"
693 run_test 18 "check mkfs creates large journals"
696 start_mds || return 1
697 stop_mds -f || return 2
699 run_test 19a "start/stop MDS without OSTs"
702 [ "$(facet_fstype ost1)" = "zfs" ] &&
703 skip "LU-2059: no local config for ZFS OSTs" && return
705 start_ost || return 1
706 stop_ost -f || return 2
708 run_test 19b "start/stop OSTs without MDS"
711 # first format the ost/mdt
715 check_mount || return 43
717 remount_client ro $MOUNT || return 44
718 touch $DIR/$tfile && echo "$DIR/$tfile created incorrectly" && return 45
719 [ -e $DIR/$tfile ] && echo "$DIR/$tfile exists incorrectly" && return 46
720 remount_client rw $MOUNT || return 47
722 [ ! -f $DIR/$tfile ] && echo "$DIR/$tfile missing" && return 48
723 MCNT=`grep -c $MOUNT /etc/mtab`
724 [ "$MCNT" -ne 1 ] && echo "$MOUNT in /etc/mtab $MCNT times" && return 49
729 run_test 20 "remount ro,rw mounts work and doesn't break /etc/mtab"
734 wait_osc_import_state mds ost FULL
738 run_test 21a "start mds before ost, stop ost first"
741 [ "$(facet_fstype ost1)" = "zfs" ] &&
742 skip "LU-2059: no local config for ZFS OSTs" && return
746 wait_osc_import_state mds ost FULL
750 run_test 21b "start ost before mds, stop mds first"
756 wait_osc_import_state mds ost2 FULL
760 #writeconf to remove all ost2 traces for subsequent tests
761 writeconf_or_reformat
763 run_test 21c "start mds between two osts, stop mds last"
766 if combined_mgs_mds ; then
767 skip "need separate mgs device" && return 0
777 wait_osc_import_state mds ost2 FULL
783 #writeconf to remove all ost2 traces for subsequent tests
784 writeconf_or_reformat
787 run_test 21d "start mgs then ost and then mds"
792 echo Client mount with ost in logs, but none running
794 # wait until mds connected to ost and open client connection
795 wait_osc_import_state mds ost FULL
798 # check_mount will block trying to contact ost
799 mcreate $DIR/$tfile || return 40
800 rm -f $DIR/$tfile || return 42
804 echo Client mount with a running ost
807 # if gss enabled, wait full time to let connection from
808 # mds to ost be established, due to the mismatch between
809 # initial connect timeout and gss context negotiation timeout.
810 # This perhaps could be removed after AT landed.
811 echo "sleep $((TIMEOUT + TIMEOUT + TIMEOUT))s"
812 sleep $((TIMEOUT + TIMEOUT + TIMEOUT))
815 wait_osc_import_state mds ost FULL
816 wait_osc_import_state client ost FULL
817 check_mount || return 41
822 run_test 22 "start a client before osts (should return errs)"
824 test_23a() { # was test_23
828 # force down client so that recovering mds waits for reconnect
829 local running=$(grep -c $MOUNT /proc/mounts) || true
830 if [ $running -ne 0 ]; then
831 echo "Stopping client $MOUNT (opts: -f)"
835 # enter recovery on mds
837 # try to start a new client
838 mount_client $MOUNT &
840 MOUNT_PID=$(ps -ef | grep "t lustre" | grep -v grep | awk '{print $2}')
841 MOUNT_LUSTRE_PID=`ps -ef | grep mount.lustre | grep -v grep | awk '{print $2}'`
842 echo mount pid is ${MOUNT_PID}, mount.lustre pid is ${MOUNT_LUSTRE_PID}
844 ps --ppid $MOUNT_LUSTRE_PID
845 echo "waiting for mount to finish"
847 # "ctrl-c" sends SIGINT but it usually (in script) does not work on child process
848 # SIGTERM works but it does not spread to offspring processes
849 kill -s TERM $MOUNT_PID
850 kill -s TERM $MOUNT_LUSTRE_PID
851 # we can not wait $MOUNT_PID because it is not a child of this shell
857 while [ "$WAIT" -lt "$MAX_WAIT" ]; do
859 PID1=$(ps -ef | awk '{print $2}' | grep -w $MOUNT_PID)
860 PID2=$(ps -ef | awk '{print $2}' | grep -w $MOUNT_LUSTRE_PID)
863 [ -z "$PID1" -a -z "$PID2" ] && break
864 echo "waiting for mount to finish ... "
865 WAIT=$(( WAIT + sleep))
867 if [ "$WAIT" -eq "$MAX_WAIT" ]; then
868 error "MOUNT_PID $MOUNT_PID and "\
869 "MOUNT_LUSTRE_PID $MOUNT_LUSTRE_PID still not killed in $WAIT secs"
872 stop_mds || error "stopping MDSes failed"
873 stop_ost || error "stopping OSSes failed"
875 run_test 23a "interrupt client during recovery mount delay"
880 test_23b() { # was test_23
883 # Simulate -EINTR during mount OBD_FAIL_LDLM_CLOSE_THREAD
884 lctl set_param fail_loc=0x80000313
888 run_test 23b "Simulate -EINTR during mount"
890 fs2mds_HOST=$mds_HOST
891 fs2ost_HOST=$ost_HOST
893 MDSDEV1_2=$fs2mds_DEV
894 OSTDEV1_2=$fs2ost_DEV
895 OSTDEV2_2=$fs3ost_DEV
899 echo "umount $MOUNT2 ..."
900 umount $MOUNT2 || true
901 echo "stopping fs2mds ..."
902 stop fs2mds -f || true
903 echo "stopping fs2ost ..."
904 stop fs2ost -f || true
908 local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
910 if [ -z "$fs2ost_DEV" -o -z "$fs2mds_DEV" ]; then
911 is_blkdev $SINGLEMDS $MDSDEV && \
912 skip_env "mixed loopback and real device not working" && return
915 [ -n "$ost1_HOST" ] && fs2ost_HOST=$ost1_HOST
917 local fs2mdsdev=$(mdsdevname 1_2)
918 local fs2ostdev=$(ostdevname 1_2)
919 local fs2mdsvdev=$(mdsvdevname 1_2)
920 local fs2ostvdev=$(ostvdevname 1_2)
922 # test 8-char fsname as well
923 local FSNAME2=test1234
925 add fs2mds $(mkfs_opts mds1 ${fs2mdsdev} ) --nomgs --mgsnode=$MGSNID \
926 --fsname=${FSNAME2} --reformat $fs2mdsdev $fs2mdsvdev || exit 10
928 add fs2ost $(mkfs_opts ost1 ${fs2ostdev}) --fsname=${FSNAME2} \
929 --reformat $fs2ostdev $fs2ostvdev || exit 10
932 start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS && trap cleanup_fs2 EXIT INT
933 start fs2ost $fs2ostdev $OST_MOUNT_OPTS
935 mount -t lustre $MGSNID:/${FSNAME2} $MOUNT2 || return 1
937 check_mount || return 2
938 # files written on 1 should not show up on 2
939 cp /etc/passwd $DIR/$tfile
941 [ -e $MOUNT2/$tfile ] && error "File bleed" && return 7
944 cp /etc/passwd $MOUNT2/b || return 3
945 rm $MOUNT2/b || return 4
946 # 2 is actually mounted
947 grep $MOUNT2' ' /proc/mounts > /dev/null || return 5
949 facet_failover fs2mds
950 facet_failover fs2ost
953 # the MDS must remain up until last MDT
955 MDS=$(do_facet $SINGLEMDS "lctl get_param -n devices" | awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1)
956 [ -z "$MDS" ] && error "No MDT" && return 8
958 cleanup_nocli || return 6
960 run_test 24a "Multiple MDTs on a single node"
963 local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
965 if [ -z "$fs2mds_DEV" ]; then
966 local dev=${SINGLEMDS}_dev
968 is_blkdev $SINGLEMDS $MDSDEV && \
969 skip_env "mixed loopback and real device not working" && return
972 local fs2mdsdev=$(mdsdevname 1_2)
973 local fs2mdsvdev=$(mdsvdevname 1_2)
975 add fs2mds $(mkfs_opts mds1 ${fs2mdsdev} ) --mgs --fsname=${FSNAME}2 \
976 --reformat $fs2mdsdev $fs2mdsvdev || exit 10
978 start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS && return 2
981 run_test 24b "Multiple MGSs on a single node (should return err)"
985 check_mount || return 2
986 local MODULES=$($LCTL modules | awk '{ print $2 }')
987 rmmod $MODULES 2>/dev/null || true
990 run_test 25 "Verify modules are referenced"
994 # we need modules before mount for sysctl, so make sure...
995 do_facet $SINGLEMDS "lsmod | grep -q lustre || modprobe lustre"
996 #define OBD_FAIL_MDS_FS_SETUP 0x135
997 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000135"
998 start_mds && echo MDS started && return 1
999 lctl get_param -n devices
1000 DEVS=$(lctl get_param -n devices | egrep -v MG | wc -l)
1001 [ $DEVS -gt 0 ] && return 2
1002 # start mds to drop writeconf setting
1003 start_mds || return 3
1004 stop_mds || return 4
1005 unload_modules_conf || return $?
1007 run_test 26 "MDT startup failure cleans LOV (should return errs)"
1010 [ "$(facet_fstype ost1)" = "zfs" ] &&
1011 skip "LU-2059: no local config for ZFS OSTs" && return
1013 start_ost || return 1
1014 start_mds || return 2
1015 echo "Requeue thread should have started: "
1016 ps -e | grep ll_cfg_requeue
1017 set_conf_param_and_check ost1 \
1018 "lctl get_param -n obdfilter.$FSNAME-OST0000.client_cache_seconds" \
1019 "$FSNAME-OST0000.ost.client_cache_seconds" || return 3
1022 run_test 27a "Reacquire MGS lock if OST started first"
1027 local device=$(do_facet $SINGLEMDS "lctl get_param -n devices" |
1028 awk '($3 ~ "mdt" && $4 ~ "MDT0000") { print $4 }')
1030 facet_failover $SINGLEMDS
1031 set_conf_param_and_check $SINGLEMDS \
1032 "lctl get_param -n mdt.$device.identity_acquire_expire" \
1033 "$device.mdt.identity_acquire_expire" || return 3
1034 set_conf_param_and_check client \
1035 "lctl get_param -n mdc.$device-mdc-*.max_rpcs_in_flight"\
1036 "$device.mdc.max_rpcs_in_flight" || return 4
1040 run_test 27b "Reacquire MGS lock after failover"
1044 TEST="lctl get_param -n llite.$FSNAME-*.max_read_ahead_whole_mb"
1045 PARAM="$FSNAME.llite.max_read_ahead_whole_mb"
1047 FINAL=$(($ORIG + 1))
1048 set_conf_param_and_check client "$TEST" "$PARAM" $FINAL || return 3
1049 FINAL=$(($FINAL + 1))
1050 set_conf_param_and_check client "$TEST" "$PARAM" $FINAL || return 4
1051 umount_client $MOUNT || return 200
1054 if [ $RESULT -ne $FINAL ]; then
1055 echo "New config not seen: wanted $FINAL got $RESULT"
1058 echo "New config success: got $RESULT"
1060 set_conf_param_and_check client "$TEST" "$PARAM" $ORIG || return 5
1063 run_test 28 "permanent parameter setting"
1066 [ "$OSTCOUNT" -lt "2" ] && skip_env "$OSTCOUNT < 2, skipping" && return
1067 setup > /dev/null 2>&1
1071 local PARAM="$FSNAME-OST0001.osc.active"
1072 local PROC_ACT="osc.$FSNAME-OST0001-osc-[^M]*.active"
1073 local PROC_UUID="osc.$FSNAME-OST0001-osc-[^M]*.ost_server_uuid"
1075 ACTV=$(lctl get_param -n $PROC_ACT)
1077 set_conf_param_and_check client \
1078 "lctl get_param -n $PROC_ACT" "$PARAM" $DEAC || return 2
1079 # also check ost_server_uuid status
1080 RESULT=$(lctl get_param -n $PROC_UUID | grep DEACTIV)
1081 if [ -z "$RESULT" ]; then
1082 echo "Live client not deactivated: $(lctl get_param -n $PROC_UUID)"
1085 echo "Live client success: got $RESULT"
1089 for num in $(seq $MDSCOUNT); do
1090 local mdtosc=$(get_mdtosc_proc_path mds${num} $FSNAME-OST0001)
1091 local MPROC="osc.$mdtosc.active"
1096 RESULT=$(do_facet mds${num} " lctl get_param -n $MPROC")
1097 [ ${PIPESTATUS[0]} = 0 ] || error "Can't read $MPROC"
1098 if [ $RESULT -eq $DEAC ]; then
1099 echo -n "MDT deactivated also after"
1100 echo "$WAIT sec (got $RESULT)"
1104 if [ $WAIT -eq $MAX ]; then
1105 echo -n "MDT not deactivated: wanted $DEAC"
1109 echo "Waiting $(($MAX - $WAIT))secs for MDT deactivated"
1112 # test new client starts deactivated
1113 umount_client $MOUNT || return 200
1115 RESULT=$(lctl get_param -n $PROC_UUID | grep DEACTIV | grep NEW)
1116 if [ -z "$RESULT" ]; then
1117 echo "New client not deactivated from start: $(lctl get_param -n $PROC_UUID)"
1120 echo "New client success: got $RESULT"
1123 # make sure it reactivates
1124 set_conf_param_and_check client \
1125 "lctl get_param -n $PROC_ACT" "$PARAM" $ACTV || return 6
1127 umount_client $MOUNT
1130 #writeconf to remove all ost2 traces for subsequent tests
1131 writeconf_or_reformat
1133 run_test 29 "permanently remove an OST"
1138 echo Big config llog
1139 TEST="lctl get_param -n llite.$FSNAME-*.max_read_ahead_whole_mb"
1141 LIST=(1 2 3 4 5 4 3 2 1 2 3 4 5 4 3 2 1 2 3 4 5)
1142 for i in ${LIST[@]}; do
1143 set_conf_param_and_check client "$TEST" \
1144 "$FSNAME.llite.max_read_ahead_whole_mb" $i || return 3
1146 # make sure client restart still works
1147 umount_client $MOUNT
1148 mount_client $MOUNT || return 4
1149 [ "$($TEST)" -ne "$i" ] && error "Param didn't stick across restart $($TEST) != $i"
1152 echo Erase parameter setting
1153 do_facet mgs "$LCTL conf_param -d $FSNAME.llite.max_read_ahead_whole_mb" || return 6
1154 umount_client $MOUNT
1155 mount_client $MOUNT || return 6
1157 echo "deleted (default) value=$FINAL, orig=$ORIG"
1158 # assumes this parameter started at the default value
1159 [ "$FINAL" -eq "$ORIG" ] || fail "Deleted value=$FINAL, orig=$ORIG"
1163 run_test 30a "Big config llog and conf_param deletion"
1168 # Make a fake nid. Use the OST nid, and add 20 to the least significant
1169 # numerical part of it. Hopefully that's not already a failover address for
1171 OSTNID=$(do_facet ost1 "$LCTL get_param nis" | tail -1 | awk '{print $1}')
1172 ORIGVAL=$(echo $OSTNID | egrep -oi "[0-9]*@")
1173 NEWVAL=$((($(echo $ORIGVAL | egrep -oi "[0-9]*") + 20) % 256))
1174 NEW=$(echo $OSTNID | sed "s/$ORIGVAL/$NEWVAL@/")
1175 echo "Using fake nid $NEW"
1177 TEST="$LCTL get_param -n osc.$FSNAME-OST0000-osc-[^M]*.import | grep failover_nids | sed -n 's/.*\($NEW\).*/\1/p'"
1178 set_conf_param_and_check client "$TEST" \
1179 "$FSNAME-OST0000.failover.node" $NEW ||
1180 error "didn't add failover nid $NEW"
1181 NIDS=$($LCTL get_param -n osc.$FSNAME-OST0000-osc-[^M]*.import | grep failover_nids)
1183 NIDCOUNT=$(($(echo "$NIDS" | wc -w) - 1))
1184 echo "should have 2 failover nids: $NIDCOUNT"
1185 [ $NIDCOUNT -eq 2 ] || error "Failover nid not added"
1186 do_facet mgs "$LCTL conf_param -d $FSNAME-OST0000.failover.node" || error "conf_param delete failed"
1187 umount_client $MOUNT
1188 mount_client $MOUNT || return 3
1190 NIDS=$($LCTL get_param -n osc.$FSNAME-OST0000-osc-[^M]*.import | grep failover_nids)
1192 NIDCOUNT=$(($(echo "$NIDS" | wc -w) - 1))
1193 echo "only 1 final nid should remain: $NIDCOUNT"
1194 [ $NIDCOUNT -eq 1 ] || error "Failover nids not removed"
1198 run_test 30b "Remove failover nids"
1200 test_31() { # bug 10734
1201 # ipaddr must not exist
1202 mount -t lustre 4.3.2.1@tcp:/lustre $MOUNT || true
1205 run_test 31 "Connect to non-existent node (shouldn't crash)"
1209 T32_BLIMIT=20480 # Kbytes
1213 # This is not really a test but a tool to create new disk
1214 # image tarballs for the upgrade tests.
1216 # Disk image tarballs should be created on single-node
1217 # clusters by running this test with default configurations
1218 # plus a few mandatory environment settings that are verified
1219 # at the beginning of the test.
1221 test_32newtarball() {
1225 local tmp=$TMP/t32_image_create
1227 if [ $FSNAME != t32fs -o $MDSCOUNT -ne 1 -o \
1228 \( -z "$MDSDEV" -a -z "$MDSDEV1" \) -o $OSTCOUNT -ne 1 -o \
1229 -z "$OSTDEV1" ]; then
1230 error "Needs FSNAME=t32fs MDSCOUNT=1 MDSDEV1=<nonexistent_file>" \
1231 "(or MDSDEV, in the case of b1_8) OSTCOUNT=1" \
1232 "OSTDEV1=<nonexistent_file>"
1236 echo "Found stale $tmp"
1241 tar cf - -C $src . | tar xf - -C $tmp/src
1242 dd if=/dev/zero of=$tmp/src/t32_qf_old bs=1M \
1243 count=$(($T32_BLIMIT / 1024 / 2))
1244 chown $T32_QID.$T32_QID $tmp/src/t32_qf_old
1250 [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.3.50) ] &&
1251 $LFS quotacheck -ug /mnt/$FSNAME
1252 $LFS setquota -u $T32_QID -b 0 -B $T32_BLIMIT -i 0 -I $T32_ILIMIT \
1255 tar cf - -C $tmp/src . | tar xf - -C /mnt/$FSNAME
1262 ls -Rni --time-style=+%s >$tmp/img/list
1263 find . ! -name .lustre -type f -exec sha1sum {} \; |
1264 sort -k 2 >$tmp/img/sha1sums
1266 $LCTL get_param -n version | head -n 1 |
1267 sed -e 's/^lustre: *//' >$tmp/img/commit
1269 [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.3.50) ] &&
1270 $LFS quotaon -ug /mnt/$FSNAME
1271 $LFS quota -u $T32_QID -v /mnt/$FSNAME
1272 $LFS quota -v -u $T32_QID /mnt/$FSNAME |
1273 awk 'BEGIN { num='1' } { if ($1 == "'/mnt/$FSNAME'") \
1274 { if (NF == 1) { getline } else { num++ } ; print $num;} }' \
1275 | tr -d "*" > $tmp/img/bspace
1276 $LFS quota -v -u $T32_QID /mnt/$FSNAME |
1277 awk 'BEGIN { num='5' } { if ($1 == "'/mnt/$FSNAME'") \
1278 { if (NF == 1) { getline } else { num++ } ; print $num;} }' \
1279 | tr -d "*" > $tmp/img/ispace
1284 find -type f -exec sha1sum {} \; | sort -k 2 >$tmp/sha1sums.src
1287 if ! diff -u $tmp/sha1sums.src $tmp/img/sha1sums; then
1288 echo "Data verification failed"
1291 uname -r >$tmp/img/kernel
1292 uname -m >$tmp/img/arch
1294 mv ${MDSDEV1:-$MDSDEV} $tmp/img
1295 mv $OSTDEV1 $tmp/img
1297 version=$(sed -e 's/\(^[0-9]\+\.[0-9]\+\)\(.*$\)/\1/' $tmp/img/commit |
1298 sed -e 's/\./_/g') # E.g., "1.8.7" -> "1_8"
1301 tar cjvf $dst/disk$version-$(facet_fstype $SINGLEMDS).tar.bz2 -S *
1306 #run_test 32newtarball "Create a new test_32 disk image tarball for this version"
1309 # The list of applicable tarballs is returned via the caller's
1310 # variable "tarballs".
1313 local node=$(facet_active_host $SINGLEMDS)
1314 local r="do_node $node"
1316 if [ "$CLIENTONLY" ]; then
1317 skip "Client-only testing"
1321 if ! $r which $TUNEFS; then
1322 skip_env "tunefs.lustre required on $node"
1326 local IMGTYPE=$(facet_fstype $SINGLEMDS)
1328 tarballs=$($r find $RLUSTRE/tests -maxdepth 1 -name \'disk*-$IMGTYPE.tar.bz2\')
1330 if [ -z "$tarballs" ]; then
1331 skip "No applicable tarballs found"
1336 t32_test_cleanup() {
1340 if $shall_cleanup_lustre; then
1341 umount $tmp/mnt/lustre || rc=$?
1343 if $shall_cleanup_mdt; then
1344 $r umount -d $tmp/mnt/mdt || rc=$?
1346 if $shall_cleanup_mdt1; then
1347 $r umount -d $tmp/mnt/mdt1 || rc=$?
1349 if $shall_cleanup_ost; then
1350 $r umount -d $tmp/mnt/ost || rc=$?
1358 t32_bits_per_long() {
1360 # Yes, this is not meant to be perfect.
1370 t32_reload_modules() {
1372 local all_removed=false
1375 while ((i < 20)); do
1376 echo "Unloading modules on $node: Attempt $i"
1377 do_rpc_nodes $node $LUSTRE_RMMOD $(facet_fstype $SINGLEMDS) &&
1379 do_rpc_nodes $node check_mem_leak || return 1
1380 if $all_removed; then
1381 do_rpc_nodes $node load_modules
1387 echo "Unloading modules on $node: Given up"
1391 t32_wait_til_devices_gone() {
1397 echo wait for devices to go
1398 while ((i < 20)); do
1399 devices=$(do_rpc_nodes $node $LCTL device_list | wc -l)
1400 loops=$(do_rpc_nodes $node losetup -a | grep -c t32)
1401 ((devices == 0 && loops == 0)) && return 0
1405 echo "waiting for dev on $node: dev $devices loop $loops given up"
1406 do_rpc_nodes $node "losetup -a"
1407 do_rpc_nodes $node "$LCTL devices_list"
# t32_verify_quota(): verify that quota information survived the image
# upgrade, then verify that quota enforcement works on the upgraded
# filesystem.  Steps:
#   1. Read block/inode usage and limits for $T32_QID via `lfs quota -v`
#      and compare them against the values recorded in the image
#      ($img_bspace/$img_ispace) and the expected limits
#      ($T32_BLIMIT/$T32_ILIMIT).
#   2. Enable ug quota enforcement on MDT and OST via conf_param and wait
#      for quota_slave.enabled to report "ug".
#   3. Check enforcement: writing past the block limit and creating past
#      the inode limit must both fail with -EDQUOT.
1411 t32_verify_quota() {
1415 	local fstype=$(facet_fstype $SINGLEMDS)
1419 	$LFS quota -u $T32_QID -v $mnt
# The awk scripts below pick the Nth field of the row whose first column
# is the mount point; 'num' selects which quota column to extract
# (1=bspace, 5=ispace, 3=blimit, 7=ilimit).  The getline handles the
# case where the mount-point path is long enough to wrap onto its own
# line in `lfs quota -v` output.
1421 	qval=$($LFS quota -v -u $T32_QID $mnt |
1422 		awk 'BEGIN { num='1' } { if ($1 == "'$mnt'") \
1423 		{ if (NF == 1) { getline } else { num++ } ; print $num;} }' \
1425 	[ $qval -eq $img_bspace ] || {
1426 		echo "bspace, act:$qval, exp:$img_bspace"
1430 	qval=$($LFS quota -v -u $T32_QID $mnt |
1431 		awk 'BEGIN { num='5' } { if ($1 == "'$mnt'") \
1432 		{ if (NF == 1) { getline } else { num++ } ; print $num;} }' \
1434 	[ $qval -eq $img_ispace ] || {
1435 		echo "ispace, act:$qval, exp:$img_ispace"
1439 	qval=$($LFS quota -v -u $T32_QID $mnt |
1440 		awk 'BEGIN { num='3' } { if ($1 == "'$mnt'") \
1441 		{ if (NF == 1) { getline } else { num++ } ; print $num;} }' \
1443 	[ $qval -eq $T32_BLIMIT ] || {
1444 		echo "blimit, act:$qval, exp:$T32_BLIMIT"
1448 	qval=$($LFS quota -v -u $T32_QID $mnt |
1449 		awk 'BEGIN { num='7' } { if ($1 == "'$mnt'") \
1450 		{ if (NF == 1) { getline } else { num++ } ; print $num;} }' \
1452 	[ $qval -eq $T32_ILIMIT ] || {
1453 		echo "ilimit, act:$qval, exp:$T32_ILIMIT"
# Turn on user+group quota enforcement on the MDT and wait for the
# quota slave to acknowledge it.
1457 	do_node $node $LCTL conf_param $fsname.quota.mdt=ug
1458 	cmd="$LCTL get_param -n osd-$fstype.$fsname-MDT0000"
1459 	cmd=$cmd.quota_slave.enabled
1460 	wait_update $node "$cmd" "ug" || {
1461 		echo "Enable mdt quota failed"
# Same for the OST.
1465 	do_node $node $LCTL conf_param $fsname.quota.ost=ug
1466 	cmd="$LCTL get_param -n osd-$fstype.$fsname-OST0000"
1467 	cmd=$cmd.quota_slave.enabled
1468 	wait_update $node "$cmd" "ug" || {
1469 		echo "Enable ost quota failed"
# Enforcement checks: success here is a FAILURE (quota should refuse).
1474 	runas -u $T32_QID -g $T32_QID dd if=/dev/zero of=$mnt/t32_qf_new \
1475 		bs=1M count=$(($T32_BLIMIT / 1024)) oflag=sync && {
1476 		echo "Write succeed, but expect -EDQUOT"
1479 	rm -f $mnt/t32_qf_new
1481 	runas -u $T32_QID -g $T32_QID createmany -m $mnt/t32_qf_ \
1483 		echo "Create succeed, but expect -EDQUOT"
1486 	unlinkmany $mnt/t32_qf_ $T32_ILIMIT
# --- body of t32_test() ---------------------------------------------------
# Core of the test_32* family: unpack a pre-built old-version disk-image
# tarball, mount its MDT/OST with the current code (optionally with
# writeconf), verify metadata/data integrity (sha1sums, file lists,
# quota), optionally add a second MDT (dne_upgrade) or run an OI-scrub
# FID-in-dirent conversion (ff_convert), then cleanly tear everything
# down and remount once more to confirm no upgrade flags were left set.
# Environment knobs: $dne_upgrade, $ff_convert, $writeconf, $tarball.
# Cleanup of whatever was mounted is delegated to t32_test_cleanup via
# the RETURN trap; the shall_cleanup_* flags tell it what is still up.
1494 	local dne_upgrade=${dne_upgrade:-"no"}
1495 	local ff_convert=${ff_convert:-"no"}
1496 	local shall_cleanup_mdt=false
1497 	local shall_cleanup_mdt1=false
1498 	local shall_cleanup_ost=false
1499 	local shall_cleanup_lustre=false
1500 	local node=$(facet_active_host $SINGLEMDS)
1501 	local r="do_node $node"
1502 	local node2=$(facet_active_host mds2)
1510 	local nid=$($r $LCTL list_nids | head -1)
1516 	local fstype=$(facet_fstype $SINGLEMDS)
# Ensure cleanup runs on every exit path from this function.
1518 	trap 'trap - RETURN; t32_test_cleanup' RETURN
1520 	mkdir -p $tmp/mnt/lustre
1521 	$r mkdir -p $tmp/mnt/{mdt,ost}
# Unpack the old-release disk image (sparse-aware) on the server node.
1522 	$r tar xjvf $tarball -S -C $tmp || {
1523 		error_noexit "Unpacking the disk image tarball"
# The image ships metadata files describing how it was built.
1526 	img_commit=$($r cat $tmp/commit)
1527 	img_kernel=$($r cat $tmp/kernel)
1528 	img_arch=$($r cat $tmp/arch)
1529 	img_bspace=$($r cat $tmp/bspace)
1530 	img_ispace=$($r cat $tmp/ispace)
1531 	echo "Upgrading from $(basename $tarball), created with:"
1532 	echo "  Commit: $img_commit"
1533 	echo "  Kernel: $img_kernel"
1534 	echo "    Arch: $img_arch"
# FID-in-dirent conversion only applies to images older than 2.4.0.
1536 	local version=$(version_code $img_commit)
1537 	[[ $version -gt $(version_code 2.4.0) ]] && ff_convert="no"
1539 	$r $LCTL set_param debug="$PTLDEBUG"
# Sanity-check the MDT image with tunefs before the first mount.
1541 	$r $TUNEFS --dryrun $tmp/mdt || {
1543 		error_noexit "tunefs.lustre before mounting the MDT"
1546 	if [ "$writeconf" ]; then
1547 		mopts=loop,writeconf
1548 		if [ $fstype == "ldiskfs" ]; then
1549 			$r $TUNEFS --quota $tmp/mdt || {
1551 				error_noexit "Enable mdt quota feature"
# Non-tcp/lo NIDs (e.g. InfiniBand) require replace_nids support since
# the image was recorded with different NIDs.
1556 	if [ -n "$($LCTL list_nids | grep -v '\(tcp\|lo\)[[:digit:]]*$')" ]; then
1557 		[[ $(lustre_version_code mgs) -ge $(version_code 2.3.59) ]] ||
1558 			{ skip "LU-2200: Cannot run over Inifiniband w/o lctl replace_nids "
1559 				"(Need MGS version at least 2.3.59)"; return 0; }
1561 		local osthost=$(facet_active_host ost1)
1562 		local ostnid=$(do_node $osthost $LCTL list_nids | head -1)
# Mount nosvc (MGS only) to rewrite the stored NIDs, then unmount.
1564 		$r mount -t lustre -o loop,nosvc $tmp/mdt $tmp/mnt/mdt
1565 		$r lctl replace_nids $fsname-OST0000 $ostnid
1566 		$r lctl replace_nids $fsname-MDT0000 $nid
1567 		$r umount -d $tmp/mnt/mdt
1570 		mopts=loop,exclude=$fsname-OST0000
1573 	t32_wait_til_devices_gone $node
1575 	$r mount -t lustre -o $mopts $tmp/mdt $tmp/mnt/mdt || {
1577 		error_noexit "Mounting the MDT"
1580 	shall_cleanup_mdt=true
# Optional DNE upgrade: format and mount a brand-new second MDT.
1582 	if [ "$dne_upgrade" != "no" ]; then
1583 		local fs2mdsdev=$(mdsdevname 1_2)
1584 		local fs2mdsvdev=$(mdsvdevname 1_2)
1586 		echo "mkfs new MDT on ${fs2mdsdev}...."
1587 		if [ $(facet_fstype mds1) == ldiskfs ]; then
1588 			mkfsoptions="--mkfsoptions=\\\"-J size=8\\\""
1591 		add fs2mds $(mkfs_opts mds2 $fs2mdsdev $fsname) --reformat \
1592 			   $mkfsoptions $fs2mdsdev $fs2mdsvdev > /dev/null || {
1593 			error_noexit "Mkfs new MDT failed"
1597 		$r $TUNEFS --dryrun $fs2mdsdev || {
1598 			error_noexit "tunefs.lustre before mounting the MDT"
1602 		echo "mount new MDT....$fs2mdsdev"
1603 		$r mkdir -p $tmp/mnt/mdt1
1604 		$r mount -t lustre -o $mopts $fs2mdsdev $tmp/mnt/mdt1 || {
1605 			error_noexit "mount mdt1 failed"
1608 		shall_cleanup_mdt1=true
# Confirm the upgraded MDT registered with its expected UUID.
1611 	uuid=$($r $LCTL get_param -n mdt.$fsname-MDT0000.uuid) || {
1612 		error_noexit "Getting MDT UUID"
1615 	if [ "$uuid" != $fsname-MDT0000_UUID ]; then
1616 		error_noexit "Unexpected MDT UUID: \"$uuid\""
# Same sequence for the OST image: tunefs check, optional quota enable,
# mount, UUID check.
1620 	$r $TUNEFS --dryrun $tmp/ost || {
1621 		error_noexit "tunefs.lustre before mounting the OST"
1624 	if [ "$writeconf" ]; then
1625 		mopts=loop,mgsnode=$nid,$writeconf
1626 		if [ $fstype == "ldiskfs" ]; then
1627 			$r $TUNEFS --quota $tmp/ost || {
1629 				error_noexit "Enable ost quota feature"
1634 		mopts=loop,mgsnode=$nid
1636 	$r mount -t lustre -o $mopts $tmp/ost $tmp/mnt/ost || {
1637 		error_noexit "Mounting the OST"
1640 	shall_cleanup_ost=true
1642 	uuid=$($r $LCTL get_param -n obdfilter.$fsname-OST0000.uuid) || {
1643 		error_noexit "Getting OST UUID"
1646 	if [ "$uuid" != $fsname-OST0000_UUID ]; then
1647 		error_noexit "Unexpected OST UUID: \"$uuid\""
# Exercise conf_param on the upgraded configuration log.
1651 	$r $LCTL conf_param $fsname-OST0000.osc.max_dirty_mb=15 || {
1652 		error_noexit "Setting \"max_dirty_mb\""
1655 	$r $LCTL conf_param $fsname-OST0000.failover.node=$nid || {
1656 		error_noexit "Setting OST \"failover.node\""
1659 	$r $LCTL conf_param $fsname-MDT0000.mdc.max_rpcs_in_flight=9 || {
1660 		error_noexit "Setting \"max_rpcs_in_flight\""
1663 	$r $LCTL conf_param $fsname-MDT0000.failover.node=$nid || {
1664 		error_noexit "Setting MDT \"failover.node\""
1667 	$r $LCTL pool_new $fsname.interop || {
1668 		error_noexit "Setting \"interop\""
1671 	$r $LCTL conf_param $fsname-MDT0000.lov.stripesize=4M || {
1672 		error_noexit "Setting \"lov.stripesize\""
# Optional FID-in-dirent conversion via OI scrub (ldiskfs only).
1676 	if [ "$ff_convert" != "no" -a $(facet_fstype ost1) == "ldiskfs" ]; then
1677 		$r $LCTL lfsck_start -M $fsname-OST0000 || {
1678 			error_noexit "Start OI scrub on OST0"
1682 		# The oi_scrub should be on ost1, but for test_32(),
1683 		# all on the SINGLEMDS.
1684 		wait_update_facet $SINGLEMDS "$LCTL get_param -n \
1685 			osd-ldiskfs.$fsname-OST0000.oi_scrub |
1686 			awk '/^status/ { print \\\$2 }'" "completed" 30 || {
1687 			error_noexit "Failed to get the expected 'completed'"
1691 		local UPDATED=$($r $LCTL get_param -n \
1692 				osd-ldiskfs.$fsname-OST0000.oi_scrub |
1693 				awk '/^updated/ { print $2 }')
1694 		[ $UPDATED -ge 1 ] || {
1695 			error_noexit "Only $UPDATED objects have been converted"
# Apply the same tunables to the new MDT0001 in the DNE case.
1700 	if [ "$dne_upgrade" != "no" ]; then
1701 		$r $LCTL conf_param \
1702 				$fsname-MDT0001.mdc.max_rpcs_in_flight=9 || {
1703 			error_noexit "Setting MDT1 \"max_rpcs_in_flight\""
1706 		$r $LCTL conf_param $fsname-MDT0001.failover.node=$nid || {
1707 			error_noexit "Setting MDT1 \"failover.node\""
1710 		$r $LCTL conf_param $fsname-MDT0001.lov.stripesize=4M || {
1711 			error_noexit "Setting MDT1 \"lov.stripesize\""
# Client-side verification phase (only done for writeconf runs).
1717 	if [ "$writeconf" ]; then
1718 		mount -t lustre $nid:/$fsname $tmp/mnt/lustre || {
1719 			error_noexit "Mounting the client"
1722 		shall_cleanup_lustre=true
1723 		$LCTL set_param debug="$PTLDEBUG"
1725 		t32_verify_quota $node $fsname $tmp/mnt/lustre || {
1726 			error_noexit "verify quota failed"
1730 		if [ "$dne_upgrade" != "no" ]; then
1731 			$LFS mkdir -i 1 $tmp/mnt/lustre/remote_dir || {
1732 				error_noexit "set remote dir failed"
# Copy the whole upgraded tree into the MDT1-hosted remote dir.
1736 			pushd $tmp/mnt/lustre
1737 			tar -cf - . --exclude=./remote_dir |
1738 				tar -xvf - -C remote_dir 1>/dev/null || {
1739 				error_noexit "cp to remote dir failed"
# Simple write/remove smoke test on the upgraded fs.
1745 		dd if=/dev/zero of=$tmp/mnt/lustre/tmp_file bs=10k count=10 || {
1746 			error_noexit "dd failed"
1749 		rm -rf $tmp/mnt/lustre/tmp_file || {
1750 			error_noexit "rm failed"
# Data verification against the sha1sums recorded in the image.
1754 		if $r test -f $tmp/sha1sums; then
1755 			# LU-2393 - do both sorts on same node to ensure locale
1757 			$r cat $tmp/sha1sums | sort -k 2 >$tmp/sha1sums.orig
1758 			if [ "$dne_upgrade" != "no" ]; then
1759 				pushd $tmp/mnt/lustre/remote_dir
1761 				pushd $tmp/mnt/lustre
1764 			find ! -name .lustre -type f -exec sha1sum {} \; |
1765 				sort -k 2 >$tmp/sha1sums || {
1766 				error_noexit "sha1sum"
1770 			if ! diff -ub $tmp/sha1sums.orig $tmp/sha1sums; then
1771 				error_noexit "sha1sum verification failed"
1775 			echo "sha1sum verification skipped"
1778 		if [ "$dne_upgrade" != "no" ]; then
1779 			rm -rf $tmp/mnt/lustre/remote_dir || {
1780 				error_noexit "remove remote dir failed"
# Namespace verification against the recursive listing in the image.
1785 		if $r test -f $tmp/list; then
1787 			# There is not a Test Framework API to copy files to or
1788 			# from a remote node.
1790 			# LU-2393 - do both sorts on same node to ensure locale
1792 			$r cat $tmp/list | sort -k 6 >$tmp/list.orig
1793 			pushd $tmp/mnt/lustre
1794 			ls -Rni --time-style=+%s | sort -k 6 >$tmp/list || {
1800 			# 32-bit and 64-bit clients use different algorithms to
1801 			# convert FIDs into inode numbers.  Hence, remove the inode
1802 			# numbers from the lists, if the original list was created
1803 			# on an architecture with different number of bits per
1806 			if [ $(t32_bits_per_long $(uname -m)) != \
1807 				$(t32_bits_per_long $img_arch) ]; then
1808 				echo "Different number of bits per \"long\" from the disk image"
1809 				for list in list.orig list; do
1810 					sed -i -e 's/^[0-9]\+[ \t]\+//' $tmp/$list
1813 			if ! diff -ub $tmp/list.orig $tmp/list; then
1814 				error_noexit "list verification failed"
1818 			echo "list verification skipped"
1822 		# When adding new data verification tests, please check for
1823 		# the presence of the required reference files first, like
1824 		# the "sha1sums" and "list" tests above, to avoid the need to
1825 		# regenerate every image for each test addition.
# Verify that conf_param changes made above actually propagate to a
# live client.
1828 		nrpcs_orig=$($LCTL get_param \
1829 				-n mdc.*MDT0000*.max_rpcs_in_flight) || {
1830 			error_noexit "Getting \"max_rpcs_in_flight\""
1833 		nrpcs=$((nrpcs_orig + 5))
1834 		$r $LCTL conf_param $fsname-MDT0000.mdc.max_rpcs_in_flight=$nrpcs || {
1835 			error_noexit "Changing \"max_rpcs_in_flight\""
1838 		wait_update $HOSTNAME "$LCTL get_param \
1839 			-n mdc.*MDT0000*.max_rpcs_in_flight" $nrpcs || {
1840 			error_noexit "Verifying \"max_rpcs_in_flight\""
# Orderly teardown: client, then MDT1 (if any), MDT, OST; flags are
# cleared so t32_test_cleanup does not try to unmount them again.
1844 		umount $tmp/mnt/lustre || {
1845 			error_noexit "Unmounting the client"
1848 		shall_cleanup_lustre=false
1850 	if [ "$dne_upgrade" != "no" ]; then
1851 		$r umount -d $tmp/mnt/mdt1 || {
1852 			error_noexit "Unmounting the MDT2"
1855 		shall_cleanup_mdt1=false
1858 	$r umount -d $tmp/mnt/mdt || {
1859 		error_noexit "Unmounting the MDT"
1862 	shall_cleanup_mdt=false
1864 	$r umount -d $tmp/mnt/ost || {
1865 		error_noexit "Unmounting the OST"
1868 	shall_cleanup_ost=false
1870 	t32_reload_modules $node || {
1871 		error_noexit "Reloading modules"
1875 	# mount a second time to make sure we didn't leave the upgrade flag on
1876 	$r $TUNEFS --dryrun $tmp/mdt || {
1878 		error_noexit "tunefs.lustre before remounting the MDT"
1881 	$r mount -t lustre -o loop,exclude=$fsname-OST0000 $tmp/mdt \
1883 		error_noexit "Remounting the MDT"
1886 	shall_cleanup_mdt=true
# --- test_32a / 32b / 32c / 32d driver fragments --------------------------
# Each test_32* iterates over the available disk-image tarballs and runs
# t32_test with different knobs: 32a plain upgrade, 32b with writeconf,
# 32c with a DNE (second MDT) upgrade, 32d with FID-in-dirent conversion.
# Failures are accumulated into rc across tarballs rather than aborting.
1896 	for tarball in $tarballs; do
1897 		t32_test $tarball || let "rc += $?"
1901 run_test 32a "Upgrade (not live)"
1909 	for tarball in $tarballs; do
1910 		t32_test $tarball writeconf || let "rc += $?"
1914 run_test 32b "Upgrade with writeconf"
# 32c needs at least two MDTs to exercise DNE.
1921 	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
1923 	for tarball in $tarballs; do
1924 		dne_upgrade=yes t32_test $tarball writeconf || rc=$?
1928 run_test 32c "dne upgrade test"
1936 	for tarball in $tarballs; do
1937 		ff_convert=yes t32_test $tarball || rc=$?
1941 run_test 32d "convert ff test"
# test_33a: format a second filesystem (FSNAME2) whose single OST uses a
# very large index (8191), mount it, and check that basic I/O works.
# Regression test for bug 12333.
1943 test_33a() { # bug 12333, was test_33
1945 	local FSNAME2=test-123
1946 	local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
1949 	[ -n "$ost1_HOST" ] && fs2ost_HOST=$ost1_HOST
# A loopback second fs cannot coexist with a real-block-device first fs.
1951 	if [ -z "$fs2ost_DEV" -o -z "$fs2mds_DEV" ]; then
1952 		local dev=${SINGLEMDS}_dev
1953 		local MDSDEV=${!dev}
1954 		is_blkdev $SINGLEMDS $MDSDEV && \
1955 		skip_env "mixed loopback and real device not working" && return
1958 	local fs2mdsdev=$(mdsdevname 1_2)
1959 	local fs2ostdev=$(ostdevname 1_2)
1960 	local fs2mdsvdev=$(mdsvdevname 1_2)
1961 	local fs2ostvdev=$(ostvdevname 1_2)
1963 	if [ $(facet_fstype mds1) == ldiskfs ]; then
1964 		mkfsoptions="--mkfsoptions=\\\"-J size=8\\\"" # See bug 17931.
1967 	add fs2mds $(mkfs_opts mds1 ${fs2mdsdev}) --mgs --fsname=${FSNAME2} \
1968 		--reformat $mkfsoptions $fs2mdsdev $fs2mdsvdev || exit 10
# --index=8191 is the point of the test: a large OST index.
1969 	add fs2ost $(mkfs_opts ost1 ${fs2ostdev}) --mgsnode=$MGSNID \
1970 		--fsname=${FSNAME2} --index=8191 --reformat $fs2ostdev \
1971 		$fs2ostvdev || exit 10
1973 	start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS && trap cleanup_fs2 EXIT INT
1974 	start fs2ost $fs2ostdev $OST_MOUNT_OPTS
1975 	do_facet $SINGLEMDS "$LCTL conf_param $FSNAME2.sys.timeout=200" || rc=1
1977 	mount -t lustre $MGSNID:/${FSNAME2} $MOUNT2 || rc=2
1980 	cp /etc/hosts $MOUNT2/ || rc=3
1981 	$LFS getstripe $MOUNT2/hosts
1986 	cleanup_nocli || rc=6
1989 run_test 33a "Mount ost with a large index number"
# test_33b: drop the LDLM lock-cancel reply during client unmount and
# make sure umount still completes.  OBD_FAIL_LDLM_CANCEL_NET (0x304)
# with the 0x8000 "fail once" bit.
1991 test_33b() { # was test_34
1994 	do_facet client dd if=/dev/zero of=$MOUNT/24 bs=1024k count=1
1995 	# Drop lock cancelation reply during umount
1996 	#define OBD_FAIL_LDLM_CANCEL_NET			0x304
1997 	do_facet client lctl set_param fail_loc=0x80000304
1998 	#lctl set_param debug=-1
1999 	umount_client $MOUNT
2002 run_test 33b "Drop cancel during umount"
# --- test_34a/34b/34c fragments -------------------------------------------
# 34a: a plain umount while a file is held open by multiop must FAIL.
2006 	do_facet client "sh runmultiop_bg_pause $DIR/file O_c"
2007 	manual_umount_client
# Release the multiop holder so cleanup can proceed.
2009 	do_facet client killall -USR1 multiop
2010 	if [ $rc -eq 0 ]; then
2011 		error "umount not fail!"
2016 run_test 34a "umount with opened file should be fail"
# 34b: force umount must succeed (and leave a clean mtab) even after the
# MDS has been force-stopped.
2021 	touch $DIR/$tfile || return 1
2022 	stop_mds --force || return 2
2024 	manual_umount_client --force
2026 	if [ $rc -ne 0 ]; then
2027 		error "mtab after failed umount - rc $rc"
2033 run_test 34b "force umount with failed mds should be normal"
# 34c: same as 34b but with the OST force-stopped instead of the MDS.
2037 	touch $DIR/$tfile || return 1
2038 	stop_ost --force || return 2
2040 	manual_umount_client --force
2042 	if [ $rc -ne 0 ]; then
2043 		error "mtab after failed umount - rc $rc"
2049 run_test 34c "force umount with failed ost should be normal"
# test_35a: register a fake failover NID for the MDT, restart the MDT,
# and verify (by parsing the debug log) that the client tries the LAST
# ACTIVE server first when reconnecting, not the fake failnode.
# Regression test for bug 12459.
2051 test_35a() { # bug 12459
2054 	DBG_SAVE="`lctl get_param -n debug`"
2055 	lctl set_param debug="ha"
2057 	log "Set up a fake failnode for the MDS"
2059 	local device=$(do_facet $SINGLEMDS "lctl get_param -n devices" |
2060 		awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1)
2061 	do_facet mgs "$LCTL conf_param \
2062 		      ${device}.failover.node=$(h2$NETTYPE $FAKENID)" || return 4
2064 	log "Wait for RECONNECT_INTERVAL seconds (10s)"
# Unique marker logged so the awk script below can find where this
# test's activity starts in the debug log.
2067 	MSG="conf-sanity.sh test_35a `date +%F%kh%Mm%Ss`"
2070 	log "Stopping the MDT: $device"
2071 	stop_mdt 1 || return 5
# Kick off a df in the background to force a reconnect attempt.
2073 	df $MOUNT > /dev/null 2>&1 &
2075 	log "Restarting the MDT: $device"
2076 	start_mdt 1 || return 6
2077 	log "Wait for df ($DFPID) ... "
2080 	lctl set_param debug="$DBG_SAVE"
2082 	# retrieve from the log the first server that the client tried to
2083 	# contact after the connection loss
2084 	$LCTL dk $TMP/lustre-log-$TESTNAME.log
# NEXTCONN ends up "0" if the first reconnect target was not the fake
# NID (the elided awk body sets it from the matched connection line).
2085 	NEXTCONN=`awk "/${MSG}/ {start = 1;}
2086 		       /import_select_connection.*$device-mdc.* using connection/ {
2088 				if (\\\$NF ~ /$FAKENID/)
2094 		       }" $TMP/lustre-log-$TESTNAME.log`
2095 	[ "$NEXTCONN" != "0" ] && log "The client didn't try to reconnect to the last active server (tried ${NEXTCONN} instead)" && return 7
2097 	# remove nid settings
2098 	writeconf_or_reformat
2100 run_test 35a "Reconnect to the last active server first"
# test_35b: while the MDS is made artificially "busy" (EBUSY injection
# via OBD_FAIL_MDS_RESEND), verify that the client keeps retrying the
# PRIMARY server, never fails over to the fake failnode, and does not
# reconnect faster than the expected rate limit.  Regression for bug
# 18674.  Requires a remote MDS so the injection doesn't stall the
# client itself.
2102 test_35b() { # bug 18674
2103 	remote_mds || { skip "local MDS" && return 0; }
2107 	$LCTL set_param debug="ha"
2109 	MSG="conf-sanity.sh test_35b `date +%F%kh%Mm%Ss`"
2112 	log "Set up a fake failnode for the MDS"
2114 	local device=$(do_facet $SINGLEMDS "$LCTL get_param -n devices" |
2115 		awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1)
2116 	do_facet mgs "$LCTL conf_param \
2117 		      ${device}.failover.node=$(h2$NETTYPE $FAKENID)" || return 1
2119 	local at_max_saved=0
2120 	# adaptive timeouts may prevent seeing the issue
2121 	if at_is_enabled; then
2122 		at_max_saved=$(at_max_get mds)
2123 		at_max_set 0 mds client
2126 	mkdir -p $MOUNT/$tdir
2128 	log "Injecting EBUSY on MDS"
2129 	# Setting OBD_FAIL_MDS_RESEND=0x136
2130 	do_facet $SINGLEMDS "$LCTL set_param fail_loc=0x80000136" || return 2
# Clear mdc stats so CONNCNT below counts only this test's connects.
2132 	$LCTL set_param mdc.${FSNAME}*.stats=clear
2134 	log "Creating a test file and stat it"
2135 	touch $MOUNT/$tdir/$tfile
2136 	stat $MOUNT/$tdir/$tfile
2138 	log "Stop injecting EBUSY on MDS"
2139 	do_facet $SINGLEMDS "$LCTL set_param fail_loc=0" || return 3
2140 	rm -f $MOUNT/$tdir/$tfile
2143 	# restore adaptive timeout
2144 	[ $at_max_saved -ne 0 ] && at_max_set $at_max_saved mds client
2146 	$LCTL dk $TMP/lustre-log-$TESTNAME.log
2148 	CONNCNT=`$LCTL get_param mdc.${FSNAME}*.stats | awk '/mds_connect/{print $2}'`
2150 	# retrieve from the log if the client has ever tried to
2151 	# contact the fake server after the loss of connection
# ret semantics (from the elided awk body): 0 = no reconnect attempt at
# all, 2 = the fake failover NID was tried; anything else is OK.
2152 	FAILCONN=`awk "BEGIN {ret = 0;}
2153 		       /import_select_connection.*${FSNAME}-MDT0000-mdc.* using connection/ {
2155 				if (\\\$NF ~ /$FAKENID/) {
2160 		       END {print ret}" $TMP/lustre-log-$TESTNAME.log`
2162 	[ "$FAILCONN" == "0" ] && \
2163 		log "ERROR: The client reconnection has not been triggered" && \
2165 	[ "$FAILCONN" == "2" ] && \
2166 		log "ERROR: The client tried to reconnect to the failover server while the primary was busy" && \
2170 	# When OBD_FAIL_MDS_RESEND is hit, we sleep for 2 * obd_timeout
2171 	# Reconnects are supposed to be rate limited to one every 5s
2172 	[ $CONNCNT -gt $((2 * $TIMEOUT / 5 + 1)) ] && \
2173 		log "ERROR: Too many reconnects $CONNCNT" && \
2177 	# remove nid settings
2178 	writeconf_or_reformat
2180 run_test 35b "Continue reconnection retries, if the active server is busy"
# --- body of test_36 ------------------------------------------------------
# Build a second filesystem with one MDS and two extra OSTs, write some
# data, then cross-check the totals reported by `df` against the sums of
# the per-OST kbytestotal/kbytesfree/kbytesavail counters, within a
# per-OST allowance.  Guards: needs >= 2 OSTs and a local OST host.
2183 	[ $OSTCOUNT -lt 2 ] && skip_env "skipping test for single OST" && return
2185 	[ "$ost_HOST" = "`hostname`" -o "$ost1_HOST" = "`hostname`" ] || \
2186 		{ skip "remote OST" && return 0; }
2189 	local FSNAME2=test1234
2190 	local fs3ost_HOST=$ost_HOST
2191 	local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
2193 	[ -n "$ost1_HOST" ] && fs2ost_HOST=$ost1_HOST && fs3ost_HOST=$ost1_HOST
2195 	if [ -z "$fs2ost_DEV" -o -z "$fs2mds_DEV" -o -z "$fs3ost_DEV" ]; then
2196 		is_blkdev $SINGLEMDS $MDSDEV && \
2197 		skip_env "mixed loopback and real device not working" && return
2200 	local fs2mdsdev=$(mdsdevname 1_2)
2201 	local fs2ostdev=$(ostdevname 1_2)
2202 	local fs3ostdev=$(ostdevname 2_2)
2203 	local fs2mdsvdev=$(mdsvdevname 1_2)
2204 	local fs2ostvdev=$(ostvdevname 1_2)
2205 	local fs3ostvdev=$(ostvdevname 2_2)
2207 	add fs2mds $(mkfs_opts mds1 ${fs2mdsdev}) --mgs --fsname=${FSNAME2} \
2208 		--reformat $fs2mdsdev $fs2mdsvdev || exit 10
2209 	# XXX after we support non 4K disk blocksize in ldiskfs, specify a
2210 	# different one than the default value here.
2211 	add fs2ost $(mkfs_opts ost1 ${fs2ostdev}) --mgsnode=$MGSNID \
2212 		--fsname=${FSNAME2} --reformat $fs2ostdev $fs2ostvdev || exit 10
2213 	add fs3ost $(mkfs_opts ost1 ${fs3ostdev}) --mgsnode=$MGSNID \
2214 		--fsname=${FSNAME2} --reformat $fs3ostdev $fs3ostvdev || exit 10
2216 	start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS
2217 	start fs2ost $fs2ostdev $OST_MOUNT_OPTS
2218 	start fs3ost $fs3ostdev $OST_MOUNT_OPTS
2220 	mount -t lustre $MGSNID:/${FSNAME2} $MOUNT2 || return 1
2222 	sleep 5 # until 11778 fixed
2224 	dd if=/dev/zero of=$MOUNT2/$tfile bs=1M count=7 || return 2
# Sum space counters across all OSTs (kbytes), then compare to df.
2226 	BKTOTAL=`lctl get_param -n obdfilter.*.kbytestotal | awk 'BEGIN{total=0}; {total+=$1}; END{print total}'`
2227 	BKFREE=`lctl get_param -n obdfilter.*.kbytesfree | awk 'BEGIN{free=0}; {free+=$1}; END{print free}'`
2228 	BKAVAIL=`lctl get_param -n obdfilter.*.kbytesavail | awk 'BEGIN{avail=0}; {avail+=$1}; END{print avail}'`
2229 	STRING=`df -P $MOUNT2 | tail -n 1 | awk '{print $2","$3","$4}'`
2230 	DFTOTAL=`echo $STRING | cut -d, -f1`
2231 	DFUSED=`echo $STRING  | cut -d, -f2`
2232 	DFAVAIL=`echo $STRING | cut -d, -f3`
2233 	DFFREE=$(($DFTOTAL - $DFUSED))
# 64 KiB slack per OST to absorb journal/reservation differences.
2235 	ALLOWANCE=$((64 * $OSTCOUNT))
2237 	if [ $DFTOTAL -lt $(($BKTOTAL - $ALLOWANCE)) ] ||
2238 	   [ $DFTOTAL -gt $(($BKTOTAL + $ALLOWANCE)) ] ; then
2239 		echo "**** FAIL: df total($DFTOTAL) mismatch OST total($BKTOTAL)"
2242 	if [ $DFFREE -lt $(($BKFREE - $ALLOWANCE)) ] ||
2243 	   [ $DFFREE -gt $(($BKFREE + $ALLOWANCE)) ] ; then
2244 		echo "**** FAIL: df free($DFFREE) mismatch OST free($BKFREE)"
2247 	if [ $DFAVAIL -lt $(($BKAVAIL - $ALLOWANCE)) ] ||
2248 	   [ $DFAVAIL -gt $(($BKAVAIL + $ALLOWANCE)) ] ; then
2249 		echo "**** FAIL: df avail($DFAVAIL) mismatch OST avail($BKAVAIL)"
2254 	stop fs3ost -f || return 200
2255 	stop fs2ost -f || return 201
2256 	stop fs2mds -f || return 202
2257 	unload_modules_conf || return 203
2260 run_test 36 "df report consistency on OSTs with different block size"
# --- body of test_37 ------------------------------------------------------
# Mount the MDT through a SYMLINK to its device and verify that mount
# still succeeds and that tunables can be applied (ldiskfs only).
2263 	local mntpt=$(facet_mntpt $SINGLEMDS)
2264 	local mdsdev=$(mdsdevname ${SINGLEMDS//mds/})
2265 	local mdsdev_sym="$TMP/sym_mdt.img"
2266 	local opts=$MDS_MOUNT_OPTS
2269 	if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
2270 		skip "Currently only applicable to ldiskfs-based MDTs"
2274 	echo "MDS :     $mdsdev"
2275 	echo "SYMLINK : $mdsdev_sym"
2276 	do_facet $SINGLEMDS rm -f $mdsdev_sym
2278 	do_facet $SINGLEMDS ln -s $mdsdev $mdsdev_sym
2280 	echo "mount symlink device - $mdsdev_sym"
# Non-block backing file needs an explicit loop mount option.
2282 	if ! do_facet $SINGLEMDS test -b $mdsdev; then
2283 		opts=$(csa_add "$opts" -o loop)
2285 	mount_op=$(do_facet $SINGLEMDS mount -v -t lustre $opts \
2286 		$mdsdev_sym $mntpt 2>&1)
2289 	echo mount_op=$mount_op
2291 	do_facet $SINGLEMDS "umount -d $mntpt && rm -f $mdsdev_sym"
# The mount output is scanned because tunable failures are non-fatal to
# mount(8) and only appear in its verbose output.
2293 	if $(echo $mount_op | grep -q "unable to set tunable"); then
2294 		error "set tunables failed for symlink device"
2297 	[ $rc -eq 0 ] || error "mount symlink $mdsdev_sym failed! rc=$rc"
2301 run_test 37 "verify set tunables works for symlink device"
# test_38: delete the MDS's lov_objid file with debugfs and verify the
# MDS recreates it correctly from OST data on reconnect, both for a
# missing file and for a zeroed-out file; file contents are compared
# before/after to prove no objects were lost.  Regression for bug 14222.
# ldiskfs only (relies on debugfs).
2303 test_38() { # bug 14222
2304 	if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
2305 		skip "Only applicable to ldiskfs-based MDTs"
# Populate the fs with a sample of existing files to verify against.
2313 	FILES=`find $SRC -type f -mtime +1 | head -n $COUNT`
2314 	log "copying $(echo $FILES | wc -w) files to $DIR/$tdir"
2316 	tar cf - $FILES | tar xf - -C $DIR/$tdir || \
2317 		error "copying $SRC to $DIR/$tdir"
2319 	umount_client $MOUNT
2321 	log "rename lov_objid file on MDS"
2322 	rm -f $TMP/lov_objid.orig
2324 	local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
# Save then remove lov_objid directly on the raw MDT device.
2325 	do_facet $SINGLEMDS "$DEBUGFS -c -R \\\"dump lov_objid $TMP/lov_objid.orig\\\" $MDSDEV"
2326 	do_facet $SINGLEMDS "$DEBUGFS -w -R \\\"rm lov_objid\\\" $MDSDEV"
2328 	do_facet $SINGLEMDS "od -Ax -td8 $TMP/lov_objid.orig"
2329 	# check create in mds_lov_connect
2333 		[ $V ] && log "verifying $DIR/$tdir/$f"
2334 		diff -q $f $DIR/$tdir/$f || ERROR=y
2336 	do_facet $SINGLEMDS "$DEBUGFS -c -R \\\"dump lov_objid $TMP/lov_objid.new\\\" $MDSDEV"
2337 	do_facet $SINGLEMDS "od -Ax -td8 $TMP/lov_objid.new"
2338 	[ "$ERROR" = "y" ] && error "old and new files are different after connect" || true
2340 	# check it's updates in sync
2341 	umount_client $MOUNT
# Second pass: replace lov_objid with a zero-filled block instead of
# removing it, to exercise the resync path.
2344 	do_facet $SINGLEMDS dd if=/dev/zero of=$TMP/lov_objid.clear bs=4096 count=1
2345 	do_facet $SINGLEMDS "$DEBUGFS -w -R \\\"rm lov_objid\\\" $MDSDEV"
2346 	do_facet $SINGLEMDS "$DEBUGFS -w -R \\\"write $TMP/lov_objid.clear lov_objid\\\" $MDSDEV "
2351 		[ $V ] && log "verifying $DIR/$tdir/$f"
2352 		diff -q $f $DIR/$tdir/$f || ERROR=y
2354 	do_facet $SINGLEMDS "$DEBUGFS -c -R \\\"dump lov_objid $TMP/lov_objid.new1\\\" $MDSDEV"
2355 	do_facet $SINGLEMDS "od -Ax -td8 $TMP/lov_objid.new1"
2356 	umount_client $MOUNT
2358 	[ "$ERROR" = "y" ] && error "old and new files are different after sync" || true
2360 	log "files compared the same"
2363 run_test 38 "MDS recreates missing lov_objid file from OST data"
# --- test_39 fragment: run leak_finder.pl over the collected debug log;
# any "Leak:" line means a LUSTRE/LNET allocation was not freed.
2369 	perl $SRCDIR/leak_finder.pl $TMP/debug 2>&1 | egrep '*** Leak:' &&
2370 		error "memory leak detected" || true
2372 run_test 39 "leak_finder recognizes both LUSTRE and LNET malloc messages"
# test_40: inject OBD_FAIL_TGT_TOOMANY_THREADS (0x706, fail-once) to
# race service thread startup.  Regression for bug 15759.
2374 test_40() { # bug 15759
2376 	#define OBD_FAIL_TGT_TOOMANY_THREADS     0x706
2377 	do_facet $SINGLEMDS "$LCTL set_param fail_loc=0x80000706"
2381 run_test 40 "race during service thread startup"
# test_41a: start the MDS first with -o nosvc (MGS only), bring up the
# OST, then restart the MDS with -o nomgs,force; verify the client can
# mount and do I/O.  Regression for bug 14134.  Skipped for loop
# devices, which don't work with nosvc.
2383 test_41a() { #bug 14134
2384 	if [ $(facet_fstype $SINGLEMDS) == ldiskfs ] &&
2385 	   ! do_facet $SINGLEMDS test -b $(mdsdevname 1); then
2386 		skip "Loop devices does not work with nosvc option"
2391 	local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
2393 	start $SINGLEMDS $MDSDEV $MDS_MOUNT_OPTS -o nosvc -n
2394 	start ost1 `ostdevname 1` $OST_MOUNT_OPTS
2395 	start $SINGLEMDS $MDSDEV $MDS_MOUNT_OPTS -o nomgs,force
2397 	mount_client $MOUNT || return 1
2400 	echo "blah blah" > $MOUNT/$tfile
2403 	umount_client $MOUNT
2404 	stop ost1 -f || return 201
# stop_mds is called twice: once for each of the two MDS start
# invocations above (nosvc and nomgs).
2405 	stop_mds -f || return 202
2406 	stop_mds -f || return 203
2407 	unload_modules_conf || return 204
2410 run_test 41a "mount mds with --nosvc and --nomgs"
# test_41b: same nosvc/nomgs sequence, but on a FIRST mount (freshly
# formatted fs); requires a combined MGS+MDS device.
2413 	if [ $(facet_fstype $SINGLEMDS) == ldiskfs ] &&
2414 	   ! do_facet $SINGLEMDS test -b $(mdsdevname 1); then
2415 		skip "Loop devices does not work with nosvc option"
2419 	! combined_mgs_mds && skip "needs combined mgs device" && return 0
2423 	local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
2425 	start $SINGLEMDS $MDSDEV $MDS_MOUNT_OPTS -o nosvc -n
2427 	start $SINGLEMDS $MDSDEV $MDS_MOUNT_OPTS -o nomgs,force
2429 	mount_client $MOUNT || return 1
2432 	echo "blah blah" > $MOUNT/$tfile
2433 	cat $MOUNT/$tfile || return 200
2435 	umount_client $MOUNT
2436 	stop_ost || return 201
2437 	stop_mds -f || return 202
2438 	stop_mds -f || return 203
2441 run_test 41b "mount mds with --nosvc and --nomgs on first mount"
# test_42: set deliberately invalid llite and sys conf_param values and
# verify clients/servers can still unmount, remount, and be stopped —
# bad config log entries must not wedge the filesystem.  Bug 14693.
2443 test_42() { #bug 14693
2445 	check_mount || error "client was not mounted"
2447 	do_facet mgs $LCTL conf_param $FSNAME.llite.some_wrong_param=10
2448 	umount_client $MOUNT ||
2449 		error "unmounting client failed with invalid llite param"
2450 	mount_client $MOUNT ||
2451 		error "mounting client failed with invalid llite param"
2453 	do_facet mgs $LCTL conf_param $FSNAME.sys.some_wrong_param=20
2454 	cleanup || error "stopping $FSNAME failed with invalid sys param"
# NOTE(review): this line is missing an `error` call — as written the
# message string itself would be executed as a command on failure.
2457 	check_mount || "client was not mounted with invalid sys param"
2458 	cleanup || error "stopping $FSNAME failed with invalid sys param"
2461 run_test 42 "allow client/server mount/unmount with invalid config param"
# --- body of test_43 ------------------------------------------------------
# Exercise MDT root_squash and nosquash_nids:
#   1. Create files owned by root and by $RUNAS_ID, plus a root-owned dir.
#   2. Set root_squash to $RUNAS_ID:$RUNAS_ID — root must then be able to
#      read/write only the RUNAS-owned file; all access to root-owned
#      files/dirs must be denied.
#   3. Add the client's NIDs to nosquash_nids — root access to the
#      root-owned objects must work again.
# Requires running as root with a non-root RUNAS_ID.
2464 	[ $UID -ne 0 -o $RUNAS_ID -eq 0 ] && skip_env "run as root"
2466 	chmod ugo+x $DIR || error "chmod 0 failed"
2467 	set_conf_param_and_check mds					\
2468 		"lctl get_param -n mdt.$FSNAME-MDT0000.root_squash"	\
2469 		"$FSNAME.mdt.root_squash"				\
2471 	set_conf_param_and_check mds					\
2472 		"lctl get_param -n mdt.$FSNAME-MDT0000.nosquash_nids"	\
2473 		"$FSNAME.mdt.nosquash_nids"				\
2477 	# create set of test files
2479 	echo "111" > $DIR/$tfile-userfile || error "write 1 failed"
2480 	chmod go-rw $DIR/$tfile-userfile || error "chmod 1 failed"
2481 	chown $RUNAS_ID.$RUNAS_ID $DIR/$tfile-userfile || error "chown failed"
2483 	echo "222" > $DIR/$tfile-rootfile || error "write 2 failed"
2484 	chmod go-rw $DIR/$tfile-rootfile || error "chmod 2 faield"
2486 	mkdir $DIR/$tdir-rootdir -p || error "mkdir failed"
2487 	chmod go-rwx $DIR/$tdir-rootdir || error "chmod 3 failed"
2488 	touch $DIR/$tdir-rootdir/tfile-1 || error "touch failed"
2491 	# check root_squash:
2492 	#   set root squash UID:GID to RUNAS_ID
2493 	#   root should be able to access only files owned by RUNAS_ID
2495 	set_conf_param_and_check mds					\
2496 		"lctl get_param -n mdt.$FSNAME-MDT0000.root_squash"	\
2497 		"$FSNAME.mdt.root_squash"				\
2498 		"$RUNAS_ID:$RUNAS_ID"
2500 	ST=$(stat -c "%n: owner uid %u (%A)" $DIR/$tfile-userfile)
2501 	dd if=$DIR/$tfile-userfile 1>/dev/null 2>/dev/null || \
2502 		error "$ST: root read permission is denied"
2503 	echo "$ST: root read permission is granted - ok"
2506 	dd conv=notrunc if=$DIR/$tfile-userfile 1>/dev/null 2>/dev/null || \
2507 		error "$ST: root write permission is denied"
2508 	echo "$ST: root write permission is granted - ok"
2510 	ST=$(stat -c "%n: owner uid %u (%A)" $DIR/$tfile-rootfile)
2511 	dd if=$DIR/$tfile-rootfile 1>/dev/null 2>/dev/null && \
2512 		error "$ST: root read permission is granted"
2513 	echo "$ST: root read permission is denied - ok"
2516 	dd conv=notrunc of=$DIR/$tfile-rootfile 1>/dev/null 2>/dev/null && \
2517 		error "$ST: root write permission is granted"
2518 	echo "$ST: root write permission is denied - ok"
2520 	ST=$(stat -c "%n: owner uid %u (%A)" $DIR/$tdir-rootdir)
2521 	rm $DIR/$tdir-rootdir/tfile-1 1>/dev/null 2>/dev/null && \
2522 		error "$ST: root unlink permission is granted"
2523 	echo "$ST: root unlink permission is denied - ok"
# NOTE(review): path below is $DIR/tdir-rootdir (no $ before tdir) —
# possibly a typo for $DIR/$tdir-rootdir; confirm against upstream.
2525 	touch $DIR/tdir-rootdir/tfile-2 1>/dev/null 2>/dev/null && \
2526 		error "$ST: root create permission is granted"
2527 	echo "$ST: root create permission is denied - ok"
2530 	# check nosquash_nids:
2531 	#   put client's NID into nosquash_nids list,
2532 	#   root should be able to access root file after that
2534 	local NIDLIST=$(lctl list_nids all | tr '\n' ' ')
2535 	NIDLIST="2@elan $NIDLIST 192.168.0.[2,10]@tcp"
2536 	NIDLIST=$(echo $NIDLIST | tr -s ' ' ' ')
2537 	set_conf_param_and_check mds					\
2538 		"lctl get_param -n mdt.$FSNAME-MDT0000.nosquash_nids"	\
2539 		"$FSNAME-MDTall.mdt.nosquash_nids"			\
2542 	ST=$(stat -c "%n: owner uid %u (%A)" $DIR/$tfile-rootfile)
2543 	dd if=$DIR/$tfile-rootfile 1>/dev/null 2>/dev/null || \
2544 		error "$ST: root read permission is denied"
2545 	echo "$ST: root read permission is granted - ok"
2548 	dd conv=notrunc of=$DIR/$tfile-rootfile 1>/dev/null 2>/dev/null || \
2549 		error "$ST: root write permission is denied"
2550 	echo "$ST: root write permission is granted - ok"
2552 	ST=$(stat -c "%n: owner uid %u (%A)" $DIR/$tdir-rootdir)
2553 	rm $DIR/$tdir-rootdir/tfile-1 || \
2554 		error "$ST: root unlink permission is denied"
2555 	echo "$ST: root unlink permission is granted - ok"
2556 	touch $DIR/$tdir-rootdir/tfile-2 || \
2557 		error "$ST: root create permission is denied"
2558 	echo "$ST: root create permission is granted - ok"
2562 run_test 43 "check root_squash and nosquash_nids"
# --- test_44 fragment: verify the mounted client's UUID shows up among
# the MDT's per-export proc entries.
2564 	umount_client $MOUNT
2569 	check_mount || return 2
2570 	UUID=$($LCTL get_param llite.${FSNAME}*.uuid | cut -d= -f2)
2572 	UUIDS=$(do_facet $SINGLEMDS "$LCTL get_param mdt.${FSNAME}*.exports.*.uuid")
2573 	for VAL in $UUIDS; do
2574 		NID=$(echo $VAL | cut -d= -f1)
2575 		CLUUID=$(echo $VAL | cut -d= -f2)
2576 		[ "$UUID" = "$CLUUID" ] && STATS_FOUND=yes && break
2578 	[ "$STATS_FOUND" = "no" ] && error "stats not found for client"
2582 run_test 44 "mounted client proc entry exists"
# --- test_45 fragment: with OBD_FAIL_PTLRPC_LONG_UNLINK (0x50f) set,
# force-unmount the client and verify ptlrpcd copes with the delayed
# bulk unlink, then remount cleanly.
2586 	check_mount || return 2
2591 	#define OBD_FAIL_PTLRPC_LONG_UNLINK   0x50f
2592 	do_facet client "lctl set_param fail_loc=0x50f"
2595 	manual_umount_client --force || return 3
2596 	do_facet client "lctl set_param fail_loc=0x0"
2598 	mount_client $MOUNT || return 4
2602 run_test 45 "long unlink handling in ptlrpcd"
# --- cleanup_46a fragment: unmount both clients, stop OSTs ost$count
# down to ost1, then cleanup; finishes with a writeconf to purge the
# extra-OST configuration so later tests start clean.
2609 	umount_client $MOUNT2 || rc=$?
2610 	umount_client $MOUNT || rc=$?
2611 	while [ $count -gt 0 ]; do
2612 		stop ost${count} -f || rc=$?
2616 	cleanup_nocli || rc=$?
2617 	#writeconf to remove all ost2 traces for subsequent tests
2618 	writeconf_or_reformat
# --- test_46a: mount a client while only ost1 is up, add the remaining
# OSTs afterwards, then verify a second client sees all OSTs and that a
# fully-wide-striped file (stripe count -1) with many ACLs is readable
# from the first client.
2623 	echo "Testing with $OSTCOUNT OSTs"
2625 	start_mds || return 1
2626 	#first client should see only one ost
2627 	start_ost || return 2
2628 	wait_osc_import_state mds ost FULL
2630 	mount_client $MOUNT || return 3
2631 	trap "cleanup_46a $OSTCOUNT" EXIT ERR
2634 	for (( i=2; i<=$OSTCOUNT; i++ )); do
2635 		start ost$i `ostdevname $i` $OST_MOUNT_OPTS || return $((i+2))
2638 	# wait until osts in sync
2639 	for (( i=2; i<=$OSTCOUNT; i++ )); do
2640 		wait_osc_import_state mds ost$i FULL
2641 		wait_osc_import_state client ost$i FULL
2644 	#second client see all ost's
2646 	mount_client $MOUNT2 || return 8
2647 	$LFS setstripe -c -1 $MOUNT2 || return 9
2648 	$LFS getstripe $MOUNT2 || return 10
2650 	echo "ok" > $MOUNT2/widestripe
2651 	$LFS getstripe $MOUNT2/widestripe || return 11
2652 	# fill acl buffer for avoid expand lsm to them
2653 	awk -F : '{if (FNR < 25) { print "u:"$1":rwx" }}' /etc/passwd | while read acl; do
2654 	    setfacl -m $acl $MOUNT2/widestripe
# Cross-check from the first client mount.
2658 	stat $MOUNT/widestripe || return 12
2660 	cleanup_46a $OSTCOUNT || { echo "cleanup_46a failed!" && return 13; }
2663 run_test 46a "handle ost additional - wide striped file"
# --- test_47 fragment: record the MDT namespaces' lru_size before an
# MDS failover, then verify the client-side lru_resize settings survive
# the server restart unchanged.
2668 	check_mount || return 2
2669 	$LCTL set_param ldlm.namespaces.$FSNAME-*-*-*.lru_size=100
2673 	for ns in $($LCTL get_param ldlm.namespaces.$FSNAME-*-*-*.lru_size); do
2674 		if echo $ns | grep "MDT[[:digit:]]*"; then
2677 		lrs=$(echo $ns | sed 's/.*lru_size=//')
2678 		lru_size[count]=$lrs
2683 	facet_failover $SINGLEMDS
2684 	client_up || return 3
# Re-read each namespace's lru_size and compare against saved values.
2687 	for ns in $($LCTL get_param ldlm.namespaces.$FSNAME-*-*-*.lru_size); do
2688 		if echo $ns | grep "MDT[[:digit:]]*"; then
2691 		lrs=$(echo $ns | sed 's/.*lru_size=//')
2692 		if ! test "$lrs" -eq "${lru_size[count]}"; then
2693 			n=$(echo $ns | sed -e 's/ldlm.namespaces.//' -e 's/.lru_size=.*//')
2694 			error "$n has lost lru_size: $lrs vs. ${lru_size[count]}"
2702 run_test 47 "server restart does not make client loss lru_resize settings"
# --- cleanup_48 fragment.
2707 	# reformat after this test is needed - if test will failed
2708 	# we will have unkillable file at FS
# test_48: fully wide-stripe a file and attach an ACL entry for every
# passwd user; too many ACLs on a wide-striped file used to break the
# MDS (bug 17636).
2712 test_48() { # bug 17636
2715 	check_mount || return 2
2717 	$LFS setstripe -c -1 $MOUNT || return 9
2718 	$LFS getstripe $MOUNT || return 10
2720 	echo "ok" > $MOUNT/widestripe
2721 	$LFS getstripe $MOUNT/widestripe || return 11
2723 	trap cleanup_48 EXIT ERR
2725 	# fill acl buffer for avoid expand lsm to them
2726 	getent passwd | awk -F : '{ print "u:"$1":rwx" }' | while read acl; do
2727 	    setfacl -m $acl $MOUNT/widestripe
2730 	stat $MOUNT/widestripe || return 12
2735 run_test 48 "too many acls on file"
# NOTE(review): numbered excerpt with missing interior lines (gaps in the
# embedded numbering); code left byte-identical — comments only.
# check PARAM_SYS_LDLM_TIMEOUT option of mkfs.lustre
2738 test_49a() { # bug 17710
2739 local timeout_orig=$TIMEOUT
2740 local ldlm_timeout_orig=$LDLM_TIMEOUT
2741 local LOCAL_TIMEOUT=20
2743 LDLM_TIMEOUT=$LOCAL_TIMEOUT
2744 TIMEOUT=$LOCAL_TIMEOUT
2748 check_mount || error "client mount failed"
2750 echo "check ldlm_timout..."
# MDS, OST and client must all report the same ldlm_timeout value
2751 local LDLM_MDS="$(do_facet $SINGLEMDS lctl get_param -n ldlm_timeout)"
2752 local LDLM_OST1="$(do_facet ost1 lctl get_param -n ldlm_timeout)"
2753 local LDLM_CLIENT="$(do_facet client lctl get_param -n ldlm_timeout)"
2755 if [ $LDLM_MDS -ne $LDLM_OST1 -o $LDLM_MDS -ne $LDLM_CLIENT ]; then
2756 error "Different LDLM_TIMEOUT:$LDLM_MDS $LDLM_OST1 $LDLM_CLIENT"
# presumably the default ldlm_timeout is TIMEOUT/3 when not set explicitly
# — TODO confirm against the server code
2759 if [ $LDLM_MDS -ne $((LOCAL_TIMEOUT / 3)) ]; then
2760 error "LDLM_TIMEOUT($LDLM_MDS) is not $((LOCAL_TIMEOUT / 3))"
2763 umount_client $MOUNT
2764 stop_ost || error "problem stopping OSS"
2765 stop_mds || error "problem stopping MDS"
# restore globals mutated above so later tests see the original values
2767 LDLM_TIMEOUT=$ldlm_timeout_orig
2768 TIMEOUT=$timeout_orig
2770 run_test 49a "check PARAM_SYS_LDLM_TIMEOUT option of mkfs.lustre"
2772 test_49b() { # bug 17710
2773 local timeout_orig=$TIMEOUT
2774 local ldlm_timeout_orig=$LDLM_TIMEOUT
2775 local LOCAL_TIMEOUT=20
# unlike 49a, set an explicit LDLM_TIMEOUT distinct from TIMEOUT
2777 LDLM_TIMEOUT=$((LOCAL_TIMEOUT - 1))
2778 TIMEOUT=$LOCAL_TIMEOUT
2782 check_mount || error "client mount failed"
2784 local LDLM_MDS="$(do_facet $SINGLEMDS lctl get_param -n ldlm_timeout)"
2785 local LDLM_OST1="$(do_facet ost1 lctl get_param -n ldlm_timeout)"
2786 local LDLM_CLIENT="$(do_facet client lctl get_param -n ldlm_timeout)"
2788 if [ $LDLM_MDS -ne $LDLM_OST1 -o $LDLM_MDS -ne $LDLM_CLIENT ]; then
2789 error "Different LDLM_TIMEOUT:$LDLM_MDS $LDLM_OST1 $LDLM_CLIENT"
2792 if [ $LDLM_MDS -ne $((LOCAL_TIMEOUT - 1)) ]; then
2793 error "LDLM_TIMEOUT($LDLM_MDS) is not $((LOCAL_TIMEOUT - 1))"
2796 cleanup || error "cleanup failed"
2798 LDLM_TIMEOUT=$ldlm_timeout_orig
2799 TIMEOUT=$timeout_orig
2801 run_test 49b "check PARAM_SYS_LDLM_TIMEOUT option of mkfs.lustre"
# lazystatfs() helper fragment:
# Test both statfs and lfs df and fail if either one fails
2805 multiop_bg_pause $1 f_
2808 killall -USR1 multiop
2809 [ $RC1 -ne 0 ] && log "lazystatfs multiop failed"
2810 wait $PID || { RC1=$?; log "multiop return error "; }
2817 if [ $RC2 -eq 0 ]; then
2819 log "lazystatfs df failed"
2823 [[ $RC1 -ne 0 || $RC2 -eq 0 ]] && RC=1
# test_50a fragment: lazystatfs must succeed with all servers up
2829 lctl set_param llite.$FSNAME-*.lazystatfs=1
2832 lazystatfs $MOUNT || error "lazystatfs failed but no down servers"
2834 cleanup || return $?
2836 run_test 50a "lazystatfs all servers available =========================="
# NOTE(review): numbered excerpt with missing interior lines (gaps in the
# embedded numbering); code left byte-identical — comments only.
# test_50b fragment: lazystatfs with all servers down
2840 lctl set_param llite.$FSNAME-*.lazystatfs=1
2843 # Wait for client to detect down OST
2844 stop_ost || error "Unable to stop OST1"
2845 wait_osc_import_state mds ost DISCONN
2847 lazystatfs $MOUNT || error "lazystatfs should don't have returned EIO"
2849 umount_client $MOUNT || error "Unable to unmount client"
2850 stop_mds || error "Unable to stop MDS"
2852 run_test 50b "lazystatfs all servers down =========================="
# test_50c fragment: lazystatfs with one of two OSTs down
2855 start_mds || error "Unable to start MDS"
2856 start_ost || error "Unable to start OST1"
2857 start_ost2 || error "Unable to start OST2"
2858 mount_client $MOUNT || error "Unable to mount client"
2859 lctl set_param llite.$FSNAME-*.lazystatfs=1
2862 # Wait for client to detect down OST
2863 stop_ost || error "Unable to stop OST1"
2864 wait_osc_import_state mds ost DISCONN
2865 lazystatfs $MOUNT || error "lazystatfs failed with one down server"
2867 umount_client $MOUNT || error "Unable to unmount client"
2868 stop_ost2 || error "Unable to stop OST2"
2869 stop_mds || error "Unable to stop MDS"
2870 #writeconf to remove all ost2 traces for subsequent tests
2871 writeconf_or_reformat
2873 run_test 50c "lazystatfs one server down =========================="
# test_50d fragment: statfs raced against the client noticing a down OST
2876 start_mds || error "Unable to start MDS"
2877 start_ost || error "Unable to start OST1"
2878 start_ost2 || error "Unable to start OST2"
2879 mount_client $MOUNT || error "Unable to mount client"
2880 lctl set_param llite.$FSNAME-*.lazystatfs=1
2883 # Issue the statfs during the window where the client still
2884 # believes the OST to be available but it is in fact down.
2885 # No failure just a statfs which hangs for a timeout interval.
2886 stop_ost || error "Unable to stop OST1"
2887 lazystatfs $MOUNT || error "lazystatfs failed with one down server"
2889 umount_client $MOUNT || error "Unable to unmount client"
2890 stop_ost2 || error "Unable to stop OST2"
2891 stop_mds || error "Unable to stop MDS"
2892 #writeconf to remove all ost2 traces for subsequent tests
2893 writeconf_or_reformat
2895 run_test 50d "lazystatfs client/server conn race =========================="
# test_50e fragment: non-lazy statfs must block until the OST returns
2902 start_mds || return 1
2903 #first client should see only one ost
2904 start_ost || return 2
2905 wait_osc_import_state mds ost FULL
2907 # Wait for client to detect down OST
2908 stop_ost || error "Unable to stop OST1"
2909 wait_osc_import_state mds ost DISCONN
2911 mount_client $MOUNT || error "Unable to mount client"
2912 lctl set_param llite.$FSNAME-*.lazystatfs=0
2914 multiop_bg_pause $MOUNT _f
2918 if [ $RC1 -ne 0 ]; then
2919 log "multiop failed $RC1"
2922 sleep $(( $TIMEOUT+1 ))
2924 [ $? -ne 0 ] && error "process isn't sleep"
2925 start_ost || error "Unable to start OST1"
2926 wait $pid || error "statfs failed"
2929 umount_client $MOUNT || error "Unable to unmount client"
2930 stop_ost || error "Unable to stop OST1"
2931 stop_mds || error "Unable to stop MDS"
2933 run_test 50e "normal statfs all servers down =========================="
# NOTE(review): numbered excerpt with missing interior lines (gaps in the
# embedded numbering); code left byte-identical — comments only.
# test_50f fragment: non-lazy statfs blocks while one of two OSTs is down
2938 CONN_PROC="osc.$FSNAME-OST0001-osc-[M]*.ost_server_uuid"
2940 start_mds || error "Unable to start mds"
2941 #first client should see only one ost
2942 start_ost || error "Unable to start OST1"
2943 wait_osc_import_state mds ost FULL
2945 start_ost2 || error "Unable to start OST2"
2946 wait_osc_import_state mds ost2 FULL
2948 # Wait for client to detect down OST
2949 stop_ost2 || error "Unable to stop OST2"
2951 wait_osc_import_state mds ost2 DISCONN
2952 mount_client $MOUNT || error "Unable to mount client"
2953 lctl set_param llite.$FSNAME-*.lazystatfs=0
2955 multiop_bg_pause $MOUNT _f
2959 if [ $RC1 -ne 0 ]; then
2960 log "lazystatfs multiop failed $RC1"
2963 sleep $(( $TIMEOUT+1 ))
2965 [ $? -ne 0 ] && error "process isn't sleep"
2966 start_ost2 || error "Unable to start OST2"
2967 wait $pid || error "statfs failed"
2968 stop_ost2 || error "Unable to stop OST2"
2971 umount_client $MOUNT || error "Unable to unmount client"
2972 stop_ost || error "Unable to stop OST1"
2973 stop_mds || error "Unable to stop MDS"
2974 #writeconf to remove all ost2 traces for subsequent tests
2975 writeconf_or_reformat
2977 run_test 50f "normal statfs one server in down =========================="
# test_50g fragment: a deactivated OST must not panic the client
2980 [ "$OSTCOUNT" -lt "2" ] && skip_env "$OSTCOUNT < 2, skipping" && return
2982 start_ost2 || error "Unable to start OST2"
2983 wait_osc_import_state mds ost2 FULL
2984 wait_osc_import_state client ost2 FULL
2986 local PARAM="${FSNAME}-OST0001.osc.active"
2988 $LFS setstripe -c -1 $DIR/$tfile || error "Unable to lfs setstripe"
2989 do_facet mgs $LCTL conf_param $PARAM=0 || error "Unable to deactivate OST"
# remount so the client picks up the deactivated-OST configuration
2991 umount_client $MOUNT || error "Unable to unmount client"
2992 mount_client $MOUNT || error "Unable to mount client"
2993 # This df should not cause a panic
2996 do_facet mgs $LCTL conf_param $PARAM=1 || error "Unable to activate OST"
2998 umount_client $MOUNT || error "Unable to unmount client"
2999 stop_ost2 || error "Unable to stop OST2"
3000 stop_ost || error "Unable to stop OST1"
3001 stop_mds || error "Unable to stop MDS"
3002 #writeconf to remove all ost2 traces for subsequent tests
3003 writeconf_or_reformat
3005 run_test 50g "deactivated OST should not cause panic====================="
# test_50h fragment (LU-642): re-activate an OST deactivated via tunefs
3009 # prepare MDT/OST, make OSC inactive for OST1
3010 [ "$OSTCOUNT" -lt "2" ] && skip_env "$OSTCOUNT < 2, skipping" && return
3011 do_facet ost1 "$TUNEFS --param osc.active=0 `ostdevname 1`" ||
3012 error "tunefs OST1 failed"
3013 start_mds || error "Unable to start MDT"
3014 start_ost || error "Unable to start OST1"
3015 start_ost2 || error "Unable to start OST2"
3016 mount_client $MOUNT || error "client start failed"
3020 # activate OSC for OST1
3021 local TEST="$LCTL get_param -n osc.${FSNAME}-OST0000-osc-[!M]*.active"
3022 set_conf_param_and_check client \
3023 "$TEST" "${FSNAME}-OST0000.osc.active" 1 ||
3024 error "Unable to activate OST1"
3026 mkdir -p $DIR/$tdir/2
3027 $LFS setstripe -c -1 -i 0 $DIR/$tdir/2
3028 sleep 1 && echo "create a file after OST1 is activated"
3030 createmany -o $DIR/$tdir/2/$tfile-%d 1
3032 # check OSC import is working
3033 stat $DIR/$tdir/2/* >/dev/null 2>&1 ||
3034 error "some OSC imports are still not connected"
3037 umount_client $MOUNT || error "Unable to umount client"
3038 stop_ost2 || error "Unable to stop OST2"
3041 run_test 50h "LU-642: activate deactivated OST ==="
# NOTE(review): numbered excerpt with missing interior lines (gaps in the
# embedded numbering); code left byte-identical — comments only.
# test_51 fragment: add an OST while an MDS reint operation is delayed
3044 local LOCAL_TIMEOUT=20
3048 check_mount || return 1
3051 $LFS setstripe -c -1 $MOUNT/d1
3052 #define OBD_FAIL_MDS_REINT_DELAY 0x142
3053 do_facet $SINGLEMDS "lctl set_param fail_loc=0x142"
3054 touch $MOUNT/d1/f1 &
3057 start_ost2 || return 2
3059 stop_ost2 || return 3
3061 #writeconf to remove all ost2 traces for subsequent tests
3062 writeconf_or_reformat
3064 run_test 51 "Verify that mdt_reint handles RMF_MDT_MD correctly when an OST is added"
# copy_files_xattrs fragment: tar-copy files to $dest on $node and save their
# extended attributes; remaining args are the paths to copy.
3073 do_node $node mkdir -p $dest
3074 [ $? -eq 0 ] || { error "Unable to create directory"; return 1; }
3076 do_node $node 'tar cf - '$@' | tar xf - -C '$dest';
3077 [ \"\${PIPESTATUS[*]}\" = \"0 0\" ] || exit 1'
3078 [ $? -eq 0 ] || { error "Unable to tar files"; return 2; }
3080 do_node $node 'getfattr -d -m "[a-z]*\\." '$@' > '$xattrs
3081 [ $? -eq 0 ] || { error "Unable to read xattrs"; return 3; }
# diff_files_xattrs fragment: re-copy the paths and diff contents and xattrs
# against the backup made by copy_files_xattrs.
3091 local backup2=${TMP}/backup2
3093 do_node $node mkdir -p $backup2
3094 [ $? -eq 0 ] || { error "Unable to create directory"; return 1; }
3096 do_node $node 'tar cf - '$@' | tar xf - -C '$backup2';
3097 [ \"\${PIPESTATUS[*]}\" = \"0 0\" ] || exit 1'
3098 [ $? -eq 0 ] || { error "Unable to tar files to diff"; return 2; }
3100 do_node $node "diff -rq $backup $backup2"
3101 [ $? -eq 0 ] || { error "contents differ"; return 3; }
3103 local xattrs2=${TMP}/xattrs2
3104 do_node $node 'getfattr -d -m "[a-z]*\\." '$@' > '$xattrs2
3105 [ $? -eq 0 ] || { error "Unable to read xattrs to diff"; return 4; }
3107 do_node $node "diff $xattrs $xattrs2"
3108 [ $? -eq 0 ] || { error "xattrs differ"; return 5; }
3110 do_node $node "rm -rf $backup2 $xattrs2"
3111 [ $? -eq 0 ] || { error "Unable to delete temporary files"; return 6; }
# test_52 fragment: move OST objects to lost+found, recover them with
# ll_recover_lost_found_objs, and verify contents/xattrs are intact.
3115 if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
3116 skip "Only applicable to ldiskfs-based MDTs"
3121 [ $? -eq 0 ] || { error "Unable to start MDS"; return 1; }
3123 [ $? -eq 0 ] || { error "Unable to start OST1"; return 2; }
3125 [ $? -eq 0 ] || { error "Unable to mount client"; return 3; }
3128 local ost1mnt=$(facet_mntpt ost1)
3129 local ost1node=$(facet_active_host ost1)
3130 local ost1tmp=$TMP/conf52
3134 [ $? -eq 0 ] || { error "Unable to create tdir"; return 4; }
3135 touch $TMP/modified_first
3136 [ $? -eq 0 ] || { error "Unable to create temporary file"; return 5; }
# mirror the timestamp on the OST node so 'find -newer' agrees on both sides
3137 local mtime=$(stat -c %Y $TMP/modified_first)
3138 do_node $ost1node "mkdir -p $ost1tmp && touch -m -d @$mtime $ost1tmp/modified_first"
3140 [ $? -eq 0 ] || { error "Unable to create temporary file"; return 6; }
3143 $LFS setstripe -c -1 -S 1M $DIR/$tdir
3144 [ $? -eq 0 ] || { error "lfs setstripe failed"; return 7; }
3146 for (( i=0; i < nrfiles; i++ )); do
3147 multiop $DIR/$tdir/$tfile-$i Ow1048576w1048576w524288c
3148 [ $? -eq 0 ] || { error "multiop failed"; return 8; }
3154 echo backup files to $TMP/files
3155 local files=$(find $DIR/$tdir -type f -newer $TMP/modified_first)
3156 copy_files_xattrs `hostname` $TMP/files $TMP/file_xattrs $files
3157 [ $? -eq 0 ] || { error "Unable to copy files"; return 9; }
3159 umount_client $MOUNT
3160 [ $? -eq 0 ] || { error "Unable to umount client"; return 10; }
3162 [ $? -eq 0 ] || { error "Unable to stop ost1"; return 11; }
3164 echo mount ost1 as ldiskfs
3165 do_node $ost1node mkdir -p $ost1mnt
3166 [ $? -eq 0 ] || { error "Unable to create $ost1mnt"; return 23; }
3167 if ! do_node $ost1node test -b $ost1_dev; then
3170 do_node $ost1node mount -t $(facet_fstype ost1) $loop $ost1_dev \
3172 [ $? -eq 0 ] || { error "Unable to mount ost1 as ldiskfs"; return 12; }
3175 echo backup objects to $ost1tmp/objects
3176 local objects=$(do_node $ost1node 'find '$ost1mnt'/O/[0-9]* -type f'\
3177 '-size +0 -newer '$ost1tmp'/modified_first -regex ".*\/[0-9]+"')
3178 copy_files_xattrs $ost1node $ost1tmp/objects $ost1tmp/object_xattrs \
3180 [ $? -eq 0 ] || { error "Unable to copy objects"; return 13; }
3182 # move objects to lost+found
3183 do_node $ost1node 'mv '$objects' '${ost1mnt}'/lost+found'
3184 [ $? -eq 0 ] || { error "Unable to move objects"; return 14; }
3187 do_node $ost1node "ll_recover_lost_found_objs -d $ost1mnt/lost+found"
3188 [ $? -eq 0 ] || { error "ll_recover_lost_found_objs failed"; return 15; }
3190 # compare restored objects against saved ones
3191 diff_files_xattrs $ost1node $ost1tmp/objects $ost1tmp/object_xattrs $objects
3192 [ $? -eq 0 ] || { error "Unable to diff objects"; return 16; }
3194 do_node $ost1node "umount $ost1mnt"
3195 [ $? -eq 0 ] || { error "Unable to umount ost1 as ldiskfs"; return 17; }
3198 [ $? -eq 0 ] || { error "Unable to start ost1"; return 18; }
3200 [ $? -eq 0 ] || { error "Unable to mount client"; return 19; }
3203 diff_files_xattrs `hostname` $TMP/files $TMP/file_xattrs $files
3204 [ $? -eq 0 ] || { error "Unable to diff files"; return 20; }
3206 rm -rf $TMP/files $TMP/file_xattrs
3207 [ $? -eq 0 ] || { error "Unable to delete temporary files"; return 21; }
3208 do_node $ost1node "rm -rf $ost1tmp"
3209 [ $? -eq 0 ] || { error "Unable to delete temporary files"; return 22; }
3212 run_test 52 "check recovering objects from lost+found"
# NOTE(review): numbered excerpt with missing interior lines (gaps in the
# embedded numbering); code left byte-identical — comments only.
3214 # Checks threads_min/max/started for some service
3216 # Arguments: service name (OST or MDT), facet (e.g., ost1, $SINGLEMDS), and a
3217 # parameter pattern prefix like 'ost.*.ost'.
3230 local msg="Insane $modname thread counts"
3231 local ncpts=$(check_cpt_number $facet)
3235 check_mount || return 41
3237 # We need to expand $parampat, but it may match multiple parameters, so
3238 # we'll pick the first one
3239 if ! paramp=$(do_facet $facet "lctl get_param -N ${parampat}.threads_min"|head -1); then
3240 error "Couldn't expand ${parampat}.threads_min parameter name"
3244 # Remove the .threads_min part
3245 paramp=${paramp%.threads_min}
3247 # Check for sanity in defaults
3248 tmin=$(do_facet $facet "lctl get_param -n ${paramp}.threads_min" || echo 0)
3249 tmax=$(do_facet $facet "lctl get_param -n ${paramp}.threads_max" || echo 0)
3250 tstarted=$(do_facet $facet "lctl get_param -n ${paramp}.threads_started" || echo 0)
3251 lassert 23 "$msg (PDSH problems?)" '(($tstarted && $tmin && $tmax))' || return $?
3252 lassert 24 "$msg" '(($tstarted >= $tmin && $tstarted <= $tmax ))' || return $?
3253 nthrs=$(expr $tmax - $tmin)
3254 if [ $nthrs -lt $ncpts ]; then
# a forced (module-parameter) thread count cannot be tuned; widen the range
3260 [ $tmin -eq $tmax -a $tmin -eq $tstarted ] &&
3261 skip_env "module parameter forced $facet thread count" &&
3262 tmin=3 && tmax=$((3 * tmax))
3264 # Check that we can change min/max
3265 do_facet $facet "lctl set_param ${paramp}.threads_min=$((tmin + nthrs))"
3266 do_facet $facet "lctl set_param ${paramp}.threads_max=$((tmax - nthrs))"
3267 tmin2=$(do_facet $facet "lctl get_param -n ${paramp}.threads_min" || echo 0)
3268 tmax2=$(do_facet $facet "lctl get_param -n ${paramp}.threads_max" || echo 0)
3269 lassert 25 "$msg" '(($tmin2 == ($tmin + $nthrs) && $tmax2 == ($tmax - $nthrs)))' || return $?
3271 # Check that we can set min/max to the same value
3272 tmin=$(do_facet $facet "lctl get_param -n ${paramp}.threads_min" || echo 0)
3273 do_facet $facet "lctl set_param ${paramp}.threads_max=$tmin"
3274 tmin2=$(do_facet $facet "lctl get_param -n ${paramp}.threads_min" || echo 0)
3275 tmax2=$(do_facet $facet "lctl get_param -n ${paramp}.threads_max" || echo 0)
3276 lassert 26 "$msg" '(($tmin2 == $tmin && $tmax2 == $tmin))' || return $?
3278 # Check that we can't set max < min
3279 do_facet $facet "lctl set_param ${paramp}.threads_max=$((tmin - 1))"
3280 tmin2=$(do_facet $facet "lctl get_param -n ${paramp}.threads_min" || echo 0)
3281 tmax2=$(do_facet $facet "lctl get_param -n ${paramp}.threads_max" || echo 0)
3282 lassert 27 "$msg" '(($tmin2 <= $tmax2))' || return $?
3284 # We need to ensure that we get the module options desired; to do this
3285 # we set LOAD_MODULES_REMOTE=true and we call setmodopts below.
3286 LOAD_MODULES_REMOTE=true
3289 local newvalue="${opts}=$(expr $basethr \* $ncpts)"
3290 setmodopts -a $modname "$newvalue" oldvalue
3294 check_mount || return 41
3296 # Restore previous setting of MODOPTS_*
3297 setmodopts $modname "$oldvalue"
3299 # Check that $opts took
3300 tmin=$(do_facet $facet "lctl get_param -n ${paramp}.threads_min")
3301 tmax=$(do_facet $facet "lctl get_param -n ${paramp}.threads_max")
3302 tstarted=$(do_facet $facet "lctl get_param -n ${paramp}.threads_started")
3303 lassert 28 "$msg" '(($tstarted >= $tmin && $tstarted <= $tmax ))' || return $?
# test_53a fragment: exercise thread_sanity against the OSS service
3312 thread_sanity OST ost1 'ost.*.ost' 'oss_num_threads' '16'
3315 run_test 53a "check OSS thread count params"
# test_53b fragment: pick mdt.* vs mds.* parameter namespace by probing
3319 local mds=$(do_facet $SINGLEMDS "lctl get_param -N mds.*.*.threads_max \
3321 if [ -z "$mds" ]; then
3322 #running this on an old MDT
3323 thread_sanity MDT $SINGLEMDS 'mdt.*.*.' 'mdt_num_threads' 16
3325 thread_sanity MDT $SINGLEMDS 'mds.*.*.' 'mds_num_threads' 16
3329 run_test 53b "check MDS thread count params"
# test_54a fragment: llverdev partial device verification
3332 if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
3333 skip "Only applicable to ldiskfs-based MDTs"
3337 do_rpc_nodes $(facet_host ost1) run_llverdev $(ostdevname 1) -p
3338 [ $? -eq 0 ] || error "llverdev failed!"
3341 run_test 54a "test llverdev and partial verify of device"
# test_54b fragment: llverfs partial filesystem verification
3344 if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
3345 skip "Only applicable to ldiskfs-based MDTs"
3350 run_llverfs $MOUNT -p
3351 [ $? -eq 0 ] || error "llverfs failed!"
3354 run_test 54b "test llverfs and partial verify of filesystem"
# lov_objid_size fragment: expected lov_objid file size for a given max OST
# index — 8 bytes per OST index, presumably one u64 per OST; TODO confirm
3358 local max_ost_index=$1
3359 echo -n $(((max_ost_index + 1) * 8))
# test_55 fragment: reformat with various OST indexes and verify lov_objid size
3363 if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
3364 skip "Only applicable to ldiskfs-based MDTs"
3368 local mdsdev=$(mdsdevname 1)
3369 local mdsvdev=$(mdsvdevname 1)
3373 add mds1 $(mkfs_opts mds1 ${mdsdev}) --reformat $mdsdev \
3375 add ost1 $(mkfs_opts ost1 $(ostdevname 1)) --index=$i \
3376 --reformat $(ostdevname 1) $(ostvdevname 1)
3382 echo checking size of lov_objid for ost index $i
3383 LOV_OBJID_SIZE=$(do_facet mds1 "$DEBUGFS -R 'stat lov_objid' $mdsdev 2>/dev/null" | grep ^User | awk '{print $6}')
3384 if [ "$LOV_OBJID_SIZE" != $(lov_objid_size $i) ]; then
3385 error "lov_objid size has to be $(lov_objid_size $i), not $LOV_OBJID_SIZE"
3387 echo ok, lov_objid size is correct: $LOV_OBJID_SIZE
3394 run_test 55 "check lov_objid size"
# test_56 fragment: format OSTs with very large indexes (1000, 10000)
3397 local mds_journal_size_orig=$MDSJOURNALSIZE
3401 for num in $(seq 1 $MDSCOUNT); do
3402 add mds${num} $(mkfs_opts mds${num} $(mdsdevname $num)) \
3403 --reformat $(mdsdevname $num) $(mdsvdevname $num)
3405 add ost1 $(mkfs_opts ost1 $(ostdevname 1)) --index=1000 --reformat \
3406 $(ostdevname 1) $(ostvdevname 1)
3407 add ost2 $(mkfs_opts ost2 $(ostdevname 2)) --index=10000 --reformat \
3408 $(ostdevname 2) $(ostvdevname 2)
3412 start_ost2 || error "Unable to start second ost"
3413 mount_client $MOUNT || error "Unable to mount client"
3417 MDSJOURNALSIZE=$mds_journal_size_orig
3420 run_test 56 "check big indexes"
# NOTE(review): numbered excerpt with missing interior lines (gaps in the
# embedded numbering); code left byte-identical — comments only.
3422 test_57a() { # bug 22656
3423 local NID=$(do_facet ost1 "$LCTL get_param nis" | tail -1 | awk '{print $1}')
3424 writeconf_or_reformat
3425 [ $(facet_fstype ost1) == zfs ] && import_zpool ost1
3426 do_facet ost1 "$TUNEFS --failnode=$NID `ostdevname 1`" || error "tunefs failed"
3428 start_ost && error "OST registration from failnode should fail"
3431 run_test 57a "initial registration from failnode should fail (should return errs)"
# test_57b fragment: same flow as 57a but with --servicenode, which must work
3434 local NID=$(do_facet ost1 "$LCTL get_param nis" | tail -1 | awk '{print $1}')
3435 writeconf_or_reformat
3436 [ $(facet_fstype ost1) == zfs ] && import_zpool ost1
3437 do_facet ost1 "$TUNEFS --servicenode=$NID `ostdevname 1`" || error "tunefs failed"
3439 start_ost || error "OST registration from servicenode should not fail"
3442 run_test 57b "initial registration from servicenode should not fail"
# count_osts fragment: count OST entries in the MGS live configuration log
3445 do_facet mgs $LCTL get_param mgs.MGS.live.$FSNAME | grep OST | wc -l
3448 test_58() { # bug 22658
3449 if [ $(facet_fstype mds) != ldiskfs ]; then
3450 skip "Only applicable to ldiskfs-based MDTs"
3455 createmany -o $DIR/$tdir/$tfile-%d 100
3456 # make sure that OSTs do not cancel llog cookies before we unmount the MDS
3457 #define OBD_FAIL_OBD_LOG_CANCEL_NET 0x601
3458 do_facet $SINGLEMDS "lctl set_param fail_loc=0x601"
3459 unlinkmany $DIR/$tdir/$tfile-%d 100
3462 local MNTDIR=$(facet_mntpt $SINGLEMDS)
3463 local devname=$(mdsdevname ${SINGLEMDS//mds/})
3465 if ! do_facet $SINGLEMDS "test -b $devname"; then
3469 # remove all files from the OBJECTS dir
3470 do_facet $SINGLEMDS "mount -t ldiskfs $opts $devname $MNTDIR"
3471 do_facet $SINGLEMDS "find $MNTDIR/O/1/d* -type f -delete"
3472 do_facet $SINGLEMDS "umount -d $MNTDIR"
3473 # restart MDS with missing llog files
3475 do_facet mds "lctl set_param fail_loc=0"
3478 run_test 58 "missing llog files must not prevent MDT from mounting"
# test_59 fragment: verify the semantics of the writeconf mount option for
# both the MDT and the OSTs, using count_osts on the MGS live log.
3481 start_mgsmds >> /dev/null
3482 local C1=$(count_osts)
3483 if [ $C1 -eq 0 ]; then
3484 start_ost >> /dev/null
3488 echo "original ost count: $C1 (expect > 0)"
3489 [ $C1 -gt 0 ] || error "No OSTs in $FSNAME log"
3490 start_mgsmds -o writeconf >> /dev/null || error "MDT start failed"
3491 local C2=$(count_osts)
3492 echo "after mdt writeconf count: $C2 (expect 0)"
3493 [ $C2 -gt 0 ] && error "MDT writeconf should erase OST logs"
3494 echo "OST start without writeconf should fail:"
3495 start_ost >> /dev/null && error "OST start without writeconf didn't fail"
3496 echo "OST start with writeconf should succeed:"
3497 start_ost -o writeconf >> /dev/null || error "OST1 start failed"
3498 local C3=$(count_osts)
3499 echo "after ost writeconf count: $C3 (expect 1)"
3500 [ $C3 -eq 1 ] || error "new OST writeconf should add:"
3501 start_ost2 -o writeconf >> /dev/null || error "OST2 start failed"
3502 local C4=$(count_osts)
3503 echo "after ost2 writeconf count: $C4 (expect 2)"
3504 [ $C4 -eq 2 ] || error "OST2 writeconf should add log"
3505 stop_ost2 >> /dev/null
3506 cleanup_nocli >> /dev/null
3507 #writeconf to remove all ost2 traces for subsequent tests
3508 writeconf_or_reformat
3510 run_test 59 "writeconf mount option"
# NOTE(review): numbered excerpt with missing interior lines (gaps in the
# embedded numbering); code left byte-identical — comments only.
3512 test_60() { # LU-471
3515 if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
3516 skip "Only applicable to ldiskfs-based MDTs"
3520 for num in $(seq $MDSCOUNT); do
3521 add mds${num} $(mkfs_opts mds${num} $(mdsdevname $num)) \
3522 --mkfsoptions='\" -E stride=64 -O ^uninit_bg\"' \
3523 --reformat $(mdsdevname $num) $(mdsvdevname $num) ||
3527 dump=$(do_facet $SINGLEMDS dumpe2fs $(mdsdevname 1))
3529 [ $rc -eq 0 ] || error "dumpe2fs $(mdsdevname 1) failed"
3531 # MDT default has dirdata feature
3532 echo $dump | grep dirdata > /dev/null || error "dirdata is not set"
3533 # we disable uninit_bg feature
3534 echo $dump | grep uninit_bg > /dev/null && error "uninit_bg is set"
3535 # we set stride extended options
3536 echo $dump | grep stride > /dev/null || error "stride is not set"
3539 run_test 60 "check mkfs.lustre --mkfsoptions -E -O options setting"
# test_61 fragment (LU-1641): save/shrink/grow/remove a large xattr and check
# it survives an MDS remount.
3542 local reformat=false
3544 [ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.1.53) ] ||
3545 { skip "Need MDS version at least 2.1.53"; return 0; }
3547 if [ $(facet_fstype $SINGLEMDS) == ldiskfs ] &&
3548 ! large_xattr_enabled; then
3550 LDISKFS_MKFS_OPTS+=" -O large_xattr"
3552 for num in $(seq $MDSCOUNT); do
3553 add mds${num} $(mkfs_opts mds$num $(mdsdevname $num)) \
3554 --reformat $(mdsdevname $num) $(mdsvdevname $num) ||
3555 error "add mds $num failed"
3559 setup_noconfig || error "setting up the filesystem failed"
3560 client_up || error "starting client failed"
3562 local file=$DIR/$tfile
3565 local large_value="$(generate_string $(max_xattr_size))"
3566 local small_value="bar"
3568 local name="trusted.big"
3569 log "save large xattr $name on $file"
3570 setfattr -n $name -v $large_value $file ||
3571 error "saving $name on $file failed"
3573 local new_value=$(get_xattr_value $name $file)
3574 [[ "$new_value" != "$large_value" ]] &&
3575 error "$name different after saving"
3577 log "shrink value of $name on $file"
3578 setfattr -n $name -v $small_value $file ||
3579 error "shrinking value of $name on $file failed"
3581 new_value=$(get_xattr_value $name $file)
3582 [[ "$new_value" != "$small_value" ]] &&
3583 error "$name different after shrinking"
3585 log "grow value of $name on $file"
3586 setfattr -n $name -v $large_value $file ||
3587 error "growing value of $name on $file failed"
3589 new_value=$(get_xattr_value $name $file)
3590 [[ "$new_value" != "$large_value" ]] &&
3591 error "$name different after growing"
3593 log "check value of $name on $file after remounting MDS"
3595 new_value=$(get_xattr_value $name $file)
3596 [[ "$new_value" != "$large_value" ]] &&
3597 error "$name different after remounting MDS"
3599 log "remove large xattr $name from $file"
3600 setfattr -x $name $file || error "removing $name from $file failed"
# strip the -O large_xattr option added above, restoring the original value
3605 LDISKFS_MKFS_OPTS=${LDISKFS_MKFS_OPTS% -O large_xattr}
3609 run_test 61 "large xattr"
# test_62 fragment: servers must refuse to start with the journal disabled
3612 if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
3613 skip "Only applicable to ldiskfs-based MDTs"
3618 local mdsdev=$(mdsdevname 1)
3619 local ostdev=$(ostdevname 1)
3621 [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.2.51) ]] ||
3622 { skip "Need MDS version at least 2.2.51"; return 0; }
3624 echo "disable journal for mds"
3625 do_facet mds tune2fs -O ^has_journal $mdsdev || error "tune2fs failed"
3626 start_mds && error "MDT start should fail"
3627 echo "disable journal for ost"
3628 do_facet ost1 tune2fs -O ^has_journal $ostdev || error "tune2fs failed"
3629 start_ost && error "OST start should fail"
3630 cleanup || return $?
3633 run_test 62 "start with disabled journal"
# test_63 fragment: sanity-check ldiskfs inode size via /proc/slabinfo
3636 if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
3637 skip "Only applicable to ldiskfs-based MDTs"
3641 local inode_slab=$(do_facet $SINGLEMDS \
3642 "awk '/ldiskfs_inode_cache/ { print \\\$5 }' /proc/slabinfo")
3643 if [ -z "$inode_slab" ]; then
3644 skip "ldiskfs module has not been loaded"
3648 echo "$inode_slab ldisk inodes per page"
3649 [ "$inode_slab" -ge "3" ] ||
3650 error "ldisk inode size is too big, $inode_slab objs per page"
3653 run_test 63 "Verify each page can at least hold 3 ldisk inodes"
# test_64 fragment: lfs df --lazy must succeed with an OST stopped
3658 start_ost2 || error "Unable to start second ost"
3659 mount_client $MOUNT || error "Unable to mount client"
3660 stop_ost2 || error "Unable to stop second ost"
3662 $LFS df --lazy || error "lfs df failed"
3663 cleanup || return $?
3664 #writeconf to remove all ost2 traces for subsequent tests
3665 writeconf_or_reformat
3667 run_test 64 "check lfs df --lazy "
# NOTE(review): numbered excerpt with missing interior lines (gaps in the
# embedded numbering); code left byte-identical — comments only.
3669 test_65() { # LU-2237
3670 # Currently, the test is only valid for ldiskfs backend
3671 [ "$(facet_fstype $SINGLEMDS)" != "ldiskfs" ] &&
3672 skip "non-ldiskfs backend" && return
3674 local devname=$(mdsdevname ${SINGLEMDS//mds/})
3675 local brpt=$(facet_mntpt brpt)
3678 if ! do_facet $SINGLEMDS "test -b $devname"; then
3683 local obj=$(do_facet $SINGLEMDS \
3684 "$DEBUGFS -c -R \\\"stat last_rcvd\\\" $devname" |
3686 if [ -z "$obj" ]; then
3687 # The MDT may be just re-formatted, mount the MDT for the
3688 # first time to guarantee the "last_rcvd" file is there.
3689 start_mds || error "fail to mount the MDS for the first time"
3693 # remove the "last_rcvd" file
3694 do_facet $SINGLEMDS "mkdir -p $brpt"
3695 do_facet $SINGLEMDS \
3696 "mount -t $(facet_fstype $SINGLEMDS) $opts $devname $brpt"
3697 do_facet $SINGLEMDS "rm -f ${brpt}/last_rcvd"
3698 do_facet $SINGLEMDS "umount -d $brpt"
3700 # restart MDS, the "last_rcvd" file should be recreated.
3701 start_mds || error "fail to restart the MDS"
3703 obj=$(do_facet $SINGLEMDS \
3704 "$DEBUGFS -c -R \\\"stat last_rcvd\\\" $devname" | grep Inode)
3705 [ -n "$obj" ] || error "fail to re-create the last_rcvd"
3707 run_test 65 "re-create the lost last_rcvd file when server mount"
# test_66 fragment: lctl replace_nids must be rejected while targets are up,
# then succeed for both OST and MDT once only the MGS (nosvc) is running.
3710 [[ $(lustre_version_code mgs) -ge $(version_code 2.3.59) ]] ||
3711 { skip "Need MGS version at least 2.3.59"; return 0; }
3714 local OST1_NID=$(do_facet ost1 $LCTL list_nids | head -1)
3715 local MDS_NID=$(do_facet $SINGLEMDS $LCTL list_nids | head -1)
3717 echo "replace_nids should fail if MDS, OSTs and clients are UP"
3718 do_facet mgs $LCTL replace_nids $FSNAME-OST0000 $OST1_NID &&
3719 error "replace_nids fail"
3721 umount_client $MOUNT || error "unmounting client failed"
3722 echo "replace_nids should fail if MDS and OSTs are UP"
3723 do_facet mgs $LCTL replace_nids $FSNAME-OST0000 $OST1_NID &&
3724 error "replace_nids fail"
3727 echo "replace_nids should fail if MDS is UP"
3728 do_facet mgs $LCTL replace_nids $FSNAME-OST0000 $OST1_NID &&
3729 error "replace_nids fail"
3731 stop_mds || error "stopping mds failed"
3733 if combined_mgs_mds; then
3734 start_mdt 1 "-o nosvc" ||
3735 error "starting mds with nosvc option failed"
3738 echo "command should accept two parameters"
3739 do_facet mgs $LCTL replace_nids $FSNAME-OST0000 &&
3740 error "command should accept two params"
3742 echo "correct device name should be passed"
3743 do_facet mgs $LCTL replace_nids $FSNAME-WRONG0000 $OST1_NID &&
3744 error "wrong devname"
3746 echo "wrong nids list should not destroy the system"
3747 do_facet mgs $LCTL replace_nids $FSNAME-OST0000 "wrong nids list" &&
3750 echo "replace OST nid"
3751 do_facet mgs $LCTL replace_nids $FSNAME-OST0000 $OST1_NID ||
3752 error "replace nids failed"
3754 echo "command should accept two parameters"
3755 do_facet mgs $LCTL replace_nids $FSNAME-MDT0000 &&
3756 error "command should accept two params"
3758 echo "wrong nids list should not destroy the system"
3759 do_facet mgs $LCTL replace_nids $FSNAME-MDT0000 "wrong nids list" &&
3762 echo "replace MDS nid"
3763 do_facet mgs $LCTL replace_nids $FSNAME-MDT0000 $MDS_NID ||
3764 error "replace nids failed"
3766 if ! combined_mgs_mds ; then
3773 check_mount || error "error after nid replace"
3774 cleanup || error "cleanup failed"
3777 run_test 66 "replace nids"
# LU-2950: exercise the LNet route tooling. Convert a legacy routes
# file with lustre_routes_conversion, diff the result against an
# expected file, then run lustre_routes_config --dry-run and diff the
# generated "lctl --net ... add_route" commands against a second
# expected file. All scratch files live under $TMP.
3779 test_67() { #LU-2950
3780 local legacy="$TMP/legacy_lnet_config"
3781 local new="$TMP/new_routes_test"
3782 local out="$TMP/config_out_file"
3783 local verify="$TMP/conv_verify"
3784 local verify_conf="$TMP/conf_verify"
3786 # Create the legacy file that will be run through the
3787 # lustre_routes_conversion script
3788 cat <<- LEGACY_LNET_CONFIG > $legacy
3789 tcp1 23 192.168.213.1@tcp:1; tcp5 34 193.30.4.3@tcp:4;
3790 tcp2 54 10.1.3.2@tcp;
3791 tcp3 10.3.4.3@tcp:3;
3795 # Create the verification file to verify the output of
3796 # lustre_routes_conversion script against.
3797 cat <<- VERIFY_LNET_CONFIG > $verify
3798 tcp1: { gateway: 192.168.213.1@tcp, hop: 23, priority: 1 }
3799 tcp5: { gateway: 193.30.4.3@tcp, hop: 34, priority: 4 }
3800 tcp2: { gateway: 10.1.3.2@tcp, hop: 54 }
3801 tcp3: { gateway: 10.3.4.3@tcp, priority: 3 }
3802 tcp4: { gateway: 10.3.3.4@tcp }
3805 # Create the verification file to verify the output of
3806 # lustre_routes_config script against
3807 cat <<- VERIFY_LNET_CONFIG > $verify_conf
3808 lctl --net tcp1 add_route 192.168.213.1@tcp 23 1
3809 lctl --net tcp5 add_route 193.30.4.3@tcp 34 4
3810 lctl --net tcp2 add_route 10.1.3.2@tcp 54 4
3811 lctl --net tcp3 add_route 10.3.4.3@tcp 1 3
3812 lctl --net tcp4 add_route 10.3.3.4@tcp 1 3
# Run the conversion and compare both stages against the expected files.
# NOTE(review): "[ $? -eq 1 ]" only catches cmp's "files differ" status;
# cmp exits with >1 on trouble (unreadable/missing file), which this
# check would silently ignore -- confirm that is acceptable here.
3815 lustre_routes_conversion $legacy $new > /dev/null
3816 if [ -f $new ]; then
3817 # verify the conversion output
3818 cmp -s $new $verify > /dev/null
3819 if [ $? -eq 1 ]; then
3820 error "routes conversion failed"
3823 lustre_routes_config --dry-run --verbose $new > $out
3824 # check that the script succeeded
3825 cmp -s $out $verify_conf > /dev/null
3826 if [ $? -eq 1 ]; then
3827 error "routes config failed"
3830 error "routes conversion test failed"
3832 # remove generated files
3833 rm -f $new $legacy $verify $verify_conf $out
3835 run_test 67 "test routes conversion and configuration"
# test_68 body (function header is in an elided line above): reserve a
# large FID sequence range [START, END) in the FLDB, clear the sequences
# MDT0000 has already handed out, then remount the client and verify a
# newly created file's FID sequence does not come from the reserved range.
3843 [ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.4.53) ] ||
3844 { skip "Need MDS version at least 2.4.53"; return 0; }
3846 umount_client $MOUNT || error "umount client failed"
3848 start_mdt 1 || error "MDT start failed"
3851 # START-END - the sequences we'll be reserving
3852 START=$(do_facet $SINGLEMDS \
3853 lctl get_param -n seq.ctl*.space | awk -F'[[ ]' '{print $2}')
3854 END=$((START + (1 << 30)))
3855 do_facet $SINGLEMDS \
3856 lctl set_param seq.ctl*.fldb="[$START-$END\):0:mdt"
3858 # reset the sequences MDT0000 has already assigned
3859 do_facet $SINGLEMDS \
3860 lctl set_param seq.srv*MDT0000.space=clear
3862 # remount to let the client allocate new sequence
3863 mount_client $MOUNT || error "mount client failed"
3866 do_facet $SINGLEMDS \
3867 lctl get_param seq.srv*MDT0000.space
3868 $LFS path2fid $DIR/$tfile
# Split the FID into its components; the IFS fiddling and the line that
# extracts $seq from ${fid[...]} are in elided lines below.
3870 local old_ifs="$IFS"
3872 fid=($($LFS path2fid $DIR/$tfile))
# NOTE(review): "<" inside [[ ]] is a lexicographic string comparison,
# and $seq from path2fid is hexadecimal while $END is decimal -- confirm
# this comparison is intended rather than an arithmetic (( seq < END )).
3876 if [[ $seq < $END ]]; then
3877 error "used reserved sequence $seq?"
3879 cleanup || return $?
3881 run_test 68 "be able to reserve specific sequences in FLDB"
# test_69 body (function header is in an elided line above): push OST0's
# LAST_ID well beyond OST_MAX_PRECREATE by creating and deleting many
# objects, reformat/replace the OST with the same index, and verify the
# MDS reconnects and a new file still lands on OST index 0.
3884 local server_version=$(lustre_version_code $SINGLEMDS)
3886 [[ $server_version -lt $(version_code 2.4.2) ]] &&
3887 skip "Need MDS version at least 2.4.2" && return
3889 [[ $server_version -ge $(version_code 2.4.50) ]] &&
3890 [[ $server_version -lt $(version_code 2.5.0) ]] &&
3891 skip "Need MDS version at least 2.5.0" && return
3895 # use OST0000 since it probably has the most creations
3896 local OSTNAME=$(ostname_from_index 0)
3897 local mdtosc_proc1=$(get_mdtosc_proc_path mds1 $OSTNAME)
3898 local last_id=$(do_facet mds1 lctl get_param -n \
3899 osc.$mdtosc_proc1.prealloc_last_id)
3901 # Want to have OST LAST_ID over 1.5 * OST_MAX_PRECREATE to
3902 # verify that the LAST_ID recovery is working properly. If
3903 # not, then the OST will refuse to allow the MDS connect
3904 # because the LAST_ID value is too different from the MDS
3905 #define OST_MAX_PRECREATE=20000
3906 local num_create=$((20000 * 5))
3909 $LFS setstripe -i 0 $DIR/$tdir
3910 createmany -o $DIR/$tdir/$tfile- $num_create
3911 # delete all of the files with objects on OST0 so the
3912 # filesystem is not inconsistent later on
3913 $LFS find $MOUNT --ost 0 | xargs rm
3915 stop_ost || error "OST0 stop failure"
3916 add ost1 $(mkfs_opts ost1 $(ostdevname 1)) --reformat --replace \
3917 $(ostdevname 1) $(ostvdevname 1) ||
# Fixed: the original message expanded $ostdev, a variable never set in
# this function, producing an empty device name in the error output.
3918 error "reformat and replace $(ostdevname 1) failed"
3919 start_ost || error "OST0 restart failure"
3920 wait_osc_import_state mds ost FULL
3922 touch $DIR/$tdir/$tfile-last || error "create file after reformat"
3923 local idx=$($LFS getstripe -i $DIR/$tdir/$tfile-last)
3924 [ $idx -ne 0 ] && error "$DIR/$tdir/$tfile-last on $idx not 0" || true
3928 run_test 69 "replace an OST with the same index"
# test_70a body (header elided above): start MDT0 first, then OST0, then
# MDT1; verify a client can mount and create a remote directory on MDT
# index $MDTIDX ($MDTIDX is set in an elided line -- presumably 1; TODO
# confirm), then clean everything up.
3931 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
3936 start_mdt 1 || error "MDT0 start fail"
3938 start_ost || error "OST0 start fail"
3940 start_mdt 2 || error "MDT1 start fail"
3942 mount_client $MOUNT || error "mount client fails"
3944 mkdir -p $DIR/$tdir || error "create dir fail"
3946 $LFS mkdir -i $MDTIDX $DIR/$tdir/remote_dir ||
3947 error "create remote dir fail"
3949 rm -rf $DIR/$tdir || error "delete dir fail"
3950 cleanup || return $?
3952 run_test 70a "start MDT0, then OST, then MDT1"
# test_70b body (header elided above): same remote-directory check as
# test_70a but with a different startup order -- OST0 first, then MDT0
# and MDT1. $MDTIDX comes from an elided line (presumably 1; TODO confirm).
3955 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
3958 start_ost || error "OST0 start fail"
3960 start_mdt 1 || error "MDT0 start fail"
3961 start_mdt 2 || error "MDT1 start fail"
3963 mount_client $MOUNT || error "mount client fails"
3965 mkdir -p $DIR/$tdir || error "create dir fail"
3967 $LFS mkdir -i $MDTIDX $DIR/$tdir/remote_dir ||
3968 error "create remote dir fail"
3970 rm -rf $DIR/$tdir || error "delete dir fail"
3972 cleanup || return $?
3974 run_test 70b "start OST, MDT1, MDT0"
# test_70c body (header elided above): with both MDTs and the OST up,
# stop MDT0 and deactivate its MDC on the client, then verify that both
# a plain mkdir and a remote-directory creation fail as expected.
3977 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
3980 start_mdt 1 || error "MDT0 start fail"
3981 start_mdt 2 || error "MDT1 start fail"
3982 start_ost || error "OST0 start fail"
3984 mount_client $MOUNT || error "mount client fails"
# Fixed: this line stops MDT0 (mds1); the original message said
# "MDT1 start fail", which misidentified both the target and the action.
3985 stop_mdt 1 || error "MDT0 stop fail"
3987 local mdc_for_mdt1=$($LCTL dl | grep MDT0000-mdc | awk '{print $4}')
3988 echo "deactivate $mdc_for_mdt1"
3989 $LCTL --device $mdc_for_mdt1 deactivate || return 1
# With MDT0 gone, both operations below must fail (&& error on success).
3991 mkdir -p $DIR/$tdir && error "mkdir succeed"
3993 $LFS mkdir -i $MDTIDX $DIR/$tdir/remote_dir &&
3994 error "create remote dir succeed"
3996 cleanup || return $?
3998 run_test 70c "stop MDT0, mkdir fail, create remote dir fail"
# test_70d body (header elided above): stop MDT1 and deactivate its MDC;
# a plain mkdir on MDT0 must still succeed while creating a remote
# directory on the stopped MDT must fail.
4001 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
4004 start_mdt 1 || error "MDT0 start fail"
4005 start_mdt 2 || error "MDT1 start fail"
4006 start_ost || error "OST0 start fail"
4008 mount_client $MOUNT || error "mount client fails"
# Fixed: this line stops MDT1; the original message said "start fail".
4010 stop_mdt 2 || error "MDT1 stop fail"
4012 local mdc_for_mdt2=$($LCTL dl | grep MDT0001-mdc |
4014 echo "deactivate $mdc_for_mdt2"
4015 $LCTL --device $mdc_for_mdt2 deactivate ||
4016 error "set $mdc_for_mdt2 deactivate failed"
4018 mkdir -p $DIR/$tdir || error "mkdir fail"
4019 $LFS mkdir -i $MDTIDX $DIR/$tdir/remote_dir &&
4020 error "create remote dir succeed"
4022 rm -rf $DIR/$tdir || error "delete dir fail"
4024 cleanup || return $?
4026 run_test 70d "stop MDT1, mkdir succeed, create remote dir fail"
# test_71a body (header elided above): start order MDT0, OST0, MDT1,
# OST1; verify the client can create a remote directory on $MDTIDX and
# a file inside it, then tear everything down in order.
4029 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
4030 if combined_mgs_mds; then
4031 skip "needs separate MGS/MDT" && return
4035 start_mdt 1 || error "MDT0 start fail"
4036 start_ost || error "OST0 start fail"
4037 start_mdt 2 || error "MDT1 start fail"
4038 start_ost2 || error "OST1 start fail"
4040 mount_client $MOUNT || error "mount client fails"
4042 mkdir -p $DIR/$tdir || error "mkdir fail"
4043 $LFS mkdir -i $MDTIDX $DIR/$tdir/remote_dir ||
# Fixed: this is the failure branch of "||"; the original message read
# "create remote dir succeed", the opposite of what happened.
4044 error "create remote dir failed"
4046 mcreate $DIR/$tdir/remote_dir/$tfile || error "create file failed"
4047 rm -rf $DIR/$tdir || error "delete dir fail"
4049 umount_client $MOUNT
4050 stop_mdt 1 || error "MDT0 stop fail"
4051 stop_mdt 2 || error "MDT1 stop fail"
4052 stop_ost || error "OST0 stop fail"
4053 stop_ost2 || error "OST1 stop fail"
4055 run_test 71a "start MDT0 OST0, MDT1, OST1"
# test_71b body (header elided above): same remote-directory scenario as
# test_71a with start order MDT1, OST0, MDT0, OST1.
4058 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
4059 if combined_mgs_mds; then
4060 skip "needs separate MGS/MDT" && return
4064 start_mdt 2 || error "MDT1 start fail"
4065 start_ost || error "OST0 start fail"
4066 start_mdt 1 || error "MDT0 start fail"
4067 start_ost2 || error "OST1 start fail"
4069 mount_client $MOUNT || error "mount client fails"
4071 mkdir -p $DIR/$tdir || error "mkdir fail"
4072 $LFS mkdir -i $MDTIDX $DIR/$tdir/remote_dir ||
# Fixed: failure branch of "||" -- original said "succeed".
4073 error "create remote dir failed"
4075 mcreate $DIR/$tdir/remote_dir/$tfile || error "create file failed"
4076 rm -rf $DIR/$tdir || error "delete dir fail"
4078 umount_client $MOUNT
4079 stop_mdt 1 || error "MDT0 stop fail"
4080 stop_mdt 2 || error "MDT1 stop fail"
4081 stop_ost || error "OST0 stop fail"
4082 stop_ost2 || error "OST1 stop fail"
4084 run_test 71b "start MDT1, OST0, MDT0, OST1"
# test_71c body (header elided above): same remote-directory scenario
# with start order OST0, OST1, MDT1, MDT0.
4087 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
4088 if combined_mgs_mds; then
4089 skip "needs separate MGS/MDT" && return
4093 start_ost || error "OST0 start fail"
4094 start_ost2 || error "OST1 start fail"
4095 start_mdt 2 || error "MDT1 start fail"
4096 start_mdt 1 || error "MDT0 start fail"
4098 mount_client $MOUNT || error "mount client fails"
4100 mkdir -p $DIR/$tdir || error "mkdir fail"
4101 $LFS mkdir -i $MDTIDX $DIR/$tdir/remote_dir ||
# Fixed: failure branch of "||" -- original said "succeed".
4102 error "create remote dir failed"
4104 mcreate $DIR/$tdir/remote_dir/$tfile || error "create file failed"
4105 rm -rf $DIR/$tdir || error "delete dir fail"
4107 umount_client $MOUNT
4108 stop_mdt 1 || error "MDT0 stop fail"
4109 stop_mdt 2 || error "MDT1 stop fail"
4110 stop_ost || error "OST0 stop fail"
4111 stop_ost2 || error "OST1 stop fail"
4114 run_test 71c "start OST0, OST1, MDT1, MDT0"
# test_71d body (header elided above): same remote-directory scenario
# with start order OST0, MDT1, MDT0, OST1.
4117 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
4118 if combined_mgs_mds; then
4119 skip "needs separate MGS/MDT" && return
4123 start_ost || error "OST0 start fail"
# Fixed: this starts MDT1 (mds2); the original message said "MDT0",
# duplicating the message on the next line and misidentifying the facet.
4124 start_mdt 2 || error "MDT1 start fail"
4125 start_mdt 1 || error "MDT0 start fail"
4126 start_ost2 || error "OST1 start fail"
4128 mount_client $MOUNT || error "mount client fails"
4130 mkdir -p $DIR/$tdir || error "mkdir fail"
4131 $LFS mkdir -i $MDTIDX $DIR/$tdir/remote_dir ||
# Fixed: failure branch of "||" -- original said "succeed".
4132 error "create remote dir failed"
4134 mcreate $DIR/$tdir/remote_dir/$tfile || error "create file failed"
4135 rm -rf $DIR/$tdir || error "delete dir fail"
4137 umount_client $MOUNT
4138 stop_mdt 1 || error "MDT0 stop fail"
4139 stop_mdt 2 || error "MDT1 stop fail"
4140 stop_ost || error "OST0 stop fail"
4141 stop_ost2 || error "OST1 stop fail"
4144 run_test 71d "start OST0, MDT1, MDT0, OST1"
# test_71e body (header elided above): same remote-directory scenario
# with start order OST0, MDT1, OST1, MDT0.
4147 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
4148 if combined_mgs_mds; then
4149 skip "needs separate MGS/MDT" && return
4153 start_ost || error "OST0 start fail"
4154 start_mdt 2 || error "MDT1 start fail"
4155 start_ost2 || error "OST1 start fail"
4156 start_mdt 1 || error "MDT0 start fail"
4158 mount_client $MOUNT || error "mount client fails"
4160 mkdir -p $DIR/$tdir || error "mkdir fail"
4161 $LFS mkdir -i $MDTIDX $DIR/$tdir/remote_dir ||
# Fixed: failure branch of "||" -- original said "succeed".
4162 error "create remote dir failed"
4164 mcreate $DIR/$tdir/remote_dir/$tfile || error "create file failed"
4165 rm -rf $DIR/$tdir || error "delete dir fail"
4167 umount_client $MOUNT
4168 stop_mdt 1 || error "MDT0 stop fail"
4169 stop_mdt 2 || error "MDT1 stop fail"
4170 stop_ost || error "OST0 stop fail"
4171 stop_ost2 || error "OST1 stop fail"
4174 run_test 71e "start OST0, MDT1, OST1, MDT0"
# LU-2634: reformat the MDTs with the ldiskfs "extents" feature enabled,
# create a batch of short symlinks (stored as fast symlinks), and run
# e2fsck read-only afterwards to confirm the on-disk state is clean.
4176 test_72() { #LU-2634
4177 local mdsdev=$(mdsdevname 1)
4178 local ostdev=$(ostdevname 1)
# NOTE(review): $cmd is assembled here but never used in the visible
# body -- the e2fsck at the end goes through run_e2fsck instead;
# confirm this local is dead and can be dropped.
4179 local cmd="$E2FSCK -fnvd $mdsdev"
4182 [ "$(facet_fstype $SINGLEMDS)" != "ldiskfs" ] &&
4183 skip "ldiskfs only test" && return
4185 #tune MDT with "-O extents"
4187 for num in $(seq $MDSCOUNT); do
4188 add mds${num} $(mkfs_opts mds$num $(mdsdevname $num)) \
4189 --reformat $(mdsdevname $num) $(mdsvdevname $num) ||
4190 error "add mds $num failed"
4191 $TUNE2FS -O extents $(mdsdevname $num)
4194 add ost1 $(mkfs_opts ost1 $ostdev) --reformat $ostdev ||
4195 error "add $ostdev failed"
4196 start_mgsmds || error "start mds failed"
4197 start_ost || error "start ost failed"
4198 mount_client $MOUNT || error "mount client failed"
4200 #create some short symlinks
# $fn (number of symlinks) is assigned in an elided line above this loop.
4202 createmany -o $DIR/$tdir/$tfile-%d $fn
4203 echo "create $fn short symlinks"
4204 for i in $(seq -w 1 $fn); do
4205 ln -s $DIR/$tdir/$tfile-$i $MOUNT/$tfile-$i
4210 umount_client $MOUNT || error "umount client failed"
4211 stop_mds || error "stop mds failed"
4212 stop_ost || error "stop ost failed"
# Read-only fsck ("-n") of the MDT to verify the fast symlinks are sane.
4215 run_e2fsck $(facet_active_host $SINGLEMDS) $mdsdev "-n"
4217 run_test 72 "test fast symlink with extents flag enabled"
# LU-3006: set a failover node on the OST with tunefs and verify the
# client's OSC import picks up the new failover NID from the mountdata.
4219 test_73() { #LU-3006
# ZFS keeps the pool exported between tests; import it before tunefs.
4221 [ $(facet_fstype ost1) == zfs ] && import_zpool ost1
4222 do_facet ost1 "$TUNEFS --failnode=1.2.3.4@$NETTYPE $(ostdevname 1)" ||
4223 error "1st tunefs failed"
4224 start_mgsmds || error "start mds failed"
4225 start_ost || error "start ost failed"
4226 mount_client $MOUNT || error "mount client failed"
# The [^M] glob skips MDT-side osc devices; only client imports match.
4227 lctl get_param -n osc.*OST0000-osc-[^M]*.import | grep failover_nids |
4228 grep 1.2.3.4@$NETTYPE || error "failover nids haven't changed"
4229 umount_client $MOUNT || error "umount client failed"
4233 run_test 73 "failnode to update from mountdata properly"
# LU-1606: verify that each sample client-API program under
# $LUSTRE_TESTS_API_DIR compiles and links against liblustreapi.
4235 test_74() { # LU-1606
4236 for TESTPROG in $LUSTRE_TESTS_API_DIR/*.c; do
# Fixed: compile the file selected by the loop. The original hard-coded
# $LUSTRE_TESTS_API_DIR/simple_test.c here, so $TESTPROG was unused and
# every iteration rebuilt the same source, leaving the other sample
# programs untested.
4237 gcc -Wall -Werror $TESTPROG \
4239 -L$LUSTRE/utils -llustreapi ||
4240 error "client api broken"
4242 cleanup || return $?
4244 run_test 74 "Lustre client api program can compile and link"
# LU-2374: formatting must not care where --mdt/--ost appear relative to
# --index. Format with the default option order, then rewrite the option
# strings so the role flag follows --index and format again; both must
# succeed.
4246 test_75() { # LU-2374
4247 [[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.4.1) ]] &&
4248 skip "Need MDS version at least 2.4.1" && return
4251 local opts_mds="$(mkfs_opts mds1 $(mdsdevname 1)) \
4252 --reformat $(mdsdevname 1) $(mdsvdevname 1)"
4253 local opts_ost="$(mkfs_opts ost1 $(ostdevname 1)) \
4254 --reformat $(ostdevname 1) $(ostvdevname 1)"
4256 #check with default parameters
4257 add mds1 $opts_mds || error "add mds1 failed for default params"
4258 add ost1 $opts_ost || error "add ost1 failed for default params"
# Move the role flag (--mdt/--ost) so it trails --index instead.
# $index is declared in an elided line above -- presumably the target
# index used by mkfs_opts; TODO confirm.
4260 opts_mds=$(echo $opts_mds | sed -e "s/--mdt//")
4261 opts_mds=$(echo $opts_mds |
4262 sed -e "s/--index=$index/--index=$index --mdt/")
4263 opts_ost=$(echo $opts_ost | sed -e "s/--ost//")
4264 opts_ost=$(echo $opts_ost |
4265 sed -e "s/--index=$index/--index=$index --ost/")
4267 add mds1 $opts_mds || error "add mds1 failed for new params"
4268 add ost1 $opts_ost || error "add ost1 failed for new params"
4271 run_test 75 "The order of --index should be irrelevant"
# test_76 body (header elided above): verify "lctl set_param -P" makes
# parameters permanent. Doubles a client-side value (osc max_dirty_mb)
# and an OST-side value (obdfilter client_cache_count), checks each is
# applied, then checks each survives a remount (remount steps are in
# elided lines).
4274 [[ $(lustre_version_code mgs) -ge $(version_code 2.4.52) ]] ||
4275 { skip "Need MDS version at least 2.4.52" && return 0; }
4277 local MDMB_PARAM="osc.*.max_dirty_mb"
4278 echo "Change MGS params"
4279 local MAX_DIRTY_MB=$($LCTL get_param -n $MDMB_PARAM |
4281 echo "max_dirty_mb: $MAX_DIRTY_MB"
4282 local NEW_MAX_DIRTY_MB=$((MAX_DIRTY_MB + MAX_DIRTY_MB))
4283 echo "new_max_dirty_mb: $NEW_MAX_DIRTY_MB"
4284 do_facet mgs $LCTL set_param -P $MDMB_PARAM=$NEW_MAX_DIRTY_MB
4285 wait_update $HOSTNAME "lctl get_param -n $MDMB_PARAM |
4286 head -1" $NEW_MAX_DIRTY_MB
4287 MAX_DIRTY_MB=$($LCTL get_param -n $MDMB_PARAM | head -1)
4288 echo "$MAX_DIRTY_MB"
4289 [ $MAX_DIRTY_MB = $NEW_MAX_DIRTY_MB ] ||
4290 error "error while apply max_dirty_mb"
4292 echo "Check the value is stored after remount"
4295 wait_update $HOSTNAME "lctl get_param -n $MDMB_PARAM |
4296 head -1" $NEW_MAX_DIRTY_MB
4297 MAX_DIRTY_MB=$($LCTL get_param -n $MDMB_PARAM | head -1)
4298 [ $MAX_DIRTY_MB = $NEW_MAX_DIRTY_MB ] ||
4299 error "max_dirty_mb is not saved after remount"
4301 echo "Change OST params"
# NOTE(review): CLIENT_PARAM and NEW_CLIENT_CACHE_COUNT are not declared
# local, so they leak into the global shell scope -- confirm intended.
4302 CLIENT_PARAM="obdfilter.*.client_cache_count"
4303 local CLIENT_CACHE_COUNT
4304 CLIENT_CACHE_COUNT=$(do_facet ost1 $LCTL get_param -n $CLIENT_PARAM |
4306 echo "client_cache_count: $CLIENT_CACHE_COUNT"
4307 NEW_CLIENT_CACHE_COUNT=$((CLIENT_CACHE_COUNT+CLIENT_CACHE_COUNT))
4308 echo "new_client_cache_count: $NEW_CLIENT_CACHE_COUNT"
4309 do_facet mgs $LCTL set_param -P $CLIENT_PARAM=$NEW_CLIENT_CACHE_COUNT
4310 wait_update $(facet_host ost1) "lctl get_param -n $CLIENT_PARAM |
4311 head -1" $NEW_CLIENT_CACHE_COUNT
4312 CLIENT_CACHE_COUNT=$(do_facet ost1 $LCTL get_param -n $CLIENT_PARAM |
4314 echo "$CLIENT_CACHE_COUNT"
4315 [ $CLIENT_CACHE_COUNT = $NEW_CLIENT_CACHE_COUNT ] ||
4316 error "error while apply client_cache_count"
4318 echo "Check the value is stored after remount"
4321 wait_update $(facet_host ost1) "lctl get_param -n $CLIENT_PARAM |
4322 head -1" $NEW_CLIENT_CACHE_COUNT
4323 CLIENT_CACHE_COUNT=$(do_facet ost1 $LCTL get_param -n $CLIENT_PARAM |
4325 echo "$CLIENT_CACHE_COUNT"
4326 [ $CLIENT_CACHE_COUNT = $NEW_CLIENT_CACHE_COUNT ] ||
4327 error "client_cache_count is not saved after remount"
4330 run_test 76 "set permanent params set_param -P"
# LU-3445: format a second filesystem whose MGS NID list and OST
# failover NID list are comma-separated, then mount it by the full NID
# list to verify comma-separated NIDs are parsed end to end.
4332 test_77() { # LU-3445
4333 local server_version=$(lustre_version_code $SINGLEMDS)
4335 [[ $server_version -ge $(version_code 2.2.60) ]] &&
4336 [[ $server_version -le $(version_code 2.4.0) ]] &&
4337 skip "Need MDS version < 2.2.60 or > 2.4.0" && return
4339 if [[ -z "$fs2ost_DEV" || -z "$fs2mds_DEV" ]]; then
4340 is_blkdev $SINGLEMDS $(mdsdevname ${SINGLEMDS//mds/}) &&
4341 skip_env "mixed loopback and real device not working" && return
4344 local fs2mdsdev=$(mdsdevname 1_2)
4345 local fs2ostdev=$(ostdevname 1_2)
4346 local fs2mdsvdev=$(mdsvdevname 1_2)
4347 local fs2ostvdev=$(ostvdevname 1_2)
4348 local fsname=test1234
# Two failover NIDs joined with a comma -- the thing under test.
4350 local failnid="$(h2$NETTYPE 1.2.3.4),$(h2$NETTYPE 4.3.2.1)"
4352 add fs2mds $(mkfs_opts mds1 $fs2mdsdev) --mgs --fsname=$fsname \
4353 --reformat $fs2mdsdev $fs2mdsvdev || error "add fs2mds failed"
# Arm cleanup_fs2 on EXIT/INT only once the fs2mds facet is running.
4354 start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS && trap cleanup_fs2 EXIT INT ||
4355 error "start fs2mds failed"
4357 mgsnid=$(do_facet fs2mds $LCTL list_nids | xargs | tr ' ' ,)
# Guarantee mgsnid contains a comma: duplicate the single NID if needed.
4358 [[ $mgsnid = *,* ]] || mgsnid+=",$mgsnid"
4360 add fs2ost $(mkfs_opts ost1 $fs2ostdev) --mgsnode=$mgsnid \
4361 --failnode=$failnid --fsname=$fsname \
4362 --reformat $fs2ostdev $fs2ostvdev ||
4363 error "add fs2ost failed"
4364 start fs2ost $fs2ostdev $OST_MOUNT_OPTS || error "start fs2ost failed"
4367 mount -t lustre $mgsnid:/$fsname $MOUNT2 || error "mount $MOUNT2 failed"
4368 DIR=$MOUNT2 MOUNT=$MOUNT2 check_mount || error "check $MOUNT2 failed"
4371 run_test 77 "comma-separated MGS NIDs and failover node NIDs"
# test_78 body (header elided above): exercise resize2fs on MDT and OST
# devices. Format small, populate with files, expand to the full device,
# verify the files, create more, shrink part-way back, and verify again,
# running e2fsck -y around every resize step.
4374 [[ $(facet_fstype $SINGLEMDS) != ldiskfs ||
4375 $(facet_fstype ost1) != ldiskfs ]] &&
4376 skip "only applicable to ldiskfs-based MDTs and OSTs" && return
4378 # reformat the Lustre filesystem with a smaller size
4379 local saved_MDSSIZE=$MDSSIZE
4380 local saved_OSTSIZE=$OSTSIZE
4381 MDSSIZE=$((MDSSIZE - 20000))
4382 OSTSIZE=$((OSTSIZE - 20000))
4383 reformat || error "(1) reformat Lustre filesystem failed"
4384 MDSSIZE=$saved_MDSSIZE
4385 OSTSIZE=$saved_OSTSIZE
4387 # mount the Lustre filesystem
4388 setup_noconfig || error "(2) setup Lustre filesystem failed"
4391 log "create test files"
# $num_files is assigned in an elided line above; each file is 1 MiB.
4395 mkdir -p $MOUNT/$tdir || error "(3) mkdir $MOUNT/$tdir failed"
4396 for i in $(seq $num_files); do
4397 file=$MOUNT/$tdir/$tfile-$i
4398 dd if=/dev/urandom of=$file count=1 bs=1M ||
4399 error "(4) create $file failed"
4402 # unmount the Lustre filesystem
4403 cleanup || error "(5) cleanup Lustre filesystem failed"
4405 # run e2fsck on the MDT and OST devices
4406 local mds_host=$(facet_active_host $SINGLEMDS)
4407 local ost_host=$(facet_active_host ost1)
4408 local mds_dev=$(mdsdevname ${SINGLEMDS//mds/})
4409 local ost_dev=$(ostdevname 1)
4411 run_e2fsck $mds_host $mds_dev "-y"
4412 run_e2fsck $ost_host $ost_dev "-y"
4414 # get the original block count of the MDT and OST filesystems
4415 local mds_orig_blks=$(get_block_count $SINGLEMDS $mds_dev)
4416 local ost_orig_blks=$(get_block_count ost1 $ost_dev)
4418 # expand the MDT and OST filesystems to the device size
4419 run_resize2fs $SINGLEMDS $mds_dev "" || error "expand $SINGLEMDS failed"
4420 run_resize2fs ost1 $ost_dev "" || error "expand ost1 failed"
4422 # run e2fsck on the MDT and OST devices again
4423 run_e2fsck $mds_host $mds_dev "-y"
4424 run_e2fsck $ost_host $ost_dev "-y"
4426 # mount the Lustre filesystem
4430 log "check files after expanding the MDT and OST filesystems"
4431 for i in $(seq $num_files); do
4432 file=$MOUNT/$tdir/$tfile-$i
4433 $CHECKSTAT -t file -s 1048576 $file ||
4434 error "(6) checkstat $file failed"
4438 log "create more files after expanding the MDT and OST filesystems"
4439 for i in $(seq $((num_files + 1)) $((num_files + 10))); do
4440 file=$MOUNT/$tdir/$tfile-$i
4441 dd if=/dev/urandom of=$file count=1 bs=1M ||
4442 error "(7) create $file failed"
4445 # unmount the Lustre filesystem
4446 cleanup || error "(8) cleanup Lustre filesystem failed"
4448 # run e2fsck on the MDT and OST devices
4449 run_e2fsck $mds_host $mds_dev "-y"
4450 run_e2fsck $ost_host $ost_dev "-y"
4452 # get the maximum block count of the MDT and OST filesystems
4453 local mds_max_blks=$(get_block_count $SINGLEMDS $mds_dev)
4454 local ost_max_blks=$(get_block_count ost1 $ost_dev)
4456 # get the minimum block count of the MDT and OST filesystems
# "resize2fs -P" prints the estimated minimum size; parse it from stderr.
4457 local mds_min_blks=$(run_resize2fs $SINGLEMDS $mds_dev "" "-P" 2>&1 |
4458 grep minimum | sed -e 's/^.*filesystem: //g')
4459 local ost_min_blks=$(run_resize2fs ost1 $ost_dev "" "-P" 2>&1 |
4460 grep minimum | sed -e 's/^.*filesystem: //g')
4462 # shrink the MDT and OST filesystems to a smaller size
# Target a point halfway between the larger of (original, minimum) and
# the current maximum. The "[[ ]] && a=x || a=y" idiom is safe here
# because the assignment in the middle branch cannot fail.
4466 if [[ $mds_max_blks -gt $mds_min_blks &&
4467 $mds_max_blks -gt $mds_orig_blks ]]; then
4468 [[ $mds_orig_blks -gt $mds_min_blks ]] &&
4469 base_blks=$mds_orig_blks || base_blks=$mds_min_blks
4470 new_blks=$(( (mds_max_blks - base_blks) / 2 + base_blks ))
4471 run_resize2fs $SINGLEMDS $mds_dev $new_blks ||
4472 error "shrink $SINGLEMDS to $new_blks failed"
4476 if [[ $ost_max_blks -gt $ost_min_blks &&
4477 $ost_max_blks -gt $ost_orig_blks ]]; then
4478 [[ $ost_orig_blks -gt $ost_min_blks ]] &&
4479 base_blks=$ost_orig_blks || base_blks=$ost_min_blks
4480 new_blks=$(( (ost_max_blks - base_blks) / 2 + base_blks ))
4481 run_resize2fs ost1 $ost_dev $new_blks ||
4482 error "shrink ost1 to $new_blks failed"
4486 # check whether the MDT or OST filesystem was shrunk or not
# The guard around the early-exit path below is in elided lines;
# presumably steps (9)/(10) only run when nothing was shrunk.
4488 combined_mgs_mds || stop_mgs || error "(9) stop mgs failed"
4489 reformat || error "(10) reformat Lustre filesystem failed"
4493 # run e2fsck on the MDT and OST devices again
4494 run_e2fsck $mds_host $mds_dev "-y"
4495 run_e2fsck $ost_host $ost_dev "-y"
4497 # mount the Lustre filesystem again
4501 log "check files after shrinking the MDT and OST filesystems"
4502 for i in $(seq $((num_files + 10))); do
4503 file=$MOUNT/$tdir/$tfile-$i
4504 $CHECKSTAT -t file -s 1048576 $file ||
4505 error "(11) checkstat $file failed"
4508 # unmount and reformat the Lustre filesystem
4509 cleanup || error "(12) cleanup Lustre filesystem failed"
4510 combined_mgs_mds || stop_mgs || error "(13) stop mgs failed"
4511 reformat || error "(14) reformat Lustre filesystem failed"
4513 run_test 78 "run resize2fs on MDT and OST filesystems"
4515 if ! combined_mgs_mds ; then
4521 # restore the values of MDSSIZE and OSTSIZE
4522 MDSSIZE=$STORED_MDSSIZE
4523 OSTSIZE=$STORED_OSTSIZE