3 # FIXME - there is no reason to use all of these different return codes,
4 # especially when most of them are mapped to something else anyway.
5 # The tests should use error() to describe the failure more clearly,
6 # and reduce the need to look into the tests to see what failed.
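# As a hedged illustration (not a change to any test), a check that currently
# uses a bare numeric return code, such as:
#     check_mount || return 41
# could instead report the failure directly with the framework's error():
#     check_mount || error "client mount is not healthy after setup"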
12 # bug number for skipped test: LU-2828
13 ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT 59 64"
14 # UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
18 if [ -r /etc/SuSE-release ]
20 local vers=`grep VERSION /etc/SuSE-release | awk '{print $3}'`
21 local patchlev=`grep PATCHLEVEL /etc/SuSE-release \
23 if [ $vers -eq 11 ] && [ $patchlev -eq 2 ]
31 if is_sles11; then # LU-2181
32 ALWAYS_EXCEPT="$ALWAYS_EXCEPT 23a 34b"
35 if [ "$FAILURE_MODE" = "HARD" ]; then
36 CONFIG_EXCEPTIONS="24a " && \
37 echo "Except the tests: $CONFIG_EXCEPTIONS for FAILURE_MODE=$FAILURE_MODE, bug 23573" && \
38 ALWAYS_EXCEPT="$ALWAYS_EXCEPT $CONFIG_EXCEPTIONS"
41 # bug number for skipped test:
42 # a tool to create lustre filesystem images
43 ALWAYS_EXCEPT="32newtarball $ALWAYS_EXCEPT"
46 PATH=$PWD/$SRCDIR:$SRCDIR:$SRCDIR/../utils:$PATH
48 PTLDEBUG=${PTLDEBUG:--1}
50 LUSTRE=${LUSTRE:-`dirname $0`/..}
51 RLUSTRE=${RLUSTRE:-$LUSTRE}
52 export MULTIOP=${MULTIOP:-multiop}
54 . $LUSTRE/tests/test-framework.sh
56 . ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
58 # use small MDS + OST size to speed formatting time
59 # do not use too small an MDSSIZE/OSTSIZE, which affects the default journal size
60 # STORED_MDSSIZE is used in test_18
61 STORED_MDSSIZE=$MDSSIZE
62 STORED_OSTSIZE=$OSTSIZE
66 if ! combined_mgs_mds; then
67 # bug number for skipped test: 23954
68 ALWAYS_EXCEPT="$ALWAYS_EXCEPT 24b"
71 # pass "-E lazy_itable_init" to mke2fs to speed up the formatting time
72 if [[ "$LDISKFS_MKFS_OPTS" != *lazy_itable_init* ]]; then
73 LDISKFS_MKFS_OPTS=$(csa_add "$LDISKFS_MKFS_OPTS" -E lazy_itable_init)
76 [ $(facet_fstype $SINGLEMDS) = "zfs" ] &&
77 # bug number for skipped test: LU-2778 LU-4444
78 ALWAYS_EXCEPT="$ALWAYS_EXCEPT 57b 69"
83 require_dsh_mds || exit 0
84 require_dsh_ost || exit 0
86 [ "$SLOW" = "no" ] && EXCEPT_SLOW="30a 31 45 69"
92 # The MGS must be started before the OSTs for a new fs, so start
93 # and stop to generate the startup logs.
96 wait_osc_import_state mds ost FULL
101 reformat_and_config() {
103 if ! combined_mgs_mds ; then
109 writeconf_or_reformat() {
110 # There are at most 2 OSTs for write_conf test
111 # who knows if/where $TUNEFS is installed?
112 # Better reformat if it fails...
113 writeconf_all $MDSCOUNT 2 ||
114 { echo "tunefs failed, reformatting instead" &&
115 reformat_and_config && return 0; }
125 start mgs $(mgsdevname) $MGS_MOUNT_OPTS
131 local dev=$(mdsdevname $num)
134 echo "start mds service on `facet_active_host $facet`"
135 start $facet ${dev} $MDS_MOUNT_OPTS $@ || return 94
141 local dev=$(mdsdevname $num)
144 echo "stop mds service on `facet_active_host $facet`"
145 # These tests all use non-failover stop
146 stop $facet -f || return 97
152 for num in $(seq $MDSCOUNT); do
153 start_mdt $num $@ || return 94
158 if ! combined_mgs_mds ; then
166 for num in $(seq $MDSCOUNT); do
167 stop_mdt $num || return 97
172 echo "stop mgs service on `facet_active_host mgs`"
173 # These tests all use non-failover stop
174 stop mgs -f || return 97
178 echo "start ost1 service on `facet_active_host ost1`"
179 start ost1 `ostdevname 1` $OST_MOUNT_OPTS $@ || return 95
183 echo "stop ost1 service on `facet_active_host ost1`"
184 # These tests all use non-failover stop
185 stop ost1 -f || return 98
189 echo "start ost2 service on `facet_active_host ost2`"
190 start ost2 `ostdevname 2` $OST_MOUNT_OPTS $@ || return 92
194 echo "stop ost2 service on `facet_active_host ost2`"
195 # These tests all use non-failover stop
196 stop ost2 -f || return 93
201 echo "mount $FSNAME on ${MOUNTPATH}....."
202 zconf_mount `hostname` $MOUNTPATH || return 96
206 local mountopt="-o remount,$1"
208 echo "remount '$1' lustre on ${MOUNTPATH}....."
209 zconf_mount `hostname` $MOUNTPATH "$mountopt" || return 96
214 echo "umount lustre on ${MOUNTPATH}....."
215 zconf_umount `hostname` $MOUNTPATH || return 97
218 manual_umount_client(){
221 echo "manual umount lustre on ${MOUNT}...."
222 do_facet client "umount -d ${FORCE} $MOUNT"
228 start_mds || error "MDT start failed"
229 start_ost || error "OST start failed"
230 mount_client $MOUNT || error "client start failed"
231 client_up || error "client_up failed"
235 if ! combined_mgs_mds ; then
244 unload_modules_conf () {
245 if combined_mgs_mds || ! local_mode; then
246 unload_modules || return 1
251 stop_ost || return 202
252 stop_mds || return 201
253 unload_modules_conf || return 203
257 umount_client $MOUNT || return 200
258 cleanup_nocli || return $?
262 do_facet client "cp /etc/passwd $DIR/a" || return 71
263 do_facet client "rm $DIR/a" || return 72
264 # make sure lustre is actually mounted (touch will block,
265 # but grep won't, so do it after)
266 do_facet client "grep $MOUNT' ' /proc/mounts > /dev/null" || return 73
267 echo "setup single mount lustre success"
271 do_facet client "touch $DIR/a" || return 71
272 do_facet client "rm $DIR/a" || return 72
273 do_facet client "touch $DIR2/a" || return 73
274 do_facet client "rm $DIR2/a" || return 74
275 echo "setup double mount lustre success"
280 if [ "$ONLY" == "setup" ]; then
285 if [ "$ONLY" == "cleanup" ]; then
292 #create single point mountpoint
298 check_mount || return 41
301 run_test 0 "single mount setup"
304 start_mds || error "MDT start failed"
306 echo "start ost second time..."
307 start_ost && error "2nd OST start should fail"
308 mount_client $MOUNT || error "client start failed"
309 check_mount || return 42
312 run_test 1 "start up ost twice (should return errors)"
316 echo "start mds second time.."
317 start_mdt 1 && error "2nd MDT start should fail"
320 check_mount || return 43
323 run_test 2 "start up mds twice (should return err)"
327 #mount.lustre returns an error if already in mtab
328 mount_client $MOUNT && error "2nd client mount should fail"
329 check_mount || return 44
332 run_test 3 "mount client twice (should return err)"
336 touch $DIR/$tfile || return 85
340 # ok for ost to fail shutdown
341 if [ 202 -ne $eno ]; then
346 run_test 4 "force cleanup ost, then cleanup"
348 test_5a() { # was test_5
350 touch $DIR/$tfile || return 1
351 fuser -m -v $MOUNT && echo "$MOUNT is in use by user space process."
353 stop_mds -f || return 2
355 # cleanup may return an error from the failed
356 # disconnects; for now I'll consider this successful
357 # if all the modules have unloaded.
361 echo "killing umount"
362 kill -TERM $UMOUNT_PID
363 echo "waiting for umount to finish"
365 if grep " $MOUNT " /proc/mounts; then
366 echo "test 5: /proc/mounts after failed umount"
370 echo "killing umount"
371 kill -TERM $UMOUNT_PID
372 echo "waiting for umount to finish"
374 grep " $MOUNT " /proc/mounts && echo "test 5: /proc/mounts after second umount" && return 11
378 # stop_mds is a no-op here, and should not fail
379 cleanup_nocli || return $?
380 # df may have lingering entry
382 # mtab may have lingering entry
386 while [ "$WAIT" -ne "$MAX_WAIT" ]; do
388 grep -q $MOUNT" " /etc/mtab || break
389 echo "Waiting /etc/mtab updated ... "
390 WAIT=$(( WAIT + sleep))
392 [ "$WAIT" -eq "$MAX_WAIT" ] && error "/etc/mtab is not updated in $WAIT secs"
393 echo "/etc/mtab updated in $WAIT secs"
395 run_test 5a "force cleanup mds, then cleanup"
403 grep " $MOUNT " /etc/mtab && \
404 error false "unexpected entry in mtab before mount" && return 10
408 if ! combined_mgs_mds ; then
409 trap cleanup_5b EXIT ERR
414 [ -d $MOUNT ] || mkdir -p $MOUNT
415 mount_client $MOUNT && rc=1
416 grep " $MOUNT " /etc/mtab && \
417 error "$MOUNT entry in mtab after failed mount" && rc=11
419 # stop_mds is a no-op here, and should not fail
420 cleanup_nocli || rc=$?
421 if ! combined_mgs_mds ; then
426 run_test 5b "Try to start a client with no MGS (should return errs)"
429 grep " $MOUNT " /etc/mtab && \
430 error false "unexpected entry in mtab before mount" && return 10
435 [ -d $MOUNT ] || mkdir -p $MOUNT
436 local oldfs="${FSNAME}"
437 FSNAME="wrong.${FSNAME}"
438 mount_client $MOUNT || :
440 grep " $MOUNT " /etc/mtab && \
441 error "$MOUNT entry in mtab after failed mount" && rc=11
443 cleanup_nocli || rc=$?
446 run_test 5c "cleanup after failed mount (bug 2712) (should return errs)"
449 grep " $MOUNT " /etc/mtab && \
450 error false "unexpected entry in mtab before mount" && return 10
456 mount_client $MOUNT || rc=1
458 grep " $MOUNT " /etc/mtab && \
459 error "$MOUNT entry in mtab after unmount" && rc=11
462 run_test 5d "mount with ost down"
465 grep " $MOUNT " /etc/mtab && \
466 error false "unexpected entry in mtab before mount" && return 10
472 #define OBD_FAIL_PTLRPC_DELAY_SEND 0x506
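# (In Lustre's obd_support.h the 0x80000000 bit OR'd into fail_loc is
# OBD_FAIL_ONCE, so the injected delay below should fire only once.)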
473 do_facet client "lctl set_param fail_loc=0x80000506"
474 mount_client $MOUNT || echo "mount failed (not fatal)"
476 grep " $MOUNT " /etc/mtab && \
477 error "$MOUNT entry in mtab after unmount" && rc=11
480 run_test 5e "delayed connect, don't crash (bug 10268)"
483 if combined_mgs_mds ; then
484 skip "combined mgs and mds"
488 grep " $MOUNT " /etc/mtab && \
489 error false "unexpected entry in mtab before mount" && return 10
493 [ -d $MOUNT ] || mkdir -p $MOUNT
494 mount_client $MOUNT &
496 echo client_mount pid is $pid
500 if ! ps -f -p $pid >/dev/null; then
503 grep " $MOUNT " /etc/mtab && echo "test 5f: mtab after mount"
504 error "mount returns $rc, expected to hang"
513 # mount should succeed after start mds
516 [ $rc -eq 0 ] || error "mount returned $rc"
517 grep " $MOUNT " /etc/mtab && echo "test 5f: mtab after mount"
521 run_test 5f "mds down, cleanup after failed mount (bug 2712)"
526 mount_client ${MOUNT} || return 87
527 touch $DIR/a || return 86
530 run_test 6 "manual umount, then mount again"
535 cleanup_nocli || return $?
537 run_test 7 "manual umount, then cleanup"
542 check_mount2 || return 45
543 umount_client $MOUNT2
546 run_test 8 "double mount setup"
551 do_facet ost1 lctl set_param debug=\'inode trace\' || return 1
552 do_facet ost1 lctl set_param subsystem_debug=\'mds ost\' || return 1
554 CHECK_PTLDEBUG="`do_facet ost1 lctl get_param -n debug`"
555 if [ "$CHECK_PTLDEBUG" ] && { \
556 [ "$CHECK_PTLDEBUG" = "trace inode warning error emerg console" ] ||
557 [ "$CHECK_PTLDEBUG" = "trace inode" ]; }; then
558 echo "lnet.debug success"
560 echo "lnet.debug: want 'trace inode', have '$CHECK_PTLDEBUG'"
563 CHECK_SUBSYS="`do_facet ost1 lctl get_param -n subsystem_debug`"
564 if [ "$CHECK_SUBSYS" ] && [ "$CHECK_SUBSYS" = "mds ost" ]; then
565 echo "lnet.subsystem_debug success"
567 echo "lnet.subsystem_debug: want 'mds ost', have '$CHECK_SUBSYS'"
570 stop_ost || return $?
572 run_test 9 "test ptldebug and subsystem for mkfs"
580 do_facet $facet "test -b $dev" || rc=1
581 if [[ "$size" ]]; then
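# size check: read 1 KB at an offset of $size KB; dd reports "1+0 records in"
# only if the device is at least that large, which the awk below extracts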
582 local in=$(do_facet $facet "dd if=$dev of=/dev/null bs=1k count=1 skip=$size 2>&1" |\
583 awk '($3 == "in") { print $1 }')
584 [[ $in = "1+0" ]] || rc=1
590 # Test 16 was to "verify that lustre will correct the mode of OBJECTS".
591 # But with new MDS stack we don't care about the mode of local objects
592 # anymore, so this test is removed. See bug 22944 for more details.
596 if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
597 skip "Only applicable to ldiskfs-based MDTs"
602 check_mount || return 41
605 echo "Remove mds config log"
606 if ! combined_mgs_mds ; then
610 do_facet mgs "$DEBUGFS -w -R 'unlink CONFIGS/$FSNAME-MDT0000' \
611 $(mgsdevname) || return \$?" || return $?
613 if ! combined_mgs_mds ; then
618 start_mds && return 42
621 run_test 17 "Verify failed mds_postsetup won't fail assertion (2936) (should return errs)"
624 if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
625 skip "Only applicable to ldiskfs-based MDTs"
629 local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
634 # check if current MDSSIZE is large enough
635 [ $MDSSIZE -ge $MIN ] && OK=1 && myMDSSIZE=$MDSSIZE && \
636 log "use MDSSIZE=$MDSSIZE"
638 # check if the global config has a large enough MDSSIZE
639 [ -z "$OK" -a ! -z "$STORED_MDSSIZE" ] && [ $STORED_MDSSIZE -ge $MIN ] && \
640 OK=1 && myMDSSIZE=$STORED_MDSSIZE && \
641 log "use STORED_MDSSIZE=$STORED_MDSSIZE"
643 # check if the block device is large enough
644 is_blkdev $SINGLEMDS $MDSDEV $MIN
645 local large_enough=$?
646 if [ -n "$OK" ]; then
647 [ $large_enough -ne 0 ] && OK=""
649 [ $large_enough -eq 0 ] && OK=1 && myMDSSIZE=$MIN &&
650 log "use device $MDSDEV with MIN=$MIN"
653 # check if a loopback device has enough space for fs metadata (5%)
655 if [ -z "$OK" ]; then
656 local SPACE=$(do_facet $SINGLEMDS "[ -f $MDSDEV -o ! -e $MDSDEV ] && df -P \\\$(dirname $MDSDEV)" |
657 awk '($1 != "Filesystem") {print $4}')
658 ! [ -z "$SPACE" ] && [ $SPACE -gt $((MIN / 20)) ] && \
659 OK=1 && myMDSSIZE=$MIN && \
660 log "use file $MDSDEV with MIN=$MIN"
663 [ -z "$OK" ] && skip_env "$MDSDEV too small for ${MIN}kB MDS" && return
666 echo "mount mds with large journal..."
668 local OLD_MDSSIZE=$MDSSIZE
672 echo "mount lustre system..."
674 check_mount || return 41
676 echo "check journal size..."
677 local FOUNDSIZE=$(do_facet $SINGLEMDS "$DEBUGFS -c -R 'stat <8>' $MDSDEV" | awk '/Size: / { print $NF; exit;}')
678 if [ $FOUNDSIZE -gt $((32 * 1024 * 1024)) ]; then
679 log "Success: mkfs creates large journals. Size: $((FOUNDSIZE >> 20))M"
681 error "expected journal size > 32M, found $((FOUNDSIZE >> 20))M"
689 run_test 18 "check mkfs creates large journals"
692 start_mds || return 1
693 stop_mds -f || return 2
695 run_test 19a "start/stop MDS without OSTs"
698 start_ost || return 1
699 stop_ost -f || return 2
701 run_test 19b "start/stop OSTs without MDS"
704 # first format the ost/mdt
708 check_mount || return 43
710 remount_client ro $MOUNT || return 44
711 touch $DIR/$tfile && echo "$DIR/$tfile created incorrectly" && return 45
712 [ -e $DIR/$tfile ] && echo "$DIR/$tfile exists incorrectly" && return 46
713 remount_client rw $MOUNT || return 47
715 [ ! -f $DIR/$tfile ] && echo "$DIR/$tfile missing" && return 48
716 MCNT=`grep -c $MOUNT /etc/mtab`
717 [ "$MCNT" -ne 1 ] && echo "$MOUNT in /etc/mtab $MCNT times" && return 49
722 run_test 20 "remount ro,rw mounts work and don't break /etc/mtab"
727 wait_osc_import_state mds ost FULL
731 run_test 21a "start mds before ost, stop ost first"
736 wait_osc_import_state mds ost FULL
740 run_test 21b "start ost before mds, stop mds first"
746 wait_osc_import_state mds ost2 FULL
750 #writeconf to remove all ost2 traces for subsequent tests
751 writeconf_or_reformat
753 run_test 21c "start mds between two osts, stop mds last"
756 if combined_mgs_mds ; then
757 skip "need separate mgs device" && return 0
767 wait_osc_import_state mds ost2 FULL
773 #writeconf to remove all ost2 traces for subsequent tests
774 writeconf_or_reformat
777 run_test 21d "start mgs then ost and then mds"
782 echo Client mount with ost in logs, but none running
784 # wait until mds connected to ost and open client connection
785 wait_osc_import_state mds ost FULL
788 # check_mount will block trying to contact ost
789 mcreate $DIR/$tfile || return 40
790 rm -f $DIR/$tfile || return 42
794 echo Client mount with a running ost
797 # if gss enabled, wait full time to let connection from
798 # mds to ost be established, due to the mismatch between
799 # initial connect timeout and gss context negotiation timeout.
800 # This perhaps could be removed after AT landed.
801 echo "sleep $((TIMEOUT + TIMEOUT + TIMEOUT))s"
802 sleep $((TIMEOUT + TIMEOUT + TIMEOUT))
805 wait_osc_import_state mds ost FULL
806 wait_osc_import_state client ost FULL
807 check_mount || return 41
812 run_test 22 "start a client before osts (should return errs)"
814 test_23a() { # was test_23
818 # force down client so that recovering mds waits for reconnect
819 local running=$(grep -c $MOUNT /proc/mounts) || true
820 if [ $running -ne 0 ]; then
821 echo "Stopping client $MOUNT (opts: -f)"
825 # enter recovery on mds
827 # try to start a new client
828 mount_client $MOUNT &
830 MOUNT_PID=$(ps -ef | grep "t lustre" | grep -v grep | awk '{print $2}')
831 MOUNT_LUSTRE_PID=`ps -ef | grep mount.lustre | grep -v grep | awk '{print $2}'`
832 echo mount pid is ${MOUNT_PID}, mount.lustre pid is ${MOUNT_LUSTRE_PID}
834 ps --ppid $MOUNT_LUSTRE_PID
835 echo "waiting for mount to finish"
837 # "ctrl-c" sends SIGINT but it usually (in script) does not work on child process
838 # SIGTERM works but it does not spread to offspring processses
839 kill -s TERM $MOUNT_PID
840 kill -s TERM $MOUNT_LUSTRE_PID
841 # we cannot wait on $MOUNT_PID because it is not a child of this shell
847 while [ "$WAIT" -lt "$MAX_WAIT" ]; do
849 PID1=$(ps -ef | awk '{print $2}' | grep -w $MOUNT_PID)
850 PID2=$(ps -ef | awk '{print $2}' | grep -w $MOUNT_LUSTRE_PID)
853 [ -z "$PID1" -a -z "$PID2" ] && break
854 echo "waiting for mount to finish ... "
855 WAIT=$(( WAIT + sleep))
857 if [ "$WAIT" -eq "$MAX_WAIT" ]; then
858 error "MOUNT_PID $MOUNT_PID and "\
859 "MOUNT_LUSTRE_PID $MOUNT_LUSTRE_PID still not killed in $WAIT secs"
862 stop_mds || error "stopping MDSes failed"
863 stop_ost || error "stopping OSSes failed"
865 run_test 23a "interrupt client during recovery mount delay"
870 test_23b() { # was test_23
873 # Simulate -EINTR during mount via OBD_FAIL_LDLM_CLOSE_THREAD
874 lctl set_param fail_loc=0x80000313
878 run_test 23b "Simulate -EINTR during mount"
880 fs2mds_HOST=$mds_HOST
881 fs2ost_HOST=$ost_HOST
883 MDSDEV1_2=$fs2mds_DEV
884 OSTDEV1_2=$fs2ost_DEV
885 OSTDEV2_2=$fs3ost_DEV
889 echo "umount $MOUNT2 ..."
890 umount $MOUNT2 || true
891 echo "stopping fs2mds ..."
892 stop fs2mds -f || true
893 echo "stopping fs2ost ..."
894 stop fs2ost -f || true
898 local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
900 if [ -z "$fs2ost_DEV" -o -z "$fs2mds_DEV" ]; then
901 is_blkdev $SINGLEMDS $MDSDEV && \
902 skip_env "mixed loopback and real device not working" && return
905 [ -n "$ost1_HOST" ] && fs2ost_HOST=$ost1_HOST
907 local fs2mdsdev=$(mdsdevname 1_2)
908 local fs2ostdev=$(ostdevname 1_2)
909 local fs2mdsvdev=$(mdsvdevname 1_2)
910 local fs2ostvdev=$(ostvdevname 1_2)
912 # test 8-char fsname as well
913 local FSNAME2=test1234
915 add fs2mds $(mkfs_opts mds1 ${fs2mdsdev} ) --nomgs --mgsnode=$MGSNID \
916 --fsname=${FSNAME2} --reformat $fs2mdsdev $fs2mdsvdev || exit 10
918 add fs2ost $(mkfs_opts ost1 ${fs2ostdev}) --fsname=${FSNAME2} \
919 --reformat $fs2ostdev $fs2ostvdev || exit 10
922 start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS && trap cleanup_fs2 EXIT INT
923 start fs2ost $fs2ostdev $OST_MOUNT_OPTS
925 $MOUNT_CMD $MGSNID:/${FSNAME2} $MOUNT2 || return 1
927 check_mount || return 2
928 # files written on 1 should not show up on 2
929 cp /etc/passwd $DIR/$tfile
931 [ -e $MOUNT2/$tfile ] && error "File bleed" && return 7
934 cp /etc/passwd $MOUNT2/b || return 3
935 rm $MOUNT2/b || return 4
936 # 2 is actually mounted
937 grep $MOUNT2' ' /proc/mounts > /dev/null || return 5
939 facet_failover fs2mds
940 facet_failover fs2ost
943 # the MDS must remain up until last MDT
945 MDS=$(do_facet $SINGLEMDS "lctl get_param -n devices" | awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1)
946 [ -z "$MDS" ] && error "No MDT" && return 8
948 cleanup_nocli || return 6
950 run_test 24a "Multiple MDTs on a single node"
953 local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
955 if [ -z "$fs2mds_DEV" ]; then
956 local dev=${SINGLEMDS}_dev
958 is_blkdev $SINGLEMDS $MDSDEV && \
959 skip_env "mixed loopback and real device not working" && return
962 local fs2mdsdev=$(mdsdevname 1_2)
963 local fs2mdsvdev=$(mdsvdevname 1_2)
965 add fs2mds $(mkfs_opts mds1 ${fs2mdsdev} ) --mgs --fsname=${FSNAME}2 \
966 --reformat $fs2mdsdev $fs2mdsvdev || exit 10
968 start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS && return 2
971 run_test 24b "Multiple MGSs on a single node (should return err)"
975 check_mount || return 2
976 local MODULES=$($LCTL modules | awk '{ print $2 }')
977 rmmod $MODULES 2>/dev/null || true
980 run_test 25 "Verify modules are referenced"
984 # we need modules before mount for sysctl, so make sure...
985 do_facet $SINGLEMDS "lsmod | grep -q lustre || modprobe lustre"
986 #define OBD_FAIL_MDS_FS_SETUP 0x135
987 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000135"
988 start_mds && echo MDS started && return 1
989 lctl get_param -n devices
990 DEVS=$(lctl get_param -n devices | egrep -v MG | wc -l)
991 [ $DEVS -gt 0 ] && return 2
992 # start mds to drop writeconf setting
993 start_mds || return 3
995 unload_modules_conf || return $?
997 run_test 26 "MDT startup failure cleans LOV (should return errs)"
1000 start_ost || return 1
1001 start_mds || return 2
1002 echo "Requeue thread should have started: "
1003 ps -e | grep ll_cfg_requeue
1004 set_conf_param_and_check ost1 \
1005 "lctl get_param -n obdfilter.$FSNAME-OST0000.client_cache_seconds" \
1006 "$FSNAME-OST0000.ost.client_cache_seconds" || return 3
1009 run_test 27a "Reacquire MGS lock if OST started first"
1014 local device=$(do_facet $SINGLEMDS "lctl get_param -n devices" |
1015 awk '($3 ~ "mdt" && $4 ~ "MDT0000") { print $4 }')
1017 facet_failover $SINGLEMDS
1018 set_conf_param_and_check $SINGLEMDS \
1019 "lctl get_param -n mdt.$device.identity_acquire_expire" \
1020 "$device.mdt.identity_acquire_expire" || return 3
1021 set_conf_param_and_check client \
1022 "lctl get_param -n mdc.$device-mdc-*.max_rpcs_in_flight"\
1023 "$device.mdc.max_rpcs_in_flight" || return 4
1027 run_test 27b "Reacquire MGS lock after failover"
1031 TEST="lctl get_param -n llite.$FSNAME-*.max_read_ahead_whole_mb"
1032 PARAM="$FSNAME.llite.max_read_ahead_whole_mb"
1034 FINAL=$(($ORIG + 1))
1035 set_conf_param_and_check client "$TEST" "$PARAM" $FINAL || return 3
1036 FINAL=$(($FINAL + 1))
1037 set_conf_param_and_check client "$TEST" "$PARAM" $FINAL || return 4
1038 umount_client $MOUNT || return 200
1041 if [ $RESULT -ne $FINAL ]; then
1042 echo "New config not seen: wanted $FINAL got $RESULT"
1045 echo "New config success: got $RESULT"
1047 set_conf_param_and_check client "$TEST" "$PARAM" $ORIG || return 5
1050 run_test 28 "permanent parameter setting"
1052 test_28a() { # LU-4221
1053 [[ $(lustre_version_code ost1) -ge $(version_code 2.5.52) ]] ||
1054 { skip "Need OST version at least 2.5.52" && return 0; }
1055 [ "$(facet_fstype ost1)" = "zfs" ] &&
1056 skip "LU-4221: no such proc params for ZFS OSTs" && return
1063 local device="$FSNAME-OST0000"
1067 # In this test we will set three kinds of proc parameters with
1069 # 1. the ones moved from the OFD to the OSD, and only their
1070 # symlinks kept in obdfilter
1071 # 2. non-symlink ones in the OFD
1072 # 3. non-symlink ones in the OSD
1075 # prepare a symlink parameter in the OFD
1076 name="writethrough_cache_enable"
1077 param="$device.ost.$name"
1078 cmd="$LCTL get_param -n obdfilter.$device.$name"
1080 # conf_param the symlink parameter in the OFD
1081 old=$(do_facet ost1 $cmd)
1082 new=$(((old + 1) % 2))
1083 set_conf_param_and_check ost1 "$cmd" "$param" $new ||
1084 error "lctl conf_param $device.ost.$param=$new failed"
1086 # conf_param the target parameter in the OSD
1087 param="$device.osd.$name"
1088 cmd="$LCTL get_param -n osd-*.$device.$name"
1089 set_conf_param_and_check ost1 "$cmd" "$param" $old ||
1090 error "lctl conf_param $device.osd.$param=$old failed"
1093 # prepare a non-symlink parameter in the OFD
1094 name="client_cache_seconds"
1095 param="$device.ost.$name"
1096 cmd="$LCTL get_param -n obdfilter.$device.$name"
1098 # conf_param the parameter in the OFD
1099 old=$(do_facet ost1 $cmd)
1101 set_conf_param_and_check ost1 "$cmd" "$param" $new ||
1102 error "lctl conf_param $device.ost.$param=$new failed"
1103 set_conf_param_and_check ost1 "$cmd" "$param" $old ||
1104 error "lctl conf_param $device.ost.$param=$old failed"
1107 # prepare a non-symlink parameter in the OSD
1108 name="lma_self_repair"
1109 param="$device.osd.$name"
1110 cmd="$LCTL get_param -n osd-*.$device.$name"
1112 # conf_param the parameter in the OSD
1113 old=$(do_facet ost1 $cmd)
1114 new=$(((old + 1) % 2))
1115 set_conf_param_and_check ost1 "$cmd" "$param" $new ||
1116 error "lctl conf_param $device.osd.$param=$new failed"
1117 set_conf_param_and_check ost1 "$cmd" "$param" $old ||
1118 error "lctl conf_param $device.osd.$param=$old failed"
1122 run_test 28a "set symlink parameters permanently with conf_param"
1125 [ "$OSTCOUNT" -lt "2" ] && skip_env "$OSTCOUNT < 2, skipping" && return
1126 setup > /dev/null 2>&1
1130 local PARAM="$FSNAME-OST0001.osc.active"
1131 local PROC_ACT="osc.$FSNAME-OST0001-osc-[^M]*.active"
1132 local PROC_UUID="osc.$FSNAME-OST0001-osc-[^M]*.ost_server_uuid"
1134 ACTV=$(lctl get_param -n $PROC_ACT)
1136 set_conf_param_and_check client \
1137 "lctl get_param -n $PROC_ACT" "$PARAM" $DEAC || return 2
1138 # also check ost_server_uuid status
1139 RESULT=$(lctl get_param -n $PROC_UUID | grep DEACTIV)
1140 if [ -z "$RESULT" ]; then
1141 echo "Live client not deactivated: $(lctl get_param -n $PROC_UUID)"
1144 echo "Live client success: got $RESULT"
1148 for num in $(seq $MDSCOUNT); do
1149 local mdtosc=$(get_mdtosc_proc_path mds${num} $FSNAME-OST0001)
1150 local MPROC="osc.$mdtosc.active"
1155 RESULT=$(do_facet mds${num} " lctl get_param -n $MPROC")
1156 [ ${PIPESTATUS[0]} = 0 ] || error "Can't read $MPROC"
1157 if [ $RESULT -eq $DEAC ]; then
1158 echo -n "MDT deactivated also after"
1159 echo "$WAIT sec (got $RESULT)"
1163 if [ $WAIT -eq $MAX ]; then
1164 echo -n "MDT not deactivated: wanted $DEAC"
1168 echo "Waiting $(($MAX - $WAIT))secs for MDT deactivated"
1171 # test new client starts deactivated
1172 umount_client $MOUNT || return 200
1174 RESULT=$(lctl get_param -n $PROC_UUID | grep DEACTIV | grep NEW)
1175 if [ -z "$RESULT" ]; then
1176 echo "New client not deactivated from start: $(lctl get_param -n $PROC_UUID)"
1179 echo "New client success: got $RESULT"
1182 # make sure it reactivates
1183 set_conf_param_and_check client \
1184 "lctl get_param -n $PROC_ACT" "$PARAM" $ACTV || return 6
1186 umount_client $MOUNT
1189 #writeconf to remove all ost2 traces for subsequent tests
1190 writeconf_or_reformat
1192 run_test 29 "permanently remove an OST"
1197 echo Big config llog
1198 TEST="lctl get_param -n llite.$FSNAME-*.max_read_ahead_whole_mb"
1200 LIST=(1 2 3 4 5 4 3 2 1 2 3 4 5 4 3 2 1 2 3 4 5)
1201 for i in ${LIST[@]}; do
1202 set_conf_param_and_check client "$TEST" \
1203 "$FSNAME.llite.max_read_ahead_whole_mb" $i || return 3
1205 # make sure client restart still works
1206 umount_client $MOUNT
1207 mount_client $MOUNT || return 4
1208 [ "$($TEST)" -ne "$i" ] && error "Param didn't stick across restart $($TEST) != $i"
1211 echo Erase parameter setting
1212 do_facet mgs "$LCTL conf_param -d $FSNAME.llite.max_read_ahead_whole_mb" || return 6
1213 umount_client $MOUNT
1214 mount_client $MOUNT || return 6
1216 echo "deleted (default) value=$FINAL, orig=$ORIG"
1217 # assumes this parameter started at the default value
1218 [ "$FINAL" -eq "$ORIG" ] || fail "Deleted value=$FINAL, orig=$ORIG"
1222 run_test 30a "Big config llog and conf_param deletion"
1227 # Make a fake nid. Use the OST nid, and add 20 to the least significant
1228 # numerical part of it. Hopefully that's not already a failover address for
1230 OSTNID=$(do_facet ost1 "$LCTL get_param nis" | tail -1 | awk '{print $1}')
1231 ORIGVAL=$(echo $OSTNID | egrep -oi "[0-9]*@")
1232 NEWVAL=$((($(echo $ORIGVAL | egrep -oi "[0-9]*") + 20) % 256))
1233 NEW=$(echo $OSTNID | sed "s/$ORIGVAL/$NEWVAL@/")
1234 echo "Using fake nid $NEW"
1236 TEST="$LCTL get_param -n osc.$FSNAME-OST0000-osc-[^M]*.import | grep failover_nids | sed -n 's/.*\($NEW\).*/\1/p'"
1237 set_conf_param_and_check client "$TEST" \
1238 "$FSNAME-OST0000.failover.node" $NEW ||
1239 error "didn't add failover nid $NEW"
1240 NIDS=$($LCTL get_param -n osc.$FSNAME-OST0000-osc-[^M]*.import | grep failover_nids)
1242 NIDCOUNT=$(($(echo "$NIDS" | wc -w) - 1))
1243 echo "should have 2 failover nids: $NIDCOUNT"
1244 [ $NIDCOUNT -eq 2 ] || error "Failover nid not added"
1245 do_facet mgs "$LCTL conf_param -d $FSNAME-OST0000.failover.node" || error "conf_param delete failed"
1246 umount_client $MOUNT
1247 mount_client $MOUNT || return 3
1249 NIDS=$($LCTL get_param -n osc.$FSNAME-OST0000-osc-[^M]*.import | grep failover_nids)
1251 NIDCOUNT=$(($(echo "$NIDS" | wc -w) - 1))
1252 echo "only 1 final nid should remain: $NIDCOUNT"
1253 [ $NIDCOUNT -eq 1 ] || error "Failover nids not removed"
1257 run_test 30b "Remove failover nids"
1259 test_31() { # bug 10734
1260 # ipaddr must not exist
1261 $MOUNT_CMD 4.3.2.1@tcp:/lustre $MOUNT || true
1264 run_test 31 "Connect to non-existent node (shouldn't crash)"
1268 T32_BLIMIT=20480 # Kbytes
1272 # This is not really a test but a tool to create new disk
1273 # image tarballs for the upgrade tests.
1275 # Disk image tarballs should be created on single-node
1276 # clusters by running this test with default configurations
1277 # plus a few mandatory environment settings that are verified
1278 # at the beginning of the test.
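# A hypothetical invocation (the device paths are placeholders and must not
# already exist; the run_test 32newtarball line below must also be
# uncommented) might look like:
#   FSNAME=t32fs MDSCOUNT=1 OSTCOUNT=1 \
#   MDSDEV1=/tmp/t32_mdt OSTDEV1=/tmp/t32_ost \
#   ONLY=32newtarball bash conf-sanity.sh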
1280 test_32newtarball() {
1284 local tmp=$TMP/t32_image_create
1286 if [ $FSNAME != t32fs -o $MDSCOUNT -ne 1 -o \
1287 \( -z "$MDSDEV" -a -z "$MDSDEV1" \) -o $OSTCOUNT -ne 1 -o \
1288 -z "$OSTDEV1" ]; then
1289 error "Needs FSNAME=t32fs MDSCOUNT=1 MDSDEV1=<nonexistent_file>" \
1290 "(or MDSDEV, in the case of b1_8) OSTCOUNT=1" \
1291 "OSTDEV1=<nonexistent_file>"
1295 echo "Found stale $tmp"
1300 tar cf - -C $src . | tar xf - -C $tmp/src
1301 dd if=/dev/zero of=$tmp/src/t32_qf_old bs=1M \
1302 count=$(($T32_BLIMIT / 1024 / 2))
1303 chown $T32_QID.$T32_QID $tmp/src/t32_qf_old
1305 # format ost with comma-separated NIDs to verify LU-4460
1306 local failnid="$(h2$NETTYPE 1.2.3.4),$(h2$NETTYPE 4.3.2.1)"
1307 MGSNID="$MGSNID,$MGSNID" OSTOPT="--failnode=$failnid" formatall
1311 [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.3.50) ] &&
1312 $LFS quotacheck -ug /mnt/$FSNAME
1313 $LFS setquota -u $T32_QID -b 0 -B $T32_BLIMIT -i 0 -I $T32_ILIMIT \
1316 tar cf - -C $tmp/src . | tar xf - -C /mnt/$FSNAME
1323 ls -Rni --time-style=+%s >$tmp/img/list
1324 find . ! -name .lustre -type f -exec sha1sum {} \; |
1325 sort -k 2 >$tmp/img/sha1sums
1327 $LCTL get_param -n version | head -n 1 |
1328 sed -e 's/^lustre: *//' >$tmp/img/commit
1330 [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.3.50) ] &&
1331 $LFS quotaon -ug /mnt/$FSNAME
1332 $LFS quota -u $T32_QID -v /mnt/$FSNAME
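# The awk pipelines below pick single columns out of 'lfs quota -v' output:
# find the row whose first field is the mount point, step to the next line
# when the long name wraps (NF == 1), then print the block-usage (kbytes)
# and inode-usage (files) columns; tr strips the '*' over-quota markers.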
1333 $LFS quota -v -u $T32_QID /mnt/$FSNAME |
1334 awk 'BEGIN { num='1' } { if ($1 == "'/mnt/$FSNAME'") \
1335 { if (NF == 1) { getline } else { num++ } ; print $num;} }' \
1336 | tr -d "*" > $tmp/img/bspace
1337 $LFS quota -v -u $T32_QID /mnt/$FSNAME |
1338 awk 'BEGIN { num='5' } { if ($1 == "'/mnt/$FSNAME'") \
1339 { if (NF == 1) { getline } else { num++ } ; print $num;} }' \
1340 | tr -d "*" > $tmp/img/ispace
1345 find -type f -exec sha1sum {} \; | sort -k 2 >$tmp/sha1sums.src
1348 if ! diff -u $tmp/sha1sums.src $tmp/img/sha1sums; then
1349 echo "Data verification failed"
1352 uname -r >$tmp/img/kernel
1353 uname -m >$tmp/img/arch
1355 mv ${MDSDEV1:-$MDSDEV} $tmp/img
1356 mv $OSTDEV1 $tmp/img
1358 version=$(sed -e 's/\(^[0-9]\+\.[0-9]\+\)\(.*$\)/\1/' $tmp/img/commit |
1359 sed -e 's/\./_/g') # E.g., "1.8.7" -> "1_8"
1362 tar cjvf $dst/disk$version-$(facet_fstype $SINGLEMDS).tar.bz2 -S *
1367 #run_test 32newtarball "Create a new test_32 disk image tarball for this version"
1370 # The list of applicable tarballs is returned via the caller's
1371 # variable "tarballs".
1374 local node=$(facet_active_host $SINGLEMDS)
1375 local r="do_node $node"
1377 if [ "$CLIENTONLY" ]; then
1378 skip "Client-only testing"
1382 if ! $r which $TUNEFS; then
1383 skip_env "tunefs.lustre required on $node"
1387 local IMGTYPE=$(facet_fstype $SINGLEMDS)
1389 tarballs=$($r find $RLUSTRE/tests -maxdepth 1 -name \'disk*-$IMGTYPE.tar.bz2\')
1391 if [ -z "$tarballs" ]; then
1392 skip "No applicable tarballs found"
1397 t32_test_cleanup() {
1399 local fstype=$(facet_fstype $SINGLEMDS)
1402 if $shall_cleanup_lustre; then
1403 umount $tmp/mnt/lustre || rc=$?
1405 if $shall_cleanup_mdt; then
1406 $r umount -d $tmp/mnt/mdt || rc=$?
1408 if $shall_cleanup_mdt1; then
1409 $r umount -d $tmp/mnt/mdt1 || rc=$?
1411 if $shall_cleanup_ost; then
1412 $r umount -d $tmp/mnt/ost || rc=$?
1417 if [ $fstype == "zfs" ]; then
1418 $r $ZPOOL destroy t32fs-mdt1 || rc=$?
1419 $r $ZPOOL destroy t32fs-ost1 || rc=$?
1424 t32_bits_per_long() {
1426 # Yes, this is not meant to be perfect.
1436 t32_reload_modules() {
1438 local all_removed=false
1441 while ((i < 20)); do
1442 echo "Unloading modules on $node: Attempt $i"
1443 do_rpc_nodes $node $LUSTRE_RMMOD $(facet_fstype $SINGLEMDS) &&
1445 do_rpc_nodes $node check_mem_leak || return 1
1446 if $all_removed; then
1447 do_rpc_nodes $node load_modules
1453 echo "Unloading modules on $node: Given up"
1457 t32_wait_til_devices_gone() {
1463 echo wait for devices to go
1464 while ((i < 20)); do
1465 devices=$(do_rpc_nodes $node $LCTL device_list | wc -l)
1466 loops=$(do_rpc_nodes $node losetup -a | grep -c t32)
1467 ((devices == 0 && loops == 0)) && return 0
1471 echo "waiting for dev on $node: dev $devices loop $loops given up"
1472 do_rpc_nodes $node "losetup -a"
1473 do_rpc_nodes $node "$LCTL device_list"
1477 t32_verify_quota() {
1481 local fstype=$(facet_fstype $SINGLEMDS)
1485 $LFS quota -u $T32_QID -v $mnt
1487 qval=$($LFS quota -v -u $T32_QID $mnt |
1488 awk 'BEGIN { num='1' } { if ($1 == "'$mnt'") \
1489 { if (NF == 1) { getline } else { num++ } ; print $num;} }' \
1491 [ $qval -eq $img_bspace ] || {
1492 echo "bspace, act:$qval, exp:$img_bspace"
1496 qval=$($LFS quota -v -u $T32_QID $mnt |
1497 awk 'BEGIN { num='5' } { if ($1 == "'$mnt'") \
1498 { if (NF == 1) { getline } else { num++ } ; print $num;} }' \
1500 [ $qval -eq $img_ispace ] || {
1501 echo "ispace, act:$qval, exp:$img_ispace"
1505 qval=$($LFS quota -v -u $T32_QID $mnt |
1506 awk 'BEGIN { num='3' } { if ($1 == "'$mnt'") \
1507 { if (NF == 1) { getline } else { num++ } ; print $num;} }' \
1509 [ $qval -eq $T32_BLIMIT ] || {
1510 echo "blimit, act:$qval, exp:$T32_BLIMIT"
1514 qval=$($LFS quota -v -u $T32_QID $mnt |
1515 awk 'BEGIN { num='7' } { if ($1 == "'$mnt'") \
1516 { if (NF == 1) { getline } else { num++ } ; print $num;} }' \
1518 [ $qval -eq $T32_ILIMIT ] || {
1519 echo "ilimit, act:$qval, exp:$T32_ILIMIT"
1523 do_node $node $LCTL conf_param $fsname.quota.mdt=ug
1524 cmd="$LCTL get_param -n osd-$fstype.$fsname-MDT0000"
1525 cmd=$cmd.quota_slave.enabled
1526 wait_update $node "$cmd" "ug" || {
1527 echo "Enable mdt quota failed"
1531 do_node $node $LCTL conf_param $fsname.quota.ost=ug
1532 cmd="$LCTL get_param -n osd-$fstype.$fsname-OST0000"
1533 cmd=$cmd.quota_slave.enabled
1534 wait_update $node "$cmd" "ug" || {
1535 echo "Enable ost quota failed"
1540 runas -u $T32_QID -g $T32_QID dd if=/dev/zero of=$mnt/t32_qf_new \
1541 bs=1M count=$(($T32_BLIMIT / 1024)) oflag=sync && {
1542 echo "Write succeed, but expect -EDQUOT"
1545 rm -f $mnt/t32_qf_new
1547 runas -u $T32_QID -g $T32_QID createmany -m $mnt/t32_qf_ \
1549 echo "Create succeed, but expect -EDQUOT"
1552 unlinkmany $mnt/t32_qf_ $T32_ILIMIT
1560 local dne_upgrade=${dne_upgrade:-"no"}
1561 local ff_convert=${ff_convert:-"no"}
1562 local shall_cleanup_mdt=false
1563 local shall_cleanup_mdt1=false
1564 local shall_cleanup_ost=false
1565 local shall_cleanup_lustre=false
1566 local node=$(facet_active_host $SINGLEMDS)
1567 local r="do_node $node"
1568 local node2=$(facet_active_host mds2)
1576 local nid=$($r $LCTL list_nids | head -1)
1582 local fstype=$(facet_fstype $SINGLEMDS)
1583 local mdt_dev=$tmp/mdt
1584 local ost_dev=$tmp/ost
1586 trap 'trap - RETURN; t32_test_cleanup' RETURN
1588 mkdir -p $tmp/mnt/lustre
1589 $r mkdir -p $tmp/mnt/{mdt,ost}
1590 $r tar xjvf $tarball -S -C $tmp || {
1591 error_noexit "Unpacking the disk image tarball"
1594 img_commit=$($r cat $tmp/commit)
1595 img_kernel=$($r cat $tmp/kernel)
1596 img_arch=$($r cat $tmp/arch)
1597 img_bspace=$($r cat $tmp/bspace)
1598 img_ispace=$($r cat $tmp/ispace)
1599 echo "Upgrading from $(basename $tarball), created with:"
1600 echo " Commit: $img_commit"
1601 echo " Kernel: $img_kernel"
1602 echo " Arch: $img_arch"
1604 local version=$(version_code $img_commit)
1605 [[ $version -ge $(version_code 2.5.0) ]] && ff_convert="no"
1607 if [ $fstype == "zfs" ]; then
1609 $r $ZPOOL import -f -d $tmp t32fs-mdt1
1610 $r $ZPOOL import -f -d $tmp t32fs-ost1
1611 mdt_dev=t32fs-mdt1/mdt1
1612 ost_dev=t32fs-ost1/ost1
1613 wait_update_facet $SINGLEMDS "$ZPOOL list |
1614 awk '/^t32fs-mdt1/ { print \\\$1 }'" "t32fs-mdt1" || {
1615 error_noexit "import zfs pool failed"
1620 $r $LCTL set_param debug="$PTLDEBUG"
1622 $r $TUNEFS --dryrun $mdt_dev || {
1624 error_noexit "tunefs.lustre before mounting the MDT"
1627 if [ "$writeconf" ]; then
1629 if [ $fstype == "ldiskfs" ]; then
1631 $r $TUNEFS --quota $mdt_dev || {
1633 error_noexit "Enable mdt quota feature"
1638 if [ -n "$($LCTL list_nids | grep -v '\(tcp\|lo\)[[:digit:]]*$')" ]; then
1639 [[ $(lustre_version_code mgs) -ge $(version_code 2.3.59) ]] ||
1640 { skip "LU-2200: Cannot run over InfiniBand w/o lctl replace_nids "
1641 "(Need MGS version at least 2.3.59)"; return 0; }
1643 local osthost=$(facet_active_host ost1)
1644 local ostnid=$(do_node $osthost $LCTL list_nids | head -1)
1647 if [ $fstype == "ldiskfs" ]; then
1650 $r $MOUNT_CMD -o $mopts $mdt_dev $tmp/mnt/mdt
1651 $r lctl replace_nids $fsname-OST0000 $ostnid
1652 $r lctl replace_nids $fsname-MDT0000 $nid
1653 $r umount -d $tmp/mnt/mdt
1656 mopts=exclude=$fsname-OST0000
1657 if [ $fstype == "ldiskfs" ]; then
1662 t32_wait_til_devices_gone $node
1664 $r $MOUNT_CMD -o $mopts $mdt_dev $tmp/mnt/mdt || {
1666 error_noexit "Mounting the MDT"
1669 shall_cleanup_mdt=true
1671 if [ "$dne_upgrade" != "no" ]; then
1672 local fs2mdsdev=$(mdsdevname 1_2)
1673 local fs2mdsvdev=$(mdsvdevname 1_2)
1675 echo "mkfs new MDT on ${fs2mdsdev}...."
1676 if [ $(facet_fstype mds1) == ldiskfs ]; then
1677 mkfsoptions="--mkfsoptions=\\\"-J size=8\\\""
1680 add fs2mds $(mkfs_opts mds2 $fs2mdsdev $fsname) --reformat \
1681 $mkfsoptions $fs2mdsdev $fs2mdsvdev > /dev/null || {
1682 error_noexit "Mkfs new MDT failed"
1686 $r $TUNEFS --dryrun $fs2mdsdev || {
1687 error_noexit "tunefs.lustre before mounting the MDT"
1691 echo "mount new MDT....$fs2mdsdev"
1692 $r mkdir -p $tmp/mnt/mdt1
1693 $r $MOUNT_CMD -o $mopts $fs2mdsdev $tmp/mnt/mdt1 || {
1694 error_noexit "mount mdt1 failed"
1698 $r $LCTL set_param -n mdt.${fsname}*.enable_remote_dir=1 ||
1699 error_noexit "enable remote dir create failed"
1701 shall_cleanup_mdt1=true
1704 uuid=$($r $LCTL get_param -n mdt.$fsname-MDT0000.uuid) || {
1705 error_noexit "Getting MDT UUID"
1708 if [ "$uuid" != $fsname-MDT0000_UUID ]; then
1709 error_noexit "Unexpected MDT UUID: \"$uuid\""
1713 $r $TUNEFS --dryrun $ost_dev || {
1714 error_noexit "tunefs.lustre before mounting the OST"
1717 if [ "$writeconf" ]; then
1718 mopts=mgsnode=$nid,$writeconf
1719 if [ $fstype == "ldiskfs" ]; then
1721 $r $TUNEFS --quota $ost_dev || {
1723 error_noexit "Enable ost quota feature"
1729 if [ $fstype == "ldiskfs" ]; then
1733 $r $MOUNT_CMD -o $mopts $ost_dev $tmp/mnt/ost || {
1734 error_noexit "Mounting the OST"
1737 shall_cleanup_ost=true
1739 uuid=$($r $LCTL get_param -n obdfilter.$fsname-OST0000.uuid) || {
1740 error_noexit "Getting OST UUID"
1743 if [ "$uuid" != $fsname-OST0000_UUID ]; then
1744 error_noexit "Unexpected OST UUID: \"$uuid\""
1748 $r $LCTL conf_param $fsname-OST0000.osc.max_dirty_mb=15 || {
1749 error_noexit "Setting \"max_dirty_mb\""
1752 $r $LCTL conf_param $fsname-OST0000.failover.node=$nid || {
1753 error_noexit "Setting OST \"failover.node\""
1756 $r $LCTL conf_param $fsname-MDT0000.mdc.max_rpcs_in_flight=9 || {
1757 error_noexit "Setting \"max_rpcs_in_flight\""
1760 $r $LCTL conf_param $fsname-MDT0000.failover.node=$nid || {
1761 error_noexit "Setting MDT \"failover.node\""
1764 $r $LCTL pool_new $fsname.interop || {
1765 error_noexit "Setting \"interop\""
1768 $r $LCTL conf_param $fsname-MDT0000.lov.stripesize=4M || {
1769 error_noexit "Setting \"lov.stripesize\""
1773 if [ "$ff_convert" != "no" -a $(facet_fstype ost1) == "ldiskfs" ]; then
1774 $r $LCTL lfsck_start -M $fsname-OST0000 || {
1775 error_noexit "Start OI scrub on OST0"
1779 # The oi_scrub should be on ost1, but for test_32()
1780 # everything is on the SINGLEMDS.
1781 wait_update_facet $SINGLEMDS "$LCTL get_param -n \
1782 osd-ldiskfs.$fsname-OST0000.oi_scrub |
1783 awk '/^status/ { print \\\$2 }'" "completed" 30 || {
1784 error_noexit "Failed to get the expected 'completed'"
1788 local UPDATED=$($r $LCTL get_param -n \
1789 osd-ldiskfs.$fsname-OST0000.oi_scrub |
1790 awk '/^updated/ { print $2 }')
1791 [ $UPDATED -ge 1 ] || {
1792 error_noexit "Only $UPDATED objects have been converted"
1797 if [ "$dne_upgrade" != "no" ]; then
1798 $r $LCTL conf_param \
1799 $fsname-MDT0001.mdc.max_rpcs_in_flight=9 || {
1800 error_noexit "Setting MDT1 \"max_rpcs_in_flight\""
1803 $r $LCTL conf_param $fsname-MDT0001.failover.node=$nid || {
1804 error_noexit "Setting MDT1 \"failover.node\""
1807 $r $LCTL conf_param $fsname-MDT0001.lov.stripesize=4M || {
1808 error_noexit "Setting MDT1 \"lov.stripesize\""
1814 if [ "$writeconf" ]; then
1815 $MOUNT_CMD $nid:/$fsname $tmp/mnt/lustre || {
1816 error_noexit "Mounting the client"
1819 shall_cleanup_lustre=true
1820 $LCTL set_param debug="$PTLDEBUG"
1822 t32_verify_quota $node $fsname $tmp/mnt/lustre || {
1823 error_noexit "verify quota failed"
1827 if [ "$dne_upgrade" != "no" ]; then
1828 $LFS mkdir -i 1 $tmp/mnt/lustre/remote_dir || {
1829 error_noexit "set remote dir failed"
1833 pushd $tmp/mnt/lustre
1834 tar -cf - . --exclude=./remote_dir |
1835 tar -xvf - -C remote_dir 1>/dev/null || {
1836 error_noexit "cp to remote dir failed"
1842 dd if=/dev/zero of=$tmp/mnt/lustre/tmp_file bs=10k count=10 || {
1843 error_noexit "dd failed"
1846 rm -rf $tmp/mnt/lustre/tmp_file || {
1847 error_noexit "rm failed"
1851 if $r test -f $tmp/sha1sums; then
1852 # LU-2393 - do both sorts on same node to ensure locale
1854 $r cat $tmp/sha1sums | sort -k 2 >$tmp/sha1sums.orig
1855 if [ "$dne_upgrade" != "no" ]; then
1856 pushd $tmp/mnt/lustre/remote_dir
1858 pushd $tmp/mnt/lustre
1861 find ! -name .lustre -type f -exec sha1sum {} \; |
1862 sort -k 2 >$tmp/sha1sums || {
1863 error_noexit "sha1sum"
1867 if ! diff -ub $tmp/sha1sums.orig $tmp/sha1sums; then
1868 error_noexit "sha1sum verification failed"
1872 echo "sha1sum verification skipped"
1875 if [ "$dne_upgrade" != "no" ]; then
1876 rm -rf $tmp/mnt/lustre/remote_dir || {
1877 error_noexit "remove remote dir failed"
1882 if $r test -f $tmp/list; then
1884 # There is no Test Framework API to copy files to or
1885 # from a remote node.
1887 # LU-2393 - do both sorts on same node to ensure locale
1889 $r cat $tmp/list | sort -k 6 >$tmp/list.orig
1890 pushd $tmp/mnt/lustre
1891 ls -Rni --time-style=+%s | sort -k 6 >$tmp/list || {
1897 # 32-bit and 64-bit clients use different algorithms to
1898 # convert FIDs into inode numbers. Hence, remove the inode
1899 # numbers from the lists, if the original list was created
1900 # on an architecture with a different number of bits per
1903 if [ $(t32_bits_per_long $(uname -m)) != \
1904 $(t32_bits_per_long $img_arch) ]; then
1905 echo "Different number of bits per \"long\" from the disk image"
1906 for list in list.orig list; do
1907 sed -i -e 's/^[0-9]\+[ \t]\+//' $tmp/$list
1910 if ! diff -ub $tmp/list.orig $tmp/list; then
1911 error_noexit "list verification failed"
1915 echo "list verification skipped"
1919 # When adding new data verification tests, please check for
1920 # the presence of the required reference files first, like
1921 # the "sha1sums" and "list" tests above, to avoid the need to
1922 # regenerate every image for each test addition.
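# A minimal sketch of that guard pattern, with "newcheck" as a purely
# hypothetical reference file:
#   if $r test -f $tmp/newcheck; then
#           <verify against $tmp/newcheck>
#   else
#           echo "newcheck verification skipped"
#   fi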
1925 nrpcs_orig=$($LCTL get_param \
1926 -n mdc.*MDT0000*.max_rpcs_in_flight) || {
1927 error_noexit "Getting \"max_rpcs_in_flight\""
1930 nrpcs=$((nrpcs_orig + 5))
1931 $r $LCTL conf_param $fsname-MDT0000.mdc.max_rpcs_in_flight=$nrpcs || {
1932 error_noexit "Changing \"max_rpcs_in_flight\""
1935 wait_update $HOSTNAME "$LCTL get_param \
1936 -n mdc.*MDT0000*.max_rpcs_in_flight" $nrpcs || {
1937 error_noexit "Verifying \"max_rpcs_in_flight\""
1941 umount $tmp/mnt/lustre || {
1942 error_noexit "Unmounting the client"
1945 shall_cleanup_lustre=false
1947 if [ "$dne_upgrade" != "no" ]; then
1948 $r umount -d $tmp/mnt/mdt1 || {
1949 error_noexit "Unmounting the MDT2"
1952 shall_cleanup_mdt1=false
1955 $r umount -d $tmp/mnt/mdt || {
1956 error_noexit "Unmounting the MDT"
1959 shall_cleanup_mdt=false
1961 $r umount -d $tmp/mnt/ost || {
1962 error_noexit "Unmounting the OST"
1965 shall_cleanup_ost=false
1967 t32_reload_modules $node || {
1968 error_noexit "Reloading modules"
1972 # mount a second time to make sure we didn't leave the upgrade flag on
1973 $r $TUNEFS --dryrun $mdt_dev || {
1975 error_noexit "tunefs.lustre before remounting the MDT"
1979 mopts=exclude=$fsname-OST0000
1980 if [ $fstype == "ldiskfs" ]; then
1983 $r $MOUNT_CMD -o $mopts $mdt_dev $tmp/mnt/mdt || {
1984 error_noexit "Remounting the MDT"
1987 shall_cleanup_mdt=true
1997 for tarball in $tarballs; do
1998 t32_test $tarball || let "rc += $?"
2002 run_test 32a "Upgrade (not live)"
2010 for tarball in $tarballs; do
2011 t32_test $tarball writeconf || let "rc += $?"
2015 run_test 32b "Upgrade with writeconf"
2022 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
2024 for tarball in $tarballs; do
2025 dne_upgrade=yes t32_test $tarball writeconf || rc=$?
2029 run_test 32c "dne upgrade test"
2037 for tarball in $tarballs; do
2038 ff_convert=yes t32_test $tarball || rc=$?
2042 run_test 32d "convert ff test"
2044 test_33a() { # bug 12333, was test_33
2046 local FSNAME2=test-123
2047 local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
2050 [ -n "$ost1_HOST" ] && fs2ost_HOST=$ost1_HOST
2052 if [ -z "$fs2ost_DEV" -o -z "$fs2mds_DEV" ]; then
2053 local dev=${SINGLEMDS}_dev
2054 local MDSDEV=${!dev}
2055 is_blkdev $SINGLEMDS $MDSDEV && \
2056 skip_env "mixed loopback and real device not working" && return
2059 local fs2mdsdev=$(mdsdevname 1_2)
2060 local fs2ostdev=$(ostdevname 1_2)
2061 local fs2mdsvdev=$(mdsvdevname 1_2)
2062 local fs2ostvdev=$(ostvdevname 1_2)
2064 if [ $(facet_fstype mds1) == ldiskfs ]; then
2065 mkfsoptions="--mkfsoptions=\\\"-J size=8\\\"" # See bug 17931.
2068 add fs2mds $(mkfs_opts mds1 ${fs2mdsdev}) --mgs --fsname=${FSNAME2} \
2069 --reformat $mkfsoptions $fs2mdsdev $fs2mdsvdev || exit 10
2070 add fs2ost $(mkfs_opts ost1 ${fs2ostdev}) --mgsnode=$MGSNID \
2071 --fsname=${FSNAME2} --index=8191 --reformat $fs2ostdev \
2072 $fs2ostvdev || exit 10
2074 start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS && trap cleanup_fs2 EXIT INT
2075 start fs2ost $fs2ostdev $OST_MOUNT_OPTS
2076 do_facet $SINGLEMDS "$LCTL conf_param $FSNAME2.sys.timeout=200" || rc=1
2078 $MOUNT_CMD $MGSNID:/${FSNAME2} $MOUNT2 || rc=2
2081 cp /etc/hosts $MOUNT2/ || rc=3
2082 $LFS getstripe $MOUNT2/hosts
2087 cleanup_nocli || rc=6
2090 run_test 33a "Mount ost with a large index number"
2092 test_33b() { # was test_34
2095 do_facet client dd if=/dev/zero of=$MOUNT/24 bs=1024k count=1
2096 # Drop lock cancellation reply during umount
2097 #define OBD_FAIL_LDLM_CANCEL_NET 0x304
2098 do_facet client lctl set_param fail_loc=0x80000304
2099 #lctl set_param debug=-1
2100 umount_client $MOUNT
2103 run_test 33b "Drop cancel during umount"
2107 do_facet client "sh runmultiop_bg_pause $DIR/file O_c"
2108 manual_umount_client
2110 do_facet client killall -USR1 multiop
2111 if [ $rc -eq 0 ]; then
2112 error "umount not fail!"
2117 run_test 34a "umount with opened file should fail"
2122 touch $DIR/$tfile || return 1
2123 stop_mds --force || return 2
2125 manual_umount_client --force
2127 if [ $rc -ne 0 ]; then
2128 error "mtab after failed umount - rc $rc"
2134 run_test 34b "force umount with failed mds should succeed"
2138 touch $DIR/$tfile || return 1
2139 stop_ost --force || return 2
2141 manual_umount_client --force
2143 if [ $rc -ne 0 ]; then
2144 error "mtab after failed umount - rc $rc"
2150 run_test 34c "force umount with failed ost should succeed"
2152 test_35a() { # bug 12459
2155 DBG_SAVE="`lctl get_param -n debug`"
2156 lctl set_param debug="ha"
2158 log "Set up a fake failnode for the MDS"
2160 local device=$(do_facet $SINGLEMDS "lctl get_param -n devices" |
2161 awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1)
2162 do_facet mgs "$LCTL conf_param \
2163 ${device}.failover.node=$(h2$NETTYPE $FAKENID)" || return 4
2165 log "Wait for RECONNECT_INTERVAL seconds (10s)"
2168 MSG="conf-sanity.sh test_35a `date +%F%kh%Mm%Ss`"
2171 log "Stopping the MDT: $device"
2172 stop_mdt 1 || return 5
2174 df $MOUNT > /dev/null 2>&1 &
2176 log "Restarting the MDT: $device"
2177 start_mdt 1 || return 6
2178 log "Wait for df ($DFPID) ... "
2181 lctl set_param debug="$DBG_SAVE"
2183 # retrieve from the log the first server that the client tried to
2184 # contact after the connection loss
2185 $LCTL dk $TMP/lustre-log-$TESTNAME.log
2186 NEXTCONN=`awk "/${MSG}/ {start = 1;}
2187 /import_select_connection.*$device-mdc.* using connection/ {
2189 if (\\\$NF ~ /$FAKENID/)
2195 }" $TMP/lustre-log-$TESTNAME.log`
2196 [ "$NEXTCONN" != "0" ] && log "The client didn't try to reconnect to the last active server (tried ${NEXTCONN} instead)" && return 7
2198 # remove nid settings
2199 writeconf_or_reformat
2201 run_test 35a "Reconnect to the last active server first"
2203 test_35b() { # bug 18674
2204 remote_mds || { skip "local MDS" && return 0; }
2208 $LCTL set_param debug="ha"
2210 MSG="conf-sanity.sh test_35b `date +%F%kh%Mm%Ss`"
2213 log "Set up a fake failnode for the MDS"
2215 local device=$(do_facet $SINGLEMDS "$LCTL get_param -n devices" |
2216 awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1)
2217 do_facet mgs "$LCTL conf_param \
2218 ${device}.failover.node=$(h2$NETTYPE $FAKENID)" || return 1
2220 local at_max_saved=0
2221 # adaptive timeouts may prevent seeing the issue
2222 if at_is_enabled; then
2223 at_max_saved=$(at_max_get mds)
2224 at_max_set 0 mds client
2227 mkdir -p $MOUNT/$tdir
2229 log "Injecting EBUSY on MDS"
2230 # Setting OBD_FAIL_MDS_RESEND=0x136
2231 do_facet $SINGLEMDS "$LCTL set_param fail_loc=0x80000136" || return 2
2233 $LCTL set_param mdc.${FSNAME}*.stats=clear
2235 log "Creating a test file and stat it"
2236 touch $MOUNT/$tdir/$tfile
2237 stat $MOUNT/$tdir/$tfile
2239 log "Stop injecting EBUSY on MDS"
2240 do_facet $SINGLEMDS "$LCTL set_param fail_loc=0" || return 3
2241 rm -f $MOUNT/$tdir/$tfile
2244 # restore adaptive timeout
2245 [ $at_max_saved -ne 0 ] && at_max_set $at_max_saved mds client
2247 $LCTL dk $TMP/lustre-log-$TESTNAME.log
2249 CONNCNT=`$LCTL get_param mdc.${FSNAME}*.stats | awk '/mds_connect/{print $2}'`
2251 # retrieve from the log whether the client ever tried to
2252 # contact the fake server after the loss of connection
2253 FAILCONN=`awk "BEGIN {ret = 0;}
2254 /import_select_connection.*${FSNAME}-MDT0000-mdc.* using connection/ {
2256 if (\\\$NF ~ /$FAKENID/) {
2261 END {print ret}" $TMP/lustre-log-$TESTNAME.log`
2263 [ "$FAILCONN" == "0" ] && \
2264 log "ERROR: The client reconnection has not been triggered" && \
2266 [ "$FAILCONN" == "2" ] && \
2267 log "ERROR: The client tried to reconnect to the failover server while the primary was busy" && \
2271 # When OBD_FAIL_MDS_RESEND is hit, we sleep for 2 * obd_timeout
2272 # Reconnects are supposed to be rate limited to one every 5s
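# so roughly 2 * TIMEOUT / 5 connect attempts (plus one for slack) are expected;
# e.g. with TIMEOUT=20 that allows up to 2*20/5+1 = 9 mds_connect RPCs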
2273 [ $CONNCNT -gt $((2 * $TIMEOUT / 5 + 1)) ] && \
2274 log "ERROR: Too many reconnects $CONNCNT" && \
2278 # remove nid settings
2279 writeconf_or_reformat
2281 run_test 35b "Continue reconnection retries if the active server is busy"
2284 [ $OSTCOUNT -lt 2 ] && skip_env "skipping test for single OST" && return
2286 [ "$ost_HOST" = "`hostname`" -o "$ost1_HOST" = "`hostname`" ] || \
2287 { skip "remote OST" && return 0; }
2290 local FSNAME2=test1234
2291 local fs3ost_HOST=$ost_HOST
2292 local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
2294 [ -n "$ost1_HOST" ] && fs2ost_HOST=$ost1_HOST && fs3ost_HOST=$ost1_HOST
2296 if [ -z "$fs2ost_DEV" -o -z "$fs2mds_DEV" -o -z "$fs3ost_DEV" ]; then
2297 is_blkdev $SINGLEMDS $MDSDEV && \
2298 skip_env "mixed loopback and real device not working" && return
2301 local fs2mdsdev=$(mdsdevname 1_2)
2302 local fs2ostdev=$(ostdevname 1_2)
2303 local fs3ostdev=$(ostdevname 2_2)
2304 local fs2mdsvdev=$(mdsvdevname 1_2)
2305 local fs2ostvdev=$(ostvdevname 1_2)
2306 local fs3ostvdev=$(ostvdevname 2_2)
2308 add fs2mds $(mkfs_opts mds1 ${fs2mdsdev}) --mgs --fsname=${FSNAME2} \
2309 --reformat $fs2mdsdev $fs2mdsvdev || exit 10
2310 # XXX after we support non 4K disk blocksize in ldiskfs, specify a
2311 # different one than the default value here.
2312 add fs2ost $(mkfs_opts ost1 ${fs2ostdev}) --mgsnode=$MGSNID \
2313 --fsname=${FSNAME2} --reformat $fs2ostdev $fs2ostvdev || exit 10
2314 add fs3ost $(mkfs_opts ost2 ${fs3ostdev}) --mgsnode=$MGSNID \
2315 --fsname=${FSNAME2} --reformat $fs3ostdev $fs3ostvdev || exit 10
2317 start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS
2318 start fs2ost $fs2ostdev $OST_MOUNT_OPTS
2319 start fs3ost $fs3ostdev $OST_MOUNT_OPTS
2321 $MOUNT_CMD $MGSNID:/${FSNAME2} $MOUNT2 || return 1
2323 sleep 5 # until 11778 fixed
2325 dd if=/dev/zero of=$MOUNT2/$tfile bs=1M count=7 || return 2
2327 BKTOTAL=`lctl get_param -n obdfilter.*.kbytestotal | awk 'BEGIN{total=0}; {total+=$1}; END{print total}'`
2328 BKFREE=`lctl get_param -n obdfilter.*.kbytesfree | awk 'BEGIN{free=0}; {free+=$1}; END{print free}'`
2329 BKAVAIL=`lctl get_param -n obdfilter.*.kbytesavail | awk 'BEGIN{avail=0}; {avail+=$1}; END{print avail}'`
2330 STRING=`df -P $MOUNT2 | tail -n 1 | awk '{print $2","$3","$4}'`
2331 DFTOTAL=`echo $STRING | cut -d, -f1`
2332 DFUSED=`echo $STRING | cut -d, -f2`
2333 DFAVAIL=`echo $STRING | cut -d, -f3`
2334 DFFREE=$(($DFTOTAL - $DFUSED))
2336 ALLOWANCE=$((64 * $OSTCOUNT))
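# allow df and the obdfilter counters to disagree by up to 64 KB per OST;
# both 'df -P' and the kbytes* parameters report 1 KB units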
2338 if [ $DFTOTAL -lt $(($BKTOTAL - $ALLOWANCE)) ] ||
2339 [ $DFTOTAL -gt $(($BKTOTAL + $ALLOWANCE)) ] ; then
2340 echo "**** FAIL: df total($DFTOTAL) mismatch OST total($BKTOTAL)"
2343 if [ $DFFREE -lt $(($BKFREE - $ALLOWANCE)) ] ||
2344 [ $DFFREE -gt $(($BKFREE + $ALLOWANCE)) ] ; then
2345 echo "**** FAIL: df free($DFFREE) mismatch OST free($BKFREE)"
2348 if [ $DFAVAIL -lt $(($BKAVAIL - $ALLOWANCE)) ] ||
2349 [ $DFAVAIL -gt $(($BKAVAIL + $ALLOWANCE)) ] ; then
2350 echo "**** FAIL: df avail($DFAVAIL) mismatch OST avail($BKAVAIL)"
2355 stop fs3ost -f || return 200
2356 stop fs2ost -f || return 201
2357 stop fs2mds -f || return 202
2358 unload_modules_conf || return 203
2361 run_test 36 "df report consistency on OSTs with different block size"
2364 local mntpt=$(facet_mntpt $SINGLEMDS)
2365 local mdsdev=$(mdsdevname ${SINGLEMDS//mds/})
2366 local mdsdev_sym="$TMP/sym_mdt.img"
2367 local opts=$MDS_MOUNT_OPTS
2370 if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
2371 skip "Currently only applicable to ldiskfs-based MDTs"
2375 echo "MDS : $mdsdev"
2376 echo "SYMLINK : $mdsdev_sym"
2377 do_facet $SINGLEMDS rm -f $mdsdev_sym
2379 do_facet $SINGLEMDS ln -s $mdsdev $mdsdev_sym
2381 echo "mount symlink device - $mdsdev_sym"
2383 if ! do_facet $SINGLEMDS test -b $mdsdev; then
2384 opts=$(csa_add "$opts" -o loop)
2386 mount_op=$(do_facet $SINGLEMDS mount -v -t lustre $opts \
2387 $mdsdev_sym $mntpt 2>&1)
2390 echo mount_op=$mount_op
2392 do_facet $SINGLEMDS "umount -d $mntpt && rm -f $mdsdev_sym"
2394 if $(echo $mount_op | grep -q "unable to set tunable"); then
2395 error "set tunables failed for symlink device"
2398 [ $rc -eq 0 ] || error "mount symlink $mdsdev_sym failed! rc=$rc"
2402 run_test 37 "verify set tunables works for symlink device"
2404 test_38() { # bug 14222
2405 local fstype=$(facet_fstype $SINGLEMDS)
2406 local mntpt=$(facet_mntpt $SINGLEMDS)
2411 local SRC="/etc /bin"
2412 local FILES=$(find $SRC -type f -mtime +1 | head -n $COUNT)
2413 log "copying $(echo $FILES | wc -w) files to $DIR/$tdir"
2415 tar cf - $FILES | tar xf - -C $DIR/$tdir ||
2416 error "copying $SRC to $DIR/$tdir"
2418 umount_client $MOUNT
2419 do_facet $SINGLEMDS "$LCTL get_param osp.*.prealloc_next_id"
2421 log "delete lov_objid file on MDS"
2423 mount_fstype $SINGLEMDS || error "mount MDS failed (1)"
2425 do_facet $SINGLEMDS "od -Ax -td8 $mntpt/lov_objid; rm $mntpt/lov_objid"
2427 unmount_fstype $SINGLEMDS || error "umount failed (1)"
2429 # check create in mds_lov_connect
2433 [ $V ] && log "verifying $DIR/$tdir/$f"
2434 diff -q $f $DIR/$tdir/$f || ERROR=y
2436 do_facet $SINGLEMDS "$LCTL get_param osp.*.prealloc_next_id"
2437 if [ "$ERROR" = "y" ]; then
2438 # check its updates are in sync
2439 umount_client $MOUNT
2441 mount_fstype $SINGLEMDS || error "mount MDS failed (2)"
2442 do_facet $SINGLEMDS "od -Ax -td8 $mntpt/lov_objid"
2443 unmount_fstype $SINGLEMDS
2444 error "old and new files are different after connect" || true
2446 touch $DIR/$tdir/f2 || error "f2 file create failed"
2448 # check that updates are in sync
2449 umount_client $MOUNT
2452 mount_fstype $SINGLEMDS || error "mount MDS failed (3)"
2454 do_facet $SINGLEMDS "od -Ax -td8 $mntpt/lov_objid"
2455 do_facet $SINGLEMDS dd if=/dev/zero of=$mntpt/lov_objid.clear count=8
2457 unmount_fstype $SINGLEMDS || error "umount failed (3)"
2462 [ $V ] && log "verifying $DIR/$tdir/$f"
2463 diff -q $f $DIR/$tdir/$f || ERROR=y
2465 touch $DIR/$tdir/f3 || error "f3 file create failed"
2466 do_facet $SINGLEMDS "$LCTL get_param osp.*.prealloc_next_id"
2467 umount_client $MOUNT
2469 mount_fstype $SINGLEMDS || error "mount MDS failed (4)"
2470 do_facet $SINGLEMDS "od -Ax -td8 $mntpt/lov_objid"
2471 unmount_fstype $SINGLEMDS || error "umount failed (4)"
2473 [ "$ERROR" = "y" ] &&
2474 error "old and new files are different after sync" || true
2476 log "files compared the same"
2479 run_test 38 "MDS recreates missing lov_objid file from OST data"
2485 perl $SRCDIR/leak_finder.pl $TMP/debug 2>&1 | egrep '\*\*\* Leak:' &&
2486 error "memory leak detected" || true
2488 run_test 39 "leak_finder recognizes both LUSTRE and LNET malloc messages"
2490 test_40() { # bug 15759
2492 #define OBD_FAIL_TGT_TOOMANY_THREADS 0x706
2493 do_facet $SINGLEMDS "$LCTL set_param fail_loc=0x80000706"
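# (0x80000706 is fail_loc 0x706 with the high "fail once" bit set, so the
#  failure is injected only on the first hit; a reset along the lines of
#  do_facet $SINGLEMDS "$LCTL set_param fail_loc=0" would clear it - shown
#  here purely as an illustration, not part of the original test.)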
2497 run_test 40 "race during service thread startup"
2499 test_41a() { #bug 14134
2500 if [ $(facet_fstype $SINGLEMDS) == ldiskfs ] &&
2501 ! do_facet $SINGLEMDS test -b $(mdsdevname 1); then
2502 skip "Loop devices do not work with the nosvc option"
2507 local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
2509 start $SINGLEMDS $MDSDEV $MDS_MOUNT_OPTS -o nosvc -n
2510 start ost1 `ostdevname 1` $OST_MOUNT_OPTS
2511 start $SINGLEMDS $MDSDEV $MDS_MOUNT_OPTS -o nomgs,force
2513 mount_client $MOUNT || return 1
2516 echo "blah blah" > $MOUNT/$tfile
2519 umount_client $MOUNT
2520 stop ost1 -f || return 201
2521 stop_mds -f || return 202
2522 stop_mds -f || return 203
2523 unload_modules_conf || return 204
2526 run_test 41a "mount mds with --nosvc and --nomgs"
2529 if [ $(facet_fstype $SINGLEMDS) == ldiskfs ] &&
2530 ! do_facet $SINGLEMDS test -b $(mdsdevname 1); then
2531 skip "Loop devices do not work with the nosvc option"
2535 ! combined_mgs_mds && skip "needs combined mgs device" && return 0
2539 local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
2541 start $SINGLEMDS $MDSDEV $MDS_MOUNT_OPTS -o nosvc -n
2543 start $SINGLEMDS $MDSDEV $MDS_MOUNT_OPTS -o nomgs,force
2545 mount_client $MOUNT || return 1
2548 echo "blah blah" > $MOUNT/$tfile
2549 cat $MOUNT/$tfile || return 200
2551 umount_client $MOUNT
2552 stop_ost || return 201
2553 stop_mds -f || return 202
2554 stop_mds -f || return 203
2557 run_test 41b "mount mds with --nosvc and --nomgs on first mount"
2559 test_42() { #bug 14693
2561 check_mount || error "client was not mounted"
2563 do_facet mgs $LCTL conf_param $FSNAME.llite.some_wrong_param=10
2564 umount_client $MOUNT ||
2565 error "unmounting client failed with invalid llite param"
2566 mount_client $MOUNT ||
2567 error "mounting client failed with invalid llite param"
2569 do_facet mgs $LCTL conf_param $FSNAME.sys.some_wrong_param=20
2570 cleanup || error "stopping $FSNAME failed with invalid sys param"
2573 check_mount || error "client was not mounted with invalid sys param"
2574 cleanup || error "stopping $FSNAME failed with invalid sys param"
2577 run_test 42 "allow client/server mount/unmount with invalid config param"
2580 [ $UID -ne 0 -o $RUNAS_ID -eq 0 ] && skip_env "must run as root with a non-root RUNAS_ID"
2583 USER1=$(grep ":$ID1:$ID1:" /etc/passwd | cut -d: -f1)
2584 [ -z "$USER1" ] && skip_env "missing user with uid=$ID1 gid=$ID1" &&
2588 chmod ugo+x $DIR || error "chmod 0 failed"
2589 set_conf_param_and_check mds \
2590 "lctl get_param -n mdt.$FSNAME-MDT0000.root_squash" \
2591 "$FSNAME.mdt.root_squash" \
2593 wait_update $HOSTNAME \
2594 "lctl get_param -n llite.${FSNAME}*.root_squash" \
2596 error "check llite root_squash failed!"
2597 set_conf_param_and_check mds \
2598 "lctl get_param -n mdt.$FSNAME-MDT0000.nosquash_nids" \
2599 "$FSNAME.mdt.nosquash_nids" \
2601 wait_update $HOSTNAME \
2602 "lctl get_param -n llite.${FSNAME}*.nosquash_nids" \
2604 error "check llite nosquash_nids failed!"
2607 # create set of test files
2609 echo "111" > $DIR/$tfile-userfile || error "write 1 failed"
2610 chmod go-rw $DIR/$tfile-userfile || error "chmod 1 failed"
2611 chown $RUNAS_ID.$RUNAS_ID $DIR/$tfile-userfile || error "chown failed"
2613 echo "222" > $DIR/$tfile-rootfile || error "write 2 failed"
2614 chmod go-rw $DIR/$tfile-rootfile || error "chmod 2 failed"
2616 mkdir $DIR/$tdir-rootdir -p || error "mkdir failed"
2617 chmod go-rwx $DIR/$tdir-rootdir || error "chmod 3 failed"
2618 touch $DIR/$tdir-rootdir/tfile-1 || error "touch failed"
2620 echo "777" > $DIR/$tfile-user1file || error "write 7 failed"
2621 chmod go-rw $DIR/$tfile-user1file || error "chmod 7 failed"
2622 chown $ID1.$ID1 $DIR/$tfile-user1file || error "chown failed"
2625 # check root_squash:
2626 # set root squash UID:GID to RUNAS_ID
2627 # root should be able to access only files owned by RUNAS_ID
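# A minimal sketch of the equivalent manual tuning (illustrative only; the
# test drives it through set_conf_param_and_check and wait_update below):
#   lctl conf_param $FSNAME.mdt.root_squash="$RUNAS_ID:$RUNAS_ID"    # on the MGS
#   lctl get_param -n mdt.$FSNAME-MDT0000.root_squash                # verify on the MDS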
2629 set_conf_param_and_check mds \
2630 "lctl get_param -n mdt.$FSNAME-MDT0000.root_squash" \
2631 "$FSNAME.mdt.root_squash" \
2632 "$RUNAS_ID:$RUNAS_ID"
2633 wait_update $HOSTNAME \
2634 "lctl get_param -n llite.${FSNAME}*.root_squash" \
2635 "$RUNAS_ID:$RUNAS_ID" ||
2636 error "check llite root_squash failed!"
2638 ST=$(stat -c "%n: owner uid %u (%A)" $DIR/$tfile-userfile)
2639 dd if=$DIR/$tfile-userfile 1>/dev/null 2>/dev/null || \
2640 error "$ST: root read permission is denied"
2641 echo "$ST: root read permission is granted - ok"
2644 dd conv=notrunc of=$DIR/$tfile-userfile 1>/dev/null 2>/dev/null || \
2645 error "$ST: root write permission is denied"
2646 echo "$ST: root write permission is granted - ok"
2648 ST=$(stat -c "%n: owner uid %u (%A)" $DIR/$tfile-rootfile)
2649 dd if=$DIR/$tfile-rootfile 1>/dev/null 2>/dev/null && \
2650 error "$ST: root read permission is granted"
2651 echo "$ST: root read permission is denied - ok"
2654 dd conv=notrunc of=$DIR/$tfile-rootfile 1>/dev/null 2>/dev/null && \
2655 error "$ST: root write permission is granted"
2656 echo "$ST: root write permission is denied - ok"
2658 ST=$(stat -c "%n: owner uid %u (%A)" $DIR/$tdir-rootdir)
2659 rm $DIR/$tdir-rootdir/tfile-1 1>/dev/null 2>/dev/null && \
2660 error "$ST: root unlink permission is granted"
2661 echo "$ST: root unlink permission is denied - ok"
2663 touch $DIR/$tdir-rootdir/tfile-2 1>/dev/null 2>/dev/null && \
2664 error "$ST: root create permission is granted"
2665 echo "$ST: root create permission is denied - ok"
2669 # check root_squash is enforced independently
2670 # of client cache content
2672 # access file by USER1, keep access open
2673 # root should be denied access to user file
2675 runas -u $ID1 tail -f $DIR/$tfile-user1file 1>/dev/null 2>&1 &
2679 ST=$(stat -c "%n: owner uid %u (%A)" $DIR/$tfile-user1file)
2680 dd if=$DIR/$tfile-user1file 1>/dev/null 2>&1 &&
2681 { kill $pid; error "$ST: root read permission is granted"; }
2682 echo "$ST: root read permission is denied - ok"
2685 dd conv=notrunc of=$DIR/$tfile-user1file 1>/dev/null 2>&1 &&
2686 { kill $pid; error "$ST: root write permission is granted"; }
2687 echo "$ST: root write permission is denied - ok"
2693 # check nosquash_nids:
2694 # put client's NID into nosquash_nids list,
2695 # root should be able to access root file after that
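# Equivalent manual form (illustrative; the NID list itself is built just below):
#   lctl conf_param $FSNAME-MDTall.mdt.nosquash_nids="$NIDLIST"      # on the MGS
#   lctl get_param -n mdt.$FSNAME-MDT0000.nosquash_nids              # verify on the MDS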
2697 local NIDLIST=$(lctl list_nids all | tr '\n' ' ')
2698 NIDLIST="2@elan $NIDLIST 192.168.0.[2,10]@tcp"
2699 NIDLIST=$(echo $NIDLIST | tr -s ' ' ' ')
2700 set_conf_param_and_check mds \
2701 "lctl get_param -n mdt.$FSNAME-MDT0000.nosquash_nids" \
2702 "$FSNAME-MDTall.mdt.nosquash_nids" \
2704 wait_update $HOSTNAME \
2705 "lctl get_param -n llite.${FSNAME}*.nosquash_nids" \
2707 error "check llite nosquash_nids failed!"
2709 ST=$(stat -c "%n: owner uid %u (%A)" $DIR/$tfile-rootfile)
2710 dd if=$DIR/$tfile-rootfile 1>/dev/null 2>/dev/null || \
2711 error "$ST: root read permission is denied"
2712 echo "$ST: root read permission is granted - ok"
2715 dd conv=notrunc of=$DIR/$tfile-rootfile 1>/dev/null 2>/dev/null || \
2716 error "$ST: root write permission is denied"
2717 echo "$ST: root write permission is granted - ok"
2719 ST=$(stat -c "%n: owner uid %u (%A)" $DIR/$tdir-rootdir)
2720 rm $DIR/$tdir-rootdir/tfile-1 || \
2721 error "$ST: root unlink permission is denied"
2722 echo "$ST: root unlink permission is granted - ok"
2723 touch $DIR/$tdir-rootdir/tfile-2 || \
2724 error "$ST: root create permission is denied"
2725 echo "$ST: root create permission is granted - ok"
2729 run_test 43 "check root_squash and nosquash_nids"
2731 umount_client $MOUNT
2736 check_mount || return 2
2737 UUID=$($LCTL get_param llite.${FSNAME}*.uuid | cut -d= -f2)
2739 UUIDS=$(do_facet $SINGLEMDS "$LCTL get_param mdt.${FSNAME}*.exports.*.uuid")
2740 for VAL in $UUIDS; do
2741 NID=$(echo $VAL | cut -d= -f1)
2742 CLUUID=$(echo $VAL | cut -d= -f2)
2743 [ "$UUID" = "$CLUUID" ] && STATS_FOUND=yes && break
2745 [ "$STATS_FOUND" = "no" ] && error "stats not found for client"
2749 run_test 44 "mounted client proc entry exists"
2753 check_mount || return 2
2758 #define OBD_FAIL_PTLRPC_LONG_UNLINK 0x50f
2759 do_facet client "lctl set_param fail_loc=0x50f"
2762 manual_umount_client --force || return 3
2763 do_facet client "lctl set_param fail_loc=0x0"
2765 mount_client $MOUNT || return 4
2769 run_test 45 "long unlink handling in ptlrpcd"
2776 umount_client $MOUNT2 || rc=$?
2777 umount_client $MOUNT || rc=$?
2778 while [ $count -gt 0 ]; do
2779 stop ost${count} -f || rc=$?
2783 cleanup_nocli || rc=$?
2784 #writeconf to remove all ost2 traces for subsequent tests
2785 writeconf_or_reformat
2790 echo "Testing with $OSTCOUNT OSTs"
2792 start_mds || return 1
2793 #first client should see only one ost
2794 start_ost || return 2
2795 wait_osc_import_state mds ost FULL
2797 mount_client $MOUNT || return 3
2798 trap "cleanup_46a $OSTCOUNT" EXIT ERR
2801 for (( i=2; i<=$OSTCOUNT; i++ )); do
2802 start ost$i `ostdevname $i` $OST_MOUNT_OPTS || return $((i+2))
2805 # wait until OSTs are in sync
2806 for (( i=2; i<=$OSTCOUNT; i++ )); do
2807 wait_osc_import_state mds ost$i FULL
2808 wait_osc_import_state client ost$i FULL
2811 # second client should see all OSTs
2813 mount_client $MOUNT2 || return 8
2814 $LFS setstripe -c -1 $MOUNT2 || return 9
2815 $LFS getstripe $MOUNT2 || return 10
2817 echo "ok" > $MOUNT2/widestripe
2818 $LFS getstripe $MOUNT2/widestripe || return 11
2819 # fill the ACL buffer to avoid expanding the LSM into it
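# Each entry produced by the loop below has the form "u:<user>:rwx"; for the
# first passwd entry it effectively runs something like (illustrative only):
#   setfacl -m u:root:rwx $MOUNT2/widestripe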
2820 awk -F : '{if (FNR < 25) { print "u:"$1":rwx" }}' /etc/passwd | while read acl; do
2821 setfacl -m $acl $MOUNT2/widestripe
2825 stat $MOUNT/widestripe || return 12
2827 cleanup_46a $OSTCOUNT || { echo "cleanup_46a failed!" && return 13; }
2830 run_test 46a "handle ost additional - wide striped file"
2835 check_mount || return 2
2836 $LCTL set_param ldlm.namespaces.$FSNAME-*-*-*.lru_size=100
2840 for ns in $($LCTL get_param ldlm.namespaces.$FSNAME-*-*-*.lru_size); do
2841 if echo $ns | grep "MDT[[:digit:]]*"; then
2844 lrs=$(echo $ns | sed 's/.*lru_size=//')
2845 lru_size[count]=$lrs
2850 facet_failover $SINGLEMDS
2851 client_up || return 3
2854 for ns in $($LCTL get_param ldlm.namespaces.$FSNAME-*-*-*.lru_size); do
2855 if echo $ns | grep "MDT[[:digit:]]*"; then
2858 lrs=$(echo $ns | sed 's/.*lru_size=//')
2859 if ! test "$lrs" -eq "${lru_size[count]}"; then
2860 n=$(echo $ns | sed -e 's/ldlm.namespaces.//' -e 's/.lru_size=.*//')
2861 error "$n has lost lru_size: $lrs vs. ${lru_size[count]}"
2869 run_test 47 "server restart does not make client lose lru_resize settings"
2874 # reformat after this test is needed - if the test fails,
2875 # we will be left with an unkillable file in the FS
2879 test_48() { # bug 17636
2882 check_mount || return 2
2884 $LFS setstripe -c -1 $MOUNT || return 9
2885 $LFS getstripe $MOUNT || return 10
2887 echo "ok" > $MOUNT/widestripe
2888 $LFS getstripe $MOUNT/widestripe || return 11
2890 trap cleanup_48 EXIT ERR
2892 # fill the ACL buffer to avoid expanding the LSM into it
2893 getent passwd | awk -F : '{ print "u:"$1":rwx" }' | while read acl; do
2894 setfacl -m $acl $MOUNT/widestripe
2897 stat $MOUNT/widestripe || return 12
2902 run_test 48 "too many acls on file"
2904 # check the PARAM_SYS_LDLM_TIMEOUT option of mkfs.lustre
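# A hedged sketch of how the parameter reaches the targets at format time,
# assuming the framework forwards LDLM_TIMEOUT into the mkfs options:
#   mkfs.lustre --mdt ... --param sys.ldlm_timeout=20 ...
# The checks below then expect the runtime ldlm_timeout to follow it.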
2905 test_49a() { # bug 17710
2906 local timeout_orig=$TIMEOUT
2907 local ldlm_timeout_orig=$LDLM_TIMEOUT
2908 local LOCAL_TIMEOUT=20
2910 LDLM_TIMEOUT=$LOCAL_TIMEOUT
2911 TIMEOUT=$LOCAL_TIMEOUT
2915 check_mount || error "client mount failed"
2917 echo "check ldlm_timeout..."
2918 local LDLM_MDS="$(do_facet $SINGLEMDS lctl get_param -n ldlm_timeout)"
2919 local LDLM_OST1="$(do_facet ost1 lctl get_param -n ldlm_timeout)"
2920 local LDLM_CLIENT="$(do_facet client lctl get_param -n ldlm_timeout)"
2922 if [ $LDLM_MDS -ne $LDLM_OST1 -o $LDLM_MDS -ne $LDLM_CLIENT ]; then
2923 error "Different LDLM_TIMEOUT:$LDLM_MDS $LDLM_OST1 $LDLM_CLIENT"
2926 if [ $LDLM_MDS -ne $((LOCAL_TIMEOUT / 3)) ]; then
2927 error "LDLM_TIMEOUT($LDLM_MDS) is not $((LOCAL_TIMEOUT / 3))"
2930 umount_client $MOUNT
2931 stop_ost || error "problem stopping OSS"
2932 stop_mds || error "problem stopping MDS"
2934 LDLM_TIMEOUT=$ldlm_timeout_orig
2935 TIMEOUT=$timeout_orig
2937 run_test 49a "check PARAM_SYS_LDLM_TIMEOUT option of mkfs.lustre"
2939 test_49b() { # bug 17710
2940 local timeout_orig=$TIMEOUT
2941 local ldlm_timeout_orig=$LDLM_TIMEOUT
2942 local LOCAL_TIMEOUT=20
2944 LDLM_TIMEOUT=$((LOCAL_TIMEOUT - 1))
2945 TIMEOUT=$LOCAL_TIMEOUT
2949 check_mount || error "client mount failed"
2951 local LDLM_MDS="$(do_facet $SINGLEMDS lctl get_param -n ldlm_timeout)"
2952 local LDLM_OST1="$(do_facet ost1 lctl get_param -n ldlm_timeout)"
2953 local LDLM_CLIENT="$(do_facet client lctl get_param -n ldlm_timeout)"
2955 if [ $LDLM_MDS -ne $LDLM_OST1 -o $LDLM_MDS -ne $LDLM_CLIENT ]; then
2956 error "Different LDLM_TIMEOUT:$LDLM_MDS $LDLM_OST1 $LDLM_CLIENT"
2959 if [ $LDLM_MDS -ne $((LOCAL_TIMEOUT - 1)) ]; then
2960 error "LDLM_TIMEOUT($LDLM_MDS) is not $((LOCAL_TIMEOUT - 1))"
2963 cleanup || error "cleanup failed"
2965 LDLM_TIMEOUT=$ldlm_timeout_orig
2966 TIMEOUT=$timeout_orig
2968 run_test 49b "check PARAM_SYS_LDLM_TIMEOUT option of mkfs.lustre"
2971 # Test both statfs and lfs df and fail if either one fails
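# Typical use, as in tests 50a-50d below:
#   lazystatfs $MOUNT || error "lazystatfs failed"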
2972 multiop_bg_pause $1 f_
2975 killall -USR1 multiop
2976 [ $RC1 -ne 0 ] && log "lazystatfs multiop failed"
2977 wait $PID || { RC1=$?; log "multiop returned error"; }
2984 if [ $RC2 -eq 0 ]; then
2986 log "lazystatfs df failed"
2990 [[ $RC1 -ne 0 || $RC2 -eq 0 ]] && RC=1
2996 lctl set_param llite.$FSNAME-*.lazystatfs=1
2999 lazystatfs $MOUNT || error "lazystatfs failed but no down servers"
3001 cleanup || return $?
3003 run_test 50a "lazystatfs all servers available =========================="
3007 lctl set_param llite.$FSNAME-*.lazystatfs=1
3010 # Wait for client to detect down OST
3011 stop_ost || error "Unable to stop OST1"
3012 wait_osc_import_state mds ost DISCONN
3014 lazystatfs $MOUNT || error "lazystatfs should not have returned EIO"
3016 umount_client $MOUNT || error "Unable to unmount client"
3017 stop_mds || error "Unable to stop MDS"
3019 run_test 50b "lazystatfs all servers down =========================="
3022 start_mds || error "Unable to start MDS"
3023 start_ost || error "Unable to start OST1"
3024 start_ost2 || error "Unable to start OST2"
3025 mount_client $MOUNT || error "Unable to mount client"
3026 lctl set_param llite.$FSNAME-*.lazystatfs=1
3029 # Wait for client to detect down OST
3030 stop_ost || error "Unable to stop OST1"
3031 wait_osc_import_state mds ost DISCONN
3032 lazystatfs $MOUNT || error "lazystatfs failed with one down server"
3034 umount_client $MOUNT || error "Unable to unmount client"
3035 stop_ost2 || error "Unable to stop OST2"
3036 stop_mds || error "Unable to stop MDS"
3037 #writeconf to remove all ost2 traces for subsequent tests
3038 writeconf_or_reformat
3040 run_test 50c "lazystatfs one server down =========================="
3043 start_mds || error "Unable to start MDS"
3044 start_ost || error "Unable to start OST1"
3045 start_ost2 || error "Unable to start OST2"
3046 mount_client $MOUNT || error "Unable to mount client"
3047 lctl set_param llite.$FSNAME-*.lazystatfs=1
3050 # Issue the statfs during the window where the client still
3051 # believes the OST to be available but it is in fact down.
3052 # No failure, just a statfs which hangs for a timeout interval.
3053 stop_ost || error "Unable to stop OST1"
3054 lazystatfs $MOUNT || error "lazystatfs failed with one down server"
3056 umount_client $MOUNT || error "Unable to unmount client"
3057 stop_ost2 || error "Unable to stop OST2"
3058 stop_mds || error "Unable to stop MDS"
3059 #writeconf to remove all ost2 traces for subsequent tests
3060 writeconf_or_reformat
3062 run_test 50d "lazystatfs client/server conn race =========================="
3069 start_mds || return 1
3070 #first client should see only one ost
3071 start_ost || return 2
3072 wait_osc_import_state mds ost FULL
3074 # Wait for client to detect down OST
3075 stop_ost || error "Unable to stop OST1"
3076 wait_osc_import_state mds ost DISCONN
3078 mount_client $MOUNT || error "Unable to mount client"
3079 lctl set_param llite.$FSNAME-*.lazystatfs=0
3081 multiop_bg_pause $MOUNT _f
3085 if [ $RC1 -ne 0 ]; then
3086 log "multiop failed $RC1"
3089 sleep $(( $TIMEOUT+1 ))
3091 [ $? -ne 0 ] && error "process isn't sleeping"
3092 start_ost || error "Unable to start OST1"
3093 wait $pid || error "statfs failed"
3096 umount_client $MOUNT || error "Unable to unmount client"
3097 stop_ost || error "Unable to stop OST1"
3098 stop_mds || error "Unable to stop MDS"
3100 run_test 50e "normal statfs all servers down =========================="
3105 CONN_PROC="osc.$FSNAME-OST0001-osc-[M]*.ost_server_uuid"
3107 start_mds || error "Unable to start mds"
3108 #first client should see only one ost
3109 start_ost || error "Unable to start OST1"
3110 wait_osc_import_state mds ost FULL
3112 start_ost2 || error "Unable to start OST2"
3113 wait_osc_import_state mds ost2 FULL
3115 # Wait for client to detect down OST
3116 stop_ost2 || error "Unable to stop OST2"
3118 wait_osc_import_state mds ost2 DISCONN
3119 mount_client $MOUNT || error "Unable to mount client"
3120 lctl set_param llite.$FSNAME-*.lazystatfs=0
3122 multiop_bg_pause $MOUNT _f
3126 if [ $RC1 -ne 0 ]; then
3127 log "multiop failed $RC1"
3130 sleep $(( $TIMEOUT+1 ))
3132 [ $? -ne 0 ] && error "process isn't sleeping"
3133 start_ost2 || error "Unable to start OST2"
3134 wait $pid || error "statfs failed"
3135 stop_ost2 || error "Unable to stop OST2"
3138 umount_client $MOUNT || error "Unable to unmount client"
3139 stop_ost || error "Unable to stop OST1"
3140 stop_mds || error "Unable to stop MDS"
3141 #writeconf to remove all ost2 traces for subsequent tests
3142 writeconf_or_reformat
3144 run_test 50f "normal statfs with one server down =========================="
3147 [ "$OSTCOUNT" -lt "2" ] && skip_env "$OSTCOUNT < 2, skipping" && return
3149 start_ost2 || error "Unable to start OST2"
3150 wait_osc_import_state mds ost2 FULL
3151 wait_osc_import_state client ost2 FULL
3153 local PARAM="${FSNAME}-OST0001.osc.active"
3155 $LFS setstripe -c -1 $DIR/$tfile || error "Unable to lfs setstripe"
3156 do_facet mgs $LCTL conf_param $PARAM=0 || error "Unable to deactivate OST"
3158 umount_client $MOUNT || error "Unable to unmount client"
3159 mount_client $MOUNT || error "Unable to mount client"
3160 # This df should not cause a panic
3163 do_facet mgs $LCTL conf_param $PARAM=1 || error "Unable to activate OST"
3165 umount_client $MOUNT || error "Unable to unmount client"
3166 stop_ost2 || error "Unable to stop OST2"
3167 stop_ost || error "Unable to stop OST1"
3168 stop_mds || error "Unable to stop MDS"
3169 #writeconf to remove all ost2 traces for subsequent tests
3170 writeconf_or_reformat
3172 run_test 50g "deactivated OST should not cause panic====================="
3176 # prepare MDT/OST, make OSC inactive for OST1
3177 [ "$OSTCOUNT" -lt "2" ] && skip_env "$OSTCOUNT < 2, skipping" && return
3179 [ $(facet_fstype ost1) == zfs ] && import_zpool ost1
3180 do_facet ost1 "$TUNEFS --param osc.active=0 `ostdevname 1`" ||
3181 error "tunefs OST1 failed"
3182 start_mds || error "Unable to start MDT"
3183 start_ost || error "Unable to start OST1"
3184 start_ost2 || error "Unable to start OST2"
3185 mount_client $MOUNT || error "client start failed"
3189 # activate the OSC for OST1
3190 local TEST="$LCTL get_param -n osc.${FSNAME}-OST0000-osc-[!M]*.active"
3191 set_conf_param_and_check client \
3192 "$TEST" "${FSNAME}-OST0000.osc.active" 1 ||
3193 error "Unable to activate OST1"
3195 mkdir -p $DIR/$tdir/2
3196 $LFS setstripe -c -1 -i 0 $DIR/$tdir/2
3197 sleep 1 && echo "create a file after OST1 is activated"
3199 createmany -o $DIR/$tdir/2/$tfile-%d 1
3201 # check OSC import is working
3202 stat $DIR/$tdir/2/* >/dev/null 2>&1 ||
3203 error "some OSC imports are still not connected"
3206 umount_client $MOUNT || error "Unable to umount client"
3207 stop_ost2 || error "Unable to stop OST2"
3210 run_test 50h "LU-642: activate deactivated OST ==="
3213 local LOCAL_TIMEOUT=20
3217 check_mount || return 1
3220 $LFS setstripe -c -1 $MOUNT/d1
3221 #define OBD_FAIL_MDS_REINT_DELAY 0x142
3222 do_facet $SINGLEMDS "lctl set_param fail_loc=0x142"
3223 touch $MOUNT/d1/f1 &
3226 start_ost2 || return 2
3228 stop_ost2 || return 3
3230 #writeconf to remove all ost2 traces for subsequent tests
3231 writeconf_or_reformat
3233 run_test 51 "Verify that mdt_reint handles RMF_MDT_MD correctly when an OST is added"
3242 do_node $node mkdir -p $dest
3243 [ $? -eq 0 ] || { error "Unable to create directory"; return 1; }
3245 do_node $node 'tar cf - '$@' | tar xf - -C '$dest';
3246 [ \"\${PIPESTATUS[*]}\" = \"0 0\" ] || exit 1'
3247 [ $? -eq 0 ] || { error "Unable to tar files"; return 2; }
3249 do_node $node 'getfattr -d -m "[a-z]*\\." '$@' > '$xattrs
3250 [ $? -eq 0 ] || { error "Unable to read xattrs"; return 3; }
3260 local backup2=${TMP}/backup2
3262 do_node $node mkdir -p $backup2
3263 [ $? -eq 0 ] || { error "Unable to create directory"; return 1; }
3265 do_node $node 'tar cf - '$@' | tar xf - -C '$backup2';
3266 [ \"\${PIPESTATUS[*]}\" = \"0 0\" ] || exit 1'
3267 [ $? -eq 0 ] || { error "Unable to tar files to diff"; return 2; }
3269 do_node $node "diff -rq $backup $backup2"
3270 [ $? -eq 0 ] || { error "contents differ"; return 3; }
3272 local xattrs2=${TMP}/xattrs2
3273 do_node $node 'getfattr -d -m "[a-z]*\\." '$@' > '$xattrs2
3274 [ $? -eq 0 ] || { error "Unable to read xattrs to diff"; return 4; }
3276 do_node $node "diff $xattrs $xattrs2"
3277 [ $? -eq 0 ] || { error "xattrs differ"; return 5; }
3279 do_node $node "rm -rf $backup2 $xattrs2"
3280 [ $? -eq 0 ] || { error "Unable to delete temporary files"; return 6; }
3284 if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
3285 skip "Only applicable to ldiskfs-based MDTs"
3290 [ $? -eq 0 ] || { error "Unable to start MDS"; return 1; }
3292 [ $? -eq 0 ] || { error "Unable to start OST1"; return 2; }
3294 [ $? -eq 0 ] || { error "Unable to mount client"; return 3; }
3297 local ost1mnt=$(facet_mntpt ost1)
3298 local ost1node=$(facet_active_host ost1)
3299 local ost1tmp=$TMP/conf52
3303 [ $? -eq 0 ] || { error "Unable to create tdir"; return 4; }