3 # FIXME - there is no reason to use all of these different
4 # return codes, especially when most of them are mapped to something
5 # else anyway. The combination of test number and return code
6 # figures out what failed.
12 # bug number for skipped test: LU-2828
13 ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT 59 64"
14 # UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
18 if [ -r /etc/SuSE-release ]
20 local vers=`grep VERSION /etc/SuSE-release | awk '{print $3}'`
21 local patchlev=`grep PATCHLEVEL /etc/SuSE-release \
23 if [ $vers -eq 11 ] && [ $patchlev -eq 2 ]
31 if is_sles11; then # LU-2181
32 ALWAYS_EXCEPT="$ALWAYS_EXCEPT 23a 34b"
35 if [ "$FAILURE_MODE" = "HARD" ]; then
36 CONFIG_EXCEPTIONS="24a " && \
37 echo "Except the tests: $CONFIG_EXCEPTIONS for FAILURE_MODE=$FAILURE_MODE, bug 23573" && \
38 ALWAYS_EXCEPT="$ALWAYS_EXCEPT $CONFIG_EXCEPTIONS"
41 # bug number for skipped test:
42 # a tool to create lustre filesystem images
43 ALWAYS_EXCEPT="32newtarball $ALWAYS_EXCEPT"
46 PATH=$PWD/$SRCDIR:$SRCDIR:$SRCDIR/../utils:$PATH
48 PTLDEBUG=${PTLDEBUG:--1}
50 LUSTRE=${LUSTRE:-`dirname $0`/..}
51 RLUSTRE=${RLUSTRE:-$LUSTRE}
52 export MULTIOP=${MULTIOP:-multiop}
54 . $LUSTRE/tests/test-framework.sh
57 # use small MDS + OST size to speed formatting time
58 # do not use too small MDSSIZE/OSTSIZE, which affect the default journal size
61 . ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
63 if ! combined_mgs_mds; then
64 # bug number for skipped test: 23954
65 ALWAYS_EXCEPT="$ALWAYS_EXCEPT 24b"
68 # STORED_MDSSIZE is used in test_18
69 if [ -n "$MDSSIZE" ]; then
70 STORED_MDSSIZE=$MDSSIZE
73 # pass "-E lazy_itable_init" to mke2fs to speed up the formatting time
74 if [[ "$LDISKFS_MKFS_OPTS" != *lazy_itable_init* ]]; then
75 LDISKFS_MKFS_OPTS=$(csa_add "$LDISKFS_MKFS_OPTS" -E lazy_itable_init)
78 [ $(facet_fstype $SINGLEMDS) = "zfs" ] &&
79 # bug number for skipped test: LU-2778 LU-2059
80 ALWAYS_EXCEPT="$ALWAYS_EXCEPT 57b 50h"
85 require_dsh_mds || exit 0
86 require_dsh_ost || exit 0
88 [ "$SLOW" = "no" ] && EXCEPT_SLOW="30a 31 45"
94 # The MGS must be started before the OSTs for a new fs, so start
95 # and stop to generate the startup logs.
98 wait_osc_import_state mds ost FULL
103 reformat_and_config() {
105 if ! combined_mgs_mds ; then
111 writeconf_or_reformat() {
112 # There are at most 2 OSTs for write_conf test
113 # who knows if/where $TUNEFS is installed?
114 # Better reformat if it fails...
115 writeconf_all $MDSCOUNT 2 ||
116 { echo "tunefs failed, reformatting instead" &&
117 reformat_and_config && return 0; }
127 start mgs $MGSDEV $MGS_MOUNT_OPTS
133 local dev=$(mdsdevname $num)
136 echo "start mds service on `facet_active_host $facet`"
137 start $facet ${dev} $MDS_MOUNT_OPTS $@ || return 94
143 local dev=$(mdsdevname $num)
146 echo "stop mds service on `facet_active_host $facet`"
147 # These tests all use non-failover stop
148 stop $facet -f || return 97
154 for num in $(seq $MDSCOUNT); do
155 start_mdt $num $@ || return 94
160 if ! combined_mgs_mds ; then
168 for num in $(seq $MDSCOUNT); do
169 stop_mdt $num || return 97
174 echo "stop mgs service on `facet_active_host mgs`"
175 # These tests all use non-failover stop
176 stop mgs -f || return 97
180 echo "start ost1 service on `facet_active_host ost1`"
181 start ost1 `ostdevname 1` $OST_MOUNT_OPTS $@ || return 95
185 echo "stop ost1 service on `facet_active_host ost1`"
186 # These tests all use non-failover stop
187 stop ost1 -f || return 98
191 echo "start ost2 service on `facet_active_host ost2`"
192 start ost2 `ostdevname 2` $OST_MOUNT_OPTS $@ || return 92
196 echo "stop ost2 service on `facet_active_host ost2`"
197 # These tests all use non-failover stop
198 stop ost2 -f || return 93
203 echo "mount $FSNAME on ${MOUNTPATH}....."
204 zconf_mount `hostname` $MOUNTPATH || return 96
208 local mountopt="-o remount,$1"
210 echo "remount '$1' lustre on ${MOUNTPATH}....."
211 zconf_mount `hostname` $MOUNTPATH "$mountopt" || return 96
216 echo "umount lustre on ${MOUNTPATH}....."
217 zconf_umount `hostname` $MOUNTPATH || return 97
220 manual_umount_client(){
223 echo "manual umount lustre on ${MOUNT}...."
224 do_facet client "umount -d ${FORCE} $MOUNT"
230 start_mds || error "MDT start failed"
231 start_ost || error "OST start failed"
232 mount_client $MOUNT || error "client start failed"
233 client_up || error "client_up failed"
237 if ! combined_mgs_mds ; then
246 unload_modules_conf () {
247 if combined_mgs_mds || ! local_mode; then
248 unload_modules || return 1
253 stop_ost || return 202
254 stop_mds || return 201
255 unload_modules_conf || return 203
259 umount_client $MOUNT || return 200
260 cleanup_nocli || return $?
264 do_facet client "cp /etc/passwd $DIR/a" || return 71
265 do_facet client "rm $DIR/a" || return 72
266 # make sure lustre is actually mounted (touch will block,
267 # but grep won't, so do it after)
268 do_facet client "grep $MOUNT' ' /proc/mounts > /dev/null" || return 73
269 echo "setup single mount lustre success"
273 do_facet client "touch $DIR/a" || return 71
274 do_facet client "rm $DIR/a" || return 72
275 do_facet client "touch $DIR2/a" || return 73
276 do_facet client "rm $DIR2/a" || return 74
277 echo "setup double mount lustre success"
282 if [ "$ONLY" == "setup" ]; then
287 if [ "$ONLY" == "cleanup" ]; then
294 #create single point mountpoint
300 check_mount || return 41
303 run_test 0 "single mount setup"
306 start_mds || error "MDT start failed"
308 echo "start ost second time..."
309 start_ost && error "2nd OST start should fail"
310 mount_client $MOUNT || error "client start failed"
311 check_mount || return 42
314 run_test 1 "start up ost twice (should return errors)"
318 echo "start mds second time.."
319 start_mdt 1 && error "2nd MDT start should fail"
322 check_mount || return 43
325 run_test 2 "start up mds twice (should return err)"
329 #mount.lustre returns an error if already in mtab
330 mount_client $MOUNT && error "2nd client mount should fail"
331 check_mount || return 44
334 run_test 3 "mount client twice (should return err)"
338 touch $DIR/$tfile || return 85
342 # ok for ost to fail shutdown
343 if [ 202 -ne $eno ]; then
348 run_test 4 "force cleanup ost, then cleanup"
350 test_5a() { # was test_5
352 touch $DIR/$tfile || return 1
353 fuser -m -v $MOUNT && echo "$MOUNT is in use by user space process."
355 stop_mds -f || return 2
357 # cleanup may return an error from the failed
358 # disconnects; for now I'll consider this successful
359 # if all the modules have unloaded.
363 echo "killing umount"
364 kill -TERM $UMOUNT_PID
365 echo "waiting for umount to finish"
367 if grep " $MOUNT " /proc/mounts; then
368 echo "test 5: /proc/mounts after failed umount"
372 echo "killing umount"
373 kill -TERM $UMOUNT_PID
374 echo "waiting for umount to finish"
376 grep " $MOUNT " /proc/mounts && echo "test 5: /proc/mounts after second umount" && return 11
380 # stop_mds is a no-op here, and should not fail
381 cleanup_nocli || return $?
382 # df may have lingering entry
384 # mtab may have lingering entry
388 while [ "$WAIT" -ne "$MAX_WAIT" ]; do
390 grep -q $MOUNT" " /etc/mtab || break
391 echo "Waiting /etc/mtab updated ... "
392 WAIT=$(( WAIT + sleep))
394 [ "$WAIT" -eq "$MAX_WAIT" ] && error "/etc/mtab is not updated in $WAIT secs"
395 echo "/etc/mtab updated in $WAIT secs"
397 run_test 5a "force cleanup mds, then cleanup"
405 grep " $MOUNT " /etc/mtab && \
406 error false "unexpected entry in mtab before mount" && return 10
410 if ! combined_mgs_mds ; then
411 trap cleanup_5b EXIT ERR
416 [ -d $MOUNT ] || mkdir -p $MOUNT
417 mount_client $MOUNT && rc=1
418 grep " $MOUNT " /etc/mtab && \
419 error "$MOUNT entry in mtab after failed mount" && rc=11
421 # stop_mds is a no-op here, and should not fail
422 cleanup_nocli || rc=$?
423 if ! combined_mgs_mds ; then
428 run_test 5b "Try to start a client with no MGS (should return errs)"
431 grep " $MOUNT " /etc/mtab && \
432 error false "unexpected entry in mtab before mount" && return 10
437 [ -d $MOUNT ] || mkdir -p $MOUNT
438 local oldfs="${FSNAME}"
439 FSNAME="wrong.${FSNAME}"
440 mount_client $MOUNT || :
442 grep " $MOUNT " /etc/mtab && \
443 error "$MOUNT entry in mtab after failed mount" && rc=11
445 cleanup_nocli || rc=$?
448 run_test 5c "cleanup after failed mount (bug 2712) (should return errs)"
451 grep " $MOUNT " /etc/mtab && \
452 error false "unexpected entry in mtab before mount" && return 10
454 [ "$(facet_fstype ost1)" = "zfs" ] &&
455 skip "LU-2059: no local config for ZFS OSTs" && return
461 mount_client $MOUNT || rc=1
463 grep " $MOUNT " /etc/mtab && \
464 error "$MOUNT entry in mtab after unmount" && rc=11
467 run_test 5d "mount with ost down"
470 grep " $MOUNT " /etc/mtab && \
471 error false "unexpected entry in mtab before mount" && return 10
477 #define OBD_FAIL_PTLRPC_DELAY_SEND 0x506
478 do_facet client "lctl set_param fail_loc=0x80000506"
479 mount_client $MOUNT || echo "mount failed (not fatal)"
481 grep " $MOUNT " /etc/mtab && \
482 error "$MOUNT entry in mtab after unmount" && rc=11
485 run_test 5e "delayed connect, don't crash (bug 10268)"
488 if combined_mgs_mds ; then
489 skip "combined mgs and mds"
493 grep " $MOUNT " /etc/mtab && \
494 error false "unexpected entry in mtab before mount" && return 10
498 [ -d $MOUNT ] || mkdir -p $MOUNT
499 mount_client $MOUNT &
501 echo client_mount pid is $pid
505 if ! ps -f -p $pid >/dev/null; then
508 grep " $MOUNT " /etc/mtab && echo "test 5f: mtab after mount"
509 error "mount returns $rc, expected to hang"
518 # mount should succeed after start mds
521 [ $rc -eq 0 ] || error "mount returned $rc"
522 grep " $MOUNT " /etc/mtab && echo "test 5f: mtab after mount"
526 run_test 5f "mds down, cleanup after failed mount (bug 2712)"
531 mount_client ${MOUNT} || return 87
532 touch $DIR/a || return 86
535 run_test 6 "manual umount, then mount again"
540 cleanup_nocli || return $?
542 run_test 7 "manual umount, then cleanup"
547 check_mount2 || return 45
548 umount_client $MOUNT2
551 run_test 8 "double mount setup"
556 do_facet ost1 lctl set_param debug=\'inode trace\' || return 1
557 do_facet ost1 lctl set_param subsystem_debug=\'mds ost\' || return 1
559 CHECK_PTLDEBUG="`do_facet ost1 lctl get_param -n debug`"
560 if [ "$CHECK_PTLDEBUG" ] && { \
561 [ "$CHECK_PTLDEBUG" = "trace inode warning error emerg console" ] ||
562 [ "$CHECK_PTLDEBUG" = "trace inode" ]; }; then
563 echo "lnet.debug success"
565 echo "lnet.debug: want 'trace inode', have '$CHECK_PTLDEBUG'"
568 CHECK_SUBSYS="`do_facet ost1 lctl get_param -n subsystem_debug`"
569 if [ "$CHECK_SUBSYS" ] && [ "$CHECK_SUBSYS" = "mds ost" ]; then
570 echo "lnet.subsystem_debug success"
572 echo "lnet.subsystem_debug: want 'mds ost', have '$CHECK_SUBSYS'"
575 stop_ost || return $?
577 run_test 9 "test ptldebug and subsystem for mkfs"
585 do_facet $facet "test -b $dev" || rc=1
586 if [[ "$size" ]]; then
587 local in=$(do_facet $facet "dd if=$dev of=/dev/null bs=1k count=1 skip=$size 2>&1" |\
588 awk '($3 == "in") { print $1 }')
589 [[ $in = "1+0" ]] || rc=1
595 # Test 16 was to "verify that lustre will correct the mode of OBJECTS".
596 # But with new MDS stack we don't care about the mode of local objects
597 # anymore, so this test is removed. See bug 22944 for more details.
601 if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
602 skip "Only applicable to ldiskfs-based MDTs"
607 check_mount || return 41
610 echo "Remove mds config log"
611 if ! combined_mgs_mds ; then
615 do_facet mgs "$DEBUGFS -w -R 'unlink CONFIGS/$FSNAME-MDT0000' \
616 $(mgsdevname) || return \$?" || return $?
618 if ! combined_mgs_mds ; then
623 start_mds && return 42
626 run_test 17 "Verify failed mds_postsetup won't fail assertion (2936) (should return errs)"
629 if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
630 skip "Only applicable to ldiskfs-based MDTs"
634 local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
639 # check if current MDSSIZE is large enough
640 [ $MDSSIZE -ge $MIN ] && OK=1 && myMDSSIZE=$MDSSIZE && \
641 log "use MDSSIZE=$MDSSIZE"
643 # check if the global config has a large enough MDSSIZE
644 [ -z "$OK" -a ! -z "$STORED_MDSSIZE" ] && [ $STORED_MDSSIZE -ge $MIN ] && \
645 OK=1 && myMDSSIZE=$STORED_MDSSIZE && \
646 log "use STORED_MDSSIZE=$STORED_MDSSIZE"
648 # check if the block device is large enough
649 is_blkdev $SINGLEMDS $MDSDEV $MIN
650 local large_enough=$?
651 if [ -n "$OK" ]; then
652 [ $large_enough -ne 0 ] && OK=""
654 [ $large_enough -eq 0 ] && OK=1 && myMDSSIZE=$MIN &&
655 log "use device $MDSDEV with MIN=$MIN"
658 # check if a loopback device has enough space for fs metadata (5%)
660 if [ -z "$OK" ]; then
661 local SPACE=$(do_facet $SINGLEMDS "[ -f $MDSDEV -o ! -e $MDSDEV ] && df -P \\\$(dirname $MDSDEV)" |
662 awk '($1 != "Filesystem") {print $4}')
663 ! [ -z "$SPACE" ] && [ $SPACE -gt $((MIN / 20)) ] && \
664 OK=1 && myMDSSIZE=$MIN && \
665 log "use file $MDSDEV with MIN=$MIN"
668 [ -z "$OK" ] && skip_env "$MDSDEV too small for ${MIN}kB MDS" && return
671 echo "mount mds with large journal..."
673 local OLD_MDSSIZE=$MDSSIZE
677 echo "mount lustre system..."
679 check_mount || return 41
681 echo "check journal size..."
682 local FOUNDSIZE=$(do_facet $SINGLEMDS "$DEBUGFS -c -R 'stat <8>' $MDSDEV" | awk '/Size: / { print $NF; exit;}')
683 if [ $FOUNDSIZE -gt $((32 * 1024 * 1024)) ]; then
684 log "Success: mkfs creates large journals. Size: $((FOUNDSIZE >> 20))M"
686 error "expected journal size > 32M, found $((FOUNDSIZE >> 20))M"
694 run_test 18 "check mkfs creates large journals"
697 start_mds || return 1
698 stop_mds -f || return 2
700 run_test 19a "start/stop MDS without OSTs"
703 [ "$(facet_fstype ost1)" = "zfs" ] &&
704 skip "LU-2059: no local config for ZFS OSTs" && return
706 start_ost || return 1
707 stop_ost -f || return 2
709 run_test 19b "start/stop OSTs without MDS"
712 # first format the ost/mdt
716 check_mount || return 43
718 remount_client ro $MOUNT || return 44
719 touch $DIR/$tfile && echo "$DIR/$tfile created incorrectly" && return 45
720 [ -e $DIR/$tfile ] && echo "$DIR/$tfile exists incorrectly" && return 46
721 remount_client rw $MOUNT || return 47
723 [ ! -f $DIR/$tfile ] && echo "$DIR/$tfile missing" && return 48
724 MCNT=`grep -c $MOUNT /etc/mtab`
725 [ "$MCNT" -ne 1 ] && echo "$MOUNT in /etc/mtab $MCNT times" && return 49
730 run_test 20 "remount ro,rw mounts work and doesn't break /etc/mtab"
735 wait_osc_import_state mds ost FULL
739 run_test 21a "start mds before ost, stop ost first"
742 [ "$(facet_fstype ost1)" = "zfs" ] &&
743 skip "LU-2059: no local config for ZFS OSTs" && return
747 wait_osc_import_state mds ost FULL
751 run_test 21b "start ost before mds, stop mds first"
757 wait_osc_import_state mds ost2 FULL
761 #writeconf to remove all ost2 traces for subsequent tests
762 writeconf_or_reformat
764 run_test 21c "start mds between two osts, stop mds last"
767 if combined_mgs_mds ; then
768 skip "need separate mgs device" && return 0
778 wait_osc_import_state mds ost2 FULL
784 #writeconf to remove all ost2 traces for subsequent tests
785 writeconf_or_reformat
788 run_test 21d "start mgs then ost and then mds"
793 echo Client mount with ost in logs, but none running
795 # wait until mds connected to ost and open client connection
796 wait_osc_import_state mds ost FULL
799 # check_mount will block trying to contact ost
800 mcreate $DIR/$tfile || return 40
801 rm -f $DIR/$tfile || return 42
805 echo Client mount with a running ost
808 # if gss enabled, wait full time to let connection from
809 # mds to ost be established, due to the mismatch between
810 # initial connect timeout and gss context negotiation timeout.
811 # This perhaps could be removed after AT landed.
812 echo "sleep $((TIMEOUT + TIMEOUT + TIMEOUT))s"
813 sleep $((TIMEOUT + TIMEOUT + TIMEOUT))
816 wait_osc_import_state mds ost FULL
817 wait_osc_import_state client ost FULL
818 check_mount || return 41
823 run_test 22 "start a client before osts (should return errs)"
825 test_23a() { # was test_23
829 # force down client so that recovering mds waits for reconnect
830 local running=$(grep -c $MOUNT /proc/mounts) || true
831 if [ $running -ne 0 ]; then
832 echo "Stopping client $MOUNT (opts: -f)"
836 # enter recovery on mds
838 # try to start a new client
839 mount_client $MOUNT &
841 MOUNT_PID=$(ps -ef | grep "t lustre" | grep -v grep | awk '{print $2}')
842 MOUNT_LUSTRE_PID=`ps -ef | grep mount.lustre | grep -v grep | awk '{print $2}'`
843 echo mount pid is ${MOUNT_PID}, mount.lustre pid is ${MOUNT_LUSTRE_PID}
845 ps --ppid $MOUNT_LUSTRE_PID
846 echo "waiting for mount to finish"
848 # "ctrl-c" sends SIGINT, but from a script it usually does not reach the child process
849 # SIGTERM works but it does not spread to offspring processes
850 kill -s TERM $MOUNT_PID
851 kill -s TERM $MOUNT_LUSTRE_PID
852 # we can not wait $MOUNT_PID because it is not a child of this shell
858 while [ "$WAIT" -lt "$MAX_WAIT" ]; do
860 PID1=$(ps -ef | awk '{print $2}' | grep -w $MOUNT_PID)
861 PID2=$(ps -ef | awk '{print $2}' | grep -w $MOUNT_LUSTRE_PID)
864 [ -z "$PID1" -a -z "$PID2" ] && break
865 echo "waiting for mount to finish ... "
866 WAIT=$(( WAIT + sleep))
868 if [ "$WAIT" -eq "$MAX_WAIT" ]; then
869 error "MOUNT_PID $MOUNT_PID and "\
870 "MOUNT_LUSTRE_PID $MOUNT_LUSTRE_PID still not killed in $WAIT secs"
876 run_test 23a "interrupt client during recovery mount delay"
881 test_23b() { # was test_23
884 # Simulate -EINTR during mount OBD_FAIL_LDLM_CLOSE_THREAD
885 lctl set_param fail_loc=0x80000313
889 run_test 23b "Simulate -EINTR during mount"
891 fs2mds_HOST=$mds_HOST
892 fs2ost_HOST=$ost_HOST
894 MDSDEV1_2=$fs2mds_DEV
895 OSTDEV1_2=$fs2ost_DEV
896 OSTDEV2_2=$fs3ost_DEV
900 echo "umount $MOUNT2 ..."
901 umount $MOUNT2 || true
902 echo "stopping fs2mds ..."
903 stop fs2mds -f || true
904 echo "stopping fs2ost ..."
905 stop fs2ost -f || true
909 local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
911 if [ -z "$fs2ost_DEV" -o -z "$fs2mds_DEV" ]; then
912 is_blkdev $SINGLEMDS $MDSDEV && \
913 skip_env "mixed loopback and real device not working" && return
916 [ -n "$ost1_HOST" ] && fs2ost_HOST=$ost1_HOST
918 local fs2mdsdev=$(mdsdevname 1_2)
919 local fs2ostdev=$(ostdevname 1_2)
920 local fs2mdsvdev=$(mdsvdevname 1_2)
921 local fs2ostvdev=$(ostvdevname 1_2)
923 # test 8-char fsname as well
924 local FSNAME2=test1234
926 add fs2mds $(mkfs_opts mds1 ${fs2mdsdev} ) --nomgs --mgsnode=$MGSNID \
927 --fsname=${FSNAME2} --reformat $fs2mdsdev $fs2mdsvdev || exit 10
929 add fs2ost $(mkfs_opts ost1 ${fs2ostdev}) --fsname=${FSNAME2} \
930 --reformat $fs2ostdev $fs2ostvdev || exit 10
933 start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS && trap cleanup_24a EXIT INT
934 start fs2ost $fs2ostdev $OST_MOUNT_OPTS
936 mount -t lustre $MGSNID:/${FSNAME2} $MOUNT2 || return 1
938 check_mount || return 2
939 # files written on 1 should not show up on 2
940 cp /etc/passwd $DIR/$tfile
942 [ -e $MOUNT2/$tfile ] && error "File bleed" && return 7
945 cp /etc/passwd $MOUNT2/b || return 3
946 rm $MOUNT2/b || return 4
947 # 2 is actually mounted
948 grep $MOUNT2' ' /proc/mounts > /dev/null || return 5
950 facet_failover fs2mds
951 facet_failover fs2ost
954 # the MDS must remain up until last MDT
956 MDS=$(do_facet $SINGLEMDS "lctl get_param -n devices" | awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1)
957 [ -z "$MDS" ] && error "No MDT" && return 8
959 cleanup_nocli || return 6
961 run_test 24a "Multiple MDTs on a single node"
964 local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
966 if [ -z "$fs2mds_DEV" ]; then
967 local dev=${SINGLEMDS}_dev
969 is_blkdev $SINGLEMDS $MDSDEV && \
970 skip_env "mixed loopback and real device not working" && return
973 local fs2mdsdev=$(mdsdevname 1_2)
974 local fs2mdsvdev=$(mdsvdevname 1_2)
976 add fs2mds $(mkfs_opts mds1 ${fs2mdsdev} ) --mgs --fsname=${FSNAME}2 \
977 --reformat $fs2mdsdev $fs2mdsvdev || exit 10
979 start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS && return 2
982 run_test 24b "Multiple MGSs on a single node (should return err)"
986 check_mount || return 2
987 local MODULES=$($LCTL modules | awk '{ print $2 }')
988 rmmod $MODULES 2>/dev/null || true
991 run_test 25 "Verify modules are referenced"
995 # we need modules before mount for sysctl, so make sure...
996 do_facet $SINGLEMDS "lsmod | grep -q lustre || modprobe lustre"
997 #define OBD_FAIL_MDS_FS_SETUP 0x135
998 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000135"
999 start_mds && echo MDS started && return 1
1000 lctl get_param -n devices
1001 DEVS=$(lctl get_param -n devices | egrep -v MG | wc -l)
1002 [ $DEVS -gt 0 ] && return 2
1003 # start mds to drop writeconf setting
1004 start_mds || return 3
1005 stop_mds || return 4
1006 unload_modules_conf || return $?
1008 run_test 26 "MDT startup failure cleans LOV (should return errs)"
1011 [ "$(facet_fstype ost1)" = "zfs" ] &&
1012 skip "LU-2059: no local config for ZFS OSTs" && return
1014 start_ost || return 1
1015 start_mds || return 2
1016 echo "Requeue thread should have started: "
1017 ps -e | grep ll_cfg_requeue
1018 set_conf_param_and_check ost1 \
1019 "lctl get_param -n obdfilter.$FSNAME-OST0000.client_cache_seconds" \
1020 "$FSNAME-OST0000.ost.client_cache_seconds" || return 3
1023 run_test 27a "Reacquire MGS lock if OST started first"
1028 local device=$(do_facet $SINGLEMDS "lctl get_param -n devices" |
1029 awk '($3 ~ "mdt" && $4 ~ "MDT0000") { print $4 }')
1031 facet_failover $SINGLEMDS
1032 set_conf_param_and_check $SINGLEMDS \
1033 "lctl get_param -n mdt.$device.identity_acquire_expire" \
1034 "$device.mdt.identity_acquire_expire" || return 3
1035 set_conf_param_and_check client \
1036 "lctl get_param -n mdc.$device-mdc-*.max_rpcs_in_flight"\
1037 "$device.mdc.max_rpcs_in_flight" || return 4
1041 run_test 27b "Reacquire MGS lock after failover"
1045 TEST="lctl get_param -n llite.$FSNAME-*.max_read_ahead_whole_mb"
1046 PARAM="$FSNAME.llite.max_read_ahead_whole_mb"
1048 FINAL=$(($ORIG + 1))
1049 set_conf_param_and_check client "$TEST" "$PARAM" $FINAL || return 3
1050 FINAL=$(($FINAL + 1))
1051 set_conf_param_and_check client "$TEST" "$PARAM" $FINAL || return 4
1052 umount_client $MOUNT || return 200
1055 if [ $RESULT -ne $FINAL ]; then
1056 echo "New config not seen: wanted $FINAL got $RESULT"
1059 echo "New config success: got $RESULT"
1061 set_conf_param_and_check client "$TEST" "$PARAM" $ORIG || return 5
1064 run_test 28 "permanent parameter setting"
1067 [ "$OSTCOUNT" -lt "2" ] && skip_env "$OSTCOUNT < 2, skipping" && return
1068 setup > /dev/null 2>&1
1072 local PARAM="$FSNAME-OST0001.osc.active"
1073 local PROC_ACT="osc.$FSNAME-OST0001-osc-[^M]*.active"
1074 local PROC_UUID="osc.$FSNAME-OST0001-osc-[^M]*.ost_server_uuid"
1076 ACTV=$(lctl get_param -n $PROC_ACT)
1078 set_conf_param_and_check client \
1079 "lctl get_param -n $PROC_ACT" "$PARAM" $DEAC || return 2
1080 # also check ost_server_uuid status
1081 RESULT=$(lctl get_param -n $PROC_UUID | grep DEACTIV)
1082 if [ -z "$RESULT" ]; then
1083 echo "Live client not deactivated: $(lctl get_param -n $PROC_UUID)"
1086 echo "Live client success: got $RESULT"
1090 for num in $(seq $MDSCOUNT); do
1091 local mdtosc=$(get_mdtosc_proc_path mds${num} $FSNAME-OST0001)
1092 local MPROC="osc.$mdtosc.active"
1097 RESULT=$(do_facet mds${num} " lctl get_param -n $MPROC")
1098 [ ${PIPESTATUS[0]} = 0 ] || error "Can't read $MPROC"
1099 if [ $RESULT -eq $DEAC ]; then
1100 echo -n "MDT deactivated also after"
1101 echo "$WAIT sec (got $RESULT)"
1105 if [ $WAIT -eq $MAX ]; then
1106 echo -n "MDT not deactivated: wanted $DEAC"
1110 echo "Waiting $(($MAX - $WAIT))secs for MDT deactivated"
1113 # test new client starts deactivated
1114 umount_client $MOUNT || return 200
1116 RESULT=$(lctl get_param -n $PROC_UUID | grep DEACTIV | grep NEW)
1117 if [ -z "$RESULT" ]; then
1118 echo "New client not deactivated from start: $(lctl get_param -n $PROC_UUID)"
1121 echo "New client success: got $RESULT"
1124 # make sure it reactivates
1125 set_conf_param_and_check client \
1126 "lctl get_param -n $PROC_ACT" "$PARAM" $ACTV || return 6
1128 umount_client $MOUNT
1131 #writeconf to remove all ost2 traces for subsequent tests
1132 writeconf_or_reformat
1134 run_test 29 "permanently remove an OST"
1139 echo Big config llog
1140 TEST="lctl get_param -n llite.$FSNAME-*.max_read_ahead_whole_mb"
1142 LIST=(1 2 3 4 5 4 3 2 1 2 3 4 5 4 3 2 1 2 3 4 5)
1143 for i in ${LIST[@]}; do
1144 set_conf_param_and_check client "$TEST" \
1145 "$FSNAME.llite.max_read_ahead_whole_mb" $i || return 3
1147 # make sure client restart still works
1148 umount_client $MOUNT
1149 mount_client $MOUNT || return 4
1150 [ "$($TEST)" -ne "$i" ] && error "Param didn't stick across restart $($TEST) != $i"
1153 echo Erase parameter setting
1154 do_facet mgs "$LCTL conf_param -d $FSNAME.llite.max_read_ahead_whole_mb" || return 6
1155 umount_client $MOUNT
1156 mount_client $MOUNT || return 6
1158 echo "deleted (default) value=$FINAL, orig=$ORIG"
1159 # assumes this parameter started at the default value
1160 [ "$FINAL" -eq "$ORIG" ] || fail "Deleted value=$FINAL, orig=$ORIG"
1164 run_test 30a "Big config llog and conf_param deletion"
1169 # Make a fake nid. Use the OST nid, and add 20 to the least significant
1170 # numerical part of it. Hopefully that's not already a failover address for
1172 OSTNID=$(do_facet ost1 "$LCTL get_param nis" | tail -1 | awk '{print $1}')
1173 ORIGVAL=$(echo $OSTNID | egrep -oi "[0-9]*@")
1174 NEWVAL=$((($(echo $ORIGVAL | egrep -oi "[0-9]*") + 20) % 256))
1175 NEW=$(echo $OSTNID | sed "s/$ORIGVAL/$NEWVAL@/")
1176 echo "Using fake nid $NEW"
1178 TEST="$LCTL get_param -n osc.$FSNAME-OST0000-osc-[^M]*.import | grep failover_nids | sed -n 's/.*\($NEW\).*/\1/p'"
1179 set_conf_param_and_check client "$TEST" \
1180 "$FSNAME-OST0000.failover.node" $NEW ||
1181 error "didn't add failover nid $NEW"
1182 NIDS=$($LCTL get_param -n osc.$FSNAME-OST0000-osc-[^M]*.import | grep failover_nids)
1184 NIDCOUNT=$(($(echo "$NIDS" | wc -w) - 1))
1185 echo "should have 2 failover nids: $NIDCOUNT"
1186 [ $NIDCOUNT -eq 2 ] || error "Failover nid not added"
1187 do_facet mgs "$LCTL conf_param -d $FSNAME-OST0000.failover.node" || error "conf_param delete failed"
1188 umount_client $MOUNT
1189 mount_client $MOUNT || return 3
1191 NIDS=$($LCTL get_param -n osc.$FSNAME-OST0000-osc-[^M]*.import | grep failover_nids)
1193 NIDCOUNT=$(($(echo "$NIDS" | wc -w) - 1))
1194 echo "only 1 final nid should remain: $NIDCOUNT"
1195 [ $NIDCOUNT -eq 1 ] || error "Failover nids not removed"
1199 run_test 30b "Remove failover nids"
1201 test_31() { # bug 10734
1202 # ipaddr must not exist
1203 mount -t lustre 4.3.2.1@tcp:/lustre $MOUNT || true
1206 run_test 31 "Connect to non-existent node (shouldn't crash)"
1210 T32_BLIMIT=20480 # Kbytes
1214 # This is not really a test but a tool to create new disk
1215 # image tarballs for the upgrade tests.
1217 # Disk image tarballs should be created on single-node
1218 # clusters by running this test with default configurations
1219 # plus a few mandatory environment settings that are verified
1220 # at the beginning of the test.
1222 test_32newtarball() {
1226 local tmp=$TMP/t32_image_create
1228 if [ $FSNAME != t32fs -o $MDSCOUNT -ne 1 -o \
1229 \( -z "$MDSDEV" -a -z "$MDSDEV1" \) -o $OSTCOUNT -ne 1 -o \
1230 -z "$OSTDEV1" ]; then
1231 error "Needs FSNAME=t32fs MDSCOUNT=1 MDSDEV1=<nonexistent_file>" \
1232 "(or MDSDEV, in the case of b1_8) OSTCOUNT=1" \
1233 "OSTDEV1=<nonexistent_file>"
1237 echo "Found stale $tmp"
1242 tar cf - -C $src . | tar xf - -C $tmp/src
1243 dd if=/dev/zero of=$tmp/src/t32_qf_old bs=1M \
1244 count=$(($T32_BLIMIT / 1024 / 2))
1245 chown $T32_QID.$T32_QID $tmp/src/t32_qf_old
1251 [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.3.50) ] &&
1252 $LFS quotacheck -ug /mnt/$FSNAME
1253 $LFS setquota -u $T32_QID -b 0 -B $T32_BLIMIT -i 0 -I $T32_ILIMIT \
1256 tar cf - -C $tmp/src . | tar xf - -C /mnt/$FSNAME
1263 ls -Rni --time-style=+%s >$tmp/img/list
1264 find . ! -name .lustre -type f -exec sha1sum {} \; |
1265 sort -k 2 >$tmp/img/sha1sums
1267 $LCTL get_param -n version | head -n 1 |
1268 sed -e 's/^lustre: *//' >$tmp/img/commit
1270 [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.3.50) ] &&
1271 $LFS quotaon -ug /mnt/$FSNAME
1272 $LFS quota -u $T32_QID -v /mnt/$FSNAME
1273 $LFS quota -v -u $T32_QID /mnt/$FSNAME |
1274 awk 'BEGIN { num='1' } { if ($1 == "'/mnt/$FSNAME'") \
1275 { if (NF == 1) { getline } else { num++ } ; print $num;} }' \
1276 | tr -d "*" > $tmp/img/bspace
1277 $LFS quota -v -u $T32_QID /mnt/$FSNAME |
1278 awk 'BEGIN { num='5' } { if ($1 == "'/mnt/$FSNAME'") \
1279 { if (NF == 1) { getline } else { num++ } ; print $num;} }' \
1280 | tr -d "*" > $tmp/img/ispace
1285 find -type f -exec sha1sum {} \; | sort -k 2 >$tmp/sha1sums.src
1288 if ! diff -u $tmp/sha1sums.src $tmp/img/sha1sums; then
1289 echo "Data verification failed"
1292 uname -r >$tmp/img/kernel
1293 uname -m >$tmp/img/arch
1295 mv ${MDSDEV1:-$MDSDEV} $tmp/img
1296 mv $OSTDEV1 $tmp/img
1298 version=$(sed -e 's/\(^[0-9]\+\.[0-9]\+\)\(.*$\)/\1/' $tmp/img/commit |
1299 sed -e 's/\./_/g') # E.g., "1.8.7" -> "1_8"
1302 tar cjvf $dst/disk$version-$(facet_fstype $SINGLEMDS).tar.bz2 -S *
1307 #run_test 32newtarball "Create a new test_32 disk image tarball for this version"
1310 # The list of applicable tarballs is returned via the caller's
1311 # variable "tarballs".
1314 local node=$(facet_active_host $SINGLEMDS)
1315 local r="do_node $node"
1317 if [ "$CLIENTONLY" ]; then
1318 skip "Client-only testing"
1322 if ! $r which $TUNEFS; then
1323 skip_env "tunefs.lustre required on $node"
1327 if [ -n "$($LCTL list_nids | grep -v '\(tcp\|lo\)[[:digit:]]*$')" ]; then
1328 skip "LU-2200: Test cannot run over Infiniband"
1332 local IMGTYPE=$(facet_fstype $SINGLEMDS)
1334 tarballs=$($r find $RLUSTRE/tests -maxdepth 1 -name \'disk*-$IMGTYPE.tar.bz2\')
1336 if [ -z "$tarballs" ]; then
1337 skip "No applicable tarballs found"
1342 t32_test_cleanup() {
1346 if $shall_cleanup_lustre; then
1347 umount $tmp/mnt/lustre || rc=$?
1349 if $shall_cleanup_mdt; then
1350 $r umount -d $tmp/mnt/mdt || rc=$?
1352 if $shall_cleanup_mdt1; then
1353 $r umount -d $tmp/mnt/mdt1 || rc=$?
1355 if $shall_cleanup_ost; then
1356 $r umount -d $tmp/mnt/ost || rc=$?
1364 t32_bits_per_long() {
1366 # Yes, this is not meant to be perfect.
1376 t32_reload_modules() {
1378 local all_removed=false
1381 while ((i < 20)); do
1382 echo "Unloading modules on $node: Attempt $i"
1383 do_rpc_nodes $node $LUSTRE_RMMOD $(facet_fstype $SINGLEMDS) &&
1385 do_rpc_nodes $node check_mem_leak || return 1
1386 if $all_removed; then
1387 do_rpc_nodes $node load_modules
1393 echo "Unloading modules on $node: Given up"
1397 t32_wait_til_devices_gone() {
1402 echo wait for devices to go
1403 while ((i < 20)); do
1404 devices=$(do_rpc_nodes $node $LCTL device_list | wc -l)
1405 ((devices == 0)) && return 0
1409 echo "waiting for devices on $node: Given up"
# t32_verify_quota(): after an image upgrade, verify that the quota usage
# and limits recorded in the disk image (img_bspace/img_ispace and the
# T32_BLIMIT/T32_ILIMIT globals) survived, re-enable ug quota on MDT and
# OST, then confirm writes/creates beyond the limits fail with -EDQUOT.
# (Fragment: closing braces/returns of the error blocks are elided.)
1413 t32_verify_quota() {
1417 local fstype=$(facet_fstype $SINGLEMDS)
1421 $LFS quota -u $T32_QID -v $mnt
# The awk scripts below pick the Nth field of the `lfs quota -v` row for
# $mnt, handling the case where the mount path wraps onto its own line
# (NF == 1 -> value is on the following line).  Field 1 = block usage.
1423 qval=$($LFS quota -v -u $T32_QID $mnt |
1424 awk 'BEGIN { num='1' } { if ($1 == "'$mnt'") \
1425 { if (NF == 1) { getline } else { num++ } ; print $num;} }' \
1427 [ $qval -eq $img_bspace ] || {
1428 echo "bspace, act:$qval, exp:$img_bspace"
# Field 5 = inode usage.
1432 qval=$($LFS quota -v -u $T32_QID $mnt |
1433 awk 'BEGIN { num='5' } { if ($1 == "'$mnt'") \
1434 { if (NF == 1) { getline } else { num++ } ; print $num;} }' \
1436 [ $qval -eq $img_ispace ] || {
1437 echo "ispace, act:$qval, exp:$img_ispace"
# Field 3 = block hard limit.
1441 qval=$($LFS quota -v -u $T32_QID $mnt |
1442 awk 'BEGIN { num='3' } { if ($1 == "'$mnt'") \
1443 { if (NF == 1) { getline } else { num++ } ; print $num;} }' \
1445 [ $qval -eq $T32_BLIMIT ] || {
1446 echo "blimit, act:$qval, exp:$T32_BLIMIT"
# Field 7 = inode hard limit.
1450 qval=$($LFS quota -v -u $T32_QID $mnt |
1451 awk 'BEGIN { num='7' } { if ($1 == "'$mnt'") \
1452 { if (NF == 1) { getline } else { num++ } ; print $num;} }' \
1454 [ $qval -eq $T32_ILIMIT ] || {
1455 echo "ilimit, act:$qval, exp:$T32_ILIMIT"
# Turn user/group quota enforcement back on and wait for the slaves.
1459 do_node $node $LCTL conf_param $fsname.quota.mdt=ug
1460 cmd="$LCTL get_param -n osd-$fstype.$fsname-MDT0000"
1461 cmd=$cmd.quota_slave.enabled
1462 wait_update $node "$cmd" "ug" || {
1463 echo "Enable mdt quota failed"
1467 do_node $node $LCTL conf_param $fsname.quota.ost=ug
1468 cmd="$LCTL get_param -n osd-$fstype.$fsname-OST0000"
1469 cmd=$cmd.quota_slave.enabled
1470 wait_update $node "$cmd" "ug" || {
1471 echo "Enable ost quota failed"
# Enforcement checks: both operations are expected to FAIL with -EDQUOT,
# hence the inverted `&& { error }` handling.
1476 runas -u $T32_QID -g $T32_QID dd if=/dev/zero of=$mnt/t32_qf_new \
1477 bs=1M count=$(($T32_BLIMIT / 1024)) oflag=sync && {
1478 echo "Write succeed, but expect -EDQUOT"
1481 rm -f $mnt/t32_qf_new
1483 runas -u $T32_QID -g $T32_QID createmany -m $mnt/t32_qf_ \
1485 echo "Create succeed, but expect -EDQUOT"
1488 unlinkmany $mnt/t32_qf_ $T32_ILIMIT
# t32_test body (fragment — the `t32_test()` header and many interior
# lines are elided in this excerpt): unpack a pre-built old-release disk
# image tarball, mount its MDT/OST under $tmp/mnt, optionally upgrade to
# a DNE (multi-MDT) layout, verify UUIDs/quota/file contents survived the
# upgrade, tune conf_params, then tear everything down and remount once
# more to confirm no upgrade flag was left behind.
1496 local dne_upgrade=${dne_upgrade:-"no"}
1497 local shall_cleanup_mdt=false
1498 local shall_cleanup_mdt1=false
1499 local shall_cleanup_ost=false
1500 local shall_cleanup_lustre=false
1501 local node=$(facet_active_host $SINGLEMDS)
# $r prefixes commands so they run on the MDS node rather than locally.
1502 local r="do_node $node"
1503 local node2=$(facet_active_host mds2)
1511 local nid=$($r $LCTL list_nids | head -1)
1517 local fstype=$(facet_fstype $SINGLEMDS)
# Cleanup runs on RETURN so the shall_cleanup_* flags above control what
# t32_test_cleanup unmounts on any exit path.
1519 trap 'trap - RETURN; t32_test_cleanup' RETURN
1521 mkdir -p $tmp/mnt/lustre
1522 $r mkdir -p $tmp/mnt/{mdt,ost}
# -S: handle sparse files in the image tarball.
1523 $r tar xjvf $tarball -S -C $tmp || {
1524 error_noexit "Unpacking the disk image tarball"
# Metadata files recorded when the image was generated (see 32newtarball).
1527 img_commit=$($r cat $tmp/commit)
1528 img_kernel=$($r cat $tmp/kernel)
1529 img_arch=$($r cat $tmp/arch)
1530 img_bspace=$($r cat $tmp/bspace)
1531 img_ispace=$($r cat $tmp/ispace)
1532 echo "Upgrading from $(basename $tarball), created with:"
1533 echo " Commit: $img_commit"
1534 echo " Kernel: $img_kernel"
1535 echo " Arch: $img_arch"
1537 $r $LCTL set_param debug="$PTLDEBUG"
# --dryrun lets tunefs.lustre sanity-check the image before we mount it.
1539 $r $TUNEFS --dryrun $tmp/mdt || {
1540 error_noexit "tunefs.lustre before mounting the MDT"
1543 if [ "$writeconf" ]; then
1544 mopts=loop,writeconf
1545 if [ $fstype == "ldiskfs" ]; then
1546 $r $TUNEFS --quota $tmp/mdt || {
1547 error_noexit "Enable mdt quota feature"
# Non-writeconf path: keep the old config but exclude the OST for now.
1552 mopts=loop,exclude=$fsname-OST0000
1555 t32_wait_til_devices_gone $node
1557 $r mount -t lustre -o $mopts $tmp/mdt $tmp/mnt/mdt || {
1558 error_noexit "Mounting the MDT"
1561 shall_cleanup_mdt=true
# DNE upgrade: format and mount a brand-new second MDT alongside the
# image's original one.
1563 if [ "$dne_upgrade" != "no" ]; then
1564 local fs2mdsdev=$(mdsdevname 1_2)
1565 local fs2mdsvdev=$(mdsvdevname 1_2)
1567 echo "mkfs new MDT on ${fs2mdsdev}...."
1568 if [ $(facet_fstype mds1) == ldiskfs ]; then
# Small journal keeps the format fast on the test device (cf. bug 17931).
1569 mkfsoptions="--mkfsoptions=\\\"-J size=8\\\""
1572 add fs2mds $(mkfs_opts mds2 $fs2mdsdev $fsname) --reformat \
1573 $mkfsoptions $fs2mdsdev $fs2mdsvdev > /dev/null || {
1574 error_noexit "Mkfs new MDT failed"
1578 $r $TUNEFS --dryrun $fs2mdsdev || {
1579 error_noexit "tunefs.lustre before mounting the MDT"
1583 echo "mount new MDT....$fs2mdsdev"
1584 $r mkdir -p $tmp/mnt/mdt1
1585 $r mount -t lustre -o $mopts $fs2mdsdev $tmp/mnt/mdt1 || {
1586 error_noexit "mount mdt1 failed"
1589 shall_cleanup_mdt1=true
# Confirm the upgraded MDT still identifies as MDT0000 of $fsname.
1592 uuid=$($r $LCTL get_param -n mdt.$fsname-MDT0000.uuid) || {
1593 error_noexit "Getting MDT UUID"
1596 if [ "$uuid" != $fsname-MDT0000_UUID ]; then
1597 error_noexit "Unexpected MDT UUID: \"$uuid\""
# Same dryrun/quota/mount sequence for the image's OST.
1601 $r $TUNEFS --dryrun $tmp/ost || {
1602 error_noexit "tunefs.lustre before mounting the OST"
1605 if [ "$writeconf" ]; then
1606 mopts=loop,mgsnode=$nid,$writeconf
1607 if [ $fstype == "ldiskfs" ]; then
1608 $r $TUNEFS --quota $tmp/ost || {
1609 error_noexit "Enable ost quota feature"
1614 mopts=loop,mgsnode=$nid
1616 $r mount -t lustre -o $mopts $tmp/ost $tmp/mnt/ost || {
1617 error_noexit "Mounting the OST"
1620 shall_cleanup_ost=true
1622 uuid=$($r $LCTL get_param -n obdfilter.$fsname-OST0000.uuid) || {
1623 error_noexit "Getting OST UUID"
1626 if [ "$uuid" != $fsname-OST0000_UUID ]; then
1627 error_noexit "Unexpected OST UUID: \"$uuid\""
# Exercise conf_param on the upgraded config; each setting is checked
# individually so a failure pinpoints the broken parameter.
1631 $r $LCTL conf_param $fsname-OST0000.osc.max_dirty_mb=15 || {
1632 error_noexit "Setting \"max_dirty_mb\""
1635 $r $LCTL conf_param $fsname-OST0000.failover.node=$nid || {
1636 error_noexit "Setting OST \"failover.node\""
1639 $r $LCTL conf_param $fsname-MDT0000.mdc.max_rpcs_in_flight=9 || {
1640 error_noexit "Setting \"max_rpcs_in_flight\""
1643 $r $LCTL conf_param $fsname-MDT0000.failover.node=$nid || {
1644 error_noexit "Setting MDT \"failover.node\""
1647 $r $LCTL pool_new $fsname.interop || {
1648 error_noexit "Setting \"interop\""
1651 $r $LCTL conf_param $fsname-MDT0000.lov.stripesize=4M || {
1652 error_noexit "Setting \"lov.stripesize\""
1656 if [ "$dne_upgrade" != "no" ]; then
1657 $r $LCTL conf_param \
1658 $fsname-MDT0001.mdc.max_rpcs_in_flight=9 || {
1659 error_noexit "Setting MDT1 \"max_rpcs_in_flight\""
1662 $r $LCTL conf_param $fsname-MDT0001.failover.node=$nid || {
1663 error_noexit "Setting MDT1 \"failover.node\""
1666 $r $LCTL conf_param $fsname-MDT0001.lov.stripesize=4M || {
1667 error_noexit "Setting MDT1 \"lov.stripesize\""
# Client-side verification (only in the writeconf variant).
1673 if [ "$writeconf" ]; then
1674 mount -t lustre $nid:/$fsname $tmp/mnt/lustre || {
1675 error_noexit "Mounting the client"
1678 shall_cleanup_lustre=true
1679 $LCTL set_param debug="$PTLDEBUG"
1681 t32_verify_quota $node $fsname $tmp/mnt/lustre || {
1682 error_noexit "verify quota failed"
1686 if [ "$dne_upgrade" != "no" ]; then
# Create a directory on the new MDT (index 1) and copy data into it.
1687 $LFS mkdir -i 1 $tmp/mnt/lustre/remote_dir || {
1688 error_noexit "set remote dir failed"
1692 pushd $tmp/mnt/lustre
1693 tar -cf - . --exclude=./remote_dir |
1694 tar -xvf - -C remote_dir 1>/dev/null || {
1695 error_noexit "cp to remote dir failed"
1701 dd if=/dev/zero of=$tmp/mnt/lustre/tmp_file bs=10k count=10 || {
1702 error_noexit "dd failed"
1705 rm -rf $tmp/mnt/lustre/tmp_file || {
1706 error_noexit "rm failed"
# Data verification: compare stored sha1sums against the live tree.
1710 if $r test -f $tmp/sha1sums; then
1711 # LU-2393 - do both sorts on same node to ensure locale
1713 $r cat $tmp/sha1sums | sort -k 2 >$tmp/sha1sums.orig
1714 if [ "$dne_upgrade" != "no" ]; then
1715 pushd $tmp/mnt/lustre/remote_dir
1717 pushd $tmp/mnt/lustre
1720 find ! -name .lustre -type f -exec sha1sum {} \; |
1721 sort -k 2 >$tmp/sha1sums || {
1722 error_noexit "sha1sum"
1726 if ! diff -ub $tmp/sha1sums.orig $tmp/sha1sums; then
1727 error_noexit "sha1sum verification failed"
1731 echo "sha1sum verification skipped"
1734 if [ "$dne_upgrade" != "no" ]; then
1735 rm -rf $tmp/mnt/lustre/remote_dir || {
1736 error_noexit "remove remote dir failed"
# Metadata verification: compare a stored `ls -Rni` listing.
1741 if $r test -f $tmp/list; then
1743 # There is not a Test Framework API to copy files to or
1744 # from a remote node.
1746 # LU-2393 - do both sorts on same node to ensure locale
1748 $r cat $tmp/list | sort -k 6 >$tmp/list.orig
1749 pushd $tmp/mnt/lustre
1750 ls -Rni --time-style=+%s | sort -k 6 >$tmp/list || {
1756 # 32-bit and 64-bit clients use different algorithms to
1757 # convert FIDs into inode numbers. Hence, remove the inode
1758 # numbers from the lists, if the original list was created
1759 # on an architecture with different number of bits per
1762 if [ $(t32_bits_per_long $(uname -m)) != \
1763 $(t32_bits_per_long $img_arch) ]; then
1764 echo "Different number of bits per \"long\" from the disk image"
1765 for list in list.orig list; do
1766 sed -i -e 's/^[0-9]\+[ \t]\+//' $tmp/$list
1769 if ! diff -ub $tmp/list.orig $tmp/list; then
1770 error_noexit "list verification failed"
1774 echo "list verification skipped"
1778 # When adding new data verification tests, please check for
1779 # the presence of the required reference files first, like
1780 # the "sha1sums" and "list" tests above, to avoid the need to
1781 # regenerate every image for each test addition.
# Live conf_param change: bump max_rpcs_in_flight and wait for the
# client to observe the new value via the config log.
1784 nrpcs_orig=$($LCTL get_param \
1785 -n mdc.*MDT0000*.max_rpcs_in_flight) || {
1786 error_noexit "Getting \"max_rpcs_in_flight\""
1789 nrpcs=$((nrpcs_orig + 5))
1790 $r $LCTL conf_param $fsname-MDT0000.mdc.max_rpcs_in_flight=$nrpcs || {
1791 error_noexit "Changing \"max_rpcs_in_flight\""
1794 wait_update $HOSTNAME "$LCTL get_param \
1795 -n mdc.*MDT0000*.max_rpcs_in_flight" $nrpcs || {
1796 error_noexit "Verifying \"max_rpcs_in_flight\""
# Orderly teardown; each flag is cleared so the RETURN trap skips it.
1800 umount $tmp/mnt/lustre || {
1801 error_noexit "Unmounting the client"
1804 shall_cleanup_lustre=false
1806 if [ "$dne_upgrade" != "no" ]; then
1807 $r umount -d $tmp/mnt/mdt1 || {
1808 error_noexit "Unmounting the MDT2"
1811 shall_cleanup_mdt1=false
1814 $r umount -d $tmp/mnt/mdt || {
1815 error_noexit "Unmounting the MDT"
1818 shall_cleanup_mdt=false
1820 $r umount -d $tmp/mnt/ost || {
1821 error_noexit "Unmounting the OST"
1824 shall_cleanup_ost=false
1826 t32_reload_modules $node || {
1827 error_noexit "Reloading modules"
1831 # mount a second time to make sure we didnt leave upgrade flag on
1832 $r $TUNEFS --dryrun $tmp/mdt || {
1833 error_noexit "tunefs.lustre before remounting the MDT"
1836 $r mount -t lustre -o loop,exclude=$fsname-OST0000 $tmp/mdt \
1838 error_noexit "Remounting the MDT"
1841 shall_cleanup_mdt=true
# test_32a body (fragment): run the offline upgrade (t32_test without
# writeconf) against every available disk-image tarball, accumulating
# the worst return code.
1851 for tarball in $tarballs; do
1852 t32_test $tarball || rc=$?
1856 run_test 32a "Upgrade (not live)"
# test_32b body (fragment): same as 32a but with writeconf, which also
# enables the client-mount and quota verification paths in t32_test.
1864 for tarball in $tarballs; do
1865 t32_test $tarball writeconf || rc=$?
1869 run_test 32b "Upgrade with writeconf"
# test_32c body (fragment): DNE variant of the upgrade test — requires at
# least two MDTs; dne_upgrade=yes makes t32_test add a second MDT.
1876 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
1878 for tarball in $tarballs; do
1879 dne_upgrade=yes t32_test $tarball writeconf || rc=$?
1883 run_test 32c "dne upgrade test"
# test_33a: format and start a second filesystem (FSNAME2) whose OST uses
# a very large index (8191), mount it, and check basic I/O works.
# (Fragment: several interior lines are elided in this excerpt.)
1885 test_33a() { # bug 12333, was test_33
1887 local FSNAME2=test-123
1888 local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
1891 [ -n "$ost1_HOST" ] && fs2ost_HOST=$ost1_HOST
1893 if [ -z "$fs2ost_DEV" -o -z "$fs2mds_DEV" ]; then
1894 local dev=${SINGLEMDS}_dev
1895 local MDSDEV=${!dev}
# Loopback second-fs devices cannot coexist with a real MDS block device.
1896 is_blkdev $SINGLEMDS $MDSDEV && \
1897 skip_env "mixed loopback and real device not working" && return
1900 local fs2mdsdev=$(mdsdevname 1_2)
1901 local fs2ostdev=$(ostdevname 1_2)
1902 local fs2mdsvdev=$(mdsvdevname 1_2)
1903 local fs2ostvdev=$(ostvdevname 1_2)
1905 if [ $(facet_fstype mds1) == ldiskfs ]; then
1906 mkfsoptions="--mkfsoptions=\\\"-J size=8\\\"" # See bug 17931.
1909 add fs2mds $(mkfs_opts mds1 ${fs2mdsdev}) --mgs --fsname=${FSNAME2} \
1910 --reformat $mkfsoptions $fs2mdsdev $fs2mdsvdev || exit 10
# --index=8191 is the point of the test: a near-maximal OST index.
1911 add fs2ost $(mkfs_opts ost1 ${fs2ostdev}) --mgsnode=$MGSNID \
1912 --fsname=${FSNAME2} --index=8191 --reformat $fs2ostdev \
1913 $fs2ostvdev || exit 10
1915 start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS && trap cleanup_24a EXIT INT
1916 start fs2ost $fs2ostdev $OST_MOUNT_OPTS
1917 do_facet $SINGLEMDS "$LCTL conf_param $FSNAME2.sys.timeout=200" || rc=1
1919 mount -t lustre $MGSNID:/${FSNAME2} $MOUNT2 || rc=2
1922 cp /etc/hosts $MOUNT2/ || rc=3
1923 $LFS getstripe $MOUNT2/hosts
1928 cleanup_nocli || rc=6
1931 run_test 33a "Mount ost with a large index number"
# test_33b: drop the LDLM lock-cancel reply during client unmount and
# make sure the umount still completes.  (Fragment: setup/teardown lines
# elided in this excerpt.)
1933 test_33b() { # was test_34
1936 do_facet client dd if=/dev/zero of=$MOUNT/24 bs=1024k count=1
1937 # Drop lock cancelation reply during umount
1938 #define OBD_FAIL_LDLM_CANCEL_NET 0x304
# 0x8000xxxx = one-shot failure injection.
1939 do_facet client lctl set_param fail_loc=0x80000304
1940 #lctl set_param debug=-1
1941 umount_client $MOUNT
1944 run_test 33b "Drop cancel during umount"
# test_34a body (fragment): with a file held open by multiop, a plain
# (non-forced) umount must FAIL — rc of 0 from the umount is the error.
1948 do_facet client "sh runmultiop_bg_pause $DIR/file O_c"
1949 manual_umount_client
# USR1 tells the paused multiop to close the file and exit.
1951 do_facet client killall -USR1 multiop
1952 if [ $rc -eq 0 ]; then
1953 error "umount not fail!"
1958 run_test 34a "umount with opened file should be fail"
# test_34b body (fragment): after force-stopping the MDS, a forced client
# umount must succeed and leave no stale /etc/mtab entry.
1963 touch $DIR/$tfile || return 1
1964 stop_mds --force || return 2
1966 manual_umount_client --force
1968 if [ $rc -ne 0 ]; then
1969 error "mtab after failed umount - rc $rc"
1975 run_test 34b "force umount with failed mds should be normal"
# test_34c body (fragment): same as 34b but with the OST force-stopped
# instead of the MDS.
1979 touch $DIR/$tfile || return 1
1980 stop_ost --force || return 2
1982 manual_umount_client --force
1984 if [ $rc -ne 0 ]; then
1985 error "mtab after failed umount - rc $rc"
1991 run_test 34c "force umount with failed ost should be normal"
# test_35a: register a fake failover NID for the MDT, restart the MDT,
# and verify (by parsing the debug log) that the client reconnects to the
# last active server first rather than the fake failnode.
# (Fragment: several interior lines are elided in this excerpt.)
1993 test_35a() { # bug 12459
1996 DBG_SAVE="`lctl get_param -n debug`"
1997 lctl set_param debug="ha"
1999 log "Set up a fake failnode for the MDS"
2001 local device=$(do_facet $SINGLEMDS "lctl get_param -n devices" | awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1)
2002 do_facet mgs "$LCTL conf_param ${device}.failover.node=" \
2003 "$(h2$NETTYPE $FAKENID)" || return 4
2005 log "Wait for RECONNECT_INTERVAL seconds (10s)"
# Unique marker written to the debug log so the awk below can find the
# start of this test's events.
2008 MSG="conf-sanity.sh test_35a `date +%F%kh%Mm%Ss`"
2011 log "Stopping the MDT: $device"
2012 stop_mdt 1 || return 5
# Background df forces the client to attempt reconnection.
2014 df $MOUNT > /dev/null 2>&1 &
2016 log "Restarting the MDT: $device"
2017 start_mdt 1 || return 6
2018 log "Wait for df ($DFPID) ... "
2021 lctl set_param debug="$DBG_SAVE"
2023 # retrieve from the log the first server that the client tried to
2024 # contact after the connection loss
2025 $LCTL dk $TMP/lustre-log-$TESTNAME.log
2026 NEXTCONN=`awk "/${MSG}/ {start = 1;}
2027 /import_select_connection.*$device-mdc.* using connection/ {
2029 if (\\\$NF ~ /$FAKENID/)
2035 }" $TMP/lustre-log-$TESTNAME.log`
2036 [ "$NEXTCONN" != "0" ] && log "The client didn't try to reconnect to the last active server (tried ${NEXTCONN} instead)" && return 7
2038 # remove nid settings
2039 writeconf_or_reformat
2041 run_test 35a "Reconnect to the last active server first"
# test_35b: while the MDS is artificially busy (OBD_FAIL_MDS_RESEND), the
# client must keep retrying the primary connection — never fail over to
# the fake NID — and must rate-limit its reconnect attempts.
# (Fragment: several interior lines are elided in this excerpt.)
2043 test_35b() { # bug 18674
2044 remote_mds || { skip "local MDS" && return 0; }
2048 $LCTL set_param debug="ha"
2050 MSG="conf-sanity.sh test_35b `date +%F%kh%Mm%Ss`"
2053 log "Set up a fake failnode for the MDS"
2055 local device=$(do_facet $SINGLEMDS "$LCTL get_param -n devices" | \
2056 awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1)
2057 do_facet mgs "$LCTL conf_param ${device}.failover.node=" \
2058 "$(h2$NETTYPE $FAKENID)" || return 1
2060 local at_max_saved=0
2061 # adaptive timeouts may prevent seeing the issue
2062 if at_is_enabled; then
2063 at_max_saved=$(at_max_get mds)
2064 at_max_set 0 mds client
2067 mkdir -p $MOUNT/$tdir
2069 log "Injecting EBUSY on MDS"
2070 # Setting OBD_FAIL_MDS_RESEND=0x136
2071 do_facet $SINGLEMDS "$LCTL set_param fail_loc=0x80000136" || return 2
# Clear mdc stats so CONNCNT below counts only this test's connects.
2073 $LCTL set_param mdc.${FSNAME}*.stats=clear
2075 log "Creating a test file and stat it"
2076 touch $MOUNT/$tdir/$tfile
2077 stat $MOUNT/$tdir/$tfile
2079 log "Stop injecting EBUSY on MDS"
2080 do_facet $SINGLEMDS "$LCTL set_param fail_loc=0" || return 3
2081 rm -f $MOUNT/$tdir/$tfile
2084 # restore adaptive timeout
2085 [ $at_max_saved -ne 0 ] && at_max_set $at_max_saved mds client
2087 $LCTL dk $TMP/lustre-log-$TESTNAME.log
2089 CONNCNT=`$LCTL get_param mdc.${FSNAME}*.stats | awk '/mds_connect/{print $2}'`
2091 # retrieve from the log if the client has ever tried to
2092 # contact the fake server after the loss of connection
# FAILCONN: 0 = never reconnected, 1 = reconnected to primary only,
# 2 = touched the fake failover NID (elided lines set these values).
2093 FAILCONN=`awk "BEGIN {ret = 0;}
2094 /import_select_connection.*${FSNAME}-MDT0000-mdc.* using connection/ {
2096 if (\\\$NF ~ /$FAKENID/) {
2101 END {print ret}" $TMP/lustre-log-$TESTNAME.log`
2103 [ "$FAILCONN" == "0" ] && \
2104 log "ERROR: The client reconnection has not been triggered" && \
2106 [ "$FAILCONN" == "2" ] && \
2107 log "ERROR: The client tried to reconnect to the failover server while the primary was busy" && \
2111 # When OBD_FAIL_MDS_RESEND is hit, we sleep for 2 * obd_timeout
2112 # Reconnects are supposed to be rate limited to one every 5s
2113 [ $CONNCNT -gt $((2 * $TIMEOUT / 5 + 1)) ] && \
2114 log "ERROR: Too many reconnects $CONNCNT" && \
2118 # remove nid settings
2119 writeconf_or_reformat
2121 run_test 35b "Continue reconnection retries, if the active server is busy"
# test_36 body (fragment): build a second filesystem with one MDS and two
# OSTs, write some data, then cross-check `df` totals/free/avail against
# the sums of the per-OST kbytes* counters, allowing 64KB slack per OST.
2124 [ $OSTCOUNT -lt 2 ] && skip_env "skipping test for single OST" && return
2126 [ "$ost_HOST" = "`hostname`" -o "$ost1_HOST" = "`hostname`" ] || \
2127 { skip "remote OST" && return 0; }
2130 local FSNAME2=test1234
2131 local fs3ost_HOST=$ost_HOST
2132 local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
2134 [ -n "$ost1_HOST" ] && fs2ost_HOST=$ost1_HOST && fs3ost_HOST=$ost1_HOST
2136 if [ -z "$fs2ost_DEV" -o -z "$fs2mds_DEV" -o -z "$fs3ost_DEV" ]; then
2137 is_blkdev $SINGLEMDS $MDSDEV && \
2138 skip_env "mixed loopback and real device not working" && return
2141 local fs2mdsdev=$(mdsdevname 1_2)
2142 local fs2ostdev=$(ostdevname 1_2)
2143 local fs3ostdev=$(ostdevname 2_2)
2144 local fs2mdsvdev=$(mdsvdevname 1_2)
2145 local fs2ostvdev=$(ostvdevname 1_2)
2146 local fs3ostvdev=$(ostvdevname 2_2)
2148 add fs2mds $(mkfs_opts mds1 ${fs2mdsdev}) --mgs --fsname=${FSNAME2} \
2149 --reformat $fs2mdsdev $fs2mdsvdev || exit 10
2150 # XXX after we support non 4K disk blocksize in ldiskfs, specify a
2151 # different one than the default value here.
2152 add fs2ost $(mkfs_opts ost1 ${fs2ostdev}) --mgsnode=$MGSNID \
2153 --fsname=${FSNAME2} --reformat $fs2ostdev $fs2ostvdev || exit 10
2154 add fs3ost $(mkfs_opts ost1 ${fs3ostdev}) --mgsnode=$MGSNID \
2155 --fsname=${FSNAME2} --reformat $fs3ostdev $fs3ostvdev || exit 10
2157 start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS
2158 start fs2ost $fs2ostdev $OST_MOUNT_OPTS
2159 start fs3ost $fs3ostdev $OST_MOUNT_OPTS
2161 mount -t lustre $MGSNID:/${FSNAME2} $MOUNT2 || return 1
2163 sleep 5 # until 11778 fixed
2165 dd if=/dev/zero of=$MOUNT2/$tfile bs=1M count=7 || return 2
# Sum backend counters across all OSTs, then pull the client-side view
# from `df -P` for comparison.
2167 BKTOTAL=`lctl get_param -n obdfilter.*.kbytestotal | awk 'BEGIN{total=0}; {total+=$1}; END{print total}'`
2168 BKFREE=`lctl get_param -n obdfilter.*.kbytesfree | awk 'BEGIN{free=0}; {free+=$1}; END{print free}'`
2169 BKAVAIL=`lctl get_param -n obdfilter.*.kbytesavail | awk 'BEGIN{avail=0}; {avail+=$1}; END{print avail}'`
2170 STRING=`df -P $MOUNT2 | tail -n 1 | awk '{print $2","$3","$4}'`
2171 DFTOTAL=`echo $STRING | cut -d, -f1`
2172 DFUSED=`echo $STRING | cut -d, -f2`
2173 DFAVAIL=`echo $STRING | cut -d, -f3`
2174 DFFREE=$(($DFTOTAL - $DFUSED))
# 64KB per OST of tolerance for metadata/reservation differences.
2176 ALLOWANCE=$((64 * $OSTCOUNT))
2178 if [ $DFTOTAL -lt $(($BKTOTAL - $ALLOWANCE)) ] ||
2179 [ $DFTOTAL -gt $(($BKTOTAL + $ALLOWANCE)) ] ; then
2180 echo "**** FAIL: df total($DFTOTAL) mismatch OST total($BKTOTAL)"
2183 if [ $DFFREE -lt $(($BKFREE - $ALLOWANCE)) ] ||
2184 [ $DFFREE -gt $(($BKFREE + $ALLOWANCE)) ] ; then
2185 echo "**** FAIL: df free($DFFREE) mismatch OST free($BKFREE)"
2188 if [ $DFAVAIL -lt $(($BKAVAIL - $ALLOWANCE)) ] ||
2189 [ $DFAVAIL -gt $(($BKAVAIL + $ALLOWANCE)) ] ; then
2190 echo "**** FAIL: df avail($DFAVAIL) mismatch OST avail($BKAVAIL)"
2195 stop fs3ost -f || return 200
2196 stop fs2ost -f || return 201
2197 stop fs2mds -f || return 202
2198 unload_modules_conf || return 203
2201 run_test 36 "df report consistency on OSTs with different block size"
# test_37 body (fragment — the test_37() header line is elided): mount
# the MDT through a symlink to its device and verify mount.lustre can
# still set tunables (i.e. it resolves the link to the real device).
2204 local mntpt=$(facet_mntpt $SINGLEMDS)
2205 local mdsdev=$(mdsdevname ${SINGLEMDS//mds/})
2206 local mdsdev_sym="$TMP/sym_mdt.img"
2207 local opts=$MDS_MOUNT_OPTS
2210 if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
2211 skip "Currently only applicable to ldiskfs-based MDTs"
2215 echo "MDS : $mdsdev"
2216 echo "SYMLINK : $mdsdev_sym"
2217 do_facet $SINGLEMDS rm -f $mdsdev_sym
2219 do_facet $SINGLEMDS ln -s $mdsdev $mdsdev_sym
2221 echo "mount symlink device - $mdsdev_sym"
# File-backed targets need -o loop; real block devices must not get it.
2223 if ! do_facet $SINGLEMDS test -b $mdsdev; then
2224 opts=$(csa_add "$opts" -o loop)
2226 mount_op=$(do_facet $SINGLEMDS mount -v -t lustre $opts \
2227 $mdsdev_sym $mntpt 2>&1)
2230 echo mount_op=$mount_op
2232 do_facet $SINGLEMDS "umount -d $mntpt && rm -f $mdsdev_sym"
# The mount's verbose output is scanned for the tunable failure message.
2234 if $(echo $mount_op | grep -q "unable to set tunable"); then
2235 error "set tunables failed for symlink device"
2238 [ $rc -eq 0 ] || error "mount symlink $mdsdev_sym failed! rc=$rc"
2242 run_test 37 "verify set tunables works for symlink device"
# test_38: delete the MDS's lov_objid file with debugfs and verify the
# MDS recreates it correctly from OST data on reconnect, both after a
# plain delete and after overwriting it with zeros.
# (Fragment: several interior lines are elided in this excerpt.)
2244 test_38() { # bug 14222
2245 if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
2246 skip "Only applicable to ldiskfs-based MDTs"
2254 FILES=`find $SRC -type f -mtime +1 | head -n $COUNT`
2255 log "copying $(echo $FILES | wc -w) files to $DIR/$tdir"
2257 tar cf - $FILES | tar xf - -C $DIR/$tdir || \
2258 error "copying $SRC to $DIR/$tdir"
2260 umount_client $MOUNT
2262 log "rename lov_objid file on MDS"
2263 rm -f $TMP/lov_objid.orig
2265 local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
# debugfs operates on the raw (unmounted) MDT device: dump a copy of
# lov_objid, then delete the original.
2266 do_facet $SINGLEMDS "$DEBUGFS -c -R \\\"dump lov_objid $TMP/lov_objid.orig\\\" $MDSDEV"
2267 do_facet $SINGLEMDS "$DEBUGFS -w -R \\\"rm lov_objid\\\" $MDSDEV"
2269 do_facet $SINGLEMDS "od -Ax -td8 $TMP/lov_objid.orig"
2270 # check create in mds_lov_connect
2274 [ $V ] && log "verifying $DIR/$tdir/$f"
2275 diff -q $f $DIR/$tdir/$f || ERROR=y
2277 do_facet $SINGLEMDS "$DEBUGFS -c -R \\\"dump lov_objid $TMP/lov_objid.new\\\" $MDSDEV"
2278 do_facet $SINGLEMDS "od -Ax -td8 $TMP/lov_objid.new"
2279 [ "$ERROR" = "y" ] && error "old and new files are different after connect" || true
2281 # check it's updates in sync
2282 umount_client $MOUNT
# Second round: replace lov_objid with a zero-filled block instead of
# removing it, then verify again after remount.
2285 do_facet $SINGLEMDS dd if=/dev/zero of=$TMP/lov_objid.clear bs=4096 count=1
2286 do_facet $SINGLEMDS "$DEBUGFS -w -R \\\"rm lov_objid\\\" $MDSDEV"
2287 do_facet $SINGLEMDS "$DEBUGFS -w -R \\\"write $TMP/lov_objid.clear lov_objid\\\" $MDSDEV "
2292 [ $V ] && log "verifying $DIR/$tdir/$f"
2293 diff -q $f $DIR/$tdir/$f || ERROR=y
2295 do_facet $SINGLEMDS "$DEBUGFS -c -R \\\"dump lov_objid $TMP/lov_objid.new1\\\" $MDSDEV"
2296 do_facet $SINGLEMDS "od -Ax -td8 $TMP/lov_objid.new1"
2297 umount_client $MOUNT
2299 [ "$ERROR" = "y" ] && error "old and new files are different after sync" || true
2301 log "files compared the same"
2304 run_test 38 "MDS recreates missing lov_objid file from OST data"
# test_39 body (fragment): run leak_finder.pl over the captured debug log
# and fail if any "*** Leak:" line is reported.
2310 perl $SRCDIR/leak_finder.pl $TMP/debug 2>&1 | egrep '*** Leak:' &&
2311 error "memory leak detected" || true
2313 run_test 39 "leak_finder recognizes both LUSTRE and LNET malloc messages"
# test_40: inject OBD_FAIL_TGT_TOOMANY_THREADS on the MDS to race service
# thread startup.  (Fragment: setup/start lines are elided.)
2315 test_40() { # bug 15759
2317 #define OBD_FAIL_TGT_TOOMANY_THREADS 0x706
2318 do_facet $SINGLEMDS "$LCTL set_param fail_loc=0x80000706"
2322 run_test 40 "race during service thread startup"
# test_41a: start the MDS in two phases — first with -o nosvc (MGS only),
# then again with -o nomgs,force (MDT only) — and verify client I/O works.
# (Fragment: several interior lines are elided in this excerpt.)
2324 test_41a() { #bug 14134
2325 if [ $(facet_fstype $SINGLEMDS) == ldiskfs ] &&
2326 ! do_facet $SINGLEMDS test -b $(mdsdevname 1); then
2327 skip "Loop devices does not work with nosvc option"
2332 local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
# -n: no service start; brings up only the MGS part of the combined device.
2334 start $SINGLEMDS $MDSDEV $MDS_MOUNT_OPTS -o nosvc -n
2335 start ost1 `ostdevname 1` $OST_MOUNT_OPTS
2336 start $SINGLEMDS $MDSDEV $MDS_MOUNT_OPTS -o nomgs,force
2338 mount_client $MOUNT || return 1
2341 echo "blah blah" > $MOUNT/$tfile
2344 umount_client $MOUNT
2345 stop ost1 -f || return 201
# Two stop_mds calls: one for each of the two mounts started above.
2346 stop_mds -f || return 202
2347 stop_mds -f || return 203
2348 unload_modules_conf || return 204
2351 run_test 41a "mount mds with --nosvc and --nomgs"
# test_41b body (fragment — the test_41b() header is elided): same nosvc/
# nomgs split-start as 41a, but exercised on the very first mount after
# format; requires a combined MGS+MDS device.
2354 if [ $(facet_fstype $SINGLEMDS) == ldiskfs ] &&
2355 ! do_facet $SINGLEMDS test -b $(mdsdevname 1); then
2356 skip "Loop devices does not work with nosvc option"
2360 ! combined_mgs_mds && skip "needs combined mgs device" && return 0
2364 local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
2366 start $SINGLEMDS $MDSDEV $MDS_MOUNT_OPTS -o nosvc -n
2368 start $SINGLEMDS $MDSDEV $MDS_MOUNT_OPTS -o nomgs,force
2370 mount_client $MOUNT || return 1
2373 echo "blah blah" > $MOUNT/$tfile
2374 cat $MOUNT/$tfile || return 200
2376 umount_client $MOUNT
2377 stop_ost || return 201
# Two stop_mds calls: one for each of the two mounts started above.
2378 stop_mds -f || return 202
2379 stop_mds -f || return 203
2382 run_test 41b "mount mds with --nosvc and --nomgs on first mount"
# test_42: write deliberately invalid llite/sys conf_params into the
# config log and verify mount/umount of client and servers still work.
# (Fragment: some lines elided; note line 2398 is missing an `error`
# call before the message — flagged here, not fixed, since surrounding
# lines are not visible.)
2384 test_42() { #bug 14693
2386 check_mount || error "client was not mounted"
2388 do_facet mgs $LCTL conf_param $FSNAME.llite.some_wrong_param=10
2389 umount_client $MOUNT ||
2390 error "unmounting client failed with invalid llite param"
2391 mount_client $MOUNT ||
2392 error "mounting client failed with invalid llite param"
2394 do_facet mgs $LCTL conf_param $FSNAME.sys.some_wrong_param=20
2395 cleanup || error "stopping $FSNAME failed with invalid sys param"
2398 check_mount || "client was not mounted with invalid sys param"
2399 cleanup || error "stopping $FSNAME failed with invalid sys param"
2402 run_test 42 "allow client/server mount/unmount with invalid config param"
# test_43 body (fragment — the test_43() header is elided): exercise MDT
# root_squash and nosquash_nids.  With root squashed to RUNAS_ID, root
# may only touch RUNAS_ID-owned files; once the client NID is added to
# nosquash_nids, root access to root-owned files is restored.
2405 [ $UID -ne 0 -o $RUNAS_ID -eq 0 ] && skip_env "run as root"
2407 chmod ugo+x $DIR || error "chmod 0 failed"
# Reset both squash parameters to known values before testing.
2408 set_conf_param_and_check mds \
2409 "lctl get_param -n mdt.$FSNAME-MDT0000.root_squash" \
2410 "$FSNAME.mdt.root_squash" \
2412 set_conf_param_and_check mds \
2413 "lctl get_param -n mdt.$FSNAME-MDT0000.nosquash_nids" \
2414 "$FSNAME.mdt.nosquash_nids" \
2418 # create set of test files
2420 echo "111" > $DIR/$tfile-userfile || error "write 1 failed"
2421 chmod go-rw $DIR/$tfile-userfile || error "chmod 1 failed"
2422 chown $RUNAS_ID.$RUNAS_ID $DIR/$tfile-userfile || error "chown failed"
2424 echo "222" > $DIR/$tfile-rootfile || error "write 2 failed"
2425 chmod go-rw $DIR/$tfile-rootfile || error "chmod 2 faield"
2427 mkdir $DIR/$tdir-rootdir -p || error "mkdir failed"
2428 chmod go-rwx $DIR/$tdir-rootdir || error "chmod 3 failed"
2429 touch $DIR/$tdir-rootdir/tfile-1 || error "touch failed"
2432 # check root_squash:
2433 # set root squash UID:GID to RUNAS_ID
2434 # root should be able to access only files owned by RUNAS_ID
2436 set_conf_param_and_check mds \
2437 "lctl get_param -n mdt.$FSNAME-MDT0000.root_squash" \
2438 "$FSNAME.mdt.root_squash" \
2439 "$RUNAS_ID:$RUNAS_ID"
2441 ST=$(stat -c "%n: owner uid %u (%A)" $DIR/$tfile-userfile)
2442 dd if=$DIR/$tfile-userfile 1>/dev/null 2>/dev/null || \
2443 error "$ST: root read permission is denied"
2444 echo "$ST: root read permission is granted - ok"
2447 dd conv=notrunc if=$DIR/$tfile-userfile 1>/dev/null 2>/dev/null || \
2448 error "$ST: root write permission is denied"
2449 echo "$ST: root write permission is granted - ok"
# Squashed root must be DENIED on root-owned objects (inverted checks).
2451 ST=$(stat -c "%n: owner uid %u (%A)" $DIR/$tfile-rootfile)
2452 dd if=$DIR/$tfile-rootfile 1>/dev/null 2>/dev/null && \
2453 error "$ST: root read permission is granted"
2454 echo "$ST: root read permission is denied - ok"
2457 dd conv=notrunc of=$DIR/$tfile-rootfile 1>/dev/null 2>/dev/null && \
2458 error "$ST: root write permission is granted"
2459 echo "$ST: root write permission is denied - ok"
2461 ST=$(stat -c "%n: owner uid %u (%A)" $DIR/$tdir-rootdir)
2462 rm $DIR/$tdir-rootdir/tfile-1 1>/dev/null 2>/dev/null && \
2463 error "$ST: root unlink permission is granted"
2464 echo "$ST: root unlink permission is denied - ok"
2466 touch $DIR/tdir-rootdir/tfile-2 1>/dev/null 2>/dev/null && \
2467 error "$ST: root create permission is granted"
2468 echo "$ST: root create permission is denied - ok"
2471 # check nosquash_nids:
2472 # put client's NID into nosquash_nids list,
2473 # root should be able to access root file after that
2475 local NIDLIST=$(lctl list_nids all | tr '\n' ' ')
# Pad the list with unrelated NIDs to exercise list parsing on the MDT.
2476 NIDLIST="2@elan $NIDLIST 192.168.0.[2,10]@tcp"
2477 NIDLIST=$(echo $NIDLIST | tr -s ' ' ' ')
2478 set_conf_param_and_check mds \
2479 "lctl get_param -n mdt.$FSNAME-MDT0000.nosquash_nids" \
2480 "$FSNAME-MDTall.mdt.nosquash_nids" \
2483 ST=$(stat -c "%n: owner uid %u (%A)" $DIR/$tfile-rootfile)
2484 dd if=$DIR/$tfile-rootfile 1>/dev/null 2>/dev/null || \
2485 error "$ST: root read permission is denied"
2486 echo "$ST: root read permission is granted - ok"
2489 dd conv=notrunc of=$DIR/$tfile-rootfile 1>/dev/null 2>/dev/null || \
2490 error "$ST: root write permission is denied"
2491 echo "$ST: root write permission is granted - ok"
2493 ST=$(stat -c "%n: owner uid %u (%A)" $DIR/$tdir-rootdir)
2494 rm $DIR/$tdir-rootdir/tfile-1 || \
2495 error "$ST: root unlink permission is denied"
2496 echo "$ST: root unlink permission is granted - ok"
2497 touch $DIR/$tdir-rootdir/tfile-2 || \
2498 error "$ST: root create permission is denied"
2499 echo "$ST: root create permission is granted - ok"
2503 run_test 43 "check root_squash and nosquash_nids"
# test_44 body (fragment): verify the mounted client's UUID appears among
# the MDT's per-export proc entries.
2505 umount_client $MOUNT
2510 check_mount || return 2
2511 UUID=$($LCTL get_param llite.${FSNAME}*.uuid | cut -d= -f2)
2513 UUIDS=$(do_facet $SINGLEMDS "$LCTL get_param mdt.${FSNAME}*.exports.*.uuid")
2514 for VAL in $UUIDS; do
2515 NID=$(echo $VAL | cut -d= -f1)
2516 CLUUID=$(echo $VAL | cut -d= -f2)
2517 [ "$UUID" = "$CLUUID" ] && STATS_FOUND=yes && break
2519 [ "$STATS_FOUND" = "no" ] && error "stats not found for client"
2523 run_test 44 "mounted client proc entry exists"
# test_45 body (fragment): with OBD_FAIL_PTLRPC_LONG_UNLINK injected,
# force-unmount the client, clear the fail_loc, and remount — checks
# ptlrpcd copes with long bulk unlink delays.
2527 check_mount || return 2
2532 #define OBD_FAIL_PTLRPC_LONG_UNLINK 0x50f
2533 do_facet client "lctl set_param fail_loc=0x50f"
2536 manual_umount_client --force || return 3
2537 do_facet client "lctl set_param fail_loc=0x0"
2539 mount_client $MOUNT || return 4
2543 run_test 45 "long unlink handling in ptlrpcd"
# cleanup_46a body (fragment — header elided): unmount both clients,
# stop OSTs from index $1 down to 1, run cleanup_nocli, then writeconf
# to purge all traces of the extra OSTs for subsequent tests.
2550 umount_client $MOUNT2 || rc=$?
2551 umount_client $MOUNT || rc=$?
2552 while [ $count -gt 0 ]; do
2553 stop ost${count} -f || rc=$?
2557 cleanup_nocli || rc=$?
2558 #writeconf to remove all ost2 traces for subsequent tests
2559 writeconf_or_reformat
# test_46a body (fragment): mount a client while only one OST is up, add
# the remaining OSTs afterwards, then create a maximally wide-striped
# file from a second client and stat it from the first — checks dynamic
# OST addition with wide striping (plus ACLs to inflate the reply size).
2564 echo "Testing with $OSTCOUNT OSTs"
2566 start_mds || return 1
2567 #first client should see only one ost
2568 start_ost || return 2
2569 wait_osc_import_state mds ost FULL
2571 mount_client $MOUNT || return 3
2572 trap "cleanup_46a $OSTCOUNT" EXIT ERR
2575 for (( i=2; i<=$OSTCOUNT; i++ )); do
2576 start ost$i `ostdevname $i` $OST_MOUNT_OPTS || return $((i+2))
2579 # wait until osts in sync
2580 for (( i=2; i<=$OSTCOUNT; i++ )); do
2581 wait_osc_import_state mds ost$i FULL
2582 wait_osc_import_state client ost$i FULL
2585 #second client see all ost's
2587 mount_client $MOUNT2 || return 8
# -c -1: stripe across every available OST.
2588 $LFS setstripe -c -1 $MOUNT2 || return 9
2589 $LFS getstripe $MOUNT2 || return 10
2591 echo "ok" > $MOUNT2/widestripe
2592 $LFS getstripe $MOUNT2/widestripe || return 11
2593 # fill acl buffer for avoid expand lsm to them
2594 awk -F : '{if (FNR < 25) { print "u:"$1":rwx" }}' /etc/passwd | while read acl; do
2595 setfacl -m $acl $MOUNT2/widestripe
# First client must be able to stat the wide-striped file.
2599 stat $MOUNT/widestripe || return 12
2601 cleanup_46a $OSTCOUNT || { echo "cleanup_46a failed!" && return 13; }
2604 run_test 46a "handle ost additional - wide striped file"
# test_47 body (fragment): set lru_size=100 on every non-MDT namespace,
# record the values, fail over the MDS, and verify the client-side
# lru_size settings survive the server restart.
2609 check_mount || return 2
2610 $LCTL set_param ldlm.namespaces.$FSNAME-*-*-*.lru_size=100
# Record current lru_size per OSC namespace (MDT namespaces skipped).
2614 for ns in $($LCTL get_param ldlm.namespaces.$FSNAME-*-*-*.lru_size); do
2615 if echo $ns | grep "MDT[[:digit:]]*"; then
2618 lrs=$(echo $ns | sed 's/.*lru_size=//')
2619 lru_size[count]=$lrs
2624 facet_failover $SINGLEMDS
2625 client_up || return 3
# Compare post-failover values against the recorded ones.
2628 for ns in $($LCTL get_param ldlm.namespaces.$FSNAME-*-*-*.lru_size); do
2629 if echo $ns | grep "MDT[[:digit:]]*"; then
2632 lrs=$(echo $ns | sed 's/.*lru_size=//')
2633 if ! test "$lrs" -eq "${lru_size[count]}"; then
2634 n=$(echo $ns | sed -e 's/ldlm.namespaces.//' -e 's/.lru_size=.*//')
2635 error "$n has lost lru_size: $lrs vs. ${lru_size[count]}"
2643 run_test 47 "server restart does not make client loss lru_resize settings"
# cleanup_48 (fragment, comment only) + test_48: create a wide-striped
# file and attach one ACL per passwd entry, then stat it — checks that a
# file with many ACLs on a wide stripe is still accessible (bug 17636).
2648 # reformat after this test is needed - if test will failed
2649 # we will have unkillable file at FS
2653 test_48() { # bug 17636
2656 check_mount || return 2
# -c -1: stripe across every available OST.
2658 $LFS setstripe -c -1 $MOUNT || return 9
2659 $LFS getstripe $MOUNT || return 10
2661 echo "ok" > $MOUNT/widestripe
2662 $LFS getstripe $MOUNT/widestripe || return 11
2664 trap cleanup_48 EXIT ERR
2666 # fill acl buffer for avoid expand lsm to them
2667 getent passwd | awk -F : '{ print "u:"$1":rwx" }' | while read acl; do
2668 setfacl -m $acl $MOUNT/widestripe
2671 stat $MOUNT/widestripe || return 12
2676 run_test 48 "too many acls on file"
# test_49: format with explicit LDLM_TIMEOUT values and verify every node
# (MDS, OST, client) reports a consistent ldlm_timeout; also checks the
# default derivation (TIMEOUT/3) when LDLM_TIMEOUT == TIMEOUT.
# (Fragment: reformat/setup lines are elided in this excerpt.)
2678 # check PARAM_SYS_LDLM_TIMEOUT option of MKFS.LUSTRE
2679 test_49() { # bug 17710
2680 local timeout_orig=$TIMEOUT
2681 local ldlm_timeout_orig=$LDLM_TIMEOUT
2682 local LOCAL_TIMEOUT=20
2684 LDLM_TIMEOUT=$LOCAL_TIMEOUT
2685 TIMEOUT=$LOCAL_TIMEOUT
2689 check_mount || return 1
2691 echo "check ldlm_timout..."
2692 LDLM_MDS="`do_facet $SINGLEMDS lctl get_param -n ldlm_timeout`"
2693 LDLM_OST1="`do_facet ost1 lctl get_param -n ldlm_timeout`"
2694 LDLM_CLIENT="`do_facet client lctl get_param -n ldlm_timeout`"
2696 if [ $LDLM_MDS -ne $LDLM_OST1 ] || [ $LDLM_MDS -ne $LDLM_CLIENT ]; then
2697 error "Different LDLM_TIMEOUT:$LDLM_MDS $LDLM_OST1 $LDLM_CLIENT"
# When LDLM_TIMEOUT equals TIMEOUT, mkfs derives ldlm_timeout as TIMEOUT/3.
2700 if [ $LDLM_MDS -ne $((LOCAL_TIMEOUT / 3)) ]; then
2701 error "LDLM_TIMEOUT($LDLM_MDS) is not correct"
2704 umount_client $MOUNT
2705 stop_ost || return 2
2706 stop_mds || return 3
# Second pass with a distinct LDLM_TIMEOUT to check the explicit value
# is honored verbatim.
2708 LDLM_TIMEOUT=$((LOCAL_TIMEOUT - 1))
2712 check_mount || return 7
2714 LDLM_MDS="`do_facet $SINGLEMDS lctl get_param -n ldlm_timeout`"
2715 LDLM_OST1="`do_facet ost1 lctl get_param -n ldlm_timeout`"
2716 LDLM_CLIENT="`do_facet client lctl get_param -n ldlm_timeout`"
2718 if [ $LDLM_MDS -ne $LDLM_OST1 ] || [ $LDLM_MDS -ne $LDLM_CLIENT ]; then
2719 error "Different LDLM_TIMEOUT:$LDLM_MDS $LDLM_OST1 $LDLM_CLIENT"
2722 if [ $LDLM_MDS -ne $((LOCAL_TIMEOUT - 1)) ]; then
2723 error "LDLM_TIMEOUT($LDLM_MDS) is not correct"
2726 cleanup || return $?
# Restore the globals for subsequent tests.
2728 LDLM_TIMEOUT=$ldlm_timeout_orig
2729 TIMEOUT=$timeout_orig
2731 run_test 49 "check PARAM_SYS_LDLM_TIMEOUT option of MKFS.LUSTRE"
2734 # Test both statfs and lfs df and fail if either one fails
# NOTE(review): many lines of this helper are elided in this listing;
# $PID, $RC1 and $RC2 are assigned in the omitted portions — consult the
# full source before relying on the exact control flow here.
2735 multiop_bg_pause $1 f_
# Wake the backgrounded multiop so it performs its statfs and exits.
2738 killall -USR1 multiop
2739 [ $RC1 -ne 0 ] && log "lazystatfs multiop failed"
2740 wait $PID || { RC1=$?; log "multiop return error "; }
# NOTE(review): RC2 == 0 is treated as the df-failure case below; the
# lines that set RC2 are elided — confirm the polarity against the caller.
2747 if [ $RC2 -eq 0 ]; then
2749 log "lazystatfs df failed"
# Overall helper result: fail when either sub-check reported a problem.
2753 [[ $RC1 -ne 0 || $RC2 -eq 0 ]] && RC=1
2759 lctl set_param llite.$FSNAME-*.lazystatfs=1
2762 lazystatfs $MOUNT || error "lazystatfs failed but no down servers"
2764 cleanup || return $?
2766 run_test 50a "lazystatfs all servers available =========================="
2770 lctl set_param llite.$FSNAME-*.lazystatfs=1
2773 # Wait for client to detect down OST
2774 stop_ost || error "Unable to stop OST1"
2775 wait_osc_import_state mds ost DISCONN
2777 lazystatfs $MOUNT || error "lazystatfs should don't have returned EIO"
2779 umount_client $MOUNT || error "Unable to unmount client"
2780 stop_mds || error "Unable to stop MDS"
2782 run_test 50b "lazystatfs all servers down =========================="
2785 start_mds || error "Unable to start MDS"
2786 start_ost || error "Unable to start OST1"
2787 start_ost2 || error "Unable to start OST2"
2788 mount_client $MOUNT || error "Unable to mount client"
2789 lctl set_param llite.$FSNAME-*.lazystatfs=1
2792 # Wait for client to detect down OST
2793 stop_ost || error "Unable to stop OST1"
2794 wait_osc_import_state mds ost DISCONN
2795 lazystatfs $MOUNT || error "lazystatfs failed with one down server"
2797 umount_client $MOUNT || error "Unable to unmount client"
2798 stop_ost2 || error "Unable to stop OST2"
2799 stop_mds || error "Unable to stop MDS"
2800 #writeconf to remove all ost2 traces for subsequent tests
2801 writeconf_or_reformat
2803 run_test 50c "lazystatfs one server down =========================="
2806 start_mds || error "Unable to start MDS"
2807 start_ost || error "Unable to start OST1"
2808 start_ost2 || error "Unable to start OST2"
2809 mount_client $MOUNT || error "Unable to mount client"
2810 lctl set_param llite.$FSNAME-*.lazystatfs=1
2813 # Issue the statfs during the window where the client still
2814 # believes the OST to be available, but it is in fact down.
2815 # No failure just a statfs which hangs for a timeout interval.
2816 stop_ost || error "Unable to stop OST1"
2817 lazystatfs $MOUNT || error "lazystatfs failed with one down server"
2819 umount_client $MOUNT || error "Unable to unmount client"
2820 stop_ost2 || error "Unable to stop OST2"
2821 stop_mds || error "Unable to stop MDS"
2822 #writeconf to remove all ost2 traces for subsequent tests
2823 writeconf_or_reformat
2825 run_test 50d "lazystatfs client/server conn race =========================="
2832 start_mds || return 1
2833 #first client should see only one ost
2834 start_ost || return 2
2835 wait_osc_import_state mds ost FULL
2837 # Wait for client to detect down OST
2838 stop_ost || error "Unable to stop OST1"
2839 wait_osc_import_state mds ost DISCONN
2841 mount_client $MOUNT || error "Unable to mount client"
2842 lctl set_param llite.$FSNAME-*.lazystatfs=0
2844 multiop_bg_pause $MOUNT _f
2848 if [ $RC1 -ne 0 ]; then
2849 log "multiop failed $RC1"
2852 sleep $(( $TIMEOUT+1 ))
2854 [ $? -ne 0 ] && error "process isn't sleep"
2855 start_ost || error "Unable to start OST1"
2856 wait $pid || error "statfs failed"
2859 umount_client $MOUNT || error "Unable to unmount client"
2860 stop_ost || error "Unable to stop OST1"
2861 stop_mds || error "Unable to stop MDS"
2863 run_test 50e "normal statfs all servers down =========================="
2868 CONN_PROC="osc.$FSNAME-OST0001-osc-[M]*.ost_server_uuid"
2870 start_mds || error "Unable to start mds"
2871 #first client should see only one ost
2872 start_ost || error "Unable to start OST1"
2873 wait_osc_import_state mds ost FULL
2875 start_ost2 || error "Unable to start OST2"
2876 wait_osc_import_state mds ost2 FULL
2878 # Wait for client to detect down OST
2879 stop_ost2 || error "Unable to stop OST2"
2881 wait_osc_import_state mds ost2 DISCONN
2882 mount_client $MOUNT || error "Unable to mount client"
2883 lctl set_param llite.$FSNAME-*.lazystatfs=0
2885 multiop_bg_pause $MOUNT _f
2889 if [ $RC1 -ne 0 ]; then
2890 log "lazystatfs multiop failed $RC1"
2893 sleep $(( $TIMEOUT+1 ))
2895 [ $? -ne 0 ] && error "process isn't sleep"
2896 start_ost2 || error "Unable to start OST2"
2897 wait $pid || error "statfs failed"
2898 stop_ost2 || error "Unable to stop OST2"
2901 umount_client $MOUNT || error "Unable to unmount client"
2902 stop_ost || error "Unable to stop OST1"
2903 stop_mds || error "Unable to stop MDS"
2904 #writeconf to remove all ost2 traces for subsequent tests
2905 writeconf_or_reformat
2907 run_test 50f "normal statfs one server in down =========================="
2910 [ "$OSTCOUNT" -lt "2" ] && skip_env "$OSTCOUNT < 2, skipping" && return
2912 start_ost2 || error "Unable to start OST2"
2913 wait_osc_import_state mds ost2 FULL
2914 wait_osc_import_state client ost2 FULL
2916 local PARAM="${FSNAME}-OST0001.osc.active"
2918 $LFS setstripe -c -1 $DIR/$tfile || error "Unable to lfs setstripe"
2919 do_facet mgs $LCTL conf_param $PARAM=0 || error "Unable to deactivate OST"
2921 umount_client $MOUNT || error "Unable to unmount client"
2922 mount_client $MOUNT || error "Unable to mount client"
2923 # This df should not cause a panic
2926 do_facet mgs $LCTL conf_param $PARAM=1 || error "Unable to activate OST"
2928 umount_client $MOUNT || error "Unable to unmount client"
2929 stop_ost2 || error "Unable to stop OST2"
2930 stop_ost || error "Unable to stop OST1"
2931 stop_mds || error "Unable to stop MDS"
2932 #writeconf to remove all ost2 traces for subsequent tests
2933 writeconf_or_reformat
2935 run_test 50g "deactivated OST should not cause panic====================="
2939 # prepare MDT/OST, make OSC inactive for OST1
2940 [ "$OSTCOUNT" -lt "2" ] && skip_env "$OSTCOUNT < 2, skipping" && return
2941 do_facet ost1 "$TUNEFS --param osc.active=0 `ostdevname 1`" ||
2942 error "tunefs OST1 failed"
2943 start_mds || error "Unable to start MDT"
2944 start_ost || error "Unable to start OST1"
2945 start_ost2 || error "Unable to start OST2"
2946 mount_client $MOUNT || error "client start failed"
2950 # activate OSC for OST1
2951 local TEST="$LCTL get_param -n osc.${FSNAME}-OST0000-osc-[!M]*.active"
2952 set_conf_param_and_check client \
2953 "$TEST" "${FSNAME}-OST0000.osc.active" 1 ||
2954 error "Unable to activate OST1"
2956 mkdir -p $DIR/$tdir/2
2957 $LFS setstripe -c -1 -i 0 $DIR/$tdir/2
2958 sleep 1 && echo "create a file after OST1 is activated"
2960 createmany -o $DIR/$tdir/2/$tfile-%d 1
2962 # check OSC import is working
2963 stat $DIR/$tdir/2/* >/dev/null 2>&1 ||
2964 error "some OSC imports are still not connected"
2967 umount_client $MOUNT || error "Unable to umount client"
2968 stop_ost2 || error "Unable to stop OST2"
2971 run_test 50h "LU-642: activate deactivated OST ==="
2974 local LOCAL_TIMEOUT=20
2978 check_mount || return 1
2981 $LFS setstripe -c -1 $MOUNT/d1
2982 #define OBD_FAIL_MDS_REINT_DELAY 0x142
2983 do_facet $SINGLEMDS "lctl set_param fail_loc=0x142"
2984 touch $MOUNT/d1/f1 &
2987 start_ost2 || return 2
2989 stop_ost2 || return 3
2991 #writeconf to remove all ost2 traces for subsequent tests
2992 writeconf_or_reformat
2994 run_test 51 "Verify that mdt_reint handles RMF_MDT_MD correctly when an OST is added"
3003 do_node $node mkdir -p $dest
3004 [ $? -eq 0 ] || { error "Unable to create directory"; return 1; }
3006 do_node $node 'tar cf - '$@' | tar xf - -C '$dest';
3007 [ \"\${PIPESTATUS[*]}\" = \"0 0\" ] || exit 1'
3008 [ $? -eq 0 ] || { error "Unable to tar files"; return 2; }
3010 do_node $node 'getfattr -d -m "[a-z]*\\." '$@' > '$xattrs
3011 [ $? -eq 0 ] || { error "Unable to read xattrs"; return 3; }
3021 local backup2=${TMP}/backup2
3023 do_node $node mkdir -p $backup2
3024 [ $? -eq 0 ] || { error "Unable to create directory"; return 1; }
3026 do_node $node 'tar cf - '$@' | tar xf - -C '$backup2';
3027 [ \"\${PIPESTATUS[*]}\" = \"0 0\" ] || exit 1'
3028 [ $? -eq 0 ] || { error "Unable to tar files to diff"; return 2; }
3030 do_node $node "diff -rq $backup $backup2"
3031 [ $? -eq 0 ] || { error "contents differ"; return 3; }
3033 local xattrs2=${TMP}/xattrs2
3034 do_node $node 'getfattr -d -m "[a-z]*\\." '$@' > '$xattrs2
3035 [ $? -eq 0 ] || { error "Unable to read xattrs to diff"; return 4; }
3037 do_node $node "diff $xattrs $xattrs2"
3038 [ $? -eq 0 ] || { error "xattrs differ"; return 5; }
3040 do_node $node "rm -rf $backup2 $xattrs2"
3041 [ $? -eq 0 ] || { error "Unable to delete temporary files"; return 6; }
3045 if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
3046 skip "Only applicable to ldiskfs-based MDTs"
3051 [ $? -eq 0 ] || { error "Unable to start MDS"; return 1; }
3053 [ $? -eq 0 ] || { error "Unable to start OST1"; return 2; }
3055 [ $? -eq 0 ] || { error "Unable to mount client"; return 3; }
3058 local ost1mnt=$(facet_mntpt ost1)
3059 local ost1node=$(facet_active_host ost1)
3060 local ost1tmp=$TMP/conf52
3064 [ $? -eq 0 ] || { error "Unable to create tdir"; return 4; }
3065 touch $TMP/modified_first
3066 [ $? -eq 0 ] || { error "Unable to create temporary file"; return 5; }
3067 local mtime=$(stat -c %Y $TMP/modified_first)
3068 do_node $ost1node "mkdir -p $ost1tmp && touch -m -d @$mtime $ost1tmp/modified_first"
3070 [ $? -eq 0 ] || { error "Unable to create temporary file"; return 6; }
3073 $LFS setstripe -c -1 -S 1M $DIR/$tdir
3074 [ $? -eq 0 ] || { error "lfs setstripe failed"; return 7; }
3076 for (( i=0; i < nrfiles; i++ )); do
3077 multiop $DIR/$tdir/$tfile-$i Ow1048576w1048576w524288c
3078 [ $? -eq 0 ] || { error "multiop failed"; return 8; }
3084 echo backup files to $TMP/files
3085 local files=$(find $DIR/$tdir -type f -newer $TMP/modified_first)
3086 copy_files_xattrs `hostname` $TMP/files $TMP/file_xattrs $files
3087 [ $? -eq 0 ] || { error "Unable to copy files"; return 9; }
3089 umount_client $MOUNT
3090 [ $? -eq 0 ] || { error "Unable to umount client"; return 10; }
3092 [ $? -eq 0 ] || { error "Unable to stop ost1"; return 11; }
3094 echo mount ost1 as ldiskfs
3095 do_node $ost1node mkdir -p $ost1mnt
3096 [ $? -eq 0 ] || { error "Unable to create $ost1mnt"; return 23; }
3097 if ! do_node $ost1node test -b $ost1_dev; then
3100 do_node $ost1node mount -t $(facet_fstype ost1) $loop $ost1_dev \
3102 [ $? -eq 0 ] || { error "Unable to mount ost1 as ldiskfs"; return 12; }
3105 echo backup objects to $ost1tmp/objects
3106 local objects=$(do_node $ost1node 'find '$ost1mnt'/O/[0-9]* -type f'\
3107 '-size +0 -newer '$ost1tmp'/modified_first -regex ".*\/[0-9]+"')
3108 copy_files_xattrs $ost1node $ost1tmp/objects $ost1tmp/object_xattrs \
3110 [ $? -eq 0 ] || { error "Unable to copy objects"; return 13; }
3112 # move objects to lost+found
3113 do_node $ost1node 'mv '$objects' '${ost1mnt}'/lost+found'
3114 [ $? -eq 0 ] || { error "Unable to move objects"; return 14; }
3117 do_node $ost1node "ll_recover_lost_found_objs -d $ost1mnt/lost+found"
3118 [ $? -eq 0 ] || { error "ll_recover_lost_found_objs failed"; return 15; }
3120 # compare restored objects against saved ones
3121 diff_files_xattrs $ost1node $ost1tmp/objects $ost1tmp/object_xattrs $objects
3122 [ $? -eq 0 ] || { error "Unable to diff objects"; return 16; }
3124 do_node $ost1node "umount $ost1mnt"
3125 [ $? -eq 0 ] || { error "Unable to umount ost1 as ldiskfs"; return 17; }
3128 [ $? -eq 0 ] || { error "Unable to start ost1"; return 18; }
3130 [ $? -eq 0 ] || { error "Unable to mount client"; return 19; }
3133 diff_files_xattrs `hostname` $TMP/files $TMP/file_xattrs $files
3134 [ $? -eq 0 ] || { error "Unable to diff files"; return 20; }
3136 rm -rf $TMP/files $TMP/file_xattrs
3137 [ $? -eq 0 ] || { error "Unable to delete temporary files"; return 21; }
3138 do_node $ost1node "rm -rf $ost1tmp"
3139 [ $? -eq 0 ] || { error "Unable to delete temporary files"; return 22; }
3142 run_test 52 "check recovering objects from lost+found"
3144 # Checks threads_min/max/started for some service
3146 # Arguments: service name (OST or MDT), facet (e.g., ost1, $SINGLEMDS), and a
3147 # parameter pattern prefix like 'ost.*.ost'.
3160 local msg="Insane $modname thread counts"
3161 local ncpts=$(check_cpt_number $facet)
3165 check_mount || return 41
3167 # We need to expand $parampat, but it may match multiple parameters, so
3168 # we'll pick the first one
3169 if ! paramp=$(do_facet $facet "lctl get_param -N ${parampat}.threads_min"|head -1); then
3170 error "Couldn't expand ${parampat}.threads_min parameter name"
3174 # Remove the .threads_min part
3175 paramp=${paramp%.threads_min}
3177 # Check for sanity in defaults
3178 tmin=$(do_facet $facet "lctl get_param -n ${paramp}.threads_min" || echo 0)
3179 tmax=$(do_facet $facet "lctl get_param -n ${paramp}.threads_max" || echo 0)
3180 tstarted=$(do_facet $facet "lctl get_param -n ${paramp}.threads_started" || echo 0)
3181 lassert 23 "$msg (PDSH problems?)" '(($tstarted && $tmin && $tmax))' || return $?
3182 lassert 24 "$msg" '(($tstarted >= $tmin && $tstarted <= $tmax ))' || return $?
3183 nthrs=$(expr $tmax - $tmin)
3184 if [ $nthrs -lt $ncpts ]; then
3190 [ $tmin -eq $tmax -a $tmin -eq $tstarted ] &&
3191 skip_env "module parameter forced $facet thread count" &&
3192 tmin=3 && tmax=$((3 * tmax))
3194 # Check that we can change min/max
3195 do_facet $facet "lctl set_param ${paramp}.threads_min=$((tmin + nthrs))"
3196 do_facet $facet "lctl set_param ${paramp}.threads_max=$((tmax - nthrs))"
3197 tmin2=$(do_facet $facet "lctl get_param -n ${paramp}.threads_min" || echo 0)
3198 tmax2=$(do_facet $facet "lctl get_param -n ${paramp}.threads_max" || echo 0)
3199 lassert 25 "$msg" '(($tmin2 == ($tmin + $nthrs) && $tmax2 == ($tmax - $nthrs)))' || return $?
3201 # Check that we can set min/max to the same value
3202 tmin=$(do_facet $facet "lctl get_param -n ${paramp}.threads_min" || echo 0)
3203 do_facet $facet "lctl set_param ${paramp}.threads_max=$tmin"
3204 tmin2=$(do_facet $facet "lctl get_param -n ${paramp}.threads_min" || echo 0)
3205 tmax2=$(do_facet $facet "lctl get_param -n ${paramp}.threads_max" || echo 0)
3206 lassert 26 "$msg" '(($tmin2 == $tmin && $tmax2 == $tmin))' || return $?
3208 # Check that we can't set max < min
3209 do_facet $facet "lctl set_param ${paramp}.threads_max=$((tmin - 1))"
3210 tmin2=$(do_facet $facet "lctl get_param -n ${paramp}.threads_min" || echo 0)
3211 tmax2=$(do_facet $facet "lctl get_param -n ${paramp}.threads_max" || echo 0)
3212 lassert 27 "$msg" '(($tmin2 <= $tmax2))' || return $?
3214 # We need to ensure that we get the module options desired; to do this
3215 # we set LOAD_MODULES_REMOTE=true and we call setmodopts below.
3216 LOAD_MODULES_REMOTE=true
3219 local newvalue="${opts}=$(expr $basethr \* $ncpts)"
3220 setmodopts -a $modname "$newvalue" oldvalue
3224 check_mount || return 41
3226 # Restore previous setting of MODOPTS_*
3227 setmodopts $modname "$oldvalue"
3229 # Check that $opts took
3230 tmin=$(do_facet $facet "lctl get_param -n ${paramp}.threads_min")
3231 tmax=$(do_facet $facet "lctl get_param -n ${paramp}.threads_max")
3232 tstarted=$(do_facet $facet "lctl get_param -n ${paramp}.threads_started")
3233 lassert 28 "$msg" '(($tstarted == $tmin && $tstarted == $tmax ))' || return $?
3242 thread_sanity OST ost1 'ost.*.ost' 'oss_num_threads' '16'
3245 run_test 53a "check OSS thread count params"
3249 local mds=$(do_facet $SINGLEMDS "lctl get_param -N mds.*.*.threads_max \
3251 if [ -z "$mds" ]; then
3252 #running this on an old MDT
3253 thread_sanity MDT $SINGLEMDS 'mdt.*.*.' 'mdt_num_threads' 16
3255 thread_sanity MDT $SINGLEMDS 'mds.*.*.' 'mds_num_threads' 16
3259 run_test 53b "check MDS thread count params"
3262 if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
3263 skip "Only applicable to ldiskfs-based MDTs"
3267 do_rpc_nodes $(facet_host ost1) run_llverdev $(ostdevname 1) -p
3268 [ $? -eq 0 ] || error "llverdev failed!"
3271 run_test 54a "test llverdev and partial verify of device"
3274 if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
3275 skip "Only applicable to ldiskfs-based MDTs"
3280 run_llverfs $MOUNT -p
3281 [ $? -eq 0 ] || error "llverfs failed!"
3284 run_test 54b "test llverfs and partial verify of filesystem"
# Body of lov_objid_size(): the lov_objid file holds one 8-byte object id
# slot per OST, so the expected file size is (highest OST index + 1) * 8.
3288 local max_ost_index=$1
# -n: emit the size without a trailing newline for direct comparison.
3289 echo -n $(((max_ost_index + 1) * 8))
3293 if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
3294 skip "Only applicable to ldiskfs-based MDTs"
3298 local mdsdev=$(mdsdevname 1)
3299 local mdsvdev=$(mdsvdevname 1)
3303 add mds1 $(mkfs_opts mds1 ${mdsdev}) --reformat $mdsdev \
3305 add ost1 $(mkfs_opts ost1 $(ostdevname 1)) --index=$i \
3306 --reformat $(ostdevname 1) $(ostvdevname 1)
3312 echo checking size of lov_objid for ost index $i
3313 LOV_OBJID_SIZE=$(do_facet mds1 "$DEBUGFS -R 'stat lov_objid' $mdsdev 2>/dev/null" | grep ^User | awk '{print $6}')
3314 if [ "$LOV_OBJID_SIZE" != $(lov_objid_size $i) ]; then
3315 error "lov_objid size has to be $(lov_objid_size $i), not $LOV_OBJID_SIZE"
3317 echo ok, lov_objid size is correct: $LOV_OBJID_SIZE
3324 run_test 55 "check lov_objid size"
3327 local mds_journal_size_orig=$MDSJOURNALSIZE
3331 for num in $(seq 1 $MDSCOUNT); do
3332 add mds${num} $(mkfs_opts mds${num} $(mdsdevname $num)) \
3333 --reformat $(mdsdevname $num) $(mdsvdevname $num)
3335 add ost1 $(mkfs_opts ost1 $(ostdevname 1)) --index=1000 --reformat \
3336 $(ostdevname 1) $(ostvdevname 1)
3337 add ost2 $(mkfs_opts ost2 $(ostdevname 2)) --index=10000 --reformat \
3338 $(ostdevname 2) $(ostvdevname 2)
3342 start_ost2 || error "Unable to start second ost"
3343 mount_client $MOUNT || error "Unable to mount client"
3347 MDSJOURNALSIZE=$mds_journal_size_orig
3350 run_test 56 "check big indexes"
3352 test_57a() { # bug 22656
# Pick a NID from ost1's network-interface list (last 'nis' line, first
# field). NOTE(review): presumably any local NID suffices here — confirm.
3353 local NID=$(do_facet ost1 "$LCTL get_param nis" | tail -1 | awk '{print $1}')
3354 writeconf_or_reformat
# Declare $NID as a *failover* node for OST1 via tunefs.lustre.
3355 do_facet ost1 "$TUNEFS --failnode=$NID `ostdevname 1`" || error "tunefs failed"
# Initial registration arriving from a declared failover NID must be
# rejected, so a successful start here is the failure case.
# NOTE(review): lines 3356 and 3358-3359 are elided in this listing.
3357 start_ost && error "OST registration from failnode should fail"
3360 run_test 57a "initial registration from failnode should fail (should return errs)"
3363 local NID=$(do_facet ost1 "$LCTL get_param nis" | tail -1 | awk '{print $1}')
3364 writeconf_or_reformat
3365 do_facet ost1 "$TUNEFS --servicenode=$NID `ostdevname 1`" || error "tunefs failed"
3367 start_ost || error "OST registration from servicenode should not fail"
3370 run_test 57b "initial registration from servicenode should not fail"
3373 do_facet mgs $LCTL get_param mgs.MGS.live.$FSNAME | grep OST | wc -l
3376 test_58() { # bug 22658
3377 if [ $(facet_fstype mds) != ldiskfs ]; then
3378 skip "Only applicable to ldiskfs-based MDTs"
3383 createmany -o $DIR/$tdir/$tfile-%d 100
3384 # make sure that OSTs do not cancel llog cookies before we unmount the MDS
3385 #define OBD_FAIL_OBD_LOG_CANCEL_NET 0x601
3386 do_facet $SINGLEMDS "lctl set_param fail_loc=0x601"
3387 unlinkmany $DIR/$tdir/$tfile-%d 100
3390 local MNTDIR=$(facet_mntpt $SINGLEMDS)
3391 local devname=$(mdsdevname ${SINGLEMDS//mds/})
3393 if ! do_facet $SINGLEMDS "test -b $devname"; then
3397 # remove all files from the OBJECTS dir
3398 do_facet $SINGLEMDS "mount -t ldiskfs $opts $devname $MNTDIR"
3399 do_facet $SINGLEMDS "find $MNTDIR/O/1/d* -type f -delete"
3400 do_facet $SINGLEMDS "umount $MNTDIR"
3401 # restart MDS with missing llog files
3403 do_facet mds "lctl set_param fail_loc=0"
3406 run_test 58 "missing llog files must not prevent MDT from mounting"
3409 start_mgsmds >> /dev/null
3410 local C1=$(count_osts)
3411 if [ $C1 -eq 0 ]; then
3412 start_ost >> /dev/null
3416 echo "original ost count: $C1 (expect > 0)"
3417 [ $C1 -gt 0 ] || error "No OSTs in $FSNAME log"
3418 start_mgsmds -o writeconf >> /dev/null || error "MDT start failed"
3419 local C2=$(count_osts)
3420 echo "after mdt writeconf count: $C2 (expect 0)"
3421 [ $C2 -gt 0 ] && error "MDT writeconf should erase OST logs"
3422 echo "OST start without writeconf should fail:"
3423 start_ost >> /dev/null && error "OST start without writeconf didn't fail"
3424 echo "OST start with writeconf should succeed:"
3425 start_ost -o writeconf >> /dev/null || error "OST1 start failed"
3426 local C3=$(count_osts)
3427 echo "after ost writeconf count: $C3 (expect 1)"
3428 [ $C3 -eq 1 ] || error "new OST writeconf should add:"
3429 start_ost2 -o writeconf >> /dev/null || error "OST2 start failed"
3430 local C4=$(count_osts)
3431 echo "after ost2 writeconf count: $C4 (expect 2)"
3432 [ $C4 -eq 2 ] || error "OST2 writeconf should add log"
3433 stop_ost2 >> /dev/null
3434 cleanup_nocli >> /dev/null
3435 #writeconf to remove all ost2 traces for subsequent tests
3436 writeconf_or_reformat
3438 run_test 59 "writeconf mount option"
3440 test_60() { # LU-471
3443 if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
3444 skip "Only applicable to ldiskfs-based MDTs"
3448 for num in $(seq $MDSCOUNT); do
3449 add mds${num} $(mkfs_opts mds${num} $(mdsdevname $num)) \
3450 --mkfsoptions='\" -E stride=64 -O ^uninit_bg\"' \
3451 --reformat $(mdsdevname $num) $(mdsvdevname $num) ||
3455 dump=$(do_facet $SINGLEMDS dumpe2fs $(mdsdevname 1))
3457 [ $rc -eq 0 ] || error "dumpe2fs $(mdsdevname 1) failed"
3459 # MDT default has dirdata feature
3460 echo $dump | grep dirdata > /dev/null || error "dirdata is not set"
3461 # we disable uninit_bg feature
3462 echo $dump | grep uninit_bg > /dev/null && error "uninit_bg is set"
3463 # we set stride extended options
3464 echo $dump | grep stride > /dev/null || error "stride is not set"
3467 run_test 60 "check mkfs.lustre --mkfsoptions -E -O options setting"
3470 local reformat=false
3472 [ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.1.53) ] ||
3473 { skip "Need MDS version at least 2.1.53"; return 0; }
3475 if [ $(facet_fstype $SINGLEMDS) == ldiskfs ] &&
3476 ! large_xattr_enabled; then
3478 LDISKFS_MKFS_OPTS+=" -O large_xattr"
3480 for num in $(seq $MDSCOUNT); do
3481 add mds${num} $(mkfs_opts mds$num $(mdsdevname $num)) \
3482 --reformat $(mdsdevname $num) $(mdsvdevname $num) ||
3483 error "add mds $num failed"
3487 setup_noconfig || error "setting up the filesystem failed"
3488 client_up || error "starting client failed"
3490 local file=$DIR/$tfile
3493 local large_value="$(generate_string $(max_xattr_size))"
3494 local small_value="bar"
3496 local name="trusted.big"
3497 log "save large xattr $name on $file"
3498 setfattr -n $name -v $large_value $file ||
3499 error "saving $name on $file failed"
3501 local new_value=$(get_xattr_value $name $file)
3502 [[ "$new_value" != "$large_value" ]] &&
3503 error "$name different after saving"
3505 log "shrink value of $name on $file"
3506 setfattr -n $name -v $small_value $file ||
3507 error "shrinking value of $name on $file failed"
3509 new_value=$(get_xattr_value $name $file)
3510 [[ "$new_value" != "$small_value" ]] &&
3511 error "$name different after shrinking"
3513 log "grow value of $name on $file"
3514 setfattr -n $name -v $large_value $file ||
3515 error "growing value of $name on $file failed"
3517 new_value=$(get_xattr_value $name $file)
3518 [[ "$new_value" != "$large_value" ]] &&
3519 error "$name different after growing"
3521 log "check value of $name on $file after remounting MDS"
3523 new_value=$(get_xattr_value $name $file)
3524 [[ "$new_value" != "$large_value" ]] &&
3525 error "$name different after remounting MDS"
3527 log "remove large xattr $name from $file"
3528 setfattr -x $name $file || error "removing $name from $file failed"
3533 LDISKFS_MKFS_OPTS=${LDISKFS_MKFS_OPTS% -O large_xattr}
3537 run_test 61 "large xattr"
3540 if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
3541 skip "Only applicable to ldiskfs-based MDTs"
3546 local mdsdev=$(mdsdevname 1)
3547 local ostdev=$(ostdevname 1)
3549 [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.2.51) ]] ||
3550 { skip "Need MDS version at least 2.2.51"; return 0; }
3552 echo "disable journal for mds"
3553 do_facet mds tune2fs -O ^has_journal $mdsdev || error "tune2fs failed"
3554 start_mds && error "MDT start should fail"
3555 echo "disable journal for ost"
3556 do_facet ost1 tune2fs -O ^has_journal $ostdev || error "tune2fs failed"
3557 start_ost && error "OST start should fail"
3558 cleanup || return $?
3561 run_test 62 "start with disabled journal"
3564 if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
3565 skip "Only applicable to ldiskfs-based MDTs"
3569 local inode_slab=$(do_facet $SINGLEMDS \
3570 "awk '/ldiskfs_inode_cache/ { print \\\$5 }' /proc/slabinfo")
3571 if [ -z "$inode_slab" ]; then
3572 skip "ldiskfs module has not been loaded"
3576 echo "$inode_slab ldisk inodes per page"
3577 [ "$inode_slab" -ge "3" ] ||
3578 error "ldisk inode size is too big, $inode_slab objs per page"
3581 run_test 63 "Verify each page can at least hold 3 ldisk inodes"
3586 start_ost2 || error "Unable to start second ost"
3587 mount_client $MOUNT || error "Unable to mount client"
3588 stop_ost2 || error "Unable to stop second ost"
3590 $LFS df --lazy || error "lfs df failed"
3591 cleanup || return $?
3592 #writeconf to remove all ost2 traces for subsequent tests
3593 writeconf_or_reformat
3595 run_test 64 "check lfs df --lazy "
3597 test_65() { # LU-2237
3598 # Currently, the test is only valid for ldiskfs backend
3599 [ "$(facet_fstype $SINGLEMDS)" != "ldiskfs" ] &&
3600 skip "non-ldiskfs backend" && return
3602 local devname=$(mdsdevname ${SINGLEMDS//mds/})
3603 local brpt=$(facet_mntpt brpt)
3606 if ! do_facet $SINGLEMDS "test -b $devname"; then
3611 local obj=$(do_facet $SINGLEMDS \
3612 "$DEBUGFS -c -R \\\"stat last_rcvd\\\" $devname" |
3614 if [ -z "$obj" ]; then
3615 # The MDT may be just re-formatted, mount the MDT for the
3616 # first time to guarantee the "last_rcvd" file is there.
3617 start_mds || error "fail to mount the MDS for the first time"
3621 # remove the "last_rcvd" file
3622 do_facet $SINGLEMDS "mkdir -p $brpt"
3623 do_facet $SINGLEMDS \
3624 "mount -t $(facet_fstype $SINGLEMDS) $opts $devname $brpt"
3625 do_facet $SINGLEMDS "rm -f ${brpt}/last_rcvd"
3626 do_facet $SINGLEMDS "umount $brpt"
3628 # restart MDS, the "last_rcvd" file should be recreated.
3629 start_mds || error "fail to restart the MDS"
3631 obj=$(do_facet $SINGLEMDS \
3632 "$DEBUGFS -c -R \\\"stat last_rcvd\\\" $devname" | grep Inode)
3633 [ -n "$obj" ] || error "fail to re-create the last_rcvd"
3635 run_test 65 "re-create the lost last_rcvd file when server mount"
3638 [[ $(lustre_version_code mgs) -ge $(version_code 2.3.59) ]] ||
3639 { skip "Need MGS version at least 2.3.59"; return 0; }
3642 local OST1_NID=$(do_facet ost1 $LCTL list_nids | head -1)
3643 local MDS_NID=$(do_facet $SINGLEMDS $LCTL list_nids | head -1)
3645 echo "replace_nids should fail if MDS, OSTs and clients are UP"
3646 do_facet mgs $LCTL replace_nids $FSNAME-OST0000 $OST1_NID &&
3647 error "replace_nids fail"
3649 umount_client $MOUNT || error "unmounting client failed"
3650 echo "replace_nids should fail if MDS and OSTs are UP"
3651 do_facet mgs $LCTL replace_nids $FSNAME-OST0000 $OST1_NID &&
3652 error "replace_nids fail"
3655 echo "replace_nids should fail if MDS is UP"
3656 do_facet mgs $LCTL replace_nids $FSNAME-OST0000 $OST1_NID &&
3657 error "replace_nids fail"
3659 stop_mds || error "stopping mds failed"
3661 if combined_mgs_mds; then
3662 start_mdt 1 "-o nosvc" ||
3663 error "starting mds with nosvc option failed"
3666 echo "command should accept two parameters"
3667 do_facet mgs $LCTL replace_nids $FSNAME-OST0000 &&
3668 error "command should accept two params"
3670 echo "correct device name should be passed"
3671 do_facet mgs $LCTL replace_nids $FSNAME-WRONG0000 $OST1_NID &&
3672 error "wrong devname"
3674 echo "wrong nids list should not destroy the system"
3675 do_facet mgs $LCTL replace_nids $FSNAME-OST0000 "wrong nids list" &&
3678 echo "replace OST nid"
3679 do_facet mgs $LCTL replace_nids $FSNAME-OST0000 $OST1_NID ||
3680 error "replace nids failed"
3682 echo "command should accept two parameters"
3683 do_facet mgs $LCTL replace_nids $FSNAME-MDT0000 &&
3684 error "command should accept two params"
3686 echo "wrong nids list should not destroy the system"
3687 do_facet mgs $LCTL replace_nids $FSNAME-MDT0000 "wrong nids list" &&
3690 echo "replace MDS nid"
3691 do_facet mgs $LCTL replace_nids $FSNAME-MDT0000 $MDS_NID ||
3692 error "replace nids failed"
3694 if ! combined_mgs_mds ; then
3701 check_mount || error "error after nid replace"
3702 cleanup || error "cleanup failed"
3705 run_test 66 "replace nids"
3708 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
3711 start_mdt 1 || error "MDT0 start fail"
3713 start_ost || error "OST0 start fail"
3715 start_mdt 2 || error "MDT1 start fail"
3717 mount_client $MOUNT || error "mount client fails"
3719 mkdir -p $DIR/$tdir || error "create dir fail"
3721 $LFS mkdir -i $MDTIDX $DIR/$tdir/remote_dir ||
3722 error "create remote dir fail"
3724 rm -rf $DIR/$tdir || error "delete dir fail"
3725 cleanup || return $?
3727 run_test 70a "start MDT0, then OST, then MDT1"
3730 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
3733 start_ost || error "OST0 start fail"
3735 start_mdt 1 || error "MDT0 start fail"
3736 start_mdt 2 || error "MDT1 start fail"
3738 mount_client $MOUNT || error "mount client fails"
3740 mkdir -p $DIR/$tdir || error "create dir fail"
3742 $LFS mkdir -i $MDTIDX $DIR/$tdir/remote_dir ||
3743 error "create remote dir fail"
3745 rm -rf $DIR/$tdir || error "delete dir fail"
3747 cleanup || return $?
3749 run_test 70b "start OST, MDT1, MDT0"
3752 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
3755 start_mdt 1 || error "MDT0 start fail"
3756 start_mdt 2 || error "MDT1 start fail"
3757 start_ost || error "OST0 start fail"
3759 mount_client $MOUNT || error "mount client fails"
3760 stop_mdt 1 || error "MDT1 start fail"
3762 local mdc_for_mdt1=$($LCTL dl | grep MDT0000-mdc | awk '{print $4}')
3763 echo "deactivate $mdc_for_mdt1"
3764 $LCTL --device $mdc_for_mdt1 deactivate || return 1
3766 mkdir -p $DIR/$tdir && error "mkdir succeed"
3768 $LFS mkdir -i $MDTIDX $DIR/$tdir/remote_dir &&
3769 error "create remote dir succeed"
3771 cleanup || return $?
3773 run_test 70c "stop MDT0, mkdir fail, create remote dir fail"
3776 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
3779 start_mdt 1 || error "MDT0 start fail"
3780 start_mdt 2 || error "MDT1 start fail"
3781 start_ost || error "OST0 start fail"
3783 mount_client $MOUNT || error "mount client fails"
3785 stop_mdt 2 || error "MDT1 start fail"
3787 local mdc_for_mdt2=$($LCTL dl | grep MDT0001-mdc |
3789 echo "deactivate $mdc_for_mdt2"
3790 $LCTL --device $mdc_for_mdt2 deactivate ||
3791 error "set $mdc_for_mdt2 deactivate failed"
3793 mkdir -p $DIR/$tdir || error "mkdir fail"
3794 $LFS mkdir -i $MDTIDX $DIR/$tdir/remote_dir &&
3795 error "create remote dir succeed"
3797 rm -rf $DIR/$tdir || error "delete dir fail"
3799 cleanup || return $?
3801 run_test 70d "stop MDT1, mkdir succeed, create remote dir fail"
3804 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
3805 if combined_mgs_mds; then
3806 skip "needs separate MGS/MDT" && return
3810 start_mdt 1 || error "MDT0 start fail"
3811 start_ost || error "OST0 start fail"
3812 start_mdt 2 || error "MDT1 start fail"
3813 start_ost2 || error "OST1 start fail"
3815 mount_client $MOUNT || error "mount client fails"
3817 mkdir -p $DIR/$tdir || error "mkdir fail"
3818 $LFS mkdir -i $MDTIDX $DIR/$tdir/remote_dir ||
3819 error "create remote dir succeed"
3821 mcreate $DIR/$tdir/remote_dir/$tfile || error "create file failed"
3822 rm -rf $DIR/$tdir || error "delete dir fail"
3824 umount_client $MOUNT
3825 stop_mdt 1 || error "MDT0 stop fail"
3826 stop_mdt 2 || error "MDT1 stop fail"
3827 stop_ost || error "OST0 stop fail"
3828 stop_ost2 || error "OST1 stop fail"
3830 run_test 71a "start MDT0 OST0, MDT1, OST1"
# test_71b: same check as test_71a but with startup order MDT1, OST0,
# MDT0, OST1 — the filesystem must come up usable regardless of order.
# NOTE(review): the 'test_71b() {' header, the 'fi' closing the
# combined_mgs_mds check, a presumed 'local MDTIDX=1', and the closing
# '}' all fall on lines dropped from this excerpt — confirm in full file.
3833 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
3834 if combined_mgs_mds; then
3835 skip "needs separate MGS/MDT" && return
3839 start_mdt 2 || error "MDT1 start fail"
3840 start_ost || error "OST0 start fail"
3841 start_mdt 1 || error "MDT0 start fail"
3842 start_ost2 || error "OST1 start fail"
3844 mount_client $MOUNT || error "mount client fails"
3846 mkdir -p $DIR/$tdir || error "mkdir fail"
# NOTE(review): message bug — '||' fires on FAILURE, so the text should
# read "create remote dir failed", not "succeed".
3847 $LFS mkdir -i $MDTIDX $DIR/$tdir/remote_dir ||
3848 error "create remote dir succeed"
3850 mcreate $DIR/$tdir/remote_dir/$tfile || error "create file failed"
3851 rm -rf $DIR/$tdir || error "delete dir fail"
3853 umount_client $MOUNT
3854 stop_mdt 1 || error "MDT0 stop fail"
3855 stop_mdt 2 || error "MDT1 stop fail"
3856 stop_ost || error "OST0 stop fail"
3857 stop_ost2 || error "OST1 stop fail"
3859 run_test 71b "start MDT1, OST0, MDT0, OST1"
# test_71c: same check as test_71a but with startup order OST0, OST1,
# MDT1, MDT0 (OSTs before any MDT).
# NOTE(review): the 'test_71c() {' header, the 'fi' closing the
# combined_mgs_mds check, a presumed 'local MDTIDX=1', and the closing
# '}' all fall on lines dropped from this excerpt — confirm in full file.
3862 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
3863 if combined_mgs_mds; then
3864 skip "needs separate MGS/MDT" && return
3868 start_ost || error "OST0 start fail"
3869 start_ost2 || error "OST1 start fail"
3870 start_mdt 2 || error "MDT1 start fail"
3871 start_mdt 1 || error "MDT0 start fail"
3873 mount_client $MOUNT || error "mount client fails"
3875 mkdir -p $DIR/$tdir || error "mkdir fail"
# NOTE(review): message bug — '||' fires on FAILURE, so the text should
# read "create remote dir failed", not "succeed".
3876 $LFS mkdir -i $MDTIDX $DIR/$tdir/remote_dir ||
3877 error "create remote dir succeed"
3879 mcreate $DIR/$tdir/remote_dir/$tfile || error "create file failed"
3880 rm -rf $DIR/$tdir || error "delete dir fail"
3882 umount_client $MOUNT
3883 stop_mdt 1 || error "MDT0 stop fail"
3884 stop_mdt 2 || error "MDT1 stop fail"
3885 stop_ost || error "OST0 stop fail"
3886 stop_ost2 || error "OST1 stop fail"
3889 run_test 71c "start OST0, OST1, MDT1, MDT0"
# test_71d: same check as test_71a but with startup order OST0, MDT1,
# MDT0, OST1.
# NOTE(review): the 'test_71d() {' header, the 'fi' closing the
# combined_mgs_mds check, a presumed 'local MDTIDX=1', and the closing
# '}' all fall on lines dropped from this excerpt — confirm in full file.
3892 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
3893 if combined_mgs_mds; then
3894 skip "needs separate MGS/MDT" && return
3898 start_ost || error "OST0 start fail"
# NOTE(review): message bug — this starts MDT index 2 (MDT1), so the
# error text should read "MDT1 start fail", not "MDT0".
3899 start_mdt 2 || error "MDT0 start fail"
3900 start_mdt 1 || error "MDT0 start fail"
3901 start_ost2 || error "OST1 start fail"
3903 mount_client $MOUNT || error "mount client fails"
3905 mkdir -p $DIR/$tdir || error "mkdir fail"
# NOTE(review): message bug — '||' fires on FAILURE, so the text should
# read "create remote dir failed", not "succeed".
3906 $LFS mkdir -i $MDTIDX $DIR/$tdir/remote_dir ||
3907 error "create remote dir succeed"
3909 mcreate $DIR/$tdir/remote_dir/$tfile || error "create file failed"
3910 rm -rf $DIR/$tdir || error "delete dir fail"
3912 umount_client $MOUNT
3913 stop_mdt 1 || error "MDT0 stop fail"
3914 stop_mdt 2 || error "MDT1 stop fail"
3915 stop_ost || error "OST0 stop fail"
3916 stop_ost2 || error "OST1 stop fail"
3919 run_test 71d "start OST0, MDT1, MDT0, OST1"
# test_71e: same check as test_71a but with startup order OST0, MDT1,
# OST1, MDT0 (MDT0 last).
# NOTE(review): the 'test_71e() {' header, the 'fi' closing the
# combined_mgs_mds check, a presumed 'local MDTIDX=1', and the closing
# '}' all fall on lines dropped from this excerpt — confirm in full file.
3922 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
3923 if combined_mgs_mds; then
3924 skip "needs separate MGS/MDT" && return
3928 start_ost || error "OST0 start fail"
3929 start_mdt 2 || error "MDT1 start fail"
3930 start_ost2 || error "OST1 start fail"
3931 start_mdt 1 || error "MDT0 start fail"
3933 mount_client $MOUNT || error "mount client fails"
3935 mkdir -p $DIR/$tdir || error "mkdir fail"
# NOTE(review): message bug — '||' fires on FAILURE, so the text should
# read "create remote dir failed", not "succeed".
3936 $LFS mkdir -i $MDTIDX $DIR/$tdir/remote_dir ||
3937 error "create remote dir succeed"
3939 mcreate $DIR/$tdir/remote_dir/$tfile || error "create file failed"
3940 rm -rf $DIR/$tdir || error "delete dir fail"
3942 umount_client $MOUNT
3943 stop_mdt 1 || error "MDT0 stop fail"
3944 stop_mdt 2 || error "MDT1 stop fail"
3945 stop_ost || error "OST0 stop fail"
3946 stop_ost2 || error "OST1 stop fail"
3949 run_test 71e "start OST0, MDT1, OST1, MDT0"
# test_72 (LU-2634): verify fast symlinks work on an ldiskfs MDT that has
# the "extents" feature enabled via tune2fs, and that a read-only e2fsck
# of the MDT is clean afterwards.
3951 test_72() { #LU-2634
3952 local mdsdev=$(mdsdevname 1)
3953 local ostdev=$(ostdevname 1)
# e2fsck invocation for the final consistency check (-n: read-only).
3954 local cmd="$E2FSCK -fnvd $mdsdev"
# Feature tuning with tune2fs only applies to ldiskfs backends.
3957 [ "$(facet_fstype $SINGLEMDS)" != "ldiskfs" ] &&
3958 skip "ldiskfs only test" && return
3960 #tune MDT with "-O extents"
3962 for num in $(seq $MDSCOUNT); do
3963 add mds${num} $(mkfs_opts mds$num $(mdsdevname $num)) \
3964 --reformat $(mdsdevname $num) $(mdsvdevname $num) ||
3965 error "add mds $num failed"
# Enable the extents feature on the freshly reformatted MDT.
3966 $TUNE2FS -O extents $(mdsdevname $num)
# NOTE(review): the 'done' closing this loop falls on a line dropped
# from this excerpt — confirm against the full file.
3969 add ost1 $(mkfs_opts ost1 $ostdev) --reformat $ostdev ||
3970 error "add $ostdev failed"
3971 start_mgsmds || error "start mds failed"
3972 start_ost || error "start ost failed"
3973 mount_client $MOUNT || error "mount client failed"
3975 #create some short symlinks
# NOTE(review): '$fn' (symlink count) and a presumed 'mkdir $DIR/$tdir'
# appear to be defined on lines dropped from this excerpt — confirm.
3977 createmany -o $DIR/$tdir/$tfile-%d $fn
3978 echo "create $fn short symlinks"
3979 for i in $(seq -w 1 $fn); do
3980 ln -s $DIR/$tdir/$tfile-$i $MOUNT/$tfile-$i
# NOTE(review): the 'done' closing this loop falls on a dropped line.
3985 umount_client $MOUNT || error "umount client failed"
3986 stop_mds || error "stop mds failed"
3987 stop_ost || error "stop ost failed"
# Read-only e2fsck on the MDT to confirm on-disk consistency after use.
3990 run_e2fsck $(facet_active_host $SINGLEMDS) $mdsdev "-n"
# NOTE(review): the closing '}' of test_72 is on a dropped line.
3992 run_test 72 "test fast symlink with extents flag enabled"
3994 if ! combined_mgs_mds ; then