3 # add uml1 uml2 uml3 in your /etc/hosts
5 # FIXME - there is no reason to use all of these different
6 # return codes, especially when most of them are mapped to something
7 # else anyway. The combination of test number and return code
8 # figures out what failed.
14 # These tests don't apply to mountconf
15 MOUNTCONFSKIP="10 11 12 13 13b 14 15"
16 # bug number for skipped test: 13739 13710
17 HEAD_EXCEPT=" 32a 32b 33"
19 # bug number for skipped test: 13709 10510 12743
20 ALWAYS_EXCEPT=" $CONF_SANITY_EXCEPT $MOUNTCONFSKIP $HEAD_EXCEPT 22 23 36"
21 # UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
24 PATH=$PWD/$SRCDIR:$SRCDIR:$SRCDIR/../utils:$PATH
26 PTLDEBUG=${PTLDEBUG:--1}
28 LUSTRE=${LUSTRE:-`dirname $0`/..}
29 RLUSTRE=${RLUSTRE:-$LUSTRE}
30 MOUNTLUSTRE=${MOUNTLUSTRE:-/sbin/mount.lustre}
31 MKFSLUSTRE=${MKFSLUSTRE:-/usr/sbin/mkfs.lustre}
34 . $LUSTRE/tests/test-framework.sh
36 # use small MDS + OST size to speed formatting time
39 . ${CONFIG:=$LUSTRE/tests/cfg/local.sh}
50 # who knows if/where $TUNEFS is installed? Better reformat if it fails...
51 do_facet ${facet} "$TUNEFS --writeconf $MDSDEV" || echo "tunefs failed, reformatting instead" && reformat
56 # The MGS must be started before the OSTs for a new fs, so start
57 # and stop to generate the startup logs.
66 echo "start mds service on `facet_active_host mds`"
67 start mds $MDSDEV $MDS_MOUNT_OPTS || return 94
71 echo "stop mds service on `facet_active_host mds`"
72 # These tests all use non-failover stop
73 stop mds -f || return 97
77 echo "start ost1 service on `facet_active_host ost1`"
78 start ost1 `ostdevname 1` $OST_MOUNT_OPTS || return 95
82 echo "stop ost1 service on `facet_active_host ost1`"
83 # These tests all use non-failover stop
84 stop ost1 -f || return 98
88 echo "start ost2 service on `facet_active_host ost2`"
89 start ost2 `ostdevname 2` $OST_MOUNT_OPTS || return 92
93 echo "stop ost2 service on `facet_active_host ost2`"
94 # These tests all use non-failover stop
95 stop ost2 -f || return 93
100 echo "mount $FSNAME on ${MOUNTPATH}....."
101 zconf_mount `hostname` $MOUNTPATH || return 96
105 local SAVEMOUNTOPT=$MOUNTOPT
106 MOUNTOPT="remount,$1"
108 echo "remount '$1' lustre on ${MOUNTPATH}....."
109 zconf_mount `hostname` $MOUNTPATH || return 96
110 MOUNTOPT=$SAVEMOUNTOPT
115 echo "umount lustre on ${MOUNTPATH}....."
116 zconf_umount `hostname` $MOUNTPATH || return 97
119 manual_umount_client(){
120 echo "manual umount lustre on ${MOUNT}...."
121 do_facet client "umount -d $MOUNT"
131 stop_mds || return 201
132 stop_ost || return 202
133 unload_modules || return 203
137 umount_client $MOUNT || return 200
138 cleanup_nocli || return $?
142 do_facet client "cp /etc/passwd $DIR/a" || return 71
143 do_facet client "rm $DIR/a" || return 72
144 # make sure lustre is actually mounted (touch will block,
145 # but grep won't, so do it after)
146 do_facet client "grep $MOUNT' ' /proc/mounts > /dev/null" || return 73
147 echo "setup single mount lustre success"
151 do_facet client "touch $DIR/a" || return 71
152 do_facet client "rm $DIR/a" || return 72
153 do_facet client "touch $DIR2/a" || return 73
154 do_facet client "rm $DIR2/a" || return 74
155 echo "setup double mount lustre success"
160 if [ "$ONLY" == "setup" ]; then
165 if [ "$ONLY" == "cleanup" ]; then
170 #create single point mountpoint
178 check_mount || return 41
181 run_test 0 "single mount setup"
185 echo "start ost second time..."
187 check_mount || return 42
190 run_test 1 "start up ost twice (should return errors)"
195 echo "start mds second time.."
198 check_mount || return 43
201 run_test 2 "start up mds twice (should return err)"
205 #mount.lustre returns an error if already in mtab
206 mount_client $MOUNT && return $?
207 check_mount || return 44
210 run_test 3 "mount client twice (should return err)"
214 touch $DIR/$tfile || return 85
218 # ok for ost to fail shutdown
219 if [ 202 -ne $eno ]; then
224 run_test 4 "force cleanup ost, then cleanup"
228 touch $DIR/$tfile || return 1
229 stop_mds -f || return 2
231 # cleanup may return an error from the failed
232 # disconnects; for now I'll consider this successful
233 # if all the modules have unloaded.
237 echo "killing umount"
238 kill -TERM $UMOUNT_PID
239 echo "waiting for umount to finish"
241 if grep " $MOUNT " /etc/mtab; then
242 echo "test 5: mtab after failed umount"
246 echo "killing umount"
247 kill -TERM $UMOUNT_PID
248 echo "waiting for umount to finish"
250 grep " $MOUNT " /etc/mtab && echo "test 5: mtab after second umount" && return 11
254 # stop_mds is a no-op here, and should not fail
255 cleanup_nocli || return $?
256 # df may have lingering entry
258 # mtab may have lingering entry
259 grep -v $MOUNT" " /etc/mtab > $TMP/mtabtemp
260 mv $TMP/mtabtemp /etc/mtab
262 run_test 5 "force cleanup mds, then cleanup"
266 [ -d $MOUNT ] || mkdir -p $MOUNT
267 grep " $MOUNT " /etc/mtab && echo "test 5b: mtab before mount" && return 10
268 mount_client $MOUNT && return 1
269 grep " $MOUNT " /etc/mtab && echo "test 5b: mtab after failed mount" && return 11
271 # stop_mds is a no-op here, and should not fail
272 cleanup_nocli || return $?
275 run_test 5b "mds down, cleanup after failed mount (bug 2712) (should return errs)"
280 [ -d $MOUNT ] || mkdir -p $MOUNT
281 grep " $MOUNT " /etc/mtab && echo "test 5c: mtab before mount" && return 10
282 mount -t lustre $MGSNID:/wrong.$FSNAME $MOUNT || :
283 grep " $MOUNT " /etc/mtab && echo "test 5c: mtab after failed mount" && return 11
285 cleanup_nocli || return $?
287 run_test 5c "cleanup after failed mount (bug 2712) (should return errs)"
293 grep " $MOUNT " /etc/mtab && echo "test 5d: mtab before mount" && return 10
294 mount_client $MOUNT || return 1
296 grep " $MOUNT " /etc/mtab && echo "test 5d: mtab after unmount" && return 11
299 run_test 5d "mount with ost down"
305 #define OBD_FAIL_PTLRPC_DELAY_SEND 0x506
306 do_facet client "sysctl -w lustre.fail_loc=0x80000506"
307 grep " $MOUNT " /etc/mtab && echo "test 5e: mtab before mount" && return 10
308 mount_client $MOUNT || echo "mount failed (not fatal)"
310 grep " $MOUNT " /etc/mtab && echo "test 5e: mtab after unmount" && return 11
313 run_test 5e "delayed connect, don't crash (bug 10268)"
318 mount_client ${MOUNT} || return 87
319 touch $DIR/a || return 86
322 run_test 6 "manual umount, then mount again"
327 cleanup_nocli || return $?
329 run_test 7 "manual umount, then cleanup"
334 check_mount2 || return 45
335 umount_client $MOUNT2
338 run_test 8 "double mount setup"
343 do_facet ost1 sysctl lnet.debug=\'inode trace\' || return 1
344 do_facet ost1 sysctl lnet.subsystem_debug=\'mds ost\' || return 1
346 CHECK_PTLDEBUG="`do_facet ost1 sysctl -n lnet.debug`"
347 if [ "$CHECK_PTLDEBUG" ] && [ "$CHECK_PTLDEBUG" = "trace inode" ];then
348 echo "lnet.debug success"
350 echo "lnet.debug: want 'trace inode', have '$CHECK_PTLDEBUG'"
353 CHECK_SUBSYS="`do_facet ost1 sysctl -n lnet.subsystem_debug`"
354 if [ "$CHECK_SUBSYS" ] && [ "$CHECK_SUBSYS" = "mds ost" ]; then
355 echo "lnet.subsystem_debug success"
357 echo "lnet.subsystem_debug: want 'mds ost', have '$CHECK_SUBSYS'"
360 stop_ost || return $?
363 run_test 9 "test ptldebug and subsystem for mkfs"
366 echo "generate configuration with the same name for node and mds"
367 OLDXMLCONFIG=$XMLCONFIG
368 XMLCONFIG="broken.xml"
369 [ -f "$XMLCONFIG" ] && rm -f $XMLCONFIG
373 echo "the name for node and mds is the same"
374 do_lmc --add mds --node ${facet}_facet --mds ${facet}_facet \
375 --dev $MDSDEV --size $MDSSIZE || return $?
376 do_lmc --add lov --mds ${facet}_facet --lov lov1 --stripe_sz \
377 $STRIPE_BYTES --stripe_cnt $STRIPES_PER_OBJ \
378 --stripe_pattern 0 || return $?
379 add_ost ost --lov lov1 --dev $OSTDEV --size $OSTSIZE
381 add_facet $facet --lustre_upcall $UPCALL
382 do_lmc --add mtpt --node ${facet}_facet --mds mds_facet \
383 --lov lov1 --path $MOUNT
389 check_mount || return 41
393 XMLCONFIG=$OLDXMLCONFIG
395 run_test 10 "mount lustre with the same name for node and mds"
398 OLDXMLCONFIG=$XMLCONFIG
399 XMLCONFIG="conf11.xml"
401 [ -f "$XMLCONFIG" ] && rm -f $XMLCONFIG
402 add_mds mds --dev $MDSDEV --size $MDSSIZE
403 add_ost ost --dev $OSTDEV --size $OSTSIZE
404 add_client client mds --path $MOUNT --ost ost_svc || return $?
405 echo "Default lov config success!"
407 [ -f "$XMLCONFIG" ] && rm -f $XMLCONFIG
408 add_mds mds --dev $MDSDEV --size $MDSSIZE
409 add_ost ost --dev $OSTDEV --size $OSTSIZE
410 add_client client mds --path $MOUNT && return $?
411 echo "--add mtpt with neither --lov nor --ost will return error"
415 XMLCONFIG=$OLDXMLCONFIG
417 run_test 11 "use default lov configuration (should return error)"
420 OLDXMLCONFIG=$XMLCONFIG
421 XMLCONFIG="batch.xml"
422 BATCHFILE="batchfile"
425 [ -f "$XMLCONFIG" ] && rm -f $XMLCONFIG
426 [ -f "$BATCHFILE" ] && rm -f $BATCHFILE
427 echo "--add net --node $HOSTNAME --nid $HOSTNAME --nettype tcp" > $BATCHFILE
428 echo "--add mds --node $HOSTNAME --mds mds1 --mkfsoptions \"-I 128\"" >> $BATCHFILE
429 # --mkfsoptions "-I 128"
430 do_lmc -m $XMLCONFIG --batch $BATCHFILE || return $?
431 if [ `sed -n '/>-I 128</p' $XMLCONFIG | wc -l` -eq 1 ]; then
432 echo "matched double quote success"
434 echo "matched double quote fail"
439 echo "--add net --node $HOSTNAME --nid $HOSTNAME --nettype tcp" > $BATCHFILE
440 echo "--add mds --node $HOSTNAME --mds mds1 --mkfsoptions \"-I 128" >> $BATCHFILE
441 # --mkfsoptions "-I 128
442 do_lmc -m $XMLCONFIG --batch $BATCHFILE && return $?
443 echo "unmatched double quote should return error"
447 echo "--add net --node $HOSTNAME --nid $HOSTNAME --nettype tcp" > $BATCHFILE
448 echo "--add mds --node $HOSTNAME --mds mds1 --mkfsoptions '-I 128'" >> $BATCHFILE
449 # --mkfsoptions '-I 128'
450 do_lmc -m $XMLCONFIG --batch $BATCHFILE || return $?
451 if [ `sed -n '/>-I 128</p' $XMLCONFIG | wc -l` -eq 1 ]; then
452 echo "matched single quote success"
454 echo "matched single quote fail"
459 echo "--add net --node $HOSTNAME --nid $HOSTNAME --nettype tcp" > $BATCHFILE
460 echo "--add mds --node $HOSTNAME --mds mds1 --mkfsoptions '-I 128" >> $BATCHFILE
461 # --mkfsoptions '-I 128
462 do_lmc -m $XMLCONFIG --batch $BATCHFILE && return $?
463 echo "unmatched single quote should return error"
467 echo "--add net --node $HOSTNAME --nid $HOSTNAME --nettype tcp" > $BATCHFILE
468 echo "--add mds --node $HOSTNAME --mds mds1 --mkfsoptions \-\I\ \128" >> $BATCHFILE
469 # --mkfsoptions \-\I\ \128
470 do_lmc -m $XMLCONFIG --batch $BATCHFILE || return $?
471 if [ `sed -n '/>-I 128</p' $XMLCONFIG | wc -l` -eq 1 ]; then
472 echo "backslash followed by a whitespace/letter success"
474 echo "backslash followed by a whitespace/letter fail"
479 echo "--add net --node $HOSTNAME --nid $HOSTNAME --nettype tcp" > $BATCHFILE
480 echo "--add mds --node $HOSTNAME --mds mds1 --mkfsoptions -I\ 128\\" >> $BATCHFILE
481 # --mkfsoptions -I\ 128\
482 do_lmc -m $XMLCONFIG --batch $BATCHFILE && return $?
483 echo "backslash followed by nothing should return error"
486 XMLCONFIG=$OLDXMLCONFIG
488 run_test 12 "lmc --batch, with single/double quote, backslash in batchfile"
491 OLDXMLCONFIG=$XMLCONFIG
492 XMLCONFIG="conf13-1.xml"
494 # check long uuid will be truncated properly and uniquely
495 echo "To generate XML configuration file(with long ost name): $XMLCONFIG"
496 [ -f "$XMLCONFIG" ] && rm -f $XMLCONFIG
497 do_lmc --add net --node $HOSTNAME --nid $HOSTNAME --nettype tcp
498 do_lmc --add mds --node $HOSTNAME --mds mds1_name_longer_than_31characters
499 do_lmc --add mds --node $HOSTNAME --mds mds2_name_longer_than_31characters
500 if [ ! -f "$XMLCONFIG" ]; then
501 echo "Error:no file $XMLCONFIG created!"
504 EXPECTEDMDS1UUID="e_longer_than_31characters_UUID"
505 EXPECTEDMDS2UUID="longer_than_31characters_UUID_2"
506 FOUNDMDS1UUID=`awk -F"'" '/<mds .*uuid=/' $XMLCONFIG | sed -n '1p' \
507 | sed "s/ /\n\r/g" | awk -F"'" '/uuid=/{print $2}'`
508 FOUNDMDS2UUID=`awk -F"'" '/<mds .*uuid=/' $XMLCONFIG | sed -n '2p' \
509 | sed "s/ /\n\r/g" | awk -F"'" '/uuid=/{print $2}'`
510 [ -z "$FOUNDMDS1UUID" ] && echo "MDS1 UUID empty" && return 1
511 [ -z "$FOUNDMDS2UUID" ] && echo "MDS2 UUID empty" && return 1
512 if ([ $EXPECTEDMDS1UUID = $FOUNDMDS1UUID ] && [ $EXPECTEDMDS2UUID = $FOUNDMDS2UUID ]) || \
513 ([ $EXPECTEDMDS1UUID = $FOUNDMDS2UUID ] && [ $EXPECTEDMDS2UUID = $FOUNDMDS1UUID ]); then
514 echo "Success:long uuid truncated successfully and being unique."
516 echo "Error:expected uuid for mds1 and mds2: $EXPECTEDMDS1UUID; $EXPECTEDMDS2UUID"
517 echo "but: found uuid for mds1 and mds2: $FOUNDMDS1UUID; $FOUNDMDS2UUID"
521 XMLCONFIG=$OLDXMLCONFIG
523 run_test 13 "check new_uuid of lmc operating correctly"
526 OLDXMLCONFIG=$XMLCONFIG
527 XMLCONFIG="conf13-1.xml"
528 SECONDXMLCONFIG="conf13-2.xml"
529 # check multiple invocations for lmc generate same XML configuration file
531 echo "Generate the first XML configuration file"
533 echo "mv $XMLCONFIG to $SECONDXMLCONFIG"
534 sed -e "s/mtime[^ ]*//" $XMLCONFIG > $SECONDXMLCONFIG || return $?
535 echo "Generate the second XML configuration file"
537 # don't compare .xml mtime, it will always be different
538 if [ `sed -e "s/mtime[^ ]*//" $XMLCONFIG | diff - $SECONDXMLCONFIG | wc -l` -eq 0 ]; then
539 echo "Success:multiple invocations for lmc generate same XML file"
541 echo "Error: multiple invocations for lmc generate different XML file"
545 rm -f $XMLCONFIG $SECONDXMLCONFIG
546 XMLCONFIG=$OLDXMLCONFIG
548 run_test 13b "check lmc generates consistent .xml file"
553 # create xml file with --mkfsoptions for ost
554 echo "create xml file with --mkfsoptions for ost"
555 add_mds mds --dev $MDSDEV --size $MDSSIZE
556 add_lov lov1 mds --stripe_sz $STRIPE_BYTES\
557 --stripe_cnt $STRIPES_PER_OBJ --stripe_pattern 0
558 add_ost ost --lov lov1 --dev $OSTDEV --size $OSTSIZE \
559 --mkfsoptions "-Llabel_conf_14"
560 add_client client mds --lov lov1 --path $MOUNT
562 FOUNDSTRING=`awk -F"<" '/<mkfsoptions>/{print $2}' $XMLCONFIG`
563 EXPECTEDSTRING="mkfsoptions>-Llabel_conf_14"
564 if [ "$EXPECTEDSTRING" != "$FOUNDSTRING" ]; then
565 echo "Error: expected: $EXPECTEDSTRING; found: $FOUNDSTRING"
568 echo "Success:mkfsoptions for ost written to xml file correctly."
570 # mount lustre to test lconf mkfsoptions-parsing
574 mount_client $MOUNT || return $?
575 if [ -z "`do_facet ost1 dumpe2fs -h $OSTDEV | grep label_conf_14`" ]; then
576 echo "Error: the mkoptions not applied to mke2fs of ost."
580 echo "lconf mkfsoptions for ost success"
584 run_test 14 "test mkfsoptions of ost for lmc and lconf"
588 [ -f $MOUNTLUSTRE ] && echo "remove $MOUNTLUSTRE" && rm -f $MOUNTLUSTRE
589 if [ -f $MOUNTLUSTRE.sav ]; then
590 echo "return original $MOUNTLUSTRE.sav to $MOUNTLUSTRE"
591 mv $MOUNTLUSTRE.sav $MOUNTLUSTRE
595 # this only tests the kernel mount command, not anything about lustre.
597 MOUNTLUSTRE=${MOUNTLUSTRE:-/sbin/mount.lustre}
601 echo "mount lustre on ${MOUNT} without $MOUNTLUSTRE....."
602 if [ -f "$MOUNTLUSTRE" ]; then
603 echo "save $MOUNTLUSTRE to $MOUNTLUSTRE.sav"
604 mv $MOUNTLUSTRE $MOUNTLUSTRE.sav && trap cleanup_15 EXIT INT
605 if [ -f $MOUNTLUSTRE ]; then
606 skip "$MOUNTLUSTRE cannot be moved, skipping test"
611 mount_client $MOUNT && error "mount succeeded" && return 1
612 echo "mount lustre on $MOUNT without $MOUNTLUSTRE failed as expected"
616 run_test 15 "zconf-mount without /sbin/mount.lustre (should return error)"
618 # LOGS/PENDING do not exist anymore since CMD3
620 TMPMTPT="${MOUNT%/*}/conf16"
622 if [ ! -e "$MDSDEV" ]; then
623 log "no $MDSDEV existing, so mount Lustre to create one"
625 check_mount || return 41
629 [ -f "$MDSDEV" ] && LOOPOPT="-o loop"
631 log "change the mode of $MDSDEV/OBJECTS to 555"
632 do_facet mds "mkdir -p $TMPMTPT &&
633 mount $LOOPOPT -t $FSTYPE $MDSDEV $TMPMTPT &&
634 chmod 555 $TMPMTPT/OBJECTS &&
635 umount $TMPMTPT" || return $?
637 log "mount Lustre to change the mode of OBJECTS, then umount Lustre"
639 check_mount || return 41
642 log "read the mode of OBJECTS and check if they has been changed properly"
643 EXPECTEDOBJECTSMODE=`do_facet mds "debugfs -R 'stat OBJECTS' $MDSDEV 2> /dev/null" | grep 'Mode: ' | sed -e "s/.*Mode: *//" -e "s/ *Flags:.*//"`
645 if [ "$EXPECTEDOBJECTSMODE" = "0777" ]; then
646 log "Success:Lustre change the mode of OBJECTS correctly"
648 error "Lustre does not change mode of OBJECTS properly"
651 run_test 16 "verify that lustre will correct the mode of OBJECTS"
654 if [ ! -e "$MDSDEV" ]; then
655 echo "no $MDSDEV existing, so mount Lustre to create one"
657 check_mount || return 41
661 echo "Remove mds config log"
662 do_facet mds "debugfs -w -R 'unlink CONFIGS/$FSNAME-MDT0000' $MDSDEV || return \$?" || return $?
665 start_mds && return 42
668 run_test 17 "Verify failed mds_postsetup won't fail assertion (2936) (should return errs)"
671 [ -f $MDSDEV ] && echo "remove $MDSDEV" && rm -f $MDSDEV
672 echo "mount mds with large journal..."
673 local myMDSSIZE=2000000
674 OLD_MDS_MKFS_OPTS=$MDS_MKFS_OPTS
676 MDS_MKFS_OPTS="--mgs --mdt --fsname=$FSNAME --device-size=$myMDSSIZE --param sys.timeout=$TIMEOUT $MDSOPT"
679 echo "mount lustre system..."
681 check_mount || return 41
683 echo "check journal size..."
684 FOUNDSIZE=`do_facet mds "debugfs -c -R 'stat <8>' $MDSDEV" | awk '/Size: / { print $NF; exit;}'`
685 if [ $FOUNDSIZE -gt $((32 * 1024 * 1024)) ]; then
686 log "Success: mkfs creates large journals. Size: $((FOUNDSIZE >> 20))M"
688 error "expected journal size > 32M, found $((FOUNDSIZE >> 20))M"
693 MDS_MKFS_OPTS=$OLD_MDS_MKFS_OPTS
696 run_test 18 "check mkfs creates large journals"
699 start_mds || return 1
700 stop_mds -f || return 2
702 run_test 19a "start/stop MDS without OSTs"
705 start_ost || return 1
706 stop_ost -f || return 2
708 run_test 19b "start/stop OSTs without MDS"
711 # first format the ost/mdt
715 check_mount || return 43
717 remount_client ro $MOUNT || return 44
718 touch $DIR/$tfile && echo "$DIR/$tfile created incorrectly" && return 45
719 [ -e $DIR/$tfile ] && echo "$DIR/$tfile exists incorrectly" && return 46
720 remount_client rw $MOUNT || return 47
722 [ ! -f $DIR/$tfile ] && echo "$DIR/$tfile missing" && return 48
723 MCNT=`grep -c $MOUNT /etc/mtab`
724 [ "$MCNT" -ne 1 ] && echo "$MOUNT in /etc/mtab $MCNT times" && return 49
729 run_test 20 "remount ro,rw mounts work and doesn't break /etc/mtab"
737 run_test 21a "start mds before ost, stop ost first"
745 run_test 21b "start ost before mds, stop mds first"
755 run_test 21c "start mds between two osts, stop mds last"
758 #reformat to remove all logs
761 echo Client mount before any osts are in the logs
763 check_mount && return 41
766 echo Client mount with ost in logs, but none running
770 # check_mount will block trying to contact ost
774 echo Client mount with a running ost
777 check_mount || return 41
782 run_test 22 "start a client before osts (should return errs)"
788 # force down client so that recovering mds waits for reconnect
789 zconf_umount `hostname` $MOUNT -f
790 # enter recovery on mds
792 # try to start a new client
793 mount_client $MOUNT &
796 MOUNT_LUSTRE_PID=`ps -ef | grep mount.lustre | grep -v grep | awk '{print $2}'`
797 echo mount pid is ${MOUNT_PID}, mount.lustre pid is ${MOUNT_LUSTRE_PID}
799 ps --ppid $MOUNT_LUSTRE_PID
800 # FIXME why o why can't I kill these? Manual "ctrl-c" works...
801 kill -TERM $MOUNT_PID
802 echo "waiting for mount to finish"
809 #this test isn't working yet
810 #run_test 23 "interrupt client during recovery mount delay"
816 local fs2mds_HOST=$mds_HOST
817 local fs2ost_HOST=$ost_HOST
818 [ -n "$ost1_HOST" ] && fs2ost_HOST=$ost1_HOST
819 if [ -z "$fs2ost_DEV" -o -z "$fs2mds_DEV" ]; then
820 do_facet $SINGLEMDS [ -b "$MDSDEV" ] && \
821 skip "mixed loopback and real device not working" && return
824 local fs2mdsdev=${fs2mds_DEV:-${MDSDEV}_2}
825 local fs2ostdev=${fs2ost_DEV:-$(ostdevname 1)_2}
827 # test 8-char fsname as well
828 local FSNAME2=test1234
829 add fs2mds $MDS_MKFS_OPTS --fsname=${FSNAME2} --nomgs --mgsnode=$MGSNID --reformat $fs2mdsdev || exit 10
831 add fs2ost $OST_MKFS_OPTS --fsname=${FSNAME2} --reformat $fs2ostdev || exit 10
834 start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS
835 start fs2ost $fs2ostdev $OST_MOUNT_OPTS
837 mount -t lustre $MGSNID:/${FSNAME2} $MOUNT2 || return 1
839 check_mount || return 2
840 # files written on 1 should not show up on 2
841 cp /etc/passwd $DIR/$tfile
843 [ -e $MOUNT2/$tfile ] && error "File bleed" && return 7
846 cp /etc/passwd $MOUNT2/b || return 3
847 rm $MOUNT2/b || return 4
848 # 2 is actually mounted
849 grep $MOUNT2' ' /proc/mounts > /dev/null || return 5
851 facet_failover fs2mds
852 facet_failover fs2ost
855 # the MDS must remain up until last MDT
857 MDS=$(do_facet $SINGLEMDS "cat $LPROC/devices" | awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }')
858 [ -z "$MDS" ] && error "No MDT" && return 8
862 cleanup_nocli || return 6
864 run_test 24a "Multiple MDTs on a single node"
867 local fs2mds_HOST=$mds_HOST
868 if [ -z "$fs2mds_DEV" ]; then
869 do_facet $SINGLEMDS [ -b "$MDSDEV" ] && \
870 skip "mixed loopback and real device not working" && return
873 local fs2mdsdev=${fs2mds_DEV:-${MDSDEV}_2}
875 add fs2mds $MDS_MKFS_OPTS --fsname=${FSNAME}2 --mgs --reformat $fs2mdsdev || exit 10
877 start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS && return 2
880 run_test 24b "Multiple MGSs on a single node (should return err)"
884 check_mount || return 2
885 local MODULES=$($LCTL modules | awk '{ print $2 }')
886 rmmod $MODULES 2>/dev/null || true
889 run_test 25 "Verify modules are referenced"
893 # we need modules before mount for sysctl, so make sure...
894 do_facet mds "lsmod | grep -q lustre || modprobe lustre"
895 #define OBD_FAIL_MDS_FS_SETUP 0x135
896 do_facet mds "sysctl -w lustre.fail_loc=0x80000135"
897 start_mds && echo MDS started && return 1
899 DEVS=$(cat $LPROC/devices | wc -l)
900 [ $DEVS -gt 0 ] && return 2
901 unload_modules || return 203
903 run_test 26 "MDT startup failure cleans LOV (should return errs)"
909 local ORIG=$(do_facet $myfacet "$TEST")
910 if [ $# -gt 3 ]; then
916 echo "Setting $PARAM from $ORIG to $FINAL"
917 do_facet mds "$LCTL conf_param $PARAM=$FINAL" || error conf_param failed
923 RESULT=$(do_facet $myfacet "$TEST")
924 if [ $RESULT -eq $FINAL ]; then
925 echo "Updated config after $WAIT sec (got $RESULT)"
929 if [ $WAIT -eq $MAX ]; then
930 echo "Config update not seen: wanted $FINAL got $RESULT"
933 echo "Waiting $(($MAX - $WAIT)) secs for config update"
938 start_ost || return 1
939 start_mds || return 2
940 echo "Requeue thread should have started: "
941 ps -e | grep ll_cfg_requeue
942 set_and_check ost1 "cat $LPROC/obdfilter/$FSNAME-OST0000/client_cache_seconds" "$FSNAME-OST0000.ost.client_cache_seconds" || return 3
945 run_test 27a "Reacquire MGS lock if OST started first"
950 set_and_check mds "cat $LPROC/mdt/$FSNAME-MDT0000/identity_acquire_expire" "$FSNAME-MDT0000.mdt.identity_acquire_expire" || return 3
951 set_and_check client "cat $LPROC/mdc/$FSNAME-MDT0000-mdc-*/max_rpcs_in_flight" "$FSNAME-MDT0000.mdc.max_rpcs_in_flight" || return 4
954 run_test 27b "Reacquire MGS lock after failover"
958 TEST="cat $LPROC/llite/$FSNAME-*/max_read_ahead_whole_mb"
961 FINAL=$(($ORIG + 10))
962 set_and_check client "$TEST" "$FSNAME.llite.max_read_ahead_whole_mb" || return 3
963 set_and_check client "$TEST" "$FSNAME.llite.max_read_ahead_whole_mb" || return 3
964 umount_client $MOUNT || return 200
967 if [ $RESULT -ne $FINAL ]; then
968 echo "New config not seen: wanted $FINAL got $RESULT"
971 echo "New config success: got $RESULT"
975 run_test 28 "permanent parameter setting"
978 [ "$OSTCOUNT" -lt "2" ] && skip "$OSTCOUNT < 2, skipping" && return
979 setup > /dev/null 2>&1
983 local PARAM="$FSNAME-OST0001.osc.active"
984 local PROC_ACT="$LPROC/osc/$FSNAME-OST0001-osc-[^M]*/active"
985 local PROC_UUID="$LPROC/osc/$FSNAME-OST0001-osc-[^M]*/ost_server_uuid"
986 if [ ! -r $PROC_ACT ]; then
987 echo "Can't read $PROC_ACT"
988 ls $LPROC/osc/$FSNAME-*
991 ACTV=$(cat $PROC_ACT)
993 set_and_check client "cat $PROC_ACT" "$PARAM" $DEAC || return 2
994 # also check ost_server_uuid status
995 RESULT=$(grep DEACTIV $PROC_UUID)
996 if [ -z "$RESULT" ]; then
997 echo "Live client not deactivated: $(cat $PROC_UUID)"
1000 echo "Live client success: got $RESULT"
1004 local MPROC="$LPROC/osc/$FSNAME-OST0001-osc-[M]*/active"
1009 RESULT=`do_facet mds " [ -r $MPROC ] && cat $MPROC"`
1010 [ ${PIPESTATUS[0]} = 0 ] || error "Can't read $MPROC"
1011 if [ $RESULT -eq $DEAC ]; then
1012 echo "MDT deactivated also after $WAIT sec (got $RESULT)"
1016 if [ $WAIT -eq $MAX ]; then
1017 echo "MDT not deactivated: wanted $DEAC got $RESULT"
1020 echo "Waiting $(($MAX - $WAIT)) secs for MDT deactivated"
1023 # test new client starts deactivated
1024 umount_client $MOUNT || return 200
1026 RESULT=$(grep DEACTIV $PROC_UUID | grep NEW)
1027 if [ -z "$RESULT" ]; then
1028 echo "New client not deactivated from start: $(cat $PROC_UUID)"
1031 echo "New client success: got $RESULT"
1034 # make sure it reactivates
1035 set_and_check client "cat $PROC_ACT" "$PARAM" $ACTV || return 6
1037 umount_client $MOUNT
1040 #writeconf to remove all ost2 traces for subsequent tests
1043 run_test 29 "permanently remove an OST"
1046 # start mds first after writeconf
1050 TEST="cat $LPROC/llite/$FSNAME-*/max_read_ahead_whole_mb"
1052 for i in $(seq 1 20); do
1053 set_and_check client "$TEST" "$FSNAME.llite.max_read_ahead_whole_mb" $i || return 3
1055 # make sure client restart still works
1056 umount_client $MOUNT
1057 mount_client $MOUNT || return 4
1058 [ "$($TEST)" -ne "$i" ] && return 5
1059 set_and_check client "$TEST" "$FSNAME.llite.max_read_ahead_whole_mb" $ORIG || return 6
1062 run_test 30 "Big config llog"
1064 test_31() { # bug 10734
1065 # ipaddr must not exist
1066 mount -t lustre 4.3.2.1@tcp:/lustre $MOUNT || true
1069 run_test 31 "Connect to non-existent node (shouldn't crash)"
1072 # XXX - make this run on client-only systems with real hardware on
1074 # there appears to be a lot of assumption here about loopback
1076 # or maybe this test is just totally useless on a client-only system
1077 [ "$mds_HOST" = "`hostname`" ] || { skip "remote MDS" && return 0; }
1078 [ "$ost_HOST" = "`hostname`" -o "$ost1_HOST" = "`hostname`" ] || \
1079 { skip "remote OST" && return 0; }
1081 [ -z "$TUNEFS" ] && skip "No tunefs" && return
1082 local DISK1_4=$LUSTRE/tests/disk1_4.zip
1083 [ ! -r $DISK1_4 ] && skip "Cant find $DISK1_4, skipping" && return
1085 unzip -o -j -d $TMP/$tdir $DISK1_4 || { skip "Cant unzip $DISK1_4, skipping" && return ; }
1087 sysctl lnet.debug=$PTLDEBUG
1089 $TUNEFS $TMP/$tdir/mds || error "tunefs failed"
1090 # nids are wrong, so client won't work, but server should start
1091 start mds $TMP/$tdir/mds "-o loop,exclude=lustre-OST0000" || return 3
1092 local UUID=$(cat $LPROC/mds/lustre-MDT0000/uuid)
1094 [ "$UUID" == "mdsA_UUID" ] || error "UUID is wrong: $UUID"
1096 $TUNEFS --mgsnode=`hostname` $TMP/$tdir/ost1 || error "tunefs failed"
1097 start ost1 $TMP/$tdir/ost1 "-o loop" || return 5
1098 UUID=$(cat $LPROC/obdfilter/lustre-OST0000/uuid)
1100 [ "$UUID" == "ost1_UUID" ] || error "UUID is wrong: $UUID"
1102 local NID=$($LCTL list_nids | head -1)
1104 echo "OSC changes should return err:"
1105 $LCTL conf_param lustre-OST0000.osc.max_dirty_mb=15 && return 7
1106 $LCTL conf_param lustre-OST0000.failover.node=$NID && return 8
1108 echo "MDC changes should succeed:"
1109 $LCTL conf_param lustre-MDT0000.mdc.max_rpcs_in_flight=9 || return 9
1110 $LCTL conf_param lustre-MDT0000.failover.node=$NID || return 10
1113 # With a new good MDT failover nid, we should be able to mount a client
1114 # (but it can't talk to OST)
1115 local OLDMOUNTOPT=$MOUNTOPT
1116 MOUNTOPT="exclude=lustre-OST0000"
1118 MOUNTOPT=$OLDMOUNTOPT
1119 set_and_check client "cat $LPROC/mdc/*/max_rpcs_in_flight" "lustre-MDT0000.mdc.max_rpcs_in_flight" || return 11
1121 zconf_umount `hostname` $MOUNT -f
1124 # mount a second time to make sure we didn't leave the upgrade flag on
1125 $TUNEFS --dryrun $TMP/$tdir/mds || error "tunefs failed"
1126 start mds $TMP/$tdir/mds "-o loop,exclude=lustre-OST0000" || return 12
1129 [ -d $TMP/$tdir ] && rm -rf $TMP/$tdir
1131 run_test 32a "Upgrade from 1.4 (not live)"
1134 # XXX - make this run on client-only systems with real hardware on
1136 # there appears to be a lot of assumption here about loopback
1138 # or maybe this test is just totally useless on a client-only system
1139 [ "$mds_HOST" = "`hostname`" ] || { skip "remote MDS" && return 0; }
1140 [ "$ost_HOST" = "`hostname`" -o "$ost1_HOST" = "`hostname`" ] || \
1141 { skip "remote OST" && return 0; }
1143 [ -z "$TUNEFS" ] && skip "No tunefs" && return
1144 local DISK1_4=$LUSTRE/tests/disk1_4.zip
1145 [ ! -r $DISK1_4 ] && skip "Cant find $DISK1_4, skipping" && return
1147 unzip -o -j -d $TMP/$tdir $DISK1_4 || { skip "Cant unzip $DISK1_4, skipping" && return ; }
1149 sysctl lnet.debug=$PTLDEBUG
1151 # writeconf will cause servers to register with their current nids
1152 $TUNEFS --writeconf $TMP/$tdir/mds || error "tunefs failed"
1153 start mds $TMP/$tdir/mds "-o loop" || return 3
1154 local UUID=$(cat $LPROC/mds/lustre-MDT0000/uuid)
1156 [ "$UUID" == "mdsA_UUID" ] || error "UUID is wrong: $UUID"
1158 $TUNEFS --mgsnode=`hostname` $TMP/$tdir/ost1 || error "tunefs failed"
1159 start ost1 $TMP/$tdir/ost1 "-o loop" || return 5
1160 UUID=$(cat $LPROC/obdfilter/lustre-OST0000/uuid)
1162 [ "$UUID" == "ost1_UUID" ] || error "UUID is wrong: $UUID"
1164 echo "OSC changes should succeed:"
1165 $LCTL conf_param lustre-OST0000.osc.max_dirty_mb=15 || return 7
1166 $LCTL conf_param lustre-OST0000.failover.node=$NID || return 8
1168 echo "MDC changes should succeed:"
1169 $LCTL conf_param lustre-MDT0000.mdc.max_rpcs_in_flight=9 || return 9
1172 # MDT and OST should have registered with new nids, so we should have
1173 # a fully-functioning client
1174 echo "Check client and old fs contents"
1176 set_and_check client "cat $LPROC/mdc/*/max_rpcs_in_flight" "${NEWNAME}-MDT0000.mdc.max_rpcs_in_flight" || return 11
1177 [ "$(cksum $MOUNT/passwd | cut -d' ' -f 1,2)" == "2479747619 779" ] || return 12
1181 [ -d $TMP/$tdir ] && rm -rf $TMP/$tdir
1183 run_test 32b "Upgrade from 1.4 with writeconf"
1185 test_33() { # bug 12333
1186 local FSNAME2=test1234
1187 local fs2mds_HOST=$mds_HOST
1188 local fs2ost_HOST=$ost_HOST
1189 [ -n "$ost1_HOST" ] && fs2ost_HOST=$ost1_HOST
1191 if [ -z "$fs2ost_DEV" -o -z "$fs2mds_DEV" ]; then
1192 do_facet $SINGLEMDS [ -b "$MDSDEV" ] && \
1193 skip "mixed loopback and real device not working" && return
1196 local fs2mdsdev=${fs2mds_DEV:-${MDSDEV}_2}
1197 local fs2ostdev=${fs2ost_DEV:-$(ostdevname 1)_2}
1198 add fs2mds $MDS_MKFS_OPTS --fsname=${FSNAME2} --reformat $fs2mdsdev || exit 10
1199 add fs2ost $OST_MKFS_OPTS --fsname=${FSNAME2} --index=8191 --mgsnode=$MGSNID --reformat $fs2ostdev || exit 10
1201 start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS
1202 start fs2ost $fs2ostdev $OST_MOUNT_OPTS
1204 mount -t lustre $MGSNID:/${FSNAME2} $MOUNT2 || return 1
1210 rm -rf $MOUNT2 $fs2mdsdev $fs2ostdev
1211 cleanup_nocli || return 6
1213 run_test 33 "Mount ost with a large index number"
1218 do_facet client dd if=/dev/zero of=$MOUNT/24 bs=1024k count=1
1219 # Drop lock cancelation reply during umount
1220 #define OBD_FAIL_LDLM_CANCEL 0x304
1221 do_facet client sysctl -w lustre.fail_loc=0x80000304
1222 #sysctl -w lnet.debug=-1
1223 umount_client $MOUNT
1226 run_test 34 "Drop cancel during umount"
1230 do_facet client multiop $DIR/file O_c &
1232 manual_umount_client
1234 do_facet client killall -USR1 multiop
1235 if [ $rc -eq 0 ]; then
1236 error "umount not fail!"
1241 run_test 34a "umount with opened file should be fail"
1246 touch $DIR/$tfile || return 1
1247 stop_mds --force || return 2
1249 manual_umount_client --force
1251 if [ $rc -ne 0 ]; then
1252 error "mtab after failed umount - rc $rc"
1258 run_test 34b "force umount with failed mds should be normal"
# test_34c fragment: same as 34b but with a force-stopped OST instead of the
# MDS. NOTE(review): the function header, the status capture into $rc, and
# the closing 'fi'/'}' are elided from this listing.
1262 touch $DIR/$tfile || return 1
1263 stop_ost --force || return 2
# Forced client umount against a dead OST; its status is checked below.
1265 manual_umount_client --force
# Non-zero rc means the forced umount failed / left a stale mtab entry.
1267 if [ $rc -ne 0 ]; then
1268 error "mtab after failed umount - rc $rc"
1274 run_test 34c "force umount with failed ost should be normal"
# test_35 (bug 12459): after adding a fake failover NID for the MDT and
# restarting the MDS, the client must try the LAST ACTIVE server first when
# reconnecting, not the fake failnode. NOTE(review): this listing is elided
# (e.g. the DFPID capture for the background df, most of the awk body, and
# the closing '}'); comments annotate only the surviving lines.
1276 test_35() { # bug 12459
# Save current debug mask and restrict logging to "ha" so the reconnect
# messages are easy to find in the debug log afterwards.
1279 DBG_SAVE="`sysctl -n lnet.debug`"
1280 sysctl -w lnet.debug="ha"
1282 log "Set up a fake failnode for the MDS"
1284 do_facet mds $LCTL conf_param ${FSNAME}-MDT0000.failover.node=$FAKENID || return 4
1286 log "Wait for RECONNECT_INTERVAL seconds (10s)"
# Unique marker written to the log; the awk script below only starts
# matching after this marker so earlier connections are ignored.
1289 MSG="conf-sanity.sh test_35 `date +%F%kh%Mm%Ss`"
1292 log "Stopping the MDT:"
1293 stop_mds || return 5
# Background df forces the client to notice the dead MDT and reconnect.
1295 df $MOUNT > /dev/null 2>&1 &
1297 log "Restarting the MDT:"
1298 start_mds || return 6
1299 log "Wait for df ($DFPID) ... "
# Restore the caller's debug mask before analysing the log.
1302 sysctl -w lnet.debug="$DBG_SAVE"
1304 # retrieve from the log the first server that the client tried to
1305 # contact after the connection loss
1306 $LCTL dk $TMP/lustre-log-$TESTNAME.log
1307 NEXTCONN=`awk "/${MSG}/ {start = 1;}
1308 /import_select_connection.*${FSNAME}-MDT0000-mdc.* using connection/ {
1310 if (\\\$NF ~ /$FAKENID/)
1316 }" $TMP/lustre-log-$TESTNAME.log`
# NEXTCONN != 0 means the first reconnect attempt went to the fake NID.
1317 [ "$NEXTCONN" != "0" ] && log "The client didn't try to reconnect to the last active server (tried ${NEXTCONN} instead)" && return 7
1320 run_test 35 "Reconnect to the last active server first"
# test_36 body fragment: build a second filesystem whose two OSTs use
# DIFFERENT block sizes (1k and 4k), then check that the client-side df
# totals agree with the per-OST /proc kbytes counters within a small
# allowance. NOTE(review): the 'test_36() {' opening line, several 'fi'/
# 'return' lines, and the closing '}' are elided from this listing.
1324 local FSNAME2=test1234
1325 local fs2mds_HOST=$mds_HOST
1326 local fs2ost_HOST=$ost_HOST
1327 local fs3ost_HOST=$ost_HOST
# Prefer the dedicated ost1 host for both extra OSTs when one is configured.
1329 [ -n "$ost1_HOST" ] && fs2ost_HOST=$ost1_HOST && fs3ost_HOST=$ost1_HOST
# Skip when no explicit devices are given and the MDS is a real block device
# (mixed loopback/real setups are known broken), for single-OST configs, and
# when the OSTs are remote (no local obdfilter entries to read from $LPROC).
1332 if [ -z "$fs2ost_DEV" -o -z "$fs2mds_DEV" -o -z "$fs3ost_DEV" ]; then
1333 do_facet $SINGLEMDS [ -b "$MDSDEV" ] && \
1334 skip "mixed loopback and real device not working" && return
1336 [ $OSTCOUNT -lt 2 ] && skip "skipping test for single OST" && return
1338 [ $(grep -c obdfilter $LPROC/devices) -eq 0 ] &&
1339 skip "skipping test for remote OST" && return
1341 local fs2mdsdev=${fs2mds_DEV:-${MDSDEV}_2}
1342 local fs2ostdev=${fs2ost_DEV:-$(ostdevname 1)_2}
1343 local fs3ostdev=${fs3ost_DEV:-$(ostdevname 2)_2}
# One OST formatted with 1k blocks, the other with 4k -- the point of the test.
1344 add fs2mds $MDS_MKFS_OPTS --fsname=${FSNAME2} --reformat $fs2mdsdev || exit 10
1345 add fs2ost $OST_MKFS_OPTS --mkfsoptions='-b1024' --fsname=${FSNAME2} --mgsnode=$MGSNID --reformat $fs2ostdev || exit 10
1346 add fs3ost $OST_MKFS_OPTS --mkfsoptions='-b4096' --fsname=${FSNAME2} --mgsnode=$MGSNID --reformat $fs3ostdev || exit 10
1348 start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS
1349 start fs2ost $fs2ostdev $OST_MOUNT_OPTS
1350 start fs3ost $fs3ostdev $OST_MOUNT_OPTS
1352 mount -t lustre $MGSNID:/${FSNAME2} $MOUNT2 || return 1
1354 sleep 5 # until 11778 fixed
# Write some data so used/free counters move before sampling.
1356 dd if=/dev/zero of=$MOUNT2/$tfile bs=1M count=7 || return 2
# Sum the backend counters over all local obdfilter devices (in kbytes).
1358 BKTOTAL=`awk 'BEGIN{total=0}; {total+=$1}; END{print total}' \
1359 $LPROC/obdfilter/*/kbytestotal`
1360 BKFREE=`awk 'BEGIN{free=0}; {free+=$1}; END{print free}' \
1361 $LPROC/obdfilter/*/kbytesfree`
1362 BKAVAIL=`awk 'BEGIN{avail=0}; {avail+=$1}; END{print avail}' \
1363 $LPROC/obdfilter/*/kbytesavail`
# Client view: POSIX df gives total,used,available in 1k blocks.
1364 STRING=`df -P $MOUNT2 | tail -n 1 | awk '{print $2","$3","$4}'`
1365 DFTOTAL=`echo $STRING | cut -d, -f1`
1366 DFUSED=`echo $STRING | cut -d, -f2`
1367 DFAVAIL=`echo $STRING | cut -d, -f3`
1368 DFFREE=$(($DFTOTAL - $DFUSED))
# Tolerate a small per-OST accounting slack (64k per OST).
1370 ALLOWANCE=$((64 * $OSTCOUNT))
# Each df figure must match its backend sum to within the allowance.
1372 if [ $DFTOTAL -lt $(($BKTOTAL - $ALLOWANCE)) ] ||
1373 [ $DFTOTAL -gt $(($BKTOTAL + $ALLOWANCE)) ] ; then
1374 echo "**** FAIL: df total($DFTOTAL) mismatch OST total($BKTOTAL)"
1377 if [ $DFFREE -lt $(($BKFREE - $ALLOWANCE)) ] ||
1378 [ $DFFREE -gt $(($BKFREE + $ALLOWANCE)) ] ; then
1379 echo "**** FAIL: df free($DFFREE) mismatch OST free($BKFREE)"
1382 if [ $DFAVAIL -lt $(($BKAVAIL - $ALLOWANCE)) ] ||
1383 [ $DFAVAIL -gt $(($BKAVAIL + $ALLOWANCE)) ] ; then
1384 echo "**** FAIL: df avail($DFAVAIL) mismatch OST avail($BKAVAIL)"
# Non-failover stop of the second filesystem, then remove its devices and
# unload modules so later tests start clean.
1389 stop fs3ost -f || return 200
1390 stop fs2ost -f || return 201
1391 stop fs2mds -f || return 202
1392 rm -rf $MOUNT2 $fs2mdsdev $fs2ostdev $fs3ostdev
1393 unload_modules || return 203
1396 run_test 36 "df report consistency on OSTs with different block size"
# test_37 body fragment: format an MDT image, mount it THROUGH A SYMLINK,
# and verify mount does not emit "unable to set tunable" (i.e. tunables are
# applied for symlinked devices too). NOTE(review): the 'test_37() {'
# opening line and the closing 'fi'/'}' are elided from this listing.
1399 LOCAL_MDSDEV="$TMP/mdt.img"
1400 SYM_MDSDEV="$TMP/sym_mdt.img"
1402 echo "MDS : $LOCAL_MDSDEV"
1403 echo "SYMLINK : $SYM_MDSDEV"
# Small (9000kB) combined MDT+MGS image is enough for a mount smoke test.
1407 mkfs.lustre --reformat --fsname=lustre --mdt --mgs --device-size=9000 $LOCAL_MDSDEV ||
1408 error "mkfs.lustre $LOCAL_MDSDEV failed"
1409 ln -s $LOCAL_MDSDEV $SYM_MDSDEV
1411 echo "mount symlink device - $SYM_MDSDEV"
# Capture only the tunable-failure diagnostic from mount's verbose output;
# an empty result means tunables were set successfully.
1413 mount_op=`mount -v -t lustre -o loop $SYM_MDSDEV ${MOUNT%/*}/mds 2>&1 | grep "unable to set tunable"`
# -d also detaches the loop device set up by the mount above.
1414 umount -d ${MOUNT%/*}/mds
1415 rm -f $LOCAL_MDSDEV $SYM_MDSDEV
1417 if [ -n "$mount_op" ]; then
1418 error "**** FAIL: set tunables failed for symlink device"
1422 run_test 37 "verify set tunables works for symlink device"
# Suite teardown: unmount the client, announce completion, and dump the
# accumulated suite log (the trailing '|| true' keeps a missing log file
# from failing the script).
1424 umount_client $MOUNT
1428 equals_msg `basename $0`: test complete
1429 [ -f "$TESTSUITELOG" ] && cat $TESTSUITELOG || true