3 # add uml1, uml2 and uml3 to your /etc/hosts
5 # FIXME - there is no reason to use all of these different
6 # return codes, especially when most of them are mapped to something
7 # else anyway. The combination of test number and return code
8 # figures out what failed.
13 PATH=$PWD/$SRCDIR:$SRCDIR:$SRCDIR/../utils:$PATH
15 LUSTRE=${LUSTRE:-`dirname $0`/..}
16 RLUSTRE=${RLUSTRE:-$LUSTRE}
17 MOUNTLUSTRE=${MOUNTLUSTRE:-/sbin/mount.lustre}
19 . $LUSTRE/tests/test-framework.sh
23 . ${CONFIG:=$LUSTRE/tests/cfg/local.sh}
28 add_mds mds --dev $MDSDEV --size $MDSSIZE
29 add_lov lov1 mds --stripe_sz $STRIPE_BYTES\
30 --stripe_cnt $STRIPES_PER_OBJ --stripe_pattern 0
31 add_ost ost --lov lov1 --dev $OSTDEV --size $OSTSIZE
32 add_client client mds --lov lov1 --path $MOUNT
38 add_mds mds2 --dev $MDSDEV --size $MDSSIZE
39 add_lov lov2 mds2 --stripe_sz $STRIPE_BYTES\
40 --stripe_cnt $STRIPES_PER_OBJ --stripe_pattern 0
41 add_ost ost2 --lov lov2 --dev $OSTDEV --size $OSTSIZE
42 add_client client mds2 --lov lov2 --path $MOUNT2
46 echo "start mds service on `facet_active_host mds`"
47 start mds --reformat $MDSLCONFARGS || return 94
50 echo "stop mds service on `facet_active_host mds`"
51 stop mds $@ || return 97
55 echo "start ost service on `facet_active_host ost`"
56 start ost --reformat $OSTLCONFARGS || return 95
60 echo "stop ost service on `facet_active_host ost`"
61 stop ost $@ || return 98
66 echo "mount lustre on ${MOUNTPATH}....."
67 zconf_mount `hostname` $MOUNTPATH || return 96
72 echo "umount lustre on ${MOUNTPATH}....."
73 zconf_umount `hostname` $MOUNTPATH || return 97
76 manual_umount_client(){
77 echo "manual umount lustre on ${MOUNTPATH}...."
78 do_facet client "umount $MOUNT"
88 umount_client $MOUNT || return 200
89 stop_mds || return 201
90 stop_ost || return 202
91 # catch case where these return just fine, but modules are still not unloaded
92 /sbin/lsmod | grep -q portals
94 echo "modules still loaded..."
100 do_facet client "touch $DIR/a" || return 71
101 do_facet client "rm $DIR/a" || return 72
102 echo "setup single mount lustre success"
106 do_facet client "touch $DIR/a" || return 71
107 do_facet client "rm $DIR/a" || return 72
108 do_facet client "touch $DIR2/a" || return 73
109 do_facet client "rm $DIR2/a" || return 74
110 echo "setup double mount lustre success"
115 #create single point mountpoint
124 check_mount || return 41
127 run_test 0 "single mount setup"
131 echo "start ost second time..."
132 start ost --reformat $OSTLCONFARGS
135 check_mount || return 42
138 run_test 1 "start up ost twice"
143 echo "start mds second time.."
144 start mds --reformat $MDSLCONFARGS
147 check_mount || return 43
150 run_test 2 "start up mds twice"
156 check_mount || return 44
161 run_test 3 "mount client twice"
165 touch $DIR/$tfile || return 85
169 # ok for ost to fail shutdown
170 if [ 202 -ne $eno ]; then
175 run_test 4 "force cleanup ost, then cleanup"
179 touch $DIR/$tfile || return 1
180 stop_mds --force || return 2
182 # cleanup may return an error from the failed
183 # disconnects; for now I'll consider this successful
184 # if all the modules have unloaded.
188 echo "killing umount"
189 kill -TERM $UMOUNT_PID
190 echo "waiting for umount to finish"
193 # cleanup client modules
194 $LCONF --cleanup --nosetup --node client_facet $XMLCONFIG > /dev/null
196 # stop_mds is a no-op here, and should not fail
200 lsmod | grep -q portals && return 6
203 run_test 5 "force cleanup mds, then cleanup"
210 [ -d $MOUNT ] || mkdir -p $MOUNT
211 $LCONF --nosetup --node client_facet $XMLCONFIG > /dev/null
212 llmount $mds_HOST://mds_svc/client_facet $MOUNT && exit 1
214 # cleanup client modules
215 $LCONF --cleanup --nosetup --node client_facet $XMLCONFIG > /dev/null
217 # stop_mds is a no-op here, and should not fail
221 lsmod | grep -q portals && return 4
225 run_test 5b "mds down, cleanup after failed mount (bug 2712)"
231 [ -d $MOUNT ] || mkdir -p $MOUNT
232 $LCONF --nosetup --node client_facet $XMLCONFIG > /dev/null
233 llmount $mds_HOST://wrong_mds_svc/client_facet $MOUNT && return 1
235 # cleanup client modules
236 $LCONF --cleanup --nosetup --node client_facet $XMLCONFIG > /dev/null
241 lsmod | grep -q portals && return 4
245 run_test 5c "cleanup after failed mount (bug 2712)"
252 [ -d $MOUNT ] || mkdir -p $MOUNT
253 $LCONF --nosetup --node client_facet $XMLCONFIG > /dev/null
254 llmount $mds_HOST://mds_svc/client_facet $MOUNT || return 1
256 umount $MOUNT || return 2
257 # cleanup client modules
258 $LCONF --cleanup --nosetup --node client_facet $XMLCONFIG > /dev/null
262 lsmod | grep -q portals && return 4
266 run_test 5d "ost down, don't crash during mount attempt"
271 mount_client ${MOUNT} || return 87
272 touch $DIR/a || return 86
275 run_test 6 "manual umount, then mount again"
282 run_test 7 "manual umount, then cleanup"
291 check_mount2 || return 45
293 umount_client $MOUNT2
298 run_test 8 "double mount setup"
301 # backup the old values of PTLDEBUG and SUBSYSTEM
302 OLDPTLDEBUG=$PTLDEBUG
303 OLDSUBSYSTEM=$SUBSYSTEM
305 # generate new configuration file with lmc --ptldebug and --subsystem
310 # check the result of lmc --ptldebug/subsystem
314 CHECK_PTLDEBUG="`cat /proc/sys/portals/debug`"
315 if [ $CHECK_PTLDEBUG = "1" ]; then
316 echo "lmc --debug success"
318 echo "lmc --debug: want 1, have $CHECK_PTLDEBUG"
321 CHECK_SUBSYSTEM="`cat /proc/sys/portals/subsystem_debug`"
322 if [ $CHECK_SUBSYSTEM = "2" ]; then
323 echo "lmc --subsystem success"
325 echo "lmc --subsystem: want 2, have $CHECK_SUBSYSTEM"
328 check_mount || return 41
331 # the new PTLDEBUG/SUBSYSTEM used for lconf --ptldebug/subsystem
332 PTLDEBUG="inode+trace"
335 # check lconf --ptldebug/subsystem overriding lmc --ptldebug/subsystem
338 CHECK_PTLDEBUG="`do_facet mds cat /proc/sys/portals/debug`"
339 if [ $CHECK_PTLDEBUG = "3" ]; then
340 echo "lconf --debug success"
342 echo "lconf --debug: want 3, have $CHECK_PTLDEBUG"
345 CHECK_SUBSYSTEM="`do_facet mds cat /proc/sys/portals/subsystem_debug`"
346 if [ $CHECK_SUBSYSTEM = "20" ]; then
347 echo "lconf --subsystem success"
349 echo "lconf --subsystem: want 20, have $CHECK_SUBSYSTEM"
353 check_mount || return 41
356 # restore the old configuration
357 PTLDEBUG=$OLDPTLDEBUG
358 SUBSYSTEM=$OLDSUBSYSTEM
362 run_test 9 "test --ptldebug and --subsystem for lmc and lconf"
365 echo "generate configuration with the same name for node and mds"
366 OLDXMLCONFIG=$XMLCONFIG
367 XMLCONFIG="broken.xml"
368 [ -f "$XMLCONFIG" ] && rm -f $XMLCONFIG
372 echo "the name for node and mds is the same"
373 do_lmc --add mds --node ${facet}_facet --mds ${facet}_facet \
374 --dev $MDSDEV --size $MDSSIZE || return $?
375 do_lmc --add lov --mds ${facet}_facet --lov lov1 --stripe_sz \
376 $STRIPE_BYTES --stripe_cnt $STRIPES_PER_OBJ \
377 --stripe_pattern 0 || return $?
378 add_ost ost --lov lov1 --dev $OSTDEV --size $OSTSIZE
380 add_facet $facet --lustre_upcall $UPCALL
381 do_lmc --add mtpt --node ${facet}_facet --mds mds_facet \
382 --lov lov1 --path $MOUNT
388 check_mount || return 41
392 XMLCONFIG=$OLDXMLCONFIG
394 run_test 10 "mount lustre with the same name for node and mds"
397 OLDXMLCONFIG=$XMLCONFIG
398 XMLCONFIG="conf11.xml"
400 [ -f "$XMLCONFIG" ] && rm -f $XMLCONFIG
401 add_mds mds --dev $MDSDEV --size $MDSSIZE
402 add_ost ost --dev $OSTDEV --size $OSTSIZE
403 add_client client mds --path $MOUNT --ost ost_svc || return $?
404 echo "Default lov config success!"
406 [ -f "$XMLCONFIG" ] && rm -f $XMLCONFIG
407 add_mds mds --dev $MDSDEV --size $MDSSIZE
408 add_ost ost --dev $OSTDEV --size $OSTSIZE
409 add_client client mds --path $MOUNT && return $?
410 echo "--add mtpt with neither --lov nor --ost will return error"
414 XMLCONFIG=$OLDXMLCONFIG
416 run_test 11 "use default lov configuration (should return error)"
419 OLDXMLCONFIG=$XMLCONFIG
420 XMLCONFIG="batch.xml"
421 BATCHFILE="batchfile"
424 [ -f "$XMLCONFIG" ] && rm -f $XMLCONFIG
425 [ -f "$BATCHFILE" ] && rm -f $BATCHFILE
426 echo "--add net --node localhost --nid localhost.localdomain --nettype tcp" > $BATCHFILE
427 echo "--add mds --node localhost --mds mds1 --mkfsoptions \"-I 128\"" >> $BATCHFILE
428 # --mkfsoptions "-I 128"
429 do_lmc -m $XMLCONFIG --batch $BATCHFILE || return $?
430 if [ `sed -n '/>-I 128</p' $XMLCONFIG | wc -l` -eq 1 ]; then
431 echo "matched double quote success"
433 echo "matched double quote fail"
438 echo "--add net --node localhost --nid localhost.localdomain --nettype tcp" > $BATCHFILE
439 echo "--add mds --node localhost --mds mds1 --mkfsoptions \"-I 128" >> $BATCHFILE
440 # --mkfsoptions "-I 128
441 do_lmc -m $XMLCONFIG --batch $BATCHFILE && return $?
442 echo "unmatched double quote should return error"
446 echo "--add net --node localhost --nid localhost.localdomain --nettype tcp" > $BATCHFILE
447 echo "--add mds --node localhost --mds mds1 --mkfsoptions '-I 128'" >> $BATCHFILE
448 # --mkfsoptions '-I 128'
449 do_lmc -m $XMLCONFIG --batch $BATCHFILE || return $?
450 if [ `sed -n '/>-I 128</p' $XMLCONFIG | wc -l` -eq 1 ]; then
451 echo "matched single quote success"
453 echo "matched single quote fail"
458 echo "--add net --node localhost --nid localhost.localdomain --nettype tcp" > $BATCHFILE
459 echo "--add mds --node localhost --mds mds1 --mkfsoptions '-I 128" >> $BATCHFILE
460 # --mkfsoptions '-I 128
461 do_lmc -m $XMLCONFIG --batch $BATCHFILE && return $?
462 echo "unmatched single quote should return error"
466 echo "--add net --node localhost --nid localhost.localdomain --nettype tcp" > $BATCHFILE
467 echo "--add mds --node localhost --mds mds1 --mkfsoptions \-\I\ \128" >> $BATCHFILE
468 # --mkfsoptions \-\I\ \128
469 do_lmc -m $XMLCONFIG --batch $BATCHFILE || return $?
470 if [ `sed -n '/>-I 128</p' $XMLCONFIG | wc -l` -eq 1 ]; then
471 echo "backslash followed by a whitespace/letter success"
473 echo "backslash followed by a whitespace/letter fail"
478 echo "--add net --node localhost --nid localhost.localdomain --nettype tcp" > $BATCHFILE
479 echo "--add mds --node localhost --mds mds1 --mkfsoptions -I\ 128\\" >> $BATCHFILE
480 # --mkfsoptions -I\ 128\
481 do_lmc -m $XMLCONFIG --batch $BATCHFILE && return $?
482 echo "backslash followed by nothing should return error"
485 XMLCONFIG=$OLDXMLCONFIG
487 run_test 12 "lmc --batch, with single/double quote, backslash in batchfile"
490 OLDXMLCONFIG=$XMLCONFIG
491 XMLCONFIG="conf13-1.xml"
492 SECONDXMLCONFIG="conf13-2.xml"
494 # check that long uuids are truncated properly and uniquely
495 echo "To generate XML configuration file(with long ost name): $XMLCONFIG"
496 [ -f "$XMLCONFIG" ] && rm -f $XMLCONFIG
497 do_lmc --add net --node localhost --nid localhost.localdomain --nettype tcp
498 do_lmc --add mds --node localhost --mds mds1_name_longer_than_31characters
499 do_lmc --add mds --node localhost --mds mds2_name_longer_than_31characters
500 if [ ! -f "$XMLCONFIG" ]; then
501 echo "Error:no file $XMLCONFIG created!"
504 EXPECTEDMDS1UUID="e_longer_than_31characters_UUID"
505 EXPECTEDMDS2UUID="longer_than_31characters_UUID_2"
506 FOUNDMDS1UUID=`awk -F"'" '/<mds .*uuid=/' $XMLCONFIG | sed -n '1p' \
507 | sed "s/ /\n\r/g" | awk -F"'" '/uuid=/{print $2}'`
508 FOUNDMDS2UUID=`awk -F"'" '/<mds .*uuid=/' $XMLCONFIG | sed -n '2p' \
509 | sed "s/ /\n\r/g" | awk -F"'" '/uuid=/{print $2}'`
510 if ([ $EXPECTEDMDS1UUID = $FOUNDMDS1UUID ] && [ $EXPECTEDMDS2UUID = $FOUNDMDS2UUID ]) || \
511 ([ $EXPECTEDMDS1UUID = $FOUNDMDS2UUID ] && [ $EXPECTEDMDS2UUID = $FOUNDMDS1UUID ]); then
512 echo "Success:long uuid truncated successfully and being unique."
514 echo "Error:expected uuid for mds1 and mds2: $EXPECTEDMDS1UUID; $EXPECTEDMDS2UUID"
515 echo "but: found uuid for mds1 and mds2: $FOUNDMDS1UUID; $FOUNDMDS2UUID"
519 # check that multiple invocations of lmc generate the same XML configuration file
521 echo "Generate the first XML configuration file"
523 echo "mv $XMLCONFIG to $SECONDXMLCONFIG"
524 mv $XMLCONFIG $SECONDXMLCONFIG || return $?
525 echo "Generate the second XML configuration file"
527 if [ `diff $XMLCONFIG $SECONDXMLCONFIG | wc -l` -eq 0 ]; then
528 echo "Success:multiple invocations for lmc generate same XML file"
530 echo "Error: multiple invocations for lmc generate different XML file"
535 rm -f $SECONDXMLCONFIG
536 XMLCONFIG=$OLDXMLCONFIG
538 run_test 13 "check new_uuid of lmc operating correctly"
543 # create xml file with --mkfsoptions for ost
544 echo "create xml file with --mkfsoptions for ost"
545 add_mds mds --dev $MDSDEV --size $MDSSIZE
546 add_lov lov1 mds --stripe_sz $STRIPE_BYTES\
547 --stripe_cnt $STRIPES_PER_OBJ --stripe_pattern 0
548 add_ost ost --lov lov1 --dev $OSTDEV --size $OSTSIZE \
549 --mkfsoptions "-Llabel_conf_14"
550 add_client client mds --lov lov1 --path $MOUNT
552 FOUNDSTRING=`awk -F"<" '/<mkfsoptions>/{print $2}' $XMLCONFIG`
553 EXPECTEDSTRING="mkfsoptions>-Llabel_conf_14"
554 if [ $EXPECTEDSTRING != $FOUNDSTRING ]; then
555 echo "Error: expected: $EXPECTEDSTRING; found: $FOUNDSTRING"
558 echo "Success:mkfsoptions for ost written to xml file correctly."
560 # mount lustre to test lconf mkfsoptions-parsing
564 mount_client $MOUNT || return $?
565 if [ -z "`dumpe2fs -h $OSTDEV | grep label_conf_14`" ]; then
566 echo "Error: the mkoptions not applied to mke2fs of ost."
570 echo "lconf mkfsoptions for ost success"
574 run_test 14 "test mkfsoptions of ost for lmc and lconf"
578 [ -f $MOUNTLUSTRE ] && echo "remove $MOUNTLUSTRE" && rm -f $MOUNTLUSTRE
579 if [ -f $MOUNTLUSTRE.sav ]; then
580 echo "return original $MOUNTLUSTRE.sav to $MOUNTLUSTRE"
581 mv $MOUNTLUSTRE.sav $MOUNTLUSTRE
588 echo "mount lustre on ${MOUNT} with $MOUNTLUSTRE....."
589 if [ -f "$MOUNTLUSTRE" ]; then
590 echo "save $MOUNTLUSTRE to $MOUNTLUSTRE.sav"
591 mv $MOUNTLUSTRE $MOUNTLUSTRE.sav
593 [ -f "$MOUNTLUSTRE" ] && echo "can't move $MOUNTLUSTRE" && return 40
594 trap cleanup_15 EXIT INT
595 [ ! `cp $LUSTRE/utils/llmount $MOUNTLUSTRE` ] || return $?
596 do_node `hostname` mkdir -p $MOUNT 2> /dev/null
597 # load llite module on the client if it isn't in /lib/modules
598 do_node `hostname` lconf --nosetup --node client_facet $XMLCONFIG
599 do_node `hostname` mount -t lustre -o nettype=$NETTYPE \
600 `facet_active_host mds`:/mds_svc/client_facet $MOUNT ||return $?
601 echo "mount lustre on $MOUNT with $MOUNTLUSTRE: success"
602 [ -d /r ] && $LCTL modules > /r/tmp/ogdb-`hostname`
603 check_mount || return 41
604 do_node `hostname` umount $MOUNT
606 [ -f "$MOUNTLUSTRE" ] && rm -f $MOUNTLUSTRE
607 echo "mount lustre on ${MOUNT} without $MOUNTLUSTRE....."
608 do_node `hostname` mount -t lustre -o nettype=$NETTYPE \
609 `facet_active_host mds`:/mds_svc/client_facet $MOUNT &&return $?
610 echo "mount lustre on $MOUNT without $MOUNTLUSTRE failed as expected"
614 run_test 15 "zconf-mount without /sbin/mount.lustre (should return error)"
617 TMPMTPT="/mnt/conf16"
619 if [ ! -f "$MDSDEV" ]; then
620 echo "no $MDSDEV existing, so mount Lustre to create one"
624 check_mount || return 41
628 echo "change the mode of $MDSDEV/OBJECTS,LOGS,PENDING to 555"
629 [ -d $TMPMTPT ] || mkdir -p $TMPMTPT
630 mount -o loop -t ext3 $MDSDEV $TMPMTPT || return $?
631 chmod 555 $TMPMTPT/OBJECTS || return $?
632 chmod 555 $TMPMTPT/LOGS || return $?
633 chmod 555 $TMPMTPT/PENDING || return $?
634 umount $TMPMTPT || return $?
636 echo "mount Lustre to change the mode of OBJECTS/LOGS/PENDING, then umount Lustre"
640 check_mount || return 41
643 echo "read the mode of OBJECTS/LOGS/PENDING and check if they has been changed properly"
644 EXPECTEDOBJECTSMODE=`debugfs -R "stat OBJECTS" $MDSDEV 2> /dev/null | awk '/Mode: /{print $6}'`
645 EXPECTEDLOGSMODE=`debugfs -R "stat LOGS" $MDSDEV 2> /dev/null | awk '/Mode: /{print $6}'`
646 EXPECTEDPENDINGMODE=`debugfs -R "stat PENDING" $MDSDEV 2> /dev/null | awk '/Mode: /{print $6}'`
648 if [ $EXPECTEDOBJECTSMODE = "0777" ]; then
649 echo "Success:Lustre change the mode of OBJECTS correctly"
651 echo "Error: Lustre does not change the mode of OBJECTS properly"
655 if [ $EXPECTEDLOGSMODE = "0777" ]; then
656 echo "Success:Lustre change the mode of LOGS correctly"
658 echo "Error: Lustre does not change the mode of LOGS properly"
662 if [ $EXPECTEDPENDINGMODE = "0777" ]; then
663 echo "Success:Lustre change the mode of PENDING correctly"
665 echo "Error: Lustre does not change the mode of PENDING properly"
669 run_test 16 "verify that lustre will correct the mode of OBJECTS/LOGS/PENDING"
672 TMPMTPT="/mnt/conf17"
674 if [ ! -f "$MDSDEV" ]; then
675 echo "no $MDSDEV existing, so mount Lustre to create one"
679 check_mount || return 41
683 echo "Remove mds config log"
684 [ -d $TMPMTPT ] || mkdir -p $TMPMTPT
685 mount -o loop -t ext3 $MDSDEV $TMPMTPT || return $?
686 rm -f $TMPMTPT/LOGS/mds_svc || return $?
687 umount $TMPMTPT || return $?
690 start mds $MDSLCONFARGS && return 42
693 run_test 17 "Verify failed mds_postsetup won't fail assertion (2936)"
696 [ -f $MDSDEV ] && echo "remove $MDSDEV" && rm -f $MDSDEV
697 echo "mount mds with large journal..."
702 echo "mount lustre system..."
706 check_mount || return 41
708 echo "check journal size..."
709 FOUNDJOURNALSIZE=`debugfs -R "stat <8>" $MDSDEV | awk '/Size: / { print $6; exit;}'`
710 if [ $FOUNDJOURNALSIZE = "79691776" ]; then
711 echo "Success:lconf creates large journals"
713 echo "Error:lconf not create large journals correctly"
714 echo "expected journal size: 79691776(76M), found journal size: $FOUNDJOURNALSIZE"
723 run_test 18 "check lconf creates large journals"