# bug number for skipped test:
ALWAYS_EXCEPT=${ALWAYS_EXCEPT:-""}
# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
+if [ "x$GSS_PIPEFS" != "xy" ]; then
+ ALWAYS_EXCEPT="$ALWAYS_EXCEPT 4"
+fi
[ "$SLOW" = "no" ] && EXCEPT="$EXCEPT"
SAVE_PWD=$PWD
-#
-# check pre-set $SEC
-#
-if [ ! -z $SEC ]; then
- if [ "$SEC" != "krb5i" -a "$SEC" != "krb5p" ]; then
- echo "SEC=$SEC is invalid, this script only run in gss mode (krb5i/krb5p)"
- exit 1
- fi
-fi
-
export SEC=${SEC:-krb5p}
export KRB5_CCACHE_DIR=/tmp
export KRB5_CRED=$KRB5_CCACHE_DIR/krb5cc_$RUNAS_ID
export KRB5_CRED_SAVE=$KRB5_CCACHE_DIR/krb5cc.sanity.save
-echo "Using security flavor $SEC"
+#
+# check pre-set $SEC
+#
+case "x$SEC" in
+ xkrb5*)
+ echo "Using ptlrpc security flavor $SEC"
+ ;;
+ *)
+ echo "SEC=$SEC is invalid, it must be a gss/krb5 flavor"
+ exit 1
+ ;;
+esac
LUSTRE=${LUSTRE:-`dirname $0`/..}
. $LUSTRE/tests/test-framework.sh
# cleanup all cred/ctx and touch
$RUNAS kdestroy
- $RUNAS $LFS flushctx
+ $RUNAS $LFS flushctx || error "can't flush ctx"
$RUNAS touch $MOUNT/f2_2 && error "unexpected success"
# restore and touch
# because we always use root credential to OSTs
$RUNAS kdestroy
$RUNAS $LFS flushctx
+ echo "destroyed credentials/contexts for $RUNAS_ID"
$RUNAS $CHECKSTAT -p 0666 $file && error "checkstat succeed"
kill -s 10 $OPPID
wait $OPPID || error "read file data failed"
# restore and check again
restore_krb5_cred
+ echo "restored credentials for $RUNAS_ID"
$RUNAS $CHECKSTAT -p 0666 $file || error "$RUNAS_ID checkstat (2) error"
+ echo "$RUNAS_ID checkstat OK"
$CHECKSTAT -p 0666 $file || error "$UID checkstat (2) error"
+ echo "$UID checkstat OK"
$RUNAS cat $file > /dev/null || error "$RUNAS_ID cat (2) error"
+ echo "$RUNAS_ID read file data OK"
}
run_test 3 "local cache under DLM lock"
test_5() {
local file1=$MOUNT/f5_1
local file2=$MOUNT/f5_2
- local wait_time=120
+ local wait_time=`expr $TIMEOUT + $TIMEOUT`
# current access should be ok
$RUNAS touch $file1 || error "can't touch $file1"
check_multiple_gss_daemons() {
local facet=$1
+ local gssd=$2
+ local gssd_name=`basename $gssd`
for ((i=0;i<10;i++)); do
- do_facet $facet "$LSVCGSSD -v &"
- done
- for ((i=0;i<10;i++)); do
- do_facet $facet "$LGSSD -v &"
+ do_facet $facet "$gssd -v &"
done
# wait daemons entering "stable" status
sleep 5
- numc=`do_facet $facet ps -o cmd -C lgssd | grep lgssd | wc -l`
- nums=`do_facet $facet ps -o cmd -C lgssd | grep lgssd | wc -l`
- echo "$numc lgssd and $nums lsvcgssd are running"
+ num=`do_facet $facet ps -o cmd -C $gssd_name | grep $gssd_name | wc -l`
+ echo "$num instance(s) of $gssd_name are running"
- if [ $numc -ne 1 -o $nums -ne 1 ]; then
- error "lgssd/lsvcgssd not unique"
+ if [ $num -ne 1 ]; then
+ error "$gssd_name not unique"
fi
}
start_gss_daemons
echo "check with someone already running..."
- check_multiple_gss_daemons $facet
+ check_multiple_gss_daemons $facet $LSVCGSSD
+ if [ "x$GSS_PIPEFS" == "xy" ]; then
+ check_multiple_gss_daemons $facet $LGSSD
+ fi
echo "check with someone run & finished..."
do_facet $facet killall -q -2 lgssd lsvcgssd || true
sleep 5 # wait fully exit
- check_multiple_gss_daemons $facet
+ check_multiple_gss_daemons $facet $LSVCGSSD
+ if [ "x$GSS_PIPEFS" == "xy" ]; then
+ check_multiple_gss_daemons $facet $LGSSD
+ fi
echo "check refresh..."
do_facet $facet killall -q -2 lgssd lsvcgssd || true
sleep 5 # wait fully exit
do_facet $facet ipcrm -S 0x3b92d473
- do_facet $facet ipcrm -S 0x3a92d473
- check_multiple_gss_daemons $facet
+ check_multiple_gss_daemons $facet $LSVCGSSD
+ if [ "x$GSS_PIPEFS" == "xy" ]; then
+ do_facet $facet ipcrm -S 0x3a92d473
+ check_multiple_gss_daemons $facet $LGSSD
+ fi
stop_gss_daemons
}
-run_test 100 "start more multiple gss daemons"
+run_test 100 "start multiple gss daemons"
TMPDIR=$OLDTMPDIR
TMP=$OLDTMP