#!/bin/bash
#
-# Run select tests by setting SEC_ONLY, or as arguments to the script.
-# Skip specific tests by setting SEC_EXCEPT.
+# Run select tests by setting ONLY, or as arguments to the script.
+# Skip specific tests by setting EXCEPT.
#
set -e
-SRCDIR=`dirname $0`
+ONLY=${ONLY:-"$*"}
+# bug number for skipped test: 19430 19967 19967
+ALWAYS_EXCEPT=" 2 5 6 $SANITY_SEC_EXCEPT"
+# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
+
+SRCDIR=$(dirname $0)
export PATH=$PWD/$SRCDIR:$SRCDIR:$PWD/$SRCDIR/../utils:$PATH:/sbin
+export NAME=${NAME:-local}
-SEC_ONLY=${SEC_ONLY:-"$*"}
-[ "$SEC_EXCEPT" ] && echo "Skipping tests: `echo $SEC_EXCEPT`"
-
-TMP=${TMP:-/tmp}
-LFS=${LFS:-lfs}
-LCTL=${LCTL:-lctl}
-RUNAS=${RUNAS:-runas}
-WTL=${WTL:-write_time_limit}
-
-LPROC=/proc/fs/lustre
-ENABLE_IDENTITY=/usr/sbin/l_getidentity
-DISABLE_IDENTITY=NONE
-LUSTRE_CONF_DIR=/etc/lustre
-PERM_CONF=$LUSTRE_CONF_DIR/perm.conf
-LDLM_LPROC=$LPROC/ldlm
-LLITE_LPROC=$LPROC/llite
-MDC_LPROC=$LPROC/mdc
-MDT_LPROC=$LPROC/mdt
-OST_LPROC=$LPROC/obdfilter
-
-sec_log() {
- echo "$*"
- $LCTL mark "$*" 2> /dev/null || true
-}
-
-SANITYSECLOG=${SANITYSECLOG:-/tmp/sanity-sec.log}
-[ "$SANITYSECLOG" ] && rm -f $SANITYSECLOG || true
-
-sec_error() {
- sec_log "FAIL: $TESTNAME $@"
- if [ "$SANITYSECLOG" ]; then
- echo "FAIL: $TESTNAME $@" >> $SANITYSECLOG
- else
- exit 1
- fi
-}
+LUSTRE=${LUSTRE:-$(dirname $0)/..}
+. $LUSTRE/tests/test-framework.sh
+init_test_env $@
+. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
+init_logging
-sec_pass() {
- echo PASS $@
-}
+NODEMAP_TESTS=$(seq 7 27)
-sec_skip () {
- sec_log "$0: SKIP: $TESTNAME $@"
- [ "$SANITYSECLOG" ] && echo "$0: SKIP: $TESTNAME $@" >> $SANITYSECLOG
-}
+if ! check_versions; then
+ echo "It is NOT necessary to test nodemap under interoperation mode"
+ EXCEPT="$EXCEPT $NODEMAP_TESTS"
+fi
-ID1=500
-ID2=501
+[ "$SLOW" = "no" ] && EXCEPT_SLOW="26"
-USER1=`cat /etc/passwd|grep :$ID1:$ID1:|cut -d: -f1`
-USER2=`cat /etc/passwd|grep :$ID2:$ID2:|cut -d: -f1`
+[ "$ALWAYS_EXCEPT$EXCEPT$EXCEPT_SLOW" ] &&
+ echo "Skipping tests: $ALWAYS_EXCEPT $EXCEPT $EXCEPT_SLOW"
-if [ -z "$USER1" ]; then
- echo "===== Please add user1 (uid=$ID1 gid=$ID1)! Skip sanity-sec ====="
- sec_error "===== Please add user1 (uid=$ID1 gid=$ID1)! ====="
- exit 0
-fi
+RUNAS_CMD=${RUNAS_CMD:-runas}
-if [ -z "$USER2" ]; then
- echo "===== Please add user2 (uid=$ID2 gid=$ID2)! Skip sanity-sec ====="
- sec_error "===== Please add user2 (uid=$ID2 gid=$ID2)! ====="
- exit 0
-fi
+WTL=${WTL:-"$LUSTRE/tests/write_time_limit"}
-export NAME=${NAME:-local}
+CONFDIR=/etc/lustre
+PERM_CONF=$CONFDIR/perm.conf
+FAIL_ON_ERROR=false
-LUSTRE=${LUSTRE:-`dirname $0`/..}
-. $LUSTRE/tests/test-framework.sh
-init_test_env $@
-. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
+HOSTNAME_CHECKSUM=$(hostname | sum | awk '{ print $1 }')
+SUBNET_CHECKSUM=$(expr $HOSTNAME_CHECKSUM % 250 + 1)
+NODEMAP_COUNT=16
+NODEMAP_RANGE_COUNT=3
+NODEMAP_IPADDR_LIST="1 10 64 128 200 250"
+# idmap tests exercise client ids from 500 up to NODEMAP_MAX_ID-1
+NODEMAP_MAX_ID=$((500 + 128))
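+
+# the values above seed per-host identifiers: nodemaps are named
+# ${HOSTNAME_CHECKSUM}_<i> for i in 0..NODEMAP_COUNT-1, and each one gets
+# NODEMAP_RANGE_COUNT NID ranges of the form
+# $SUBNET_CHECKSUM.<i>.<j>.[1-253]@tcp, so concurrent runs from different
+# test hosts operate on disjoint nodemap names and NID ranges
+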
-mounted_lustre_filesystems() {
- awk '($3 ~ "lustre" && $1 ~ ":") { print $2 }' /proc/mounts
-}
+require_dsh_mds || exit 0
+require_dsh_ost || exit 0
-MOUNTED="`mounted_lustre_filesystems`"
-if [ -z "$MOUNTED" ]; then
- formatall
- setupall
- MOUNTED="`mounted_lustre_filesystems`"
- [ -z "$MOUNTED" ] && sec_error "NAME=$NAME not mounted"
- S_MOUNTED=yes
-fi
+clients=${CLIENTS//,/ }
+num_clients=$(get_node_count ${clients})
+clients_arr=($clients)
-[ `echo $MOUNT | wc -w` -gt 1 ] && sec_error "NAME=$NAME mounted more than once"
+ID0=${ID0:-500}
+ID1=${ID1:-501}
+USER0=$(getent passwd | grep :$ID0:$ID0: | cut -d: -f1)
+USER1=$(getent passwd | grep :$ID1:$ID1: | cut -d: -f1)
-DIR=${DIR:-$MOUNT}
-[ -z "`echo $DIR | grep $MOUNT`" ] && echo "$DIR not in $MOUNT" && \
- sec_cleanup && exit 99
+[ -z "$USER0" ] &&
+ skip "need to add user0 ($ID0:$ID0)" && exit 0
+
+[ -z "$USER1" ] &&
+ skip "need to add user1 ($ID1:$ID1)" && exit 0
-[ `ls -l $LDLM_LPROC/namespaces 2>/dev/null | grep *-mdc-* | wc -l` -gt 1 ] \
- && echo "skip multi-MDS test" && sec_cleanup && exit 0
+IDBASE=${IDBASE:-60000}
-OST_COUNT=$(ls -l $LDLM_LPROC/namespaces 2>/dev/null | grep osc | grep -v MDT | wc -l)
+# changes to mappings must be reflected in test 23
+FOPS_IDMAPS=(
+ [0]="$((IDBASE+3)):$((IDBASE+0)) $((IDBASE+4)):$((IDBASE+2))"
+ [1]="$((IDBASE+5)):$((IDBASE+1)) $((IDBASE+6)):$((IDBASE+2))"
+ )
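+# i.e. with the default IDBASE of 60000: nodemap c0 maps client uid/gid
+# 60003->60000 and 60004->60002, while c1 maps 60005->60001 and 60006->60002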
+
+check_and_setup_lustre
+
+sec_cleanup() {
+ if [ "$I_MOUNTED" = "yes" ]; then
+ cleanupall -f || error "sec_cleanup"
+ fi
+}
+
+DIR=${DIR:-$MOUNT}
+[ -z "$(echo $DIR | grep $MOUNT)" ] &&
+ error "$DIR not in $MOUNT" && sec_cleanup && exit 1
+
+[ $(echo $MOUNT | wc -w) -gt 1 ] &&
+ echo "NAME=$MOUNT mounted more than once" && sec_cleanup && exit 0
# for GSS_SUP
GSS_REF=$(lsmod | grep ^ptlrpc_gss | awk '{print $3}')
if [ ! -z "$GSS_REF" -a "$GSS_REF" != "0" ]; then
	GSS_SUP=1
	echo "with GSS support"
else
	GSS_SUP=0
	echo "without GSS support"
fi
-# for MDT_TYPE
-MDT_REF=$(lsmod | grep ^mdt | awk '{print $3}')
-if [ ! -z "$MDT_REF" -a "$MDT_REF" != "0" ]; then
- MDT_TYPE="local"
- echo "local mdt"
-else
- MDT_TYPE="remote"
- echo "remote mdt"
-fi
-
-MDT="`do_facet $SINGLEMDS ls -l $MDT_LPROC/ | grep MDT | awk '{print $9}'`"
-if [ ! -z "$MDT" ]; then
- IDENTITY_UPCALL=$MDT_LPROC/$MDT/identity_upcall
- IDENTITY_UPCALL_BAK="`more $IDENTITY_UPCALL`"
- IDENTITY_FLUSH=$MDT_LPROC/$MDT/identity_flush
- ROOTSQUASH_UID=$MDT_LPROC/$MDT/rootsquash_uid
- ROOTSQUASH_GID=$MDT_LPROC/$MDT/rootsquash_gid
- NOSQUASH_NIDS=$MDT_LPROC/$MDT/nosquash_nids
- MDSCAPA=$MDT_LPROC/$MDT/capa
- CAPA_TIMEOUT=$MDT_LPROC/$MDT/capa_timeout
-fi
-
-# for CLIENT_TYPE
-if [ -z "$(grep remote $LLITE_LPROC/*/client_type 2>/dev/null)" ]; then
- CLIENT_TYPE="local"
- echo "local client"
-else
- CLIENT_TYPE="remote"
- echo "remote client"
-fi
+MDT=$(do_facet $SINGLEMDS lctl get_param -N "mdt.\*MDT0000" |
+ cut -d. -f2 || true)
+[ -z "$MDT" ] && error "fail to get MDT device" && exit 1
+do_facet $SINGLEMDS "mkdir -p $CONFDIR"
+IDENTITY_FLUSH=mdt.$MDT.identity_flush
+IDENTITY_UPCALL=mdt.$MDT.identity_upcall
SAVE_PWD=$PWD
-sec_run_one() {
- BEFORE=`date +%s`
- sec_log "== test $1 $2= `date +%H:%M:%S` ($BEFORE)"
- export TESTNAME=test_$1
- test_$1 || sec_error "exit with rc=$?"
- unset TESTNAME
- sec_pass "($((`date +%s` - $BEFORE))s)"
-}
-
-build_test_filter() {
- for O in $SEC_ONLY; do
- eval SEC_ONLY_${O}=true
- done
- for E in $SEC_EXCEPT; do
- eval SEC_EXCEPT_${E}=true
- done
-}
-
-_basetest() {
- echo $*
-}
-
-basetest() {
- IFS=abcdefghijklmnopqrstuvwxyz _basetest $1
-}
-
-sec_run_test() {
- base=`basetest $1`
- if [ "$SEC_ONLY" ]; then
- testname=SEC_ONLY_$1
- if [ ${!testname}x != x ]; then
- sec_run_one $1 "$2"
- return $?
- fi
- testname=SEC_ONLY_$base
- if [ ${!testname}x != x ]; then
- sec_run_one $1 "$2"
- return $?
- fi
- echo -n "."
- return 0
- fi
- testname=SEC_EXCEPT_$1
- if [ ${!testname}x != x ]; then
- echo "skipping excluded test $1"
- return 0
- fi
- testname=SEC_EXCEPT_$base
- if [ ${!testname}x != x ]; then
- echo "skipping excluded test $1 (base $base)"
- return 0
- fi
- sec_run_one $1 "$2"
- return $?
-}
-
build_test_filter
sec_login() {
local user=$1
local group=$2
- if ! $RUNAS -u $user krb5_login.sh; then
- echo "$user login kerberos failed."
+ if ! $RUNAS_CMD -u $user krb5_login.sh; then
+ error "$user login kerberos failed."
exit 1
fi
- if ! $RUNAS -u $user -g $group ls $DIR > /dev/null; then
- $RUNAS -u $user lfs flushctx -k
- $RUNAS -u $user krb5_login.sh
- if ! $RUNAS -u $user -g $group ls $DIR > /dev/null; then
- echo "init $user $group failed."
- exit 2
- fi
+ if ! $RUNAS_CMD -u $user -g $group ls $DIR > /dev/null 2>&1; then
+ $RUNAS_CMD -u $user lfs flushctx -k
+ $RUNAS_CMD -u $user krb5_login.sh
+ if ! $RUNAS_CMD -u$user -g$group ls $DIR > /dev/null 2>&1; then
+ error "init $user $group failed."
+ exit 2
+ fi
fi
}
-setup() {
- if [ ! -z "$MDT" ]; then
- do_facet $SINGLEMDS echo $ENABLE_IDENTITY > $IDENTITY_UPCALL
- do_facet $SINGLEMDS echo -1 > $IDENTITY_FLUSH
- fi
+declare -a identity_old
- if ! $RUNAS -u $ID1 ls $DIR > /dev/null 2>&1; then
- sec_login $USER1 $USER1
+sec_setup() {
+ for num in $(seq $MDSCOUNT); do
+ switch_identity $num true || identity_old[$num]=$?
+ done
+
+ if ! $RUNAS_CMD -u $ID0 ls $DIR > /dev/null 2>&1; then
+ sec_login $USER0 $USER0
fi
- if ! $RUNAS -u $ID2 ls $DIR > /dev/null 2>&1; then
- sec_login $USER2 $USER2
+ if ! $RUNAS_CMD -u $ID1 ls $DIR > /dev/null 2>&1; then
+ sec_login $USER1 $USER1
fi
}
-setup
+sec_setup
# run as different user
test_0() {
- rm -rf $DIR/d0
- mkdir $DIR/d0
-
- chown $USER1 $DIR/d0 || sec_error
- $RUNAS -u $ID1 ls $DIR || sec_error
- $RUNAS -u $ID1 touch $DIR/f0 && sec_error
- $RUNAS -u $ID1 touch $DIR/d0/f1 || sec_error
- $RUNAS -u $ID2 touch $DIR/d0/f2 && sec_error
- touch $DIR/d0/f3 || sec_error
- chown root $DIR/d0
- chgrp $USER1 $DIR/d0
- chmod 775 $DIR/d0
- $RUNAS -u $ID1 touch $DIR/d0/f4 || sec_error
- $RUNAS -u $ID2 touch $DIR/d0/f5 && sec_error
- touch $DIR/d0/f6 || sec_error
-
- rm -rf $DIR/d0
-}
-sec_run_test 0 "uid permission ============================="
+ umask 0022
+
+ chmod 0755 $DIR || error "chmod (1)"
+ rm -rf $DIR/$tdir || error "rm (1)"
+ mkdir -p $DIR/$tdir || error "mkdir (1)"
+ chown $USER0 $DIR/$tdir || error "chown (2)"
+ $RUNAS_CMD -u $ID0 ls $DIR || error "ls (1)"
+ rm -f $DIR/f0 || error "rm (2)"
+ $RUNAS_CMD -u $ID0 touch $DIR/f0 && error "touch (1)"
+ $RUNAS_CMD -u $ID0 touch $DIR/$tdir/f1 || error "touch (2)"
+ $RUNAS_CMD -u $ID1 touch $DIR/$tdir/f2 && error "touch (3)"
+ touch $DIR/$tdir/f3 || error "touch (4)"
+ chown root $DIR/$tdir || error "chown (3)"
+ chgrp $USER0 $DIR/$tdir || error "chgrp (1)"
+ chmod 0775 $DIR/$tdir || error "chmod (2)"
+ $RUNAS_CMD -u $ID0 touch $DIR/$tdir/f4 || error "touch (5)"
+ $RUNAS_CMD -u $ID1 touch $DIR/$tdir/f5 && error "touch (6)"
+ touch $DIR/$tdir/f6 || error "touch (7)"
+ rm -rf $DIR/$tdir || error "rm (3)"
+}
+run_test 0 "uid permission ============================="
# setuid/gid
test_1() {
- [ $GSS_SUP = 0 ] && sec_skip "without GSS support." && return
- [ -z "$MDT" ] && sec_skip "do not support do_facet operations." && return
-
- do_facet $SINGLEMDS rm -f $PERM_CONF
- do_facet $SINGLEMDS echo -1 > $IDENTITY_FLUSH
-
- rm -rf $DIR/d1
- mkdir $DIR/d1
-
- chown $USER1 $DIR/d1 || sec_error
- $RUNAS -u $ID2 -v $ID1 touch $DIR/d1/f0 && sec_error
- do_facet $SINGLEMDS echo "\* $ID2 setuid" > $PERM_CONF
- echo "enable uid $ID2 setuid"
- do_facet $SINGLEMDS echo -1 > $IDENTITY_FLUSH
- $RUNAS -u $ID2 -v $ID1 touch $DIR/d1/f1 || sec_error
-
- chown root $DIR/d1
- chgrp $USER1 $DIR/d1
- chmod 770 $DIR/d1
- $RUNAS -u $ID2 -g $ID2 touch $DIR/d1/f2 && sec_error
- $RUNAS -u $ID2 -g $ID2 -j $ID1 touch $DIR/d1/f3 && sec_error
- do_facet $SINGLEMDS echo "\* $ID2 setuid,setgid" > $PERM_CONF
- echo "enable uid $ID2 setuid,setgid"
- do_facet $SINGLEMDS echo -1 > $IDENTITY_FLUSH
- $RUNAS -u $ID2 -g $ID2 -j $ID1 touch $DIR/d1/f4 || sec_error
- $RUNAS -u $ID2 -v $ID1 -g $ID2 -j $ID1 touch $DIR/d1/f5 || sec_error
-
- rm -rf $DIR/d1
-
- do_facet $SINGLEMDS rm -f $PERM_CONF
- do_facet $SINGLEMDS echo -1 > $IDENTITY_FLUSH
-}
-sec_run_test 1 "setuid/gid ============================="
-
-# remote_acl
-# for remote client only
-test_2 () {
- [ "$CLIENT_TYPE" = "local" ] && \
- sec_skip "remote_acl for remote client only" && return
- [ -z "$(grep ^acl $MDC_LPROC/*-mdc-*/connect_flags)" ] && \
- sec_skip "must have acl enabled" && return
- [ -z "$(which setfacl 2>/dev/null)" ] && \
- sec_skip "could not find setfacl" && return
- [ "$UID" != 0 ] && sec_skip "must run as root" && return
-
- rm -rf $DIR/d2
- mkdir $DIR/d2
- chmod 755 $DIR/d2
- echo xxx > $DIR/d2/f0
- chmod 644 $DIR/d2/f0
-
- $LFS getfacl $DIR/d2/f0 || sec_error
- $RUNAS -u $ID1 cat $DIR/d2/f0 || sec_error
- $RUNAS -u $ID1 touch $DIR/d2/f0 && sec_error
-
- $LFS setfacl -m u:$USER1:w $DIR/d2/f0 || sec_error
- $LFS getfacl $DIR/d2/f0 || sec_error
- echo "set user $USER1 write permission on file $DIR/d2/f0"
- $RUNAS -u $ID1 touch $DIR/d2/f0 || sec_error
- $RUNAS -u $ID1 cat $DIR/d2/f0 && sec_error
-
- rm -rf $DIR/d2
-}
-sec_run_test 2 "rmtacl ============================="
-
-# rootsquash
-# for remote mdt only
-test_3() {
- [ $GSS_SUP = 0 ] && sec_skip "without GSS support." && return
- [ -z "$MDT" ] && sec_skip "do not support do_facet operations." && return
- [ "$MDT_TYPE" = "local" ] && sec_skip "rootsquash for remote mdt only" && return
-
- do_facet $SINGLEMDS echo "-\*" > $NOSQUASH_NIDS
- do_facet $SINGLEMDS echo 0 > $ROOTSQUASH_UID
- do_facet $SINGLEMDS echo 0 > $ROOTSQUASH_GID
-
- rm -rf $DIR/d3
- mkdir $DIR/d3
- chown $USER1 $DIR/d3
- chmod 700 $DIR/d3
- do_facet $SINGLEMDS echo $ID1 > $ROOTSQUASH_UID
- echo "set rootsquash uid = $ID1"
- touch $DIR/f3_0 && sec_error
- touch $DIR/d3/f3_1 || sec_error
-
- do_facet $SINGLEMDS echo 0 > $ROOTSQUASH_UID
- echo "disable rootsquash"
- chown root $DIR/d3
- chgrp $USER2 $DIR/d3
- chmod 770 $DIR/d3
-
- do_facet $SINGLEMDS echo $ID1 > $ROOTSQUASH_UID
- echo "set rootsquash uid = $ID1"
- touch $DIR/d3/f3_2 && sec_error
- do_facet $SINGLEMDS echo $ID2 > $ROOTSQUASH_GID
- echo "set rootsquash gid = $ID2"
- touch $DIR/d3/f3_3 || sec_error
-
- do_facet $SINGLEMDS echo "+\*" > $NOSQUASH_NIDS
- echo "add host in rootsquash skip list"
- touch $DIR/f3_4 || sec_error
-
- do_facet $SINGLEMDS echo 0 > $ROOTSQUASH_UID
- do_facet $SINGLEMDS echo 0 > $ROOTSQUASH_GID
- do_facet $SINGLEMDS echo "-\*" > $NOSQUASH_NIDS
- rm -rf $DIR/d3
- rm -f $DIR/f3_?
-}
-sec_run_test 3 "rootsquash ============================="
+ [ $GSS_SUP = 0 ] && skip "without GSS support." && return
+
+ rm -rf $DIR/$tdir
+ mkdir -p $DIR/$tdir
+
+ chown $USER0 $DIR/$tdir || error "chown (1)"
+ $RUNAS_CMD -u $ID1 -v $ID0 touch $DIR/$tdir/f0 && error "touch (2)"
+ echo "enable uid $ID1 setuid"
+ do_facet $SINGLEMDS "echo '* $ID1 setuid' >> $PERM_CONF"
+ do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
+ $RUNAS_CMD -u $ID1 -v $ID0 touch $DIR/$tdir/f1 || error "touch (3)"
+
+ chown root $DIR/$tdir || error "chown (4)"
+ chgrp $USER0 $DIR/$tdir || error "chgrp (5)"
+ chmod 0770 $DIR/$tdir || error "chmod (6)"
+ $RUNAS_CMD -u $ID1 -g $ID1 touch $DIR/$tdir/f2 && error "touch (7)"
+ $RUNAS_CMD -u$ID1 -g$ID1 -j$ID0 touch $DIR/$tdir/f3 && error "touch (8)"
+ echo "enable uid $ID1 setuid,setgid"
+ do_facet $SINGLEMDS "echo '* $ID1 setuid,setgid' > $PERM_CONF"
+ do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
+ $RUNAS_CMD -u $ID1 -g $ID1 -j $ID0 touch $DIR/$tdir/f4 ||
+ error "touch (9)"
+ $RUNAS_CMD -u $ID1 -v $ID0 -g $ID1 -j $ID0 touch $DIR/$tdir/f5 ||
+ error "touch (10)"
+
+ rm -rf $DIR/$tdir
+
+ do_facet $SINGLEMDS "rm -f $PERM_CONF"
+ do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
+}
+run_test 1 "setuid/gid ============================="
# bug 3285 - supplementary group should always succeed.
# NB: the supplementary groups are set for local client only,
# as for remote client, the groups of the specified uid on MDT
# will be obtained by upcall /sbin/l_getidentity and used.
test_4() {
- rm -rf $DIR/d4
- mkdir $DIR/d4
- chmod 771 $DIR/d4
- chgrp $ID1 $DIR/d4
- $RUNAS -u $ID1 ls $DIR/d4 || sec_error "setgroups(1) failed"
- if [ "$CLIENT_TYPE" != "remote" ]; then
- if [ ! -z "$MDT" ]; then
- do_facet $SINGLEMDS echo "\* $ID2 setgrp" > $PERM_CONF
- do_facet $SINGLEMDS echo -1 > $IDENTITY_FLUSH
+ local server_version=$(lustre_version_code $SINGLEMDS)
+
+ [[ $server_version -ge $(version_code 2.6.93) ]] ||
+ [[ $server_version -ge $(version_code 2.5.35) &&
+ $server_version -lt $(version_code 2.5.50) ]] ||
+ { skip "Need MDS version at least 2.6.93 or 2.5.35"; return; }
+
+ rm -rf $DIR/$tdir
+ mkdir -p $DIR/$tdir
+ chmod 0771 $DIR/$tdir
+ chgrp $ID0 $DIR/$tdir
+ $RUNAS_CMD -u $ID0 ls $DIR/$tdir || error "setgroups (1)"
+ do_facet $SINGLEMDS "echo '* $ID1 setgrp' > $PERM_CONF"
+ do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
+ $RUNAS_CMD -u $ID1 -G1,2,$ID0 ls $DIR/$tdir ||
+ error "setgroups (2)"
+ $RUNAS_CMD -u $ID1 -G1,2 ls $DIR/$tdir && error "setgroups (3)"
+ rm -rf $DIR/$tdir
+
+ do_facet $SINGLEMDS "rm -f $PERM_CONF"
+ do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
+}
+run_test 4 "set supplementary group ==============="
+
+create_nodemaps() {
+ local i
+ local out
+ local rc
+
+ squash_id default 99 0
+ squash_id default 99 1
+ for (( i = 0; i < NODEMAP_COUNT; i++ )); do
+ local csum=${HOSTNAME_CHECKSUM}_${i}
+
+ if ! do_facet mgs $LCTL nodemap_add $csum; then
+ return 1
+ fi
+
+ out=$(do_facet mgs $LCTL get_param nodemap.$csum.id)
+		## fail if the new nodemap is missing from the parameter output
+ [[ $(echo $out | grep -c $csum) == 0 ]] && return 1
+ done
+ return 0
+}
+
+delete_nodemaps() {
+ local i
+ local out
+
+ for ((i = 0; i < NODEMAP_COUNT; i++)); do
+ local csum=${HOSTNAME_CHECKSUM}_${i}
+
+ if ! do_facet mgs $LCTL nodemap_del $csum; then
+ error "nodemap_del $csum failed with $?"
+ return 3
fi
- $RUNAS -u $ID2 -G1,2,$ID1 ls $DIR/d4 || sec_error "setgroups(2) failed"
- if [ ! -z "$MDT" ]; then
- do_facet $SINGLEMDS rm -f $PERM_CONF
- do_facet $SINGLEMDS echo -1 > $IDENTITY_FLUSH
+
+ out=$(do_facet mgs $LCTL get_param nodemap.$csum.id)
+ [[ $(echo $out | grep -c $csum) != 0 ]] && return 1
+ done
+ return 0
+}
+
+add_range() {
+ local j
+ local cmd="$LCTL nodemap_add_range"
+ local range
+ local rc=0
+
+ for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
+ range="$SUBNET_CHECKSUM.${2}.${j}.[1-253]@tcp"
+ if ! do_facet mgs $cmd --name $1 --range $range; then
+ rc=$((rc + 1))
fi
+ done
+ return $rc
+}
+
+delete_range() {
+ local j
+ local cmd="$LCTL nodemap_del_range"
+ local range
+ local rc=0
+
+ for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
+ range="$SUBNET_CHECKSUM.${2}.${j}.[1-253]@tcp"
+ if ! do_facet mgs $cmd --name $1 --range $range; then
+ rc=$((rc + 1))
+ fi
+ done
+
+ return $rc
+}
+
+add_idmaps() {
+ local i
+ local cmd="$LCTL nodemap_add_idmap"
+ local rc=0
+
+ for ((i = 0; i < NODEMAP_COUNT; i++)); do
+ local j
+
+ for ((j = 500; j < NODEMAP_MAX_ID; j++)); do
+ local csum=${HOSTNAME_CHECKSUM}_${i}
+ local client_id=$j
+ local fs_id=$((j + 1))
+
+ if ! do_facet mgs $cmd --name $csum --idtype uid \
+ --idmap $client_id:$fs_id; then
+ rc=$((rc + 1))
+ fi
+ if ! do_facet mgs $cmd --name $csum --idtype gid \
+ --idmap $client_id:$fs_id; then
+ rc=$((rc + 1))
+ fi
+ done
+ done
+
+ return $rc
+}
+
+delete_idmaps() {
+ local i
+ local cmd="$LCTL nodemap_del_idmap"
+ local rc=0
+
+ for ((i = 0; i < NODEMAP_COUNT; i++)); do
+ local j
+
+ for ((j = 500; j < NODEMAP_MAX_ID; j++)); do
+ local csum=${HOSTNAME_CHECKSUM}_${i}
+ local client_id=$j
+ local fs_id=$((j + 1))
+
+ if ! do_facet mgs $cmd --name $csum --idtype uid \
+ --idmap $client_id:$fs_id; then
+ rc=$((rc + 1))
+ fi
+ if ! do_facet mgs $cmd --name $csum --idtype gid \
+ --idmap $client_id:$fs_id; then
+ rc=$((rc + 1))
+ fi
+ done
+ done
+
+ return $rc
+}
+
+modify_flags() {
+ local i
+ local proc
+ local option
+ local cmd="$LCTL nodemap_modify"
+ local rc=0
+
+ proc[0]="admin_nodemap"
+ proc[1]="trusted_nodemap"
+ option[0]="admin"
+ option[1]="trusted"
+
+	for ((i = 0; i < 2; i++)); do
+		if ! do_facet mgs $cmd --name $1 --property ${option[$i]} \
+			--value 1; then
+			rc=$((rc + 1))
+		fi
+
+		if ! do_facet mgs $cmd --name $1 --property ${option[$i]} \
+			--value 0; then
+			rc=$((rc + 1))
+		fi
+	done
+
+ return $rc
+}
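+
+# e.g. modify_flags ${HOSTNAME_CHECKSUM}_0 turns the admin and trusted
+# properties of that nodemap on and then off again (exercised by test 11)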
+
+squash_id() {
+ [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
+ skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
+ return
+ local cmd
+
+ cmd[0]="$LCTL nodemap_modify --property squash_uid"
+ cmd[1]="$LCTL nodemap_modify --property squash_gid"
+
+ if ! do_facet mgs ${cmd[$3]} --name $1 --value $2; then
+ return 1
fi
- $RUNAS -u $ID2 -G1,2 ls $DIR/d4 && sec_error "setgroups(3) failed"
- rm -rf $DIR/d4
}
-sec_run_test 4 "set supplementary group ==============="
-mds_capability_timeout() {
- [ $# -lt 1 ] && echo "Miss mds capability timeout value" && return 1
+# ensure that the squash defaults are the expected defaults
+squash_id default 99 0
+squash_id default 99 1
+
+test_nid() {
+ local cmd
+
+ cmd="$LCTL nodemap_test_nid"
+
+ nid=$(do_facet mgs $cmd $1)
- echo "Set mds capability timeout as $1 seconds"
- do_facet $SINGLEMDS echo $1 > $CAPA_TIMEOUT
- return 0
+ if [ $nid == $2 ]; then
+ return 0
+ fi
+
+ return 1
}
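+
+# e.g. test_nid $SUBNET_CHECKSUM.0.0.1 ${HOSTNAME_CHECKSUM}_0 succeeds only
+# if the MGS resolves that NID to the named nodemap (see tests 13 and 14)
+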
-mds_capability_switch() {
- [ $# -lt 1 ] && echo "Miss mds capability switch value" && return 1
+test_idmap() {
+ local i
+ local cmd="$LCTL nodemap_test_id"
+ local rc=0
+
+ ## nodemap deactivated
+ if ! do_facet mgs $LCTL nodemap_activate 0; then
+ return 1
+ fi
+ for ((id = 500; id < NODEMAP_MAX_ID; id++)); do
+ local j
+
+ for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
+ local nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
+ local fs_id=$(do_facet mgs $cmd --nid $nid \
+ --idtype uid --id $id)
+ if [ $fs_id != $id ]; then
+ echo "expected $id, got $fs_id"
+ rc=$((rc + 1))
+ fi
+ done
+ done
+
+ ## nodemap activated
+ if ! do_facet mgs $LCTL nodemap_activate 1; then
+ return 2
+ fi
+
+ for ((id = 500; id < NODEMAP_MAX_ID; id++)); do
+ for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
+ nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
+ fs_id=$(do_facet mgs $cmd --nid $nid \
+ --idtype uid --id $id)
+ expected_id=$((id + 1))
+ if [ $fs_id != $expected_id ]; then
+ echo "expected $expected_id, got $fs_id"
+ rc=$((rc + 1))
+ fi
+ done
+ done
+
+ ## trust client ids
+ for ((i = 0; i < NODEMAP_COUNT; i++)); do
+ local csum=${HOSTNAME_CHECKSUM}_${i}
+
+ if ! do_facet mgs $LCTL nodemap_modify --name $csum \
+ --property trusted --value 1; then
+ error "nodemap_modify $csum failed with $?"
+ return 3
+ fi
+ done
+
+ for ((id = 500; id < NODEMAP_MAX_ID; id++)); do
+ for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
+ nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
+ fs_id=$(do_facet mgs $cmd --nid $nid \
+ --idtype uid --id $id)
+ if [ $fs_id != $id ]; then
+ echo "expected $id, got $fs_id"
+ rc=$((rc + 1))
+ fi
+ done
+ done
+
+	## enable the admin property so root access is allowed
+ for ((i = 0; i < NODEMAP_COUNT; i++)); do
+ local csum=${HOSTNAME_CHECKSUM}_${i}
+
+ if ! do_facet mgs $LCTL nodemap_modify --name $csum \
+ --property admin --value 1; then
+ error "nodemap_modify $csum failed with $?"
+ return 3
+ fi
+ done
+
+	## check that root is allowed
+ for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
+ nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
+ fs_id=$(do_facet mgs $cmd --nid $nid --idtype uid --id 0)
+ if [ $fs_id != 0 ]; then
+ echo "root allowed expected 0, got $fs_id"
+ rc=$((rc + 1))
+ fi
+ done
+
+	## disable the admin property so root gets squashed
+ for ((i = 0; i < NODEMAP_COUNT; i++)); do
+ local csum=${HOSTNAME_CHECKSUM}_${i}
+
+ if ! do_facet mgs $LCTL nodemap_modify --name $csum \
+ --property admin --value 0; then
+ error "nodemap_modify ${HOSTNAME_CHECKSUM}_${i} "
+ "failed with $rc"
+ return 3
+ fi
+ done
+
+ ## check that root is mapped to 99
+ for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
+ nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
+ fs_id=$(do_facet mgs $cmd --nid $nid --idtype uid --id 0)
+ if [ $fs_id != 99 ]; then
+ error "root squash expected 99, got $fs_id"
+ rc=$((rc + 1))
+ fi
+ done
+
+ ## reset client trust to 0
+ for ((i = 0; i < NODEMAP_COUNT; i++)); do
+ if ! do_facet mgs $LCTL nodemap_modify \
+ --name ${HOSTNAME_CHECKSUM}_${i} \
+ --property trusted --value 0; then
+ error "nodemap_modify ${HOSTNAME_CHECKSUM}_${i} "
+ "failed with $rc"
+ return 3
+ fi
+ done
+
+ return $rc
+}
+
+test_7() {
+ local rc
+
+ remote_mgs_nodsh && skip "remote MGS with nodsh" && return
+ [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
+ skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
+ return
+
+ create_nodemaps
+ rc=$?
+ [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
- case $1 in
- 0) echo "Turn off mds capability";;
- 3) echo "Turn on mds capability";;
- *) echo "Invalid mds capability switch value" && return 2;;
- esac
+ delete_nodemaps
+ rc=$?
+ [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 2
- do_facet $SINGLEMDS echo $1 > $MDSCAPA
- return 0
+ return 0
}
+run_test 7 "nodemap create and delete"
-oss_capability_switch() {
- [ $# -lt 1 ] && echo "Miss oss capability switch value" && return 1
+test_8() {
+ local rc
- case $1 in
- 0) echo "Turn off oss capability";;
- 1) echo "Turn on oss capability";;
- *) echo "Invalid oss capability switch value" && return 2;;
- esac
+ remote_mgs_nodsh && skip "remote MGS with nodsh" && return
+ [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
+ skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
+ return
- i=0;
- while [ $i -lt $OST_COUNT ]; do
- j=$i;
- i=`expr $i + 1`
- OST="`do_facet ost$i ls -l $OST_LPROC/ | grep OST | awk '{print $9}' | grep $j$`"
- do_facet ost$i echo $1 > $OST_LPROC/$OST/capa
+ # Set up nodemaps
+
+ create_nodemaps
+ rc=$?
+ [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
+
+ # Try duplicates
+
+ create_nodemaps
+ rc=$?
+ [[ $rc == 0 ]] && error "duplicate nodemap_add allowed with $rc" &&
+ return 2
+
+ # Clean up
+ delete_nodemaps
+ rc=$?
+ [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 3
+
+ return 0
+}
+run_test 8 "nodemap reject duplicates"
+
+test_9() {
+ local i
+ local rc
+
+ remote_mgs_nodsh && skip "remote MGS with nodsh" && return
+ [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
+ skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
+ return
+
+ rc=0
+ create_nodemaps
+ rc=$?
+ [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
+
+ rc=0
+ for ((i = 0; i < NODEMAP_COUNT; i++)); do
+ if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
+ rc=$((rc + 1))
+ fi
done
- return 0
+ [[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2
+
+ rc=0
+ for ((i = 0; i < NODEMAP_COUNT; i++)); do
+ if ! delete_range ${HOSTNAME_CHECKSUM}_${i} $i; then
+ rc=$((rc + 1))
+ fi
+ done
+ [[ $rc != 0 ]] && error "nodemap_del_range failed with $rc" && return 4
+
+ rc=0
+ delete_nodemaps
+ rc=$?
+ [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 4
+
+ return 0
}
+run_test 9 "nodemap range add"
+
+test_10() {
+ local rc
+
+ remote_mgs_nodsh && skip "remote MGS with nodsh" && return
+ [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
+ skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
+ return
+
+ rc=0
+ create_nodemaps
+ rc=$?
+ [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
+
+ rc=0
+ for ((i = 0; i < NODEMAP_COUNT; i++)); do
+ if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
+ rc=$((rc + 1))
+ fi
+ done
+ [[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2
-turn_capability_on() {
- local capa_timeout=${1:-"1800"}
+ rc=0
+ for ((i = 0; i < NODEMAP_COUNT; i++)); do
+ if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
+ rc=$((rc + 1))
+ fi
+ done
+ [[ $rc == 0 ]] && error "nodemap_add_range duplicate add with $rc" &&
+ return 2
- # To turn on fid capability for the system,
- # there is a requirement that fid capability
- # is turned on on all MDS/OSS servers before
- # client mount.
- umount $MOUNT || return 1
+ rc=0
+ for ((i = 0; i < NODEMAP_COUNT; i++)); do
+ if ! delete_range ${HOSTNAME_CHECKSUM}_${i} $i; then
+ rc=$((rc + 1))
+ fi
+ done
+ [[ $rc != 0 ]] && error "nodemap_del_range failed with $rc" && return 4
- mds_capability_switch 3 || return 2
- oss_capability_switch 1 || return 3
- mds_capability_timeout $capa_timeout || return 4
+ delete_nodemaps
+ rc=$?
+ [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 5
- mount_client $MOUNT || return 5
- return 0
+ return 0
}
+run_test 10 "nodemap reject duplicate ranges"
+
+test_11() {
+ local rc
+
+ remote_mgs_nodsh && skip "remote MGS with nodsh" && return
+ [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
+ skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
+ return
-turn_capability_off() {
- # to turn off fid capability, you can just do
- # it in a live system. But, please turn off
- # capability of all OSS servers before MDS servers.
+ rc=0
+ create_nodemaps
+ rc=$?
+ [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
+
+ rc=0
+ for ((i = 0; i < NODEMAP_COUNT; i++)); do
+ if ! modify_flags ${HOSTNAME_CHECKSUM}_${i}; then
+ rc=$((rc + 1))
+ fi
+ done
+ [[ $rc != 0 ]] && error "nodemap_modify with $rc" && return 2
- oss_capability_switch 0 || return 1
- mds_capability_switch 0 || return 2
- return 0
+ rc=0
+ delete_nodemaps
+ rc=$?
+ [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 3
+
+ return 0
}
+run_test 11 "nodemap modify"
-# We demonstrate that access to the objects in the filesystem are not
-# accessible without supplying secrets from the MDS by disabling a
-# proc variable on the mds so that it does not supply secrets. We then
-# try and access objects which result in failure.
-test_5() {
- local file=$DIR/f5
+test_12() {
+ local rc
- [ -z "$MDT" ] && sec_skip "do not support do_facet operations." && return
- turn_capability_off
- rm -f $file
+ remote_mgs_nodsh && skip "remote MGS with nodsh" && return
+ [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
+ skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
+ return
- # Disable proc variable
- mds_capability_switch 0 || return 1
- oss_capability_switch 1 || return 2
+ rc=0
+ create_nodemaps
+ rc=$?
+ [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
- # proc variable disabled -- access to the objects in the filesystem
- # is not allowed
- echo "Should get Write error here : (proc variable are disabled "\
- "-- access to the objects in the filesystem is denied."
- $WTL $file 30
- if [ $? == 0 ]; then
- echo "Write worked well even though secrets not supplied."
- return 3
- fi
+ rc=0
+ for ((i = 0; i < NODEMAP_COUNT; i++)); do
+ if ! squash_id ${HOSTNAME_CHECKSUM}_${i} 88 0; then
+ rc=$((rc + 1))
+ fi
+ done
+ [[ $rc != 0 ]] && error "nodemap squash_uid with $rc" && return 2
- turn_capability_on || return 4
- sleep 5
+ rc=0
+ for ((i = 0; i < NODEMAP_COUNT; i++)); do
+ if ! squash_id ${HOSTNAME_CHECKSUM}_${i} 88 1; then
+ rc=$((rc + 1))
+ fi
+ done
+ [[ $rc != 0 ]] && error "nodemap squash_gid with $rc" && return 3
- # proc variable enabled, secrets supplied -- write should work now
- echo "Should not fail here : (proc variable enabled, secrets supplied "\
- "-- write should work now)."
- $WTL $file 30
- if [ $? != 0 ]; then
- echo "Write failed even though secrets supplied."
- return 5
- fi
+ rc=0
+ delete_nodemaps
+ rc=$?
+ [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4
- turn_capability_off
- rm -f $file
+ return 0
}
-sec_run_test 5 "capa secrets ========================="
-
-# Expiry: A test program is performing I/O on a file. It has credential
-# with an expiry half a minute later. While the program is running the
-# credentials expire and no automatic extensions or renewals are
-# enabled. The program will demonstrate an I/O failure.
-test_6() {
- local file=$DIR/f6
+run_test 12 "nodemap set squash ids"
- [ -z "$MDT" ] && sec_skip "do not support do_facet operations." && return
- turn_capability_off
- rm -f $file
-
- turn_capability_on 30 || return 1
- # Token expiry
- $WTL $file 60 || return 2
-
- # Reset MDS capability timeout
- mds_capability_timeout 30 || exit 3
- $WTL $file 60 &
- local PID=$!
- sleep 5
+test_13() {
+ local rc
+
+ remote_mgs_nodsh && skip "remote MGS with nodsh" && return
+ [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
+ skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
+ return
+
+ rc=0
+ create_nodemaps
+ rc=$?
+ [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
+
+ rc=0
+ for ((i = 0; i < NODEMAP_COUNT; i++)); do
+ if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
+ rc=$((rc + 1))
+ fi
+ done
+ [[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2
+
+ rc=0
+ for ((i = 0; i < NODEMAP_COUNT; i++)); do
+ for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
+ for k in $NODEMAP_IPADDR_LIST; do
+ if ! test_nid $SUBNET_CHECKSUM.$i.$j.$k \
+ ${HOSTNAME_CHECKSUM}_${i}; then
+ rc=$((rc + 1))
+ fi
+ done
+ done
+ done
+ [[ $rc != 0 ]] && error "nodemap_test_nid failed with $rc" && return 3
+
+ rc=0
+ delete_nodemaps
+ rc=$?
+ [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4
+
+ return 0
+}
+run_test 13 "test nids"
+
+test_14() {
+ local rc
+
+ remote_mgs_nodsh && skip "remote MGS with nodsh" && return
+ [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
+ skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
+ return
+
+ rc=0
+ create_nodemaps
+ rc=$?
+ [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
+
+ rc=0
+ for ((i = 0; i < NODEMAP_COUNT; i++)); do
+ for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
+ for k in $NODEMAP_IPADDR_LIST; do
+ if ! test_nid $SUBNET_CHECKSUM.$i.$j.$k \
+ default; then
+ rc=$((rc + 1))
+ fi
+ done
+ done
+ done
+ [[ $rc != 0 ]] && error "nodemap_test_nid failed with $rc" && return 3
+
+ rc=0
+ delete_nodemaps
+ rc=$?
+ [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4
+
+ return 0
+}
+run_test 14 "test default nodemap nid lookup"
+
+test_15() {
+ local rc
+
+ remote_mgs_nodsh && skip "remote MGS with nodsh" && return
+ [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
+ skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
+ return
+
+ rc=0
+ create_nodemaps
+ rc=$?
+ [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
+
+ rc=0
+ for ((i = 0; i < NODEMAP_COUNT; i++)); do
+ if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
+ rc=$((rc + 1))
+ fi
+ done
+ [[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2
+
+ rc=0
+ add_idmaps
+ rc=$?
+ [[ $rc != 0 ]] && error "nodemap_add_idmap failed with $rc" && return 3
- # To disable automatic renew, only need turn capa off on MDS.
- mds_capability_switch 0 || return 4
+ rc=0
+ test_idmap
+ rc=$?
+ [[ $rc != 0 ]] && error "nodemap_test_id failed with $rc" && return 4
- echo "We expect I/O failure."
- wait $PID
- if [ $? == 0 ]; then
- echo "no I/O failure got."
- return 5
+ rc=0
+ delete_idmaps
+ rc=$?
+ [[ $rc != 0 ]] && error "nodemap_del_idmap failed with $rc" && return 5
+
+ rc=0
+ delete_nodemaps
+ rc=$?
+ [[ $rc != 0 ]] && error "nodemap_delete failed with $rc" && return 6
+
+ return 0
+}
+run_test 15 "test id mapping"
+
+wait_nm_sync() {
+ local nodemap_name=$1
+ local key=$2
+ local proc_param="${nodemap_name}.${key}"
+ [ "$nodemap_name" == "active" ] && proc_param="active"
+
+ local is_active=$(do_facet mgs $LCTL get_param -n nodemap.active)
+ (( is_active == 0 )) && [ "$proc_param" != "active" ] && return
+
+ local is_sync
+ local out1=$(do_facet mgs $LCTL get_param nodemap.${proc_param})
+ local out2
+ local mgs_ip=$(host_nids_address $mgs_HOST $NETTYPE | cut -d' ' -f1)
+ local i
+
+ echo "On MGS ${mgs_ip}, ${proc_param} = $out1"
+
+ # wait up to 10 seconds for other servers to sync with mgs
+ for i in $(seq 1 10); do
+ for node in $(all_server_nodes); do
+ local node_ip=$(host_nids_address $node $NETTYPE |
+ cut -d' ' -f1)
+
+ is_sync=true
+ [ $node_ip == $mgs_ip ] && continue
+
+ out2=$(do_node $node_ip $LCTL get_param \
+ nodemap.$proc_param 2>/dev/null)
+ echo "On $node ${node_ip}, ${proc_param} = $out2"
+ [ "$out1" != "$out2" ] && is_sync=false && break
+ done
+ $is_sync && break
+ sleep 1
+ done
+ if ! $is_sync; then
+ echo MGS
+ echo $out1
+ echo OTHER - IP: $node_ip
+ echo $out2
+ error "mgs and $nodemap_name ${key} mismatch, $i attempts"
fi
+ echo "waited $((i - 1)) seconds for sync"
+}
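+
+# typical uses: "wait_nm_sync c0 idmap" blocks until the c0 idmap table on
+# the MGS matches every other server; "wait_nm_sync active" waits for an
+# activation change to propagate the same way
+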
- turn_capability_off
- rm -f $file
+create_fops_nodemaps() {
+ local i=0
+ local client
+ for client in $clients; do
+ local client_ip=$(host_nids_address $client $NETTYPE)
+ local client_nid=$(h2$NETTYPE $client_ip)
+ do_facet mgs $LCTL nodemap_add c${i} || return 1
+ do_facet mgs $LCTL nodemap_add_range \
+ --name c${i} --range $client_nid || return 1
+ for map in ${FOPS_IDMAPS[i]}; do
+ do_facet mgs $LCTL nodemap_add_idmap --name c${i} \
+ --idtype uid --idmap ${map} || return 1
+ do_facet mgs $LCTL nodemap_add_idmap --name c${i} \
+ --idtype gid --idmap ${map} || return 1
+ done
+
+ wait_nm_sync c$i idmap
+
+ i=$((i + 1))
+ done
+ return 0
}
-sec_run_test 6 "capa expiry ========================="
-log "cleanup: ======================================================"
+delete_fops_nodemaps() {
+ local i=0
+ local client
+ for client in $clients; do
+ do_facet mgs $LCTL nodemap_del c${i} || return 1
+ i=$((i + 1))
+ done
+ return 0
+}
-unsetup() {
- if [ ! -z "$MDT" ]; then
- do_facet $SINGLEMDS echo $IDENTITY_UPCALL_BAK > $IDENTITY_UPCALL
- do_facet $SINGLEMDS echo -1 > $IDENTITY_FLUSH
+fops_mds_index=0
+nm_test_mkdir() {
+ if [ $MDSCOUNT -le 1 ]; then
+ do_node ${clients_arr[0]} mkdir -p $DIR/$tdir
+ else
+ # round-robin MDTs to test DNE nodemap support
+ [ ! -d $DIR ] && do_node ${clients_arr[0]} mkdir -p $DIR
+ do_node ${clients_arr[0]} $LFS setdirstripe -c 1 -i \
+ $((fops_mds_index % MDSCOUNT)) $DIR/$tdir
+ ((fops_mds_index++))
fi
+}
- $RUNAS -u $ID1 ls $DIR
- $RUNAS -u $ID2 ls $DIR
+# acl test directory needs to be initialized on a privileged client
+fops_test_setup() {
+ local admin=$(do_facet mgs $LCTL get_param -n nodemap.c0.admin_nodemap)
+ local trust=$(do_facet mgs $LCTL get_param -n \
+ nodemap.c0.trusted_nodemap)
+
+ do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
+ do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
+
+ wait_nm_sync c0 admin_nodemap
+ wait_nm_sync c0 trusted_nodemap
+
+ do_node ${clients_arr[0]} rm -rf $DIR/$tdir
+ nm_test_mkdir
+ do_node ${clients_arr[0]} chown $user $DIR/$tdir
+
+ do_facet mgs $LCTL nodemap_modify --name c0 \
+ --property admin --value $admin
+ do_facet mgs $LCTL nodemap_modify --name c0 \
+ --property trusted --value $trust
+
+ # flush MDT locks to make sure they are reacquired before test
+ do_node ${clients_arr[0]} $LCTL set_param \
+ ldlm.namespaces.$FSNAME-MDT*.lru_size=clear
+
+ wait_nm_sync c0 admin_nodemap
+ wait_nm_sync c0 trusted_nodemap
}
-unsetup
-sec_cleanup() {
- if [ "$S_MOUNTED" = "yes" ]; then
- cleanupall -f || sec_error "cleanup failed"
+# fileset test directory needs to be initialized on a privileged client
+fileset_test_setup() {
+ local admin=$(do_facet mgs $LCTL get_param -n nodemap.c0.admin_nodemap)
+ local trust=$(do_facet mgs $LCTL get_param -n \
+ nodemap.c0.trusted_nodemap)
+
+ do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
+ do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
+
+ wait_nm_sync c0 admin_nodemap
+ wait_nm_sync c0 trusted_nodemap
+
+ # create directory and populate it for subdir mount
+ do_node ${clients_arr[0]} mkdir $MOUNT/$subdir ||
+ error "unable to create dir $MOUNT/$subdir"
+ do_node ${clients_arr[0]} touch $MOUNT/$subdir/this_is_$subdir ||
+ error "unable to create file $MOUNT/$subdir/this_is_$subdir"
+ do_node ${clients_arr[0]} mkdir $MOUNT/$subdir/$subsubdir ||
+ error "unable to create dir $MOUNT/$subdir/$subsubdir"
+ do_node ${clients_arr[0]} touch \
+ $MOUNT/$subdir/$subsubdir/this_is_$subsubdir ||
+ error "unable to create file \
+ $MOUNT/$subdir/$subsubdir/this_is_$subsubdir"
+
+ do_facet mgs $LCTL nodemap_modify --name c0 \
+ --property admin --value $admin
+ do_facet mgs $LCTL nodemap_modify --name c0 \
+ --property trusted --value $trust
+
+ # flush MDT locks to make sure they are reacquired before test
+ do_node ${clients_arr[0]} $LCTL set_param \
+ ldlm.namespaces.$FSNAME-MDT*.lru_size=clear
+
+ wait_nm_sync c0 admin_nodemap
+ wait_nm_sync c0 trusted_nodemap
+}
+
+# fileset test directory needs to be initialized on a privileged client
+fileset_test_cleanup() {
+ local admin=$(do_facet mgs $LCTL get_param -n nodemap.c0.admin_nodemap)
+ local trust=$(do_facet mgs $LCTL get_param -n \
+ nodemap.c0.trusted_nodemap)
+
+ do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
+ do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
+
+ wait_nm_sync c0 admin_nodemap
+ wait_nm_sync c0 trusted_nodemap
+
+ # cleanup directory created for subdir mount
+ do_node ${clients_arr[0]} rm -rf $MOUNT/$subdir ||
+ error "unable to remove dir $MOUNT/$subdir"
+
+ do_facet mgs $LCTL nodemap_modify --name c0 \
+ --property admin --value $admin
+ do_facet mgs $LCTL nodemap_modify --name c0 \
+ --property trusted --value $trust
+
+ # flush MDT locks to make sure they are reacquired before test
+ do_node ${clients_arr[0]} $LCTL set_param \
+ ldlm.namespaces.$FSNAME-MDT*.lru_size=clear
+
+ wait_nm_sync c0 admin_nodemap
+ wait_nm_sync c0 trusted_nodemap
+}
+
+do_create_delete() {
+ local run_u=$1
+ local key=$2
+ local testfile=$DIR/$tdir/$tfile
+ local rc=0
+ local c=0 d=0
+ if $run_u touch $testfile >& /dev/null; then
+ c=1
+ $run_u rm $testfile && d=1
+ fi >& /dev/null
+
+ local res="$c $d"
+ local expected=$(get_cr_del_expected $key)
+ [ "$res" != "$expected" ] &&
+ error "test $key, wanted $expected, got $res" && rc=$((rc + 1))
+ return $rc
+}
+
+nodemap_check_quota() {
+ local run_u="$1"
+ $run_u lfs quota -q $DIR | awk '{ print $2; exit; }'
+}
+
+do_fops_quota_test() {
+ local run_u=$1
+ # fuzz quota used to account for possible indirect blocks, etc
+ local quota_fuzz=$(fs_log_size)
+ local qused_orig=$(nodemap_check_quota "$run_u")
+ local qused_high=$((qused_orig + quota_fuzz))
+ local qused_low=$((qused_orig - quota_fuzz))
+ local testfile=$DIR/$tdir/$tfile
+ $run_u dd if=/dev/zero of=$testfile bs=1M count=1 >& /dev/null ||
+ error "unable to write quota test file"
+ sync; sync_all_data || true
+
+ local qused_new=$(nodemap_check_quota "$run_u")
+ [ $((qused_new)) -lt $((qused_low + 1024)) -o \
+ $((qused_new)) -gt $((qused_high + 1024)) ] &&
+ error "$qused_new != $qused_orig + 1M after write, " \
+ "fuzz is $quota_fuzz"
+ $run_u rm $testfile || error "unable to remove quota test file"
+ wait_delete_completed_mds
+
+ qused_new=$(nodemap_check_quota "$run_u")
+ [ $((qused_new)) -lt $((qused_low)) \
+ -o $((qused_new)) -gt $((qused_high)) ] &&
+ error "quota not reclaimed, expect $qused_orig, " \
+ "got $qused_new, fuzz $quota_fuzz"
+}
+
+get_fops_mapped_user() {
+ local cli_user=$1
+
+ for ((i=0; i < ${#FOPS_IDMAPS[@]}; i++)); do
+ for map in ${FOPS_IDMAPS[i]}; do
+ if [ $(cut -d: -f1 <<< "$map") == $cli_user ]; then
+ cut -d: -f2 <<< "$map"
+ return
+ fi
+ done
+ done
+ echo -1
+}
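+
+# e.g. with the default FOPS_IDMAPS above, get_fops_mapped_user 60003
+# echoes 60000; an id with no mapping echoes -1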
+
+get_cr_del_expected() {
+ local -a key
+ IFS=":" read -a key <<< "$1"
+ local mapmode="${key[0]}"
+ local mds_user="${key[1]}"
+ local cluster="${key[2]}"
+ local cli_user="${key[3]}"
+ local mode="0${key[4]}"
+ local SUCCESS="1 1"
+ local FAILURE="0 0"
+ local noadmin=0
+ local mapped=0
+ local other=0
+
+ [[ $mapmode == *mapped* ]] && mapped=1
+ # only c1 is mapped in these test cases
+ [[ $mapmode == mapped_trusted* ]] && [ "$cluster" == "c0" ] && mapped=0
+ [[ $mapmode == *noadmin* ]] && noadmin=1
+
+ # o+wx works as long as the user isn't mapped
+ if [ $((mode & 3)) -eq 3 ]; then
+ other=1
+ fi
+
+ # if client user is root, check if root is squashed
+ if [ "$cli_user" == "0" ]; then
+		# when root is squashed, success requires the other bits
+ case $noadmin in
+ 0) echo $SUCCESS;;
+ 1) [ "$other" == "1" ] && echo $SUCCESS
+ [ "$other" == "0" ] && echo $FAILURE;;
+ esac
+ return
+ fi
+ if [ "$mapped" == "0" ]; then
+ [ "$other" == "1" ] && echo $SUCCESS
+ [ "$other" == "0" ] && echo $FAILURE
+ return
+ fi
+
+ # if mapped user is mds user, check for u+wx
+ mapped_user=$(get_fops_mapped_user $cli_user)
+ [ "$mapped_user" == "-1" ] &&
+ error "unable to find mapping for client user $cli_user"
+
+ if [ "$mapped_user" == "$mds_user" -a \
+ $(((mode & 0300) == 0300)) -eq 1 ]; then
+ echo $SUCCESS
+ return
+ fi
+ if [ "$mapped_user" != "$mds_user" -a "$other" == "1" ]; then
+ echo $SUCCESS
+ return
fi
+ echo $FAILURE
}
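+
+# e.g. get_cr_del_expected trusted_noadmin:60000:c0:60003:777 echoes "1 1"
+# (create and delete should both succeed: the ids are not mapped and the
+# mode grants o+wx); "0 0" means both operations are expected to fail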
+
+test_fops_admin_cli_i=""
+test_fops_chmod_dir() {
+ local current_cli_i=$1
+ local perm_bits=$2
+ local dir_to_chmod=$3
+ local new_admin_cli_i=""
+
+ # do we need to set up a new admin client?
+ [ "$current_cli_i" == "0" ] && [ "$test_fops_admin_cli_i" != "1" ] &&
+ new_admin_cli_i=1
+ [ "$current_cli_i" != "0" ] && [ "$test_fops_admin_cli_i" != "0" ] &&
+ new_admin_cli_i=0
+
+	# if there is only one client and it is non-admin, flip admin every time
+ if [ "$num_clients" == "1" ]; then
+ test_fops_admin_client=$clients
+ test_fops_admin_val=$(do_facet mgs $LCTL get_param -n \
+ nodemap.c0.admin_nodemap)
+ if [ "$test_fops_admin_val" != "1" ]; then
+ do_facet mgs $LCTL nodemap_modify \
+ --name c0 \
+ --property admin \
+ --value 1
+ wait_nm_sync c0 admin_nodemap
+ fi
+ elif [ "$new_admin_cli_i" != "" ]; then
+ # restore admin val to old admin client
+ if [ "$test_fops_admin_cli_i" != "" ] &&
+ [ "$test_fops_admin_val" != "1" ]; then
+ do_facet mgs $LCTL nodemap_modify \
+ --name c${test_fops_admin_cli_i} \
+ --property admin \
+ --value $test_fops_admin_val
+ wait_nm_sync c${test_fops_admin_cli_i} admin_nodemap
+ fi
+
+ test_fops_admin_cli_i=$new_admin_cli_i
+ test_fops_admin_client=${clients_arr[$new_admin_cli_i]}
+ test_fops_admin_val=$(do_facet mgs $LCTL get_param -n \
+ nodemap.c${new_admin_cli_i}.admin_nodemap)
+
+ if [ "$test_fops_admin_val" != "1" ]; then
+ do_facet mgs $LCTL nodemap_modify \
+ --name c${new_admin_cli_i} \
+ --property admin \
+ --value 1
+ wait_nm_sync c${new_admin_cli_i} admin_nodemap
+ fi
+ fi
+
+ do_node $test_fops_admin_client chmod $perm_bits $DIR/$tdir || return 1
+
+ # remove admin for single client if originally non-admin
+ if [ "$num_clients" == "1" ] && [ "$test_fops_admin_val" != "1" ]; then
+ do_facet mgs $LCTL nodemap_modify --name c0 --property admin \
+ --value 0
+ wait_nm_sync c0 admin_nodemap
+ fi
+
+ return 0
+}
+
+test_fops() {
+ local mapmode="$1"
+ local single_client="$2"
+ local client_user_list=([0]="0 $((IDBASE+3)) $((IDBASE+4))"
+ [1]="0 $((IDBASE+5)) $((IDBASE+6))")
+ local mds_i
+ local rc=0
+ local perm_bit_list="0 3 $((0300)) $((0303))"
+ # SLOW tests 000-007, 010-070, 100-700 (octal modes)
+ [ "$SLOW" == "yes" ] &&
+ perm_bit_list="0 $(seq 1 7) $(seq 8 8 63) $(seq 64 64 511) \
+ $((0303))"
+
+ # step through mds users. -1 means root
+ for mds_i in -1 0 1 2; do
+ local user=$((mds_i + IDBASE))
+ local client
+ local x
+
+ [ "$mds_i" == "-1" ] && user=0
+
+ echo mkdir -p $DIR/$tdir
+ fops_test_setup
+ local cli_i=0
+ for client in $clients; do
+ local u
+ for u in ${client_user_list[$cli_i]}; do
+ local run_u="do_node $client \
+ $RUNAS_CMD -u$u -g$u -G$u"
+ for perm_bits in $perm_bit_list; do
+ local mode=$(printf %03o $perm_bits)
+ local key
+ key="$mapmode:$user:c$cli_i:$u:$mode"
+ test_fops_chmod_dir $cli_i $mode \
+ $DIR/$tdir ||
+ error cannot chmod $key
+ do_create_delete "$run_u" "$key"
+ done
+
+ # check quota
+ test_fops_chmod_dir $cli_i 777 $DIR/$tdir ||
+ error cannot chmod $key
+ do_fops_quota_test "$run_u"
+ done
+
+ cli_i=$((cli_i + 1))
+ [ "$single_client" == "1" ] && break
+ done
+ rm -rf $DIR/$tdir
+ done
+ return $rc
+}
+
+nodemap_version_check () {
+ remote_mgs_nodsh && skip "remote MGS with nodsh" && return 1
+ [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
+ skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
+ return 1
+ return 0
+}
+
+nodemap_test_setup() {
+ local rc
+ local active_nodemap=1
+
+ [ "$1" == "0" ] && active_nodemap=0
+
+ do_nodes $(comma_list $(all_mdts_nodes)) \
+ $LCTL set_param mdt.*.identity_upcall=NONE
+
+ rc=0
+ create_fops_nodemaps
+ rc=$?
+ [[ $rc != 0 ]] && error "adding fops nodemaps failed $rc"
+
+ do_facet mgs $LCTL nodemap_activate $active_nodemap
+ wait_nm_sync active
+
+ do_facet mgs $LCTL nodemap_modify --name default \
+ --property admin --value 1
+ do_facet mgs $LCTL nodemap_modify --name default \
+ --property trusted --value 1
+ wait_nm_sync default trusted_nodemap
+}
+
+nodemap_test_cleanup() {
+ trap 0
+ delete_fops_nodemaps
+ rc=$?
+ [[ $rc != 0 ]] && error "removing fops nodemaps failed $rc"
+
+ do_facet mgs $LCTL nodemap_modify --name default \
+ --property admin --value 0
+ do_facet mgs $LCTL nodemap_modify --name default \
+ --property trusted --value 0
+ wait_nm_sync default trusted_nodemap
+
+ do_facet mgs $LCTL nodemap_activate 0
+ wait_nm_sync active 0
+
+ return 0
+}
+
+nodemap_clients_admin_trusted() {
+ local admin=$1
+ local tr=$2
+ local i=0
+ for client in $clients; do
+		do_facet mgs $LCTL nodemap_modify --name c${i} \
+			--property admin --value $admin
+		do_facet mgs $LCTL nodemap_modify --name c${i} \
+			--property trusted --value $tr
+ i=$((i + 1))
+ done
+ wait_nm_sync c$((i - 1)) admin_nodemap
+ wait_nm_sync c$((i - 1)) trusted_nodemap
+}
+
+test_16() {
+ nodemap_version_check || return 0
+ nodemap_test_setup 0
+
+ trap nodemap_test_cleanup EXIT
+ test_fops all_off
+ nodemap_test_cleanup
+}
+run_test 16 "test nodemap all_off fileops"
+
+test_17() {
+ nodemap_version_check || return 0
+ nodemap_test_setup
+
+ trap nodemap_test_cleanup EXIT
+ nodemap_clients_admin_trusted 0 1
+ test_fops trusted_noadmin 1
+ nodemap_test_cleanup
+}
+run_test 17 "test nodemap trusted_noadmin fileops"
+
+test_18() {
+ nodemap_version_check || return 0
+ nodemap_test_setup
+
+ trap nodemap_test_cleanup EXIT
+ nodemap_clients_admin_trusted 0 0
+ test_fops mapped_noadmin 1
+ nodemap_test_cleanup
+}
+run_test 18 "test nodemap mapped_noadmin fileops"
+
+test_19() {
+ nodemap_version_check || return 0
+ nodemap_test_setup
+
+ trap nodemap_test_cleanup EXIT
+ nodemap_clients_admin_trusted 1 1
+ test_fops trusted_admin 1
+ nodemap_test_cleanup
+}
+run_test 19 "test nodemap trusted_admin fileops"
+
+test_20() {
+ nodemap_version_check || return 0
+ nodemap_test_setup
+
+ trap nodemap_test_cleanup EXIT
+ nodemap_clients_admin_trusted 1 0
+ test_fops mapped_admin 1
+ nodemap_test_cleanup
+}
+run_test 20 "test nodemap mapped_admin fileops"
+
+test_21() {
+ nodemap_version_check || return 0
+ nodemap_test_setup
+
+ trap nodemap_test_cleanup EXIT
+ local x=1
+ local i=0
+ for client in $clients; do
+ do_facet mgs $LCTL nodemap_modify --name c${i} \
+ --property admin --value 0
+ do_facet mgs $LCTL nodemap_modify --name c${i} \
+ --property trusted --value $x
+ x=0
+ i=$((i + 1))
+ done
+ wait_nm_sync c$((i - 1)) trusted_nodemap
+
+ test_fops mapped_trusted_noadmin
+ nodemap_test_cleanup
+}
+run_test 21 "test nodemap mapped_trusted_noadmin fileops"
+
+test_22() {
+ nodemap_version_check || return 0
+ nodemap_test_setup
+
+ trap nodemap_test_cleanup EXIT
+ local x=1
+ local i=0
+ for client in $clients; do
+ do_facet mgs $LCTL nodemap_modify --name c${i} \
+ --property admin --value 1
+ do_facet mgs $LCTL nodemap_modify --name c${i} \
+ --property trusted --value $x
+ x=0
+ i=$((i + 1))
+ done
+ wait_nm_sync c$((i - 1)) trusted_nodemap
+
+ test_fops mapped_trusted_admin
+ nodemap_test_cleanup
+}
+run_test 22 "test nodemap mapped_trusted_admin fileops"
+
+# acl test directory needs to be initialized on a privileged client
+nodemap_acl_test_setup() {
+ local admin=$(do_facet mgs $LCTL get_param -n nodemap.c0.admin_nodemap)
+ local trust=$(do_facet mgs $LCTL get_param -n \
+ nodemap.c0.trusted_nodemap)
+
+ do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
+ do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
+
+ wait_nm_sync c0 admin_nodemap
+ wait_nm_sync c0 trusted_nodemap
+
+ do_node ${clients_arr[0]} rm -rf $DIR/$tdir
+ nm_test_mkdir
+ do_node ${clients_arr[0]} chmod a+rwx $DIR/$tdir ||
+ error unable to chmod a+rwx test dir $DIR/$tdir
+
+ do_facet mgs $LCTL nodemap_modify --name c0 \
+ --property admin --value $admin
+ do_facet mgs $LCTL nodemap_modify --name c0 \
+ --property trusted --value $trust
+
+ wait_nm_sync c0 trusted_nodemap
+}
+
+# returns 0 if the number of ACLs does not change on the second (mapped) client
+# after being set on the first client
+nodemap_acl_test() {
+ local user="$1"
+ local set_client="$2"
+ local get_client="$3"
+ local check_setfacl="$4"
+ local setfacl_error=0
+ local testfile=$DIR/$tdir/$tfile
+ local RUNAS_USER="$RUNAS_CMD -u $user"
+ local acl_count=0
+ local acl_count_post=0
+
+ nodemap_acl_test_setup
+ sleep 5
+
+ do_node $set_client $RUNAS_USER touch $testfile
+
+ # ACL masks aren't filtered by nodemap code, so we ignore them
+ acl_count=$(do_node $get_client getfacl $testfile | grep -v mask |
+ wc -l)
+ do_node $set_client $RUNAS_USER setfacl -m $user:rwx $testfile ||
+ setfacl_error=1
+
+	# if check_setfacl is set to 1, the setfacl call is expected to fail
+ if [ "$check_setfacl" == "1" ]; then
+ [ "$setfacl_error" != "1" ] && return 1
+ return 0
+ fi
+ [ "$setfacl_error" == "1" ] && echo "WARNING: unable to setfacl"
+
+ acl_count_post=$(do_node $get_client getfacl $testfile | grep -v mask |
+ wc -l)
+ [ $acl_count -eq $acl_count_post ] && return 0
+ return 1
+}
+
+test_23() {
+ nodemap_version_check || return 0
+ nodemap_test_setup
+
+ trap nodemap_test_cleanup EXIT
+ # 1 trusted cluster, 1 mapped cluster
+ local unmapped_fs=$((IDBASE+0))
+ local unmapped_c1=$((IDBASE+5))
+ local mapped_fs=$((IDBASE+2))
+ local mapped_c0=$((IDBASE+4))
+ local mapped_c1=$((IDBASE+6))
+
+ do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
+ do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
+
+ do_facet mgs $LCTL nodemap_modify --name c1 --property admin --value 0
+ do_facet mgs $LCTL nodemap_modify --name c1 --property trusted --value 0
+
+ wait_nm_sync c1 trusted_nodemap
+
+ # setfacl on trusted cluster to unmapped user, verify it's not seen
+ nodemap_acl_test $unmapped_fs ${clients_arr[0]} ${clients_arr[1]} ||
+ error "acl count (1)"
+
+ # setfacl on trusted cluster to mapped user, verify it's seen
+ nodemap_acl_test $mapped_fs ${clients_arr[0]} ${clients_arr[1]} &&
+ error "acl count (2)"
+
+ # setfacl on mapped cluster to mapped user, verify it's seen
+ nodemap_acl_test $mapped_c1 ${clients_arr[1]} ${clients_arr[0]} &&
+ error "acl count (3)"
+
+ # setfacl on mapped cluster to unmapped user, verify error
+ nodemap_acl_test $unmapped_fs ${clients_arr[1]} ${clients_arr[0]} 1 ||
+ error "acl count (4)"
+
+ # 2 mapped clusters
+ do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 0
+ do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 0
+
+ wait_nm_sync c0 trusted_nodemap
+
+ # setfacl to mapped user on c1, also mapped to c0, verify it's seen
+ nodemap_acl_test $mapped_c1 ${clients_arr[1]} ${clients_arr[0]} &&
+ error "acl count (5)"
+
+ # setfacl to mapped user on c1, not mapped to c0, verify not seen
+ nodemap_acl_test $unmapped_c1 ${clients_arr[1]} ${clients_arr[0]} ||
+ error "acl count (6)"
+
+ nodemap_test_cleanup
+}
+run_test 23 "test mapped ACLs"
+
+test_24() {
+ nodemap_test_setup
+
+ trap nodemap_test_cleanup EXIT
+ do_nodes $(comma_list $(all_server_nodes)) $LCTL get_param -R nodemap ||
+ error "proc readable file read failed"
+
+ nodemap_test_cleanup
+}
+run_test 24 "check nodemap proc files for LBUGs and Oopses"
+
+test_25() {
+ local tmpfile=$(mktemp)
+ local tmpfile2=$(mktemp)
+ local subdir=c0dir
+
+ nodemap_version_check || return 0
+
+ # stop clients for this test
+ zconf_umount_clients $CLIENTS $MOUNT ||
+ error "unable to umount clients $CLIENTS"
+
+ nodemap_test_setup
+
+ trap nodemap_test_cleanup EXIT
+
+ # create a new, empty nodemap, and add fileset info to it
+ do_facet mgs $LCTL nodemap_add test26 ||
+ error "unable to create nodemap test26"
+ do_facet mgs $LCTL set_param -P nodemap.test26.fileset=/$subdir ||
+ error "unable to add fileset info to nodemap test26"
+
+ wait_nm_sync test26 id
+
+ do_facet mgs $LCTL nodemap_info > $tmpfile
+ do_facet mds $LCTL nodemap_info > $tmpfile2
+
+ cleanup_and_setup_lustre
+ # stop clients for this test
+ zconf_umount_clients $CLIENTS $MOUNT ||
+ error "unable to umount clients $CLIENTS"
+
+ diff -q <(do_facet mgs $LCTL nodemap_info) $tmpfile >& /dev/null ||
+ error "nodemap_info diff on MGS after remount"
+
+ diff -q <(do_facet mds $LCTL nodemap_info) $tmpfile2 >& /dev/null ||
+ error "nodemap_info diff on MDS after remount"
+
+ # cleanup nodemap
+ do_facet mgs $LCTL nodemap_del test26 ||
+ error "cannot delete nodemap test26 from config"
+ nodemap_test_cleanup
+ # restart clients previously stopped
+ zconf_mount_clients $CLIENTS $MOUNT ||
+ error "unable to mount clients $CLIENTS"
+
+ rm -f $tmpfile $tmpfile2
+}
+run_test 25 "test save and reload nodemap config"
+
+test_26() {
+ nodemap_version_check || return 0
+
+ local large_i=32000
+
+ do_facet mgs "seq -f 'c%g' $large_i | xargs -n1 $LCTL nodemap_add"
+ wait_nm_sync c$large_i admin_nodemap
+
+ do_facet mgs "seq -f 'c%g' $large_i | xargs -n1 $LCTL nodemap_del"
+ wait_nm_sync c$large_i admin_nodemap
+}
+run_test 26 "test transferring very large nodemap"
+
+test_27() {
+ local subdir=c0dir
+ local subsubdir=c0subdir
+
+ nodemap_test_setup
+ trap nodemap_test_cleanup EXIT
+
+ fileset_test_setup
+
+ # add fileset info to nodemap
+ do_facet mgs $LCTL set_param nodemap.c0.fileset=/$subdir ||
+ error "unable to set fileset info on nodemap c0"
+ do_facet mgs $LCTL set_param -P nodemap.c0.fileset=/$subdir ||
+ error "unable to add fileset info to nodemap c0"
+ wait_nm_sync c0 fileset
+
+ # re-mount client
+ zconf_umount_clients ${clients_arr[0]} $MOUNT ||
+ error "unable to umount client ${clients_arr[0]}"
+ zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
+ error "unable to remount client ${clients_arr[0]}"
+
+ # test mount point content
+ do_node ${clients_arr[0]} test -f $MOUNT/this_is_$subdir ||
+ error "fileset not taken into account"
+
+ # re-mount client with sub-subdir
+ zconf_umount_clients ${clients_arr[0]} $MOUNT ||
+ error "unable to umount client ${clients_arr[0]}"
+ export FILESET=/$subsubdir
+ zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
+ error "unable to remount client ${clients_arr[0]}"
+ unset FILESET
+
+ # test mount point content
+ do_node ${clients_arr[0]} test -f $MOUNT/this_is_$subsubdir ||
+ error "subdir of fileset not taken into account"
+
+ # remove fileset info from nodemap
+ do_facet mgs $LCTL nodemap_set_fileset --name c0 --fileset \'\' ||
+ error "unable to delete fileset info on nodemap c0"
+ do_facet mgs $LCTL set_param -P nodemap.c0.fileset=\'\' ||
+ error "unable to reset fileset info on nodemap c0"
+ wait_nm_sync c0 fileset
+
+ # re-mount client
+ zconf_umount_clients ${clients_arr[0]} $MOUNT ||
+ error "unable to umount client ${clients_arr[0]}"
+ zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
+ error "unable to remount client ${clients_arr[0]}"
+
+ # test mount point content
+ do_node ${clients_arr[0]} test -d $MOUNT/$subdir ||
+ (ls $MOUNT ; error "fileset not cleared on nodemap c0")
+
+ fileset_test_cleanup
+ nodemap_test_cleanup
+}
+run_test 27 "test fileset in nodemap"
+
+log "cleanup: ======================================================"
+
+sec_unsetup() {
+ ## nodemap deactivated
+ do_facet mgs $LCTL nodemap_activate 0
+
+ for num in $(seq $MDSCOUNT); do
+ if [ "${identity_old[$num]}" = 1 ]; then
+ switch_identity $num false || identity_old[$num]=$?
+ fi
+ done
+
+ $RUNAS_CMD -u $ID0 ls $DIR
+ $RUNAS_CMD -u $ID1 ls $DIR
+}
+sec_unsetup
+
sec_cleanup
-echo '=========================== finished ==============================='
-[ -f "$SANITYSECLOG" ] && \
- cat $SANITYSECLOG && grep -q FAIL $SANITYSECLOG && exit 1 || true
-echo "$0 completed"
+complete $SECONDS
+exit_status