3 # Run select tests by setting ONLY, or as arguments to the script.
4 # Skip specific tests by setting EXCEPT.
10 # bug numbers for skipped tests - 2: 19430  5: 19967  6: 19967
11 ALWAYS_EXCEPT=" 2 5 6 $SANITY_SEC_EXCEPT"
12 # UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
14 [ "$ALWAYS_EXCEPT$EXCEPT" ] && \
15 echo "Skipping tests: $ALWAYS_EXCEPT $EXCEPT"
18 export PATH=$PWD/$SRCDIR:$SRCDIR:$PWD/$SRCDIR/../utils:$PATH:/sbin
19 export NAME=${NAME:-local}
21 LUSTRE=${LUSTRE:-$(dirname $0)/..}
22 . $LUSTRE/tests/test-framework.sh
24 . ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
27 RUNAS_CMD=${RUNAS_CMD:-runas}
29 WTL=${WTL:-"$LUSTRE/tests/write_time_limit"}
32 PERM_CONF=$CONFDIR/perm.conf
35 HOSTNAME_CHECKSUM=$(hostname | sum | awk '{ print $1 }')
36 SUBNET_CHECKSUM=$(expr $HOSTNAME_CHECKSUM % 250 + 1)
39 NODEMAP_IPADDR_LIST="1 10 64 128 200 250"
42 require_dsh_mds || exit 0
43 require_dsh_ost || exit 0
45 clients=${CLIENTS//,/ }
46 num_clients=$(get_node_count ${clients})
47 clients_arr=($clients)
51 USER0=$(grep :$ID0:$ID0: /etc/passwd | cut -d: -f1)
52 USER1=$(grep :$ID1:$ID1: /etc/passwd | cut -d: -f1)
55 skip "need to add user0 ($ID0:$ID0) to /etc/passwd" && exit 0
58 skip "need to add user1 ($ID1:$ID1) to /etc/passwd" && exit 0
60 IDBASE=${IDBASE:-60000}
62 # changes to mappings must be reflected in test 23
64 [0]="$((IDBASE+3)):$((IDBASE+0)) $((IDBASE+4)):$((IDBASE+2))"
65 [1]="$((IDBASE+5)):$((IDBASE+1)) $((IDBASE+6)):$((IDBASE+2))"
68 check_and_setup_lustre
71 if [ "$I_MOUNTED" = "yes" ]; then
72 cleanupall -f || error "sec_cleanup"
77 [ -z "`echo $DIR | grep $MOUNT`" ] && \
78 error "$DIR not in $MOUNT" && sec_cleanup && exit 1
80 [ `echo $MOUNT | wc -w` -gt 1 ] && \
81 echo "NAME=$MOUNT mounted more than once" && sec_cleanup && exit 0
83 [ $MDSCOUNT -gt 1 ] && \
84 echo "skip multi-MDS test" && sec_cleanup && exit 0
87 GSS_REF=$(lsmod | grep ^ptlrpc_gss | awk '{print $3}')
88 if [ ! -z "$GSS_REF" -a "$GSS_REF" != "0" ]; then
90 echo "with GSS support"
93 echo "without GSS support"
96 MDT=$(do_facet $SINGLEMDS lctl get_param -N "mdt.\*MDT0000" |
98 [ -z "$MDT" ] && error "fail to get MDT device" && exit 1
99 do_facet $SINGLEMDS "mkdir -p $CONFDIR"
100 IDENTITY_FLUSH=mdt.$MDT.identity_flush
101 IDENTITY_UPCALL=mdt.$MDT.identity_upcall
102 MDSCAPA=mdt.$MDT.capa
103 CAPA_TIMEOUT=mdt.$MDT.capa_timeout
104 MDSSECLEVEL=mdt.$MDT.sec_level
107 if [ -z "$(lctl get_param -n llite.*.client_type | grep remote 2>/dev/null)" ]
124 if ! $RUNAS_CMD -u $user krb5_login.sh; then
125 error "$user login kerberos failed."
129 if ! $RUNAS_CMD -u $user -g $group ls $DIR > /dev/null 2>&1; then
130 $RUNAS_CMD -u $user lfs flushctx -k
131 $RUNAS_CMD -u $user krb5_login.sh
132 if ! $RUNAS_CMD -u$user -g$group ls $DIR > /dev/null 2>&1; then
133 error "init $user $group failed."
139 declare -a identity_old
142 for num in `seq $MDSCOUNT`; do
143 switch_identity $num true || identity_old[$num]=$?
146 if ! $RUNAS_CMD -u $ID0 ls $DIR > /dev/null 2>&1; then
147 sec_login $USER0 $USER0
150 if ! $RUNAS_CMD -u $ID1 ls $DIR > /dev/null 2>&1; then
151 sec_login $USER1 $USER1
156 # run as different user
160 chmod 0755 $DIR || error "chmod (1)"
161 rm -rf $DIR/$tdir || error "rm (1)"
162 mkdir -p $DIR/$tdir || error "mkdir (1)"
164 if [ "$CLIENT_TYPE" = "remote" ]; then
165 do_facet $SINGLEMDS "echo '* 0 normtown' > $PERM_CONF"
166 do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
167 chown $USER0 $DIR/$tdir && error "chown (1)"
168 do_facet $SINGLEMDS "echo '* 0 rmtown' > $PERM_CONF"
169 do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
171 chown $USER0 $DIR/$tdir || error "chown (2)"
174 $RUNAS_CMD -u $ID0 ls $DIR || error "ls (1)"
175 rm -f $DIR/f0 || error "rm (2)"
176 $RUNAS_CMD -u $ID0 touch $DIR/f0 && error "touch (1)"
177 $RUNAS_CMD -u $ID0 touch $DIR/$tdir/f1 || error "touch (2)"
178 $RUNAS_CMD -u $ID1 touch $DIR/$tdir/f2 && error "touch (3)"
179 touch $DIR/$tdir/f3 || error "touch (4)"
180 chown root $DIR/$tdir || error "chown (3)"
181 chgrp $USER0 $DIR/$tdir || error "chgrp (1)"
182 chmod 0775 $DIR/$tdir || error "chmod (2)"
183 $RUNAS_CMD -u $ID0 touch $DIR/$tdir/f4 || error "touch (5)"
184 $RUNAS_CMD -u $ID1 touch $DIR/$tdir/f5 && error "touch (6)"
185 touch $DIR/$tdir/f6 || error "touch (7)"
186 rm -rf $DIR/$tdir || error "rm (3)"
188 if [ "$CLIENT_TYPE" = "remote" ]; then
189 do_facet $SINGLEMDS "rm -f $PERM_CONF"
190 do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
193 run_test 0 "uid permission ============================="
197 [ $GSS_SUP = 0 ] && skip "without GSS support." && return
199 if [ "$CLIENT_TYPE" = "remote" ]; then
200 do_facet $SINGLEMDS "echo '* 0 rmtown' > $PERM_CONF"
201 do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
207 chown $USER0 $DIR/$tdir || error "chown (1)"
208 $RUNAS_CMD -u $ID1 -v $ID0 touch $DIR/$tdir/f0 && error "touch (2)"
209 echo "enable uid $ID1 setuid"
210 do_facet $SINGLEMDS "echo '* $ID1 setuid' >> $PERM_CONF"
211 do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
212 $RUNAS_CMD -u $ID1 -v $ID0 touch $DIR/$tdir/f1 || error "touch (3)"
214 chown root $DIR/$tdir || error "chown (4)"
215 chgrp $USER0 $DIR/$tdir || error "chgrp (5)"
216 chmod 0770 $DIR/$tdir || error "chmod (6)"
217 $RUNAS_CMD -u $ID1 -g $ID1 touch $DIR/$tdir/f2 && error "touch (7)"
218 $RUNAS_CMD -u$ID1 -g$ID1 -j$ID0 touch $DIR/$tdir/f3 && error "touch (8)"
219 echo "enable uid $ID1 setuid,setgid"
220 do_facet $SINGLEMDS "echo '* $ID1 setuid,setgid' > $PERM_CONF"
221 do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
222 $RUNAS_CMD -u $ID1 -g $ID1 -j $ID0 touch $DIR/$tdir/f4 ||
224 $RUNAS_CMD -u $ID1 -v $ID0 -g $ID1 -j $ID0 touch $DIR/$tdir/f5 ||
229 do_facet $SINGLEMDS "rm -f $PERM_CONF"
230 do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
232 run_test 1 "setuid/gid ============================="
# Run a single remote-ACL subtest: execute the rmtacl driver script on
# the named test file ($1.test) under $SAVE_PWD/rmtacl/.
234 run_rmtacl_subtest() {
235 $SAVE_PWD/rmtacl/run $SAVE_PWD/rmtacl/$1.test
240 # for remote client only
242 [ "$CLIENT_TYPE" = "local" ] && \
243 skip "remote_acl for remote client only" && return
244 [ -z "$(lctl get_param -n mdc.*-mdc-*.connect_flags | grep ^acl)" ] && \
245 skip "must have acl enabled" && return
246 [ -z "$(which setfacl 2>/dev/null)" ] && \
247 skip "could not find setfacl" && return
248 [ "$UID" != 0 ] && skip "must run as root" && return
250 do_facet $SINGLEMDS "echo '* 0 rmtacl,rmtown' > $PERM_CONF"
251 do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
255 sec_login daemon daemon
256 sec_login games users
262 echo "performing cp ..."
263 run_rmtacl_subtest cp || error "cp"
264 echo "performing getfacl-noacl..."
265 run_rmtacl_subtest getfacl-noacl || error "getfacl-noacl"
266 echo "performing misc..."
267 run_rmtacl_subtest misc || error "misc"
268 echo "performing permissions..."
269 run_rmtacl_subtest permissions || error "permissions"
270 echo "performing setfacl..."
271 run_rmtacl_subtest setfacl || error "setfacl"
273 # inheritance test got from HP
274 echo "performing inheritance..."
275 cp $SAVE_PWD/rmtacl/make-tree .
277 run_rmtacl_subtest inheritance || error "inheritance"
283 do_facet $SINGLEMDS "rm -f $PERM_CONF"
284 do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
286 run_test 2 "rmtacl ============================="
289 # root_squash will be redesigned in Lustre 1.7
291 skip "root_squash will be redesigned in Lustre 1.7" && return
293 run_test 3 "rootsquash ============================="
295 # bug 3285 - supplementary group should always succeed.
296 # NB: the supplementary groups are set for local client only,
297 # as for remote client, the groups of the specified uid on MDT
298 # will be obtained by upcall /sbin/l_getidentity and used.
300 if [ "$CLIENT_TYPE" = "remote" ]; then
301 do_facet $SINGLEMDS "echo '* 0 rmtown' > $PERM_CONF"
302 do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
307 chmod 0771 $DIR/$tdir
308 chgrp $ID0 $DIR/$tdir
309 $RUNAS_CMD -u $ID0 ls $DIR/$tdir || error "setgroups (1)"
310 if [ "$CLIENT_TYPE" = "local" ]; then
311 do_facet $SINGLEMDS "echo '* $ID1 setgrp' > $PERM_CONF"
312 do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
313 $RUNAS_CMD -u $ID1 -G1,2,$ID0 ls $DIR/$tdir ||
314 error "setgroups (2)"
316 $RUNAS_CMD -u $ID1 -G1,2 ls $DIR/$tdir && error "setgroups (3)"
319 do_facet $SINGLEMDS "rm -f $PERM_CONF"
320 do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
322 run_test 4 "set supplementary group ==============="
# Set the MDS capability (capa) timeout.
# Arguments: $1 - timeout in seconds.  Returns 1 if no value supplied.
324 mds_capability_timeout() {
325 [ $# -lt 1 ] && echo "Miss mds capability timeout value" && return 1
327 echo "Set mds capability timeout as $1 seconds"
# $CAPA_TIMEOUT is mdt.$MDT.capa_timeout (defined near the top of the script).
328 do_facet $SINGLEMDS "lctl set_param -n $CAPA_TIMEOUT=$1"
# Switch the MDS security level: 0 = capa disabled for all clients,
# 1 = capa for remote clients only, 3 = capa for all clients.
# Returns 1 on a missing argument, 2 on an invalid value.
332 mds_sec_level_switch() {
333 [ $# -lt 1 ] && echo "Miss mds sec level switch value" && return 1
336 0) echo "Disable capa for all clients";;
337 1) echo "Enable capa for remote client";;
338 3) echo "Enable capa for all clients";;
339 *) echo "Invalid mds sec level switch value" && return 2;;
# $MDSSECLEVEL is mdt.$MDT.sec_level.
342 do_facet $SINGLEMDS "lctl set_param -n $MDSSECLEVEL=$1"
# Switch the security level on every OST (same values as the MDS
# variant: 0/1/3).  Returns 3 when an OST device name cannot be found.
346 oss_sec_level_switch() {
347 [ $# -lt 1 ] && echo "Miss oss sec level switch value" && return 1
350 0) echo "Disable capa for all clients";;
351 1) echo "Enable capa for remote client";;
352 3) echo "Enable capa for all clients";;
353 *) echo "Invalid oss sec level switch value" && return 2;;
356 for i in `seq $OSTCOUNT`; do
# Facet names (ost1..N) are 1-based while OST device indices are 0-based.
357 local j=`expr $i - 1`
# Derive this OST's obdfilter device name from its stats proc entry.
358 local OST="`do_facet ost$i "lctl get_param -N obdfilter.\*OST\*$j/stats 2>/dev/null | cut -d"." -f2" || true`"
359 [ -z "$OST" ] && return 3
360 do_facet ost$i "lctl set_param -n obdfilter.$OST.sec_level=$1"
# Toggle the MDS capability flag: 0 = off, 3 = on.
# Returns 1 on a missing argument, 2 on an invalid value.
365 mds_capability_switch() {
366 [ $# -lt 1 ] && echo "Miss mds capability switch value" && return 1
369 0) echo "Turn off mds capability";;
370 3) echo "Turn on mds capability";;
371 *) echo "Invalid mds capability switch value" && return 2;;
# $MDSCAPA is mdt.$MDT.capa.
374 do_facet $SINGLEMDS "lctl set_param -n $MDSCAPA=$1"
# Toggle the capability flag on every OST: 0 = off, 1 = on.
# Returns 3 when an OST device name cannot be found.
378 oss_capability_switch() {
379 [ $# -lt 1 ] && echo "Miss oss capability switch value" && return 1
382 0) echo "Turn off oss capability";;
383 1) echo "Turn on oss capability";;
384 *) echo "Invalid oss capability switch value" && return 2;;
387 for i in `seq $OSTCOUNT`; do
# Facet names (ost1..N) are 1-based while OST device indices are 0-based.
388 local j=`expr $i - 1`
389 local OST="`do_facet ost$i "lctl get_param -N obdfilter.\*OST\*$j/stats 2>/dev/null | cut -d"." -f2" || true`"
390 [ -z "$OST" ] && return 3
391 do_facet ost$i "lctl set_param -n obdfilter.$OST.capa=$1"
397 mds_capability_switch 3 || return 1
398 mds_sec_level_switch 3 || return 2
403 oss_capability_switch 1 || return 1
404 oss_sec_level_switch 3 || return 2
# Enable fid capabilities system-wide.  Optional $1 is the capability
# timeout in seconds (default 1800).  Servers are enabled first, then
# the client is remounted so it picks the new settings up.
408 turn_capability_on() {
409 local capa_timeout=${1:-"1800"}
411 # To turn on fid capability for the system,
412 # there is a requirement that fid capability
413 # is turned on on all MDS/OSS servers before
416 turn_mds_capa_on || return 1
417 turn_oss_capa_on || return 2
418 mds_capability_timeout $capa_timeout || return 3
419 remount_client $MOUNT || return 4
# Disable capability support on the MDS: drop the sec level first,
# then clear the capability flag.
423 turn_mds_capa_off() {
424 mds_sec_level_switch 0 || return 1
425 mds_capability_switch 0 || return 2
# Disable capability support on all OSTs: drop the sec level first,
# then clear the capability flag.
429 turn_oss_capa_off() {
430 oss_sec_level_switch 0 || return 1
431 oss_capability_switch 0 || return 2
# Disable fid capabilities system-wide; OSS servers first, then the MDS.
435 turn_capability_off() {
436 # to turn off fid capability, you can just do
437 # it in a live system. But, please turn off
438 # capability of all OSS servers before MDS servers.
440 turn_oss_capa_off || return 1
441 turn_mds_capa_off || return 2
445 # We demonstrate that the objects in the filesystem are not accessible
446 # without secrets supplied by the MDS: we disable a proc variable on
447 # the MDS so that it does not supply secrets, and then any attempt to
448 # access the objects fails.
452 [ $GSS_SUP = 0 ] && skip "without GSS support." && return
453 if ! remote_mds; then
454 skip "client should be separated from server."
462 error "turn_capability_off"
468 error "turn_oss_capa_on"
472 if [ "$CLIENT_TYPE" = "remote" ]; then
473 remount_client $MOUNT && return 3
477 remount_client $MOUNT || return 4
480 # proc variable disabled -- access to the objects in the filesystem
482 echo "Should get Write error here : (proc variable are disabled "\
483 "-- access to the objects in the filesystem is denied."
486 error "Write worked well even though secrets not supplied."
492 error "turn_capability_on"
498 # proc variable enabled, secrets supplied -- write should work now
499 echo "Should not fail here : (proc variable enabled, secrets supplied "\
500 "-- write should work now)."
503 error "Write failed even though secrets supplied."
509 error "turn_capability_off"
514 run_test 5 "capa secrets ========================="
516 # Expiry: A test program is performing I/O on a file. It has credential
517 # with an expiry half a minute later. While the program is running the
518 # credentials expire and no automatic extensions or renewals are
519 # enabled. The program will demonstrate an I/O failure.
523 [ $GSS_SUP = 0 ] && skip "without GSS support." && return
524 if ! remote_mds; then
525 skip "client should be separated from server."
531 error "turn_capability_off"
537 turn_capability_on 30
539 error "turn_capability_on 30"
546 error "$WTL $file 60"
550 # Reset MDS capability timeout
551 mds_capability_timeout 30
553 error "mds_capability_timeout 30"
561 # To disable automatic renewal, we only need to turn capa off on the MDS.
564 error "turn_mds_capa_off"
568 echo "We expect I/O failure."
571 echo "no I/O failure got."
577 error "turn_capability_off"
582 run_test 6 "capa expiry ========================="
589 squash_id default 99 0
590 squash_id default 99 1
591 for (( i = 0; i < NODEMAP_COUNT; i++ )); do
592 if ! do_facet mgs $LCTL nodemap_add \
593 ${HOSTNAME_CHECKSUM}_${i}; then
596 out=$(do_facet mgs $LCTL get_param \
597 nodemap.${HOSTNAME_CHECKSUM}_${i}.id)
598 ## grep -c prints the match count; zero matches means the new nodemap is missing, so fail below
599 rc=$(echo $out | grep -c ${HOSTNAME_CHECKSUM}_${i})
600 [[ $rc == 0 ]] && return 1
610 for ((i = 0; i < NODEMAP_COUNT; i++)); do
611 if ! do_facet mgs $LCTL nodemap_del \
612 ${HOSTNAME_CHECKSUM}_${i}; then
613 error "nodemap_del ${HOSTNAME_CHECKSUM}_${i} \
617 out=$(do_facet mgs $LCTL get_param \
618 nodemap.${HOSTNAME_CHECKSUM}_${i}.id)
619 rc=$(echo $out | grep -c ${HOSTNAME_CHECKSUM}_${i})
620 [[ $rc != 0 ]] && return 1
627 local cmd="$LCTL nodemap_add_range"
631 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
632 range="$SUBNET_CHECKSUM.${2}.${j}.[1-253]@tcp"
633 if ! do_facet mgs $cmd --name $1 \
643 local cmd="$LCTL nodemap_del_range"
647 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
648 range="$SUBNET_CHECKSUM.${2}.${j}.[1-253]@tcp"
649 if ! do_facet mgs $cmd --name $1 \
663 local cmd="$LCTL nodemap_add_idmap"
666 for ((i = 0; i < NODEMAP_COUNT; i++)); do
667 for ((j = 500; j < NODEMAP_MAX_ID; j++)); do
670 if ! do_facet mgs $cmd \
671 --name ${HOSTNAME_CHECKSUM}_${i} \
672 --idtype uid --idmap $client_id:$fs_id; then
675 if ! do_facet mgs $cmd \
676 --name ${HOSTNAME_CHECKSUM}_${i} \
677 --idtype gid --idmap $client_id:$fs_id; then
691 local cmd="$LCTL nodemap_del_idmap"
694 for ((i = 0; i < NODEMAP_COUNT; i++)); do
695 for ((j = 500; j < NODEMAP_MAX_ID; j++)); do
698 if ! do_facet mgs $cmd \
699 --name ${HOSTNAME_CHECKSUM}_${i} \
700 --idtype uid --idmap $client_id:$fs_id; then
703 if ! do_facet mgs $cmd \
704 --name ${HOSTNAME_CHECKSUM}_${i} \
705 --idtype gid --idmap $client_id:$fs_id; then
718 local cmd="$LCTL nodemap_modify"
721 proc[0]="admin_nodemap"
722 proc[1]="trusted_nodemap"
726 for ((idx = 0; idx < 2; idx++)); do
727 if ! do_facet mgs $cmd --name $1 \
728 --property ${option[$idx]} \
733 if ! do_facet mgs $cmd --name $1 \
734 --property ${option[$idx]} \
746 cmd[0]="$LCTL nodemap_modify --property squash_uid"
747 cmd[1]="$LCTL nodemap_modify --property squash_gid"
749 if ! do_facet mgs ${cmd[$3]} --name $1 --value $2; then
754 # ensure that the squash defaults are the expected defaults
755 squash_id default 99 0
756 squash_id default 99 1
761 cmd="$LCTL nodemap_test_nid"
763 nid=$(do_facet mgs $cmd $1)
765 if [ $nid == $2 ]; then
776 local cmd="$LCTL nodemap_test_id"
779 ## nodemap deactivated
780 if ! do_facet mgs lctl nodemap_activate 0; then
783 for ((id = 500; id < NODEMAP_MAX_ID; id++)); do
784 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
785 nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
786 fs_id=$(do_facet mgs $cmd --nid $nid \
787 --idtype uid --id $id)
788 if [ $fs_id != $id ]; then
789 echo "expected $id, got $fs_id"
796 if ! do_facet mgs lctl nodemap_activate 1; then
800 for ((id = 500; id < NODEMAP_MAX_ID; id++)); do
801 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
802 nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
803 fs_id=$(do_facet mgs $cmd --nid $nid \
804 --idtype uid --id $id)
805 expected_id=$((id + 1))
806 if [ $fs_id != $expected_id ]; then
807 echo "expected $expected_id, got $fs_id"
814 for ((i = 0; i < NODEMAP_COUNT; i++)); do
815 if ! do_facet mgs $LCTL nodemap_modify \
816 --name ${HOSTNAME_CHECKSUM}_${i} \
817 --property trusted --value 1; then
818 error "nodemap_modify ${HOSTNAME_CHECKSUM}_${i} "
824 for ((id = 500; id < NODEMAP_MAX_ID; id++)); do
825 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
826 nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
827 fs_id=$(do_facet mgs $cmd --nid $nid \
828 --idtype uid --id $id)
829 if [ $fs_id != $id ]; then
830 echo "expected $id, got $fs_id"
836 ## ensure allow_root_access is enabled
837 for ((i = 0; i < NODEMAP_COUNT; i++)); do
838 if ! do_facet mgs $LCTL nodemap_modify \
839 --name ${HOSTNAME_CHECKSUM}_${i} \
840 --property admin --value 1; then
841 error "nodemap_modify ${HOSTNAME_CHECKSUM}_${i} "
847 ## check that root allowed
848 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
849 nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
850 fs_id=$(do_facet mgs $cmd --nid $nid --idtype uid --id 0)
851 if [ $fs_id != 0 ]; then
852 echo "root allowed expected 0, got $fs_id"
857 ## ensure allow_root_access is disabled
858 for ((i = 0; i < NODEMAP_COUNT; i++)); do
859 if ! do_facet mgs $LCTL nodemap_modify \
860 --name ${HOSTNAME_CHECKSUM}_${i} \
861 --property admin --value 0; then
862 error "nodemap_modify ${HOSTNAME_CHECKSUM}_${i} "
868 ## check that root is mapped to 99
869 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
870 nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
871 fs_id=$(do_facet mgs $cmd --nid $nid --idtype uid --id 0)
872 if [ $fs_id != 99 ]; then
873 error "root squash expected 99, got $fs_id"
878 ## reset client trust to 0
879 for ((i = 0; i < NODEMAP_COUNT; i++)); do
880 if ! do_facet mgs $LCTL nodemap_modify \
881 --name ${HOSTNAME_CHECKSUM}_${i} \
882 --property trusted --value 0; then
883 error "nodemap_modify ${HOSTNAME_CHECKSUM}_${i} "
895 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
896 [ $(lustre_version_code $SINGLEMGS) -lt $(version_code 2.5.53) ] &&
897 skip "No nodemap on $(get_lustre_version) MGS, need 2.5.53+" &&
902 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
906 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 2
910 run_test 7 "nodemap create and delete"
915 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
916 [ $(lustre_version_code $SINGLEMGS) -lt $(version_code 2.5.53) ] &&
917 skip "No nodemap on $(get_lustre_version) MGS, need 2.5.53+" &&
924 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
930 [[ $rc == 0 ]] && error "duplicate nodemap_add allowed with $rc" &&
936 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 3
940 run_test 8 "nodemap reject duplicates"
946 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
947 [ $(lustre_version_code $SINGLEMGS) -lt $(version_code 2.5.53) ] &&
948 skip "No nodemap on $(get_lustre_version) MGS, need 2.5.53+" &&
954 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
957 for ((i = 0; i < NODEMAP_COUNT; i++)); do
958 if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
962 [[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2
965 for ((i = 0; i < NODEMAP_COUNT; i++)); do
966 if ! delete_range ${HOSTNAME_CHECKSUM}_${i} $i; then
970 [[ $rc != 0 ]] && error "nodemap_del_range failed with $rc" && return 4
975 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 4
979 run_test 9 "nodemap range add"
984 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
985 [ $(lustre_version_code $SINGLEMGS) -lt $(version_code 2.5.53) ] &&
986 skip "No nodemap on $(get_lustre_version) MGS, need 2.5.53+" &&
992 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
995 for ((i = 0; i < NODEMAP_COUNT; i++)); do
996 if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
1000 [[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2
1003 for ((i = 0; i < NODEMAP_COUNT; i++)); do
1004 if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
1008 [[ $rc == 0 ]] && error "nodemap_add_range duplicate add with $rc" &&
1013 for ((i = 0; i < NODEMAP_COUNT; i++)); do
1014 if ! delete_range ${HOSTNAME_CHECKSUM}_${i} $i; then
1018 [[ $rc != 0 ]] && error "nodemap_del_range failed with $rc" && return 4
1022 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 5
1026 run_test 10 "nodemap reject duplicate ranges"
1031 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
1032 [ $(lustre_version_code $SINGLEMGS) -lt $(version_code 2.5.53) ] &&
1033 skip "No nodemap on $(get_lustre_version) MGS, need 2.5.53+" &&
1039 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
1042 for ((i = 0; i < NODEMAP_COUNT; i++)); do
1043 if ! modify_flags ${HOSTNAME_CHECKSUM}_${i}; then
1047 [[ $rc != 0 ]] && error "nodemap_modify with $rc" && return 2
1052 [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 3
1056 run_test 11 "nodemap modify"
1061 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
1062 [ $(lustre_version_code $SINGLEMGS) -lt $(version_code 2.5.53) ] &&
1063 skip "No nodemap on $(get_lustre_version) MGS, need 2.5.53+" &&
1069 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
1072 for ((i = 0; i < NODEMAP_COUNT; i++)); do
1073 if ! squash_id ${HOSTNAME_CHECKSUM}_${i} 88 0; then
1077 [[ $rc != 0 ]] && error "nodemap squash_uid with $rc" && return 2
1080 for ((i = 0; i < NODEMAP_COUNT; i++)); do
1081 if ! squash_id ${HOSTNAME_CHECKSUM}_${i} 88 1; then
1085 [[ $rc != 0 ]] && error "nodemap squash_gid with $rc" && return 3
1090 [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4
1094 run_test 12 "nodemap set squash ids"
1099 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
1100 [ $(lustre_version_code $SINGLEMGS) -lt $(version_code 2.5.53) ] &&
1101 skip "No nodemap on $(get_lustre_version) MGS, need 2.5.53+" &&
1107 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
1110 for ((i = 0; i < NODEMAP_COUNT; i++)); do
1111 if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
1115 [[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2
1118 for ((i = 0; i < NODEMAP_COUNT; i++)); do
1119 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
1120 for k in $NODEMAP_IPADDR_LIST; do
1121 if ! test_nid $SUBNET_CHECKSUM.$i.$j.$k \
1122 ${HOSTNAME_CHECKSUM}_${i}; then
1128 [[ $rc != 0 ]] && error "nodemap_test_nid failed with $rc" && return 3
1133 [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4
1137 run_test 13 "test nids"
1142 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
1143 [ $(lustre_version_code $SINGLEMGS) -lt $(version_code 2.5.53) ] &&
1144 skip "No nodemap on $(get_lustre_version) MGS, need 2.5.53+" &&
1150 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
1153 for ((i = 0; i < NODEMAP_COUNT; i++)); do
1154 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
1155 for k in $NODEMAP_IPADDR_LIST; do
1156 if ! test_nid $SUBNET_CHECKSUM.$i.$j.$k \
1163 [[ $rc != 0 ]] && error "nodemap_test_nid failed with $rc" && return 3
1168 [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4
1172 run_test 14 "test default nodemap nid lookup"
1177 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
1178 [ $(lustre_version_code $SINGLEMGS) -lt $(version_code 2.5.53) ] &&
1179 skip "No nodemap on $(get_lustre_version) MGS, need 2.5.53+" &&
1185 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
1188 for ((i = 0; i < NODEMAP_COUNT; i++)); do
1189 if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
1193 [[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2
1198 [[ $rc != 0 ]] && error "nodemap_add_idmap failed with $rc" && return 3
1203 [[ $rc != 0 ]] && error "nodemap_test_id failed with $rc" && return 4
1208 [[ $rc != 0 ]] && error "nodemap_del_idmap failed with $rc" && return 5
1213 [[ $rc != 0 ]] && error "nodemap_delete failed with $rc" && return 6
1217 run_test 15 "test id mapping"
# Build one nodemap per client (named c0, c1, ...) for the fileops
# tests: add the nodemap and its NID range on the MGS, mirror both
# onto ost0, install the uid/gid idmaps from FOPS_IDMAPS on both
# servers, then verify the MGS and OSS idmap copies agree.
# NOTE(review): $i appears to be the per-client index; its
# initialization/increment is not visible in this excerpt -- confirm.
1219 create_fops_nodemaps() {
1222 for client in $clients; do
1223 local client_ip=$(host_nids_address $client $NETTYPE)
1224 local client_nid=$(h2$NETTYPE $client_ip)
# Register nodemap c$i and its NID range on the MGS ...
1225 do_facet mgs $LCTL nodemap_add c${i} || return 1
1226 do_facet mgs $LCTL nodemap_add_range \
1227 --name c${i} --range $client_nid || return 1
# ... and mirror the same nodemap/range directly onto ost0.
1228 do_facet ost0 $LCTL set_param nodemap.add_nodemap=c${i} ||
1230 do_facet ost0 "$LCTL set_param nodemap.add_nodemap_range='c$i \
1231 $client_nid'" || return 1
# Install each "client_id:fs_id" pair as both a uid and a gid idmap.
1232 for map in ${FOPS_IDMAPS[i]}; do
1233 do_facet mgs $LCTL nodemap_add_idmap --name c${i} \
1234 --idtype uid --idmap ${map} || return 1
1235 do_facet ost0 "$LCTL set_param \
1236 nodemap.add_nodemap_idmap='c$i uid ${map}'" ||
1238 do_facet mgs $LCTL nodemap_add_idmap --name c${i} \
1239 --idtype gid --idmap ${map} || return 1
1240 do_facet ost0 "$LCTL set_param \
1241 nodemap.add_nodemap_idmap='c$i gid ${map}'" ||
# Sanity check: the MGS and OSS must hold identical idmaps.
1244 out1=$(do_facet mgs $LCTL get_param nodemap.c${i}.idmap)
1245 out2=$(do_facet ost0 $LCTL get_param nodemap.c${i}.idmap)
1246 [ "$out1" != "$out2" ] && error "mgs and oss maps mismatch"
# Remove the per-client fileops nodemaps (c0, c1, ...) from both the
# MGS and ost0.
1252 delete_fops_nodemaps() {
1255 for client in $clients; do
1256 do_facet mgs $LCTL nodemap_del c${i} || return 1
1257 do_facet ost0 $LCTL set_param nodemap.remove_nodemap=c${i} ||
1264 # acl test directory needs to be initialized on a privileged client
1266 local admin=$(do_facet mgs $LCTL get_param -n nodemap.c0.admin_nodemap)
1267 local trust=$(do_facet mgs $LCTL get_param -n \
1268 nodemap.c0.trusted_nodemap)
1270 do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
1271 do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
1272 do_facet ost0 $LCTL set_param nodemap.c0.admin_nodemap=1
1273 do_facet ost0 $LCTL set_param nodemap.c0.trusted_nodemap=1
1275 do_node ${clients_arr[0]} rm -rf $DIR/$tdir
1276 do_node ${clients_arr[0]} mkdir -p $DIR/$tdir
1277 do_node ${clients_arr[0]} chown $user $DIR/$tdir
1279 do_facet mgs $LCTL nodemap_modify --name c0 \
1280 --property admin --value $admin
1281 do_facet mgs $LCTL nodemap_modify --name c0 \
1282 --property trusted --value $trust
1283 do_facet ost0 $LCTL set_param nodemap.c0.admin_nodemap=$admin
1284 do_facet ost0 $LCTL set_param nodemap.c0.trusted_nodemap=$trust
1286 # flush MDT locks to make sure they are reacquired before test
1287 do_node ${clients_arr[0]} lctl set_param \
1288 ldlm.namespaces.$FSNAME-MDT*.lru_size=clear
# Attempt a create + delete as the mapped user and compare the observed
# result against the expectation computed from the test key
# ("mapmode:mds_user:cluster:cli_user:mode", see get_cr_del_expected).
# NOTE(review): $run_u and $key are presumably taken from $1/$2; the
# parameter assignments are not visible in this excerpt -- confirm.
1291 do_create_delete() {
1294 local testfile=$DIR/$tdir/$tfile
1298 if $run_u touch $testfile >& /dev/null; then
1300 $run_u rm $testfile && d=1
1304 local expected=$(get_cr_del_expected $key)
1305 [ "$res" != "$expected" ] && error "test $key expected " \
1306 "$expected, got $res" && rc=$(($rc+1))
# Print the current block-quota usage (second field of `lfs quota -q`)
# for the user that $run_u runs commands as.
# NOTE(review): $run_u is presumably assigned from $1 on an elided
# line -- confirm.
1310 nodemap_check_quota() {
1312 $run_u lfs quota -q $DIR | awk '{ print $2; exit; }'
# Verify quota accounting for the mapped user: record the current
# usage, write 1MB as $run_u, check usage rose by ~1MB (within
# quota_fuzz), then remove the file and check the space is reclaimed.
# NOTE(review): $run_u is presumably passed in by the caller; the
# parameter assignment is not visible in this excerpt -- confirm.
1315 do_fops_quota_test() {
1317 # define fuzz as 2x ost block size in K
1318 local quota_fuzz=$(($(lctl get_param -n \
1319 osc.$FSNAME-OST0000-*.blocksize | head -1) / 512))
1320 local qused_orig=$(nodemap_check_quota "$run_u")
1321 local qused_low=$((qused_orig - quota_fuzz))
1322 local qused_high=$((qused_orig + quota_fuzz))
1323 local testfile=$DIR/$tdir/$tfile
1324 chmod 777 $DIR/$tdir
1325 $run_u dd if=/dev/zero of=$testfile bs=1M count=1 >& /dev/null
1326 sync; sync_all_data || true
# Usage should have grown by 1MB (1024K) within the fuzz window.
1328 local qused_new=$(nodemap_check_quota "$run_u")
1329 [ $((qused_low + 1024)) -le $((qused_new)) \
1330 -a $((qused_high + 1024)) -ge $((qused_new)) ] ||
1331 error "$qused_new != $qused_orig + 1M after write"
1332 $run_u rm $testfile && d=1
1333 $NODEMAP_TEST_QUOTA && wait_delete_completed_mds
# After deletion, usage should be back within the original window.
1335 qused_new=$(nodemap_check_quota "$run_u")
1336 [ $((qused_low)) -le $((qused_new)) \
1337 -a $((qused_high)) -ge $((qused_new)) ] ||
1338 error "quota not reclaimed, expect $qused_orig got $qused_new"
# Look up the filesystem-side uid that a client uid maps to in
# FOPS_IDMAPS (entries are "client_id:fs_id" pairs); prints the fs id
# when a match is found.
# NOTE(review): $cli_user is presumably assigned from $1 on an elided
# line -- confirm.
1341 get_fops_mapped_user() {
1344 for ((i=0; i < ${#FOPS_IDMAPS[@]}; i++)); do
1345 for map in ${FOPS_IDMAPS[i]}; do
1346 if [ $(cut -d: -f1 <<< "$map") == $cli_user ]; then
1347 cut -d: -f2 <<< "$map"
# Decide whether do_create_delete should succeed for a test key of the
# form "mapmode:mds_user:cluster:cli_user:mode" and echo the expected
# result (presumably $SUCCESS or $FAILURE -- their definitions are not
# visible in this excerpt).
1355 get_cr_del_expected() {
# Split the colon-separated key into its five fields.
1357 IFS=":" read -a key <<< "$1"
1358 local mapmode="${key[0]}"
1359 local mds_user="${key[1]}"
1360 local cluster="${key[2]}"
1361 local cli_user="${key[3]}"
1362 local mode="0${key[4]}"
1369 [[ $mapmode == *mapped* ]] && mapped=1
1370 # only c1 is mapped in these test cases
1371 [[ $mapmode == mapped_trusted* ]] && [ "$cluster" == "c0" ] && mapped=0
1372 [[ $mapmode == *noadmin* ]] && noadmin=1
1374 # o+wx works as long as the user isn't mapped
1375 if [ $((mode & 3)) -eq 3 ]; then
1379 # if client user is root, check if root is squashed
1380 if [ "$cli_user" == "0" ]; then
1381 # squash root succeed, if other bit is on
1384 1) [ "$other" == "1" ] && echo $SUCCESS
1385 [ "$other" == "0" ] && echo $FAILURE;;
# Unmapped non-root users rely entirely on the "other" permission bits.
1389 if [ "$mapped" == "0" ]; then
1390 [ "$other" == "1" ] && echo $SUCCESS
1391 [ "$other" == "0" ] && echo $FAILURE
1395 # if mapped user is mds user, check for u+wx
1396 mapped_user=$(get_fops_mapped_user $cli_user)
1397 [ "$mapped_user" == "-1" ] &&
1398 error "unable to find mapping for client user $cli_user"
# Owner write+execute (0300) is required when the mapped user owns the dir.
1400 if [ "$mapped_user" == "$mds_user" -a \
1401 $(((mode & 0300) == 0300)) -eq 1 ]; then
1405 if [ "$mapped_user" != "$mds_user" -a "$other" == "1" ]; then
1414 local single_client="$2"
1415 local client_user_list=([0]="0 $((IDBASE+3)) $((IDBASE+4))"
1416 [1]="0 $((IDBASE+5)) $((IDBASE+6))")
1419 local perm_bit_list="0 3 $((0300)) $((0303))"
1420 [ "$SLOW" == "yes" ] && perm_bit_list=$(seq 0 511)
1422 # step through mds users. -1 means root
1423 for mds_i in -1 0 1 2; do
1424 local user=$((mds_i + IDBASE))
1428 [ "$mds_i" == "-1" ] && user=0
1430 echo mkdir -p $DIR/$tdir
1433 for client in $clients; do
1435 local admin=$(do_facet mgs $LCTL get_param -n \
1436 nodemap.c$cli_i.admin_nodemap)
1437 for u in ${client_user_list[$cli_i]}; do
1438 local run_u="do_node $client \
1439 $RUNAS_CMD -u$u -g$u -G$u"
1440 for perm_bits in $perm_bit_list; do
1441 local mode=$(printf %03o $perm_bits)
1442 do_facet mgs $LCTL nodemap_modify \
1446 do_node $client chmod $mode $DIR/$tdir
1447 do_facet mgs $LCTL nodemap_modify \
1453 key="$mapmode:$user:c$cli_i:$u:$mode"
1454 do_create_delete "$run_u" "$key"
1458 do_fops_quota_test "$run_u"
1461 cli_i=$((cli_i + 1))
1462 [ "$single_client" == "1" ] && break
# Common setup for the fileops nodemap tests (test_16..22).
# Arguments: $1 - "0" leaves the nodemap feature inactive; any other
# value activates it and marks the default nodemap admin + trusted on
# both the MGS and ost0.
1469 nodemap_test_setup() {
1471 local active_nodemap=$1
# Disable the identity upcall so MDS-side group lookups do not interfere.
1473 do_facet mgs $LCTL set_param $IDENTITY_UPCALL=NONE
1475 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
1476 [ $(lustre_version_code $SINGLEMGS) -lt $(version_code 2.5.53) ] &&
1477 skip "No nodemap on $(get_lustre_version) MGS, need 2.5.53+" &&
1481 create_fops_nodemaps
1483 [[ $rc != 0 ]] && error "adding fops nodemaps failed $rc"
1485 if [ "$active_nodemap" == "0" ]; then
1486 do_facet mgs $LCTL set_param nodemap.active=0
1487 do_facet ost0 $LCTL set_param nodemap.active=0
1491 do_facet mgs $LCTL nodemap_activate 1
1492 do_facet ost0 $LCTL set_param nodemap.active=1
1493 do_facet mgs $LCTL nodemap_modify --name default \
1494 --property admin --value 1
1495 do_facet mgs $LCTL nodemap_modify --name default \
1496 --property trusted --value 1
1497 do_facet ost0 $LCTL set_param nodemap.default.admin_nodemap=1
1498 do_facet ost0 $LCTL set_param nodemap.default.trusted_nodemap=1
# Tear down the fileops nodemaps created by nodemap_test_setup.
1501 nodemap_test_cleanup() {
1502 delete_fops_nodemaps
1504 [[ $rc != 0 ]] && error "removing fops nodemaps failed $rc"
1509 nodemap_clients_admin_trusted() {
1513 for client in $clients; do
1514 do_facet mgs $LCTL nodemap_modify --name c0 \
1515 --property admin --value $admin
1516 do_facet ost0 $LCTL set_param nodemap.c${i}.admin_nodemap=$admin
1517 do_facet mgs $LCTL nodemap_modify --name c0 \
1518 --property trusted --value $tr
1519 do_facet ost0 $LCTL set_param nodemap.c${i}.trusted_nodemap=$tr
1525 nodemap_test_setup 0
1528 nodemap_test_cleanup
1530 run_test 16 "test nodemap all_off fileops"
1535 nodemap_clients_admin_trusted 0 1
1536 test_fops trusted_noadmin 1
1537 nodemap_test_cleanup
1539 run_test 17 "test nodemap trusted_noadmin fileops"
1543 nodemap_clients_admin_trusted 0 0
1544 test_fops mapped_noadmin 1
1545 nodemap_test_cleanup
1547 run_test 18 "test nodemap mapped_noadmin fileops"
1551 nodemap_clients_admin_trusted 1 1
1552 test_fops trusted_admin 1
1553 nodemap_test_cleanup
1555 run_test 19 "test nodemap trusted_admin fileops"
1559 nodemap_clients_admin_trusted 1 0
1560 test_fops mapped_admin 1
1561 nodemap_test_cleanup
1563 run_test 20 "test nodemap mapped_admin fileops"
# Interior of test_21: mix of mapped and trusted clients, all non-admin.
# NOTE(review): the test_21() { header and the initialization of $i and $x
# fall outside this excerpt; $x presumably alternates per client so one
# client is trusted and the other mapped — TODO confirm.
	for client in $clients; do
		# admin off on every client's nodemap; trusted follows $x
		do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property admin --value 0
		do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property trusted --value $x
		do_facet ost0 $LCTL set_param nodemap.c${i}.admin_nodemap=0
		do_facet ost0 $LCTL set_param nodemap.c${i}.trusted_nodemap=$x

	test_fops mapped_trusted_noadmin
	nodemap_test_cleanup
run_test 21 "test nodemap mapped_trusted_noadmin fileops"
# Interior of test_22: same mapped/trusted mix as test 21, but with admin
# enabled so root is never squashed.
# NOTE(review): the test_22() { header and the initialization of $i and $x
# fall outside this excerpt — TODO confirm against the full file.
	for client in $clients; do
		# admin on for every client's nodemap; trusted follows $x
		do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property admin --value 1
		do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property trusted --value $x
		do_facet ost0 $LCTL set_param nodemap.c${i}.admin_nodemap=1
		do_facet ost0 $LCTL set_param nodemap.c${i}.trusted_nodemap=$x

	test_fops mapped_trusted_admin
	nodemap_test_cleanup
run_test 22 "test nodemap mapped_trusted_admin fileops"
# acl test directory needs to be initialized on a privileged client
# Temporarily grants c0 admin+trusted, (re)creates a world-writable
# $DIR/$tdir from client 0, then restores c0's previous settings.
nodemap_acl_test_setup() {
	# remember c0's current settings so they can be restored below
	local admin=$(do_facet mgs $LCTL get_param -n nodemap.c0.admin_nodemap)
	local trust=$(do_facet mgs $LCTL get_param -n \
		nodemap.c0.trusted_nodemap)

	# elevate c0 so root on client 0 can manipulate the directory
	do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
	do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
	do_facet ost0 $LCTL set_param nodemap.c0.admin_nodemap=1
	do_facet ost0 $LCTL set_param nodemap.c0.trusted_nodemap=1

	do_node ${clients_arr[0]} rm -rf $DIR/$tdir
	do_node ${clients_arr[0]} mkdir -p $DIR/$tdir
	# world rwx so squashed/mapped users can create test files in it
	do_node ${clients_arr[0]} chmod a+rwx $DIR/$tdir

	# put c0 back exactly as it was before this helper ran
	do_facet mgs $LCTL nodemap_modify --name c0 \
		--property admin --value $admin
	do_facet mgs $LCTL nodemap_modify --name c0 \
		--property trusted --value $trust
	do_facet ost0 $LCTL set_param nodemap.c0.admin_nodemap=$admin
	do_facet ost0 $LCTL set_param nodemap.c0.trusted_nodemap=$trust
# returns 0 if the number of ACLs does not change on the second (mapped) client
# after being set on the first client
# Arguments: $1 - uid the ACL is granted to (consumed on a line outside
#                 this excerpt, presumably as $user — TODO confirm)
#            $2 - client that creates the file and sets the ACL
#            $3 - client on which the ACL count is re-read
#            $4 - "1" means the setfacl itself is expected to fail
# NOTE(review): the continuation lines of both getfacl pipelines
# (presumably "wc -l)") and the final return fall outside this excerpt.
nodemap_acl_test() {
	local set_client="$2"
	local get_client="$3"
	local check_setfacl="$4"
	local setfacl_error=0
	local testfile=$DIR/$tdir/$tfile
	local RUNAS_USER="$RUNAS_CMD -u $user"
	local acl_count_post=0

	nodemap_acl_test_setup

	# create the file as the test uid on the setting client
	do_node $set_client $RUNAS_USER touch $testfile

	# ACL masks aren't filtered by nodemap code, so we ignore them
	acl_count=$(do_node $get_client getfacl $testfile | grep -v mask |
	do_node $set_client $RUNAS_USER setfacl -m $user:rwx $testfile ||

	# if check setfacl is set to 1, then it's supposed to error
	if [ "$check_setfacl" == "1" ]; then
		[ "$setfacl_error" != "1" ] && return 1

	[ "$setfacl_error" == "1" ] && echo "WARNING: unable to setfacl"

	# re-read the ACL count from the other client and compare
	acl_count_post=$(do_node $get_client getfacl $testfile | grep -v mask |

	[ $acl_count -eq $acl_count_post ] && return 0
1667 # 1 trusted cluster, 1 mapped cluster
1668 local unmapped_fs=$((IDBASE+0))
1669 local unmapped_c1=$((IDBASE+5))
1670 local mapped_fs=$((IDBASE+2))
1671 local mapped_c0=$((IDBASE+4))
1672 local mapped_c1=$((IDBASE+6))
1674 do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
1675 do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
1676 do_facet ost0 $LCTL set_param nodemap.c0.admin_nodemap=1
1677 do_facet ost0 $LCTL set_param nodemap.c0.trusted_nodemap=1
1679 do_facet mgs $LCTL nodemap_modify --name c1 --property admin --value 0
1680 do_facet mgs $LCTL nodemap_modify --name c1 --property trusted --value 0
1681 do_facet ost0 $LCTL set_param nodemap.c1.admin_nodemap=0
1682 do_facet ost0 $LCTL set_param nodemap.c1.trusted_nodemap=0
1684 # setfacl on trusted cluster to unmapped user, verify it's not seen
1685 nodemap_acl_test $unmapped_fs ${clients_arr[0]} ${clients_arr[1]} ||
1686 error "acl count (1)"
1688 # setfacl on trusted cluster to mapped user, verify it's seen
1689 nodemap_acl_test $mapped_fs ${clients_arr[0]} ${clients_arr[1]} &&
1690 error "acl count (2)"
1692 # setfacl on mapped cluster to mapped user, verify it's seen
1693 nodemap_acl_test $mapped_c1 ${clients_arr[1]} ${clients_arr[0]} &&
1694 error "acl count (3)"
1696 # setfacl on mapped cluster to unmapped user, verify error
1697 nodemap_acl_test $unmapped_fs ${clients_arr[1]} ${clients_arr[0]} 1 ||
1698 error "acl count (4)"
1701 do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 0
1702 do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 0
1703 do_facet ost0 $LCTL set_param nodemap.c0.admin_nodemap=0
1704 do_facet ost0 $LCTL set_param nodemap.c0.trusted_nodemap=0
1706 # setfacl to mapped user on c1, also mapped to c0, verify it's seen
1707 nodemap_acl_test $mapped_c1 ${clients_arr[1]} ${clients_arr[0]} &&
1708 error "acl count (5)"
1710 # setfacl to mapped user on c1, not mapped to c0, verify not seen
1711 nodemap_acl_test $unmapped_c1 ${clients_arr[1]} ${clients_arr[0]} ||
1712 error "acl count (6)"
1714 nodemap_test_cleanup
1716 run_test 23 "test mapped ACLs"
1718 log "cleanup: ======================================================"
1721 ## nodemap deactivated
1722 do_facet mgs lctl nodemap_activate 0
1724 for num in $(seq $MDSCOUNT); do
1725 if [ "${identity_old[$num]}" = 1 ]; then
1726 switch_identity $num false || identity_old[$num]=$?
1730 $RUNAS_CMD -u $ID0 ls $DIR
1731 $RUNAS_CMD -u $ID1 ls $DIR