3 # Run select tests by setting ONLY, or as arguments to the script.
4 # Skip specific tests by setting EXCEPT.
10 # bug number for skipped test: 19430 19967 19967
11 ALWAYS_EXCEPT=" 2 5 6 $SANITY_SEC_EXCEPT"
12 # UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
14 [ "$ALWAYS_EXCEPT$EXCEPT" ] && echo "Skipping tests: $ALWAYS_EXCEPT $EXCEPT"
# Make the test utilities (runas, lfs, lctl wrappers) reachable from PATH.
17 export PATH=$PWD/$SRCDIR:$SRCDIR:$PWD/$SRCDIR/../utils:$PATH:/sbin
18 export NAME=${NAME:-local}
# Source the shared Lustre test framework and the per-config settings file.
20 LUSTRE=${LUSTRE:-$(dirname $0)/..}
21 . $LUSTRE/tests/test-framework.sh
23 . ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
26 RUNAS_CMD=${RUNAS_CMD:-runas}
28 WTL=${WTL:-"$LUSTRE/tests/write_time_limit"}
31 PERM_CONF=$CONFDIR/perm.conf
# Derive per-host checksums so concurrently-running hosts get distinct
# nodemap names and non-overlapping test NID subnets (1..250).
34 HOSTNAME_CHECKSUM=$(hostname | sum | awk '{ print $1 }')
35 SUBNET_CHECKSUM=$(expr $HOSTNAME_CHECKSUM % 250 + 1)
38 NODEMAP_IPADDR_LIST="1 10 64 128 200 250"
41 require_dsh_mds || exit 0
42 require_dsh_ost || exit 0
# Split the comma-separated CLIENTS list into a space list and an array.
44 clients=${CLIENTS//,/ }
45 num_clients=$(get_node_count ${clients})
46 clients_arr=($clients)
# Resolve the user names that own uids/gids ID0 and ID1; the uid:gid pair
# must match exactly (":$ID0:$ID0:") for the lookup to succeed.
50 USER0=$(grep :$ID0:$ID0: /etc/passwd | cut -d: -f1)
51 USER1=$(grep :$ID1:$ID1: /etc/passwd | cut -d: -f1)
54 skip "need to add user0 ($ID0:$ID0) to /etc/passwd" && exit 0
57 skip "need to add user1 ($ID1:$ID1) to /etc/passwd" && exit 0
59 IDBASE=${IDBASE:-60000}
# Per-client-index uid/gid idmap strings (client_id:fs_id pairs).
61 # changes to mappings must be reflected in test 23
63 [0]="$((IDBASE+3)):$((IDBASE+0)) $((IDBASE+4)):$((IDBASE+2))"
64 [1]="$((IDBASE+5)):$((IDBASE+1)) $((IDBASE+6)):$((IDBASE+2))"
67 check_and_setup_lustre
# If this script mounted the filesystem itself, tear everything down on
# cleanup; otherwise leave the pre-existing mount alone.
70 if [ "$I_MOUNTED" = "yes" ]; then
71 cleanupall -f || error "sec_cleanup"
76 [ -z "$(echo $DIR | grep $MOUNT)" ] &&
77 error "$DIR not in $MOUNT" && sec_cleanup && exit 1
79 [ $(echo $MOUNT | wc -w) -gt 1 ] &&
80 echo "NAME=$MOUNT mounted more than once" && sec_cleanup && exit 0
# Detect GSS support by checking whether the ptlrpc_gss module is loaded
# and has a non-zero reference count.
83 GSS_REF=$(lsmod | grep ^ptlrpc_gss | awk '{print $3}')
84 if [ ! -z "$GSS_REF" -a "$GSS_REF" != "0" ]; then
86 echo "with GSS support"
89 echo "without GSS support"
# Resolve the MDT device name on the MDS and build the lctl parameter
# paths used throughout the tests to flush/upcall identity state.
92 MDT=$(do_facet $SINGLEMDS lctl get_param -N "mdt.\*MDT0000" |
94 [ -z "$MDT" ] && error "fail to get MDT device" && exit 1
95 do_facet $SINGLEMDS "mkdir -p $CONFDIR"
96 IDENTITY_FLUSH=mdt.$MDT.identity_flush
97 IDENTITY_UPCALL=mdt.$MDT.identity_upcall
98 MDSSECLEVEL=mdt.$MDT.sec_level
# Classify this client as local vs remote from the llite client_type.
101 if [ -z "$(lctl get_param -n llite.*.client_type | grep remote 2>/dev/null)" ]
# sec_login fragment (presumably): acquire a kerberos context for $user,
# retrying once after flushing the security context -- TODO confirm
# against the full source.
118 if ! $RUNAS_CMD -u $user krb5_login.sh; then
119 error "$user login kerberos failed."
123 if ! $RUNAS_CMD -u $user -g $group ls $DIR > /dev/null 2>&1; then
124 $RUNAS_CMD -u $user lfs flushctx -k
125 $RUNAS_CMD -u $user krb5_login.sh
126 if ! $RUNAS_CMD -u$user -g$group ls $DIR > /dev/null 2>&1; then
127 error "init $user $group failed."
# Remember each MDS's previous identity-upcall setting so it can be
# restored during final cleanup.
133 declare -a identity_old
136 for num in $(seq $MDSCOUNT); do
137 switch_identity $num true || identity_old[$num]=$?
# Prime security contexts for both test users if they cannot yet access $DIR.
140 if ! $RUNAS_CMD -u $ID0 ls $DIR > /dev/null 2>&1; then
141 sec_login $USER0 $USER0
144 if ! $RUNAS_CMD -u $ID1 ls $DIR > /dev/null 2>&1; then
145 sec_login $USER1 $USER1
# test 0: verify basic uid permission enforcement -- owner/other users may
# or may not create files depending on directory mode and ownership.
150 # run as different user
154 chmod 0755 $DIR || error "chmod (1)"
155 rm -rf $DIR/$tdir || error "rm (1)"
156 mkdir -p $DIR/$tdir || error "mkdir (1)"
# For remote clients, first verify chown fails without the rmtown
# permission ('normtown' = negated rmtown), then grant it and retry.
158 if [ "$CLIENT_TYPE" = "remote" ]; then
159 do_facet $SINGLEMDS "echo '* 0 normtown' > $PERM_CONF"
160 do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
161 chown $USER0 $DIR/$tdir && error "chown (1)"
162 do_facet $SINGLEMDS "echo '* 0 rmtown' > $PERM_CONF"
163 do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
165 chown $USER0 $DIR/$tdir || error "chown (2)"
# $tdir is owned by USER0: only ID0 (and root) may create files in it.
168 $RUNAS_CMD -u $ID0 ls $DIR || error "ls (1)"
169 rm -f $DIR/f0 || error "rm (2)"
170 $RUNAS_CMD -u $ID0 touch $DIR/f0 && error "touch (1)"
171 $RUNAS_CMD -u $ID0 touch $DIR/$tdir/f1 || error "touch (2)"
172 $RUNAS_CMD -u $ID1 touch $DIR/$tdir/f2 && error "touch (3)"
173 touch $DIR/$tdir/f3 || error "touch (4)"
# Switch to root-owned, group USER0, group-writable: ID0 can still write
# via the group bit, ID1 still cannot.
174 chown root $DIR/$tdir || error "chown (3)"
175 chgrp $USER0 $DIR/$tdir || error "chgrp (1)"
176 chmod 0775 $DIR/$tdir || error "chmod (2)"
177 $RUNAS_CMD -u $ID0 touch $DIR/$tdir/f4 || error "touch (5)"
178 $RUNAS_CMD -u $ID1 touch $DIR/$tdir/f5 && error "touch (6)"
179 touch $DIR/$tdir/f6 || error "touch (7)"
180 rm -rf $DIR/$tdir || error "rm (3)"
# Restore the MDS permission configuration for remote clients.
182 if [ "$CLIENT_TYPE" = "remote" ]; then
183 do_facet $SINGLEMDS "rm -f $PERM_CONF"
184 do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
187 run_test 0 "uid permission ============================="
# test 1: verify the setuid/setgid permissions in perm.conf -- switching
# effective uid (-v) / gid (-j) must be denied until explicitly granted.
191 [ $GSS_SUP = 0 ] && skip "without GSS support." && return
193 if [ "$CLIENT_TYPE" = "remote" ]; then
194 do_facet $SINGLEMDS "echo '* 0 rmtown' > $PERM_CONF"
195 do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
# Without 'setuid' permission, ID1 cannot act as ID0 (-v sets fsuid).
201 chown $USER0 $DIR/$tdir || error "chown (1)"
202 $RUNAS_CMD -u $ID1 -v $ID0 touch $DIR/$tdir/f0 && error "touch (2)"
203 echo "enable uid $ID1 setuid"
204 do_facet $SINGLEMDS "echo '* $ID1 setuid' >> $PERM_CONF"
205 do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
206 $RUNAS_CMD -u $ID1 -v $ID0 touch $DIR/$tdir/f1 || error "touch (3)"
# Group-writable dir owned by group USER0: ID1's own gid is denied, and
# switching fsgid (-j) is also denied until 'setgid' is granted.
208 chown root $DIR/$tdir || error "chown (4)"
209 chgrp $USER0 $DIR/$tdir || error "chgrp (5)"
210 chmod 0770 $DIR/$tdir || error "chmod (6)"
211 $RUNAS_CMD -u $ID1 -g $ID1 touch $DIR/$tdir/f2 && error "touch (7)"
212 $RUNAS_CMD -u$ID1 -g$ID1 -j$ID0 touch $DIR/$tdir/f3 && error "touch (8)"
213 echo "enable uid $ID1 setuid,setgid"
214 do_facet $SINGLEMDS "echo '* $ID1 setuid,setgid' > $PERM_CONF"
215 do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
216 $RUNAS_CMD -u $ID1 -g $ID1 -j $ID0 touch $DIR/$tdir/f4 ||
218 $RUNAS_CMD -u $ID1 -v $ID0 -g $ID1 -j $ID0 touch $DIR/$tdir/f5 ||
# Remove the permission config and flush so later tests start clean.
223 do_facet $SINGLEMDS "rm -f $PERM_CONF"
224 do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
226 run_test 1 "setuid/gid ============================="
# Run one remote-ACL subtest script from the rmtacl directory; $1 names
# the .test file to execute.
228 run_rmtacl_subtest() {
229 $SAVE_PWD/rmtacl/run $SAVE_PWD/rmtacl/$1.test
# test 2: remote ACL handling. Only meaningful for remote clients, and
# requires server-side acl support plus the setfacl utility.
234 # for remote client only
236 [ "$CLIENT_TYPE" = "local" ] &&
237 skip "remote_acl for remote client only" && return
238 [ -z "$(lctl get_param -n mdc.*-mdc-*.connect_flags | grep ^acl)" ] &&
239 skip "must have acl enabled" && return
240 [ -z "$(which setfacl 2>/dev/null)" ] &&
241 skip "could not find setfacl" && return
242 [ "$UID" != 0 ] && skip "must run as root" && return
# Grant root the rmtacl and rmtown permissions for the duration of the test.
244 do_facet $SINGLEMDS "echo '* 0 rmtacl,rmtown' > $PERM_CONF"
245 do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
# The rmtacl test scripts reference these well-known users/groups.
249 sec_login daemon daemon
250 sec_login games users
# Run each canned ACL subtest in turn.
256 echo "performing cp ..."
257 run_rmtacl_subtest cp || error "cp"
258 echo "performing getfacl-noacl..."
259 run_rmtacl_subtest getfacl-noacl || error "getfacl-noacl"
260 echo "performing misc..."
261 run_rmtacl_subtest misc || error "misc"
262 echo "performing permissions..."
263 run_rmtacl_subtest permissions || error "permissions"
264 echo "performing setfacl..."
265 run_rmtacl_subtest setfacl || error "setfacl"
# The inheritance subtest needs the make-tree helper in the cwd.
267 # inheritance test got from HP
268 echo "performing inheritance..."
269 cp $SAVE_PWD/rmtacl/make-tree .
271 run_rmtacl_subtest inheritance || error "inheritance"
277 do_facet $SINGLEMDS "rm -f $PERM_CONF"
278 do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
280 run_test 2 "rmtacl ============================="
282 # bug 3285 - supplementary group should always succeed.
283 # NB: the supplementary groups are set for local client only,
284 # as for remote client, the groups of the specified uid on MDT
285 # will be obtained by upcall /sbin/l_getidentity and used.
# Version gate: the fix exists in 2.6.93+ and was backported to the
# 2.5.35..2.5.50 range.
287 local server_version=$(lustre_version_code $SINGLEMDS)
289 [[ $server_version -ge $(version_code 2.6.93) ]] ||
290 [[ $server_version -ge $(version_code 2.5.35) &&
291 $server_version -lt $(version_code 2.5.50) ]] ||
292 { skip "Need MDS version at least 2.6.93 or 2.5.35"; return; }
294 if [ "$CLIENT_TYPE" = "remote" ]; then
295 do_facet $SINGLEMDS "echo '* 0 rmtown' > $PERM_CONF"
296 do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
# Group-accessible dir (group ID0): ID0 gets in via its primary gid;
# ID1 only gets in when ID0 is among its supplementary groups (-G) and
# the 'setgrp' permission has been granted.
301 chmod 0771 $DIR/$tdir
302 chgrp $ID0 $DIR/$tdir
303 $RUNAS_CMD -u $ID0 ls $DIR/$tdir || error "setgroups (1)"
304 if [ "$CLIENT_TYPE" = "local" ]; then
305 do_facet $SINGLEMDS "echo '* $ID1 setgrp' > $PERM_CONF"
306 do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
307 $RUNAS_CMD -u $ID1 -G1,2,$ID0 ls $DIR/$tdir ||
308 error "setgroups (2)"
310 $RUNAS_CMD -u $ID1 -G1,2 ls $DIR/$tdir && error "setgroups (3)"
313 do_facet $SINGLEMDS "rm -f $PERM_CONF"
314 do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
316 run_test 4 "set supplementary group ==============="
# Reset squash uid/gid on the default nodemap to the expected default (99).
323 squash_id default 99 0
324 squash_id default 99 1
# create_nodemaps fragment: add NODEMAP_COUNT nodemaps named
# ${HOSTNAME_CHECKSUM}_<i> and verify each shows up in nodemap.<name>.id.
325 for (( i = 0; i < NODEMAP_COUNT; i++ )); do
326 local csum=${HOSTNAME_CHECKSUM}_${i}
328 if ! do_facet mgs $LCTL nodemap_add $csum; then
332 out=$(do_facet mgs $LCTL get_param nodemap.$csum.id)
# fail (return 1) if the new nodemap name does not appear in the output
334 [[ $(echo $out | grep -c $csum) == 0 ]] && return 1
# delete_nodemaps fragment: remove each nodemap and verify it is gone.
343 for ((i = 0; i < NODEMAP_COUNT; i++)); do
344 local csum=${HOSTNAME_CHECKSUM}_${i}
346 if ! do_facet mgs $LCTL nodemap_del $csum; then
347 error "nodemap_del $csum failed with $?"
351 out=$(do_facet mgs $LCTL get_param nodemap.$csum.id)
352 [[ $(echo $out | grep -c $csum) != 0 ]] && return 1
# add_range fragment: attach NODEMAP_RANGE_COUNT NID ranges to nodemap $1,
# using subnet octet $2 to keep ranges distinct per nodemap.
359 local cmd="$LCTL nodemap_add_range"
363 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
364 range="$SUBNET_CHECKSUM.${2}.${j}.[1-253]@tcp"
365 if ! do_facet mgs $cmd --name $1 --range $range; then
# delete_range fragment: mirror of add_range.
374 local cmd="$LCTL nodemap_del_range"
378 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
379 range="$SUBNET_CHECKSUM.${2}.${j}.[1-253]@tcp"
380 if ! do_facet mgs $cmd --name $1 --range $range; then
# add_idmaps fragment: for every nodemap, map client ids 500..NODEMAP_MAX_ID
# to fs ids shifted by one (client j -> fs j+1) for both uid and gid.
390 local cmd="$LCTL nodemap_add_idmap"
393 for ((i = 0; i < NODEMAP_COUNT; i++)); do
396 for ((j = 500; j < NODEMAP_MAX_ID; j++)); do
397 local csum=${HOSTNAME_CHECKSUM}_${i}
399 local fs_id=$((j + 1))
401 if ! do_facet mgs $cmd --name $csum --idtype uid \
402 --idmap $client_id:$fs_id; then
405 if ! do_facet mgs $cmd --name $csum --idtype gid \
406 --idmap $client_id:$fs_id; then
# delete_idmaps fragment: mirror of add_idmaps.
417 local cmd="$LCTL nodemap_del_idmap"
420 for ((i = 0; i < NODEMAP_COUNT; i++)); do
423 for ((j = 500; j < NODEMAP_MAX_ID; j++)); do
424 local csum=${HOSTNAME_CHECKSUM}_${i}
426 local fs_id=$((j + 1))
428 if ! do_facet mgs $cmd --name $csum --idtype uid \
429 --idmap $client_id:$fs_id; then
432 if ! do_facet mgs $cmd --name $csum --idtype gid \
433 --idmap $client_id:$fs_id; then
# modify_flags fragment: toggle the admin/trusted nodemap properties on
# nodemap $1 and check the matching proc files reflect the change.
446 local cmd="$LCTL nodemap_modify"
449 proc[0]="admin_nodemap"
450 proc[1]="trusted_nodemap"
454 for ((idx = 0; idx < 2; idx++)); do
455 if ! do_facet mgs $cmd --name $1 --property ${option[$idx]} \
460 if ! do_facet mgs $cmd --name $1 --property ${option[$idx]} \
# squash_id fragment: set squash_uid ($3==0) or squash_gid ($3==1) of
# nodemap $1 to value $2.
472 cmd[0]="$LCTL nodemap_modify --property squash_uid"
473 cmd[1]="$LCTL nodemap_modify --property squash_gid"
475 if ! do_facet mgs ${cmd[$3]} --name $1 --value $2; then
480 # ensure that the squash defaults are the expected defaults
481 squash_id default 99 0
482 squash_id default 99 1
# test_nid fragment: verify nodemap_test_nid maps NID $1 to nodemap $2.
487 cmd="$LCTL nodemap_test_nid"
489 nid=$(do_facet mgs $cmd $1)
491 if [ $nid == $2 ]; then
# test_idmap fragment: exercise nodemap_test_id under four regimes --
# nodemap inactive (identity map), active with idmaps (shifted ids),
# trusted (identity), and admin on/off (root allowed vs squashed to 99).
500 local cmd="$LCTL nodemap_test_id"
503 ## nodemap deactivated
504 if ! do_facet mgs lctl nodemap_activate 0; then
# With nodemaps off, every id must map to itself.
507 for ((id = 500; id < NODEMAP_MAX_ID; id++)); do
510 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
511 local nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
512 local fs_id=$(do_facet mgs $cmd --nid $nid \
513 --idtype uid --id $id)
514 if [ $fs_id != $id ]; then
515 echo "expected $id, got $fs_id"
# With nodemaps on, the idmaps installed earlier shift each id by +1.
522 if ! do_facet mgs lctl nodemap_activate 1; then
526 for ((id = 500; id < NODEMAP_MAX_ID; id++)); do
527 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
528 nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
529 fs_id=$(do_facet mgs $cmd --nid $nid \
530 --idtype uid --id $id)
531 expected_id=$((id + 1))
532 if [ $fs_id != $expected_id ]; then
533 echo "expected $expected_id, got $fs_id"
# Mark every nodemap trusted: ids pass through unmapped again.
540 for ((i = 0; i < NODEMAP_COUNT; i++)); do
541 local csum=${HOSTNAME_CHECKSUM}_${i}
543 if ! do_facet mgs $LCTL nodemap_modify --name $csum \
544 --property trusted --value 1; then
545 error "nodemap_modify $csum failed with $?"
550 for ((id = 500; id < NODEMAP_MAX_ID; id++)); do
551 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
552 nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
553 fs_id=$(do_facet mgs $cmd --nid $nid \
554 --idtype uid --id $id)
555 if [ $fs_id != $id ]; then
556 echo "expected $id, got $fs_id"
562 ## ensure allow_root_access is enabled
563 for ((i = 0; i < NODEMAP_COUNT; i++)); do
564 local csum=${HOSTNAME_CHECKSUM}_${i}
566 if ! do_facet mgs $LCTL nodemap_modify --name $csum \
567 --property admin --value 1; then
568 error "nodemap_modify $csum failed with $?"
573 ## check that root allowed
574 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
575 nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
576 fs_id=$(do_facet mgs $cmd --nid $nid --idtype uid --id 0)
577 if [ $fs_id != 0 ]; then
578 echo "root allowed expected 0, got $fs_id"
583 ## ensure allow_root_access is disabled
584 for ((i = 0; i < NODEMAP_COUNT; i++)); do
585 local csum=${HOSTNAME_CHECKSUM}_${i}
587 if ! do_facet mgs $LCTL nodemap_modify --name $csum \
588 --property admin --value 0; then
589 error "nodemap_modify ${HOSTNAME_CHECKSUM}_${i} "
# With admin off, root (uid 0) must be squashed to the squash_uid (99).
595 ## check that root is mapped to 99
596 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
597 nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
598 fs_id=$(do_facet mgs $cmd --nid $nid --idtype uid --id 0)
599 if [ $fs_id != 99 ]; then
600 error "root squash expected 99, got $fs_id"
605 ## reset client trust to 0
606 for ((i = 0; i < NODEMAP_COUNT; i++)); do
607 if ! do_facet mgs $LCTL nodemap_modify \
608 --name ${HOSTNAME_CHECKSUM}_${i} \
609 --property trusted --value 0; then
610 error "nodemap_modify ${HOSTNAME_CHECKSUM}_${i} "
# test 7 fragment: nodemap create and delete round-trip.
622 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
623 [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
624 skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
629 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
633 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 2
637 run_test 7 "nodemap create and delete"
# test 8 fragment: adding the same nodemap twice must fail.
642 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
643 [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
644 skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
651 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
657 [[ $rc == 0 ]] && error "duplicate nodemap_add allowed with $rc" &&
663 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 3
667 run_test 8 "nodemap reject duplicates"
# test 9 fragment: add then delete a NID range on every nodemap.
673 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
674 [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
675 skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
681 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
684 for ((i = 0; i < NODEMAP_COUNT; i++)); do
685 if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
689 [[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2
692 for ((i = 0; i < NODEMAP_COUNT; i++)); do
693 if ! delete_range ${HOSTNAME_CHECKSUM}_${i} $i; then
697 [[ $rc != 0 ]] && error "nodemap_del_range failed with $rc" && return 4
702 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 4
706 run_test 9 "nodemap range add"
# test 10 fragment: adding an identical range twice must fail.
711 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
712 [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
713 skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
719 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
722 for ((i = 0; i < NODEMAP_COUNT; i++)); do
723 if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
727 [[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2
730 for ((i = 0; i < NODEMAP_COUNT; i++)); do
731 if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
735 [[ $rc == 0 ]] && error "nodemap_add_range duplicate add with $rc" &&
740 for ((i = 0; i < NODEMAP_COUNT; i++)); do
741 if ! delete_range ${HOSTNAME_CHECKSUM}_${i} $i; then
745 [[ $rc != 0 ]] && error "nodemap_del_range failed with $rc" && return 4
749 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 5
753 run_test 10 "nodemap reject duplicate ranges"
# test 11 fragment: toggle admin/trusted flags via modify_flags.
758 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
759 [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
760 skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
766 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
769 for ((i = 0; i < NODEMAP_COUNT; i++)); do
770 if ! modify_flags ${HOSTNAME_CHECKSUM}_${i}; then
774 [[ $rc != 0 ]] && error "nodemap_modify with $rc" && return 2
779 [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 3
783 run_test 11 "nodemap modify"
# test 12 fragment: set squash uid (88) and squash gid on every nodemap.
788 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
789 [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
790 skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
796 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
799 for ((i = 0; i < NODEMAP_COUNT; i++)); do
800 if ! squash_id ${HOSTNAME_CHECKSUM}_${i} 88 0; then
804 [[ $rc != 0 ]] && error "nodemap squash_uid with $rc" && return 2
807 for ((i = 0; i < NODEMAP_COUNT; i++)); do
808 if ! squash_id ${HOSTNAME_CHECKSUM}_${i} 88 1; then
812 [[ $rc != 0 ]] && error "nodemap squash_gid with $rc" && return 3
817 [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4
821 run_test 12 "nodemap set squash ids"
# test 13 fragment: every sampled IP in each range must resolve to its
# owning nodemap via nodemap_test_nid.
826 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
827 [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
828 skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
834 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
837 for ((i = 0; i < NODEMAP_COUNT; i++)); do
838 if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
842 [[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2
845 for ((i = 0; i < NODEMAP_COUNT; i++)); do
846 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
847 for k in $NODEMAP_IPADDR_LIST; do
848 if ! test_nid $SUBNET_CHECKSUM.$i.$j.$k \
849 ${HOSTNAME_CHECKSUM}_${i}; then
855 [[ $rc != 0 ]] && error "nodemap_test_nid failed with $rc" && return 3
860 [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4
864 run_test 13 "test nids"
# test 14 fragment: with no ranges installed, NIDs fall into the default
# nodemap.
869 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
870 [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
871 skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
877 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
880 for ((i = 0; i < NODEMAP_COUNT; i++)); do
881 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
882 for k in $NODEMAP_IPADDR_LIST; do
883 if ! test_nid $SUBNET_CHECKSUM.$i.$j.$k \
890 [[ $rc != 0 ]] && error "nodemap_test_nid failed with $rc" && return 3
895 [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4
899 run_test 14 "test default nodemap nid lookup"
# test 15 fragment: full id-mapping cycle (add ranges, add idmaps, verify
# via test_idmap, remove idmaps, delete nodemaps).
904 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
905 [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
906 skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
912 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
915 for ((i = 0; i < NODEMAP_COUNT; i++)); do
916 if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
920 [[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2
925 [[ $rc != 0 ]] && error "nodemap_add_idmap failed with $rc" && return 3
930 [[ $rc != 0 ]] && error "nodemap_test_id failed with $rc" && return 4
935 [[ $rc != 0 ]] && error "nodemap_del_idmap failed with $rc" && return 5
940 [[ $rc != 0 ]] && error "nodemap_delete failed with $rc" && return 6
944 run_test 15 "test id mapping"
946 # Until nodemaps are distributed by MGS, they need to be distributed manually
947 # This function and all calls to it should be removed once the MGS distributes
948 # nodemaps to the MDS and OSS nodes directly.
949 do_servers_not_mgs() {
950 local mgs_ip=$(host_nids_address $mgs_HOST $NETTYPE)
# Run "$@" on every server node except the MGS (matched by IP address).
951 for node in $(all_server_nodes); do
952 local node_ip=$(host_nids_address $node $NETTYPE)
953 [ $node_ip == $mgs_ip ] && continue
# create_fops_nodemaps fragment: one nodemap c<i> per client, with that
# client's NID as its range and the FOPS_IDMAPS uid/gid idmaps installed;
# each MGS change is mirrored onto non-MGS servers via set_param.
958 create_fops_nodemaps() {
961 for client in $clients; do
962 local client_ip=$(host_nids_address $client $NETTYPE)
963 local client_nid=$(h2$NETTYPE $client_ip)
964 do_facet mgs $LCTL nodemap_add c${i} || return 1
965 do_facet mgs $LCTL nodemap_add_range \
966 --name c${i} --range $client_nid || return 1
967 do_servers_not_mgs $LCTL set_param nodemap.add_nodemap=c${i} ||
969 do_servers_not_mgs "$LCTL set_param " \
970 "nodemap.add_nodemap_range='c${i} $client_nid'" ||
972 for map in ${FOPS_IDMAPS[i]}; do
973 do_facet mgs $LCTL nodemap_add_idmap --name c${i} \
974 --idtype uid --idmap ${map} || return 1
975 do_servers_not_mgs "$LCTL set_param " \
976 "nodemap.add_nodemap_idmap='c$i uid ${map}'" ||
978 do_facet mgs $LCTL nodemap_add_idmap --name c${i} \
979 --idtype gid --idmap ${map} || return 1
980 do_servers_not_mgs "$LCTL set_param " \
981 " nodemap.add_nodemap_idmap='c$i gid ${map}'" ||
# Sanity check: the MGS and OSS copies of the idmap must agree.
984 out1=$(do_facet mgs $LCTL get_param nodemap.c${i}.idmap)
985 out2=$(do_facet ost0 $LCTL get_param nodemap.c${i}.idmap)
986 [ "$out1" != "$out2" ] && error "mgs and oss maps mismatch"
# delete_fops_nodemaps fragment: remove the c<i> nodemaps everywhere.
992 delete_fops_nodemaps() {
995 for client in $clients; do
996 do_facet mgs $LCTL nodemap_del c${i} || return 1
997 do_servers_not_mgs $LCTL set_param nodemap.remove_nodemap=c$i ||
# fops_test_setup fragment: (re)create $DIR/$tdir, round-robining across
# MDTs when DNE is configured, then chown it to the test user while c0 is
# temporarily granted admin+trusted.
1006 if [ $MDSCOUNT -le 1 ]; then
1007 do_node ${clients_arr[0]} mkdir -p $DIR/$tdir
1009 # round-robin MDTs to test DNE nodemap support
1010 [ ! -d $DIR ] && do_node ${clients_arr[0]} mkdir -p $DIR
1011 do_node ${clients_arr[0]} $LFS setdirstripe -c 1 -i \
1012 $((fops_mds_index % MDSCOUNT)) $DIR/$tdir
1013 ((fops_mds_index++))
1017 # acl test directory needs to be initialized on a privileged client
1019 local admin=$(do_facet mgs $LCTL get_param -n nodemap.c0.admin_nodemap)
1020 local trust=$(do_facet mgs $LCTL get_param -n \
1021 nodemap.c0.trusted_nodemap)
1023 do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
1024 do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
1025 do_servers_not_mgs $LCTL set_param nodemap.c0.admin_nodemap=1
1026 do_servers_not_mgs $LCTL set_param nodemap.c0.trusted_nodemap=1
1028 do_node ${clients_arr[0]} rm -rf $DIR/$tdir
1030 do_node ${clients_arr[0]} chown $user $DIR/$tdir
# Restore c0's original admin/trusted settings.
1032 do_facet mgs $LCTL nodemap_modify --name c0 \
1033 --property admin --value $admin
1034 do_facet mgs $LCTL nodemap_modify --name c0 \
1035 --property trusted --value $trust
1036 do_servers_not_mgs $LCTL set_param nodemap.c0.admin_nodemap=$admin
1037 do_servers_not_mgs $LCTL set_param nodemap.c0.trusted_nodemap=$trust
1039 # flush MDT locks to make sure they are reacquired before test
1040 do_node ${clients_arr[0]} lctl set_param \
1041 ldlm.namespaces.$FSNAME-MDT*.lru_size=clear
# Attempt a create+delete as $run_u and compare the observed result
# against the expectation computed from the test key by
# get_cr_del_expected.
1044 do_create_delete() {
1047 local testfile=$DIR/$tdir/$tfile
1051 if $run_u touch $testfile >& /dev/null; then
1053 $run_u rm $testfile && d=1
1057 local expected=$(get_cr_del_expected $key)
1058 [ "$res" != "$expected" ] &&
1059 error "test $key, wanted $expected, got $res" && rc=$((rc + 1))
# Print the quota usage (second column of 'lfs quota -q') for $run_u.
1063 nodemap_check_quota() {
1065 $run_u lfs quota -q $DIR | awk '{ print $2; exit; }'
# Write 1MB as $run_u, check quota usage rose by ~1M (within fs_log_size
# fuzz), remove the file, and check usage returned to its original level.
1068 do_fops_quota_test() {
1070 # fuzz quota used to account for possible indirect blocks, etc
1071 local quota_fuzz=$(fs_log_size)
1072 local qused_orig=$(nodemap_check_quota "$run_u")
1073 local qused_high=$((qused_orig + quota_fuzz))
1074 local qused_low=$((qused_orig - quota_fuzz))
1075 local testfile=$DIR/$tdir/$tfile
1076 chmod 777 $DIR/$tdir
1077 $run_u dd if=/dev/zero of=$testfile bs=1M count=1 >& /dev/null
1078 sync; sync_all_data || true
1080 local qused_new=$(nodemap_check_quota "$run_u")
1081 [ $((qused_new)) -lt $((qused_low + 1024)) -o \
1082 $((qused_new)) -gt $((qused_high + 1024)) ] &&
1083 error "$qused_new != $qused_orig + 1M after write, " \
1084 "fuzz is $quota_fuzz"
1085 $run_u rm $testfile && d=1
1086 $NODEMAP_TEST_QUOTA && wait_delete_completed_mds
1088 qused_new=$(nodemap_check_quota "$run_u")
1089 [ $((qused_new)) -lt $((qused_low)) \
1090 -o $((qused_new)) -gt $((qused_high)) ] &&
1091 error "quota not reclaimed, expect $qused_orig, " \
1092 "got $qused_new, fuzz $quota_fuzz"
# Look up the fs-side id that FOPS_IDMAPS maps the given client id to;
# prints the mapped id (idmaps are "client:fs" pairs).
1095 get_fops_mapped_user() {
1098 for ((i=0; i < ${#FOPS_IDMAPS[@]}; i++)); do
1099 for map in ${FOPS_IDMAPS[i]}; do
1100 if [ $(cut -d: -f1 <<< "$map") == $cli_user ]; then
1101 cut -d: -f2 <<< "$map"
# Decide whether a create/delete should succeed for a test key of the
# form "mapmode:mds_user:cluster:cli_user:mode".
1109 get_cr_del_expected() {
1111 IFS=":" read -a key <<< "$1"
1112 local mapmode="${key[0]}"
1113 local mds_user="${key[1]}"
1114 local cluster="${key[2]}"
1115 local cli_user="${key[3]}"
1116 local mode="0${key[4]}"
1123 [[ $mapmode == *mapped* ]] && mapped=1
1124 # only c1 is mapped in these test cases
1125 [[ $mapmode == mapped_trusted* ]] && [ "$cluster" == "c0" ] && mapped=0
1126 [[ $mapmode == *noadmin* ]] && noadmin=1
1128 # o+wx works as long as the user isn't mapped
1129 if [ $((mode & 3)) -eq 3 ]; then
1133 # if client user is root, check if root is squashed
1134 if [ "$cli_user" == "0" ]; then
1135 # squash root succeed, if other bit is on
1138 1) [ "$other" == "1" ] && echo $SUCCESS
1139 [ "$other" == "0" ] && echo $FAILURE;;
# Non-root unmapped users fall back to the "other" permission bits.
1143 if [ "$mapped" == "0" ]; then
1144 [ "$other" == "1" ] && echo $SUCCESS
1145 [ "$other" == "0" ] && echo $FAILURE
1149 # if mapped user is mds user, check for u+wx
1150 mapped_user=$(get_fops_mapped_user $cli_user)
1151 [ "$mapped_user" == "-1" ] &&
1152 error "unable to find mapping for client user $cli_user"
1154 if [ "$mapped_user" == "$mds_user" -a \
1155 $(((mode & 0300) == 0300)) -eq 1 ]; then
1159 if [ "$mapped_user" != "$mds_user" -a "$other" == "1" ]; then
# test_fops fragment: exhaustively sweep (mds user) x (client) x (client
# user) x (permission mode), performing create/delete and quota checks
# and comparing against get_cr_del_expected for mapmode $1.
1168 local single_client="$2"
1169 local client_user_list=([0]="0 $((IDBASE+3)) $((IDBASE+4))"
1170 [1]="0 $((IDBASE+5)) $((IDBASE+6))")
# Default sweep covers the interesting u/o write+execute bit patterns;
# SLOW mode sweeps a much larger set of octal modes.
1173 local perm_bit_list="0 3 $((0300)) $((0303))"
1174 # SLOW tests 000-007, 010-070, 100-700 (octal modes)
1175 [ "$SLOW" == "yes" ] &&
1176 perm_bit_list="0 $(seq 1 7) $(seq 8 8 63) $(seq 64 64 511) \
1179 # step through mds users. -1 means root
1180 for mds_i in -1 0 1 2; do
1181 local user=$((mds_i + IDBASE))
1185 [ "$mds_i" == "-1" ] && user=0
1187 echo mkdir -p $DIR/$tdir
1190 for client in $clients; do
1192 local admin=$(do_facet mgs $LCTL get_param -n \
1193 nodemap.c$cli_i.admin_nodemap)
1194 for u in ${client_user_list[$cli_i]}; do
1195 local run_u="do_node $client \
1196 $RUNAS_CMD -u$u -g$u -G$u"
1197 for perm_bits in $perm_bit_list; do
1198 local mode=$(printf %03o $perm_bits)
1200 key="$mapmode:$user:c$cli_i:$u:$mode"
# Temporarily grant admin so the chmod succeeds, then restore it before
# running the actual create/delete check.
1201 do_facet mgs $LCTL nodemap_modify \
1205 do_servers_not_mgs $LCTL set_param \
1206 nodemap.c$cli_i.admin_nodemap=1
1207 do_node $client chmod $mode $DIR/$tdir \
1208 || error unable to chmod $key
1209 do_facet mgs $LCTL nodemap_modify \
1213 do_servers_not_mgs $LCTL set_param \
1214 nodemap.c$cli_i.admin_nodemap=$admin
1216 do_create_delete "$run_u" "$key"
1220 do_fops_quota_test "$run_u"
1223 cli_i=$((cli_i + 1))
1224 [ "$single_client" == "1" ] && break
# Gate helper shared by all nodemap tests: skip unless the MGS is
# reachable via dsh and supports nodemaps (>= 2.5.53).
1231 nodemap_version_check () {
1232 remote_mgs_nodsh && skip "remote MGS with nodsh" && return 1
1233 [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
1234 skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
# Create the per-client fops nodemaps, disable identity upcalls on all
# MDTs, and either leave nodemaps inactive ($1 == 0) or activate them
# with a permissive (admin+trusted) default nodemap.
1239 nodemap_test_setup() {
1241 local active_nodemap=$1
1243 do_nodes $(comma_list $(all_mdts_nodes)) \
1244 $LCTL set_param mdt.*.identity_upcall=NONE
1247 create_fops_nodemaps
1249 [[ $rc != 0 ]] && error "adding fops nodemaps failed $rc"
1251 if [ "$active_nodemap" == "0" ]; then
1252 do_facet mgs $LCTL set_param nodemap.active=0
1253 do_servers_not_mgs $LCTL set_param nodemap.active=0
1257 do_facet mgs $LCTL nodemap_activate 1
1258 do_servers_not_mgs $LCTL set_param nodemap.active=1
1259 do_facet mgs $LCTL nodemap_modify --name default \
1260 --property admin --value 1
1261 do_facet mgs $LCTL nodemap_modify --name default \
1262 --property trusted --value 1
1263 do_servers_not_mgs $LCTL set_param nodemap.default.admin_nodemap=1
1264 do_servers_not_mgs $LCTL set_param nodemap.default.trusted_nodemap=1
# Tear down the fops nodemaps; used directly and as an EXIT trap.
1267 nodemap_test_cleanup() {
1269 delete_fops_nodemaps
1271 [[ $rc != 0 ]] && error "removing fops nodemaps failed $rc"
# Set the admin ($1) and trusted ($2) properties for the client nodemaps.
# NOTE(review): the mgs-side nodemap_modify always targets "c0" while the
# server-side set_param uses c${i} -- looks inconsistent for multi-client
# runs; confirm against the full source before relying on it.
1276 nodemap_clients_admin_trusted() {
1280 for client in $clients; do
1281 do_facet mgs $LCTL nodemap_modify --name c0 \
1282 --property admin --value $admin
1283 do_servers_not_mgs $LCTL set_param \
1284 nodemap.c${i}.admin_nodemap=$admin
1285 do_facet mgs $LCTL nodemap_modify --name c0 \
1286 --property trusted --value $tr
1287 do_servers_not_mgs $LCTL set_param \
1288 nodemap.c${i}.trusted_nodemap=$tr
# test 16 fragment: fileops with nodemaps entirely inactive.
1294 nodemap_version_check || return 0
1295 nodemap_test_setup 0
1297 trap nodemap_test_cleanup EXIT
1299 nodemap_test_cleanup
1301 run_test 16 "test nodemap all_off fileops"
# test 17 fragment: trusted (no id mapping) but non-admin clients.
1304 nodemap_version_check || return 0
1307 trap nodemap_test_cleanup EXIT
1308 nodemap_clients_admin_trusted 0 1
1309 test_fops trusted_noadmin 1
1310 nodemap_test_cleanup
1312 run_test 17 "test nodemap trusted_noadmin fileops"
# test 18 fragment: mapped (untrusted) and non-admin clients.
1315 nodemap_version_check || return 0
1318 trap nodemap_test_cleanup EXIT
1319 nodemap_clients_admin_trusted 0 0
1320 test_fops mapped_noadmin 1
1321 nodemap_test_cleanup
1323 run_test 18 "test nodemap mapped_noadmin fileops"
# test 19 fragment: trusted admin clients.
1326 nodemap_version_check || return 0
1329 trap nodemap_test_cleanup EXIT
1330 nodemap_clients_admin_trusted 1 1
1331 test_fops trusted_admin 1
1332 nodemap_test_cleanup
1334 run_test 19 "test nodemap trusted_admin fileops"
# test 20 fragment: mapped admin clients.
1337 nodemap_version_check || return 0
1340 trap nodemap_test_cleanup EXIT
1341 nodemap_clients_admin_trusted 1 0
1342 test_fops mapped_admin 1
1343 nodemap_test_cleanup
1345 run_test 20 "test nodemap mapped_admin fileops"
# test 21 fragment: mixed clusters -- trusted varies per client ($x),
# admin off everywhere.
1348 nodemap_version_check || return 0
1351 trap nodemap_test_cleanup EXIT
1354 for client in $clients; do
1355 do_facet mgs $LCTL nodemap_modify --name c${i} \
1356 --property admin --value 0
1357 do_facet mgs $LCTL nodemap_modify --name c${i} \
1358 --property trusted --value $x
1359 do_servers_not_mgs $LCTL set_param \
1360 nodemap.c${i}.admin_nodemap=0
1361 do_servers_not_mgs $LCTL set_param \
1362 nodemap.c${i}.trusted_nodemap=$x
1366 test_fops mapped_trusted_noadmin
1367 nodemap_test_cleanup
1369 run_test 21 "test nodemap mapped_trusted_noadmin fileops"
# test 22 fragment: same mixed-trust sweep but with admin enabled.
1372 nodemap_version_check || return 0
1375 trap nodemap_test_cleanup EXIT
1378 for client in $clients; do
1379 do_facet mgs $LCTL nodemap_modify --name c${i} \
1380 --property admin --value 1
1381 do_facet mgs $LCTL nodemap_modify --name c${i} \
1382 --property trusted --value $x
1383 do_servers_not_mgs $LCTL set_param \
1384 nodemap.c${i}.admin_nodemap=1
1385 do_servers_not_mgs $LCTL set_param \
1386 nodemap.c${i}.trusted_nodemap=$x
1390 test_fops mapped_trusted_admin
1391 nodemap_test_cleanup
1393 run_test 22 "test nodemap mapped_trusted_admin fileops"
1395 # acl test directory needs to be initialized on a privileged client
# Temporarily grant c0 admin+trusted so the test dir can be recreated
# and opened up (a+rwx), then restore c0's previous settings.
1396 nodemap_acl_test_setup() {
1397 local admin=$(do_facet mgs $LCTL get_param -n nodemap.c0.admin_nodemap)
1398 local trust=$(do_facet mgs $LCTL get_param -n \
1399 nodemap.c0.trusted_nodemap)
1401 do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
1402 do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
1403 do_servers_not_mgs $LCTL set_param nodemap.c0.admin_nodemap=1
1404 do_servers_not_mgs $LCTL set_param nodemap.c0.trusted_nodemap=1
1406 do_node ${clients_arr[0]} rm -rf $DIR/$tdir
1408 do_node ${clients_arr[0]} chmod a+rwx $DIR/$tdir ||
1409 error unable to chmod a+rwx test dir $DIR/$tdir
1411 do_facet mgs $LCTL nodemap_modify --name c0 \
1412 --property admin --value $admin
1413 do_facet mgs $LCTL nodemap_modify --name c0 \
1414 --property trusted --value $trust
1415 do_servers_not_mgs $LCTL set_param nodemap.c0.admin_nodemap=$admin
1416 do_servers_not_mgs $LCTL set_param nodemap.c0.trusted_nodemap=$trust
1420 # returns 0 if the number of ACLs does not change on the second (mapped) client
1421 # after being set on the first client
# $1=user to grant, $2=client that sets the ACL, $3=client that reads it
# back, $4=1 when setfacl itself is expected to fail.
1422 nodemap_acl_test() {
1424 local set_client="$2"
1425 local get_client="$3"
1426 local check_setfacl="$4"
1427 local setfacl_error=0
1428 local testfile=$DIR/$tdir/$tfile
1429 local RUNAS_USER="$RUNAS_CMD -u $user"
1431 local acl_count_post=0
1433 nodemap_acl_test_setup
1436 do_node $set_client $RUNAS_USER touch $testfile
1438 # ACL masks aren't filtered by nodemap code, so we ignore them
1439 acl_count=$(do_node $get_client getfacl $testfile | grep -v mask |
1441 do_node $set_client $RUNAS_USER setfacl -m $user:rwx $testfile ||
1444 # if check setfacl is set to 1, then it's supposed to error
1445 if [ "$check_setfacl" == "1" ]; then
1446 [ "$setfacl_error" != "1" ] && return 1
1449 [ "$setfacl_error" == "1" ] && echo "WARNING: unable to setfacl"
# Compare ACL counts before/after from the other client's view: equal
# counts mean the new ACL entry is not visible there.
1451 acl_count_post=$(do_node $get_client getfacl $testfile | grep -v mask |
1453 [ $acl_count -eq $acl_count_post ] && return 0
# test 23 fragment: ACLs across mapped/trusted clusters. c0 is trusted,
# c1 is mapped; ACL entries should only be visible on the other cluster
# when the granted uid maps to a common fs-side id.
1458 nodemap_version_check || return 0
1461 trap nodemap_test_cleanup EXIT
1462 # 1 trusted cluster, 1 mapped cluster
# These ids mirror the FOPS_IDMAPS table defined near the top of the file.
1463 local unmapped_fs=$((IDBASE+0))
1464 local unmapped_c1=$((IDBASE+5))
1465 local mapped_fs=$((IDBASE+2))
1466 local mapped_c0=$((IDBASE+4))
1467 local mapped_c1=$((IDBASE+6))
1469 do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
1470 do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
1471 do_servers_not_mgs $LCTL set_param nodemap.c0.admin_nodemap=1
1472 do_servers_not_mgs $LCTL set_param nodemap.c0.trusted_nodemap=1
1474 do_facet mgs $LCTL nodemap_modify --name c1 --property admin --value 0
1475 do_facet mgs $LCTL nodemap_modify --name c1 --property trusted --value 0
1476 do_servers_not_mgs $LCTL set_param nodemap.c1.admin_nodemap=0
1477 do_servers_not_mgs $LCTL set_param nodemap.c1.trusted_nodemap=0
1479 # setfacl on trusted cluster to unmapped user, verify it's not seen
1480 nodemap_acl_test $unmapped_fs ${clients_arr[0]} ${clients_arr[1]} ||
1481 error "acl count (1)"
1483 # setfacl on trusted cluster to mapped user, verify it's seen
1484 nodemap_acl_test $mapped_fs ${clients_arr[0]} ${clients_arr[1]} &&
1485 error "acl count (2)"
1487 # setfacl on mapped cluster to mapped user, verify it's seen
1488 nodemap_acl_test $mapped_c1 ${clients_arr[1]} ${clients_arr[0]} &&
1489 error "acl count (3)"
1491 # setfacl on mapped cluster to unmapped user, verify error
1492 nodemap_acl_test $unmapped_fs ${clients_arr[1]} ${clients_arr[0]} 1 ||
1493 error "acl count (4)"
# Now make c0 mapped as well and re-check cross-cluster visibility.
1496 do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 0
1497 do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 0
1498 do_servers_not_mgs $LCTL set_param nodemap.c0.admin_nodemap=0
1499 do_servers_not_mgs $LCTL set_param nodemap.c0.trusted_nodemap=0
1501 # setfacl to mapped user on c1, also mapped to c0, verify it's seen
1502 nodemap_acl_test $mapped_c1 ${clients_arr[1]} ${clients_arr[0]} &&
1503 error "acl count (5)"
1505 # setfacl to mapped user on c1, not mapped to c0, verify not seen
1506 nodemap_acl_test $unmapped_c1 ${clients_arr[1]} ${clients_arr[0]} ||
1507 error "acl count (6)"
1509 nodemap_test_cleanup
1511 run_test 23 "test mapped ACLs"
# test 24 fragment: recursively read every nodemap proc file on all
# servers to catch LBUGs/Oopses in the proc handlers.
1516 trap nodemap_test_cleanup EXIT
1517 do_nodes $(comma_list $(all_server_nodes)) $LCTL get_param -R nodemap ||
1518 error "proc readable file read failed"
1520 nodemap_test_cleanup
1522 run_test 24 "check nodemap proc files for LBUGs and Oopses"
1524 log "cleanup: ======================================================"
# Deactivate nodemaps and restore the identity-upcall settings saved
# during setup, then sanity-check both test users can still list $DIR.
1527 ## nodemap deactivated
1528 do_facet mgs lctl nodemap_activate 0
1530 for num in $(seq $MDSCOUNT); do
1531 if [ "${identity_old[$num]}" = 1 ]; then
1532 switch_identity $num false || identity_old[$num]=$?
1536 $RUNAS_CMD -u $ID0 ls $DIR
1537 $RUNAS_CMD -u $ID1 ls $DIR