# Run select tests by setting ONLY, or as arguments to the script.
# Skip specific tests by setting EXCEPT.
# bug number for skipped test: 19430 19967 19967
ALWAYS_EXCEPT=" 2 5 6 $SANITY_SEC_EXCEPT"
# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
[ "$ALWAYS_EXCEPT$EXCEPT" ] && \
echo "Skipping tests: $ALWAYS_EXCEPT $EXCEPT"
export PATH=$PWD/$SRCDIR:$SRCDIR:$PWD/$SRCDIR/../utils:$PATH:/sbin
export NAME=${NAME:-local}
LUSTRE=${LUSTRE:-$(dirname $0)/..}
. $LUSTRE/tests/test-framework.sh
. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
RUNAS_CMD=${RUNAS_CMD:-runas}
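# runas (lustre/tests/runas) switches credentials before running a command:
# -u <uid> and -g <gid> set the uid/gid, -G sets supplementary groups (used in
# test 4), and the -v/-j options used in test 1 select the fsuid/fsgid sent to
# the MDS (flag meanings inferred from their use below).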
WTL=${WTL:-"$LUSTRE/tests/write_time_limit"}
PERM_CONF=$CONFDIR/perm.conf
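# perm.conf entries written by the tests below look like "* 0 rmtown" or
# "* <uid> setuid,setgid": a NID pattern ('*' matches any client), a uid, and
# a comma-separated permission list consumed by the l_getidentity upcall; the
# 'no' prefix seen in test 0 ('normtown') revokes a permission (field meanings
# inferred from the tests below).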
HOSTNAME_CHECKSUM=$(hostname | sum | awk '{ print $1 }')
SUBNET_CHECKSUM=$(expr $HOSTNAME_CHECKSUM % 250 + 1)
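# Illustrative example: if "hostname | sum" prints "37674 1", then
# HOSTNAME_CHECKSUM=37674 and SUBNET_CHECKSUM=$((37674 % 250 + 1))=175, so the
# nodemaps created below are named 37674_<i> and their fake NID ranges fall in
# 175.x.y.z subnets.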
NODEMAP_IPADDR_LIST="1 10 64 128 200 250"
require_dsh_mds || exit 0
require_dsh_ost || exit 0
clients=${CLIENTS//,/ }
num_clients=$(get_node_count ${clients})
clients_arr=($clients)
USER0=$(grep :$ID0:$ID0: /etc/passwd | cut -d: -f1)
USER1=$(grep :$ID1:$ID1: /etc/passwd | cut -d: -f1)
skip "need to add user0 ($ID0:$ID0) to /etc/passwd" && exit 0
skip "need to add user1 ($ID1:$ID1) to /etc/passwd" && exit 0
IDBASE=${IDBASE:-60000}
# changes to mappings must be reflected in test 23
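# With the default IDBASE=60000, the entries below map client uid/gid 60003
# and 60004 on the first client's nodemap (c0) to filesystem ids 60000 and
# 60002, and client ids 60005 and 60006 on the second client's nodemap (c1)
# to 60001 and 60002 (60002 is shared by both nodemaps).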
[0]="$((IDBASE+3)):$((IDBASE+0)) $((IDBASE+4)):$((IDBASE+2))"
[1]="$((IDBASE+5)):$((IDBASE+1)) $((IDBASE+6)):$((IDBASE+2))"
check_and_setup_lustre
if [ "$I_MOUNTED" = "yes" ]; then
cleanupall -f || error "sec_cleanup"
[ -z "`echo $DIR | grep $MOUNT`" ] && \
error "$DIR not in $MOUNT" && sec_cleanup && exit 1
[ `echo $MOUNT | wc -w` -gt 1 ] && \
echo "NAME=$MOUNT mounted more than once" && sec_cleanup && exit 0
GSS_REF=$(lsmod | grep ^ptlrpc_gss | awk '{print $3}')
if [ ! -z "$GSS_REF" -a "$GSS_REF" != "0" ]; then
echo "with GSS support"
echo "without GSS support"
MDT=$(do_facet $SINGLEMDS lctl get_param -N "mdt.\*MDT0000" |
[ -z "$MDT" ] && error "failed to get MDT device" && exit 1
do_facet $SINGLEMDS "mkdir -p $CONFDIR"
IDENTITY_FLUSH=mdt.$MDT.identity_flush
IDENTITY_UPCALL=mdt.$MDT.identity_upcall
MDSSECLEVEL=mdt.$MDT.sec_level
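# Writing -1 to identity_flush (done throughout the tests after editing
# $PERM_CONF) drops every cached identity entry on the MDS, so the new
# permission settings take effect immediately instead of waiting for the
# cache to expire.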
if [ -z "$(lctl get_param -n llite.*.client_type | grep remote 2>/dev/null)" ]
if ! $RUNAS_CMD -u $user krb5_login.sh; then
error "kerberos login for $user failed."
if ! $RUNAS_CMD -u $user -g $group ls $DIR > /dev/null 2>&1; then
$RUNAS_CMD -u $user lfs flushctx -k
$RUNAS_CMD -u $user krb5_login.sh
if ! $RUNAS_CMD -u$user -g$group ls $DIR > /dev/null 2>&1; then
error "init $user $group failed."
declare -a identity_old
for num in `seq $MDSCOUNT`; do
switch_identity $num true || identity_old[$num]=$?
if ! $RUNAS_CMD -u $ID0 ls $DIR > /dev/null 2>&1; then
sec_login $USER0 $USER0
if ! $RUNAS_CMD -u $ID1 ls $DIR > /dev/null 2>&1; then
sec_login $USER1 $USER1
# run as different user
chmod 0755 $DIR || error "chmod (1)"
rm -rf $DIR/$tdir || error "rm (1)"
mkdir -p $DIR/$tdir || error "mkdir (1)"
if [ "$CLIENT_TYPE" = "remote" ]; then
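# the 'normtown' entry below is expected to deny ownership changes (the first
# chown must fail), while the 'rmtown' entry written afterwards allows them;
# reading the "no" prefix as "revoke rmtown" is inferred from that behaviour.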
do_facet $SINGLEMDS "echo '* 0 normtown' > $PERM_CONF"
do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
chown $USER0 $DIR/$tdir && error "chown (1)"
do_facet $SINGLEMDS "echo '* 0 rmtown' > $PERM_CONF"
do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
chown $USER0 $DIR/$tdir || error "chown (2)"
$RUNAS_CMD -u $ID0 ls $DIR || error "ls (1)"
rm -f $DIR/f0 || error "rm (2)"
$RUNAS_CMD -u $ID0 touch $DIR/f0 && error "touch (1)"
$RUNAS_CMD -u $ID0 touch $DIR/$tdir/f1 || error "touch (2)"
$RUNAS_CMD -u $ID1 touch $DIR/$tdir/f2 && error "touch (3)"
touch $DIR/$tdir/f3 || error "touch (4)"
chown root $DIR/$tdir || error "chown (3)"
chgrp $USER0 $DIR/$tdir || error "chgrp (1)"
chmod 0775 $DIR/$tdir || error "chmod (2)"
$RUNAS_CMD -u $ID0 touch $DIR/$tdir/f4 || error "touch (5)"
$RUNAS_CMD -u $ID1 touch $DIR/$tdir/f5 && error "touch (6)"
touch $DIR/$tdir/f6 || error "touch (7)"
rm -rf $DIR/$tdir || error "rm (3)"
if [ "$CLIENT_TYPE" = "remote" ]; then
do_facet $SINGLEMDS "rm -f $PERM_CONF"
do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
run_test 0 "uid permission ============================="
[ $GSS_SUP = 0 ] && skip "without GSS support." && return
if [ "$CLIENT_TYPE" = "remote" ]; then
do_facet $SINGLEMDS "echo '* 0 rmtown' > $PERM_CONF"
do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
chown $USER0 $DIR/$tdir || error "chown (1)"
$RUNAS_CMD -u $ID1 -v $ID0 touch $DIR/$tdir/f0 && error "touch (2)"
echo "enable uid $ID1 setuid"
do_facet $SINGLEMDS "echo '* $ID1 setuid' >> $PERM_CONF"
do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
$RUNAS_CMD -u $ID1 -v $ID0 touch $DIR/$tdir/f1 || error "touch (3)"
chown root $DIR/$tdir || error "chown (4)"
chgrp $USER0 $DIR/$tdir || error "chgrp (5)"
chmod 0770 $DIR/$tdir || error "chmod (6)"
$RUNAS_CMD -u $ID1 -g $ID1 touch $DIR/$tdir/f2 && error "touch (7)"
$RUNAS_CMD -u$ID1 -g$ID1 -j$ID0 touch $DIR/$tdir/f3 && error "touch (8)"
echo "enable uid $ID1 setuid,setgid"
do_facet $SINGLEMDS "echo '* $ID1 setuid,setgid' > $PERM_CONF"
do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
$RUNAS_CMD -u $ID1 -g $ID1 -j $ID0 touch $DIR/$tdir/f4 ||
$RUNAS_CMD -u $ID1 -v $ID0 -g $ID1 -j $ID0 touch $DIR/$tdir/f5 ||
do_facet $SINGLEMDS "rm -f $PERM_CONF"
do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
run_test 1 "setuid/gid ============================="
run_rmtacl_subtest() {
$SAVE_PWD/rmtacl/run $SAVE_PWD/rmtacl/$1.test
# for remote client only
[ "$CLIENT_TYPE" = "local" ] && \
skip "remote_acl for remote client only" && return
[ -z "$(lctl get_param -n mdc.*-mdc-*.connect_flags | grep ^acl)" ] && \
skip "must have acl enabled" && return
[ -z "$(which setfacl 2>/dev/null)" ] && \
skip "could not find setfacl" && return
[ "$UID" != 0 ] && skip "must run as root" && return
do_facet $SINGLEMDS "echo '* 0 rmtacl,rmtown' > $PERM_CONF"
do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
sec_login daemon daemon
sec_login games users
echo "performing cp ..."
run_rmtacl_subtest cp || error "cp"
echo "performing getfacl-noacl..."
run_rmtacl_subtest getfacl-noacl || error "getfacl-noacl"
echo "performing misc..."
run_rmtacl_subtest misc || error "misc"
echo "performing permissions..."
run_rmtacl_subtest permissions || error "permissions"
echo "performing setfacl..."
run_rmtacl_subtest setfacl || error "setfacl"
# inheritance test obtained from HP
echo "performing inheritance..."
cp $SAVE_PWD/rmtacl/make-tree .
run_rmtacl_subtest inheritance || error "inheritance"
do_facet $SINGLEMDS "rm -f $PERM_CONF"
do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
run_test 2 "rmtacl ============================="
# root_squash will be redesigned in Lustre 1.7
skip "root_squash will be redesigned in Lustre 1.7" && return
run_test 3 "rootsquash ============================="
# bug 3285 - supplementary group should always succeed.
# NB: the supplementary groups are set for the local client only;
# for a remote client, the groups of the specified uid on the MDT
# are obtained by the /sbin/l_getidentity upcall and used instead.
local server_version=$(lustre_version_code $SINGLEMDS)
[[ $server_version -ge $(version_code 2.6.93) ]] ||
[[ $server_version -ge $(version_code 2.5.35) &&
$server_version -lt $(version_code 2.5.50) ]] ||
{ skip "Need MDS version at least 2.6.93 or 2.5.35"; return; }
if [ "$CLIENT_TYPE" = "remote" ]; then
do_facet $SINGLEMDS "echo '* 0 rmtown' > $PERM_CONF"
do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
chmod 0771 $DIR/$tdir
chgrp $ID0 $DIR/$tdir
$RUNAS_CMD -u $ID0 ls $DIR/$tdir || error "setgroups (1)"
if [ "$CLIENT_TYPE" = "local" ]; then
do_facet $SINGLEMDS "echo '* $ID1 setgrp' > $PERM_CONF"
do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
$RUNAS_CMD -u $ID1 -G1,2,$ID0 ls $DIR/$tdir ||
error "setgroups (2)"
$RUNAS_CMD -u $ID1 -G1,2 ls $DIR/$tdir && error "setgroups (3)"
do_facet $SINGLEMDS "rm -f $PERM_CONF"
do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
run_test 4 "set supplementary group ==============="
squash_id default 99 0
squash_id default 99 1
for (( i = 0; i < NODEMAP_COUNT; i++ )); do
if ! do_facet mgs $LCTL nodemap_add \
${HOSTNAME_CHECKSUM}_${i}; then
out=$(do_facet mgs $LCTL get_param \
nodemap.${HOSTNAME_CHECKSUM}_${i}.id)
## grep -c prints the match count; fail if the new nodemap does not
## show up in the get_param output
rc=$(echo $out | grep -c ${HOSTNAME_CHECKSUM}_${i})
[[ $rc == 0 ]] && return 1
for ((i = 0; i < NODEMAP_COUNT; i++)); do
if ! do_facet mgs $LCTL nodemap_del \
${HOSTNAME_CHECKSUM}_${i}; then
error "nodemap_del ${HOSTNAME_CHECKSUM}_${i} \
out=$(do_facet mgs $LCTL get_param \
nodemap.${HOSTNAME_CHECKSUM}_${i}.id)
rc=$(echo $out | grep -c ${HOSTNAME_CHECKSUM}_${i})
[[ $rc != 0 ]] && return 1
local cmd="$LCTL nodemap_add_range"
for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
range="$SUBNET_CHECKSUM.${2}.${j}.[1-253]@tcp"
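# e.g. with SUBNET_CHECKSUM=175, nodemap index 2 and j=0 this expands to
# 175.2.0.[1-253]@tcp (illustrative values; the real numbers depend on the
# hostname checksum computed above)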
if ! do_facet mgs $cmd --name $1 \
local cmd="$LCTL nodemap_del_range"
for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
range="$SUBNET_CHECKSUM.${2}.${j}.[1-253]@tcp"
if ! do_facet mgs $cmd --name $1 \
local cmd="$LCTL nodemap_add_idmap"
for ((i = 0; i < NODEMAP_COUNT; i++)); do
for ((j = 500; j < NODEMAP_MAX_ID; j++)); do
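# the idmaps created here offset each id by one (client id j maps to
# filesystem id j+1); the client_id/fs_id assignments are on elided lines,
# but the expected_id=$((id + 1)) check in test_idmap below relies on this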
if ! do_facet mgs $cmd \
--name ${HOSTNAME_CHECKSUM}_${i} \
--idtype uid --idmap $client_id:$fs_id; then
if ! do_facet mgs $cmd \
--name ${HOSTNAME_CHECKSUM}_${i} \
--idtype gid --idmap $client_id:$fs_id; then
local cmd="$LCTL nodemap_del_idmap"
for ((i = 0; i < NODEMAP_COUNT; i++)); do
for ((j = 500; j < NODEMAP_MAX_ID; j++)); do
if ! do_facet mgs $cmd \
--name ${HOSTNAME_CHECKSUM}_${i} \
--idtype uid --idmap $client_id:$fs_id; then
if ! do_facet mgs $cmd \
--name ${HOSTNAME_CHECKSUM}_${i} \
--idtype gid --idmap $client_id:$fs_id; then
local cmd="$LCTL nodemap_modify"
proc[0]="admin_nodemap"
proc[1]="trusted_nodemap"
for ((idx = 0; idx < 2; idx++)); do
if ! do_facet mgs $cmd --name $1 \
--property ${option[$idx]} \
if ! do_facet mgs $cmd --name $1 \
--property ${option[$idx]} \
cmd[0]="$LCTL nodemap_modify --property squash_uid"
cmd[1]="$LCTL nodemap_modify --property squash_gid"
if ! do_facet mgs ${cmd[$3]} --name $1 --value $2; then
# ensure that the squash defaults are the expected defaults
squash_id default 99 0
squash_id default 99 1
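# 99 is the conventional "nobody"-style id used as the squash target here;
# squash_id <nodemap> <id> <0|1> sets squash_uid (index 0) or squash_gid
# (index 1) via the cmd array above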
cmd="$LCTL nodemap_test_nid"
nid=$(do_facet mgs $cmd $1)
if [ $nid == $2 ]; then
local cmd="$LCTL nodemap_test_id"
## nodemap deactivated
if ! do_facet mgs $LCTL nodemap_activate 0; then
for ((id = 500; id < NODEMAP_MAX_ID; id++)); do
for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
fs_id=$(do_facet mgs $cmd --nid $nid \
--idtype uid --id $id)
if [ $fs_id != $id ]; then
echo "expected $id, got $fs_id"
if ! do_facet mgs $LCTL nodemap_activate 1; then
for ((id = 500; id < NODEMAP_MAX_ID; id++)); do
for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
fs_id=$(do_facet mgs $cmd --nid $nid \
--idtype uid --id $id)
expected_id=$((id + 1))
if [ $fs_id != $expected_id ]; then
echo "expected $expected_id, got $fs_id"
for ((i = 0; i < NODEMAP_COUNT; i++)); do
if ! do_facet mgs $LCTL nodemap_modify \
--name ${HOSTNAME_CHECKSUM}_${i} \
--property trusted --value 1; then
error "nodemap_modify ${HOSTNAME_CHECKSUM}_${i} "
for ((id = 500; id < NODEMAP_MAX_ID; id++)); do
for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
fs_id=$(do_facet mgs $cmd --nid $nid \
--idtype uid --id $id)
if [ $fs_id != $id ]; then
echo "expected $id, got $fs_id"
## ensure allow_root_access is enabled
for ((i = 0; i < NODEMAP_COUNT; i++)); do
if ! do_facet mgs $LCTL nodemap_modify \
--name ${HOSTNAME_CHECKSUM}_${i} \
--property admin --value 1; then
error "nodemap_modify ${HOSTNAME_CHECKSUM}_${i} "
## check that root is allowed
for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
fs_id=$(do_facet mgs $cmd --nid $nid --idtype uid --id 0)
if [ $fs_id != 0 ]; then
echo "root allowed expected 0, got $fs_id"
## ensure allow_root_access is disabled
for ((i = 0; i < NODEMAP_COUNT; i++)); do
if ! do_facet mgs $LCTL nodemap_modify \
--name ${HOSTNAME_CHECKSUM}_${i} \
--property admin --value 0; then
error "nodemap_modify ${HOSTNAME_CHECKSUM}_${i} "
## check that root is mapped to 99
for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
fs_id=$(do_facet mgs $cmd --nid $nid --idtype uid --id 0)
if [ $fs_id != 99 ]; then
error "root squash expected 99, got $fs_id"
## reset client trust to 0
for ((i = 0; i < NODEMAP_COUNT; i++)); do
if ! do_facet mgs $LCTL nodemap_modify \
--name ${HOSTNAME_CHECKSUM}_${i} \
--property trusted --value 0; then
error "nodemap_modify ${HOSTNAME_CHECKSUM}_${i} "
remote_mgs_nodsh && skip "remote MGS with nodsh" && return
[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 2
run_test 7 "nodemap create and delete"
remote_mgs_nodsh && skip "remote MGS with nodsh" && return
[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
[[ $rc == 0 ]] && error "duplicate nodemap_add allowed with $rc" &&
[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 3
run_test 8 "nodemap reject duplicates"
remote_mgs_nodsh && skip "remote MGS with nodsh" && return
[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
for ((i = 0; i < NODEMAP_COUNT; i++)); do
if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
[[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2
for ((i = 0; i < NODEMAP_COUNT; i++)); do
if ! delete_range ${HOSTNAME_CHECKSUM}_${i} $i; then
[[ $rc != 0 ]] && error "nodemap_del_range failed with $rc" && return 4
[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 4
run_test 9 "nodemap range add"
remote_mgs_nodsh && skip "remote MGS with nodsh" && return
[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
for ((i = 0; i < NODEMAP_COUNT; i++)); do
if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
[[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2
for ((i = 0; i < NODEMAP_COUNT; i++)); do
if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
[[ $rc == 0 ]] && error "nodemap_add_range duplicate add with $rc" &&
for ((i = 0; i < NODEMAP_COUNT; i++)); do
if ! delete_range ${HOSTNAME_CHECKSUM}_${i} $i; then
[[ $rc != 0 ]] && error "nodemap_del_range failed with $rc" && return 4
[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 5
run_test 10 "nodemap reject duplicate ranges"
remote_mgs_nodsh && skip "remote MGS with nodsh" && return
[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
for ((i = 0; i < NODEMAP_COUNT; i++)); do
if ! modify_flags ${HOSTNAME_CHECKSUM}_${i}; then
[[ $rc != 0 ]] && error "nodemap_modify failed with $rc" && return 2
[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 3
run_test 11 "nodemap modify"
remote_mgs_nodsh && skip "remote MGS with nodsh" && return
[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
for ((i = 0; i < NODEMAP_COUNT; i++)); do
if ! squash_id ${HOSTNAME_CHECKSUM}_${i} 88 0; then
[[ $rc != 0 ]] && error "nodemap squash_uid failed with $rc" && return 2
for ((i = 0; i < NODEMAP_COUNT; i++)); do
if ! squash_id ${HOSTNAME_CHECKSUM}_${i} 88 1; then
[[ $rc != 0 ]] && error "nodemap squash_gid failed with $rc" && return 3
[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4
run_test 12 "nodemap set squash ids"
remote_mgs_nodsh && skip "remote MGS with nodsh" && return
[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
for ((i = 0; i < NODEMAP_COUNT; i++)); do
if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
[[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2
for ((i = 0; i < NODEMAP_COUNT; i++)); do
for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
for k in $NODEMAP_IPADDR_LIST; do
if ! test_nid $SUBNET_CHECKSUM.$i.$j.$k \
${HOSTNAME_CHECKSUM}_${i}; then
[[ $rc != 0 ]] && error "nodemap_test_nid failed with $rc" && return 3
[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4
run_test 13 "test nids"
remote_mgs_nodsh && skip "remote MGS with nodsh" && return
[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
for ((i = 0; i < NODEMAP_COUNT; i++)); do
for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
for k in $NODEMAP_IPADDR_LIST; do
if ! test_nid $SUBNET_CHECKSUM.$i.$j.$k \
[[ $rc != 0 ]] && error "nodemap_test_nid failed with $rc" && return 3
[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4
run_test 14 "test default nodemap nid lookup"
remote_mgs_nodsh && skip "remote MGS with nodsh" && return
[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
for ((i = 0; i < NODEMAP_COUNT; i++)); do
if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
[[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2
[[ $rc != 0 ]] && error "nodemap_add_idmap failed with $rc" && return 3
[[ $rc != 0 ]] && error "nodemap_test_id failed with $rc" && return 4
[[ $rc != 0 ]] && error "nodemap_del_idmap failed with $rc" && return 5
[[ $rc != 0 ]] && error "nodemap_delete failed with $rc" && return 6
run_test 15 "test id mapping"
# Until nodemaps are distributed by MGS, they need to be distributed manually
# This function and all calls to it should be removed once the MGS distributes
# nodemaps to the MDS and OSS nodes directly.
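# do_servers_not_mgs runs a command on every server node except the MGS; the
# callers below use it like
#   do_servers_not_mgs $LCTL set_param nodemap.active=1
# so that the MDS/OSS copies of the nodemap configuration stay in step with
# the MGS.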
do_servers_not_mgs() {
local mgs_ip=$(host_nids_address $mgs_HOST $NETTYPE)
for node in $(all_server_nodes); do
local node_ip=$(host_nids_address $node $NETTYPE)
[ $node_ip == $mgs_ip ] && continue
create_fops_nodemaps() {
for client in $clients; do
local client_ip=$(host_nids_address $client $NETTYPE)
local client_nid=$(h2$NETTYPE $client_ip)
do_facet mgs $LCTL nodemap_add c${i} || return 1
do_facet mgs $LCTL nodemap_add_range \
--name c${i} --range $client_nid || return 1
do_servers_not_mgs $LCTL set_param nodemap.add_nodemap=c${i} ||
do_servers_not_mgs "$LCTL set_param \
nodemap.add_nodemap_range='c${i} $client_nid'" ||
for map in ${FOPS_IDMAPS[i]}; do
do_facet mgs $LCTL nodemap_add_idmap --name c${i} \
--idtype uid --idmap ${map} || return 1
do_servers_not_mgs "$LCTL set_param \
nodemap.add_nodemap_idmap='c$i uid ${map}'" ||
do_facet mgs $LCTL nodemap_add_idmap --name c${i} \
--idtype gid --idmap ${map} || return 1
do_servers_not_mgs "$LCTL set_param \
nodemap.add_nodemap_idmap='c$i gid ${map}'" ||
out1=$(do_facet mgs $LCTL get_param nodemap.c${i}.idmap)
out2=$(do_facet ost0 $LCTL get_param nodemap.c${i}.idmap)
[ "$out1" != "$out2" ] && error "mgs and oss maps mismatch"
delete_fops_nodemaps() {
for client in $clients; do
do_facet mgs $LCTL nodemap_del c${i} || return 1
do_servers_not_mgs $LCTL set_param nodemap.remove_nodemap=c$i ||
if [ $MDSCOUNT -le 1 ]; then
do_node ${clients_arr[0]} mkdir -p $DIR/$tdir
# round-robin MDTs to test DNE nodemap support
[ ! -d $DIR ] && do_node ${clients_arr[0]} mkdir -p $DIR
do_node ${clients_arr[0]} $LFS setdirstripe -c 1 -i \
$((fops_mds_index % MDSCOUNT)) $DIR/$tdir
((fops_mds_index++))
# the fileops test directory needs to be initialized on a privileged client
local admin=$(do_facet mgs $LCTL get_param -n nodemap.c0.admin_nodemap)
local trust=$(do_facet mgs $LCTL get_param -n \
nodemap.c0.trusted_nodemap)
do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
do_servers_not_mgs $LCTL set_param nodemap.c0.admin_nodemap=1
do_servers_not_mgs $LCTL set_param nodemap.c0.trusted_nodemap=1
do_node ${clients_arr[0]} rm -rf $DIR/$tdir
do_node ${clients_arr[0]} chown $user $DIR/$tdir
do_facet mgs $LCTL nodemap_modify --name c0 \
--property admin --value $admin
do_facet mgs $LCTL nodemap_modify --name c0 \
--property trusted --value $trust
do_servers_not_mgs $LCTL set_param nodemap.c0.admin_nodemap=$admin
do_servers_not_mgs $LCTL set_param nodemap.c0.trusted_nodemap=$trust
# flush MDT locks to make sure they are reacquired before test
do_node ${clients_arr[0]} lctl set_param \
ldlm.namespaces.$FSNAME-MDT*.lru_size=clear
do_create_delete() {
local testfile=$DIR/$tdir/$tfile
if $run_u touch $testfile >& /dev/null; then
$run_u rm $testfile && d=1
local expected=$(get_cr_del_expected $key)
[ "$res" != "$expected" ] && error "test $key expected " \
"$expected, got $res" && rc=$(($rc+1))
nodemap_check_quota() {
$run_u lfs quota -q $DIR | awk '{ print $2; exit; }'
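# "lfs quota -q" prints the usage data without headers; the second field of
# the first line is the block usage (in kbytes) charged to the calling user,
# which is what the quota checks below compare against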
do_fops_quota_test() {
# fuzz quota used to account for possible indirect blocks, etc
local quota_fuzz=$(fs_log_size)
local qused_orig=$(nodemap_check_quota "$run_u")
local qused_high=$((qused_orig + quota_fuzz))
local qused_low=$((qused_orig - quota_fuzz))
local testfile=$DIR/$tdir/$tfile
chmod 777 $DIR/$tdir
$run_u dd if=/dev/zero of=$testfile bs=1M count=1 >& /dev/null
sync; sync_all_data || true
local qused_new=$(nodemap_check_quota "$run_u")
[ $((qused_new)) -lt $((qused_low + 1024)) \
-o $((qused_new)) -gt $((qused_high + 1024)) ] &&
error "$qused_new != $qused_orig + 1M after write, \
fuzz is $quota_fuzz"
$run_u rm $testfile && d=1
$NODEMAP_TEST_QUOTA && wait_delete_completed_mds
qused_new=$(nodemap_check_quota "$run_u")
[ $((qused_new)) -lt $((qused_low)) \
-o $((qused_new)) -gt $((qused_high)) ] &&
error "quota not reclaimed, expect $qused_orig got $qused_new, \
get_fops_mapped_user() {
for ((i=0; i < ${#FOPS_IDMAPS[@]}; i++)); do
for map in ${FOPS_IDMAPS[i]}; do
if [ $(cut -d: -f1 <<< "$map") == $cli_user ]; then
cut -d: -f2 <<< "$map"
get_cr_del_expected() {
IFS=":" read -a key <<< "$1"
local mapmode="${key[0]}"
local mds_user="${key[1]}"
local cluster="${key[2]}"
local cli_user="${key[3]}"
local mode="0${key[4]}"
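# the key is assembled in test_fops below as "$mapmode:$user:c$cli_i:$u:$mode",
# e.g. "mapped_trusted_admin:60001:c1:60005:303" (illustrative values)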
[[ $mapmode == *mapped* ]] && mapped=1
# only c1 is mapped in these test cases
[[ $mapmode == mapped_trusted* ]] && [ "$cluster" == "c0" ] && mapped=0
[[ $mapmode == *noadmin* ]] && noadmin=1
# o+wx works as long as the user isn't mapped
if [ $((mode & 3)) -eq 3 ]; then
# if client user is root, check if root is squashed
if [ "$cli_user" == "0" ]; then
# squashed root succeeds only if the 'other' bits allow access
1) [ "$other" == "1" ] && echo $SUCCESS
[ "$other" == "0" ] && echo $FAILURE;;
if [ "$mapped" == "0" ]; then
[ "$other" == "1" ] && echo $SUCCESS
[ "$other" == "0" ] && echo $FAILURE
# if mapped user is mds user, check for u+wx
mapped_user=$(get_fops_mapped_user $cli_user)
[ "$mapped_user" == "-1" ] &&
error "unable to find mapping for client user $cli_user"
if [ "$mapped_user" == "$mds_user" -a \
$(((mode & 0300) == 0300)) -eq 1 ]; then
if [ "$mapped_user" != "$mds_user" -a "$other" == "1" ]; then
local single_client="$2"
local client_user_list=([0]="0 $((IDBASE+3)) $((IDBASE+4))"
[1]="0 $((IDBASE+5)) $((IDBASE+6))")
local perm_bit_list="0 3 $((0300)) $((0303))"
# SLOW tests 000-007, 010-070, 100-700 (octal modes)
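# (seq 1 7 gives modes 001-007, seq 8 8 63 gives 010-070 and seq 64 64 511
# gives 100-700; the values are decimal and converted to octal with
# "printf %03o" below)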
1190 [ "$SLOW" == "yes" ] &&
1191 perm_bit_list="0 $(seq 1 7) $(seq 8 8 63) $(seq 64 64 511) \
1194 # step through mds users. -1 means root
1195 for mds_i in -1 0 1 2; do
1196 local user=$((mds_i + IDBASE))
1200 [ "$mds_i" == "-1" ] && user=0
1202 echo mkdir -p $DIR/$tdir
1205 for client in $clients; do
1207 local admin=$(do_facet mgs $LCTL get_param -n \
1208 nodemap.c$cli_i.admin_nodemap)
1209 for u in ${client_user_list[$cli_i]}; do
1210 local run_u="do_node $client \
1211 $RUNAS_CMD -u$u -g$u -G$u"
1212 for perm_bits in $perm_bit_list; do
1213 local mode=$(printf %03o $perm_bits)
1215 key="$mapmode:$user:c$cli_i:$u:$mode"
1216 do_facet mgs $LCTL nodemap_modify \
1220 do_servers_not_mgs $LCTL set_param \
1221 nodemap.c$cli_i.admin_nodemap=1
1222 do_node $client chmod $mode $DIR/$tdir \
1223 || error unable to chmod $key
1224 do_facet mgs $LCTL nodemap_modify \
1228 do_servers_not_mgs $LCTL set_param \
1229 nodemap.c$cli_i.admin_nodemap=$admin
1231 do_create_delete "$run_u" "$key"
1235 do_fops_quota_test "$run_u"
1238 cli_i=$((cli_i + 1))
1239 [ "$single_client" == "1" ] && break
1246 nodemap_version_check () {
1247 remote_mgs_nodsh && skip "remote MGS with nodsh" && return 1
1248 [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
1249 skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
1254 nodemap_test_setup() {
1256 local active_nodemap=$1
1258 do_nodes $(comma_list $(all_mdts_nodes)) $LCTL set_param \
1259 mdt.*.identity_upcall=NONE
1262 create_fops_nodemaps
1264 [[ $rc != 0 ]] && error "adding fops nodemaps failed $rc"
1266 if [ "$active_nodemap" == "0" ]; then
1267 do_facet mgs $LCTL set_param nodemap.active=0
1268 do_servers_not_mgs $LCTL set_param nodemap.active=0
1272 do_facet mgs $LCTL nodemap_activate 1
1273 do_servers_not_mgs $LCTL set_param nodemap.active=1
1274 do_facet mgs $LCTL nodemap_modify --name default \
1275 --property admin --value 1
1276 do_facet mgs $LCTL nodemap_modify --name default \
1277 --property trusted --value 1
1278 do_servers_not_mgs $LCTL set_param nodemap.default.admin_nodemap=1
1279 do_servers_not_mgs $LCTL set_param nodemap.default.trusted_nodemap=1
1282 nodemap_test_cleanup() {
1284 delete_fops_nodemaps
1286 [[ $rc != 0 ]] && error "removing fops nodemaps failed $rc"
1291 nodemap_clients_admin_trusted() {
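# $1 is the value for the admin property, $2 the value for the trusted
# property; tests 17-20 below call this as "0 1" (trusted_noadmin),
# "0 0" (mapped_noadmin), "1 1" (trusted_admin) and "1 0" (mapped_admin)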
for client in $clients; do
do_facet mgs $LCTL nodemap_modify --name c${i} \
--property admin --value $admin
do_servers_not_mgs $LCTL set_param \
nodemap.c${i}.admin_nodemap=$admin
do_facet mgs $LCTL nodemap_modify --name c${i} \
--property trusted --value $tr
do_servers_not_mgs $LCTL set_param \
nodemap.c${i}.trusted_nodemap=$tr
nodemap_version_check || return 0
nodemap_test_setup 0
trap nodemap_test_cleanup EXIT
nodemap_test_cleanup
run_test 16 "test nodemap all_off fileops"
nodemap_version_check || return 0
trap nodemap_test_cleanup EXIT
nodemap_clients_admin_trusted 0 1
test_fops trusted_noadmin 1
nodemap_test_cleanup
run_test 17 "test nodemap trusted_noadmin fileops"
nodemap_version_check || return 0
trap nodemap_test_cleanup EXIT
nodemap_clients_admin_trusted 0 0
test_fops mapped_noadmin 1
nodemap_test_cleanup
run_test 18 "test nodemap mapped_noadmin fileops"
nodemap_version_check || return 0
trap nodemap_test_cleanup EXIT
nodemap_clients_admin_trusted 1 1
test_fops trusted_admin 1
nodemap_test_cleanup
run_test 19 "test nodemap trusted_admin fileops"
nodemap_version_check || return 0
trap nodemap_test_cleanup EXIT
nodemap_clients_admin_trusted 1 0
test_fops mapped_admin 1
nodemap_test_cleanup
run_test 20 "test nodemap mapped_admin fileops"
nodemap_version_check || return 0
trap nodemap_test_cleanup EXIT
for client in $clients; do
do_facet mgs $LCTL nodemap_modify --name c${i} \
--property admin --value 0
do_facet mgs $LCTL nodemap_modify --name c${i} \
--property trusted --value $x
do_servers_not_mgs $LCTL set_param \
nodemap.c${i}.admin_nodemap=0
do_servers_not_mgs $LCTL set_param \
nodemap.c${i}.trusted_nodemap=$x
test_fops mapped_trusted_noadmin
nodemap_test_cleanup
run_test 21 "test nodemap mapped_trusted_noadmin fileops"
nodemap_version_check || return 0
trap nodemap_test_cleanup EXIT
for client in $clients; do
do_facet mgs $LCTL nodemap_modify --name c${i} \
--property admin --value 1
do_facet mgs $LCTL nodemap_modify --name c${i} \
--property trusted --value $x
do_servers_not_mgs $LCTL set_param \
nodemap.c${i}.admin_nodemap=1
do_servers_not_mgs $LCTL set_param \
nodemap.c${i}.trusted_nodemap=$x
test_fops mapped_trusted_admin
nodemap_test_cleanup
run_test 22 "test nodemap mapped_trusted_admin fileops"
# acl test directory needs to be initialized on a privileged client
nodemap_acl_test_setup() {
local admin=$(do_facet mgs $LCTL get_param -n nodemap.c0.admin_nodemap)
local trust=$(do_facet mgs $LCTL get_param -n \
nodemap.c0.trusted_nodemap)
do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
do_servers_not_mgs $LCTL set_param nodemap.c0.admin_nodemap=1
do_servers_not_mgs $LCTL set_param nodemap.c0.trusted_nodemap=1
do_node ${clients_arr[0]} rm -rf $DIR/$tdir
do_node ${clients_arr[0]} chmod a+rwx $DIR/$tdir ||
error unable to chmod a+rwx test dir $DIR/$tdir
do_facet mgs $LCTL nodemap_modify --name c0 \
--property admin --value $admin
do_facet mgs $LCTL nodemap_modify --name c0 \
--property trusted --value $trust
do_servers_not_mgs $LCTL set_param nodemap.c0.admin_nodemap=$admin
do_servers_not_mgs $LCTL set_param nodemap.c0.trusted_nodemap=$trust
# returns 0 if the number of ACLs does not change on the second (mapped) client
# after being set on the first client
nodemap_acl_test() {
local set_client="$2"
local get_client="$3"
local check_setfacl="$4"
local setfacl_error=0
local testfile=$DIR/$tdir/$tfile
local RUNAS_USER="$RUNAS_CMD -u $user"
local acl_count_post=0
nodemap_acl_test_setup
do_node $set_client $RUNAS_USER touch $testfile
# ACL masks aren't filtered by nodemap code, so we ignore them
acl_count=$(do_node $get_client getfacl $testfile | grep -v mask |
do_node $set_client $RUNAS_USER setfacl -m $user:rwx $testfile ||
# if check_setfacl is 1, the setfacl above is expected to fail
if [ "$check_setfacl" == "1" ]; then
[ "$setfacl_error" != "1" ] && return 1
[ "$setfacl_error" == "1" ] && echo "WARNING: unable to setfacl"
acl_count_post=$(do_node $get_client getfacl $testfile | grep -v mask |
[ $acl_count -eq $acl_count_post ] && return 0
nodemap_version_check || return 0
trap nodemap_test_cleanup EXIT
# 1 trusted cluster, 1 mapped cluster
local unmapped_fs=$((IDBASE+0))
local unmapped_c1=$((IDBASE+5))
local mapped_fs=$((IDBASE+2))
local mapped_c0=$((IDBASE+4))
local mapped_c1=$((IDBASE+6))
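# with the default IDBASE=60000 these line up with the FOPS_IDMAPS table at
# the top of the script: 60000 is the filesystem id behind c0's 60003, 60002
# is the filesystem id shared by c0's 60004 and c1's 60006, and 60005 is the
# c1 client id mapped to 60001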
do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
do_servers_not_mgs $LCTL set_param nodemap.c0.admin_nodemap=1
do_servers_not_mgs $LCTL set_param nodemap.c0.trusted_nodemap=1
do_facet mgs $LCTL nodemap_modify --name c1 --property admin --value 0
do_facet mgs $LCTL nodemap_modify --name c1 --property trusted --value 0
do_servers_not_mgs $LCTL set_param nodemap.c1.admin_nodemap=0
do_servers_not_mgs $LCTL set_param nodemap.c1.trusted_nodemap=0
# setfacl on trusted cluster to unmapped user, verify it's not seen
nodemap_acl_test $unmapped_fs ${clients_arr[0]} ${clients_arr[1]} ||
error "acl count (1)"
# setfacl on trusted cluster to mapped user, verify it's seen
nodemap_acl_test $mapped_fs ${clients_arr[0]} ${clients_arr[1]} &&
error "acl count (2)"
# setfacl on mapped cluster to mapped user, verify it's seen
nodemap_acl_test $mapped_c1 ${clients_arr[1]} ${clients_arr[0]} &&
error "acl count (3)"
# setfacl on mapped cluster to unmapped user, verify error
nodemap_acl_test $unmapped_fs ${clients_arr[1]} ${clients_arr[0]} 1 ||
error "acl count (4)"
do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 0
do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 0
do_servers_not_mgs $LCTL set_param nodemap.c0.admin_nodemap=0
do_servers_not_mgs $LCTL set_param nodemap.c0.trusted_nodemap=0
# setfacl to mapped user on c1, also mapped to c0, verify it's seen
nodemap_acl_test $mapped_c1 ${clients_arr[1]} ${clients_arr[0]} &&
error "acl count (5)"
# setfacl to mapped user on c1, not mapped to c0, verify not seen
nodemap_acl_test $unmapped_c1 ${clients_arr[1]} ${clients_arr[0]} ||
error "acl count (6)"
nodemap_test_cleanup
run_test 23 "test mapped ACLs"
log "cleanup: ======================================================"
## nodemap deactivated
do_facet mgs $LCTL nodemap_activate 0
for num in $(seq $MDSCOUNT); do
if [ "${identity_old[$num]}" = 1 ]; then
switch_identity $num false || identity_old[$num]=$?
$RUNAS_CMD -u $ID0 ls $DIR
$RUNAS_CMD -u $ID1 ls $DIR