3 # Run select tests by setting ONLY, or as arguments to the script.
4 # Skip specific tests by setting EXCEPT.
10 # bug number for skipped test:
11 ALWAYS_EXCEPT=" $SANITY_SEC_EXCEPT"
13 # bug number for skipped test: 9145 9145 9671 9145 9145 9145 9145 9245
14 ALWAYS_EXCEPT=" 17 18 19 20 21 22 23 27 $ALWAYS_EXCEPT"
16 # UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
19 export PATH=$PWD/$SRCDIR:$SRCDIR:$PWD/$SRCDIR/../utils:$PATH:/sbin
20 export NAME=${NAME:-local}
22 LUSTRE=${LUSTRE:-$(dirname $0)/..}
23 . $LUSTRE/tests/test-framework.sh
25 . ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
28 NODEMAP_TESTS=$(seq 7 26)
30 if ! check_versions; then
31 echo "It is NOT necessary to test nodemap under interoperation mode"
32 EXCEPT="$EXCEPT $NODEMAP_TESTS"
35 [ "$SLOW" = "no" ] && EXCEPT_SLOW="26"
37 [ "$ALWAYS_EXCEPT$EXCEPT$EXCEPT_SLOW" ] &&
38 echo "Skipping tests: $ALWAYS_EXCEPT $EXCEPT $EXCEPT_SLOW"
40 RUNAS_CMD=${RUNAS_CMD:-runas}
42 WTL=${WTL:-"$LUSTRE/tests/write_time_limit"}
45 PERM_CONF=$CONFDIR/perm.conf
48 HOSTNAME_CHECKSUM=$(hostname | sum | awk '{ print $1 }')
49 SUBNET_CHECKSUM=$(expr $HOSTNAME_CHECKSUM % 250 + 1)
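# e.g. (hypothetical values) "hostname | sum" printing "37030 2" gives
# HOSTNAME_CHECKSUM=37030 and SUBNET_CHECKSUM=$((37030 % 250 + 1))=31,
# keeping the generated subnet octet in the 1-250 range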
52 NODEMAP_IPADDR_LIST="1 10 64 128 200 250"
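# test NIDs are assembled from these pieces as
# $SUBNET_CHECKSUM.<nodemap#>.<range#>.<addr>@tcp (see add_range and
# test_nid below), giving each test run a distinct address space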
55 require_dsh_mds || exit 0
56 require_dsh_ost || exit 0
58 clients=${CLIENTS//,/ }
59 num_clients=$(get_node_count ${clients})
60 clients_arr=($clients)
64 USER0=$(getent passwd | grep :$ID0:$ID0: | cut -d: -f1)
65 USER1=$(getent passwd | grep :$ID1:$ID1: | cut -d: -f1)
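# find the account whose passwd entry carries uid and gid both equal to
# $ID0 (resp. $ID1); the grep matches the uid:gid fields of getent output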
68 skip "need to add user0 ($ID0:$ID0)" && exit 0
71 skip "need to add user1 ($ID1:$ID1)" && exit 0
73 IDBASE=${IDBASE:-60000}
75 # changes to mappings must be reflected in test 23
77 [0]="$((IDBASE+3)):$((IDBASE+0)) $((IDBASE+4)):$((IDBASE+2))"
78 [1]="$((IDBASE+5)):$((IDBASE+1)) $((IDBASE+6)):$((IDBASE+2))"
81 check_and_setup_lustre
84 if [ "$I_MOUNTED" = "yes" ]; then
85 cleanupall -f || error "sec_cleanup"
90 [ -z "$(echo $DIR | grep $MOUNT)" ] &&
91 error "$DIR not in $MOUNT" && sec_cleanup && exit 1
93 [ $(echo $MOUNT | wc -w) -gt 1 ] &&
94 echo "NAME=$MOUNT mounted more than once" && sec_cleanup && exit 0
97 GSS_REF=$(lsmod | grep ^ptlrpc_gss | awk '{print $3}')
98 if [ ! -z "$GSS_REF" -a "$GSS_REF" != "0" ]; then
100 echo "with GSS support"
103 echo "without GSS support"
MDT=$(do_facet $SINGLEMDS lctl get_param -N "mdt.\*MDT0000" |
	cut -d. -f2)
108 [ -z "$MDT" ] && error "fail to get MDT device" && exit 1
109 do_facet $SINGLEMDS "mkdir -p $CONFDIR"
110 IDENTITY_FLUSH=mdt.$MDT.identity_flush
111 IDENTITY_UPCALL=mdt.$MDT.identity_upcall
122 if ! $RUNAS_CMD -u $user krb5_login.sh; then
123 error "$user login kerberos failed."
127 if ! $RUNAS_CMD -u $user -g $group ls $DIR > /dev/null 2>&1; then
128 $RUNAS_CMD -u $user lfs flushctx -k
129 $RUNAS_CMD -u $user krb5_login.sh
130 if ! $RUNAS_CMD -u$user -g$group ls $DIR > /dev/null 2>&1; then
131 error "init $user $group failed."
137 declare -a identity_old
140 for num in $(seq $MDSCOUNT); do
141 switch_identity $num true || identity_old[$num]=$?
144 if ! $RUNAS_CMD -u $ID0 ls $DIR > /dev/null 2>&1; then
145 sec_login $USER0 $USER0
148 if ! $RUNAS_CMD -u $ID1 ls $DIR > /dev/null 2>&1; then
149 sec_login $USER1 $USER1
# run as a different user
158 chmod 0755 $DIR || error "chmod (1)"
159 rm -rf $DIR/$tdir || error "rm (1)"
160 mkdir -p $DIR/$tdir || error "mkdir (1)"
161 chown $USER0 $DIR/$tdir || error "chown (2)"
162 $RUNAS_CMD -u $ID0 ls $DIR || error "ls (1)"
163 rm -f $DIR/f0 || error "rm (2)"
164 $RUNAS_CMD -u $ID0 touch $DIR/f0 && error "touch (1)"
165 $RUNAS_CMD -u $ID0 touch $DIR/$tdir/f1 || error "touch (2)"
166 $RUNAS_CMD -u $ID1 touch $DIR/$tdir/f2 && error "touch (3)"
167 touch $DIR/$tdir/f3 || error "touch (4)"
168 chown root $DIR/$tdir || error "chown (3)"
169 chgrp $USER0 $DIR/$tdir || error "chgrp (1)"
170 chmod 0775 $DIR/$tdir || error "chmod (2)"
171 $RUNAS_CMD -u $ID0 touch $DIR/$tdir/f4 || error "touch (5)"
172 $RUNAS_CMD -u $ID1 touch $DIR/$tdir/f5 && error "touch (6)"
173 touch $DIR/$tdir/f6 || error "touch (7)"
174 rm -rf $DIR/$tdir || error "rm (3)"
176 run_test 0 "uid permission ============================="
180 [ $GSS_SUP = 0 ] && skip "without GSS support." && return
185 chown $USER0 $DIR/$tdir || error "chown (1)"
186 $RUNAS_CMD -u $ID1 -v $ID0 touch $DIR/$tdir/f0 && error "touch (2)"
187 echo "enable uid $ID1 setuid"
188 do_facet $SINGLEMDS "echo '* $ID1 setuid' >> $PERM_CONF"
189 do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
190 $RUNAS_CMD -u $ID1 -v $ID0 touch $DIR/$tdir/f1 || error "touch (3)"
192 chown root $DIR/$tdir || error "chown (4)"
193 chgrp $USER0 $DIR/$tdir || error "chgrp (5)"
194 chmod 0770 $DIR/$tdir || error "chmod (6)"
195 $RUNAS_CMD -u $ID1 -g $ID1 touch $DIR/$tdir/f2 && error "touch (7)"
196 $RUNAS_CMD -u$ID1 -g$ID1 -j$ID0 touch $DIR/$tdir/f3 && error "touch (8)"
197 echo "enable uid $ID1 setuid,setgid"
198 do_facet $SINGLEMDS "echo '* $ID1 setuid,setgid' > $PERM_CONF"
199 do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
$RUNAS_CMD -u $ID1 -g $ID1 -j $ID0 touch $DIR/$tdir/f4 ||
	error "touch (9)"
$RUNAS_CMD -u $ID1 -v $ID0 -g $ID1 -j $ID0 touch $DIR/$tdir/f5 ||
	error "touch (10)"
207 do_facet $SINGLEMDS "rm -f $PERM_CONF"
208 do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
210 run_test 1 "setuid/gid ============================="
212 # bug 3285 - supplementary group should always succeed.
# NB: supplementary groups are set for the local client only; for a
# remote client, the groups of the specified uid on the MDT are
# obtained via the /sbin/l_getidentity upcall and used instead.
217 local server_version=$(lustre_version_code $SINGLEMDS)
219 [[ $server_version -ge $(version_code 2.6.93) ]] ||
220 [[ $server_version -ge $(version_code 2.5.35) &&
221 $server_version -lt $(version_code 2.5.50) ]] ||
222 { skip "Need MDS version at least 2.6.93 or 2.5.35"; return; }
226 chmod 0771 $DIR/$tdir
227 chgrp $ID0 $DIR/$tdir
228 $RUNAS_CMD -u $ID0 ls $DIR/$tdir || error "setgroups (1)"
229 do_facet $SINGLEMDS "echo '* $ID1 setgrp' > $PERM_CONF"
230 do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
231 $RUNAS_CMD -u $ID1 -G1,2,$ID0 ls $DIR/$tdir ||
232 error "setgroups (2)"
233 $RUNAS_CMD -u $ID1 -G1,2 ls $DIR/$tdir && error "setgroups (3)"
236 do_facet $SINGLEMDS "rm -f $PERM_CONF"
237 do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
239 run_test 4 "set supplementary group ==============="
246 squash_id default 99 0
247 squash_id default 99 1
248 for (( i = 0; i < NODEMAP_COUNT; i++ )); do
249 local csum=${HOSTNAME_CHECKSUM}_${i}
251 if ! do_facet mgs $LCTL nodemap_add $csum; then
255 out=$(do_facet mgs $LCTL get_param nodemap.$csum.id)
## fail if the new nodemap name is absent from the get_param output
257 [[ $(echo $out | grep -c $csum) == 0 ]] && return 1
266 for ((i = 0; i < NODEMAP_COUNT; i++)); do
267 local csum=${HOSTNAME_CHECKSUM}_${i}
269 if ! do_facet mgs $LCTL nodemap_del $csum; then
270 error "nodemap_del $csum failed with $?"
274 out=$(do_facet mgs $LCTL get_param nodemap.$csum.id)
275 [[ $(echo $out | grep -c $csum) != 0 ]] && return 1
282 local cmd="$LCTL nodemap_add_range"
286 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
287 range="$SUBNET_CHECKSUM.${2}.${j}.[1-253]@tcp"
288 if ! do_facet mgs $cmd --name $1 --range $range; then
297 local cmd="$LCTL nodemap_del_range"
301 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
302 range="$SUBNET_CHECKSUM.${2}.${j}.[1-253]@tcp"
303 if ! do_facet mgs $cmd --name $1 --range $range; then
313 local cmd="$LCTL nodemap_add_idmap"
316 for ((i = 0; i < NODEMAP_COUNT; i++)); do
319 for ((j = 500; j < NODEMAP_MAX_ID; j++)); do
local csum=${HOSTNAME_CHECKSUM}_${i}
local client_id=$j
local fs_id=$((j + 1))
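# each client id j is offset by one on the fs side, so the first pass
# maps client uid/gid 500 to filesystem uid/gid 501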
324 if ! do_facet mgs $cmd --name $csum --idtype uid \
325 --idmap $client_id:$fs_id; then
328 if ! do_facet mgs $cmd --name $csum --idtype gid \
329 --idmap $client_id:$fs_id; then
340 local cmd="$LCTL nodemap_del_idmap"
343 for ((i = 0; i < NODEMAP_COUNT; i++)); do
346 for ((j = 500; j < NODEMAP_MAX_ID; j++)); do
local csum=${HOSTNAME_CHECKSUM}_${i}
local client_id=$j
local fs_id=$((j + 1))
351 if ! do_facet mgs $cmd --name $csum --idtype uid \
352 --idmap $client_id:$fs_id; then
355 if ! do_facet mgs $cmd --name $csum --idtype gid \
356 --idmap $client_id:$fs_id; then
369 local cmd="$LCTL nodemap_modify"
372 proc[0]="admin_nodemap"
373 proc[1]="trusted_nodemap"
377 for ((idx = 0; idx < 2; idx++)); do
378 if ! do_facet mgs $cmd --name $1 --property ${option[$idx]} \
383 if ! do_facet mgs $cmd --name $1 --property ${option[$idx]} \
[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
	skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
	return
398 cmd[0]="$LCTL nodemap_modify --property squash_uid"
399 cmd[1]="$LCTL nodemap_modify --property squash_gid"
401 if ! do_facet mgs ${cmd[$3]} --name $1 --value $2; then
406 # ensure that the squash defaults are the expected defaults
407 squash_id default 99 0
408 squash_id default 99 1
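# usage sketch: squash_id <nodemap> <id> <type>, where type 0 picks
# cmd[0] (squash_uid) and type 1 picks cmd[1] (squash_gid); e.g.
# "squash_id default 99 0" resets the default uid squash to 99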
413 cmd="$LCTL nodemap_test_nid"
415 nid=$(do_facet mgs $cmd $1)
417 if [ $nid == $2 ]; then
426 local cmd="$LCTL nodemap_test_id"
## with nodemap deactivated, ids must pass through unchanged
430 if ! do_facet mgs $LCTL nodemap_activate 0; then
433 for ((id = 500; id < NODEMAP_MAX_ID; id++)); do
436 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
437 local nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
438 local fs_id=$(do_facet mgs $cmd --nid $nid \
439 --idtype uid --id $id)
440 if [ $fs_id != $id ]; then
441 echo "expected $id, got $fs_id"
448 if ! do_facet mgs $LCTL nodemap_activate 1; then
452 for ((id = 500; id < NODEMAP_MAX_ID; id++)); do
453 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
454 nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
455 fs_id=$(do_facet mgs $cmd --nid $nid \
456 --idtype uid --id $id)
457 expected_id=$((id + 1))
458 if [ $fs_id != $expected_id ]; then
459 echo "expected $expected_id, got $fs_id"
466 for ((i = 0; i < NODEMAP_COUNT; i++)); do
467 local csum=${HOSTNAME_CHECKSUM}_${i}
469 if ! do_facet mgs $LCTL nodemap_modify --name $csum \
470 --property trusted --value 1; then
471 error "nodemap_modify $csum failed with $?"
476 for ((id = 500; id < NODEMAP_MAX_ID; id++)); do
477 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
478 nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
479 fs_id=$(do_facet mgs $cmd --nid $nid \
480 --idtype uid --id $id)
481 if [ $fs_id != $id ]; then
482 echo "expected $id, got $fs_id"
## ensure admin (root access) is enabled on all nodemaps
489 for ((i = 0; i < NODEMAP_COUNT; i++)); do
490 local csum=${HOSTNAME_CHECKSUM}_${i}
492 if ! do_facet mgs $LCTL nodemap_modify --name $csum \
493 --property admin --value 1; then
494 error "nodemap_modify $csum failed with $?"
## check that root is allowed
500 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
501 nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
502 fs_id=$(do_facet mgs $cmd --nid $nid --idtype uid --id 0)
503 if [ $fs_id != 0 ]; then
504 echo "root allowed expected 0, got $fs_id"
## ensure admin (root access) is disabled on all nodemaps
510 for ((i = 0; i < NODEMAP_COUNT; i++)); do
511 local csum=${HOSTNAME_CHECKSUM}_${i}
513 if ! do_facet mgs $LCTL nodemap_modify --name $csum \
514 --property admin --value 0; then
515 error "nodemap_modify ${HOSTNAME_CHECKSUM}_${i} "
521 ## check that root is mapped to 99
522 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
523 nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
524 fs_id=$(do_facet mgs $cmd --nid $nid --idtype uid --id 0)
525 if [ $fs_id != 99 ]; then
526 error "root squash expected 99, got $fs_id"
531 ## reset client trust to 0
532 for ((i = 0; i < NODEMAP_COUNT; i++)); do
533 if ! do_facet mgs $LCTL nodemap_modify \
534 --name ${HOSTNAME_CHECKSUM}_${i} \
535 --property trusted --value 0; then
536 error "nodemap_modify ${HOSTNAME_CHECKSUM}_${i} "
548 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
	skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
	return
555 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
559 [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 2
563 run_test 7 "nodemap create and delete"
568 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
	skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
	return
577 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
583 [[ $rc == 0 ]] && error "duplicate nodemap_add allowed with $rc" &&
589 [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 3
593 run_test 8 "nodemap reject duplicates"
599 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
	skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
	return
607 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
610 for ((i = 0; i < NODEMAP_COUNT; i++)); do
611 if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
615 [[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2
618 for ((i = 0; i < NODEMAP_COUNT; i++)); do
619 if ! delete_range ${HOSTNAME_CHECKSUM}_${i} $i; then
623 [[ $rc != 0 ]] && error "nodemap_del_range failed with $rc" && return 4
628 [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4
632 run_test 9 "nodemap range add"
637 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
	skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
	return
645 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
648 for ((i = 0; i < NODEMAP_COUNT; i++)); do
649 if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
653 [[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2
656 for ((i = 0; i < NODEMAP_COUNT; i++)); do
657 if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
661 [[ $rc == 0 ]] && error "nodemap_add_range duplicate add with $rc" &&
666 for ((i = 0; i < NODEMAP_COUNT; i++)); do
667 if ! delete_range ${HOSTNAME_CHECKSUM}_${i} $i; then
671 [[ $rc != 0 ]] && error "nodemap_del_range failed with $rc" && return 4
675 [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 5
679 run_test 10a "nodemap reject duplicate ranges"
682 [ $(lustre_version_code mgs) -lt $(version_code 2.10.53) ] &&
683 skip "Need MGS >= 2.10.53" && return
687 local nids="192.168.19.[0-255]@o2ib20"
689 do_facet mgs $LCTL nodemap_del $nm1 2>/dev/null
690 do_facet mgs $LCTL nodemap_del $nm2 2>/dev/null
692 do_facet mgs $LCTL nodemap_add $nm1 || error "Add $nm1 failed"
693 do_facet mgs $LCTL nodemap_add $nm2 || error "Add $nm2 failed"
694 do_facet mgs $LCTL nodemap_add_range --name $nm1 --range $nids ||
695 error "Add range $nids to $nm1 failed"
696 [ -n "$(do_facet mgs $LCTL get_param nodemap.$nm1.* |
697 grep start_nid)" ] || error "No range was found"
698 do_facet mgs $LCTL nodemap_del_range --name $nm2 --range $nids &&
699 error "Deleting range $nids from $nm2 should fail"
700 [ -n "$(do_facet mgs $LCTL get_param nodemap.$nm1.* |
701 grep start_nid)" ] || error "Range $nids should be there"
703 do_facet mgs $LCTL nodemap_del $nm1 || error "Delete $nm1 failed"
704 do_facet mgs $LCTL nodemap_del $nm2 || error "Delete $nm2 failed"
707 run_test 10b "delete range from the correct nodemap"
712 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
	skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
	return
720 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
723 for ((i = 0; i < NODEMAP_COUNT; i++)); do
724 if ! modify_flags ${HOSTNAME_CHECKSUM}_${i}; then
728 [[ $rc != 0 ]] && error "nodemap_modify with $rc" && return 2
733 [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 3
737 run_test 11 "nodemap modify"
742 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
	skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
	return
750 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
753 for ((i = 0; i < NODEMAP_COUNT; i++)); do
754 if ! squash_id ${HOSTNAME_CHECKSUM}_${i} 88 0; then
758 [[ $rc != 0 ]] && error "nodemap squash_uid with $rc" && return 2
761 for ((i = 0; i < NODEMAP_COUNT; i++)); do
762 if ! squash_id ${HOSTNAME_CHECKSUM}_${i} 88 1; then
766 [[ $rc != 0 ]] && error "nodemap squash_gid with $rc" && return 3
771 [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4
775 run_test 12 "nodemap set squash ids"
780 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
	skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
	return
788 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
791 for ((i = 0; i < NODEMAP_COUNT; i++)); do
792 if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
796 [[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2
799 for ((i = 0; i < NODEMAP_COUNT; i++)); do
800 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
801 for k in $NODEMAP_IPADDR_LIST; do
802 if ! test_nid $SUBNET_CHECKSUM.$i.$j.$k \
803 ${HOSTNAME_CHECKSUM}_${i}; then
809 [[ $rc != 0 ]] && error "nodemap_test_nid failed with $rc" && return 3
814 [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4
818 run_test 13 "test nids"
823 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
	skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
	return
831 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
834 for ((i = 0; i < NODEMAP_COUNT; i++)); do
835 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
836 for k in $NODEMAP_IPADDR_LIST; do
837 if ! test_nid $SUBNET_CHECKSUM.$i.$j.$k \
844 [[ $rc != 0 ]] && error "nodemap_test_nid failed with $rc" && return 3
849 [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4
853 run_test 14 "test default nodemap nid lookup"
858 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
	skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
	return
866 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
869 for ((i = 0; i < NODEMAP_COUNT; i++)); do
870 if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
874 [[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2
879 [[ $rc != 0 ]] && error "nodemap_add_idmap failed with $rc" && return 3
884 [[ $rc != 0 ]] && error "nodemap_test_id failed with $rc" && return 4
889 [[ $rc != 0 ]] && error "nodemap_del_idmap failed with $rc" && return 5
894 [[ $rc != 0 ]] && error "nodemap_delete failed with $rc" && return 6
898 run_test 15 "test id mapping"
wait_nm_sync() {
	local nodemap_name=$1
	local key=$2
	local value=$3
904 local proc_param="${nodemap_name}.${key}"
905 [ "$nodemap_name" == "active" ] && proc_param="active"
907 local is_active=$(do_facet mgs $LCTL get_param -n nodemap.active)
908 (( is_active == 0 )) && [ "$proc_param" != "active" ] && return
914 local mgs_ip=$(host_nids_address $mgs_HOST $NETTYPE | cut -d' ' -f1)
917 if [ -z "$value" ]; then
918 out1=$(do_facet mgs $LCTL get_param nodemap.${proc_param})
919 echo "On MGS ${mgs_ip}, ${proc_param} = $out1"
924 # wait up to 10 seconds for other servers to sync with mgs
925 for i in $(seq 1 10); do
926 for node in $(all_server_nodes); do
927 local node_ip=$(host_nids_address $node $NETTYPE |
931 if [ -z "$value" ]; then
932 [ $node_ip == $mgs_ip ] && continue
935 out2=$(do_node $node_ip $LCTL get_param \
936 nodemap.$proc_param 2>/dev/null)
937 echo "On $node ${node_ip}, ${proc_param} = $out2"
938 [ "$out1" != "$out2" ] && is_sync=false && break
echo "OTHER - IP: $node_ip"
948 error "mgs and $nodemap_name ${key} mismatch, $i attempts"
950 echo "waited $((i - 1)) seconds for sync"
create_fops_nodemaps() {
	local i=0
	local client

	for client in $clients; do
957 local client_ip=$(host_nids_address $client $NETTYPE)
958 local client_nid=$(h2nettype $client_ip)
959 do_facet mgs $LCTL nodemap_add c${i} || return 1
960 do_facet mgs $LCTL nodemap_add_range \
961 --name c${i} --range $client_nid || return 1
962 for map in ${FOPS_IDMAPS[i]}; do
963 do_facet mgs $LCTL nodemap_add_idmap --name c${i} \
964 --idtype uid --idmap ${map} || return 1
965 do_facet mgs $LCTL nodemap_add_idmap --name c${i} \
			--idtype gid --idmap ${map} || return 1
		done

		wait_nm_sync c$i idmap
		i=$((i + 1))
	done

	return 0
}
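# tear down the per-client nodemaps (c0..c<n-1>) built by
# create_fops_nodemaps above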
delete_fops_nodemaps() {
	local i=0
	local client

	for client in $clients; do
		do_facet mgs $LCTL nodemap_del c${i} || return 1
		i=$((i + 1))
	done

	return 0
}
nodemap_test_mkdir() {
	if [ $MDSCOUNT -le 1 ]; then
989 do_node ${clients_arr[0]} mkdir -p $DIR/$tdir
991 # round-robin MDTs to test DNE nodemap support
992 [ ! -d $DIR ] && do_node ${clients_arr[0]} mkdir -p $DIR
993 do_node ${clients_arr[0]} $LFS setdirstripe -c 1 -i \
994 $((fops_mds_index % MDSCOUNT)) $DIR/$tdir
# fops test directory needs to be initialized on a privileged client
fops_test_setup() {
	local admin=$(do_facet mgs $LCTL get_param -n nodemap.c0.admin_nodemap)
1002 local trust=$(do_facet mgs $LCTL get_param -n \
1003 nodemap.c0.trusted_nodemap)
1005 do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
1006 do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
1008 wait_nm_sync c0 admin_nodemap
1009 wait_nm_sync c0 trusted_nodemap
do_node ${clients_arr[0]} rm -rf $DIR/$tdir
nodemap_test_mkdir
do_node ${clients_arr[0]} chown $user $DIR/$tdir
1015 do_facet mgs $LCTL nodemap_modify --name c0 \
1016 --property admin --value $admin
1017 do_facet mgs $LCTL nodemap_modify --name c0 \
1018 --property trusted --value $trust
1020 # flush MDT locks to make sure they are reacquired before test
1021 do_node ${clients_arr[0]} $LCTL set_param \
1022 ldlm.namespaces.$FSNAME-MDT*.lru_size=clear
1024 wait_nm_sync c0 admin_nodemap
1025 wait_nm_sync c0 trusted_nodemap
1028 # fileset test directory needs to be initialized on a privileged client
1029 fileset_test_setup() {
1030 local admin=$(do_facet mgs $LCTL get_param -n nodemap.c0.admin_nodemap)
1031 local trust=$(do_facet mgs $LCTL get_param -n \
1032 nodemap.c0.trusted_nodemap)
1034 do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
1035 do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
1037 wait_nm_sync c0 admin_nodemap
1038 wait_nm_sync c0 trusted_nodemap
1040 # create directory and populate it for subdir mount
1041 do_node ${clients_arr[0]} mkdir $MOUNT/$subdir ||
1042 error "unable to create dir $MOUNT/$subdir"
1043 do_node ${clients_arr[0]} touch $MOUNT/$subdir/this_is_$subdir ||
1044 error "unable to create file $MOUNT/$subdir/this_is_$subdir"
1045 do_node ${clients_arr[0]} mkdir $MOUNT/$subdir/$subsubdir ||
1046 error "unable to create dir $MOUNT/$subdir/$subsubdir"
1047 do_node ${clients_arr[0]} touch \
1048 $MOUNT/$subdir/$subsubdir/this_is_$subsubdir ||
1049 error "unable to create file \
1050 $MOUNT/$subdir/$subsubdir/this_is_$subsubdir"
1052 do_facet mgs $LCTL nodemap_modify --name c0 \
1053 --property admin --value $admin
1054 do_facet mgs $LCTL nodemap_modify --name c0 \
1055 --property trusted --value $trust
1057 # flush MDT locks to make sure they are reacquired before test
1058 do_node ${clients_arr[0]} $LCTL set_param \
1059 ldlm.namespaces.$FSNAME-MDT*.lru_size=clear
1061 wait_nm_sync c0 admin_nodemap
1062 wait_nm_sync c0 trusted_nodemap
1065 # fileset test directory needs to be initialized on a privileged client
1066 fileset_test_cleanup() {
1067 local admin=$(do_facet mgs $LCTL get_param -n nodemap.c0.admin_nodemap)
1068 local trust=$(do_facet mgs $LCTL get_param -n \
1069 nodemap.c0.trusted_nodemap)
1071 do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
1072 do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
1074 wait_nm_sync c0 admin_nodemap
1075 wait_nm_sync c0 trusted_nodemap
1077 # cleanup directory created for subdir mount
1078 do_node ${clients_arr[0]} rm -rf $MOUNT/$subdir ||
1079 error "unable to remove dir $MOUNT/$subdir"
1081 do_facet mgs $LCTL nodemap_modify --name c0 \
1082 --property admin --value $admin
1083 do_facet mgs $LCTL nodemap_modify --name c0 \
1084 --property trusted --value $trust
1086 # flush MDT locks to make sure they are reacquired before test
1087 do_node ${clients_arr[0]} $LCTL set_param \
1088 ldlm.namespaces.$FSNAME-MDT*.lru_size=clear
1090 wait_nm_sync c0 admin_nodemap
1091 wait_nm_sync c0 trusted_nodemap
do_create_delete() {
	local run_u="$1"
	local key="$2"
	local testfile=$DIR/$tdir/$tfile
1101 if $run_u touch $testfile >& /dev/null; then
1103 $run_u rm $testfile && d=1
1107 local expected=$(get_cr_del_expected $key)
1108 [ "$res" != "$expected" ] &&
1109 error "test $key, wanted $expected, got $res" && rc=$((rc + 1))
nodemap_check_quota() {
	local run_u="$1"

	$run_u lfs quota -q $DIR | awk '{ print $2; exit; }'
do_fops_quota_test() {
	local run_u="$1"
	# fuzz quota used to account for possible indirect blocks, etc
	local quota_fuzz=$(fs_log_size)
1122 local qused_orig=$(nodemap_check_quota "$run_u")
1123 local qused_high=$((qused_orig + quota_fuzz))
1124 local qused_low=$((qused_orig - quota_fuzz))
1125 local testfile=$DIR/$tdir/$tfile
1126 $run_u dd if=/dev/zero of=$testfile oflag=sync bs=1M count=1 \
1127 >& /dev/null || error "unable to write quota test file"
1128 sync; sync_all_data || true
1130 local qused_new=$(nodemap_check_quota "$run_u")
1131 [ $((qused_new)) -lt $((qused_low + 1024)) -o \
1132 $((qused_new)) -gt $((qused_high + 1024)) ] &&
1133 error "$qused_new != $qused_orig + 1M after write, " \
1134 "fuzz is $quota_fuzz"
1135 $run_u rm $testfile || error "unable to remove quota test file"
1136 wait_delete_completed_mds
1138 qused_new=$(nodemap_check_quota "$run_u")
1139 [ $((qused_new)) -lt $((qused_low)) \
1140 -o $((qused_new)) -gt $((qused_high)) ] &&
1141 error "quota not reclaimed, expect $qused_orig, " \
1142 "got $qused_new, fuzz $quota_fuzz"
get_fops_mapped_user() {
	local cli_user=$1

	for ((i=0; i < ${#FOPS_IDMAPS[@]}; i++)); do
1149 for map in ${FOPS_IDMAPS[i]}; do
1150 if [ $(cut -d: -f1 <<< "$map") == $cli_user ]; then
1151 cut -d: -f2 <<< "$map"
1159 get_cr_del_expected() {
1161 IFS=":" read -a key <<< "$1"
1162 local mapmode="${key[0]}"
1163 local mds_user="${key[1]}"
1164 local cluster="${key[2]}"
1165 local cli_user="${key[3]}"
1166 local mode="0${key[4]}"
1173 [[ $mapmode == *mapped* ]] && mapped=1
1174 # only c1 is mapped in these test cases
1175 [[ $mapmode == mapped_trusted* ]] && [ "$cluster" == "c0" ] && mapped=0
1176 [[ $mapmode == *noadmin* ]] && noadmin=1
1178 # o+wx works as long as the user isn't mapped
1179 if [ $((mode & 3)) -eq 3 ]; then
1183 # if client user is root, check if root is squashed
1184 if [ "$cli_user" == "0" ]; then
# squashed root succeeds only if the "other" bits allow access
1188 1) [ "$other" == "1" ] && echo $SUCCESS
1189 [ "$other" == "0" ] && echo $FAILURE;;
1193 if [ "$mapped" == "0" ]; then
1194 [ "$other" == "1" ] && echo $SUCCESS
1195 [ "$other" == "0" ] && echo $FAILURE
# if the mapped user is the MDS user, check for u+wx
1200 mapped_user=$(get_fops_mapped_user $cli_user)
1201 [ "$mapped_user" == "-1" ] &&
1202 error "unable to find mapping for client user $cli_user"
1204 if [ "$mapped_user" == "$mds_user" -a \
1205 $(((mode & 0300) == 0300)) -eq 1 ]; then
1209 if [ "$mapped_user" != "$mds_user" -a "$other" == "1" ]; then
1216 test_fops_admin_cli_i=""
1217 test_fops_chmod_dir() {
local current_cli_i=$1
local perm_bits=$2
local dir_to_chmod=$3
1221 local new_admin_cli_i=""
1223 # do we need to set up a new admin client?
1224 [ "$current_cli_i" == "0" ] && [ "$test_fops_admin_cli_i" != "1" ] &&
1226 [ "$current_cli_i" != "0" ] && [ "$test_fops_admin_cli_i" != "0" ] &&
# if there is only one client, and it is non-admin, flip admin every time
1230 if [ "$num_clients" == "1" ]; then
1231 test_fops_admin_client=$clients
1232 test_fops_admin_val=$(do_facet mgs $LCTL get_param -n \
1233 nodemap.c0.admin_nodemap)
1234 if [ "$test_fops_admin_val" != "1" ]; then
			do_facet mgs $LCTL nodemap_modify \
				--name c0 --property admin --value 1
			wait_nm_sync c0 admin_nodemap
		fi
1241 elif [ "$new_admin_cli_i" != "" ]; then
1242 # restore admin val to old admin client
1243 if [ "$test_fops_admin_cli_i" != "" ] &&
1244 [ "$test_fops_admin_val" != "1" ]; then
1245 do_facet mgs $LCTL nodemap_modify \
				--name c${test_fops_admin_cli_i} \
				--property admin \
				--value $test_fops_admin_val
1249 wait_nm_sync c${test_fops_admin_cli_i} admin_nodemap
1252 test_fops_admin_cli_i=$new_admin_cli_i
1253 test_fops_admin_client=${clients_arr[$new_admin_cli_i]}
1254 test_fops_admin_val=$(do_facet mgs $LCTL get_param -n \
1255 nodemap.c${new_admin_cli_i}.admin_nodemap)
1257 if [ "$test_fops_admin_val" != "1" ]; then
1258 do_facet mgs $LCTL nodemap_modify \
				--name c${new_admin_cli_i} \
				--property admin \
				--value 1
1266 do_node $test_fops_admin_client chmod $perm_bits $DIR/$tdir || return 1
1268 # remove admin for single client if originally non-admin
1269 if [ "$num_clients" == "1" ] && [ "$test_fops_admin_val" != "1" ]; then
		do_facet mgs $LCTL nodemap_modify --name c0 --property admin \
			--value 0
		wait_nm_sync c0 admin_nodemap
	fi

	return 0
}

test_fops() {
	local mapmode="$1"
	local single_client="$2"
1281 local client_user_list=([0]="0 $((IDBASE+3)) $((IDBASE+4))"
1282 [1]="0 $((IDBASE+5)) $((IDBASE+6))")
1285 local perm_bit_list="0 3 $((0300)) $((0303))"
1286 # SLOW tests 000-007, 010-070, 100-700 (octal modes)
1287 [ "$SLOW" == "yes" ] &&
1288 perm_bit_list="0 $(seq 1 7) $(seq 8 8 63) $(seq 64 64 511) \
# step through MDS users; -1 means root
1292 for mds_i in -1 0 1 2; do
1293 local user=$((mds_i + IDBASE))
1297 [ "$mds_i" == "-1" ] && user=0
	echo mkdir -p $DIR/$tdir
	fops_test_setup
	local cli_i=0
	for client in $clients; do
1304 for u in ${client_user_list[$cli_i]}; do
1305 local run_u="do_node $client \
1306 $RUNAS_CMD -u$u -g$u -G$u"
1307 for perm_bits in $perm_bit_list; do
1308 local mode=$(printf %03o $perm_bits)
1310 key="$mapmode:$user:c$cli_i:$u:$mode"
				test_fops_chmod_dir $cli_i $mode \
					$DIR/$tdir ||
						error "cannot chmod $key"
1314 do_create_delete "$run_u" "$key"
1318 test_fops_chmod_dir $cli_i 777 $DIR/$tdir ||
error "cannot chmod $key"
1320 do_fops_quota_test "$run_u"
1323 cli_i=$((cli_i + 1))
1324 [ "$single_client" == "1" ] && break
1331 nodemap_version_check () {
1332 remote_mgs_nodsh && skip "remote MGS with nodsh" && return 1
	[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
		return 1
1339 nodemap_test_setup() {
1341 local active_nodemap=1
1343 [ "$1" == "0" ] && active_nodemap=0
1345 do_nodes $(comma_list $(all_mdts_nodes)) \
1346 $LCTL set_param mdt.*.identity_upcall=NONE
	create_fops_nodemaps
	rc=$?
	[[ $rc != 0 ]] && error "adding fops nodemaps failed $rc"
1353 do_facet mgs $LCTL nodemap_activate $active_nodemap
1356 do_facet mgs $LCTL nodemap_modify --name default \
1357 --property admin --value 1
1358 do_facet mgs $LCTL nodemap_modify --name default \
1359 --property trusted --value 1
1360 wait_nm_sync default trusted_nodemap
1363 nodemap_test_cleanup() {
	delete_fops_nodemaps
	rc=$?
	[[ $rc != 0 ]] && error "removing fops nodemaps failed $rc"
1369 do_facet mgs $LCTL nodemap_modify --name default \
1370 --property admin --value 0
1371 do_facet mgs $LCTL nodemap_modify --name default \
1372 --property trusted --value 0
1373 wait_nm_sync default trusted_nodemap
1375 do_facet mgs $LCTL nodemap_activate 0
1376 wait_nm_sync active 0
1378 export SK_UNIQUE_NM=false
nodemap_clients_admin_trusted() {
	local admin=$1
	local tr=$2
	local i=0

	for client in $clients; do
		do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property admin --value $admin
		do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property trusted --value $tr
		i=$((i + 1))
	done
1393 wait_nm_sync c$((i - 1)) admin_nodemap
1394 wait_nm_sync c$((i - 1)) trusted_nodemap
1398 nodemap_version_check || return 0
1399 nodemap_test_setup 0
	trap nodemap_test_cleanup EXIT
	test_fops all_off
	nodemap_test_cleanup
1405 run_test 16 "test nodemap all_off fileops"
	nodemap_version_check || return 0
	nodemap_test_setup

	trap nodemap_test_cleanup EXIT
1412 nodemap_clients_admin_trusted 0 1
1413 test_fops trusted_noadmin 1
1414 nodemap_test_cleanup
1416 run_test 17 "test nodemap trusted_noadmin fileops"
	nodemap_version_check || return 0
	nodemap_test_setup

	trap nodemap_test_cleanup EXIT
1423 nodemap_clients_admin_trusted 0 0
1424 test_fops mapped_noadmin 1
1425 nodemap_test_cleanup
1427 run_test 18 "test nodemap mapped_noadmin fileops"
	nodemap_version_check || return 0
	nodemap_test_setup

	trap nodemap_test_cleanup EXIT
1434 nodemap_clients_admin_trusted 1 1
1435 test_fops trusted_admin 1
1436 nodemap_test_cleanup
1438 run_test 19 "test nodemap trusted_admin fileops"
	nodemap_version_check || return 0
	nodemap_test_setup

	trap nodemap_test_cleanup EXIT
1445 nodemap_clients_admin_trusted 1 0
1446 test_fops mapped_admin 1
1447 nodemap_test_cleanup
1449 run_test 20 "test nodemap mapped_admin fileops"
	nodemap_version_check || return 0
	nodemap_test_setup

	trap nodemap_test_cleanup EXIT
	local x=1
	local i=0
1458 for client in $clients; do
1459 do_facet mgs $LCTL nodemap_modify --name c${i} \
1460 --property admin --value 0
1461 do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property trusted --value $x
		x=0
		i=$((i + 1))
	done

	wait_nm_sync c$((i - 1)) trusted_nodemap
1468 test_fops mapped_trusted_noadmin
1469 nodemap_test_cleanup
1471 run_test 21 "test nodemap mapped_trusted_noadmin fileops"
	nodemap_version_check || return 0
	nodemap_test_setup

	trap nodemap_test_cleanup EXIT
	local x=1
	local i=0
1480 for client in $clients; do
1481 do_facet mgs $LCTL nodemap_modify --name c${i} \
1482 --property admin --value 1
1483 do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property trusted --value $x
		x=0
		i=$((i + 1))
	done

	wait_nm_sync c$((i - 1)) trusted_nodemap
1490 test_fops mapped_trusted_admin
1491 nodemap_test_cleanup
1493 run_test 22 "test nodemap mapped_trusted_admin fileops"
1495 # acl test directory needs to be initialized on a privileged client
1496 nodemap_acl_test_setup() {
1497 local admin=$(do_facet mgs $LCTL get_param -n nodemap.c0.admin_nodemap)
1498 local trust=$(do_facet mgs $LCTL get_param -n \
1499 nodemap.c0.trusted_nodemap)
1501 do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
1502 do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
1504 wait_nm_sync c0 admin_nodemap
1505 wait_nm_sync c0 trusted_nodemap
	do_node ${clients_arr[0]} rm -rf $DIR/$tdir
	nodemap_test_mkdir
	do_node ${clients_arr[0]} chmod a+rwx $DIR/$tdir ||
		error "unable to chmod a+rwx test dir $DIR/$tdir"
1512 do_facet mgs $LCTL nodemap_modify --name c0 \
1513 --property admin --value $admin
1514 do_facet mgs $LCTL nodemap_modify --name c0 \
1515 --property trusted --value $trust
1517 wait_nm_sync c0 trusted_nodemap
1520 # returns 0 if the number of ACLs does not change on the second (mapped) client
1521 # after being set on the first client
nodemap_acl_test() {
	local user="$1"
	local set_client="$2"
1525 local get_client="$3"
1526 local check_setfacl="$4"
1527 local setfacl_error=0
1528 local testfile=$DIR/$tdir/$tfile
	local RUNAS_USER="$RUNAS_CMD -u $user"
	local acl_count=0
	local acl_count_post=0
1533 nodemap_acl_test_setup
1536 do_node $set_client $RUNAS_USER touch $testfile
1538 # ACL masks aren't filtered by nodemap code, so we ignore them
	acl_count=$(do_node $get_client getfacl $testfile | grep -v mask |
		wc -l)
	do_node $set_client $RUNAS_USER setfacl -m $user:rwx $testfile ||
		setfacl_error=1
# if check_setfacl is 1, the setfacl above is expected to fail
1545 if [ "$check_setfacl" == "1" ]; then
1546 [ "$setfacl_error" != "1" ] && return 1
1549 [ "$setfacl_error" == "1" ] && echo "WARNING: unable to setfacl"
	acl_count_post=$(do_node $get_client getfacl $testfile | grep -v mask |
		wc -l)
	[ $acl_count -eq $acl_count_post ] && return 0
	return 1
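# usage sketch: nodemap_acl_test <user> <set_client> <get_client>
# [<check_setfacl>]; callers pass check_setfacl=1 when the setfacl
# itself is expected to fail on the setting client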
	nodemap_version_check || return 0
	nodemap_test_setup

	trap nodemap_test_cleanup EXIT
1562 # 1 trusted cluster, 1 mapped cluster
1563 local unmapped_fs=$((IDBASE+0))
1564 local unmapped_c1=$((IDBASE+5))
1565 local mapped_fs=$((IDBASE+2))
1566 local mapped_c0=$((IDBASE+4))
1567 local mapped_c1=$((IDBASE+6))
1569 do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
1570 do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
1572 do_facet mgs $LCTL nodemap_modify --name c1 --property admin --value 0
1573 do_facet mgs $LCTL nodemap_modify --name c1 --property trusted --value 0
1575 wait_nm_sync c1 trusted_nodemap
1577 # setfacl on trusted cluster to unmapped user, verify it's not seen
1578 nodemap_acl_test $unmapped_fs ${clients_arr[0]} ${clients_arr[1]} ||
1579 error "acl count (1)"
1581 # setfacl on trusted cluster to mapped user, verify it's seen
1582 nodemap_acl_test $mapped_fs ${clients_arr[0]} ${clients_arr[1]} &&
1583 error "acl count (2)"
1585 # setfacl on mapped cluster to mapped user, verify it's seen
1586 nodemap_acl_test $mapped_c1 ${clients_arr[1]} ${clients_arr[0]} &&
1587 error "acl count (3)"
1589 # setfacl on mapped cluster to unmapped user, verify error
1590 nodemap_acl_test $unmapped_fs ${clients_arr[1]} ${clients_arr[0]} 1 ||
1591 error "acl count (4)"
1594 do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 0
1595 do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 0
1597 wait_nm_sync c0 trusted_nodemap
1599 # setfacl to mapped user on c1, also mapped to c0, verify it's seen
1600 nodemap_acl_test $mapped_c1 ${clients_arr[1]} ${clients_arr[0]} &&
1601 error "acl count (5)"
1603 # setfacl to mapped user on c1, not mapped to c0, verify not seen
1604 nodemap_acl_test $unmapped_c1 ${clients_arr[1]} ${clients_arr[0]} ||
1605 error "acl count (6)"
1607 nodemap_test_cleanup
1609 run_test 23 "test mapped ACLs"
	nodemap_version_check || return 0
	nodemap_test_setup

	trap nodemap_test_cleanup EXIT
1615 do_nodes $(comma_list $(all_server_nodes)) $LCTL get_param -R nodemap ||
1616 error "proc readable file read failed"
1618 nodemap_test_cleanup
1620 run_test 24 "check nodemap proc files for LBUGs and Oopses"
1623 local tmpfile=$(mktemp)
1624 local tmpfile2=$(mktemp)
1625 local tmpfile3=$(mktemp)
1626 local tmpfile4=$(mktemp)
1630 nodemap_version_check || return 0
1632 # stop clients for this test
1633 zconf_umount_clients $CLIENTS $MOUNT ||
1634 error "unable to umount clients $CLIENTS"
1636 export SK_UNIQUE_NM=true
	# enable trusted/admin for setquota call in cleanup_and_setup_lustre()
	local i=0
	for client in $clients; do
1642 do_facet mgs $LCTL nodemap_modify --name c${i} \
1643 --property admin --value 1
1644 do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property trusted --value 1
		i=$((i + 1))
	done

	wait_nm_sync c$((i - 1)) trusted_nodemap
1650 trap nodemap_test_cleanup EXIT
1652 # create a new, empty nodemap, and add fileset info to it
1653 do_facet mgs $LCTL nodemap_add test25 ||
1654 error "unable to create nodemap $testname"
1655 do_facet mgs $LCTL set_param -P nodemap.$testname.fileset=/$subdir ||
1656 error "unable to add fileset info to nodemap test25"
1658 wait_nm_sync test25 id
1660 do_facet mgs $LCTL nodemap_info > $tmpfile
1661 do_facet mds $LCTL nodemap_info > $tmpfile2
1663 if ! $SHARED_KEY; then
1664 # will conflict with SK's nodemaps
1665 cleanup_and_setup_lustre
1667 # stop clients for this test
1668 zconf_umount_clients $CLIENTS $MOUNT ||
1669 error "unable to umount clients $CLIENTS"
1671 do_facet mgs $LCTL nodemap_info > $tmpfile3
1672 diff -q $tmpfile3 $tmpfile >& /dev/null ||
1673 error "nodemap_info diff on MGS after remount"
1675 do_facet mds $LCTL nodemap_info > $tmpfile4
1676 diff -q $tmpfile4 $tmpfile2 >& /dev/null ||
1677 error "nodemap_info diff on MDS after remount"
1680 do_facet mgs $LCTL nodemap_del test25 ||
1681 error "cannot delete nodemap test25 from config"
1682 nodemap_test_cleanup
1683 # restart clients previously stopped
1684 zconf_mount_clients $CLIENTS $MOUNT ||
1685 error "unable to mount clients $CLIENTS"
rm -f $tmpfile $tmpfile2 $tmpfile3 $tmpfile4
1688 export SK_UNIQUE_NM=false
1690 run_test 25 "test save and reload nodemap config"
1693 nodemap_version_check || return 0
1697 do_facet mgs "seq -f 'c%g' $large_i | xargs -n1 $LCTL nodemap_add"
1698 wait_nm_sync c$large_i admin_nodemap
1700 do_facet mgs "seq -f 'c%g' $large_i | xargs -n1 $LCTL nodemap_del"
1701 wait_nm_sync c$large_i admin_nodemap
1703 run_test 26 "test transferring very large nodemap"
1707 local subsubdir=c0subdir
1708 local fileset_on_mgs=""
1712 if $SHARED_KEY; then
1713 export SK_UNIQUE_NM=true
1715 # will conflict with SK's nodemaps
1716 trap nodemap_test_cleanup EXIT
1721 # add fileset info to nodemap
1722 do_facet mgs $LCTL set_param -P nodemap.c0.fileset=/$subdir ||
1723 error "unable to add fileset info to nodemap c0"
1724 wait_nm_sync c0 fileset "nodemap.c0.fileset=/$subdir"
1727 zconf_umount_clients ${clients_arr[0]} $MOUNT ||
1728 error "unable to umount client ${clients_arr[0]}"
1729 # set some generic fileset to trigger SSK code
1731 zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
1732 error "unable to remount client ${clients_arr[0]}"
1735 # test mount point content
1736 do_node ${clients_arr[0]} test -f $MOUNT/this_is_$subdir ||
1737 error "fileset not taken into account"
1739 # re-mount client with sub-subdir
1740 zconf_umount_clients ${clients_arr[0]} $MOUNT ||
1741 error "unable to umount client ${clients_arr[0]}"
1742 export FILESET=/$subsubdir
1743 zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
1744 error "unable to remount client ${clients_arr[0]}"
1747 # test mount point content
1748 do_node ${clients_arr[0]} test -f $MOUNT/this_is_$subsubdir ||
1749 error "subdir of fileset not taken into account"
1751 # remove fileset info from nodemap
1752 do_facet mgs $LCTL nodemap_set_fileset --name c0 --fileset \'\' ||
1753 error "unable to delete fileset info on nodemap c0"
	fileset_on_mgs=$(do_facet mgs $LCTL get_param nodemap.c0.fileset)
	local loop=0
	while [ "${fileset_on_mgs}" != "nodemap.c0.fileset=" ]; do
1756 if [ $loop -eq 10 ]; then
1757 error "On MGS, fileset cannnot be cleared"
1761 echo "On MGS, fileset is still ${fileset_on_mgs}, waiting..."
1764 fileset_on_mgs=$(do_facet mgs $LCTL get_param nodemap.c0.fileset)
1766 do_facet mgs $LCTL set_param -P nodemap.c0.fileset=\'\' ||
1767 error "unable to reset fileset info on nodemap c0"
1768 wait_nm_sync c0 fileset
1771 zconf_umount_clients ${clients_arr[0]} $MOUNT ||
1772 error "unable to umount client ${clients_arr[0]}"
1773 zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
1774 error "unable to remount client ${clients_arr[0]}"
1776 # test mount point content
1777 do_node ${clients_arr[0]} test -d $MOUNT/$subdir ||
1778 (ls $MOUNT ; error "fileset not cleared on nodemap c0")
1780 # back to non-nodemap setup
1781 if $SHARED_KEY; then
1782 export SK_UNIQUE_NM=false
1783 zconf_umount_clients ${clients_arr[0]} $MOUNT ||
1784 error "unable to umount client ${clients_arr[0]}"
1786 fileset_test_cleanup
1787 nodemap_test_cleanup
1788 if $SHARED_KEY; then
1789 zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
1790 error "unable to remount client ${clients_arr[0]}"
1793 run_test 27 "test fileset in nodemap"
1796 if ! $SHARED_KEY; then
1797 skip "need shared key feature for this test" && return
1799 mkdir -p $DIR/$tdir || error "mkdir failed"
1800 touch $DIR/$tdir/$tdir.out || error "touch failed"
1801 if [ ! -f $DIR/$tdir/$tdir.out ]; then
1802 error "read before rotation failed"
1804 # store top key identity to ensure rotation has occurred
1805 SK_IDENTITY_OLD=$(lctl get_param *.*.*srpc* | grep "expire" |
1806 head -1 | awk '{print $15}' | cut -c1-8)
1807 do_facet $SINGLEMDS lfs flushctx ||
1808 error "could not run flushctx on $SINGLEMDS"
1810 lfs flushctx || error "could not run flushctx on client"
1812 # verify new key is in place
1813 SK_IDENTITY_NEW=$(lctl get_param *.*.*srpc* | grep "expire" |
1814 head -1 | awk '{print $15}' | cut -c1-8)
1815 if [ $SK_IDENTITY_OLD == $SK_IDENTITY_NEW ]; then
1816 error "key did not rotate correctly"
1818 if [ ! -f $DIR/$tdir/$tdir.out ]; then
1819 error "read after rotation failed"
1822 run_test 28 "check shared key rotation method"
1825 if ! $SHARED_KEY; then
1826 skip "need shared key feature for this test" && return
1828 if [ $SK_FLAVOR != "ski" ] && [ $SK_FLAVOR != "skpi" ]; then
1829 skip "test only valid if integrity is active"
1832 mkdir $DIR/$tdir || error "mkdir"
1833 touch $DIR/$tdir/$tfile || error "touch"
1834 zconf_umount_clients ${clients_arr[0]} $MOUNT ||
1835 error "unable to umount clients"
1836 keyctl show | awk '/lustre/ { print $1 }' |
1837 xargs -IX keyctl unlink X
1838 OLD_SK_PATH=$SK_PATH
1839 export SK_PATH=/dev/null
1840 if zconf_mount_clients ${clients_arr[0]} $MOUNT; then
1841 export SK_PATH=$OLD_SK_PATH
1842 if [ -e $DIR/$tdir/$tfile ]; then
1843 error "able to mount and read without key"
1845 error "able to mount without key"
1848 export SK_PATH=$OLD_SK_PATH
1849 keyctl show | awk '/lustre/ { print $1 }' |
1850 xargs -IX keyctl unlink X
1853 run_test 29 "check for missing shared key"
1856 if ! $SHARED_KEY; then
1857 skip "need shared key feature for this test" && return
1859 if [ $SK_FLAVOR != "ski" ] && [ $SK_FLAVOR != "skpi" ]; then
1860 skip "test only valid if integrity is active"
1862 mkdir -p $DIR/$tdir || error "mkdir failed"
1863 touch $DIR/$tdir/$tdir.out || error "touch failed"
1864 zconf_umount_clients ${clients_arr[0]} $MOUNT ||
1865 error "unable to umount clients"
# unload keys from the keyring
1867 keyctl show | awk '/lustre/ { print $1 }' |
1868 xargs -IX keyctl unlink X
1869 # invalidate the key with bogus filesystem name
1870 lgss_sk -w $SK_PATH/$FSNAME-bogus.key -f $FSNAME.bogus \
1871 -t client -d /dev/urandom || error "lgss_sk failed (1)"
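	# write (-w) a client key generated for the deliberately wrong
	# fsname "$FSNAME.bogus" so that the mount attempt below must fail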
1872 do_facet $SINGLEMDS lfs flushctx || error "could not run flushctx"
1873 OLD_SK_PATH=$SK_PATH
1874 export SK_PATH=$SK_PATH/$FSNAME-bogus.key
1875 if zconf_mount_clients ${clients_arr[0]} $MOUNT; then
1876 SK_PATH=$OLD_SK_PATH
if [ -e $DIR/$tdir/$tdir.out ]; then
1878 error "mount and read file with invalid key"
1880 error "mount with invalid key"
1883 SK_PATH=$OLD_SK_PATH
1884 zconf_umount_clients ${clients_arr[0]} $MOUNT ||
1885 error "unable to umount clients"
1887 run_test 30 "check for invalid shared key"
1889 log "cleanup: ======================================================"
## deactivate nodemap
1893 do_facet mgs $LCTL nodemap_activate 0
1895 for num in $(seq $MDSCOUNT); do
1896 if [ "${identity_old[$num]}" = 1 ]; then
1897 switch_identity $num false || identity_old[$num]=$?
1901 $RUNAS_CMD -u $ID0 ls $DIR
1902 $RUNAS_CMD -u $ID1 ls $DIR