# Run select tests by setting ONLY, or as arguments to the script.
# Skip specific tests by setting EXCEPT.

# bug numbers for skipped tests: 19430 19967 19967
ALWAYS_EXCEPT=" 2 5 6 $SANITY_SEC_EXCEPT"

# bug numbers for skipped tests: 9145 9145 9671 9145 9145 9145 9145 9245
ALWAYS_EXCEPT=" 17 18 19 20 21 22 23 27 $ALWAYS_EXCEPT"

# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
export PATH=$PWD/$SRCDIR:$SRCDIR:$PWD/$SRCDIR/../utils:$PATH:/sbin
export NAME=${NAME:-local}

LUSTRE=${LUSTRE:-$(dirname $0)/..}
. $LUSTRE/tests/test-framework.sh

. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
NODEMAP_TESTS=$(seq 7 26)

if ! check_versions; then
	echo "It is NOT necessary to test nodemap under interoperation mode"
	EXCEPT="$EXCEPT $NODEMAP_TESTS"
fi
35 [ "$SLOW" = "no" ] && EXCEPT_SLOW="26"
37 [ "$ALWAYS_EXCEPT$EXCEPT$EXCEPT_SLOW" ] &&
38 echo "Skipping tests: $ALWAYS_EXCEPT $EXCEPT $EXCEPT_SLOW"
RUNAS_CMD=${RUNAS_CMD:-runas}

WTL=${WTL:-"$LUSTRE/tests/write_time_limit"}

PERM_CONF=$CONFDIR/perm.conf

HOSTNAME_CHECKSUM=$(hostname | sum | awk '{ print $1 }')
SUBNET_CHECKSUM=$(expr $HOSTNAME_CHECKSUM % 250 + 1)

NODEMAP_IPADDR_LIST="1 10 64 128 200 250"
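# Illustration of the checksum arithmetic above (hypothetical values,
# not executed): if "hostname | sum" prints "12345 14", then
# HOSTNAME_CHECKSUM=12345 and SUBNET_CHECKSUM=$((12345 % 250 + 1))=96,
# so the NID ranges built later by add_range() look like
# 96.0.0.[1-253]@tcp, and the addresses in NODEMAP_IPADDR_LIST select
# individual test NIDs such as 96.0.0.10@tcp.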
require_dsh_mds || exit 0
require_dsh_ost || exit 0

clients=${CLIENTS//,/ }
num_clients=$(get_node_count ${clients})
clients_arr=($clients)
USER0=$(getent passwd | grep :$ID0:$ID0: | cut -d: -f1)
USER1=$(getent passwd | grep :$ID1:$ID1: | cut -d: -f1)

[ -z "$USER0" ] &&
	skip "need to add user0 ($ID0:$ID0)" && exit 0

[ -z "$USER1" ] &&
	skip "need to add user1 ($ID1:$ID1)" && exit 0
IDBASE=${IDBASE:-60000}

# changes to mappings must be reflected in test 23
FOPS_IDMAPS=(
	[0]="$((IDBASE+3)):$((IDBASE+0)) $((IDBASE+4)):$((IDBASE+2))"
	[1]="$((IDBASE+5)):$((IDBASE+1)) $((IDBASE+6)):$((IDBASE+2))"
)
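# With the default IDBASE=60000, the idmaps above expand to these
# client_id:fs_id pairs:
#   c0: 60003:60000 60004:60002
#   c1: 60005:60001 60006:60002
# i.e. uid/gid 60004 on cluster c0 and 60006 on cluster c1 both map to
# the same canonical fs id 60002; test 23 relies on exactly this layout.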
check_and_setup_lustre

sec_cleanup() {
	if [ "$I_MOUNTED" = "yes" ]; then
		cleanupall -f || error "sec_cleanup"
	fi
}

[ -z "$(echo $DIR | grep $MOUNT)" ] &&
	error "$DIR not in $MOUNT" && sec_cleanup && exit 1

[ $(echo $MOUNT | wc -w) -gt 1 ] &&
	echo "NAME=$MOUNT mounted more than once" && sec_cleanup && exit 0
GSS_REF=$(lsmod | grep ^ptlrpc_gss | awk '{print $3}')
if [ -n "$GSS_REF" ] && [ "$GSS_REF" != "0" ]; then
	GSS_SUP=1
	echo "with GSS support"
else
	GSS_SUP=0
	echo "without GSS support"
fi
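# For reference, the awk above reads the third lsmod column, which is
# the module use count (illustrative output, not real values):
#   ptlrpc_gss   123456   1
# A nonzero use count means the GSS module is loaded and in use.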
MDT=$(do_facet $SINGLEMDS lctl get_param -N "mdt.\*MDT0000" |
	cut -d"." -f2)
[ -z "$MDT" ] && error "failed to get MDT device" && exit 1
do_facet $SINGLEMDS "mkdir -p $CONFDIR"
IDENTITY_FLUSH=mdt.$MDT.identity_flush
IDENTITY_UPCALL=mdt.$MDT.identity_upcall
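# The tests below install rules in $PERM_CONF and then flush the MDT
# identity cache so the new rules take effect immediately, e.g.:
#   do_facet $SINGLEMDS "echo '* $ID1 setuid' > $PERM_CONF"
#   do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
# A value of -1 flushes every cached identity entry; writing a specific
# uid instead would flush only that entry.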
sec_login() {
	local user=$1
	local group=$2

	if ! $RUNAS_CMD -u $user krb5_login.sh; then
		error "$user kerberos login failed."
	fi

	if ! $RUNAS_CMD -u $user -g $group ls $DIR > /dev/null 2>&1; then
		$RUNAS_CMD -u $user lfs flushctx -k
		$RUNAS_CMD -u $user krb5_login.sh
		if ! $RUNAS_CMD -u $user -g $group ls $DIR > /dev/null 2>&1; then
			error "init $user $group failed."
		fi
	fi
}
declare -a identity_old

for num in $(seq $MDSCOUNT); do
	switch_identity $num true || identity_old[$num]=$?
done

if ! $RUNAS_CMD -u $ID0 ls $DIR > /dev/null 2>&1; then
	sec_login $USER0 $USER0
fi

if ! $RUNAS_CMD -u $ID1 ls $DIR > /dev/null 2>&1; then
	sec_login $USER1 $USER1
fi
# run as different user
test_0() {
	chmod 0755 $DIR || error "chmod (1)"
	rm -rf $DIR/$tdir || error "rm (1)"
	mkdir -p $DIR/$tdir || error "mkdir (1)"
	chown $USER0 $DIR/$tdir || error "chown (2)"
	$RUNAS_CMD -u $ID0 ls $DIR || error "ls (1)"
	rm -f $DIR/f0 || error "rm (2)"
	$RUNAS_CMD -u $ID0 touch $DIR/f0 && error "touch (1)"
	$RUNAS_CMD -u $ID0 touch $DIR/$tdir/f1 || error "touch (2)"
	$RUNAS_CMD -u $ID1 touch $DIR/$tdir/f2 && error "touch (3)"
	touch $DIR/$tdir/f3 || error "touch (4)"
	chown root $DIR/$tdir || error "chown (3)"
	chgrp $USER0 $DIR/$tdir || error "chgrp (1)"
	chmod 0775 $DIR/$tdir || error "chmod (2)"
	$RUNAS_CMD -u $ID0 touch $DIR/$tdir/f4 || error "touch (5)"
	$RUNAS_CMD -u $ID1 touch $DIR/$tdir/f5 && error "touch (6)"
	touch $DIR/$tdir/f6 || error "touch (7)"
	rm -rf $DIR/$tdir || error "rm (3)"
}
run_test 0 "uid permission ============================="
test_1() {
	[ $GSS_SUP = 0 ] && skip "without GSS support." && return

	chown $USER0 $DIR/$tdir || error "chown (1)"
	$RUNAS_CMD -u $ID1 -v $ID0 touch $DIR/$tdir/f0 && error "touch (2)"
	echo "enable uid $ID1 setuid"
	do_facet $SINGLEMDS "echo '* $ID1 setuid' >> $PERM_CONF"
	do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
	$RUNAS_CMD -u $ID1 -v $ID0 touch $DIR/$tdir/f1 || error "touch (3)"

	chown root $DIR/$tdir || error "chown (4)"
	chgrp $USER0 $DIR/$tdir || error "chgrp (5)"
	chmod 0770 $DIR/$tdir || error "chmod (6)"
	$RUNAS_CMD -u $ID1 -g $ID1 touch $DIR/$tdir/f2 && error "touch (7)"
	$RUNAS_CMD -u $ID1 -g $ID1 -j $ID0 touch $DIR/$tdir/f3 && error "touch (8)"
	echo "enable uid $ID1 setuid,setgid"
	do_facet $SINGLEMDS "echo '* $ID1 setuid,setgid' > $PERM_CONF"
	do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
	$RUNAS_CMD -u $ID1 -g $ID1 -j $ID0 touch $DIR/$tdir/f4 ||
		error "touch (9)"
	$RUNAS_CMD -u $ID1 -v $ID0 -g $ID1 -j $ID0 touch $DIR/$tdir/f5 ||
		error "touch (10)"

	do_facet $SINGLEMDS "rm -f $PERM_CONF"
	do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
}
run_test 1 "setuid/gid ============================="
# bug 3285 - supplementary group should always succeed.
# NB: the supplementary groups are set for the local client only;
# for a remote client, the groups of the specified uid on the MDT
# are obtained by the /sbin/l_getidentity upcall and used instead.
test_4() {
	local server_version=$(lustre_version_code $SINGLEMDS)

	[[ $server_version -ge $(version_code 2.6.93) ]] ||
	[[ $server_version -ge $(version_code 2.5.35) &&
	   $server_version -lt $(version_code 2.5.50) ]] ||
		{ skip "Need MDS version at least 2.6.93 or 2.5.35"; return; }

	chmod 0771 $DIR/$tdir
	chgrp $ID0 $DIR/$tdir
	$RUNAS_CMD -u $ID0 ls $DIR/$tdir || error "setgroups (1)"
	do_facet $SINGLEMDS "echo '* $ID1 setgrp' > $PERM_CONF"
	do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
	$RUNAS_CMD -u $ID1 -G1,2,$ID0 ls $DIR/$tdir ||
		error "setgroups (2)"
	$RUNAS_CMD -u $ID1 -G1,2 ls $DIR/$tdir && error "setgroups (3)"

	do_facet $SINGLEMDS "rm -f $PERM_CONF"
	do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
}
run_test 4 "set supplementary group ==============="
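# The PERM_CONF rules used above follow the l_getidentity permission
# file format as exercised in this file, one rule per line:
#   {nid|*} {uid|*} perm[,perm...]
# with perms such as setuid, setgid and setgrp (see l_getidentity for
# the authoritative syntax). For example:
#   * 60001 setuid,setgid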
create_nodemaps() {
	squash_id default 99 0
	squash_id default 99 1
	for (( i = 0; i < NODEMAP_COUNT; i++ )); do
		local csum=${HOSTNAME_CHECKSUM}_${i}

		if ! do_facet mgs $LCTL nodemap_add $csum; then
			return 1
		fi

		out=$(do_facet mgs $LCTL get_param nodemap.$csum.id)
		# the new nodemap id must now be visible, otherwise fail
		[[ $(echo $out | grep -c $csum) == 0 ]] && return 1
	done
	return 0
}

delete_nodemaps() {
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		local csum=${HOSTNAME_CHECKSUM}_${i}

		if ! do_facet mgs $LCTL nodemap_del $csum; then
			error "nodemap_del $csum failed with $?"
		fi

		out=$(do_facet mgs $LCTL get_param nodemap.$csum.id)
		# the deleted nodemap id must be gone, otherwise fail
		[[ $(echo $out | grep -c $csum) != 0 ]] && return 1
	done
	return 0
}
add_range() {
	local cmd="$LCTL nodemap_add_range"
	local range

	for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
		range="$SUBNET_CHECKSUM.${2}.${j}.[1-253]@tcp"
		if ! do_facet mgs $cmd --name $1 --range $range; then
			return 1
		fi
	done
	return 0
}

delete_range() {
	local cmd="$LCTL nodemap_del_range"
	local range

	for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
		range="$SUBNET_CHECKSUM.${2}.${j}.[1-253]@tcp"
		if ! do_facet mgs $cmd --name $1 --range $range; then
			return 1
		fi
	done
	return 0
}
add_idmaps() {
	local cmd="$LCTL nodemap_add_idmap"

	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		for ((j = 500; j < NODEMAP_MAX_ID; j++)); do
			local csum=${HOSTNAME_CHECKSUM}_${i}
			local client_id=$j
			local fs_id=$((j + 1))

			if ! do_facet mgs $cmd --name $csum --idtype uid \
			     --idmap $client_id:$fs_id; then
				return 1
			fi
			if ! do_facet mgs $cmd --name $csum --idtype gid \
			     --idmap $client_id:$fs_id; then
				return 1
			fi
		done
	done
	return 0
}

del_idmaps() {
	local cmd="$LCTL nodemap_del_idmap"

	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		for ((j = 500; j < NODEMAP_MAX_ID; j++)); do
			local csum=${HOSTNAME_CHECKSUM}_${i}
			local client_id=$j
			local fs_id=$((j + 1))

			if ! do_facet mgs $cmd --name $csum --idtype uid \
			     --idmap $client_id:$fs_id; then
				return 1
			fi
			if ! do_facet mgs $cmd --name $csum --idtype gid \
			     --idmap $client_id:$fs_id; then
				return 1
			fi
		done
	done
	return 0
}
modify_flags() {
	local cmd="$LCTL nodemap_modify"
	local proc
	local option

	proc[0]="admin_nodemap"
	proc[1]="trusted_nodemap"

	option[0]="admin"
	option[1]="trusted"

	# set each flag and then clear it again
	for ((idx = 0; idx < 2; idx++)); do
		if ! do_facet mgs $cmd --name $1 --property ${option[$idx]} \
		     --value 1; then
			return 1
		fi

		if ! do_facet mgs $cmd --name $1 --property ${option[$idx]} \
		     --value 0; then
			return 1
		fi
	done
	return 0
}
squash_id() {
	[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
		return
	local cmd

	cmd[0]="$LCTL nodemap_modify --property squash_uid"
	cmd[1]="$LCTL nodemap_modify --property squash_gid"

	if ! do_facet mgs ${cmd[$3]} --name $1 --value $2; then
		return 1
	fi
}

# ensure that the squash defaults are the expected defaults
squash_id default 99 0
squash_id default 99 1
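# Usage: squash_id <nodemap> <id> <type>, where type 0 selects
# squash_uid and type 1 selects squash_gid, e.g.:
#   squash_id default 99 0   # default nodemap squashes uids to 99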
test_nid() {
	local cmd

	cmd="$LCTL nodemap_test_nid"

	nid=$(do_facet mgs $cmd $1)

	if [ $nid == $2 ]; then
		return 0
	fi

	return 1
}
test_idmap() {
	local cmd="$LCTL nodemap_test_id"
	local rc=0

	## nodemap deactivated
	if ! do_facet mgs $LCTL nodemap_activate 0; then
		return 1
	fi
	for ((id = 500; id < NODEMAP_MAX_ID; id++)); do
		for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
			local nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
			local fs_id=$(do_facet mgs $cmd --nid $nid \
				      --idtype uid --id $id)
			if [ $fs_id != $id ]; then
				echo "expected $id, got $fs_id"
				rc=$((rc + 1))
			fi
		done
	done

	## nodemap activated
	if ! do_facet mgs $LCTL nodemap_activate 1; then
		return 2
	fi

	for ((id = 500; id < NODEMAP_MAX_ID; id++)); do
		for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
			nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
			fs_id=$(do_facet mgs $cmd --nid $nid \
				--idtype uid --id $id)
			expected_id=$((id + 1))
			if [ $fs_id != $expected_id ]; then
				echo "expected $expected_id, got $fs_id"
				rc=$((rc + 1))
			fi
		done
	done

	## trust client ids
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		local csum=${HOSTNAME_CHECKSUM}_${i}

		if ! do_facet mgs $LCTL nodemap_modify --name $csum \
		     --property trusted --value 1; then
			error "nodemap_modify $csum failed with $?"
		fi
	done

	## ensure mapping is accurate with active trust
	for ((id = 500; id < NODEMAP_MAX_ID; id++)); do
		for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
			nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
			fs_id=$(do_facet mgs $cmd --nid $nid \
				--idtype uid --id $id)
			if [ $fs_id != $id ]; then
				echo "expected $id, got $fs_id"
				rc=$((rc + 1))
			fi
		done
	done

	## ensure allow_root_access is enabled
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		local csum=${HOSTNAME_CHECKSUM}_${i}

		if ! do_facet mgs $LCTL nodemap_modify --name $csum \
		     --property admin --value 1; then
			error "nodemap_modify $csum failed with $?"
		fi
	done

	## check that root is allowed
	for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
		nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
		fs_id=$(do_facet mgs $cmd --nid $nid --idtype uid --id 0)
		if [ $fs_id != 0 ]; then
			echo "root allowed expected 0, got $fs_id"
			rc=$((rc + 1))
		fi
	done

	## ensure allow_root_access is disabled
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		local csum=${HOSTNAME_CHECKSUM}_${i}

		if ! do_facet mgs $LCTL nodemap_modify --name $csum \
		     --property admin --value 0; then
			error "nodemap_modify $csum failed with $?"
		fi
	done

	## check that root is mapped to 99
	for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
		nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
		fs_id=$(do_facet mgs $cmd --nid $nid --idtype uid --id 0)
		if [ $fs_id != 99 ]; then
			error "root squash expected 99, got $fs_id"
		fi
	done

	## reset client trust to 0
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! do_facet mgs $LCTL nodemap_modify \
		     --name ${HOSTNAME_CHECKSUM}_${i} \
		     --property trusted --value 0; then
			error "nodemap_modify ${HOSTNAME_CHECKSUM}_${i} failed with $?"
		fi
	done

	return $rc
}
test_7() {
	local rc

	remote_mgs_nodsh && skip "remote MGS with nodsh" && return
	[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
		return

	create_nodemaps
	rc=$?
	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1

	delete_nodemaps
	rc=$?
	[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 2

	return 0
}
run_test 7 "nodemap create and delete"
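# A minimal nodemap lifecycle, as exercised by tests 7-15 (commands
# shown for illustration only, with a made-up name and range):
#   lctl nodemap_add nm0
#   lctl nodemap_add_range --name nm0 --range 192.168.1.[1-253]@tcp
#   lctl nodemap_add_idmap --name nm0 --idtype uid --idmap 500:501
#   lctl nodemap_test_id --nid 192.168.1.10@tcp --idtype uid --id 500
#   lctl nodemap_del nm0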
test_8() {
	local rc

	remote_mgs_nodsh && skip "remote MGS with nodsh" && return
	[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
		return

	create_nodemaps
	rc=$?
	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1

	create_nodemaps
	rc=$?
	[[ $rc == 0 ]] && error "duplicate nodemap_add allowed with $rc" &&
		return 2

	delete_nodemaps
	rc=$?
	[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 3

	return 0
}
run_test 8 "nodemap reject duplicates"
test_9() {
	local rc

	remote_mgs_nodsh && skip "remote MGS with nodsh" && return
	[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
		return

	create_nodemaps
	rc=$?
	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1

	rc=0
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
			rc=$((rc + 1))
		fi
	done
	[[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2

	rc=0
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! delete_range ${HOSTNAME_CHECKSUM}_${i} $i; then
			rc=$((rc + 1))
		fi
	done
	[[ $rc != 0 ]] && error "nodemap_del_range failed with $rc" && return 4

	delete_nodemaps
	rc=$?
	[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4

	return 0
}
run_test 9 "nodemap range add"
test_10() {
	local rc

	remote_mgs_nodsh && skip "remote MGS with nodsh" && return
	[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
		return

	create_nodemaps
	rc=$?
	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1

	rc=0
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
			rc=$((rc + 1))
		fi
	done
	[[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2

	rc=0
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
			rc=$((rc + 1))
		fi
	done
	[[ $rc == 0 ]] && error "nodemap_add_range duplicate add with $rc" &&
		return 3

	rc=0
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! delete_range ${HOSTNAME_CHECKSUM}_${i} $i; then
			rc=$((rc + 1))
		fi
	done
	[[ $rc != 0 ]] && error "nodemap_del_range failed with $rc" && return 4

	delete_nodemaps
	rc=$?
	[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 5

	return 0
}
run_test 10 "nodemap reject duplicate ranges"
test_11() {
	local rc

	remote_mgs_nodsh && skip "remote MGS with nodsh" && return
	[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
		return

	create_nodemaps
	rc=$?
	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1

	rc=0
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! modify_flags ${HOSTNAME_CHECKSUM}_${i}; then
			rc=$((rc + 1))
		fi
	done
	[[ $rc != 0 ]] && error "nodemap_modify failed with $rc" && return 2

	delete_nodemaps
	rc=$?
	[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 3

	return 0
}
run_test 11 "nodemap modify"
test_12() {
	local rc

	remote_mgs_nodsh && skip "remote MGS with nodsh" && return
	[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
		return

	create_nodemaps
	rc=$?
	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1

	rc=0
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! squash_id ${HOSTNAME_CHECKSUM}_${i} 88 0; then
			rc=$((rc + 1))
		fi
	done
	[[ $rc != 0 ]] && error "nodemap squash_uid failed with $rc" && return 2

	rc=0
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! squash_id ${HOSTNAME_CHECKSUM}_${i} 88 1; then
			rc=$((rc + 1))
		fi
	done
	[[ $rc != 0 ]] && error "nodemap squash_gid failed with $rc" && return 3

	delete_nodemaps
	rc=$?
	[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4

	return 0
}
run_test 12 "nodemap set squash ids"
test_13() {
	local rc

	remote_mgs_nodsh && skip "remote MGS with nodsh" && return
	[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
		return

	create_nodemaps
	rc=$?
	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1

	rc=0
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
			rc=$((rc + 1))
		fi
	done
	[[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2

	rc=0
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
			for k in $NODEMAP_IPADDR_LIST; do
				if ! test_nid $SUBNET_CHECKSUM.$i.$j.$k \
				     ${HOSTNAME_CHECKSUM}_${i}; then
					rc=$((rc + 1))
				fi
			done
		done
	done
	[[ $rc != 0 ]] && error "nodemap_test_nid failed with $rc" && return 3

	delete_nodemaps
	rc=$?
	[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4

	return 0
}
run_test 13 "test nids"
test_14() {
	local rc

	remote_mgs_nodsh && skip "remote MGS with nodsh" && return
	[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
		return

	create_nodemaps
	rc=$?
	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1

	rc=0
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
			for k in $NODEMAP_IPADDR_LIST; do
				if ! test_nid $SUBNET_CHECKSUM.$i.$j.$k \
				     default; then
					rc=$((rc + 1))
				fi
			done
		done
	done
	[[ $rc != 0 ]] && error "nodemap_test_nid failed with $rc" && return 3

	delete_nodemaps
	rc=$?
	[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4

	return 0
}
run_test 14 "test default nodemap nid lookup"
test_15() {
	local rc

	remote_mgs_nodsh && skip "remote MGS with nodsh" && return
	[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
		return

	create_nodemaps
	rc=$?
	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1

	rc=0
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
			rc=$((rc + 1))
		fi
	done
	[[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2

	add_idmaps
	rc=$?
	[[ $rc != 0 ]] && error "nodemap_add_idmap failed with $rc" && return 3

	test_idmap
	rc=$?
	[[ $rc != 0 ]] && error "nodemap_test_id failed with $rc" && return 4

	del_idmaps
	rc=$?
	[[ $rc != 0 ]] && error "nodemap_del_idmap failed with $rc" && return 5

	delete_nodemaps
	rc=$?
	[[ $rc != 0 ]] && error "nodemap_delete failed with $rc" && return 6

	return 0
}
run_test 15 "test id mapping"
wait_nm_sync() {
	local nodemap_name=$1
	local key=$2
	local value=$3
	local proc_param="${nodemap_name}.${key}"
	[ "$nodemap_name" == "active" ] && proc_param="active"

	local is_active=$(do_facet mgs $LCTL get_param -n nodemap.active)
	# if nodemaps are inactive, only the "active" flag itself can sync
	(( is_active == 0 )) && [ "$proc_param" != "active" ] && return

	local is_sync
	local out1=""
	local out2
	local mgs_ip=$(host_nids_address $mgs_HOST $NETTYPE | cut -d' ' -f1)
	local i

	if [ -z "$value" ]; then
		out1=$(do_facet mgs $LCTL get_param nodemap.${proc_param})
		echo "On MGS ${mgs_ip}, ${proc_param} = $out1"
	else
		out1=$value
	fi

	# wait up to 10 seconds for other servers to sync with mgs
	for i in $(seq 1 10); do
		is_sync=true
		for node in $(all_server_nodes); do
			local node_ip=$(host_nids_address $node $NETTYPE |
					cut -d' ' -f1)

			if [ -z "$value" ]; then
				[ $node_ip == $mgs_ip ] && continue
			fi

			out2=$(do_node $node_ip $LCTL get_param \
			       nodemap.$proc_param 2>/dev/null)
			echo "On $node ${node_ip}, ${proc_param} = $out2"
			[ "$out1" != "$out2" ] && is_sync=false && break
		done
		$is_sync && break
		sleep 1
	done
	if ! $is_sync; then
		echo MGS
		echo $out1
		echo "OTHER - IP: $node_ip"
		echo $out2
		error "mgs and $nodemap_name ${key} mismatch, $i attempts"
	fi
	echo "waited $((i - 1)) seconds for sync"
}
create_fops_nodemaps() {
	local i=0
	local client

	for client in $clients; do
		local client_ip=$(host_nids_address $client $NETTYPE)
		local client_nid=$(h2nettype $client_ip)

		do_facet mgs $LCTL nodemap_add c${i} || return 1
		do_facet mgs $LCTL nodemap_add_range \
			--name c${i} --range $client_nid || return 1
		for map in ${FOPS_IDMAPS[i]}; do
			do_facet mgs $LCTL nodemap_add_idmap --name c${i} \
				--idtype uid --idmap ${map} || return 1
			do_facet mgs $LCTL nodemap_add_idmap --name c${i} \
				--idtype gid --idmap ${map} || return 1
		done

		wait_nm_sync c$i idmap

		i=$((i + 1))
	done
	return 0
}

delete_fops_nodemaps() {
	local i=0
	local client

	for client in $clients; do
		do_facet mgs $LCTL nodemap_del c${i} || return 1
		i=$((i + 1))
	done
	return 0
}
if [ $MDSCOUNT -le 1 ]; then
	do_node ${clients_arr[0]} mkdir -p $DIR/$tdir
else
	# round-robin MDTs to test DNE nodemap support
	[ ! -d $DIR ] && do_node ${clients_arr[0]} mkdir -p $DIR
	do_node ${clients_arr[0]} $LFS setdirstripe -c 1 -i \
		$((fops_mds_index % MDSCOUNT)) $DIR/$tdir
fi
# fops test directory needs to be initialized on a privileged client
fops_test_setup() {
	local admin=$(do_facet mgs $LCTL get_param -n nodemap.c0.admin_nodemap)
	local trust=$(do_facet mgs $LCTL get_param -n \
		      nodemap.c0.trusted_nodemap)

	do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
	do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1

	wait_nm_sync c0 admin_nodemap
	wait_nm_sync c0 trusted_nodemap

	do_node ${clients_arr[0]} rm -rf $DIR/$tdir

	do_node ${clients_arr[0]} chown $user $DIR/$tdir

	do_facet mgs $LCTL nodemap_modify --name c0 \
		--property admin --value $admin
	do_facet mgs $LCTL nodemap_modify --name c0 \
		--property trusted --value $trust

	# flush MDT locks to make sure they are reacquired before test
	do_node ${clients_arr[0]} $LCTL set_param \
		ldlm.namespaces.$FSNAME-MDT*.lru_size=clear

	wait_nm_sync c0 admin_nodemap
	wait_nm_sync c0 trusted_nodemap
}
# fileset test directory needs to be initialized on a privileged client
fileset_test_setup() {
	local admin=$(do_facet mgs $LCTL get_param -n nodemap.c0.admin_nodemap)
	local trust=$(do_facet mgs $LCTL get_param -n \
		      nodemap.c0.trusted_nodemap)

	do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
	do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1

	wait_nm_sync c0 admin_nodemap
	wait_nm_sync c0 trusted_nodemap

	# create directory and populate it for subdir mount
	do_node ${clients_arr[0]} mkdir $MOUNT/$subdir ||
		error "unable to create dir $MOUNT/$subdir"
	do_node ${clients_arr[0]} touch $MOUNT/$subdir/this_is_$subdir ||
		error "unable to create file $MOUNT/$subdir/this_is_$subdir"
	do_node ${clients_arr[0]} mkdir $MOUNT/$subdir/$subsubdir ||
		error "unable to create dir $MOUNT/$subdir/$subsubdir"
	do_node ${clients_arr[0]} touch \
		$MOUNT/$subdir/$subsubdir/this_is_$subsubdir ||
		error "unable to create file $MOUNT/$subdir/$subsubdir/this_is_$subsubdir"

	do_facet mgs $LCTL nodemap_modify --name c0 \
		--property admin --value $admin
	do_facet mgs $LCTL nodemap_modify --name c0 \
		--property trusted --value $trust

	# flush MDT locks to make sure they are reacquired before test
	do_node ${clients_arr[0]} $LCTL set_param \
		ldlm.namespaces.$FSNAME-MDT*.lru_size=clear

	wait_nm_sync c0 admin_nodemap
	wait_nm_sync c0 trusted_nodemap
}
# fileset test directory needs to be cleaned up on a privileged client
fileset_test_cleanup() {
	local admin=$(do_facet mgs $LCTL get_param -n nodemap.c0.admin_nodemap)
	local trust=$(do_facet mgs $LCTL get_param -n \
		      nodemap.c0.trusted_nodemap)

	do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
	do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1

	wait_nm_sync c0 admin_nodemap
	wait_nm_sync c0 trusted_nodemap

	# cleanup directory created for subdir mount
	do_node ${clients_arr[0]} rm -rf $MOUNT/$subdir ||
		error "unable to remove dir $MOUNT/$subdir"

	do_facet mgs $LCTL nodemap_modify --name c0 \
		--property admin --value $admin
	do_facet mgs $LCTL nodemap_modify --name c0 \
		--property trusted --value $trust

	# flush MDT locks to make sure they are reacquired before test
	do_node ${clients_arr[0]} $LCTL set_param \
		ldlm.namespaces.$FSNAME-MDT*.lru_size=clear

	wait_nm_sync c0 admin_nodemap
	wait_nm_sync c0 trusted_nodemap
}
do_create_delete() {
	local run_u="$1"
	local key="$2"
	local testfile=$DIR/$tdir/$tfile
	local rc=0
	local c=0 d=0

	if $run_u touch $testfile >& /dev/null; then
		c=1
		$run_u rm $testfile && d=1
	fi >& /dev/null

	local res="$c $d"
	local expected=$(get_cr_del_expected $key)

	[ "$res" != "$expected" ] &&
		error "test $key, wanted $expected, got $res" && rc=$((rc + 1))
	return $rc
}
nodemap_check_quota() {
	local run_u="$1"

	$run_u lfs quota -q $DIR | awk '{ print $2; exit; }'
}
do_fops_quota_test() {
	local run_u="$1"
	# fuzz quota used to account for possible indirect blocks, etc
	local quota_fuzz=$(fs_log_size)
	local qused_orig=$(nodemap_check_quota "$run_u")
	local qused_high=$((qused_orig + quota_fuzz))
	local qused_low=$((qused_orig - quota_fuzz))
	local testfile=$DIR/$tdir/$tfile

	$run_u dd if=/dev/zero of=$testfile oflag=sync bs=1M count=1 \
		>& /dev/null || error "unable to write quota test file"
	sync; sync_all_data || true

	local qused_new=$(nodemap_check_quota "$run_u")
	[ $((qused_new)) -lt $((qused_low + 1024)) -o \
	  $((qused_new)) -gt $((qused_high + 1024)) ] &&
		error "$qused_new != $qused_orig + 1M after write, " \
			"fuzz is $quota_fuzz"

	$run_u rm $testfile || error "unable to remove quota test file"
	wait_delete_completed_mds

	qused_new=$(nodemap_check_quota "$run_u")
	[ $((qused_new)) -lt $((qused_low)) \
	  -o $((qused_new)) -gt $((qused_high)) ] &&
		error "quota not reclaimed, expect $qused_orig, " \
			"got $qused_new, fuzz $quota_fuzz"
}
get_fops_mapped_user() {
	local cli_user=$1

	for ((i = 0; i < ${#FOPS_IDMAPS[@]}; i++)); do
		for map in ${FOPS_IDMAPS[i]}; do
			if [ $(cut -d: -f1 <<< "$map") == $cli_user ]; then
				cut -d: -f2 <<< "$map"
				return
			fi
		done
	done
	echo -1
}
get_cr_del_expected() {
	local -a key
	IFS=":" read -a key <<< "$1"
	local mapmode="${key[0]}"
	local mds_user="${key[1]}"
	local cluster="${key[2]}"
	local cli_user="${key[3]}"
	local mode="0${key[4]}"
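	# e.g. the key "mapped_noadmin:60000:c1:60005:777" parses to
	# mapmode=mapped_noadmin, mds_user=60000, cluster=c1,
	# cli_user=60005, mode=0777 (values shown for illustration; keys
	# are built in test_fops() below)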
	[[ $mapmode == *mapped* ]] && mapped=1
	# only c1 is mapped in these test cases
	[[ $mapmode == mapped_trusted* ]] && [ "$cluster" == "c0" ] && mapped=0
	[[ $mapmode == *noadmin* ]] && noadmin=1

	# o+wx works as long as the user isn't mapped
	if [ $((mode & 3)) -eq 3 ]; then
		other=1
	fi

	# if the client user is root, check whether root is squashed
	if [ "$cli_user" == "0" ]; then
		# squashed root succeeds only if the other bits allow access
		case $noadmin in
			0) echo $SUCCESS;;
			1) [ "$other" == "1" ] && echo $SUCCESS
			   [ "$other" == "0" ] && echo $FAILURE;;
		esac
		return
	fi

	if [ "$mapped" == "0" ]; then
		[ "$other" == "1" ] && echo $SUCCESS
		[ "$other" == "0" ] && echo $FAILURE
		return
	fi

	# if the mapped user is the mds user, check for u+wx
	mapped_user=$(get_fops_mapped_user $cli_user)
	[ "$mapped_user" == "-1" ] &&
		error "unable to find mapping for client user $cli_user"

	if [ "$mapped_user" == "$mds_user" -a \
	     $(((mode & 0300) == 0300)) -eq 1 ]; then
		echo $SUCCESS
		return
	fi

	if [ "$mapped_user" != "$mds_user" -a "$other" == "1" ]; then
		echo $FAILURE
		return
	fi

	echo $FAILURE
}
test_fops_admin_cli_i=""
test_fops_chmod_dir() {
	local current_cli_i=$1
	local perm_bits=$2
	local dir_to_chmod=$3
	local new_admin_cli_i=""

	# do we need to set up a new admin client?
	[ "$current_cli_i" == "0" ] && [ "$test_fops_admin_cli_i" != "1" ] &&
		new_admin_cli_i=1
	[ "$current_cli_i" != "0" ] && [ "$test_fops_admin_cli_i" != "0" ] &&
		new_admin_cli_i=0

	# if only one client, and non-admin, need to flip admin every time
	if [ "$num_clients" == "1" ]; then
		test_fops_admin_client=$clients
		test_fops_admin_val=$(do_facet mgs $LCTL get_param -n \
			nodemap.c0.admin_nodemap)
		if [ "$test_fops_admin_val" != "1" ]; then
			do_facet mgs $LCTL nodemap_modify \
				--name c0 --property admin --value 1
			wait_nm_sync c0 admin_nodemap
		fi
	elif [ "$new_admin_cli_i" != "" ]; then
		# restore admin val to old admin client
		if [ "$test_fops_admin_cli_i" != "" ] &&
		   [ "$test_fops_admin_val" != "1" ]; then
			do_facet mgs $LCTL nodemap_modify \
				--name c${test_fops_admin_cli_i} \
				--property admin \
				--value $test_fops_admin_val
			wait_nm_sync c${test_fops_admin_cli_i} admin_nodemap
		fi

		test_fops_admin_cli_i=$new_admin_cli_i
		test_fops_admin_client=${clients_arr[$new_admin_cli_i]}
		test_fops_admin_val=$(do_facet mgs $LCTL get_param -n \
			nodemap.c${new_admin_cli_i}.admin_nodemap)

		if [ "$test_fops_admin_val" != "1" ]; then
			do_facet mgs $LCTL nodemap_modify \
				--name c${new_admin_cli_i} \
				--property admin --value 1
			wait_nm_sync c${new_admin_cli_i} admin_nodemap
		fi
	fi

	do_node $test_fops_admin_client chmod $perm_bits $dir_to_chmod ||
		return 1

	# remove admin for single client if originally non-admin
	if [ "$num_clients" == "1" ] && [ "$test_fops_admin_val" != "1" ]; then
		do_facet mgs $LCTL nodemap_modify --name c0 --property admin \
			--value 0
		wait_nm_sync c0 admin_nodemap
	fi

	return 0
}
test_fops() {
	local mapmode="$1"
	local single_client="$2"
	local client_user_list=([0]="0 $((IDBASE+3)) $((IDBASE+4))"
				[1]="0 $((IDBASE+5)) $((IDBASE+6))")
	local mds_i
	local rc=0
	local perm_bit_list="0 3 $((0300)) $((0303))"
	# SLOW tests 000-007, 010-070, 100-700 (octal modes)
	[ "$SLOW" == "yes" ] &&
		perm_bit_list="0 $(seq 1 7) $(seq 8 8 63) $(seq 64 64 511) \
			       $((0303))"

	# step through mds users. -1 means root
	for mds_i in -1 0 1 2; do
		local user=$((mds_i + IDBASE))
		local client

		[ "$mds_i" == "-1" ] && user=0

		echo mkdir -p $DIR/$tdir
		fops_test_setup
		local cli_i=0
		for client in $clients; do
			local u

			for u in ${client_user_list[$cli_i]}; do
				local run_u="do_node $client \
					     $RUNAS_CMD -u$u -g$u -G$u"
				for perm_bits in $perm_bit_list; do
					local mode=$(printf %03o $perm_bits)
					local key
					key="$mapmode:$user:c$cli_i:$u:$mode"
					test_fops_chmod_dir $cli_i $mode \
						$DIR/$tdir ||
							error "cannot chmod $key"
					do_create_delete "$run_u" "$key"
				done

				# check quota
				test_fops_chmod_dir $cli_i 777 $DIR/$tdir ||
					error "cannot chmod $key"
				do_fops_quota_test "$run_u"
			done

			cli_i=$((cli_i + 1))
			[ "$single_client" == "1" ] && break
		done
	done
	return $rc
}
nodemap_version_check() {
	remote_mgs_nodsh && skip "remote MGS with nodsh" && return 1
	[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
		return 1
	return 0
}
nodemap_test_setup() {
	local rc
	local active_nodemap=1

	[ "$1" == "0" ] && active_nodemap=0

	do_nodes $(comma_list $(all_mdts_nodes)) \
		$LCTL set_param mdt.*.identity_upcall=NONE

	create_fops_nodemaps
	rc=$?
	[[ $rc != 0 ]] && error "adding fops nodemaps failed $rc"

	do_facet mgs $LCTL nodemap_activate $active_nodemap

	do_facet mgs $LCTL nodemap_modify --name default \
		--property admin --value 1
	do_facet mgs $LCTL nodemap_modify --name default \
		--property trusted --value 1
	wait_nm_sync default trusted_nodemap
}
nodemap_test_cleanup() {
	delete_fops_nodemaps
	rc=$?
	[[ $rc != 0 ]] && error "removing fops nodemaps failed $rc"

	do_facet mgs $LCTL nodemap_modify --name default \
		--property admin --value 0
	do_facet mgs $LCTL nodemap_modify --name default \
		--property trusted --value 0
	wait_nm_sync default trusted_nodemap

	do_facet mgs $LCTL nodemap_activate 0
	wait_nm_sync active 0

	export SK_UNIQUE_NM=false
	return 0
}
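# Typical fileops test skeleton, as used by tests 16-22 below
# (illustrative):
#   nodemap_version_check || return 0
#   nodemap_test_setup
#   trap nodemap_test_cleanup EXIT
#   test_fops <mapmode> <single_client>
#   nodemap_test_cleanup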
nodemap_clients_admin_trusted() {
	local admin=$1
	local tr=$2
	local i=0

	for client in $clients; do
		do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property admin --value $admin
		do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property trusted --value $tr
		i=$((i + 1))
	done

	wait_nm_sync c$((i - 1)) admin_nodemap
	wait_nm_sync c$((i - 1)) trusted_nodemap
}
test_16() {
	nodemap_version_check || return 0
	nodemap_test_setup 0

	trap nodemap_test_cleanup EXIT
	test_fops all_off
	nodemap_test_cleanup
}
run_test 16 "test nodemap all_off fileops"
test_17() {
	nodemap_version_check || return 0
	nodemap_test_setup

	trap nodemap_test_cleanup EXIT
	nodemap_clients_admin_trusted 0 1
	test_fops trusted_noadmin 1
	nodemap_test_cleanup
}
run_test 17 "test nodemap trusted_noadmin fileops"
test_18() {
	nodemap_version_check || return 0
	nodemap_test_setup

	trap nodemap_test_cleanup EXIT
	nodemap_clients_admin_trusted 0 0
	test_fops mapped_noadmin 1
	nodemap_test_cleanup
}
run_test 18 "test nodemap mapped_noadmin fileops"
test_19() {
	nodemap_version_check || return 0
	nodemap_test_setup

	trap nodemap_test_cleanup EXIT
	nodemap_clients_admin_trusted 1 1
	test_fops trusted_admin 1
	nodemap_test_cleanup
}
run_test 19 "test nodemap trusted_admin fileops"
test_20() {
	nodemap_version_check || return 0
	nodemap_test_setup

	trap nodemap_test_cleanup EXIT
	nodemap_clients_admin_trusted 1 0
	test_fops mapped_admin 1
	nodemap_test_cleanup
}
run_test 20 "test nodemap mapped_admin fileops"
test_21() {
	nodemap_version_check || return 0
	nodemap_test_setup

	trap nodemap_test_cleanup EXIT
	local x=1
	local i=0
	for client in $clients; do
		do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property admin --value 0
		do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property trusted --value $x
		x=0
		i=$((i + 1))
	done
	wait_nm_sync c$((i - 1)) trusted_nodemap

	test_fops mapped_trusted_noadmin
	nodemap_test_cleanup
}
run_test 21 "test nodemap mapped_trusted_noadmin fileops"
test_22() {
	nodemap_version_check || return 0
	nodemap_test_setup

	trap nodemap_test_cleanup EXIT
	local x=1
	local i=0
	for client in $clients; do
		do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property admin --value 1
		do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property trusted --value $x
		x=0
		i=$((i + 1))
	done
	wait_nm_sync c$((i - 1)) trusted_nodemap

	test_fops mapped_trusted_admin
	nodemap_test_cleanup
}
run_test 22 "test nodemap mapped_trusted_admin fileops"
# acl test directory needs to be initialized on a privileged client
nodemap_acl_test_setup() {
	local admin=$(do_facet mgs $LCTL get_param -n nodemap.c0.admin_nodemap)
	local trust=$(do_facet mgs $LCTL get_param -n \
		      nodemap.c0.trusted_nodemap)

	do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
	do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1

	wait_nm_sync c0 admin_nodemap
	wait_nm_sync c0 trusted_nodemap

	do_node ${clients_arr[0]} rm -rf $DIR/$tdir

	do_node ${clients_arr[0]} chmod a+rwx $DIR/$tdir ||
		error "unable to chmod a+rwx test dir $DIR/$tdir"

	do_facet mgs $LCTL nodemap_modify --name c0 \
		--property admin --value $admin
	do_facet mgs $LCTL nodemap_modify --name c0 \
		--property trusted --value $trust

	wait_nm_sync c0 trusted_nodemap
}
# returns 0 if the number of ACLs does not change on the second (mapped)
# client after being set on the first client
nodemap_acl_test() {
	local user="$1"
	local set_client="$2"
	local get_client="$3"
	local check_setfacl="$4"
	local setfacl_error=0
	local testfile=$DIR/$tdir/$tfile
	local RUNAS_USER="$RUNAS_CMD -u $user"
	local acl_count=0
	local acl_count_post=0

	nodemap_acl_test_setup

	do_node $set_client $RUNAS_USER touch $testfile

	# ACL masks aren't filtered by nodemap code, so we ignore them
	acl_count=$(do_node $get_client getfacl $testfile | grep -v mask |
		    wc -l)
	do_node $set_client $RUNAS_USER setfacl -m $user:rwx $testfile ||
		setfacl_error=1

	# if check_setfacl is set to 1, then it's supposed to error
	if [ "$check_setfacl" == "1" ]; then
		[ "$setfacl_error" != "1" ] && return 1
		return 0
	fi
	[ "$setfacl_error" == "1" ] && echo "WARNING: unable to setfacl"

	acl_count_post=$(do_node $get_client getfacl $testfile | grep -v mask |
			 wc -l)
	[ $acl_count -eq $acl_count_post ] && return 0
	return 1
}
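# Usage: nodemap_acl_test <uid> <set_client> <get_client> [<expect_err>]
# e.g. (taken from test 23 below) verify that an ACL set on the trusted
# cluster for an unmapped user is not visible from the mapped cluster:
#   nodemap_acl_test $unmapped_fs ${clients_arr[0]} ${clients_arr[1]} ||
#           error "acl count (1)"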
test_23() {
	nodemap_version_check || return 0
	nodemap_test_setup

	trap nodemap_test_cleanup EXIT
	# 1 trusted cluster, 1 mapped cluster
	local unmapped_fs=$((IDBASE+0))
	local unmapped_c1=$((IDBASE+5))
	local mapped_fs=$((IDBASE+2))
	local mapped_c0=$((IDBASE+4))
	local mapped_c1=$((IDBASE+6))

	do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
	do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1

	do_facet mgs $LCTL nodemap_modify --name c1 --property admin --value 0
	do_facet mgs $LCTL nodemap_modify --name c1 --property trusted --value 0

	wait_nm_sync c1 trusted_nodemap

	# setfacl on trusted cluster to unmapped user, verify it's not seen
	nodemap_acl_test $unmapped_fs ${clients_arr[0]} ${clients_arr[1]} ||
		error "acl count (1)"

	# setfacl on trusted cluster to mapped user, verify it's seen
	nodemap_acl_test $mapped_fs ${clients_arr[0]} ${clients_arr[1]} &&
		error "acl count (2)"

	# setfacl on mapped cluster to mapped user, verify it's seen
	nodemap_acl_test $mapped_c1 ${clients_arr[1]} ${clients_arr[0]} &&
		error "acl count (3)"

	# setfacl on mapped cluster to unmapped user, verify error
	nodemap_acl_test $unmapped_fs ${clients_arr[1]} ${clients_arr[0]} 1 ||
		error "acl count (4)"

	# 2 mapped clusters
	do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 0
	do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 0

	wait_nm_sync c0 trusted_nodemap

	# setfacl to mapped user on c1, also mapped to c0, verify it's seen
	nodemap_acl_test $mapped_c1 ${clients_arr[1]} ${clients_arr[0]} &&
		error "acl count (5)"

	# setfacl to mapped user on c1, not mapped to c0, verify not seen
	nodemap_acl_test $unmapped_c1 ${clients_arr[1]} ${clients_arr[0]} ||
		error "acl count (6)"

	nodemap_test_cleanup
}
run_test 23 "test mapped ACLs"
test_24() {
	nodemap_test_setup

	trap nodemap_test_cleanup EXIT
	do_nodes $(comma_list $(all_server_nodes)) $LCTL get_param -R nodemap ||
		error "proc readable file read failed"

	nodemap_test_cleanup
}
run_test 24 "check nodemap proc files for LBUGs and Oopses"
test_25() {
	local tmpfile=$(mktemp)
	local tmpfile2=$(mktemp)
	local tmpfile3=$(mktemp)
	local tmpfile4=$(mktemp)
	local client

	nodemap_version_check || return 0

	# stop clients for this test
	zconf_umount_clients $CLIENTS $MOUNT ||
		error "unable to umount clients $CLIENTS"

	export SK_UNIQUE_NM=true
	nodemap_test_setup

	# enable trusted/admin for setquota call in cleanup_and_setup_lustre()
	local i=0
	for client in $clients; do
		do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property admin --value 1
		do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property trusted --value 1
		i=$((i + 1))
	done
	wait_nm_sync c$((i - 1)) trusted_nodemap

	trap nodemap_test_cleanup EXIT

	# create a new, empty nodemap, and add fileset info to it
	do_facet mgs $LCTL nodemap_add test25 ||
		error "unable to create nodemap test25"
	do_facet mgs $LCTL set_param -P nodemap.test25.fileset=/$subdir ||
		error "unable to add fileset info to nodemap test25"

	wait_nm_sync test25 id

	do_facet mgs $LCTL nodemap_info > $tmpfile
	do_facet mds $LCTL nodemap_info > $tmpfile2

	if ! $SHARED_KEY; then
		# will conflict with SK's nodemaps
		cleanup_and_setup_lustre
	fi

	# stop clients for this test
	zconf_umount_clients $CLIENTS $MOUNT ||
		error "unable to umount clients $CLIENTS"

	do_facet mgs $LCTL nodemap_info > $tmpfile3
	diff -q $tmpfile3 $tmpfile >& /dev/null ||
		error "nodemap_info diff on MGS after remount"

	do_facet mds $LCTL nodemap_info > $tmpfile4
	diff -q $tmpfile4 $tmpfile2 >& /dev/null ||
		error "nodemap_info diff on MDS after remount"

	do_facet mgs $LCTL nodemap_del test25 ||
		error "cannot delete nodemap test25 from config"
	nodemap_test_cleanup
	# restart clients previously stopped
	zconf_mount_clients $CLIENTS $MOUNT ||
		error "unable to mount clients $CLIENTS"

	rm -f $tmpfile $tmpfile2 $tmpfile3 $tmpfile4
	export SK_UNIQUE_NM=false
}
run_test 25 "test save and reload nodemap config"
test_26() {
	nodemap_version_check || return 0

	local large_i=32000

	do_facet mgs "seq -f 'c%g' $large_i | xargs -n1 $LCTL nodemap_add"
	wait_nm_sync c$large_i admin_nodemap

	do_facet mgs "seq -f 'c%g' $large_i | xargs -n1 $LCTL nodemap_del"
	wait_nm_sync c$large_i admin_nodemap
}
run_test 26 "test transferring very large nodemap"
test_27() {
	local subdir=c0dir
	local subsubdir=c0subdir
	local fileset_on_mgs=""
	local loop=0

	nodemap_test_setup
	if $SHARED_KEY; then
		export SK_UNIQUE_NM=true
	else
		# will conflict with SK's nodemaps
		trap nodemap_test_cleanup EXIT
	fi

	fileset_test_setup

	# add fileset info to nodemap
	do_facet mgs $LCTL set_param -P nodemap.c0.fileset=/$subdir ||
		error "unable to add fileset info to nodemap c0"
	wait_nm_sync c0 fileset "nodemap.c0.fileset=/$subdir"

	# re-mount client
	zconf_umount_clients ${clients_arr[0]} $MOUNT ||
		error "unable to umount client ${clients_arr[0]}"
	# set some generic fileset to trigger SSK code
	export FILESET=/
	zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
		error "unable to remount client ${clients_arr[0]}"
	unset FILESET

	# test mount point content
	do_node ${clients_arr[0]} test -f $MOUNT/this_is_$subdir ||
		error "fileset not taken into account"

	# re-mount client with sub-subdir
	zconf_umount_clients ${clients_arr[0]} $MOUNT ||
		error "unable to umount client ${clients_arr[0]}"
	export FILESET=/$subsubdir
	zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
		error "unable to remount client ${clients_arr[0]}"
	unset FILESET

	# test mount point content
	do_node ${clients_arr[0]} test -f $MOUNT/this_is_$subsubdir ||
		error "subdir of fileset not taken into account"

	# remove fileset info from nodemap
	do_facet mgs $LCTL nodemap_set_fileset --name c0 --fileset \'\' ||
		error "unable to delete fileset info on nodemap c0"
	fileset_on_mgs=$(do_facet mgs $LCTL get_param nodemap.c0.fileset)
	while [ "${fileset_on_mgs}" != "nodemap.c0.fileset=" ]; do
		if [ $loop -eq 10 ]; then
			error "On MGS, fileset cannot be cleared"
			break
		fi
		loop=$((loop + 1))
		echo "On MGS, fileset is still ${fileset_on_mgs}, waiting..."
		fileset_on_mgs=$(do_facet mgs $LCTL get_param nodemap.c0.fileset)
	done
	do_facet mgs $LCTL set_param -P nodemap.c0.fileset=\'\' ||
		error "unable to reset fileset info on nodemap c0"
	wait_nm_sync c0 fileset

	# re-mount client
	zconf_umount_clients ${clients_arr[0]} $MOUNT ||
		error "unable to umount client ${clients_arr[0]}"
	zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
		error "unable to remount client ${clients_arr[0]}"

	# test mount point content
	do_node ${clients_arr[0]} test -d $MOUNT/$subdir ||
		(ls $MOUNT; error "fileset not cleared on nodemap c0")

	# back to non-nodemap setup
	if $SHARED_KEY; then
		export SK_UNIQUE_NM=false
		zconf_umount_clients ${clients_arr[0]} $MOUNT ||
			error "unable to umount client ${clients_arr[0]}"
	fi
	fileset_test_cleanup
	nodemap_test_cleanup
	if $SHARED_KEY; then
		zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
			error "unable to remount client ${clients_arr[0]}"
	fi
}
run_test 27 "test fileset in nodemap"
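# Conceptually, the nodemap fileset above behaves like a server-enforced
# subdirectory mount: clients in nodemap c0 see /$subdir as the root of
# the filesystem, much as if they had mounted the subdirectory directly
# (illustrative comparison only, not a command exercised by this test):
#   mount -t lustre $MGSNID:/$FSNAME/c0dir /mnt/lustre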
test_28() {
	if ! $SHARED_KEY; then
		skip "need shared key feature for this test" && return
	fi
	mkdir -p $DIR/$tdir || error "mkdir failed"
	touch $DIR/$tdir/$tdir.out || error "touch failed"
	if [ ! -f $DIR/$tdir/$tdir.out ]; then
		error "read before rotation failed"
	fi
	# store top key identity to ensure rotation has occurred
	SK_IDENTITY_OLD=$(lctl get_param *.*.*srpc* | grep "expire" |
		head -1 | awk '{print $15}' | cut -c1-8)
	do_facet $SINGLEMDS lfs flushctx ||
		error "could not run flushctx on $SINGLEMDS"

	lfs flushctx || error "could not run flushctx on client"

	# verify new key is in place
	SK_IDENTITY_NEW=$(lctl get_param *.*.*srpc* | grep "expire" |
		head -1 | awk '{print $15}' | cut -c1-8)
	if [ $SK_IDENTITY_OLD == $SK_IDENTITY_NEW ]; then
		error "key did not rotate correctly"
	fi
	if [ ! -f $DIR/$tdir/$tdir.out ]; then
		error "read after rotation failed"
	fi
}
run_test 28 "check shared key rotation method"
test_29() {
	if ! $SHARED_KEY; then
		skip "need shared key feature for this test" && return
	fi
	if [ "$SK_FLAVOR" != "ski" ] && [ "$SK_FLAVOR" != "skpi" ]; then
		skip "test only valid if integrity is active"
		return
	fi

	mkdir $DIR/$tdir || error "mkdir"
	touch $DIR/$tdir/$tfile || error "touch"
	zconf_umount_clients ${clients_arr[0]} $MOUNT ||
		error "unable to umount clients"
	keyctl show | awk '/lustre/ { print $1 }' |
		xargs -IX keyctl unlink X

	OLD_SK_PATH=$SK_PATH
	export SK_PATH=/dev/null
	if zconf_mount_clients ${clients_arr[0]} $MOUNT; then
		export SK_PATH=$OLD_SK_PATH
		if [ -e $DIR/$tdir/$tfile ]; then
			error "able to mount and read without key"
		else
			error "able to mount without key"
		fi
	fi
	export SK_PATH=$OLD_SK_PATH
	keyctl show | awk '/lustre/ { print $1 }' |
		xargs -IX keyctl unlink X
}
run_test 29 "check for missing shared key"
test_30() {
	if ! $SHARED_KEY; then
		skip "need shared key feature for this test" && return
	fi
	if [ "$SK_FLAVOR" != "ski" ] && [ "$SK_FLAVOR" != "skpi" ]; then
		skip "test only valid if integrity is active"
		return
	fi

	mkdir -p $DIR/$tdir || error "mkdir failed"
	touch $DIR/$tdir/$tdir.out || error "touch failed"
	zconf_umount_clients ${clients_arr[0]} $MOUNT ||
		error "unable to umount clients"
	# unload keys from ring
	keyctl show | awk '/lustre/ { print $1 }' |
		xargs -IX keyctl unlink X
	# generate a key with a bogus filesystem name; it must be rejected
	lgss_sk -w $SK_PATH/$FSNAME-bogus.key -f $FSNAME.bogus \
		-t client -d /dev/urandom || error "lgss_sk failed (1)"
	do_facet $SINGLEMDS lfs flushctx || error "could not run flushctx"
	OLD_SK_PATH=$SK_PATH
	export SK_PATH=$SK_PATH/$FSNAME-bogus.key
	if zconf_mount_clients ${clients_arr[0]} $MOUNT; then
		SK_PATH=$OLD_SK_PATH
		if [ -e $DIR/$tdir/$tdir.out ]; then
			error "mount and read file with invalid key"
		else
			error "mount with invalid key"
		fi
	fi
	SK_PATH=$OLD_SK_PATH
	zconf_umount_clients ${clients_arr[0]} $MOUNT ||
		error "unable to umount clients"
}
run_test 30 "check for invalid shared key"
log "cleanup: ======================================================"

## nodemap deactivated
do_facet mgs $LCTL nodemap_activate 0

for num in $(seq $MDSCOUNT); do
	if [ "${identity_old[$num]}" = 1 ]; then
		switch_identity $num false || identity_old[$num]=$?
	fi
done

$RUNAS_CMD -u $ID0 ls $DIR
$RUNAS_CMD -u $ID1 ls $DIR