3 # Run select tests by setting ONLY, or as arguments to the script.
4 # Skip specific tests by setting EXCEPT.
11 LUSTRE=${LUSTRE:-$(dirname $0)/..}
12 . $LUSTRE/tests/test-framework.sh
17 ALWAYS_EXCEPT="$SANITY_SEC_EXCEPT "
18 # bug number for skipped test:
20 # UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
22 [ "$SLOW" = "no" ] && EXCEPT_SLOW="26"
24 NODEMAP_TESTS=$(seq 7 26)
26 if ! check_versions; then
27 echo "Skipping nodemap tests: not needed in interoperability mode"
28 EXCEPT="$EXCEPT $NODEMAP_TESTS"
33 RUNAS_CMD=${RUNAS_CMD:-runas}
35 WTL=${WTL:-"$LUSTRE/tests/write_time_limit"}
38 PERM_CONF=$CONFDIR/perm.conf
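# the checksum-derived values below keep nodemap names unique per test host
# (${HOSTNAME_CHECKSUM}_<i>) and provide the leading octet of the synthetic
# @tcp NID ranges added to those nodemaps in the tests that follow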
40 HOSTNAME_CHECKSUM=$(hostname | sum | awk '{ print $1 }')
41 SUBNET_CHECKSUM=$(expr $HOSTNAME_CHECKSUM % 250 + 1)
43 require_dsh_mds || exit 0
44 require_dsh_ost || exit 0
46 clients=${CLIENTS//,/ }
47 num_clients=$(get_node_count ${clients})
48 clients_arr=($clients)
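# ID0/ID1 must resolve to existing local accounts (USER0/USER1 below);
# the suite is skipped if either user is missing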
52 USER0=$(getent passwd | grep :$ID0:$ID0: | cut -d: -f1)
53 USER1=$(getent passwd | grep :$ID1:$ID1: | cut -d: -f1)
57 NODEMAP_IPADDR_LIST="1 10 64 128 200 250"
59 NODEMAP_MAX_ID=$((ID0 + NODEMAP_ID_COUNT))
62 skip "need to add user0 ($ID0:$ID0)" && exit 0
65 skip "need to add user1 ($ID1:$ID1)" && exit 0
67 IDBASE=${IDBASE:-60000}
69 # changes to mappings must be reflected in test 23
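# each entry lists client_id:fs_id pairs for one test cluster (c0/c1); these
# idmaps are loaded by create_fops_nodemaps() via nodemap_add_idmap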
71 [0]="$((IDBASE+3)):$((IDBASE+0)) $((IDBASE+4)):$((IDBASE+2))"
72 [1]="$((IDBASE+5)):$((IDBASE+1)) $((IDBASE+6)):$((IDBASE+2))"
75 check_and_setup_lustre
80 GSS_REF=$(lsmod | grep ^ptlrpc_gss | awk '{print $3}')
81 if [ ! -z "$GSS_REF" -a "$GSS_REF" != "0" ]; then
83 echo "with GSS support"
86 echo "without GSS support"
89 MDT=$(do_facet $SINGLEMDS lctl get_param -N "mdt.\*MDT0000" |
91 [ -z "$MDT" ] && error "fail to get MDT device" && exit 1
92 do_facet $SINGLEMDS "mkdir -p $CONFDIR"
93 IDENTITY_FLUSH=mdt.$MDT.identity_flush
94 IDENTITY_UPCALL=mdt.$MDT.identity_upcall
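# kerberos login helper used below: makes sure the given user/group can
# access $DIR, (re)running krb5_login.sh and 'lfs flushctx' if needed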
103 if ! $RUNAS_CMD -u $user krb5_login.sh; then
104 error "kerberos login failed for $user."
108 if ! $RUNAS_CMD -u $user -g $group ls $DIR > /dev/null 2>&1; then
109 $RUNAS_CMD -u $user lfs flushctx -k
110 $RUNAS_CMD -u $user krb5_login.sh
111 if ! $RUNAS_CMD -u$user -g$group ls $DIR > /dev/null 2>&1; then
112 error "init $user $group failed."
118 declare -a identity_old
121 for num in $(seq $MDSCOUNT); do
122 switch_identity $num true || identity_old[$num]=$?
125 if ! $RUNAS_CMD -u $ID0 ls $DIR > /dev/null 2>&1; then
126 sec_login $USER0 $USER0
129 if ! $RUNAS_CMD -u $ID1 ls $DIR > /dev/null 2>&1; then
130 sec_login $USER1 $USER1
135 # run as different user
139 chmod 0755 $DIR || error "chmod (1)"
140 rm -rf $DIR/$tdir || error "rm (1)"
141 mkdir -p $DIR/$tdir || error "mkdir (1)"
142 chown $USER0 $DIR/$tdir || error "chown (2)"
143 $RUNAS_CMD -u $ID0 ls $DIR || error "ls (1)"
144 rm -f $DIR/f0 || error "rm (2)"
145 $RUNAS_CMD -u $ID0 touch $DIR/f0 && error "touch (1)"
146 $RUNAS_CMD -u $ID0 touch $DIR/$tdir/f1 || error "touch (2)"
147 $RUNAS_CMD -u $ID1 touch $DIR/$tdir/f2 && error "touch (3)"
148 touch $DIR/$tdir/f3 || error "touch (4)"
149 chown root $DIR/$tdir || error "chown (3)"
150 chgrp $USER0 $DIR/$tdir || error "chgrp (1)"
151 chmod 0775 $DIR/$tdir || error "chmod (2)"
152 $RUNAS_CMD -u $ID0 touch $DIR/$tdir/f4 || error "touch (5)"
153 $RUNAS_CMD -u $ID1 touch $DIR/$tdir/f5 && error "touch (6)"
154 touch $DIR/$tdir/f6 || error "touch (7)"
155 rm -rf $DIR/$tdir || error "rm (3)"
157 run_test 0 "uid permission ============================="
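# tests 1 and 4 drive the MDS identity upcall: permission lines of the form
# '<nid> <uid> <perm>[,<perm>]' (e.g. '* $ID1 setuid,setgid') are written to
# $PERM_CONF, and setting identity_flush=-1 makes the MDT drop its cached
# identities so the new permissions take effect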
161 [ $GSS_SUP = 0 ] && skip "without GSS support." && return
166 chown $USER0 $DIR/$tdir || error "chown (1)"
167 $RUNAS_CMD -u $ID1 -v $ID0 touch $DIR/$tdir/f0 && error "touch (2)"
168 echo "enable uid $ID1 setuid"
169 do_facet $SINGLEMDS "echo '* $ID1 setuid' >> $PERM_CONF"
170 do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
171 $RUNAS_CMD -u $ID1 -v $ID0 touch $DIR/$tdir/f1 || error "touch (3)"
173 chown root $DIR/$tdir || error "chown (4)"
174 chgrp $USER0 $DIR/$tdir || error "chgrp (5)"
175 chmod 0770 $DIR/$tdir || error "chmod (6)"
176 $RUNAS_CMD -u $ID1 -g $ID1 touch $DIR/$tdir/f2 && error "touch (7)"
177 $RUNAS_CMD -u$ID1 -g$ID1 -j$ID0 touch $DIR/$tdir/f3 && error "touch (8)"
178 echo "enable uid $ID1 setuid,setgid"
179 do_facet $SINGLEMDS "echo '* $ID1 setuid,setgid' > $PERM_CONF"
180 do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
181 $RUNAS_CMD -u $ID1 -g $ID1 -j $ID0 touch $DIR/$tdir/f4 ||
183 $RUNAS_CMD -u $ID1 -v $ID0 -g $ID1 -j $ID0 touch $DIR/$tdir/f5 ||
188 do_facet $SINGLEMDS "rm -f $PERM_CONF"
189 do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
191 run_test 1 "setuid/gid ============================="
193 # bug 3285 - supplementary group should always succeed.
194 # NB: supplementary groups are only honored as set on a local client;
195 # for a remote client, the groups of the specified uid are obtained on
196 # the MDT via the /sbin/l_getidentity upcall and used instead.
198 local server_version=$(lustre_version_code $SINGLEMDS)
200 [[ $server_version -ge $(version_code 2.6.93) ]] ||
201 [[ $server_version -ge $(version_code 2.5.35) &&
202 $server_version -lt $(version_code 2.5.50) ]] ||
203 { skip "Need MDS version at least 2.6.93 or 2.5.35"; return; }
207 chmod 0771 $DIR/$tdir
208 chgrp $ID0 $DIR/$tdir
209 $RUNAS_CMD -u $ID0 ls $DIR/$tdir || error "setgroups (1)"
210 do_facet $SINGLEMDS "echo '* $ID1 setgrp' > $PERM_CONF"
211 do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
212 $RUNAS_CMD -u $ID1 -G1,2,$ID0 ls $DIR/$tdir ||
213 error "setgroups (2)"
214 $RUNAS_CMD -u $ID1 -G1,2 ls $DIR/$tdir && error "setgroups (3)"
217 do_facet $SINGLEMDS "rm -f $PERM_CONF"
218 do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
220 run_test 4 "set supplementary group ==============="
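# the helpers below create and delete NODEMAP_COUNT nodemaps named
# ${HOSTNAME_CHECKSUM}_<i> on the MGS and wait for the change to reach
# all servers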
227 squash_id default 99 0
228 wait_nm_sync default squash_uid '' inactive
229 squash_id default 99 1
230 wait_nm_sync default squash_gid '' inactive
231 for (( i = 0; i < NODEMAP_COUNT; i++ )); do
232 local csum=${HOSTNAME_CHECKSUM}_${i}
234 do_facet mgs $LCTL nodemap_add $csum
236 if [ $rc -ne 0 ]; then
237 echo "nodemap_add $csum failed with $rc"
241 out=$(do_facet mgs $LCTL get_param nodemap.$csum.id)
242 ## fail (return 1) if the new nodemap does not show up in the output
243 [[ $(echo $out | grep -c $csum) == 0 ]] && return 1
245 for (( i = 0; i < NODEMAP_COUNT; i++ )); do
246 local csum=${HOSTNAME_CHECKSUM}_${i}
248 wait_nm_sync $csum id '' inactive
257 for ((i = 0; i < NODEMAP_COUNT; i++)); do
258 local csum=${HOSTNAME_CHECKSUM}_${i}
260 if ! do_facet mgs $LCTL nodemap_del $csum; then
261 error "nodemap_del $csum failed with $?"
265 out=$(do_facet mgs $LCTL get_param nodemap.$csum.id 2>/dev/null)
266 [[ $(echo $out | grep -c $csum) != 0 ]] && return 1
268 for (( i = 0; i < NODEMAP_COUNT; i++ )); do
269 local csum=${HOSTNAME_CHECKSUM}_${i}
271 wait_nm_sync $csum id '' inactive
278 local cmd="$LCTL nodemap_add_range"
282 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
283 range="$SUBNET_CHECKSUM.${2}.${j}.[1-253]@tcp"
284 if ! do_facet mgs $cmd --name $1 --range $range; then
293 local cmd="$LCTL nodemap_del_range"
297 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
298 range="$SUBNET_CHECKSUM.${2}.${j}.[1-253]@tcp"
299 if ! do_facet mgs $cmd --name $1 --range $range; then
309 local cmd="$LCTL nodemap_add_idmap"
312 echo "Start to add idmaps ..."
313 for ((i = 0; i < NODEMAP_COUNT; i++)); do
316 for ((j = $ID0; j < NODEMAP_MAX_ID; j++)); do
317 local csum=${HOSTNAME_CHECKSUM}_${i}
319 local fs_id=$((j + 1))
321 if ! do_facet mgs $cmd --name $csum --idtype uid \
322 --idmap $client_id:$fs_id; then
325 if ! do_facet mgs $cmd --name $csum --idtype gid \
326 --idmap $client_id:$fs_id; then
335 update_idmaps() { #LU-10040
336 [ $(lustre_version_code mgs) -lt $(version_code 2.10.55) ] &&
337 skip "Need MGS >= 2.10.55" &&
339 local csum=${HOSTNAME_CHECKSUM}_0
340 local old_id_client=$ID0
341 local old_id_fs=$((ID0 + 1))
342 local new_id=$((ID0 + 100))
349 echo "Start to update idmaps ..."
351 #Inserting an existing idmap should return an error
352 cmd="$LCTL nodemap_add_idmap --name $csum --idtype uid"
354 $cmd --idmap $old_id_client:$old_id_fs 2>/dev/null; then
355 error "insert idmap {$old_id_client:$old_id_fs} " \
356 "should return error"
361 #Update id_fs and check it
362 if ! do_facet mgs $cmd --idmap $old_id_client:$new_id; then
363 error "$cmd --idmap $old_id_client:$new_id failed"
367 tmp_id=$(do_facet mgs $LCTL get_param -n nodemap.$csum.idmap |
368 awk '{ print $7 }' | sed -n '2p')
369 [ $tmp_id != $new_id ] && { error "new id_fs $tmp_id != $new_id"; \
370 rc=$((rc + 1)); return $rc; }
372 #Update id_client and check it
373 if ! do_facet mgs $cmd --idmap $new_id:$new_id; then
374 error "$cmd --idmap $new_id:$new_id failed"
378 tmp_id=$(do_facet mgs $LCTL get_param -n nodemap.$csum.idmap |
379 awk '{ print $5 }' | sed -n "$((NODEMAP_ID_COUNT + 1)) p")
380 tmp_id=${tmp_id%,*} #strip trailing comma, e.g. "501,"->"501"
381 [ $tmp_id != $new_id ] && { error "new id_client $tmp_id != $new_id"; \
382 rc=$((rc + 1)); return $rc; }
384 #Delete above updated idmap
385 cmd="$LCTL nodemap_del_idmap --name $csum --idtype uid"
386 if ! do_facet mgs $cmd --idmap $new_id:$new_id; then
387 error "$cmd --idmap $new_id:$new_id failed"
392 #restore the idmap so that delete_idmaps can clean up properly
393 cmd="$LCTL nodemap_add_idmap --name $csum --idtype uid"
394 if ! do_facet mgs $cmd --idmap $old_id_client:$old_id_fs; then
395 error "$cmd --idmap $old_id_client:$old_id_fs failed"
405 local cmd="$LCTL nodemap_del_idmap"
408 echo "Start to delete idmaps ..."
409 for ((i = 0; i < NODEMAP_COUNT; i++)); do
412 for ((j = $ID0; j < NODEMAP_MAX_ID; j++)); do
413 local csum=${HOSTNAME_CHECKSUM}_${i}
415 local fs_id=$((j + 1))
417 if ! do_facet mgs $cmd --name $csum --idtype uid \
418 --idmap $client_id:$fs_id; then
421 if ! do_facet mgs $cmd --name $csum --idtype gid \
422 --idmap $client_id:$fs_id; then
435 local cmd="$LCTL nodemap_modify"
438 proc[0]="admin_nodemap"
439 proc[1]="trusted_nodemap"
443 for ((idx = 0; idx < 2; idx++)); do
444 if ! do_facet mgs $cmd --name $1 --property ${option[$idx]} \
449 if ! do_facet mgs $cmd --name $1 --property ${option[$idx]} \
459 [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
460 skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
464 cmd[0]="$LCTL nodemap_modify --property squash_uid"
465 cmd[1]="$LCTL nodemap_modify --property squash_gid"
467 if ! do_facet mgs ${cmd[$3]} --name $1 --value $2; then
473 local nodemap_name=$1
478 local is_active=$(do_facet mgs $LCTL get_param -n nodemap.active)
483 local mgs_ip=$(host_nids_address $mgs_HOST $NETTYPE | cut -d' ' -f1)
486 if [ "$nodemap_name" == "active" ]; then
488 elif [ -z "$key" ]; then
489 proc_param=${nodemap_name}
491 proc_param="${nodemap_name}.${key}"
493 if [ "$opt" == "inactive" ]; then
494 # check nm sync even if nodemap is not activated
498 (( is_active == 0 )) && [ "$proc_param" != "active" ] && return
500 if [ -z "$value" ]; then
501 out1=$(do_facet mgs $LCTL get_param $opt \
502 nodemap.${proc_param} 2>/dev/null)
503 echo "On MGS ${mgs_ip}, ${proc_param} = $out1"
508 # wait up to 10 seconds for other servers to sync with mgs
509 for i in $(seq 1 10); do
510 for node in $(all_server_nodes); do
511 local node_ip=$(host_nids_address $node $NETTYPE |
515 if [ -z "$value" ]; then
516 [ $node_ip == $mgs_ip ] && continue
519 out2=$(do_node $node_ip $LCTL get_param $opt \
520 nodemap.$proc_param 2>/dev/null)
521 echo "On $node ${node_ip}, ${proc_param} = $out2"
522 [ "$out1" != "$out2" ] && is_sync=false && break
530 echo OTHER - IP: $node_ip
532 error "mgs and $nodemap_name ${key} mismatch, $i attempts"
534 echo "waited $((i - 1)) seconds for sync"
537 # ensure that the squash defaults are the expected defaults
538 squash_id default 99 0
539 wait_nm_sync default squash_uid '' inactive
540 squash_id default 99 1
541 wait_nm_sync default squash_gid '' inactive
546 cmd="$LCTL nodemap_test_nid"
548 nid=$(do_facet mgs $cmd $1)
550 if [ $nid == $2 ]; then
558 # restore activation state
559 do_facet mgs $LCTL nodemap_activate 0
565 local cmd="$LCTL nodemap_test_id"
568 echo "Start to test idmaps ..."
569 ## nodemap deactivated
570 if ! do_facet mgs $LCTL nodemap_activate 0; then
573 for ((id = $ID0; id < NODEMAP_MAX_ID; id++)); do
576 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
577 local nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
578 local fs_id=$(do_facet mgs $cmd --nid $nid \
579 --idtype uid --id $id)
580 if [ $fs_id != $id ]; then
581 echo "expected $id, got $fs_id"
588 if ! do_facet mgs $LCTL nodemap_activate 1; then
592 for ((id = $ID0; id < NODEMAP_MAX_ID; id++)); do
593 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
594 nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
595 fs_id=$(do_facet mgs $cmd --nid $nid \
596 --idtype uid --id $id)
597 expected_id=$((id + 1))
598 if [ $fs_id != $expected_id ]; then
599 echo "expected $expected_id, got $fs_id"
606 for ((i = 0; i < NODEMAP_COUNT; i++)); do
607 local csum=${HOSTNAME_CHECKSUM}_${i}
609 if ! do_facet mgs $LCTL nodemap_modify --name $csum \
610 --property trusted --value 1; then
611 error "nodemap_modify $csum failed with $?"
616 for ((id = $ID0; id < NODEMAP_MAX_ID; id++)); do
617 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
618 nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
619 fs_id=$(do_facet mgs $cmd --nid $nid \
620 --idtype uid --id $id)
621 if [ $fs_id != $id ]; then
622 echo "expected $id, got $fs_id"
628 ## enable the admin property so root is not squashed
629 for ((i = 0; i < NODEMAP_COUNT; i++)); do
630 local csum=${HOSTNAME_CHECKSUM}_${i}
632 if ! do_facet mgs $LCTL nodemap_modify --name $csum \
633 --property admin --value 1; then
634 error "nodemap_modify $csum failed with $?"
639 ## check that root access is allowed (uid 0 not squashed)
640 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
641 nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
642 fs_id=$(do_facet mgs $cmd --nid $nid --idtype uid --id 0)
643 if [ $fs_id != 0 ]; then
644 echo "root allowed expected 0, got $fs_id"
649 ## disable the admin property so root is squashed
650 for ((i = 0; i < NODEMAP_COUNT; i++)); do
651 local csum=${HOSTNAME_CHECKSUM}_${i}
653 if ! do_facet mgs $LCTL nodemap_modify --name $csum \
654 --property admin --value 0; then
655 error "nodemap_modify ${HOSTNAME_CHECKSUM}_${i} failed"
661 ## check that root is mapped to 99
662 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
663 nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
664 fs_id=$(do_facet mgs $cmd --nid $nid --idtype uid --id 0)
665 if [ $fs_id != 99 ]; then
666 error "root squash expected 99, got $fs_id"
671 ## reset client trust to 0
672 for ((i = 0; i < NODEMAP_COUNT; i++)); do
673 if ! do_facet mgs $LCTL nodemap_modify \
674 --name ${HOSTNAME_CHECKSUM}_${i} \
675 --property trusted --value 0; then
676 error "nodemap_modify ${HOSTNAME_CHECKSUM}_${i} failed"
688 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
689 [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
690 skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
695 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
699 [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 2
703 run_test 7 "nodemap create and delete"
708 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
709 [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
710 skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
717 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
723 [[ $rc == 0 ]] && error "duplicate nodemap_add allowed with $rc" &&
729 [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 3
733 run_test 8 "nodemap reject duplicates"
739 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
740 [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
741 skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
747 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
750 for ((i = 0; i < NODEMAP_COUNT; i++)); do
751 if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
755 [[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2
758 for ((i = 0; i < NODEMAP_COUNT; i++)); do
759 if ! delete_range ${HOSTNAME_CHECKSUM}_${i} $i; then
763 [[ $rc != 0 ]] && error "nodemap_del_range failed with $rc" && return 4
768 [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4
772 run_test 9 "nodemap range add"
777 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
778 [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
779 skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
785 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
788 for ((i = 0; i < NODEMAP_COUNT; i++)); do
789 if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
793 [[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2
796 for ((i = 0; i < NODEMAP_COUNT; i++)); do
797 if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
801 [[ $rc == 0 ]] && error "nodemap_add_range duplicate add with $rc" &&
806 for ((i = 0; i < NODEMAP_COUNT; i++)); do
807 if ! delete_range ${HOSTNAME_CHECKSUM}_${i} $i; then
811 [[ $rc != 0 ]] && error "nodemap_del_range failed with $rc" && return 4
815 [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 5
819 run_test 10a "nodemap reject duplicate ranges"
822 [ $(lustre_version_code mgs) -lt $(version_code 2.10.53) ] &&
823 skip "Need MGS >= 2.10.53" && return
827 local nids="192.168.19.[0-255]@o2ib20"
829 do_facet mgs $LCTL nodemap_del $nm1 2>/dev/null
830 do_facet mgs $LCTL nodemap_del $nm2 2>/dev/null
832 do_facet mgs $LCTL nodemap_add $nm1 || error "Add $nm1 failed"
833 do_facet mgs $LCTL nodemap_add $nm2 || error "Add $nm2 failed"
834 do_facet mgs $LCTL nodemap_add_range --name $nm1 --range $nids ||
835 error "Add range $nids to $nm1 failed"
836 [ -n "$(do_facet mgs $LCTL get_param nodemap.$nm1.* |
837 grep start_nid)" ] || error "No range was found"
838 do_facet mgs $LCTL nodemap_del_range --name $nm2 --range $nids &&
839 error "Deleting range $nids from $nm2 should fail"
840 [ -n "$(do_facet mgs $LCTL get_param nodemap.$nm1.* |
841 grep start_nid)" ] || error "Range $nids should be there"
843 do_facet mgs $LCTL nodemap_del $nm1 || error "Delete $nm1 failed"
844 do_facet mgs $LCTL nodemap_del $nm2 || error "Delete $nm2 failed"
847 run_test 10b "delete range from the correct nodemap"
849 test_10c() { #LU-8912
850 [ $(lustre_version_code mgs) -lt $(version_code 2.10.57) ] &&
851 skip "Need MGS >= 2.10.57" && return
853 local nm="nodemap_lu8912"
854 local nid_range="10.210.[32-47].[0-255]@o2ib3"
855 local start_nid="10.210.32.0@o2ib3"
856 local end_nid="10.210.47.255@o2ib3"
857 local start_nid_found
860 do_facet mgs $LCTL nodemap_del $nm 2>/dev/null
861 do_facet mgs $LCTL nodemap_add $nm || error "Add $nm failed"
862 do_facet mgs $LCTL nodemap_add_range --name $nm --range $nid_range ||
863 error "Add range $nid_range to $nm failed"
865 start_nid_found=$(do_facet mgs $LCTL get_param nodemap.$nm.* |
866 awk -F '[,: ]' /start_nid/'{ print $9 }')
867 [ "$start_nid" == "$start_nid_found" ] ||
868 error "start_nid: $start_nid_found != $start_nid"
869 end_nid_found=$(do_facet mgs $LCTL get_param nodemap.$nm.* |
870 awk -F '[,: ]' /end_nid/'{ print $13 }')
871 [ "$end_nid" == "$end_nid_found" ] ||
872 error "end_nid: $end_nid_found != $end_nid"
874 do_facet mgs $LCTL nodemap_del $nm || error "Delete $nm failed"
877 run_test 10c "verify contiguous range support"
879 test_10d() { #LU-8913
880 [ $(lustre_version_code mgs) -lt $(version_code 2.10.59) ] &&
881 skip "Need MGS >= 2.10.59" && return
883 local nm="nodemap_lu8913"
884 local nid_range="*@o2ib3"
885 local start_nid="0.0.0.0@o2ib3"
886 local end_nid="255.255.255.255@o2ib3"
887 local start_nid_found
890 do_facet mgs $LCTL nodemap_del $nm 2>/dev/null
891 do_facet mgs $LCTL nodemap_add $nm || error "Add $nm failed"
892 do_facet mgs $LCTL nodemap_add_range --name $nm --range $nid_range ||
893 error "Add range $nid_range to $nm failed"
895 start_nid_found=$(do_facet mgs $LCTL get_param nodemap.$nm.* |
896 awk -F '[,: ]' /start_nid/'{ print $9 }')
897 [ "$start_nid" == "$start_nid_found" ] ||
898 error "start_nid: $start_nid_found != $start_nid"
899 end_nid_found=$(do_facet mgs $LCTL get_param nodemap.$nm.* |
900 awk -F '[,: ]' /end_nid/'{ print $13 }')
901 [ "$end_nid" == "$end_nid_found" ] ||
902 error "end_nid: $end_nid_found != $end_nid"
904 do_facet mgs $LCTL nodemap_del $nm || error "Delete $nm failed"
907 run_test 10d "verify nodemap range format '*@<net>' support"
912 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
913 [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
914 skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
920 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
923 for ((i = 0; i < NODEMAP_COUNT; i++)); do
924 if ! modify_flags ${HOSTNAME_CHECKSUM}_${i}; then
928 [[ $rc != 0 ]] && error "nodemap_modify failed with $rc" && return 2
933 [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 3
937 run_test 11 "nodemap modify"
942 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
943 [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
944 skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
950 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
953 for ((i = 0; i < NODEMAP_COUNT; i++)); do
954 if ! squash_id ${HOSTNAME_CHECKSUM}_${i} 88 0; then
958 [[ $rc != 0 ]] && error "nodemap squash_uid failed with $rc" && return 2
961 for ((i = 0; i < NODEMAP_COUNT; i++)); do
962 if ! squash_id ${HOSTNAME_CHECKSUM}_${i} 88 1; then
966 [[ $rc != 0 ]] && error "nodemap squash_gid failed with $rc" && return 3
971 [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4
975 run_test 12 "nodemap set squash ids"
980 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
981 [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
982 skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
988 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
991 for ((i = 0; i < NODEMAP_COUNT; i++)); do
992 if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
996 [[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2
999 for ((i = 0; i < NODEMAP_COUNT; i++)); do
1000 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
1001 for k in $NODEMAP_IPADDR_LIST; do
1002 if ! test_nid $SUBNET_CHECKSUM.$i.$j.$k \
1003 ${HOSTNAME_CHECKSUM}_${i}; then
1009 [[ $rc != 0 ]] && error "nodemap_test_nid failed with $rc" && return 3
1014 [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4
1018 run_test 13 "test nids"
1023 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
1024 [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
1025 skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
1031 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
1034 for ((i = 0; i < NODEMAP_COUNT; i++)); do
1035 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
1036 for k in $NODEMAP_IPADDR_LIST; do
1037 if ! test_nid $SUBNET_CHECKSUM.$i.$j.$k \
1044 [[ $rc != 0 ]] && error "nodemap_test_nid failed with $rc" && return 3
1049 [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4
1053 run_test 14 "test default nodemap nid lookup"
1058 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
1059 [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
1060 skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
1066 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
1069 for ((i = 0; i < NODEMAP_COUNT; i++)); do
1070 if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
1074 [[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2
1079 [[ $rc != 0 ]] && error "nodemap_add_idmap failed with $rc" && return 3
1081 activedefault=$(do_facet mgs $LCTL get_param -n nodemap.active)
1082 if [[ "$activedefault" != "1" ]]; then
1083 stack_trap cleanup_active EXIT
1089 [[ $rc != 0 ]] && error "nodemap_test_id failed with $rc" && return 4
1094 [[ $rc != 0 ]] && error "update_idmaps failed with $rc" && return 5
1099 [[ $rc != 0 ]] && error "nodemap_del_idmap failed with $rc" && return 6
1104 [[ $rc != 0 ]] && error "nodemap_delete failed with $rc" && return 7
1108 run_test 15 "test id mapping"
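# create one nodemap per client (c0, c1, ...), add the client's NID as its
# range and load the uid/gid idmaps defined in FOPS_IDMAPS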
1110 create_fops_nodemaps() {
1113 for client in $clients; do
1114 local client_ip=$(host_nids_address $client $NETTYPE)
1115 local client_nid=$(h2nettype $client_ip)
1116 do_facet mgs $LCTL nodemap_add c${i} || return 1
1117 do_facet mgs $LCTL nodemap_add_range \
1118 --name c${i} --range $client_nid || return 1
1119 for map in ${FOPS_IDMAPS[i]}; do
1120 do_facet mgs $LCTL nodemap_add_idmap --name c${i} \
1121 --idtype uid --idmap ${map} || return 1
1122 do_facet mgs $LCTL nodemap_add_idmap --name c${i} \
1123 --idtype gid --idmap ${map} || return 1
1126 wait_nm_sync c$i idmap
1133 delete_fops_nodemaps() {
1136 for client in $clients; do
1137 do_facet mgs $LCTL nodemap_del c${i} || return 1
1145 if [ $MDSCOUNT -le 1 ]; then
1146 do_node ${clients_arr[0]} mkdir -p $DIR/$tdir
1148 # round-robin MDTs to test DNE nodemap support
1149 [ ! -d $DIR ] && do_node ${clients_arr[0]} mkdir -p $DIR
1150 do_node ${clients_arr[0]} $LFS setdirstripe -c 1 -i \
1151 $((fops_mds_index % MDSCOUNT)) $DIR/$tdir
1152 ((fops_mds_index++))
1156 # acl test directory needs to be initialized on a privileged client
1158 local admin=$(do_facet mgs $LCTL get_param -n nodemap.c0.admin_nodemap)
1159 local trust=$(do_facet mgs $LCTL get_param -n \
1160 nodemap.c0.trusted_nodemap)
1162 do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
1163 do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
1165 wait_nm_sync c0 admin_nodemap
1166 wait_nm_sync c0 trusted_nodemap
1168 do_node ${clients_arr[0]} rm -rf $DIR/$tdir
1170 do_node ${clients_arr[0]} chown $user $DIR/$tdir
1172 do_facet mgs $LCTL nodemap_modify --name c0 \
1173 --property admin --value $admin
1174 do_facet mgs $LCTL nodemap_modify --name c0 \
1175 --property trusted --value $trust
1177 # flush MDT locks to make sure they are reacquired before test
1178 do_node ${clients_arr[0]} $LCTL set_param \
1179 ldlm.namespaces.$FSNAME-MDT*.lru_size=clear
1181 wait_nm_sync c0 admin_nodemap
1182 wait_nm_sync c0 trusted_nodemap
1185 # fileset test directory needs to be initialized on a privileged client
1186 fileset_test_setup() {
1189 if [ -n "$FILESET" -a -z "$SKIP_FILESET" ]; then
1190 cleanup_mount $MOUNT
1191 FILESET="" zconf_mount_clients $CLIENTS $MOUNT
1194 local admin=$(do_facet mgs $LCTL get_param -n \
1195 nodemap.${nm}.admin_nodemap)
1196 local trust=$(do_facet mgs $LCTL get_param -n \
1197 nodemap.${nm}.trusted_nodemap)
1199 do_facet mgs $LCTL nodemap_modify --name $nm --property admin --value 1
1200 do_facet mgs $LCTL nodemap_modify --name $nm --property trusted \
1203 wait_nm_sync $nm admin_nodemap
1204 wait_nm_sync $nm trusted_nodemap
1206 # create directory and populate it for subdir mount
1207 do_node ${clients_arr[0]} mkdir $MOUNT/$subdir ||
1208 error "unable to create dir $MOUNT/$subdir"
1209 do_node ${clients_arr[0]} touch $MOUNT/$subdir/this_is_$subdir ||
1210 error "unable to create file $MOUNT/$subdir/this_is_$subdir"
1211 do_node ${clients_arr[0]} mkdir $MOUNT/$subdir/$subsubdir ||
1212 error "unable to create dir $MOUNT/$subdir/$subsubdir"
1213 do_node ${clients_arr[0]} touch \
1214 $MOUNT/$subdir/$subsubdir/this_is_$subsubdir ||
1215 error "unable to create file \
1216 $MOUNT/$subdir/$subsubdir/this_is_$subsubdir"
1218 do_facet mgs $LCTL nodemap_modify --name $nm \
1219 --property admin --value $admin
1220 do_facet mgs $LCTL nodemap_modify --name $nm \
1221 --property trusted --value $trust
1223 # flush MDT locks to make sure they are reacquired before test
1224 do_node ${clients_arr[0]} $LCTL set_param \
1225 ldlm.namespaces.$FSNAME-MDT*.lru_size=clear
1227 wait_nm_sync $nm admin_nodemap
1228 wait_nm_sync $nm trusted_nodemap
1231 # fileset test directory needs to be cleaned up on a privileged client
1232 fileset_test_cleanup() {
1234 local admin=$(do_facet mgs $LCTL get_param -n \
1235 nodemap.${nm}.admin_nodemap)
1236 local trust=$(do_facet mgs $LCTL get_param -n \
1237 nodemap.${nm}.trusted_nodemap)
1239 do_facet mgs $LCTL nodemap_modify --name $nm --property admin --value 1
1240 do_facet mgs $LCTL nodemap_modify --name $nm --property trusted \
1243 wait_nm_sync $nm admin_nodemap
1244 wait_nm_sync $nm trusted_nodemap
1246 # cleanup directory created for subdir mount
1247 do_node ${clients_arr[0]} rm -rf $MOUNT/$subdir ||
1248 error "unable to remove dir $MOUNT/$subdir"
1250 do_facet mgs $LCTL nodemap_modify --name $nm \
1251 --property admin --value $admin
1252 do_facet mgs $LCTL nodemap_modify --name $nm \
1253 --property trusted --value $trust
1255 # flush MDT locks to make sure they are reacquired before test
1256 do_node ${clients_arr[0]} $LCTL set_param \
1257 ldlm.namespaces.$FSNAME-MDT*.lru_size=clear
1259 wait_nm_sync $nm admin_nodemap
1260 wait_nm_sync $nm trusted_nodemap
1261 if [ -n "$FILESET" -a -z "$SKIP_FILESET" ]; then
1262 cleanup_mount $MOUNT
1263 zconf_mount_clients $CLIENTS $MOUNT
1267 do_create_delete() {
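# $1 = prefix that runs commands as the client user, $2 = test key in the
# form "mapmode:mds_user:cluster:cli_user:mode"; the observed create/delete
# result is compared against get_cr_del_expected()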
1270 local testfile=$DIR/$tdir/$tfile
1274 if $run_u touch $testfile >& /dev/null; then
1276 $run_u rm $testfile && d=1
1280 local expected=$(get_cr_del_expected $key)
1281 [ "$res" != "$expected" ] &&
1282 error "test $key, wanted $expected, got $res" && rc=$((rc + 1))
1286 nodemap_check_quota() {
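# print the kbytes-used column reported by 'lfs quota -q' for the given user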
1288 $run_u lfs quota -q $DIR | awk '{ print $2; exit; }'
1291 do_fops_quota_test() {
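# write a 1M file as the mapped user, check that quota usage grows by ~1M
# (within a fs_log_size fuzz), then remove the file and check the space is
# reclaimed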
1293 # fuzz quota used to account for possible indirect blocks, etc
1294 local quota_fuzz=$(fs_log_size)
1295 local qused_orig=$(nodemap_check_quota "$run_u")
1296 local qused_high=$((qused_orig + quota_fuzz))
1297 local qused_low=$((qused_orig - quota_fuzz))
1298 local testfile=$DIR/$tdir/$tfile
1299 $run_u dd if=/dev/zero of=$testfile oflag=sync bs=1M count=1 \
1300 >& /dev/null || error "unable to write quota test file"
1301 sync; sync_all_data || true
1303 local qused_new=$(nodemap_check_quota "$run_u")
1304 [ $((qused_new)) -lt $((qused_low + 1024)) -o \
1305 $((qused_new)) -gt $((qused_high + 1024)) ] &&
1306 error "$qused_new != $qused_orig + 1M after write, " \
1307 "fuzz is $quota_fuzz"
1308 $run_u rm $testfile || error "unable to remove quota test file"
1309 wait_delete_completed_mds
1311 qused_new=$(nodemap_check_quota "$run_u")
1312 [ $((qused_new)) -lt $((qused_low)) \
1313 -o $((qused_new)) -gt $((qused_high)) ] &&
1314 error "quota not reclaimed, expect $qused_orig, " \
1315 "got $qused_new, fuzz $quota_fuzz"
1318 get_fops_mapped_user() {
1321 for ((i=0; i < ${#FOPS_IDMAPS[@]}; i++)); do
1322 for map in ${FOPS_IDMAPS[i]}; do
1323 if [ $(cut -d: -f1 <<< "$map") == $cli_user ]; then
1324 cut -d: -f2 <<< "$map"
1332 get_cr_del_expected() {
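# given a test key "mapmode:mds_user:cluster:cli_user:mode", decide whether a
# create/delete by that client user should succeed, accounting for root
# squash, the admin/trusted map mode and the uid mapping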
1334 IFS=":" read -a key <<< "$1"
1335 local mapmode="${key[0]}"
1336 local mds_user="${key[1]}"
1337 local cluster="${key[2]}"
1338 local cli_user="${key[3]}"
1339 local mode="0${key[4]}"
1346 [[ $mapmode == *mapped* ]] && mapped=1
1347 # only c1 is mapped in these test cases
1348 [[ $mapmode == mapped_trusted* ]] && [ "$cluster" == "c0" ] && mapped=0
1349 [[ $mapmode == *noadmin* ]] && noadmin=1
1351 # o+wx works as long as the user isn't mapped
1352 if [ $((mode & 3)) -eq 3 ]; then
1356 # if client user is root, check if root is squashed
1357 if [ "$cli_user" == "0" ]; then
1358 # when root is squashed, success depends on the 'other' permission bits
1361 1) [ "$other" == "1" ] && echo $SUCCESS
1362 [ "$other" == "0" ] && echo $FAILURE;;
1366 if [ "$mapped" == "0" ]; then
1367 [ "$other" == "1" ] && echo $SUCCESS
1368 [ "$other" == "0" ] && echo $FAILURE
1372 # if mapped user is mds user, check for u+wx
1373 mapped_user=$(get_fops_mapped_user $cli_user)
1374 [ "$mapped_user" == "-1" ] &&
1375 error "unable to find mapping for client user $cli_user"
1377 if [ "$mapped_user" == "$mds_user" -a \
1378 $(((mode & 0300) == 0300)) -eq 1 ]; then
1382 if [ "$mapped_user" != "$mds_user" -a "$other" == "1" ]; then
1389 test_fops_admin_cli_i=""
1390 test_fops_chmod_dir() {
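# chmod the fops test dir to $2 (octal bits) from a client that currently has
# the admin property, flipping admin between nodemaps as needed;
# $1 = index of the client under test, $3 = directory to chmod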
1391 local current_cli_i=$1
1393 local dir_to_chmod=$3
1394 local new_admin_cli_i=""
1396 # do we need to set up a new admin client?
1397 [ "$current_cli_i" == "0" ] && [ "$test_fops_admin_cli_i" != "1" ] &&
1399 [ "$current_cli_i" != "0" ] && [ "$test_fops_admin_cli_i" != "0" ] &&
1402 # if only one client, and non-admin, need to flip admin every time
1403 if [ "$num_clients" == "1" ]; then
1404 test_fops_admin_client=$clients
1405 test_fops_admin_val=$(do_facet mgs $LCTL get_param -n \
1406 nodemap.c0.admin_nodemap)
1407 if [ "$test_fops_admin_val" != "1" ]; then
1408 do_facet mgs $LCTL nodemap_modify \
1412 wait_nm_sync c0 admin_nodemap
1414 elif [ "$new_admin_cli_i" != "" ]; then
1415 # restore admin val to old admin client
1416 if [ "$test_fops_admin_cli_i" != "" ] &&
1417 [ "$test_fops_admin_val" != "1" ]; then
1418 do_facet mgs $LCTL nodemap_modify \
1419 --name c${test_fops_admin_cli_i} \
1421 --value $test_fops_admin_val
1422 wait_nm_sync c${test_fops_admin_cli_i} admin_nodemap
1425 test_fops_admin_cli_i=$new_admin_cli_i
1426 test_fops_admin_client=${clients_arr[$new_admin_cli_i]}
1427 test_fops_admin_val=$(do_facet mgs $LCTL get_param -n \
1428 nodemap.c${new_admin_cli_i}.admin_nodemap)
1430 if [ "$test_fops_admin_val" != "1" ]; then
1431 do_facet mgs $LCTL nodemap_modify \
1432 --name c${new_admin_cli_i} \
1435 wait_nm_sync c${new_admin_cli_i} admin_nodemap
1439 do_node $test_fops_admin_client chmod $perm_bits $DIR/$tdir || return 1
1441 # remove admin for single client if originally non-admin
1442 if [ "$num_clients" == "1" ] && [ "$test_fops_admin_val" != "1" ]; then
1443 do_facet mgs $LCTL nodemap_modify --name c0 --property admin \
1445 wait_nm_sync c0 admin_nodemap
1453 local single_client="$2"
1454 local client_user_list=([0]="0 $((IDBASE+3)) $((IDBASE+4))"
1455 [1]="0 $((IDBASE+5)) $((IDBASE+6))")
1458 local perm_bit_list="0 3 $((0300)) $((0303))"
1459 # SLOW tests 000-007, 010-070, 100-700 (octal modes)
1460 [ "$SLOW" == "yes" ] &&
1461 perm_bit_list="0 $(seq 1 7) $(seq 8 8 63) $(seq 64 64 511) \
1464 # step through mds users. -1 means root
1465 for mds_i in -1 0 1 2; do
1466 local user=$((mds_i + IDBASE))
1470 [ "$mds_i" == "-1" ] && user=0
1472 echo mkdir -p $DIR/$tdir
1475 for client in $clients; do
1477 for u in ${client_user_list[$cli_i]}; do
1478 local run_u="do_node $client \
1479 $RUNAS_CMD -u$u -g$u -G$u"
1480 for perm_bits in $perm_bit_list; do
1481 local mode=$(printf %03o $perm_bits)
1483 key="$mapmode:$user:c$cli_i:$u:$mode"
1484 test_fops_chmod_dir $cli_i $mode \
1486 error "cannot chmod $key"
1487 do_create_delete "$run_u" "$key"
1491 test_fops_chmod_dir $cli_i 777 $DIR/$tdir ||
1492 error "cannot chmod $key"
1493 do_fops_quota_test "$run_u"
1496 cli_i=$((cli_i + 1))
1497 [ "$single_client" == "1" ] && break
1504 nodemap_version_check () {
1505 remote_mgs_nodsh && skip "remote MGS with nodsh" && return 1
1506 [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
1507 skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
1512 nodemap_test_setup() {
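# common setup for the fileops tests: disable the identity upcall on all
# MDTs, create the per-client fops nodemaps, set nodemap activation as
# requested (pass 0 to leave it inactive) and mark the default nodemap
# admin and trusted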
1514 local active_nodemap=1
1516 [ "$1" == "0" ] && active_nodemap=0
1518 do_nodes $(comma_list $(all_mdts_nodes)) \
1519 $LCTL set_param mdt.*.identity_upcall=NONE
1522 create_fops_nodemaps
1524 [[ $rc != 0 ]] && error "adding fops nodemaps failed $rc"
1526 do_facet mgs $LCTL nodemap_activate $active_nodemap
1529 do_facet mgs $LCTL nodemap_modify --name default \
1530 --property admin --value 1
1531 wait_nm_sync default admin_nodemap
1532 do_facet mgs $LCTL nodemap_modify --name default \
1533 --property trusted --value 1
1534 wait_nm_sync default trusted_nodemap
1537 nodemap_test_cleanup() {
1539 delete_fops_nodemaps
1541 [[ $rc != 0 ]] && error "removing fops nodemaps failed $rc"
1543 do_facet mgs $LCTL nodemap_modify --name default \
1544 --property admin --value 0
1545 wait_nm_sync default admin_nodemap
1546 do_facet mgs $LCTL nodemap_modify --name default \
1547 --property trusted --value 0
1548 wait_nm_sync default trusted_nodemap
1550 do_facet mgs $LCTL nodemap_activate 0
1551 wait_nm_sync active 0
1553 export SK_UNIQUE_NM=false
1557 nodemap_clients_admin_trusted() {
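# set the admin ($1) and trusted ($2) properties on every client nodemap and
# wait for the last one to sync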
1561 for client in $clients; do
1562 do_facet mgs $LCTL nodemap_modify --name c${i} \
1563 --property admin --value $admin
1564 do_facet mgs $LCTL nodemap_modify --name c${i} \
1565 --property trusted --value $tr
1568 wait_nm_sync c$((i - 1)) admin_nodemap
1569 wait_nm_sync c$((i - 1)) trusted_nodemap
1573 nodemap_version_check || return 0
1574 nodemap_test_setup 0
1576 trap nodemap_test_cleanup EXIT
1578 nodemap_test_cleanup
1580 run_test 16 "test nodemap all_off fileops"
1584 [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.55) ]; then
1585 skip "Need MDS >= 2.11.55"
1588 nodemap_version_check || return 0
1591 trap nodemap_test_cleanup EXIT
1592 nodemap_clients_admin_trusted 0 1
1593 test_fops trusted_noadmin 1
1594 nodemap_test_cleanup
1596 run_test 17 "test nodemap trusted_noadmin fileops"
1600 [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.55) ]; then
1601 skip "Need MDS >= 2.11.55"
1604 nodemap_version_check || return 0
1607 trap nodemap_test_cleanup EXIT
1608 nodemap_clients_admin_trusted 0 0
1609 test_fops mapped_noadmin 1
1610 nodemap_test_cleanup
1612 run_test 18 "test nodemap mapped_noadmin fileops"
1616 [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.55) ]; then
1617 skip "Need MDS >= 2.11.55"
1620 nodemap_version_check || return 0
1623 trap nodemap_test_cleanup EXIT
1624 nodemap_clients_admin_trusted 1 1
1625 test_fops trusted_admin 1
1626 nodemap_test_cleanup
1628 run_test 19 "test nodemap trusted_admin fileops"
1632 [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.55) ]; then
1633 skip "Need MDS >= 2.11.55"
1636 nodemap_version_check || return 0
1639 trap nodemap_test_cleanup EXIT
1640 nodemap_clients_admin_trusted 1 0
1641 test_fops mapped_admin 1
1642 nodemap_test_cleanup
1644 run_test 20 "test nodemap mapped_admin fileops"
1648 [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.55) ]; then
1649 skip "Need MDS >= 2.11.55"
1652 nodemap_version_check || return 0
1655 trap nodemap_test_cleanup EXIT
1658 for client in $clients; do
1659 do_facet mgs $LCTL nodemap_modify --name c${i} \
1660 --property admin --value 0
1661 do_facet mgs $LCTL nodemap_modify --name c${i} \
1662 --property trusted --value $x
1666 wait_nm_sync c$((i - 1)) trusted_nodemap
1668 test_fops mapped_trusted_noadmin
1669 nodemap_test_cleanup
1671 run_test 21 "test nodemap mapped_trusted_noadmin fileops"
1675 [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.55) ]; then
1676 skip "Need MDS >= 2.11.55"
1679 nodemap_version_check || return 0
1682 trap nodemap_test_cleanup EXIT
1685 for client in $clients; do
1686 do_facet mgs $LCTL nodemap_modify --name c${i} \
1687 --property admin --value 1
1688 do_facet mgs $LCTL nodemap_modify --name c${i} \
1689 --property trusted --value $x
1693 wait_nm_sync c$((i - 1)) trusted_nodemap
1695 test_fops mapped_trusted_admin
1696 nodemap_test_cleanup
1698 run_test 22 "test nodemap mapped_trusted_admin fileops"
1700 # acl test directory needs to be initialized on a privileged client
1701 nodemap_acl_test_setup() {
1702 local admin=$(do_facet mgs $LCTL get_param -n \
1703 nodemap.c0.admin_nodemap)
1704 local trust=$(do_facet mgs $LCTL get_param -n \
1705 nodemap.c0.trusted_nodemap)
1707 do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
1708 do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
1710 wait_nm_sync c0 admin_nodemap
1711 wait_nm_sync c0 trusted_nodemap
1713 do_node ${clients_arr[0]} rm -rf $DIR/$tdir
1715 do_node ${clients_arr[0]} chmod a+rwx $DIR/$tdir ||
1716 error "unable to chmod a+rwx test dir $DIR/$tdir"
1718 do_facet mgs $LCTL nodemap_modify --name c0 \
1719 --property admin --value $admin
1720 do_facet mgs $LCTL nodemap_modify --name c0 \
1721 --property trusted --value $trust
1723 wait_nm_sync c0 trusted_nodemap
1726 # returns 0 if the number of ACLs does not change on the second (mapped) client
1727 # after being set on the first client
1728 nodemap_acl_test() {
1730 local set_client="$2"
1731 local get_client="$3"
1732 local check_setfacl="$4"
1733 local setfacl_error=0
1734 local testfile=$DIR/$tdir/$tfile
1735 local RUNAS_USER="$RUNAS_CMD -u $user"
1737 local acl_count_post=0
1739 nodemap_acl_test_setup
1742 do_node $set_client $RUNAS_USER touch $testfile
1744 # ACL masks aren't filtered by nodemap code, so we ignore them
1745 acl_count=$(do_node $get_client getfacl $testfile | grep -v mask |
1747 do_node $set_client $RUNAS_USER setfacl -m $user:rwx $testfile ||
1750 # if check_setfacl is set to 1, setfacl is expected to fail
1751 if [ "$check_setfacl" == "1" ]; then
1752 [ "$setfacl_error" != "1" ] && return 1
1755 [ "$setfacl_error" == "1" ] && echo "WARNING: unable to setfacl"
1757 acl_count_post=$(do_node $get_client getfacl $testfile | grep -v mask |
1759 [ $acl_count -eq $acl_count_post ] && return 0
1764 [ $num_clients -lt 2 ] && skip "Need 2 clients at least" && return
1765 nodemap_version_check || return 0
1768 trap nodemap_test_cleanup EXIT
1769 # 1 trusted cluster, 1 mapped cluster
1770 local unmapped_fs=$((IDBASE+0))
1771 local unmapped_c1=$((IDBASE+5))
1772 local mapped_fs=$((IDBASE+2))
1773 local mapped_c0=$((IDBASE+4))
1774 local mapped_c1=$((IDBASE+6))
1776 do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
1777 do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
1779 do_facet mgs $LCTL nodemap_modify --name c1 --property admin --value 0
1780 do_facet mgs $LCTL nodemap_modify --name c1 --property trusted --value 0
1782 wait_nm_sync c1 trusted_nodemap
1784 # setfacl on trusted cluster to unmapped user, verify it's not seen
1785 nodemap_acl_test $unmapped_fs ${clients_arr[0]} ${clients_arr[1]} ||
1786 error "acl count (1)"
1788 # setfacl on trusted cluster to mapped user, verify it's seen
1789 nodemap_acl_test $mapped_fs ${clients_arr[0]} ${clients_arr[1]} &&
1790 error "acl count (2)"
1792 # setfacl on mapped cluster to mapped user, verify it's seen
1793 nodemap_acl_test $mapped_c1 ${clients_arr[1]} ${clients_arr[0]} &&
1794 error "acl count (3)"
1796 # setfacl on mapped cluster to unmapped user, verify error
1797 nodemap_acl_test $unmapped_fs ${clients_arr[1]} ${clients_arr[0]} 1 ||
1798 error "acl count (4)"
1801 do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 0
1802 do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 0
1804 wait_nm_sync c0 trusted_nodemap
1806 # setfacl to mapped user on c1, also mapped to c0, verify it's seen
1807 nodemap_acl_test $mapped_c1 ${clients_arr[1]} ${clients_arr[0]} &&
1808 error "acl count (5)"
1810 # setfacl to mapped user on c1, not mapped to c0, verify not seen
1811 nodemap_acl_test $unmapped_c1 ${clients_arr[1]} ${clients_arr[0]} ||
1812 error "acl count (6)"
1814 nodemap_test_cleanup
1816 run_test 23a "test mapped regular ACLs"
1818 test_23b() { #LU-9929
1819 [ $num_clients -lt 2 ] && skip "Need 2 clients at least" && return
1820 [ $(lustre_version_code mgs) -lt $(version_code 2.10.53) ] &&
1821 skip "Need MGS >= 2.10.53" && return
1823 export SK_UNIQUE_NM=true
1825 trap nodemap_test_cleanup EXIT
1827 local testdir=$DIR/$tdir
1828 local fs_id=$((IDBASE+10))
1833 do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
1834 wait_nm_sync c0 admin_nodemap
1835 do_facet mgs $LCTL nodemap_modify --name c1 --property admin --value 1
1836 wait_nm_sync c1 admin_nodemap
1837 do_facet mgs $LCTL nodemap_modify --name c1 --property trusted --value 1
1838 wait_nm_sync c1 trusted_nodemap
1840 # Add idmap $ID0:$fs_id (500:60010)
1841 do_facet mgs $LCTL nodemap_add_idmap --name c0 --idtype gid \
1842 --idmap $ID0:$fs_id ||
1843 error "add idmap $ID0:$fs_id to nodemap c0 failed"
1844 wait_nm_sync c0 idmap
1846 # set/getfacl default acl on client 1 (unmapped gid=500)
1847 do_node ${clients_arr[0]} rm -rf $testdir
1848 do_node ${clients_arr[0]} mkdir -p $testdir
1849 # Here, USER0=$(getent passwd | grep :$ID0:$ID0: | cut -d: -f1)
1850 do_node ${clients_arr[0]} setfacl -R -d -m group:$USER0:rwx $testdir ||
1851 error "setfacl $testdir on ${clients_arr[0]} failed"
1852 unmapped_id=$(do_node ${clients_arr[0]} getfacl $testdir |
1853 grep -E "default:group:.*:rwx" | awk -F: '{print $3}')
1854 [ "$unmapped_id" = "$USER0" ] ||
1855 error "gid=$ID0 was not unmapped correctly on ${clients_arr[0]}"
1857 # getfacl default acl on client 2 (mapped gid=60010)
1858 mapped_id=$(do_node ${clients_arr[1]} getfacl $testdir |
1859 grep -E "default:group:.*:rwx" | awk -F: '{print $3}')
1860 fs_user=$(do_node ${clients_arr[1]} getent passwd |
1861 grep :$fs_id:$fs_id: | cut -d: -f1)
1862 [ -z "$fs_user" ] && fs_user=$fs_id
1863 [ $mapped_id -eq $fs_id -o "$mapped_id" = "$fs_user" ] ||
1864 error "Should return gid=$fs_id or $fs_user on client2"
1867 nodemap_test_cleanup
1868 export SK_UNIQUE_NM=false
1870 run_test 23b "test mapped default ACLs"
1875 trap nodemap_test_cleanup EXIT
1876 do_nodes $(comma_list $(all_server_nodes)) $LCTL get_param -R nodemap
1878 nodemap_test_cleanup
1880 run_test 24 "check nodemap proc files for LBUGs and Oopses"
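# test 25 checks that the nodemap configuration (including fileset info)
# survives a server restart by diffing 'lctl nodemap_info' output taken on
# the MGS and MDS before and after cleanup_and_setup_lustre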
1883 local tmpfile=$(mktemp)
1884 local tmpfile2=$(mktemp)
1885 local tmpfile3=$(mktemp)
1886 local tmpfile4=$(mktemp)
1890 nodemap_version_check || return 0
1892 # stop clients for this test
1893 zconf_umount_clients $CLIENTS $MOUNT ||
1894 error "unable to umount clients $CLIENTS"
1896 export SK_UNIQUE_NM=true
1899 # enable trusted/admin for setquota call in cleanup_and_setup_lustre()
1901 for client in $clients; do
1902 do_facet mgs $LCTL nodemap_modify --name c${i} \
1903 --property admin --value 1
1904 do_facet mgs $LCTL nodemap_modify --name c${i} \
1905 --property trusted --value 1
1908 wait_nm_sync c$((i - 1)) trusted_nodemap
1910 trap nodemap_test_cleanup EXIT
1912 # create a new, empty nodemap, and add fileset info to it
1913 do_facet mgs $LCTL nodemap_add test25 ||
1914 error "unable to create nodemap $testname"
1915 do_facet mgs $LCTL set_param -P nodemap.$testname.fileset=/$subdir ||
1916 error "unable to add fileset info to nodemap test25"
1918 wait_nm_sync test25 id
1920 do_facet mgs $LCTL nodemap_info > $tmpfile
1921 do_facet mds $LCTL nodemap_info > $tmpfile2
1923 if ! $SHARED_KEY; then
1924 # will conflict with SK's nodemaps
1925 cleanup_and_setup_lustre
1927 # stop clients for this test
1928 zconf_umount_clients $CLIENTS $MOUNT ||
1929 error "unable to umount clients $CLIENTS"
1931 do_facet mgs $LCTL nodemap_info > $tmpfile3
1932 diff -q $tmpfile3 $tmpfile >& /dev/null ||
1933 error "nodemap_info diff on MGS after remount"
1935 do_facet mds $LCTL nodemap_info > $tmpfile4
1936 diff -q $tmpfile4 $tmpfile2 >& /dev/null ||
1937 error "nodemap_info diff on MDS after remount"
1940 do_facet mgs $LCTL nodemap_del test25 ||
1941 error "cannot delete nodemap test25 from config"
1942 nodemap_test_cleanup
1943 # restart clients previously stopped
1944 zconf_mount_clients $CLIENTS $MOUNT ||
1945 error "unable to mount clients $CLIENTS"
1947 rm -f $tmpfile $tmpfile2 $tmpfile3 $tmpfile4
1948 export SK_UNIQUE_NM=false
1950 run_test 25 "test save and reload nodemap config"
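# test 26 creates and deletes a large batch of nodemaps in one go to
# exercise the transfer of a very large nodemap configuration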
1953 nodemap_version_check || return 0
1957 do_facet mgs "seq -f 'c%g' $large_i | xargs -n1 $LCTL nodemap_add"
1958 wait_nm_sync c$large_i admin_nodemap
1960 do_facet mgs "seq -f 'c%g' $large_i | xargs -n1 $LCTL nodemap_del"
1961 wait_nm_sync c$large_i admin_nodemap
1963 run_test 26 "test transferring very large nodemap"
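# nodemap_exercise_fileset: set a fileset on nodemap $1, remount a client and
# verify the mount is confined to that subdirectory (and, via FILESET, to a
# sub-subdirectory), then clear the fileset and verify the full namespace is
# visible again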
1965 nodemap_exercise_fileset() {
1970 if [ "$nm" == "default" ]; then
1971 do_facet mgs $LCTL nodemap_activate 1
1976 if $SHARED_KEY; then
1977 export SK_UNIQUE_NM=true
1979 # will conflict with SK's nodemaps
1980 trap "fileset_test_cleanup $nm" EXIT
1982 fileset_test_setup "$nm"
1984 # add fileset info to $nm nodemap
1985 if ! combined_mgs_mds; then
1986 do_facet mgs $LCTL set_param nodemap.${nm}.fileset=/$subdir ||
1987 error "unable to add fileset info to $nm nodemap on MGS"
1989 do_facet mgs $LCTL set_param -P nodemap.${nm}.fileset=/$subdir ||
1990 error "unable to add fileset info to $nm nodemap for servers"
1991 wait_nm_sync $nm fileset "nodemap.${nm}.fileset=/$subdir"
1994 zconf_umount_clients ${clients_arr[0]} $MOUNT ||
1995 error "unable to umount client ${clients_arr[0]}"
1996 # set some generic fileset to trigger SSK code
1998 zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
1999 error "unable to remount client ${clients_arr[0]}"
2002 # test mount point content
2003 do_node ${clients_arr[0]} test -f $MOUNT/this_is_$subdir ||
2004 error "fileset not taken into account"
2006 # re-mount client with sub-subdir
2007 zconf_umount_clients ${clients_arr[0]} $MOUNT ||
2008 error "unable to umount client ${clients_arr[0]}"
2009 export FILESET=/$subsubdir
2010 zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
2011 error "unable to remount client ${clients_arr[0]}"
2014 # test mount point content
2015 do_node ${clients_arr[0]} test -f $MOUNT/this_is_$subsubdir ||
2016 error "subdir of fileset not taken into account"
2018 # remove fileset info from nodemap
2019 do_facet mgs $LCTL nodemap_set_fileset --name $nm --fileset clear ||
2020 error "unable to delete fileset info on $nm nodemap"
2021 wait_update_facet mgs "$LCTL get_param nodemap.${nm}.fileset" \
2022 "nodemap.${nm}.fileset=" ||
2023 error "fileset info still not cleared on $nm nodemap"
2024 do_facet mgs $LCTL set_param -P nodemap.${nm}.fileset=clear ||
2025 error "unable to reset fileset info on $nm nodemap"
2026 wait_nm_sync $nm fileset "nodemap.${nm}.fileset="
2029 zconf_umount_clients ${clients_arr[0]} $MOUNT ||
2030 error "unable to umount client ${clients_arr[0]}"
2031 zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
2032 error "unable to remount client ${clients_arr[0]}"
2034 # test mount point content
2035 if ! do_node ${clients_arr[0]} test -d $MOUNT/$subdir; then
2037 error "fileset not cleared on $nm nodemap"
2040 # back to non-nodemap setup
2041 if $SHARED_KEY; then
2042 export SK_UNIQUE_NM=false
2043 zconf_umount_clients ${clients_arr[0]} $MOUNT ||
2044 error "unable to umount client ${clients_arr[0]}"
2046 fileset_test_cleanup "$nm"
2047 if [ "$nm" == "default" ]; then
2048 do_facet mgs $LCTL nodemap_activate 0
2049 wait_nm_sync active 0
2051 export SK_UNIQUE_NM=false
2053 nodemap_test_cleanup
2055 if $SHARED_KEY; then
2056 zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
2057 error "unable to remount client ${clients_arr[0]}"
2062 [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.50) ] &&
2063 skip "Need MDS >= 2.11.50" && return
2065 for nm in "default" "c0"; do
2066 local subdir="subdir_${nm}"
2067 local subsubdir="subsubdir_${nm}"
2069 if [ "$nm" == "default" ] && [ "$SHARED_KEY" == "true" ]; then
2070 echo "Skipping nodemap $nm with SHARED_KEY";
2074 echo "Exercising fileset for nodemap $nm"
2075 nodemap_exercise_fileset "$nm"
2078 run_test 27a "test fileset in various nodemaps"
2080 test_27b() { #LU-10703
2081 [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.50) ] &&
2082 skip "Need MDS >= 2.11.50" && return
2083 [[ $MDSCOUNT -lt 2 ]] && skip "needs >= 2 MDTs" && return
2086 trap nodemap_test_cleanup EXIT
2088 # Add the nodemaps and set their filesets
2089 for i in $(seq 1 $MDSCOUNT); do
2090 do_facet mgs $LCTL nodemap_del nm$i 2>/dev/null
2091 do_facet mgs $LCTL nodemap_add nm$i ||
2092 error "add nodemap nm$i failed"
2093 wait_nm_sync nm$i "" "" "-N"
2095 if ! combined_mgs_mds; then
2097 $LCTL set_param nodemap.nm$i.fileset=/dir$i ||
2098 error "set nm$i.fileset=/dir$i failed on MGS"
2100 do_facet mgs $LCTL set_param -P nodemap.nm$i.fileset=/dir$i ||
2101 error "set nm$i.fileset=/dir$i failed on servers"
2102 wait_nm_sync nm$i fileset "nodemap.nm$i.fileset=/dir$i"
2105 # Check if all the filesets are correct
2106 for i in $(seq 1 $MDSCOUNT); do
2107 fileset=$(do_facet mds$i \
2108 $LCTL get_param -n nodemap.nm$i.fileset)
2109 [ "$fileset" = "/dir$i" ] ||
2110 error "nm$i.fileset $fileset != /dir$i on mds$i"
2111 do_facet mgs $LCTL nodemap_del nm$i ||
2112 error "delete nodemap nm$i failed"
2115 nodemap_test_cleanup
2117 run_test 27b "The new nodemap won't clear the old nodemap's fileset"
2120 if ! $SHARED_KEY; then
2121 skip "need shared key feature for this test" && return
2123 mkdir -p $DIR/$tdir || error "mkdir failed"
2124 touch $DIR/$tdir/$tdir.out || error "touch failed"
2125 if [ ! -f $DIR/$tdir/$tdir.out ]; then
2126 error "read before rotation failed"
2128 # store top key identity to ensure rotation has occurred
2129 SK_IDENTITY_OLD=$(lctl get_param *.*.*srpc* | grep "expire" |
2130 head -1 | awk '{print $15}' | cut -c1-8)
2131 do_facet $SINGLEMDS lfs flushctx ||
2132 error "could not run flushctx on $SINGLEMDS"
2134 lfs flushctx || error "could not run flushctx on client"
2136 # verify new key is in place
2137 SK_IDENTITY_NEW=$(lctl get_param *.*.*srpc* | grep "expire" |
2138 head -1 | awk '{print $15}' | cut -c1-8)
2139 if [ $SK_IDENTITY_OLD == $SK_IDENTITY_NEW ]; then
2140 error "key did not rotate correctly"
2142 if [ ! -f $DIR/$tdir/$tdir.out ]; then
2143 error "read after rotation failed"
2146 run_test 28 "check shared key rotation method"
2149 if ! $SHARED_KEY; then
2150 skip "need shared key feature for this test" && return
2152 if [ $SK_FLAVOR != "ski" ] && [ $SK_FLAVOR != "skpi" ]; then
2153 skip "test only valid if integrity is active"
2156 mkdir $DIR/$tdir || error "mkdir"
2157 touch $DIR/$tdir/$tfile || error "touch"
2158 zconf_umount_clients ${clients_arr[0]} $MOUNT ||
2159 error "unable to umount clients"
2160 keyctl show | awk '/lustre/ { print $1 }' |
2161 xargs -IX keyctl unlink X
2162 OLD_SK_PATH=$SK_PATH
2163 export SK_PATH=/dev/null
2164 if zconf_mount_clients ${clients_arr[0]} $MOUNT; then
2165 export SK_PATH=$OLD_SK_PATH
2166 if [ -e $DIR/$tdir/$tfile ]; then
2167 error "able to mount and read without key"
2169 error "able to mount without key"
2172 export SK_PATH=$OLD_SK_PATH
2173 keyctl show | awk '/lustre/ { print $1 }' |
2174 xargs -IX keyctl unlink X
2177 run_test 29 "check for missing shared key"
2180 if ! $SHARED_KEY; then
2181 skip "need shared key feature for this test" && return
2183 if [ $SK_FLAVOR != "ski" ] && [ $SK_FLAVOR != "skpi" ]; then
2184 skip "test only valid if integrity is active"
2186 mkdir -p $DIR/$tdir || error "mkdir failed"
2187 touch $DIR/$tdir/$tdir.out || error "touch failed"
2188 zconf_umount_clients ${clients_arr[0]} $MOUNT ||
2189 error "unable to umount clients"
2190 # unload keys from the keyring
2191 keyctl show | awk '/lustre/ { print $1 }' |
2192 xargs -IX keyctl unlink X
2193 # generate an invalid key by using a bogus filesystem name
2194 lgss_sk -w $SK_PATH/$FSNAME-bogus.key -f $FSNAME.bogus \
2195 -t client -d /dev/urandom || error "lgss_sk failed (1)"
2196 do_facet $SINGLEMDS lfs flushctx || error "could not run flushctx"
2197 OLD_SK_PATH=$SK_PATH
2198 export SK_PATH=$SK_PATH/$FSNAME-bogus.key
2199 if zconf_mount_clients ${clients_arr[0]} $MOUNT; then
2200 SK_PATH=$OLD_SK_PATH
2201 if [ -e $DIR/$tdir/$tdir.out ]; then
2202 error "mount and read file with invalid key"
2204 error "mount with invalid key"
2207 SK_PATH=$OLD_SK_PATH
2208 zconf_umount_clients ${clients_arr[0]} $MOUNT ||
2209 error "unable to umount clients"
2211 run_test 30 "check for invalid shared key"
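# test 31 (with its cleanup_31 helper below) adds an extra LNet network
# (${NETTYPE}999) on all nodes, re-registers the targets via writeconf, and
# checks that mounting with '-o network=${NETTYPE}999' is rejected while LNet
# Dynamic Discovery is on and honored once discovery is turned off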
2215 zconf_umount $HOSTNAME $MOUNT || error "unable to umount client"
2217 # remove ${NETTYPE}999 network on all nodes
2218 do_nodes $(comma_list $(all_nodes)) \
2219 "$LNETCTL net del --net ${NETTYPE}999 && \
2220 $LNETCTL lnet unconfigure 2>/dev/null || true"
2222 # necessary to do writeconf in order to de-register
2223 # @${NETTYPE}999 nid for targets
2225 export KEEP_ZPOOL="true"
2227 export SK_MOUNTED=false
2230 export KEEP_ZPOOL="$KZPOOL"
2234 local nid=$(lctl list_nids | grep ${NETTYPE} | head -n1)
2235 local addr=${nid%@*}
2238 export LNETCTL=$(which lnetctl 2> /dev/null)
2240 [ -z "$LNETCTL" ] && skip "without lnetctl support." && return
2241 local_mode && skip "in local mode."
2243 stack_trap cleanup_31 EXIT
2246 if [ "$MOUNT_2" ] && $(grep -q $MOUNT2' ' /proc/mounts); then
2247 umount_client $MOUNT2 || error "umount $MOUNT2 failed"
2249 if $(grep -q $MOUNT' ' /proc/mounts); then
2250 umount_client $MOUNT || error "umount $MOUNT failed"
2253 # check exports on servers are empty for client
2254 do_facet mgs "lctl get_param -n *.MGS*.exports.'$nid'.uuid 2>/dev/null |
2255 grep -q -" && error "export on MGS should be empty"
2256 do_nodes $(comma_list $(mdts_nodes) $(osts_nodes)) \
2257 "lctl get_param -n *.${FSNAME}*.exports.'$nid'.uuid \
2258 2>/dev/null | grep -q -" &&
2259 error "export on servers should be empty"
2261 # add network ${NETTYPE}999 on all nodes
2262 do_nodes $(comma_list $(all_nodes)) \
2263 "$LNETCTL lnet configure && $LNETCTL net add --if \
2264 \$($LNETCTL net show --net $net | awk 'BEGIN{inf=0} \
2265 {if (inf==1) print \$2; fi; inf=0} /interfaces/{inf=1}') \
2266 --net ${NETTYPE}999" ||
2267 error "unable to configure NID ${NETTYPE}999"
2269 # necessary to do writeconf in order to register
2270 # new @${NETTYPE}999 nid for targets
2272 export KEEP_ZPOOL="true"
2274 export SK_MOUNTED=false
2276 setupall server_only || echo 1
2277 export KEEP_ZPOOL="$KZPOOL"
2280 local mgsnid_orig=$MGSNID
2281 # compute new MGSNID
2282 MGSNID=$(do_facet mgs "$LCTL list_nids | grep ${NETTYPE}999")
2284 # on client, turn LNet Dynamic Discovery on
2285 $LNETCTL set discovery 1
2287 # mount client with -o network=${NETTYPE}999 option:
2288 # should fail because of LNet Dynamic Discovery
2289 mount_client $MOUNT ${MOUNT_OPTS},network=${NETTYPE}999 &&
2290 error "client mount with '-o network' option should be refused"
2292 # on client, reconfigure LNet and turn LNet Dynamic Discovery off
2293 $LNETCTL net del --net ${NETTYPE}999 && $LNETCTL lnet unconfigure
2296 $LNETCTL set discovery 0
2298 $LNETCTL lnet configure && $LNETCTL net add --if \
2299 $($LNETCTL net show --net $net | awk 'BEGIN{inf=0} \
2300 {if (inf==1) {print $2}; inf=0} /interfaces/{inf=1}') \
2301 --net ${NETTYPE}999 ||
2302 error "unable to configure NID ${NETTYPE}999 on client"
2304 # mount client with -o network=${NETTYPE}999 option
2305 mount_client $MOUNT ${MOUNT_OPTS},network=${NETTYPE}999 ||
2306 error "unable to remount client"
2311 # check export on MGS
2312 do_facet mgs "lctl get_param -n *.MGS*.exports.'$nid'.uuid 2>/dev/null |
2314 [ $? -ne 0 ] || error "export for $nid on MGS should not exist"
2317 "lctl get_param -n *.MGS*.exports.'${addr}@${NETTYPE}999'.uuid \
2318 2>/dev/null | grep -q -"
2320 error "export for ${addr}@${NETTYPE}999 on MGS should exist"
2322 # check {mdc,osc} imports
2323 lctl get_param mdc.${FSNAME}-*.import | grep current_connection |
2324 grep -q ${NETTYPE}999
2326 error "import for mdc should use ${addr}@${NETTYPE}999"
2327 lctl get_param osc.${FSNAME}-*.import | grep current_connection |
2328 grep -q ${NETTYPE}999
2330 error "import for osc should use ${addr}@${NETTYPE}999"
2332 run_test 31 "client mount option '-o network'"
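# Illustrative helper, not invoked by any test: a minimal sketch of the
# client-side LNet setup that test 31 depends on, limited to lnetctl commands
# already used above.  The caller supplies the interface name (an assumption);
# discovery is turned off so the '-o network' mount option is honoured rather
# than being overridden by LNet Dynamic Discovery.
example_add_lnet_net() {
	local intf=$1

	$LNETCTL set discovery 0 || return 1
	$LNETCTL lnet configure &&
		$LNETCTL net add --if $intf --net ${NETTYPE}999
}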
2336 zconf_umount_clients ${clients_arr[0]} $MOUNT
2338 # disable sk flavor enforcement on MGS
2339 set_rule _mgs any any null
2341 # stop gss daemon on MGS
2342 if ! combined_mgs_mds ; then
2343 send_sigint $mgs_HOST lsvcgssd
2347 MOUNT_OPTS=$(add_sk_mntflag $MOUNT_OPTS)
2350 restore_to_default_flavor
2354 if ! $SHARED_KEY; then
2355 skip "need shared key feature for this test"
2358 stack_trap cleanup_32 EXIT
2360 # restore to default null flavor
2361 save_flvr=$SK_FLAVOR
2363 restore_to_default_flavor || error "cannot set null flavor"
2364 SK_FLAVOR=$save_flvr
2367 if [ "$MOUNT_2" ] && $(grep -q $MOUNT2' ' /proc/mounts); then
2368 umount_client $MOUNT2 || error "umount $MOUNT2 failed"
2370 if grep -q $MOUNT' ' /proc/mounts; then
2371 umount_client $MOUNT || error "umount $MOUNT failed"
2374 # start gss daemon on MGS
2375 if combined_mgs_mds ; then
2376 send_sigint $mds_HOST lsvcgssd
2378 start_gss_daemons $mgs_HOST "$LSVCGSSD -vvv -s -g"
2380 # add mgs key type and MGS NIDs in key on MGS
2381 do_nodes $mgs_HOST "lgss_sk -t mgs,server -g $MGSNID -m \
2382 $SK_PATH/$FSNAME.key >/dev/null 2>&1" ||
2383 error "could not modify keyfile on MGS"
2385 # load modified key file on MGS
2386 do_nodes $mgs_HOST "lgss_sk -l $SK_PATH/$FSNAME.key >/dev/null 2>&1" ||
2387 error "could not load keyfile on MGS"
2389 # add MGS NIDs in key on client
2390 do_nodes ${clients_arr[0]} "lgss_sk -g $MGSNID -m \
2391 $SK_PATH/$FSNAME.key >/dev/null 2>&1" ||
2392 error "could not modify keyfile on MGS"
2394 # set perms for per-nodemap keys, else accessing them fails with permission denied
2395 do_nodes $(comma_list $(all_nodes)) \
2396 "keyctl show | grep lustre | cut -c1-11 |
2398 xargs -IX keyctl setperm X 0x3f3f3f3f"
2400 # re-mount client with mgssec=skn
2401 save_opts=$MOUNT_OPTS
2402 if [ -z "$MOUNT_OPTS" ]; then
2403 MOUNT_OPTS="-o mgssec=skn"
2405 MOUNT_OPTS="$MOUNT_OPTS,mgssec=skn"
2407 zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
2408 error "mount ${clients_arr[0]} with mgssec=skn failed"
2409 MOUNT_OPTS=$save_opts
2412 zconf_umount_clients ${clients_arr[0]} $MOUNT ||
2413 error "umount ${clients_arr[0]} failed"
2415 # enforce ska flavor on MGS
2416 set_rule _mgs any any ska
2418 # re-mount client without mgssec
2419 zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS &&
2420 error "mount ${clients_arr[0]} without mgssec should fail"
2422 # re-mount client with mgssec=skn
2423 save_opts=$MOUNT_OPTS
2424 if [ -z "$MOUNT_OPTS" ]; then
2425 MOUNT_OPTS="-o mgssec=skn"
2427 MOUNT_OPTS="$MOUNT_OPTS,mgssec=skn"
2429 zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS &&
2430 error "mount ${clients_arr[0]} with mgssec=skn should fail"
2431 MOUNT_OPTS=$save_opts
2433 # re-mount client with mgssec=ska
2434 save_opts=$MOUNT_OPTS
2435 if [ -z "$MOUNT_OPTS" ]; then
2436 MOUNT_OPTS="-o mgssec=ska"
2438 MOUNT_OPTS="$MOUNT_OPTS,mgssec=ska"
2440 zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
2441 error "mount ${clients_arr[0]} with mgssec=ska failed"
2442 MOUNT_OPTS=$save_opts
2446 run_test 32 "check for mgssec"
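# Illustrative helper, not invoked by any test: the mgssec mount flag is
# appended to MOUNT_OPTS three times above with the same if/else; this is a
# minimal sketch of that logic in one place.  It only prints the adjusted
# option string, it does not mount anything, e.g.:
#   MOUNT_OPTS=$(example_opts_with_mgssec skn)
example_opts_with_mgssec() {
	local flavor=$1

	if [ -z "$MOUNT_OPTS" ]; then
		echo "-o mgssec=$flavor"
	else
		echo "$MOUNT_OPTS,mgssec=$flavor"
	fi
}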
2449 # disable sk flavor enforcement
2450 set_rule $FSNAME any cli2mdt null
2451 wait_flavor cli2mdt null
2454 zconf_umount_clients ${clients_arr[0]} $MOUNT
2456 # stop gss daemon on MGS
2457 if ! combined_mgs_mds ; then
2458 send_sigint $mgs_HOST lsvcgssd
2462 MOUNT_OPTS=$(add_sk_mntflag $MOUNT_OPTS)
2465 restore_to_default_flavor
2469 if ! $SHARED_KEY; then
2470 skip "need shared key feature for this test"
2473 stack_trap cleanup_33 EXIT
2475 # restore to default null flavor
2476 save_flvr=$SK_FLAVOR
2478 restore_to_default_flavor || error "cannot set null flavor"
2479 SK_FLAVOR=$save_flvr
2482 if [ "$MOUNT_2" ] && $(grep -q $MOUNT2' ' /proc/mounts); then
2483 umount_client $MOUNT2 || error "umount $MOUNT2 failed"
2485 if grep -q $MOUNT' ' /proc/mounts; then
2486 umount_client $MOUNT || error "umount $MOUNT failed"
2489 # start gss daemon on MGS
2490 if combined_mgs_mds ; then
2491 send_sigint $mds_HOST lsvcgssd
2493 start_gss_daemons $mgs_HOST "$LSVCGSSD -vvv -s -g"
2495 # add mgs key type and MGS NIDs in key on MGS
2496 do_nodes $mgs_HOST "lgss_sk -t mgs,server -g $MGSNID -m \
2497 $SK_PATH/$FSNAME.key >/dev/null 2>&1" ||
2498 error "could not modify keyfile on MGS"
2500 # load modified key file on MGS
2501 do_nodes $mgs_HOST "lgss_sk -l $SK_PATH/$FSNAME.key >/dev/null 2>&1" ||
2502 error "could not load keyfile on MGS"
2504 # add MGS NIDs in key on client
2505 do_nodes ${clients_arr[0]} "lgss_sk -g $MGSNID -m \
2506 $SK_PATH/$FSNAME.key >/dev/null 2>&1" ||
2507 error "could not modify keyfile on MGS"
2509 # set perms for per-nodemap keys, else accessing them fails with permission denied
2510 do_nodes $(comma_list $(all_nodes)) \
2511 "keyctl show | grep lustre | cut -c1-11 |
2513 xargs -IX keyctl setperm X 0x3f3f3f3f"
2515 # re-mount client with mgssec=skn
2516 save_opts=$MOUNT_OPTS
2517 if [ -z "$MOUNT_OPTS" ]; then
2518 MOUNT_OPTS="-o mgssec=skn"
2520 MOUNT_OPTS="$MOUNT_OPTS,mgssec=skn"
2522 zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
2523 error "mount ${clients_arr[0]} with mgssec=skn failed"
2524 MOUNT_OPTS=$save_opts
2526 # enforce ska flavor for cli2mdt
2527 set_rule $FSNAME any cli2mdt ska
2528 wait_flavor cli2mdt ska
2530 # check error message
2531 $LCTL dk | grep "faked source" &&
2532 error "MGS connection srpc flags incorrect"
2536 run_test 33 "correct srpc flags for MGS connection"
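# Illustrative helper, not invoked by any test: a minimal sketch of the debug
# log check used in test 33.  It clears the Lustre debug buffer, runs the
# command supplied by the caller, then scans the dumped log for the
# "faked source" message that indicates incorrect srpc flags on the MGS
# connection.  Returns non-zero if the message is found.
example_check_srpc_flags() {
	$LCTL clear
	"$@"
	$LCTL dk | grep -q "faked source" && return 1
	return 0
}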
2539 # restore deny_unknown
2540 do_facet mgs $LCTL nodemap_modify --name default \
2541 --property deny_unknown --value $denydefault
2542 if [ $? -ne 0 ]; then
2543 error_noexit "cannot reset deny_unknown on default nodemap"
2547 wait_nm_sync default deny_unknown
2554 [ $MGS_VERSION -lt $(version_code 2.12.51) ] &&
2555 skip "deny_unknown on default nm not supported before 2.12.51"
2557 activedefault=$(do_facet mgs $LCTL get_param -n nodemap.active)
2559 if [[ "$activedefault" != "1" ]]; then
2560 do_facet mgs $LCTL nodemap_activate 1
2562 stack_trap cleanup_active EXIT
2565 denydefault=$(do_facet mgs $LCTL get_param -n \
2566 nodemap.default.deny_unknown)
2567 [ -z "$denydefault" ] &&
2568 error "cannot get deny_unknown on default nodemap"
2569 if [ "$denydefault" -eq 0 ]; then
2575 do_facet mgs $LCTL nodemap_modify --name default \
2576 --property deny_unknown --value $denynew ||
2577 error "cannot set deny_unknown on default nodemap"
2579 [ "$(do_facet mgs $LCTL get_param -n nodemap.default.deny_unknown)" \
2581 error "setting deny_unknown on default nodemap did not work"
2583 stack_trap cleanup_34_deny EXIT
2585 wait_nm_sync default deny_unknown
2587 run_test 34 "deny_unknown on default nodemap"
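# Illustrative helper, not invoked by any test: a minimal sketch of flipping
# deny_unknown on the default nodemap and reading the value back, using only
# the nodemap commands already exercised in test 34.  The argument must be
# 0 or 1.
example_set_deny_unknown() {
	local value=$1

	do_facet mgs $LCTL nodemap_modify --name default \
		--property deny_unknown --value $value || return 1
	wait_nm_sync default deny_unknown
	do_facet mgs $LCTL get_param -n nodemap.default.deny_unknown
}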
2589 log "cleanup: ======================================================"
2592 for num in $(seq $MDSCOUNT); do
2593 if [ "${identity_old[$num]}" = 1 ]; then
2594 switch_identity $num false || identity_old[$num]=$?
2598 $RUNAS_CMD -u $ID0 ls $DIR
2599 $RUNAS_CMD -u $ID1 ls $DIR
2604 check_and_cleanup_lustre