# Run select tests by setting ONLY, or as arguments to the script.
# Skip specific tests by setting EXCEPT.
LUSTRE=${LUSTRE:-$(dirname $0)/..}
. $LUSTRE/tests/test-framework.sh
ALWAYS_EXCEPT="$SANITY_SEC_EXCEPT "
# bug number for skipped test:
# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
[ "$SLOW" = "no" ] && EXCEPT_SLOW="26"
NODEMAP_TESTS=$(seq 7 26)
if ! check_versions; then
echo "It is NOT necessary to test nodemap in interoperability mode"
EXCEPT="$EXCEPT $NODEMAP_TESTS"
RUNAS_CMD=${RUNAS_CMD:-runas}
WTL=${WTL:-"$LUSTRE/tests/write_time_limit"}
PERM_CONF=$CONFDIR/perm.conf
HOSTNAME_CHECKSUM=$(hostname | sum | awk '{ print $1 }')
SUBNET_CHECKSUM=$(expr $HOSTNAME_CHECKSUM % 250 + 1)
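# A worked example of the checksum math above (values assumed for
# illustration): if "hostname | sum" prints "37998 1", then
# HOSTNAME_CHECKSUM=37998 and SUBNET_CHECKSUM=$((37998 % 250 + 1))=249,
# so the synthetic test NIDs built later look like 249.<i>.<j>.[1-253]@tcp.
# The modulus keeps the first octet in 1..250, giving each test host a
# valid and (almost always) unique fake subnet.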
require_dsh_mds || exit 0
require_dsh_ost || exit 0
clients=${CLIENTS//,/ }
num_clients=$(get_node_count ${clients})
clients_arr=($clients)
USER0=$(getent passwd | grep :$ID0:$ID0: | cut -d: -f1)
USER1=$(getent passwd | grep :$ID1:$ID1: | cut -d: -f1)
NODEMAP_IPADDR_LIST="1 10 64 128 200 250"
NODEMAP_MAX_ID=$((ID0 + NODEMAP_ID_COUNT))
skip "need to add user0 ($ID0:$ID0)" && exit 0
skip "need to add user1 ($ID1:$ID1)" && exit 0
IDBASE=${IDBASE:-60000}
# changes to mappings must be reflected in test 23
[0]="$((IDBASE+3)):$((IDBASE+0)) $((IDBASE+4)):$((IDBASE+2))"
[1]="$((IDBASE+5)):$((IDBASE+1)) $((IDBASE+6)):$((IDBASE+2))"
check_and_setup_lustre
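# For reference, with the default IDBASE of 60000 the idmaps above
# expand to client_id:fs_id pairs 60003:60000 and 60004:60002 for the
# first cluster, and 60005:60001 and 60006:60002 for the second, i.e.
# both clusters map one of their uids onto the shared fs id 60002 while
# the remaining ids stay distinct.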
GSS_REF=$(lsmod | grep ^ptlrpc_gss | awk '{print $3}')
if [ ! -z "$GSS_REF" -a "$GSS_REF" != "0" ]; then
echo "with GSS support"
echo "without GSS support"
MDT=$(do_facet $SINGLEMDS lctl get_param -N "mdt.\*MDT0000" |
[ -z "$MDT" ] && error "failed to get MDT device" && exit 1
do_facet $SINGLEMDS "mkdir -p $CONFDIR"
IDENTITY_FLUSH=mdt.$MDT.identity_flush
IDENTITY_UPCALL=mdt.$MDT.identity_upcall
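# A minimal sketch of how these two parameters are driven later in this
# script: disabling the identity upcall makes the MDS trust
# client-supplied supplementary groups, and writing -1 to identity_flush
# drops all cached identities so new perm.conf entries take effect, e.g.:
#   lctl set_param mdt.$MDT.identity_upcall=NONE
#   lctl set_param -n $IDENTITY_FLUSH=-1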
if ! $RUNAS_CMD -u $user krb5_login.sh; then
error "kerberos login for $user failed."
if ! $RUNAS_CMD -u $user -g $group ls $DIR > /dev/null 2>&1; then
$RUNAS_CMD -u $user lfs flushctx -k
$RUNAS_CMD -u $user krb5_login.sh
if ! $RUNAS_CMD -u $user -g $group ls $DIR > /dev/null 2>&1; then
error "init $user $group failed."
declare -a identity_old
for num in $(seq $MDSCOUNT); do
switch_identity $num true || identity_old[$num]=$?
if ! $RUNAS_CMD -u $ID0 ls $DIR > /dev/null 2>&1; then
sec_login $USER0 $USER0
if ! $RUNAS_CMD -u $ID1 ls $DIR > /dev/null 2>&1; then
sec_login $USER1 $USER1
# run as a different user
chmod 0755 $DIR || error "chmod (1)"
rm -rf $DIR/$tdir || error "rm (1)"
mkdir -p $DIR/$tdir || error "mkdir (1)"
chown $USER0 $DIR/$tdir || error "chown (2)"
$RUNAS_CMD -u $ID0 ls $DIR || error "ls (1)"
rm -f $DIR/f0 || error "rm (2)"
$RUNAS_CMD -u $ID0 touch $DIR/f0 && error "touch (1)"
$RUNAS_CMD -u $ID0 touch $DIR/$tdir/f1 || error "touch (2)"
$RUNAS_CMD -u $ID1 touch $DIR/$tdir/f2 && error "touch (3)"
touch $DIR/$tdir/f3 || error "touch (4)"
chown root $DIR/$tdir || error "chown (3)"
chgrp $USER0 $DIR/$tdir || error "chgrp (1)"
chmod 0775 $DIR/$tdir || error "chmod (2)"
$RUNAS_CMD -u $ID0 touch $DIR/$tdir/f4 || error "touch (5)"
$RUNAS_CMD -u $ID1 touch $DIR/$tdir/f5 && error "touch (6)"
touch $DIR/$tdir/f6 || error "touch (7)"
rm -rf $DIR/$tdir || error "rm (3)"
run_test 0 "uid permission ============================="
[ $GSS_SUP = 0 ] && skip "without GSS support." && return
chown $USER0 $DIR/$tdir || error "chown (1)"
$RUNAS_CMD -u $ID1 -v $ID0 touch $DIR/$tdir/f0 && error "touch (2)"
echo "enable uid $ID1 setuid"
do_facet $SINGLEMDS "echo '* $ID1 setuid' >> $PERM_CONF"
do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
$RUNAS_CMD -u $ID1 -v $ID0 touch $DIR/$tdir/f1 || error "touch (3)"
chown root $DIR/$tdir || error "chown (4)"
chgrp $USER0 $DIR/$tdir || error "chgrp (5)"
chmod 0770 $DIR/$tdir || error "chmod (6)"
$RUNAS_CMD -u $ID1 -g $ID1 touch $DIR/$tdir/f2 && error "touch (7)"
$RUNAS_CMD -u$ID1 -g$ID1 -j$ID0 touch $DIR/$tdir/f3 && error "touch (8)"
echo "enable uid $ID1 setuid,setgid"
do_facet $SINGLEMDS "echo '* $ID1 setuid,setgid' > $PERM_CONF"
do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
$RUNAS_CMD -u $ID1 -g $ID1 -j $ID0 touch $DIR/$tdir/f4 ||
$RUNAS_CMD -u $ID1 -v $ID0 -g $ID1 -j $ID0 touch $DIR/$tdir/f5 ||
do_facet $SINGLEMDS "rm -f $PERM_CONF"
do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
run_test 1 "setuid/gid ============================="
# bug 3285 - supplementary groups should always succeed.
# NB: supplementary groups are set for the local client only; for a
# remote client, the groups of the specified uid on the MDT are
# obtained via the /sbin/l_getidentity upcall and used instead.
[[ "$MDS1_VERSION" -ge $(version_code 2.6.93) ]] ||
[[ "$MDS1_VERSION" -ge $(version_code 2.5.35) &&
"$MDS1_VERSION" -lt $(version_code 2.5.50) ]] ||
skip "Need MDS version at least 2.6.93 or 2.5.35"
chmod 0771 $DIR/$tdir
chgrp $ID0 $DIR/$tdir
$RUNAS_CMD -u $ID0 ls $DIR/$tdir || error "setgroups (1)"
do_facet $SINGLEMDS "echo '* $ID1 setgrp' > $PERM_CONF"
do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
$RUNAS_CMD -u $ID1 -G1,2,$ID0 ls $DIR/$tdir ||
error "setgroups (2)"
$RUNAS_CMD -u $ID1 -G1,2 ls $DIR/$tdir && error "setgroups (3)"
do_facet $SINGLEMDS "rm -f $PERM_CONF"
do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
run_test 4 "set supplementary group ==============="
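# The nodemap tests below share one MGS-side administration pattern; a
# minimal sketch, using a hypothetical nodemap "nm0" and range:
#   lctl nodemap_add nm0
#   lctl nodemap_add_range --name nm0 --range 192.168.1.[2-254]@tcp
#   lctl nodemap_add_idmap --name nm0 --idtype uid --idmap 500:501
#   lctl nodemap_modify --name nm0 --property admin --value 1
#   lctl nodemap_activate 1
# All changes are made on the MGS and only take effect on the other
# servers after they sync, which is what wait_nm_sync() polls for.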
squash_id default 99 0
wait_nm_sync default squash_uid '' inactive
squash_id default 99 1
wait_nm_sync default squash_gid '' inactive
for (( i = 0; i < NODEMAP_COUNT; i++ )); do
local csum=${HOSTNAME_CHECKSUM}_${i}
do_facet mgs $LCTL nodemap_add $csum
if [ $rc -ne 0 ]; then
echo "nodemap_add $csum failed with $rc"
out=$(do_facet mgs $LCTL get_param nodemap.$csum.id)
## fail (return 1) if the new nodemap name is missing from the output
[[ $(echo $out | grep -c $csum) == 0 ]] && return 1
for (( i = 0; i < NODEMAP_COUNT; i++ )); do
local csum=${HOSTNAME_CHECKSUM}_${i}
wait_nm_sync $csum id '' inactive
for ((i = 0; i < NODEMAP_COUNT; i++)); do
local csum=${HOSTNAME_CHECKSUM}_${i}
if ! do_facet mgs $LCTL nodemap_del $csum; then
error "nodemap_del $csum failed with $?"
out=$(do_facet mgs $LCTL get_param nodemap.$csum.id 2>/dev/null)
[[ $(echo $out | grep -c $csum) != 0 ]] && return 1
for (( i = 0; i < NODEMAP_COUNT; i++ )); do
local csum=${HOSTNAME_CHECKSUM}_${i}
wait_nm_sync $csum id '' inactive
local cmd="$LCTL nodemap_add_range"
for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
range="$SUBNET_CHECKSUM.${2}.${j}.[1-253]@tcp"
if ! do_facet mgs $cmd --name $1 --range $range; then
local cmd="$LCTL nodemap_del_range"
for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
range="$SUBNET_CHECKSUM.${2}.${j}.[1-253]@tcp"
if ! do_facet mgs $cmd --name $1 --range $range; then
local cmd="$LCTL nodemap_add_idmap"
echo "Start to add idmaps ..."
for ((i = 0; i < NODEMAP_COUNT; i++)); do
for ((j = $ID0; j < NODEMAP_MAX_ID; j++)); do
local csum=${HOSTNAME_CHECKSUM}_${i}
local fs_id=$((j + 1))
if ! do_facet mgs $cmd --name $csum --idtype uid \
--idmap $client_id:$fs_id; then
if ! do_facet mgs $cmd --name $csum --idtype gid \
--idmap $client_id:$fs_id; then
update_idmaps() { #LU-10040
[ "$MGS_VERSION" -lt $(version_code 2.10.55) ] &&
skip "Need MGS >= 2.10.55"
local csum=${HOSTNAME_CHECKSUM}_0
local old_id_client=$ID0
local old_id_fs=$((ID0 + 1))
local new_id=$((ID0 + 100))
echo "Start to update idmaps ..."
# Inserting an existing idmap should return an error
cmd="$LCTL nodemap_add_idmap --name $csum --idtype uid"
$cmd --idmap $old_id_client:$old_id_fs 2>/dev/null; then
error "insert idmap {$old_id_client:$old_id_fs} " \
"should return an error"
# Update id_fs and check it
if ! do_facet mgs $cmd --idmap $old_id_client:$new_id; then
error "$cmd --idmap $old_id_client:$new_id failed"
tmp_id=$(do_facet mgs $LCTL get_param -n nodemap.$csum.idmap |
awk '{ print $7 }' | sed -n '2p')
[ $tmp_id != $new_id ] && { error "new id_fs $tmp_id != $new_id"; \
rc=$((rc + 1)); return $rc; }
# Update id_client and check it
if ! do_facet mgs $cmd --idmap $new_id:$new_id; then
error "$cmd --idmap $new_id:$new_id failed"
tmp_id=$(do_facet mgs $LCTL get_param -n nodemap.$csum.idmap |
awk '{ print $5 }' | sed -n "$((NODEMAP_ID_COUNT + 1)) p")
tmp_id=$(echo ${tmp_id%,*}) # e.g. "501," -> "501"
[ $tmp_id != $new_id ] && { error "new id_client $tmp_id != $new_id"; \
rc=$((rc + 1)); return $rc; }
# Delete the idmap updated above
cmd="$LCTL nodemap_del_idmap --name $csum --idtype uid"
if ! do_facet mgs $cmd --idmap $new_id:$new_id; then
error "$cmd --idmap $new_id:$new_id failed"
# restore the idmap so that delete_idmaps works correctly
cmd="$LCTL nodemap_add_idmap --name $csum --idtype uid"
if ! do_facet mgs $cmd --idmap $old_id_client:$old_id_fs; then
error "$cmd --idmap $old_id_client:$old_id_fs failed"
local cmd="$LCTL nodemap_del_idmap"
echo "Start to delete idmaps ..."
for ((i = 0; i < NODEMAP_COUNT; i++)); do
for ((j = $ID0; j < NODEMAP_MAX_ID; j++)); do
local csum=${HOSTNAME_CHECKSUM}_${i}
local fs_id=$((j + 1))
if ! do_facet mgs $cmd --name $csum --idtype uid \
--idmap $client_id:$fs_id; then
if ! do_facet mgs $cmd --name $csum --idtype gid \
--idmap $client_id:$fs_id; then
local cmd="$LCTL nodemap_modify"
proc[0]="admin_nodemap"
proc[1]="trusted_nodemap"
for ((idx = 0; idx < 2; idx++)); do
if ! do_facet mgs $cmd --name $1 --property ${option[$idx]} \
if ! do_facet mgs $cmd --name $1 --property ${option[$idx]} \
[ "$MGS_VERSION" -lt $(version_code 2.5.53) ] &&
skip "No nodemap on $MGS_VERSION MGS < 2.5.53"
cmd[0]="$LCTL nodemap_modify --property squash_uid"
cmd[1]="$LCTL nodemap_modify --property squash_gid"
if ! do_facet mgs ${cmd[$3]} --name $1 --value $2; then
local nodemap_name=$1
local is_active=$(do_facet mgs $LCTL get_param -n nodemap.active)
local mgs_ip=$(host_nids_address $mgs_HOST $NETTYPE | cut -d' ' -f1)
if [ "$nodemap_name" == "active" ]; then
elif [ -z "$key" ]; then
proc_param=${nodemap_name}
proc_param="${nodemap_name}.${key}"
if [ "$opt" == "inactive" ]; then
# check nm sync even if nodemap is not activated
(( is_active == 0 )) && [ "$proc_param" != "active" ] && return
if [ -z "$value" ]; then
out1=$(do_facet mgs $LCTL get_param $opt \
nodemap.${proc_param} 2>/dev/null)
echo "On MGS ${mgs_ip}, ${proc_param} = $out1"
# wait up to 10 seconds for other servers to sync with mgs
for i in $(seq 1 10); do
for node in $(all_server_nodes); do
local node_ip=$(host_nids_address $node $NETTYPE |
if [ -z "$value" ]; then
[ $node_ip == $mgs_ip ] && continue
out2=$(do_node $node_ip $LCTL get_param $opt \
nodemap.$proc_param 2>/dev/null)
echo "On $node ${node_ip}, ${proc_param} = $out2"
[ "$out1" != "$out2" ] && is_sync=false && break
echo OTHER - IP: $node_ip
error "mgs and $nodemap_name ${key} mismatch, $i attempts"
echo "waited $((i - 1)) seconds for sync"
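# Typical calls seen in this script: "wait_nm_sync c0 admin_nodemap"
# blocks until nodemap.c0.admin_nodemap matches the MGS value on every
# server, and "wait_nm_sync active 0" waits for a deactivation to
# propagate; the 'inactive' option checks the sync even while nodemaps
# are deactivated.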
# ensure that the squash defaults are the expected defaults
squash_id default 99 0
wait_nm_sync default squash_uid '' inactive
squash_id default 99 1
wait_nm_sync default squash_gid '' inactive
cmd="$LCTL nodemap_test_nid"
nid=$(do_facet mgs $cmd $1)
if [ $nid == $2 ]; then
# restore activation state
do_facet mgs $LCTL nodemap_activate 0
local cmd="$LCTL nodemap_test_id"
echo "Start to test idmaps ..."
## nodemap deactivated
if ! do_facet mgs $LCTL nodemap_activate 0; then
for ((id = $ID0; id < NODEMAP_MAX_ID; id++)); do
for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
local nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
local fs_id=$(do_facet mgs $cmd --nid $nid \
--idtype uid --id $id)
if [ $fs_id != $id ]; then
echo "expected $id, got $fs_id"
if ! do_facet mgs $LCTL nodemap_activate 1; then
for ((id = $ID0; id < NODEMAP_MAX_ID; id++)); do
for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
fs_id=$(do_facet mgs $cmd --nid $nid \
--idtype uid --id $id)
expected_id=$((id + 1))
if [ $fs_id != $expected_id ]; then
echo "expected $expected_id, got $fs_id"
for ((i = 0; i < NODEMAP_COUNT; i++)); do
local csum=${HOSTNAME_CHECKSUM}_${i}
if ! do_facet mgs $LCTL nodemap_modify --name $csum \
--property trusted --value 1; then
error "nodemap_modify $csum failed with $?"
for ((id = $ID0; id < NODEMAP_MAX_ID; id++)); do
for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
fs_id=$(do_facet mgs $cmd --nid $nid \
--idtype uid --id $id)
if [ $fs_id != $id ]; then
echo "expected $id, got $fs_id"
## ensure allow_root_access is enabled
for ((i = 0; i < NODEMAP_COUNT; i++)); do
local csum=${HOSTNAME_CHECKSUM}_${i}
if ! do_facet mgs $LCTL nodemap_modify --name $csum \
--property admin --value 1; then
error "nodemap_modify $csum failed with $?"
## check that root is allowed
for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
fs_id=$(do_facet mgs $cmd --nid $nid --idtype uid --id 0)
if [ $fs_id != 0 ]; then
echo "root allowed expected 0, got $fs_id"
## ensure allow_root_access is disabled
for ((i = 0; i < NODEMAP_COUNT; i++)); do
local csum=${HOSTNAME_CHECKSUM}_${i}
if ! do_facet mgs $LCTL nodemap_modify --name $csum \
--property admin --value 0; then
error "nodemap_modify ${HOSTNAME_CHECKSUM}_${i} "
## check that root is mapped to 99
for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
fs_id=$(do_facet mgs $cmd --nid $nid --idtype uid --id 0)
if [ $fs_id != 99 ]; then
error "root squash expected 99, got $fs_id"
## reset client trust to 0
for ((i = 0; i < NODEMAP_COUNT; i++)); do
if ! do_facet mgs $LCTL nodemap_modify \
--name ${HOSTNAME_CHECKSUM}_${i} \
--property trusted --value 0; then
error "nodemap_modify ${HOSTNAME_CHECKSUM}_${i} "
remote_mgs_nodsh && skip "remote MGS with nodsh"
[ "$MGS_VERSION" -lt $(version_code 2.5.53) ] &&
skip "No nodemap on $MGS_VERSION MGS < 2.5.53"
[[ $rc != 0 ]] && error "nodemap_add failed with $rc"
[[ $rc != 0 ]] && error "nodemap_del failed with $rc"
run_test 7 "nodemap create and delete"
remote_mgs_nodsh && skip "remote MGS with nodsh"
[ "$MGS_VERSION" -lt $(version_code 2.5.53) ] &&
skip "No nodemap on $MGS_VERSION MGS < 2.5.53"
[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
[[ $rc == 0 ]] && error "duplicate nodemap_add allowed with $rc" &&
[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 3
run_test 8 "nodemap reject duplicates"
remote_mgs_nodsh && skip "remote MGS with nodsh"
[ "$MGS_VERSION" -lt $(version_code 2.5.53) ] &&
skip "No nodemap on $MGS_VERSION MGS < 2.5.53"
[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
for ((i = 0; i < NODEMAP_COUNT; i++)); do
if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
[[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2
for ((i = 0; i < NODEMAP_COUNT; i++)); do
if ! delete_range ${HOSTNAME_CHECKSUM}_${i} $i; then
[[ $rc != 0 ]] && error "nodemap_del_range failed with $rc" && return 4
[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4
run_test 9 "nodemap range add"
remote_mgs_nodsh && skip "remote MGS with nodsh"
[ "$MGS_VERSION" -lt $(version_code 2.5.53) ] &&
skip "No nodemap on $MGS_VERSION MGS < 2.5.53"
[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
for ((i = 0; i < NODEMAP_COUNT; i++)); do
if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
[[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2
for ((i = 0; i < NODEMAP_COUNT; i++)); do
if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
[[ $rc == 0 ]] && error "duplicate nodemap_add_range allowed with $rc" &&
for ((i = 0; i < NODEMAP_COUNT; i++)); do
if ! delete_range ${HOSTNAME_CHECKSUM}_${i} $i; then
[[ $rc != 0 ]] && error "nodemap_del_range failed with $rc" && return 4
[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 5
run_test 10a "nodemap reject duplicate ranges"
[ "$MGS_VERSION" -lt $(version_code 2.10.53) ] &&
skip "Need MGS >= 2.10.53"
local nids="192.168.19.[0-255]@o2ib20"
do_facet mgs $LCTL nodemap_del $nm1 2>/dev/null
do_facet mgs $LCTL nodemap_del $nm2 2>/dev/null
do_facet mgs $LCTL nodemap_add $nm1 || error "Add $nm1 failed"
do_facet mgs $LCTL nodemap_add $nm2 || error "Add $nm2 failed"
do_facet mgs $LCTL nodemap_add_range --name $nm1 --range $nids ||
error "Add range $nids to $nm1 failed"
[ -n "$(do_facet mgs $LCTL get_param nodemap.$nm1.* |
grep start_nid)" ] || error "No range was found"
do_facet mgs $LCTL nodemap_del_range --name $nm2 --range $nids &&
error "Deleting range $nids from $nm2 should fail"
[ -n "$(do_facet mgs $LCTL get_param nodemap.$nm1.* |
grep start_nid)" ] || error "Range $nids should be there"
do_facet mgs $LCTL nodemap_del $nm1 || error "Delete $nm1 failed"
do_facet mgs $LCTL nodemap_del $nm2 || error "Delete $nm2 failed"
run_test 10b "delete range from the correct nodemap"
test_10c() { #LU-8912
[ "$MGS_VERSION" -lt $(version_code 2.10.57) ] &&
skip "Need MGS >= 2.10.57"
local nm="nodemap_lu8912"
local nid_range="10.210.[32-47].[0-255]@o2ib3"
local start_nid="10.210.32.0@o2ib3"
local end_nid="10.210.47.255@o2ib3"
local start_nid_found
do_facet mgs $LCTL nodemap_del $nm 2>/dev/null
do_facet mgs $LCTL nodemap_add $nm || error "Add $nm failed"
do_facet mgs $LCTL nodemap_add_range --name $nm --range $nid_range ||
error "Add range $nid_range to $nm failed"
start_nid_found=$(do_facet mgs $LCTL get_param nodemap.$nm.* |
awk -F '[,: ]' /start_nid/'{ print $9 }')
[ "$start_nid" == "$start_nid_found" ] ||
error "start_nid: $start_nid_found != $start_nid"
end_nid_found=$(do_facet mgs $LCTL get_param nodemap.$nm.* |
awk -F '[,: ]' /end_nid/'{ print $13 }')
[ "$end_nid" == "$end_nid_found" ] ||
error "end_nid: $end_nid_found != $end_nid"
do_facet mgs $LCTL nodemap_del $nm || error "Delete $nm failed"
run_test 10c "verify contiguous range support"
test_10d() { #LU-8913
[ "$MGS_VERSION" -lt $(version_code 2.10.59) ] &&
skip "Need MGS >= 2.10.59"
local nm="nodemap_lu8913"
local nid_range="*@o2ib3"
local start_nid="0.0.0.0@o2ib3"
local end_nid="255.255.255.255@o2ib3"
local start_nid_found
do_facet mgs $LCTL nodemap_del $nm 2>/dev/null
do_facet mgs $LCTL nodemap_add $nm || error "Add $nm failed"
do_facet mgs $LCTL nodemap_add_range --name $nm --range $nid_range ||
error "Add range $nid_range to $nm failed"
start_nid_found=$(do_facet mgs $LCTL get_param nodemap.$nm.* |
awk -F '[,: ]' /start_nid/'{ print $9 }')
[ "$start_nid" == "$start_nid_found" ] ||
error "start_nid: $start_nid_found != $start_nid"
end_nid_found=$(do_facet mgs $LCTL get_param nodemap.$nm.* |
awk -F '[,: ]' /end_nid/'{ print $13 }')
[ "$end_nid" == "$end_nid_found" ] ||
error "end_nid: $end_nid_found != $end_nid"
do_facet mgs $LCTL nodemap_del $nm || error "Delete $nm failed"
run_test 10d "verify nodemap range format '*@<net>' support"
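# For reference, the NID range formats exercised by tests 10b-10d:
#   192.168.19.[0-255]@o2ib20       one expanded octet
#   10.210.[32-47].[0-255]@o2ib3    contiguous multi-octet range
#   *@o2ib3                         whole net, 0.0.0.0 to 255.255.255.255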
remote_mgs_nodsh && skip "remote MGS with nodsh"
[ "$MGS_VERSION" -lt $(version_code 2.5.53) ] &&
skip "No nodemap on $MGS_VERSION MGS < 2.5.53"
[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
for ((i = 0; i < NODEMAP_COUNT; i++)); do
if ! modify_flags ${HOSTNAME_CHECKSUM}_${i}; then
[[ $rc != 0 ]] && error "nodemap_modify failed with $rc" && return 2
[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 3
run_test 11 "nodemap modify"
remote_mgs_nodsh && skip "remote MGS with nodsh"
[ "$MGS_VERSION" -lt $(version_code 2.5.53) ] &&
skip "No nodemap on $MGS_VERSION MGS < 2.5.53"
[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
for ((i = 0; i < NODEMAP_COUNT; i++)); do
if ! squash_id ${HOSTNAME_CHECKSUM}_${i} 88 0; then
[[ $rc != 0 ]] && error "nodemap squash_uid failed with $rc" && return 2
for ((i = 0; i < NODEMAP_COUNT; i++)); do
if ! squash_id ${HOSTNAME_CHECKSUM}_${i} 88 1; then
[[ $rc != 0 ]] && error "nodemap squash_gid failed with $rc" && return 3
[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4
run_test 12 "nodemap set squash ids"
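# Squash ids are plain nodemap properties; the squash_id() helper above
# boils down to commands like these (hypothetical nodemap name "nm0"):
#   lctl nodemap_modify --name nm0 --property squash_uid --value 88
#   lctl nodemap_modify --name nm0 --property squash_gid --value 88
# Unmapped or untrusted users on that nodemap then operate as uid/gid 88.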
remote_mgs_nodsh && skip "remote MGS with nodsh"
[ "$MGS_VERSION" -lt $(version_code 2.5.53) ] &&
skip "No nodemap on $MGS_VERSION MGS < 2.5.53"
[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
for ((i = 0; i < NODEMAP_COUNT; i++)); do
if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
[[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2
for ((i = 0; i < NODEMAP_COUNT; i++)); do
for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
for k in $NODEMAP_IPADDR_LIST; do
if ! test_nid $SUBNET_CHECKSUM.$i.$j.$k \
${HOSTNAME_CHECKSUM}_${i}; then
[[ $rc != 0 ]] && error "nodemap_test_nid failed with $rc" && return 3
[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4
run_test 13 "test nids"
remote_mgs_nodsh && skip "remote MGS with nodsh"
[ "$MGS_VERSION" -lt $(version_code 2.5.53) ] &&
skip "No nodemap on $MGS_VERSION MGS < 2.5.53"
[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
for ((i = 0; i < NODEMAP_COUNT; i++)); do
for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
for k in $NODEMAP_IPADDR_LIST; do
if ! test_nid $SUBNET_CHECKSUM.$i.$j.$k \
[[ $rc != 0 ]] && error "nodemap_test_nid failed with $rc" && return 3
[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4
run_test 14 "test default nodemap nid lookup"
remote_mgs_nodsh && skip "remote MGS with nodsh"
[ "$MGS_VERSION" -lt $(version_code 2.5.53) ] &&
skip "No nodemap on $MGS_VERSION MGS < 2.5.53"
[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
for ((i = 0; i < NODEMAP_COUNT; i++)); do
if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
[[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2
[[ $rc != 0 ]] && error "nodemap_add_idmap failed with $rc" && return 3
activedefault=$(do_facet mgs $LCTL get_param -n nodemap.active)
if [[ "$activedefault" != "1" ]]; then
stack_trap cleanup_active EXIT
[[ $rc != 0 ]] && error "nodemap_test_id failed with $rc" && return 4
[[ $rc != 0 ]] && error "update_idmaps failed with $rc" && return 5
[[ $rc != 0 ]] && error "nodemap_del_idmap failed with $rc" && return 6
[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 7
run_test 15 "test id mapping"
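# nodemap_test_id resolves an id exactly as the servers would; with the
# idmaps added by add_idmaps, a sample query (values are illustrative):
#   lctl nodemap_test_id --nid $SUBNET_CHECKSUM.0.0.100@tcp \
#        --idtype uid --id $ID0
# prints $((ID0 + 1)) while the idmaps are active, since every idmap in
# these tests maps client id j to fs id j+1.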
create_fops_nodemaps() {
for client in $clients; do
local client_ip=$(host_nids_address $client $NETTYPE)
local client_nid=$(h2nettype $client_ip)
do_facet mgs $LCTL nodemap_add c${i} || return 1
do_facet mgs $LCTL nodemap_add_range \
--name c${i} --range $client_nid || return 1
for map in ${FOPS_IDMAPS[i]}; do
do_facet mgs $LCTL nodemap_add_idmap --name c${i} \
--idtype uid --idmap ${map} || return 1
do_facet mgs $LCTL nodemap_add_idmap --name c${i} \
--idtype gid --idmap ${map} || return 1
wait_nm_sync c$i idmap
delete_fops_nodemaps() {
for client in $clients; do
do_facet mgs $LCTL nodemap_del c${i} || return 1
if [ $MDSCOUNT -le 1 ]; then
do_node ${clients_arr[0]} mkdir -p $DIR/$tdir
# round-robin MDTs to test DNE nodemap support
[ ! -d $DIR ] && do_node ${clients_arr[0]} mkdir -p $DIR
do_node ${clients_arr[0]} $LFS setdirstripe -c 1 -i \
$((fops_mds_index % MDSCOUNT)) $DIR/$tdir
((fops_mds_index++))
# acl test directory needs to be initialized on a privileged client
local admin=$(do_facet mgs $LCTL get_param -n nodemap.c0.admin_nodemap)
local trust=$(do_facet mgs $LCTL get_param -n \
nodemap.c0.trusted_nodemap)
do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
wait_nm_sync c0 admin_nodemap
wait_nm_sync c0 trusted_nodemap
do_node ${clients_arr[0]} rm -rf $DIR/$tdir
do_node ${clients_arr[0]} chown $user $DIR/$tdir
do_facet mgs $LCTL nodemap_modify --name c0 \
--property admin --value $admin
do_facet mgs $LCTL nodemap_modify --name c0 \
--property trusted --value $trust
# flush MDT locks to make sure they are reacquired before test
do_node ${clients_arr[0]} $LCTL set_param \
ldlm.namespaces.$FSNAME-MDT*.lru_size=clear
wait_nm_sync c0 admin_nodemap
wait_nm_sync c0 trusted_nodemap
# fileset test directory needs to be initialized on a privileged client
fileset_test_setup() {
if [ -n "$FILESET" -a -z "$SKIP_FILESET" ]; then
cleanup_mount $MOUNT
FILESET="" zconf_mount_clients $CLIENTS $MOUNT
local admin=$(do_facet mgs $LCTL get_param -n \
nodemap.${nm}.admin_nodemap)
local trust=$(do_facet mgs $LCTL get_param -n \
nodemap.${nm}.trusted_nodemap)
do_facet mgs $LCTL nodemap_modify --name $nm --property admin --value 1
do_facet mgs $LCTL nodemap_modify --name $nm --property trusted \
wait_nm_sync $nm admin_nodemap
wait_nm_sync $nm trusted_nodemap
# create directory and populate it for subdir mount
do_node ${clients_arr[0]} mkdir $MOUNT/$subdir ||
error "unable to create dir $MOUNT/$subdir"
do_node ${clients_arr[0]} touch $MOUNT/$subdir/this_is_$subdir ||
error "unable to create file $MOUNT/$subdir/this_is_$subdir"
do_node ${clients_arr[0]} mkdir $MOUNT/$subdir/$subsubdir ||
error "unable to create dir $MOUNT/$subdir/$subsubdir"
do_node ${clients_arr[0]} touch \
$MOUNT/$subdir/$subsubdir/this_is_$subsubdir ||
error "unable to create file \
$MOUNT/$subdir/$subsubdir/this_is_$subsubdir"
do_facet mgs $LCTL nodemap_modify --name $nm \
--property admin --value $admin
do_facet mgs $LCTL nodemap_modify --name $nm \
--property trusted --value $trust
# flush MDT locks to make sure they are reacquired before test
do_node ${clients_arr[0]} $LCTL set_param \
ldlm.namespaces.$FSNAME-MDT*.lru_size=clear
wait_nm_sync $nm admin_nodemap
wait_nm_sync $nm trusted_nodemap
# fileset test directory needs to be cleaned up on a privileged client
fileset_test_cleanup() {
local admin=$(do_facet mgs $LCTL get_param -n \
nodemap.${nm}.admin_nodemap)
local trust=$(do_facet mgs $LCTL get_param -n \
nodemap.${nm}.trusted_nodemap)
do_facet mgs $LCTL nodemap_modify --name $nm --property admin --value 1
do_facet mgs $LCTL nodemap_modify --name $nm --property trusted \
wait_nm_sync $nm admin_nodemap
wait_nm_sync $nm trusted_nodemap
# cleanup directory created for subdir mount
do_node ${clients_arr[0]} rm -rf $MOUNT/$subdir ||
error "unable to remove dir $MOUNT/$subdir"
do_facet mgs $LCTL nodemap_modify --name $nm \
--property admin --value $admin
do_facet mgs $LCTL nodemap_modify --name $nm \
--property trusted --value $trust
# flush MDT locks to make sure they are reacquired before test
do_node ${clients_arr[0]} $LCTL set_param \
ldlm.namespaces.$FSNAME-MDT*.lru_size=clear
wait_nm_sync $nm admin_nodemap
wait_nm_sync $nm trusted_nodemap
if [ -n "$FILESET" -a -z "$SKIP_FILESET" ]; then
cleanup_mount $MOUNT
zconf_mount_clients $CLIENTS $MOUNT
do_create_delete() {
local testfile=$DIR/$tdir/$tfile
if $run_u touch $testfile >& /dev/null; then
$run_u rm $testfile && d=1
local expected=$(get_cr_del_expected $key)
[ "$res" != "$expected" ] &&
error "test $key, wanted $expected, got $res" && rc=$((rc + 1))
nodemap_check_quota() {
$run_u lfs quota -q $DIR | awk '{ print $2; exit; }'
do_fops_quota_test() {
# fuzz quota used to account for possible indirect blocks, etc
local quota_fuzz=$(fs_log_size)
local qused_orig=$(nodemap_check_quota "$run_u")
local qused_high=$((qused_orig + quota_fuzz))
local qused_low=$((qused_orig - quota_fuzz))
local testfile=$DIR/$tdir/$tfile
$run_u dd if=/dev/zero of=$testfile oflag=sync bs=1M count=1 \
>& /dev/null || error "unable to write quota test file"
sync; sync_all_data || true
local qused_new=$(nodemap_check_quota "$run_u")
[ $((qused_new)) -lt $((qused_low + 1024)) -o \
$((qused_new)) -gt $((qused_high + 1024)) ] &&
error "$qused_new != $qused_orig + 1M after write, " \
"fuzz is $quota_fuzz"
$run_u rm $testfile || error "unable to remove quota test file"
wait_delete_completed_mds
qused_new=$(nodemap_check_quota "$run_u")
[ $((qused_new)) -lt $((qused_low)) \
-o $((qused_new)) -gt $((qused_high)) ] &&
error "quota not reclaimed, expect $qused_orig, " \
"got $qused_new, fuzz $quota_fuzz"
get_fops_mapped_user() {
for ((i=0; i < ${#FOPS_IDMAPS[@]}; i++)); do
for map in ${FOPS_IDMAPS[i]}; do
if [ $(cut -d: -f1 <<< "$map") == $cli_user ]; then
cut -d: -f2 <<< "$map"
get_cr_del_expected() {
IFS=":" read -a key <<< "$1"
local mapmode="${key[0]}"
local mds_user="${key[1]}"
local cluster="${key[2]}"
local cli_user="${key[3]}"
local mode="0${key[4]}"
[[ $mapmode == *mapped* ]] && mapped=1
# only c1 is mapped in these test cases
[[ $mapmode == mapped_trusted* ]] && [ "$cluster" == "c0" ] && mapped=0
[[ $mapmode == *noadmin* ]] && noadmin=1
# o+wx works as long as the user isn't mapped
if [ $((mode & 3)) -eq 3 ]; then
# if client user is root, check if root is squashed
if [ "$cli_user" == "0" ]; then
# squashed root succeeds if the 'other' bit is set
1) [ "$other" == "1" ] && echo $SUCCESS
[ "$other" == "0" ] && echo $FAILURE;;
if [ "$mapped" == "0" ]; then
[ "$other" == "1" ] && echo $SUCCESS
[ "$other" == "0" ] && echo $FAILURE
# if mapped user is mds user, check for u+wx
mapped_user=$(get_fops_mapped_user $cli_user)
[ "$mapped_user" == "-1" ] &&
error "unable to find mapping for client user $cli_user"
if [ "$mapped_user" == "$mds_user" -a \
$(((mode & 0300) == 0300)) -eq 1 ]; then
if [ "$mapped_user" != "$mds_user" -a "$other" == "1" ]; then
test_fops_admin_cli_i=""
test_fops_chmod_dir() {
local current_cli_i=$1
local dir_to_chmod=$3
local new_admin_cli_i=""
# do we need to set up a new admin client?
[ "$current_cli_i" == "0" ] && [ "$test_fops_admin_cli_i" != "1" ] &&
[ "$current_cli_i" != "0" ] && [ "$test_fops_admin_cli_i" != "0" ] &&
# if there is only one client and it is non-admin, flip admin every time
if [ "$num_clients" == "1" ]; then
test_fops_admin_client=$clients
test_fops_admin_val=$(do_facet mgs $LCTL get_param -n \
nodemap.c0.admin_nodemap)
if [ "$test_fops_admin_val" != "1" ]; then
do_facet mgs $LCTL nodemap_modify \
wait_nm_sync c0 admin_nodemap
elif [ "$new_admin_cli_i" != "" ]; then
# restore admin val to old admin client
if [ "$test_fops_admin_cli_i" != "" ] &&
[ "$test_fops_admin_val" != "1" ]; then
do_facet mgs $LCTL nodemap_modify \
--name c${test_fops_admin_cli_i} \
--value $test_fops_admin_val
wait_nm_sync c${test_fops_admin_cli_i} admin_nodemap
test_fops_admin_cli_i=$new_admin_cli_i
test_fops_admin_client=${clients_arr[$new_admin_cli_i]}
test_fops_admin_val=$(do_facet mgs $LCTL get_param -n \
nodemap.c${new_admin_cli_i}.admin_nodemap)
if [ "$test_fops_admin_val" != "1" ]; then
do_facet mgs $LCTL nodemap_modify \
--name c${new_admin_cli_i} \
wait_nm_sync c${new_admin_cli_i} admin_nodemap
do_node $test_fops_admin_client chmod $perm_bits $DIR/$tdir || return 1
# remove admin for single client if originally non-admin
if [ "$num_clients" == "1" ] && [ "$test_fops_admin_val" != "1" ]; then
do_facet mgs $LCTL nodemap_modify --name c0 --property admin \
wait_nm_sync c0 admin_nodemap
local single_client="$2"
local client_user_list=([0]="0 $((IDBASE+3)) $((IDBASE+4))"
[1]="0 $((IDBASE+5)) $((IDBASE+6))")
local perm_bit_list="0 3 $((0300)) $((0303))"
# SLOW tests 000-007, 010-070, 100-700 (octal modes)
[ "$SLOW" == "yes" ] &&
perm_bit_list="0 $(seq 1 7) $(seq 8 8 63) $(seq 64 64 511) \
# step through mds users. -1 means root
for mds_i in -1 0 1 2; do
local user=$((mds_i + IDBASE))
[ "$mds_i" == "-1" ] && user=0
echo mkdir -p $DIR/$tdir
for client in $clients; do
for u in ${client_user_list[$cli_i]}; do
local run_u="do_node $client \
$RUNAS_CMD -u$u -g$u -G$u"
for perm_bits in $perm_bit_list; do
local mode=$(printf %03o $perm_bits)
key="$mapmode:$user:c$cli_i:$u:$mode"
test_fops_chmod_dir $cli_i $mode \
error "cannot chmod $key"
do_create_delete "$run_u" "$key"
test_fops_chmod_dir $cli_i 777 $DIR/$tdir ||
error "cannot chmod $key"
do_fops_quota_test "$run_u"
cli_i=$((cli_i + 1))
[ "$single_client" == "1" ] && break
nodemap_version_check () {
remote_mgs_nodsh && skip "remote MGS with nodsh" && return 1
[ "$MGS_VERSION" -lt $(version_code 2.5.53) ] &&
skip "No nodemap on $MGS_VERSION MGS < 2.5.53" &&
nodemap_test_setup() {
local active_nodemap=1
[ "$1" == "0" ] && active_nodemap=0
do_nodes $(comma_list $(all_mdts_nodes)) \
$LCTL set_param mdt.*.identity_upcall=NONE
create_fops_nodemaps
[[ $rc != 0 ]] && error "adding fops nodemaps failed $rc"
do_facet mgs $LCTL nodemap_activate $active_nodemap
do_facet mgs $LCTL nodemap_modify --name default \
--property admin --value 1
wait_nm_sync default admin_nodemap
do_facet mgs $LCTL nodemap_modify --name default \
--property trusted --value 1
wait_nm_sync default trusted_nodemap
nodemap_test_cleanup() {
delete_fops_nodemaps
[[ $rc != 0 ]] && error "removing fops nodemaps failed $rc"
do_facet mgs $LCTL nodemap_modify --name default \
--property admin --value 0
wait_nm_sync default admin_nodemap
do_facet mgs $LCTL nodemap_modify --name default \
--property trusted --value 0
wait_nm_sync default trusted_nodemap
do_facet mgs $LCTL nodemap_activate 0
wait_nm_sync active 0
export SK_UNIQUE_NM=false
nodemap_clients_admin_trusted() {
for client in $clients; do
do_facet mgs $LCTL nodemap_modify --name c${i} \
--property admin --value $admin
do_facet mgs $LCTL nodemap_modify --name c${i} \
--property trusted --value $tr
wait_nm_sync c$((i - 1)) admin_nodemap
wait_nm_sync c$((i - 1)) trusted_nodemap
nodemap_version_check || return 0
nodemap_test_setup 0
trap nodemap_test_cleanup EXIT
nodemap_test_cleanup
run_test 16 "test nodemap all_off fileops"
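# Tests 17-20 below walk the four admin/trusted combinations via
# nodemap_clients_admin_trusted; the mapmode key passed to test_fops
# encodes the expected behavior:
#   trusted_noadmin (admin=0 trusted=1): ids trusted, root squashed
#   mapped_noadmin  (admin=0 trusted=0): ids mapped, root squashed
#   trusted_admin   (admin=1 trusted=1): ids trusted, root allowed
#   mapped_admin    (admin=1 trusted=0): ids mapped, root allowed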
[ "$MDS1_VERSION" -lt $(version_code 2.11.55) ]; then
skip "Need MDS >= 2.11.55"
nodemap_version_check || return 0
trap nodemap_test_cleanup EXIT
nodemap_clients_admin_trusted 0 1
test_fops trusted_noadmin 1
nodemap_test_cleanup
run_test 17 "test nodemap trusted_noadmin fileops"
[ "$MDS1_VERSION" -lt $(version_code 2.11.55) ]; then
skip "Need MDS >= 2.11.55"
nodemap_version_check || return 0
trap nodemap_test_cleanup EXIT
nodemap_clients_admin_trusted 0 0
test_fops mapped_noadmin 1
nodemap_test_cleanup
run_test 18 "test nodemap mapped_noadmin fileops"
[ "$MDS1_VERSION" -lt $(version_code 2.11.55) ]; then
skip "Need MDS >= 2.11.55"
nodemap_version_check || return 0
trap nodemap_test_cleanup EXIT
nodemap_clients_admin_trusted 1 1
test_fops trusted_admin 1
nodemap_test_cleanup
run_test 19 "test nodemap trusted_admin fileops"
[ "$MDS1_VERSION" -lt $(version_code 2.11.55) ]; then
skip "Need MDS >= 2.11.55"
nodemap_version_check || return 0
trap nodemap_test_cleanup EXIT
nodemap_clients_admin_trusted 1 0
test_fops mapped_admin 1
nodemap_test_cleanup
run_test 20 "test nodemap mapped_admin fileops"
[ "$MDS1_VERSION" -lt $(version_code 2.11.55) ]; then
skip "Need MDS >= 2.11.55"
nodemap_version_check || return 0
trap nodemap_test_cleanup EXIT
for client in $clients; do
do_facet mgs $LCTL nodemap_modify --name c${i} \
--property admin --value 0
do_facet mgs $LCTL nodemap_modify --name c${i} \
--property trusted --value $x
wait_nm_sync c$((i - 1)) trusted_nodemap
test_fops mapped_trusted_noadmin
nodemap_test_cleanup
run_test 21 "test nodemap mapped_trusted_noadmin fileops"
[ "$MDS1_VERSION" -lt $(version_code 2.11.55) ]; then
skip "Need MDS >= 2.11.55"
nodemap_version_check || return 0
trap nodemap_test_cleanup EXIT
for client in $clients; do
do_facet mgs $LCTL nodemap_modify --name c${i} \
--property admin --value 1
do_facet mgs $LCTL nodemap_modify --name c${i} \
--property trusted --value $x
wait_nm_sync c$((i - 1)) trusted_nodemap
test_fops mapped_trusted_admin
nodemap_test_cleanup
run_test 22 "test nodemap mapped_trusted_admin fileops"
# acl test directory needs to be initialized on a privileged client
nodemap_acl_test_setup() {
local admin=$(do_facet mgs $LCTL get_param -n \
nodemap.c0.admin_nodemap)
local trust=$(do_facet mgs $LCTL get_param -n \
nodemap.c0.trusted_nodemap)
do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
wait_nm_sync c0 admin_nodemap
wait_nm_sync c0 trusted_nodemap
do_node ${clients_arr[0]} rm -rf $DIR/$tdir
do_node ${clients_arr[0]} chmod a+rwx $DIR/$tdir ||
error "unable to chmod a+rwx test dir $DIR/$tdir"
do_facet mgs $LCTL nodemap_modify --name c0 \
--property admin --value $admin
do_facet mgs $LCTL nodemap_modify --name c0 \
--property trusted --value $trust
wait_nm_sync c0 trusted_nodemap
# returns 0 if the number of ACLs does not change on the second (mapped) client
# after being set on the first client
nodemap_acl_test() {
local set_client="$2"
local get_client="$3"
local check_setfacl="$4"
local setfacl_error=0
local testfile=$DIR/$tdir/$tfile
local RUNAS_USER="$RUNAS_CMD -u $user"
local acl_count_post=0
nodemap_acl_test_setup
do_node $set_client $RUNAS_USER touch $testfile
# ACL masks aren't filtered by nodemap code, so we ignore them
acl_count=$(do_node $get_client getfacl $testfile | grep -v mask |
do_node $set_client $RUNAS_USER setfacl -m $user:rwx $testfile ||
# if check_setfacl is 1, the setfacl above is expected to fail
if [ "$check_setfacl" == "1" ]; then
[ "$setfacl_error" != "1" ] && return 1
[ "$setfacl_error" == "1" ] && echo "WARNING: unable to setfacl"
acl_count_post=$(do_node $get_client getfacl $testfile | grep -v mask |
[ $acl_count -eq $acl_count_post ] && return 0
[ $num_clients -lt 2 ] && skip "need at least 2 clients" && return
nodemap_version_check || return 0
trap nodemap_test_cleanup EXIT
# 1 trusted cluster, 1 mapped cluster
local unmapped_fs=$((IDBASE+0))
local unmapped_c1=$((IDBASE+5))
local mapped_fs=$((IDBASE+2))
local mapped_c0=$((IDBASE+4))
local mapped_c1=$((IDBASE+6))
do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
do_facet mgs $LCTL nodemap_modify --name c1 --property admin --value 0
do_facet mgs $LCTL nodemap_modify --name c1 --property trusted --value 0
wait_nm_sync c1 trusted_nodemap
# setfacl on trusted cluster to unmapped user, verify it's not seen
nodemap_acl_test $unmapped_fs ${clients_arr[0]} ${clients_arr[1]} ||
error "acl count (1)"
# setfacl on trusted cluster to mapped user, verify it's seen
nodemap_acl_test $mapped_fs ${clients_arr[0]} ${clients_arr[1]} &&
error "acl count (2)"
# setfacl on mapped cluster to mapped user, verify it's seen
nodemap_acl_test $mapped_c1 ${clients_arr[1]} ${clients_arr[0]} &&
error "acl count (3)"
# setfacl on mapped cluster to unmapped user, verify error
nodemap_acl_test $unmapped_fs ${clients_arr[1]} ${clients_arr[0]} 1 ||
error "acl count (4)"
do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 0
do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 0
wait_nm_sync c0 trusted_nodemap
# setfacl to mapped user on c1, also mapped to c0, verify it's seen
nodemap_acl_test $mapped_c1 ${clients_arr[1]} ${clients_arr[0]} &&
error "acl count (5)"
# setfacl to mapped user on c1, not mapped to c0, verify not seen
nodemap_acl_test $unmapped_c1 ${clients_arr[1]} ${clients_arr[0]} ||
error "acl count (6)"
nodemap_test_cleanup
run_test 23a "test mapped regular ACLs"
test_23b() { #LU-9929
[ $num_clients -lt 2 ] && skip "need at least 2 clients"
[ "$MGS_VERSION" -lt $(version_code 2.10.53) ] &&
skip "Need MGS >= 2.10.53"
export SK_UNIQUE_NM=true
trap nodemap_test_cleanup EXIT
local testdir=$DIR/$tdir
local fs_id=$((IDBASE+10))
do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
wait_nm_sync c0 admin_nodemap
do_facet mgs $LCTL nodemap_modify --name c1 --property admin --value 1
wait_nm_sync c1 admin_nodemap
do_facet mgs $LCTL nodemap_modify --name c1 --property trusted --value 1
wait_nm_sync c1 trusted_nodemap
# Add idmap $ID0:$fs_id (500:60010)
do_facet mgs $LCTL nodemap_add_idmap --name c0 --idtype gid \
--idmap $ID0:$fs_id ||
error "add idmap $ID0:$fs_id to nodemap c0 failed"
wait_nm_sync c0 idmap
# set/getfacl default acl on client 1 (unmapped gid=500)
do_node ${clients_arr[0]} rm -rf $testdir
do_node ${clients_arr[0]} mkdir -p $testdir
# Here, USER0=$(getent passwd | grep :$ID0:$ID0: | cut -d: -f1)
do_node ${clients_arr[0]} setfacl -R -d -m group:$USER0:rwx $testdir ||
error "setfacl $testdir on ${clients_arr[0]} failed"
unmapped_id=$(do_node ${clients_arr[0]} getfacl $testdir |
grep -E "default:group:.*:rwx" | awk -F: '{print $3}')
[ "$unmapped_id" = "$USER0" ] ||
error "gid=$ID0 was not unmapped correctly on ${clients_arr[0]}"
# getfacl default acl on client 2 (mapped gid=60010)
mapped_id=$(do_node ${clients_arr[1]} getfacl $testdir |
grep -E "default:group:.*:rwx" | awk -F: '{print $3}')
fs_user=$(do_node ${clients_arr[1]} getent passwd |
grep :$fs_id:$fs_id: | cut -d: -f1)
[ -z "$fs_user" ] && fs_user=$fs_id
[ $mapped_id -eq $fs_id -o "$mapped_id" = "$fs_user" ] ||
error "Should return gid=$fs_id or $fs_user on client2"
nodemap_test_cleanup
export SK_UNIQUE_NM=false
run_test 23b "test mapped default ACLs"
trap nodemap_test_cleanup EXIT
do_nodes $(comma_list $(all_server_nodes)) $LCTL get_param -R nodemap
nodemap_test_cleanup
run_test 24 "check nodemap proc files for LBUGs and Oopses"
local tmpfile=$(mktemp)
local tmpfile2=$(mktemp)
local tmpfile3=$(mktemp)
local tmpfile4=$(mktemp)
nodemap_version_check || return 0
# stop clients for this test
zconf_umount_clients $CLIENTS $MOUNT ||
error "unable to umount clients $CLIENTS"
export SK_UNIQUE_NM=true
# enable trusted/admin for setquota call in cleanup_and_setup_lustre()
for client in $clients; do
do_facet mgs $LCTL nodemap_modify --name c${i} \
--property admin --value 1
do_facet mgs $LCTL nodemap_modify --name c${i} \
--property trusted --value 1
wait_nm_sync c$((i - 1)) trusted_nodemap
trap nodemap_test_cleanup EXIT
# create a new, empty nodemap, and add fileset info to it
do_facet mgs $LCTL nodemap_add test25 ||
error "unable to create nodemap $testname"
do_facet mgs $LCTL set_param -P nodemap.$testname.fileset=/$subdir ||
error "unable to add fileset info to nodemap test25"
wait_nm_sync test25 id
do_facet mgs $LCTL nodemap_info > $tmpfile
do_facet mds $LCTL nodemap_info > $tmpfile2
if ! $SHARED_KEY; then
# will conflict with SK's nodemaps
cleanup_and_setup_lustre
# stop clients for this test
zconf_umount_clients $CLIENTS $MOUNT ||
error "unable to umount clients $CLIENTS"
do_facet mgs $LCTL nodemap_info > $tmpfile3
diff -q $tmpfile3 $tmpfile >& /dev/null ||
error "nodemap_info diff on MGS after remount"
do_facet mds $LCTL nodemap_info > $tmpfile4
diff -q $tmpfile4 $tmpfile2 >& /dev/null ||
error "nodemap_info diff on MDS after remount"
do_facet mgs $LCTL nodemap_del test25 ||
error "cannot delete nodemap test25 from config"
nodemap_test_cleanup
# restart clients previously stopped
zconf_mount_clients $CLIENTS $MOUNT ||
error "unable to mount clients $CLIENTS"
rm -f $tmpfile $tmpfile2 $tmpfile3 $tmpfile4
export SK_UNIQUE_NM=false
run_test 25 "test save and reload nodemap config"
nodemap_version_check || return 0
do_facet mgs "seq -f 'c%g' $large_i | xargs -n1 $LCTL nodemap_add"
wait_nm_sync c$large_i admin_nodemap
do_facet mgs "seq -f 'c%g' $large_i | xargs -n1 $LCTL nodemap_del"
wait_nm_sync c$large_i admin_nodemap
run_test 26 "test transferring very large nodemap"
nodemap_exercise_fileset() {
if [ "$nm" == "default" ]; then
do_facet mgs $LCTL nodemap_activate 1
if $SHARED_KEY; then
export SK_UNIQUE_NM=true
# will conflict with SK's nodemaps
trap "fileset_test_cleanup $nm" EXIT
fileset_test_setup "$nm"
# add fileset info to $nm nodemap
if ! combined_mgs_mds; then
do_facet mgs $LCTL set_param nodemap.${nm}.fileset=/$subdir ||
error "unable to add fileset info to $nm nodemap on MGS"
do_facet mgs $LCTL set_param -P nodemap.${nm}.fileset=/$subdir ||
error "unable to add fileset info to $nm nodemap for servers"
wait_nm_sync $nm fileset "nodemap.${nm}.fileset=/$subdir"
zconf_umount_clients ${clients_arr[0]} $MOUNT ||
error "unable to umount client ${clients_arr[0]}"
# set some generic fileset to trigger SSK code
zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
error "unable to remount client ${clients_arr[0]}"
# test mount point content
do_node ${clients_arr[0]} test -f $MOUNT/this_is_$subdir ||
error "fileset not taken into account"
# re-mount client with sub-subdir
zconf_umount_clients ${clients_arr[0]} $MOUNT ||
error "unable to umount client ${clients_arr[0]}"
export FILESET=/$subsubdir
zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
error "unable to remount client ${clients_arr[0]}"
# test mount point content
do_node ${clients_arr[0]} test -f $MOUNT/this_is_$subsubdir ||
error "subdir of fileset not taken into account"
# remove fileset info from nodemap
do_facet mgs $LCTL nodemap_set_fileset --name $nm --fileset clear ||
error "unable to delete fileset info on $nm nodemap"
wait_update_facet mgs "$LCTL get_param nodemap.${nm}.fileset" \
"nodemap.${nm}.fileset=" ||
error "fileset info still not cleared on $nm nodemap"
do_facet mgs $LCTL set_param -P nodemap.${nm}.fileset=clear ||
error "unable to reset fileset info on $nm nodemap"
wait_nm_sync $nm fileset "nodemap.${nm}.fileset="
zconf_umount_clients ${clients_arr[0]} $MOUNT ||
error "unable to umount client ${clients_arr[0]}"
zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
error "unable to remount client ${clients_arr[0]}"
# test mount point content
if ! do_node ${clients_arr[0]} test -d $MOUNT/$subdir; then
error "fileset not cleared on $nm nodemap"
# back to non-nodemap setup
if $SHARED_KEY; then
export SK_UNIQUE_NM=false
zconf_umount_clients ${clients_arr[0]} $MOUNT ||
error "unable to umount client ${clients_arr[0]}"
fileset_test_cleanup "$nm"
if [ "$nm" == "default" ]; then
do_facet mgs $LCTL nodemap_activate 0
wait_nm_sync active 0
export SK_UNIQUE_NM=false
nodemap_test_cleanup
if $SHARED_KEY; then
zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
error "unable to remount client ${clients_arr[0]}"
[ "$MDS1_VERSION" -lt $(version_code 2.11.50) ] &&
skip "Need MDS >= 2.11.50"
for nm in "default" "c0"; do
local subdir="subdir_${nm}"
local subsubdir="subsubdir_${nm}"
if [ "$nm" == "default" ] && [ "$SHARED_KEY" == "true" ]; then
echo "Skipping nodemap $nm with SHARED_KEY";
echo "Exercising fileset for nodemap $nm"
nodemap_exercise_fileset "$nm"
run_test 27a "test fileset in various nodemaps"
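# A fileset is itself a nodemap property: setting one restricts what the
# affected clients see as the filesystem root. A condensed example of
# what nodemap_exercise_fileset does for nodemap c0:
#   lctl set_param -P nodemap.c0.fileset=/subdir_c0
# after which clients in c0 mount /subdir_c0 as their root, and
#   lctl set_param -P nodemap.c0.fileset=clear
# restores the full namespace.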
test_27b() { #LU-10703
[ "$MDS1_VERSION" -lt $(version_code 2.11.50) ] &&
skip "Need MDS >= 2.11.50"
[[ $MDSCOUNT -lt 2 ]] && skip "needs >= 2 MDTs"
trap nodemap_test_cleanup EXIT
# Add the nodemaps and set their filesets
for i in $(seq 1 $MDSCOUNT); do
do_facet mgs $LCTL nodemap_del nm$i 2>/dev/null
do_facet mgs $LCTL nodemap_add nm$i ||
error "add nodemap nm$i failed"
wait_nm_sync nm$i "" "" "-N"
if ! combined_mgs_mds; then
$LCTL set_param nodemap.nm$i.fileset=/dir$i ||
error "set nm$i.fileset=/dir$i failed on MGS"
do_facet mgs $LCTL set_param -P nodemap.nm$i.fileset=/dir$i ||
error "set nm$i.fileset=/dir$i failed on servers"
wait_nm_sync nm$i fileset "nodemap.nm$i.fileset=/dir$i"
# Check if all the filesets are correct
for i in $(seq 1 $MDSCOUNT); do
fileset=$(do_facet mds$i \
$LCTL get_param -n nodemap.nm$i.fileset)
[ "$fileset" = "/dir$i" ] ||
error "nm$i.fileset $fileset != /dir$i on mds$i"
do_facet mgs $LCTL nodemap_del nm$i ||
error "delete nodemap nm$i failed"
nodemap_test_cleanup
run_test 27b "The new nodemap won't clear the old nodemap's fileset"
if ! $SHARED_KEY; then
skip "need shared key feature for this test" && return
mkdir -p $DIR/$tdir || error "mkdir failed"
touch $DIR/$tdir/$tdir.out || error "touch failed"
if [ ! -f $DIR/$tdir/$tdir.out ]; then
error "read before rotation failed"
# store top key identity to ensure rotation has occurred
SK_IDENTITY_OLD=$(lctl get_param *.*.*srpc* | grep "expire" |
head -1 | awk '{print $15}' | cut -c1-8)
do_facet $SINGLEMDS lfs flushctx ||
error "could not run flushctx on $SINGLEMDS"
lfs flushctx || error "could not run flushctx on client"
# verify new key is in place
SK_IDENTITY_NEW=$(lctl get_param *.*.*srpc* | grep "expire" |
head -1 | awk '{print $15}' | cut -c1-8)
if [ $SK_IDENTITY_OLD == $SK_IDENTITY_NEW ]; then
error "key did not rotate correctly"
if [ ! -f $DIR/$tdir/$tdir.out ]; then
error "read after rotation failed"
run_test 28 "check shared key rotation method"
if ! $SHARED_KEY; then
skip "need shared key feature for this test" && return
if [ $SK_FLAVOR != "ski" ] && [ $SK_FLAVOR != "skpi" ]; then
skip "test only valid if integrity is active"
mkdir $DIR/$tdir || error "mkdir"
touch $DIR/$tdir/$tfile || error "touch"
zconf_umount_clients ${clients_arr[0]} $MOUNT ||
error "unable to umount clients"
keyctl show | awk '/lustre/ { print $1 }' |
xargs -IX keyctl unlink X
OLD_SK_PATH=$SK_PATH
export SK_PATH=/dev/null
if zconf_mount_clients ${clients_arr[0]} $MOUNT; then
export SK_PATH=$OLD_SK_PATH
if [ -e $DIR/$tdir/$tfile ]; then
error "able to mount and read without key"
error "able to mount without key"
export SK_PATH=$OLD_SK_PATH
keyctl show | awk '/lustre/ { print $1 }' |
xargs -IX keyctl unlink X
run_test 29 "check for missing shared key"
if ! $SHARED_KEY; then
skip "need shared key feature for this test" && return
if [ $SK_FLAVOR != "ski" ] && [ $SK_FLAVOR != "skpi" ]; then
skip "test only valid if integrity is active"
mkdir -p $DIR/$tdir || error "mkdir failed"
touch $DIR/$tdir/$tdir.out || error "touch failed"
zconf_umount_clients ${clients_arr[0]} $MOUNT ||
error "unable to umount clients"
# unload keys from ring
keyctl show | awk '/lustre/ { print $1 }' |
xargs -IX keyctl unlink X
# invalidate the key with a bogus filesystem name
lgss_sk -w $SK_PATH/$FSNAME-bogus.key -f $FSNAME.bogus \
-t client -d /dev/urandom || error "lgss_sk failed (1)"
do_facet $SINGLEMDS lfs flushctx || error "could not run flushctx"
OLD_SK_PATH=$SK_PATH
export SK_PATH=$SK_PATH/$FSNAME-bogus.key
if zconf_mount_clients ${clients_arr[0]} $MOUNT; then
SK_PATH=$OLD_SK_PATH
if [ -e $DIR/$tdir/$tdir.out ]; then
error "mount and read file with invalid key"
error "mount with invalid key"
SK_PATH=$OLD_SK_PATH
zconf_umount_clients ${clients_arr[0]} $MOUNT ||
error "unable to umount clients"
run_test 30 "check for invalid shared key"
zconf_umount $HOSTNAME $MOUNT || error "unable to umount client"
# remove ${NETTYPE}999 network on all nodes
do_nodes $(comma_list $(all_nodes)) \
"$LNETCTL net del --net ${NETTYPE}999 && \
$LNETCTL lnet unconfigure 2>/dev/null || true"
# necessary to do writeconf in order to de-register
# @${NETTYPE}999 nid for targets
export KEEP_ZPOOL="true"
export SK_MOUNTED=false
export KEEP_ZPOOL="$KZPOOL"
local nid=$(lctl list_nids | grep ${NETTYPE} | head -n1)
local addr=${nid%@*}
export LNETCTL=$(which lnetctl 2> /dev/null)
[ -z "$LNETCTL" ] && skip "without lnetctl support." && return
local_mode && skip "in local mode."
stack_trap cleanup_31 EXIT
if [ "$MOUNT_2" ] && grep -q $MOUNT2' ' /proc/mounts; then
umount_client $MOUNT2 || error "umount $MOUNT2 failed"
if grep -q $MOUNT' ' /proc/mounts; then
umount_client $MOUNT || error "umount $MOUNT failed"
# check exports on servers are empty for client
do_facet mgs "lctl get_param -n *.MGS*.exports.'$nid'.uuid 2>/dev/null |
grep -q -" && error "export on MGS should be empty"
do_nodes $(comma_list $(mdts_nodes) $(osts_nodes)) \
"lctl get_param -n *.${FSNAME}*.exports.'$nid'.uuid \
2>/dev/null | grep -q -" &&
error "export on servers should be empty"
# add network ${NETTYPE}999 on all nodes
do_nodes $(comma_list $(all_nodes)) \
"$LNETCTL lnet configure && $LNETCTL net add --if \
\$($LNETCTL net show --net $net | awk 'BEGIN{inf=0} \
{if (inf==1) print \$2; inf=0} /interfaces/{inf=1}') \
--net ${NETTYPE}999" ||
error "unable to configure NID ${NETTYPE}999"
# necessary to do writeconf in order to register
# new @${NETTYPE}999 nid for targets
export KEEP_ZPOOL="true"
export SK_MOUNTED=false
setupall server_only || echo 1
export KEEP_ZPOOL="$KZPOOL"
local mgsnid_orig=$MGSNID
# compute new MGSNID
MGSNID=$(do_facet mgs "$LCTL list_nids | grep ${NETTYPE}999")
# on client, turn LNet Dynamic Discovery on
$LNETCTL set discovery 1
# mount client with -o network=${NETTYPE}999 option:
# should fail because of LNet Dynamic Discovery
mount_client $MOUNT ${MOUNT_OPTS},network=${NETTYPE}999 &&
error "client mount with '-o network' option should be refused"
# on client, reconfigure LNet and turn LNet Dynamic Discovery off
2282 $LNETCTL net del --net ${NETTYPE}999 && $LNETCTL lnet unconfigure
2285 $LNETCTL set discovery 0
2287 $LNETCTL lnet configure && $LNETCTL net add --if \
2288 $($LNETCTL net show --net $net | awk 'BEGIN{inf=0} \
2289 {if (inf==1) print $2; inf=0} /interfaces/{inf=1}') \
2290 --net ${NETTYPE}999 ||
2291 error "unable to configure NID ${NETTYPE}999 on client"
2293 # mount client with -o network=${NETTYPE}999 option
2294 mount_client $MOUNT ${MOUNT_OPTS},network=${NETTYPE}999 ||
2295 error "unable to remount client"
2300 # check export on MGS
2301 do_facet mgs "lctl get_param -n *.MGS*.exports.'$nid'.uuid 2>/dev/null |
2302 grep -q -"
2303 [ $? -ne 0 ] || error "export for $nid on MGS should not exist"
2306 "lctl get_param -n *.MGS*.exports.'${addr}@${NETTYPE}999'.uuid \
2307 2>/dev/null | grep -q -"
2309 error "export for ${addr}@${NETTYPE}999 on MGS should exist"
2311 # check {mdc,osc} imports
2312 lctl get_param mdc.${FSNAME}-*.import | grep current_connection |
2313 grep -q ${NETTYPE}999
2314 [ $? -eq 0 ] ||
2315 error "import for mdc should use ${addr}@${NETTYPE}999"
2316 lctl get_param osc.${FSNAME}-*.import | grep current_connection |
2317 grep -q ${NETTYPE}999
2318 [ $? -eq 0 ] ||
2319 error "import for osc should use ${addr}@${NETTYPE}999"
2321 run_test 31 "client mount option '-o network'"
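# A minimal sketch of the interface lookup embedded in the "net add" commands
# above: print the first interface of an existing LNet network (hypothetical
# helper; assumes lnetctl YAML output where interface names follow an
# "interfaces:" line).
first_net_interface() {
	local net=$1
	$LNETCTL net show --net $net |
		awk 'BEGIN{inf=0} {if (inf==1) print $2; inf=0} /interfaces/{inf=1}'
}
# e.g.: $LNETCTL net add --net ${NETTYPE}999 --if $(first_net_interface tcp)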
2325 zconf_umount_clients ${clients_arr[0]} $MOUNT
2327 # disable sk flavor enforcement on MGS
2328 set_rule _mgs any any null
2330 # stop gss daemon on MGS
2331 if ! combined_mgs_mds ; then
2332 send_sigint $mgs_HOST lsvcgssd
2336 MOUNT_OPTS=$(add_sk_mntflag $MOUNT_OPTS)
2339 restore_to_default_flavor
2343 if ! $SHARED_KEY; then
2344 skip "need shared key feature for this test"
2347 stack_trap cleanup_32 EXIT
2349 # restore to default null flavor
2350 save_flvr=$SK_FLAVOR
2352 restore_to_default_flavor || error "cannot set null flavor"
2353 SK_FLAVOR=$save_flvr
2356 if [ "$MOUNT_2" ] && $(grep -q $MOUNT2' ' /proc/mounts); then
2357 umount_client $MOUNT2 || error "umount $MOUNT2 failed"
2359 if grep -q $MOUNT' ' /proc/mounts; then
2360 umount_client $MOUNT || error "umount $MOUNT failed"
2363 # start gss daemon on MGS
2364 if combined_mgs_mds ; then
2365 send_sigint $mds_HOST lsvcgssd
2367 start_gss_daemons $mgs_HOST "$LSVCGSSD -vvv -s -g"
2369 # add mgs key type and MGS NIDs in key on MGS
2370 do_nodes $mgs_HOST "lgss_sk -t mgs,server -g $MGSNID -m \
2371 $SK_PATH/$FSNAME.key >/dev/null 2>&1" ||
2372 error "could not modify keyfile on MGS"
2374 # load modified key file on MGS
2375 do_nodes $mgs_HOST "lgss_sk -l $SK_PATH/$FSNAME.key >/dev/null 2>&1" ||
2376 error "could not load keyfile on MGS"
2378 # add MGS NIDs in key on client
2379 do_nodes ${clients_arr[0]} "lgss_sk -g $MGSNID -m \
2380 $SK_PATH/$FSNAME.key >/dev/null 2>&1" ||
2381 error "could not modify keyfile on MGS"
2383 # set perms for per-nodemap keys else permission denied
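# (0x3f3f3f3f grants view/read/write/search/link/setattr to possessor,
# user, group and other)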
2384 do_nodes $(comma_list $(all_nodes)) \
2385 "keyctl show | grep lustre | cut -c1-11 |
2387 xargs -IX keyctl setperm X 0x3f3f3f3f"
2389 # re-mount client with mgssec=skn
2390 save_opts=$MOUNT_OPTS
2391 if [ -z "$MOUNT_OPTS" ]; then
2392 MOUNT_OPTS="-o mgssec=skn"
2394 MOUNT_OPTS="$MOUNT_OPTS,mgssec=skn"
2396 zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
2397 error "mount ${clients_arr[0]} with mgssec=skn failed"
2398 MOUNT_OPTS=$save_opts
2401 zconf_umount_clients ${clients_arr[0]} $MOUNT ||
2402 error "umount ${clients_arr[0]} failed"
2404 # enforce ska flavor on MGS
2405 set_rule _mgs any any ska
2407 # re-mount client without mgssec
2408 zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS &&
2409 error "mount ${clients_arr[0]} without mgssec should fail"
2411 # re-mount client with mgssec=skn
2412 save_opts=$MOUNT_OPTS
2413 if [ -z "$MOUNT_OPTS" ]; then
2414 MOUNT_OPTS="-o mgssec=skn"
2416 MOUNT_OPTS="$MOUNT_OPTS,mgssec=skn"
2418 zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS &&
2419 error "mount ${clients_arr[0]} with mgssec=skn should fail"
2420 MOUNT_OPTS=$save_opts
2422 # re-mount client with mgssec=ska
2423 save_opts=$MOUNT_OPTS
2424 if [ -z "$MOUNT_OPTS" ]; then
2425 MOUNT_OPTS="-o mgssec=ska"
2427 MOUNT_OPTS="$MOUNT_OPTS,mgssec=ska"
2429 zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
2430 error "mount ${clients_arr[0]} with mgssec=ska failed"
2431 MOUNT_OPTS=$save_opts
2435 run_test 32 "check for mgssec"
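# A minimal sketch (illustrative mount point, not from the suite) of what the
# mount helpers above do with mgssec: pass it as a mount option so the client
# requests that flavor for its MGS connection.
mount_with_mgssec() {
	local flavor=$1		# e.g. skn or ska
	mount -t lustre -o mgssec=$flavor $MGSNID:/$FSNAME /mnt/$FSNAME
}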
2438 # disable sk flavor enforcement
2439 set_rule $FSNAME any cli2mdt null
2440 wait_flavor cli2mdt null
2443 zconf_umount_clients ${clients_arr[0]} $MOUNT
2445 # stop gss daemon on MGS
2446 if ! combined_mgs_mds ; then
2447 send_sigint $mgs_HOST lsvcgssd
2451 MOUNT_OPTS=$(add_sk_mntflag $MOUNT_OPTS)
2454 restore_to_default_flavor
2458 if ! $SHARED_KEY; then
2459 skip "need shared key feature for this test"
2462 stack_trap cleanup_33 EXIT
2464 # restore to default null flavor
2465 save_flvr=$SK_FLAVOR
2467 restore_to_default_flavor || error "cannot set null flavor"
2468 SK_FLAVOR=$save_flvr
2471 if [ "$MOUNT_2" ] && $(grep -q $MOUNT2' ' /proc/mounts); then
2472 umount_client $MOUNT2 || error "umount $MOUNT2 failed"
2474 if grep -q $MOUNT' ' /proc/mounts; then
2475 umount_client $MOUNT || error "umount $MOUNT failed"
2478 # start gss daemon on MGS
2479 if combined_mgs_mds ; then
2480 send_sigint $mds_HOST lsvcgssd
2482 start_gss_daemons $mgs_HOST "$LSVCGSSD -vvv -s -g"
2484 # add mgs key type and MGS NIDs in key on MGS
2485 do_nodes $mgs_HOST "lgss_sk -t mgs,server -g $MGSNID -m \
2486 $SK_PATH/$FSNAME.key >/dev/null 2>&1" ||
2487 error "could not modify keyfile on MGS"
2489 # load modified key file on MGS
2490 do_nodes $mgs_HOST "lgss_sk -l $SK_PATH/$FSNAME.key >/dev/null 2>&1" ||
2491 error "could not load keyfile on MGS"
2493 # add MGS NIDs in key on client
2494 do_nodes ${clients_arr[0]} "lgss_sk -g $MGSNID -m \
2495 $SK_PATH/$FSNAME.key >/dev/null 2>&1" ||
2496 error "could not modify keyfile on MGS"
2498 # set perms for per-nodemap keys else permission denied
2499 do_nodes $(comma_list $(all_nodes)) \
2500 "keyctl show | grep lustre | cut -c1-11 |
2502 xargs -IX keyctl setperm X 0x3f3f3f3f"
2504 # re-mount client with mgssec=skn
2505 save_opts=$MOUNT_OPTS
2506 if [ -z "$MOUNT_OPTS" ]; then
2507 MOUNT_OPTS="-o mgssec=skn"
2509 MOUNT_OPTS="$MOUNT_OPTS,mgssec=skn"
2511 zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
2512 error "mount ${clients_arr[0]} with mgssec=skn failed"
2513 MOUNT_OPTS=$save_opts
2515 # enforce ska flavor for cli2mdt
2516 set_rule $FSNAME any cli2mdt ska
2517 wait_flavor cli2mdt ska
2519 # check the debug log for the srpc error message
2520 $LCTL dk | grep "faked source" &&
2521 error "MGS connection srpc flags incorrect"
2525 run_test 33 "correct srpc flags for MGS connection"
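# A minimal sketch of the debug-log check test 33 relies on: dump the kernel
# debug buffer and make sure the "faked source" message, which indicates
# wrong srpc flags on the MGS connection, is absent.
srpc_flags_ok() {
	! $LCTL dk | grep -q "faked source"
}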
2528 # restore deny_unknown
2529 do_facet mgs $LCTL nodemap_modify --name default \
2530 --property deny_unknown --value $denydefault
2531 if [ $? -ne 0 ]; then
2532 error_noexit "cannot reset deny_unknown on default nodemap"
2536 wait_nm_sync default deny_unknown
2543 [ $MGS_VERSION -lt $(version_code 2.12.51) ] &&
2544 skip "deny_unknown on default nm not supported before 2.12.51"
2546 activedefault=$(do_facet mgs $LCTL get_param -n nodemap.active)
2548 if [[ "$activedefault" != "1" ]]; then
2549 do_facet mgs $LCTL nodemap_activate 1
2551 stack_trap cleanup_active EXIT
2554 denydefault=$(do_facet mgs $LCTL get_param -n \
2555 nodemap.default.deny_unknown)
2556 [ -z "$denydefault" ] &&
2557 error "cannot get deny_unknown on default nodemap"
2558 if [ "$denydefault" -eq 0 ]; then
2564 do_facet mgs $LCTL nodemap_modify --name default \
2565 --property deny_unknown --value $denynew ||
2566 error "cannot set deny_unknown on default nodemap"
2568 [ "$(do_facet mgs $LCTL get_param -n nodemap.default.deny_unknown)" \
2570 error "setting deny_unknown on default nodemap did not work"
2572 stack_trap cleanup_34_deny EXIT
2574 wait_nm_sync default deny_unknown
2576 run_test 34 "deny_unknown on default nodemap"
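# A minimal sketch (hypothetical helper, run on the MGS) mirroring the flow
# of test 34: read deny_unknown on the default nodemap and flip it.
toggle_deny_unknown() {
	local cur
	cur=$(lctl get_param -n nodemap.default.deny_unknown) || return 1
	lctl nodemap_modify --name default --property deny_unknown \
		--value $(((cur + 1) % 2))
}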
2578 log "cleanup: ======================================================"
2581 for num in $(seq $MDSCOUNT); do
2582 if [ "${identity_old[$num]}" = 1 ]; then
2583 switch_identity $num false || identity_old[$num]=$?
2587 $RUNAS_CMD -u $ID0 ls $DIR
2588 $RUNAS_CMD -u $ID1 ls $DIR
2593 check_and_cleanup_lustre