# Run select tests by setting ONLY, or as arguments to the script.
# Skip specific tests by setting EXCEPT.

LUSTRE=${LUSTRE:-$(dirname $0)/..}
. $LUSTRE/tests/test-framework.sh

ALWAYS_EXCEPT="$SANITY_SEC_EXCEPT "
# bug number for skipped test:
# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!

[ "$SLOW" = "no" ] && EXCEPT_SLOW="26"

NODEMAP_TESTS=$(seq 7 26)

if ! check_versions; then
	echo "Skipping nodemap tests: not needed in interoperability mode"
	EXCEPT="$EXCEPT $NODEMAP_TESTS"
RUNAS_CMD=${RUNAS_CMD:-runas}

WTL=${WTL:-"$LUSTRE/tests/write_time_limit"}

PERM_CONF=$CONFDIR/perm.conf

HOSTNAME_CHECKSUM=$(hostname | sum | awk '{ print $1 }')
SUBNET_CHECKSUM=$(expr $HOSTNAME_CHECKSUM % 250 + 1)
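# For illustration (hypothetical hostname): if "hostname | sum" yields 37498,
# the nodemaps created below are named 37498_0, 37498_1, ..., and their NID
# ranges use the fake subnet 37498 % 250 + 1 = 249, i.e. 249.x.y.z@tcp.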
require_dsh_mds || exit 0
require_dsh_ost || exit 0

clients=${CLIENTS//,/ }
num_clients=$(get_node_count ${clients})
clients_arr=($clients)

USER0=$(getent passwd | grep :$ID0:$ID0: | cut -d: -f1)
USER1=$(getent passwd | grep :$ID1:$ID1: | cut -d: -f1)

NODEMAP_IPADDR_LIST="1 10 64 128 200 250"

NODEMAP_MAX_ID=$((ID0 + NODEMAP_ID_COUNT))
	skip "need to add user0 ($ID0:$ID0)" && exit 0

	skip "need to add user1 ($ID1:$ID1)" && exit 0

IDBASE=${IDBASE:-60000}

# changes to mappings must be reflected in test 23
	[0]="$((IDBASE+3)):$((IDBASE+0)) $((IDBASE+4)):$((IDBASE+2))"
	[1]="$((IDBASE+5)):$((IDBASE+1)) $((IDBASE+6)):$((IDBASE+2))"
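# For illustration, with the default IDBASE=60000 the idmaps above expand to:
#   cluster c0: client id 60003 -> fs id 60000, 60004 -> 60002
#   cluster c1: client id 60005 -> fs id 60001, 60006 -> 60002
# (note that 60004 and 60006 both map to fs id 60002)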
check_and_setup_lustre

GSS_REF=$(lsmod | grep ^ptlrpc_gss | awk '{print $3}')
if [ -n "$GSS_REF" ] && [ "$GSS_REF" != "0" ]; then
	echo "with GSS support"

	echo "without GSS support"

MDT=$(do_facet $SINGLEMDS lctl get_param -N "mdt.\*MDT0000" |
[ -z "$MDT" ] && error "failed to get MDT device" && exit 1
do_facet $SINGLEMDS "mkdir -p $CONFDIR"
IDENTITY_FLUSH=mdt.$MDT.identity_flush
IDENTITY_UPCALL=mdt.$MDT.identity_upcall

	if ! $RUNAS_CMD -u $user krb5_login.sh; then
		error "kerberos login for $user failed."

	if ! $RUNAS_CMD -u $user -g $group ls $DIR > /dev/null 2>&1; then
		$RUNAS_CMD -u $user lfs flushctx -k
		$RUNAS_CMD -u $user krb5_login.sh
		if ! $RUNAS_CMD -u $user -g $group ls $DIR > /dev/null 2>&1; then
			error "init $user $group failed."
declare -a identity_old

	for num in $(seq $MDSCOUNT); do
		switch_identity $num true || identity_old[$num]=$?

	if ! $RUNAS_CMD -u $ID0 ls $DIR > /dev/null 2>&1; then
		sec_login $USER0 $USER0

	if ! $RUNAS_CMD -u $ID1 ls $DIR > /dev/null 2>&1; then
		sec_login $USER1 $USER1

# run as different user
	chmod 0755 $DIR || error "chmod (1)"
	rm -rf $DIR/$tdir || error "rm (1)"
	mkdir -p $DIR/$tdir || error "mkdir (1)"
	chown $USER0 $DIR/$tdir || error "chown (2)"
	$RUNAS_CMD -u $ID0 ls $DIR || error "ls (1)"
	rm -f $DIR/f0 || error "rm (2)"
	$RUNAS_CMD -u $ID0 touch $DIR/f0 && error "touch (1)"
	$RUNAS_CMD -u $ID0 touch $DIR/$tdir/f1 || error "touch (2)"
	$RUNAS_CMD -u $ID1 touch $DIR/$tdir/f2 && error "touch (3)"
	touch $DIR/$tdir/f3 || error "touch (4)"
	chown root $DIR/$tdir || error "chown (3)"
	chgrp $USER0 $DIR/$tdir || error "chgrp (1)"
	chmod 0775 $DIR/$tdir || error "chmod (2)"
	$RUNAS_CMD -u $ID0 touch $DIR/$tdir/f4 || error "touch (5)"
	$RUNAS_CMD -u $ID1 touch $DIR/$tdir/f5 && error "touch (6)"
	touch $DIR/$tdir/f6 || error "touch (7)"
	rm -rf $DIR/$tdir || error "rm (3)"

run_test 0 "uid permission ============================="
	[ $GSS_SUP = 0 ] && skip "without GSS support." && return

	chown $USER0 $DIR/$tdir || error "chown (1)"
	$RUNAS_CMD -u $ID1 -v $ID0 touch $DIR/$tdir/f0 && error "touch (2)"
	echo "enable uid $ID1 setuid"
	do_facet $SINGLEMDS "echo '* $ID1 setuid' >> $PERM_CONF"
	do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
	$RUNAS_CMD -u $ID1 -v $ID0 touch $DIR/$tdir/f1 || error "touch (3)"

	chown root $DIR/$tdir || error "chown (4)"
	chgrp $USER0 $DIR/$tdir || error "chgrp (5)"
	chmod 0770 $DIR/$tdir || error "chmod (6)"
	$RUNAS_CMD -u $ID1 -g $ID1 touch $DIR/$tdir/f2 && error "touch (7)"
	$RUNAS_CMD -u $ID1 -g $ID1 -j $ID0 touch $DIR/$tdir/f3 &&
		error "touch (8)"
	echo "enable uid $ID1 setuid,setgid"
	do_facet $SINGLEMDS "echo '* $ID1 setuid,setgid' > $PERM_CONF"
	do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
	$RUNAS_CMD -u $ID1 -g $ID1 -j $ID0 touch $DIR/$tdir/f4 ||
	$RUNAS_CMD -u $ID1 -v $ID0 -g $ID1 -j $ID0 touch $DIR/$tdir/f5 ||

	do_facet $SINGLEMDS "rm -f $PERM_CONF"
	do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"

run_test 1 "setuid/gid ============================="
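# A sketch of the perm.conf mechanism exercised above (the uid is
# hypothetical): entries have the form '<nid|*> <uid> <perm,perm,...>', e.g.
#   * 501 setuid,setgid
# and take effect once the MDS identity cache is flushed with
#   lctl set_param -n mdt.<MDT>.identity_flush=-1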
# bug 3285 - supplementary group should always succeed.
# NB: supplementary groups are set on the local client only; for remote
# clients, the groups of the specified uid on the MDT are obtained via the
# /sbin/l_getidentity upcall and used instead.
	[[ "$MDS1_VERSION" -ge $(version_code 2.6.93) ]] ||
		[[ "$MDS1_VERSION" -ge $(version_code 2.5.35) &&
		   "$MDS1_VERSION" -lt $(version_code 2.5.50) ]] ||
		skip "Need MDS version at least 2.6.93 or 2.5.35"
	chmod 0771 $DIR/$tdir
	chgrp $ID0 $DIR/$tdir
	$RUNAS_CMD -u $ID0 ls $DIR/$tdir || error "setgroups (1)"
	do_facet $SINGLEMDS "echo '* $ID1 setgrp' > $PERM_CONF"
	do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
	$RUNAS_CMD -u $ID1 -G1,2,$ID0 ls $DIR/$tdir ||
		error "setgroups (2)"
	$RUNAS_CMD -u $ID1 -G1,2 ls $DIR/$tdir && error "setgroups (3)"

	do_facet $SINGLEMDS "rm -f $PERM_CONF"
	do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"

run_test 4 "set supplementary group ==============="
	squash_id default 99 0
	wait_nm_sync default squash_uid '' inactive
	squash_id default 99 1
	wait_nm_sync default squash_gid '' inactive
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		local csum=${HOSTNAME_CHECKSUM}_${i}

		do_facet mgs $LCTL nodemap_add $csum

		if [ $rc -ne 0 ]; then
			echo "nodemap_add $csum failed with $rc"

		wait_update_facet --verbose mgs \
			"$LCTL get_param nodemap.$csum.id 2>/dev/null | \
			grep -c $csum || true" 1 30 ||

	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		local csum=${HOSTNAME_CHECKSUM}_${i}

		wait_nm_sync $csum id '' inactive

	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		local csum=${HOSTNAME_CHECKSUM}_${i}

		if ! do_facet mgs $LCTL nodemap_del $csum; then
			error "nodemap_del $csum failed with $?"

		wait_update_facet --verbose mgs \
			"$LCTL get_param nodemap.$csum.id 2>/dev/null | \
			grep -c $csum || true" 0 30 ||

	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		local csum=${HOSTNAME_CHECKSUM}_${i}

		wait_nm_sync $csum id '' inactive
	local cmd="$LCTL nodemap_add_range"

	for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
		range="$SUBNET_CHECKSUM.${2}.${j}.[1-253]@tcp"
		if ! do_facet mgs $cmd --name $1 --range $range; then

	local cmd="$LCTL nodemap_del_range"

	for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
		range="$SUBNET_CHECKSUM.${2}.${j}.[1-253]@tcp"
		if ! do_facet mgs $cmd --name $1 --range $range; then

	local cmd="$LCTL nodemap_add_idmap"

	echo "Start to add idmaps ..."
	for ((i = 0; i < NODEMAP_COUNT; i++)); do

		for ((j = $ID0; j < NODEMAP_MAX_ID; j++)); do
			local csum=${HOSTNAME_CHECKSUM}_${i}

			local fs_id=$((j + 1))

			if ! do_facet mgs $cmd --name $csum --idtype uid \
				--idmap $client_id:$fs_id; then

			if ! do_facet mgs $cmd --name $csum --idtype gid \
				--idmap $client_id:$fs_id; then
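# For illustration (hypothetical values, ID0=500): each nodemap gets idmaps
# that shift every client id up by one on the fs side, e.g.
#   lctl nodemap_add_idmap --name <csum>_0 --idtype uid --idmap 500:501
#   lctl nodemap_add_idmap --name <csum>_0 --idtype gid --idmap 500:501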
update_idmaps() { #LU-10040
	[ "$MGS_VERSION" -lt $(version_code 2.10.55) ] &&
		skip "Need MGS >= 2.10.55"

	local csum=${HOSTNAME_CHECKSUM}_0
	local old_id_client=$ID0
	local old_id_fs=$((ID0 + 1))
	local new_id=$((ID0 + 100))

	echo "Start to update idmaps ..."

	# inserting an existing idmap should return an error
	cmd="$LCTL nodemap_add_idmap --name $csum --idtype uid"
		$cmd --idmap $old_id_client:$old_id_fs 2>/dev/null; then
		error "inserting existing idmap {$old_id_client:$old_id_fs} " \
			"should return an error"

	# update id_fs and check it
	if ! do_facet mgs $cmd --idmap $old_id_client:$new_id; then
		error "$cmd --idmap $old_id_client:$new_id failed"

	tmp_id=$(do_facet mgs $LCTL get_param -n nodemap.$csum.idmap |
		awk '{ print $7 }' | sed -n '2p')
	[ $tmp_id != $new_id ] && { error "new id_fs $tmp_id != $new_id"; \
		rc=$((rc + 1)); return $rc; }

	# update id_client and check it
	if ! do_facet mgs $cmd --idmap $new_id:$new_id; then
		error "$cmd --idmap $new_id:$new_id failed"

	tmp_id=$(do_facet mgs $LCTL get_param -n nodemap.$csum.idmap |
		awk '{ print $5 }' | sed -n "$((NODEMAP_ID_COUNT + 1)) p")
	tmp_id=${tmp_id%,*}	# e.g. "501," -> "501"
	[ $tmp_id != $new_id ] && { error "new id_client $tmp_id != $new_id"; \
		rc=$((rc + 1)); return $rc; }

	# delete the updated idmap
	cmd="$LCTL nodemap_del_idmap --name $csum --idtype uid"
	if ! do_facet mgs $cmd --idmap $new_id:$new_id; then
		error "$cmd --idmap $new_id:$new_id failed"

	# restore the idmap so that delete_idmaps works correctly
	cmd="$LCTL nodemap_add_idmap --name $csum --idtype uid"
	if ! do_facet mgs $cmd --idmap $old_id_client:$old_id_fs; then
		error "$cmd --idmap $old_id_client:$old_id_fs failed"
	local cmd="$LCTL nodemap_del_idmap"

	echo "Start to delete idmaps ..."
	for ((i = 0; i < NODEMAP_COUNT; i++)); do

		for ((j = $ID0; j < NODEMAP_MAX_ID; j++)); do
			local csum=${HOSTNAME_CHECKSUM}_${i}

			local fs_id=$((j + 1))

			if ! do_facet mgs $cmd --name $csum --idtype uid \
				--idmap $client_id:$fs_id; then

			if ! do_facet mgs $cmd --name $csum --idtype gid \
				--idmap $client_id:$fs_id; then

	local cmd="$LCTL nodemap_modify"

	proc[0]="admin_nodemap"
	proc[1]="trusted_nodemap"

	for ((idx = 0; idx < 2; idx++)); do
		if ! do_facet mgs $cmd --name $1 --property ${option[$idx]} \

		if ! do_facet mgs $cmd --name $1 --property ${option[$idx]} \

	[ "$MGS_VERSION" -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $MGS_VERSION MGS < 2.5.53"

	cmd[0]="$LCTL nodemap_modify --property squash_uid"
	cmd[1]="$LCTL nodemap_modify --property squash_gid"

	if ! do_facet mgs ${cmd[$3]} --name $1 --value $2; then
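# Usage sketch: squash_id <nodemap> <id> <0|1>, where the last argument
# selects cmd[0] (squash_uid) or cmd[1] (squash_gid), e.g.:
#   squash_id default 99 0	# set squash_uid=99 on nodemap "default"
#   squash_id default 99 1	# set squash_gid=99 on nodemap "default"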
	local nodemap_name=$1

	local is_active=$(do_facet mgs $LCTL get_param -n nodemap.active)

	local mgs_ip=$(host_nids_address $mgs_HOST $NETTYPE | cut -d' ' -f1)

	if [ "$nodemap_name" == "active" ]; then

	elif [ -z "$key" ]; then
		proc_param=${nodemap_name}

		proc_param="${nodemap_name}.${key}"

	if [ "$opt" == "inactive" ]; then
		# check nm sync even if nodemap is not activated

	(( is_active == 0 )) && [ "$proc_param" != "active" ] && return

	if [ -z "$value" ]; then
		out1=$(do_facet mgs $LCTL get_param $opt \
			nodemap.${proc_param} 2>/dev/null)
		echo "On MGS ${mgs_ip}, ${proc_param} = $out1"

	# wait up to 10 seconds for other servers to sync with mgs
	for i in $(seq 1 10); do
		for node in $(all_server_nodes); do
			local node_ip=$(host_nids_address $node $NETTYPE |

			if [ -z "$value" ]; then
				[ $node_ip == $mgs_ip ] && continue

			out2=$(do_node $node_ip $LCTL get_param $opt \
				nodemap.$proc_param 2>/dev/null)
			echo "On $node ${node_ip}, ${proc_param} = $out2"
			[ "$out1" != "$out2" ] && is_sync=false && break

	echo "OTHER - IP: $node_ip"

	error "mgs and $nodemap_name ${key} mismatch, $i attempts"

	echo "waited $((i - 1)) seconds for sync"
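# Usage sketch: wait_nm_sync <nodemap> [<key>] [<value>] [<get_param opt>],
# as called throughout this script, e.g.:
#   wait_nm_sync c0 admin_nodemap			# servers match the MGS
#   wait_nm_sync active 0				# deactivation propagated
#   wait_nm_sync default squash_uid '' inactive	# check even when inactive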
	# ensure that the squash defaults are the expected defaults
	squash_id default 99 0
	wait_nm_sync default squash_uid '' inactive
	squash_id default 99 1
	wait_nm_sync default squash_gid '' inactive

	cmd="$LCTL nodemap_test_nid"

	nid=$(do_facet mgs $cmd $1)

	if [ $nid == $2 ]; then

	# restore activation state
	do_facet mgs $LCTL nodemap_activate 0
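# Usage sketch for test_nid (hypothetical checksum values):
#   test_nid 249.0.0.100 37498_0
# passes if "lctl nodemap_test_nid" classifies the NID into the expected
# nodemap.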
	local cmd="$LCTL nodemap_test_id"

	echo "Start to test idmaps ..."
	## nodemap deactivated
	if ! do_facet mgs $LCTL nodemap_activate 0; then

	for ((id = $ID0; id < NODEMAP_MAX_ID; id++)); do

		for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
			local nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
			local fs_id=$(do_facet mgs $cmd --nid $nid \
				--idtype uid --id $id)
			if [ $fs_id != $id ]; then
				echo "expected $id, got $fs_id"

	if ! do_facet mgs $LCTL nodemap_activate 1; then

	for ((id = $ID0; id < NODEMAP_MAX_ID; id++)); do
		for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
			nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
			fs_id=$(do_facet mgs $cmd --nid $nid \
				--idtype uid --id $id)
			expected_id=$((id + 1))
			if [ $fs_id != $expected_id ]; then
				echo "expected $expected_id, got $fs_id"

	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		local csum=${HOSTNAME_CHECKSUM}_${i}

		if ! do_facet mgs $LCTL nodemap_modify --name $csum \
			--property trusted --value 1; then
			error "nodemap_modify $csum failed with $?"

	for ((id = $ID0; id < NODEMAP_MAX_ID; id++)); do
		for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
			nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
			fs_id=$(do_facet mgs $cmd --nid $nid \
				--idtype uid --id $id)
			if [ $fs_id != $id ]; then
				echo "expected $id, got $fs_id"

	## ensure admin (root access) is enabled
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		local csum=${HOSTNAME_CHECKSUM}_${i}

		if ! do_facet mgs $LCTL nodemap_modify --name $csum \
			--property admin --value 1; then
			error "nodemap_modify $csum failed with $?"

	## check that root is allowed
	for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
		nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
		fs_id=$(do_facet mgs $cmd --nid $nid --idtype uid --id 0)
		if [ $fs_id != 0 ]; then
			echo "root allowed expected 0, got $fs_id"

	## ensure admin (root access) is disabled
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		local csum=${HOSTNAME_CHECKSUM}_${i}

		if ! do_facet mgs $LCTL nodemap_modify --name $csum \
			--property admin --value 0; then
			error "nodemap_modify ${HOSTNAME_CHECKSUM}_${i} "

	## check that root is mapped to 99
	for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
		nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
		fs_id=$(do_facet mgs $cmd --nid $nid --idtype uid --id 0)
		if [ $fs_id != 99 ]; then
			error "root squash expected 99, got $fs_id"

	## reset client trust to 0
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! do_facet mgs $LCTL nodemap_modify \
			--name ${HOSTNAME_CHECKSUM}_${i} \
			--property trusted --value 0; then
			error "nodemap_modify ${HOSTNAME_CHECKSUM}_${i} "
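# For illustration (hypothetical, id 500 with idmap 500:501 installed):
#   deactivated:	lctl nodemap_test_id --nid <nid> --idtype uid --id 500 -> 500
#   activated:	... --id 500 -> 501	(idmap applied)
#   trusted=1:	... --id 500 -> 500	(ids passed through unmapped)
#   admin=0:	... --id 0   -> 99	(root squashed to squash_uid)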
	remote_mgs_nodsh && skip "remote MGS with nodsh"
	[ "$MGS_VERSION" -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $MGS_VERSION MGS < 2.5.53"

	[[ $rc != 0 ]] && error "nodemap_add failed with $rc"

	[[ $rc != 0 ]] && error "nodemap_del failed with $rc"

run_test 7 "nodemap create and delete"

	remote_mgs_nodsh && skip "remote MGS with nodsh"
	[ "$MGS_VERSION" -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $MGS_VERSION MGS < 2.5.53"

	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1

	[[ $rc == 0 ]] && error "duplicate nodemap_add allowed with $rc" &&

	[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 3

run_test 8 "nodemap reject duplicates"

	remote_mgs_nodsh && skip "remote MGS with nodsh"
	[ "$MGS_VERSION" -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $MGS_VERSION MGS < 2.5.53"

	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1

	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then

	[[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2

	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! delete_range ${HOSTNAME_CHECKSUM}_${i} $i; then

	[[ $rc != 0 ]] && error "nodemap_del_range failed with $rc" && return 4

	[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4

run_test 9 "nodemap range add"

	remote_mgs_nodsh && skip "remote MGS with nodsh"
	[ "$MGS_VERSION" -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $MGS_VERSION MGS < 2.5.53"

	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1

	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then

	[[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2

	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then

	[[ $rc == 0 ]] && error "nodemap_add_range duplicate add with $rc" &&

	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! delete_range ${HOSTNAME_CHECKSUM}_${i} $i; then

	[[ $rc != 0 ]] && error "nodemap_del_range failed with $rc" && return 4

	[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 5

run_test 10a "nodemap reject duplicate ranges"
	[ "$MGS_VERSION" -lt $(version_code 2.10.53) ] &&
		skip "Need MGS >= 2.10.53"

	local nids="192.168.19.[0-255]@o2ib20"

	do_facet mgs $LCTL nodemap_del $nm1 2>/dev/null
	do_facet mgs $LCTL nodemap_del $nm2 2>/dev/null

	do_facet mgs $LCTL nodemap_add $nm1 || error "Add $nm1 failed"
	do_facet mgs $LCTL nodemap_add $nm2 || error "Add $nm2 failed"
	do_facet mgs $LCTL nodemap_add_range --name $nm1 --range $nids ||
		error "Add range $nids to $nm1 failed"
	[ -n "$(do_facet mgs $LCTL get_param nodemap.$nm1.* |
		grep start_nid)" ] || error "No range was found"
	do_facet mgs $LCTL nodemap_del_range --name $nm2 --range $nids &&
		error "Deleting range $nids from $nm2 should fail"
	[ -n "$(do_facet mgs $LCTL get_param nodemap.$nm1.* |
		grep start_nid)" ] || error "Range $nids should be there"

	do_facet mgs $LCTL nodemap_del $nm1 || error "Delete $nm1 failed"
	do_facet mgs $LCTL nodemap_del $nm2 || error "Delete $nm2 failed"

run_test 10b "delete range from the correct nodemap"
test_10c() { #LU-8912
	[ "$MGS_VERSION" -lt $(version_code 2.10.57) ] &&
		skip "Need MGS >= 2.10.57"

	local nm="nodemap_lu8912"
	local nid_range="10.210.[32-47].[0-255]@o2ib3"
	local start_nid="10.210.32.0@o2ib3"
	local end_nid="10.210.47.255@o2ib3"
	local start_nid_found

	do_facet mgs $LCTL nodemap_del $nm 2>/dev/null
	do_facet mgs $LCTL nodemap_add $nm || error "Add $nm failed"
	do_facet mgs $LCTL nodemap_add_range --name $nm --range $nid_range ||
		error "Add range $nid_range to $nm failed"

	start_nid_found=$(do_facet mgs $LCTL get_param nodemap.$nm.* |
		awk -F '[,: ]' /start_nid/'{ print $9 }')
	[ "$start_nid" == "$start_nid_found" ] ||
		error "start_nid: $start_nid_found != $start_nid"
	end_nid_found=$(do_facet mgs $LCTL get_param nodemap.$nm.* |
		awk -F '[,: ]' /end_nid/'{ print $13 }')
	[ "$end_nid" == "$end_nid_found" ] ||
		error "end_nid: $end_nid_found != $end_nid"

	do_facet mgs $LCTL nodemap_del $nm || error "Delete $nm failed"

run_test 10c "verify contiguous range support"
test_10d() { #LU-8913
	[ "$MGS_VERSION" -lt $(version_code 2.10.59) ] &&
		skip "Need MGS >= 2.10.59"

	local nm="nodemap_lu8913"
	local nid_range="*@o2ib3"
	local start_nid="0.0.0.0@o2ib3"
	local end_nid="255.255.255.255@o2ib3"
	local start_nid_found

	do_facet mgs $LCTL nodemap_del $nm 2>/dev/null
	do_facet mgs $LCTL nodemap_add $nm || error "Add $nm failed"
	do_facet mgs $LCTL nodemap_add_range --name $nm --range $nid_range ||
		error "Add range $nid_range to $nm failed"

	start_nid_found=$(do_facet mgs $LCTL get_param nodemap.$nm.* |
		awk -F '[,: ]' /start_nid/'{ print $9 }')
	[ "$start_nid" == "$start_nid_found" ] ||
		error "start_nid: $start_nid_found != $start_nid"
	end_nid_found=$(do_facet mgs $LCTL get_param nodemap.$nm.* |
		awk -F '[,: ]' /end_nid/'{ print $13 }')
	[ "$end_nid" == "$end_nid_found" ] ||
		error "end_nid: $end_nid_found != $end_nid"

	do_facet mgs $LCTL nodemap_del $nm || error "Delete $nm failed"

run_test 10d "verify nodemap range format '*@<net>' support"
	remote_mgs_nodsh && skip "remote MGS with nodsh"
	[ "$MGS_VERSION" -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $MGS_VERSION MGS < 2.5.53"

	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1

	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! modify_flags ${HOSTNAME_CHECKSUM}_${i}; then

	[[ $rc != 0 ]] && error "nodemap_modify with $rc" && return 2

	[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 3

run_test 11 "nodemap modify"

	remote_mgs_nodsh && skip "remote MGS with nodsh"
	[ "$MGS_VERSION" -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $MGS_VERSION MGS < 2.5.53"

	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1

	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! squash_id ${HOSTNAME_CHECKSUM}_${i} 88 0; then

	[[ $rc != 0 ]] && error "nodemap squash_uid with $rc" && return 2

	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! squash_id ${HOSTNAME_CHECKSUM}_${i} 88 1; then

	[[ $rc != 0 ]] && error "nodemap squash_gid with $rc" && return 3

	[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4

run_test 12 "nodemap set squash ids"

	remote_mgs_nodsh && skip "remote MGS with nodsh"
	[ "$MGS_VERSION" -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $MGS_VERSION MGS < 2.5.53"

	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1

	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then

	[[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2

	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
			for k in $NODEMAP_IPADDR_LIST; do
				if ! test_nid $SUBNET_CHECKSUM.$i.$j.$k \
					${HOSTNAME_CHECKSUM}_${i}; then

	[[ $rc != 0 ]] && error "nodemap_test_nid failed with $rc" && return 3

	[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4

run_test 13 "test nids"

	remote_mgs_nodsh && skip "remote MGS with nodsh"
	[ "$MGS_VERSION" -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $MGS_VERSION MGS < 2.5.53"

	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1

	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
			for k in $NODEMAP_IPADDR_LIST; do
				if ! test_nid $SUBNET_CHECKSUM.$i.$j.$k \

	[[ $rc != 0 ]] && error "nodemap_test_nid failed with $rc" && return 3

	[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4

run_test 14 "test default nodemap nid lookup"
	remote_mgs_nodsh && skip "remote MGS with nodsh"
	[ "$MGS_VERSION" -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $MGS_VERSION MGS < 2.5.53"

	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1

	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then

	[[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2

	[[ $rc != 0 ]] && error "nodemap_add_idmap failed with $rc" && return 3

	activedefault=$(do_facet mgs $LCTL get_param -n nodemap.active)
	if [[ "$activedefault" != "1" ]]; then
		stack_trap cleanup_active EXIT

	[[ $rc != 0 ]] && error "nodemap_test_id failed with $rc" && return 4

	[[ $rc != 0 ]] && error "update_idmaps failed with $rc" && return 5

	[[ $rc != 0 ]] && error "nodemap_del_idmap failed with $rc" && return 6

	[[ $rc != 0 ]] && error "nodemap_delete failed with $rc" && return 7

run_test 15 "test id mapping"
create_fops_nodemaps() {

	for client in $clients; do
		local client_ip=$(host_nids_address $client $NETTYPE)
		local client_nid=$(h2nettype $client_ip)
		do_facet mgs $LCTL nodemap_add c${i} || return 1
		do_facet mgs $LCTL nodemap_add_range \
			--name c${i} --range $client_nid || return 1
		for map in ${FOPS_IDMAPS[i]}; do
			do_facet mgs $LCTL nodemap_add_idmap --name c${i} \
				--idtype uid --idmap ${map} || return 1
			do_facet mgs $LCTL nodemap_add_idmap --name c${i} \
				--idtype gid --idmap ${map} || return 1

		wait_nm_sync c$i idmap

delete_fops_nodemaps() {

	for client in $clients; do
		do_facet mgs $LCTL nodemap_del c${i} || return 1

	if [ $MDSCOUNT -le 1 ]; then
		do_node ${clients_arr[0]} mkdir -p $DIR/$tdir

		# round-robin MDTs to test DNE nodemap support
		[ ! -d $DIR ] && do_node ${clients_arr[0]} mkdir -p $DIR
		do_node ${clients_arr[0]} $LFS setdirstripe -c 1 -i \
			$((fops_mds_index % MDSCOUNT)) $DIR/$tdir
		((fops_mds_index++))
# fops test directory needs to be initialized on a privileged client
	local admin=$(do_facet mgs $LCTL get_param -n nodemap.c0.admin_nodemap)
	local trust=$(do_facet mgs $LCTL get_param -n \
		nodemap.c0.trusted_nodemap)

	do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
	do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1

	wait_nm_sync c0 admin_nodemap
	wait_nm_sync c0 trusted_nodemap

	do_node ${clients_arr[0]} rm -rf $DIR/$tdir

	do_node ${clients_arr[0]} chown $user $DIR/$tdir

	do_facet mgs $LCTL nodemap_modify --name c0 \
		--property admin --value $admin
	do_facet mgs $LCTL nodemap_modify --name c0 \
		--property trusted --value $trust

	# flush MDT locks to make sure they are reacquired before test
	do_node ${clients_arr[0]} $LCTL set_param \
		ldlm.namespaces.$FSNAME-MDT*.lru_size=clear

	wait_nm_sync c0 admin_nodemap
	wait_nm_sync c0 trusted_nodemap
# fileset test directory needs to be initialized on a privileged client
fileset_test_setup() {

	if [ -n "$FILESET" -a -z "$SKIP_FILESET" ]; then
		cleanup_mount $MOUNT
		FILESET="" zconf_mount_clients $CLIENTS $MOUNT

	local admin=$(do_facet mgs $LCTL get_param -n \
		nodemap.${nm}.admin_nodemap)
	local trust=$(do_facet mgs $LCTL get_param -n \
		nodemap.${nm}.trusted_nodemap)

	do_facet mgs $LCTL nodemap_modify --name $nm --property admin --value 1
	do_facet mgs $LCTL nodemap_modify --name $nm --property trusted \

	wait_nm_sync $nm admin_nodemap
	wait_nm_sync $nm trusted_nodemap

	# create directory and populate it for subdir mount
	do_node ${clients_arr[0]} mkdir $MOUNT/$subdir ||
		error "unable to create dir $MOUNT/$subdir"
	do_node ${clients_arr[0]} touch $MOUNT/$subdir/this_is_$subdir ||
		error "unable to create file $MOUNT/$subdir/this_is_$subdir"
	do_node ${clients_arr[0]} mkdir $MOUNT/$subdir/$subsubdir ||
		error "unable to create dir $MOUNT/$subdir/$subsubdir"
	do_node ${clients_arr[0]} touch \
		$MOUNT/$subdir/$subsubdir/this_is_$subsubdir ||
		error "unable to create file \
			$MOUNT/$subdir/$subsubdir/this_is_$subsubdir"

	do_facet mgs $LCTL nodemap_modify --name $nm \
		--property admin --value $admin
	do_facet mgs $LCTL nodemap_modify --name $nm \
		--property trusted --value $trust

	# flush MDT locks to make sure they are reacquired before test
	do_node ${clients_arr[0]} $LCTL set_param \
		ldlm.namespaces.$FSNAME-MDT*.lru_size=clear

	wait_nm_sync $nm admin_nodemap
	wait_nm_sync $nm trusted_nodemap
# fileset test directory needs to be cleaned up on a privileged client
fileset_test_cleanup() {
	local admin=$(do_facet mgs $LCTL get_param -n \
		nodemap.${nm}.admin_nodemap)
	local trust=$(do_facet mgs $LCTL get_param -n \
		nodemap.${nm}.trusted_nodemap)

	do_facet mgs $LCTL nodemap_modify --name $nm --property admin --value 1
	do_facet mgs $LCTL nodemap_modify --name $nm --property trusted \

	wait_nm_sync $nm admin_nodemap
	wait_nm_sync $nm trusted_nodemap

	# cleanup directory created for subdir mount
	do_node ${clients_arr[0]} rm -rf $MOUNT/$subdir ||
		error "unable to remove dir $MOUNT/$subdir"

	do_facet mgs $LCTL nodemap_modify --name $nm \
		--property admin --value $admin
	do_facet mgs $LCTL nodemap_modify --name $nm \
		--property trusted --value $trust

	# flush MDT locks to make sure they are reacquired before test
	do_node ${clients_arr[0]} $LCTL set_param \
		ldlm.namespaces.$FSNAME-MDT*.lru_size=clear

	wait_nm_sync $nm admin_nodemap
	wait_nm_sync $nm trusted_nodemap
	if [ -n "$FILESET" -a -z "$SKIP_FILESET" ]; then
		cleanup_mount $MOUNT
		zconf_mount_clients $CLIENTS $MOUNT
do_create_delete() {

	local testfile=$DIR/$tdir/$tfile

	if $run_u touch $testfile >& /dev/null; then

		$run_u rm $testfile && d=1

	local expected=$(get_cr_del_expected $key)
	[ "$res" != "$expected" ] &&
		error "test $key, wanted $expected, got $res" && rc=$((rc + 1))

nodemap_check_quota() {
	$run_u lfs quota -q $DIR | awk '{ print $2; exit; }'
do_fops_quota_test() {
	# fuzz quota used to account for possible indirect blocks, etc
	local quota_fuzz=$(fs_log_size)
	local qused_orig=$(nodemap_check_quota "$run_u")
	local qused_high=$((qused_orig + quota_fuzz))
	local qused_low=$((qused_orig - quota_fuzz))
	local testfile=$DIR/$tdir/$tfile
	$run_u dd if=/dev/zero of=$testfile oflag=sync bs=1M count=1 \
		>& /dev/null || error "unable to write quota test file"
	sync; sync_all_data || true

	local qused_new=$(nodemap_check_quota "$run_u")
	[ $((qused_new)) -lt $((qused_low + 1024)) -o \
	  $((qused_new)) -gt $((qused_high + 1024)) ] &&
		error "$qused_new != $qused_orig + 1M after write, " \
			"fuzz is $quota_fuzz"
	$run_u rm $testfile || error "unable to remove quota test file"
	wait_delete_completed_mds

	qused_new=$(nodemap_check_quota "$run_u")
	[ $((qused_new)) -lt $((qused_low)) \
	  -o $((qused_new)) -gt $((qused_high)) ] &&
		error "quota not reclaimed, expect $qused_orig, " \
			"got $qused_new, fuzz $quota_fuzz"
get_fops_mapped_user() {

	for ((i = 0; i < ${#FOPS_IDMAPS[@]}; i++)); do
		for map in ${FOPS_IDMAPS[i]}; do
			if [ $(cut -d: -f1 <<< "$map") == $cli_user ]; then
				cut -d: -f2 <<< "$map"
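# For illustration, with the FOPS_IDMAPS defined above (IDBASE=60000):
#   get_fops_mapped_user 60003 -> 60000	(c0 map "60003:60000")
#   get_fops_mapped_user 60006 -> 60002	(c1 map "60006:60002")
# It scans the idmaps of every cluster and echoes the fs-side id of the
# first match.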
get_cr_del_expected() {
	IFS=":" read -a key <<< "$1"
	local mapmode="${key[0]}"
	local mds_user="${key[1]}"
	local cluster="${key[2]}"
	local cli_user="${key[3]}"
	local mode="0${key[4]}"

	[[ $mapmode == *mapped* ]] && mapped=1
	# only c1 is mapped in these test cases
	[[ $mapmode == mapped_trusted* ]] && [ "$cluster" == "c0" ] && mapped=0
	[[ $mapmode == *noadmin* ]] && noadmin=1

	# o+wx works as long as the user isn't mapped
	if [ $((mode & 3)) -eq 3 ]; then

	# if the client user is root, check whether root is squashed
	if [ "$cli_user" == "0" ]; then
		# squashed root succeeds if the 'other' bit is set
		1) [ "$other" == "1" ] && echo $SUCCESS
		   [ "$other" == "0" ] && echo $FAILURE;;

	if [ "$mapped" == "0" ]; then
		[ "$other" == "1" ] && echo $SUCCESS
		[ "$other" == "0" ] && echo $FAILURE

	# if the mapped user is the mds user, check for u+wx
	mapped_user=$(get_fops_mapped_user $cli_user)
	[ "$mapped_user" == "-1" ] &&
		error "unable to find mapping for client user $cli_user"

	if [ "$mapped_user" == "$mds_user" -a \
	     $(((mode & 0300) == 0300)) -eq 1 ]; then

	if [ "$mapped_user" != "$mds_user" -a "$other" == "1" ]; then
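# The $key argument is "<mapmode>:<mds_user>:<cluster>:<cli_user>:<mode>",
# as assembled in test_fops below, e.g. (hypothetical values):
#   trusted_noadmin:60000:c1:60005:303
# i.e. mds-side owner uid 60000, run on cluster c1 as client uid 60005, with
# the test directory chmod'ed to 0303.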
test_fops_admin_cli_i=""
test_fops_chmod_dir() {
	local current_cli_i=$1

	local dir_to_chmod=$3
	local new_admin_cli_i=""

	# do we need to set up a new admin client?
	[ "$current_cli_i" == "0" ] && [ "$test_fops_admin_cli_i" != "1" ] &&
	[ "$current_cli_i" != "0" ] && [ "$test_fops_admin_cli_i" != "0" ] &&

	# if there is only one client, and it is non-admin, flip admin every time
	if [ "$num_clients" == "1" ]; then
		test_fops_admin_client=$clients
		test_fops_admin_val=$(do_facet mgs $LCTL get_param -n \
			nodemap.c0.admin_nodemap)
		if [ "$test_fops_admin_val" != "1" ]; then
			do_facet mgs $LCTL nodemap_modify \

			wait_nm_sync c0 admin_nodemap

	elif [ "$new_admin_cli_i" != "" ]; then
		# restore admin val to old admin client
		if [ "$test_fops_admin_cli_i" != "" ] &&
		   [ "$test_fops_admin_val" != "1" ]; then
			do_facet mgs $LCTL nodemap_modify \
				--name c${test_fops_admin_cli_i} \
				--value $test_fops_admin_val
			wait_nm_sync c${test_fops_admin_cli_i} admin_nodemap

		test_fops_admin_cli_i=$new_admin_cli_i
		test_fops_admin_client=${clients_arr[$new_admin_cli_i]}
		test_fops_admin_val=$(do_facet mgs $LCTL get_param -n \
			nodemap.c${new_admin_cli_i}.admin_nodemap)

		if [ "$test_fops_admin_val" != "1" ]; then
			do_facet mgs $LCTL nodemap_modify \
				--name c${new_admin_cli_i} \

			wait_nm_sync c${new_admin_cli_i} admin_nodemap

	do_node $test_fops_admin_client chmod $perm_bits $dir_to_chmod || return 1

	# remove admin for single client if originally non-admin
	if [ "$num_clients" == "1" ] && [ "$test_fops_admin_val" != "1" ]; then
		do_facet mgs $LCTL nodemap_modify --name c0 --property admin \

		wait_nm_sync c0 admin_nodemap
	local single_client="$2"
	local client_user_list=([0]="0 $((IDBASE+3)) $((IDBASE+4))"
				[1]="0 $((IDBASE+5)) $((IDBASE+6))")

	local perm_bit_list="0 3 $((0300)) $((0303))"
	# SLOW tests 000-007, 010-070, 100-700 (octal modes)
	[ "$SLOW" == "yes" ] &&
		perm_bit_list="0 $(seq 1 7) $(seq 8 8 63) $(seq 64 64 511) \

	# step through mds users; -1 means root
	for mds_i in -1 0 1 2; do
		local user=$((mds_i + IDBASE))

		[ "$mds_i" == "-1" ] && user=0

		echo mkdir -p $DIR/$tdir

		for client in $clients; do

			for u in ${client_user_list[$cli_i]}; do
				local run_u="do_node $client \
					$RUNAS_CMD -u$u -g$u -G$u"
				for perm_bits in $perm_bit_list; do
					local mode=$(printf %03o $perm_bits)

					key="$mapmode:$user:c$cli_i:$u:$mode"
					test_fops_chmod_dir $cli_i $mode \
						error "cannot chmod $key"
					do_create_delete "$run_u" "$key"

				test_fops_chmod_dir $cli_i 777 $DIR/$tdir ||
					error "cannot chmod $key"
				do_fops_quota_test "$run_u"

			cli_i=$((cli_i + 1))
			[ "$single_client" == "1" ] && break
nodemap_version_check () {
	remote_mgs_nodsh && skip "remote MGS with nodsh" && return 1
	[ "$MGS_VERSION" -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $MGS_VERSION MGS < 2.5.53" &&

nodemap_test_setup() {
	local active_nodemap=1

	[ "$1" == "0" ] && active_nodemap=0

	do_nodes $(comma_list $(all_mdts_nodes)) \
		$LCTL set_param mdt.*.identity_upcall=NONE

	create_fops_nodemaps
	[[ $rc != 0 ]] && error "adding fops nodemaps failed $rc"

	do_facet mgs $LCTL nodemap_activate $active_nodemap

	do_facet mgs $LCTL nodemap_modify --name default \
		--property admin --value 1
	wait_nm_sync default admin_nodemap
	do_facet mgs $LCTL nodemap_modify --name default \
		--property trusted --value 1
	wait_nm_sync default trusted_nodemap

nodemap_test_cleanup() {
	delete_fops_nodemaps
	[[ $rc != 0 ]] && error "removing fops nodemaps failed $rc"

	do_facet mgs $LCTL nodemap_modify --name default \
		--property admin --value 0
	wait_nm_sync default admin_nodemap
	do_facet mgs $LCTL nodemap_modify --name default \
		--property trusted --value 0
	wait_nm_sync default trusted_nodemap

	do_facet mgs $LCTL nodemap_activate 0
	wait_nm_sync active 0

	export SK_UNIQUE_NM=false

nodemap_clients_admin_trusted() {

	for client in $clients; do
		do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property admin --value $admin
		do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property trusted --value $tr

	wait_nm_sync c$((i - 1)) admin_nodemap
	wait_nm_sync c$((i - 1)) trusted_nodemap
	nodemap_version_check || return 0
	nodemap_test_setup 0

	trap nodemap_test_cleanup EXIT

	nodemap_test_cleanup

run_test 16 "test nodemap all_off fileops"

	   [ "$MDS1_VERSION" -lt $(version_code 2.11.55) ]; then
		skip "Need MDS >= 2.11.55"

	nodemap_version_check || return 0

	trap nodemap_test_cleanup EXIT
	nodemap_clients_admin_trusted 0 1
	test_fops trusted_noadmin 1
	nodemap_test_cleanup

run_test 17 "test nodemap trusted_noadmin fileops"

	   [ "$MDS1_VERSION" -lt $(version_code 2.11.55) ]; then
		skip "Need MDS >= 2.11.55"

	nodemap_version_check || return 0

	trap nodemap_test_cleanup EXIT
	nodemap_clients_admin_trusted 0 0
	test_fops mapped_noadmin 1
	nodemap_test_cleanup

run_test 18 "test nodemap mapped_noadmin fileops"

	   [ "$MDS1_VERSION" -lt $(version_code 2.11.55) ]; then
		skip "Need MDS >= 2.11.55"

	nodemap_version_check || return 0

	trap nodemap_test_cleanup EXIT
	nodemap_clients_admin_trusted 1 1
	test_fops trusted_admin 1
	nodemap_test_cleanup

run_test 19 "test nodemap trusted_admin fileops"

	   [ "$MDS1_VERSION" -lt $(version_code 2.11.55) ]; then
		skip "Need MDS >= 2.11.55"

	nodemap_version_check || return 0

	trap nodemap_test_cleanup EXIT
	nodemap_clients_admin_trusted 1 0
	test_fops mapped_admin 1
	nodemap_test_cleanup

run_test 20 "test nodemap mapped_admin fileops"

	   [ "$MDS1_VERSION" -lt $(version_code 2.11.55) ]; then
		skip "Need MDS >= 2.11.55"

	nodemap_version_check || return 0

	trap nodemap_test_cleanup EXIT

	for client in $clients; do
		do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property admin --value 0
		do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property trusted --value $x

	wait_nm_sync c$((i - 1)) trusted_nodemap

	test_fops mapped_trusted_noadmin
	nodemap_test_cleanup

run_test 21 "test nodemap mapped_trusted_noadmin fileops"

	   [ "$MDS1_VERSION" -lt $(version_code 2.11.55) ]; then
		skip "Need MDS >= 2.11.55"

	nodemap_version_check || return 0

	trap nodemap_test_cleanup EXIT

	for client in $clients; do
		do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property admin --value 1
		do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property trusted --value $x

	wait_nm_sync c$((i - 1)) trusted_nodemap

	test_fops mapped_trusted_admin
	nodemap_test_cleanup

run_test 22 "test nodemap mapped_trusted_admin fileops"
# acl test directory needs to be initialized on a privileged client
nodemap_acl_test_setup() {
	local admin=$(do_facet mgs $LCTL get_param -n \
		nodemap.c0.admin_nodemap)
	local trust=$(do_facet mgs $LCTL get_param -n \
		nodemap.c0.trusted_nodemap)

	do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
	do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1

	wait_nm_sync c0 admin_nodemap
	wait_nm_sync c0 trusted_nodemap

	do_node ${clients_arr[0]} rm -rf $DIR/$tdir

	do_node ${clients_arr[0]} chmod a+rwx $DIR/$tdir ||
		error "unable to chmod a+rwx test dir $DIR/$tdir"

	do_facet mgs $LCTL nodemap_modify --name c0 \
		--property admin --value $admin
	do_facet mgs $LCTL nodemap_modify --name c0 \
		--property trusted --value $trust

	wait_nm_sync c0 trusted_nodemap
# returns 0 if the number of ACLs does not change on the second (mapped)
# client after being set on the first client
nodemap_acl_test() {
	local set_client="$2"
	local get_client="$3"
	local check_setfacl="$4"
	local setfacl_error=0
	local testfile=$DIR/$tdir/$tfile
	local RUNAS_USER="$RUNAS_CMD -u $user"

	local acl_count_post=0

	nodemap_acl_test_setup

	do_node $set_client $RUNAS_USER touch $testfile

	# ACL masks aren't filtered by nodemap code, so we ignore them
	acl_count=$(do_node $get_client getfacl $testfile | grep -v mask |
	do_node $set_client $RUNAS_USER setfacl -m $user:rwx $testfile ||

	# if check_setfacl is set to 1, setfacl is expected to fail
	if [ "$check_setfacl" == "1" ]; then
		[ "$setfacl_error" != "1" ] && return 1

	[ "$setfacl_error" == "1" ] && echo "WARNING: unable to setfacl"

	acl_count_post=$(do_node $get_client getfacl $testfile | grep -v mask |
	[ $acl_count -eq $acl_count_post ] && return 0
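# Usage sketch: nodemap_acl_test <user> <set_client> <get_client> [<check_setfacl>]
# e.g., as called in test 23a below:
#   nodemap_acl_test $mapped_fs ${clients_arr[0]} ${clients_arr[1]}
# Pass 1 as the fourth argument when the setfacl itself is expected to fail.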
	[ $num_clients -lt 2 ] && skip "Need 2 clients at least" && return
	nodemap_version_check || return 0

	trap nodemap_test_cleanup EXIT
	# 1 trusted cluster, 1 mapped cluster
	local unmapped_fs=$((IDBASE+0))
	local unmapped_c1=$((IDBASE+5))
	local mapped_fs=$((IDBASE+2))
	local mapped_c0=$((IDBASE+4))
	local mapped_c1=$((IDBASE+6))

	do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
	do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1

	do_facet mgs $LCTL nodemap_modify --name c1 --property admin --value 0
	do_facet mgs $LCTL nodemap_modify --name c1 --property trusted --value 0

	wait_nm_sync c1 trusted_nodemap

	# setfacl on trusted cluster to unmapped user, verify it's not seen
	nodemap_acl_test $unmapped_fs ${clients_arr[0]} ${clients_arr[1]} ||
		error "acl count (1)"

	# setfacl on trusted cluster to mapped user, verify it's seen
	nodemap_acl_test $mapped_fs ${clients_arr[0]} ${clients_arr[1]} &&
		error "acl count (2)"

	# setfacl on mapped cluster to mapped user, verify it's seen
	nodemap_acl_test $mapped_c1 ${clients_arr[1]} ${clients_arr[0]} &&
		error "acl count (3)"

	# setfacl on mapped cluster to unmapped user, verify error
	nodemap_acl_test $unmapped_fs ${clients_arr[1]} ${clients_arr[0]} 1 ||
		error "acl count (4)"

	do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 0
	do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 0

	wait_nm_sync c0 trusted_nodemap

	# setfacl to mapped user on c1, also mapped to c0, verify it's seen
	nodemap_acl_test $mapped_c1 ${clients_arr[1]} ${clients_arr[0]} &&
		error "acl count (5)"

	# setfacl to mapped user on c1, not mapped to c0, verify not seen
	nodemap_acl_test $unmapped_c1 ${clients_arr[1]} ${clients_arr[0]} ||
		error "acl count (6)"

	nodemap_test_cleanup

run_test 23a "test mapped regular ACLs"
test_23b() { #LU-9929
	[ $num_clients -lt 2 ] && skip "Need 2 clients at least"
	[ "$MGS_VERSION" -lt $(version_code 2.10.53) ] &&
		skip "Need MGS >= 2.10.53"

	export SK_UNIQUE_NM=true

	trap nodemap_test_cleanup EXIT

	local testdir=$DIR/$tdir
	local fs_id=$((IDBASE+10))

	do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
	wait_nm_sync c0 admin_nodemap
	do_facet mgs $LCTL nodemap_modify --name c1 --property admin --value 1
	wait_nm_sync c1 admin_nodemap
	do_facet mgs $LCTL nodemap_modify --name c1 --property trusted --value 1
	wait_nm_sync c1 trusted_nodemap

	# Add idmap $ID0:$fs_id (500:60010)
	do_facet mgs $LCTL nodemap_add_idmap --name c0 --idtype gid \
		--idmap $ID0:$fs_id ||
		error "add idmap $ID0:$fs_id to nodemap c0 failed"
	wait_nm_sync c0 idmap

	# set/getfacl default acl on client 1 (unmapped gid=500)
	do_node ${clients_arr[0]} rm -rf $testdir
	do_node ${clients_arr[0]} mkdir -p $testdir
	# Here, USER0=$(getent passwd | grep :$ID0:$ID0: | cut -d: -f1)
	do_node ${clients_arr[0]} setfacl -R -d -m group:$USER0:rwx $testdir ||
		error "setfacl $testdir on ${clients_arr[0]} failed"
	unmapped_id=$(do_node ${clients_arr[0]} getfacl $testdir |
			grep -E "default:group:.*:rwx" | awk -F: '{print $3}')
	[ "$unmapped_id" = "$USER0" ] ||
		error "gid=$ID0 was not unmapped correctly on ${clients_arr[0]}"

	# getfacl default acl on client 2 (mapped gid=60010)
	mapped_id=$(do_node ${clients_arr[1]} getfacl $testdir |
			grep -E "default:group:.*:rwx" | awk -F: '{print $3}')
	fs_user=$(do_node ${clients_arr[1]} getent passwd |
			grep :$fs_id:$fs_id: | cut -d: -f1)
	[ -z "$fs_user" ] && fs_user=$fs_id
	[ $mapped_id -eq $fs_id -o "$mapped_id" = "$fs_user" ] ||
		error "Should return gid=$fs_id or $fs_user on client2"

	nodemap_test_cleanup
	export SK_UNIQUE_NM=false

run_test 23b "test mapped default ACLs"
	trap nodemap_test_cleanup EXIT
	do_nodes $(comma_list $(all_server_nodes)) $LCTL get_param -R nodemap

	nodemap_test_cleanup

run_test 24 "check nodemap proc files for LBUGs and Oopses"
	local tmpfile=$(mktemp)
	local tmpfile2=$(mktemp)
	local tmpfile3=$(mktemp)
	local tmpfile4=$(mktemp)

	nodemap_version_check || return 0

	# stop clients for this test
	zconf_umount_clients $CLIENTS $MOUNT ||
		error "unable to umount clients $CLIENTS"

	export SK_UNIQUE_NM=true

	# enable trusted/admin for setquota call in cleanup_and_setup_lustre()
	for client in $clients; do
		do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property admin --value 1
		do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property trusted --value 1

	wait_nm_sync c$((i - 1)) trusted_nodemap

	trap nodemap_test_cleanup EXIT

	# create a new, empty nodemap, and add fileset info to it
	do_facet mgs $LCTL nodemap_add test25 ||
		error "unable to create nodemap test25"
	do_facet mgs $LCTL set_param -P nodemap.test25.fileset=/$subdir ||
		error "unable to add fileset info to nodemap test25"

	wait_nm_sync test25 id

	do_facet mgs $LCTL nodemap_info > $tmpfile
	do_facet mds $LCTL nodemap_info > $tmpfile2

	if ! $SHARED_KEY; then
		# will conflict with SK's nodemaps
		cleanup_and_setup_lustre

	# stop clients for this test
	zconf_umount_clients $CLIENTS $MOUNT ||
		error "unable to umount clients $CLIENTS"

	do_facet mgs $LCTL nodemap_info > $tmpfile3
	diff -q $tmpfile3 $tmpfile >& /dev/null ||
		error "nodemap_info diff on MGS after remount"

	do_facet mds $LCTL nodemap_info > $tmpfile4
	diff -q $tmpfile4 $tmpfile2 >& /dev/null ||
		error "nodemap_info diff on MDS after remount"

	do_facet mgs $LCTL nodemap_del test25 ||
		error "cannot delete nodemap test25 from config"
	nodemap_test_cleanup
	# restart clients previously stopped
	zconf_mount_clients $CLIENTS $MOUNT ||
		error "unable to mount clients $CLIENTS"

	rm -f $tmpfile $tmpfile2 $tmpfile3 $tmpfile4
	export SK_UNIQUE_NM=false

run_test 25 "test save and reload nodemap config"
	nodemap_version_check || return 0

	do_facet mgs "seq -f 'c%g' $large_i | xargs -n1 $LCTL nodemap_add"
	wait_nm_sync c$large_i admin_nodemap

	do_facet mgs "seq -f 'c%g' $large_i | xargs -n1 $LCTL nodemap_del"
	wait_nm_sync c$large_i admin_nodemap

run_test 26 "test transferring very large nodemap"
nodemap_exercise_fileset() {

	if [ "$nm" == "default" ]; then
		do_facet mgs $LCTL nodemap_activate 1

	if $SHARED_KEY; then
		export SK_UNIQUE_NM=true

		# will conflict with SK's nodemaps
		trap "fileset_test_cleanup $nm" EXIT

	fileset_test_setup "$nm"

	# add fileset info to $nm nodemap
	if ! combined_mgs_mds; then
		do_facet mgs $LCTL set_param nodemap.${nm}.fileset=/$subdir ||
			error "unable to add fileset info to $nm nodemap on MGS"

	do_facet mgs $LCTL set_param -P nodemap.${nm}.fileset=/$subdir ||
		error "unable to add fileset info to $nm nodemap for servers"
	wait_nm_sync $nm fileset "nodemap.${nm}.fileset=/$subdir"

	zconf_umount_clients ${clients_arr[0]} $MOUNT ||
		error "unable to umount client ${clients_arr[0]}"
	# set some generic fileset to trigger SSK code
	zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
		error "unable to remount client ${clients_arr[0]}"

	# test mount point content
	do_node ${clients_arr[0]} test -f $MOUNT/this_is_$subdir ||
		error "fileset not taken into account"

	# re-mount client with sub-subdir
	zconf_umount_clients ${clients_arr[0]} $MOUNT ||
		error "unable to umount client ${clients_arr[0]}"
	export FILESET=/$subsubdir
	zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
		error "unable to remount client ${clients_arr[0]}"

	# test mount point content
	do_node ${clients_arr[0]} test -f $MOUNT/this_is_$subsubdir ||
		error "subdir of fileset not taken into account"

	# remove fileset info from nodemap
	do_facet mgs $LCTL nodemap_set_fileset --name $nm --fileset clear ||
		error "unable to delete fileset info on $nm nodemap"
	wait_update_facet mgs "$LCTL get_param nodemap.${nm}.fileset" \
		"nodemap.${nm}.fileset=" ||
		error "fileset info still not cleared on $nm nodemap"
	do_facet mgs $LCTL set_param -P nodemap.${nm}.fileset=clear ||
		error "unable to reset fileset info on $nm nodemap"
	wait_nm_sync $nm fileset "nodemap.${nm}.fileset="

	zconf_umount_clients ${clients_arr[0]} $MOUNT ||
		error "unable to umount client ${clients_arr[0]}"
	zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
		error "unable to remount client ${clients_arr[0]}"

	# test mount point content
	if ! do_node ${clients_arr[0]} test -d $MOUNT/$subdir; then
		error "fileset not cleared on $nm nodemap"

	# back to non-nodemap setup
	if $SHARED_KEY; then
		export SK_UNIQUE_NM=false
		zconf_umount_clients ${clients_arr[0]} $MOUNT ||
			error "unable to umount client ${clients_arr[0]}"

	fileset_test_cleanup "$nm"
	if [ "$nm" == "default" ]; then
		do_facet mgs $LCTL nodemap_activate 0
		wait_nm_sync active 0

		export SK_UNIQUE_NM=false

	nodemap_test_cleanup

	if $SHARED_KEY; then
		zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
			error "unable to remount client ${clients_arr[0]}"
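# For illustration, the persistent fileset exercised above is set with, e.g.
# (hypothetical nodemap and path):
#   lctl set_param -P nodemap.c0.fileset=/subdir_c0
# after which clients in nodemap c0 are confined to /subdir_c0 at mount time;
# it is cleared again with:
#   lctl set_param -P nodemap.c0.fileset=clear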
	[ "$MDS1_VERSION" -lt $(version_code 2.11.50) ] &&
		skip "Need MDS >= 2.11.50"

	for nm in "default" "c0"; do
		local subdir="subdir_${nm}"
		local subsubdir="subsubdir_${nm}"

		if [ "$nm" == "default" ] && [ "$SHARED_KEY" == "true" ]; then
			echo "Skipping nodemap $nm with SHARED_KEY"

		echo "Exercising fileset for nodemap $nm"
		nodemap_exercise_fileset "$nm"

run_test 27a "test fileset in various nodemaps"

test_27b() { #LU-10703
	[ "$MDS1_VERSION" -lt $(version_code 2.11.50) ] &&
		skip "Need MDS >= 2.11.50"
	[[ $MDSCOUNT -lt 2 ]] && skip "needs >= 2 MDTs"

	trap nodemap_test_cleanup EXIT

	# Add the nodemaps and set their filesets
	for i in $(seq 1 $MDSCOUNT); do
		do_facet mgs $LCTL nodemap_del nm$i 2>/dev/null
		do_facet mgs $LCTL nodemap_add nm$i ||
			error "add nodemap nm$i failed"
		wait_nm_sync nm$i "" "" "-N"

		if ! combined_mgs_mds; then
			$LCTL set_param nodemap.nm$i.fileset=/dir$i ||
				error "set nm$i.fileset=/dir$i failed on MGS"

		do_facet mgs $LCTL set_param -P nodemap.nm$i.fileset=/dir$i ||
			error "set nm$i.fileset=/dir$i failed on servers"
		wait_nm_sync nm$i fileset "nodemap.nm$i.fileset=/dir$i"

	# Check if all the filesets are correct
	for i in $(seq 1 $MDSCOUNT); do
		fileset=$(do_facet mds$i \
			$LCTL get_param -n nodemap.nm$i.fileset)
		[ "$fileset" = "/dir$i" ] ||
			error "nm$i.fileset $fileset != /dir$i on mds$i"
		do_facet mgs $LCTL nodemap_del nm$i ||
			error "delete nodemap nm$i failed"

	nodemap_test_cleanup

run_test 27b "The new nodemap won't clear the old nodemap's fileset"
	if ! $SHARED_KEY; then
		skip "need shared key feature for this test" && return

	mkdir -p $DIR/$tdir || error "mkdir failed"
	touch $DIR/$tdir/$tdir.out || error "touch failed"
	if [ ! -f $DIR/$tdir/$tdir.out ]; then
		error "read before rotation failed"

	# store top key identity to ensure rotation has occurred
	SK_IDENTITY_OLD=$(lctl get_param *.*.*srpc* | grep "expire" |
		head -1 | awk '{print $15}' | cut -c1-8)
	do_facet $SINGLEMDS lfs flushctx ||
		error "could not run flushctx on $SINGLEMDS"

	lfs flushctx || error "could not run flushctx on client"

	# verify new key is in place
	SK_IDENTITY_NEW=$(lctl get_param *.*.*srpc* | grep "expire" |
		head -1 | awk '{print $15}' | cut -c1-8)
	if [ $SK_IDENTITY_OLD == $SK_IDENTITY_NEW ]; then
		error "key did not rotate correctly"

	if [ ! -f $DIR/$tdir/$tdir.out ]; then
		error "read after rotation failed"

run_test 28 "check shared key rotation method"
	if ! $SHARED_KEY; then
		skip "need shared key feature for this test" && return

	if [ $SK_FLAVOR != "ski" ] && [ $SK_FLAVOR != "skpi" ]; then
		skip "test only valid if integrity is active"

	mkdir $DIR/$tdir || error "mkdir"
	touch $DIR/$tdir/$tfile || error "touch"
	zconf_umount_clients ${clients_arr[0]} $MOUNT ||
		error "unable to umount clients"
	keyctl show | awk '/lustre/ { print $1 }' |
		xargs -IX keyctl unlink X
	OLD_SK_PATH=$SK_PATH
	export SK_PATH=/dev/null
	if zconf_mount_clients ${clients_arr[0]} $MOUNT; then
		export SK_PATH=$OLD_SK_PATH
		if [ -e $DIR/$tdir/$tfile ]; then
			error "able to mount and read without key"

			error "able to mount without key"

	export SK_PATH=$OLD_SK_PATH
	keyctl show | awk '/lustre/ { print $1 }' |
		xargs -IX keyctl unlink X

run_test 29 "check for missing shared key"
	if ! $SHARED_KEY; then
		skip "need shared key feature for this test" && return

	if [ $SK_FLAVOR != "ski" ] && [ $SK_FLAVOR != "skpi" ]; then
		skip "test only valid if integrity is active"

	mkdir -p $DIR/$tdir || error "mkdir failed"
	touch $DIR/$tdir/$tdir.out || error "touch failed"
	zconf_umount_clients ${clients_arr[0]} $MOUNT ||
		error "unable to umount clients"
	# unload keys from ring
	keyctl show | awk '/lustre/ { print $1 }' |
		xargs -IX keyctl unlink X
	# generate a key with a bogus filesystem name so it cannot be valid
	lgss_sk -w $SK_PATH/$FSNAME-bogus.key -f $FSNAME.bogus \
		-t client -d /dev/urandom || error "lgss_sk failed (1)"
	do_facet $SINGLEMDS lfs flushctx || error "could not run flushctx"
	OLD_SK_PATH=$SK_PATH
	export SK_PATH=$SK_PATH/$FSNAME-bogus.key
	if zconf_mount_clients ${clients_arr[0]} $MOUNT; then
		SK_PATH=$OLD_SK_PATH
		if [ -e $DIR/$tdir/$tdir.out ]; then
			error "able to mount and read file with invalid key"

			error "able to mount with invalid key"

	SK_PATH=$OLD_SK_PATH
	zconf_umount_clients ${clients_arr[0]} $MOUNT ||
		error "unable to umount clients"

run_test 30 "check for invalid shared key"
	zconf_umount $HOSTNAME $MOUNT || error "unable to umount client"

	# remove ${NETTYPE}999 network on all nodes
	do_nodes $(comma_list $(all_nodes)) \
		"$LNETCTL net del --net ${NETTYPE}999 && \
		 $LNETCTL lnet unconfigure 2>/dev/null || true"

	# necessary to do writeconf in order to de-register
	# @${NETTYPE}999 nid for targets
	export KEEP_ZPOOL="true"

	export SK_MOUNTED=false

	export KEEP_ZPOOL="$KZPOOL"
2224 local nid=$(lctl list_nids | grep ${NETTYPE} | head -n1)
2225 local addr=${nid%@*}
2228 export LNETCTL=$(which lnetctl 2> /dev/null)
2230 [ -z "$LNETCTL" ] && skip "without lnetctl support." && return
2231 local_mode && skip "in local mode."
2233 stack_trap cleanup_31 EXIT
2236 if [ "$MOUNT_2" ] && $(grep -q $MOUNT2' ' /proc/mounts); then
2237 umount_client $MOUNT2 || error "umount $MOUNT2 failed"
2239 if $(grep -q $MOUNT' ' /proc/mounts); then
2240 umount_client $MOUNT || error "umount $MOUNT failed"
2243 # check exports on servers are empty for client
2244 do_facet mgs "lctl get_param -n *.MGS*.exports.'$nid'.uuid 2>/dev/null |
2245 grep -q -" && error "export on MGS should be empty"
2246 do_nodes $(comma_list $(mdts_nodes) $(osts_nodes)) \
2247 "lctl get_param -n *.${FSNAME}*.exports.'$nid'.uuid \
2248 2>/dev/null | grep -q -" &&
2249 error "export on servers should be empty"
2251 # add network ${NETTYPE}999 on all nodes
2252 do_nodes $(comma_list $(all_nodes)) \
2253 "$LNETCTL lnet configure && $LNETCTL net add --if \
2254 \$($LNETCTL net show --net $net | awk 'BEGIN{inf=0} \
2255 {if (inf==1) print \$2; fi; inf=0} /interfaces/{inf=1}') \
2256 --net ${NETTYPE}999" ||
2257 error "unable to configure NID ${NETTYPE}999"
2259 # necessary to do writeconf in order to register
2260 # new @${NETTYPE}999 nid for targets
2262 export KEEP_ZPOOL="true"
2264 export SK_MOUNTED=false
2266 setupall server_only || echo 1
2267 export KEEP_ZPOOL="$KZPOOL"

        # backup MGSNID
        local mgsnid_orig=$MGSNID
        # compute new MGSNID
        MGSNID=$(do_facet mgs "$LCTL list_nids | grep ${NETTYPE}999")

        # on client, turn LNet Dynamic Discovery on
        $LNETCTL set discovery 1

        # mount client with -o network=${NETTYPE}999 option:
        # should fail because of LNet Dynamic Discovery
        mount_client $MOUNT ${MOUNT_OPTS},network=${NETTYPE}999 &&
                error "client mount with '-o network' option should be refused"

        # on client, reconfigure LNet and turn LNet Dynamic Discovery off
        $LNETCTL net del --net ${NETTYPE}999 && $LNETCTL lnet unconfigure
        $LNETCTL set discovery 0
        $LNETCTL lnet configure && $LNETCTL net add --if \
                $($LNETCTL net show --net $net | awk 'BEGIN{inf=0} \
                {if (inf==1) print $2; inf=0} /interfaces/{inf=1}') \
                --net ${NETTYPE}999 ||
                error "unable to configure NID ${NETTYPE}999 on client"

        # mount client with -o network=${NETTYPE}999 option
        mount_client $MOUNT ${MOUNT_OPTS},network=${NETTYPE}999 ||
                error "unable to remount client"

        # restore MGSNID
        MGSNID=$mgsnid_orig

        # check export on MGS
        do_facet mgs "lctl get_param -n *.MGS*.exports.'$nid'.uuid 2>/dev/null |
                      grep -q -"
        [ $? -ne 0 ] || error "export for $nid on MGS should not exist"

        do_facet mgs \
                "lctl get_param -n *.MGS*.exports.'${addr}@${NETTYPE}999'.uuid \
                 2>/dev/null | grep -q -"
        [ $? -eq 0 ] ||
                error "export for ${addr}@${NETTYPE}999 on MGS should exist"

        # check {mdc,osc} imports
        lctl get_param mdc.${FSNAME}-*.import | grep current_connection |
                grep -q ${NETTYPE}999
        [ $? -eq 0 ] ||
                error "import for mdc should use ${addr}@${NETTYPE}999"
        lctl get_param osc.${FSNAME}-*.import | grep current_connection |
                grep -q ${NETTYPE}999
        [ $? -eq 0 ] ||
                error "import for osc should use ${addr}@${NETTYPE}999"
}
run_test 31 "client mount option '-o network'"
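
# A minimal sketch of the LNet manipulation test 31 performs, reduced to bare
# lnetctl calls (interface eth0 and the nids are placeholders, not executed
# here):
#   lnetctl lnet configure                   # initialize LNet
#   lnetctl net add --net tcp999 --if eth0   # add the extra network
#   lnetctl set discovery 0                  # turn Dynamic Discovery off
#   mount -t lustre -o network=tcp999 1.2.3.4@tcp999:/lustre /mnt/lustre
#   lnetctl net del --net tcp999             # remove it again
# With discovery on, the peer would be re-discovered on all of its NIDs,
# which conflicts with restricting traffic to a single network; that is why
# the first mount attempt in the test is expected to be refused.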

cleanup_32() {
        # umount client
        zconf_umount_clients ${clients_arr[0]} $MOUNT

        # disable sk flavor enforcement on MGS
        set_rule _mgs any any null

        # stop gss daemon on MGS
        if ! combined_mgs_mds ; then
                send_sigint $mgs_HOST lsvcgssd
        fi

        # re-mount client
        MOUNT_OPTS=$(add_sk_mntflag $MOUNT_OPTS)
        mountcli

        restore_to_default_flavor
}

test_32() {
        if ! $SHARED_KEY; then
                skip "need shared key feature for this test"
        fi

        stack_trap cleanup_32 EXIT

        # restore to default null flavor
        save_flvr=$SK_FLAVOR
        SK_FLAVOR=null
        restore_to_default_flavor || error "cannot set null flavor"
        SK_FLAVOR=$save_flvr

        # umount client
        if [ "$MOUNT_2" ] && grep -q $MOUNT2' ' /proc/mounts; then
                umount_client $MOUNT2 || error "umount $MOUNT2 failed"
        fi
        if grep -q $MOUNT' ' /proc/mounts; then
                umount_client $MOUNT || error "umount $MOUNT failed"
        fi

        # start gss daemon on MGS
        if combined_mgs_mds ; then
                send_sigint $mds_HOST lsvcgssd
        fi
        start_gss_daemons $mgs_HOST "$LSVCGSSD -vvv -s -g"

        # add mgs key type and MGS NIDs in key on MGS
        do_nodes $mgs_HOST "lgss_sk -t mgs,server -g $MGSNID -m \
                $SK_PATH/$FSNAME.key >/dev/null 2>&1" ||
                error "could not modify keyfile on MGS"

        # load modified key file on MGS
        do_nodes $mgs_HOST "lgss_sk -l $SK_PATH/$FSNAME.key >/dev/null 2>&1" ||
                error "could not load keyfile on MGS"

        # add MGS NIDs in key on client
        do_nodes ${clients_arr[0]} "lgss_sk -g $MGSNID -m \
                $SK_PATH/$FSNAME.key >/dev/null 2>&1" ||
                error "could not modify keyfile on client"

        # set perms for per-nodemap keys else permission denied
        do_nodes $(comma_list $(all_nodes)) \
                "keyctl show | grep lustre | cut -c1-11 |
                        sed -e 's/ //g;' |
                        xargs -IX keyctl setperm X 0x3f3f3f3f"
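
        # The pipeline above grants all permissions (view, read, write,
        # search, link, setattr = 0x3f) to possessor, user, group and other
        # on every lustre key; the equivalent single-key form would be
        # (key id 123456789 is hypothetical):
        #   keyctl setperm 123456789 0x3f3f3f3f
        # Without this, gss_keyring lookups of per-nodemap keys can fail
        # with permission denied when done from another context.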

        # re-mount client with mgssec=skn
        save_opts=$MOUNT_OPTS
        if [ -z "$MOUNT_OPTS" ]; then
                MOUNT_OPTS="-o mgssec=skn"
        else
                MOUNT_OPTS="$MOUNT_OPTS,mgssec=skn"
        fi
        zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
                error "mount ${clients_arr[0]} with mgssec=skn failed"
        MOUNT_OPTS=$save_opts

        # umount client
        zconf_umount_clients ${clients_arr[0]} $MOUNT ||
                error "umount ${clients_arr[0]} failed"

        # enforce ska flavor on MGS
        set_rule _mgs any any ska

        # re-mount client without mgssec
        zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS &&
                error "mount ${clients_arr[0]} without mgssec should fail"

        # re-mount client with mgssec=skn
        save_opts=$MOUNT_OPTS
        if [ -z "$MOUNT_OPTS" ]; then
                MOUNT_OPTS="-o mgssec=skn"
        else
                MOUNT_OPTS="$MOUNT_OPTS,mgssec=skn"
        fi
        zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS &&
                error "mount ${clients_arr[0]} with mgssec=skn should fail"
        MOUNT_OPTS=$save_opts

        # re-mount client with mgssec=ska
        save_opts=$MOUNT_OPTS
        if [ -z "$MOUNT_OPTS" ]; then
                MOUNT_OPTS="-o mgssec=ska"
        else
                MOUNT_OPTS="$MOUNT_OPTS,mgssec=ska"
        fi
        zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
                error "mount ${clients_arr[0]} with mgssec=ska failed"
        MOUNT_OPTS=$save_opts
}
run_test 32 "check for mgssec"
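
# For reference, set_rule is a test-framework helper that writes sptlrpc
# flavor rules through the MGS; conceptually the enforcement toggled in
# test 32 amounts to (sketch, not executed here):
#   set_rule _mgs any any ska    # require shared-key with authentication
#   set_rule _mgs any any null   # back to no enforcement
# while clients pick their MGS connection flavor with -o mgssec=<flavor>.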

cleanup_33() {
        # disable sk flavor enforcement
        set_rule $FSNAME any cli2mdt null
        wait_flavor cli2mdt null

        # umount client
        zconf_umount_clients ${clients_arr[0]} $MOUNT

        # stop gss daemon on MGS
        if ! combined_mgs_mds ; then
                send_sigint $mgs_HOST lsvcgssd
        fi

        # re-mount client
        MOUNT_OPTS=$(add_sk_mntflag $MOUNT_OPTS)
        mountcli

        restore_to_default_flavor
}

test_33() {
        if ! $SHARED_KEY; then
                skip "need shared key feature for this test"
        fi

        stack_trap cleanup_33 EXIT

        # restore to default null flavor
        save_flvr=$SK_FLAVOR
        SK_FLAVOR=null
        restore_to_default_flavor || error "cannot set null flavor"
        SK_FLAVOR=$save_flvr

        # umount client
        if [ "$MOUNT_2" ] && grep -q $MOUNT2' ' /proc/mounts; then
                umount_client $MOUNT2 || error "umount $MOUNT2 failed"
        fi
        if grep -q $MOUNT' ' /proc/mounts; then
                umount_client $MOUNT || error "umount $MOUNT failed"
        fi

        # start gss daemon on MGS
        if combined_mgs_mds ; then
                send_sigint $mds_HOST lsvcgssd
        fi
        start_gss_daemons $mgs_HOST "$LSVCGSSD -vvv -s -g"

        # add mgs key type and MGS NIDs in key on MGS
        do_nodes $mgs_HOST "lgss_sk -t mgs,server -g $MGSNID -m \
                $SK_PATH/$FSNAME.key >/dev/null 2>&1" ||
                error "could not modify keyfile on MGS"

        # load modified key file on MGS
        do_nodes $mgs_HOST "lgss_sk -l $SK_PATH/$FSNAME.key >/dev/null 2>&1" ||
                error "could not load keyfile on MGS"

        # add MGS NIDs in key on client
        do_nodes ${clients_arr[0]} "lgss_sk -g $MGSNID -m \
                $SK_PATH/$FSNAME.key >/dev/null 2>&1" ||
                error "could not modify keyfile on client"

        # set perms for per-nodemap keys else permission denied
        do_nodes $(comma_list $(all_nodes)) \
                "keyctl show | grep lustre | cut -c1-11 |
                        sed -e 's/ //g;' |
                        xargs -IX keyctl setperm X 0x3f3f3f3f"

        # re-mount client with mgssec=skn
        save_opts=$MOUNT_OPTS
        if [ -z "$MOUNT_OPTS" ]; then
                MOUNT_OPTS="-o mgssec=skn"
        else
                MOUNT_OPTS="$MOUNT_OPTS,mgssec=skn"
        fi
        zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
                error "mount ${clients_arr[0]} with mgssec=skn failed"
        MOUNT_OPTS=$save_opts

        # enforce ska flavor for cli2mdt
        set_rule $FSNAME any cli2mdt ska
        wait_flavor cli2mdt ska

        # check for error message in the debug log
        $LCTL dk | grep "faked source" &&
                error "MGS connection srpc flags incorrect"

        return 0
}
run_test 33 "correct srpc flags for MGS connection"
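
# The "faked source" check above scrapes the kernel debug log; the same
# pattern can be inspected by hand (sketch, the log path is a placeholder):
#   lctl dk > /tmp/lustre-debug.log    # dump and clear the debug buffer
#   grep "faked source" /tmp/lustre-debug.log
# A match would indicate that MGS-only srpc flags leaked onto a non-MGS
# connection, which is exactly what test 33 guards against.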

cleanup_34_deny() {
        # restore deny_unknown
        do_facet mgs $LCTL nodemap_modify --name default \
                --property deny_unknown --value $denydefault
        if [ $? -ne 0 ]; then
                error_noexit "cannot reset deny_unknown on default nodemap"
                return
        fi

        wait_nm_sync default deny_unknown
}

test_34() {
        [ $MGS_VERSION -lt $(version_code 2.12.51) ] &&
                skip "deny_unknown on default nm not supported before 2.12.51"

        activedefault=$(do_facet mgs $LCTL get_param -n nodemap.active)

        if [[ "$activedefault" != "1" ]]; then
                do_facet mgs $LCTL nodemap_activate 1
                wait_nm_sync active
                stack_trap cleanup_active EXIT
        fi

        denydefault=$(do_facet mgs $LCTL get_param -n \
                nodemap.default.deny_unknown)
        [ -z "$denydefault" ] &&
                error "cannot get deny_unknown on default nodemap"
        if [ "$denydefault" -eq 0 ]; then
                denynew=1
        else
                denynew=0
        fi

        do_facet mgs $LCTL nodemap_modify --name default \
                --property deny_unknown --value $denynew ||
                error "cannot set deny_unknown on default nodemap"

        [ "$(do_facet mgs $LCTL get_param -n nodemap.default.deny_unknown)" \
                -eq $denynew ] ||
                error "setting deny_unknown on default nodemap did not work"

        stack_trap cleanup_34_deny EXIT

        wait_nm_sync default deny_unknown
}
run_test 34 "deny_unknown on default nodemap"
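
# deny_unknown makes a nodemap reject users that cannot be mapped; a minimal
# sketch of toggling it by hand on the MGS, using the same commands as the
# test (not executed here):
#   lctl nodemap_activate 1
#   lctl nodemap_modify --name default --property deny_unknown --value 1
#   lctl get_param -n nodemap.default.deny_unknown
# Changes originate on the MGS and must propagate to the other servers,
# which is why the test finishes with wait_nm_sync.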

log "cleanup: ======================================================"

for num in $(seq $MDSCOUNT); do
        if [ "${identity_old[$num]}" = 1 ]; then
                switch_identity $num false || identity_old[$num]=$?
        fi
done

$RUNAS_CMD -u $ID0 ls $DIR
$RUNAS_CMD -u $ID1 ls $DIR

check_and_cleanup_lustre