3 # Run select tests by setting ONLY, or as arguments to the script.
4 # Skip specific tests by setting EXCEPT.
10 # bug number for skipped test:
11 ALWAYS_EXCEPT=" $SANITY_SEC_EXCEPT"
12 # UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
15 export PATH=$PWD/$SRCDIR:$SRCDIR:$PWD/$SRCDIR/../utils:$PATH:/sbin
16 export NAME=${NAME:-local}
18 LUSTRE=${LUSTRE:-$(dirname $0)/..}
19 . $LUSTRE/tests/test-framework.sh
21 . ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
24 NODEMAP_TESTS=$(seq 7 26)
26 if ! check_versions; then
27 echo "It is NOT necessary to test nodemap under interoperation mode"
28 EXCEPT="$EXCEPT $NODEMAP_TESTS"
31 [ "$SLOW" = "no" ] && EXCEPT_SLOW="26"
33 [ "$ALWAYS_EXCEPT$EXCEPT$EXCEPT_SLOW" ] &&
34 echo "Skipping tests: $ALWAYS_EXCEPT $EXCEPT $EXCEPT_SLOW"
36 RUNAS_CMD=${RUNAS_CMD:-runas}
38 WTL=${WTL:-"$LUSTRE/tests/write_time_limit"}
41 PERM_CONF=$CONFDIR/perm.conf
43 HOSTNAME_CHECKSUM=$(hostname | sum | awk '{ print $1 }')
44 SUBNET_CHECKSUM=$(expr $HOSTNAME_CHECKSUM % 250 + 1)
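# SUBNET_CHECKSUM always falls in [1, 250]; e.g. a (hypothetical) hostname
# checksum of 52535 gives 52535 % 250 + 1 = 36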
46 require_dsh_mds || exit 0
47 require_dsh_ost || exit 0
49 clients=${CLIENTS//,/ }
50 num_clients=$(get_node_count ${clients})
51 clients_arr=($clients)
55 USER0=$(getent passwd | grep :$ID0:$ID0: | cut -d: -f1)
56 USER1=$(getent passwd | grep :$ID1:$ID1: | cut -d: -f1)
60 NODEMAP_IPADDR_LIST="1 10 64 128 200 250"
62 NODEMAP_MAX_ID=$((ID0 + NODEMAP_ID_COUNT))
65 skip "need to add user0 ($ID0:$ID0)" && exit 0
68 skip "need to add user1 ($ID1:$ID1)" && exit 0
70 IDBASE=${IDBASE:-60000}
72 # changes to mappings must be reflected in test 23
74 [0]="$((IDBASE+3)):$((IDBASE+0)) $((IDBASE+4)):$((IDBASE+2))"
75 [1]="$((IDBASE+5)):$((IDBASE+1)) $((IDBASE+6)):$((IDBASE+2))"
78 check_and_setup_lustre
83 GSS_REF=$(lsmod | grep ^ptlrpc_gss | awk '{print $3}')
84 if [ ! -z "$GSS_REF" -a "$GSS_REF" != "0" ]; then
86 echo "with GSS support"
89 echo "without GSS support"
92 MDT=$(do_facet $SINGLEMDS lctl get_param -N "mdt.\*MDT0000" |
94 [ -z "$MDT" ] && error "fail to get MDT device" && exit 1
95 do_facet $SINGLEMDS "mkdir -p $CONFDIR"
96 IDENTITY_FLUSH=mdt.$MDT.identity_flush
97 IDENTITY_UPCALL=mdt.$MDT.identity_upcall
108 if ! $RUNAS_CMD -u $user krb5_login.sh; then
109 error "$user login kerberos failed."
113 if ! $RUNAS_CMD -u $user -g $group ls $DIR > /dev/null 2>&1; then
114 $RUNAS_CMD -u $user lfs flushctx -k
115 $RUNAS_CMD -u $user krb5_login.sh
116 if ! $RUNAS_CMD -u$user -g$group ls $DIR > /dev/null 2>&1; then
117 error "init $user $group failed."
123 declare -a identity_old
126 for num in $(seq $MDSCOUNT); do
127 switch_identity $num true || identity_old[$num]=$?
130 if ! $RUNAS_CMD -u $ID0 ls $DIR > /dev/null 2>&1; then
131 sec_login $USER0 $USER0
134 if ! $RUNAS_CMD -u $ID1 ls $DIR > /dev/null 2>&1; then
135 sec_login $USER1 $USER1
140 # run as different user
144 chmod 0755 $DIR || error "chmod (1)"
145 rm -rf $DIR/$tdir || error "rm (1)"
146 mkdir -p $DIR/$tdir || error "mkdir (1)"
147 chown $USER0 $DIR/$tdir || error "chown (2)"
148 $RUNAS_CMD -u $ID0 ls $DIR || error "ls (1)"
149 rm -f $DIR/f0 || error "rm (2)"
150 $RUNAS_CMD -u $ID0 touch $DIR/f0 && error "touch (1)"
151 $RUNAS_CMD -u $ID0 touch $DIR/$tdir/f1 || error "touch (2)"
152 $RUNAS_CMD -u $ID1 touch $DIR/$tdir/f2 && error "touch (3)"
153 touch $DIR/$tdir/f3 || error "touch (4)"
154 chown root $DIR/$tdir || error "chown (3)"
155 chgrp $USER0 $DIR/$tdir || error "chgrp (1)"
156 chmod 0775 $DIR/$tdir || error "chmod (2)"
157 $RUNAS_CMD -u $ID0 touch $DIR/$tdir/f4 || error "touch (5)"
158 $RUNAS_CMD -u $ID1 touch $DIR/$tdir/f5 && error "touch (6)"
159 touch $DIR/$tdir/f6 || error "touch (7)"
160 rm -rf $DIR/$tdir || error "rm (3)"
162 run_test 0 "uid permission ============================="
166 [ $GSS_SUP = 0 ] && skip "without GSS support." && return
171 chown $USER0 $DIR/$tdir || error "chown (1)"
172 $RUNAS_CMD -u $ID1 -v $ID0 touch $DIR/$tdir/f0 && error "touch (2)"
173 echo "enable uid $ID1 setuid"
174 do_facet $SINGLEMDS "echo '* $ID1 setuid' >> $PERM_CONF"
175 do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
176 $RUNAS_CMD -u $ID1 -v $ID0 touch $DIR/$tdir/f1 || error "touch (3)"
178 chown root $DIR/$tdir || error "chown (4)"
179 chgrp $USER0 $DIR/$tdir || error "chgrp (5)"
180 chmod 0770 $DIR/$tdir || error "chmod (6)"
181 $RUNAS_CMD -u $ID1 -g $ID1 touch $DIR/$tdir/f2 && error "touch (7)"
182 $RUNAS_CMD -u$ID1 -g$ID1 -j$ID0 touch $DIR/$tdir/f3 && error "touch (8)"
183 echo "enable uid $ID1 setuid,setgid"
184 do_facet $SINGLEMDS "echo '* $ID1 setuid,setgid' > $PERM_CONF"
185 do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
186 $RUNAS_CMD -u $ID1 -g $ID1 -j $ID0 touch $DIR/$tdir/f4 ||
188 $RUNAS_CMD -u $ID1 -v $ID0 -g $ID1 -j $ID0 touch $DIR/$tdir/f5 ||
193 do_facet $SINGLEMDS "rm -f $PERM_CONF"
194 do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
196 run_test 1 "setuid/gid ============================="
198 # bug 3285 - supplementary group should always succeed.
199 # NB: the supplementary groups are set for local client only,
200 # as for remote client, the groups of the specified uid on MDT
201 # will be obtained by upcall /sbin/l_getidentity and used.
203 local server_version=$(lustre_version_code $SINGLEMDS)
205 [[ $server_version -ge $(version_code 2.6.93) ]] ||
206 [[ $server_version -ge $(version_code 2.5.35) &&
207 $server_version -lt $(version_code 2.5.50) ]] ||
208 { skip "Need MDS version at least 2.6.93 or 2.5.35"; return; }
212 chmod 0771 $DIR/$tdir
213 chgrp $ID0 $DIR/$tdir
214 $RUNAS_CMD -u $ID0 ls $DIR/$tdir || error "setgroups (1)"
215 do_facet $SINGLEMDS "echo '* $ID1 setgrp' > $PERM_CONF"
216 do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
217 $RUNAS_CMD -u $ID1 -G1,2,$ID0 ls $DIR/$tdir ||
218 error "setgroups (2)"
219 $RUNAS_CMD -u $ID1 -G1,2 ls $DIR/$tdir && error "setgroups (3)"
222 do_facet $SINGLEMDS "rm -f $PERM_CONF"
223 do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
225 run_test 4 "set supplementary group ==============="
232 squash_id default 99 0
233 squash_id default 99 1
234 for (( i = 0; i < NODEMAP_COUNT; i++ )); do
235 local csum=${HOSTNAME_CHECKSUM}_${i}
237 if ! do_facet mgs $LCTL nodemap_add $csum; then
241 out=$(do_facet mgs $LCTL get_param nodemap.$csum.id)
## fail if the new nodemap's name is missing from the get_param output
243 [[ $(echo $out | grep -c $csum) == 0 ]] && return 1
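# delete_nodemaps: remove the ${HOSTNAME_CHECKSUM}_<i> nodemaps and verify
# they are gone from the nodemap parameters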
252 for ((i = 0; i < NODEMAP_COUNT; i++)); do
253 local csum=${HOSTNAME_CHECKSUM}_${i}
255 if ! do_facet mgs $LCTL nodemap_del $csum; then
256 error "nodemap_del $csum failed with $?"
260 out=$(do_facet mgs $LCTL get_param nodemap.$csum.id 2>/dev/null)
261 [[ $(echo $out | grep -c $csum) != 0 ]] && return 1
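# add_range <nodemap> <subnet idx>: add NODEMAP_RANGE_COUNT NID ranges of
# the form $SUBNET_CHECKSUM.<idx>.<j>.[1-253]@tcp to the given nodemap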
268 local cmd="$LCTL nodemap_add_range"
272 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
273 range="$SUBNET_CHECKSUM.${2}.${j}.[1-253]@tcp"
274 if ! do_facet mgs $cmd --name $1 --range $range; then
283 local cmd="$LCTL nodemap_del_range"
287 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
288 range="$SUBNET_CHECKSUM.${2}.${j}.[1-253]@tcp"
289 if ! do_facet mgs $cmd --name $1 --range $range; then
299 local cmd="$LCTL nodemap_add_idmap"
302 echo "Start to add idmaps ..."
303 for ((i = 0; i < NODEMAP_COUNT; i++)); do
306 for ((j = $ID0; j < NODEMAP_MAX_ID; j++)); do
307 local csum=${HOSTNAME_CHECKSUM}_${i}
309 local fs_id=$((j + 1))
311 if ! do_facet mgs $cmd --name $csum --idtype uid \
312 --idmap $client_id:$fs_id; then
315 if ! do_facet mgs $cmd --name $csum --idtype gid \
316 --idmap $client_id:$fs_id; then
325 update_idmaps() { #LU-10040
326 [ $(lustre_version_code mgs) -lt $(version_code 2.10.55) ] &&
327 skip "Need MGS >= 2.10.55" &&
329 local csum=${HOSTNAME_CHECKSUM}_0
330 local old_id_client=$ID0
331 local old_id_fs=$((ID0 + 1))
332 local new_id=$((ID0 + 100))
339 echo "Start to update idmaps ..."
#Inserting an existing idmap should return an error
342 cmd="$LCTL nodemap_add_idmap --name $csum --idtype uid"
344 $cmd --idmap $old_id_client:$old_id_fs 2>/dev/null; then
345 error "insert idmap {$old_id_client:$old_id_fs} " \
346 "should return error"
351 #Update id_fs and check it
352 if ! do_facet mgs $cmd --idmap $old_id_client:$new_id; then
353 error "$cmd --idmap $old_id_client:$new_id failed"
357 tmp_id=$(do_facet mgs $LCTL get_param -n nodemap.$csum.idmap |
358 awk '{ print $7 }' | sed -n '2p')
359 [ $tmp_id != $new_id ] && { error "new id_fs $tmp_id != $new_id"; \
360 rc=$((rc + 1)); return $rc; }
362 #Update id_client and check it
363 if ! do_facet mgs $cmd --idmap $new_id:$new_id; then
364 error "$cmd --idmap $new_id:$new_id failed"
368 tmp_id=$(do_facet mgs $LCTL get_param -n nodemap.$csum.idmap |
369 awk '{ print $5 }' | sed -n "$((NODEMAP_ID_COUNT + 1)) p")
370 tmp_id=$(echo ${tmp_id%,*}) #e.g. "501,"->"501"
371 [ $tmp_id != $new_id ] && { error "new id_client $tmp_id != $new_id"; \
372 rc=$((rc + 1)); return $rc; }
374 #Delete above updated idmap
375 cmd="$LCTL nodemap_del_idmap --name $csum --idtype uid"
376 if ! do_facet mgs $cmd --idmap $new_id:$new_id; then
377 error "$cmd --idmap $new_id:$new_id failed"
#restore the idmap so that delete_idmaps can clean up properly
383 cmd="$LCTL nodemap_add_idmap --name $csum --idtype uid"
384 if ! do_facet mgs $cmd --idmap $old_id_client:$old_id_fs; then
385 error "$cmd --idmap $old_id_client:$old_id_fs failed"
395 local cmd="$LCTL nodemap_del_idmap"
398 echo "Start to delete idmaps ..."
399 for ((i = 0; i < NODEMAP_COUNT; i++)); do
402 for ((j = $ID0; j < NODEMAP_MAX_ID; j++)); do
403 local csum=${HOSTNAME_CHECKSUM}_${i}
405 local fs_id=$((j + 1))
407 if ! do_facet mgs $cmd --name $csum --idtype uid \
408 --idmap $client_id:$fs_id; then
411 if ! do_facet mgs $cmd --name $csum --idtype gid \
412 --idmap $client_id:$fs_id; then
425 local cmd="$LCTL nodemap_modify"
428 proc[0]="admin_nodemap"
429 proc[1]="trusted_nodemap"
433 for ((idx = 0; idx < 2; idx++)); do
434 if ! do_facet mgs $cmd --name $1 --property ${option[$idx]} \
439 if ! do_facet mgs $cmd --name $1 --property ${option[$idx]} \
449 [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
450 skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
454 cmd[0]="$LCTL nodemap_modify --property squash_uid"
455 cmd[1]="$LCTL nodemap_modify --property squash_gid"
457 if ! do_facet mgs ${cmd[$3]} --name $1 --value $2; then
462 # ensure that the squash defaults are the expected defaults
463 squash_id default 99 0
464 squash_id default 99 1
469 cmd="$LCTL nodemap_test_nid"
471 nid=$(do_facet mgs $cmd $1)
473 if [ $nid == $2 ]; then
482 local cmd="$LCTL nodemap_test_id"
485 echo "Start to test idmaps ..."
486 ## nodemap deactivated
487 if ! do_facet mgs $LCTL nodemap_activate 0; then
490 for ((id = $ID0; id < NODEMAP_MAX_ID; id++)); do
493 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
494 local nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
495 local fs_id=$(do_facet mgs $cmd --nid $nid \
496 --idtype uid --id $id)
497 if [ $fs_id != $id ]; then
498 echo "expected $id, got $fs_id"
505 if ! do_facet mgs $LCTL nodemap_activate 1; then
509 for ((id = $ID0; id < NODEMAP_MAX_ID; id++)); do
510 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
511 nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
512 fs_id=$(do_facet mgs $cmd --nid $nid \
513 --idtype uid --id $id)
514 expected_id=$((id + 1))
515 if [ $fs_id != $expected_id ]; then
516 echo "expected $expected_id, got $fs_id"
523 for ((i = 0; i < NODEMAP_COUNT; i++)); do
524 local csum=${HOSTNAME_CHECKSUM}_${i}
526 if ! do_facet mgs $LCTL nodemap_modify --name $csum \
527 --property trusted --value 1; then
528 error "nodemap_modify $csum failed with $?"
533 for ((id = $ID0; id < NODEMAP_MAX_ID; id++)); do
534 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
535 nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
536 fs_id=$(do_facet mgs $cmd --nid $nid \
537 --idtype uid --id $id)
538 if [ $fs_id != $id ]; then
539 echo "expected $id, got $fs_id"
## ensure the admin property (root access) is enabled
546 for ((i = 0; i < NODEMAP_COUNT; i++)); do
547 local csum=${HOSTNAME_CHECKSUM}_${i}
549 if ! do_facet mgs $LCTL nodemap_modify --name $csum \
550 --property admin --value 1; then
551 error "nodemap_modify $csum failed with $?"
## check that root access is allowed
557 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
558 nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
559 fs_id=$(do_facet mgs $cmd --nid $nid --idtype uid --id 0)
560 if [ $fs_id != 0 ]; then
561 echo "root allowed expected 0, got $fs_id"
## ensure the admin property (root access) is disabled
567 for ((i = 0; i < NODEMAP_COUNT; i++)); do
568 local csum=${HOSTNAME_CHECKSUM}_${i}
570 if ! do_facet mgs $LCTL nodemap_modify --name $csum \
571 --property admin --value 0; then
572 error "nodemap_modify ${HOSTNAME_CHECKSUM}_${i} "
578 ## check that root is mapped to 99
579 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
580 nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
581 fs_id=$(do_facet mgs $cmd --nid $nid --idtype uid --id 0)
582 if [ $fs_id != 99 ]; then
583 error "root squash expected 99, got $fs_id"
588 ## reset client trust to 0
589 for ((i = 0; i < NODEMAP_COUNT; i++)); do
590 if ! do_facet mgs $LCTL nodemap_modify \
591 --name ${HOSTNAME_CHECKSUM}_${i} \
592 --property trusted --value 0; then
593 error "nodemap_modify ${HOSTNAME_CHECKSUM}_${i} "
605 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
606 [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
607 skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
612 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
616 [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 2
620 run_test 7 "nodemap create and delete"
625 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
626 [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
627 skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
634 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
640 [[ $rc == 0 ]] && error "duplicate nodemap_add allowed with $rc" &&
646 [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 3
650 run_test 8 "nodemap reject duplicates"
656 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
657 [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
658 skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
664 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
667 for ((i = 0; i < NODEMAP_COUNT; i++)); do
668 if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
672 [[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2
675 for ((i = 0; i < NODEMAP_COUNT; i++)); do
676 if ! delete_range ${HOSTNAME_CHECKSUM}_${i} $i; then
680 [[ $rc != 0 ]] && error "nodemap_del_range failed with $rc" && return 4
685 [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4
689 run_test 9 "nodemap range add"
694 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
695 [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
696 skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
702 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
705 for ((i = 0; i < NODEMAP_COUNT; i++)); do
706 if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
710 [[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2
713 for ((i = 0; i < NODEMAP_COUNT; i++)); do
714 if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
718 [[ $rc == 0 ]] && error "nodemap_add_range duplicate add with $rc" &&
723 for ((i = 0; i < NODEMAP_COUNT; i++)); do
724 if ! delete_range ${HOSTNAME_CHECKSUM}_${i} $i; then
728 [[ $rc != 0 ]] && error "nodemap_del_range failed with $rc" && return 4
732 [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 5
736 run_test 10a "nodemap reject duplicate ranges"
739 [ $(lustre_version_code mgs) -lt $(version_code 2.10.53) ] &&
740 skip "Need MGS >= 2.10.53" && return
744 local nids="192.168.19.[0-255]@o2ib20"
746 do_facet mgs $LCTL nodemap_del $nm1 2>/dev/null
747 do_facet mgs $LCTL nodemap_del $nm2 2>/dev/null
749 do_facet mgs $LCTL nodemap_add $nm1 || error "Add $nm1 failed"
750 do_facet mgs $LCTL nodemap_add $nm2 || error "Add $nm2 failed"
751 do_facet mgs $LCTL nodemap_add_range --name $nm1 --range $nids ||
752 error "Add range $nids to $nm1 failed"
753 [ -n "$(do_facet mgs $LCTL get_param nodemap.$nm1.* |
754 grep start_nid)" ] || error "No range was found"
755 do_facet mgs $LCTL nodemap_del_range --name $nm2 --range $nids &&
756 error "Deleting range $nids from $nm2 should fail"
757 [ -n "$(do_facet mgs $LCTL get_param nodemap.$nm1.* |
758 grep start_nid)" ] || error "Range $nids should be there"
760 do_facet mgs $LCTL nodemap_del $nm1 || error "Delete $nm1 failed"
761 do_facet mgs $LCTL nodemap_del $nm2 || error "Delete $nm2 failed"
764 run_test 10b "delete range from the correct nodemap"
766 test_10c() { #LU-8912
767 [ $(lustre_version_code mgs) -lt $(version_code 2.10.57) ] &&
768 skip "Need MGS >= 2.10.57" && return
770 local nm="nodemap_lu8912"
771 local nid_range="10.210.[32-47].[0-255]@o2ib3"
772 local start_nid="10.210.32.0@o2ib3"
773 local end_nid="10.210.47.255@o2ib3"
774 local start_nid_found
777 do_facet mgs $LCTL nodemap_del $nm 2>/dev/null
778 do_facet mgs $LCTL nodemap_add $nm || error "Add $nm failed"
779 do_facet mgs $LCTL nodemap_add_range --name $nm --range $nid_range ||
780 error "Add range $nid_range to $nm failed"
782 start_nid_found=$(do_facet mgs $LCTL get_param nodemap.$nm.* |
783 awk -F '[,: ]' /start_nid/'{ print $9 }')
784 [ "$start_nid" == "$start_nid_found" ] ||
785 error "start_nid: $start_nid_found != $start_nid"
786 end_nid_found=$(do_facet mgs $LCTL get_param nodemap.$nm.* |
787 awk -F '[,: ]' /end_nid/'{ print $13 }')
788 [ "$end_nid" == "$end_nid_found" ] ||
789 error "end_nid: $end_nid_found != $end_nid"
791 do_facet mgs $LCTL nodemap_del $nm || error "Delete $nm failed"
794 run_test 10c "verfify contiguous range support"
796 test_10d() { #LU-8913
797 [ $(lustre_version_code mgs) -lt $(version_code 2.10.59) ] &&
798 skip "Need MGS >= 2.10.59" && return
800 local nm="nodemap_lu8913"
801 local nid_range="*@o2ib3"
802 local start_nid="0.0.0.0@o2ib3"
803 local end_nid="255.255.255.255@o2ib3"
804 local start_nid_found
807 do_facet mgs $LCTL nodemap_del $nm 2>/dev/null
808 do_facet mgs $LCTL nodemap_add $nm || error "Add $nm failed"
809 do_facet mgs $LCTL nodemap_add_range --name $nm --range $nid_range ||
810 error "Add range $nid_range to $nm failed"
812 start_nid_found=$(do_facet mgs $LCTL get_param nodemap.$nm.* |
813 awk -F '[,: ]' /start_nid/'{ print $9 }')
814 [ "$start_nid" == "$start_nid_found" ] ||
815 error "start_nid: $start_nid_found != $start_nid"
816 end_nid_found=$(do_facet mgs $LCTL get_param nodemap.$nm.* |
817 awk -F '[,: ]' /end_nid/'{ print $13 }')
818 [ "$end_nid" == "$end_nid_found" ] ||
819 error "end_nid: $end_nid_found != $end_nid"
821 do_facet mgs $LCTL nodemap_del $nm || error "Delete $nm failed"
824 run_test 10d "verfify nodemap range format '*@<net>' support"
829 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
830 [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
831 skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
837 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
840 for ((i = 0; i < NODEMAP_COUNT; i++)); do
841 if ! modify_flags ${HOSTNAME_CHECKSUM}_${i}; then
845 [[ $rc != 0 ]] && error "nodemap_modify with $rc" && return 2
850 [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 3
854 run_test 11 "nodemap modify"
859 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
860 [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
861 skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
867 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
870 for ((i = 0; i < NODEMAP_COUNT; i++)); do
871 if ! squash_id ${HOSTNAME_CHECKSUM}_${i} 88 0; then
875 [[ $rc != 0 ]] && error "nodemap squash_uid with $rc" && return 2
878 for ((i = 0; i < NODEMAP_COUNT; i++)); do
879 if ! squash_id ${HOSTNAME_CHECKSUM}_${i} 88 1; then
883 [[ $rc != 0 ]] && error "nodemap squash_gid with $rc" && return 3
888 [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4
892 run_test 12 "nodemap set squash ids"
897 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
898 [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
899 skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
905 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
908 for ((i = 0; i < NODEMAP_COUNT; i++)); do
909 if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
913 [[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2
916 for ((i = 0; i < NODEMAP_COUNT; i++)); do
917 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
918 for k in $NODEMAP_IPADDR_LIST; do
919 if ! test_nid $SUBNET_CHECKSUM.$i.$j.$k \
920 ${HOSTNAME_CHECKSUM}_${i}; then
926 [[ $rc != 0 ]] && error "nodemap_test_nid failed with $rc" && return 3
931 [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4
935 run_test 13 "test nids"
940 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
941 [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
942 skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
948 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
951 for ((i = 0; i < NODEMAP_COUNT; i++)); do
952 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
953 for k in $NODEMAP_IPADDR_LIST; do
954 if ! test_nid $SUBNET_CHECKSUM.$i.$j.$k \
961 [[ $rc != 0 ]] && error "nodemap_test_nid failed with $rc" && return 3
966 [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4
970 run_test 14 "test default nodemap nid lookup"
975 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
976 [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
977 skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
983 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
986 for ((i = 0; i < NODEMAP_COUNT; i++)); do
987 if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
991 [[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2
996 [[ $rc != 0 ]] && error "nodemap_add_idmap failed with $rc" && return 3
1001 [[ $rc != 0 ]] && error "nodemap_test_id failed with $rc" && return 4
1006 [[ $rc != 0 ]] && error "update_idmaps failed with $rc" && return 5
1011 [[ $rc != 0 ]] && error "nodemap_del_idmap failed with $rc" && return 6
1016 [[ $rc != 0 ]] && error "nodemap_delete failed with $rc" && return 7
1020 run_test 15 "test id mapping"
1023 local nodemap_name=$1
1028 local is_active=$(do_facet mgs $LCTL get_param -n nodemap.active)
1029 local max_retries=20
1033 local mgs_ip=$(host_nids_address $mgs_HOST $NETTYPE | cut -d' ' -f1)
1036 if [ "$nodemap_name" == "active" ]; then
1038 elif [ -z "$key" ]; then
1039 proc_param=${nodemap_name}
1041 proc_param="${nodemap_name}.${key}"
1043 (( is_active == 0 )) && [ "$proc_param" != "active" ] && return
1045 if [ -z "$value" ]; then
1046 out1=$(do_facet mgs $LCTL get_param $opt nodemap.${proc_param})
1047 echo "On MGS ${mgs_ip}, ${proc_param} = $out1"
1052 # wait up to 10 seconds for other servers to sync with mgs
1053 for i in $(seq 1 10); do
1054 for node in $(all_server_nodes); do
1055 local node_ip=$(host_nids_address $node $NETTYPE |
1059 if [ -z "$value" ]; then
1060 [ $node_ip == $mgs_ip ] && continue
1063 out2=$(do_node $node_ip $LCTL get_param $opt \
1064 nodemap.$proc_param 2>/dev/null)
1065 echo "On $node ${node_ip}, ${proc_param} = $out2"
1066 [ "$out1" != "$out2" ] && is_sync=false && break
echo "OTHER - IP: $node_ip"
1076 error "mgs and $nodemap_name ${key} mismatch, $i attempts"
1078 echo "waited $((i - 1)) seconds for sync"
1081 create_fops_nodemaps() {
1084 for client in $clients; do
1085 local client_ip=$(host_nids_address $client $NETTYPE)
1086 local client_nid=$(h2nettype $client_ip)
1087 do_facet mgs $LCTL nodemap_add c${i} || return 1
1088 do_facet mgs $LCTL nodemap_add_range \
1089 --name c${i} --range $client_nid || return 1
1090 for map in ${FOPS_IDMAPS[i]}; do
1091 do_facet mgs $LCTL nodemap_add_idmap --name c${i} \
1092 --idtype uid --idmap ${map} || return 1
1093 do_facet mgs $LCTL nodemap_add_idmap --name c${i} \
1094 --idtype gid --idmap ${map} || return 1
1097 wait_nm_sync c$i idmap
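# delete_fops_nodemaps: remove the per-client c<i> nodemaps created above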
1104 delete_fops_nodemaps() {
1107 for client in $clients; do
1108 do_facet mgs $LCTL nodemap_del c${i} || return 1
1116 if [ $MDSCOUNT -le 1 ]; then
1117 do_node ${clients_arr[0]} mkdir -p $DIR/$tdir
1119 # round-robin MDTs to test DNE nodemap support
1120 [ ! -d $DIR ] && do_node ${clients_arr[0]} mkdir -p $DIR
1121 do_node ${clients_arr[0]} $LFS setdirstripe -c 1 -i \
1122 $((fops_mds_index % MDSCOUNT)) $DIR/$tdir
1123 ((fops_mds_index++))
1127 # acl test directory needs to be initialized on a privileged client
1129 local admin=$(do_facet mgs $LCTL get_param -n nodemap.c0.admin_nodemap)
1130 local trust=$(do_facet mgs $LCTL get_param -n \
1131 nodemap.c0.trusted_nodemap)
1133 do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
1134 do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
1136 wait_nm_sync c0 admin_nodemap
1137 wait_nm_sync c0 trusted_nodemap
1139 do_node ${clients_arr[0]} rm -rf $DIR/$tdir
1141 do_node ${clients_arr[0]} chown $user $DIR/$tdir
1143 do_facet mgs $LCTL nodemap_modify --name c0 \
1144 --property admin --value $admin
1145 do_facet mgs $LCTL nodemap_modify --name c0 \
1146 --property trusted --value $trust
1148 # flush MDT locks to make sure they are reacquired before test
1149 do_node ${clients_arr[0]} $LCTL set_param \
1150 ldlm.namespaces.$FSNAME-MDT*.lru_size=clear
1152 wait_nm_sync c0 admin_nodemap
1153 wait_nm_sync c0 trusted_nodemap
1156 # fileset test directory needs to be initialized on a privileged client
1157 fileset_test_setup() {
1159 local admin=$(do_facet mgs $LCTL get_param -n \
1160 nodemap.${nm}.admin_nodemap)
1161 local trust=$(do_facet mgs $LCTL get_param -n \
1162 nodemap.${nm}.trusted_nodemap)
1164 do_facet mgs $LCTL nodemap_modify --name $nm --property admin --value 1
1165 do_facet mgs $LCTL nodemap_modify --name $nm --property trusted \
1168 wait_nm_sync $nm admin_nodemap
1169 wait_nm_sync $nm trusted_nodemap
1171 # create directory and populate it for subdir mount
1172 do_node ${clients_arr[0]} mkdir $MOUNT/$subdir ||
1173 error "unable to create dir $MOUNT/$subdir"
1174 do_node ${clients_arr[0]} touch $MOUNT/$subdir/this_is_$subdir ||
1175 error "unable to create file $MOUNT/$subdir/this_is_$subdir"
1176 do_node ${clients_arr[0]} mkdir $MOUNT/$subdir/$subsubdir ||
1177 error "unable to create dir $MOUNT/$subdir/$subsubdir"
1178 do_node ${clients_arr[0]} touch \
1179 $MOUNT/$subdir/$subsubdir/this_is_$subsubdir ||
1180 error "unable to create file \
1181 $MOUNT/$subdir/$subsubdir/this_is_$subsubdir"
1183 do_facet mgs $LCTL nodemap_modify --name $nm \
1184 --property admin --value $admin
1185 do_facet mgs $LCTL nodemap_modify --name $nm \
1186 --property trusted --value $trust
1188 # flush MDT locks to make sure they are reacquired before test
1189 do_node ${clients_arr[0]} $LCTL set_param \
1190 ldlm.namespaces.$FSNAME-MDT*.lru_size=clear
1192 wait_nm_sync $nm admin_nodemap
1193 wait_nm_sync $nm trusted_nodemap
# fileset test directory needs to be cleaned up on a privileged client
1197 fileset_test_cleanup() {
1199 local admin=$(do_facet mgs $LCTL get_param -n \
1200 nodemap.${nm}.admin_nodemap)
1201 local trust=$(do_facet mgs $LCTL get_param -n \
1202 nodemap.${nm}.trusted_nodemap)
1204 do_facet mgs $LCTL nodemap_modify --name $nm --property admin --value 1
1205 do_facet mgs $LCTL nodemap_modify --name $nm --property trusted \
1208 wait_nm_sync $nm admin_nodemap
1209 wait_nm_sync $nm trusted_nodemap
1211 # cleanup directory created for subdir mount
1212 do_node ${clients_arr[0]} rm -rf $MOUNT/$subdir ||
1213 error "unable to remove dir $MOUNT/$subdir"
1215 do_facet mgs $LCTL nodemap_modify --name $nm \
1216 --property admin --value $admin
1217 do_facet mgs $LCTL nodemap_modify --name $nm \
1218 --property trusted --value $trust
1220 # flush MDT locks to make sure they are reacquired before test
1221 do_node ${clients_arr[0]} $LCTL set_param \
1222 ldlm.namespaces.$FSNAME-MDT*.lru_size=clear
1224 wait_nm_sync $nm admin_nodemap
1225 wait_nm_sync $nm trusted_nodemap
1228 do_create_delete() {
1231 local testfile=$DIR/$tdir/$tfile
1235 if $run_u touch $testfile >& /dev/null; then
1237 $run_u rm $testfile && d=1
1241 local expected=$(get_cr_del_expected $key)
1242 [ "$res" != "$expected" ] &&
1243 error "test $key, wanted $expected, got $res" && rc=$((rc + 1))
1247 nodemap_check_quota() {
1249 $run_u lfs quota -q $DIR | awk '{ print $2; exit; }'
1252 do_fops_quota_test() {
1254 # fuzz quota used to account for possible indirect blocks, etc
1255 local quota_fuzz=$(fs_log_size)
1256 local qused_orig=$(nodemap_check_quota "$run_u")
1257 local qused_high=$((qused_orig + quota_fuzz))
1258 local qused_low=$((qused_orig - quota_fuzz))
1259 local testfile=$DIR/$tdir/$tfile
1260 $run_u dd if=/dev/zero of=$testfile oflag=sync bs=1M count=1 \
1261 >& /dev/null || error "unable to write quota test file"
1262 sync; sync_all_data || true
1264 local qused_new=$(nodemap_check_quota "$run_u")
1265 [ $((qused_new)) -lt $((qused_low + 1024)) -o \
1266 $((qused_new)) -gt $((qused_high + 1024)) ] &&
1267 error "$qused_new != $qused_orig + 1M after write, " \
1268 "fuzz is $quota_fuzz"
1269 $run_u rm $testfile || error "unable to remove quota test file"
1270 wait_delete_completed_mds
1272 qused_new=$(nodemap_check_quota "$run_u")
1273 [ $((qused_new)) -lt $((qused_low)) \
1274 -o $((qused_new)) -gt $((qused_high)) ] &&
1275 error "quota not reclaimed, expect $qused_orig, " \
1276 "got $qused_new, fuzz $quota_fuzz"
1279 get_fops_mapped_user() {
1282 for ((i=0; i < ${#FOPS_IDMAPS[@]}; i++)); do
1283 for map in ${FOPS_IDMAPS[i]}; do
1284 if [ $(cut -d: -f1 <<< "$map") == $cli_user ]; then
1285 cut -d: -f2 <<< "$map"
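# get_cr_del_expected <mapmode:mds_user:cluster:cli_user:mode>: decide
# whether a create/delete by cli_user should succeed, given the mapping
# mode and the directory mode bits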
1293 get_cr_del_expected() {
1295 IFS=":" read -a key <<< "$1"
1296 local mapmode="${key[0]}"
1297 local mds_user="${key[1]}"
1298 local cluster="${key[2]}"
1299 local cli_user="${key[3]}"
1300 local mode="0${key[4]}"
1307 [[ $mapmode == *mapped* ]] && mapped=1
1308 # only c1 is mapped in these test cases
1309 [[ $mapmode == mapped_trusted* ]] && [ "$cluster" == "c0" ] && mapped=0
1310 [[ $mapmode == *noadmin* ]] && noadmin=1
1312 # o+wx works as long as the user isn't mapped
1313 if [ $((mode & 3)) -eq 3 ]; then
1317 # if client user is root, check if root is squashed
1318 if [ "$cli_user" == "0" ]; then
# squashed root succeeds only if the 'other' bits allow access
1322 1) [ "$other" == "1" ] && echo $SUCCESS
1323 [ "$other" == "0" ] && echo $FAILURE;;
1327 if [ "$mapped" == "0" ]; then
1328 [ "$other" == "1" ] && echo $SUCCESS
1329 [ "$other" == "0" ] && echo $FAILURE
1333 # if mapped user is mds user, check for u+wx
1334 mapped_user=$(get_fops_mapped_user $cli_user)
1335 [ "$mapped_user" == "-1" ] &&
1336 error "unable to find mapping for client user $cli_user"
1338 if [ "$mapped_user" == "$mds_user" -a \
1339 $(((mode & 0300) == 0300)) -eq 1 ]; then
1343 if [ "$mapped_user" != "$mds_user" -a "$other" == "1" ]; then
1350 test_fops_admin_cli_i=""
1351 test_fops_chmod_dir() {
1352 local current_cli_i=$1
1354 local dir_to_chmod=$3
1355 local new_admin_cli_i=""
1357 # do we need to set up a new admin client?
1358 [ "$current_cli_i" == "0" ] && [ "$test_fops_admin_cli_i" != "1" ] &&
1360 [ "$current_cli_i" != "0" ] && [ "$test_fops_admin_cli_i" != "0" ] &&
# if there is only one client and it is non-admin, admin must be toggled every time
1364 if [ "$num_clients" == "1" ]; then
1365 test_fops_admin_client=$clients
1366 test_fops_admin_val=$(do_facet mgs $LCTL get_param -n \
1367 nodemap.c0.admin_nodemap)
1368 if [ "$test_fops_admin_val" != "1" ]; then
1369 do_facet mgs $LCTL nodemap_modify \
1373 wait_nm_sync c0 admin_nodemap
1375 elif [ "$new_admin_cli_i" != "" ]; then
1376 # restore admin val to old admin client
1377 if [ "$test_fops_admin_cli_i" != "" ] &&
1378 [ "$test_fops_admin_val" != "1" ]; then
1379 do_facet mgs $LCTL nodemap_modify \
1380 --name c${test_fops_admin_cli_i} \
1382 --value $test_fops_admin_val
1383 wait_nm_sync c${test_fops_admin_cli_i} admin_nodemap
1386 test_fops_admin_cli_i=$new_admin_cli_i
1387 test_fops_admin_client=${clients_arr[$new_admin_cli_i]}
1388 test_fops_admin_val=$(do_facet mgs $LCTL get_param -n \
1389 nodemap.c${new_admin_cli_i}.admin_nodemap)
1391 if [ "$test_fops_admin_val" != "1" ]; then
1392 do_facet mgs $LCTL nodemap_modify \
1393 --name c${new_admin_cli_i} \
1396 wait_nm_sync c${new_admin_cli_i} admin_nodemap
1400 do_node $test_fops_admin_client chmod $perm_bits $DIR/$tdir || return 1
1402 # remove admin for single client if originally non-admin
1403 if [ "$num_clients" == "1" ] && [ "$test_fops_admin_val" != "1" ]; then
1404 do_facet mgs $LCTL nodemap_modify --name c0 --property admin \
1406 wait_nm_sync c0 admin_nodemap
1414 local single_client="$2"
1415 local client_user_list=([0]="0 $((IDBASE+3)) $((IDBASE+4))"
1416 [1]="0 $((IDBASE+5)) $((IDBASE+6))")
1419 local perm_bit_list="0 3 $((0300)) $((0303))"
1420 # SLOW tests 000-007, 010-070, 100-700 (octal modes)
1421 [ "$SLOW" == "yes" ] &&
1422 perm_bit_list="0 $(seq 1 7) $(seq 8 8 63) $(seq 64 64 511) \
1425 # step through mds users. -1 means root
1426 for mds_i in -1 0 1 2; do
1427 local user=$((mds_i + IDBASE))
1431 [ "$mds_i" == "-1" ] && user=0
1433 echo mkdir -p $DIR/$tdir
1436 for client in $clients; do
1438 for u in ${client_user_list[$cli_i]}; do
1439 local run_u="do_node $client \
1440 $RUNAS_CMD -u$u -g$u -G$u"
1441 for perm_bits in $perm_bit_list; do
1442 local mode=$(printf %03o $perm_bits)
1444 key="$mapmode:$user:c$cli_i:$u:$mode"
1445 test_fops_chmod_dir $cli_i $mode \
error "cannot chmod $key"
1448 do_create_delete "$run_u" "$key"
1452 test_fops_chmod_dir $cli_i 777 $DIR/$tdir ||
error "cannot chmod $key"
1454 do_fops_quota_test "$run_u"
1457 cli_i=$((cli_i + 1))
1458 [ "$single_client" == "1" ] && break
1465 nodemap_version_check () {
1466 remote_mgs_nodsh && skip "remote MGS with nodsh" && return 1
1467 [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
1468 skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
1473 nodemap_test_setup() {
1475 local active_nodemap=1
1477 [ "$1" == "0" ] && active_nodemap=0
1479 do_nodes $(comma_list $(all_mdts_nodes)) \
1480 $LCTL set_param mdt.*.identity_upcall=NONE
1483 create_fops_nodemaps
1485 [[ $rc != 0 ]] && error "adding fops nodemaps failed $rc"
1487 do_facet mgs $LCTL nodemap_activate $active_nodemap
1490 do_facet mgs $LCTL nodemap_modify --name default \
1491 --property admin --value 1
1492 wait_nm_sync default admin_nodemap
1493 do_facet mgs $LCTL nodemap_modify --name default \
1494 --property trusted --value 1
1495 wait_nm_sync default trusted_nodemap
1498 nodemap_test_cleanup() {
1500 delete_fops_nodemaps
1502 [[ $rc != 0 ]] && error "removing fops nodemaps failed $rc"
1504 do_facet mgs $LCTL nodemap_modify --name default \
1505 --property admin --value 0
1506 wait_nm_sync default admin_nodemap
1507 do_facet mgs $LCTL nodemap_modify --name default \
1508 --property trusted --value 0
1509 wait_nm_sync default trusted_nodemap
1511 do_facet mgs $LCTL nodemap_activate 0
1512 wait_nm_sync active 0
1514 export SK_UNIQUE_NM=false
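# nodemap_clients_admin_trusted <admin> <trusted>: apply the admin and
# trusted properties to each client nodemap and wait for the last one to
# sync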
1518 nodemap_clients_admin_trusted() {
1522 for client in $clients; do
do_facet mgs $LCTL nodemap_modify --name c${i} \
--property admin --value $admin
do_facet mgs $LCTL nodemap_modify --name c${i} \
--property trusted --value $tr
1529 wait_nm_sync c$((i - 1)) admin_nodemap
1530 wait_nm_sync c$((i - 1)) trusted_nodemap
1534 nodemap_version_check || return 0
1535 nodemap_test_setup 0
1537 trap nodemap_test_cleanup EXIT
1539 nodemap_test_cleanup
1541 run_test 16 "test nodemap all_off fileops"
1545 [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.55) ]; then
1546 skip "Need MDS >= 2.11.55"
1549 nodemap_version_check || return 0
1552 trap nodemap_test_cleanup EXIT
1553 nodemap_clients_admin_trusted 0 1
1554 test_fops trusted_noadmin 1
1555 nodemap_test_cleanup
1557 run_test 17 "test nodemap trusted_noadmin fileops"
1561 [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.55) ]; then
1562 skip "Need MDS >= 2.11.55"
1565 nodemap_version_check || return 0
1568 trap nodemap_test_cleanup EXIT
1569 nodemap_clients_admin_trusted 0 0
1570 test_fops mapped_noadmin 1
1571 nodemap_test_cleanup
1573 run_test 18 "test nodemap mapped_noadmin fileops"
1577 [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.55) ]; then
1578 skip "Need MDS >= 2.11.55"
1581 nodemap_version_check || return 0
1584 trap nodemap_test_cleanup EXIT
1585 nodemap_clients_admin_trusted 1 1
1586 test_fops trusted_admin 1
1587 nodemap_test_cleanup
1589 run_test 19 "test nodemap trusted_admin fileops"
1593 [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.55) ]; then
1594 skip "Need MDS >= 2.11.55"
1597 nodemap_version_check || return 0
1600 trap nodemap_test_cleanup EXIT
1601 nodemap_clients_admin_trusted 1 0
1602 test_fops mapped_admin 1
1603 nodemap_test_cleanup
1605 run_test 20 "test nodemap mapped_admin fileops"
1609 [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.55) ]; then
1610 skip "Need MDS >= 2.11.55"
1613 nodemap_version_check || return 0
1616 trap nodemap_test_cleanup EXIT
1619 for client in $clients; do
1620 do_facet mgs $LCTL nodemap_modify --name c${i} \
1621 --property admin --value 0
1622 do_facet mgs $LCTL nodemap_modify --name c${i} \
1623 --property trusted --value $x
1627 wait_nm_sync c$((i - 1)) trusted_nodemap
1629 test_fops mapped_trusted_noadmin
1630 nodemap_test_cleanup
1632 run_test 21 "test nodemap mapped_trusted_noadmin fileops"
1636 [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.55) ]; then
1637 skip "Need MDS >= 2.11.55"
1640 nodemap_version_check || return 0
1643 trap nodemap_test_cleanup EXIT
1646 for client in $clients; do
1647 do_facet mgs $LCTL nodemap_modify --name c${i} \
1648 --property admin --value 1
1649 do_facet mgs $LCTL nodemap_modify --name c${i} \
1650 --property trusted --value $x
1654 wait_nm_sync c$((i - 1)) trusted_nodemap
1656 test_fops mapped_trusted_admin
1657 nodemap_test_cleanup
1659 run_test 22 "test nodemap mapped_trusted_admin fileops"
1661 # acl test directory needs to be initialized on a privileged client
1662 nodemap_acl_test_setup() {
1663 local admin=$(do_facet mgs $LCTL get_param -n \
1664 nodemap.c0.admin_nodemap)
1665 local trust=$(do_facet mgs $LCTL get_param -n \
1666 nodemap.c0.trusted_nodemap)
1668 do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
1669 do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
1671 wait_nm_sync c0 admin_nodemap
1672 wait_nm_sync c0 trusted_nodemap
1674 do_node ${clients_arr[0]} rm -rf $DIR/$tdir
1676 do_node ${clients_arr[0]} chmod a+rwx $DIR/$tdir ||
error "unable to chmod a+rwx test dir $DIR/$tdir"
1679 do_facet mgs $LCTL nodemap_modify --name c0 \
1680 --property admin --value $admin
1681 do_facet mgs $LCTL nodemap_modify --name c0 \
1682 --property trusted --value $trust
1684 wait_nm_sync c0 trusted_nodemap
1687 # returns 0 if the number of ACLs does not change on the second (mapped) client
1688 # after being set on the first client
1689 nodemap_acl_test() {
1691 local set_client="$2"
1692 local get_client="$3"
1693 local check_setfacl="$4"
1694 local setfacl_error=0
1695 local testfile=$DIR/$tdir/$tfile
1696 local RUNAS_USER="$RUNAS_CMD -u $user"
1698 local acl_count_post=0
1700 nodemap_acl_test_setup
1703 do_node $set_client $RUNAS_USER touch $testfile
1705 # ACL masks aren't filtered by nodemap code, so we ignore them
1706 acl_count=$(do_node $get_client getfacl $testfile | grep -v mask |
1708 do_node $set_client $RUNAS_USER setfacl -m $user:rwx $testfile ||
# if check_setfacl is set to 1, setfacl is expected to fail
1712 if [ "$check_setfacl" == "1" ]; then
1713 [ "$setfacl_error" != "1" ] && return 1
1716 [ "$setfacl_error" == "1" ] && echo "WARNING: unable to setfacl"
1718 acl_count_post=$(do_node $get_client getfacl $testfile | grep -v mask |
1720 [ $acl_count -eq $acl_count_post ] && return 0
1725 [ $num_clients -lt 2 ] && skip "Need 2 clients at least" && return
1726 nodemap_version_check || return 0
1729 trap nodemap_test_cleanup EXIT
1730 # 1 trusted cluster, 1 mapped cluster
1731 local unmapped_fs=$((IDBASE+0))
1732 local unmapped_c1=$((IDBASE+5))
1733 local mapped_fs=$((IDBASE+2))
1734 local mapped_c0=$((IDBASE+4))
1735 local mapped_c1=$((IDBASE+6))
1737 do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
1738 do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
1740 do_facet mgs $LCTL nodemap_modify --name c1 --property admin --value 0
1741 do_facet mgs $LCTL nodemap_modify --name c1 --property trusted --value 0
1743 wait_nm_sync c1 trusted_nodemap
1745 # setfacl on trusted cluster to unmapped user, verify it's not seen
1746 nodemap_acl_test $unmapped_fs ${clients_arr[0]} ${clients_arr[1]} ||
1747 error "acl count (1)"
1749 # setfacl on trusted cluster to mapped user, verify it's seen
1750 nodemap_acl_test $mapped_fs ${clients_arr[0]} ${clients_arr[1]} &&
1751 error "acl count (2)"
1753 # setfacl on mapped cluster to mapped user, verify it's seen
1754 nodemap_acl_test $mapped_c1 ${clients_arr[1]} ${clients_arr[0]} &&
1755 error "acl count (3)"
1757 # setfacl on mapped cluster to unmapped user, verify error
1758 nodemap_acl_test $unmapped_fs ${clients_arr[1]} ${clients_arr[0]} 1 ||
1759 error "acl count (4)"
1762 do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 0
1763 do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 0
1765 wait_nm_sync c0 trusted_nodemap
1767 # setfacl to mapped user on c1, also mapped to c0, verify it's seen
1768 nodemap_acl_test $mapped_c1 ${clients_arr[1]} ${clients_arr[0]} &&
1769 error "acl count (5)"
1771 # setfacl to mapped user on c1, not mapped to c0, verify not seen
1772 nodemap_acl_test $unmapped_c1 ${clients_arr[1]} ${clients_arr[0]} ||
1773 error "acl count (6)"
1775 nodemap_test_cleanup
1777 run_test 23a "test mapped regular ACLs"
1779 test_23b() { #LU-9929
1780 [ $num_clients -lt 2 ] && skip "Need 2 clients at least" && return
1781 [ $(lustre_version_code mgs) -lt $(version_code 2.10.53) ] &&
1782 skip "Need MGS >= 2.10.53" && return
1784 export SK_UNIQUE_NM=true
1786 trap nodemap_test_cleanup EXIT
1788 local testdir=$DIR/$tdir
1789 local fs_id=$((IDBASE+10))
1794 do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
1795 wait_nm_sync c0 admin_nodemap
1796 do_facet mgs $LCTL nodemap_modify --name c1 --property admin --value 1
1797 wait_nm_sync c1 admin_nodemap
1798 do_facet mgs $LCTL nodemap_modify --name c1 --property trusted --value 1
1799 wait_nm_sync c1 trusted_nodemap
1801 # Add idmap $ID0:$fs_id (500:60010)
1802 do_facet mgs $LCTL nodemap_add_idmap --name c0 --idtype gid \
1803 --idmap $ID0:$fs_id ||
1804 error "add idmap $ID0:$fs_id to nodemap c0 failed"
1805 wait_nm_sync c0 idmap
1807 # set/getfacl default acl on client0 (unmapped gid=500)
1810 # Here, USER0=$(getent passwd | grep :$ID0:$ID0: | cut -d: -f1)
1811 setfacl -R -d -m group:$USER0:rwx $testdir ||
1812 error "setfacl $testdir on ${clients_arr[0]} failed"
1813 unmapped_id=$(getfacl $testdir | grep -E "default:group:.*:rwx" |
1814 awk -F: '{print $3}')
1815 [ "$unmapped_id" = "$USER0" ] ||
1816 error "gid=$ID0 was not unmapped correctly on ${clients_arr[0]}"
1818 # getfacl default acl on client2 (mapped gid=60010)
1819 mapped_id=$(do_node ${clients_arr[1]} getfacl $testdir |
1820 grep -E "default:group:.*:rwx" | awk -F: '{print $3}')
1821 fs_user=$(do_node ${clients_arr[1]} getent passwd |
1822 grep :$fs_id:$fs_id: | cut -d: -f1)
1823 [ -z "$fs_user" ] && fs_user=$fs_id
1824 [ $mapped_id -eq $fs_id -o "$mapped_id" = "$fs_user" ] ||
1825 error "Should return gid=$fs_id or $fs_user on client2"
1828 nodemap_test_cleanup
1829 export SK_UNIQUE_NM=false
1831 run_test 23b "test mapped default ACLs"
1836 trap nodemap_test_cleanup EXIT
1837 do_nodes $(comma_list $(all_server_nodes)) $LCTL get_param -R nodemap
1839 nodemap_test_cleanup
1841 run_test 24 "check nodemap proc files for LBUGs and Oopses"
1844 local tmpfile=$(mktemp)
1845 local tmpfile2=$(mktemp)
1846 local tmpfile3=$(mktemp)
1847 local tmpfile4=$(mktemp)
1851 nodemap_version_check || return 0
1853 # stop clients for this test
1854 zconf_umount_clients $CLIENTS $MOUNT ||
1855 error "unable to umount clients $CLIENTS"
1857 export SK_UNIQUE_NM=true
1860 # enable trusted/admin for setquota call in cleanup_and_setup_lustre()
1862 for client in $clients; do
1863 do_facet mgs $LCTL nodemap_modify --name c${i} \
1864 --property admin --value 1
1865 do_facet mgs $LCTL nodemap_modify --name c${i} \
1866 --property trusted --value 1
1869 wait_nm_sync c$((i - 1)) trusted_nodemap
1871 trap nodemap_test_cleanup EXIT
1873 # create a new, empty nodemap, and add fileset info to it
do_facet mgs $LCTL nodemap_add test25 ||
error "unable to create nodemap test25"
do_facet mgs $LCTL set_param -P nodemap.test25.fileset=/$subdir ||
error "unable to add fileset info to nodemap test25"
1879 wait_nm_sync test25 id
1881 do_facet mgs $LCTL nodemap_info > $tmpfile
1882 do_facet mds $LCTL nodemap_info > $tmpfile2
1884 if ! $SHARED_KEY; then
1885 # will conflict with SK's nodemaps
1886 cleanup_and_setup_lustre
1888 # stop clients for this test
1889 zconf_umount_clients $CLIENTS $MOUNT ||
1890 error "unable to umount clients $CLIENTS"
1892 do_facet mgs $LCTL nodemap_info > $tmpfile3
1893 diff -q $tmpfile3 $tmpfile >& /dev/null ||
1894 error "nodemap_info diff on MGS after remount"
1896 do_facet mds $LCTL nodemap_info > $tmpfile4
1897 diff -q $tmpfile4 $tmpfile2 >& /dev/null ||
1898 error "nodemap_info diff on MDS after remount"
1901 do_facet mgs $LCTL nodemap_del test25 ||
1902 error "cannot delete nodemap test25 from config"
1903 nodemap_test_cleanup
1904 # restart clients previously stopped
1905 zconf_mount_clients $CLIENTS $MOUNT ||
1906 error "unable to mount clients $CLIENTS"
rm -f $tmpfile $tmpfile2 $tmpfile3 $tmpfile4
1909 export SK_UNIQUE_NM=false
1911 run_test 25 "test save and reload nodemap config"
1914 nodemap_version_check || return 0
1918 do_facet mgs "seq -f 'c%g' $large_i | xargs -n1 $LCTL nodemap_add"
1919 wait_nm_sync c$large_i admin_nodemap
1921 do_facet mgs "seq -f 'c%g' $large_i | xargs -n1 $LCTL nodemap_del"
1922 wait_nm_sync c$large_i admin_nodemap
1924 run_test 26 "test transferring very large nodemap"
1926 nodemap_exercise_fileset() {
1931 if [ "$nm" == "default" ]; then
1932 do_facet mgs $LCTL nodemap_activate 1
1937 if $SHARED_KEY; then
1938 export SK_UNIQUE_NM=true
1940 # will conflict with SK's nodemaps
1941 trap "fileset_test_cleanup $nm" EXIT
1943 fileset_test_setup "$nm"
1945 # add fileset info to $nm nodemap
1946 if ! combined_mgs_mds; then
1947 do_facet mgs $LCTL set_param nodemap.${nm}.fileset=/$subdir ||
1948 error "unable to add fileset info to $nm nodemap on MGS"
1950 do_facet mgs $LCTL set_param -P nodemap.${nm}.fileset=/$subdir ||
1951 error "unable to add fileset info to $nm nodemap for servers"
1952 wait_nm_sync $nm fileset "nodemap.${nm}.fileset=/$subdir"
1955 zconf_umount_clients ${clients_arr[0]} $MOUNT ||
1956 error "unable to umount client ${clients_arr[0]}"
1957 # set some generic fileset to trigger SSK code
1959 zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
1960 error "unable to remount client ${clients_arr[0]}"
1963 # test mount point content
1964 do_node ${clients_arr[0]} test -f $MOUNT/this_is_$subdir ||
1965 error "fileset not taken into account"
1967 # re-mount client with sub-subdir
1968 zconf_umount_clients ${clients_arr[0]} $MOUNT ||
1969 error "unable to umount client ${clients_arr[0]}"
1970 export FILESET=/$subsubdir
1971 zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
1972 error "unable to remount client ${clients_arr[0]}"
1975 # test mount point content
1976 do_node ${clients_arr[0]} test -f $MOUNT/this_is_$subsubdir ||
1977 error "subdir of fileset not taken into account"
1979 # remove fileset info from nodemap
1980 do_facet mgs $LCTL nodemap_set_fileset --name $nm --fileset clear ||
1981 error "unable to delete fileset info on $nm nodemap"
1982 wait_update_facet mgs "$LCTL get_param nodemap.${nm}.fileset" \
1983 "nodemap.${nm}.fileset=" ||
1984 error "fileset info still not cleared on $nm nodemap"
1985 do_facet mgs $LCTL set_param -P nodemap.${nm}.fileset=clear ||
1986 error "unable to reset fileset info on $nm nodemap"
1987 wait_nm_sync $nm fileset "nodemap.${nm}.fileset="
1990 zconf_umount_clients ${clients_arr[0]} $MOUNT ||
1991 error "unable to umount client ${clients_arr[0]}"
1992 zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
1993 error "unable to remount client ${clients_arr[0]}"
1995 # test mount point content
1996 if ! $(do_node ${clients_arr[0]} test -d $MOUNT/$subdir); then
1998 error "fileset not cleared on $nm nodemap"
2001 # back to non-nodemap setup
2002 if $SHARED_KEY; then
2003 export SK_UNIQUE_NM=false
2004 zconf_umount_clients ${clients_arr[0]} $MOUNT ||
2005 error "unable to umount client ${clients_arr[0]}"
2007 fileset_test_cleanup "$nm"
2008 if [ "$nm" == "default" ]; then
2009 do_facet mgs $LCTL nodemap_activate 0
2010 wait_nm_sync active 0
2012 export SK_UNIQUE_NM=false
2014 nodemap_test_cleanup
2016 if $SHARED_KEY; then
2017 zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
2018 error "unable to remount client ${clients_arr[0]}"
2023 [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.50) ] &&
2024 skip "Need MDS >= 2.11.50" && return
2026 for nm in "default" "c0"; do
2027 local subdir="subdir_${nm}"
2028 local subsubdir="subsubdir_${nm}"
2030 if [ "$nm" == "default" ] && [ "$SHARED_KEY" == "true" ]; then
2031 echo "Skipping nodemap $nm with SHARED_KEY";
2035 echo "Exercising fileset for nodemap $nm"
2036 nodemap_exercise_fileset "$nm"
2039 run_test 27a "test fileset in various nodemaps"
2041 test_27b() { #LU-10703
2042 [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.50) ] &&
2043 skip "Need MDS >= 2.11.50" && return
2044 [[ $MDSCOUNT -lt 2 ]] && skip "needs >= 2 MDTs" && return
2047 trap nodemap_test_cleanup EXIT
2049 # Add the nodemaps and set their filesets
2050 for i in $(seq 1 $MDSCOUNT); do
2051 do_facet mgs $LCTL nodemap_del nm$i 2>/dev/null
2052 do_facet mgs $LCTL nodemap_add nm$i ||
2053 error "add nodemap nm$i failed"
2054 wait_nm_sync nm$i "" "" "-N"
2056 if ! combined_mgs_mds; then
2058 $LCTL set_param nodemap.nm$i.fileset=/dir$i ||
2059 error "set nm$i.fileset=/dir$i failed on MGS"
2061 do_facet mgs $LCTL set_param -P nodemap.nm$i.fileset=/dir$i ||
2062 error "set nm$i.fileset=/dir$i failed on servers"
2063 wait_nm_sync nm$i fileset "nodemap.nm$i.fileset=/dir$i"
2066 # Check if all the filesets are correct
2067 for i in $(seq 1 $MDSCOUNT); do
2068 fileset=$(do_facet mds$i \
2069 $LCTL get_param -n nodemap.nm$i.fileset)
2070 [ "$fileset" = "/dir$i" ] ||
2071 error "nm$i.fileset $fileset != /dir$i on mds$i"
2072 do_facet mgs $LCTL nodemap_del nm$i ||
2073 error "delete nodemap nm$i failed"
2076 nodemap_test_cleanup
2078 run_test 27b "The new nodemap won't clear the old nodemap's fileset"
2081 if ! $SHARED_KEY; then
2082 skip "need shared key feature for this test" && return
2084 mkdir -p $DIR/$tdir || error "mkdir failed"
2085 touch $DIR/$tdir/$tdir.out || error "touch failed"
2086 if [ ! -f $DIR/$tdir/$tdir.out ]; then
2087 error "read before rotation failed"
2089 # store top key identity to ensure rotation has occurred
2090 SK_IDENTITY_OLD=$(lctl get_param *.*.*srpc* | grep "expire" |
2091 head -1 | awk '{print $15}' | cut -c1-8)
2092 do_facet $SINGLEMDS lfs flushctx ||
2093 error "could not run flushctx on $SINGLEMDS"
2095 lfs flushctx || error "could not run flushctx on client"
2097 # verify new key is in place
2098 SK_IDENTITY_NEW=$(lctl get_param *.*.*srpc* | grep "expire" |
2099 head -1 | awk '{print $15}' | cut -c1-8)
2100 if [ $SK_IDENTITY_OLD == $SK_IDENTITY_NEW ]; then
2101 error "key did not rotate correctly"
2103 if [ ! -f $DIR/$tdir/$tdir.out ]; then
2104 error "read after rotation failed"
2107 run_test 28 "check shared key rotation method"
2110 if ! $SHARED_KEY; then
2111 skip "need shared key feature for this test" && return
2113 if [ $SK_FLAVOR != "ski" ] && [ $SK_FLAVOR != "skpi" ]; then
2114 skip "test only valid if integrity is active"
2117 mkdir $DIR/$tdir || error "mkdir"
2118 touch $DIR/$tdir/$tfile || error "touch"
2119 zconf_umount_clients ${clients_arr[0]} $MOUNT ||
2120 error "unable to umount clients"
2121 keyctl show | awk '/lustre/ { print $1 }' |
2122 xargs -IX keyctl unlink X
2123 OLD_SK_PATH=$SK_PATH
2124 export SK_PATH=/dev/null
2125 if zconf_mount_clients ${clients_arr[0]} $MOUNT; then
2126 export SK_PATH=$OLD_SK_PATH
2127 if [ -e $DIR/$tdir/$tfile ]; then
2128 error "able to mount and read without key"
2130 error "able to mount without key"
2133 export SK_PATH=$OLD_SK_PATH
2134 keyctl show | awk '/lustre/ { print $1 }' |
2135 xargs -IX keyctl unlink X
2138 run_test 29 "check for missing shared key"
2141 if ! $SHARED_KEY; then
2142 skip "need shared key feature for this test" && return
2144 if [ $SK_FLAVOR != "ski" ] && [ $SK_FLAVOR != "skpi" ]; then
2145 skip "test only valid if integrity is active"
2147 mkdir -p $DIR/$tdir || error "mkdir failed"
2148 touch $DIR/$tdir/$tdir.out || error "touch failed"
2149 zconf_umount_clients ${clients_arr[0]} $MOUNT ||
2150 error "unable to umount clients"
2151 # unload keys from ring
2152 keyctl show | awk '/lustre/ { print $1 }' |
2153 xargs -IX keyctl unlink X
2154 # invalidate the key with bogus filesystem name
2155 lgss_sk -w $SK_PATH/$FSNAME-bogus.key -f $FSNAME.bogus \
2156 -t client -d /dev/urandom || error "lgss_sk failed (1)"
2157 do_facet $SINGLEMDS lfs flushctx || error "could not run flushctx"
2158 OLD_SK_PATH=$SK_PATH
2159 export SK_PATH=$SK_PATH/$FSNAME-bogus.key
2160 if zconf_mount_clients ${clients_arr[0]} $MOUNT; then
2161 SK_PATH=$OLD_SK_PATH
2162 if [ -a $DIR/$tdir/$tdir.out ]; then
2163 error "mount and read file with invalid key"
2165 error "mount with invalid key"
2168 SK_PATH=$OLD_SK_PATH
2169 zconf_umount_clients ${clients_arr[0]} $MOUNT ||
2170 error "unable to umount clients"
2172 run_test 30 "check for invalid shared key"
2176 zconf_umount $HOSTNAME $MOUNT || error "unable to umount client"
2178 # remove ${NETTYPE}999 network on all nodes
2179 do_nodes $(comma_list $(all_nodes)) \
2180 "$LNETCTL net del --net ${NETTYPE}999 && \
2181 $LNETCTL lnet unconfigure 2>/dev/null || true"
2183 # necessary to do writeconf in order to de-register
2184 # @${NETTYPE}999 nid for targets
2186 export KEEP_ZPOOL="true"
2188 export SK_MOUNTED=false
2191 export KEEP_ZPOOL="$KZPOOL"
2195 local nid=$(lctl list_nids | grep ${NETTYPE} | head -n1)
2196 local addr=${nid%@*}
2199 export LNETCTL=$(which lnetctl 2> /dev/null)
2201 [ -z "$LNETCTL" ] && skip "without lnetctl support." && return
2202 local_mode && skip "in local mode."
2204 stack_trap cleanup_31 EXIT
2207 if [ "$MOUNT_2" ] && $(grep -q $MOUNT2' ' /proc/mounts); then
2208 umount_client $MOUNT2 || error "umount $MOUNT2 failed"
if grep -q $MOUNT' ' /proc/mounts; then
2211 umount_client $MOUNT || error "umount $MOUNT failed"
2214 # check exports on servers are empty for client
2215 do_facet mgs "lctl get_param -n *.MGS*.exports.'$nid'.uuid 2>/dev/null |
2216 grep -q -" && error "export on MGS should be empty"
2217 do_nodes $(comma_list $(mdts_nodes) $(osts_nodes)) \
2218 "lctl get_param -n *.${FSNAME}*.exports.'$nid'.uuid \
2219 2>/dev/null | grep -q -" &&
2220 error "export on servers should be empty"
2222 # add network ${NETTYPE}999 on all nodes
2223 do_nodes $(comma_list $(all_nodes)) \
2224 "$LNETCTL lnet configure && $LNETCTL net add --if \
$($LNETCTL net show --net $net | awk 'BEGIN{inf=0} \
{if (inf==1) print $2; inf=0} /interfaces/{inf=1}') \
2227 --net ${NETTYPE}999" ||
2228 error "unable to configure NID ${NETTYPE}999"
2230 # necessary to do writeconf in order to register
2231 # new @${NETTYPE}999 nid for targets
2233 export KEEP_ZPOOL="true"
2235 export SK_MOUNTED=false
2237 setupall server_only || echo 1
2238 export KEEP_ZPOOL="$KZPOOL"
2241 local mgsnid_orig=$MGSNID
2242 # compute new MGSNID
2243 MGSNID=$(do_facet mgs "$LCTL list_nids | grep ${NETTYPE}999")
2245 # on client, turn LNet Dynamic Discovery on
$LNETCTL set discovery 1
2248 # mount client with -o network=${NETTYPE}999 option:
2249 # should fail because of LNet Dynamic Discovery
2250 mount_client $MOUNT ${MOUNT_OPTS},network=${NETTYPE}999 &&
2251 error "client mount with '-o network' option should be refused"
2253 # on client, reconfigure LNet and turn LNet Dynamic Discovery off
$LNETCTL net del --net ${NETTYPE}999 && $LNETCTL lnet unconfigure
$LNETCTL set discovery 0
2259 $LNETCTL lnet configure && $LNETCTL net add --if \
$($LNETCTL net show --net $net | awk 'BEGIN{inf=0} \
{if (inf==1) print $2; inf=0} /interfaces/{inf=1}') \
2262 --net ${NETTYPE}999 ||
2263 error "unable to configure NID ${NETTYPE}999 on client"
2265 # mount client with -o network=${NETTYPE}999 option
2266 mount_client $MOUNT ${MOUNT_OPTS},network=${NETTYPE}999 ||
2267 error "unable to remount client"
2272 # check export on MGS
2273 do_facet mgs "lctl get_param -n *.MGS*.exports.'$nid'.uuid 2>/dev/null |
2275 [ $? -ne 0 ] || error "export for $nid on MGS should not exist"
2278 "lctl get_param -n *.MGS*.exports.'${addr}@${NETTYPE}999'.uuid \
2279 2>/dev/null | grep -q -"
2281 error "export for ${addr}@${NETTYPE}999 on MGS should exist"
2283 # check {mdc,osc} imports
2284 lctl get_param mdc.${FSNAME}-*.import | grep current_connection |
2285 grep -q ${NETTYPE}999
2287 error "import for mdc should use ${addr}@${NETTYPE}999"
2288 lctl get_param osc.${FSNAME}-*.import | grep current_connection |
2289 grep -q ${NETTYPE}999
2291 error "import for osc should use ${addr}@${NETTYPE}999"
2293 run_test 31 "client mount option '-o network'"
2295 log "cleanup: ======================================================"
2298 ## nodemap deactivated
2299 do_facet mgs $LCTL nodemap_activate 0
2301 for num in $(seq $MDSCOUNT); do
2302 if [ "${identity_old[$num]}" = 1 ]; then
2303 switch_identity $num false || identity_old[$num]=$?
2307 $RUNAS_CMD -u $ID0 ls $DIR
2308 $RUNAS_CMD -u $ID1 ls $DIR
2313 check_and_cleanup_lustre