# Run select tests by setting ONLY, or as arguments to the script.
# Skip specific tests by setting EXCEPT.
# bug number for skipped test:
ALWAYS_EXCEPT=" $SANITY_SEC_EXCEPT"
# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
export PATH=$PWD/$SRCDIR:$SRCDIR:$PWD/$SRCDIR/../utils:$PATH:/sbin
export NAME=${NAME:-local}
LUSTRE=${LUSTRE:-$(dirname $0)/..}
. $LUSTRE/tests/test-framework.sh
. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
NODEMAP_TESTS=$(seq 7 26)
if ! check_versions; then
	echo "It is NOT necessary to test nodemap under interoperation mode"
	EXCEPT="$EXCEPT $NODEMAP_TESTS"
[ "$SLOW" = "no" ] && EXCEPT_SLOW="26"
[ "$ALWAYS_EXCEPT$EXCEPT$EXCEPT_SLOW" ] &&
	echo "Skipping tests: $ALWAYS_EXCEPT $EXCEPT $EXCEPT_SLOW"
RUNAS_CMD=${RUNAS_CMD:-runas}
WTL=${WTL:-"$LUSTRE/tests/write_time_limit"}
PERM_CONF=$CONFDIR/perm.conf
HOSTNAME_CHECKSUM=$(hostname | sum | awk '{ print $1 }')
SUBNET_CHECKSUM=$(expr $HOSTNAME_CHECKSUM % 250 + 1)
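# Example (illustrative values): a hostname checksum of 30728 gives
# SUBNET_CHECKSUM=229 (30728 % 250 + 1), and the synthetic test NIDs
# built from it below then look like "229.0.0.1@tcp".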
require_dsh_mds || exit 0
require_dsh_ost || exit 0
clients=${CLIENTS//,/ }
num_clients=$(get_node_count ${clients})
clients_arr=($clients)
USER0=$(getent passwd | grep :$ID0:$ID0: | cut -d: -f1)
USER1=$(getent passwd | grep :$ID1:$ID1: | cut -d: -f1)
NODEMAP_IPADDR_LIST="1 10 64 128 200 250"
NODEMAP_MAX_ID=$((ID0 + NODEMAP_ID_COUNT))
	skip "need to add user0 ($ID0:$ID0)" && exit 0
	skip "need to add user1 ($ID1:$ID1)" && exit 0
IDBASE=${IDBASE:-60000}
# changes to mappings must be reflected in test 23
	[0]="$((IDBASE+3)):$((IDBASE+0)) $((IDBASE+4)):$((IDBASE+2))"
	[1]="$((IDBASE+5)):$((IDBASE+1)) $((IDBASE+6)):$((IDBASE+2))"
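# Worked example with the default IDBASE=60000: nodemap c0 maps client
# ids 60003->60000 and 60004->60002, while c1 maps 60005->60001 and
# 60006->60002, so fs id 60002 is deliberately shared by both nodemaps.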
check_and_setup_lustre
GSS_REF=$(lsmod | grep ^ptlrpc_gss | awk '{print $3}')
if [ ! -z "$GSS_REF" -a "$GSS_REF" != "0" ]; then
	echo "with GSS support"
	echo "without GSS support"
MDT=$(do_facet $SINGLEMDS lctl get_param -N "mdt.\*MDT0000" |
[ -z "$MDT" ] && error "failed to get MDT device" && exit 1
do_facet $SINGLEMDS "mkdir -p $CONFDIR"
IDENTITY_FLUSH=mdt.$MDT.identity_flush
IDENTITY_UPCALL=mdt.$MDT.identity_upcall
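# Note: the "lctl set_param -n $IDENTITY_FLUSH=-1" calls used throughout
# are assumed to flush the whole MDT identity cache, so that changes
# written to $PERM_CONF take effect immediately.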
	if ! $RUNAS_CMD -u $user krb5_login.sh; then
		error "$user kerberos login failed."
	if ! $RUNAS_CMD -u $user -g $group ls $DIR > /dev/null 2>&1; then
		$RUNAS_CMD -u $user lfs flushctx -k
		$RUNAS_CMD -u $user krb5_login.sh
		if ! $RUNAS_CMD -u$user -g$group ls $DIR > /dev/null 2>&1; then
			error "init $user $group failed."
declare -a identity_old
for num in $(seq $MDSCOUNT); do
	switch_identity $num true || identity_old[$num]=$?
if ! $RUNAS_CMD -u $ID0 ls $DIR > /dev/null 2>&1; then
	sec_login $USER0 $USER0
if ! $RUNAS_CMD -u $ID1 ls $DIR > /dev/null 2>&1; then
	sec_login $USER1 $USER1
	# run as different user
	chmod 0755 $DIR || error "chmod (1)"
	rm -rf $DIR/$tdir || error "rm (1)"
	mkdir -p $DIR/$tdir || error "mkdir (1)"
	chown $USER0 $DIR/$tdir || error "chown (2)"
	$RUNAS_CMD -u $ID0 ls $DIR || error "ls (1)"
	rm -f $DIR/f0 || error "rm (2)"
	$RUNAS_CMD -u $ID0 touch $DIR/f0 && error "touch (1)"
	$RUNAS_CMD -u $ID0 touch $DIR/$tdir/f1 || error "touch (2)"
	$RUNAS_CMD -u $ID1 touch $DIR/$tdir/f2 && error "touch (3)"
	touch $DIR/$tdir/f3 || error "touch (4)"
	chown root $DIR/$tdir || error "chown (3)"
	chgrp $USER0 $DIR/$tdir || error "chgrp (1)"
	chmod 0775 $DIR/$tdir || error "chmod (2)"
	$RUNAS_CMD -u $ID0 touch $DIR/$tdir/f4 || error "touch (5)"
	$RUNAS_CMD -u $ID1 touch $DIR/$tdir/f5 && error "touch (6)"
	touch $DIR/$tdir/f6 || error "touch (7)"
	rm -rf $DIR/$tdir || error "rm (3)"
run_test 0 "uid permission ============================="
	[ $GSS_SUP = 0 ] && skip "without GSS support." && return
	chown $USER0 $DIR/$tdir || error "chown (1)"
	$RUNAS_CMD -u $ID1 -v $ID0 touch $DIR/$tdir/f0 && error "touch (2)"
	echo "enable uid $ID1 setuid"
	do_facet $SINGLEMDS "echo '* $ID1 setuid' >> $PERM_CONF"
	do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
	$RUNAS_CMD -u $ID1 -v $ID0 touch $DIR/$tdir/f1 || error "touch (3)"
	chown root $DIR/$tdir || error "chown (4)"
	chgrp $USER0 $DIR/$tdir || error "chgrp (5)"
	chmod 0770 $DIR/$tdir || error "chmod (6)"
	$RUNAS_CMD -u $ID1 -g $ID1 touch $DIR/$tdir/f2 && error "touch (7)"
	$RUNAS_CMD -u$ID1 -g$ID1 -j$ID0 touch $DIR/$tdir/f3 && error "touch (8)"
	echo "enable uid $ID1 setuid,setgid"
	do_facet $SINGLEMDS "echo '* $ID1 setuid,setgid' > $PERM_CONF"
	do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
	$RUNAS_CMD -u $ID1 -g $ID1 -j $ID0 touch $DIR/$tdir/f4 ||
	$RUNAS_CMD -u $ID1 -v $ID0 -g $ID1 -j $ID0 touch $DIR/$tdir/f5 ||
	do_facet $SINGLEMDS "rm -f $PERM_CONF"
	do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
run_test 1 "setuid/gid ============================="
# bug 3285 - supplementary group should always succeed.
# NB: the supplementary groups are set for the local client only;
# for a remote client, the groups of the specified uid on the MDT
# are obtained by the upcall /sbin/l_getidentity and used.
	local server_version=$(lustre_version_code $SINGLEMDS)
	[[ $server_version -ge $(version_code 2.6.93) ]] ||
	[[ $server_version -ge $(version_code 2.5.35) &&
	   $server_version -lt $(version_code 2.5.50) ]] ||
		{ skip "Need MDS version at least 2.6.93 or 2.5.35"; return; }
	chmod 0771 $DIR/$tdir
	chgrp $ID0 $DIR/$tdir
	$RUNAS_CMD -u $ID0 ls $DIR/$tdir || error "setgroups (1)"
	do_facet $SINGLEMDS "echo '* $ID1 setgrp' > $PERM_CONF"
	do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
	$RUNAS_CMD -u $ID1 -G1,2,$ID0 ls $DIR/$tdir ||
		error "setgroups (2)"
	$RUNAS_CMD -u $ID1 -G1,2 ls $DIR/$tdir && error "setgroups (3)"
	do_facet $SINGLEMDS "rm -f $PERM_CONF"
	do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
run_test 4 "set supplementary group ==============="
squash_id default 99 0
wait_nm_sync default squash_uid '' inactive
squash_id default 99 1
wait_nm_sync default squash_gid '' inactive
for (( i = 0; i < NODEMAP_COUNT; i++ )); do
	local csum=${HOSTNAME_CHECKSUM}_${i}
	do_facet mgs $LCTL nodemap_add $csum
	if [ $rc -ne 0 ]; then
		echo "nodemap_add $csum failed with $rc"
out=$(do_facet mgs $LCTL get_param nodemap.$csum.id)
# return 1 if the nodemap name is missing from the output
[[ $(echo $out | grep -c $csum) == 0 ]] && return 1
for (( i = 0; i < NODEMAP_COUNT; i++ )); do
	local csum=${HOSTNAME_CHECKSUM}_${i}
	wait_nm_sync $csum id '' inactive
for ((i = 0; i < NODEMAP_COUNT; i++)); do
	local csum=${HOSTNAME_CHECKSUM}_${i}
	if ! do_facet mgs $LCTL nodemap_del $csum; then
		error "nodemap_del $csum failed with $?"
out=$(do_facet mgs $LCTL get_param nodemap.$csum.id 2>/dev/null)
[[ $(echo $out | grep -c $csum) != 0 ]] && return 1
for (( i = 0; i < NODEMAP_COUNT; i++ )); do
	local csum=${HOSTNAME_CHECKSUM}_${i}
	wait_nm_sync $csum id '' inactive
local cmd="$LCTL nodemap_add_range"
for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
	range="$SUBNET_CHECKSUM.${2}.${j}.[1-253]@tcp"
	if ! do_facet mgs $cmd --name $1 --range $range; then
local cmd="$LCTL nodemap_del_range"
for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
	range="$SUBNET_CHECKSUM.${2}.${j}.[1-253]@tcp"
	if ! do_facet mgs $cmd --name $1 --range $range; then
local cmd="$LCTL nodemap_add_idmap"
echo "Adding idmaps ..."
for ((i = 0; i < NODEMAP_COUNT; i++)); do
	for ((j = $ID0; j < NODEMAP_MAX_ID; j++)); do
		local csum=${HOSTNAME_CHECKSUM}_${i}
		local fs_id=$((j + 1))
		if ! do_facet mgs $cmd --name $csum --idtype uid \
			--idmap $client_id:$fs_id; then
		if ! do_facet mgs $cmd --name $csum --idtype gid \
			--idmap $client_id:$fs_id; then
update_idmaps() { #LU-10040
	[ $(lustre_version_code mgs) -lt $(version_code 2.10.55) ] &&
		skip "Need MGS >= 2.10.55" &&
	local csum=${HOSTNAME_CHECKSUM}_0
	local old_id_client=$ID0
	local old_id_fs=$((ID0 + 1))
	local new_id=$((ID0 + 100))
	echo "Updating idmaps ..."
	# inserting an existing idmap should return an error
	cmd="$LCTL nodemap_add_idmap --name $csum --idtype uid"
		$cmd --idmap $old_id_client:$old_id_fs 2>/dev/null; then
		error "insert idmap {$old_id_client:$old_id_fs} " \
			"should return error"
	# update id_fs and check it
	if ! do_facet mgs $cmd --idmap $old_id_client:$new_id; then
		error "$cmd --idmap $old_id_client:$new_id failed"
	tmp_id=$(do_facet mgs $LCTL get_param -n nodemap.$csum.idmap |
		awk '{ print $7 }' | sed -n '2p')
	[ $tmp_id != $new_id ] && { error "new id_fs $tmp_id != $new_id"; \
		rc=$((rc + 1)); return $rc; }
	# update id_client and check it
	if ! do_facet mgs $cmd --idmap $new_id:$new_id; then
		error "$cmd --idmap $new_id:$new_id failed"
	tmp_id=$(do_facet mgs $LCTL get_param -n nodemap.$csum.idmap |
		awk '{ print $5 }' | sed -n "$((NODEMAP_ID_COUNT + 1)) p")
	tmp_id=$(echo ${tmp_id%,*}) #e.g. "501,"->"501"
	[ $tmp_id != $new_id ] && { error "new id_client $tmp_id != $new_id"; \
		rc=$((rc + 1)); return $rc; }
	# delete the updated idmap
	cmd="$LCTL nodemap_del_idmap --name $csum --idtype uid"
	if ! do_facet mgs $cmd --idmap $new_id:$new_id; then
		error "$cmd --idmap $new_id:$new_id failed"
	# restore the idmap so delete_idmaps can clean up properly
	cmd="$LCTL nodemap_add_idmap --name $csum --idtype uid"
	if ! do_facet mgs $cmd --idmap $old_id_client:$old_id_fs; then
		error "$cmd --idmap $old_id_client:$old_id_fs failed"
local cmd="$LCTL nodemap_del_idmap"
echo "Deleting idmaps ..."
for ((i = 0; i < NODEMAP_COUNT; i++)); do
	for ((j = $ID0; j < NODEMAP_MAX_ID; j++)); do
		local csum=${HOSTNAME_CHECKSUM}_${i}
		local fs_id=$((j + 1))
		if ! do_facet mgs $cmd --name $csum --idtype uid \
			--idmap $client_id:$fs_id; then
		if ! do_facet mgs $cmd --name $csum --idtype gid \
			--idmap $client_id:$fs_id; then
local cmd="$LCTL nodemap_modify"
proc[0]="admin_nodemap"
proc[1]="trusted_nodemap"
for ((idx = 0; idx < 2; idx++)); do
	if ! do_facet mgs $cmd --name $1 --property ${option[$idx]} \
	if ! do_facet mgs $cmd --name $1 --property ${option[$idx]} \
[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
	skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
cmd[0]="$LCTL nodemap_modify --property squash_uid"
cmd[1]="$LCTL nodemap_modify --property squash_gid"
if ! do_facet mgs ${cmd[$3]} --name $1 --value $2; then
local nodemap_name=$1
local is_active=$(do_facet mgs $LCTL get_param -n nodemap.active)
local mgs_ip=$(host_nids_address $mgs_HOST $NETTYPE | cut -d' ' -f1)
if [ "$nodemap_name" == "active" ]; then
elif [ -z "$key" ]; then
	proc_param=${nodemap_name}
	proc_param="${nodemap_name}.${key}"
if [ "$opt" == "inactive" ]; then
	# check nm sync even if nodemap is not activated
(( is_active == 0 )) && [ "$proc_param" != "active" ] && return
if [ -z "$value" ]; then
	out1=$(do_facet mgs $LCTL get_param $opt \
		nodemap.${proc_param} 2>/dev/null)
	echo "On MGS ${mgs_ip}, ${proc_param} = $out1"
# wait up to 10 seconds for the other servers to sync with the MGS
for i in $(seq 1 10); do
	for node in $(all_server_nodes); do
		local node_ip=$(host_nids_address $node $NETTYPE |
		if [ -z "$value" ]; then
			[ $node_ip == $mgs_ip ] && continue
		out2=$(do_node $node_ip $LCTL get_param $opt \
			nodemap.$proc_param 2>/dev/null)
		echo "On $node ${node_ip}, ${proc_param} = $out2"
		[ "$out1" != "$out2" ] && is_sync=false && break
echo "OTHER - IP: $node_ip"
error "mgs and $nodemap_name ${key} mismatch after $i attempts"
echo "waited $((i - 1)) seconds for sync"
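# Typical calls, as used by the tests below: "wait_nm_sync c0 admin_nodemap"
# waits for a per-nodemap property to reach all servers, "wait_nm_sync
# active 0" waits for global deactivation, and the trailing "'' inactive"
# form checks synchronization even while nodemaps are deactivated.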
# ensure that the squash defaults are the expected defaults
squash_id default 99 0
wait_nm_sync default squash_uid '' inactive
squash_id default 99 1
wait_nm_sync default squash_gid '' inactive
cmd="$LCTL nodemap_test_nid"
nid=$(do_facet mgs $cmd $1)
if [ $nid == $2 ]; then
local cmd="$LCTL nodemap_test_id"
echo "Testing idmaps ..."
## nodemap deactivated
if ! do_facet mgs $LCTL nodemap_activate 0; then
for ((id = $ID0; id < NODEMAP_MAX_ID; id++)); do
	for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
		local nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
		local fs_id=$(do_facet mgs $cmd --nid $nid \
			--idtype uid --id $id)
		if [ $fs_id != $id ]; then
			echo "expected $id, got $fs_id"
if ! do_facet mgs $LCTL nodemap_activate 1; then
for ((id = $ID0; id < NODEMAP_MAX_ID; id++)); do
	for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
		nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
		fs_id=$(do_facet mgs $cmd --nid $nid \
			--idtype uid --id $id)
		expected_id=$((id + 1))
		if [ $fs_id != $expected_id ]; then
			echo "expected $expected_id, got $fs_id"
for ((i = 0; i < NODEMAP_COUNT; i++)); do
	local csum=${HOSTNAME_CHECKSUM}_${i}
	if ! do_facet mgs $LCTL nodemap_modify --name $csum \
		--property trusted --value 1; then
		error "nodemap_modify $csum failed with $?"
for ((id = $ID0; id < NODEMAP_MAX_ID; id++)); do
	for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
		nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
		fs_id=$(do_facet mgs $cmd --nid $nid \
			--idtype uid --id $id)
		if [ $fs_id != $id ]; then
			echo "expected $id, got $fs_id"
## ensure allow_root_access is enabled
for ((i = 0; i < NODEMAP_COUNT; i++)); do
	local csum=${HOSTNAME_CHECKSUM}_${i}
	if ! do_facet mgs $LCTL nodemap_modify --name $csum \
		--property admin --value 1; then
		error "nodemap_modify $csum failed with $?"
## check that root is allowed
for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
	nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
	fs_id=$(do_facet mgs $cmd --nid $nid --idtype uid --id 0)
	if [ $fs_id != 0 ]; then
		echo "root allowed: expected 0, got $fs_id"
## ensure allow_root_access is disabled
for ((i = 0; i < NODEMAP_COUNT; i++)); do
	local csum=${HOSTNAME_CHECKSUM}_${i}
	if ! do_facet mgs $LCTL nodemap_modify --name $csum \
		--property admin --value 0; then
		error "nodemap_modify ${HOSTNAME_CHECKSUM}_${i} "
## check that root is mapped to 99
for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
	nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
	fs_id=$(do_facet mgs $cmd --nid $nid --idtype uid --id 0)
	if [ $fs_id != 99 ]; then
		error "root squash: expected 99, got $fs_id"
## reset client trust to 0
for ((i = 0; i < NODEMAP_COUNT; i++)); do
	if ! do_facet mgs $LCTL nodemap_modify \
		--name ${HOSTNAME_CHECKSUM}_${i} \
		--property trusted --value 0; then
		error "nodemap_modify ${HOSTNAME_CHECKSUM}_${i} "
	remote_mgs_nodsh && skip "remote MGS with nodsh" && return
	[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
	[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 2
run_test 7 "nodemap create and delete"
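# The create/verify/delete pattern exercised by test_7, condensed as an
# illustrative sketch (not called by the tests):
#   do_facet mgs $LCTL nodemap_add <name>             # create on the MGS
#   do_facet mgs $LCTL get_param nodemap.<name>.id    # verify it exists
#   do_facet mgs $LCTL nodemap_del <name>             # remove it again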
	remote_mgs_nodsh && skip "remote MGS with nodsh" && return
	[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
	[[ $rc == 0 ]] && error "duplicate nodemap_add allowed with $rc" &&
	[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 3
run_test 8 "nodemap reject duplicates"
	remote_mgs_nodsh && skip "remote MGS with nodsh" && return
	[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
	[[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! delete_range ${HOSTNAME_CHECKSUM}_${i} $i; then
	[[ $rc != 0 ]] && error "nodemap_del_range failed with $rc" && return 4
	[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4
run_test 9 "nodemap range add"
	remote_mgs_nodsh && skip "remote MGS with nodsh" && return
	[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
	[[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
	[[ $rc == 0 ]] && error "nodemap_add_range duplicate add with $rc" &&
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! delete_range ${HOSTNAME_CHECKSUM}_${i} $i; then
	[[ $rc != 0 ]] && error "nodemap_del_range failed with $rc" && return 4
	[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 5
run_test 10a "nodemap reject duplicate ranges"
	[ $(lustre_version_code mgs) -lt $(version_code 2.10.53) ] &&
		skip "Need MGS >= 2.10.53" && return
	local nids="192.168.19.[0-255]@o2ib20"
	do_facet mgs $LCTL nodemap_del $nm1 2>/dev/null
	do_facet mgs $LCTL nodemap_del $nm2 2>/dev/null
	do_facet mgs $LCTL nodemap_add $nm1 || error "Add $nm1 failed"
	do_facet mgs $LCTL nodemap_add $nm2 || error "Add $nm2 failed"
	do_facet mgs $LCTL nodemap_add_range --name $nm1 --range $nids ||
		error "Add range $nids to $nm1 failed"
	[ -n "$(do_facet mgs $LCTL get_param nodemap.$nm1.* |
		grep start_nid)" ] || error "No range was found"
	do_facet mgs $LCTL nodemap_del_range --name $nm2 --range $nids &&
		error "Deleting range $nids from $nm2 should fail"
	[ -n "$(do_facet mgs $LCTL get_param nodemap.$nm1.* |
		grep start_nid)" ] || error "Range $nids should be there"
	do_facet mgs $LCTL nodemap_del $nm1 || error "Delete $nm1 failed"
	do_facet mgs $LCTL nodemap_del $nm2 || error "Delete $nm2 failed"
run_test 10b "delete range from the correct nodemap"
test_10c() { #LU-8912
	[ $(lustre_version_code mgs) -lt $(version_code 2.10.57) ] &&
		skip "Need MGS >= 2.10.57" && return
	local nm="nodemap_lu8912"
	local nid_range="10.210.[32-47].[0-255]@o2ib3"
	local start_nid="10.210.32.0@o2ib3"
	local end_nid="10.210.47.255@o2ib3"
	local start_nid_found
	do_facet mgs $LCTL nodemap_del $nm 2>/dev/null
	do_facet mgs $LCTL nodemap_add $nm || error "Add $nm failed"
	do_facet mgs $LCTL nodemap_add_range --name $nm --range $nid_range ||
		error "Add range $nid_range to $nm failed"
	start_nid_found=$(do_facet mgs $LCTL get_param nodemap.$nm.* |
		awk -F '[,: ]' /start_nid/'{ print $9 }')
	[ "$start_nid" == "$start_nid_found" ] ||
		error "start_nid: $start_nid_found != $start_nid"
	end_nid_found=$(do_facet mgs $LCTL get_param nodemap.$nm.* |
		awk -F '[,: ]' /end_nid/'{ print $13 }')
	[ "$end_nid" == "$end_nid_found" ] ||
		error "end_nid: $end_nid_found != $end_nid"
	do_facet mgs $LCTL nodemap_del $nm || error "Delete $nm failed"
run_test 10c "verify contiguous range support"
test_10d() { #LU-8913
	[ $(lustre_version_code mgs) -lt $(version_code 2.10.59) ] &&
		skip "Need MGS >= 2.10.59" && return
	local nm="nodemap_lu8913"
	local nid_range="*@o2ib3"
	local start_nid="0.0.0.0@o2ib3"
	local end_nid="255.255.255.255@o2ib3"
	local start_nid_found
	do_facet mgs $LCTL nodemap_del $nm 2>/dev/null
	do_facet mgs $LCTL nodemap_add $nm || error "Add $nm failed"
	do_facet mgs $LCTL nodemap_add_range --name $nm --range $nid_range ||
		error "Add range $nid_range to $nm failed"
	start_nid_found=$(do_facet mgs $LCTL get_param nodemap.$nm.* |
		awk -F '[,: ]' /start_nid/'{ print $9 }')
	[ "$start_nid" == "$start_nid_found" ] ||
		error "start_nid: $start_nid_found != $start_nid"
	end_nid_found=$(do_facet mgs $LCTL get_param nodemap.$nm.* |
		awk -F '[,: ]' /end_nid/'{ print $13 }')
	[ "$end_nid" == "$end_nid_found" ] ||
		error "end_nid: $end_nid_found != $end_nid"
	do_facet mgs $LCTL nodemap_del $nm || error "Delete $nm failed"
run_test 10d "verify nodemap range format '*@<net>' support"
	remote_mgs_nodsh && skip "remote MGS with nodsh" && return
	[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! modify_flags ${HOSTNAME_CHECKSUM}_${i}; then
	[[ $rc != 0 ]] && error "nodemap_modify failed with $rc" && return 2
	[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 3
run_test 11 "nodemap modify"
	remote_mgs_nodsh && skip "remote MGS with nodsh" && return
	[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! squash_id ${HOSTNAME_CHECKSUM}_${i} 88 0; then
	[[ $rc != 0 ]] && error "nodemap squash_uid failed with $rc" && return 2
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! squash_id ${HOSTNAME_CHECKSUM}_${i} 88 1; then
	[[ $rc != 0 ]] && error "nodemap squash_gid failed with $rc" && return 3
	[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4
run_test 12 "nodemap set squash ids"
	remote_mgs_nodsh && skip "remote MGS with nodsh" && return
	[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
	[[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
			for k in $NODEMAP_IPADDR_LIST; do
				if ! test_nid $SUBNET_CHECKSUM.$i.$j.$k \
					${HOSTNAME_CHECKSUM}_${i}; then
	[[ $rc != 0 ]] && error "nodemap_test_nid failed with $rc" && return 3
	[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4
run_test 13 "test nids"
	remote_mgs_nodsh && skip "remote MGS with nodsh" && return
	[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
			for k in $NODEMAP_IPADDR_LIST; do
				if ! test_nid $SUBNET_CHECKSUM.$i.$j.$k \
	[[ $rc != 0 ]] && error "nodemap_test_nid failed with $rc" && return 3
	[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4
run_test 14 "test default nodemap nid lookup"
	remote_mgs_nodsh && skip "remote MGS with nodsh" && return
	[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
	[[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2
	[[ $rc != 0 ]] && error "nodemap_add_idmap failed with $rc" && return 3
	[[ $rc != 0 ]] && error "nodemap_test_id failed with $rc" && return 4
	[[ $rc != 0 ]] && error "update_idmaps failed with $rc" && return 5
	[[ $rc != 0 ]] && error "nodemap_del_idmap failed with $rc" && return 6
	[[ $rc != 0 ]] && error "nodemap_delete failed with $rc" && return 7
run_test 15 "test id mapping"
create_fops_nodemaps() {
	for client in $clients; do
		local client_ip=$(host_nids_address $client $NETTYPE)
		local client_nid=$(h2nettype $client_ip)
		do_facet mgs $LCTL nodemap_add c${i} || return 1
		do_facet mgs $LCTL nodemap_add_range \
			--name c${i} --range $client_nid || return 1
		for map in ${FOPS_IDMAPS[i]}; do
			do_facet mgs $LCTL nodemap_add_idmap --name c${i} \
				--idtype uid --idmap ${map} || return 1
			do_facet mgs $LCTL nodemap_add_idmap --name c${i} \
				--idtype gid --idmap ${map} || return 1
		wait_nm_sync c$i idmap

delete_fops_nodemaps() {
	for client in $clients; do
		do_facet mgs $LCTL nodemap_del c${i} || return 1
	if [ $MDSCOUNT -le 1 ]; then
		do_node ${clients_arr[0]} mkdir -p $DIR/$tdir
		# round-robin MDTs to test DNE nodemap support
		[ ! -d $DIR ] && do_node ${clients_arr[0]} mkdir -p $DIR
		do_node ${clients_arr[0]} $LFS setdirstripe -c 1 -i \
			$((fops_mds_index % MDSCOUNT)) $DIR/$tdir
		((fops_mds_index++))
# acl test directory needs to be initialized on a privileged client
	local admin=$(do_facet mgs $LCTL get_param -n nodemap.c0.admin_nodemap)
	local trust=$(do_facet mgs $LCTL get_param -n \
		nodemap.c0.trusted_nodemap)
	do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
	do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
	wait_nm_sync c0 admin_nodemap
	wait_nm_sync c0 trusted_nodemap
	do_node ${clients_arr[0]} rm -rf $DIR/$tdir
	do_node ${clients_arr[0]} chown $user $DIR/$tdir
	do_facet mgs $LCTL nodemap_modify --name c0 \
		--property admin --value $admin
	do_facet mgs $LCTL nodemap_modify --name c0 \
		--property trusted --value $trust
	# flush MDT locks to make sure they are reacquired before test
	do_node ${clients_arr[0]} $LCTL set_param \
		ldlm.namespaces.$FSNAME-MDT*.lru_size=clear
	wait_nm_sync c0 admin_nodemap
	wait_nm_sync c0 trusted_nodemap
# fileset test directory needs to be initialized on a privileged client
fileset_test_setup() {
	if [ -n "$FILESET" -a -z "$SKIP_FILESET" ]; then
		cleanup_mount $MOUNT
		FILESET="" zconf_mount_clients $CLIENTS $MOUNT
	local admin=$(do_facet mgs $LCTL get_param -n \
		nodemap.${nm}.admin_nodemap)
	local trust=$(do_facet mgs $LCTL get_param -n \
		nodemap.${nm}.trusted_nodemap)
	do_facet mgs $LCTL nodemap_modify --name $nm --property admin --value 1
	do_facet mgs $LCTL nodemap_modify --name $nm --property trusted \
	wait_nm_sync $nm admin_nodemap
	wait_nm_sync $nm trusted_nodemap
	# create directory and populate it for subdir mount
	do_node ${clients_arr[0]} mkdir $MOUNT/$subdir ||
		error "unable to create dir $MOUNT/$subdir"
	do_node ${clients_arr[0]} touch $MOUNT/$subdir/this_is_$subdir ||
		error "unable to create file $MOUNT/$subdir/this_is_$subdir"
	do_node ${clients_arr[0]} mkdir $MOUNT/$subdir/$subsubdir ||
		error "unable to create dir $MOUNT/$subdir/$subsubdir"
	do_node ${clients_arr[0]} touch \
		$MOUNT/$subdir/$subsubdir/this_is_$subsubdir ||
		error "unable to create file \
			$MOUNT/$subdir/$subsubdir/this_is_$subsubdir"
	do_facet mgs $LCTL nodemap_modify --name $nm \
		--property admin --value $admin
	do_facet mgs $LCTL nodemap_modify --name $nm \
		--property trusted --value $trust
	# flush MDT locks to make sure they are reacquired before test
	do_node ${clients_arr[0]} $LCTL set_param \
		ldlm.namespaces.$FSNAME-MDT*.lru_size=clear
	wait_nm_sync $nm admin_nodemap
	wait_nm_sync $nm trusted_nodemap
# fileset test directory needs to be cleaned up on a privileged client
fileset_test_cleanup() {
	local admin=$(do_facet mgs $LCTL get_param -n \
		nodemap.${nm}.admin_nodemap)
	local trust=$(do_facet mgs $LCTL get_param -n \
		nodemap.${nm}.trusted_nodemap)
	do_facet mgs $LCTL nodemap_modify --name $nm --property admin --value 1
	do_facet mgs $LCTL nodemap_modify --name $nm --property trusted \
	wait_nm_sync $nm admin_nodemap
	wait_nm_sync $nm trusted_nodemap
	# cleanup directory created for subdir mount
	do_node ${clients_arr[0]} rm -rf $MOUNT/$subdir ||
		error "unable to remove dir $MOUNT/$subdir"
	do_facet mgs $LCTL nodemap_modify --name $nm \
		--property admin --value $admin
	do_facet mgs $LCTL nodemap_modify --name $nm \
		--property trusted --value $trust
	# flush MDT locks to make sure they are reacquired before test
	do_node ${clients_arr[0]} $LCTL set_param \
		ldlm.namespaces.$FSNAME-MDT*.lru_size=clear
	wait_nm_sync $nm admin_nodemap
	wait_nm_sync $nm trusted_nodemap
	if [ -n "$FILESET" -a -z "$SKIP_FILESET" ]; then
		cleanup_mount $MOUNT
		zconf_mount_clients $CLIENTS $MOUNT
do_create_delete() {
	local testfile=$DIR/$tdir/$tfile
	if $run_u touch $testfile >& /dev/null; then
		$run_u rm $testfile && d=1
	local expected=$(get_cr_del_expected $key)
	[ "$res" != "$expected" ] &&
		error "test $key, wanted $expected, got $res" && rc=$((rc + 1))
nodemap_check_quota() {
	$run_u lfs quota -q $DIR | awk '{ print $2; exit; }'

do_fops_quota_test() {
	# fuzz the quota usage to account for possible indirect blocks, etc.
	local quota_fuzz=$(fs_log_size)
	local qused_orig=$(nodemap_check_quota "$run_u")
	local qused_high=$((qused_orig + quota_fuzz))
	local qused_low=$((qused_orig - quota_fuzz))
	local testfile=$DIR/$tdir/$tfile
	$run_u dd if=/dev/zero of=$testfile oflag=sync bs=1M count=1 \
		>& /dev/null || error "unable to write quota test file"
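	# dd wrote 1M above; lfs quota is assumed to report usage in KB,
	# hence the 1024 offset in the bounds checked below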
	sync; sync_all_data || true
	local qused_new=$(nodemap_check_quota "$run_u")
	[ $((qused_new)) -lt $((qused_low + 1024)) -o \
		$((qused_new)) -gt $((qused_high + 1024)) ] &&
		error "$qused_new != $qused_orig + 1M after write, " \
			"fuzz is $quota_fuzz"
	$run_u rm $testfile || error "unable to remove quota test file"
	wait_delete_completed_mds
	qused_new=$(nodemap_check_quota "$run_u")
	[ $((qused_new)) -lt $((qused_low)) \
		-o $((qused_new)) -gt $((qused_high)) ] &&
		error "quota not reclaimed, expect $qused_orig, " \
			"got $qused_new, fuzz $quota_fuzz"
get_fops_mapped_user() {
	for ((i=0; i < ${#FOPS_IDMAPS[@]}; i++)); do
		for map in ${FOPS_IDMAPS[i]}; do
			if [ $(cut -d: -f1 <<< "$map") == $cli_user ]; then
				cut -d: -f2 <<< "$map"
get_cr_del_expected() {
	IFS=":" read -a key <<< "$1"
	local mapmode="${key[0]}"
	local mds_user="${key[1]}"
	local cluster="${key[2]}"
	local cli_user="${key[3]}"
	local mode="0${key[4]}"
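	# e.g. (illustrative values) the key "mapped_noadmin:60000:c1:60005:755"
	# parses to mapmode=mapped_noadmin, mds_user=60000, cluster=c1,
	# cli_user=60005 and mode=0755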
	[[ $mapmode == *mapped* ]] && mapped=1
	# only c1 is mapped in these test cases
	[[ $mapmode == mapped_trusted* ]] && [ "$cluster" == "c0" ] && mapped=0
	[[ $mapmode == *noadmin* ]] && noadmin=1
	# o+wx works as long as the user isn't mapped
	if [ $((mode & 3)) -eq 3 ]; then
	# if the client user is root, check whether root is squashed
	if [ "$cli_user" == "0" ]; then
		# squashed root succeeds only if the other bits allow access
		1) [ "$other" == "1" ] && echo $SUCCESS
		   [ "$other" == "0" ] && echo $FAILURE;;
	if [ "$mapped" == "0" ]; then
		[ "$other" == "1" ] && echo $SUCCESS
		[ "$other" == "0" ] && echo $FAILURE
	# if the mapped user is the mds user, check for u+wx
	mapped_user=$(get_fops_mapped_user $cli_user)
	[ "$mapped_user" == "-1" ] &&
		error "unable to find mapping for client user $cli_user"
	if [ "$mapped_user" == "$mds_user" -a \
		$(((mode & 0300) == 0300)) -eq 1 ]; then
	if [ "$mapped_user" != "$mds_user" -a "$other" == "1" ]; then
test_fops_admin_cli_i=""
test_fops_chmod_dir() {
	local current_cli_i=$1
	local dir_to_chmod=$3
	local new_admin_cli_i=""
	# do we need to set up a new admin client?
	[ "$current_cli_i" == "0" ] && [ "$test_fops_admin_cli_i" != "1" ] &&
	[ "$current_cli_i" != "0" ] && [ "$test_fops_admin_cli_i" != "0" ] &&
	# if there is only one client, and it is non-admin, flip admin every time
	if [ "$num_clients" == "1" ]; then
		test_fops_admin_client=$clients
		test_fops_admin_val=$(do_facet mgs $LCTL get_param -n \
			nodemap.c0.admin_nodemap)
		if [ "$test_fops_admin_val" != "1" ]; then
			do_facet mgs $LCTL nodemap_modify \
		wait_nm_sync c0 admin_nodemap
	elif [ "$new_admin_cli_i" != "" ]; then
		# restore admin val to old admin client
		if [ "$test_fops_admin_cli_i" != "" ] &&
			[ "$test_fops_admin_val" != "1" ]; then
			do_facet mgs $LCTL nodemap_modify \
				--name c${test_fops_admin_cli_i} \
				--value $test_fops_admin_val
			wait_nm_sync c${test_fops_admin_cli_i} admin_nodemap
		test_fops_admin_cli_i=$new_admin_cli_i
		test_fops_admin_client=${clients_arr[$new_admin_cli_i]}
		test_fops_admin_val=$(do_facet mgs $LCTL get_param -n \
			nodemap.c${new_admin_cli_i}.admin_nodemap)
		if [ "$test_fops_admin_val" != "1" ]; then
			do_facet mgs $LCTL nodemap_modify \
				--name c${new_admin_cli_i} \
			wait_nm_sync c${new_admin_cli_i} admin_nodemap
	do_node $test_fops_admin_client chmod $perm_bits $DIR/$tdir || return 1
	# remove admin for the single client if it was originally non-admin
	if [ "$num_clients" == "1" ] && [ "$test_fops_admin_val" != "1" ]; then
		do_facet mgs $LCTL nodemap_modify --name c0 --property admin \
		wait_nm_sync c0 admin_nodemap
	local single_client="$2"
	local client_user_list=([0]="0 $((IDBASE+3)) $((IDBASE+4))"
			       [1]="0 $((IDBASE+5)) $((IDBASE+6))")
	local perm_bit_list="0 3 $((0300)) $((0303))"
	# SLOW tests 000-007, 010-070, 100-700 (octal modes)
	[ "$SLOW" == "yes" ] &&
		perm_bit_list="0 $(seq 1 7) $(seq 8 8 63) $(seq 64 64 511) \
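	# the default perm_bit_list decodes to octal modes 000, 003, 300
	# and 303 ($((0300)) == 192 decimal), i.e. just the o+wx and u+wx
	# combinations that get_cr_del_expected() special-cases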
	# step through mds users; -1 means root
	for mds_i in -1 0 1 2; do
		local user=$((mds_i + IDBASE))
		[ "$mds_i" == "-1" ] && user=0
		echo mkdir -p $DIR/$tdir
		for client in $clients; do
			for u in ${client_user_list[$cli_i]}; do
				local run_u="do_node $client \
					$RUNAS_CMD -u$u -g$u -G$u"
				for perm_bits in $perm_bit_list; do
					local mode=$(printf %03o $perm_bits)
					key="$mapmode:$user:c$cli_i:$u:$mode"
					test_fops_chmod_dir $cli_i $mode \
						error "cannot chmod $key"
					do_create_delete "$run_u" "$key"
				test_fops_chmod_dir $cli_i 777 $DIR/$tdir ||
					error "cannot chmod $key"
				do_fops_quota_test "$run_u"
			cli_i=$((cli_i + 1))
			[ "$single_client" == "1" ] && break
nodemap_version_check () {
	remote_mgs_nodsh && skip "remote MGS with nodsh" && return 1
	[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
nodemap_test_setup() {
	local active_nodemap=1
	[ "$1" == "0" ] && active_nodemap=0
	do_nodes $(comma_list $(all_mdts_nodes)) \
		$LCTL set_param mdt.*.identity_upcall=NONE
	create_fops_nodemaps
	[[ $rc != 0 ]] && error "adding fops nodemaps failed $rc"
	do_facet mgs $LCTL nodemap_activate $active_nodemap
	do_facet mgs $LCTL nodemap_modify --name default \
		--property admin --value 1
	wait_nm_sync default admin_nodemap
	do_facet mgs $LCTL nodemap_modify --name default \
		--property trusted --value 1
	wait_nm_sync default trusted_nodemap

nodemap_test_cleanup() {
	delete_fops_nodemaps
	[[ $rc != 0 ]] && error "removing fops nodemaps failed $rc"
	do_facet mgs $LCTL nodemap_modify --name default \
		--property admin --value 0
	wait_nm_sync default admin_nodemap
	do_facet mgs $LCTL nodemap_modify --name default \
		--property trusted --value 0
	wait_nm_sync default trusted_nodemap
	do_facet mgs $LCTL nodemap_activate 0
	wait_nm_sync active 0
	export SK_UNIQUE_NM=false
nodemap_clients_admin_trusted() {
	for client in $clients; do
		do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property admin --value $admin
		do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property trusted --value $tr
	wait_nm_sync c$((i - 1)) admin_nodemap
	wait_nm_sync c$((i - 1)) trusted_nodemap
	nodemap_version_check || return 0
	nodemap_test_setup 0
	trap nodemap_test_cleanup EXIT
	nodemap_test_cleanup
run_test 16 "test nodemap all_off fileops"
	   [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.55) ]; then
		skip "Need MDS >= 2.11.55"
	nodemap_version_check || return 0
	trap nodemap_test_cleanup EXIT
	nodemap_clients_admin_trusted 0 1
	test_fops trusted_noadmin 1
	nodemap_test_cleanup
run_test 17 "test nodemap trusted_noadmin fileops"

	   [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.55) ]; then
		skip "Need MDS >= 2.11.55"
	nodemap_version_check || return 0
	trap nodemap_test_cleanup EXIT
	nodemap_clients_admin_trusted 0 0
	test_fops mapped_noadmin 1
	nodemap_test_cleanup
run_test 18 "test nodemap mapped_noadmin fileops"

	   [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.55) ]; then
		skip "Need MDS >= 2.11.55"
	nodemap_version_check || return 0
	trap nodemap_test_cleanup EXIT
	nodemap_clients_admin_trusted 1 1
	test_fops trusted_admin 1
	nodemap_test_cleanup
run_test 19 "test nodemap trusted_admin fileops"

	   [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.55) ]; then
		skip "Need MDS >= 2.11.55"
	nodemap_version_check || return 0
	trap nodemap_test_cleanup EXIT
	nodemap_clients_admin_trusted 1 0
	test_fops mapped_admin 1
	nodemap_test_cleanup
run_test 20 "test nodemap mapped_admin fileops"
	   [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.55) ]; then
		skip "Need MDS >= 2.11.55"
	nodemap_version_check || return 0
	trap nodemap_test_cleanup EXIT
	for client in $clients; do
		do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property admin --value 0
		do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property trusted --value $x
	wait_nm_sync c$((i - 1)) trusted_nodemap
	test_fops mapped_trusted_noadmin
	nodemap_test_cleanup
run_test 21 "test nodemap mapped_trusted_noadmin fileops"
	   [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.55) ]; then
		skip "Need MDS >= 2.11.55"
	nodemap_version_check || return 0
	trap nodemap_test_cleanup EXIT
	for client in $clients; do
		do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property admin --value 1
		do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property trusted --value $x
	wait_nm_sync c$((i - 1)) trusted_nodemap
	test_fops mapped_trusted_admin
	nodemap_test_cleanup
run_test 22 "test nodemap mapped_trusted_admin fileops"
# acl test directory needs to be initialized on a privileged client
nodemap_acl_test_setup() {
	local admin=$(do_facet mgs $LCTL get_param -n \
		nodemap.c0.admin_nodemap)
	local trust=$(do_facet mgs $LCTL get_param -n \
		nodemap.c0.trusted_nodemap)
	do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
	do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
	wait_nm_sync c0 admin_nodemap
	wait_nm_sync c0 trusted_nodemap
	do_node ${clients_arr[0]} rm -rf $DIR/$tdir
	do_node ${clients_arr[0]} chmod a+rwx $DIR/$tdir ||
		error "unable to chmod a+rwx test dir $DIR/$tdir"
	do_facet mgs $LCTL nodemap_modify --name c0 \
		--property admin --value $admin
	do_facet mgs $LCTL nodemap_modify --name c0 \
		--property trusted --value $trust
	wait_nm_sync c0 trusted_nodemap
# returns 0 if the number of ACLs does not change on the second (mapped)
# client after being set on the first client
nodemap_acl_test() {
	local set_client="$2"
	local get_client="$3"
	local check_setfacl="$4"
	local setfacl_error=0
	local testfile=$DIR/$tdir/$tfile
	local RUNAS_USER="$RUNAS_CMD -u $user"
	local acl_count_post=0
	nodemap_acl_test_setup
	do_node $set_client $RUNAS_USER touch $testfile
	# ACL masks aren't filtered by nodemap code, so we ignore them
	acl_count=$(do_node $get_client getfacl $testfile | grep -v mask |
	do_node $set_client $RUNAS_USER setfacl -m $user:rwx $testfile ||
	# if check_setfacl is set to 1, setfacl is expected to fail
	if [ "$check_setfacl" == "1" ]; then
		[ "$setfacl_error" != "1" ] && return 1
	[ "$setfacl_error" == "1" ] && echo "WARNING: unable to setfacl"
	acl_count_post=$(do_node $get_client getfacl $testfile | grep -v mask |
	[ $acl_count -eq $acl_count_post ] && return 0
	[ $num_clients -lt 2 ] && skip "Need 2 clients at least" && return
	nodemap_version_check || return 0
	trap nodemap_test_cleanup EXIT
	# 1 trusted cluster, 1 mapped cluster
	local unmapped_fs=$((IDBASE+0))
	local unmapped_c1=$((IDBASE+5))
	local mapped_fs=$((IDBASE+2))
	local mapped_c0=$((IDBASE+4))
	local mapped_c1=$((IDBASE+6))
	do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
	do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
	do_facet mgs $LCTL nodemap_modify --name c1 --property admin --value 0
	do_facet mgs $LCTL nodemap_modify --name c1 --property trusted --value 0
	wait_nm_sync c1 trusted_nodemap
	# setfacl on trusted cluster to unmapped user, verify it's not seen
	nodemap_acl_test $unmapped_fs ${clients_arr[0]} ${clients_arr[1]} ||
		error "acl count (1)"
	# setfacl on trusted cluster to mapped user, verify it's seen
	nodemap_acl_test $mapped_fs ${clients_arr[0]} ${clients_arr[1]} &&
		error "acl count (2)"
	# setfacl on mapped cluster to mapped user, verify it's seen
	nodemap_acl_test $mapped_c1 ${clients_arr[1]} ${clients_arr[0]} &&
		error "acl count (3)"
	# setfacl on mapped cluster to unmapped user, verify error
	nodemap_acl_test $unmapped_fs ${clients_arr[1]} ${clients_arr[0]} 1 ||
		error "acl count (4)"
	do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 0
	do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 0
	wait_nm_sync c0 trusted_nodemap
	# setfacl to mapped user on c1, also mapped to c0, verify it's seen
	nodemap_acl_test $mapped_c1 ${clients_arr[1]} ${clients_arr[0]} &&
		error "acl count (5)"
	# setfacl to mapped user on c1, not mapped to c0, verify not seen
	nodemap_acl_test $unmapped_c1 ${clients_arr[1]} ${clients_arr[0]} ||
		error "acl count (6)"
	nodemap_test_cleanup
run_test 23a "test mapped regular ACLs"
test_23b() { #LU-9929
	[ $num_clients -lt 2 ] && skip "Need 2 clients at least" && return
	[ $(lustre_version_code mgs) -lt $(version_code 2.10.53) ] &&
		skip "Need MGS >= 2.10.53" && return
	export SK_UNIQUE_NM=true
	trap nodemap_test_cleanup EXIT
	local testdir=$DIR/$tdir
	local fs_id=$((IDBASE+10))
	do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
	wait_nm_sync c0 admin_nodemap
	do_facet mgs $LCTL nodemap_modify --name c1 --property admin --value 1
	wait_nm_sync c1 admin_nodemap
	do_facet mgs $LCTL nodemap_modify --name c1 --property trusted --value 1
	wait_nm_sync c1 trusted_nodemap
	# add idmap $ID0:$fs_id (500:60010)
	do_facet mgs $LCTL nodemap_add_idmap --name c0 --idtype gid \
		--idmap $ID0:$fs_id ||
		error "add idmap $ID0:$fs_id to nodemap c0 failed"
	wait_nm_sync c0 idmap
	# set/getfacl default acl on client 1 (unmapped gid=500)
	do_node ${clients_arr[0]} rm -rf $testdir
	do_node ${clients_arr[0]} mkdir -p $testdir
	# here, USER0=$(getent passwd | grep :$ID0:$ID0: | cut -d: -f1)
	do_node ${clients_arr[0]} setfacl -R -d -m group:$USER0:rwx $testdir ||
		error "setfacl $testdir on ${clients_arr[0]} failed"
	unmapped_id=$(do_node ${clients_arr[0]} getfacl $testdir |
		grep -E "default:group:.*:rwx" | awk -F: '{print $3}')
	[ "$unmapped_id" = "$USER0" ] ||
		error "gid=$ID0 was not unmapped correctly on ${clients_arr[0]}"
	# getfacl default acl on client 2 (mapped gid=60010)
	mapped_id=$(do_node ${clients_arr[1]} getfacl $testdir |
		grep -E "default:group:.*:rwx" | awk -F: '{print $3}')
	fs_user=$(do_node ${clients_arr[1]} getent passwd |
		grep :$fs_id:$fs_id: | cut -d: -f1)
	[ -z "$fs_user" ] && fs_user=$fs_id
	[ $mapped_id -eq $fs_id -o "$mapped_id" = "$fs_user" ] ||
		error "Should return gid=$fs_id or $fs_user on client2"
	nodemap_test_cleanup
	export SK_UNIQUE_NM=false
run_test 23b "test mapped default ACLs"
	trap nodemap_test_cleanup EXIT
	do_nodes $(comma_list $(all_server_nodes)) $LCTL get_param -R nodemap
	nodemap_test_cleanup
run_test 24 "check nodemap proc files for LBUGs and Oopses"
	local tmpfile=$(mktemp)
	local tmpfile2=$(mktemp)
	local tmpfile3=$(mktemp)
	local tmpfile4=$(mktemp)
	nodemap_version_check || return 0
	# stop clients for this test
	zconf_umount_clients $CLIENTS $MOUNT ||
		error "unable to umount clients $CLIENTS"
	export SK_UNIQUE_NM=true
	# enable trusted/admin for setquota call in cleanup_and_setup_lustre()
	for client in $clients; do
		do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property admin --value 1
		do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property trusted --value 1
	wait_nm_sync c$((i - 1)) trusted_nodemap
	trap nodemap_test_cleanup EXIT
	# create a new, empty nodemap, and add fileset info to it
	do_facet mgs $LCTL nodemap_add test25 ||
		error "unable to create nodemap $testname"
	do_facet mgs $LCTL set_param -P nodemap.$testname.fileset=/$subdir ||
		error "unable to add fileset info to nodemap test25"
	wait_nm_sync test25 id
	do_facet mgs $LCTL nodemap_info > $tmpfile
	do_facet mds $LCTL nodemap_info > $tmpfile2
	if ! $SHARED_KEY; then
		# will conflict with SK's nodemaps
		cleanup_and_setup_lustre
	# stop clients for this test
	zconf_umount_clients $CLIENTS $MOUNT ||
		error "unable to umount clients $CLIENTS"
	do_facet mgs $LCTL nodemap_info > $tmpfile3
	diff -q $tmpfile3 $tmpfile >& /dev/null ||
		error "nodemap_info diff on MGS after remount"
	do_facet mds $LCTL nodemap_info > $tmpfile4
	diff -q $tmpfile4 $tmpfile2 >& /dev/null ||
		error "nodemap_info diff on MDS after remount"
	do_facet mgs $LCTL nodemap_del test25 ||
		error "cannot delete nodemap test25 from config"
	nodemap_test_cleanup
	# restart clients previously stopped
	zconf_mount_clients $CLIENTS $MOUNT ||
		error "unable to mount clients $CLIENTS"
	rm -f $tmpfile $tmpfile2 $tmpfile3 $tmpfile4
	export SK_UNIQUE_NM=false
run_test 25 "test save and reload nodemap config"
	nodemap_version_check || return 0
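	# seq -f 'c%g' N prints c1..cN, so the two pipelines below create
	# and then delete nodemaps c1 through c$large_i in one xargs pass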
	do_facet mgs "seq -f 'c%g' $large_i | xargs -n1 $LCTL nodemap_add"
	wait_nm_sync c$large_i admin_nodemap
	do_facet mgs "seq -f 'c%g' $large_i | xargs -n1 $LCTL nodemap_del"
	wait_nm_sync c$large_i admin_nodemap
run_test 26 "test transferring very large nodemap"
nodemap_exercise_fileset() {
	if [ "$nm" == "default" ]; then
		do_facet mgs $LCTL nodemap_activate 1
	if $SHARED_KEY; then
		export SK_UNIQUE_NM=true
		# will conflict with SK's nodemaps
	trap "fileset_test_cleanup $nm" EXIT
	fileset_test_setup "$nm"
	# add fileset info to $nm nodemap
	if ! combined_mgs_mds; then
		do_facet mgs $LCTL set_param nodemap.${nm}.fileset=/$subdir ||
			error "unable to add fileset info to $nm nodemap on MGS"
	do_facet mgs $LCTL set_param -P nodemap.${nm}.fileset=/$subdir ||
		error "unable to add fileset info to $nm nodemap for servers"
	wait_nm_sync $nm fileset "nodemap.${nm}.fileset=/$subdir"
	zconf_umount_clients ${clients_arr[0]} $MOUNT ||
		error "unable to umount client ${clients_arr[0]}"
	# set some generic fileset to trigger SSK code
	zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
		error "unable to remount client ${clients_arr[0]}"
	# test mount point content
	do_node ${clients_arr[0]} test -f $MOUNT/this_is_$subdir ||
		error "fileset not taken into account"
	# re-mount client with sub-subdir
	zconf_umount_clients ${clients_arr[0]} $MOUNT ||
		error "unable to umount client ${clients_arr[0]}"
	export FILESET=/$subsubdir
	zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
		error "unable to remount client ${clients_arr[0]}"
	# test mount point content
	do_node ${clients_arr[0]} test -f $MOUNT/this_is_$subsubdir ||
		error "subdir of fileset not taken into account"
	# remove fileset info from nodemap
	do_facet mgs $LCTL nodemap_set_fileset --name $nm --fileset clear ||
		error "unable to delete fileset info on $nm nodemap"
	wait_update_facet mgs "$LCTL get_param nodemap.${nm}.fileset" \
		"nodemap.${nm}.fileset=" ||
		error "fileset info still not cleared on $nm nodemap"
	do_facet mgs $LCTL set_param -P nodemap.${nm}.fileset=clear ||
		error "unable to reset fileset info on $nm nodemap"
	wait_nm_sync $nm fileset "nodemap.${nm}.fileset="
	zconf_umount_clients ${clients_arr[0]} $MOUNT ||
		error "unable to umount client ${clients_arr[0]}"
	zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
		error "unable to remount client ${clients_arr[0]}"
	# test mount point content
	if ! do_node ${clients_arr[0]} test -d $MOUNT/$subdir; then
		error "fileset not cleared on $nm nodemap"
	# back to non-nodemap setup
	if $SHARED_KEY; then
		export SK_UNIQUE_NM=false
		zconf_umount_clients ${clients_arr[0]} $MOUNT ||
			error "unable to umount client ${clients_arr[0]}"
	fileset_test_cleanup "$nm"
	if [ "$nm" == "default" ]; then
		do_facet mgs $LCTL nodemap_activate 0
		wait_nm_sync active 0
		export SK_UNIQUE_NM=false
	nodemap_test_cleanup
	if $SHARED_KEY; then
		zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
			error "unable to remount client ${clients_arr[0]}"
	[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.50) ] &&
		skip "Need MDS >= 2.11.50" && return
	for nm in "default" "c0"; do
		local subdir="subdir_${nm}"
		local subsubdir="subsubdir_${nm}"
		if [ "$nm" == "default" ] && [ "$SHARED_KEY" == "true" ]; then
			echo "Skipping nodemap $nm with SHARED_KEY"
		echo "Exercising fileset for nodemap $nm"
		nodemap_exercise_fileset "$nm"
run_test 27a "test fileset in various nodemaps"
test_27b() { #LU-10703
	[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.50) ] &&
		skip "Need MDS >= 2.11.50" && return
	[[ $MDSCOUNT -lt 2 ]] && skip "needs >= 2 MDTs" && return
	trap nodemap_test_cleanup EXIT
	# Add the nodemaps and set their filesets
	for i in $(seq 1 $MDSCOUNT); do
		do_facet mgs $LCTL nodemap_del nm$i 2>/dev/null
		do_facet mgs $LCTL nodemap_add nm$i ||
			error "add nodemap nm$i failed"
		wait_nm_sync nm$i "" "" "-N"
		if ! combined_mgs_mds; then
			$LCTL set_param nodemap.nm$i.fileset=/dir$i ||
				error "set nm$i.fileset=/dir$i failed on MGS"
		do_facet mgs $LCTL set_param -P nodemap.nm$i.fileset=/dir$i ||
			error "set nm$i.fileset=/dir$i failed on servers"
		wait_nm_sync nm$i fileset "nodemap.nm$i.fileset=/dir$i"
	# Check if all the filesets are correct
	for i in $(seq 1 $MDSCOUNT); do
		fileset=$(do_facet mds$i \
			$LCTL get_param -n nodemap.nm$i.fileset)
		[ "$fileset" = "/dir$i" ] ||
			error "nm$i.fileset $fileset != /dir$i on mds$i"
		do_facet mgs $LCTL nodemap_del nm$i ||
			error "delete nodemap nm$i failed"
	nodemap_test_cleanup
run_test 27b "The new nodemap won't clear the old nodemap's fileset"
	if ! $SHARED_KEY; then
		skip "need shared key feature for this test" && return
	mkdir -p $DIR/$tdir || error "mkdir failed"
	touch $DIR/$tdir/$tdir.out || error "touch failed"
	if [ ! -f $DIR/$tdir/$tdir.out ]; then
		error "read before rotation failed"
	# store top key identity to ensure rotation has occurred
	SK_IDENTITY_OLD=$(lctl get_param *.*.*srpc* | grep "expire" |
		head -1 | awk '{print $15}' | cut -c1-8)
	do_facet $SINGLEMDS lfs flushctx ||
		error "could not run flushctx on $SINGLEMDS"
	lfs flushctx || error "could not run flushctx on client"
	# verify new key is in place
	SK_IDENTITY_NEW=$(lctl get_param *.*.*srpc* | grep "expire" |
		head -1 | awk '{print $15}' | cut -c1-8)
	if [ $SK_IDENTITY_OLD == $SK_IDENTITY_NEW ]; then
		error "key did not rotate correctly"
	if [ ! -f $DIR/$tdir/$tdir.out ]; then
		error "read after rotation failed"
run_test 28 "check shared key rotation method"
	if ! $SHARED_KEY; then
		skip "need shared key feature for this test" && return
	if [ $SK_FLAVOR != "ski" ] && [ $SK_FLAVOR != "skpi" ]; then
		skip "test only valid if integrity is active"
	mkdir $DIR/$tdir || error "mkdir"
	touch $DIR/$tdir/$tfile || error "touch"
	zconf_umount_clients ${clients_arr[0]} $MOUNT ||
		error "unable to umount clients"
	keyctl show | awk '/lustre/ { print $1 }' |
		xargs -IX keyctl unlink X
	OLD_SK_PATH=$SK_PATH
	export SK_PATH=/dev/null
	if zconf_mount_clients ${clients_arr[0]} $MOUNT; then
		export SK_PATH=$OLD_SK_PATH
		if [ -e $DIR/$tdir/$tfile ]; then
			error "able to mount and read without key"
			error "able to mount without key"
	export SK_PATH=$OLD_SK_PATH
	keyctl show | awk '/lustre/ { print $1 }' |
		xargs -IX keyctl unlink X
run_test 29 "check for missing shared key"
	if ! $SHARED_KEY; then
		skip "need shared key feature for this test" && return
	if [ $SK_FLAVOR != "ski" ] && [ $SK_FLAVOR != "skpi" ]; then
		skip "test only valid if integrity is active"
	mkdir -p $DIR/$tdir || error "mkdir failed"
	touch $DIR/$tdir/$tdir.out || error "touch failed"
	zconf_umount_clients ${clients_arr[0]} $MOUNT ||
		error "unable to umount clients"
	# unload keys from ring
	keyctl show | awk '/lustre/ { print $1 }' |
		xargs -IX keyctl unlink X
	# invalidate the key with bogus filesystem name
	lgss_sk -w $SK_PATH/$FSNAME-bogus.key -f $FSNAME.bogus \
		-t client -d /dev/urandom || error "lgss_sk failed (1)"
	do_facet $SINGLEMDS lfs flushctx || error "could not run flushctx"
	OLD_SK_PATH=$SK_PATH
	export SK_PATH=$SK_PATH/$FSNAME-bogus.key
	if zconf_mount_clients ${clients_arr[0]} $MOUNT; then
		SK_PATH=$OLD_SK_PATH
		if [ -e $DIR/$tdir/$tdir.out ]; then
			error "mount and read file with invalid key"
			error "mount with invalid key"
	SK_PATH=$OLD_SK_PATH
	zconf_umount_clients ${clients_arr[0]} $MOUNT ||
		error "unable to umount clients"
run_test 30 "check for invalid shared key"
	zconf_umount $HOSTNAME $MOUNT || error "unable to umount client"
	# remove the ${NETTYPE}999 network on all nodes
	do_nodes $(comma_list $(all_nodes)) \
		"$LNETCTL net del --net ${NETTYPE}999 && \
		 $LNETCTL lnet unconfigure 2>/dev/null || true"
	# writeconf is necessary in order to de-register
	# the @${NETTYPE}999 nid for targets
	export KEEP_ZPOOL="true"
	export SK_MOUNTED=false
	export KEEP_ZPOOL="$KZPOOL"
	local nid=$(lctl list_nids | grep ${NETTYPE} | head -n1)
	local addr=${nid%@*}
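	# e.g. for nid "192.168.1.1@tcp", ${nid%@*} strips the "@tcp"
	# suffix and yields addr "192.168.1.1" (illustrative values)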
2232 export LNETCTL=$(which lnetctl 2> /dev/null)
2234 [ -z "$LNETCTL" ] && skip "without lnetctl support." && return
2235 local_mode && skip "in local mode."
2237 stack_trap cleanup_31 EXIT
2240 if [ "$MOUNT_2" ] && $(grep -q $MOUNT2' ' /proc/mounts); then
2241 umount_client $MOUNT2 || error "umount $MOUNT2 failed"
2243 if $(grep -q $MOUNT' ' /proc/mounts); then
2244 umount_client $MOUNT || error "umount $MOUNT failed"
2247 # check exports on servers are empty for client
2248 do_facet mgs "lctl get_param -n *.MGS*.exports.'$nid'.uuid 2>/dev/null |
2249 grep -q -" && error "export on MGS should be empty"
2250 do_nodes $(comma_list $(mdts_nodes) $(osts_nodes)) \
2251 "lctl get_param -n *.${FSNAME}*.exports.'$nid'.uuid \
2252 2>/dev/null | grep -q -" &&
2253 error "export on servers should be empty"

	# add network ${NETTYPE}999 on all nodes
	do_nodes $(comma_list $(all_nodes)) \
		"$LNETCTL lnet configure && $LNETCTL net add --if \
		 \$($LNETCTL net show --net $net | awk 'BEGIN{inf=0} \
		 {if (inf==1) print \$2; inf=0} /interfaces/{inf=1}') \
		 --net ${NETTYPE}999" ||
		error "unable to configure NID ${NETTYPE}999"

	# necessary to do writeconf in order to register
	# new @${NETTYPE}999 nid for targets
	KZPOOL=$KEEP_ZPOOL
	export KEEP_ZPOOL="true"
	stopall
	export SK_MOUNTED=false
	writeconf_all
	setupall server_only || echo 1
	export KEEP_ZPOOL="$KZPOOL"

	# backup MGSNID
	local mgsnid_orig=$MGSNID
	# compute new MGSNID
	MGSNID=$(do_facet mgs "$LCTL list_nids | grep ${NETTYPE}999")

	# on client, turn LNet Dynamic Discovery on
	lnetctl set discovery 1

	# mount client with -o network=${NETTYPE}999 option:
	# should fail because of LNet Dynamic Discovery
	mount_client $MOUNT ${MOUNT_OPTS},network=${NETTYPE}999 &&
		error "client mount with '-o network' option should be refused"

	# on client, reconfigure LNet and turn LNet Dynamic Discovery off
	$LNETCTL net del --net ${NETTYPE}999 && lnetctl lnet unconfigure
	lnetctl set discovery 0
	$LNETCTL lnet configure && $LNETCTL net add --if \
		$($LNETCTL net show --net $net | awk 'BEGIN{inf=0} \
		{if (inf==1) print $2; inf=0} /interfaces/{inf=1}') \
		--net ${NETTYPE}999 ||
		error "unable to configure NID ${NETTYPE}999 on client"

	# mount client with -o network=${NETTYPE}999 option
	mount_client $MOUNT ${MOUNT_OPTS},network=${NETTYPE}999 ||
		error "unable to remount client"

	# restore MGSNID
	MGSNID=$mgsnid_orig

	# check export on MGS
	do_facet mgs "lctl get_param -n *.MGS*.exports.'$nid'.uuid 2>/dev/null |
		      grep -q -"
	[ $? -ne 0 ] || error "export for $nid on MGS should not exist"

	do_facet mgs \
		"lctl get_param -n *.MGS*.exports.'${addr}@${NETTYPE}999'.uuid \
		 2>/dev/null | grep -q -"
	[ $? -eq 0 ] ||
		error "export for ${addr}@${NETTYPE}999 on MGS should exist"

	# check {mdc,osc} imports
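	# Each import should now point at the @${NETTYPE}999 NID; the line
	# matched in 'lctl get_param mdc.*.import' looks like this
	# (address illustrative):
	#	current_connection: 192.168.1.10@tcp999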
	lctl get_param mdc.${FSNAME}-*.import | grep current_connection |
		grep -q ${NETTYPE}999
	[ $? -eq 0 ] ||
		error "import for mdc should use ${addr}@${NETTYPE}999"
	lctl get_param osc.${FSNAME}-*.import | grep current_connection |
		grep -q ${NETTYPE}999
	[ $? -eq 0 ] ||
		error "import for osc should use ${addr}@${NETTYPE}999"
}
run_test 31 "client mount option '-o network'"

cleanup_32() {
	# umount client
	zconf_umount_clients ${clients_arr[0]} $MOUNT

	# disable sk flavor enforcement on MGS
	set_rule _mgs any any null

	# stop gss daemon on MGS
	if ! combined_mgs_mds ; then
		send_sigint $mgs_HOST lsvcgssd
	fi

	# re-mount client
	MOUNT_OPTS=$(add_sk_mntflag $MOUNT_OPTS)
	mountcli

	restore_to_default_flavor
}

test_32() {
	if ! $SHARED_KEY; then
		skip "need shared key feature for this test"
	fi

	stack_trap cleanup_32 EXIT

	# restore to default null flavor
	save_flvr=$SK_FLAVOR
	SK_FLAVOR=null
	restore_to_default_flavor || error "cannot set null flavor"
	SK_FLAVOR=$save_flvr

	# umount client
	if [ "$MOUNT_2" ] && $(grep -q $MOUNT2' ' /proc/mounts); then
		umount_client $MOUNT2 || error "umount $MOUNT2 failed"
	fi
	if $(grep -q $MOUNT' ' /proc/mounts); then
		umount_client $MOUNT || error "umount $MOUNT failed"
	fi

	# start gss daemon on MGS
	if combined_mgs_mds ; then
		send_sigint $mds_HOST lsvcgssd
	fi
	start_gss_daemons $mgs_HOST "$LSVCGSSD -vvv -s -g"

	# add mgs key type and MGS NIDs in key on MGS
	do_nodes $mgs_HOST "lgss_sk -t mgs,server -g $MGSNID -m \
			$SK_PATH/$FSNAME.key >/dev/null 2>&1" ||
		error "could not modify keyfile on MGS"

	# load modified key file on MGS
	do_nodes $mgs_HOST "lgss_sk -l $SK_PATH/$FSNAME.key >/dev/null 2>&1" ||
		error "could not load keyfile on MGS"

	# add MGS NIDs in key on client
	do_nodes ${clients_arr[0]} "lgss_sk -g $MGSNID -m \
			$SK_PATH/$FSNAME.key >/dev/null 2>&1" ||
		error "could not modify keyfile on client"

	# set perms for per-nodemap keys else permission denied
	do_nodes $(comma_list $(all_nodes)) \
		"keyctl show | grep lustre | cut -c1-11 |
			sed -e 's/ //g;' |
			xargs -IX keyctl setperm X 0x3f3f3f3f"
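
	# The mask 0x3f3f3f3f grants all six key permissions (view, read,
	# write, search, link, setattr = 0x3f) in each of the four subject
	# bytes (possessor, user, group, other), so the per-nodemap keys
	# remain usable no matter which identity requests them.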

	# re-mount client with mgssec=skn
	save_opts=$MOUNT_OPTS
	if [ -z "$MOUNT_OPTS" ]; then
		MOUNT_OPTS="-o mgssec=skn"
	else
		MOUNT_OPTS="$MOUNT_OPTS,mgssec=skn"
	fi
	zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
		error "mount ${clients_arr[0]} with mgssec=skn failed"
	MOUNT_OPTS=$save_opts

	# umount client
	zconf_umount_clients ${clients_arr[0]} $MOUNT ||
		error "umount ${clients_arr[0]} failed"

	# enforce ska flavor on MGS
	set_rule _mgs any any ska

	# re-mount client without mgssec: should fail
	zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS &&
		error "mount ${clients_arr[0]} without mgssec should fail"

	# re-mount client with mgssec=skn: should fail
	save_opts=$MOUNT_OPTS
	if [ -z "$MOUNT_OPTS" ]; then
		MOUNT_OPTS="-o mgssec=skn"
	else
		MOUNT_OPTS="$MOUNT_OPTS,mgssec=skn"
	fi
	zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS &&
		error "mount ${clients_arr[0]} with mgssec=skn should fail"
	MOUNT_OPTS=$save_opts

	# re-mount client with mgssec=ska: should succeed
	save_opts=$MOUNT_OPTS
	if [ -z "$MOUNT_OPTS" ]; then
		MOUNT_OPTS="-o mgssec=ska"
	else
		MOUNT_OPTS="$MOUNT_OPTS,mgssec=ska"
	fi
	zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
		error "mount ${clients_arr[0]} with mgssec=ska failed"
	MOUNT_OPTS=$save_opts
}
run_test 32 "check for mgssec"

cleanup_33() {
	# disable sk flavor enforcement
	set_rule $FSNAME any cli2mdt null
	wait_flavor cli2mdt null
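
	# As used throughout this script, set_rule takes
	#	set_rule <fsname|_mgs> <network> <direction> <flavor>
	# to push an sptlrpc rule via the MGS, and wait_flavor polls until
	# the given connection direction (here cli2mdt) has actually
	# switched to the requested flavor before proceeding.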

	# umount client
	zconf_umount_clients ${clients_arr[0]} $MOUNT

	# stop gss daemon on MGS
	if ! combined_mgs_mds ; then
		send_sigint $mgs_HOST lsvcgssd
	fi

	# re-mount client
	MOUNT_OPTS=$(add_sk_mntflag $MOUNT_OPTS)
	mountcli

	restore_to_default_flavor
}

test_33() {
	if ! $SHARED_KEY; then
		skip "need shared key feature for this test"
	fi

	stack_trap cleanup_33 EXIT

	# restore to default null flavor
	save_flvr=$SK_FLAVOR
	SK_FLAVOR=null
	restore_to_default_flavor || error "cannot set null flavor"
	SK_FLAVOR=$save_flvr

	# umount client
	if [ "$MOUNT_2" ] && $(grep -q $MOUNT2' ' /proc/mounts); then
		umount_client $MOUNT2 || error "umount $MOUNT2 failed"
	fi
	if $(grep -q $MOUNT' ' /proc/mounts); then
		umount_client $MOUNT || error "umount $MOUNT failed"
	fi

	# start gss daemon on MGS
	if combined_mgs_mds ; then
		send_sigint $mds_HOST lsvcgssd
	fi
	start_gss_daemons $mgs_HOST "$LSVCGSSD -vvv -s -g"

	# add mgs key type and MGS NIDs in key on MGS
	do_nodes $mgs_HOST "lgss_sk -t mgs,server -g $MGSNID -m \
			$SK_PATH/$FSNAME.key >/dev/null 2>&1" ||
		error "could not modify keyfile on MGS"

	# load modified key file on MGS
	do_nodes $mgs_HOST "lgss_sk -l $SK_PATH/$FSNAME.key >/dev/null 2>&1" ||
		error "could not load keyfile on MGS"

	# add MGS NIDs in key on client
	do_nodes ${clients_arr[0]} "lgss_sk -g $MGSNID -m \
			$SK_PATH/$FSNAME.key >/dev/null 2>&1" ||
		error "could not modify keyfile on client"

	# set perms for per-nodemap keys else permission denied
	do_nodes $(comma_list $(all_nodes)) \
		"keyctl show | grep lustre | cut -c1-11 |
			sed -e 's/ //g;' |
			xargs -IX keyctl setperm X 0x3f3f3f3f"

	# re-mount client with mgssec=skn
	save_opts=$MOUNT_OPTS
	if [ -z "$MOUNT_OPTS" ]; then
		MOUNT_OPTS="-o mgssec=skn"
	else
		MOUNT_OPTS="$MOUNT_OPTS,mgssec=skn"
	fi
	zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
		error "mount ${clients_arr[0]} with mgssec=skn failed"
	MOUNT_OPTS=$save_opts

	# enforce ska flavor for cli2mdt
	set_rule $FSNAME any cli2mdt ska
	wait_flavor cli2mdt ska

	# check the debug log for the "faked source" error message
	$LCTL dk | grep "faked source" &&
		error "MGS connection srpc flags incorrect"

	exit 0
}
run_test 33 "correct srpc flags for MGS connection"
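
# Note on the check in test_33: '$LCTL dk' dumps the kernel debug buffer;
# a "faked source" message there would mean the server saw unexpected
# srpc flags on the MGS connection, hence the error.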
2532 log "cleanup: ======================================================"
2535 ## nodemap deactivated
2536 do_facet mgs $LCTL nodemap_activate 0
2538 for num in $(seq $MDSCOUNT); do
2539 if [ "${identity_old[$num]}" = 1 ]; then
2540 switch_identity $num false || identity_old[$num]=$?
2544 $RUNAS_CMD -u $ID0 ls $DIR
2545 $RUNAS_CMD -u $ID1 ls $DIR
2550 check_and_cleanup_lustre