3 # Run select tests by setting ONLY, or as arguments to the script.
4 # Skip specific tests by setting EXCEPT.
10 # bug number for skipped test:
11 ALWAYS_EXCEPT=" $SANITY_SEC_EXCEPT"
12 # UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
15 export PATH=$PWD/$SRCDIR:$SRCDIR:$PWD/$SRCDIR/../utils:$PATH:/sbin
16 export NAME=${NAME:-local}
18 LUSTRE=${LUSTRE:-$(dirname $0)/..}
19 . $LUSTRE/tests/test-framework.sh
21 . ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
25 NODEMAP_TESTS=$(seq 7 26)
27 if ! check_versions; then
28 echo "It is NOT necessary to test nodemap under interoperation mode"
29 EXCEPT="$EXCEPT $NODEMAP_TESTS"
32 [ "$SLOW" = "no" ] && EXCEPT_SLOW="26"
34 [ "$ALWAYS_EXCEPT$EXCEPT$EXCEPT_SLOW" ] &&
35 echo "Skipping tests: $ALWAYS_EXCEPT $EXCEPT $EXCEPT_SLOW"
37 RUNAS_CMD=${RUNAS_CMD:-runas}
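# runas flags as exercised in the tests below (quick reference, inferred from
# their usage here): -u <uid> and -g <gid> set the uid/gid, -v <fsuid> and
# -j <fsgid> set the filesystem uid/gid, -G <list> sets supplementary groups.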
39 WTL=${WTL:-"$LUSTRE/tests/write_time_limit"}
42 PERM_CONF=$CONFDIR/perm.conf
44 HOSTNAME_CHECKSUM=$(hostname | sum | awk '{ print $1 }')
45 SUBNET_CHECKSUM=$(expr $HOSTNAME_CHECKSUM % 250 + 1)
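# The checksums above only make the test objects unique per host: nodemap
# names are built as ${HOSTNAME_CHECKSUM}_<n> and synthetic NID ranges as
# $SUBNET_CHECKSUM.<i>.<j>.[1-253]@tcp (see add_range/test_nid below); the
# concrete values vary from host to host.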
47 require_dsh_mds || exit 0
48 require_dsh_ost || exit 0
50 clients=${CLIENTS//,/ }
51 num_clients=$(get_node_count ${clients})
52 clients_arr=($clients)
56 USER0=$(getent passwd | grep :$ID0:$ID0: | cut -d: -f1)
57 USER1=$(getent passwd | grep :$ID1:$ID1: | cut -d: -f1)
61 NODEMAP_IPADDR_LIST="1 10 64 128 200 250"
63 NODEMAP_MAX_ID=$((ID0 + NODEMAP_ID_COUNT))
66 skip "need to add user0 ($ID0:$ID0)" && exit 0
69 skip "need to add user1 ($ID1:$ID1)" && exit 0
71 IDBASE=${IDBASE:-60000}
73 # changes to mappings must be reflected in test 23
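# Each FOPS_IDMAPS entry below is a space-separated list of "client_id:fs_id"
# pairs, one list per test cluster; e.g. with the default IDBASE=60000,
# cluster c0 maps client uid/gid 60003 -> 60000 and 60004 -> 60002.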
75 [0]="$((IDBASE+3)):$((IDBASE+0)) $((IDBASE+4)):$((IDBASE+2))"
76 [1]="$((IDBASE+5)):$((IDBASE+1)) $((IDBASE+6)):$((IDBASE+2))"
79 check_and_setup_lustre
84 GSS_REF=$(lsmod | grep ^ptlrpc_gss | awk '{print $3}')
85 if [ ! -z "$GSS_REF" -a "$GSS_REF" != "0" ]; then
87 echo "with GSS support"
90 echo "without GSS support"
93 MDT=$(do_facet $SINGLEMDS lctl get_param -N "mdt.\*MDT0000" |
95 [ -z "$MDT" ] && error "fail to get MDT device" && exit 1
96 do_facet $SINGLEMDS "mkdir -p $CONFDIR"
97 IDENTITY_FLUSH=mdt.$MDT.identity_flush
98 IDENTITY_UPCALL=mdt.$MDT.identity_upcall
109 if ! $RUNAS_CMD -u $user krb5_login.sh; then
110 error "$user login kerberos failed."
114 if ! $RUNAS_CMD -u $user -g $group ls $DIR > /dev/null 2>&1; then
115 $RUNAS_CMD -u $user lfs flushctx -k
116 $RUNAS_CMD -u $user krb5_login.sh
117 if ! $RUNAS_CMD -u$user -g$group ls $DIR > /dev/null 2>&1; then
118 error "init $user $group failed."
124 declare -a identity_old
127 for num in $(seq $MDSCOUNT); do
128 switch_identity $num true || identity_old[$num]=$?
131 if ! $RUNAS_CMD -u $ID0 ls $DIR > /dev/null 2>&1; then
132 sec_login $USER0 $USER0
135 if ! $RUNAS_CMD -u $ID1 ls $DIR > /dev/null 2>&1; then
136 sec_login $USER1 $USER1
141 # run as different user
145 chmod 0755 $DIR || error "chmod (1)"
146 rm -rf $DIR/$tdir || error "rm (1)"
147 mkdir -p $DIR/$tdir || error "mkdir (1)"
148 chown $USER0 $DIR/$tdir || error "chown (2)"
149 $RUNAS_CMD -u $ID0 ls $DIR || error "ls (1)"
150 rm -f $DIR/f0 || error "rm (2)"
151 $RUNAS_CMD -u $ID0 touch $DIR/f0 && error "touch (1)"
152 $RUNAS_CMD -u $ID0 touch $DIR/$tdir/f1 || error "touch (2)"
153 $RUNAS_CMD -u $ID1 touch $DIR/$tdir/f2 && error "touch (3)"
154 touch $DIR/$tdir/f3 || error "touch (4)"
155 chown root $DIR/$tdir || error "chown (3)"
156 chgrp $USER0 $DIR/$tdir || error "chgrp (1)"
157 chmod 0775 $DIR/$tdir || error "chmod (2)"
158 $RUNAS_CMD -u $ID0 touch $DIR/$tdir/f4 || error "touch (5)"
159 $RUNAS_CMD -u $ID1 touch $DIR/$tdir/f5 && error "touch (6)"
160 touch $DIR/$tdir/f6 || error "touch (7)"
161 rm -rf $DIR/$tdir || error "rm (3)"
163 run_test 0 "uid permission ============================="
167 [ $GSS_SUP = 0 ] && skip "without GSS support." && return
172 chown $USER0 $DIR/$tdir || error "chown (1)"
173 $RUNAS_CMD -u $ID1 -v $ID0 touch $DIR/$tdir/f0 && error "touch (2)"
174 echo "enable uid $ID1 setuid"
175 do_facet $SINGLEMDS "echo '* $ID1 setuid' >> $PERM_CONF"
176 do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
177 $RUNAS_CMD -u $ID1 -v $ID0 touch $DIR/$tdir/f1 || error "touch (3)"
179 chown root $DIR/$tdir || error "chown (4)"
180 chgrp $USER0 $DIR/$tdir || error "chgrp (5)"
181 chmod 0770 $DIR/$tdir || error "chmod (6)"
182 $RUNAS_CMD -u $ID1 -g $ID1 touch $DIR/$tdir/f2 && error "touch (7)"
183 $RUNAS_CMD -u$ID1 -g$ID1 -j$ID0 touch $DIR/$tdir/f3 && error "touch (8)"
184 echo "enable uid $ID1 setuid,setgid"
185 do_facet $SINGLEMDS "echo '* $ID1 setuid,setgid' > $PERM_CONF"
186 do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
187 $RUNAS_CMD -u $ID1 -g $ID1 -j $ID0 touch $DIR/$tdir/f4 ||
189 $RUNAS_CMD -u $ID1 -v $ID0 -g $ID1 -j $ID0 touch $DIR/$tdir/f5 ||
194 do_facet $SINGLEMDS "rm -f $PERM_CONF"
195 do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
197 run_test 1 "setuid/gid ============================="
199 # bug 3285 - supplementary group should always succeed.
200 # NB: the supplementary groups are set for the local client only; for a
201 # remote client, the groups of the specified uid are obtained on the MDT
202 # via the /sbin/l_getidentity upcall and used instead.
204 local server_version=$(lustre_version_code $SINGLEMDS)
206 [[ $server_version -ge $(version_code 2.6.93) ]] ||
207 [[ $server_version -ge $(version_code 2.5.35) &&
208 $server_version -lt $(version_code 2.5.50) ]] ||
209 { skip "Need MDS version at least 2.6.93 or 2.5.35"; return; }
213 chmod 0771 $DIR/$tdir
214 chgrp $ID0 $DIR/$tdir
215 $RUNAS_CMD -u $ID0 ls $DIR/$tdir || error "setgroups (1)"
216 do_facet $SINGLEMDS "echo '* $ID1 setgrp' > $PERM_CONF"
217 do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
218 $RUNAS_CMD -u $ID1 -G1,2,$ID0 ls $DIR/$tdir ||
219 error "setgroups (2)"
220 $RUNAS_CMD -u $ID1 -G1,2 ls $DIR/$tdir && error "setgroups (3)"
223 do_facet $SINGLEMDS "rm -f $PERM_CONF"
224 do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
226 run_test 4 "set supplementary group ==============="
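# The $PERM_CONF lines written in tests 1 and 4 follow the identity upcall's
# permission file format, "<nid-pattern> <uid> <perm>[,<perm>...]", with '*'
# matching any NID, e.g. "* $ID1 setuid,setgid,setgrp" (sketch; only setuid,
# setgid and setgrp are exercised here).  After changing $PERM_CONF the
# identity cache is flushed with "lctl set_param -n $IDENTITY_FLUSH=-1" so
# the MDS re-reads the rules.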
233 squash_id default 99 0
234 wait_nm_sync default squash_uid '' inactive
235 squash_id default 99 1
236 wait_nm_sync default squash_gid '' inactive
237 for (( i = 0; i < NODEMAP_COUNT; i++ )); do
238 local csum=${HOSTNAME_CHECKSUM}_${i}
240 do_facet mgs $LCTL nodemap_add $csum
242 if [ $rc -ne 0 ]; then
243 echo "nodemap_add $csum failed with $rc"
247 out=$(do_facet mgs $LCTL get_param nodemap.$csum.id)
248 ## fail (return 1) if the new nodemap name is missing from the output
249 [[ $(echo $out | grep -c $csum) == 0 ]] && return 1
251 for (( i = 0; i < NODEMAP_COUNT; i++ )); do
252 local csum=${HOSTNAME_CHECKSUM}_${i}
254 wait_nm_sync $csum id '' inactive
263 for ((i = 0; i < NODEMAP_COUNT; i++)); do
264 local csum=${HOSTNAME_CHECKSUM}_${i}
266 if ! do_facet mgs $LCTL nodemap_del $csum; then
267 error "nodemap_del $csum failed with $?"
271 out=$(do_facet mgs $LCTL get_param nodemap.$csum.id 2>/dev/null)
272 [[ $(echo $out | grep -c $csum) != 0 ]] && return 1
274 for (( i = 0; i < NODEMAP_COUNT; i++ )); do
275 local csum=${HOSTNAME_CHECKSUM}_${i}
277 wait_nm_sync $csum id '' inactive
284 local cmd="$LCTL nodemap_add_range"
288 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
289 range="$SUBNET_CHECKSUM.${2}.${j}.[1-253]@tcp"
290 if ! do_facet mgs $cmd --name $1 --range $range; then
299 local cmd="$LCTL nodemap_del_range"
303 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
304 range="$SUBNET_CHECKSUM.${2}.${j}.[1-253]@tcp"
305 if ! do_facet mgs $cmd --name $1 --range $range; then
315 local cmd="$LCTL nodemap_add_idmap"
318 echo "Start to add idmaps ..."
319 for ((i = 0; i < NODEMAP_COUNT; i++)); do
322 for ((j = $ID0; j < NODEMAP_MAX_ID; j++)); do
323 local csum=${HOSTNAME_CHECKSUM}_${i}
325 local fs_id=$((j + 1))
327 if ! do_facet mgs $cmd --name $csum --idtype uid \
328 --idmap $client_id:$fs_id; then
331 if ! do_facet mgs $cmd --name $csum --idtype gid \
332 --idmap $client_id:$fs_id; then
341 update_idmaps() { #LU-10040
342 [ $(lustre_version_code mgs) -lt $(version_code 2.10.55) ] &&
343 skip "Need MGS >= 2.10.55" &&
345 local csum=${HOSTNAME_CHECKSUM}_0
346 local old_id_client=$ID0
347 local old_id_fs=$((ID0 + 1))
348 local new_id=$((ID0 + 100))
355 echo "Start to update idmaps ..."
357 #Inserting an existing idmap should return an error
358 cmd="$LCTL nodemap_add_idmap --name $csum --idtype uid"
360 $cmd --idmap $old_id_client:$old_id_fs 2>/dev/null; then
361 error "insert idmap {$old_id_client:$old_id_fs} " \
362 "should return error"
367 #Update id_fs and check it
368 if ! do_facet mgs $cmd --idmap $old_id_client:$new_id; then
369 error "$cmd --idmap $old_id_client:$new_id failed"
373 tmp_id=$(do_facet mgs $LCTL get_param -n nodemap.$csum.idmap |
374 awk '{ print $7 }' | sed -n '2p')
375 [ $tmp_id != $new_id ] && { error "new id_fs $tmp_id != $new_id"; \
376 rc=$((rc + 1)); return $rc; }
378 #Update id_client and check it
379 if ! do_facet mgs $cmd --idmap $new_id:$new_id; then
380 error "$cmd --idmap $new_id:$new_id failed"
384 tmp_id=$(do_facet mgs $LCTL get_param -n nodemap.$csum.idmap |
385 awk '{ print $5 }' | sed -n "$((NODEMAP_ID_COUNT + 1)) p")
386 tmp_id=${tmp_id%,*} #e.g. "501,"->"501"
387 [ $tmp_id != $new_id ] && { error "new id_client $tmp_id != $new_id"; \
388 rc=$((rc + 1)); return $rc; }
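# Note on the awk field numbers above: the nodemap.<name>.idmap parameter is
# assumed to print one record per mapping, roughly
#   { idtype: uid, client_id: <id>, fs_id: <id> },
# so with default whitespace splitting $5 is the client id (with a trailing
# comma, stripped above) and $7 is the filesystem id.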
390 #Delete above updated idmap
391 cmd="$LCTL nodemap_del_idmap --name $csum --idtype uid"
392 if ! do_facet mgs $cmd --idmap $new_id:$new_id; then
393 error "$cmd --idmap $new_id:$new_id failed"
398 #restore the idmap so that delete_idmaps can clean up properly
399 cmd="$LCTL nodemap_add_idmap --name $csum --idtype uid"
400 if ! do_facet mgs $cmd --idmap $old_id_client:$old_id_fs; then
401 error "$cmd --idmap $old_id_client:$old_id_fs failed"
411 local cmd="$LCTL nodemap_del_idmap"
414 echo "Start to delete idmaps ..."
415 for ((i = 0; i < NODEMAP_COUNT; i++)); do
418 for ((j = $ID0; j < NODEMAP_MAX_ID; j++)); do
419 local csum=${HOSTNAME_CHECKSUM}_${i}
421 local fs_id=$((j + 1))
423 if ! do_facet mgs $cmd --name $csum --idtype uid \
424 --idmap $client_id:$fs_id; then
427 if ! do_facet mgs $cmd --name $csum --idtype gid \
428 --idmap $client_id:$fs_id; then
441 local cmd="$LCTL nodemap_modify"
444 proc[0]="admin_nodemap"
445 proc[1]="trusted_nodemap"
449 for ((idx = 0; idx < 2; idx++)); do
450 if ! do_facet mgs $cmd --name $1 --property ${option[$idx]} \
455 if ! do_facet mgs $cmd --name $1 --property ${option[$idx]} \
465 [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
466 skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
470 cmd[0]="$LCTL nodemap_modify --property squash_uid"
471 cmd[1]="$LCTL nodemap_modify --property squash_gid"
473 if ! do_facet mgs ${cmd[$3]} --name $1 --value $2; then
479 local nodemap_name=$1
484 local is_active=$(do_facet mgs $LCTL get_param -n nodemap.active)
489 local mgs_ip=$(host_nids_address $mgs_HOST $NETTYPE | cut -d' ' -f1)
492 if [ "$nodemap_name" == "active" ]; then
494 elif [ -z "$key" ]; then
495 proc_param=${nodemap_name}
497 proc_param="${nodemap_name}.${key}"
499 if [ "$opt" == "inactive" ]; then
500 # check nm sync even if nodemap is not activated
504 (( is_active == 0 )) && [ "$proc_param" != "active" ] && return
506 if [ -z "$value" ]; then
507 out1=$(do_facet mgs $LCTL get_param $opt \
508 nodemap.${proc_param} 2>/dev/null)
509 echo "On MGS ${mgs_ip}, ${proc_param} = $out1"
514 # wait up to 10 seconds for other servers to sync with mgs
515 for i in $(seq 1 10); do
516 for node in $(all_server_nodes); do
517 local node_ip=$(host_nids_address $node $NETTYPE |
521 if [ -z "$value" ]; then
522 [ $node_ip == $mgs_ip ] && continue
525 out2=$(do_node $node_ip $LCTL get_param $opt \
526 nodemap.$proc_param 2>/dev/null)
527 echo "On $node ${node_ip}, ${proc_param} = $out2"
528 [ "$out1" != "$out2" ] && is_sync=false && break
536 echo OTHER - IP: $node_ip
538 error "mgs and $nodemap_name ${key} mismatch, $i attempts"
540 echo "waited $((i - 1)) seconds for sync"
543 # ensure that the squash defaults are the expected defaults
544 squash_id default 99 0
545 wait_nm_sync default squash_uid '' inactive
546 squash_id default 99 1
547 wait_nm_sync default squash_gid '' inactive
552 cmd="$LCTL nodemap_test_nid"
554 nid=$(do_facet mgs $cmd $1)
556 if [ $nid == $2 ]; then
564 local nodemap_name=$1
569 local is_active=$(do_facet mgs $LCTL get_param -n nodemap.active)
574 local mgs_ip=$(host_nids_address $mgs_HOST $NETTYPE | cut -d' ' -f1)
577 if [ "$nodemap_name" == "active" ]; then
579 elif [ -z "$key" ]; then
580 proc_param=${nodemap_name}
582 proc_param="${nodemap_name}.${key}"
584 (( is_active == 0 )) && [ "$proc_param" != "active" ] && return
586 if [ -z "$value" ]; then
587 out1=$(do_facet mgs $LCTL get_param $opt nodemap.${proc_param})
588 echo "On MGS ${mgs_ip}, ${proc_param} = $out1"
593 # wait up to 10 seconds for other servers to sync with mgs
594 for i in $(seq 1 10); do
595 for node in $(all_server_nodes); do
596 local node_ip=$(host_nids_address $node $NETTYPE |
600 if [ -z "$value" ]; then
601 [ $node_ip == $mgs_ip ] && continue
604 out2=$(do_node $node_ip $LCTL get_param $opt \
605 nodemap.$proc_param 2>/dev/null)
606 echo "On $node ${node_ip}, ${proc_param} = $out2"
607 [ "$out1" != "$out2" ] && is_sync=false && break
615 echo OTHER - IP: $node_ip
617 error "mgs and $nodemap_name ${key} mismatch, $i attempts"
619 echo "waited $((i - 1)) seconds for sync"
623 # restore activation state
624 do_facet mgs $LCTL nodemap_activate 0
630 local cmd="$LCTL nodemap_test_id"
633 echo "Start to test idmaps ..."
634 ## nodemap deactivated
635 if ! do_facet mgs $LCTL nodemap_activate 0; then
638 for ((id = $ID0; id < NODEMAP_MAX_ID; id++)); do
641 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
642 local nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
643 local fs_id=$(do_facet mgs $cmd --nid $nid \
644 --idtype uid --id $id)
645 if [ $fs_id != $id ]; then
646 echo "expected $id, got $fs_id"
653 if ! do_facet mgs $LCTL nodemap_activate 1; then
657 for ((id = $ID0; id < NODEMAP_MAX_ID; id++)); do
658 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
659 nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
660 fs_id=$(do_facet mgs $cmd --nid $nid \
661 --idtype uid --id $id)
662 expected_id=$((id + 1))
663 if [ $fs_id != $expected_id ]; then
664 echo "expected $expected_id, got $fs_id"
671 for ((i = 0; i < NODEMAP_COUNT; i++)); do
672 local csum=${HOSTNAME_CHECKSUM}_${i}
674 if ! do_facet mgs $LCTL nodemap_modify --name $csum \
675 --property trusted --value 1; then
676 error "nodemap_modify $csum failed with $?"
681 for ((id = $ID0; id < NODEMAP_MAX_ID; id++)); do
682 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
683 nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
684 fs_id=$(do_facet mgs $cmd --nid $nid \
685 --idtype uid --id $id)
686 if [ $fs_id != $id ]; then
687 echo "expected $id, got $fs_id"
693 ## ensure allow_root_access is enabled
694 for ((i = 0; i < NODEMAP_COUNT; i++)); do
695 local csum=${HOSTNAME_CHECKSUM}_${i}
697 if ! do_facet mgs $LCTL nodemap_modify --name $csum \
698 --property admin --value 1; then
699 error "nodemap_modify $csum failed with $?"
704 ## check that root is allowed
705 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
706 nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
707 fs_id=$(do_facet mgs $cmd --nid $nid --idtype uid --id 0)
708 if [ $fs_id != 0 ]; then
709 echo "root allowed expected 0, got $fs_id"
714 ## ensure allow_root_access is disabled
715 for ((i = 0; i < NODEMAP_COUNT; i++)); do
716 local csum=${HOSTNAME_CHECKSUM}_${i}
718 if ! do_facet mgs $LCTL nodemap_modify --name $csum \
719 --property admin --value 0; then
720 error "nodemap_modify ${HOSTNAME_CHECKSUM}_${i} "
726 ## check that root is mapped to 99
727 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
728 nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
729 fs_id=$(do_facet mgs $cmd --nid $nid --idtype uid --id 0)
730 if [ $fs_id != 99 ]; then
731 error "root squash expected 99, got $fs_id"
736 ## reset client trust to 0
737 for ((i = 0; i < NODEMAP_COUNT; i++)); do
738 if ! do_facet mgs $LCTL nodemap_modify \
739 --name ${HOSTNAME_CHECKSUM}_${i} \
740 --property trusted --value 0; then
741 error "nodemap_modify ${HOSTNAME_CHECKSUM}_${i} "
753 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
754 [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
755 skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
760 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
764 [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 2
768 run_test 7 "nodemap create and delete"
773 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
774 [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
775 skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
782 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
788 [[ $rc == 0 ]] && error "duplicate nodemap_add allowed with $rc" &&
794 [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 3
798 run_test 8 "nodemap reject duplicates"
804 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
805 [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
806 skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
812 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
815 for ((i = 0; i < NODEMAP_COUNT; i++)); do
816 if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
820 [[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2
823 for ((i = 0; i < NODEMAP_COUNT; i++)); do
824 if ! delete_range ${HOSTNAME_CHECKSUM}_${i} $i; then
828 [[ $rc != 0 ]] && error "nodemap_del_range failed with $rc" && return 4
833 [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4
837 run_test 9 "nodemap range add"
842 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
843 [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
844 skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
850 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
853 for ((i = 0; i < NODEMAP_COUNT; i++)); do
854 if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
858 [[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2
861 for ((i = 0; i < NODEMAP_COUNT; i++)); do
862 if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
866 [[ $rc == 0 ]] && error "nodemap_add_range duplicate add with $rc" &&
871 for ((i = 0; i < NODEMAP_COUNT; i++)); do
872 if ! delete_range ${HOSTNAME_CHECKSUM}_${i} $i; then
876 [[ $rc != 0 ]] && error "nodemap_del_range failed with $rc" && return 4
880 [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 5
884 run_test 10a "nodemap reject duplicate ranges"
887 [ $(lustre_version_code mgs) -lt $(version_code 2.10.53) ] &&
888 skip "Need MGS >= 2.10.53" && return
892 local nids="192.168.19.[0-255]@o2ib20"
894 do_facet mgs $LCTL nodemap_del $nm1 2>/dev/null
895 do_facet mgs $LCTL nodemap_del $nm2 2>/dev/null
897 do_facet mgs $LCTL nodemap_add $nm1 || error "Add $nm1 failed"
898 do_facet mgs $LCTL nodemap_add $nm2 || error "Add $nm2 failed"
899 do_facet mgs $LCTL nodemap_add_range --name $nm1 --range $nids ||
900 error "Add range $nids to $nm1 failed"
901 [ -n "$(do_facet mgs $LCTL get_param nodemap.$nm1.* |
902 grep start_nid)" ] || error "No range was found"
903 do_facet mgs $LCTL nodemap_del_range --name $nm2 --range $nids &&
904 error "Deleting range $nids from $nm2 should fail"
905 [ -n "$(do_facet mgs $LCTL get_param nodemap.$nm1.* |
906 grep start_nid)" ] || error "Range $nids should be there"
908 do_facet mgs $LCTL nodemap_del $nm1 || error "Delete $nm1 failed"
909 do_facet mgs $LCTL nodemap_del $nm2 || error "Delete $nm2 failed"
912 run_test 10b "delete range from the correct nodemap"
914 test_10c() { #LU-8912
915 [ $(lustre_version_code mgs) -lt $(version_code 2.10.57) ] &&
916 skip "Need MGS >= 2.10.57" && return
918 local nm="nodemap_lu8912"
919 local nid_range="10.210.[32-47].[0-255]@o2ib3"
920 local start_nid="10.210.32.0@o2ib3"
921 local end_nid="10.210.47.255@o2ib3"
922 local start_nid_found
925 do_facet mgs $LCTL nodemap_del $nm 2>/dev/null
926 do_facet mgs $LCTL nodemap_add $nm || error "Add $nm failed"
927 do_facet mgs $LCTL nodemap_add_range --name $nm --range $nid_range ||
928 error "Add range $nid_range to $nm failed"
930 start_nid_found=$(do_facet mgs $LCTL get_param nodemap.$nm.* |
931 awk -F '[,: ]' /start_nid/'{ print $9 }')
932 [ "$start_nid" == "$start_nid_found" ] ||
933 error "start_nid: $start_nid_found != $start_nid"
934 end_nid_found=$(do_facet mgs $LCTL get_param nodemap.$nm.* |
935 awk -F '[,: ]' /end_nid/'{ print $13 }')
936 [ "$end_nid" == "$end_nid_found" ] ||
937 error "end_nid: $end_nid_found != $end_nid"
939 do_facet mgs $LCTL nodemap_del $nm || error "Delete $nm failed"
942 run_test 10c "verfify contiguous range support"
944 test_10d() { #LU-8913
945 [ $(lustre_version_code mgs) -lt $(version_code 2.10.59) ] &&
946 skip "Need MGS >= 2.10.59" && return
948 local nm="nodemap_lu8913"
949 local nid_range="*@o2ib3"
950 local start_nid="0.0.0.0@o2ib3"
951 local end_nid="255.255.255.255@o2ib3"
952 local start_nid_found
955 do_facet mgs $LCTL nodemap_del $nm 2>/dev/null
956 do_facet mgs $LCTL nodemap_add $nm || error "Add $nm failed"
957 do_facet mgs $LCTL nodemap_add_range --name $nm --range $nid_range ||
958 error "Add range $nid_range to $nm failed"
960 start_nid_found=$(do_facet mgs $LCTL get_param nodemap.$nm.* |
961 awk -F '[,: ]' /start_nid/'{ print $9 }')
962 [ "$start_nid" == "$start_nid_found" ] ||
963 error "start_nid: $start_nid_found != $start_nid"
964 end_nid_found=$(do_facet mgs $LCTL get_param nodemap.$nm.* |
965 awk -F '[,: ]' /end_nid/'{ print $13 }')
966 [ "$end_nid" == "$end_nid_found" ] ||
967 error "end_nid: $end_nid_found != $end_nid"
969 do_facet mgs $LCTL nodemap_del $nm || error "Delete $nm failed"
972 run_test 10d "verfify nodemap range format '*@<net>' support"
977 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
978 [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
979 skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
985 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
988 for ((i = 0; i < NODEMAP_COUNT; i++)); do
989 if ! modify_flags ${HOSTNAME_CHECKSUM}_${i}; then
993 [[ $rc != 0 ]] && error "nodemap_modify with $rc" && return 2
998 [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 3
1002 run_test 11 "nodemap modify"
1007 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
1008 [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
1009 skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
1015 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
1018 for ((i = 0; i < NODEMAP_COUNT; i++)); do
1019 if ! squash_id ${HOSTNAME_CHECKSUM}_${i} 88 0; then
1023 [[ $rc != 0 ]] && error "nodemap squash_uid with $rc" && return 2
1026 for ((i = 0; i < NODEMAP_COUNT; i++)); do
1027 if ! squash_id ${HOSTNAME_CHECKSUM}_${i} 88 1; then
1031 [[ $rc != 0 ]] && error "nodemap squash_gid with $rc" && return 3
1036 [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4
1040 run_test 12 "nodemap set squash ids"
1045 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
1046 [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
1047 skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
1053 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
1056 for ((i = 0; i < NODEMAP_COUNT; i++)); do
1057 if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
1061 [[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2
1064 for ((i = 0; i < NODEMAP_COUNT; i++)); do
1065 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
1066 for k in $NODEMAP_IPADDR_LIST; do
1067 if ! test_nid $SUBNET_CHECKSUM.$i.$j.$k \
1068 ${HOSTNAME_CHECKSUM}_${i}; then
1074 [[ $rc != 0 ]] && error "nodemap_test_nid failed with $rc" && return 3
1079 [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4
1083 run_test 13 "test nids"
1088 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
1089 [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
1090 skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
1096 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
1099 for ((i = 0; i < NODEMAP_COUNT; i++)); do
1100 for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
1101 for k in $NODEMAP_IPADDR_LIST; do
1102 if ! test_nid $SUBNET_CHECKSUM.$i.$j.$k \
1109 [[ $rc != 0 ]] && error "nodemap_test_nid failed with $rc" && return 3
1114 [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4
1118 run_test 14 "test default nodemap nid lookup"
1123 remote_mgs_nodsh && skip "remote MGS with nodsh" && return
1124 [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
1125 skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
1131 [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
1134 for ((i = 0; i < NODEMAP_COUNT; i++)); do
1135 if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
1139 [[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2
1144 [[ $rc != 0 ]] && error "nodemap_add_idmap failed with $rc" && return 3
1146 activedefault=$(do_facet mgs $LCTL get_param -n nodemap.active)
1147 if [[ "$activedefault" != "1" ]]; then
1148 stack_trap cleanup_active EXIT
1154 [[ $rc != 0 ]] && error "nodemap_test_id failed with $rc" && return 4
1159 [[ $rc != 0 ]] && error "update_idmaps failed with $rc" && return 5
1164 [[ $rc != 0 ]] && error "nodemap_del_idmap failed with $rc" && return 6
1169 [[ $rc != 0 ]] && error "nodemap_delete failed with $rc" && return 7
1173 run_test 15 "test id mapping"
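# For a quick manual check of the mappings exercised above, the MGS can be
# asked how a NID and id would be mapped (sketch, NID/id values illustrative):
#   lctl nodemap_test_nid 10.0.0.1@tcp                            # -> nodemap
#   lctl nodemap_test_id --nid 10.0.0.1@tcp --idtype uid --id 500 # -> fs id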
1175 create_fops_nodemaps() {
1178 for client in $clients; do
1179 local client_ip=$(host_nids_address $client $NETTYPE)
1180 local client_nid=$(h2nettype $client_ip)
1181 do_facet mgs $LCTL nodemap_add c${i} || return 1
1182 do_facet mgs $LCTL nodemap_add_range \
1183 --name c${i} --range $client_nid || return 1
1184 for map in ${FOPS_IDMAPS[i]}; do
1185 do_facet mgs $LCTL nodemap_add_idmap --name c${i} \
1186 --idtype uid --idmap ${map} || return 1
1187 do_facet mgs $LCTL nodemap_add_idmap --name c${i} \
1188 --idtype gid --idmap ${map} || return 1
1191 wait_nm_sync c$i idmap
1198 delete_fops_nodemaps() {
1201 for client in $clients; do
1202 do_facet mgs $LCTL nodemap_del c${i} || return 1
1210 if [ $MDSCOUNT -le 1 ]; then
1211 do_node ${clients_arr[0]} mkdir -p $DIR/$tdir
1213 # round-robin MDTs to test DNE nodemap support
1214 [ ! -d $DIR ] && do_node ${clients_arr[0]} mkdir -p $DIR
1215 do_node ${clients_arr[0]} $LFS setdirstripe -c 1 -i \
1216 $((fops_mds_index % MDSCOUNT)) $DIR/$tdir
1217 ((fops_mds_index++))
1221 # acl test directory needs to be initialized on a privileged client
1223 local admin=$(do_facet mgs $LCTL get_param -n nodemap.c0.admin_nodemap)
1224 local trust=$(do_facet mgs $LCTL get_param -n \
1225 nodemap.c0.trusted_nodemap)
1227 do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
1228 do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
1230 wait_nm_sync c0 admin_nodemap
1231 wait_nm_sync c0 trusted_nodemap
1233 do_node ${clients_arr[0]} rm -rf $DIR/$tdir
1235 do_node ${clients_arr[0]} chown $user $DIR/$tdir
1237 do_facet mgs $LCTL nodemap_modify --name c0 \
1238 --property admin --value $admin
1239 do_facet mgs $LCTL nodemap_modify --name c0 \
1240 --property trusted --value $trust
1242 # flush MDT locks to make sure they are reacquired before test
1243 do_node ${clients_arr[0]} $LCTL set_param \
1244 ldlm.namespaces.$FSNAME-MDT*.lru_size=clear
1246 wait_nm_sync c0 admin_nodemap
1247 wait_nm_sync c0 trusted_nodemap
1250 # fileset test directory needs to be initialized on a privileged client
1251 fileset_test_setup() {
1254 if [ -n "$FILESET" -a -z "$SKIP_FILESET" ]; then
1255 cleanup_mount $MOUNT
1256 FILESET="" zconf_mount_clients $CLIENTS $MOUNT
1259 local admin=$(do_facet mgs $LCTL get_param -n \
1260 nodemap.${nm}.admin_nodemap)
1261 local trust=$(do_facet mgs $LCTL get_param -n \
1262 nodemap.${nm}.trusted_nodemap)
1264 do_facet mgs $LCTL nodemap_modify --name $nm --property admin --value 1
1265 do_facet mgs $LCTL nodemap_modify --name $nm --property trusted \
1268 wait_nm_sync $nm admin_nodemap
1269 wait_nm_sync $nm trusted_nodemap
1271 # create directory and populate it for subdir mount
1272 do_node ${clients_arr[0]} mkdir $MOUNT/$subdir ||
1273 error "unable to create dir $MOUNT/$subdir"
1274 do_node ${clients_arr[0]} touch $MOUNT/$subdir/this_is_$subdir ||
1275 error "unable to create file $MOUNT/$subdir/this_is_$subdir"
1276 do_node ${clients_arr[0]} mkdir $MOUNT/$subdir/$subsubdir ||
1277 error "unable to create dir $MOUNT/$subdir/$subsubdir"
1278 do_node ${clients_arr[0]} touch \
1279 $MOUNT/$subdir/$subsubdir/this_is_$subsubdir ||
1280 error "unable to create file \
1281 $MOUNT/$subdir/$subsubdir/this_is_$subsubdir"
1283 do_facet mgs $LCTL nodemap_modify --name $nm \
1284 --property admin --value $admin
1285 do_facet mgs $LCTL nodemap_modify --name $nm \
1286 --property trusted --value $trust
1288 # flush MDT locks to make sure they are reacquired before test
1289 do_node ${clients_arr[0]} $LCTL set_param \
1290 ldlm.namespaces.$FSNAME-MDT*.lru_size=clear
1292 wait_nm_sync $nm admin_nodemap
1293 wait_nm_sync $nm trusted_nodemap
1296 # fileset test directory needs to be initialized on a privileged client
1297 fileset_test_cleanup() {
1299 local admin=$(do_facet mgs $LCTL get_param -n \
1300 nodemap.${nm}.admin_nodemap)
1301 local trust=$(do_facet mgs $LCTL get_param -n \
1302 nodemap.${nm}.trusted_nodemap)
1304 do_facet mgs $LCTL nodemap_modify --name $nm --property admin --value 1
1305 do_facet mgs $LCTL nodemap_modify --name $nm --property trusted \
1308 wait_nm_sync $nm admin_nodemap
1309 wait_nm_sync $nm trusted_nodemap
1311 # cleanup directory created for subdir mount
1312 do_node ${clients_arr[0]} rm -rf $MOUNT/$subdir ||
1313 error "unable to remove dir $MOUNT/$subdir"
1315 do_facet mgs $LCTL nodemap_modify --name $nm \
1316 --property admin --value $admin
1317 do_facet mgs $LCTL nodemap_modify --name $nm \
1318 --property trusted --value $trust
1320 # flush MDT locks to make sure they are reacquired before test
1321 do_node ${clients_arr[0]} $LCTL set_param \
1322 ldlm.namespaces.$FSNAME-MDT*.lru_size=clear
1324 wait_nm_sync $nm admin_nodemap
1325 wait_nm_sync $nm trusted_nodemap
1326 if [ -n "$FILESET" -a -z "$SKIP_FILESET" ]; then
1327 cleanup_mount $MOUNT
1328 zconf_mount_clients $CLIENTS $MOUNT
1332 do_create_delete() {
1335 local testfile=$DIR/$tdir/$tfile
1339 if $run_u touch $testfile >& /dev/null; then
1341 $run_u rm $testfile && d=1
1345 local expected=$(get_cr_del_expected $key)
1346 [ "$res" != "$expected" ] &&
1347 error "test $key, wanted $expected, got $res" && rc=$((rc + 1))
1351 nodemap_check_quota() {
1353 $run_u lfs quota -q $DIR | awk '{ print $2; exit; }'
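# (lfs quota -q prints the usage line without headers, so field 2 is the block
#  usage in kbytes; that is why the 1MB write below is checked as +1024.)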
1356 do_fops_quota_test() {
1358 # fuzz quota used to account for possible indirect blocks, etc
1359 local quota_fuzz=$(fs_log_size)
1360 local qused_orig=$(nodemap_check_quota "$run_u")
1361 local qused_high=$((qused_orig + quota_fuzz))
1362 local qused_low=$((qused_orig - quota_fuzz))
1363 local testfile=$DIR/$tdir/$tfile
1364 $run_u dd if=/dev/zero of=$testfile oflag=sync bs=1M count=1 \
1365 >& /dev/null || error "unable to write quota test file"
1366 sync; sync_all_data || true
1368 local qused_new=$(nodemap_check_quota "$run_u")
1369 [ $((qused_new)) -lt $((qused_low + 1024)) -o \
1370 $((qused_new)) -gt $((qused_high + 1024)) ] &&
1371 error "$qused_new != $qused_orig + 1M after write, " \
1372 "fuzz is $quota_fuzz"
1373 $run_u rm $testfile || error "unable to remove quota test file"
1374 wait_delete_completed_mds
1376 qused_new=$(nodemap_check_quota "$run_u")
1377 [ $((qused_new)) -lt $((qused_low)) \
1378 -o $((qused_new)) -gt $((qused_high)) ] &&
1379 error "quota not reclaimed, expect $qused_orig, " \
1380 "got $qused_new, fuzz $quota_fuzz"
1383 get_fops_mapped_user() {
1386 for ((i=0; i < ${#FOPS_IDMAPS[@]}; i++)); do
1387 for map in ${FOPS_IDMAPS[i]}; do
1388 if [ $(cut -d: -f1 <<< "$map") == $cli_user ]; then
1389 cut -d: -f2 <<< "$map"
1397 get_cr_del_expected() {
1399 IFS=":" read -a key <<< "$1"
1400 local mapmode="${key[0]}"
1401 local mds_user="${key[1]}"
1402 local cluster="${key[2]}"
1403 local cli_user="${key[3]}"
1404 local mode="0${key[4]}"
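# The key format is "<mapmode>:<mds_user>:<cluster>:<client_uid>:<mode>", as
# built in test_fops below.  Worked example (sketch): for mode 0703,
# (mode & 3) == 3 means "other" has wx and (mode & 0300) == 0300 means the
# owner has wx; these are the two conditions tested further down.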
1411 [[ $mapmode == *mapped* ]] && mapped=1
1412 # only c1 is mapped in these test cases
1413 [[ $mapmode == mapped_trusted* ]] && [ "$cluster" == "c0" ] && mapped=0
1414 [[ $mapmode == *noadmin* ]] && noadmin=1
1416 # o+wx works as long as the user isn't mapped
1417 if [ $((mode & 3)) -eq 3 ]; then
1421 # if client user is root, check if root is squashed
1422 if [ "$cli_user" == "0" ]; then
1423 # a squashed root succeeds only if the "other" bits grant access
1426 1) [ "$other" == "1" ] && echo $SUCCESS
1427 [ "$other" == "0" ] && echo $FAILURE;;
1431 if [ "$mapped" == "0" ]; then
1432 [ "$other" == "1" ] && echo $SUCCESS
1433 [ "$other" == "0" ] && echo $FAILURE
1437 # if mapped user is mds user, check for u+wx
1438 mapped_user=$(get_fops_mapped_user $cli_user)
1439 [ "$mapped_user" == "-1" ] &&
1440 error "unable to find mapping for client user $cli_user"
1442 if [ "$mapped_user" == "$mds_user" -a \
1443 $(((mode & 0300) == 0300)) -eq 1 ]; then
1447 if [ "$mapped_user" != "$mds_user" -a "$other" == "1" ]; then
1454 test_fops_admin_cli_i=""
1455 test_fops_chmod_dir() {
1456 local current_cli_i=$1
1458 local dir_to_chmod=$3
1459 local new_admin_cli_i=""
1461 # do we need to set up a new admin client?
1462 [ "$current_cli_i" == "0" ] && [ "$test_fops_admin_cli_i" != "1" ] &&
1464 [ "$current_cli_i" != "0" ] && [ "$test_fops_admin_cli_i" != "0" ] &&
1468 # if only one client, and non-admin, need to flip admin every time
1468 if [ "$num_clients" == "1" ]; then
1469 test_fops_admin_client=$clients
1470 test_fops_admin_val=$(do_facet mgs $LCTL get_param -n \
1471 nodemap.c0.admin_nodemap)
1472 if [ "$test_fops_admin_val" != "1" ]; then
1473 do_facet mgs $LCTL nodemap_modify \
1477 wait_nm_sync c0 admin_nodemap
1479 elif [ "$new_admin_cli_i" != "" ]; then
1480 # restore admin val to old admin client
1481 if [ "$test_fops_admin_cli_i" != "" ] &&
1482 [ "$test_fops_admin_val" != "1" ]; then
1483 do_facet mgs $LCTL nodemap_modify \
1484 --name c${test_fops_admin_cli_i} \
1486 --value $test_fops_admin_val
1487 wait_nm_sync c${test_fops_admin_cli_i} admin_nodemap
1490 test_fops_admin_cli_i=$new_admin_cli_i
1491 test_fops_admin_client=${clients_arr[$new_admin_cli_i]}
1492 test_fops_admin_val=$(do_facet mgs $LCTL get_param -n \
1493 nodemap.c${new_admin_cli_i}.admin_nodemap)
1495 if [ "$test_fops_admin_val" != "1" ]; then
1496 do_facet mgs $LCTL nodemap_modify \
1497 --name c${new_admin_cli_i} \
1500 wait_nm_sync c${new_admin_cli_i} admin_nodemap
1504 do_node $test_fops_admin_client chmod $perm_bits $DIR/$tdir || return 1
1506 # remove admin for single client if originally non-admin
1507 if [ "$num_clients" == "1" ] && [ "$test_fops_admin_val" != "1" ]; then
1508 do_facet mgs $LCTL nodemap_modify --name c0 --property admin \
1510 wait_nm_sync c0 admin_nodemap
1518 local single_client="$2"
1519 local client_user_list=([0]="0 $((IDBASE+3)) $((IDBASE+4))"
1520 [1]="0 $((IDBASE+5)) $((IDBASE+6))")
1523 local perm_bit_list="0 3 $((0300)) $((0303))"
1524 # SLOW tests 000-007, 010-070, 100-700 (octal modes)
1525 [ "$SLOW" == "yes" ] &&
1526 perm_bit_list="0 $(seq 1 7) $(seq 8 8 63) $(seq 64 64 511) \
1529 # step through mds users. -1 means root
1530 for mds_i in -1 0 1 2; do
1531 local user=$((mds_i + IDBASE))
1535 [ "$mds_i" == "-1" ] && user=0
1537 echo mkdir -p $DIR/$tdir
1540 for client in $clients; do
1542 for u in ${client_user_list[$cli_i]}; do
1543 local run_u="do_node $client \
1544 $RUNAS_CMD -u$u -g$u -G$u"
1545 for perm_bits in $perm_bit_list; do
1546 local mode=$(printf %03o $perm_bits)
1548 key="$mapmode:$user:c$cli_i:$u:$mode"
1549 test_fops_chmod_dir $cli_i $mode \
1551 error cannot chmod $key
1552 do_create_delete "$run_u" "$key"
1556 test_fops_chmod_dir $cli_i 777 $DIR/$tdir ||
1557 error cannot chmod $key
1558 do_fops_quota_test "$run_u"
1561 cli_i=$((cli_i + 1))
1562 [ "$single_client" == "1" ] && break
1569 nodemap_version_check () {
1570 remote_mgs_nodsh && skip "remote MGS with nodsh" && return 1
1571 [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
1572 skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
1577 nodemap_test_setup() {
1579 local active_nodemap=1
1581 [ "$1" == "0" ] && active_nodemap=0
1583 do_nodes $(comma_list $(all_mdts_nodes)) \
1584 $LCTL set_param mdt.*.identity_upcall=NONE
1587 create_fops_nodemaps
1589 [[ $rc != 0 ]] && error "adding fops nodemaps failed $rc"
1591 do_facet mgs $LCTL nodemap_activate $active_nodemap
1594 do_facet mgs $LCTL nodemap_modify --name default \
1595 --property admin --value 1
1596 wait_nm_sync default admin_nodemap
1597 do_facet mgs $LCTL nodemap_modify --name default \
1598 --property trusted --value 1
1599 wait_nm_sync default trusted_nodemap
1602 nodemap_test_cleanup() {
1604 delete_fops_nodemaps
1606 [[ $rc != 0 ]] && error "removing fops nodemaps failed $rc"
1608 do_facet mgs $LCTL nodemap_modify --name default \
1609 --property admin --value 0
1610 wait_nm_sync default admin_nodemap
1611 do_facet mgs $LCTL nodemap_modify --name default \
1612 --property trusted --value 0
1613 wait_nm_sync default trusted_nodemap
1615 do_facet mgs $LCTL nodemap_activate 0
1616 wait_nm_sync active 0
1618 export SK_UNIQUE_NM=false
1622 nodemap_clients_admin_trusted() {
1626 for client in $clients; do
1627 do_facet mgs $LCTL nodemap_modify --name c0 \
1628 --property admin --value $admin
1629 do_facet mgs $LCTL nodemap_modify --name c0 \
1630 --property trusted --value $tr
1633 wait_nm_sync c$((i - 1)) admin_nodemap
1634 wait_nm_sync c$((i - 1)) trusted_nodemap
1638 nodemap_version_check || return 0
1639 nodemap_test_setup 0
1641 trap nodemap_test_cleanup EXIT
1643 nodemap_test_cleanup
1645 run_test 16 "test nodemap all_off fileops"
1649 [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.55) ]; then
1650 skip "Need MDS >= 2.11.55"
1653 nodemap_version_check || return 0
1656 trap nodemap_test_cleanup EXIT
1657 nodemap_clients_admin_trusted 0 1
1658 test_fops trusted_noadmin 1
1659 nodemap_test_cleanup
1661 run_test 17 "test nodemap trusted_noadmin fileops"
1665 [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.55) ]; then
1666 skip "Need MDS >= 2.11.55"
1669 nodemap_version_check || return 0
1672 trap nodemap_test_cleanup EXIT
1673 nodemap_clients_admin_trusted 0 0
1674 test_fops mapped_noadmin 1
1675 nodemap_test_cleanup
1677 run_test 18 "test nodemap mapped_noadmin fileops"
1681 [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.55) ]; then
1682 skip "Need MDS >= 2.11.55"
1685 nodemap_version_check || return 0
1688 trap nodemap_test_cleanup EXIT
1689 nodemap_clients_admin_trusted 1 1
1690 test_fops trusted_admin 1
1691 nodemap_test_cleanup
1693 run_test 19 "test nodemap trusted_admin fileops"
1697 [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.55) ]; then
1698 skip "Need MDS >= 2.11.55"
1701 nodemap_version_check || return 0
1704 trap nodemap_test_cleanup EXIT
1705 nodemap_clients_admin_trusted 1 0
1706 test_fops mapped_admin 1
1707 nodemap_test_cleanup
1709 run_test 20 "test nodemap mapped_admin fileops"
1713 [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.55) ]; then
1714 skip "Need MDS >= 2.11.55"
1717 nodemap_version_check || return 0
1720 trap nodemap_test_cleanup EXIT
1723 for client in $clients; do
1724 do_facet mgs $LCTL nodemap_modify --name c${i} \
1725 --property admin --value 0
1726 do_facet mgs $LCTL nodemap_modify --name c${i} \
1727 --property trusted --value $x
1731 wait_nm_sync c$((i - 1)) trusted_nodemap
1733 test_fops mapped_trusted_noadmin
1734 nodemap_test_cleanup
1736 run_test 21 "test nodemap mapped_trusted_noadmin fileops"
1740 [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.55) ]; then
1741 skip "Need MDS >= 2.11.55"
1744 nodemap_version_check || return 0
1747 trap nodemap_test_cleanup EXIT
1750 for client in $clients; do
1751 do_facet mgs $LCTL nodemap_modify --name c${i} \
1752 --property admin --value 1
1753 do_facet mgs $LCTL nodemap_modify --name c${i} \
1754 --property trusted --value $x
1758 wait_nm_sync c$((i - 1)) trusted_nodemap
1760 test_fops mapped_trusted_admin
1761 nodemap_test_cleanup
1763 run_test 22 "test nodemap mapped_trusted_admin fileops"
1765 # acl test directory needs to be initialized on a privileged client
1766 nodemap_acl_test_setup() {
1767 local admin=$(do_facet mgs $LCTL get_param -n \
1768 nodemap.c0.admin_nodemap)
1769 local trust=$(do_facet mgs $LCTL get_param -n \
1770 nodemap.c0.trusted_nodemap)
1772 do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
1773 do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
1775 wait_nm_sync c0 admin_nodemap
1776 wait_nm_sync c0 trusted_nodemap
1778 do_node ${clients_arr[0]} rm -rf $DIR/$tdir
1780 do_node ${clients_arr[0]} chmod a+rwx $DIR/$tdir ||
1781 error unable to chmod a+rwx test dir $DIR/$tdir
1783 do_facet mgs $LCTL nodemap_modify --name c0 \
1784 --property admin --value $admin
1785 do_facet mgs $LCTL nodemap_modify --name c0 \
1786 --property trusted --value $trust
1788 wait_nm_sync c0 trusted_nodemap
1791 # returns 0 if the number of ACLs does not change on the second (mapped) client
1792 # after being set on the first client
1793 nodemap_acl_test() {
1795 local set_client="$2"
1796 local get_client="$3"
1797 local check_setfacl="$4"
1798 local setfacl_error=0
1799 local testfile=$DIR/$tdir/$tfile
1800 local RUNAS_USER="$RUNAS_CMD -u $user"
1802 local acl_count_post=0
1804 nodemap_acl_test_setup
1807 do_node $set_client $RUNAS_USER touch $testfile
1809 # ACL masks aren't filtered by nodemap code, so we ignore them
1810 acl_count=$(do_node $get_client getfacl $testfile | grep -v mask |
1812 do_node $set_client $RUNAS_USER setfacl -m $user:rwx $testfile ||
1816 # if check_setfacl is set to 1, setfacl is expected to fail
1816 if [ "$check_setfacl" == "1" ]; then
1817 [ "$setfacl_error" != "1" ] && return 1
1820 [ "$setfacl_error" == "1" ] && echo "WARNING: unable to setfacl"
1822 acl_count_post=$(do_node $get_client getfacl $testfile | grep -v mask |
1824 [ $acl_count -eq $acl_count_post ] && return 0
1829 [ $num_clients -lt 2 ] && skip "Need 2 clients at least" && return
1830 nodemap_version_check || return 0
1833 trap nodemap_test_cleanup EXIT
1834 # 1 trusted cluster, 1 mapped cluster
1835 local unmapped_fs=$((IDBASE+0))
1836 local unmapped_c1=$((IDBASE+5))
1837 local mapped_fs=$((IDBASE+2))
1838 local mapped_c0=$((IDBASE+4))
1839 local mapped_c1=$((IDBASE+6))
1841 do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
1842 do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
1844 do_facet mgs $LCTL nodemap_modify --name c1 --property admin --value 0
1845 do_facet mgs $LCTL nodemap_modify --name c1 --property trusted --value 0
1847 wait_nm_sync c1 trusted_nodemap
1849 # setfacl on trusted cluster to unmapped user, verify it's not seen
1850 nodemap_acl_test $unmapped_fs ${clients_arr[0]} ${clients_arr[1]} ||
1851 error "acl count (1)"
1853 # setfacl on trusted cluster to mapped user, verify it's seen
1854 nodemap_acl_test $mapped_fs ${clients_arr[0]} ${clients_arr[1]} &&
1855 error "acl count (2)"
1857 # setfacl on mapped cluster to mapped user, verify it's seen
1858 nodemap_acl_test $mapped_c1 ${clients_arr[1]} ${clients_arr[0]} &&
1859 error "acl count (3)"
1861 # setfacl on mapped cluster to unmapped user, verify error
1862 nodemap_acl_test $unmapped_fs ${clients_arr[1]} ${clients_arr[0]} 1 ||
1863 error "acl count (4)"
1866 do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 0
1867 do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 0
1869 wait_nm_sync c0 trusted_nodemap
1871 # setfacl to mapped user on c1, also mapped to c0, verify it's seen
1872 nodemap_acl_test $mapped_c1 ${clients_arr[1]} ${clients_arr[0]} &&
1873 error "acl count (5)"
1875 # setfacl to mapped user on c1, not mapped to c0, verify not seen
1876 nodemap_acl_test $unmapped_c1 ${clients_arr[1]} ${clients_arr[0]} ||
1877 error "acl count (6)"
1879 nodemap_test_cleanup
1881 run_test 23a "test mapped regular ACLs"
1883 test_23b() { #LU-9929
1884 [ $num_clients -lt 2 ] && skip "Need 2 clients at least" && return
1885 [ $(lustre_version_code mgs) -lt $(version_code 2.10.53) ] &&
1886 skip "Need MGS >= 2.10.53" && return
1888 export SK_UNIQUE_NM=true
1890 trap nodemap_test_cleanup EXIT
1892 local testdir=$DIR/$tdir
1893 local fs_id=$((IDBASE+10))
1898 do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
1899 wait_nm_sync c0 admin_nodemap
1900 do_facet mgs $LCTL nodemap_modify --name c1 --property admin --value 1
1901 wait_nm_sync c1 admin_nodemap
1902 do_facet mgs $LCTL nodemap_modify --name c1 --property trusted --value 1
1903 wait_nm_sync c1 trusted_nodemap
1905 # Add idmap $ID0:$fs_id (500:60010)
1906 do_facet mgs $LCTL nodemap_add_idmap --name c0 --idtype gid \
1907 --idmap $ID0:$fs_id ||
1908 error "add idmap $ID0:$fs_id to nodemap c0 failed"
1909 wait_nm_sync c0 idmap
1911 # set/getfacl default acl on client 1 (unmapped gid=500)
1912 do_node ${clients_arr[0]} rm -rf $testdir
1913 do_node ${clients_arr[0]} mkdir -p $testdir
1914 # Here, USER0=$(getent passwd | grep :$ID0:$ID0: | cut -d: -f1)
1915 do_node ${clients_arr[0]} setfacl -R -d -m group:$USER0:rwx $testdir ||
1916 error "setfacl $testdir on ${clients_arr[0]} failed"
1917 unmapped_id=$(do_node ${clients_arr[0]} getfacl $testdir |
1918 grep -E "default:group:.*:rwx" | awk -F: '{print $3}')
1919 [ "$unmapped_id" = "$USER0" ] ||
1920 error "gid=$ID0 was not unmapped correctly on ${clients_arr[0]}"
1922 # getfacl default acl on client 2 (mapped gid=60010)
1923 mapped_id=$(do_node ${clients_arr[1]} getfacl $testdir |
1924 grep -E "default:group:.*:rwx" | awk -F: '{print $3}')
1925 fs_user=$(do_node ${clients_arr[1]} getent passwd |
1926 grep :$fs_id:$fs_id: | cut -d: -f1)
1927 [ -z "$fs_user" ] && fs_user=$fs_id
1928 [ $mapped_id -eq $fs_id -o "$mapped_id" = "$fs_user" ] ||
1929 error "Should return gid=$fs_id or $fs_user on client2"
1932 nodemap_test_cleanup
1933 export SK_UNIQUE_NM=false
1935 run_test 23b "test mapped default ACLs"
1940 trap nodemap_test_cleanup EXIT
1941 do_nodes $(comma_list $(all_server_nodes)) $LCTL get_param -R nodemap
1943 nodemap_test_cleanup
1945 run_test 24 "check nodemap proc files for LBUGs and Oopses"
1948 local tmpfile=$(mktemp)
1949 local tmpfile2=$(mktemp)
1950 local tmpfile3=$(mktemp)
1951 local tmpfile4=$(mktemp)
1955 nodemap_version_check || return 0
1957 # stop clients for this test
1958 zconf_umount_clients $CLIENTS $MOUNT ||
1959 error "unable to umount clients $CLIENTS"
1961 export SK_UNIQUE_NM=true
1964 # enable trusted/admin for setquota call in cleanup_and_setup_lustre()
1966 for client in $clients; do
1967 do_facet mgs $LCTL nodemap_modify --name c${i} \
1968 --property admin --value 1
1969 do_facet mgs $LCTL nodemap_modify --name c${i} \
1970 --property trusted --value 1
1973 wait_nm_sync c$((i - 1)) trusted_nodemap
1975 trap nodemap_test_cleanup EXIT
1977 # create a new, empty nodemap, and add fileset info to it
1978 do_facet mgs $LCTL nodemap_add test25 ||
1979 error "unable to create nodemap $testname"
1980 do_facet mgs $LCTL set_param -P nodemap.$testname.fileset=/$subdir ||
1981 error "unable to add fileset info to nodemap test25"
1983 wait_nm_sync test25 id
1985 do_facet mgs $LCTL nodemap_info > $tmpfile
1986 do_facet mds $LCTL nodemap_info > $tmpfile2
1988 if ! $SHARED_KEY; then
1989 # will conflict with SK's nodemaps
1990 cleanup_and_setup_lustre
1992 # stop clients for this test
1993 zconf_umount_clients $CLIENTS $MOUNT ||
1994 error "unable to umount clients $CLIENTS"
1996 do_facet mgs $LCTL nodemap_info > $tmpfile3
1997 diff -q $tmpfile3 $tmpfile >& /dev/null ||
1998 error "nodemap_info diff on MGS after remount"
2000 do_facet mds $LCTL nodemap_info > $tmpfile4
2001 diff -q $tmpfile4 $tmpfile2 >& /dev/null ||
2002 error "nodemap_info diff on MDS after remount"
2005 do_facet mgs $LCTL nodemap_del test25 ||
2006 error "cannot delete nodemap test25 from config"
2007 nodemap_test_cleanup
2008 # restart clients previously stopped
2009 zconf_mount_clients $CLIENTS $MOUNT ||
2010 error "unable to mount clients $CLIENTS"
2012 rm -f $tmpfile $tmpfile2 $tmpfile3 $tmpfile4
2013 export SK_UNIQUE_NM=false
2015 run_test 25 "test save and reload nodemap config"
2018 nodemap_version_check || return 0
2022 do_facet mgs "seq -f 'c%g' $large_i | xargs -n1 $LCTL nodemap_add"
2023 wait_nm_sync c$large_i admin_nodemap
2025 do_facet mgs "seq -f 'c%g' $large_i | xargs -n1 $LCTL nodemap_del"
2026 wait_nm_sync c$large_i admin_nodemap
2028 run_test 26 "test transferring very large nodemap"
2030 nodemap_exercise_fileset() {
2035 if [ "$nm" == "default" ]; then
2036 do_facet mgs $LCTL nodemap_activate 1
2041 if $SHARED_KEY; then
2042 export SK_UNIQUE_NM=true
2044 # will conflict with SK's nodemaps
2045 trap "fileset_test_cleanup $nm" EXIT
2047 fileset_test_setup "$nm"
2049 # add fileset info to $nm nodemap
2050 if ! combined_mgs_mds; then
2051 do_facet mgs $LCTL set_param nodemap.${nm}.fileset=/$subdir ||
2052 error "unable to add fileset info to $nm nodemap on MGS"
2054 do_facet mgs $LCTL set_param -P nodemap.${nm}.fileset=/$subdir ||
2055 error "unable to add fileset info to $nm nodemap for servers"
2056 wait_nm_sync $nm fileset "nodemap.${nm}.fileset=/$subdir"
2059 zconf_umount_clients ${clients_arr[0]} $MOUNT ||
2060 error "unable to umount client ${clients_arr[0]}"
2061 # set some generic fileset to trigger SSK code
2063 zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
2064 error "unable to remount client ${clients_arr[0]}"
2067 # test mount point content
2068 do_node ${clients_arr[0]} test -f $MOUNT/this_is_$subdir ||
2069 error "fileset not taken into account"
2071 # re-mount client with sub-subdir
2072 zconf_umount_clients ${clients_arr[0]} $MOUNT ||
2073 error "unable to umount client ${clients_arr[0]}"
2074 export FILESET=/$subsubdir
2075 zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
2076 error "unable to remount client ${clients_arr[0]}"
2079 # test mount point content
2080 do_node ${clients_arr[0]} test -f $MOUNT/this_is_$subsubdir ||
2081 error "subdir of fileset not taken into account"
2083 # remove fileset info from nodemap
2084 do_facet mgs $LCTL nodemap_set_fileset --name $nm --fileset clear ||
2085 error "unable to delete fileset info on $nm nodemap"
2086 wait_update_facet mgs "$LCTL get_param nodemap.${nm}.fileset" \
2087 "nodemap.${nm}.fileset=" ||
2088 error "fileset info still not cleared on $nm nodemap"
2089 do_facet mgs $LCTL set_param -P nodemap.${nm}.fileset=clear ||
2090 error "unable to reset fileset info on $nm nodemap"
2091 wait_nm_sync $nm fileset "nodemap.${nm}.fileset="
2094 zconf_umount_clients ${clients_arr[0]} $MOUNT ||
2095 error "unable to umount client ${clients_arr[0]}"
2096 zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
2097 error "unable to remount client ${clients_arr[0]}"
2099 # test mount point content
2100 if ! do_node ${clients_arr[0]} test -d $MOUNT/$subdir; then
2102 error "fileset not cleared on $nm nodemap"
2105 # back to non-nodemap setup
2106 if $SHARED_KEY; then
2107 export SK_UNIQUE_NM=false
2108 zconf_umount_clients ${clients_arr[0]} $MOUNT ||
2109 error "unable to umount client ${clients_arr[0]}"
2111 fileset_test_cleanup "$nm"
2112 if [ "$nm" == "default" ]; then
2113 do_facet mgs $LCTL nodemap_activate 0
2114 wait_nm_sync active 0
2116 export SK_UNIQUE_NM=false
2118 nodemap_test_cleanup
2120 if $SHARED_KEY; then
2121 zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
2122 error "unable to remount client ${clients_arr[0]}"
2127 [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.50) ] &&
2128 skip "Need MDS >= 2.11.50" && return
2130 for nm in "default" "c0"; do
2131 local subdir="subdir_${nm}"
2132 local subsubdir="subsubdir_${nm}"
2134 if [ "$nm" == "default" ] && [ "$SHARED_KEY" == "true" ]; then
2135 echo "Skipping nodemap $nm with SHARED_KEY";
2139 echo "Exercising fileset for nodemap $nm"
2140 nodemap_exercise_fileset "$nm"
2143 run_test 27a "test fileset in various nodemaps"
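# Manual equivalent of the fileset handling exercised above (sketch): restrict
# a nodemap's clients to a sub-directory and later clear it again with
#   lctl set_param -P nodemap.c0.fileset=/subdir_c0
#   lctl set_param -P nodemap.c0.fileset=clear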
2145 test_27b() { #LU-10703
2146 [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.50) ] &&
2147 skip "Need MDS >= 2.11.50" && return
2148 [[ $MDSCOUNT -lt 2 ]] && skip "needs >= 2 MDTs" && return
2151 trap nodemap_test_cleanup EXIT
2153 # Add the nodemaps and set their filesets
2154 for i in $(seq 1 $MDSCOUNT); do
2155 do_facet mgs $LCTL nodemap_del nm$i 2>/dev/null
2156 do_facet mgs $LCTL nodemap_add nm$i ||
2157 error "add nodemap nm$i failed"
2158 wait_nm_sync nm$i "" "" "-N"
2160 if ! combined_mgs_mds; then
2162 $LCTL set_param nodemap.nm$i.fileset=/dir$i ||
2163 error "set nm$i.fileset=/dir$i failed on MGS"
2165 do_facet mgs $LCTL set_param -P nodemap.nm$i.fileset=/dir$i ||
2166 error "set nm$i.fileset=/dir$i failed on servers"
2167 wait_nm_sync nm$i fileset "nodemap.nm$i.fileset=/dir$i"
2170 # Check if all the filesets are correct
2171 for i in $(seq 1 $MDSCOUNT); do
2172 fileset=$(do_facet mds$i \
2173 $LCTL get_param -n nodemap.nm$i.fileset)
2174 [ "$fileset" = "/dir$i" ] ||
2175 error "nm$i.fileset $fileset != /dir$i on mds$i"
2176 do_facet mgs $LCTL nodemap_del nm$i ||
2177 error "delete nodemap nm$i failed"
2180 nodemap_test_cleanup
2182 run_test 27b "The new nodemap won't clear the old nodemap's fileset"
2185 if ! $SHARED_KEY; then
2186 skip "need shared key feature for this test" && return
2188 mkdir -p $DIR/$tdir || error "mkdir failed"
2189 touch $DIR/$tdir/$tdir.out || error "touch failed"
2190 if [ ! -f $DIR/$tdir/$tdir.out ]; then
2191 error "read before rotation failed"
2193 # store top key identity to ensure rotation has occurred
2194 SK_IDENTITY_OLD=$(lctl get_param *.*.*srpc* | grep "expire" |
2195 head -1 | awk '{print $15}' | cut -c1-8)
2196 do_facet $SINGLEMDS lfs flushctx ||
2197 error "could not run flushctx on $SINGLEMDS"
2199 lfs flushctx || error "could not run flushctx on client"
2201 # verify new key is in place
2202 SK_IDENTITY_NEW=$(lctl get_param *.*.*srpc* | grep "expire" |
2203 head -1 | awk '{print $15}' | cut -c1-8)
2204 if [ $SK_IDENTITY_OLD == $SK_IDENTITY_NEW ]; then
2205 error "key did not rotate correctly"
2207 if [ ! -f $DIR/$tdir/$tdir.out ]; then
2208 error "read after rotation failed"
2211 run_test 28 "check shared key rotation method"
2214 if ! $SHARED_KEY; then
2215 skip "need shared key feature for this test" && return
2217 if [ $SK_FLAVOR != "ski" ] && [ $SK_FLAVOR != "skpi" ]; then
2218 skip "test only valid if integrity is active"
2221 mkdir $DIR/$tdir || error "mkdir"
2222 touch $DIR/$tdir/$tfile || error "touch"
2223 zconf_umount_clients ${clients_arr[0]} $MOUNT ||
2224 error "unable to umount clients"
2225 keyctl show | awk '/lustre/ { print $1 }' |
2226 xargs -IX keyctl unlink X
2227 OLD_SK_PATH=$SK_PATH
2228 export SK_PATH=/dev/null
2229 if zconf_mount_clients ${clients_arr[0]} $MOUNT; then
2230 export SK_PATH=$OLD_SK_PATH
2231 if [ -e $DIR/$tdir/$tfile ]; then
2232 error "able to mount and read without key"
2234 error "able to mount without key"
2237 export SK_PATH=$OLD_SK_PATH
2238 keyctl show | awk '/lustre/ { print $1 }' |
2239 xargs -IX keyctl unlink X
2242 run_test 29 "check for missing shared key"
2245 if ! $SHARED_KEY; then
2246 skip "need shared key feature for this test" && return
2248 if [ $SK_FLAVOR != "ski" ] && [ $SK_FLAVOR != "skpi" ]; then
2249 skip "test only valid if integrity is active"
2251 mkdir -p $DIR/$tdir || error "mkdir failed"
2252 touch $DIR/$tdir/$tdir.out || error "touch failed"
2253 zconf_umount_clients ${clients_arr[0]} $MOUNT ||
2254 error "unable to umount clients"
2255 # unload keys from ring
2256 keyctl show | awk '/lustre/ { print $1 }' |
2257 xargs -IX keyctl unlink X
2258 # invalidate the key with bogus filesystem name
2259 lgss_sk -w $SK_PATH/$FSNAME-bogus.key -f $FSNAME.bogus \
2260 -t client -d /dev/urandom || error "lgss_sk failed (1)"
2261 do_facet $SINGLEMDS lfs flushctx || error "could not run flushctx"
2262 OLD_SK_PATH=$SK_PATH
2263 export SK_PATH=$SK_PATH/$FSNAME-bogus.key
2264 if zconf_mount_clients ${clients_arr[0]} $MOUNT; then
2265 SK_PATH=$OLD_SK_PATH
2266 if [ -e $DIR/$tdir/$tdir.out ]; then
2267 error "mount and read file with invalid key"
2269 error "mount with invalid key"
2272 SK_PATH=$OLD_SK_PATH
2273 zconf_umount_clients ${clients_arr[0]} $MOUNT ||
2274 error "unable to umount clients"
2276 run_test 30 "check for invalid shared key"
2280 zconf_umount $HOSTNAME $MOUNT || error "unable to umount client"
2282 # remove ${NETTYPE}999 network on all nodes
2283 do_nodes $(comma_list $(all_nodes)) \
2284 "$LNETCTL net del --net ${NETTYPE}999 && \
2285 $LNETCTL lnet unconfigure 2>/dev/null || true"
2287 # necessary to do writeconf in order to de-register
2288 # @${NETTYPE}999 nid for targets
2290 export KEEP_ZPOOL="true"
2292 export SK_MOUNTED=false
2295 export KEEP_ZPOOL="$KZPOOL"
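# test_31: exercise the client '-o network' mount option: configure a
# dedicated ${NETTYPE}999 LNet, remount the client restricted to it, and
# verify that MGS/MDT/OST exports and client imports use the new NID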
2299 local nid=$(lctl list_nids | grep ${NETTYPE} | head -n1)
2300 local addr=${nid%@*}
2303 export LNETCTL=$(which lnetctl 2> /dev/null)
2305 [ -z "$LNETCTL" ] && skip "without lnetctl support." && return
2306 local_mode && skip "in local mode."
2308 stack_trap cleanup_31 EXIT
2311 if [ "$MOUNT_2" ] && $(grep -q $MOUNT2' ' /proc/mounts); then
2312 umount_client $MOUNT2 || error "umount $MOUNT2 failed"
2314 if grep -q $MOUNT' ' /proc/mounts; then
2315 umount_client $MOUNT || error "umount $MOUNT failed"
2318 # check exports on servers are empty for client
2319 do_facet mgs "lctl get_param -n *.MGS*.exports.'$nid'.uuid 2>/dev/null |
2320 grep -q -" && error "export on MGS should be empty"
2321 do_nodes $(comma_list $(mdts_nodes) $(osts_nodes)) \
2322 "lctl get_param -n *.${FSNAME}*.exports.'$nid'.uuid \
2323 2>/dev/null | grep -q -" &&
2324 error "export on servers should be empty"
2326 # add network ${NETTYPE}999 on all nodes
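# the interface name is parsed from '$LNETCTL net show --net $net'
# output (the field on the line following "interfaces")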
2327 do_nodes $(comma_list $(all_nodes)) \
2328 "$LNETCTL lnet configure && $LNETCTL net add --if \
2329 \$($LNETCTL net show --net $net | awk 'BEGIN{inf=0} \
2330 {if (inf==1) print \$2; inf=0} /interfaces/{inf=1}') \
2331 --net ${NETTYPE}999" ||
2332 error "unable to configure NID ${NETTYPE}999"
2334 # necessary to do writeconf in order to register
2335 # new @${NETTYPE}999 nid for targets
2337 export KEEP_ZPOOL="true"
2339 export SK_MOUNTED=false
2341 setupall server_only || echo 1
2342 export KEEP_ZPOOL="$KZPOOL"
2345 local mgsnid_orig=$MGSNID
2346 # compute new MGSNID
2347 MGSNID=$(do_facet mgs "$LCTL list_nids | grep ${NETTYPE}999")
2349 # on client, turn LNet Dynamic Discovery on
2350 $LNETCTL set discovery 1
2352 # mount client with -o network=${NETTYPE}999 option:
2353 # should fail because of LNet Dynamic Discovery
2354 mount_client $MOUNT ${MOUNT_OPTS},network=${NETTYPE}999 &&
2355 error "client mount with '-o network' option should be refused"
2357 # on client, reconfigure LNet and turn LNet Dynamic Discovery off
2358 $LNETCTL net del --net ${NETTYPE}999 && $LNETCTL lnet unconfigure
2361 $LNETCTL set discovery 0
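# re-add the ${NETTYPE}999 network on the client with the same
# interface, now that dynamic discovery is disabled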
2363 $LNETCTL lnet configure && $LNETCTL net add --if \
2364 $($LNETCTL net show --net $net | awk 'BEGIN{inf=0} \
2365 {if (inf==1) print $2; inf=0} /interfaces/{inf=1}') \
2366 --net ${NETTYPE}999 ||
2367 error "unable to configure NID ${NETTYPE}999 on client"
2369 # mount client with -o network=${NETTYPE}999 option
2370 mount_client $MOUNT ${MOUNT_OPTS},network=${NETTYPE}999 ||
2371 error "unable to remount client"
2376 # check export on MGS
2377 do_facet mgs "lctl get_param -n *.MGS*.exports.'$nid'.uuid 2>/dev/null |
2379 [ $? -ne 0 ] || error "export for $nid on MGS should not exist"
2382 "lctl get_param -n *.MGS*.exports.'${addr}@${NETTYPE}999'.uuid \
2383 2>/dev/null | grep -q -"
2385 error "export for ${addr}@${NETTYPE}999 on MGS should exist"
2387 # check {mdc,osc} imports
2388 lctl get_param mdc.${FSNAME}-*.import | grep current_connection |
2389 grep -q ${NETTYPE}999
2391 error "import for mdc should use ${addr}@${NETTYPE}999"
2392 lctl get_param osc.${FSNAME}-*.import | grep current_connection |
2393 grep -q ${NETTYPE}999
2395 error "import for osc should use ${addr}@${NETTYPE}999"
2397 run_test 31 "client mount option '-o network'"
2401 zconf_umount_clients ${clients_arr[0]} $MOUNT
2403 # disable sk flavor enforcement on MGS
2404 set_rule _mgs any any null
2406 # stop gss daemon on MGS
2407 if ! combined_mgs_mds ; then
2408 send_sigint $mgs_HOST lsvcgssd
2412 MOUNT_OPTS=$(add_sk_mntflag $MOUNT_OPTS)
2415 restore_to_default_flavor
2419 if ! $SHARED_KEY; then
2420 skip "need shared key feature for this test"
2423 stack_trap cleanup_32 EXIT
2425 # restore to default null flavor
2426 save_flvr=$SK_FLAVOR
2428 restore_to_default_flavor || error "cannot set null flavor"
2429 SK_FLAVOR=$save_flvr
2432 if [ "$MOUNT_2" ] && $(grep -q $MOUNT2' ' /proc/mounts); then
2433 umount_client $MOUNT2 || error "umount $MOUNT2 failed"
2435 if grep -q $MOUNT' ' /proc/mounts; then
2436 umount_client $MOUNT || error "umount $MOUNT failed"
2439 # start gss daemon on MGS
2440 if combined_mgs_mds ; then
2441 send_sigint $mds_HOST lsvcgssd
2443 start_gss_daemons $mgs_HOST "$LSVCGSSD -vvv -s -g"
2445 # add mgs key type and MGS NIDs in key on MGS
2446 do_nodes $mgs_HOST "lgss_sk -t mgs,server -g $MGSNID -m \
2447 $SK_PATH/$FSNAME.key >/dev/null 2>&1" ||
2448 error "could not modify keyfile on MGS"
2450 # load modified key file on MGS
2451 do_nodes $mgs_HOST "lgss_sk -l $SK_PATH/$FSNAME.key >/dev/null 2>&1" ||
2452 error "could not load keyfile on MGS"
2454 # add MGS NIDs in key on client
2455 do_nodes ${clients_arr[0]} "lgss_sk -g $MGSNID -m \
2456 $SK_PATH/$FSNAME.key >/dev/null 2>&1" ||
2457 error "could not modify keyfile on MGS"
2459 # set perms for per-nodemap keys else permission denied
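# 0x3f3f3f3f grants view/read/write/search/link/setattr to possessor,
# user, group and other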
2460 do_nodes $(comma_list $(all_nodes)) \
2461 "keyctl show | grep lustre | cut -c1-11 |
2463 xargs -IX keyctl setperm X 0x3f3f3f3f"
2465 # re-mount client with mgssec=skn
2466 save_opts=$MOUNT_OPTS
2467 if [ -z "$MOUNT_OPTS" ]; then
2468 MOUNT_OPTS="-o mgssec=skn"
2470 MOUNT_OPTS="$MOUNT_OPTS,mgssec=skn"
2472 zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
2473 error "mount ${clients_arr[0]} with mgssec=skn failed"
2474 MOUNT_OPTS=$save_opts
2477 zconf_umount_clients ${clients_arr[0]} $MOUNT ||
2478 error "umount ${clients_arr[0]} failed"
2480 # enforce ska flavor on MGS
2481 set_rule _mgs any any ska
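# with ska enforced on the MGS, only mounts negotiating mgssec=ska
# should succeed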
2483 # re-mount client without mgssec
2484 zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS &&
2485 error "mount ${clients_arr[0]} without mgssec should fail"
2487 # re-mount client with mgssec=skn
2488 save_opts=$MOUNT_OPTS
2489 if [ -z "$MOUNT_OPTS" ]; then
2490 MOUNT_OPTS="-o mgssec=skn"
2492 MOUNT_OPTS="$MOUNT_OPTS,mgssec=skn"
2494 zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS &&
2495 error "mount ${clients_arr[0]} with mgssec=skn should fail"
2496 MOUNT_OPTS=$save_opts
2498 # re-mount client with mgssec=ska
2499 save_opts=$MOUNT_OPTS
2500 if [ -z "$MOUNT_OPTS" ]; then
2501 MOUNT_OPTS="-o mgssec=ska"
2503 MOUNT_OPTS="$MOUNT_OPTS,mgssec=ska"
2505 zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
2506 error "mount ${clients_arr[0]} with mgssec=ska failed"
2507 MOUNT_OPTS=$save_opts
2511 run_test 32 "check for mgssec"
2514 # disable sk flavor enforcement
2515 set_rule $FSNAME any cli2mdt null
2516 wait_flavor cli2mdt null
2519 zconf_umount_clients ${clients_arr[0]} $MOUNT
2521 # stop gss daemon on MGS
2522 if ! combined_mgs_mds ; then
2523 send_sigint $mgs_HOST lsvcgssd
2527 MOUNT_OPTS=$(add_sk_mntflag $MOUNT_OPTS)
2530 restore_to_default_flavor
2534 if ! $SHARED_KEY; then
2535 skip "need shared key feature for this test"
2538 stack_trap cleanup_33 EXIT
2540 # restore to default null flavor
2541 save_flvr=$SK_FLAVOR
2543 restore_to_default_flavor || error "cannot set null flavor"
2544 SK_FLAVOR=$save_flvr
2547 if [ "$MOUNT_2" ] && $(grep -q $MOUNT2' ' /proc/mounts); then
2548 umount_client $MOUNT2 || error "umount $MOUNT2 failed"
2550 if grep -q $MOUNT' ' /proc/mounts; then
2551 umount_client $MOUNT || error "umount $MOUNT failed"
2554 # start gss daemon on MGS
2555 if combined_mgs_mds ; then
2556 send_sigint $mds_HOST lsvcgssd
2558 start_gss_daemons $mgs_HOST "$LSVCGSSD -vvv -s -g"
2560 # add mgs key type and MGS NIDs in key on MGS
2561 do_nodes $mgs_HOST "lgss_sk -t mgs,server -g $MGSNID -m \
2562 $SK_PATH/$FSNAME.key >/dev/null 2>&1" ||
2563 error "could not modify keyfile on MGS"
2565 # load modified key file on MGS
2566 do_nodes $mgs_HOST "lgss_sk -l $SK_PATH/$FSNAME.key >/dev/null 2>&1" ||
2567 error "could not load keyfile on MGS"
2569 # add MGS NIDs in key on client
2570 do_nodes ${clients_arr[0]} "lgss_sk -g $MGSNID -m \
2571 $SK_PATH/$FSNAME.key >/dev/null 2>&1" ||
2572 error "could not modify keyfile on MGS"
2574 # set perms for per-nodemap keys else permission denied
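# 0x3f3f3f3f: full keyctl permissions for all categories (see test 32)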
2575 do_nodes $(comma_list $(all_nodes)) \
2576 "keyctl show | grep lustre | cut -c1-11 |
2578 xargs -IX keyctl setperm X 0x3f3f3f3f"
2580 # re-mount client with mgssec=skn
2581 save_opts=$MOUNT_OPTS
2582 if [ -z "$MOUNT_OPTS" ]; then
2583 MOUNT_OPTS="-o mgssec=skn"
2585 MOUNT_OPTS="$MOUNT_OPTS,mgssec=skn"
2587 zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
2588 error "mount ${clients_arr[0]} with mgssec=skn failed"
2589 MOUNT_OPTS=$save_opts
2591 # enforce ska flavor for cli2mdt
2592 set_rule $FSNAME any cli2mdt ska
2593 wait_flavor cli2mdt ska
2595 # check the debug log: a "faked source" message indicates that the
2595 # MGS connection was set up with incorrect srpc flags
2596 $LCTL dk | grep "faked source" &&
2597 error "MGS connection srpc flags incorrect"
2601 run_test 33 "correct srpc flags for MGS connection"
2604 # restore deny_unknown
2605 do_facet mgs $LCTL nodemap_modify --name default \
2606 --property deny_unknown --value $denydefault
2607 if [ $? -ne 0 ]; then
2608 error_noexit "cannot reset deny_unknown on default nodemap"
2612 wait_nm_sync default deny_unknown
2619 [ $MGS_VERSION -lt $(version_code 2.12.51) ] &&
2620 skip "deny_unknown on default nm not supported before 2.12.51"
2622 activedefault=$(do_facet mgs $LCTL get_param -n nodemap.active)
2624 if [[ "$activedefault" != "1" ]]; then
2625 do_facet mgs $LCTL nodemap_activate 1
2627 stack_trap cleanup_active EXIT
2630 denydefault=$(do_facet mgs $LCTL get_param -n \
2631 nodemap.default.deny_unknown)
2632 [ -z "$denydefault" ] &&
2633 error "cannot get deny_unknown on default nodemap"
2634 if [ "$denydefault" -eq 0 ]; then
2640 do_facet mgs $LCTL nodemap_modify --name default \
2641 --property deny_unknown --value $denynew ||
2642 error "cannot set deny_unknown on default nodemap"
2644 [ "$(do_facet mgs $LCTL get_param -n nodemap.default.deny_unknown)" \
2646 error "setting deny_unknown on default nodemap did not work"
2648 stack_trap cleanup_34_deny EXIT
2650 wait_nm_sync default deny_unknown
2652 run_test 34 "deny_unknown on default nodemap"
2654 log "cleanup: ======================================================"
2657 ## nodemap deactivated; restore original identity_upcall setting on each MDS
2658 for num in $(seq $MDSCOUNT); do
2659 if [ "${identity_old[$num]}" = 1 ]; then
2660 switch_identity $num false || identity_old[$num]=$?
2664 $RUNAS_CMD -u $ID0 ls $DIR
2665 $RUNAS_CMD -u $ID1 ls $DIR
2670 check_and_cleanup_lustre