# Run select tests by setting ONLY, or as arguments to the script.
# Skip specific tests by setting EXCEPT.
# bug number for skipped test:
ALWAYS_EXCEPT=" $SANITY_SEC_EXCEPT"
# bug number for skipped test: 9145 9145 9671 9145 9145 9145 9145 9245
ALWAYS_EXCEPT=" 17 18 19 20 21 22 23 27 $ALWAYS_EXCEPT"
# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
export PATH=$PWD/$SRCDIR:$SRCDIR:$PWD/$SRCDIR/../utils:$PATH:/sbin
export NAME=${NAME:-local}
LUSTRE=${LUSTRE:-$(dirname $0)/..}
. $LUSTRE/tests/test-framework.sh
. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
NODEMAP_TESTS=$(seq 7 26)
if ! check_versions; then
	echo "Skipping nodemap tests: not needed in interoperability mode"
	EXCEPT="$EXCEPT $NODEMAP_TESTS"
[ "$SLOW" = "no" ] && EXCEPT_SLOW="26"
[ "$ALWAYS_EXCEPT$EXCEPT$EXCEPT_SLOW" ] &&
	echo "Skipping tests: $ALWAYS_EXCEPT $EXCEPT $EXCEPT_SLOW"
RUNAS_CMD=${RUNAS_CMD:-runas}
WTL=${WTL:-"$LUSTRE/tests/write_time_limit"}
PERM_CONF=$CONFDIR/perm.conf
HOSTNAME_CHECKSUM=$(hostname | sum | awk '{ print $1 }')
SUBNET_CHECKSUM=$(expr $HOSTNAME_CHECKSUM % 250 + 1)
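# A hypothetical example: if "hostname | sum" printed "37998 1", then
# HOSTNAME_CHECKSUM=37998 and SUBNET_CHECKSUM=$((37998 % 250 + 1))=249, which
# becomes the first octet of the synthetic test NIDs built below
# (e.g. "249.0.0.[1-253]@tcp"), keeping NID ranges distinct across test hosts.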
require_dsh_mds || exit 0
require_dsh_ost || exit 0
clients=${CLIENTS//,/ }
num_clients=$(get_node_count ${clients})
clients_arr=($clients)
USER0=$(getent passwd | grep :$ID0:$ID0: | cut -d: -f1)
USER1=$(getent passwd | grep :$ID1:$ID1: | cut -d: -f1)
NODEMAP_IPADDR_LIST="1 10 64 128 200 250"
NODEMAP_MAX_ID=$((ID0 + NODEMAP_ID_COUNT))
	skip "need to add user0 ($ID0:$ID0)" && exit 0
	skip "need to add user1 ($ID1:$ID1)" && exit 0
IDBASE=${IDBASE:-60000}
# changes to mappings must be reflected in test 23
	[0]="$((IDBASE+3)):$((IDBASE+0)) $((IDBASE+4)):$((IDBASE+2))"
	[1]="$((IDBASE+5)):$((IDBASE+1)) $((IDBASE+6)):$((IDBASE+2))"
check_and_setup_lustre
GSS_REF=$(lsmod | grep ^ptlrpc_gss | awk '{print $3}')
if [ -n "$GSS_REF" ] && [ "$GSS_REF" != "0" ]; then
	echo "with GSS support"
	echo "without GSS support"
MDT=$(do_facet $SINGLEMDS lctl get_param -N "mdt.\*MDT0000" |
[ -z "$MDT" ] && error "failed to get MDT device" && exit 1
do_facet $SINGLEMDS "mkdir -p $CONFDIR"
IDENTITY_FLUSH=mdt.$MDT.identity_flush
IDENTITY_UPCALL=mdt.$MDT.identity_upcall
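# Convention used throughout this script: after editing $PERM_CONF, writing -1
# to identity_flush asks the MDT to drop its cached identities so the new
# permission lines take effect, e.g.:
#	do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"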
	if ! $RUNAS_CMD -u $user krb5_login.sh; then
		error "$user Kerberos login failed"
	if ! $RUNAS_CMD -u $user -g $group ls $DIR > /dev/null 2>&1; then
		$RUNAS_CMD -u $user lfs flushctx -k
		$RUNAS_CMD -u $user krb5_login.sh
		if ! $RUNAS_CMD -u $user -g $group ls $DIR > /dev/null 2>&1; then
			error "init $user $group failed"
declare -a identity_old
for num in $(seq $MDSCOUNT); do
	switch_identity $num true || identity_old[$num]=$?
if ! $RUNAS_CMD -u $ID0 ls $DIR > /dev/null 2>&1; then
	sec_login $USER0 $USER0
if ! $RUNAS_CMD -u $ID1 ls $DIR > /dev/null 2>&1; then
	sec_login $USER1 $USER1
	# run as different user
	chmod 0755 $DIR || error "chmod (1)"
	rm -rf $DIR/$tdir || error "rm (1)"
	mkdir -p $DIR/$tdir || error "mkdir (1)"
	chown $USER0 $DIR/$tdir || error "chown (2)"
	$RUNAS_CMD -u $ID0 ls $DIR || error "ls (1)"
	rm -f $DIR/f0 || error "rm (2)"
	$RUNAS_CMD -u $ID0 touch $DIR/f0 && error "touch (1)"
	$RUNAS_CMD -u $ID0 touch $DIR/$tdir/f1 || error "touch (2)"
	$RUNAS_CMD -u $ID1 touch $DIR/$tdir/f2 && error "touch (3)"
	touch $DIR/$tdir/f3 || error "touch (4)"
	chown root $DIR/$tdir || error "chown (3)"
	chgrp $USER0 $DIR/$tdir || error "chgrp (1)"
	chmod 0775 $DIR/$tdir || error "chmod (2)"
	$RUNAS_CMD -u $ID0 touch $DIR/$tdir/f4 || error "touch (5)"
	$RUNAS_CMD -u $ID1 touch $DIR/$tdir/f5 && error "touch (6)"
	touch $DIR/$tdir/f6 || error "touch (7)"
	rm -rf $DIR/$tdir || error "rm (3)"
run_test 0 "uid permission ============================="
	[ "$GSS_SUP" = "0" ] && skip "without GSS support." && return
	chown $USER0 $DIR/$tdir || error "chown (1)"
	$RUNAS_CMD -u $ID1 -v $ID0 touch $DIR/$tdir/f0 && error "touch (2)"
	echo "enable uid $ID1 setuid"
	do_facet $SINGLEMDS "echo '* $ID1 setuid' >> $PERM_CONF"
	do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
	$RUNAS_CMD -u $ID1 -v $ID0 touch $DIR/$tdir/f1 || error "touch (3)"
	chown root $DIR/$tdir || error "chown (4)"
	chgrp $USER0 $DIR/$tdir || error "chgrp (5)"
	chmod 0770 $DIR/$tdir || error "chmod (6)"
	$RUNAS_CMD -u $ID1 -g $ID1 touch $DIR/$tdir/f2 && error "touch (7)"
	$RUNAS_CMD -u $ID1 -g $ID1 -j $ID0 touch $DIR/$tdir/f3 && error "touch (8)"
	echo "enable uid $ID1 setuid,setgid"
	do_facet $SINGLEMDS "echo '* $ID1 setuid,setgid' > $PERM_CONF"
	do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
	$RUNAS_CMD -u $ID1 -g $ID1 -j $ID0 touch $DIR/$tdir/f4 ||
	$RUNAS_CMD -u $ID1 -v $ID0 -g $ID1 -j $ID0 touch $DIR/$tdir/f5 ||
	do_facet $SINGLEMDS "rm -f $PERM_CONF"
	do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
run_test 1 "setuid/gid ============================="
# bug 3285 - supplementary group should always succeed.
# NB: supplementary groups are set for the local client only; for a remote
# client, the groups of the specified uid on the MDT are obtained via the
# /sbin/l_getidentity upcall and used instead.
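# For example, "$RUNAS_CMD -u $ID1 -G1,2,$ID0 ls $DIR/$tdir" below runs as uid
# $ID1 with supplementary groups 1, 2 and $ID0, so listing a 0771 directory
# that is group-owned by $ID0 can only succeed through the supplementary
# group.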
	local server_version=$(lustre_version_code $SINGLEMDS)
	[[ $server_version -ge $(version_code 2.6.93) ]] ||
		[[ $server_version -ge $(version_code 2.5.35) &&
		   $server_version -lt $(version_code 2.5.50) ]] ||
		{ skip "Need MDS version at least 2.6.93 or 2.5.35"; return; }
	chmod 0771 $DIR/$tdir
	chgrp $ID0 $DIR/$tdir
	$RUNAS_CMD -u $ID0 ls $DIR/$tdir || error "setgroups (1)"
	do_facet $SINGLEMDS "echo '* $ID1 setgrp' > $PERM_CONF"
	do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
	$RUNAS_CMD -u $ID1 -G1,2,$ID0 ls $DIR/$tdir ||
		error "setgroups (2)"
	$RUNAS_CMD -u $ID1 -G1,2 ls $DIR/$tdir && error "setgroups (3)"
	do_facet $SINGLEMDS "rm -f $PERM_CONF"
	do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
run_test 4 "set supplementary group ==============="
	squash_id default 99 0
	squash_id default 99 1
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		local csum=${HOSTNAME_CHECKSUM}_${i}
		if ! do_facet mgs $LCTL nodemap_add $csum; then
		out=$(do_facet mgs $LCTL get_param nodemap.$csum.id)
		# return failure if the new nodemap is missing from the output
		[[ $(echo $out | grep -c $csum) == 0 ]] && return 1
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		local csum=${HOSTNAME_CHECKSUM}_${i}
		if ! do_facet mgs $LCTL nodemap_del $csum; then
			error "nodemap_del $csum failed with $?"
		out=$(do_facet mgs $LCTL get_param nodemap.$csum.id 2>/dev/null)
		[[ $(echo $out | grep -c $csum) != 0 ]] && return 1
	local cmd="$LCTL nodemap_add_range"
	for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
		range="$SUBNET_CHECKSUM.${2}.${j}.[1-253]@tcp"
		if ! do_facet mgs $cmd --name $1 --range $range; then
	local cmd="$LCTL nodemap_del_range"
	for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
		range="$SUBNET_CHECKSUM.${2}.${j}.[1-253]@tcp"
		if ! do_facet mgs $cmd --name $1 --range $range; then
	local cmd="$LCTL nodemap_add_idmap"
	echo "Start to add idmaps ..."
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		for ((j = $ID0; j < NODEMAP_MAX_ID; j++)); do
			local csum=${HOSTNAME_CHECKSUM}_${i}
			local fs_id=$((j + 1))
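			# client id j is deliberately mapped to fs id j + 1;
			# test_idmaps below relies on exactly this off-by-one
			# mapping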
			if ! do_facet mgs $cmd --name $csum --idtype uid \
			     --idmap $client_id:$fs_id; then
			if ! do_facet mgs $cmd --name $csum --idtype gid \
			     --idmap $client_id:$fs_id; then
update_idmaps() { #LU-10040
	[ $(lustre_version_code mgs) -lt $(version_code 2.10.55) ] &&
		skip "Need MGS >= 2.10.55" &&
	local csum=${HOSTNAME_CHECKSUM}_0
	local old_id_client=$ID0
	local old_id_fs=$((ID0 + 1))
	local new_id=$((ID0 + 100))
	echo "Start to update idmaps ..."
	# inserting an existing idmap should return an error
	cmd="$LCTL nodemap_add_idmap --name $csum --idtype uid"
		$cmd --idmap $old_id_client:$old_id_fs 2>/dev/null; then
		error "insert idmap {$old_id_client:$old_id_fs} " \
			"should return an error"
	# update id_fs and check it
	if ! do_facet mgs $cmd --idmap $old_id_client:$new_id; then
		error "$cmd --idmap $old_id_client:$new_id failed"
	tmp_id=$(do_facet mgs $LCTL get_param -n nodemap.$csum.idmap |
		 awk '{ print $7 }' | sed -n '2p')
	[ $tmp_id != $new_id ] && { error "new id_fs $tmp_id != $new_id"; \
		rc=$((rc + 1)); return $rc; }
	# update id_client and check it
	if ! do_facet mgs $cmd --idmap $new_id:$new_id; then
		error "$cmd --idmap $new_id:$new_id failed"
	tmp_id=$(do_facet mgs $LCTL get_param -n nodemap.$csum.idmap |
		 awk '{ print $5 }' | sed -n "$((NODEMAP_ID_COUNT + 1)) p")
	tmp_id=$(echo ${tmp_id%,*}) # e.g. "501," -> "501"
	[ $tmp_id != $new_id ] && { error "new id_client $tmp_id != $new_id"; \
		rc=$((rc + 1)); return $rc; }
	# delete the updated idmap above
	cmd="$LCTL nodemap_del_idmap --name $csum --idtype uid"
	if ! do_facet mgs $cmd --idmap $new_id:$new_id; then
		error "$cmd --idmap $new_id:$new_id failed"
	# restore the idmaps so that delete_idmaps works correctly
	cmd="$LCTL nodemap_add_idmap --name $csum --idtype uid"
	if ! do_facet mgs $cmd --idmap $old_id_client:$old_id_fs; then
		error "$cmd --idmap $old_id_client:$old_id_fs failed"
	local cmd="$LCTL nodemap_del_idmap"
	echo "Start to delete idmaps ..."
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		for ((j = $ID0; j < NODEMAP_MAX_ID; j++)); do
			local csum=${HOSTNAME_CHECKSUM}_${i}
			local fs_id=$((j + 1))
			if ! do_facet mgs $cmd --name $csum --idtype uid \
			     --idmap $client_id:$fs_id; then
			if ! do_facet mgs $cmd --name $csum --idtype gid \
			     --idmap $client_id:$fs_id; then
	local cmd="$LCTL nodemap_modify"
	proc[0]="admin_nodemap"
	proc[1]="trusted_nodemap"
	for ((idx = 0; idx < 2; idx++)); do
		if ! do_facet mgs $cmd --name $1 --property ${option[$idx]} \
		if ! do_facet mgs $cmd --name $1 --property ${option[$idx]} \
	[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
	cmd[0]="$LCTL nodemap_modify --property squash_uid"
	cmd[1]="$LCTL nodemap_modify --property squash_gid"
	if ! do_facet mgs ${cmd[$3]} --name $1 --value $2; then
# ensure that the squash defaults are the expected defaults
squash_id default 99 0
squash_id default 99 1
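# 99 is the conventional "nobody" uid/gid that nodemap uses as its squash
# default; the two checks above fail if a previous run left the default
# nodemap configured differently.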
	cmd="$LCTL nodemap_test_nid"
	nid=$(do_facet mgs $cmd $1)
	if [ "$nid" == "$2" ]; then
	local cmd="$LCTL nodemap_test_id"
	echo "Start to test idmaps ..."
	## nodemap deactivated
	if ! do_facet mgs $LCTL nodemap_activate 0; then
	for ((id = $ID0; id < NODEMAP_MAX_ID; id++)); do
		for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
			local nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
			local fs_id=$(do_facet mgs $cmd --nid $nid \
				      --idtype uid --id $id)
			if [ "$fs_id" != "$id" ]; then
				echo "expected $id, got $fs_id"
	if ! do_facet mgs $LCTL nodemap_activate 1; then
	for ((id = $ID0; id < NODEMAP_MAX_ID; id++)); do
		for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
			nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
			fs_id=$(do_facet mgs $cmd --nid $nid \
				--idtype uid --id $id)
			expected_id=$((id + 1))
			if [ "$fs_id" != "$expected_id" ]; then
				echo "expected $expected_id, got $fs_id"
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		local csum=${HOSTNAME_CHECKSUM}_${i}
		if ! do_facet mgs $LCTL nodemap_modify --name $csum \
		     --property trusted --value 1; then
			error "nodemap_modify $csum failed with $?"
	for ((id = $ID0; id < NODEMAP_MAX_ID; id++)); do
		for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
			nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
			fs_id=$(do_facet mgs $cmd --nid $nid \
				--idtype uid --id $id)
			if [ "$fs_id" != "$id" ]; then
				echo "expected $id, got $fs_id"
	## ensure that admin (allow root access) is enabled
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		local csum=${HOSTNAME_CHECKSUM}_${i}
		if ! do_facet mgs $LCTL nodemap_modify --name $csum \
		     --property admin --value 1; then
			error "nodemap_modify $csum failed with $?"
	## check that root is allowed
	for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
		nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
		fs_id=$(do_facet mgs $cmd --nid $nid --idtype uid --id 0)
		if [ "$fs_id" != "0" ]; then
			echo "root allowed expected 0, got $fs_id"
	## ensure that admin (allow root access) is disabled
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		local csum=${HOSTNAME_CHECKSUM}_${i}
		if ! do_facet mgs $LCTL nodemap_modify --name $csum \
		     --property admin --value 0; then
			error "nodemap_modify ${HOSTNAME_CHECKSUM}_${i} failed with $?"
	## check that root is mapped to 99
	for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
		nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
		fs_id=$(do_facet mgs $cmd --nid $nid --idtype uid --id 0)
		if [ "$fs_id" != "99" ]; then
			error "root squash expected 99, got $fs_id"
	## reset client trust to 0
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! do_facet mgs $LCTL nodemap_modify \
		     --name ${HOSTNAME_CHECKSUM}_${i} \
		     --property trusted --value 0; then
			error "nodemap_modify ${HOSTNAME_CHECKSUM}_${i} failed with $?"
	remote_mgs_nodsh && skip "remote MGS with nodsh" && return
	[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
	[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 2
run_test 7 "nodemap create and delete"
	remote_mgs_nodsh && skip "remote MGS with nodsh" && return
	[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
	[[ $rc == 0 ]] && error "duplicate nodemap_add allowed with $rc" &&
	[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 3
run_test 8 "nodemap reject duplicates"
	remote_mgs_nodsh && skip "remote MGS with nodsh" && return
	[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
	[[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! delete_range ${HOSTNAME_CHECKSUM}_${i} $i; then
	[[ $rc != 0 ]] && error "nodemap_del_range failed with $rc" && return 4
	[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4
run_test 9 "nodemap range add"
	remote_mgs_nodsh && skip "remote MGS with nodsh" && return
	[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
	[[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
	[[ $rc == 0 ]] && error "nodemap_add_range duplicate add with $rc" &&
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! delete_range ${HOSTNAME_CHECKSUM}_${i} $i; then
	[[ $rc != 0 ]] && error "nodemap_del_range failed with $rc" && return 4
	[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 5
run_test 10a "nodemap reject duplicate ranges"
	[ $(lustre_version_code mgs) -lt $(version_code 2.10.53) ] &&
		skip "Need MGS >= 2.10.53" && return
	local nids="192.168.19.[0-255]@o2ib20"
	do_facet mgs $LCTL nodemap_del $nm1 2>/dev/null
	do_facet mgs $LCTL nodemap_del $nm2 2>/dev/null
	do_facet mgs $LCTL nodemap_add $nm1 || error "Add $nm1 failed"
	do_facet mgs $LCTL nodemap_add $nm2 || error "Add $nm2 failed"
	do_facet mgs $LCTL nodemap_add_range --name $nm1 --range $nids ||
		error "Add range $nids to $nm1 failed"
	[ -n "$(do_facet mgs $LCTL get_param nodemap.$nm1.* |
		grep start_nid)" ] || error "No range was found"
	do_facet mgs $LCTL nodemap_del_range --name $nm2 --range $nids &&
		error "Deleting range $nids from $nm2 should fail"
	[ -n "$(do_facet mgs $LCTL get_param nodemap.$nm1.* |
		grep start_nid)" ] || error "Range $nids should be there"
	do_facet mgs $LCTL nodemap_del $nm1 || error "Delete $nm1 failed"
	do_facet mgs $LCTL nodemap_del $nm2 || error "Delete $nm2 failed"
run_test 10b "delete range from the correct nodemap"
test_10c() { #LU-8912
	[ $(lustre_version_code mgs) -lt $(version_code 2.10.57) ] &&
		skip "Need MGS >= 2.10.57" && return
	local nm="nodemap_lu8912"
	local nid_range="10.210.[32-47].[0-255]@o2ib3"
	local start_nid="10.210.32.0@o2ib3"
	local end_nid="10.210.47.255@o2ib3"
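	# the bracketed NID range above is contiguous, so nodemap is expected
	# to store it as a single [start_nid, end_nid] interval; the two
	# locals spell out those endpoints for the checks below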
	local start_nid_found
	do_facet mgs $LCTL nodemap_del $nm 2>/dev/null
	do_facet mgs $LCTL nodemap_add $nm || error "Add $nm failed"
	do_facet mgs $LCTL nodemap_add_range --name $nm --range $nid_range ||
		error "Add range $nid_range to $nm failed"
	start_nid_found=$(do_facet mgs $LCTL get_param nodemap.$nm.* |
		awk -F '[,: ]' /start_nid/'{ print $9 }')
	[ "$start_nid" == "$start_nid_found" ] ||
		error "start_nid: $start_nid_found != $start_nid"
	end_nid_found=$(do_facet mgs $LCTL get_param nodemap.$nm.* |
		awk -F '[,: ]' /end_nid/'{ print $13 }')
	[ "$end_nid" == "$end_nid_found" ] ||
		error "end_nid: $end_nid_found != $end_nid"
	do_facet mgs $LCTL nodemap_del $nm || error "Delete $nm failed"
run_test 10c "verify contiguous range support"
	remote_mgs_nodsh && skip "remote MGS with nodsh" && return
	[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! modify_flags ${HOSTNAME_CHECKSUM}_${i}; then
	[[ $rc != 0 ]] && error "nodemap_modify failed with $rc" && return 2
	[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 3
run_test 11 "nodemap modify"
	remote_mgs_nodsh && skip "remote MGS with nodsh" && return
	[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! squash_id ${HOSTNAME_CHECKSUM}_${i} 88 0; then
	[[ $rc != 0 ]] && error "nodemap squash_uid failed with $rc" && return 2
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! squash_id ${HOSTNAME_CHECKSUM}_${i} 88 1; then
	[[ $rc != 0 ]] && error "nodemap squash_gid failed with $rc" && return 3
	[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4
run_test 12 "nodemap set squash ids"
	remote_mgs_nodsh && skip "remote MGS with nodsh" && return
	[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
	[[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
			for k in $NODEMAP_IPADDR_LIST; do
				if ! test_nid $SUBNET_CHECKSUM.$i.$j.$k \
				     ${HOSTNAME_CHECKSUM}_${i}; then
	[[ $rc != 0 ]] && error "nodemap_test_nid failed with $rc" && return 3
	[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4
run_test 13 "test nids"
	remote_mgs_nodsh && skip "remote MGS with nodsh" && return
	[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
			for k in $NODEMAP_IPADDR_LIST; do
				if ! test_nid $SUBNET_CHECKSUM.$i.$j.$k \
	[[ $rc != 0 ]] && error "nodemap_test_nid failed with $rc" && return 3
	[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4
run_test 14 "test default nodemap nid lookup"
	remote_mgs_nodsh && skip "remote MGS with nodsh" && return
	[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
	[[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2
	[[ $rc != 0 ]] && error "nodemap_add_idmap failed with $rc" && return 3
	[[ $rc != 0 ]] && error "nodemap_test_id failed with $rc" && return 4
	[[ $rc != 0 ]] && error "update_idmaps failed with $rc" && return 5
	[[ $rc != 0 ]] && error "nodemap_del_idmap failed with $rc" && return 6
	[[ $rc != 0 ]] && error "nodemap_delete failed with $rc" && return 7
run_test 15 "test id mapping"
	local nodemap_name=$1
	local is_active=$(do_facet mgs $LCTL get_param -n nodemap.active)
	local max_retries=20
	local mgs_ip=$(host_nids_address $mgs_HOST $NETTYPE | cut -d' ' -f1)
	if [ "$nodemap_name" == "active" ]; then
	elif [ -z "$key" ]; then
		proc_param=${nodemap_name}
		proc_param="${nodemap_name}.${key}"
	(( is_active == 0 )) && [ "$proc_param" != "active" ] && return
	if [ -z "$value" ]; then
		out1=$(do_facet mgs $LCTL get_param $opt nodemap.${proc_param})
		echo "On MGS ${mgs_ip}, ${proc_param} = $out1"
	# wait up to 10 seconds for other servers to sync with mgs
	for i in $(seq 1 10); do
		for node in $(all_server_nodes); do
			local node_ip=$(host_nids_address $node $NETTYPE |
			if [ -z "$value" ]; then
				[ "$node_ip" == "$mgs_ip" ] && continue
			out2=$(do_node $node_ip $LCTL get_param $opt \
			       nodemap.$proc_param 2>/dev/null)
			echo "On $node ${node_ip}, ${proc_param} = $out2"
			[ "$out1" != "$out2" ] && is_sync=false && break
	echo "OTHER - IP: $node_ip"
	error "mgs and $nodemap_name ${key} mismatch, $i attempts"
	echo "waited $((i - 1)) seconds for sync"
create_fops_nodemaps() {
	for client in $clients; do
		local client_ip=$(host_nids_address $client $NETTYPE)
		local client_nid=$(h2nettype $client_ip)
		do_facet mgs $LCTL nodemap_add c${i} || return 1
		do_facet mgs $LCTL nodemap_add_range \
			--name c${i} --range $client_nid || return 1
		for map in ${FOPS_IDMAPS[i]}; do
			do_facet mgs $LCTL nodemap_add_idmap --name c${i} \
				--idtype uid --idmap ${map} || return 1
			do_facet mgs $LCTL nodemap_add_idmap --name c${i} \
				--idtype gid --idmap ${map} || return 1
		wait_nm_sync c$i idmap
delete_fops_nodemaps() {
	for client in $clients; do
		do_facet mgs $LCTL nodemap_del c${i} || return 1
	if [ $MDSCOUNT -le 1 ]; then
		do_node ${clients_arr[0]} mkdir -p $DIR/$tdir
		# round-robin MDTs to test DNE nodemap support
		[ ! -d $DIR ] && do_node ${clients_arr[0]} mkdir -p $DIR
		do_node ${clients_arr[0]} $LFS setdirstripe -c 1 -i \
			$((fops_mds_index % MDSCOUNT)) $DIR/$tdir
		((fops_mds_index++))
	# acl test directory needs to be initialized on a privileged client
	local admin=$(do_facet mgs $LCTL get_param -n nodemap.c0.admin_nodemap)
	local trust=$(do_facet mgs $LCTL get_param -n \
		      nodemap.c0.trusted_nodemap)
	do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
	do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
	wait_nm_sync c0 admin_nodemap
	wait_nm_sync c0 trusted_nodemap
	do_node ${clients_arr[0]} rm -rf $DIR/$tdir
	do_node ${clients_arr[0]} chown $user $DIR/$tdir
	do_facet mgs $LCTL nodemap_modify --name c0 \
		--property admin --value $admin
	do_facet mgs $LCTL nodemap_modify --name c0 \
		--property trusted --value $trust
	# flush MDT locks to make sure they are reacquired before test
	do_node ${clients_arr[0]} $LCTL set_param \
		ldlm.namespaces.$FSNAME-MDT*.lru_size=clear
	wait_nm_sync c0 admin_nodemap
	wait_nm_sync c0 trusted_nodemap
# fileset test directory needs to be initialized on a privileged client
fileset_test_setup() {
	local admin=$(do_facet mgs $LCTL get_param -n \
		      nodemap.${nm}.admin_nodemap)
	local trust=$(do_facet mgs $LCTL get_param -n \
		      nodemap.${nm}.trusted_nodemap)
	do_facet mgs $LCTL nodemap_modify --name $nm --property admin --value 1
	do_facet mgs $LCTL nodemap_modify --name $nm --property trusted \
	wait_nm_sync $nm admin_nodemap
	wait_nm_sync $nm trusted_nodemap
	# create directory and populate it for subdir mount
	do_node ${clients_arr[0]} mkdir $MOUNT/$subdir ||
		error "unable to create dir $MOUNT/$subdir"
	do_node ${clients_arr[0]} touch $MOUNT/$subdir/this_is_$subdir ||
		error "unable to create file $MOUNT/$subdir/this_is_$subdir"
	do_node ${clients_arr[0]} mkdir $MOUNT/$subdir/$subsubdir ||
		error "unable to create dir $MOUNT/$subdir/$subsubdir"
	do_node ${clients_arr[0]} touch \
		$MOUNT/$subdir/$subsubdir/this_is_$subsubdir ||
		error "unable to create file \
			$MOUNT/$subdir/$subsubdir/this_is_$subsubdir"
	do_facet mgs $LCTL nodemap_modify --name $nm \
		--property admin --value $admin
	do_facet mgs $LCTL nodemap_modify --name $nm \
		--property trusted --value $trust
	# flush MDT locks to make sure they are reacquired before test
	do_node ${clients_arr[0]} $LCTL set_param \
		ldlm.namespaces.$FSNAME-MDT*.lru_size=clear
	wait_nm_sync $nm admin_nodemap
	wait_nm_sync $nm trusted_nodemap
# fileset test directory needs to be cleaned up on a privileged client
fileset_test_cleanup() {
	local admin=$(do_facet mgs $LCTL get_param -n \
		      nodemap.${nm}.admin_nodemap)
	local trust=$(do_facet mgs $LCTL get_param -n \
		      nodemap.${nm}.trusted_nodemap)
	do_facet mgs $LCTL nodemap_modify --name $nm --property admin --value 1
	do_facet mgs $LCTL nodemap_modify --name $nm --property trusted \
	wait_nm_sync $nm admin_nodemap
	wait_nm_sync $nm trusted_nodemap
	# cleanup directory created for subdir mount
	do_node ${clients_arr[0]} rm -rf $MOUNT/$subdir ||
		error "unable to remove dir $MOUNT/$subdir"
	do_facet mgs $LCTL nodemap_modify --name $nm \
		--property admin --value $admin
	do_facet mgs $LCTL nodemap_modify --name $nm \
		--property trusted --value $trust
	# flush MDT locks to make sure they are reacquired before test
	do_node ${clients_arr[0]} $LCTL set_param \
		ldlm.namespaces.$FSNAME-MDT*.lru_size=clear
	wait_nm_sync $nm admin_nodemap
	wait_nm_sync $nm trusted_nodemap
do_create_delete() {
	local testfile=$DIR/$tdir/$tfile
	if $run_u touch $testfile >& /dev/null; then
		$run_u rm $testfile && d=1
	local expected=$(get_cr_del_expected $key)
	[ "$res" != "$expected" ] &&
		error "test $key, wanted $expected, got $res" && rc=$((rc + 1))
nodemap_check_quota() {
	$run_u lfs quota -q $DIR | awk '{ print $2; exit; }'
do_fops_quota_test() {
	# fuzz quota used to account for possible indirect blocks, etc
	local quota_fuzz=$(fs_log_size)
	local qused_orig=$(nodemap_check_quota "$run_u")
	local qused_high=$((qused_orig + quota_fuzz))
	local qused_low=$((qused_orig - quota_fuzz))
	local testfile=$DIR/$tdir/$tfile
	$run_u dd if=/dev/zero of=$testfile oflag=sync bs=1M count=1 \
		>& /dev/null || error "unable to write quota test file"
	sync; sync_all_data || true
	local qused_new=$(nodemap_check_quota "$run_u")
	[ $((qused_new)) -lt $((qused_low + 1024)) -o \
	  $((qused_new)) -gt $((qused_high + 1024)) ] &&
		error "$qused_new != $qused_orig + 1M after write, " \
			"fuzz is $quota_fuzz"
	$run_u rm $testfile || error "unable to remove quota test file"
	wait_delete_completed_mds
	qused_new=$(nodemap_check_quota "$run_u")
	[ $((qused_new)) -lt $((qused_low)) \
	  -o $((qused_new)) -gt $((qused_high)) ] &&
		error "quota not reclaimed, expect $qused_orig, " \
			"got $qused_new, fuzz $quota_fuzz"
get_fops_mapped_user() {
	for ((i = 0; i < ${#FOPS_IDMAPS[@]}; i++)); do
		for map in ${FOPS_IDMAPS[i]}; do
			if [ $(cut -d: -f1 <<< "$map") == "$cli_user" ]; then
				cut -d: -f2 <<< "$map"
get_cr_del_expected() {
	IFS=":" read -a key <<< "$1"
	local mapmode="${key[0]}"
	local mds_user="${key[1]}"
	local cluster="${key[2]}"
	local cli_user="${key[3]}"
	local mode="0${key[4]}"
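	# a key looks like "mapped:60000:c0:0:300": mapping mode, mds-side
	# owner uid, cluster, client uid, and a 3-digit octal mode; the
	# leading "0" prepended above forces octal arithmetic on the mode bits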
	[[ $mapmode == *mapped* ]] && mapped=1
	# only c1 is mapped in these test cases
	[[ $mapmode == mapped_trusted* ]] && [ "$cluster" == "c0" ] && mapped=0
	[[ $mapmode == *noadmin* ]] && noadmin=1
	# o+wx works as long as the user isn't mapped
	if [ $((mode & 3)) -eq 3 ]; then
	# if client user is root, check if root is squashed
	if [ "$cli_user" == "0" ]; then
		# a squashed root succeeds only if the "other" bits allow it
		1) [ "$other" == "1" ] && echo $SUCCESS
		   [ "$other" == "0" ] && echo $FAILURE;;
	if [ "$mapped" == "0" ]; then
		[ "$other" == "1" ] && echo $SUCCESS
		[ "$other" == "0" ] && echo $FAILURE
	# if mapped user is mds user, check for u+wx
	mapped_user=$(get_fops_mapped_user $cli_user)
	[ "$mapped_user" == "-1" ] &&
		error "unable to find mapping for client user $cli_user"
	if [ "$mapped_user" == "$mds_user" -a \
	     $(((mode & 0300) == 0300)) -eq 1 ]; then
	if [ "$mapped_user" != "$mds_user" -a "$other" == "1" ]; then
test_fops_admin_cli_i=""
test_fops_chmod_dir() {
	local current_cli_i=$1
	local dir_to_chmod=$3
	local new_admin_cli_i=""
	# do we need to set up a new admin client?
	[ "$current_cli_i" == "0" ] && [ "$test_fops_admin_cli_i" != "1" ] &&
	[ "$current_cli_i" != "0" ] && [ "$test_fops_admin_cli_i" != "0" ] &&
	# if there is only one client and it is non-admin, flip admin every time
	if [ "$num_clients" == "1" ]; then
		test_fops_admin_client=$clients
		test_fops_admin_val=$(do_facet mgs $LCTL get_param -n \
				      nodemap.c0.admin_nodemap)
		if [ "$test_fops_admin_val" != "1" ]; then
			do_facet mgs $LCTL nodemap_modify \
			wait_nm_sync c0 admin_nodemap
	elif [ "$new_admin_cli_i" != "" ]; then
		# restore admin val to old admin client
		if [ "$test_fops_admin_cli_i" != "" ] &&
		   [ "$test_fops_admin_val" != "1" ]; then
			do_facet mgs $LCTL nodemap_modify \
				--name c${test_fops_admin_cli_i} \
				--value $test_fops_admin_val
			wait_nm_sync c${test_fops_admin_cli_i} admin_nodemap
		test_fops_admin_cli_i=$new_admin_cli_i
		test_fops_admin_client=${clients_arr[$new_admin_cli_i]}
		test_fops_admin_val=$(do_facet mgs $LCTL get_param -n \
				      nodemap.c${new_admin_cli_i}.admin_nodemap)
		if [ "$test_fops_admin_val" != "1" ]; then
			do_facet mgs $LCTL nodemap_modify \
				--name c${new_admin_cli_i} \
			wait_nm_sync c${new_admin_cli_i} admin_nodemap
	do_node $test_fops_admin_client chmod $perm_bits $dir_to_chmod || return 1
	# remove admin for single client if originally non-admin
	if [ "$num_clients" == "1" ] && [ "$test_fops_admin_val" != "1" ]; then
		do_facet mgs $LCTL nodemap_modify --name c0 --property admin \
		wait_nm_sync c0 admin_nodemap
	local single_client="$2"
	local client_user_list=([0]="0 $((IDBASE+3)) $((IDBASE+4))"
				[1]="0 $((IDBASE+5)) $((IDBASE+6))")
	local perm_bit_list="0 3 $((0300)) $((0303))"
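	# the quick (non-SLOW) list probes only the bits the expectation logic
	# above cares about: 0 (none), 3 (o+wx), $((0300)) = 192 (u+wx) and
	# $((0303)) = 195 (both)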
	# SLOW tests 000-007, 010-070, 100-700 (octal modes)
	[ "$SLOW" == "yes" ] &&
		perm_bit_list="0 $(seq 1 7) $(seq 8 8 63) $(seq 64 64 511) \
	# step through mds users. -1 means root
	for mds_i in -1 0 1 2; do
		local user=$((mds_i + IDBASE))
		[ "$mds_i" == "-1" ] && user=0
		echo mkdir -p $DIR/$tdir
		for client in $clients; do
			for u in ${client_user_list[$cli_i]}; do
				local run_u="do_node $client \
					     $RUNAS_CMD -u$u -g$u -G$u"
				for perm_bits in $perm_bit_list; do
					local mode=$(printf %03o $perm_bits)
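					# e.g. perm_bits=192 yields the octal
					# string "300", i.e. u+wx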
					key="$mapmode:$user:c$cli_i:$u:$mode"
					test_fops_chmod_dir $cli_i $mode \
						error cannot chmod $key
					do_create_delete "$run_u" "$key"
			test_fops_chmod_dir $cli_i 777 $DIR/$tdir ||
				error cannot chmod $key
			do_fops_quota_test "$run_u"
			cli_i=$((cli_i + 1))
			[ "$single_client" == "1" ] && break
nodemap_version_check() {
	remote_mgs_nodsh && skip "remote MGS with nodsh" && return 1
	[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
nodemap_test_setup() {
	local active_nodemap=1
	[ "$1" == "0" ] && active_nodemap=0
	do_nodes $(comma_list $(all_mdts_nodes)) \
		$LCTL set_param mdt.*.identity_upcall=NONE
	create_fops_nodemaps
	[[ $rc != 0 ]] && error "adding fops nodemaps failed $rc"
	do_facet mgs $LCTL nodemap_activate $active_nodemap
	do_facet mgs $LCTL nodemap_modify --name default \
		--property admin --value 1
	do_facet mgs $LCTL nodemap_modify --name default \
		--property trusted --value 1
	wait_nm_sync default trusted_nodemap
nodemap_test_cleanup() {
	delete_fops_nodemaps
	[[ $rc != 0 ]] && error "removing fops nodemaps failed $rc"
	do_facet mgs $LCTL nodemap_modify --name default \
		--property admin --value 0
	do_facet mgs $LCTL nodemap_modify --name default \
		--property trusted --value 0
	wait_nm_sync default trusted_nodemap
	do_facet mgs $LCTL nodemap_activate 0
	wait_nm_sync active 0
	export SK_UNIQUE_NM=false
nodemap_clients_admin_trusted() {
	for client in $clients; do
		do_facet mgs $LCTL nodemap_modify --name c0 \
			--property admin --value $admin
		do_facet mgs $LCTL nodemap_modify --name c0 \
			--property trusted --value $tr
	wait_nm_sync c$((i - 1)) admin_nodemap
	wait_nm_sync c$((i - 1)) trusted_nodemap
	nodemap_version_check || return 0
	nodemap_test_setup 0
	trap nodemap_test_cleanup EXIT
	nodemap_test_cleanup
run_test 16 "test nodemap all_off fileops"
	nodemap_version_check || return 0
	trap nodemap_test_cleanup EXIT
	nodemap_clients_admin_trusted 0 1
	test_fops trusted_noadmin 1
	nodemap_test_cleanup
run_test 17 "test nodemap trusted_noadmin fileops"
	nodemap_version_check || return 0
	trap nodemap_test_cleanup EXIT
	nodemap_clients_admin_trusted 0 0
	test_fops mapped_noadmin 1
	nodemap_test_cleanup
run_test 18 "test nodemap mapped_noadmin fileops"
	nodemap_version_check || return 0
	trap nodemap_test_cleanup EXIT
	nodemap_clients_admin_trusted 1 1
	test_fops trusted_admin 1
	nodemap_test_cleanup
run_test 19 "test nodemap trusted_admin fileops"
	nodemap_version_check || return 0
	trap nodemap_test_cleanup EXIT
	nodemap_clients_admin_trusted 1 0
	test_fops mapped_admin 1
	nodemap_test_cleanup
run_test 20 "test nodemap mapped_admin fileops"
	nodemap_version_check || return 0
	trap nodemap_test_cleanup EXIT
	for client in $clients; do
		do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property admin --value 0
		do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property trusted --value $x
	wait_nm_sync c$((i - 1)) trusted_nodemap
	test_fops mapped_trusted_noadmin
	nodemap_test_cleanup
run_test 21 "test nodemap mapped_trusted_noadmin fileops"
	nodemap_version_check || return 0
	trap nodemap_test_cleanup EXIT
	for client in $clients; do
		do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property admin --value 1
		do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property trusted --value $x
	wait_nm_sync c$((i - 1)) trusted_nodemap
	test_fops mapped_trusted_admin
	nodemap_test_cleanup
run_test 22 "test nodemap mapped_trusted_admin fileops"
# acl test directory needs to be initialized on a privileged client
nodemap_acl_test_setup() {
	local admin=$(do_facet mgs $LCTL get_param -n \
		      nodemap.c0.admin_nodemap)
	local trust=$(do_facet mgs $LCTL get_param -n \
		      nodemap.c0.trusted_nodemap)
	do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
	do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
	wait_nm_sync c0 admin_nodemap
	wait_nm_sync c0 trusted_nodemap
	do_node ${clients_arr[0]} rm -rf $DIR/$tdir
	do_node ${clients_arr[0]} chmod a+rwx $DIR/$tdir ||
		error unable to chmod a+rwx test dir $DIR/$tdir
	do_facet mgs $LCTL nodemap_modify --name c0 \
		--property admin --value $admin
	do_facet mgs $LCTL nodemap_modify --name c0 \
		--property trusted --value $trust
	wait_nm_sync c0 trusted_nodemap
# returns 0 if the number of ACLs does not change on the second (mapped)
# client after being set on the first client
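# Usage sketch (hypothetical values): "nodemap_acl_test 60006 client1 client2"
# sets an ACL for uid 60006 from client1 and counts ACL entries from client2;
# a non-empty fourth argument ("1") means the setfacl itself is expected to
# fail.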
nodemap_acl_test() {
	local set_client="$2"
	local get_client="$3"
	local check_setfacl="$4"
	local setfacl_error=0
	local testfile=$DIR/$tdir/$tfile
	local RUNAS_USER="$RUNAS_CMD -u $user"
	local acl_count_post=0
	nodemap_acl_test_setup
	do_node $set_client $RUNAS_USER touch $testfile
	# ACL masks aren't filtered by nodemap code, so we ignore them
	acl_count=$(do_node $get_client getfacl $testfile | grep -v mask |
	do_node $set_client $RUNAS_USER setfacl -m $user:rwx $testfile ||
	# if check_setfacl is 1, the setfacl call is expected to fail
	if [ "$check_setfacl" == "1" ]; then
		[ "$setfacl_error" != "1" ] && return 1
	[ "$setfacl_error" == "1" ] && echo "WARNING: unable to setfacl"
	acl_count_post=$(do_node $get_client getfacl $testfile | grep -v mask |
	[ $acl_count -eq $acl_count_post ] && return 0
	nodemap_version_check || return 0
	trap nodemap_test_cleanup EXIT
	# 1 trusted cluster, 1 mapped cluster
	local unmapped_fs=$((IDBASE+0))
	local unmapped_c1=$((IDBASE+5))
	local mapped_fs=$((IDBASE+2))
	local mapped_c0=$((IDBASE+4))
	local mapped_c1=$((IDBASE+6))
	do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
	do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
	do_facet mgs $LCTL nodemap_modify --name c1 --property admin --value 0
	do_facet mgs $LCTL nodemap_modify --name c1 --property trusted --value 0
	wait_nm_sync c1 trusted_nodemap
	# setfacl on trusted cluster to unmapped user, verify it's not seen
	nodemap_acl_test $unmapped_fs ${clients_arr[0]} ${clients_arr[1]} ||
		error "acl count (1)"
	# setfacl on trusted cluster to mapped user, verify it's seen
	nodemap_acl_test $mapped_fs ${clients_arr[0]} ${clients_arr[1]} &&
		error "acl count (2)"
	# setfacl on mapped cluster to mapped user, verify it's seen
	nodemap_acl_test $mapped_c1 ${clients_arr[1]} ${clients_arr[0]} &&
		error "acl count (3)"
	# setfacl on mapped cluster to unmapped user, verify error
	nodemap_acl_test $unmapped_fs ${clients_arr[1]} ${clients_arr[0]} 1 ||
		error "acl count (4)"
	do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 0
	do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 0
	wait_nm_sync c0 trusted_nodemap
	# setfacl to mapped user on c1, also mapped to c0, verify it's seen
	nodemap_acl_test $mapped_c1 ${clients_arr[1]} ${clients_arr[0]} &&
		error "acl count (5)"
	# setfacl to mapped user on c1, not mapped to c0, verify not seen
	nodemap_acl_test $unmapped_c1 ${clients_arr[1]} ${clients_arr[0]} ||
		error "acl count (6)"
	nodemap_test_cleanup
run_test 23a "test mapped regular ACLs"
test_23b() { #LU-9929
	remote_mgs_nodsh && skip "remote MGS with nodsh" && return
	[ $(lustre_version_code mgs) -lt $(version_code 2.10.53) ] &&
		skip "Need MGS >= 2.10.53" && return
	trap nodemap_test_cleanup EXIT
	local testdir=$DIR/$tdir
	local fs_id=$((IDBASE+10))
	do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
	wait_nm_sync c0 admin_nodemap
	# add idmap $ID0:$fs_id (500:60010)
	do_facet mgs $LCTL nodemap_add_idmap --name c0 --idtype gid \
		--idmap $ID0:$fs_id ||
		error "add idmap $ID0:$fs_id to nodemap c0 failed"
	# set/getfacl default acl on client0 (unmapped gid=500)
	# here, USER0=$(getent passwd | grep :$ID0:$ID0: | cut -d: -f1)
	setfacl -R -d -m group:$USER0:rwx $testdir ||
		error "setfacl $testdir on ${clients_arr[0]} failed"
	unmapped_id=$(getfacl $testdir | grep -E "default:group:.*:rwx" |
		      awk -F: '{print $3}')
	[ "$unmapped_id" = "$USER0" ] ||
		error "gid=$ID0 was not unmapped correctly on ${clients_arr[0]}"
	# getfacl default acl on MGS (mapped gid=60010)
	zconf_mount $mgs_HOST $MOUNT
	do_rpc_nodes $mgs_HOST is_mounted $MOUNT ||
		error "mount lustre on MGS failed"
	mapped_id=$(do_node $mgs_HOST getfacl $testdir |
		    grep -E "default:group:.*:rwx" | awk -F: '{print $3}')
	fs_user=$(do_facet mgs getent passwd |
		  grep :$fs_id:$fs_id: | cut -d: -f1)
	[ $mapped_id -eq $fs_id -o "$mapped_id" = "$fs_user" ] ||
		error "Should return gid=$fs_id or $fs_user on MGS"
	do_facet mgs umount $MOUNT
	nodemap_test_cleanup
run_test 23b "test mapped default ACLs"
	trap nodemap_test_cleanup EXIT
	do_nodes $(comma_list $(all_server_nodes)) $LCTL get_param -R nodemap ||
		error "proc readable file read failed"
	nodemap_test_cleanup
run_test 24 "check nodemap proc files for LBUGs and Oopses"
	local tmpfile=$(mktemp)
	local tmpfile2=$(mktemp)
	local tmpfile3=$(mktemp)
	local tmpfile4=$(mktemp)
	nodemap_version_check || return 0
	# stop clients for this test
	zconf_umount_clients $CLIENTS $MOUNT ||
		error "unable to umount clients $CLIENTS"
	export SK_UNIQUE_NM=true
	# enable trusted/admin for setquota call in cleanup_and_setup_lustre()
	for client in $clients; do
		do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property admin --value 1
		do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property trusted --value 1
	wait_nm_sync c$((i - 1)) trusted_nodemap
	trap nodemap_test_cleanup EXIT
	# create a new, empty nodemap, and add fileset info to it
	do_facet mgs $LCTL nodemap_add test25 ||
		error "unable to create nodemap $testname"
	do_facet mgs $LCTL set_param -P nodemap.$testname.fileset=/$subdir ||
		error "unable to add fileset info to nodemap test25"
	wait_nm_sync test25 id
	do_facet mgs $LCTL nodemap_info > $tmpfile
	do_facet mds $LCTL nodemap_info > $tmpfile2
	if ! $SHARED_KEY; then
		# will conflict with SK's nodemaps
		cleanup_and_setup_lustre
	# stop clients for this test
	zconf_umount_clients $CLIENTS $MOUNT ||
		error "unable to umount clients $CLIENTS"
	do_facet mgs $LCTL nodemap_info > $tmpfile3
	diff -q $tmpfile3 $tmpfile >& /dev/null ||
		error "nodemap_info diff on MGS after remount"
	do_facet mds $LCTL nodemap_info > $tmpfile4
	diff -q $tmpfile4 $tmpfile2 >& /dev/null ||
		error "nodemap_info diff on MDS after remount"
	do_facet mgs $LCTL nodemap_del test25 ||
		error "cannot delete nodemap test25 from config"
	nodemap_test_cleanup
	# restart clients previously stopped
	zconf_mount_clients $CLIENTS $MOUNT ||
		error "unable to mount clients $CLIENTS"
	rm -f $tmpfile $tmpfile2 $tmpfile3 $tmpfile4
	export SK_UNIQUE_NM=false
run_test 25 "test save and reload nodemap config"
	nodemap_version_check || return 0
	do_facet mgs "seq -f 'c%g' $large_i | xargs -n1 $LCTL nodemap_add"
	wait_nm_sync c$large_i admin_nodemap
	do_facet mgs "seq -f 'c%g' $large_i | xargs -n1 $LCTL nodemap_del"
	wait_nm_sync c$large_i admin_nodemap
run_test 26 "test transferring very large nodemap"
nodemap_exercise_fileset() {
	if [ "$nm" == "default" ]; then
		do_facet mgs $LCTL nodemap_activate 1
		if $SHARED_KEY; then
			export SK_UNIQUE_NM=true
			# will conflict with SK's nodemaps
	trap "fileset_test_cleanup $nm" EXIT
	fileset_test_setup "$nm"
	# add fileset info to $nm nodemap
	if ! combined_mgs_mds; then
		do_facet mgs $LCTL set_param nodemap.${nm}.fileset=/$subdir ||
			error "unable to add fileset info to $nm nodemap on MGS"
	do_facet mgs $LCTL set_param -P nodemap.${nm}.fileset=/$subdir ||
		error "unable to add fileset info to $nm nodemap for servers"
	wait_nm_sync $nm fileset "nodemap.${nm}.fileset=/$subdir"
	zconf_umount_clients ${clients_arr[0]} $MOUNT ||
		error "unable to umount client ${clients_arr[0]}"
	# set some generic fileset to trigger SSK code
	zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
		error "unable to remount client ${clients_arr[0]}"
	# test mount point content
	do_node ${clients_arr[0]} test -f $MOUNT/this_is_$subdir ||
		error "fileset not taken into account"
	# re-mount client with sub-subdir
	zconf_umount_clients ${clients_arr[0]} $MOUNT ||
		error "unable to umount client ${clients_arr[0]}"
	export FILESET=/$subsubdir
	zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
		error "unable to remount client ${clients_arr[0]}"
	# test mount point content
	do_node ${clients_arr[0]} test -f $MOUNT/this_is_$subsubdir ||
		error "subdir of fileset not taken into account"
	# remove fileset info from nodemap
	do_facet mgs $LCTL nodemap_set_fileset --name $nm --fileset clear ||
		error "unable to delete fileset info on $nm nodemap"
	wait_update_facet mgs "$LCTL get_param nodemap.${nm}.fileset" \
		"nodemap.${nm}.fileset=" ||
		error "fileset info still not cleared on $nm nodemap"
	do_facet mgs $LCTL set_param -P nodemap.${nm}.fileset=clear ||
		error "unable to reset fileset info on $nm nodemap"
	wait_nm_sync $nm fileset "nodemap.${nm}.fileset="
	zconf_umount_clients ${clients_arr[0]} $MOUNT ||
		error "unable to umount client ${clients_arr[0]}"
	zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
		error "unable to remount client ${clients_arr[0]}"
	# test mount point content
	if ! do_node ${clients_arr[0]} test -d $MOUNT/$subdir; then
		error "fileset not cleared on $nm nodemap"
	# back to non-nodemap setup
	if $SHARED_KEY; then
		export SK_UNIQUE_NM=false
		zconf_umount_clients ${clients_arr[0]} $MOUNT ||
			error "unable to umount client ${clients_arr[0]}"
	fileset_test_cleanup "$nm"
	if [ "$nm" == "default" ]; then
		do_facet mgs $LCTL nodemap_activate 0
		wait_nm_sync active 0
		export SK_UNIQUE_NM=false
	nodemap_test_cleanup
	if $SHARED_KEY; then
		zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
			error "unable to remount client ${clients_arr[0]}"
	[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.10.59) ] &&
		skip "Need MDS >= 2.10.59" && return
	for nm in "default" "c0"; do
		local subdir="subdir_${nm}"
		local subsubdir="subsubdir_${nm}"
		echo "Exercising fileset for nodemap $nm"
		nodemap_exercise_fileset "$nm"
run_test 27a "test fileset in various nodemaps"
test_27b() { #LU-10703
	[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.10.59) ] &&
		skip "Need MDS >= 2.10.59" && return
	[[ $MDSCOUNT -lt 2 ]] && skip "needs >= 2 MDTs" && return
	trap nodemap_test_cleanup EXIT
	# add the nodemaps and set their filesets
	for i in $(seq 1 $MDSCOUNT); do
		do_facet mgs $LCTL nodemap_del nm$i 2>/dev/null
		do_facet mgs $LCTL nodemap_add nm$i ||
			error "add nodemap nm$i failed"
		wait_nm_sync nm$i "" "" "-N"
		if ! combined_mgs_mds; then
			$LCTL set_param nodemap.nm$i.fileset=/dir$i ||
				error "set nm$i.fileset=/dir$i failed on MGS"
		do_facet mgs $LCTL set_param -P nodemap.nm$i.fileset=/dir$i ||
			error "set nm$i.fileset=/dir$i failed on servers"
		wait_nm_sync nm$i fileset "nodemap.nm$i.fileset=/dir$i"
	# check whether all the filesets are correct
	for i in $(seq 1 $MDSCOUNT); do
		fileset=$(do_facet mds$i \
			  $LCTL get_param -n nodemap.nm$i.fileset)
		[ "$fileset" = "/dir$i" ] ||
			error "nm$i.fileset $fileset != /dir$i on mds$i"
		do_facet mgs $LCTL nodemap_del nm$i ||
			error "delete nodemap nm$i failed"
	nodemap_test_cleanup
run_test 27b "a new nodemap does not clear another nodemap's fileset"
	if ! $SHARED_KEY; then
		skip "need shared key feature for this test" && return
	mkdir -p $DIR/$tdir || error "mkdir failed"
	touch $DIR/$tdir/$tdir.out || error "touch failed"
	if [ ! -f $DIR/$tdir/$tdir.out ]; then
		error "read before rotation failed"
	# store top key identity to ensure rotation has occurred
	SK_IDENTITY_OLD=$(lctl get_param *.*.*srpc* | grep "expire" |
			  head -1 | awk '{print $15}' | cut -c1-8)
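	# the awk/cut pipeline above (repeated below) keeps the first 8
	# characters of field 15 of the first "expire" line, which in this
	# srpc_info output is assumed to identify the key currently in use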
	do_facet $SINGLEMDS lfs flushctx ||
		error "could not run flushctx on $SINGLEMDS"
	lfs flushctx || error "could not run flushctx on client"
	# verify new key is in place
	SK_IDENTITY_NEW=$(lctl get_param *.*.*srpc* | grep "expire" |
			  head -1 | awk '{print $15}' | cut -c1-8)
	if [ "$SK_IDENTITY_OLD" == "$SK_IDENTITY_NEW" ]; then
		error "key did not rotate correctly"
	if [ ! -f $DIR/$tdir/$tdir.out ]; then
		error "read after rotation failed"
run_test 28 "check shared key rotation method"
	if ! $SHARED_KEY; then
		skip "need shared key feature for this test" && return
	if [ "$SK_FLAVOR" != "ski" ] && [ "$SK_FLAVOR" != "skpi" ]; then
		skip "test only valid if integrity is active"
	mkdir $DIR/$tdir || error "mkdir"
	touch $DIR/$tdir/$tfile || error "touch"
	zconf_umount_clients ${clients_arr[0]} $MOUNT ||
		error "unable to umount clients"
	keyctl show | awk '/lustre/ { print $1 }' |
		xargs -IX keyctl unlink X
	OLD_SK_PATH=$SK_PATH
	export SK_PATH=/dev/null
	if zconf_mount_clients ${clients_arr[0]} $MOUNT; then
		export SK_PATH=$OLD_SK_PATH
		if [ -e $DIR/$tdir/$tfile ]; then
			error "able to mount and read without key"
			error "able to mount without key"
	export SK_PATH=$OLD_SK_PATH
	keyctl show | awk '/lustre/ { print $1 }' |
		xargs -IX keyctl unlink X
run_test 29 "check for missing shared key"
	if ! $SHARED_KEY; then
		skip "need shared key feature for this test" && return
	if [ "$SK_FLAVOR" != "ski" ] && [ "$SK_FLAVOR" != "skpi" ]; then
		skip "test only valid if integrity is active"
	mkdir -p $DIR/$tdir || error "mkdir failed"
	touch $DIR/$tdir/$tdir.out || error "touch failed"
	zconf_umount_clients ${clients_arr[0]} $MOUNT ||
		error "unable to umount clients"
	# unload keys from ring
	keyctl show | awk '/lustre/ { print $1 }' |
		xargs -IX keyctl unlink X
	# invalidate the key with a bogus filesystem name
	lgss_sk -w $SK_PATH/$FSNAME-bogus.key -f $FSNAME.bogus \
		-t client -d /dev/urandom || error "lgss_sk failed (1)"
	do_facet $SINGLEMDS lfs flushctx || error "could not run flushctx"
	OLD_SK_PATH=$SK_PATH
	export SK_PATH=$SK_PATH/$FSNAME-bogus.key
	if zconf_mount_clients ${clients_arr[0]} $MOUNT; then
		SK_PATH=$OLD_SK_PATH
		if [ -e $DIR/$tdir/$tdir.out ]; then
			error "mount and read file with invalid key"
			error "mount with invalid key"
	SK_PATH=$OLD_SK_PATH
	zconf_umount_clients ${clients_arr[0]} $MOUNT ||
		error "unable to umount clients"
run_test 30 "check for invalid shared key"
log "cleanup: ======================================================"
## nodemap deactivated
do_facet mgs $LCTL nodemap_activate 0
for num in $(seq $MDSCOUNT); do
	if [ "${identity_old[$num]}" = 1 ]; then
		switch_identity $num false || identity_old[$num]=$?
$RUNAS_CMD -u $ID0 ls $DIR
$RUNAS_CMD -u $ID1 ls $DIR
check_and_cleanup_lustre