# Run select tests by setting ONLY, or as arguments to the script.
# Skip specific tests by setting EXCEPT.
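# Usage sketch (test numbers are only examples), assuming this script is
# invoked as sanity-sec.sh:
#   ONLY="7 9" bash sanity-sec.sh     # run just tests 7 and 9
#   bash sanity-sec.sh 7 9            # same, as positional arguments
#   EXCEPT="26" bash sanity-sec.sh    # run everything except test 26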
# bug number for skipped test:
ALWAYS_EXCEPT=" $SANITY_SEC_EXCEPT"

# bug number for skipped test: 9145 9145 9671 9145 9145 9145 9145 9245
ALWAYS_EXCEPT=" 17 18 19 20 21 22 23 27 $ALWAYS_EXCEPT"

# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!

export PATH=$PWD/$SRCDIR:$SRCDIR:$PWD/$SRCDIR/../utils:$PATH:/sbin
export NAME=${NAME:-local}

LUSTRE=${LUSTRE:-$(dirname $0)/..}
. $LUSTRE/tests/test-framework.sh

. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}

NODEMAP_TESTS=$(seq 7 26)

if ! check_versions; then
	echo "It is NOT necessary to test nodemap in interoperability mode"
	EXCEPT="$EXCEPT $NODEMAP_TESTS"
fi

[ "$SLOW" = "no" ] && EXCEPT_SLOW="26"

[ "$ALWAYS_EXCEPT$EXCEPT$EXCEPT_SLOW" ] &&
	echo "Skipping tests: $ALWAYS_EXCEPT $EXCEPT $EXCEPT_SLOW"
RUNAS_CMD=${RUNAS_CMD:-runas}

WTL=${WTL:-"$LUSTRE/tests/write_time_limit"}

PERM_CONF=$CONFDIR/perm.conf

HOSTNAME_CHECKSUM=$(hostname | sum | awk '{ print $1 }')
SUBNET_CHECKSUM=$(expr $HOSTNAME_CHECKSUM % 250 + 1)
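# Worked example with assumed values: if "hostname | sum" printed
# "37998 1", then HOSTNAME_CHECKSUM=37998 and
# SUBNET_CHECKSUM=$((37998 % 250 + 1))=249, giving this host a unique
# leading octet (1-250) for the fake NID ranges used by the nodemap tests.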
require_dsh_mds || exit 0
require_dsh_ost || exit 0

clients=${CLIENTS//,/ }
num_clients=$(get_node_count ${clients})
clients_arr=($clients)

USER0=$(getent passwd | grep :$ID0:$ID0: | cut -d: -f1)
USER1=$(getent passwd | grep :$ID1:$ID1: | cut -d: -f1)
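# The grep pattern matches the uid:gid fields of a passwd entry, e.g. a
# hypothetical line "quser0:x:500:500::/home/quser0:/bin/bash" is matched
# by ":500:500:" when ID0=500, and cut extracts the user name field.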
NODEMAP_IPADDR_LIST="1 10 64 128 200 250"

NODEMAP_MAX_ID=$((ID0 + NODEMAP_ID_COUNT))

[ -z "$USER0" ] &&
	skip "need to add user0 ($ID0:$ID0)" && exit 0

[ -z "$USER1" ] &&
	skip "need to add user1 ($ID1:$ID1)" && exit 0

IDBASE=${IDBASE:-60000}

# changes to mappings must be reflected in test 23
FOPS_IDMAPS=(
	[0]="$((IDBASE+3)):$((IDBASE+0)) $((IDBASE+4)):$((IDBASE+2))"
	[1]="$((IDBASE+5)):$((IDBASE+1)) $((IDBASE+6)):$((IDBASE+2))"
	)
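# With the default IDBASE=60000 the idmaps expand to (client:fs pairs):
#   c0: 60003:60000 60004:60002
#   c1: 60005:60001 60006:60002
# i.e. one uid per cluster maps onto the shared fs id 60002, which the
# mapped-ACL checks in test 23a depend on.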
check_and_setup_lustre

if [ "$I_MOUNTED" = "yes" ]; then
	cleanupall -f || error "sec_cleanup"
fi

[ -z "$(echo $DIR | grep $MOUNT)" ] &&
	error "$DIR not in $MOUNT" && sec_cleanup && exit 1

[ $(echo $MOUNT | wc -w) -gt 1 ] &&
	echo "NAME=$MOUNT mounted more than once" && sec_cleanup && exit 0

GSS_REF=$(lsmod | grep ^ptlrpc_gss | awk '{print $3}')
if [ ! -z "$GSS_REF" -a "$GSS_REF" != "0" ]; then
	GSS_SUP=1
	echo "with GSS support"
else
	GSS_SUP=0
	echo "without GSS support"
fi

MDT=$(do_facet $SINGLEMDS lctl get_param -N "mdt.\*MDT0000" |
	cut -d. -f2)
[ -z "$MDT" ] && error "failed to get MDT device" && exit 1
do_facet $SINGLEMDS "mkdir -p $CONFDIR"
IDENTITY_FLUSH=mdt.$MDT.identity_flush
IDENTITY_UPCALL=mdt.$MDT.identity_upcall
sec_login() {
	local user=$1
	local group=$2

	if ! $RUNAS_CMD -u $user krb5_login.sh; then
		error "kerberos login for $user failed."
	fi

	if ! $RUNAS_CMD -u $user -g $group ls $DIR > /dev/null 2>&1; then
		$RUNAS_CMD -u $user lfs flushctx -k
		$RUNAS_CMD -u $user krb5_login.sh
		if ! $RUNAS_CMD -u$user -g$group ls $DIR > /dev/null 2>&1; then
			error "init $user $group failed."
		fi
	fi
}

declare -a identity_old

for num in $(seq $MDSCOUNT); do
	switch_identity $num true || identity_old[$num]=$?
done

if ! $RUNAS_CMD -u $ID0 ls $DIR > /dev/null 2>&1; then
	sec_login $USER0 $USER0
fi

if ! $RUNAS_CMD -u $ID1 ls $DIR > /dev/null 2>&1; then
	sec_login $USER1 $USER1
fi
# run as different user
test_0() {
	chmod 0755 $DIR || error "chmod (1)"
	rm -rf $DIR/$tdir || error "rm (1)"
	mkdir -p $DIR/$tdir || error "mkdir (1)"
	chown $USER0 $DIR/$tdir || error "chown (2)"
	$RUNAS_CMD -u $ID0 ls $DIR || error "ls (1)"
	rm -f $DIR/f0 || error "rm (2)"
	$RUNAS_CMD -u $ID0 touch $DIR/f0 && error "touch (1)"
	$RUNAS_CMD -u $ID0 touch $DIR/$tdir/f1 || error "touch (2)"
	$RUNAS_CMD -u $ID1 touch $DIR/$tdir/f2 && error "touch (3)"
	touch $DIR/$tdir/f3 || error "touch (4)"
	chown root $DIR/$tdir || error "chown (3)"
	chgrp $USER0 $DIR/$tdir || error "chgrp (1)"
	chmod 0775 $DIR/$tdir || error "chmod (2)"
	$RUNAS_CMD -u $ID0 touch $DIR/$tdir/f4 || error "touch (5)"
	$RUNAS_CMD -u $ID1 touch $DIR/$tdir/f5 && error "touch (6)"
	touch $DIR/$tdir/f6 || error "touch (7)"
	rm -rf $DIR/$tdir || error "rm (3)"
}
run_test 0 "uid permission ============================="
test_1() {
	[ $GSS_SUP = 0 ] && skip "without GSS support." && return

	rm -rf $DIR/$tdir
	mkdir -p $DIR/$tdir

	chown $USER0 $DIR/$tdir || error "chown (1)"
	$RUNAS_CMD -u $ID1 -v $ID0 touch $DIR/$tdir/f0 && error "touch (2)"
	echo "enable uid $ID1 setuid"
	do_facet $SINGLEMDS "echo '* $ID1 setuid' >> $PERM_CONF"
	do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
	$RUNAS_CMD -u $ID1 -v $ID0 touch $DIR/$tdir/f1 || error "touch (3)"

	chown root $DIR/$tdir || error "chown (4)"
	chgrp $USER0 $DIR/$tdir || error "chgrp (5)"
	chmod 0770 $DIR/$tdir || error "chmod (6)"
	$RUNAS_CMD -u $ID1 -g $ID1 touch $DIR/$tdir/f2 && error "touch (7)"
	$RUNAS_CMD -u$ID1 -g$ID1 -j$ID0 touch $DIR/$tdir/f3 && error "touch (8)"
	echo "enable uid $ID1 setuid,setgid"
	do_facet $SINGLEMDS "echo '* $ID1 setuid,setgid' > $PERM_CONF"
	do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
	$RUNAS_CMD -u $ID1 -g $ID1 -j $ID0 touch $DIR/$tdir/f4 ||
		error "touch (9)"
	$RUNAS_CMD -u $ID1 -v $ID0 -g $ID1 -j $ID0 touch $DIR/$tdir/f5 ||
		error "touch (10)"

	do_facet $SINGLEMDS "rm -f $PERM_CONF"
	do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
}
run_test 1 "setuid/gid ============================="
# bug 3285 - supplementary group should always succeed.
# NB: the supplementary groups are set for the local client only;
# for a remote client, the groups of the specified uid on the MDT
# are obtained by the upcall /sbin/l_getidentity and used instead.
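# Illustrative upcall wiring (parameter values assumed, not taken from
# this config): the MDS resolves a uid's supplementary groups through
# the identity upcall parameter defined above, e.g.
#   lctl set_param mdt.<fsname>-MDT0000.identity_upcall=/sbin/l_getidentity
# while setting it to NONE (as nodemap_test_setup does below) disables it.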
test_4() {
	local server_version=$(lustre_version_code $SINGLEMDS)

	[[ $server_version -ge $(version_code 2.6.93) ]] ||
	[[ $server_version -ge $(version_code 2.5.35) &&
	   $server_version -lt $(version_code 2.5.50) ]] ||
		{ skip "Need MDS version at least 2.6.93 or 2.5.35"; return; }

	rm -rf $DIR/$tdir
	mkdir -p $DIR/$tdir
	chmod 0771 $DIR/$tdir
	chgrp $ID0 $DIR/$tdir
	$RUNAS_CMD -u $ID0 ls $DIR/$tdir || error "setgroups (1)"
	do_facet $SINGLEMDS "echo '* $ID1 setgrp' > $PERM_CONF"
	do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
	$RUNAS_CMD -u $ID1 -G1,2,$ID0 ls $DIR/$tdir ||
		error "setgroups (2)"
	$RUNAS_CMD -u $ID1 -G1,2 ls $DIR/$tdir && error "setgroups (3)"

	do_facet $SINGLEMDS "rm -f $PERM_CONF"
	do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
}
run_test 4 "set supplementary group ==============="
create_nodemaps() {
	local i
	local out

	squash_id default 99 0
	squash_id default 99 1
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		local csum=${HOSTNAME_CHECKSUM}_${i}

		if ! do_facet mgs $LCTL nodemap_add $csum; then
			return 1
		fi

		out=$(do_facet mgs $LCTL get_param nodemap.$csum.id)
		# the new nodemap name must appear in the get_param output,
		# otherwise the add did not take effect
		[[ $(echo $out | grep -c $csum) == 0 ]] && return 1
	done
	return 0
}

delete_nodemaps() {
	local i
	local out

	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		local csum=${HOSTNAME_CHECKSUM}_${i}

		if ! do_facet mgs $LCTL nodemap_del $csum; then
			error "nodemap_del $csum failed with $?"
		fi

		out=$(do_facet mgs $LCTL get_param nodemap.$csum.id 2>/dev/null)
		# the deleted nodemap name must no longer appear in the output
		[[ $(echo $out | grep -c $csum) != 0 ]] && return 1
	done
	return 0
}

add_range() {
	local j
	local cmd="$LCTL nodemap_add_range"
	local range
	local rc=0

	for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
		range="$SUBNET_CHECKSUM.${2}.${j}.[1-253]@tcp"
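		# e.g. with SUBNET_CHECKSUM=200 and nodemap index $2=1,
		# j=0 yields the LNet NID range "200.1.0.[1-253]@tcp"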
		if ! do_facet mgs $cmd --name $1 --range $range; then
			rc=$((rc + 1))
		fi
	done
	return $rc
}

delete_range() {
	local j
	local cmd="$LCTL nodemap_del_range"
	local range
	local rc=0

	for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
		range="$SUBNET_CHECKSUM.${2}.${j}.[1-253]@tcp"
		if ! do_facet mgs $cmd --name $1 --range $range; then
			rc=$((rc + 1))
		fi
	done
	return $rc
}

add_idmaps() {
	local i
	local cmd="$LCTL nodemap_add_idmap"
	local rc=0

	echo "Start to add idmaps ..."
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		local j

		for ((j = $ID0; j < NODEMAP_MAX_ID; j++)); do
			local csum=${HOSTNAME_CHECKSUM}_${i}
			local client_id=$j
			local fs_id=$((j + 1))
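			# each client id j maps to fs id j+1; the
			# active-nodemap checks in test_idmap rely on
			# this +1 offset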
			if ! do_facet mgs $cmd --name $csum --idtype uid \
				--idmap $client_id:$fs_id; then
				rc=$((rc + 1))
			fi
			if ! do_facet mgs $cmd --name $csum --idtype gid \
				--idmap $client_id:$fs_id; then
				rc=$((rc + 1))
			fi
		done
	done

	return $rc
}

update_idmaps() { #LU-10040
	[ $(lustre_version_code mgs) -lt $(version_code 2.10.55) ] &&
		skip "Need MGS >= 2.10.55" &&
		return
	local csum=${HOSTNAME_CHECKSUM}_0
	local old_id_client=$ID0
	local old_id_fs=$((ID0 + 1))
	local new_id=$((ID0 + 100))
	local tmp_id
	local cmd
	local rc=0

	echo "Start to update idmaps ..."

	# inserting an existing idmap should return an error
	cmd="$LCTL nodemap_add_idmap --name $csum --idtype uid"
	if do_facet mgs \
		$cmd --idmap $old_id_client:$old_id_fs 2>/dev/null; then
		error "insert idmap {$old_id_client:$old_id_fs} " \
			"should return error"
		rc=$((rc + 1))
		return $rc
	fi

	# update id_fs and check it
	if ! do_facet mgs $cmd --idmap $old_id_client:$new_id; then
		error "$cmd --idmap $old_id_client:$new_id failed"
		rc=$((rc + 1))
		return $rc
	fi

	tmp_id=$(do_facet mgs $LCTL get_param -n nodemap.$csum.idmap |
		awk '{ print $7 }' | sed -n '2p')
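	# assumed layout of the idmap parameter output, one entry per line,
	# e.g. " { idtype: uid, client_id: 500, fs_id: 501 }": field $7 is
	# the fs_id value, and "sed -n 2p" picks the second output line,
	# i.e. the first idmap entry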
	[ $tmp_id != $new_id ] && { error "new id_fs $tmp_id != $new_id"; \
		rc=$((rc + 1)); return $rc; }

	# update id_client and check it
	if ! do_facet mgs $cmd --idmap $new_id:$new_id; then
		error "$cmd --idmap $new_id:$new_id failed"
		rc=$((rc + 1))
		return $rc
	fi

	tmp_id=$(do_facet mgs $LCTL get_param -n nodemap.$csum.idmap |
		awk '{ print $5 }' | sed -n "$((NODEMAP_ID_COUNT + 1)) p")
	tmp_id=$(echo ${tmp_id%,*}) # e.g. "501," -> "501"
	[ $tmp_id != $new_id ] && { error "new id_client $tmp_id != $new_id"; \
		rc=$((rc + 1)); return $rc; }

	# delete the idmap updated above
	cmd="$LCTL nodemap_del_idmap --name $csum --idtype uid"
	if ! do_facet mgs $cmd --idmap $new_id:$new_id; then
		error "$cmd --idmap $new_id:$new_id failed"
		rc=$((rc + 1))
		return $rc
	fi

	# restore the idmap so that delete_idmaps can clean up properly
	cmd="$LCTL nodemap_add_idmap --name $csum --idtype uid"
	if ! do_facet mgs $cmd --idmap $old_id_client:$old_id_fs; then
		error "$cmd --idmap $old_id_client:$old_id_fs failed"
		rc=$((rc + 1))
	fi

	return $rc
}
delete_idmaps() {
	local i
	local cmd="$LCTL nodemap_del_idmap"
	local rc=0

	echo "Start to delete idmaps ..."
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		local j

		for ((j = $ID0; j < NODEMAP_MAX_ID; j++)); do
			local csum=${HOSTNAME_CHECKSUM}_${i}
			local client_id=$j
			local fs_id=$((j + 1))

			if ! do_facet mgs $cmd --name $csum --idtype uid \
				--idmap $client_id:$fs_id; then
				rc=$((rc + 1))
			fi
			if ! do_facet mgs $cmd --name $csum --idtype gid \
				--idmap $client_id:$fs_id; then
				rc=$((rc + 1))
			fi
		done
	done

	return $rc
}

modify_flags() {
	local i
	local proc
	local option
	local cmd="$LCTL nodemap_modify"
	local rc=0

	proc[0]="admin_nodemap"
	proc[1]="trusted_nodemap"

	option[0]="admin"
	option[1]="trusted"

	for ((idx = 0; idx < 2; idx++)); do
		if ! do_facet mgs $cmd --name $1 --property ${option[$idx]} \
			--value 1; then
			rc=$((rc + 1))
		fi

		if ! do_facet mgs $cmd --name $1 --property ${option[$idx]} \
			--value 0; then
			rc=$((rc + 1))
		fi
	done

	return $rc
}

squash_id() {
	[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
		return
	local cmd

	cmd[0]="$LCTL nodemap_modify --property squash_uid"
	cmd[1]="$LCTL nodemap_modify --property squash_gid"
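	# usage sketch: squash_id <nodemap> <id> <index>, where index 0
	# selects squash_uid and 1 selects squash_gid, e.g.
	# "squash_id default 99 0" sets the default nodemap squash_uid to 99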
	if ! do_facet mgs ${cmd[$3]} --name $1 --value $2; then
		return 1
	fi
}

# ensure that the squash defaults are the expected defaults
squash_id default 99 0
squash_id default 99 1

test_nid() {
	local cmd

	cmd="$LCTL nodemap_test_nid"

	nid=$(do_facet mgs $cmd $1)

	if [ "$nid" == "$2" ]; then
		return 0
	fi

	return 1
}

test_idmap() {
	local i
	local cmd="$LCTL nodemap_test_id"
	local rc=0
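	# example invocation (hypothetical NID):
	#   lctl nodemap_test_id --nid 1.2.3.4@tcp --idtype uid --id 500
	# prints the fs uid that client uid 500 maps to under the nodemap
	# matching that NID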
	echo "Start to test idmaps ..."
	## nodemap deactivated
	if ! do_facet mgs $LCTL nodemap_activate 0; then
		error "nodemap_activate 0 failed"
	fi

	for ((id = $ID0; id < NODEMAP_MAX_ID; id++)); do
		local j

		for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
			local nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
			local fs_id=$(do_facet mgs $cmd --nid $nid \
				--idtype uid --id $id)
			if [ $fs_id != $id ]; then
				echo "expected $id, got $fs_id"
				rc=$((rc + 1))
			fi
		done
	done

	## nodemap activated
	if ! do_facet mgs $LCTL nodemap_activate 1; then
		error "nodemap_activate 1 failed"
	fi

	for ((id = $ID0; id < NODEMAP_MAX_ID; id++)); do
		for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
			nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
			fs_id=$(do_facet mgs $cmd --nid $nid \
				--idtype uid --id $id)
			expected_id=$((id + 1))
			if [ $fs_id != $expected_id ]; then
				echo "expected $expected_id, got $fs_id"
				rc=$((rc + 1))
			fi
		done
	done

	## mark the clients trusted, so their ids are not mapped
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		local csum=${HOSTNAME_CHECKSUM}_${i}

		if ! do_facet mgs $LCTL nodemap_modify --name $csum \
			--property trusted --value 1; then
			error "nodemap_modify $csum failed with $?"
		fi
	done

	for ((id = $ID0; id < NODEMAP_MAX_ID; id++)); do
		for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
			nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
			fs_id=$(do_facet mgs $cmd --nid $nid \
				--idtype uid --id $id)
			if [ $fs_id != $id ]; then
				echo "expected $id, got $fs_id"
				rc=$((rc + 1))
			fi
		done
	done

	## ensure allow_root_access is enabled
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		local csum=${HOSTNAME_CHECKSUM}_${i}

		if ! do_facet mgs $LCTL nodemap_modify --name $csum \
			--property admin --value 1; then
			error "nodemap_modify $csum failed with $?"
		fi
	done

	## check that root is allowed
	for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
		nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
		fs_id=$(do_facet mgs $cmd --nid $nid --idtype uid --id 0)
		if [ $fs_id != 0 ]; then
			echo "root allowed expected 0, got $fs_id"
			rc=$((rc + 1))
		fi
	done

	## ensure allow_root_access is disabled
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		local csum=${HOSTNAME_CHECKSUM}_${i}

		if ! do_facet mgs $LCTL nodemap_modify --name $csum \
			--property admin --value 0; then
			error "nodemap_modify $csum failed with $?"
		fi
	done

	## check that root is mapped to 99
	for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
		nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
		fs_id=$(do_facet mgs $cmd --nid $nid --idtype uid --id 0)
		if [ $fs_id != 99 ]; then
			error "root squash expected 99, got $fs_id"
			rc=$((rc + 1))
		fi
	done

	## reset client trust to 0
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! do_facet mgs $LCTL nodemap_modify \
			--name ${HOSTNAME_CHECKSUM}_${i} \
			--property trusted --value 0; then
			error "nodemap_modify ${HOSTNAME_CHECKSUM}_${i} failed with $?"
		fi
	done

	return $rc
}
test_7() {
	local rc

	remote_mgs_nodsh && skip "remote MGS with nodsh" && return
	[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
		return

	create_nodemaps
	rc=$?
	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1

	delete_nodemaps
	rc=$?
	[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 2

	return 0
}
run_test 7 "nodemap create and delete"
test_8() {
	local rc

	remote_mgs_nodsh && skip "remote MGS with nodsh" && return
	[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
		return

	# set up nodemaps
	create_nodemaps
	rc=$?
	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1

	# adding the same nodemaps again must fail
	create_nodemaps
	rc=$?
	[[ $rc == 0 ]] && error "duplicate nodemap_add allowed with $rc" &&
		return 2

	# clean up
	delete_nodemaps
	rc=$?
	[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 3

	return 0
}
run_test 8 "nodemap reject duplicates"
test_9() {
	local i
	local rc

	remote_mgs_nodsh && skip "remote MGS with nodsh" && return
	[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
		return

	rc=0
	create_nodemaps
	rc=$?
	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1

	rc=0
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
			rc=$((rc + 1))
		fi
	done
	[[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2

	rc=0
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! delete_range ${HOSTNAME_CHECKSUM}_${i} $i; then
			rc=$((rc + 1))
		fi
	done
	[[ $rc != 0 ]] && error "nodemap_del_range failed with $rc" && return 3

	delete_nodemaps
	rc=$?
	[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4

	return 0
}
run_test 9 "nodemap range add"
test_10a() {
	local i
	local rc

	remote_mgs_nodsh && skip "remote MGS with nodsh" && return
	[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
		return

	rc=0
	create_nodemaps
	rc=$?
	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1

	rc=0
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
			rc=$((rc + 1))
		fi
	done
	[[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2

	rc=0
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
			rc=$((rc + 1))
		fi
	done
	[[ $rc == 0 ]] && error "duplicate nodemap_add_range allowed with $rc" &&
		return 3

	rc=0
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! delete_range ${HOSTNAME_CHECKSUM}_${i} $i; then
			rc=$((rc + 1))
		fi
	done
	[[ $rc != 0 ]] && error "nodemap_del_range failed with $rc" && return 4

	delete_nodemaps
	rc=$?
	[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 5

	return 0
}
run_test 10a "nodemap reject duplicate ranges"
test_10b() {
	[ $(lustre_version_code mgs) -lt $(version_code 2.10.53) ] &&
		skip "Need MGS >= 2.10.53" && return

	local nm1="nodemap1"
	local nm2="nodemap2"
	local nids="192.168.19.[0-255]@o2ib20"

	do_facet mgs $LCTL nodemap_del $nm1 2>/dev/null
	do_facet mgs $LCTL nodemap_del $nm2 2>/dev/null

	do_facet mgs $LCTL nodemap_add $nm1 || error "Add $nm1 failed"
	do_facet mgs $LCTL nodemap_add $nm2 || error "Add $nm2 failed"
	do_facet mgs $LCTL nodemap_add_range --name $nm1 --range $nids ||
		error "Add range $nids to $nm1 failed"
	[ -n "$(do_facet mgs $LCTL get_param nodemap.$nm1.* |
		grep start_nid)" ] || error "No range was found"
	do_facet mgs $LCTL nodemap_del_range --name $nm2 --range $nids &&
		error "Deleting range $nids from $nm2 should fail"
	[ -n "$(do_facet mgs $LCTL get_param nodemap.$nm1.* |
		grep start_nid)" ] || error "Range $nids should be there"

	do_facet mgs $LCTL nodemap_del $nm1 || error "Delete $nm1 failed"
	do_facet mgs $LCTL nodemap_del $nm2 || error "Delete $nm2 failed"
}
run_test 10b "delete range from the correct nodemap"
test_10c() { #LU-8912
	[ $(lustre_version_code mgs) -lt $(version_code 2.10.57) ] &&
		skip "Need MGS >= 2.10.57" && return

	local nm="nodemap_lu8912"
	local nid_range="10.210.[32-47].[0-255]@o2ib3"
	local start_nid="10.210.32.0@o2ib3"
	local end_nid="10.210.47.255@o2ib3"
	local start_nid_found
	local end_nid_found

	do_facet mgs $LCTL nodemap_del $nm 2>/dev/null
	do_facet mgs $LCTL nodemap_add $nm || error "Add $nm failed"
	do_facet mgs $LCTL nodemap_add_range --name $nm --range $nid_range ||
		error "Add range $nid_range to $nm failed"

	start_nid_found=$(do_facet mgs $LCTL get_param nodemap.$nm.* |
		awk -F '[,: ]' /start_nid/'{ print $9 }')
	[ "$start_nid" == "$start_nid_found" ] ||
		error "start_nid: $start_nid_found != $start_nid"
	end_nid_found=$(do_facet mgs $LCTL get_param nodemap.$nm.* |
		awk -F '[,: ]' /end_nid/'{ print $13 }')
	[ "$end_nid" == "$end_nid_found" ] ||
		error "end_nid: $end_nid_found != $end_nid"

	do_facet mgs $LCTL nodemap_del $nm || error "Delete $nm failed"
}
run_test 10c "verify contiguous range support"
test_11() {
	local i
	local rc

	remote_mgs_nodsh && skip "remote MGS with nodsh" && return
	[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
		return

	rc=0
	create_nodemaps
	rc=$?
	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1

	rc=0
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! modify_flags ${HOSTNAME_CHECKSUM}_${i}; then
			rc=$((rc + 1))
		fi
	done
	[[ $rc != 0 ]] && error "nodemap_modify failed with $rc" && return 2

	delete_nodemaps
	rc=$?
	[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 3

	return 0
}
run_test 11 "nodemap modify"
test_12() {
	local i
	local rc

	remote_mgs_nodsh && skip "remote MGS with nodsh" && return
	[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
		return

	rc=0
	create_nodemaps
	rc=$?
	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1

	rc=0
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! squash_id ${HOSTNAME_CHECKSUM}_${i} 88 0; then
			rc=$((rc + 1))
		fi
	done
	[[ $rc != 0 ]] && error "nodemap squash_uid failed with $rc" && return 2

	rc=0
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! squash_id ${HOSTNAME_CHECKSUM}_${i} 88 1; then
			rc=$((rc + 1))
		fi
	done
	[[ $rc != 0 ]] && error "nodemap squash_gid failed with $rc" && return 3

	delete_nodemaps
	rc=$?
	[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4

	return 0
}
run_test 12 "nodemap set squash ids"
test_13() {
	local i
	local j
	local k
	local rc

	remote_mgs_nodsh && skip "remote MGS with nodsh" && return
	[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
		return

	rc=0
	create_nodemaps
	rc=$?
	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1

	rc=0
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
			rc=$((rc + 1))
		fi
	done
	[[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2

	rc=0
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
			for k in $NODEMAP_IPADDR_LIST; do
				if ! test_nid $SUBNET_CHECKSUM.$i.$j.$k \
					${HOSTNAME_CHECKSUM}_${i}; then
					rc=$((rc + 1))
				fi
			done
		done
	done
	[[ $rc != 0 ]] && error "nodemap_test_nid failed with $rc" && return 3

	delete_nodemaps
	rc=$?
	[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4

	return 0
}
run_test 13 "test nids"
test_14() {
	local i
	local j
	local k
	local rc

	remote_mgs_nodsh && skip "remote MGS with nodsh" && return
	[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
		return

	rc=0
	create_nodemaps
	rc=$?
	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1

	rc=0
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
			for k in $NODEMAP_IPADDR_LIST; do
				if ! test_nid $SUBNET_CHECKSUM.$i.$j.$k \
					default; then
					rc=$((rc + 1))
				fi
			done
		done
	done
	[[ $rc != 0 ]] && error "nodemap_test_nid failed with $rc" && return 3

	delete_nodemaps
	rc=$?
	[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4

	return 0
}
run_test 14 "test default nodemap nid lookup"
test_15() {
	local rc

	remote_mgs_nodsh && skip "remote MGS with nodsh" && return
	[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
		return

	rc=0
	create_nodemaps
	rc=$?
	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 1

	rc=0
	for ((i = 0; i < NODEMAP_COUNT; i++)); do
		if ! add_range ${HOSTNAME_CHECKSUM}_${i} $i; then
			rc=$((rc + 1))
		fi
	done
	[[ $rc != 0 ]] && error "nodemap_add_range failed with $rc" && return 2

	add_idmaps
	rc=$?
	[[ $rc != 0 ]] && error "nodemap_add_idmap failed with $rc" && return 3

	test_idmap
	rc=$?
	[[ $rc != 0 ]] && error "nodemap_test_id failed with $rc" && return 4

	update_idmaps
	rc=$?
	[[ $rc != 0 ]] && error "update_idmaps failed with $rc" && return 5

	delete_idmaps
	rc=$?
	[[ $rc != 0 ]] && error "nodemap_del_idmap failed with $rc" && return 6

	delete_nodemaps
	rc=$?
	[[ $rc != 0 ]] && error "nodemap_delete failed with $rc" && return 7

	return 0
}
run_test 15 "test id mapping"
wait_nm_sync() {
	local nodemap_name=$1
	local key=$2
	local value=$3
	local proc_param="${nodemap_name}.${key}"
	[ "$nodemap_name" == "active" ] && proc_param="active"

	local is_active=$(do_facet mgs $LCTL get_param -n nodemap.active)
	(( is_active == 0 )) && [ "$proc_param" != "active" ] && return

	local max_retries=20
	local is_sync
	local out1=""
	local out2
	local mgs_ip=$(host_nids_address $mgs_HOST $NETTYPE | cut -d' ' -f1)
	local i

	if [ -z "$value" ]; then
		out1=$(do_facet mgs $LCTL get_param nodemap.${proc_param})
		echo "On MGS ${mgs_ip}, ${proc_param} = $out1"
	else
		out1=$value
	fi

	# wait up to 10 seconds for other servers to sync with mgs
	for i in $(seq 1 10); do
		for node in $(all_server_nodes); do
			local node_ip=$(host_nids_address $node $NETTYPE |
				cut -d' ' -f1)

			is_sync=true
			if [ -z "$value" ]; then
				[ $node_ip == $mgs_ip ] && continue
			fi

			out2=$(do_node $node_ip $LCTL get_param \
				nodemap.$proc_param 2>/dev/null)
			echo "On $node ${node_ip}, ${proc_param} = $out2"
			[ "$out1" != "$out2" ] && is_sync=false && break
		done
		$is_sync && break
		sleep 1
	done
	if ! $is_sync; then
		echo MGS
		echo $out1
		echo OTHER - IP: $node_ip
		echo $out2
		error "mgs and $nodemap_name ${key} mismatch, $i attempts"
	fi
	echo "waited $((i - 1)) seconds for sync"
}
create_fops_nodemaps() {
	local i=0
	local client

	for client in $clients; do
		local client_ip=$(host_nids_address $client $NETTYPE)
		local client_nid=$(h2nettype $client_ip)

		do_facet mgs $LCTL nodemap_add c${i} || return 1
		do_facet mgs $LCTL nodemap_add_range \
			--name c${i} --range $client_nid || return 1
		for map in ${FOPS_IDMAPS[i]}; do
			do_facet mgs $LCTL nodemap_add_idmap --name c${i} \
				--idtype uid --idmap ${map} || return 1
			do_facet mgs $LCTL nodemap_add_idmap --name c${i} \
				--idtype gid --idmap ${map} || return 1
		done

		wait_nm_sync c$i idmap

		i=$((i + 1))
	done
	return 0
}

delete_fops_nodemaps() {
	local i=0
	local client

	for client in $clients; do
		do_facet mgs $LCTL nodemap_del c${i} || return 1
		i=$((i + 1))
	done
	return 0
}
nm_test_mkdir() {
	if [ $MDSCOUNT -le 1 ]; then
		do_node ${clients_arr[0]} mkdir -p $DIR/$tdir
	else
		# round-robin MDTs to test DNE nodemap support
		[ ! -d $DIR ] && do_node ${clients_arr[0]} mkdir -p $DIR
		do_node ${clients_arr[0]} $LFS setdirstripe -c 1 -i \
			$((fops_mds_index % MDSCOUNT)) $DIR/$tdir
		((fops_mds_index++))
	fi
}
# fops test directory needs to be initialized on a privileged client
fops_test_setup() {
	local admin=$(do_facet mgs $LCTL get_param -n nodemap.c0.admin_nodemap)
	local trust=$(do_facet mgs $LCTL get_param -n \
		nodemap.c0.trusted_nodemap)

	do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
	do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1

	wait_nm_sync c0 admin_nodemap
	wait_nm_sync c0 trusted_nodemap

	do_node ${clients_arr[0]} rm -rf $DIR/$tdir
	nm_test_mkdir
	do_node ${clients_arr[0]} chown $user $DIR/$tdir

	do_facet mgs $LCTL nodemap_modify --name c0 \
		--property admin --value $admin
	do_facet mgs $LCTL nodemap_modify --name c0 \
		--property trusted --value $trust

	# flush MDT locks to make sure they are reacquired before test
	do_node ${clients_arr[0]} $LCTL set_param \
		ldlm.namespaces.$FSNAME-MDT*.lru_size=clear

	wait_nm_sync c0 admin_nodemap
	wait_nm_sync c0 trusted_nodemap
}
# fileset test directory needs to be initialized on a privileged client
fileset_test_setup() {
	local nm=$1
	local admin=$(do_facet mgs $LCTL get_param -n \
		nodemap.${nm}.admin_nodemap)
	local trust=$(do_facet mgs $LCTL get_param -n \
		nodemap.${nm}.trusted_nodemap)

	do_facet mgs $LCTL nodemap_modify --name $nm --property admin --value 1
	do_facet mgs $LCTL nodemap_modify --name $nm --property trusted \
		--value 1

	wait_nm_sync $nm admin_nodemap
	wait_nm_sync $nm trusted_nodemap

	# create directory and populate it for subdir mount
	do_node ${clients_arr[0]} mkdir $MOUNT/$subdir ||
		error "unable to create dir $MOUNT/$subdir"
	do_node ${clients_arr[0]} touch $MOUNT/$subdir/this_is_$subdir ||
		error "unable to create file $MOUNT/$subdir/this_is_$subdir"
	do_node ${clients_arr[0]} mkdir $MOUNT/$subdir/$subsubdir ||
		error "unable to create dir $MOUNT/$subdir/$subsubdir"
	do_node ${clients_arr[0]} touch \
		$MOUNT/$subdir/$subsubdir/this_is_$subsubdir ||
		error "unable to create file \
			$MOUNT/$subdir/$subsubdir/this_is_$subsubdir"

	do_facet mgs $LCTL nodemap_modify --name $nm \
		--property admin --value $admin
	do_facet mgs $LCTL nodemap_modify --name $nm \
		--property trusted --value $trust

	# flush MDT locks to make sure they are reacquired before test
	do_node ${clients_arr[0]} $LCTL set_param \
		ldlm.namespaces.$FSNAME-MDT*.lru_size=clear

	wait_nm_sync $nm admin_nodemap
	wait_nm_sync $nm trusted_nodemap
}
# fileset test directory needs to be cleaned up on a privileged client
fileset_test_cleanup() {
	local nm=$1
	local admin=$(do_facet mgs $LCTL get_param -n \
		nodemap.${nm}.admin_nodemap)
	local trust=$(do_facet mgs $LCTL get_param -n \
		nodemap.${nm}.trusted_nodemap)

	do_facet mgs $LCTL nodemap_modify --name $nm --property admin --value 1
	do_facet mgs $LCTL nodemap_modify --name $nm --property trusted \
		--value 1

	wait_nm_sync $nm admin_nodemap
	wait_nm_sync $nm trusted_nodemap

	# cleanup directory created for subdir mount
	do_node ${clients_arr[0]} rm -rf $MOUNT/$subdir ||
		error "unable to remove dir $MOUNT/$subdir"

	do_facet mgs $LCTL nodemap_modify --name $nm \
		--property admin --value $admin
	do_facet mgs $LCTL nodemap_modify --name $nm \
		--property trusted --value $trust

	# flush MDT locks to make sure they are reacquired before test
	do_node ${clients_arr[0]} $LCTL set_param \
		ldlm.namespaces.$FSNAME-MDT*.lru_size=clear

	wait_nm_sync $nm admin_nodemap
	wait_nm_sync $nm trusted_nodemap
}
do_create_delete() {
	local run_u="$1"
	local key="$2"
	local testfile=$DIR/$tdir/$tfile
	local rc=0
	local c=0 d=0

	if $run_u touch $testfile >& /dev/null; then
		c=1
		$run_u rm $testfile && d=1
	fi

	local res="$c $d"
	local expected=$(get_cr_del_expected $key)

	[ "$res" != "$expected" ] &&
		error "test $key, wanted $expected, got $res" && rc=$((rc + 1))

	return $rc
}
nodemap_check_quota() {
	local run_u="$1"

	$run_u lfs quota -q $DIR | awk '{ print $2; exit; }'
}

do_fops_quota_test() {
	local run_u="$1"

	# fuzz quota used to account for possible indirect blocks, etc
	local quota_fuzz=$(fs_log_size)
	local qused_orig=$(nodemap_check_quota "$run_u")
	local qused_high=$((qused_orig + quota_fuzz))
	local qused_low=$((qused_orig - quota_fuzz))
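	# worked example with assumed numbers: if qused_orig=8 (KB) and
	# quota_fuzz=4, then after the 1MiB write below qused_new must land
	# in [qused_low + 1024, qused_high + 1024] = [1028, 1036] KB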
	local testfile=$DIR/$tdir/$tfile

	$run_u dd if=/dev/zero of=$testfile oflag=sync bs=1M count=1 \
		>& /dev/null || error "unable to write quota test file"
	sync; sync_all_data || true

	local qused_new=$(nodemap_check_quota "$run_u")
	[ $((qused_new)) -lt $((qused_low + 1024)) -o \
	  $((qused_new)) -gt $((qused_high + 1024)) ] &&
		error "$qused_new != $qused_orig + 1M after write, " \
			"fuzz is $quota_fuzz"
	$run_u rm $testfile || error "unable to remove quota test file"
	wait_delete_completed_mds

	qused_new=$(nodemap_check_quota "$run_u")
	[ $((qused_new)) -lt $((qused_low)) -o \
	  $((qused_new)) -gt $((qused_high)) ] &&
		error "quota not reclaimed, expect $qused_orig, " \
			"got $qused_new, fuzz $quota_fuzz"
}
get_fops_mapped_user() {
	local cli_user=$1

	for ((i = 0; i < ${#FOPS_IDMAPS[@]}; i++)); do
		for map in ${FOPS_IDMAPS[i]}; do
			if [ $(cut -d: -f1 <<< "$map") == $cli_user ]; then
				cut -d: -f2 <<< "$map"
				return
			fi
		done
	done
	echo -1
}
get_cr_del_expected() {
	local -a key
	IFS=":" read -a key <<< "$1"
	local mapmode="${key[0]}"
	local mds_user="${key[1]}"
	local cluster="${key[2]}"
	local cli_user="${key[3]}"
	local mode="0${key[4]}"
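	# key layout example (values illustrative): the key
	# "mapped_noadmin:60000:c1:60005:750" asks whether client uid 60005
	# on cluster c1, in mapped mode without admin, may create/delete in
	# a directory owned by mds-side uid 60000 with mode 0750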
	local mapped=0
	local noadmin=0
	local other="0"

	[[ $mapmode == *mapped* ]] && mapped=1
	# only c1 is mapped in these test cases
	[[ $mapmode == mapped_trusted* ]] && [ "$cluster" == "c0" ] && mapped=0
	[[ $mapmode == *noadmin* ]] && noadmin=1

	# o+wx works as long as the user isn't mapped
	if [ $((mode & 3)) -eq 3 ]; then
		other="1"
	fi

	# if the client user is root, check whether root is squashed
	if [ "$cli_user" == "0" ]; then
		# a squashed root succeeds only if the other bits are set
		case $noadmin in
		0) echo $SUCCESS;;
		1) [ "$other" == "1" ] && echo $SUCCESS
		   [ "$other" == "0" ] && echo $FAILURE;;
		esac
		return
	fi

	if [ "$mapped" == "0" ]; then
		[ "$other" == "1" ] && echo $SUCCESS
		[ "$other" == "0" ] && echo $FAILURE
		return
	fi

	# if the mapped user is the mds user, check for u+wx
	mapped_user=$(get_fops_mapped_user $cli_user)
	[ "$mapped_user" == "-1" ] &&
		error "unable to find mapping for client user $cli_user"

	if [ "$mapped_user" == "$mds_user" -a \
	     $(((mode & 0300) == 0300)) -eq 1 ]; then
		echo $SUCCESS
		return
	fi
	if [ "$mapped_user" != "$mds_user" -a "$other" == "1" ]; then
		echo $SUCCESS
		return
	fi

	echo $FAILURE
}
test_fops_admin_cli_i=""
test_fops_chmod_dir() {
	local current_cli_i=$1
	local perm_bits=$2
	local dir_to_chmod=$3
	local new_admin_cli_i=""

	# do we need to set up a new admin client?
	[ "$current_cli_i" == "0" ] && [ "$test_fops_admin_cli_i" != "1" ] &&
		new_admin_cli_i=1
	[ "$current_cli_i" != "0" ] && [ "$test_fops_admin_cli_i" != "0" ] &&
		new_admin_cli_i=0

	# if only one client, and non-admin, need to flip admin every time
	if [ "$num_clients" == "1" ]; then
		test_fops_admin_client=$clients
		test_fops_admin_val=$(do_facet mgs $LCTL get_param -n \
			nodemap.c0.admin_nodemap)
		if [ "$test_fops_admin_val" != "1" ]; then
			do_facet mgs $LCTL nodemap_modify \
				--name c0 --property admin --value 1
			wait_nm_sync c0 admin_nodemap
		fi
	elif [ "$new_admin_cli_i" != "" ]; then
		# restore admin val to old admin client
		if [ "$test_fops_admin_cli_i" != "" ] &&
				[ "$test_fops_admin_val" != "1" ]; then
			do_facet mgs $LCTL nodemap_modify \
				--name c${test_fops_admin_cli_i} \
				--property admin \
				--value $test_fops_admin_val
			wait_nm_sync c${test_fops_admin_cli_i} admin_nodemap
		fi

		test_fops_admin_cli_i=$new_admin_cli_i
		test_fops_admin_client=${clients_arr[$new_admin_cli_i]}
		test_fops_admin_val=$(do_facet mgs $LCTL get_param -n \
			nodemap.c${new_admin_cli_i}.admin_nodemap)

		if [ "$test_fops_admin_val" != "1" ]; then
			do_facet mgs $LCTL nodemap_modify \
				--name c${new_admin_cli_i} \
				--property admin --value 1
			wait_nm_sync c${new_admin_cli_i} admin_nodemap
		fi
	fi

	do_node $test_fops_admin_client chmod $perm_bits $dir_to_chmod ||
		return 1

	# remove admin for single client if originally non-admin
	if [ "$num_clients" == "1" ] && [ "$test_fops_admin_val" != "1" ]; then
		do_facet mgs $LCTL nodemap_modify --name c0 --property admin \
			--value 0
		wait_nm_sync c0 admin_nodemap
	fi

	return 0
}
test_fops() {
	local mapmode="$1"
	local single_client="$2"
	local client_user_list=([0]="0 $((IDBASE+3)) $((IDBASE+4))"
				[1]="0 $((IDBASE+5)) $((IDBASE+6))")
	local mds_i
	local rc=0
	local perm_bit_list="0 3 $((0300)) $((0303))"
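	# the default list covers just the interesting octal modes:
	# 0 (no access), 3 (o+wx), 0300 (u+wx) and 0303 (u+wx plus o+wx),
	# matching the owner/other checks in get_cr_del_expected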
	# SLOW tests 000-007, 010-070, 100-700 (octal modes)
	[ "$SLOW" == "yes" ] &&
		perm_bit_list="0 $(seq 1 7) $(seq 8 8 63) $(seq 64 64 511) \
			$((0303))"

	# step through mds users. -1 means root
	for mds_i in -1 0 1 2; do
		local user=$((mds_i + IDBASE))
		local client

		[ "$mds_i" == "-1" ] && user=0

		echo mkdir -p $DIR/$tdir
		fops_test_setup
		local cli_i=0
		for client in $clients; do
			local u

			for u in ${client_user_list[$cli_i]}; do
				local run_u="do_node $client \
					$RUNAS_CMD -u$u -g$u -G$u"
				for perm_bits in $perm_bit_list; do
					local mode=$(printf %03o $perm_bits)
					local key
					key="$mapmode:$user:c$cli_i:$u:$mode"
					test_fops_chmod_dir $cli_i $mode \
						$DIR/$tdir ||
						error "cannot chmod $key"
					do_create_delete "$run_u" "$key"
				done

				# check quota
				test_fops_chmod_dir $cli_i 777 $DIR/$tdir ||
					error "cannot chmod $key"
				do_fops_quota_test "$run_u"
			done

			cli_i=$((cli_i + 1))
			[ "$single_client" == "1" ] && break
		done
	done

	return $rc
}
nodemap_version_check () {
	remote_mgs_nodsh && skip "remote MGS with nodsh" && return 1
	[ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
		skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
		return 1
	return 0
}
nodemap_test_setup() {
	local rc
	local active_nodemap=1

	[ "$1" == "0" ] && active_nodemap=0

	do_nodes $(comma_list $(all_mdts_nodes)) \
		$LCTL set_param mdt.*.identity_upcall=NONE

	create_fops_nodemaps
	rc=$?
	[[ $rc != 0 ]] && error "adding fops nodemaps failed $rc"

	do_facet mgs $LCTL nodemap_activate $active_nodemap
	wait_nm_sync active

	do_facet mgs $LCTL nodemap_modify --name default \
		--property admin --value 1
	do_facet mgs $LCTL nodemap_modify --name default \
		--property trusted --value 1
	wait_nm_sync default trusted_nodemap
}
nodemap_test_cleanup() {
	local rc

	delete_fops_nodemaps
	rc=$?
	[[ $rc != 0 ]] && error "removing fops nodemaps failed $rc"

	do_facet mgs $LCTL nodemap_modify --name default \
		--property admin --value 0
	do_facet mgs $LCTL nodemap_modify --name default \
		--property trusted --value 0
	wait_nm_sync default trusted_nodemap

	do_facet mgs $LCTL nodemap_activate 0
	wait_nm_sync active 0

	export SK_UNIQUE_NM=false
	return 0
}
nodemap_clients_admin_trusted() {
	local admin=$1
	local tr=$2
	local i=0

	for client in $clients; do
		do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property admin --value $admin
		do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property trusted --value $tr
		i=$((i + 1))
	done

	wait_nm_sync c$((i - 1)) admin_nodemap
	wait_nm_sync c$((i - 1)) trusted_nodemap
}
test_16() {
	nodemap_version_check || return 0
	nodemap_test_setup 0

	trap nodemap_test_cleanup EXIT
	test_fops all_off
	nodemap_test_cleanup
}
run_test 16 "test nodemap all_off fileops"

test_17() {
	nodemap_version_check || return 0
	nodemap_test_setup

	trap nodemap_test_cleanup EXIT
	nodemap_clients_admin_trusted 0 1
	test_fops trusted_noadmin 1
	nodemap_test_cleanup
}
run_test 17 "test nodemap trusted_noadmin fileops"
test_18() {
	nodemap_version_check || return 0
	nodemap_test_setup

	trap nodemap_test_cleanup EXIT
	nodemap_clients_admin_trusted 0 0
	test_fops mapped_noadmin 1
	nodemap_test_cleanup
}
run_test 18 "test nodemap mapped_noadmin fileops"

test_19() {
	nodemap_version_check || return 0
	nodemap_test_setup

	trap nodemap_test_cleanup EXIT
	nodemap_clients_admin_trusted 1 1
	test_fops trusted_admin 1
	nodemap_test_cleanup
}
run_test 19 "test nodemap trusted_admin fileops"

test_20() {
	nodemap_version_check || return 0
	nodemap_test_setup

	trap nodemap_test_cleanup EXIT
	nodemap_clients_admin_trusted 1 0
	test_fops mapped_admin 1
	nodemap_test_cleanup
}
run_test 20 "test nodemap mapped_admin fileops"
test_21() {
	nodemap_version_check || return 0
	nodemap_test_setup

	trap nodemap_test_cleanup EXIT
	local x=1
	local i=0
	for client in $clients; do
		do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property admin --value 0
		do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property trusted --value $x
		x=0
		i=$((i + 1))
	done
	wait_nm_sync c$((i - 1)) trusted_nodemap

	test_fops mapped_trusted_noadmin
	nodemap_test_cleanup
}
run_test 21 "test nodemap mapped_trusted_noadmin fileops"

test_22() {
	nodemap_version_check || return 0
	nodemap_test_setup

	trap nodemap_test_cleanup EXIT
	local x=1
	local i=0
	for client in $clients; do
		do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property admin --value 1
		do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property trusted --value $x
		x=0
		i=$((i + 1))
	done
	wait_nm_sync c$((i - 1)) trusted_nodemap

	test_fops mapped_trusted_admin
	nodemap_test_cleanup
}
run_test 22 "test nodemap mapped_trusted_admin fileops"
# acl test directory needs to be initialized on a privileged client
nodemap_acl_test_setup() {
	local admin=$(do_facet mgs $LCTL get_param -n \
		nodemap.c0.admin_nodemap)
	local trust=$(do_facet mgs $LCTL get_param -n \
		nodemap.c0.trusted_nodemap)

	do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
	do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1

	wait_nm_sync c0 admin_nodemap
	wait_nm_sync c0 trusted_nodemap

	do_node ${clients_arr[0]} rm -rf $DIR/$tdir
	nm_test_mkdir
	do_node ${clients_arr[0]} chmod a+rwx $DIR/$tdir ||
		error "unable to chmod a+rwx test dir $DIR/$tdir"

	do_facet mgs $LCTL nodemap_modify --name c0 \
		--property admin --value $admin
	do_facet mgs $LCTL nodemap_modify --name c0 \
		--property trusted --value $trust

	wait_nm_sync c0 trusted_nodemap
}
# returns 0 if the number of ACLs does not change on the second (mapped)
# client after being set on the first client
nodemap_acl_test() {
	local user="$1"
	local set_client="$2"
	local get_client="$3"
	local check_setfacl="$4"
	local setfacl_error=0
	local testfile=$DIR/$tdir/$tfile
	local RUNAS_USER="$RUNAS_CMD -u $user"
	local acl_count=0
	local acl_count_post=0

	nodemap_acl_test_setup

	do_node $set_client $RUNAS_USER touch $testfile

	# ACL masks aren't filtered by nodemap code, so we ignore them
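	# illustrative getfacl output lines: "user::rw-", "user:quser1:rwx",
	# "mask::rwx", "group::r--", "other::r--"; the mask entry would
	# otherwise inflate the count on one side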
	acl_count=$(do_node $get_client getfacl $testfile | grep -v mask |
		wc -l)
	do_node $set_client $RUNAS_USER setfacl -m $user:rwx $testfile ||
		setfacl_error=1

	# if check_setfacl is set to 1, the setfacl above is expected to fail
	if [ "$check_setfacl" == "1" ]; then
		[ "$setfacl_error" != "1" ] && return 1
		return 0
	fi

	[ "$setfacl_error" == "1" ] && echo "WARNING: unable to setfacl"

	acl_count_post=$(do_node $get_client getfacl $testfile | grep -v mask |
		wc -l)
	[ $acl_count -eq $acl_count_post ] && return 0
	return 1
}
test_23a() {
	nodemap_version_check || return 0
	nodemap_test_setup

	trap nodemap_test_cleanup EXIT
	# 1 trusted cluster, 1 mapped cluster
	local unmapped_fs=$((IDBASE+0))
	local unmapped_c1=$((IDBASE+5))
	local mapped_fs=$((IDBASE+2))
	local mapped_c0=$((IDBASE+4))
	local mapped_c1=$((IDBASE+6))

	do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
	do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1

	do_facet mgs $LCTL nodemap_modify --name c1 --property admin --value 0
	do_facet mgs $LCTL nodemap_modify --name c1 --property trusted --value 0

	wait_nm_sync c1 trusted_nodemap

	# setfacl on trusted cluster to unmapped user, verify it's not seen
	nodemap_acl_test $unmapped_fs ${clients_arr[0]} ${clients_arr[1]} ||
		error "acl count (1)"

	# setfacl on trusted cluster to mapped user, verify it's seen
	nodemap_acl_test $mapped_fs ${clients_arr[0]} ${clients_arr[1]} &&
		error "acl count (2)"

	# setfacl on mapped cluster to mapped user, verify it's seen
	nodemap_acl_test $mapped_c1 ${clients_arr[1]} ${clients_arr[0]} &&
		error "acl count (3)"

	# setfacl on mapped cluster to unmapped user, verify error
	nodemap_acl_test $unmapped_fs ${clients_arr[1]} ${clients_arr[0]} 1 ||
		error "acl count (4)"

	# 2 mapped clusters
	do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 0
	do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 0

	wait_nm_sync c0 trusted_nodemap

	# setfacl to mapped user on c1, also mapped to c0, verify it's seen
	nodemap_acl_test $mapped_c1 ${clients_arr[1]} ${clients_arr[0]} &&
		error "acl count (5)"

	# setfacl to mapped user on c1, not mapped to c0, verify not seen
	nodemap_acl_test $unmapped_c1 ${clients_arr[1]} ${clients_arr[0]} ||
		error "acl count (6)"

	nodemap_test_cleanup
}
run_test 23a "test mapped regular ACLs"
test_23b() { #LU-9929
	remote_mgs_nodsh && skip "remote MGS with nodsh" && return
	[ $(lustre_version_code mgs) -lt $(version_code 2.10.53) ] &&
		skip "Need MGS >= 2.10.53" && return

	nodemap_test_setup
	trap nodemap_test_cleanup EXIT

	local testdir=$DIR/$tdir
	local fs_id=$((IDBASE+10))
	local unmapped_id
	local mapped_id
	local fs_user

	do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
	wait_nm_sync c0 admin_nodemap

	# Add idmap $ID0:$fs_id (500:60010)
	do_facet mgs $LCTL nodemap_add_idmap --name c0 --idtype gid \
		--idmap $ID0:$fs_id ||
		error "add idmap $ID0:$fs_id to nodemap c0 failed"

	# set/getfacl default acl on client 0 (unmapped gid=500)
	rm -rf $testdir
	mkdir -p $testdir
	# Here, USER0=$(getent passwd | grep :$ID0:$ID0: | cut -d: -f1)
	setfacl -R -d -m group:$USER0:rwx $testdir ||
		error "setfacl $testdir on ${clients_arr[0]} failed"
	unmapped_id=$(getfacl $testdir | grep -E "default:group:.*:rwx" |
		awk -F: '{print $3}')
	[ "$unmapped_id" = "$USER0" ] ||
		error "gid=$ID0 was not unmapped correctly on ${clients_arr[0]}"

	# getfacl default acl on MGS (mapped gid=60010)
	zconf_mount $mgs_HOST $MOUNT
	do_rpc_nodes $mgs_HOST is_mounted $MOUNT ||
		error "mount lustre on MGS failed"
	mapped_id=$(do_node $mgs_HOST getfacl $testdir |
		grep -E "default:group:.*:rwx" | awk -F: '{print $3}')
	fs_user=$(do_facet mgs getent passwd |
		grep :$fs_id:$fs_id: | cut -d: -f1)
	[ $mapped_id -eq $fs_id -o "$mapped_id" = "$fs_user" ] ||
		error "Should return gid=$fs_id or $fs_user on MGS"

	rm -rf $testdir
	do_facet mgs umount $MOUNT
	nodemap_test_cleanup
}
run_test 23b "test mapped default ACLs"
test_24() {
	nodemap_test_setup

	trap nodemap_test_cleanup EXIT
	do_nodes $(comma_list $(all_server_nodes)) $LCTL get_param -R nodemap ||
		error "proc readable file read failed"

	nodemap_test_cleanup
}
run_test 24 "check nodemap proc files for LBUGs and Oopses"
test_25() {
	local tmpfile=$(mktemp)
	local tmpfile2=$(mktemp)
	local tmpfile3=$(mktemp)
	local tmpfile4=$(mktemp)
	local subdir=c0dir
	local client

	nodemap_version_check || return 0

	# stop clients for this test
	zconf_umount_clients $CLIENTS $MOUNT ||
		error "unable to umount clients $CLIENTS"

	export SK_UNIQUE_NM=true
	nodemap_test_setup

	# enable trusted/admin for setquota call in cleanup_and_setup_lustre()
	local i=0
	for client in $clients; do
		do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property admin --value 1
		do_facet mgs $LCTL nodemap_modify --name c${i} \
			--property trusted --value 1
		i=$((i + 1))
	done
	wait_nm_sync c$((i - 1)) trusted_nodemap

	trap nodemap_test_cleanup EXIT

	# create a new, empty nodemap, and add fileset info to it
	do_facet mgs $LCTL nodemap_add test25 ||
		error "unable to create nodemap test25"
	do_facet mgs $LCTL set_param -P nodemap.test25.fileset=/$subdir ||
		error "unable to add fileset info to nodemap test25"

	wait_nm_sync test25 id

	do_facet mgs $LCTL nodemap_info > $tmpfile
	do_facet mds $LCTL nodemap_info > $tmpfile2

	if ! $SHARED_KEY; then
		# will conflict with SK's nodemaps
		cleanup_and_setup_lustre
	fi

	# stop clients for this test
	zconf_umount_clients $CLIENTS $MOUNT ||
		error "unable to umount clients $CLIENTS"

	do_facet mgs $LCTL nodemap_info > $tmpfile3
	diff -q $tmpfile3 $tmpfile >& /dev/null ||
		error "nodemap_info diff on MGS after remount"

	do_facet mds $LCTL nodemap_info > $tmpfile4
	diff -q $tmpfile4 $tmpfile2 >& /dev/null ||
		error "nodemap_info diff on MDS after remount"

	do_facet mgs $LCTL nodemap_del test25 ||
		error "cannot delete nodemap test25 from config"
	nodemap_test_cleanup
	# restart clients previously stopped
	zconf_mount_clients $CLIENTS $MOUNT ||
		error "unable to mount clients $CLIENTS"

	rm -f $tmpfile $tmpfile2 $tmpfile3 $tmpfile4
	export SK_UNIQUE_NM=false
}
run_test 25 "test save and reload nodemap config"
test_26() {
	nodemap_version_check || return 0

	local large_i=32000

	do_facet mgs "seq -f 'c%g' $large_i | xargs -n1 $LCTL nodemap_add"
	wait_nm_sync c$large_i admin_nodemap

	do_facet mgs "seq -f 'c%g' $large_i | xargs -n1 $LCTL nodemap_del"
	wait_nm_sync c$large_i admin_nodemap
}
run_test 26 "test transferring very large nodemap"
nodemap_exercise_fileset() {
	local nm="$1"
	local fileset_on_mgs=""

	# setup
	if [ "$nm" == "default" ]; then
		do_facet mgs $LCTL nodemap_activate 1
		wait_nm_sync active
	else
		nodemap_test_setup
	fi
	if $SHARED_KEY; then
		export SK_UNIQUE_NM=true
	else
		# will conflict with SK's nodemaps
		trap "fileset_test_cleanup $nm" EXIT
	fi
	fileset_test_setup "$nm"

	# add fileset info to $nm nodemap
	if ! combined_mgs_mds; then
		do_facet mgs $LCTL set_param nodemap.${nm}.fileset=/$subdir ||
			error "unable to add fileset info to $nm nodemap on MGS"
	fi
	do_facet mgs $LCTL set_param -P nodemap.${nm}.fileset=/$subdir ||
		error "unable to add fileset info to $nm nodemap for servers"
	wait_nm_sync $nm fileset "nodemap.${nm}.fileset=/$subdir"

	# re-mount client
	zconf_umount_clients ${clients_arr[0]} $MOUNT ||
		error "unable to umount client ${clients_arr[0]}"
	# set some generic fileset to trigger SSK code
	export FILESET=/
	zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
		error "unable to remount client ${clients_arr[0]}"
	unset FILESET

	# test mount point content
	do_node ${clients_arr[0]} test -f $MOUNT/this_is_$subdir ||
		error "fileset not taken into account"

	# re-mount client with sub-subdir
	zconf_umount_clients ${clients_arr[0]} $MOUNT ||
		error "unable to umount client ${clients_arr[0]}"
	export FILESET=/$subsubdir
	zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
		error "unable to remount client ${clients_arr[0]}"
	unset FILESET

	# test mount point content
	do_node ${clients_arr[0]} test -f $MOUNT/this_is_$subsubdir ||
		error "subdir of fileset not taken into account"

	# remove fileset info from nodemap
	do_facet mgs $LCTL nodemap_set_fileset --name $nm --fileset \'\' ||
		error "unable to delete fileset info on $nm nodemap"
	wait_update_facet mgs "$LCTL get_param nodemap.${nm}.fileset" \
		"nodemap.${nm}.fileset=" ||
		error "fileset info still not cleared on $nm nodemap"
	do_facet mgs $LCTL set_param -P nodemap.${nm}.fileset=\'\' ||
		error "unable to reset fileset info on $nm nodemap"
	wait_nm_sync $nm fileset "nodemap.${nm}.fileset="
	# re-mount client
	zconf_umount_clients ${clients_arr[0]} $MOUNT ||
		error "unable to umount client ${clients_arr[0]}"
	zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
		error "unable to remount client ${clients_arr[0]}"

	# test mount point content
	if ! do_node ${clients_arr[0]} test -d $MOUNT/$subdir; then
		error "fileset not cleared on $nm nodemap"
	fi

	# back to non-nodemap setup
	if $SHARED_KEY; then
		export SK_UNIQUE_NM=false
		zconf_umount_clients ${clients_arr[0]} $MOUNT ||
			error "unable to umount client ${clients_arr[0]}"
	fi
	fileset_test_cleanup "$nm"
	if [ "$nm" == "default" ]; then
		do_facet mgs $LCTL nodemap_activate 0
		wait_nm_sync active 0
		export SK_UNIQUE_NM=false
	else
		nodemap_test_cleanup
	fi
	if $SHARED_KEY; then
		zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
			error "unable to remount client ${clients_arr[0]}"
	fi
}

test_27() {
	for nm in "default" "c0"; do
		local subdir="subdir_${nm}"
		local subsubdir="subsubdir_${nm}"

		echo "Exercising fileset for nodemap $nm"
		nodemap_exercise_fileset "$nm"
	done
}
run_test 27 "test fileset in various nodemaps"
test_28() {
	if ! $SHARED_KEY; then
		skip "need shared key feature for this test" && return
	fi
	mkdir -p $DIR/$tdir || error "mkdir failed"
	touch $DIR/$tdir/$tdir.out || error "touch failed"
	if [ ! -f $DIR/$tdir/$tdir.out ]; then
		error "read before rotation failed"
	fi
	# store top key identity to ensure rotation has occurred
	SK_IDENTITY_OLD=$(lctl get_param *.*.*srpc* | grep "expire" |
		head -1 | awk '{print $15}' | cut -c1-8)
	do_facet $SINGLEMDS lfs flushctx ||
		error "could not run flushctx on $SINGLEMDS"

	lfs flushctx || error "could not run flushctx on client"

	# verify new key is in place
	SK_IDENTITY_NEW=$(lctl get_param *.*.*srpc* | grep "expire" |
		head -1 | awk '{print $15}' | cut -c1-8)
	if [ "$SK_IDENTITY_OLD" == "$SK_IDENTITY_NEW" ]; then
		error "key did not rotate correctly"
	fi
	if [ ! -f $DIR/$tdir/$tdir.out ]; then
		error "read after rotation failed"
	fi
}
run_test 28 "check shared key rotation method"
test_29() {
	if ! $SHARED_KEY; then
		skip "need shared key feature for this test" && return
	fi
	if [ "$SK_FLAVOR" != "ski" ] && [ "$SK_FLAVOR" != "skpi" ]; then
		skip "test only valid if integrity is active" && return
	fi

	mkdir $DIR/$tdir || error "mkdir"
	touch $DIR/$tdir/$tfile || error "touch"
	zconf_umount_clients ${clients_arr[0]} $MOUNT ||
		error "unable to umount clients"
	keyctl show | awk '/lustre/ { print $1 }' |
		xargs -IX keyctl unlink X

	# a mount attempt without a key must fail
	OLD_SK_PATH=$SK_PATH
	export SK_PATH=/dev/null
	if zconf_mount_clients ${clients_arr[0]} $MOUNT; then
		export SK_PATH=$OLD_SK_PATH
		if [ -e $DIR/$tdir/$tfile ]; then
			error "able to mount and read without key"
		else
			error "able to mount without key"
		fi
	fi

	export SK_PATH=$OLD_SK_PATH
	keyctl show | awk '/lustre/ { print $1 }' |
		xargs -IX keyctl unlink X
}
run_test 29 "check for missing shared key"
test_30() {
	if ! $SHARED_KEY; then
		skip "need shared key feature for this test" && return
	fi
	if [ "$SK_FLAVOR" != "ski" ] && [ "$SK_FLAVOR" != "skpi" ]; then
		skip "test only valid if integrity is active" && return
	fi

	mkdir -p $DIR/$tdir || error "mkdir failed"
	touch $DIR/$tdir/$tdir.out || error "touch failed"
	zconf_umount_clients ${clients_arr[0]} $MOUNT ||
		error "unable to umount clients"
	# unload keys from ring
	keyctl show | awk '/lustre/ { print $1 }' |
		xargs -IX keyctl unlink X
	# create a key with a bogus filesystem name; it must be rejected
	lgss_sk -w $SK_PATH/$FSNAME-bogus.key -f $FSNAME.bogus \
		-t client -d /dev/urandom || error "lgss_sk failed (1)"
	do_facet $SINGLEMDS lfs flushctx || error "could not run flushctx"
	OLD_SK_PATH=$SK_PATH
	export SK_PATH=$SK_PATH/$FSNAME-bogus.key
	if zconf_mount_clients ${clients_arr[0]} $MOUNT; then
		SK_PATH=$OLD_SK_PATH
		if [ -e $DIR/$tdir/$tdir.out ]; then
			error "mount and read file with invalid key"
		else
			error "mount with invalid key"
		fi
	fi
	SK_PATH=$OLD_SK_PATH
	zconf_umount_clients ${clients_arr[0]} $MOUNT ||
		error "unable to umount clients"
}
run_test 30 "check for invalid shared key"
log "cleanup: ======================================================"

## deactivate nodemap
do_facet mgs $LCTL nodemap_activate 0

for num in $(seq $MDSCOUNT); do
	if [ "${identity_old[$num]}" = 1 ]; then
		switch_identity $num false || identity_old[$num]=$?
	fi
done

$RUNAS_CMD -u $ID0 ls $DIR
$RUNAS_CMD -u $ID1 ls $DIR