X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Ftests%2Fsanity-sec.sh;h=5611166b382a073d82401eaed7a2d0f56224a01f;hp=612cba1ab4b0bb9bd756cf5fc9066f111aa6fac4;hb=bb0a1075285076567bf9c3c406116682f0997579;hpb=072d6b3156bf3b5d1738b43aadcba5c378c59ee9

diff --git a/lustre/tests/sanity-sec.sh b/lustre/tests/sanity-sec.sh
index 612cba1..5611166 100755
--- a/lustre/tests/sanity-sec.sh
+++ b/lustre/tests/sanity-sec.sh
@@ -7,12 +7,10 @@ set -e
 
 ONLY=${ONLY:-"$*"}
-# bug number for skipped test: 19430 19967 19967
-ALWAYS_EXCEPT=" 2 5 6 $SANITY_SEC_EXCEPT"
+# bug number for skipped test:
+ALWAYS_EXCEPT=" $SANITY_SEC_EXCEPT"
 # UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
 
-[ "$ALWAYS_EXCEPT$EXCEPT" ] && echo "Skipping tests: $ALWAYS_EXCEPT $EXCEPT"
-
 SRCDIR=$(dirname $0)
 export PATH=$PWD/$SRCDIR:$SRCDIR:$PWD/$SRCDIR/../utils:$PATH:/sbin
 export NAME=${NAME:-local}
@@ -21,8 +19,21 @@ LUSTRE=${LUSTRE:-$(dirname $0)/..}
 . $LUSTRE/tests/test-framework.sh
 init_test_env $@
 . ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
+get_lustre_env
 init_logging
 
+NODEMAP_TESTS=$(seq 7 26)
+
+if ! check_versions; then
+	echo "It is NOT necessary to test nodemap under interoperation mode"
+	EXCEPT="$EXCEPT $NODEMAP_TESTS"
+fi
+
+[ "$SLOW" = "no" ] && EXCEPT_SLOW="26"
+
+[ "$ALWAYS_EXCEPT$EXCEPT$EXCEPT_SLOW" ] &&
+	echo "Skipping tests: $ALWAYS_EXCEPT $EXCEPT $EXCEPT_SLOW"
+
 RUNAS_CMD=${RUNAS_CMD:-runas}
 
 WTL=${WTL:-"$LUSTRE/tests/write_time_limit"}
@@ -30,13 +41,8 @@ WTL=${WTL:-"$LUSTRE/tests/write_time_limit"}
 CONFDIR=/etc/lustre
 PERM_CONF=$CONFDIR/perm.conf
 FAIL_ON_ERROR=false
-
 HOSTNAME_CHECKSUM=$(hostname | sum | awk '{ print $1 }')
 SUBNET_CHECKSUM=$(expr $HOSTNAME_CHECKSUM % 250 + 1)
-NODEMAP_COUNT=16
-NODEMAP_RANGE_COUNT=3
-NODEMAP_IPADDR_LIST="1 10 64 128 200 250"
-NODEMAP_MAX_ID=128
 
 require_dsh_mds || exit 0
 require_dsh_ost || exit 0
@@ -47,14 +53,20 @@ clients_arr=($clients)
 ID0=${ID0:-500}
 ID1=${ID1:-501}
 
-USER0=$(grep :$ID0:$ID0: /etc/passwd | cut -d: -f1)
-USER1=$(grep :$ID1:$ID1: /etc/passwd | cut -d: -f1)
+USER0=$(getent passwd | grep :$ID0:$ID0: | cut -d: -f1)
+USER1=$(getent passwd | grep :$ID1:$ID1: | cut -d: -f1)
+
+NODEMAP_COUNT=16
+NODEMAP_RANGE_COUNT=3
+NODEMAP_IPADDR_LIST="1 10 64 128 200 250"
+NODEMAP_ID_COUNT=10
+NODEMAP_MAX_ID=$((ID0 + NODEMAP_ID_COUNT))
 
 [ -z "$USER0" ] &&
-	skip "need to add user0 ($ID0:$ID0) to /etc/passwd" && exit 0
+	skip "need to add user0 ($ID0:$ID0)" && exit 0
 
 [ -z "$USER1" ] &&
-	skip "need to add user1 ($ID1:$ID1) to /etc/passwd" && exit 0
+	skip "need to add user1 ($ID1:$ID1)" && exit 0
 
 IDBASE=${IDBASE:-60000}
@@ -66,18 +78,7 @@ FOPS_IDMAPS=(
 
 check_and_setup_lustre
 
-sec_cleanup() {
-	if [ "$I_MOUNTED" = "yes" ]; then
-		cleanupall -f || error "sec_cleanup"
-	fi
-}
-
-DIR=${DIR:-$MOUNT}
-[ -z "$(echo $DIR | grep $MOUNT)" ] &&
-	error "$DIR not in $MOUNT" && sec_cleanup && exit 1
-
-[ $(echo $MOUNT | wc -w) -gt 1 ] &&
-	echo "NAME=$MOUNT mounted more than once" && sec_cleanup && exit 0
+assert_DIR
 
 # for GSS_SUP
 GSS_REF=$(lsmod | grep ^ptlrpc_gss | awk '{print $3}')
@@ -95,17 +96,6 @@ MDT=$(do_facet $SINGLEMDS lctl get_param -N "mdt.\*MDT0000" |
 do_facet $SINGLEMDS "mkdir -p $CONFDIR"
 IDENTITY_FLUSH=mdt.$MDT.identity_flush
 IDENTITY_UPCALL=mdt.$MDT.identity_upcall
-MDSSECLEVEL=mdt.$MDT.sec_level
-
-# for CLIENT_TYPE
-if [ -z "$(lctl get_param -n llite.*.client_type | grep remote 2>/dev/null)" ]
-then
-	CLIENT_TYPE="local"
-	echo "local client"
-else
-	CLIENT_TYPE="remote"
-	echo "remote client"
-fi
 
 SAVE_PWD=$PWD
 
@@ -115,6 
+105,7 @@ sec_login() { local user=$1 local group=$2 + $GSS_KRB5 || return if ! $RUNAS_CMD -u $user krb5_login.sh; then error "$user login kerberos failed." exit 1 @@ -154,17 +145,7 @@ test_0() { chmod 0755 $DIR || error "chmod (1)" rm -rf $DIR/$tdir || error "rm (1)" mkdir -p $DIR/$tdir || error "mkdir (1)" - - if [ "$CLIENT_TYPE" = "remote" ]; then - do_facet $SINGLEMDS "echo '* 0 normtown' > $PERM_CONF" - do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1" - chown $USER0 $DIR/$tdir && error "chown (1)" - do_facet $SINGLEMDS "echo '* 0 rmtown' > $PERM_CONF" - do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1" - else - chown $USER0 $DIR/$tdir || error "chown (2)" - fi - + chown $USER0 $DIR/$tdir || error "chown (2)" $RUNAS_CMD -u $ID0 ls $DIR || error "ls (1)" rm -f $DIR/f0 || error "rm (2)" $RUNAS_CMD -u $ID0 touch $DIR/f0 && error "touch (1)" @@ -178,11 +159,6 @@ test_0() { $RUNAS_CMD -u $ID1 touch $DIR/$tdir/f5 && error "touch (6)" touch $DIR/$tdir/f6 || error "touch (7)" rm -rf $DIR/$tdir || error "rm (3)" - - if [ "$CLIENT_TYPE" = "remote" ]; then - do_facet $SINGLEMDS "rm -f $PERM_CONF" - do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1" - fi } run_test 0 "uid permission =============================" @@ -190,11 +166,6 @@ run_test 0 "uid permission =============================" test_1() { [ $GSS_SUP = 0 ] && skip "without GSS support." && return - if [ "$CLIENT_TYPE" = "remote" ]; then - do_facet $SINGLEMDS "echo '* 0 rmtown' > $PERM_CONF" - do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1" - fi - rm -rf $DIR/$tdir mkdir -p $DIR/$tdir @@ -225,60 +196,6 @@ test_1() { } run_test 1 "setuid/gid =============================" -run_rmtacl_subtest() { - $SAVE_PWD/rmtacl/run $SAVE_PWD/rmtacl/$1.test - return $? -} - -# remote_acl -# for remote client only -test_2 () { - [ "$CLIENT_TYPE" = "local" ] && - skip "remote_acl for remote client only" && return - [ -z "$(lctl get_param -n mdc.*-mdc-*.connect_flags | grep ^acl)" ] && - skip "must have acl enabled" && return - [ -z "$(which setfacl 2>/dev/null)" ] && - skip "could not find setfacl" && return - [ "$UID" != 0 ] && skip "must run as root" && return - - do_facet $SINGLEMDS "echo '* 0 rmtacl,rmtown' > $PERM_CONF" - do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1" - - sec_login root root - sec_login bin bin - sec_login daemon daemon - sec_login games users - - SAVE_UMASK=$(umask) - umask 0022 - cd $DIR - - echo "performing cp ..." - run_rmtacl_subtest cp || error "cp" - echo "performing getfacl-noacl..." - run_rmtacl_subtest getfacl-noacl || error "getfacl-noacl" - echo "performing misc..." - run_rmtacl_subtest misc || error "misc" - echo "performing permissions..." - run_rmtacl_subtest permissions || error "permissions" - echo "performing setfacl..." - run_rmtacl_subtest setfacl || error "setfacl" - - # inheritance test got from HP - echo "performing inheritance..." - cp $SAVE_PWD/rmtacl/make-tree . - chmod +x make-tree - run_rmtacl_subtest inheritance || error "inheritance" - rm -f make-tree - - cd $SAVE_PWD - umask $SAVE_UMASK - - do_facet $SINGLEMDS "rm -f $PERM_CONF" - do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1" -} -run_test 2 "rmtacl =============================" - # bug 3285 - supplementary group should always succeed. 
# NB: the supplementary groups are set for local client only, # as for remote client, the groups of the specified uid on MDT @@ -291,22 +208,15 @@ test_4() { $server_version -lt $(version_code 2.5.50) ]] || { skip "Need MDS version at least 2.6.93 or 2.5.35"; return; } - if [ "$CLIENT_TYPE" = "remote" ]; then - do_facet $SINGLEMDS "echo '* 0 rmtown' > $PERM_CONF" - do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1" - fi - rm -rf $DIR/$tdir mkdir -p $DIR/$tdir chmod 0771 $DIR/$tdir chgrp $ID0 $DIR/$tdir $RUNAS_CMD -u $ID0 ls $DIR/$tdir || error "setgroups (1)" - if [ "$CLIENT_TYPE" = "local" ]; then - do_facet $SINGLEMDS "echo '* $ID1 setgrp' > $PERM_CONF" - do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1" - $RUNAS_CMD -u $ID1 -G1,2,$ID0 ls $DIR/$tdir || - error "setgroups (2)" - fi + do_facet $SINGLEMDS "echo '* $ID1 setgrp' > $PERM_CONF" + do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1" + $RUNAS_CMD -u $ID1 -G1,2,$ID0 ls $DIR/$tdir || + error "setgroups (2)" $RUNAS_CMD -u $ID1 -G1,2 ls $DIR/$tdir && error "setgroups (3)" rm -rf $DIR/$tdir @@ -321,18 +231,28 @@ create_nodemaps() { local rc squash_id default 99 0 + wait_nm_sync default squash_uid '' inactive squash_id default 99 1 + wait_nm_sync default squash_gid '' inactive for (( i = 0; i < NODEMAP_COUNT; i++ )); do local csum=${HOSTNAME_CHECKSUM}_${i} - if ! do_facet mgs $LCTL nodemap_add $csum; then - return 1 + do_facet mgs $LCTL nodemap_add $csum + rc=$? + if [ $rc -ne 0 ]; then + echo "nodemap_add $csum failed with $rc" + return $rc fi out=$(do_facet mgs $LCTL get_param nodemap.$csum.id) ## This needs to return zero if the following statement is 1 [[ $(echo $out | grep -c $csum) == 0 ]] && return 1 done + for (( i = 0; i < NODEMAP_COUNT; i++ )); do + local csum=${HOSTNAME_CHECKSUM}_${i} + + wait_nm_sync $csum id '' inactive + done return 0 } @@ -348,9 +268,14 @@ delete_nodemaps() { return 3 fi - out=$(do_facet mgs $LCTL get_param nodemap.$csum.id) + out=$(do_facet mgs $LCTL get_param nodemap.$csum.id 2>/dev/null) [[ $(echo $out | grep -c $csum) != 0 ]] && return 1 done + for (( i = 0; i < NODEMAP_COUNT; i++ )); do + local csum=${HOSTNAME_CHECKSUM}_${i} + + wait_nm_sync $csum id '' inactive + done return 0 } @@ -390,10 +315,11 @@ add_idmaps() { local cmd="$LCTL nodemap_add_idmap" local rc=0 + echo "Start to add idmaps ..." for ((i = 0; i < NODEMAP_COUNT; i++)); do local j - for ((j = 500; j < NODEMAP_MAX_ID; j++)); do + for ((j = $ID0; j < NODEMAP_MAX_ID; j++)); do local csum=${HOSTNAME_CHECKSUM}_${i} local client_id=$j local fs_id=$((j + 1)) @@ -412,15 +338,84 @@ add_idmaps() { return $rc } +update_idmaps() { #LU-10040 + [ $(lustre_version_code mgs) -lt $(version_code 2.10.55) ] && + skip "Need MGS >= 2.10.55" && + return + local csum=${HOSTNAME_CHECKSUM}_0 + local old_id_client=$ID0 + local old_id_fs=$((ID0 + 1)) + local new_id=$((ID0 + 100)) + local tmp_id + local cmd + local run + local idtype + local rc=0 + + echo "Start to update idmaps ..." + + #Inserting an existed idmap should return error + cmd="$LCTL nodemap_add_idmap --name $csum --idtype uid" + if do_facet mgs \ + $cmd --idmap $old_id_client:$old_id_fs 2>/dev/null; then + error "insert idmap {$old_id_client:$old_id_fs} " \ + "should return error" + rc=$((rc + 1)) + return rc + fi + + #Update id_fs and check it + if ! 
do_facet mgs $cmd --idmap $old_id_client:$new_id; then + error "$cmd --idmap $old_id_client:$new_id failed" + rc=$((rc + 1)) + return $rc + fi + tmp_id=$(do_facet mgs $LCTL get_param -n nodemap.$csum.idmap | + awk '{ print $7 }' | sed -n '2p') + [ $tmp_id != $new_id ] && { error "new id_fs $tmp_id != $new_id"; \ + rc=$((rc + 1)); return $rc; } + + #Update id_client and check it + if ! do_facet mgs $cmd --idmap $new_id:$new_id; then + error "$cmd --idmap $new_id:$new_id failed" + rc=$((rc + 1)) + return $rc + fi + tmp_id=$(do_facet mgs $LCTL get_param -n nodemap.$csum.idmap | + awk '{ print $5 }' | sed -n "$((NODEMAP_ID_COUNT + 1)) p") + tmp_id=$(echo ${tmp_id%,*}) #e.g. "501,"->"501" + [ $tmp_id != $new_id ] && { error "new id_client $tmp_id != $new_id"; \ + rc=$((rc + 1)); return $rc; } + + #Delete above updated idmap + cmd="$LCTL nodemap_del_idmap --name $csum --idtype uid" + if ! do_facet mgs $cmd --idmap $new_id:$new_id; then + error "$cmd --idmap $new_id:$new_id failed" + rc=$((rc + 1)) + return $rc + fi + + #restore the idmaps to make delete_idmaps work well + cmd="$LCTL nodemap_add_idmap --name $csum --idtype uid" + if ! do_facet mgs $cmd --idmap $old_id_client:$old_id_fs; then + error "$cmd --idmap $old_id_client:$old_id_fs failed" + rc=$((rc + 1)) + return $rc + fi + + return $rc +} + delete_idmaps() { local i local cmd="$LCTL nodemap_del_idmap" local rc=0 + echo "Start to delete idmaps ..." for ((i = 0; i < NODEMAP_COUNT; i++)); do local j - for ((j = 500; j < NODEMAP_MAX_ID; j++)); do + for ((j = $ID0; j < NODEMAP_MAX_ID; j++)); do local csum=${HOSTNAME_CHECKSUM}_${i} local client_id=$j local fs_id=$((j + 1)) @@ -467,6 +462,9 @@ modify_flags() { } squash_id() { + [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] && + skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" && + return local cmd cmd[0]="$LCTL nodemap_modify --property squash_uid" @@ -477,9 +475,76 @@ squash_id() { fi } +wait_nm_sync() { + local nodemap_name=$1 + local key=$2 + local value=$3 + local opt=$4 + local proc_param + local is_active=$(do_facet mgs $LCTL get_param -n nodemap.active) + local max_retries=20 + local is_sync + local out1="" + local out2 + local mgs_ip=$(host_nids_address $mgs_HOST $NETTYPE | cut -d' ' -f1) + local i + + if [ "$nodemap_name" == "active" ]; then + proc_param="active" + elif [ -z "$key" ]; then + proc_param=${nodemap_name} + else + proc_param="${nodemap_name}.${key}" + fi + if [ "$opt" == "inactive" ]; then + # check nm sync even if nodemap is not activated + is_active=1 + opt="" + fi + (( is_active == 0 )) && [ "$proc_param" != "active" ] && return + + if [ -z "$value" ]; then + out1=$(do_facet mgs $LCTL get_param $opt \ + nodemap.${proc_param} 2>/dev/null) + echo "On MGS ${mgs_ip}, ${proc_param} = $out1" + else + out1=$value; + fi + + # wait up to 10 seconds for other servers to sync with mgs + for i in $(seq 1 10); do + for node in $(all_server_nodes); do + local node_ip=$(host_nids_address $node $NETTYPE | + cut -d' ' -f1) + + is_sync=true + if [ -z "$value" ]; then + [ $node_ip == $mgs_ip ] && continue + fi + + out2=$(do_node $node_ip $LCTL get_param $opt \ + nodemap.$proc_param 2>/dev/null) + echo "On $node ${node_ip}, ${proc_param} = $out2" + [ "$out1" != "$out2" ] && is_sync=false && break + done + $is_sync && break + sleep 1 + done + if ! 
$is_sync; then + echo MGS + echo $out1 + echo OTHER - IP: $node_ip + echo $out2 + error "mgs and $nodemap_name ${key} mismatch, $i attempts" + fi + echo "waited $((i - 1)) seconds for sync" +} + # ensure that the squash defaults are the expected defaults squash_id default 99 0 +wait_nm_sync default squash_uid '' inactive squash_id default 99 1 +wait_nm_sync default squash_gid '' inactive test_nid() { local cmd @@ -495,16 +560,82 @@ test_nid() { return 1 } +wait_nm_sync() { + local nodemap_name=$1 + local key=$2 + local value=$3 + local opt=$4 + local proc_param + local is_active=$(do_facet mgs $LCTL get_param -n nodemap.active) + local max_retries=20 + local is_sync + local out1="" + local out2 + local mgs_ip=$(host_nids_address $mgs_HOST $NETTYPE | cut -d' ' -f1) + local i + + if [ "$nodemap_name" == "active" ]; then + proc_param="active" + elif [ -z "$key" ]; then + proc_param=${nodemap_name} + else + proc_param="${nodemap_name}.${key}" + fi + (( is_active == 0 )) && [ "$proc_param" != "active" ] && return + + if [ -z "$value" ]; then + out1=$(do_facet mgs $LCTL get_param $opt nodemap.${proc_param}) + echo "On MGS ${mgs_ip}, ${proc_param} = $out1" + else + out1=$value; + fi + + # wait up to 10 seconds for other servers to sync with mgs + for i in $(seq 1 10); do + for node in $(all_server_nodes); do + local node_ip=$(host_nids_address $node $NETTYPE | + cut -d' ' -f1) + + is_sync=true + if [ -z "$value" ]; then + [ $node_ip == $mgs_ip ] && continue + fi + + out2=$(do_node $node_ip $LCTL get_param $opt \ + nodemap.$proc_param 2>/dev/null) + echo "On $node ${node_ip}, ${proc_param} = $out2" + [ "$out1" != "$out2" ] && is_sync=false && break + done + $is_sync && break + sleep 1 + done + if ! $is_sync; then + echo MGS + echo $out1 + echo OTHER - IP: $node_ip + echo $out2 + error "mgs and $nodemap_name ${key} mismatch, $i attempts" + fi + echo "waited $((i - 1)) seconds for sync" +} + +cleanup_active() { + # restore activation state + do_facet mgs $LCTL nodemap_activate 0 + wait_nm_sync active +} + test_idmap() { local i local cmd="$LCTL nodemap_test_id" local rc=0 + echo "Start to test idmaps ..." ## nodemap deactivated - if ! do_facet mgs lctl nodemap_activate 0; then + if ! do_facet mgs $LCTL nodemap_activate 0; then return 1 fi - for ((id = 500; id < NODEMAP_MAX_ID; id++)); do + for ((id = $ID0; id < NODEMAP_MAX_ID; id++)); do local j for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do @@ -519,11 +650,11 @@ test_idmap() { done ## nodemap activated - if ! do_facet mgs lctl nodemap_activate 1; then + if ! do_facet mgs $LCTL nodemap_activate 1; then return 2 fi - for ((id = 500; id < NODEMAP_MAX_ID; id++)); do + for ((id = $ID0; id < NODEMAP_MAX_ID; id++)); do for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do nid="$SUBNET_CHECKSUM.0.${j}.100@tcp" fs_id=$(do_facet mgs $cmd --nid $nid \ @@ -547,7 +678,7 @@ test_idmap() { fi done - for ((id = 500; id < NODEMAP_MAX_ID; id++)); do + for ((id = $ID0; id < NODEMAP_MAX_ID; id++)); do for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do nid="$SUBNET_CHECKSUM.0.${j}.100@tcp" fs_id=$(do_facet mgs $cmd --nid $nid \ @@ -630,7 +761,7 @@ test_7() { delete_nodemaps rc=$? - [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 2 + [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 2 return 0 } @@ -660,7 +791,7 @@ test_8() { # Clean up delete_nodemaps rc=$? 
-	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 3
+	[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 3
 
 	return 0
 }
@@ -699,13 +830,13 @@ test_9() {
 	rc=0
 	delete_nodemaps
 	rc=$?
-	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 4
+	[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4
 
 	return 0
 }
 run_test 9 "nodemap range add"
 
-test_10() {
+test_10a() {
 	local rc
 
 	remote_mgs_nodsh && skip "remote MGS with nodsh" && return
@@ -746,11 +877,99 @@ test_10() {
 	delete_nodemaps
 	rc=$?
-	[[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 5
+	[[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 5
 
 	return 0
 }
-run_test 10 "nodemap reject duplicate ranges"
+run_test 10a "nodemap reject duplicate ranges"
+
+test_10b() {
+	[ $(lustre_version_code mgs) -lt $(version_code 2.10.53) ] &&
+		skip "Need MGS >= 2.10.53" && return
+
+	local nm1="nodemap1"
+	local nm2="nodemap2"
+	local nids="192.168.19.[0-255]@o2ib20"
+
+	do_facet mgs $LCTL nodemap_del $nm1 2>/dev/null
+	do_facet mgs $LCTL nodemap_del $nm2 2>/dev/null
+
+	do_facet mgs $LCTL nodemap_add $nm1 || error "Add $nm1 failed"
+	do_facet mgs $LCTL nodemap_add $nm2 || error "Add $nm2 failed"
+	do_facet mgs $LCTL nodemap_add_range --name $nm1 --range $nids ||
+		error "Add range $nids to $nm1 failed"
+	[ -n "$(do_facet mgs $LCTL get_param nodemap.$nm1.* |
+		grep start_nid)" ] || error "No range was found"
+	do_facet mgs $LCTL nodemap_del_range --name $nm2 --range $nids &&
+		error "Deleting range $nids from $nm2 should fail"
+	[ -n "$(do_facet mgs $LCTL get_param nodemap.$nm1.* |
+		grep start_nid)" ] || error "Range $nids should be there"
+
+	do_facet mgs $LCTL nodemap_del $nm1 || error "Delete $nm1 failed"
+	do_facet mgs $LCTL nodemap_del $nm2 || error "Delete $nm2 failed"
+	return 0
+}
+run_test 10b "delete range from the correct nodemap"
+
+test_10c() { #LU-8912
+	[ $(lustre_version_code mgs) -lt $(version_code 2.10.57) ] &&
+		skip "Need MGS >= 2.10.57" && return
+
+	local nm="nodemap_lu8912"
+	local nid_range="10.210.[32-47].[0-255]@o2ib3"
+	local start_nid="10.210.32.0@o2ib3"
+	local end_nid="10.210.47.255@o2ib3"
+	local start_nid_found
+	local end_nid_found
+
+	do_facet mgs $LCTL nodemap_del $nm 2>/dev/null
+	do_facet mgs $LCTL nodemap_add $nm || error "Add $nm failed"
+	do_facet mgs $LCTL nodemap_add_range --name $nm --range $nid_range ||
+		error "Add range $nid_range to $nm failed"
+
+	start_nid_found=$(do_facet mgs $LCTL get_param nodemap.$nm.* |
+		awk -F '[,: ]' /start_nid/'{ print $9 }')
+	[ "$start_nid" == "$start_nid_found" ] ||
+		error "start_nid: $start_nid_found != $start_nid"
+	end_nid_found=$(do_facet mgs $LCTL get_param nodemap.$nm.* |
+		awk -F '[,: ]' /end_nid/'{ print $13 }')
+	[ "$end_nid" == "$end_nid_found" ] ||
+		error "end_nid: $end_nid_found != $end_nid"
+
+	do_facet mgs $LCTL nodemap_del $nm || error "Delete $nm failed"
+	return 0
+}
+run_test 10c "verify contiguous range support"
+
+test_10d() { #LU-8913
+	[ $(lustre_version_code mgs) -lt $(version_code 2.10.59) ] &&
+		skip "Need MGS >= 2.10.59" && return
+
+	local nm="nodemap_lu8913"
+	local nid_range="*@o2ib3"
+	local start_nid="0.0.0.0@o2ib3"
+	local end_nid="255.255.255.255@o2ib3"
+	local start_nid_found
+	local end_nid_found
+
+	do_facet mgs $LCTL nodemap_del $nm 2>/dev/null
+	do_facet mgs $LCTL nodemap_add $nm || error "Add $nm failed"
+	do_facet mgs $LCTL nodemap_add_range --name $nm --range $nid_range ||
+		error "Add range $nid_range to $nm failed"
+
+	start_nid_found=$(do_facet mgs $LCTL get_param nodemap.$nm.* |
+		awk -F '[,: ]' /start_nid/'{ print $9 }')
+	[ "$start_nid" == "$start_nid_found" ] ||
+		error "start_nid: $start_nid_found != $start_nid"
+	end_nid_found=$(do_facet mgs $LCTL get_param nodemap.$nm.* |
+		awk -F '[,: ]' /end_nid/'{ print $13 }')
+	[ "$end_nid" == "$end_nid_found" ] ||
+		error "end_nid: $end_nid_found != $end_nid"
+
+	do_facet mgs $LCTL nodemap_del $nm || error "Delete $nm failed"
+	return 0
+}
+run_test 10d "verify nodemap range format '*@' support"
 
 test_11() {
 	local rc
@@ -924,66 +1143,53 @@ test_15() {
 	rc=$?
 	[[ $rc != 0 ]] && error "nodemap_add_idmap failed with $rc" && return 3
 
+	activedefault=$(do_facet mgs $LCTL get_param -n nodemap.active)
+	if [[ "$activedefault" != "1" ]]; then
+		stack_trap cleanup_active EXIT
+	fi
+
 	rc=0
 	test_idmap
 	rc=$?
 	[[ $rc != 0 ]] && error "nodemap_test_id failed with $rc" && return 4
 
 	rc=0
+	update_idmaps
+	rc=$?
+	[[ $rc != 0 ]] && error "update_idmaps failed with $rc" && return 5
+
+	rc=0
 	delete_idmaps
 	rc=$?
-	[[ $rc != 0 ]] && error "nodemap_del_idmap failed with $rc" && return 5
+	[[ $rc != 0 ]] && error "nodemap_del_idmap failed with $rc" && return 6
 
 	rc=0
 	delete_nodemaps
 	rc=$?
-	[[ $rc != 0 ]] && error "nodemap_delete failed with $rc" && return 6
+	[[ $rc != 0 ]] && error "nodemap_delete failed with $rc" && return 7
 
 	return 0
 }
 run_test 15 "test id mapping"
 
-# Until nodemaps are distributed by MGS, they need to be distributed manually
-# This function and all calls to it should be removed once the MGS distributes
-# nodemaps to the MDS and OSS nodes directly.
-do_servers_not_mgs() {
-	local mgs_ip=$(host_nids_address $mgs_HOST $NETTYPE)
-	for node in $(all_server_nodes); do
-		local node_ip=$(host_nids_address $node $NETTYPE)
-		[ $node_ip == $mgs_ip ] && continue
-		do_node $node_ip $*
-	done
-}
-
 create_fops_nodemaps() {
 	local i=0
 	local client
 	for client in $clients; do
 		local client_ip=$(host_nids_address $client $NETTYPE)
-		local client_nid=$(h2$NETTYPE $client_ip)
+		local client_nid=$(h2nettype $client_ip)
 		do_facet mgs $LCTL nodemap_add c${i} || return 1
 		do_facet mgs $LCTL nodemap_add_range \
 			--name c${i} --range $client_nid || return 1
-		do_servers_not_mgs $LCTL set_param nodemap.add_nodemap=c${i} ||
-			return 1
-		do_servers_not_mgs "$LCTL set_param " \
-			"nodemap.add_nodemap_range='c${i} $client_nid'" ||
-			return 1
 		for map in ${FOPS_IDMAPS[i]}; do
 			do_facet mgs $LCTL nodemap_add_idmap --name c${i} \
 				--idtype uid --idmap ${map} || return 1
-			do_servers_not_mgs "$LCTL set_param " \
-				"nodemap.add_nodemap_idmap='c$i uid ${map}'" ||
-				return 1
 			do_facet mgs $LCTL nodemap_add_idmap --name c${i} \
 				--idtype gid --idmap ${map} || return 1
-			do_servers_not_mgs "$LCTL set_param " \
-				" nodemap.add_nodemap_idmap='c$i gid ${map}'" ||
-				return 1
 		done
-		out1=$(do_facet mgs $LCTL get_param nodemap.c${i}.idmap)
-		out2=$(do_facet ost0 $LCTL get_param nodemap.c${i}.idmap)
-		[ "$out1" != "$out2" ] && error "mgs and oss maps mismatch"
+
+		wait_nm_sync c$i idmap
+
 		i=$((i + 1))
 	done
 	return 0
@@ -994,8 +1200,6 @@ delete_fops_nodemaps() {
 	local client
 	for client in $clients; do
 		do_facet mgs $LCTL nodemap_del c${i} || return 1
-		do_servers_not_mgs $LCTL set_param nodemap.remove_nodemap=c$i ||
-			return 1
 		i=$((i + 1))
 	done
 	return 0
@@ -1022,8 +1226,9 @@ fops_test_setup() {
 	do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
 	do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
-	do_servers_not_mgs $LCTL set_param nodemap.c0.admin_nodemap=1
-	do_servers_not_mgs $LCTL set_param 
nodemap.c0.trusted_nodemap=1 + + wait_nm_sync c0 admin_nodemap + wait_nm_sync c0 trusted_nodemap do_node ${clients_arr[0]} rm -rf $DIR/$tdir nm_test_mkdir @@ -1033,12 +1238,95 @@ fops_test_setup() { --property admin --value $admin do_facet mgs $LCTL nodemap_modify --name c0 \ --property trusted --value $trust - do_servers_not_mgs $LCTL set_param nodemap.c0.admin_nodemap=$admin - do_servers_not_mgs $LCTL set_param nodemap.c0.trusted_nodemap=$trust # flush MDT locks to make sure they are reacquired before test - do_node ${clients_arr[0]} lctl set_param \ + do_node ${clients_arr[0]} $LCTL set_param \ ldlm.namespaces.$FSNAME-MDT*.lru_size=clear + + wait_nm_sync c0 admin_nodemap + wait_nm_sync c0 trusted_nodemap +} + +# fileset test directory needs to be initialized on a privileged client +fileset_test_setup() { + local nm=$1 + + if [ -n "$FILESET" -a -z "$SKIP_FILESET" ]; then + cleanup_mount $MOUNT + FILESET="" zconf_mount_clients $CLIENTS $MOUNT + fi + + local admin=$(do_facet mgs $LCTL get_param -n \ + nodemap.${nm}.admin_nodemap) + local trust=$(do_facet mgs $LCTL get_param -n \ + nodemap.${nm}.trusted_nodemap) + + do_facet mgs $LCTL nodemap_modify --name $nm --property admin --value 1 + do_facet mgs $LCTL nodemap_modify --name $nm --property trusted \ + --value 1 + + wait_nm_sync $nm admin_nodemap + wait_nm_sync $nm trusted_nodemap + + # create directory and populate it for subdir mount + do_node ${clients_arr[0]} mkdir $MOUNT/$subdir || + error "unable to create dir $MOUNT/$subdir" + do_node ${clients_arr[0]} touch $MOUNT/$subdir/this_is_$subdir || + error "unable to create file $MOUNT/$subdir/this_is_$subdir" + do_node ${clients_arr[0]} mkdir $MOUNT/$subdir/$subsubdir || + error "unable to create dir $MOUNT/$subdir/$subsubdir" + do_node ${clients_arr[0]} touch \ + $MOUNT/$subdir/$subsubdir/this_is_$subsubdir || + error "unable to create file \ + $MOUNT/$subdir/$subsubdir/this_is_$subsubdir" + + do_facet mgs $LCTL nodemap_modify --name $nm \ + --property admin --value $admin + do_facet mgs $LCTL nodemap_modify --name $nm \ + --property trusted --value $trust + + # flush MDT locks to make sure they are reacquired before test + do_node ${clients_arr[0]} $LCTL set_param \ + ldlm.namespaces.$FSNAME-MDT*.lru_size=clear + + wait_nm_sync $nm admin_nodemap + wait_nm_sync $nm trusted_nodemap +} + +# fileset test directory needs to be initialized on a privileged client +fileset_test_cleanup() { + local nm=$1 + local admin=$(do_facet mgs $LCTL get_param -n \ + nodemap.${nm}.admin_nodemap) + local trust=$(do_facet mgs $LCTL get_param -n \ + nodemap.${nm}.trusted_nodemap) + + do_facet mgs $LCTL nodemap_modify --name $nm --property admin --value 1 + do_facet mgs $LCTL nodemap_modify --name $nm --property trusted \ + --value 1 + + wait_nm_sync $nm admin_nodemap + wait_nm_sync $nm trusted_nodemap + + # cleanup directory created for subdir mount + do_node ${clients_arr[0]} rm -rf $MOUNT/$subdir || + error "unable to remove dir $MOUNT/$subdir" + + do_facet mgs $LCTL nodemap_modify --name $nm \ + --property admin --value $admin + do_facet mgs $LCTL nodemap_modify --name $nm \ + --property trusted --value $trust + + # flush MDT locks to make sure they are reacquired before test + do_node ${clients_arr[0]} $LCTL set_param \ + ldlm.namespaces.$FSNAME-MDT*.lru_size=clear + + wait_nm_sync $nm admin_nodemap + wait_nm_sync $nm trusted_nodemap + if [ -n "$FILESET" -a -z "$SKIP_FILESET" ]; then + cleanup_mount $MOUNT + zconf_mount_clients $CLIENTS $MOUNT + fi } do_create_delete() { @@ -1073,8 +1361,8 @@ 
do_fops_quota_test() { local qused_high=$((qused_orig + quota_fuzz)) local qused_low=$((qused_orig - quota_fuzz)) local testfile=$DIR/$tdir/$tfile - chmod 777 $DIR/$tdir - $run_u dd if=/dev/zero of=$testfile bs=1M count=1 >& /dev/null + $run_u dd if=/dev/zero of=$testfile oflag=sync bs=1M count=1 \ + >& /dev/null || error "unable to write quota test file" sync; sync_all_data || true local qused_new=$(nodemap_check_quota "$run_u") @@ -1082,8 +1370,8 @@ do_fops_quota_test() { $((qused_new)) -gt $((qused_high + 1024)) ] && error "$qused_new != $qused_orig + 1M after write, " \ "fuzz is $quota_fuzz" - $run_u rm $testfile && d=1 - $NODEMAP_TEST_QUOTA && wait_delete_completed_mds + $run_u rm $testfile || error "unable to remove quota test file" + wait_delete_completed_mds qused_new=$(nodemap_check_quota "$run_u") [ $((qused_new)) -lt $((qused_low)) \ @@ -1163,6 +1451,68 @@ get_cr_del_expected() { echo $FAILURE } +test_fops_admin_cli_i="" +test_fops_chmod_dir() { + local current_cli_i=$1 + local perm_bits=$2 + local dir_to_chmod=$3 + local new_admin_cli_i="" + + # do we need to set up a new admin client? + [ "$current_cli_i" == "0" ] && [ "$test_fops_admin_cli_i" != "1" ] && + new_admin_cli_i=1 + [ "$current_cli_i" != "0" ] && [ "$test_fops_admin_cli_i" != "0" ] && + new_admin_cli_i=0 + + # if only one client, and non-admin, need to flip admin everytime + if [ "$num_clients" == "1" ]; then + test_fops_admin_client=$clients + test_fops_admin_val=$(do_facet mgs $LCTL get_param -n \ + nodemap.c0.admin_nodemap) + if [ "$test_fops_admin_val" != "1" ]; then + do_facet mgs $LCTL nodemap_modify \ + --name c0 \ + --property admin \ + --value 1 + wait_nm_sync c0 admin_nodemap + fi + elif [ "$new_admin_cli_i" != "" ]; then + # restore admin val to old admin client + if [ "$test_fops_admin_cli_i" != "" ] && + [ "$test_fops_admin_val" != "1" ]; then + do_facet mgs $LCTL nodemap_modify \ + --name c${test_fops_admin_cli_i} \ + --property admin \ + --value $test_fops_admin_val + wait_nm_sync c${test_fops_admin_cli_i} admin_nodemap + fi + + test_fops_admin_cli_i=$new_admin_cli_i + test_fops_admin_client=${clients_arr[$new_admin_cli_i]} + test_fops_admin_val=$(do_facet mgs $LCTL get_param -n \ + nodemap.c${new_admin_cli_i}.admin_nodemap) + + if [ "$test_fops_admin_val" != "1" ]; then + do_facet mgs $LCTL nodemap_modify \ + --name c${new_admin_cli_i} \ + --property admin \ + --value 1 + wait_nm_sync c${new_admin_cli_i} admin_nodemap + fi + fi + + do_node $test_fops_admin_client chmod $perm_bits $DIR/$tdir || return 1 + + # remove admin for single client if originally non-admin + if [ "$num_clients" == "1" ] && [ "$test_fops_admin_val" != "1" ]; then + do_facet mgs $LCTL nodemap_modify --name c0 --property admin \ + --value 0 + wait_nm_sync c0 admin_nodemap + fi + + return 0 +} + test_fops() { local mapmode="$1" local single_client="$2" @@ -1189,8 +1539,6 @@ test_fops() { local cli_i=0 for client in $clients; do local u - local admin=$(do_facet mgs $LCTL get_param -n \ - nodemap.c$cli_i.admin_nodemap) for u in ${client_user_list[$cli_i]}; do local run_u="do_node $client \ $RUNAS_CMD -u$u -g$u -G$u" @@ -1198,25 +1546,15 @@ test_fops() { local mode=$(printf %03o $perm_bits) local key key="$mapmode:$user:c$cli_i:$u:$mode" - do_facet mgs $LCTL nodemap_modify \ - --name c$cli_i \ - --property admin \ - --value 1 - do_servers_not_mgs $LCTL set_param \ - nodemap.c$cli_i.admin_nodemap=1 - do_node $client chmod $mode $DIR/$tdir \ - || error unable to chmod $key - do_facet mgs $LCTL nodemap_modify \ - --name c$cli_i \ - 
--property admin \ - --value $admin - do_servers_not_mgs $LCTL set_param \ - nodemap.c$cli_i.admin_nodemap=$admin - + test_fops_chmod_dir $cli_i $mode \ + $DIR/$tdir || + error cannot chmod $key do_create_delete "$run_u" "$key" done # check quota + test_fops_chmod_dir $cli_i 777 $DIR/$tdir || + error cannot chmod $key do_fops_quota_test "$run_u" done @@ -1238,7 +1576,9 @@ nodemap_version_check () { nodemap_test_setup() { local rc - local active_nodemap=$1 + local active_nodemap=1 + + [ "$1" == "0" ] && active_nodemap=0 do_nodes $(comma_list $(all_mdts_nodes)) \ $LCTL set_param mdt.*.identity_upcall=NONE @@ -1248,20 +1588,15 @@ nodemap_test_setup() { rc=$? [[ $rc != 0 ]] && error "adding fops nodemaps failed $rc" - if [ "$active_nodemap" == "0" ]; then - do_facet mgs $LCTL set_param nodemap.active=0 - do_servers_not_mgs $LCTL set_param nodemap.active=0 - return - fi + do_facet mgs $LCTL nodemap_activate $active_nodemap + wait_nm_sync active - do_facet mgs $LCTL nodemap_activate 1 - do_servers_not_mgs $LCTL set_param nodemap.active=1 do_facet mgs $LCTL nodemap_modify --name default \ --property admin --value 1 + wait_nm_sync default admin_nodemap do_facet mgs $LCTL nodemap_modify --name default \ --property trusted --value 1 - do_servers_not_mgs $LCTL set_param nodemap.default.admin_nodemap=1 - do_servers_not_mgs $LCTL set_param nodemap.default.trusted_nodemap=1 + wait_nm_sync default trusted_nodemap } nodemap_test_cleanup() { @@ -1270,6 +1605,17 @@ nodemap_test_cleanup() { rc=$? [[ $rc != 0 ]] && error "removing fops nodemaps failed $rc" + do_facet mgs $LCTL nodemap_modify --name default \ + --property admin --value 0 + wait_nm_sync default admin_nodemap + do_facet mgs $LCTL nodemap_modify --name default \ + --property trusted --value 0 + wait_nm_sync default trusted_nodemap + + do_facet mgs $LCTL nodemap_activate 0 + wait_nm_sync active 0 + + export SK_UNIQUE_NM=false return 0 } @@ -1280,14 +1626,12 @@ nodemap_clients_admin_trusted() { for client in $clients; do do_facet mgs $LCTL nodemap_modify --name c0 \ --property admin --value $admin - do_servers_not_mgs $LCTL set_param \ - nodemap.c${i}.admin_nodemap=$admin do_facet mgs $LCTL nodemap_modify --name c0 \ --property trusted --value $tr - do_servers_not_mgs $LCTL set_param \ - nodemap.c${i}.trusted_nodemap=$tr i=$((i + 1)) done + wait_nm_sync c$((i - 1)) admin_nodemap + wait_nm_sync c$((i - 1)) trusted_nodemap } test_16() { @@ -1301,6 +1645,11 @@ test_16() { run_test 16 "test nodemap all_off fileops" test_17() { + if $SHARED_KEY && + [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.55) ]; then + skip "Need MDS >= 2.11.55" + fi + nodemap_version_check || return 0 nodemap_test_setup @@ -1312,6 +1661,11 @@ test_17() { run_test 17 "test nodemap trusted_noadmin fileops" test_18() { + if $SHARED_KEY && + [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.55) ]; then + skip "Need MDS >= 2.11.55" + fi + nodemap_version_check || return 0 nodemap_test_setup @@ -1323,6 +1677,11 @@ test_18() { run_test 18 "test nodemap mapped_noadmin fileops" test_19() { + if $SHARED_KEY && + [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.55) ]; then + skip "Need MDS >= 2.11.55" + fi + nodemap_version_check || return 0 nodemap_test_setup @@ -1334,6 +1693,11 @@ test_19() { run_test 19 "test nodemap trusted_admin fileops" test_20() { + if $SHARED_KEY && + [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.55) ]; then + skip "Need MDS >= 2.11.55" + fi + nodemap_version_check || return 0 nodemap_test_setup @@ -1345,6 +1709,11 @@ 
test_20() { run_test 20 "test nodemap mapped_admin fileops" test_21() { + if $SHARED_KEY && + [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.55) ]; then + skip "Need MDS >= 2.11.55" + fi + nodemap_version_check || return 0 nodemap_test_setup @@ -1356,19 +1725,22 @@ test_21() { --property admin --value 0 do_facet mgs $LCTL nodemap_modify --name c${i} \ --property trusted --value $x - do_servers_not_mgs $LCTL set_param \ - nodemap.c${i}.admin_nodemap=0 - do_servers_not_mgs $LCTL set_param \ - nodemap.c${i}.trusted_nodemap=$x x=0 i=$((i + 1)) done + wait_nm_sync c$((i - 1)) trusted_nodemap + test_fops mapped_trusted_noadmin nodemap_test_cleanup } run_test 21 "test nodemap mapped_trusted_noadmin fileops" test_22() { + if $SHARED_KEY && + [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.55) ]; then + skip "Need MDS >= 2.11.55" + fi + nodemap_version_check || return 0 nodemap_test_setup @@ -1380,13 +1752,11 @@ test_22() { --property admin --value 1 do_facet mgs $LCTL nodemap_modify --name c${i} \ --property trusted --value $x - do_servers_not_mgs $LCTL set_param \ - nodemap.c${i}.admin_nodemap=1 - do_servers_not_mgs $LCTL set_param \ - nodemap.c${i}.trusted_nodemap=$x x=0 i=$((i + 1)) done + wait_nm_sync c$((i - 1)) trusted_nodemap + test_fops mapped_trusted_admin nodemap_test_cleanup } @@ -1394,14 +1764,16 @@ run_test 22 "test nodemap mapped_trusted_admin fileops" # acl test directory needs to be initialized on a privileged client nodemap_acl_test_setup() { - local admin=$(do_facet mgs $LCTL get_param -n nodemap.c0.admin_nodemap) + local admin=$(do_facet mgs $LCTL get_param -n \ + nodemap.c0.admin_nodemap) local trust=$(do_facet mgs $LCTL get_param -n \ nodemap.c0.trusted_nodemap) do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1 do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1 - do_servers_not_mgs $LCTL set_param nodemap.c0.admin_nodemap=1 - do_servers_not_mgs $LCTL set_param nodemap.c0.trusted_nodemap=1 + + wait_nm_sync c0 admin_nodemap + wait_nm_sync c0 trusted_nodemap do_node ${clients_arr[0]} rm -rf $DIR/$tdir nm_test_mkdir @@ -1412,9 +1784,8 @@ nodemap_acl_test_setup() { --property admin --value $admin do_facet mgs $LCTL nodemap_modify --name c0 \ --property trusted --value $trust - do_servers_not_mgs $LCTL set_param nodemap.c0.admin_nodemap=$admin - do_servers_not_mgs $LCTL set_param nodemap.c0.trusted_nodemap=$trust + wait_nm_sync c0 trusted_nodemap } # returns 0 if the number of ACLs does not change on the second (mapped) client @@ -1454,7 +1825,8 @@ nodemap_acl_test() { return 1 } -test_23() { +test_23a() { + [ $num_clients -lt 2 ] && skip "Need 2 clients at least" && return nodemap_version_check || return 0 nodemap_test_setup @@ -1468,13 +1840,11 @@ test_23() { do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1 do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1 - do_servers_not_mgs $LCTL set_param nodemap.c0.admin_nodemap=1 - do_servers_not_mgs $LCTL set_param nodemap.c0.trusted_nodemap=1 do_facet mgs $LCTL nodemap_modify --name c1 --property admin --value 0 do_facet mgs $LCTL nodemap_modify --name c1 --property trusted --value 0 - do_servers_not_mgs $LCTL set_param nodemap.c1.admin_nodemap=0 - do_servers_not_mgs $LCTL set_param nodemap.c1.trusted_nodemap=0 + + wait_nm_sync c1 trusted_nodemap # setfacl on trusted cluster to unmapped user, verify it's not seen nodemap_acl_test $unmapped_fs ${clients_arr[0]} ${clients_arr[1]} || @@ -1495,8 +1865,8 @@ test_23() { # 2 mapped 
clusters do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 0 do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 0 - do_servers_not_mgs $LCTL set_param nodemap.c0.admin_nodemap=0 - do_servers_not_mgs $LCTL set_param nodemap.c0.trusted_nodemap=0 + + wait_nm_sync c0 trusted_nodemap # setfacl to mapped user on c1, also mapped to c0, verify it's seen nodemap_acl_test $mapped_c1 ${clients_arr[1]} ${clients_arr[0]} && @@ -1508,25 +1878,783 @@ test_23() { nodemap_test_cleanup } -run_test 23 "test mapped ACLs" +run_test 23a "test mapped regular ACLs" + +test_23b() { #LU-9929 + [ $num_clients -lt 2 ] && skip "Need 2 clients at least" && return + [ $(lustre_version_code mgs) -lt $(version_code 2.10.53) ] && + skip "Need MGS >= 2.10.53" && return + + export SK_UNIQUE_NM=true + nodemap_test_setup + trap nodemap_test_cleanup EXIT + + local testdir=$DIR/$tdir + local fs_id=$((IDBASE+10)) + local unmapped_id + local mapped_id + local fs_user + + do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1 + wait_nm_sync c0 admin_nodemap + do_facet mgs $LCTL nodemap_modify --name c1 --property admin --value 1 + wait_nm_sync c1 admin_nodemap + do_facet mgs $LCTL nodemap_modify --name c1 --property trusted --value 1 + wait_nm_sync c1 trusted_nodemap + + # Add idmap $ID0:$fs_id (500:60010) + do_facet mgs $LCTL nodemap_add_idmap --name c0 --idtype gid \ + --idmap $ID0:$fs_id || + error "add idmap $ID0:$fs_id to nodemap c0 failed" + wait_nm_sync c0 idmap + + # set/getfacl default acl on client 1 (unmapped gid=500) + do_node ${clients_arr[0]} rm -rf $testdir + do_node ${clients_arr[0]} mkdir -p $testdir + # Here, USER0=$(getent passwd | grep :$ID0:$ID0: | cut -d: -f1) + do_node ${clients_arr[0]} setfacl -R -d -m group:$USER0:rwx $testdir || + error "setfacl $testdir on ${clients_arr[0]} failed" + unmapped_id=$(do_node ${clients_arr[0]} getfacl $testdir | + grep -E "default:group:.*:rwx" | awk -F: '{print $3}') + [ "$unmapped_id" = "$USER0" ] || + error "gid=$ID0 was not unmapped correctly on ${clients_arr[0]}" + + # getfacl default acl on client 2 (mapped gid=60010) + mapped_id=$(do_node ${clients_arr[1]} getfacl $testdir | + grep -E "default:group:.*:rwx" | awk -F: '{print $3}') + fs_user=$(do_node ${clients_arr[1]} getent passwd | + grep :$fs_id:$fs_id: | cut -d: -f1) + [ -z "$fs_user" ] && fs_user=$fs_id + [ $mapped_id -eq $fs_id -o "$mapped_id" = "$fs_user" ] || + error "Should return gid=$fs_id or $fs_user on client2" + + rm -rf $testdir + nodemap_test_cleanup + export SK_UNIQUE_NM=false +} +run_test 23b "test mapped default ACLs" test_24() { nodemap_test_setup trap nodemap_test_cleanup EXIT - do_nodes $(comma_list $(all_server_nodes)) $LCTL get_param -R nodemap || - error "proc readable file read failed" + do_nodes $(comma_list $(all_server_nodes)) $LCTL get_param -R nodemap nodemap_test_cleanup } run_test 24 "check nodemap proc files for LBUGs and Oopses" +test_25() { + local tmpfile=$(mktemp) + local tmpfile2=$(mktemp) + local tmpfile3=$(mktemp) + local tmpfile4=$(mktemp) + local subdir=c0dir + local client + + nodemap_version_check || return 0 + + # stop clients for this test + zconf_umount_clients $CLIENTS $MOUNT || + error "unable to umount clients $CLIENTS" + + export SK_UNIQUE_NM=true + nodemap_test_setup + + # enable trusted/admin for setquota call in cleanup_and_setup_lustre() + i=0 + for client in $clients; do + do_facet mgs $LCTL nodemap_modify --name c${i} \ + --property admin --value 1 + do_facet mgs $LCTL nodemap_modify --name c${i} \ + 
--property trusted --value 1 + ((i++)) + done + wait_nm_sync c$((i - 1)) trusted_nodemap + + trap nodemap_test_cleanup EXIT + + # create a new, empty nodemap, and add fileset info to it + do_facet mgs $LCTL nodemap_add test25 || + error "unable to create nodemap $testname" + do_facet mgs $LCTL set_param -P nodemap.$testname.fileset=/$subdir || + error "unable to add fileset info to nodemap test25" + + wait_nm_sync test25 id + + do_facet mgs $LCTL nodemap_info > $tmpfile + do_facet mds $LCTL nodemap_info > $tmpfile2 + + if ! $SHARED_KEY; then + # will conflict with SK's nodemaps + cleanup_and_setup_lustre + fi + # stop clients for this test + zconf_umount_clients $CLIENTS $MOUNT || + error "unable to umount clients $CLIENTS" + + do_facet mgs $LCTL nodemap_info > $tmpfile3 + diff -q $tmpfile3 $tmpfile >& /dev/null || + error "nodemap_info diff on MGS after remount" + + do_facet mds $LCTL nodemap_info > $tmpfile4 + diff -q $tmpfile4 $tmpfile2 >& /dev/null || + error "nodemap_info diff on MDS after remount" + + # cleanup nodemap + do_facet mgs $LCTL nodemap_del test25 || + error "cannot delete nodemap test25 from config" + nodemap_test_cleanup + # restart clients previously stopped + zconf_mount_clients $CLIENTS $MOUNT || + error "unable to mount clients $CLIENTS" + + rm -f $tmpfile $tmpfile2 + export SK_UNIQUE_NM=false +} +run_test 25 "test save and reload nodemap config" + +test_26() { + nodemap_version_check || return 0 + + local large_i=32000 + + do_facet mgs "seq -f 'c%g' $large_i | xargs -n1 $LCTL nodemap_add" + wait_nm_sync c$large_i admin_nodemap + + do_facet mgs "seq -f 'c%g' $large_i | xargs -n1 $LCTL nodemap_del" + wait_nm_sync c$large_i admin_nodemap +} +run_test 26 "test transferring very large nodemap" + +nodemap_exercise_fileset() { + local nm="$1" + local loop=0 + + # setup + if [ "$nm" == "default" ]; then + do_facet mgs $LCTL nodemap_activate 1 + wait_nm_sync active + else + nodemap_test_setup + fi + if $SHARED_KEY; then + export SK_UNIQUE_NM=true + else + # will conflict with SK's nodemaps + trap "fileset_test_cleanup $nm" EXIT + fi + fileset_test_setup "$nm" + + # add fileset info to $nm nodemap + if ! 
combined_mgs_mds; then + do_facet mgs $LCTL set_param nodemap.${nm}.fileset=/$subdir || + error "unable to add fileset info to $nm nodemap on MGS" + fi + do_facet mgs $LCTL set_param -P nodemap.${nm}.fileset=/$subdir || + error "unable to add fileset info to $nm nodemap for servers" + wait_nm_sync $nm fileset "nodemap.${nm}.fileset=/$subdir" + + # re-mount client + zconf_umount_clients ${clients_arr[0]} $MOUNT || + error "unable to umount client ${clients_arr[0]}" + # set some generic fileset to trigger SSK code + export FILESET=/ + zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS || + error "unable to remount client ${clients_arr[0]}" + unset FILESET + + # test mount point content + do_node ${clients_arr[0]} test -f $MOUNT/this_is_$subdir || + error "fileset not taken into account" + + # re-mount client with sub-subdir + zconf_umount_clients ${clients_arr[0]} $MOUNT || + error "unable to umount client ${clients_arr[0]}" + export FILESET=/$subsubdir + zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS || + error "unable to remount client ${clients_arr[0]}" + unset FILESET + + # test mount point content + do_node ${clients_arr[0]} test -f $MOUNT/this_is_$subsubdir || + error "subdir of fileset not taken into account" + + # remove fileset info from nodemap + do_facet mgs $LCTL nodemap_set_fileset --name $nm --fileset clear || + error "unable to delete fileset info on $nm nodemap" + wait_update_facet mgs "$LCTL get_param nodemap.${nm}.fileset" \ + "nodemap.${nm}.fileset=" || + error "fileset info still not cleared on $nm nodemap" + do_facet mgs $LCTL set_param -P nodemap.${nm}.fileset=clear || + error "unable to reset fileset info on $nm nodemap" + wait_nm_sync $nm fileset "nodemap.${nm}.fileset=" + + # re-mount client + zconf_umount_clients ${clients_arr[0]} $MOUNT || + error "unable to umount client ${clients_arr[0]}" + zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS || + error "unable to remount client ${clients_arr[0]}" + + # test mount point content + if ! 
$(do_node ${clients_arr[0]} test -d $MOUNT/$subdir); then + ls $MOUNT + error "fileset not cleared on $nm nodemap" + fi + + # back to non-nodemap setup + if $SHARED_KEY; then + export SK_UNIQUE_NM=false + zconf_umount_clients ${clients_arr[0]} $MOUNT || + error "unable to umount client ${clients_arr[0]}" + fi + fileset_test_cleanup "$nm" + if [ "$nm" == "default" ]; then + do_facet mgs $LCTL nodemap_activate 0 + wait_nm_sync active 0 + trap 0 + export SK_UNIQUE_NM=false + else + nodemap_test_cleanup + fi + if $SHARED_KEY; then + zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS || + error "unable to remount client ${clients_arr[0]}" + fi +} + +test_27a() { + [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.50) ] && + skip "Need MDS >= 2.11.50" && return + + for nm in "default" "c0"; do + local subdir="subdir_${nm}" + local subsubdir="subsubdir_${nm}" + + if [ "$nm" == "default" ] && [ "$SHARED_KEY" == "true" ]; then + echo "Skipping nodemap $nm with SHARED_KEY"; + continue; + fi + + echo "Exercising fileset for nodemap $nm" + nodemap_exercise_fileset "$nm" + done +} +run_test 27a "test fileset in various nodemaps" + +test_27b() { #LU-10703 + [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.50) ] && + skip "Need MDS >= 2.11.50" && return + [[ $MDSCOUNT -lt 2 ]] && skip "needs >= 2 MDTs" && return + + nodemap_test_setup + trap nodemap_test_cleanup EXIT + + # Add the nodemaps and set their filesets + for i in $(seq 1 $MDSCOUNT); do + do_facet mgs $LCTL nodemap_del nm$i 2>/dev/null + do_facet mgs $LCTL nodemap_add nm$i || + error "add nodemap nm$i failed" + wait_nm_sync nm$i "" "" "-N" + + if ! combined_mgs_mds; then + do_facet mgs \ + $LCTL set_param nodemap.nm$i.fileset=/dir$i || + error "set nm$i.fileset=/dir$i failed on MGS" + fi + do_facet mgs $LCTL set_param -P nodemap.nm$i.fileset=/dir$i || + error "set nm$i.fileset=/dir$i failed on servers" + wait_nm_sync nm$i fileset "nodemap.nm$i.fileset=/dir$i" + done + + # Check if all the filesets are correct + for i in $(seq 1 $MDSCOUNT); do + fileset=$(do_facet mds$i \ + $LCTL get_param -n nodemap.nm$i.fileset) + [ "$fileset" = "/dir$i" ] || + error "nm$i.fileset $fileset != /dir$i on mds$i" + do_facet mgs $LCTL nodemap_del nm$i || + error "delete nodemap nm$i failed" + done + + nodemap_test_cleanup +} +run_test 27b "The new nodemap won't clear the old nodemap's fileset" + +test_28() { + if ! $SHARED_KEY; then + skip "need shared key feature for this test" && return + fi + mkdir -p $DIR/$tdir || error "mkdir failed" + touch $DIR/$tdir/$tdir.out || error "touch failed" + if [ ! -f $DIR/$tdir/$tdir.out ]; then + error "read before rotation failed" + fi + # store top key identity to ensure rotation has occurred + SK_IDENTITY_OLD=$(lctl get_param *.*.*srpc* | grep "expire" | + head -1 | awk '{print $15}' | cut -c1-8) + do_facet $SINGLEMDS lfs flushctx || + error "could not run flushctx on $SINGLEMDS" + sleep 5 + lfs flushctx || error "could not run flushctx on client" + sleep 5 + # verify new key is in place + SK_IDENTITY_NEW=$(lctl get_param *.*.*srpc* | grep "expire" | + head -1 | awk '{print $15}' | cut -c1-8) + if [ $SK_IDENTITY_OLD == $SK_IDENTITY_NEW ]; then + error "key did not rotate correctly" + fi + if [ ! -f $DIR/$tdir/$tdir.out ]; then + error "read after rotation failed" + fi +} +run_test 28 "check shared key rotation method" + +test_29() { + if ! 
$SHARED_KEY; then + skip "need shared key feature for this test" && return + fi + if [ $SK_FLAVOR != "ski" ] && [ $SK_FLAVOR != "skpi" ]; then + skip "test only valid if integrity is active" + fi + rm -r $DIR/$tdir + mkdir $DIR/$tdir || error "mkdir" + touch $DIR/$tdir/$tfile || error "touch" + zconf_umount_clients ${clients_arr[0]} $MOUNT || + error "unable to umount clients" + keyctl show | awk '/lustre/ { print $1 }' | + xargs -IX keyctl unlink X + OLD_SK_PATH=$SK_PATH + export SK_PATH=/dev/null + if zconf_mount_clients ${clients_arr[0]} $MOUNT; then + export SK_PATH=$OLD_SK_PATH + if [ -e $DIR/$tdir/$tfile ]; then + error "able to mount and read without key" + else + error "able to mount without key" + fi + else + export SK_PATH=$OLD_SK_PATH + keyctl show | awk '/lustre/ { print $1 }' | + xargs -IX keyctl unlink X + fi +} +run_test 29 "check for missing shared key" + +test_30() { + if ! $SHARED_KEY; then + skip "need shared key feature for this test" && return + fi + if [ $SK_FLAVOR != "ski" ] && [ $SK_FLAVOR != "skpi" ]; then + skip "test only valid if integrity is active" + fi + mkdir -p $DIR/$tdir || error "mkdir failed" + touch $DIR/$tdir/$tdir.out || error "touch failed" + zconf_umount_clients ${clients_arr[0]} $MOUNT || + error "unable to umount clients" + # unload keys from ring + keyctl show | awk '/lustre/ { print $1 }' | + xargs -IX keyctl unlink X + # invalidate the key with bogus filesystem name + lgss_sk -w $SK_PATH/$FSNAME-bogus.key -f $FSNAME.bogus \ + -t client -d /dev/urandom || error "lgss_sk failed (1)" + do_facet $SINGLEMDS lfs flushctx || error "could not run flushctx" + OLD_SK_PATH=$SK_PATH + export SK_PATH=$SK_PATH/$FSNAME-bogus.key + if zconf_mount_clients ${clients_arr[0]} $MOUNT; then + SK_PATH=$OLD_SK_PATH + if [ -a $DIR/$tdir/$tdir.out ]; then + error "mount and read file with invalid key" + else + error "mount with invalid key" + fi + fi + SK_PATH=$OLD_SK_PATH + zconf_umount_clients ${clients_arr[0]} $MOUNT || + error "unable to umount clients" +} +run_test 30 "check for invalid shared key" + +cleanup_31() { + # unmount client + zconf_umount $HOSTNAME $MOUNT || error "unable to umount client" + + # remove ${NETTYPE}999 network on all nodes + do_nodes $(comma_list $(all_nodes)) \ + "$LNETCTL net del --net ${NETTYPE}999 && \ + $LNETCTL lnet unconfigure 2>/dev/null || true" + + # necessary to do writeconf in order to de-register + # @${NETTYPE}999 nid for targets + KZPOOL=$KEEP_ZPOOL + export KEEP_ZPOOL="true" + stopall + export SK_MOUNTED=false + writeconf_all + setupall || echo 1 + export KEEP_ZPOOL="$KZPOOL" +} + +test_31() { + local nid=$(lctl list_nids | grep ${NETTYPE} | head -n1) + local addr=${nid%@*} + local net=${nid#*@} + + export LNETCTL=$(which lnetctl 2> /dev/null) + + [ -z "$LNETCTL" ] && skip "without lnetctl support." && return + local_mode && skip "in local mode." 
+ + stack_trap cleanup_31 EXIT + + # umount client + if [ "$MOUNT_2" ] && $(grep -q $MOUNT2' ' /proc/mounts); then + umount_client $MOUNT2 || error "umount $MOUNT2 failed" + fi + if $(grep -q $MOUNT' ' /proc/mounts); then + umount_client $MOUNT || error "umount $MOUNT failed" + fi + + # check exports on servers are empty for client + do_facet mgs "lctl get_param -n *.MGS*.exports.'$nid'.uuid 2>/dev/null | + grep -q -" && error "export on MGS should be empty" + do_nodes $(comma_list $(mdts_nodes) $(osts_nodes)) \ + "lctl get_param -n *.${FSNAME}*.exports.'$nid'.uuid \ + 2>/dev/null | grep -q -" && + error "export on servers should be empty" + + # add network ${NETTYPE}999 on all nodes + do_nodes $(comma_list $(all_nodes)) \ + "$LNETCTL lnet configure && $LNETCTL net add --if \ + \$($LNETCTL net show --net $net | awk 'BEGIN{inf=0} \ + {if (inf==1) print \$2; fi; inf=0} /interfaces/{inf=1}') \ + --net ${NETTYPE}999" || + error "unable to configure NID ${NETTYPE}999" + + # necessary to do writeconf in order to register + # new @${NETTYPE}999 nid for targets + KZPOOL=$KEEP_ZPOOL + export KEEP_ZPOOL="true" + stopall + export SK_MOUNTED=false + writeconf_all + setupall server_only || echo 1 + export KEEP_ZPOOL="$KZPOOL" + + # backup MGSNID + local mgsnid_orig=$MGSNID + # compute new MGSNID + MGSNID=$(do_facet mgs "$LCTL list_nids | grep ${NETTYPE}999") + + # on client, turn LNet Dynamic Discovery on + lnetctl set discovery 1 + + # mount client with -o network=${NETTYPE}999 option: + # should fail because of LNet Dynamic Discovery + mount_client $MOUNT ${MOUNT_OPTS},network=${NETTYPE}999 && + error "client mount with '-o network' option should be refused" + + # on client, reconfigure LNet and turn LNet Dynamic Discovery off + $LNETCTL net del --net ${NETTYPE}999 && lnetctl lnet unconfigure + lustre_rmmod + modprobe lnet + lnetctl set discovery 0 + modprobe ptlrpc + $LNETCTL lnet configure && $LNETCTL net add --if \ + $($LNETCTL net show --net $net | awk 'BEGIN{inf=0} \ + {if (inf==1) print $2; fi; inf=0} /interfaces/{inf=1}') \ + --net ${NETTYPE}999 || + error "unable to configure NID ${NETTYPE}999 on client" + + # mount client with -o network=${NETTYPE}999 option + mount_client $MOUNT ${MOUNT_OPTS},network=${NETTYPE}999 || + error "unable to remount client" + + # restore MGSNID + MGSNID=$mgsnid_orig + + # check export on MGS + do_facet mgs "lctl get_param -n *.MGS*.exports.'$nid'.uuid 2>/dev/null | + grep -q -" + [ $? -ne 0 ] || error "export for $nid on MGS should not exist" + + do_facet mgs \ + "lctl get_param -n *.MGS*.exports.'${addr}@${NETTYPE}999'.uuid \ + 2>/dev/null | grep -q -" + [ $? -eq 0 ] || + error "export for ${addr}@${NETTYPE}999 on MGS should exist" + + # check {mdc,osc} imports + lctl get_param mdc.${FSNAME}-*.import | grep current_connection | + grep -q ${NETTYPE}999 + [ $? -eq 0 ] || + error "import for mdc should use ${addr}@${NETTYPE}999" + lctl get_param osc.${FSNAME}-*.import | grep current_connection | + grep -q ${NETTYPE}999 + [ $? -eq 0 ] || + error "import for osc should use ${addr}@${NETTYPE}999" +} +run_test 31 "client mount option '-o network'" + +cleanup_32() { + # umount client + zconf_umount_clients ${clients_arr[0]} $MOUNT + + # disable sk flavor enforcement on MGS + set_rule _mgs any any null + + # stop gss daemon on MGS + if ! combined_mgs_mds ; then + send_sigint $mgs_HOST lsvcgssd + fi + + # re-mount client + MOUNT_OPTS=$(add_sk_mntflag $MOUNT_OPTS) + mountcli + + restore_to_default_flavor +} + +test_32() { + if ! 
$SHARED_KEY; then + skip "need shared key feature for this test" + fi + + stack_trap cleanup_32 EXIT + + # restore to default null flavor + save_flvr=$SK_FLAVOR + SK_FLAVOR=null + restore_to_default_flavor || error "cannot set null flavor" + SK_FLAVOR=$save_flvr + + # umount client + if [ "$MOUNT_2" ] && $(grep -q $MOUNT2' ' /proc/mounts); then + umount_client $MOUNT2 || error "umount $MOUNT2 failed" + fi + if $(grep -q $MOUNT' ' /proc/mounts); then + umount_client $MOUNT || error "umount $MOUNT failed" + fi + + # start gss daemon on MGS + if combined_mgs_mds ; then + send_sigint $mds_HOST lsvcgssd + fi + start_gss_daemons $mgs_HOST "$LSVCGSSD -vvv -s -g" + + # add mgs key type and MGS NIDs in key on MGS + do_nodes $mgs_HOST "lgss_sk -t mgs,server -g $MGSNID -m \ + $SK_PATH/$FSNAME.key >/dev/null 2>&1" || + error "could not modify keyfile on MGS" + + # load modified key file on MGS + do_nodes $mgs_HOST "lgss_sk -l $SK_PATH/$FSNAME.key >/dev/null 2>&1" || + error "could not load keyfile on MGS" + + # add MGS NIDs in key on client + do_nodes ${clients_arr[0]} "lgss_sk -g $MGSNID -m \ + $SK_PATH/$FSNAME.key >/dev/null 2>&1" || + error "could not modify keyfile on MGS" + + # set perms for per-nodemap keys else permission denied + do_nodes $(comma_list $(all_nodes)) \ + "keyctl show | grep lustre | cut -c1-11 | + sed -e 's/ //g;' | + xargs -IX keyctl setperm X 0x3f3f3f3f" + + # re-mount client with mgssec=skn + save_opts=$MOUNT_OPTS + if [ -z "$MOUNT_OPTS" ]; then + MOUNT_OPTS="-o mgssec=skn" + else + MOUNT_OPTS="$MOUNT_OPTS,mgssec=skn" + fi + zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS || + error "mount ${clients_arr[0]} with mgssec=skn failed" + MOUNT_OPTS=$save_opts + + # umount client + zconf_umount_clients ${clients_arr[0]} $MOUNT || + error "umount ${clients_arr[0]} failed" + + # enforce ska flavor on MGS + set_rule _mgs any any ska + + # re-mount client without mgssec + zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS && + error "mount ${clients_arr[0]} without mgssec should fail" + + # re-mount client with mgssec=skn + save_opts=$MOUNT_OPTS + if [ -z "$MOUNT_OPTS" ]; then + MOUNT_OPTS="-o mgssec=skn" + else + MOUNT_OPTS="$MOUNT_OPTS,mgssec=skn" + fi + zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS && + error "mount ${clients_arr[0]} with mgssec=skn should fail" + MOUNT_OPTS=$save_opts + + # re-mount client with mgssec=ska + save_opts=$MOUNT_OPTS + if [ -z "$MOUNT_OPTS" ]; then + MOUNT_OPTS="-o mgssec=ska" + else + MOUNT_OPTS="$MOUNT_OPTS,mgssec=ska" + fi + zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS || + error "mount ${clients_arr[0]} with mgssec=ska failed" + MOUNT_OPTS=$save_opts + + exit 0 +} +run_test 32 "check for mgssec" + +cleanup_33() { + # disable sk flavor enforcement + set_rule $FSNAME any cli2mdt null + wait_flavor cli2mdt null + + # umount client + zconf_umount_clients ${clients_arr[0]} $MOUNT + + # stop gss daemon on MGS + if ! combined_mgs_mds ; then + send_sigint $mgs_HOST lsvcgssd + fi + + # re-mount client + MOUNT_OPTS=$(add_sk_mntflag $MOUNT_OPTS) + mountcli + + restore_to_default_flavor +} + +test_33() { + if ! 
$SHARED_KEY; then + skip "need shared key feature for this test" + fi + + stack_trap cleanup_33 EXIT + + # restore to default null flavor + save_flvr=$SK_FLAVOR + SK_FLAVOR=null + restore_to_default_flavor || error "cannot set null flavor" + SK_FLAVOR=$save_flvr + + # umount client + if [ "$MOUNT_2" ] && $(grep -q $MOUNT2' ' /proc/mounts); then + umount_client $MOUNT2 || error "umount $MOUNT2 failed" + fi + if $(grep -q $MOUNT' ' /proc/mounts); then + umount_client $MOUNT || error "umount $MOUNT failed" + fi + + # start gss daemon on MGS + if combined_mgs_mds ; then + send_sigint $mds_HOST lsvcgssd + fi + start_gss_daemons $mgs_HOST "$LSVCGSSD -vvv -s -g" + + # add mgs key type and MGS NIDs in key on MGS + do_nodes $mgs_HOST "lgss_sk -t mgs,server -g $MGSNID -m \ + $SK_PATH/$FSNAME.key >/dev/null 2>&1" || + error "could not modify keyfile on MGS" + + # load modified key file on MGS + do_nodes $mgs_HOST "lgss_sk -l $SK_PATH/$FSNAME.key >/dev/null 2>&1" || + error "could not load keyfile on MGS" + + # add MGS NIDs in key on client + do_nodes ${clients_arr[0]} "lgss_sk -g $MGSNID -m \ + $SK_PATH/$FSNAME.key >/dev/null 2>&1" || + error "could not modify keyfile on MGS" + + # set perms for per-nodemap keys else permission denied + do_nodes $(comma_list $(all_nodes)) \ + "keyctl show | grep lustre | cut -c1-11 | + sed -e 's/ //g;' | + xargs -IX keyctl setperm X 0x3f3f3f3f" + + # re-mount client with mgssec=skn + save_opts=$MOUNT_OPTS + if [ -z "$MOUNT_OPTS" ]; then + MOUNT_OPTS="-o mgssec=skn" + else + MOUNT_OPTS="$MOUNT_OPTS,mgssec=skn" + fi + zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS || + error "mount ${clients_arr[0]} with mgssec=skn failed" + MOUNT_OPTS=$save_opts + + # enforce ska flavor for cli2mdt + set_rule $FSNAME any cli2mdt ska + wait_flavor cli2mdt ska + + # check error message + $LCTL dk | grep "faked source" && + error "MGS connection srpc flags incorrect" + + exit 0 +} +run_test 33 "correct srpc flags for MGS connection" + +cleanup_34_deny() { + # restore deny_unknown + do_facet mgs $LCTL nodemap_modify --name default \ + --property deny_unknown --value $denydefault + if [ $? 
-ne 0 ]; then + error_noexit "cannot reset deny_unknown on default nodemap" + return + fi + + wait_nm_sync default deny_unknown +} + +test_34() { + local denynew + local activedefault + + [ $MGS_VERSION -lt $(version_code 2.12.51) ] && + skip "deny_unknown on default nm not supported before 2.12.51" + + activedefault=$(do_facet mgs $LCTL get_param -n nodemap.active) + + if [[ "$activedefault" != "1" ]]; then + do_facet mgs $LCTL nodemap_activate 1 + wait_nm_sync active + stack_trap cleanup_active EXIT + fi + + denydefault=$(do_facet mgs $LCTL get_param -n \ + nodemap.default.deny_unknown) + [ -z "$denydefault" ] && + error "cannot get deny_unknown on default nodemap" + if [ "$denydefault" -eq 0 ]; then + denynew=1; + else + denynew=0; + fi + + do_facet mgs $LCTL nodemap_modify --name default \ + --property deny_unknown --value $denynew || + error "cannot set deny_unknown on default nodemap" + + [ "$(do_facet mgs $LCTL get_param -n nodemap.default.deny_unknown)" \ + -eq $denynew ] || + error "setting deny_unknown on default nodemap did not work" + + stack_trap cleanup_34_deny EXIT + + wait_nm_sync default deny_unknown +} +run_test 34 "deny_unknown on default nodemap" + log "cleanup: ======================================================" sec_unsetup() { ## nodemap deactivated - do_facet mgs lctl nodemap_activate 0 - for num in $(seq $MDSCOUNT); do if [ "${identity_old[$num]}" = 1 ]; then switch_identity $num false || identity_old[$num]=$? @@ -1538,7 +2666,6 @@ sec_unsetup() { } sec_unsetup -sec_cleanup - complete $SECONDS +check_and_cleanup_lustre exit_status
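For context, the top of the patched script wires the ONLY, EXCEPT and SLOW variables into test selection (EXCEPT_SLOW=26 when SLOW=no, and the whole nodemap range 7-26 is excluded when check_versions fails). A minimal usage sketch, assuming a test cluster already configured through the usual cfg/$NAME.sh and test-framework.sh setup; the invocations below are illustrative, not part of the patch:

# hypothetical examples, run from the lustre/tests directory of a configured setup
ONLY=15 SLOW=no sh sanity-sec.sh              # run only the id-mapping test
ONLY="10a 10b 10c 10d" sh sanity-sec.sh       # run the nodemap range tests added here
EXCEPT="26" sh sanity-sec.sh                  # skip the very large nodemap transfer test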