From: Kit Westneat
Date: Tue, 8 Jul 2014 18:57:40 +0000 (-0400)
Subject: LU-4647 nodemap: add tests to sanity-sec for nodemap mapping
X-Git-Tag: 2.6.92~65
X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=commitdiff_plain;h=50f95dd6e5159981f6b950d2fab674955442603b

LU-4647 nodemap: add tests to sanity-sec for nodemap mapping

Added tests to sanity-sec.sh, as outlined in the original nodemap spec.
The tests currently work only with a single OSS node; this will be
fixed in a future update. The tests exercise basic permission and quota
handling in the nodemap code, as well as ACL mapping.

Signed-off-by: Kit Westneat
Change-Id: Ieb29091e5b3110593973a5eb03680e86a769b449
Reviewed-on: http://review.whamcloud.com/10406
Reviewed-by: Andreas Dilger
Tested-by: Jenkins
Tested-by: Maloo
Reviewed-by: Oleg Drokin
---

diff --git a/lustre/ptlrpc/nodemap_member.c b/lustre/ptlrpc/nodemap_member.c
index a4e5063..9871f8d 100644
--- a/lustre/ptlrpc/nodemap_member.c
+++ b/lustre/ptlrpc/nodemap_member.c
@@ -272,14 +272,25 @@ out:
 	return 0;
 }
 
+/* Mutex used to serialize nodemap member reclassification */
 DEFINE_MUTEX(reclassify_nodemap_lock);
+
 /**
- * Reclassify the members of a nodemap after range changes or activation,
- * based on the member export's NID and the nodemap's new NID ranges. Exports
- * that are no longer classified as being part of this nodemap are moved to the
- * nodemap whose NID ranges contain the export's NID, and their locks are
- * revoked.
+ * Reclassify the members of a nodemap after range changes or activation.
+ * This function reclassifies the members of a nodemap based on the member
+ * export's NID and the nodemap's new NID ranges. Exports that are no longer
+ * classified as being part of this nodemap are moved to the nodemap whose
+ * NID ranges contain the export's NID, and their locks are revoked.
+ *
+ * Calls to this function are serialized due to a potential deadlock: say there
+ * is a nodemap A and a nodemap B that both need to reclassify their members.
+ * If there is a member in nodemap A that should be in nodemap B, reclassify
+ * will attempt to add the member to nodemap B. If nodemap B is also
+ * reclassifying its members, then its hash is locked and nodemap A's attempt
+ * to add will block and wait for nodemap B's reclassify to finish. If
+ * nodemap B's reclassify then attempts to reclassify a member that should be
+ * in nodemap A, it will also try to add the member to nodemap A's locked hash,
+ * causing a deadlock.
  *
  * \param nodemap	nodemap with members to reclassify
  */
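Before the test changes below, it may help to see the administrative workflow the new tests automate. This sketch uses only lctl nodemap commands that appear in the diff; the nodemap name, NID, and id values are illustrative, and the commands are assumed to run on the MGS:

    # Illustrative only: create a nodemap, attach a client NID range,
    # add a uid mapping, set its properties, and activate the feature.
    lctl nodemap_add c0
    lctl nodemap_add_range --name c0 --range 192.168.1.10@tcp
    lctl nodemap_add_idmap --name c0 --idtype uid --idmap 60003:60000
    lctl nodemap_modify --name c0 --property admin --value 0
    lctl nodemap_modify --name c0 --property trusted --value 0
    lctl nodemap_activate 1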
diff --git a/lustre/tests/sanity-sec.sh b/lustre/tests/sanity-sec.sh
index b67ef95..3a478f2 100644
--- a/lustre/tests/sanity-sec.sh
+++ b/lustre/tests/sanity-sec.sh
@@ -12,19 +12,19 @@ ALWAYS_EXCEPT=" 2 4 5 6 $SANITY_SEC_EXCEPT"
 # UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
 
 [ "$ALWAYS_EXCEPT$EXCEPT" ] && \
-    echo "Skipping tests: $ALWAYS_EXCEPT $EXCEPT"
+	echo "Skipping tests: $ALWAYS_EXCEPT $EXCEPT"
 
 SRCDIR=`dirname $0`
 export PATH=$PWD/$SRCDIR:$SRCDIR:$PWD/$SRCDIR/../utils:$PATH:/sbin
 export NAME=${NAME:-local}
 
-LUSTRE=${LUSTRE:-`dirname $0`/..}
+LUSTRE=${LUSTRE:-$(dirname $0)/..}
 . $LUSTRE/tests/test-framework.sh
 init_test_env $@
 . ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
 init_logging
 
-RUNAS="runas"
+RUNAS_CMD=${RUNAS_CMD:-runas}
 
 WTL=${WTL:-"$LUSTRE/tests/write_time_limit"}
 
@@ -42,16 +42,28 @@ NODEMAP_MAX_ID=128
 require_dsh_mds || exit 0
 require_dsh_ost || exit 0
 
+clients=${CLIENTS//,/ }
+num_clients=$(get_node_count ${clients})
+clients_arr=($clients)
+
 ID0=${ID0:-500}
 ID1=${ID1:-501}
 
-USER0=`cat /etc/passwd|grep :$ID0:$ID0:|cut -d: -f1`
-USER1=`cat /etc/passwd|grep :$ID1:$ID1:|cut -d: -f1`
+USER0=$(grep :$ID0:$ID0: /etc/passwd | cut -d: -f1)
+USER1=$(grep :$ID1:$ID1: /etc/passwd | cut -d: -f1)
+
+[ -z "$USER0" ] &&
+	skip "need to add user0 ($ID0:$ID0) to /etc/passwd" && exit 0
 
-[ -z "$USER0" ] && \
-	echo "Please add user0 (uid=$ID0 gid=$ID0)! Skip sanity-sec" && exit 0
+[ -z "$USER1" ] &&
+	skip "need to add user1 ($ID1:$ID1) to /etc/passwd" && exit 0
 
-[ -z "$USER1" ] && \
-	echo "Please add user1 (uid=$ID1 gid=$ID1)! Skip sanity-sec" && exit 0
+IDBASE=${IDBASE:-60000}
+
+# changes to mappings must be reflected in test 23
+FOPS_IDMAPS=(
+	[0]="$((IDBASE+3)):$((IDBASE+0)) $((IDBASE+4)):$((IDBASE+2))"
+	[1]="$((IDBASE+5)):$((IDBASE+1)) $((IDBASE+6)):$((IDBASE+2))"
+	)
 
 check_and_setup_lustre
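As an aside on the table above: with the default IDBASE of 60000, the idmap strings expand as shown by this stand-alone snippet (illustrative, not part of the patch):

    IDBASE=60000
    FOPS_IDMAPS=(
        [0]="$((IDBASE+3)):$((IDBASE+0)) $((IDBASE+4)):$((IDBASE+2))"
        [1]="$((IDBASE+5)):$((IDBASE+1)) $((IDBASE+6)):$((IDBASE+2))"
    )
    # prints "c0: 60003:60000 60004:60002" then "c1: 60005:60001 60006:60002"
    for i in 0 1; do echo "c$i: ${FOPS_IDMAPS[i]}"; done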
@@ -86,12 +98,14 @@ MDT=$(do_facet $SINGLEMDS lctl get_param -N "mdt.\*MDT0000" |
 [ -z "$MDT" ] && error "fail to get MDT device" && exit 1
 do_facet $SINGLEMDS "mkdir -p $CONFDIR"
 IDENTITY_FLUSH=mdt.$MDT.identity_flush
+IDENTITY_UPCALL=mdt.$MDT.identity_upcall
 MDSCAPA=mdt.$MDT.capa
 CAPA_TIMEOUT=mdt.$MDT.capa_timeout
 MDSSECLEVEL=mdt.$MDT.sec_level
 
 # for CLIENT_TYPE
-if [ -z "$(lctl get_param -n llite.*.client_type | grep remote 2>/dev/null)" ]; then
+if [ -z "$(lctl get_param -n llite.*.client_type | grep remote 2>/dev/null)" ]
+then
 	CLIENT_TYPE="local"
 	echo "local client"
 else
@@ -107,15 +121,15 @@ sec_login() {
 	local user=$1
 	local group=$2
 
-	if ! $RUNAS -u $user krb5_login.sh; then
+	if ! $RUNAS_CMD -u $user krb5_login.sh; then
 		error "$user login kerberos failed."
 		exit 1
 	fi
 
-	if ! $RUNAS -u $user -g $group ls $DIR > /dev/null 2>&1; then
-		$RUNAS -u $user lfs flushctx -k
-		$RUNAS -u $user krb5_login.sh
-		if ! $RUNAS -u $user -g $group ls $DIR > /dev/null 2>&1; then
+	if ! $RUNAS_CMD -u $user -g $group ls $DIR > /dev/null 2>&1; then
+		$RUNAS_CMD -u $user lfs flushctx -k
+		$RUNAS_CMD -u $user krb5_login.sh
+		if ! $RUNAS_CMD -u$user -g$group ls $DIR > /dev/null 2>&1; then
 			error "init $user $group failed."
 			exit 2
 		fi
@@ -129,11 +143,11 @@ sec_setup() {
 		switch_identity $num true || identity_old[$num]=$?
 	done
 
-	if ! $RUNAS -u $ID0 ls $DIR > /dev/null 2>&1; then
+	if ! $RUNAS_CMD -u $ID0 ls $DIR > /dev/null 2>&1; then
 		sec_login $USER0 $USER0
 	fi
 
-	if ! $RUNAS -u $ID1 ls $DIR > /dev/null 2>&1; then
+	if ! $RUNAS_CMD -u $ID1 ls $DIR > /dev/null 2>&1; then
 		sec_login $USER1 $USER1
 	fi
 }
@@ -157,17 +171,17 @@ test_0() {
 		chown $USER0 $DIR/$tdir || error "chown (2)"
 	fi
 
-	$RUNAS -u $ID0 ls $DIR || error "ls (1)"
+	$RUNAS_CMD -u $ID0 ls $DIR || error "ls (1)"
 	rm -f $DIR/f0 || error "rm (2)"
-	$RUNAS -u $ID0 touch $DIR/f0 && error "touch (1)"
-	$RUNAS -u $ID0 touch $DIR/$tdir/f1 || error "touch (2)"
-	$RUNAS -u $ID1 touch $DIR/$tdir/f2 && error "touch (3)"
+	$RUNAS_CMD -u $ID0 touch $DIR/f0 && error "touch (1)"
+	$RUNAS_CMD -u $ID0 touch $DIR/$tdir/f1 || error "touch (2)"
+	$RUNAS_CMD -u $ID1 touch $DIR/$tdir/f2 && error "touch (3)"
 	touch $DIR/$tdir/f3 || error "touch (4)"
 	chown root $DIR/$tdir || error "chown (3)"
 	chgrp $USER0 $DIR/$tdir || error "chgrp (1)"
 	chmod 0775 $DIR/$tdir || error "chmod (2)"
-	$RUNAS -u $ID0 touch $DIR/$tdir/f4 || error "touch (5)"
-	$RUNAS -u $ID1 touch $DIR/$tdir/f5 && error "touch (6)"
+	$RUNAS_CMD -u $ID0 touch $DIR/$tdir/f4 || error "touch (5)"
+	$RUNAS_CMD -u $ID1 touch $DIR/$tdir/f5 && error "touch (6)"
 	touch $DIR/$tdir/f6 || error "touch (7)"
 
 	rm -rf $DIR/$tdir || error "rm (3)"
@@ -191,22 +205,24 @@ test_1() {
 	mkdir -p $DIR/$tdir
 	chown $USER0 $DIR/$tdir || error "chown (1)"
 
-	$RUNAS -u $ID1 -v $ID0 touch $DIR/$tdir/f0 && error "touch (2)"
+	$RUNAS_CMD -u $ID1 -v $ID0 touch $DIR/$tdir/f0 && error "touch (2)"
 	echo "enable uid $ID1 setuid"
 	do_facet $SINGLEMDS "echo '* $ID1 setuid' >> $PERM_CONF"
 	do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
-	$RUNAS -u $ID1 -v $ID0 touch $DIR/$tdir/f1 || error "touch (3)"
+	$RUNAS_CMD -u $ID1 -v $ID0 touch $DIR/$tdir/f1 || error "touch (3)"
 
 	chown root $DIR/$tdir || error "chown (4)"
 	chgrp $USER0 $DIR/$tdir || error "chgrp (5)"
 	chmod 0770 $DIR/$tdir || error "chmod (6)"
-	$RUNAS -u $ID1 -g $ID1 touch $DIR/$tdir/f2 && error "touch (7)"
-	$RUNAS -u $ID1 -g $ID1 -j $ID0 touch $DIR/$tdir/f3 && error "touch (8)"
+	$RUNAS_CMD -u $ID1 -g $ID1 touch $DIR/$tdir/f2 && error "touch (7)"
+	$RUNAS_CMD -u$ID1 -g$ID1 -j$ID0 touch $DIR/$tdir/f3 && error "touch (8)"
 
 	echo "enable uid $ID1 setuid,setgid"
 	do_facet $SINGLEMDS "echo '* $ID1 setuid,setgid' > $PERM_CONF"
 	do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
-	$RUNAS -u $ID1 -g $ID1 -j $ID0 touch $DIR/$tdir/f4 || error "touch (9)"
-	$RUNAS -u $ID1 -v $ID0 -g $ID1 -j $ID0 touch $DIR/$tdir/f5 || error "touch (10)"
+	$RUNAS_CMD -u $ID1 -g $ID1 -j $ID0 touch $DIR/$tdir/f4 ||
+		error "touch (9)"
+	$RUNAS_CMD -u $ID1 -v $ID0 -g $ID1 -j $ID0 touch $DIR/$tdir/f5 ||
+		error "touch (10)"
 
 	rm -rf $DIR/$tdir
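test_1 above relies on the MDS identity upcall's permission file. As a hedged refresher of that mechanism, run on the MDS (the perm.conf path and uid are assumptions for illustration; the script itself uses $PERM_CONF and $ID1):

    # grant uid 501 setuid+setgid rights, then flush the identity cache
    # so the new permission takes effect
    echo '* 501 setuid,setgid' >> /etc/lustre/perm.conf
    lctl set_param -n mdt.lustre-MDT0000.identity_flush=-1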
@@ -290,13 +306,14 @@ test_4() {
 	mkdir -p $DIR/$tdir
 	chmod 0771 $DIR/$tdir
 	chgrp $ID0 $DIR/$tdir
-	$RUNAS -u $ID0 ls $DIR/$tdir || error "setgroups (1)"
+	$RUNAS_CMD -u $ID0 ls $DIR/$tdir || error "setgroups (1)"
 	if [ "$CLIENT_TYPE" = "local" ]; then
 		do_facet $SINGLEMDS "echo '* $ID1 setgrp' > $PERM_CONF"
 		do_facet $SINGLEMDS "lctl set_param -n $IDENTITY_FLUSH=-1"
-		$RUNAS -u $ID1 -G1,2,$ID0 ls $DIR/$tdir || error "setgroups (2)"
+		$RUNAS_CMD -u $ID1 -G1,2,$ID0 ls $DIR/$tdir ||
+			error "setgroups (2)"
 	fi
-	$RUNAS -u $ID1 -G1,2 ls $DIR/$tdir && error "setgroups (3)"
+	$RUNAS_CMD -u $ID1 -G1,2 ls $DIR/$tdir && error "setgroups (3)"
 	rm -rf $DIR/$tdir
 
 	do_facet $SINGLEMDS "rm -f $PERM_CONF"
@@ -461,7 +478,7 @@ test_5() {
 	fi
 
 	# proc variable disabled -- access to the objects in the filesystem
-	# is not allowed 
+	# is not allowed
 	echo "Should get Write error here : (proc variable are disabled "\
 		"-- access to the objects in the filesystem is denied."
 	$WTL $file 30
@@ -569,6 +586,8 @@ create_nodemaps() {
 	local out
 	local rc
 
+	squash_id default 99 0
+	squash_id default 99 1
 	for (( i = 0; i < NODEMAP_COUNT; i++ )); do
 		if ! do_facet mgs $LCTL nodemap_add \
 			${HOSTNAME_CHECKSUM}_${i}; then
@@ -767,6 +786,7 @@ test_idmap() {
 		fs_id=$(do_facet mgs $cmd --nid $nid \
 			--idtype uid --id $id)
 		if [ $fs_id != $id ]; then
+			echo "expected $id, got $fs_id"
 			rc=$((rc + 1))
 		fi
 	done
@@ -784,6 +804,7 @@ test_idmap() {
 			--idtype uid --id $id)
 		expected_id=$((id + 1))
 		if [ $fs_id != $expected_id ]; then
+			echo "expected $expected_id, got $fs_id"
 			rc=$((rc + 1))
 		fi
 	done
@@ -805,8 +826,8 @@ test_idmap() {
 		nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
 		fs_id=$(do_facet mgs $cmd --nid $nid \
 			--idtype uid --id $id)
-		expected_id=$((id + 1))
 		if [ $fs_id != $id ]; then
+			echo "expected $id, got $fs_id"
 			rc=$((rc + 1))
 		fi
 	done
@@ -823,12 +844,12 @@ test_idmap() {
 		fi
 	done
 
-	## check that root is mapped to 99
+	## check that root is allowed
 	for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
 		nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
 		fs_id=$(do_facet mgs $cmd --nid $nid --idtype uid --id 0)
-		expected_id=$((id + 1))
 		if [ $fs_id != 0 ]; then
+			echo "root allowed expected 0, got $fs_id"
 			rc=$((rc + 1))
 		fi
 	done
@@ -844,12 +865,12 @@ test_idmap() {
 		fi
 	done
 
-	## check that root is allowed
+	## check that root is mapped to 99
 	for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
 		nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
 		fs_id=$(do_facet mgs $cmd --nid $nid --idtype uid --id 0)
-		expected_id=$((id + 1))
 		if [ $fs_id != 99 ]; then
+			error "root squash expected 99, got $fs_id"
 			rc=$((rc + 1))
 		fi
 	done
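The test_idmap hunks above check per-NID mapping results; $cmd is defined elsewhere in the script, outside this diff. Assuming it wraps lctl's nodemap_test_id (an assumption), a manual spot-check would look like:

    # ask the MGS how uid 0 arriving from a given NID would be mapped;
    # with root squashed the expected answer is 99, with admin set it is 0
    lctl nodemap_test_id --nid 192.168.1.10@tcp --idtype uid --id 0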
@@ -1195,6 +1216,502 @@ test_15() {
 }
 run_test 15 "test id mapping"
 
+create_fops_nodemaps() {
+	local i=0
+	local client
+	for client in $clients; do
+		local client_ip=$($LUSTRE/tests/resolveip $client)
+		do_facet mgs $LCTL nodemap_add c${i} || return 1
+		do_facet mgs $LCTL nodemap_add_range \
+			--name c${i} --range $client_ip@tcp || return 1
+		do_facet ost0 $LCTL set_param nodemap.add_nodemap=c${i} ||
+			return 1
+		do_facet ost0 "$LCTL set_param nodemap.add_nodemap_range='c$i \
+			$client_ip@tcp'" || return 1
+		for map in ${FOPS_IDMAPS[i]}; do
+			do_facet mgs $LCTL nodemap_add_idmap --name c${i} \
+				--idtype uid --idmap ${map} || return 1
+			do_facet ost0 "$LCTL set_param \
+				nodemap.add_nodemap_idmap='c$i uid ${map}'" ||
+				return 1
+			do_facet mgs $LCTL nodemap_add_idmap --name c${i} \
+				--idtype gid --idmap ${map} || return 1
+			do_facet ost0 "$LCTL set_param \
+				nodemap.add_nodemap_idmap='c$i gid ${map}'" ||
+				return 1
+		done
+		out1=$(do_facet mgs $LCTL get_param nodemap.c${i}.idmap)
+		out2=$(do_facet ost0 $LCTL get_param nodemap.c${i}.idmap)
+		[ "$out1" != "$out2" ] && error "mgs and oss maps mismatch"
+		i=$((i + 1))
+	done
+	return 0
+}
+
+delete_fops_nodemaps() {
+	local i=0
+	local client
+	for client in $clients; do
+		do_facet mgs $LCTL nodemap_del c${i} || return 1
+		do_facet ost0 $LCTL set_param nodemap.remove_nodemap=c${i} ||
+			return 1
+		i=$((i + 1))
+	done
+	return 0
+}
+
+# acl test directory needs to be initialized on a privileged client
+fops_test_setup() {
+	local admin=$(do_facet mgs $LCTL get_param -n nodemap.c0.admin_nodemap)
+	local trust=$(do_facet mgs $LCTL get_param -n \
+		nodemap.c0.trusted_nodemap)
+
+	do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
+	do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
+	do_facet ost0 $LCTL set_param nodemap.c0.admin_nodemap=1
+	do_facet ost0 $LCTL set_param nodemap.c0.trusted_nodemap=1
+
+	do_node ${clients_arr[0]} rm -rf $DIR/$tdir
+	do_node ${clients_arr[0]} mkdir -p $DIR/$tdir
+	do_node ${clients_arr[0]} chown $user $DIR/$tdir
+
+	do_facet mgs $LCTL nodemap_modify --name c0 \
+		--property admin --value $admin
+	do_facet mgs $LCTL nodemap_modify --name c0 \
+		--property trusted --value $trust
+	do_facet ost0 $LCTL set_param nodemap.c0.admin_nodemap=$admin
+	do_facet ost0 $LCTL set_param nodemap.c0.trusted_nodemap=$trust
+
+}
+
+do_create_delete() {
+	local run_u=$1
+	local key=$2
+	local testfile=$DIR/$tdir/$tfile
+	local rc=0
+	local c=0 d=0
+	local qused_new
+	if $run_u touch $testfile >& /dev/null; then
+		c=1
+		$run_u rm $testfile && d=1
+	fi >& /dev/null
+
+	local res="$c $d"
+	local expected=$(get_cr_del_expected $key)
+	[ "$res" != "$expected" ] && error "test $key expected " \
+		"$expected, got $res" && rc=$(($rc+1))
+	return $rc
+}
+
+nodemap_check_quota() {
+	local run_u="$1"
+	$run_u lfs quota -q $DIR | awk '{ print $2; exit; }'
+}
+
+do_fops_quota_test() {
+	local run_u=$1
+	# define fuzz as 2x ost block size in K
+	local quota_fuzz=$(($(lctl get_param -n osc.lustre-OST0000-*.blocksize |
+		head -1) / 512))
+	local qused_orig=$(nodemap_check_quota "$run_u")
+	local qused_low=$((qused_orig - quota_fuzz))
+	local qused_high=$((qused_orig + quota_fuzz))
+	local testfile=$DIR/$tdir/$tfile
+	chmod 777 $DIR/$tdir
+	$run_u dd if=/dev/zero of=$testfile bs=1M count=1 >& /dev/null
+	sync; sync_all_data || true
+
+	local qused_new=$(nodemap_check_quota "$run_u")
+	[ $((qused_low + 1024)) -le $((qused_new)) \
+		-a $((qused_high + 1024)) -ge $((qused_new)) ] ||
+		error "$qused_new != $qused_orig + 1M after write"
+	$run_u rm $testfile && d=1
+	$NODEMAP_TEST_QUOTA && wait_delete_completed_mds
+
+	qused_new=$(nodemap_check_quota "$run_u")
+	[ $((qused_low)) -le $((qused_new)) \
+		-a $((qused_high)) -ge $((qused_new)) ] ||
+		error "quota not reclaimed, expect $qused_orig got $qused_new"
+}
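+# Aside (hedged, illustrative numbers): the fuzz above is the OST blocksize
+# in bytes divided by 512, i.e. twice the blocksize expressed in KB. With
+# 4096-byte blocks and 1000 KB used before the 1M write, the accepted
+# post-write window works out as follows:
+#	blocksize=4096
+#	quota_fuzz=$((blocksize / 512))		# = 8 KB of slack
+#	qused_orig=1000
+#	echo "accept [$((qused_orig + 1024 - quota_fuzz))," \
+#		"$((qused_orig + 1024 + quota_fuzz))] KB"	# [2016, 2032]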
+get_fops_mapped_user() {
+	local cli_user=$1
+
+	for ((i=0; i < ${#FOPS_IDMAPS[@]}; i++)); do
+		for map in ${FOPS_IDMAPS[i]}; do
+			if [ $(cut -d: -f1 <<< "$map") == $cli_user ]; then
+				cut -d: -f2 <<< "$map"
+				return
+			fi
+		done
+	done
+	echo -1
+}
+
+get_cr_del_expected() {
+	local -a key
+	IFS=":" read -a key <<< "$1"
+	local mapmode="${key[0]}"
+	local mds_user="${key[1]}"
+	local cluster="${key[2]}"
+	local cli_user="${key[3]}"
+	local mode="0${key[4]}"
+	local SUCCESS="1 1"
+	local FAILURE="0 0"
+	local noadmin=0
+	local mapped=0
+	local other=0
+
+	[[ $mapmode == *mapped* ]] && mapped=1
+	# only c1 is mapped in these test cases
+	[[ $mapmode == mapped_trusted* ]] && [ "$cluster" == "c0" ] && mapped=0
+	[[ $mapmode == *noadmin* ]] && noadmin=1
+
+	# o+wx works as long as the user isn't mapped
+	if [ $((mode & 3)) -eq 3 ]; then
+		other=1
+	fi
+
+	# if client user is root, check if root is squashed
+	if [ "$cli_user" == "0" ]; then
+		# squashed root succeeds only if the other bits allow it
+		case $noadmin in
+			0) echo $SUCCESS;;
+			1) [ "$other" == "1" ] && echo $SUCCESS
+			   [ "$other" == "0" ] && echo $FAILURE;;
+		esac
+		return
+	fi
+	if [ "$mapped" == "0" ]; then
+		[ "$other" == "1" ] && echo $SUCCESS
+		[ "$other" == "0" ] && echo $FAILURE
+		return
+	fi
+
+	# if mapped user is mds user, check for u+wx
+	mapped_user=$(get_fops_mapped_user $cli_user)
+	[ "$mapped_user" == "-1" ] &&
+		error "unable to find mapping for client user $cli_user"
+
+	if [ "$mapped_user" == "$mds_user" -a \
+	     $(((mode & 0300) == 0300)) -eq 1 ]; then
+		echo $SUCCESS
+		return
+	fi
+	if [ "$mapped_user" != "$mds_user" -a "$other" == "1" ]; then
+		echo $SUCCESS
+		return
+	fi
+	echo $FAILURE
+}
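+# Aside: the masks in get_cr_del_expected are octal (bash treats a leading 0
+# in $((...)) as octal), so the checks decode as:
+#	mode=0773; echo $((mode & 3))		# 3 -> o+wx, unmapped user passes
+#	mode=0300; echo $(((mode & 0300) == 0300))	# 1 -> u+wx, owner-mapped user passes
+#	mode=0770; echo $((mode & 3))		# 0 -> unmapped user expects "0 0"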
+test_fops() {
+	local mapmode="$1"
+	local single_client="$2"
+	local client_user_list=([0]="0 $((IDBASE+3)) $((IDBASE+4))"
+				[1]="0 $((IDBASE+5)) $((IDBASE+6))")
+	local mds_i
+	local rc=0
+	local perm_bit_list="0 3 $((0300)) $((0303))"
+	[ "$SLOW" == "yes" ] && perm_bit_list=$(seq 0 511)
+
+	# step through mds users. -1 means root
+	for mds_i in -1 0 1 2; do
+		local user=$((mds_i + IDBASE))
+		local client
+		local x
+
+		[ "$mds_i" == "-1" ] && user=0
+
+		echo mkdir -p $DIR/$tdir
+		fops_test_setup
+		local cli_i=0
+		for client in $clients; do
+			local u
+			local admin=$(do_facet mgs $LCTL get_param -n \
+				nodemap.c$cli_i.admin_nodemap)
+			for u in ${client_user_list[$cli_i]}; do
+				local run_u="do_node $client \
+					$RUNAS_CMD -u$u -g$u -G$u"
+				for perm_bits in $perm_bit_list; do
+					local mode=$(printf %03o $perm_bits)
+					do_facet mgs $LCTL nodemap_modify \
+						--name c$cli_i \
+						--property admin \
+						--value 1
+					do_node $client chmod $mode $DIR/$tdir
+					do_facet mgs $LCTL nodemap_modify \
+						--name c$cli_i \
+						--property admin \
+						--value $admin
+
+					local key
+					key="$mapmode:$user:c$cli_i:$u:$mode"
+					do_create_delete "$run_u" "$key"
+				done
+
+				# check quota
+				do_fops_quota_test "$run_u"
+			done
+
+			cli_i=$((cli_i + 1))
+			[ "$single_client" == "1" ] && break
+		done
+		rm -rf $DIR/$tdir
+	done
+	return $rc
+}
+
+nodemap_test_setup() {
+	local rc
+	local active_nodemap=$1
+
+	do_facet mgs $LCTL set_param $IDENTITY_UPCALL=NONE
+
+	remote_mgs_nodsh && skip "remote MGS with nodsh" && return
+	[ $(lustre_version_code $SINGLEMGS) -lt $(version_code 2.5.53) ] &&
+		skip "No nodemap on $(get_lustre_version) MGS, need 2.5.53+" &&
+		return
+
+	rc=0
+	create_fops_nodemaps
+	rc=$?
+	[[ $rc != 0 ]] && error "adding fops nodemaps failed $rc"
+
+	if [ "$active_nodemap" == "0" ]; then
+		do_facet mgs $LCTL set_param nodemap.active=0
+		do_facet ost0 $LCTL set_param nodemap.active=0
+		return
+	fi
+
+	do_facet mgs $LCTL nodemap_activate 1
+	do_facet ost0 $LCTL set_param nodemap.active=1
+	do_facet mgs $LCTL nodemap_modify --name default \
+		--property admin --value 1
+	do_facet mgs $LCTL nodemap_modify --name default \
+		--property trusted --value 1
+	do_facet ost0 $LCTL set_param nodemap.default.admin_nodemap=1
+	do_facet ost0 $LCTL set_param nodemap.default.trusted_nodemap=1
+}
+
+nodemap_test_cleanup() {
+	delete_fops_nodemaps
+	rc=$?
+	[[ $rc != 0 ]] && error "removing fops nodemaps failed $rc"
+
+	return 0
+}
+
+nodemap_clients_admin_trusted() {
+	local admin=$1
+	local tr=$2
+	local i=0
+	for client in $clients; do
+		do_facet mgs $LCTL nodemap_modify --name c${i} \
+			--property admin --value $admin
+		do_facet ost0 $LCTL set_param nodemap.c${i}.admin_nodemap=$admin
+		do_facet mgs $LCTL nodemap_modify --name c${i} \
+			--property trusted --value $tr
+		do_facet ost0 $LCTL set_param nodemap.c${i}.trusted_nodemap=$tr
+		i=$((i + 1))
+	done
+}
+
+test_16() {
+	nodemap_test_setup 0
+
+	test_fops all_off
+	nodemap_test_cleanup
+}
+run_test 16 "test nodemap all_off fileops"
+
+test_17() {
+	nodemap_test_setup
+
+	nodemap_clients_admin_trusted 0 1
+	test_fops trusted_noadmin 1
+	nodemap_test_cleanup
+}
+run_test 17 "test nodemap trusted_noadmin fileops"
+
+test_18() {
+	nodemap_test_setup
+	nodemap_clients_admin_trusted 0 0
+	test_fops mapped_noadmin 1
+	nodemap_test_cleanup
+}
+run_test 18 "test nodemap mapped_noadmin fileops"
+
+test_19() {
+	nodemap_test_setup
+	nodemap_clients_admin_trusted 1 1
+	test_fops trusted_admin 1
+	nodemap_test_cleanup
+}
+run_test 19 "test nodemap trusted_admin fileops"
+
+test_20() {
+	nodemap_test_setup
+	nodemap_clients_admin_trusted 1 0
+	test_fops mapped_admin 1
+	nodemap_test_cleanup
+}
+run_test 20 "test nodemap mapped_admin fileops"
+
+test_21() {
+	nodemap_test_setup
+	local x=1
+	local i=0
+	for client in $clients; do
+		do_facet mgs $LCTL nodemap_modify --name c${i} \
+			--property admin --value 0
+		do_facet mgs $LCTL nodemap_modify --name c${i} \
+			--property trusted --value $x
+		do_facet ost0 $LCTL set_param nodemap.c${i}.admin_nodemap=0
+		do_facet ost0 $LCTL set_param nodemap.c${i}.trusted_nodemap=$x
+		x=0
+		i=$((i + 1))
+	done
+	test_fops mapped_trusted_noadmin
+	nodemap_test_cleanup
+}
+run_test 21 "test nodemap mapped_trusted_noadmin fileops"
+
+test_22() {
+	nodemap_test_setup
+	local x=1
+	local i=0
+	for client in $clients; do
+		do_facet mgs $LCTL nodemap_modify --name c${i} \
+			--property admin --value 1
+		do_facet mgs $LCTL nodemap_modify --name c${i} \
+			--property trusted --value $x
+		do_facet ost0 $LCTL set_param nodemap.c${i}.admin_nodemap=1
+		do_facet ost0 $LCTL set_param nodemap.c${i}.trusted_nodemap=$x
+		x=0
+		i=$((i + 1))
+	done
+	test_fops mapped_trusted_admin
+	nodemap_test_cleanup
+}
+run_test 22 "test nodemap mapped_trusted_admin fileops"
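+# Aside: tests 17-22 differ only in the admin/trusted properties pushed to
+# the client nodemaps: trusted=1 passes client ids through unmapped, and
+# admin=1 exempts root from squashing. For example, test 17's
+# "trusted_noadmin" state corresponds to:
+#	lctl nodemap_modify --name c0 --property admin --value 0
+#	lctl nodemap_modify --name c0 --property trusted --value 1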
+# acl test directory needs to be initialized on a privileged client
+nodemap_acl_test_setup() {
+	local admin=$(do_facet mgs $LCTL get_param -n nodemap.c0.admin_nodemap)
+	local trust=$(do_facet mgs $LCTL get_param -n \
+		nodemap.c0.trusted_nodemap)
+
+	do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
+	do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
+	do_facet ost0 $LCTL set_param nodemap.c0.admin_nodemap=1
+	do_facet ost0 $LCTL set_param nodemap.c0.trusted_nodemap=1
+
+	do_node ${clients_arr[0]} rm -rf $DIR/$tdir
+	do_node ${clients_arr[0]} mkdir -p $DIR/$tdir
+	do_node ${clients_arr[0]} chmod a+rwx $DIR/$tdir
+
+	do_facet mgs $LCTL nodemap_modify --name c0 \
+		--property admin --value $admin
+	do_facet mgs $LCTL nodemap_modify --name c0 \
+		--property trusted --value $trust
+	do_facet ost0 $LCTL set_param nodemap.c0.admin_nodemap=$admin
+	do_facet ost0 $LCTL set_param nodemap.c0.trusted_nodemap=$trust
+
+}
+
+# returns 0 if the number of ACLs does not change on the second (mapped) client
+# after being set on the first client
+nodemap_acl_test() {
+	local user="$1"
+	local set_client="$2"
+	local get_client="$3"
+	local check_setfacl="$4"
+	local setfacl_error=0
+	local testfile=$DIR/$tdir/$tfile
+	local RUNAS_USER="$RUNAS_CMD -u $user"
+	local acl_count=0
+	local acl_count_post=0
+
+	nodemap_acl_test_setup
+	sleep 5
+
+	do_node $set_client $RUNAS_USER find $DIR/ -ls
+	do_node $set_client $RUNAS_USER touch $testfile
+
+	# ACL masks aren't filtered by nodemap code, so we ignore them
+	acl_count=$(do_node $get_client getfacl $testfile | grep -v mask |
+		wc -l)
+	do_node $set_client $RUNAS_USER setfacl -m $user:rwx $testfile ||
+		setfacl_error=1
+
+	# if check_setfacl is set to 1, then it's supposed to error
+	if [ "$check_setfacl" == "1" ]; then
+		[ "$setfacl_error" != "1" ] && return 1
+		return 0
+	fi
+	[ "$setfacl_error" == "1" ] && echo "WARNING: unable to setfacl"
+
+	acl_count_post=$(do_node $get_client getfacl $testfile | grep -v mask |
+		wc -l)
+	[ $acl_count -eq $acl_count_post ] && return 0
+	return 1
+}
+
+test_23() {
+	nodemap_test_setup
+
+	# 1 trusted cluster, 1 mapped cluster
+	local unmapped_fs=$((IDBASE+0))
+	local unmapped_c1=$((IDBASE+5))
+	local mapped_fs=$((IDBASE+2))
+	local mapped_c0=$((IDBASE+4))
+	local mapped_c1=$((IDBASE+6))
+
+	do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
+	do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
+	do_facet ost0 $LCTL set_param nodemap.c0.admin_nodemap=1
+	do_facet ost0 $LCTL set_param nodemap.c0.trusted_nodemap=1
+
+	do_facet mgs $LCTL nodemap_modify --name c1 --property admin --value 0
+	do_facet mgs $LCTL nodemap_modify --name c1 --property trusted --value 0
+	do_facet ost0 $LCTL set_param nodemap.c1.admin_nodemap=0
+	do_facet ost0 $LCTL set_param nodemap.c1.trusted_nodemap=0
+
+	# setfacl on trusted cluster to unmapped user, verify it's not seen
+	nodemap_acl_test $unmapped_fs ${clients_arr[0]} ${clients_arr[1]} ||
+		error "acl count (1)"
+
+	# setfacl on trusted cluster to mapped user, verify it's seen
+	nodemap_acl_test $mapped_fs ${clients_arr[0]} ${clients_arr[1]} &&
+		error "acl count (2)"
+
+	# setfacl on mapped cluster to mapped user, verify it's seen
+	nodemap_acl_test $mapped_c1 ${clients_arr[1]} ${clients_arr[0]} &&
+		error "acl count (3)"
+
+	# setfacl on mapped cluster to unmapped user, verify error
+	nodemap_acl_test $unmapped_fs ${clients_arr[1]} ${clients_arr[0]} 1 ||
+		error "acl count (4)"
+
+	# 2 mapped clusters
+	do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 0
+	do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 0
+	do_facet ost0 $LCTL set_param nodemap.c0.admin_nodemap=0
+	do_facet ost0 $LCTL set_param nodemap.c0.trusted_nodemap=0
+
+	# setfacl to mapped user on c1, also mapped to c0, verify it's seen
+	nodemap_acl_test $mapped_c1 ${clients_arr[1]} ${clients_arr[0]} &&
+		error "acl count (5)"
+
+	# setfacl to mapped user on c1, not mapped to c0, verify not seen
+	nodemap_acl_test $unmapped_c1 ${clients_arr[1]} ${clients_arr[0]} ||
+		error "acl count (6)"
+
+	nodemap_test_cleanup
+}
+run_test 23 "test mapped ACLs"
+
 log "cleanup: ======================================================"
 
 sec_unsetup() {
@@ -1207,8 +1724,8 @@ sec_unsetup() {
 	fi
 	done
 
-	$RUNAS -u $ID0 ls $DIR
-	$RUNAS -u $ID1 ls $DIR
+	$RUNAS_CMD -u $ID0 ls $DIR
+	$RUNAS_CMD -u $ID1 ls $DIR
 
 }
 sec_unsetup
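To exercise just the new file-operation and ACL tests against an existing setup, the test framework's ONLY variable can be used (invocation illustrative; test 23 needs at least two clients):

    ONLY="16 17 18 19 20 21 22 23" bash lustre/tests/sanity-sec.sh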