X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Ftests%2Fsanity-sec.sh;h=dfd07faa9758d89a99cb819e5219dc54f1885895;hp=fba1d5c5fef222c5042a19cde0738125cee5400b;hb=ecb2b82169879cbf2a04bcf15a560b455c7ee1eb;hpb=9d06de39731ae16d030cda672ae771496d4f0952 diff --git a/lustre/tests/sanity-sec.sh b/lustre/tests/sanity-sec.sh index fba1d5c..dfd07fa 100755 --- a/lustre/tests/sanity-sec.sh +++ b/lustre/tests/sanity-sec.sh @@ -7,8 +7,12 @@ set -e ONLY=${ONLY:-"$*"} -# bug number for skipped test: 19430 19967 19967 -ALWAYS_EXCEPT=" 2 5 6 $SANITY_SEC_EXCEPT" +# bug number for skipped test: +ALWAYS_EXCEPT=" $SANITY_SEC_EXCEPT" +if $SHARED_KEY; then +# bug number for skipped test: 9145 9145 9671 9145 9145 9145 9145 9245 + ALWAYS_EXCEPT=" 17 18 19 20 21 22 23 27 $ALWAYS_EXCEPT" +fi # UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT! SRCDIR=$(dirname $0) @@ -40,13 +44,8 @@ WTL=${WTL:-"$LUSTRE/tests/write_time_limit"} CONFDIR=/etc/lustre PERM_CONF=$CONFDIR/perm.conf FAIL_ON_ERROR=false - HOSTNAME_CHECKSUM=$(hostname | sum | awk '{ print $1 }') SUBNET_CHECKSUM=$(expr $HOSTNAME_CHECKSUM % 250 + 1) -NODEMAP_COUNT=16 -NODEMAP_RANGE_COUNT=3 -NODEMAP_IPADDR_LIST="1 10 64 128 200 250" -NODEMAP_MAX_ID=128 require_dsh_mds || exit 0 require_dsh_ost || exit 0 @@ -60,6 +59,12 @@ ID1=${ID1:-501} USER0=$(getent passwd | grep :$ID0:$ID0: | cut -d: -f1) USER1=$(getent passwd | grep :$ID1:$ID1: | cut -d: -f1) +NODEMAP_COUNT=16 +NODEMAP_RANGE_COUNT=3 +NODEMAP_IPADDR_LIST="1 10 64 128 200 250" +NODEMAP_ID_COUNT=10 +NODEMAP_MAX_ID=$((ID0 + NODEMAP_ID_COUNT)) + [ -z "$USER0" ] && skip "need to add user0 ($ID0:$ID0)" && exit 0 @@ -76,18 +81,7 @@ FOPS_IDMAPS=( check_and_setup_lustre -sec_cleanup() { - if [ "$I_MOUNTED" = "yes" ]; then - cleanupall -f || error "sec_cleanup" - fi -} - -DIR=${DIR:-$MOUNT} -[ -z "$(echo $DIR | grep $MOUNT)" ] && - error "$DIR not in $MOUNT" && sec_cleanup && exit 1 - -[ $(echo $MOUNT | wc -w) -gt 1 ] && - echo "NAME=$MOUNT mounted more than once" && sec_cleanup && exit 0 +assert_DIR # for GSS_SUP GSS_REF=$(lsmod | grep ^ptlrpc_gss | awk '{print $3}') @@ -105,7 +99,6 @@ MDT=$(do_facet $SINGLEMDS lctl get_param -N "mdt.\*MDT0000" | do_facet $SINGLEMDS "mkdir -p $CONFDIR" IDENTITY_FLUSH=mdt.$MDT.identity_flush IDENTITY_UPCALL=mdt.$MDT.identity_upcall -MDSSECLEVEL=mdt.$MDT.sec_level SAVE_PWD=$PWD @@ -115,6 +108,7 @@ sec_login() { local user=$1 local group=$2 + $GSS_KRB5 || return if ! $RUNAS_CMD -u $user krb5_login.sh; then error "$user login kerberos failed." exit 1 @@ -267,7 +261,7 @@ delete_nodemaps() { return 3 fi - out=$(do_facet mgs $LCTL get_param nodemap.$csum.id) + out=$(do_facet mgs $LCTL get_param nodemap.$csum.id 2>/dev/null) [[ $(echo $out | grep -c $csum) != 0 ]] && return 1 done return 0 @@ -309,10 +303,11 @@ add_idmaps() { local cmd="$LCTL nodemap_add_idmap" local rc=0 + echo "Start to add idmaps ..." for ((i = 0; i < NODEMAP_COUNT; i++)); do local j - for ((j = 500; j < NODEMAP_MAX_ID; j++)); do + for ((j = $ID0; j < NODEMAP_MAX_ID; j++)); do local csum=${HOSTNAME_CHECKSUM}_${i} local client_id=$j local fs_id=$((j + 1)) @@ -331,15 +326,84 @@ add_idmaps() { return $rc } +update_idmaps() { #LU-10040 + [ $(lustre_version_code mgs) -lt $(version_code 2.10.55) ] && + skip "Need MGS >= 2.10.55" && + return + local csum=${HOSTNAME_CHECKSUM}_0 + local old_id_client=$ID0 + local old_id_fs=$((ID0 + 1)) + local new_id=$((ID0 + 100)) + local tmp_id + local cmd + local run + local idtype + local rc=0 + + echo "Start to update idmaps ..." 
+ + #Inserting an existed idmap should return error + cmd="$LCTL nodemap_add_idmap --name $csum --idtype uid" + if do_facet mgs \ + $cmd --idmap $old_id_client:$old_id_fs 2>/dev/null; then + error "insert idmap {$old_id_client:$old_id_fs} " \ + "should return error" + rc=$((rc + 1)) + return rc + fi + + #Update id_fs and check it + if ! do_facet mgs $cmd --idmap $old_id_client:$new_id; then + error "$cmd --idmap $old_id_client:$new_id failed" + rc=$((rc + 1)) + return $rc + fi + tmp_id=$(do_facet mgs $LCTL get_param -n nodemap.$csum.idmap | + awk '{ print $7 }' | sed -n '2p') + [ $tmp_id != $new_id ] && { error "new id_fs $tmp_id != $new_id"; \ + rc=$((rc + 1)); return $rc; } + + #Update id_client and check it + if ! do_facet mgs $cmd --idmap $new_id:$new_id; then + error "$cmd --idmap $new_id:$new_id failed" + rc=$((rc + 1)) + return $rc + fi + tmp_id=$(do_facet mgs $LCTL get_param -n nodemap.$csum.idmap | + awk '{ print $5 }' | sed -n "$((NODEMAP_ID_COUNT + 1)) p") + tmp_id=$(echo ${tmp_id%,*}) #e.g. "501,"->"501" + [ $tmp_id != $new_id ] && { error "new id_client $tmp_id != $new_id"; \ + rc=$((rc + 1)); return $rc; } + + #Delete above updated idmap + cmd="$LCTL nodemap_del_idmap --name $csum --idtype uid" + if ! do_facet mgs $cmd --idmap $new_id:$new_id; then + error "$cmd --idmap $new_id:$new_id failed" + rc=$((rc + 1)) + return $rc + fi + + #restore the idmaps to make delete_idmaps work well + cmd="$LCTL nodemap_add_idmap --name $csum --idtype uid" + if ! do_facet mgs $cmd --idmap $old_id_client:$old_id_fs; then + error "$cmd --idmap $old_id_client:$old_id_fs failed" + rc=$((rc + 1)) + return $rc + fi + + return $rc +} + delete_idmaps() { local i local cmd="$LCTL nodemap_del_idmap" local rc=0 + echo "Start to delete idmaps ..." for ((i = 0; i < NODEMAP_COUNT; i++)); do local j - for ((j = 500; j < NODEMAP_MAX_ID; j++)); do + for ((j = $ID0; j < NODEMAP_MAX_ID; j++)); do local csum=${HOSTNAME_CHECKSUM}_${i} local client_id=$j local fs_id=$((j + 1)) @@ -422,11 +486,12 @@ test_idmap() { local cmd="$LCTL nodemap_test_id" local rc=0 + echo "Start to test idmaps ..." ## nodemap deactivated if ! do_facet mgs $LCTL nodemap_activate 0; then return 1 fi - for ((id = 500; id < NODEMAP_MAX_ID; id++)); do + for ((id = $ID0; id < NODEMAP_MAX_ID; id++)); do local j for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do @@ -445,7 +510,7 @@ test_idmap() { return 2 fi - for ((id = 500; id < NODEMAP_MAX_ID; id++)); do + for ((id = $ID0; id < NODEMAP_MAX_ID; id++)); do for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do nid="$SUBNET_CHECKSUM.0.${j}.100@tcp" fs_id=$(do_facet mgs $cmd --nid $nid \ @@ -469,7 +534,7 @@ test_idmap() { fi done - for ((id = 500; id < NODEMAP_MAX_ID; id++)); do + for ((id = $ID0; id < NODEMAP_MAX_ID; id++)); do for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do nid="$SUBNET_CHECKSUM.0.${j}.100@tcp" fs_id=$(do_facet mgs $cmd --nid $nid \ @@ -552,7 +617,7 @@ test_7() { delete_nodemaps rc=$? - [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 2 + [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 2 return 0 } @@ -582,7 +647,7 @@ test_8() { # Clean up delete_nodemaps rc=$? - [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 3 + [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 3 return 0 } @@ -621,13 +686,13 @@ test_9() { rc=0 delete_nodemaps rc=$? 
- [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 4 + [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4 return 0 } run_test 9 "nodemap range add" -test_10() { +test_10a() { local rc remote_mgs_nodsh && skip "remote MGS with nodsh" && return @@ -668,11 +733,99 @@ test_10() { delete_nodemaps rc=$? - [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 5 + [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 5 return 0 } -run_test 10 "nodemap reject duplicate ranges" +run_test 10a "nodemap reject duplicate ranges" + +test_10b() { + [ $(lustre_version_code mgs) -lt $(version_code 2.10.53) ] && + skip "Need MGS >= 2.10.53" && return + + local nm1="nodemap1" + local nm2="nodemap2" + local nids="192.168.19.[0-255]@o2ib20" + + do_facet mgs $LCTL nodemap_del $nm1 2>/dev/null + do_facet mgs $LCTL nodemap_del $nm2 2>/dev/null + + do_facet mgs $LCTL nodemap_add $nm1 || error "Add $nm1 failed" + do_facet mgs $LCTL nodemap_add $nm2 || error "Add $nm2 failed" + do_facet mgs $LCTL nodemap_add_range --name $nm1 --range $nids || + error "Add range $nids to $nm1 failed" + [ -n "$(do_facet mgs $LCTL get_param nodemap.$nm1.* | + grep start_nid)" ] || error "No range was found" + do_facet mgs $LCTL nodemap_del_range --name $nm2 --range $nids && + error "Deleting range $nids from $nm2 should fail" + [ -n "$(do_facet mgs $LCTL get_param nodemap.$nm1.* | + grep start_nid)" ] || error "Range $nids should be there" + + do_facet mgs $LCTL nodemap_del $nm1 || error "Delete $nm1 failed" + do_facet mgs $LCTL nodemap_del $nm2 || error "Delete $nm2 failed" + return 0 +} +run_test 10b "delete range from the correct nodemap" + +test_10c() { #LU-8912 + [ $(lustre_version_code mgs) -lt $(version_code 2.10.57) ] && + skip "Need MGS >= 2.10.57" && return + + local nm="nodemap_lu8912" + local nid_range="10.210.[32-47].[0-255]@o2ib3" + local start_nid="10.210.32.0@o2ib3" + local end_nid="10.210.47.255@o2ib3" + local start_nid_found + local end_nid_found + + do_facet mgs $LCTL nodemap_del $nm 2>/dev/null + do_facet mgs $LCTL nodemap_add $nm || error "Add $nm failed" + do_facet mgs $LCTL nodemap_add_range --name $nm --range $nid_range || + error "Add range $nid_range to $nm failed" + + start_nid_found=$(do_facet mgs $LCTL get_param nodemap.$nm.* | + awk -F '[,: ]' /start_nid/'{ print $9 }') + [ "$start_nid" == "$start_nid_found" ] || + error "start_nid: $start_nid_found != $start_nid" + end_nid_found=$(do_facet mgs $LCTL get_param nodemap.$nm.* | + awk -F '[,: ]' /end_nid/'{ print $13 }') + [ "$end_nid" == "$end_nid_found" ] || + error "end_nid: $end_nid_found != $end_nid" + + do_facet mgs $LCTL nodemap_del $nm || error "Delete $nm failed" + return 0 +} +run_test 10c "verfify contiguous range support" + +test_10d() { #LU-8913 + [ $(lustre_version_code mgs) -lt $(version_code 2.10.59) ] && + skip "Need MGS >= 2.10.59" && return + + local nm="nodemap_lu8913" + local nid_range="*@o2ib3" + local start_nid="0.0.0.0@o2ib3" + local end_nid="255.255.255.255@o2ib3" + local start_nid_found + local end_nid_found + + do_facet mgs $LCTL nodemap_del $nm 2>/dev/null + do_facet mgs $LCTL nodemap_add $nm || error "Add $nm failed" + do_facet mgs $LCTL nodemap_add_range --name $nm --range $nid_range || + error "Add range $nid_range to $nm failed" + + start_nid_found=$(do_facet mgs $LCTL get_param nodemap.$nm.* | + awk -F '[,: ]' /start_nid/'{ print $9 }') + [ "$start_nid" == "$start_nid_found" ] || + error "start_nid: $start_nid_found != $start_nid" + end_nid_found=$(do_facet mgs $LCTL get_param 
nodemap.$nm.* | + awk -F '[,: ]' /end_nid/'{ print $13 }') + [ "$end_nid" == "$end_nid_found" ] || + error "end_nid: $end_nid_found != $end_nid" + + do_facet mgs $LCTL nodemap_del $nm || error "Delete $nm failed" + return 0 +} +run_test 10d "verfify nodemap range format '*@' support" test_11() { local rc @@ -852,14 +1005,19 @@ test_15() { [[ $rc != 0 ]] && error "nodemap_test_id failed with $rc" && return 4 rc=0 + update_idmaps + rc=$? + [[ $rc != 0 ]] && error "update_idmaps failed with $rc" && return 5 + + rc=0 delete_idmaps rc=$? - [[ $rc != 0 ]] && error "nodemap_del_idmap failed with $rc" && return 5 + [[ $rc != 0 ]] && error "nodemap_del_idmap failed with $rc" && return 6 rc=0 delete_nodemaps rc=$? - [[ $rc != 0 ]] && error "nodemap_delete failed with $rc" && return 6 + [[ $rc != 0 ]] && error "nodemap_delete failed with $rc" && return 7 return 0 } @@ -868,19 +1026,33 @@ run_test 15 "test id mapping" wait_nm_sync() { local nodemap_name=$1 local key=$2 - local proc_param="${nodemap_name}.${key}" - [ "$nodemap_name" == "active" ] && proc_param="active" - + local value=$3 + local opt=$4 + local proc_param local is_active=$(do_facet mgs $LCTL get_param -n nodemap.active) - (( is_active == 0 )) && [ "$proc_param" != "active" ] && return - local max_retries=20 local is_sync - local out1=$(do_facet mgs $LCTL get_param nodemap.${proc_param}) + local out1="" local out2 local mgs_ip=$(host_nids_address $mgs_HOST $NETTYPE | cut -d' ' -f1) local i + if [ "$nodemap_name" == "active" ]; then + proc_param="active" + elif [ -z "$key" ]; then + proc_param=${nodemap_name} + else + proc_param="${nodemap_name}.${key}" + fi + (( is_active == 0 )) && [ "$proc_param" != "active" ] && return + + if [ -z "$value" ]; then + out1=$(do_facet mgs $LCTL get_param $opt nodemap.${proc_param}) + echo "On MGS ${mgs_ip}, ${proc_param} = $out1" + else + out1=$value; + fi + # wait up to 10 seconds for other servers to sync with mgs for i in $(seq 1 10); do for node in $(all_server_nodes); do @@ -888,10 +1060,13 @@ wait_nm_sync() { cut -d' ' -f1) is_sync=true - [ $node_ip == $mgs_ip ] && continue + if [ -z "$value" ]; then + [ $node_ip == $mgs_ip ] && continue + fi - out2=$(do_node $node_ip $LCTL get_param \ + out2=$(do_node $node_ip $LCTL get_param $opt \ nodemap.$proc_param 2>/dev/null) + echo "On $node ${node_ip}, ${proc_param} = $out2" [ "$out1" != "$out2" ] && is_sync=false && break done $is_sync && break @@ -912,7 +1087,7 @@ create_fops_nodemaps() { local client for client in $clients; do local client_ip=$(host_nids_address $client $NETTYPE) - local client_nid=$(h2$NETTYPE $client_ip) + local client_nid=$(h2nettype $client_ip) do_facet mgs $LCTL nodemap_add c${i} || return 1 do_facet mgs $LCTL nodemap_add_range \ --name c${i} --range $client_nid || return 1 @@ -984,15 +1159,18 @@ fops_test_setup() { # fileset test directory needs to be initialized on a privileged client fileset_test_setup() { - local admin=$(do_facet mgs $LCTL get_param -n nodemap.c0.admin_nodemap) + local nm=$1 + local admin=$(do_facet mgs $LCTL get_param -n \ + nodemap.${nm}.admin_nodemap) local trust=$(do_facet mgs $LCTL get_param -n \ - nodemap.c0.trusted_nodemap) + nodemap.${nm}.trusted_nodemap) - do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1 - do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1 + do_facet mgs $LCTL nodemap_modify --name $nm --property admin --value 1 + do_facet mgs $LCTL nodemap_modify --name $nm --property trusted \ + --value 1 - wait_nm_sync c0 admin_nodemap - wait_nm_sync c0 
trusted_nodemap + wait_nm_sync $nm admin_nodemap + wait_nm_sync $nm trusted_nodemap # create directory and populate it for subdir mount do_node ${clients_arr[0]} mkdir $MOUNT/$subdir || @@ -1006,46 +1184,49 @@ fileset_test_setup() { error "unable to create file \ $MOUNT/$subdir/$subsubdir/this_is_$subsubdir" - do_facet mgs $LCTL nodemap_modify --name c0 \ + do_facet mgs $LCTL nodemap_modify --name $nm \ --property admin --value $admin - do_facet mgs $LCTL nodemap_modify --name c0 \ + do_facet mgs $LCTL nodemap_modify --name $nm \ --property trusted --value $trust # flush MDT locks to make sure they are reacquired before test do_node ${clients_arr[0]} $LCTL set_param \ ldlm.namespaces.$FSNAME-MDT*.lru_size=clear - wait_nm_sync c0 admin_nodemap - wait_nm_sync c0 trusted_nodemap + wait_nm_sync $nm admin_nodemap + wait_nm_sync $nm trusted_nodemap } # fileset test directory needs to be initialized on a privileged client fileset_test_cleanup() { - local admin=$(do_facet mgs $LCTL get_param -n nodemap.c0.admin_nodemap) + local nm=$1 + local admin=$(do_facet mgs $LCTL get_param -n \ + nodemap.${nm}.admin_nodemap) local trust=$(do_facet mgs $LCTL get_param -n \ - nodemap.c0.trusted_nodemap) + nodemap.${nm}.trusted_nodemap) - do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1 - do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1 + do_facet mgs $LCTL nodemap_modify --name $nm --property admin --value 1 + do_facet mgs $LCTL nodemap_modify --name $nm --property trusted \ + --value 1 - wait_nm_sync c0 admin_nodemap - wait_nm_sync c0 trusted_nodemap + wait_nm_sync $nm admin_nodemap + wait_nm_sync $nm trusted_nodemap # cleanup directory created for subdir mount do_node ${clients_arr[0]} rm -rf $MOUNT/$subdir || error "unable to remove dir $MOUNT/$subdir" - do_facet mgs $LCTL nodemap_modify --name c0 \ + do_facet mgs $LCTL nodemap_modify --name $nm \ --property admin --value $admin - do_facet mgs $LCTL nodemap_modify --name c0 \ + do_facet mgs $LCTL nodemap_modify --name $nm \ --property trusted --value $trust # flush MDT locks to make sure they are reacquired before test do_node ${clients_arr[0]} $LCTL set_param \ ldlm.namespaces.$FSNAME-MDT*.lru_size=clear - wait_nm_sync c0 admin_nodemap - wait_nm_sync c0 trusted_nodemap + wait_nm_sync $nm admin_nodemap + wait_nm_sync $nm trusted_nodemap } do_create_delete() { @@ -1080,8 +1261,8 @@ do_fops_quota_test() { local qused_high=$((qused_orig + quota_fuzz)) local qused_low=$((qused_orig - quota_fuzz)) local testfile=$DIR/$tdir/$tfile - $run_u dd if=/dev/zero of=$testfile bs=1M count=1 >& /dev/null || - error "unable to write quota test file" + $run_u dd if=/dev/zero of=$testfile oflag=sync bs=1M count=1 \ + >& /dev/null || error "unable to write quota test file" sync; sync_all_data || true local qused_new=$(nodemap_check_quota "$run_u") @@ -1312,6 +1493,7 @@ nodemap_test_setup() { do_facet mgs $LCTL nodemap_modify --name default \ --property admin --value 1 + wait_nm_sync default admin_nodemap do_facet mgs $LCTL nodemap_modify --name default \ --property trusted --value 1 wait_nm_sync default trusted_nodemap @@ -1325,6 +1507,7 @@ nodemap_test_cleanup() { do_facet mgs $LCTL nodemap_modify --name default \ --property admin --value 0 + wait_nm_sync default admin_nodemap do_facet mgs $LCTL nodemap_modify --name default \ --property trusted --value 0 wait_nm_sync default trusted_nodemap @@ -1332,6 +1515,7 @@ nodemap_test_cleanup() { do_facet mgs $LCTL nodemap_activate 0 wait_nm_sync active 0 + export SK_UNIQUE_NM=false return 
0 } @@ -1450,7 +1634,8 @@ run_test 22 "test nodemap mapped_trusted_admin fileops" # acl test directory needs to be initialized on a privileged client nodemap_acl_test_setup() { - local admin=$(do_facet mgs $LCTL get_param -n nodemap.c0.admin_nodemap) + local admin=$(do_facet mgs $LCTL get_param -n \ + nodemap.c0.admin_nodemap) local trust=$(do_facet mgs $LCTL get_param -n \ nodemap.c0.trusted_nodemap) @@ -1510,7 +1695,8 @@ nodemap_acl_test() { return 1 } -test_23() { +test_23a() { + [ $num_clients -lt 2 ] && skip "Need 2 clients at least" && return nodemap_version_check || return 0 nodemap_test_setup @@ -1562,7 +1748,57 @@ test_23() { nodemap_test_cleanup } -run_test 23 "test mapped ACLs" +run_test 23a "test mapped regular ACLs" + +test_23b() { #LU-9929 + remote_mgs_nodsh && skip "remote MGS with nodsh" && return + [ $(lustre_version_code mgs) -lt $(version_code 2.10.53) ] && + skip "Need MGS >= 2.10.53" && return + + nodemap_test_setup + trap nodemap_test_cleanup EXIT + + local testdir=$DIR/$tdir + local fs_id=$((IDBASE+10)) + local unmapped_id + local mapped_id + local fs_user + + do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1 + wait_nm_sync c0 admin_nodemap + + # Add idmap $ID0:$fs_id (500:60010) + do_facet mgs $LCTL nodemap_add_idmap --name c0 --idtype gid \ + --idmap $ID0:$fs_id || + error "add idmap $ID0:$fs_id to nodemap c0 failed" + + # set/getfacl default acl on client0 (unmapped gid=500) + rm -rf $testdir + mkdir -p $testdir + # Here, USER0=$(getent passwd | grep :$ID0:$ID0: | cut -d: -f1) + setfacl -R -d -m group:$USER0:rwx $testdir || + error "setfacl $testdir on ${clients_arr[0]} failed" + unmapped_id=$(getfacl $testdir | grep -E "default:group:.*:rwx" | + awk -F: '{print $3}') + [ "$unmapped_id" = "$USER0" ] || + error "gid=$ID0 was not unmapped correctly on ${clients_arr[0]}" + + # getfacl default acl on MGS (mapped gid=60010) + zconf_mount $mgs_HOST $MOUNT + do_rpc_nodes $mgs_HOST is_mounted $MOUNT || + error "mount lustre on MGS failed" + mapped_id=$(do_node $mgs_HOST getfacl $testdir | + grep -E "default:group:.*:rwx" | awk -F: '{print $3}') + fs_user=$(do_facet mgs getent passwd | + grep :$fs_id:$fs_id: | cut -d: -f1) + [ $mapped_id -eq $fs_id -o "$mapped_id" = "$fs_user" ] || + error "Should return gid=$fs_id or $fs_user on MGS" + + rm -rf $testdir + do_facet mgs umount $MOUNT + nodemap_test_cleanup +} +run_test 23b "test mapped default ACLs" test_24() { nodemap_test_setup @@ -1578,7 +1814,10 @@ run_test 24 "check nodemap proc files for LBUGs and Oopses" test_25() { local tmpfile=$(mktemp) local tmpfile2=$(mktemp) + local tmpfile3=$(mktemp) + local tmpfile4=$(mktemp) local subdir=c0dir + local client nodemap_version_check || return 0 @@ -1586,41 +1825,59 @@ test_25() { zconf_umount_clients $CLIENTS $MOUNT || error "unable to umount clients $CLIENTS" + export SK_UNIQUE_NM=true nodemap_test_setup + # enable trusted/admin for setquota call in cleanup_and_setup_lustre() + i=0 + for client in $clients; do + do_facet mgs $LCTL nodemap_modify --name c${i} \ + --property admin --value 1 + do_facet mgs $LCTL nodemap_modify --name c${i} \ + --property trusted --value 1 + ((i++)) + done + wait_nm_sync c$((i - 1)) trusted_nodemap + trap nodemap_test_cleanup EXIT # create a new, empty nodemap, and add fileset info to it - do_facet mgs $LCTL nodemap_add test26 || - error "unable to create nodemap test26" - do_facet mgs $LCTL set_param -P nodemap.test26.fileset=/$subdir || - error "unable to add fileset info to nodemap test26" + do_facet mgs $LCTL nodemap_add 
test25 || + error "unable to create nodemap $testname" + do_facet mgs $LCTL set_param -P nodemap.$testname.fileset=/$subdir || + error "unable to add fileset info to nodemap test25" - wait_nm_sync test26 id + wait_nm_sync test25 id do_facet mgs $LCTL nodemap_info > $tmpfile do_facet mds $LCTL nodemap_info > $tmpfile2 - cleanup_and_setup_lustre + if ! $SHARED_KEY; then + # will conflict with SK's nodemaps + cleanup_and_setup_lustre + fi # stop clients for this test zconf_umount_clients $CLIENTS $MOUNT || error "unable to umount clients $CLIENTS" - diff -q <(do_facet mgs $LCTL nodemap_info) $tmpfile >& /dev/null || + do_facet mgs $LCTL nodemap_info > $tmpfile3 + diff -q $tmpfile3 $tmpfile >& /dev/null || error "nodemap_info diff on MGS after remount" - diff -q <(do_facet mds $LCTL nodemap_info) $tmpfile2 >& /dev/null || + do_facet mds $LCTL nodemap_info > $tmpfile4 + diff -q $tmpfile4 $tmpfile2 >& /dev/null || error "nodemap_info diff on MDS after remount" # cleanup nodemap - do_facet mgs $LCTL nodemap_del test26 || - error "cannot delete nodemap test26 from config" + do_facet mgs $LCTL nodemap_del test25 || + error "cannot delete nodemap test25 from config" nodemap_test_cleanup # restart clients previously stopped zconf_mount_clients $CLIENTS $MOUNT || error "unable to mount clients $CLIENTS" rm -f $tmpfile $tmpfile2 + export SK_UNIQUE_NM=false } run_test 25 "test save and reload nodemap config" @@ -1637,27 +1894,42 @@ test_26() { } run_test 26 "test transferring very large nodemap" -test_27() { - local subdir=c0dir - local subsubdir=c0subdir +nodemap_exercise_fileset() { + local nm="$1" + local loop=0 - nodemap_test_setup - trap nodemap_test_cleanup EXIT - - fileset_test_setup + # setup + if [ "$nm" == "default" ]; then + do_facet mgs $LCTL nodemap_activate 1 + wait_nm_sync active + else + nodemap_test_setup + fi + if $SHARED_KEY; then + export SK_UNIQUE_NM=true + else + # will conflict with SK's nodemaps + trap "fileset_test_cleanup $nm" EXIT + fi + fileset_test_setup "$nm" - # add fileset info to nodemap - do_facet mgs $LCTL set_param nodemap.c0.fileset=/$subdir || - error "unable to set fileset info on nodemap c0" - do_facet mgs $LCTL set_param -P nodemap.c0.fileset=/$subdir || - error "unable to add fileset info to nodemap c0" - wait_nm_sync c0 fileset + # add fileset info to $nm nodemap + if ! 
combined_mgs_mds; then + do_facet mgs $LCTL set_param nodemap.${nm}.fileset=/$subdir || + error "unable to add fileset info to $nm nodemap on MGS" + fi + do_facet mgs $LCTL set_param -P nodemap.${nm}.fileset=/$subdir || + error "unable to add fileset info to $nm nodemap for servers" + wait_nm_sync $nm fileset "nodemap.${nm}.fileset=/$subdir" # re-mount client zconf_umount_clients ${clients_arr[0]} $MOUNT || error "unable to umount client ${clients_arr[0]}" + # set some generic fileset to trigger SSK code + export FILESET=/ zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS || error "unable to remount client ${clients_arr[0]}" + unset FILESET # test mount point content do_node ${clients_arr[0]} test -f $MOUNT/this_is_$subdir || @@ -1676,11 +1948,14 @@ test_27() { error "subdir of fileset not taken into account" # remove fileset info from nodemap - do_facet mgs $LCTL nodemap_set_fileset --name c0 --fileset \'\' || - error "unable to delete fileset info on nodemap c0" - do_facet mgs $LCTL set_param -P nodemap.c0.fileset=\'\' || - error "unable to reset fileset info on nodemap c0" - wait_nm_sync c0 fileset + do_facet mgs $LCTL nodemap_set_fileset --name $nm --fileset clear || + error "unable to delete fileset info on $nm nodemap" + wait_update_facet mgs "$LCTL get_param nodemap.${nm}.fileset" \ + "nodemap.${nm}.fileset=" || + error "fileset info still not cleared on $nm nodemap" + do_facet mgs $LCTL set_param -P nodemap.${nm}.fileset=clear || + error "unable to reset fileset info on $nm nodemap" + wait_nm_sync $nm fileset "nodemap.${nm}.fileset=" # re-mount client zconf_umount_clients ${clients_arr[0]} $MOUNT || @@ -1689,13 +1964,178 @@ test_27() { error "unable to remount client ${clients_arr[0]}" # test mount point content - do_node ${clients_arr[0]} test -d $MOUNT/$subdir || - error "fileset not cleared on nodemap c0" + if ! $(do_node ${clients_arr[0]} test -d $MOUNT/$subdir); then + ls $MOUNT + error "fileset not cleared on $nm nodemap" + fi + + # back to non-nodemap setup + if $SHARED_KEY; then + export SK_UNIQUE_NM=false + zconf_umount_clients ${clients_arr[0]} $MOUNT || + error "unable to umount client ${clients_arr[0]}" + fi + fileset_test_cleanup "$nm" + if [ "$nm" == "default" ]; then + do_facet mgs $LCTL nodemap_activate 0 + wait_nm_sync active 0 + trap 0 + export SK_UNIQUE_NM=false + else + nodemap_test_cleanup + fi + if $SHARED_KEY; then + zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS || + error "unable to remount client ${clients_arr[0]}" + fi +} + +test_27a() { + [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.50) ] && + skip "Need MDS >= 2.11.50" && return + + for nm in "default" "c0"; do + local subdir="subdir_${nm}" + local subsubdir="subsubdir_${nm}" + + echo "Exercising fileset for nodemap $nm" + nodemap_exercise_fileset "$nm" + done +} +run_test 27a "test fileset in various nodemaps" + +test_27b() { #LU-10703 + [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.50) ] && + skip "Need MDS >= 2.11.50" && return + [[ $MDSCOUNT -lt 2 ]] && skip "needs >= 2 MDTs" && return + + nodemap_test_setup + trap nodemap_test_cleanup EXIT + + # Add the nodemaps and set their filesets + for i in $(seq 1 $MDSCOUNT); do + do_facet mgs $LCTL nodemap_del nm$i 2>/dev/null + do_facet mgs $LCTL nodemap_add nm$i || + error "add nodemap nm$i failed" + wait_nm_sync nm$i "" "" "-N" + + if ! 
combined_mgs_mds; then + do_facet mgs \ + $LCTL set_param nodemap.nm$i.fileset=/dir$i || + error "set nm$i.fileset=/dir$i failed on MGS" + fi + do_facet mgs $LCTL set_param -P nodemap.nm$i.fileset=/dir$i || + error "set nm$i.fileset=/dir$i failed on servers" + wait_nm_sync nm$i fileset "nodemap.nm$i.fileset=/dir$i" + done + + # Check if all the filesets are correct + for i in $(seq 1 $MDSCOUNT); do + fileset=$(do_facet mds$i \ + $LCTL get_param -n nodemap.nm$i.fileset) + [ "$fileset" = "/dir$i" ] || + error "nm$i.fileset $fileset != /dir$i on mds$i" + do_facet mgs $LCTL nodemap_del nm$i || + error "delete nodemap nm$i failed" + done - fileset_test_cleanup nodemap_test_cleanup } -run_test 27 "test fileset in nodemap" +run_test 27b "The new nodemap won't clear the old nodemap's fileset" + +test_28() { + if ! $SHARED_KEY; then + skip "need shared key feature for this test" && return + fi + mkdir -p $DIR/$tdir || error "mkdir failed" + touch $DIR/$tdir/$tdir.out || error "touch failed" + if [ ! -f $DIR/$tdir/$tdir.out ]; then + error "read before rotation failed" + fi + # store top key identity to ensure rotation has occurred + SK_IDENTITY_OLD=$(lctl get_param *.*.*srpc* | grep "expire" | + head -1 | awk '{print $15}' | cut -c1-8) + do_facet $SINGLEMDS lfs flushctx || + error "could not run flushctx on $SINGLEMDS" + sleep 5 + lfs flushctx || error "could not run flushctx on client" + sleep 5 + # verify new key is in place + SK_IDENTITY_NEW=$(lctl get_param *.*.*srpc* | grep "expire" | + head -1 | awk '{print $15}' | cut -c1-8) + if [ $SK_IDENTITY_OLD == $SK_IDENTITY_NEW ]; then + error "key did not rotate correctly" + fi + if [ ! -f $DIR/$tdir/$tdir.out ]; then + error "read after rotation failed" + fi +} +run_test 28 "check shared key rotation method" + +test_29() { + if ! $SHARED_KEY; then + skip "need shared key feature for this test" && return + fi + if [ $SK_FLAVOR != "ski" ] && [ $SK_FLAVOR != "skpi" ]; then + skip "test only valid if integrity is active" + fi + rm -r $DIR/$tdir + mkdir $DIR/$tdir || error "mkdir" + touch $DIR/$tdir/$tfile || error "touch" + zconf_umount_clients ${clients_arr[0]} $MOUNT || + error "unable to umount clients" + keyctl show | awk '/lustre/ { print $1 }' | + xargs -IX keyctl unlink X + OLD_SK_PATH=$SK_PATH + export SK_PATH=/dev/null + if zconf_mount_clients ${clients_arr[0]} $MOUNT; then + export SK_PATH=$OLD_SK_PATH + if [ -e $DIR/$tdir/$tfile ]; then + error "able to mount and read without key" + else + error "able to mount without key" + fi + else + export SK_PATH=$OLD_SK_PATH + keyctl show | awk '/lustre/ { print $1 }' | + xargs -IX keyctl unlink X + fi +} +run_test 29 "check for missing shared key" + +test_30() { + if ! 
$SHARED_KEY; then + skip "need shared key feature for this test" && return + fi + if [ $SK_FLAVOR != "ski" ] && [ $SK_FLAVOR != "skpi" ]; then + skip "test only valid if integrity is active" + fi + mkdir -p $DIR/$tdir || error "mkdir failed" + touch $DIR/$tdir/$tdir.out || error "touch failed" + zconf_umount_clients ${clients_arr[0]} $MOUNT || + error "unable to umount clients" + # unload keys from ring + keyctl show | awk '/lustre/ { print $1 }' | + xargs -IX keyctl unlink X + # invalidate the key with bogus filesystem name + lgss_sk -w $SK_PATH/$FSNAME-bogus.key -f $FSNAME.bogus \ + -t client -d /dev/urandom || error "lgss_sk failed (1)" + do_facet $SINGLEMDS lfs flushctx || error "could not run flushctx" + OLD_SK_PATH=$SK_PATH + export SK_PATH=$SK_PATH/$FSNAME-bogus.key + if zconf_mount_clients ${clients_arr[0]} $MOUNT; then + SK_PATH=$OLD_SK_PATH + if [ -a $DIR/$tdir/$tdir.out ]; then + error "mount and read file with invalid key" + else + error "mount with invalid key" + fi + fi + SK_PATH=$OLD_SK_PATH + zconf_umount_clients ${clients_arr[0]} $MOUNT || + error "unable to umount clients" +} +run_test 30 "check for invalid shared key" log "cleanup: ======================================================" @@ -1714,7 +2154,6 @@ sec_unsetup() { } sec_unsetup -sec_cleanup - complete $SECONDS +check_and_cleanup_lustre exit_status
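
Usage note for the reworked wait_nm_sync() helper introduced above: it now takes an optional expected value and an optional lctl get_param flag after the nodemap name and key. A minimal usage sketch, assuming the helper as defined in this patch; the nodemap name "nm1" and fileset "/dir1" below are illustrative values only:

    # wait until all servers report the same parameter value as the MGS
    wait_nm_sync nm1 fileset

    # wait until all servers, including the MGS, report an explicit expected value
    wait_nm_sync nm1 fileset "nodemap.nm1.fileset=/dir1"

    # pass an extra option through to lctl get_param (-N lists parameter names only),
    # e.g. to wait until the nodemap entry itself exists
    wait_nm_sync nm1 "" "" "-N"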