set -e
ONLY=${ONLY:-"$*"}
-# bug number for skipped test: 19430 19967 19967
-ALWAYS_EXCEPT=" 2 5 6 $SANITY_SEC_EXCEPT"
+# bug number for skipped test:
+ALWAYS_EXCEPT=" $SANITY_SEC_EXCEPT"
# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
SRCDIR=$(dirname $0)
CONFDIR=/etc/lustre
PERM_CONF=$CONFDIR/perm.conf
FAIL_ON_ERROR=false
-
HOSTNAME_CHECKSUM=$(hostname | sum | awk '{ print $1 }')
SUBNET_CHECKSUM=$(expr $HOSTNAME_CHECKSUM % 250 + 1)
-NODEMAP_COUNT=16
-NODEMAP_RANGE_COUNT=3
-NODEMAP_IPADDR_LIST="1 10 64 128 200 250"
-NODEMAP_MAX_ID=128
require_dsh_mds || exit 0
require_dsh_ost || exit 0
USER0=$(getent passwd | grep :$ID0:$ID0: | cut -d: -f1)
USER1=$(getent passwd | grep :$ID1:$ID1: | cut -d: -f1)
+NODEMAP_COUNT=16
+NODEMAP_RANGE_COUNT=3
+NODEMAP_IPADDR_LIST="1 10 64 128 200 250"
+NODEMAP_ID_COUNT=10
+NODEMAP_MAX_ID=$((ID0 + NODEMAP_ID_COUNT))
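+# each test nodemap maps NODEMAP_ID_COUNT ids starting at $ID0, so the
+# idmap loops below run over the range [ID0, NODEMAP_MAX_ID)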
+
[ -z "$USER0" ] &&
skip "need to add user0 ($ID0:$ID0)" && exit 0
check_and_setup_lustre
-sec_cleanup() {
- if [ "$I_MOUNTED" = "yes" ]; then
- cleanupall -f || error "sec_cleanup"
- fi
-}
-
-DIR=${DIR:-$MOUNT}
-[ -z "$(echo $DIR | grep $MOUNT)" ] &&
- error "$DIR not in $MOUNT" && sec_cleanup && exit 1
-
-[ $(echo $MOUNT | wc -w) -gt 1 ] &&
- echo "NAME=$MOUNT mounted more than once" && sec_cleanup && exit 0
+assert_DIR
# for GSS_SUP
GSS_REF=$(lsmod | grep ^ptlrpc_gss | awk '{print $3}')
local user=$1
local group=$2
+ $GSS_KRB5 || return
if ! $RUNAS_CMD -u $user krb5_login.sh; then
error "$user login kerberos failed."
exit 1
return 3
fi
- out=$(do_facet mgs $LCTL get_param nodemap.$csum.id)
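+	# get_param is expected to fail once the nodemap is gone, so
+	# silence its error output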
+ out=$(do_facet mgs $LCTL get_param nodemap.$csum.id 2>/dev/null)
[[ $(echo $out | grep -c $csum) != 0 ]] && return 1
done
return 0
local cmd="$LCTL nodemap_add_idmap"
local rc=0
+ echo "Start to add idmaps ..."
for ((i = 0; i < NODEMAP_COUNT; i++)); do
local j
- for ((j = 500; j < NODEMAP_MAX_ID; j++)); do
+ for ((j = $ID0; j < NODEMAP_MAX_ID; j++)); do
local csum=${HOSTNAME_CHECKSUM}_${i}
local client_id=$j
local fs_id=$((j + 1))
return $rc
}
+update_idmaps() { #LU-10040
+ [ $(lustre_version_code mgs) -lt $(version_code 2.10.55) ] &&
+ skip "Need MGS >= 2.10.55" &&
+ return
+ local csum=${HOSTNAME_CHECKSUM}_0
+ local old_id_client=$ID0
+ local old_id_fs=$((ID0 + 1))
+ local new_id=$((ID0 + 100))
+ local tmp_id
+ local cmd
+ local run
+ local idtype
+ local rc=0
+
+ echo "Start to update idmaps ..."
+
+	# Inserting an existing idmap should return an error
+ cmd="$LCTL nodemap_add_idmap --name $csum --idtype uid"
+ if do_facet mgs \
+ $cmd --idmap $old_id_client:$old_id_fs 2>/dev/null; then
+ error "insert idmap {$old_id_client:$old_id_fs} " \
+ "should return error"
+ rc=$((rc + 1))
+ return rc
+ fi
+
+ #Update id_fs and check it
+ if ! do_facet mgs $cmd --idmap $old_id_client:$new_id; then
+ error "$cmd --idmap $old_id_client:$new_id failed"
+ rc=$((rc + 1))
+ return $rc
+ fi
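+	# pick the new fs id out of the records printed by
+	# 'lctl get_param -n nodemap.<name>.idmap'; the awk/sed field and
+	# line numbers depend on the current idmap record layout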
+ tmp_id=$(do_facet mgs $LCTL get_param -n nodemap.$csum.idmap |
+ awk '{ print $7 }' | sed -n '2p')
+ [ $tmp_id != $new_id ] && { error "new id_fs $tmp_id != $new_id"; \
+ rc=$((rc + 1)); return $rc; }
+
+ #Update id_client and check it
+ if ! do_facet mgs $cmd --idmap $new_id:$new_id; then
+ error "$cmd --idmap $new_id:$new_id failed"
+ rc=$((rc + 1))
+ return $rc
+ fi
+ tmp_id=$(do_facet mgs $LCTL get_param -n nodemap.$csum.idmap |
+ awk '{ print $5 }' | sed -n "$((NODEMAP_ID_COUNT + 1)) p")
+	tmp_id=${tmp_id%,*}	# strip the trailing comma, e.g. "501," -> "501"
+ [ $tmp_id != $new_id ] && { error "new id_client $tmp_id != $new_id"; \
+ rc=$((rc + 1)); return $rc; }
+
+ #Delete above updated idmap
+ cmd="$LCTL nodemap_del_idmap --name $csum --idtype uid"
+ if ! do_facet mgs $cmd --idmap $new_id:$new_id; then
+ error "$cmd --idmap $new_id:$new_id failed"
+ rc=$((rc + 1))
+ return $rc
+ fi
+
+	# restore the original idmap so that delete_idmaps can clean up properly
+ cmd="$LCTL nodemap_add_idmap --name $csum --idtype uid"
+ if ! do_facet mgs $cmd --idmap $old_id_client:$old_id_fs; then
+ error "$cmd --idmap $old_id_client:$old_id_fs failed"
+ rc=$((rc + 1))
+ return $rc
+ fi
+
+ return $rc
+}
+
delete_idmaps() {
local i
local cmd="$LCTL nodemap_del_idmap"
local rc=0
+ echo "Start to delete idmaps ..."
for ((i = 0; i < NODEMAP_COUNT; i++)); do
local j
- for ((j = 500; j < NODEMAP_MAX_ID; j++)); do
+ for ((j = $ID0; j < NODEMAP_MAX_ID; j++)); do
local csum=${HOSTNAME_CHECKSUM}_${i}
local client_id=$j
local fs_id=$((j + 1))
local cmd="$LCTL nodemap_test_id"
local rc=0
+ echo "Start to test idmaps ..."
## nodemap deactivated
if ! do_facet mgs $LCTL nodemap_activate 0; then
return 1
fi
- for ((id = 500; id < NODEMAP_MAX_ID; id++)); do
+ for ((id = $ID0; id < NODEMAP_MAX_ID; id++)); do
local j
for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
return 2
fi
- for ((id = 500; id < NODEMAP_MAX_ID; id++)); do
+ for ((id = $ID0; id < NODEMAP_MAX_ID; id++)); do
for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
fs_id=$(do_facet mgs $cmd --nid $nid \
fi
done
- for ((id = 500; id < NODEMAP_MAX_ID; id++)); do
+ for ((id = $ID0; id < NODEMAP_MAX_ID; id++)); do
for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
fs_id=$(do_facet mgs $cmd --nid $nid \
delete_nodemaps
rc=$?
- [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 2
+ [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 2
return 0
}
# Clean up
delete_nodemaps
rc=$?
- [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 3
+ [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 3
return 0
}
rc=0
delete_nodemaps
rc=$?
- [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 4
+ [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 4
return 0
}
run_test 9 "nodemap range add"
-test_10() {
+test_10a() {
local rc
remote_mgs_nodsh && skip "remote MGS with nodsh" && return
delete_nodemaps
rc=$?
- [[ $rc != 0 ]] && error "nodemap_add failed with $rc" && return 5
+ [[ $rc != 0 ]] && error "nodemap_del failed with $rc" && return 5
return 0
}
-run_test 10 "nodemap reject duplicate ranges"
+run_test 10a "nodemap reject duplicate ranges"
+
+test_10b() {
+ [ $(lustre_version_code mgs) -lt $(version_code 2.10.53) ] &&
+ skip "Need MGS >= 2.10.53" && return
+
+ local nm1="nodemap1"
+ local nm2="nodemap2"
+ local nids="192.168.19.[0-255]@o2ib20"
+
+ do_facet mgs $LCTL nodemap_del $nm1 2>/dev/null
+ do_facet mgs $LCTL nodemap_del $nm2 2>/dev/null
+
+ do_facet mgs $LCTL nodemap_add $nm1 || error "Add $nm1 failed"
+ do_facet mgs $LCTL nodemap_add $nm2 || error "Add $nm2 failed"
+ do_facet mgs $LCTL nodemap_add_range --name $nm1 --range $nids ||
+ error "Add range $nids to $nm1 failed"
+ [ -n "$(do_facet mgs $LCTL get_param nodemap.$nm1.* |
+ grep start_nid)" ] || error "No range was found"
+ do_facet mgs $LCTL nodemap_del_range --name $nm2 --range $nids &&
+ error "Deleting range $nids from $nm2 should fail"
+ [ -n "$(do_facet mgs $LCTL get_param nodemap.$nm1.* |
+ grep start_nid)" ] || error "Range $nids should be there"
+
+ do_facet mgs $LCTL nodemap_del $nm1 || error "Delete $nm1 failed"
+ do_facet mgs $LCTL nodemap_del $nm2 || error "Delete $nm2 failed"
+ return 0
+}
+run_test 10b "delete range from the correct nodemap"
+
+test_10c() { #LU-8912
+ [ $(lustre_version_code mgs) -lt $(version_code 2.10.57) ] &&
+ skip "Need MGS >= 2.10.57" && return
+
+ local nm="nodemap_lu8912"
+ local nid_range="10.210.[32-47].[0-255]@o2ib3"
+ local start_nid="10.210.32.0@o2ib3"
+ local end_nid="10.210.47.255@o2ib3"
+ local start_nid_found
+ local end_nid_found
+
+ do_facet mgs $LCTL nodemap_del $nm 2>/dev/null
+ do_facet mgs $LCTL nodemap_add $nm || error "Add $nm failed"
+ do_facet mgs $LCTL nodemap_add_range --name $nm --range $nid_range ||
+ error "Add range $nid_range to $nm failed"
+
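+	# the awk field numbers ($9 for start_nid, $13 for end_nid) depend on
+	# the exact layout of the range records printed by get_param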
+ start_nid_found=$(do_facet mgs $LCTL get_param nodemap.$nm.* |
+ awk -F '[,: ]' /start_nid/'{ print $9 }')
+ [ "$start_nid" == "$start_nid_found" ] ||
+ error "start_nid: $start_nid_found != $start_nid"
+ end_nid_found=$(do_facet mgs $LCTL get_param nodemap.$nm.* |
+ awk -F '[,: ]' /end_nid/'{ print $13 }')
+ [ "$end_nid" == "$end_nid_found" ] ||
+ error "end_nid: $end_nid_found != $end_nid"
+
+ do_facet mgs $LCTL nodemap_del $nm || error "Delete $nm failed"
+ return 0
+}
+run_test 10c "verfify contiguous range support"
+
+test_10d() { #LU-8913
+ [ $(lustre_version_code mgs) -lt $(version_code 2.10.59) ] &&
+ skip "Need MGS >= 2.10.59" && return
+
+ local nm="nodemap_lu8913"
+ local nid_range="*@o2ib3"
+ local start_nid="0.0.0.0@o2ib3"
+ local end_nid="255.255.255.255@o2ib3"
+ local start_nid_found
+ local end_nid_found
+
+ do_facet mgs $LCTL nodemap_del $nm 2>/dev/null
+ do_facet mgs $LCTL nodemap_add $nm || error "Add $nm failed"
+ do_facet mgs $LCTL nodemap_add_range --name $nm --range $nid_range ||
+ error "Add range $nid_range to $nm failed"
+
+ start_nid_found=$(do_facet mgs $LCTL get_param nodemap.$nm.* |
+ awk -F '[,: ]' /start_nid/'{ print $9 }')
+ [ "$start_nid" == "$start_nid_found" ] ||
+ error "start_nid: $start_nid_found != $start_nid"
+ end_nid_found=$(do_facet mgs $LCTL get_param nodemap.$nm.* |
+ awk -F '[,: ]' /end_nid/'{ print $13 }')
+ [ "$end_nid" == "$end_nid_found" ] ||
+ error "end_nid: $end_nid_found != $end_nid"
+
+ do_facet mgs $LCTL nodemap_del $nm || error "Delete $nm failed"
+ return 0
+}
+run_test 10d "verfify nodemap range format '*@<net>' support"
test_11() {
local rc
[[ $rc != 0 ]] && error "nodemap_test_id failed with $rc" && return 4
rc=0
+ update_idmaps
+ rc=$?
+ [[ $rc != 0 ]] && error "update_idmaps failed with $rc" && return 5
+
+ rc=0
delete_idmaps
rc=$?
- [[ $rc != 0 ]] && error "nodemap_del_idmap failed with $rc" && return 5
+ [[ $rc != 0 ]] && error "nodemap_del_idmap failed with $rc" && return 6
rc=0
delete_nodemaps
rc=$?
- [[ $rc != 0 ]] && error "nodemap_delete failed with $rc" && return 6
+ [[ $rc != 0 ]] && error "nodemap_delete failed with $rc" && return 7
return 0
}
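+# wait_nm_sync <nodemap name> [<key>] [<expected value>] [<get_param opts>]
+# wait for a nodemap parameter to be synchronized from the MGS to all
+# servers; if an expected value is given, wait until every node (including
+# the MGS) reports that value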
wait_nm_sync() {
local nodemap_name=$1
local key=$2
- local proc_param="${nodemap_name}.${key}"
- [ "$nodemap_name" == "active" ] && proc_param="active"
-
+ local value=$3
+ local opt=$4
+ local proc_param
local is_active=$(do_facet mgs $LCTL get_param -n nodemap.active)
- (( is_active == 0 )) && [ "$proc_param" != "active" ] && return
-
local max_retries=20
local is_sync
- local out1=$(do_facet mgs $LCTL get_param nodemap.${proc_param})
+ local out1=""
local out2
local mgs_ip=$(host_nids_address $mgs_HOST $NETTYPE | cut -d' ' -f1)
local i
+ if [ "$nodemap_name" == "active" ]; then
+ proc_param="active"
+ elif [ -z "$key" ]; then
+ proc_param=${nodemap_name}
+ else
+ proc_param="${nodemap_name}.${key}"
+ fi
+ (( is_active == 0 )) && [ "$proc_param" != "active" ] && return
+
+ if [ -z "$value" ]; then
+ out1=$(do_facet mgs $LCTL get_param $opt nodemap.${proc_param})
+ echo "On MGS ${mgs_ip}, ${proc_param} = $out1"
+ else
+ out1=$value;
+ fi
+
# wait up to 10 seconds for other servers to sync with mgs
for i in $(seq 1 10); do
for node in $(all_server_nodes); do
cut -d' ' -f1)
is_sync=true
- [ $node_ip == $mgs_ip ] && continue
+ if [ -z "$value" ]; then
+ [ $node_ip == $mgs_ip ] && continue
+ fi
- out2=$(do_node $node_ip $LCTL get_param \
+ out2=$(do_node $node_ip $LCTL get_param $opt \
nodemap.$proc_param 2>/dev/null)
+ echo "On $node ${node_ip}, ${proc_param} = $out2"
[ "$out1" != "$out2" ] && is_sync=false && break
done
$is_sync && break
local client
for client in $clients; do
local client_ip=$(host_nids_address $client $NETTYPE)
- local client_nid=$(h2$NETTYPE $client_ip)
+ local client_nid=$(h2nettype $client_ip)
do_facet mgs $LCTL nodemap_add c${i} || return 1
do_facet mgs $LCTL nodemap_add_range \
--name c${i} --range $client_nid || return 1
# fileset test directory needs to be initialized on a privileged client
fileset_test_setup() {
- local admin=$(do_facet mgs $LCTL get_param -n nodemap.c0.admin_nodemap)
+ local nm=$1
+
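+	# if the clients are mounted with a global $FILESET, remount them
+	# without it so the test directory can be created at the fs root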
+ if [ -n "$FILESET" -a -z "$SKIP_FILESET" ]; then
+ cleanup_mount $MOUNT
+ FILESET="" zconf_mount_clients $CLIENTS $MOUNT
+ fi
+
+ local admin=$(do_facet mgs $LCTL get_param -n \
+ nodemap.${nm}.admin_nodemap)
local trust=$(do_facet mgs $LCTL get_param -n \
- nodemap.c0.trusted_nodemap)
+ nodemap.${nm}.trusted_nodemap)
- do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
- do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
+ do_facet mgs $LCTL nodemap_modify --name $nm --property admin --value 1
+ do_facet mgs $LCTL nodemap_modify --name $nm --property trusted \
+ --value 1
- wait_nm_sync c0 admin_nodemap
- wait_nm_sync c0 trusted_nodemap
+ wait_nm_sync $nm admin_nodemap
+ wait_nm_sync $nm trusted_nodemap
# create directory and populate it for subdir mount
do_node ${clients_arr[0]} mkdir $MOUNT/$subdir ||
error "unable to create file \
$MOUNT/$subdir/$subsubdir/this_is_$subsubdir"
- do_facet mgs $LCTL nodemap_modify --name c0 \
+ do_facet mgs $LCTL nodemap_modify --name $nm \
--property admin --value $admin
- do_facet mgs $LCTL nodemap_modify --name c0 \
+ do_facet mgs $LCTL nodemap_modify --name $nm \
--property trusted --value $trust
# flush MDT locks to make sure they are reacquired before test
do_node ${clients_arr[0]} $LCTL set_param \
ldlm.namespaces.$FSNAME-MDT*.lru_size=clear
- wait_nm_sync c0 admin_nodemap
- wait_nm_sync c0 trusted_nodemap
+ wait_nm_sync $nm admin_nodemap
+ wait_nm_sync $nm trusted_nodemap
}
# fileset test directory needs to be initialized on a privileged client
fileset_test_cleanup() {
- local admin=$(do_facet mgs $LCTL get_param -n nodemap.c0.admin_nodemap)
+ local nm=$1
+ local admin=$(do_facet mgs $LCTL get_param -n \
+ nodemap.${nm}.admin_nodemap)
local trust=$(do_facet mgs $LCTL get_param -n \
- nodemap.c0.trusted_nodemap)
+ nodemap.${nm}.trusted_nodemap)
- do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
- do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
+ do_facet mgs $LCTL nodemap_modify --name $nm --property admin --value 1
+ do_facet mgs $LCTL nodemap_modify --name $nm --property trusted \
+ --value 1
- wait_nm_sync c0 admin_nodemap
- wait_nm_sync c0 trusted_nodemap
+ wait_nm_sync $nm admin_nodemap
+ wait_nm_sync $nm trusted_nodemap
# cleanup directory created for subdir mount
do_node ${clients_arr[0]} rm -rf $MOUNT/$subdir ||
error "unable to remove dir $MOUNT/$subdir"
- do_facet mgs $LCTL nodemap_modify --name c0 \
+ do_facet mgs $LCTL nodemap_modify --name $nm \
--property admin --value $admin
- do_facet mgs $LCTL nodemap_modify --name c0 \
+ do_facet mgs $LCTL nodemap_modify --name $nm \
--property trusted --value $trust
# flush MDT locks to make sure they are reacquired before test
do_node ${clients_arr[0]} $LCTL set_param \
ldlm.namespaces.$FSNAME-MDT*.lru_size=clear
- wait_nm_sync c0 admin_nodemap
- wait_nm_sync c0 trusted_nodemap
+ wait_nm_sync $nm admin_nodemap
+ wait_nm_sync $nm trusted_nodemap
+ if [ -n "$FILESET" -a -z "$SKIP_FILESET" ]; then
+ cleanup_mount $MOUNT
+ zconf_mount_clients $CLIENTS $MOUNT
+ fi
}
do_create_delete() {
local qused_high=$((qused_orig + quota_fuzz))
local qused_low=$((qused_orig - quota_fuzz))
local testfile=$DIR/$tdir/$tfile
- $run_u dd if=/dev/zero of=$testfile bs=1M count=1 >& /dev/null ||
- error "unable to write quota test file"
+ $run_u dd if=/dev/zero of=$testfile oflag=sync bs=1M count=1 \
+ >& /dev/null || error "unable to write quota test file"
sync; sync_all_data || true
local qused_new=$(nodemap_check_quota "$run_u")
do_facet mgs $LCTL nodemap_modify --name default \
--property admin --value 1
+ wait_nm_sync default admin_nodemap
do_facet mgs $LCTL nodemap_modify --name default \
--property trusted --value 1
wait_nm_sync default trusted_nodemap
do_facet mgs $LCTL nodemap_modify --name default \
--property admin --value 0
+ wait_nm_sync default admin_nodemap
do_facet mgs $LCTL nodemap_modify --name default \
--property trusted --value 0
wait_nm_sync default trusted_nodemap
do_facet mgs $LCTL nodemap_activate 0
wait_nm_sync active 0
+ export SK_UNIQUE_NM=false
return 0
}
run_test 16 "test nodemap all_off fileops"
test_17() {
+ if $SHARED_KEY &&
+ [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.55) ]; then
+ skip "Need MDS >= 2.11.55"
+ fi
+
nodemap_version_check || return 0
nodemap_test_setup
run_test 17 "test nodemap trusted_noadmin fileops"
test_18() {
+ if $SHARED_KEY &&
+ [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.55) ]; then
+ skip "Need MDS >= 2.11.55"
+ fi
+
nodemap_version_check || return 0
nodemap_test_setup
run_test 18 "test nodemap mapped_noadmin fileops"
test_19() {
+ if $SHARED_KEY &&
+ [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.55) ]; then
+ skip "Need MDS >= 2.11.55"
+ fi
+
nodemap_version_check || return 0
nodemap_test_setup
run_test 19 "test nodemap trusted_admin fileops"
test_20() {
+ if $SHARED_KEY &&
+ [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.55) ]; then
+ skip "Need MDS >= 2.11.55"
+ fi
+
nodemap_version_check || return 0
nodemap_test_setup
run_test 20 "test nodemap mapped_admin fileops"
test_21() {
+ if $SHARED_KEY &&
+ [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.55) ]; then
+ skip "Need MDS >= 2.11.55"
+ fi
+
nodemap_version_check || return 0
nodemap_test_setup
run_test 21 "test nodemap mapped_trusted_noadmin fileops"
test_22() {
+ if $SHARED_KEY &&
+ [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.55) ]; then
+ skip "Need MDS >= 2.11.55"
+ fi
+
nodemap_version_check || return 0
nodemap_test_setup
# acl test directory needs to be initialized on a privileged client
nodemap_acl_test_setup() {
- local admin=$(do_facet mgs $LCTL get_param -n nodemap.c0.admin_nodemap)
+ local admin=$(do_facet mgs $LCTL get_param -n \
+ nodemap.c0.admin_nodemap)
local trust=$(do_facet mgs $LCTL get_param -n \
nodemap.c0.trusted_nodemap)
return 1
}
-test_23() {
+test_23a() {
+	[ $num_clients -lt 2 ] && skip "Need at least 2 clients" && return
nodemap_version_check || return 0
nodemap_test_setup
nodemap_test_cleanup
}
-run_test 23 "test mapped ACLs"
+run_test 23a "test mapped regular ACLs"
+
+test_23b() { #LU-9929
+	[ $num_clients -lt 2 ] && skip "Need at least 2 clients" && return
+ [ $(lustre_version_code mgs) -lt $(version_code 2.10.53) ] &&
+ skip "Need MGS >= 2.10.53" && return
+
+ export SK_UNIQUE_NM=true
+ nodemap_test_setup
+ trap nodemap_test_cleanup EXIT
+
+ local testdir=$DIR/$tdir
+ local fs_id=$((IDBASE+10))
+ local unmapped_id
+ local mapped_id
+ local fs_user
+
+ do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
+ wait_nm_sync c0 admin_nodemap
+ do_facet mgs $LCTL nodemap_modify --name c1 --property admin --value 1
+ wait_nm_sync c1 admin_nodemap
+ do_facet mgs $LCTL nodemap_modify --name c1 --property trusted --value 1
+ wait_nm_sync c1 trusted_nodemap
+
+ # Add idmap $ID0:$fs_id (500:60010)
+ do_facet mgs $LCTL nodemap_add_idmap --name c0 --idtype gid \
+ --idmap $ID0:$fs_id ||
+ error "add idmap $ID0:$fs_id to nodemap c0 failed"
+ wait_nm_sync c0 idmap
+
+ # set/getfacl default acl on client0 (unmapped gid=500)
+ rm -rf $testdir
+ mkdir -p $testdir
+ # Here, USER0=$(getent passwd | grep :$ID0:$ID0: | cut -d: -f1)
+ setfacl -R -d -m group:$USER0:rwx $testdir ||
+ error "setfacl $testdir on ${clients_arr[0]} failed"
+ unmapped_id=$(getfacl $testdir | grep -E "default:group:.*:rwx" |
+ awk -F: '{print $3}')
+ [ "$unmapped_id" = "$USER0" ] ||
+ error "gid=$ID0 was not unmapped correctly on ${clients_arr[0]}"
+
+ # getfacl default acl on client2 (mapped gid=60010)
+ mapped_id=$(do_node ${clients_arr[1]} getfacl $testdir |
+ grep -E "default:group:.*:rwx" | awk -F: '{print $3}')
+ fs_user=$(do_node ${clients_arr[1]} getent passwd |
+ grep :$fs_id:$fs_id: | cut -d: -f1)
+ [ -z "$fs_user" ] && fs_user=$fs_id
+	[ "$mapped_id" = "$fs_id" ] || [ "$mapped_id" = "$fs_user" ] ||
+		error "Should return gid=$fs_id or $fs_user on client2"
+
+ rm -rf $testdir
+ nodemap_test_cleanup
+ export SK_UNIQUE_NM=false
+}
+run_test 23b "test mapped default ACLs"
test_24() {
nodemap_test_setup
trap nodemap_test_cleanup EXIT
- do_nodes $(comma_list $(all_server_nodes)) $LCTL get_param -R nodemap ||
- error "proc readable file read failed"
+ do_nodes $(comma_list $(all_server_nodes)) $LCTL get_param -R nodemap
nodemap_test_cleanup
}
test_25() {
local tmpfile=$(mktemp)
local tmpfile2=$(mktemp)
+ local tmpfile3=$(mktemp)
+ local tmpfile4=$(mktemp)
local subdir=c0dir
+ local client
nodemap_version_check || return 0
zconf_umount_clients $CLIENTS $MOUNT ||
error "unable to umount clients $CLIENTS"
+ export SK_UNIQUE_NM=true
nodemap_test_setup
+ # enable trusted/admin for setquota call in cleanup_and_setup_lustre()
+ i=0
+ for client in $clients; do
+ do_facet mgs $LCTL nodemap_modify --name c${i} \
+ --property admin --value 1
+ do_facet mgs $LCTL nodemap_modify --name c${i} \
+ --property trusted --value 1
+ ((i++))
+ done
+ wait_nm_sync c$((i - 1)) trusted_nodemap
+
trap nodemap_test_cleanup EXIT
# create a new, empty nodemap, and add fileset info to it
- do_facet mgs $LCTL nodemap_add test26 ||
- error "unable to create nodemap test26"
- do_facet mgs $LCTL set_param -P nodemap.test26.fileset=/$subdir ||
- error "unable to add fileset info to nodemap test26"
+	do_facet mgs $LCTL nodemap_add test25 ||
+		error "unable to create nodemap test25"
+	do_facet mgs $LCTL set_param -P nodemap.test25.fileset=/$subdir ||
+		error "unable to add fileset info to nodemap test25"
- wait_nm_sync test26 id
+ wait_nm_sync test25 id
do_facet mgs $LCTL nodemap_info > $tmpfile
do_facet mds $LCTL nodemap_info > $tmpfile2
- cleanup_and_setup_lustre
+ if ! $SHARED_KEY; then
+		# cleanup_and_setup_lustre would conflict with SK's nodemaps
+ cleanup_and_setup_lustre
+ fi
# stop clients for this test
zconf_umount_clients $CLIENTS $MOUNT ||
error "unable to umount clients $CLIENTS"
- diff -q <(do_facet mgs $LCTL nodemap_info) $tmpfile >& /dev/null ||
+ do_facet mgs $LCTL nodemap_info > $tmpfile3
+ diff -q $tmpfile3 $tmpfile >& /dev/null ||
error "nodemap_info diff on MGS after remount"
- diff -q <(do_facet mds $LCTL nodemap_info) $tmpfile2 >& /dev/null ||
+ do_facet mds $LCTL nodemap_info > $tmpfile4
+ diff -q $tmpfile4 $tmpfile2 >& /dev/null ||
error "nodemap_info diff on MDS after remount"
# cleanup nodemap
- do_facet mgs $LCTL nodemap_del test26 ||
- error "cannot delete nodemap test26 from config"
+ do_facet mgs $LCTL nodemap_del test25 ||
+ error "cannot delete nodemap test25 from config"
nodemap_test_cleanup
# restart clients previously stopped
zconf_mount_clients $CLIENTS $MOUNT ||
error "unable to mount clients $CLIENTS"
-	rm -f $tmpfile $tmpfile2
+	rm -f $tmpfile $tmpfile2 $tmpfile3 $tmpfile4
+ export SK_UNIQUE_NM=false
}
run_test 25 "test save and reload nodemap config"
}
run_test 26 "test transferring very large nodemap"
-test_27() {
- local subdir=c0dir
- local subsubdir=c0subdir
-
- nodemap_test_setup
- trap nodemap_test_cleanup EXIT
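+# nodemap_exercise_fileset <nodemap>: set a fileset on the given nodemap,
+# remount a client to verify it lands in the fileset subdirectory, then
+# clear the fileset and restore the previous configuration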
+nodemap_exercise_fileset() {
+ local nm="$1"
+ local loop=0
- fileset_test_setup
+ # setup
+ if [ "$nm" == "default" ]; then
+ do_facet mgs $LCTL nodemap_activate 1
+ wait_nm_sync active
+ else
+ nodemap_test_setup
+ fi
+ if $SHARED_KEY; then
+ export SK_UNIQUE_NM=true
+ else
+		# the cleanup trap would conflict with SK's nodemaps
+ trap "fileset_test_cleanup $nm" EXIT
+ fi
+ fileset_test_setup "$nm"
- # add fileset info to nodemap
- do_facet mgs $LCTL set_param nodemap.c0.fileset=/$subdir ||
- error "unable to set fileset info on nodemap c0"
- do_facet mgs $LCTL set_param -P nodemap.c0.fileset=/$subdir ||
- error "unable to add fileset info to nodemap c0"
- wait_nm_sync c0 fileset
+ # add fileset info to $nm nodemap
+ if ! combined_mgs_mds; then
+ do_facet mgs $LCTL set_param nodemap.${nm}.fileset=/$subdir ||
+ error "unable to add fileset info to $nm nodemap on MGS"
+ fi
+ do_facet mgs $LCTL set_param -P nodemap.${nm}.fileset=/$subdir ||
+ error "unable to add fileset info to $nm nodemap for servers"
+ wait_nm_sync $nm fileset "nodemap.${nm}.fileset=/$subdir"
# re-mount client
zconf_umount_clients ${clients_arr[0]} $MOUNT ||
error "unable to umount client ${clients_arr[0]}"
+ # set some generic fileset to trigger SSK code
+ export FILESET=/
zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
error "unable to remount client ${clients_arr[0]}"
+ unset FILESET
# test mount point content
do_node ${clients_arr[0]} test -f $MOUNT/this_is_$subdir ||
error "subdir of fileset not taken into account"
# remove fileset info from nodemap
- do_facet mgs $LCTL nodemap_set_fileset --name c0 --fileset \'\' ||
- error "unable to delete fileset info on nodemap c0"
- do_facet mgs $LCTL set_param -P nodemap.c0.fileset=\'\' ||
- error "unable to reset fileset info on nodemap c0"
- wait_nm_sync c0 fileset
+ do_facet mgs $LCTL nodemap_set_fileset --name $nm --fileset clear ||
+ error "unable to delete fileset info on $nm nodemap"
+ wait_update_facet mgs "$LCTL get_param nodemap.${nm}.fileset" \
+ "nodemap.${nm}.fileset=" ||
+ error "fileset info still not cleared on $nm nodemap"
+ do_facet mgs $LCTL set_param -P nodemap.${nm}.fileset=clear ||
+ error "unable to reset fileset info on $nm nodemap"
+ wait_nm_sync $nm fileset "nodemap.${nm}.fileset="
# re-mount client
zconf_umount_clients ${clients_arr[0]} $MOUNT ||
error "unable to remount client ${clients_arr[0]}"
# test mount point content
- do_node ${clients_arr[0]} test -d $MOUNT/$subdir ||
- error "fileset not cleared on nodemap c0"
+ if ! $(do_node ${clients_arr[0]} test -d $MOUNT/$subdir); then
+ ls $MOUNT
+ error "fileset not cleared on $nm nodemap"
+ fi
+
+ # back to non-nodemap setup
+ if $SHARED_KEY; then
+ export SK_UNIQUE_NM=false
+ zconf_umount_clients ${clients_arr[0]} $MOUNT ||
+ error "unable to umount client ${clients_arr[0]}"
+ fi
+ fileset_test_cleanup "$nm"
+ if [ "$nm" == "default" ]; then
+ do_facet mgs $LCTL nodemap_activate 0
+ wait_nm_sync active 0
+ trap 0
+ export SK_UNIQUE_NM=false
+ else
+ nodemap_test_cleanup
+ fi
+ if $SHARED_KEY; then
+ zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
+ error "unable to remount client ${clients_arr[0]}"
+ fi
+}
+
+test_27a() {
+ [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.50) ] &&
+ skip "Need MDS >= 2.11.50" && return
+
+ for nm in "default" "c0"; do
+ local subdir="subdir_${nm}"
+ local subsubdir="subsubdir_${nm}"
+
+ if [ "$nm" == "default" ] && [ "$SHARED_KEY" == "true" ]; then
+ echo "Skipping nodemap $nm with SHARED_KEY";
+ continue;
+ fi
+
+ echo "Exercising fileset for nodemap $nm"
+ nodemap_exercise_fileset "$nm"
+ done
+}
+run_test 27a "test fileset in various nodemaps"
+
+test_27b() { #LU-10703
+ [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.50) ] &&
+ skip "Need MDS >= 2.11.50" && return
+ [[ $MDSCOUNT -lt 2 ]] && skip "needs >= 2 MDTs" && return
+
+ nodemap_test_setup
+ trap nodemap_test_cleanup EXIT
+
+ # Add the nodemaps and set their filesets
+ for i in $(seq 1 $MDSCOUNT); do
+ do_facet mgs $LCTL nodemap_del nm$i 2>/dev/null
+ do_facet mgs $LCTL nodemap_add nm$i ||
+ error "add nodemap nm$i failed"
+ wait_nm_sync nm$i "" "" "-N"
+
+ if ! combined_mgs_mds; then
+ do_facet mgs \
+ $LCTL set_param nodemap.nm$i.fileset=/dir$i ||
+ error "set nm$i.fileset=/dir$i failed on MGS"
+ fi
+ do_facet mgs $LCTL set_param -P nodemap.nm$i.fileset=/dir$i ||
+ error "set nm$i.fileset=/dir$i failed on servers"
+ wait_nm_sync nm$i fileset "nodemap.nm$i.fileset=/dir$i"
+ done
+
+ # Check if all the filesets are correct
+ for i in $(seq 1 $MDSCOUNT); do
+ fileset=$(do_facet mds$i \
+ $LCTL get_param -n nodemap.nm$i.fileset)
+ [ "$fileset" = "/dir$i" ] ||
+ error "nm$i.fileset $fileset != /dir$i on mds$i"
+ do_facet mgs $LCTL nodemap_del nm$i ||
+ error "delete nodemap nm$i failed"
+ done
- fileset_test_cleanup
nodemap_test_cleanup
}
-run_test 27 "test fileset in nodemap"
+run_test 27b "The new nodemap won't clear the old nodemap's fileset"
+
+test_28() {
+ if ! $SHARED_KEY; then
+ skip "need shared key feature for this test" && return
+ fi
+ mkdir -p $DIR/$tdir || error "mkdir failed"
+ touch $DIR/$tdir/$tdir.out || error "touch failed"
+ if [ ! -f $DIR/$tdir/$tdir.out ]; then
+ error "read before rotation failed"
+ fi
+ # store top key identity to ensure rotation has occurred
+ SK_IDENTITY_OLD=$(lctl get_param *.*.*srpc* | grep "expire" |
+ head -1 | awk '{print $15}' | cut -c1-8)
+ do_facet $SINGLEMDS lfs flushctx ||
+ error "could not run flushctx on $SINGLEMDS"
+ sleep 5
+ lfs flushctx || error "could not run flushctx on client"
+ sleep 5
+ # verify new key is in place
+ SK_IDENTITY_NEW=$(lctl get_param *.*.*srpc* | grep "expire" |
+ head -1 | awk '{print $15}' | cut -c1-8)
+ if [ $SK_IDENTITY_OLD == $SK_IDENTITY_NEW ]; then
+ error "key did not rotate correctly"
+ fi
+ if [ ! -f $DIR/$tdir/$tdir.out ]; then
+ error "read after rotation failed"
+ fi
+}
+run_test 28 "check shared key rotation method"
+
+test_29() {
+ if ! $SHARED_KEY; then
+ skip "need shared key feature for this test" && return
+ fi
+ if [ $SK_FLAVOR != "ski" ] && [ $SK_FLAVOR != "skpi" ]; then
+ skip "test only valid if integrity is active"
+ fi
+ rm -r $DIR/$tdir
+ mkdir $DIR/$tdir || error "mkdir"
+ touch $DIR/$tdir/$tfile || error "touch"
+ zconf_umount_clients ${clients_arr[0]} $MOUNT ||
+ error "unable to umount clients"
+ keyctl show | awk '/lustre/ { print $1 }' |
+ xargs -IX keyctl unlink X
+ OLD_SK_PATH=$SK_PATH
+ export SK_PATH=/dev/null
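+	# with the keys unlinked and SK_PATH pointing at /dev/null, the
+	# client mount below is expected to fail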
+ if zconf_mount_clients ${clients_arr[0]} $MOUNT; then
+ export SK_PATH=$OLD_SK_PATH
+ if [ -e $DIR/$tdir/$tfile ]; then
+ error "able to mount and read without key"
+ else
+ error "able to mount without key"
+ fi
+ else
+ export SK_PATH=$OLD_SK_PATH
+ keyctl show | awk '/lustre/ { print $1 }' |
+ xargs -IX keyctl unlink X
+ fi
+}
+run_test 29 "check for missing shared key"
+
+test_30() {
+ if ! $SHARED_KEY; then
+ skip "need shared key feature for this test" && return
+ fi
+ if [ $SK_FLAVOR != "ski" ] && [ $SK_FLAVOR != "skpi" ]; then
+ skip "test only valid if integrity is active"
+ fi
+ mkdir -p $DIR/$tdir || error "mkdir failed"
+ touch $DIR/$tdir/$tdir.out || error "touch failed"
+ zconf_umount_clients ${clients_arr[0]} $MOUNT ||
+ error "unable to umount clients"
+ # unload keys from ring
+ keyctl show | awk '/lustre/ { print $1 }' |
+ xargs -IX keyctl unlink X
+ # invalidate the key with bogus filesystem name
+ lgss_sk -w $SK_PATH/$FSNAME-bogus.key -f $FSNAME.bogus \
+ -t client -d /dev/urandom || error "lgss_sk failed (1)"
+ do_facet $SINGLEMDS lfs flushctx || error "could not run flushctx"
+ OLD_SK_PATH=$SK_PATH
+ export SK_PATH=$SK_PATH/$FSNAME-bogus.key
+ if zconf_mount_clients ${clients_arr[0]} $MOUNT; then
+ SK_PATH=$OLD_SK_PATH
+ if [ -a $DIR/$tdir/$tdir.out ]; then
+ error "mount and read file with invalid key"
+ else
+ error "mount with invalid key"
+ fi
+ fi
+ SK_PATH=$OLD_SK_PATH
+ zconf_umount_clients ${clients_arr[0]} $MOUNT ||
+ error "unable to umount clients"
+}
+run_test 30 "check for invalid shared key"
+
+cleanup_31() {
+ # unmount client
+ zconf_umount $HOSTNAME $MOUNT || error "unable to umount client"
+
+ # remove ${NETTYPE}999 network on all nodes
+ do_nodes $(comma_list $(all_nodes)) \
+ "$LNETCTL net del --net ${NETTYPE}999 && \
+ $LNETCTL lnet unconfigure 2>/dev/null || true"
+
+ # necessary to do writeconf in order to de-register
+ # @${NETTYPE}999 nid for targets
+ KZPOOL=$KEEP_ZPOOL
+ export KEEP_ZPOOL="true"
+ stopall
+ export SK_MOUNTED=false
+ writeconf_all
+ setupall || echo 1
+ export KEEP_ZPOOL="$KZPOOL"
+}
+
+test_31() {
+ local nid=$(lctl list_nids | grep ${NETTYPE} | head -n1)
+ local addr=${nid%@*}
+ local net=${nid#*@}
+
+ export LNETCTL=$(which lnetctl 2> /dev/null)
+
+ [ -z "$LNETCTL" ] && skip "without lnetctl support." && return
+ local_mode && skip "in local mode."
+
+ stack_trap cleanup_31 EXIT
+
+ # umount client
+ if [ "$MOUNT_2" ] && $(grep -q $MOUNT2' ' /proc/mounts); then
+ umount_client $MOUNT2 || error "umount $MOUNT2 failed"
+ fi
+ if $(grep -q $MOUNT' ' /proc/mounts); then
+ umount_client $MOUNT || error "umount $MOUNT failed"
+ fi
+
+ # check exports on servers are empty for client
+ do_facet mgs "lctl get_param -n *.MGS*.exports.'$nid'.uuid 2>/dev/null |
+ grep -q -" && error "export on MGS should be empty"
+ do_nodes $(comma_list $(mdts_nodes) $(osts_nodes)) \
+ "lctl get_param -n *.${FSNAME}*.exports.'$nid'.uuid \
+ 2>/dev/null | grep -q -" &&
+ error "export on servers should be empty"
+
+ # add network ${NETTYPE}999 on all nodes
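+	# the awk below extracts the interface name from the line that
+	# follows "interfaces" in the 'lnetctl net show' output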
+ do_nodes $(comma_list $(all_nodes)) \
+ "$LNETCTL lnet configure && $LNETCTL net add --if \
+ $($LNETCTL net show --net $net | awk 'BEGIN{inf=0} \
+	{if (inf==1) print $2; inf=0} /interfaces/{inf=1}') \
+ --net ${NETTYPE}999" ||
+ error "unable to configure NID ${NETTYPE}999"
+
+ # necessary to do writeconf in order to register
+ # new @${NETTYPE}999 nid for targets
+ KZPOOL=$KEEP_ZPOOL
+ export KEEP_ZPOOL="true"
+ stopall
+ export SK_MOUNTED=false
+ writeconf_all
+ setupall server_only || echo 1
+ export KEEP_ZPOOL="$KZPOOL"
+
+ # backup MGSNID
+ local mgsnid_orig=$MGSNID
+ # compute new MGSNID
+ MGSNID=$(do_facet mgs "$LCTL list_nids | grep ${NETTYPE}999")
+
+ # on client, turn LNet Dynamic Discovery on
+ lnetctl set discovery 1
+
+ # mount client with -o network=${NETTYPE}999 option:
+ # should fail because of LNet Dynamic Discovery
+ mount_client $MOUNT ${MOUNT_OPTS},network=${NETTYPE}999 &&
+ error "client mount with '-o network' option should be refused"
+
+ # on client, reconfigure LNet and turn LNet Dynamic Discovery off
+ $LNETCTL net del --net ${NETTYPE}999 && lnetctl lnet unconfigure
+ lustre_rmmod
+ modprobe lnet
+ lnetctl set discovery 0
+ modprobe ptlrpc
+ $LNETCTL lnet configure && $LNETCTL net add --if \
+ $($LNETCTL net show --net $net | awk 'BEGIN{inf=0} \
+	{if (inf==1) print $2; inf=0} /interfaces/{inf=1}') \
+ --net ${NETTYPE}999 ||
+ error "unable to configure NID ${NETTYPE}999 on client"
+
+ # mount client with -o network=${NETTYPE}999 option
+ mount_client $MOUNT ${MOUNT_OPTS},network=${NETTYPE}999 ||
+ error "unable to remount client"
+
+ # restore MGSNID
+ MGSNID=$mgsnid_orig
+
+ # check export on MGS
+ do_facet mgs "lctl get_param -n *.MGS*.exports.'$nid'.uuid 2>/dev/null |
+ grep -q -"
+ [ $? -ne 0 ] || error "export for $nid on MGS should not exist"
+
+ do_facet mgs \
+ "lctl get_param -n *.MGS*.exports.'${addr}@${NETTYPE}999'.uuid \
+ 2>/dev/null | grep -q -"
+ [ $? -eq 0 ] ||
+ error "export for ${addr}@${NETTYPE}999 on MGS should exist"
+
+ # check {mdc,osc} imports
+ lctl get_param mdc.${FSNAME}-*.import | grep current_connection |
+ grep -q ${NETTYPE}999
+ [ $? -eq 0 ] ||
+ error "import for mdc should use ${addr}@${NETTYPE}999"
+ lctl get_param osc.${FSNAME}-*.import | grep current_connection |
+ grep -q ${NETTYPE}999
+ [ $? -eq 0 ] ||
+ error "import for osc should use ${addr}@${NETTYPE}999"
+}
+run_test 31 "client mount option '-o network'"
+
+cleanup_32() {
+ # umount client
+ zconf_umount_clients ${clients_arr[0]} $MOUNT
+
+ # disable sk flavor enforcement on MGS
+ set_rule _mgs any any null
+
+ # stop gss daemon on MGS
+ if ! combined_mgs_mds ; then
+ send_sigint $mgs_HOST lsvcgssd
+ fi
+
+ # re-mount client
+ MOUNT_OPTS=$(add_sk_mntflag $MOUNT_OPTS)
+ mountcli
+
+ restore_to_default_flavor
+}
+
+test_32() {
+ if ! $SHARED_KEY; then
+ skip "need shared key feature for this test"
+ fi
+
+ stack_trap cleanup_32 EXIT
+
+ # restore to default null flavor
+ save_flvr=$SK_FLAVOR
+ SK_FLAVOR=null
+ restore_to_default_flavor || error "cannot set null flavor"
+ SK_FLAVOR=$save_flvr
+
+ # umount client
+ if [ "$MOUNT_2" ] && $(grep -q $MOUNT2' ' /proc/mounts); then
+ umount_client $MOUNT2 || error "umount $MOUNT2 failed"
+ fi
+ if $(grep -q $MOUNT' ' /proc/mounts); then
+ umount_client $MOUNT || error "umount $MOUNT failed"
+ fi
+
+ # start gss daemon on MGS
+ if combined_mgs_mds ; then
+ send_sigint $mds_HOST lsvcgssd
+ fi
+ start_gss_daemons $mgs_HOST "$LSVCGSSD -vvv -s -g"
+
+ # add mgs key type and MGS NIDs in key on MGS
+ do_nodes $mgs_HOST "lgss_sk -t mgs,server -g $MGSNID -m \
+ $SK_PATH/$FSNAME.key >/dev/null 2>&1" ||
+ error "could not modify keyfile on MGS"
+
+ # load modified key file on MGS
+ do_nodes $mgs_HOST "lgss_sk -l $SK_PATH/$FSNAME.key >/dev/null 2>&1" ||
+ error "could not load keyfile on MGS"
+
+ # add MGS NIDs in key on client
+ do_nodes ${clients_arr[0]} "lgss_sk -g $MGSNID -m \
+ $SK_PATH/$FSNAME.key >/dev/null 2>&1" ||
+ error "could not modify keyfile on MGS"
+
+ # set perms for per-nodemap keys else permission denied
+ do_nodes $(comma_list $(all_nodes)) \
+ "keyctl show | grep lustre | cut -c1-11 |
+ sed -e 's/ //g;' |
+ xargs -IX keyctl setperm X 0x3f3f3f3f"
+
+ # re-mount client with mgssec=skn
+ save_opts=$MOUNT_OPTS
+ if [ -z "$MOUNT_OPTS" ]; then
+ MOUNT_OPTS="-o mgssec=skn"
+ else
+ MOUNT_OPTS="$MOUNT_OPTS,mgssec=skn"
+ fi
+ zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
+ error "mount ${clients_arr[0]} with mgssec=skn failed"
+ MOUNT_OPTS=$save_opts
+
+ # umount client
+ zconf_umount_clients ${clients_arr[0]} $MOUNT ||
+ error "umount ${clients_arr[0]} failed"
+
+ # enforce ska flavor on MGS
+ set_rule _mgs any any ska
+
+ # re-mount client without mgssec
+ zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS &&
+ error "mount ${clients_arr[0]} without mgssec should fail"
+
+ # re-mount client with mgssec=skn
+ save_opts=$MOUNT_OPTS
+ if [ -z "$MOUNT_OPTS" ]; then
+ MOUNT_OPTS="-o mgssec=skn"
+ else
+ MOUNT_OPTS="$MOUNT_OPTS,mgssec=skn"
+ fi
+ zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS &&
+ error "mount ${clients_arr[0]} with mgssec=skn should fail"
+ MOUNT_OPTS=$save_opts
+
+ # re-mount client with mgssec=ska
+ save_opts=$MOUNT_OPTS
+ if [ -z "$MOUNT_OPTS" ]; then
+ MOUNT_OPTS="-o mgssec=ska"
+ else
+ MOUNT_OPTS="$MOUNT_OPTS,mgssec=ska"
+ fi
+ zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
+ error "mount ${clients_arr[0]} with mgssec=ska failed"
+ MOUNT_OPTS=$save_opts
+
+ exit 0
+}
+run_test 32 "check for mgssec"
+
+cleanup_33() {
+ # disable sk flavor enforcement
+ set_rule $FSNAME any cli2mdt null
+ wait_flavor cli2mdt null
+
+ # umount client
+ zconf_umount_clients ${clients_arr[0]} $MOUNT
+
+ # stop gss daemon on MGS
+ if ! combined_mgs_mds ; then
+ send_sigint $mgs_HOST lsvcgssd
+ fi
+
+ # re-mount client
+ MOUNT_OPTS=$(add_sk_mntflag $MOUNT_OPTS)
+ mountcli
+
+ restore_to_default_flavor
+}
+
+test_33() {
+ if ! $SHARED_KEY; then
+ skip "need shared key feature for this test"
+ fi
+
+ stack_trap cleanup_33 EXIT
+
+ # restore to default null flavor
+ save_flvr=$SK_FLAVOR
+ SK_FLAVOR=null
+ restore_to_default_flavor || error "cannot set null flavor"
+ SK_FLAVOR=$save_flvr
+
+ # umount client
+ if [ "$MOUNT_2" ] && $(grep -q $MOUNT2' ' /proc/mounts); then
+ umount_client $MOUNT2 || error "umount $MOUNT2 failed"
+ fi
+ if $(grep -q $MOUNT' ' /proc/mounts); then
+ umount_client $MOUNT || error "umount $MOUNT failed"
+ fi
+
+ # start gss daemon on MGS
+ if combined_mgs_mds ; then
+ send_sigint $mds_HOST lsvcgssd
+ fi
+ start_gss_daemons $mgs_HOST "$LSVCGSSD -vvv -s -g"
+
+ # add mgs key type and MGS NIDs in key on MGS
+ do_nodes $mgs_HOST "lgss_sk -t mgs,server -g $MGSNID -m \
+ $SK_PATH/$FSNAME.key >/dev/null 2>&1" ||
+ error "could not modify keyfile on MGS"
+
+ # load modified key file on MGS
+ do_nodes $mgs_HOST "lgss_sk -l $SK_PATH/$FSNAME.key >/dev/null 2>&1" ||
+ error "could not load keyfile on MGS"
+
+ # add MGS NIDs in key on client
+ do_nodes ${clients_arr[0]} "lgss_sk -g $MGSNID -m \
+ $SK_PATH/$FSNAME.key >/dev/null 2>&1" ||
+ error "could not modify keyfile on MGS"
+
+ # set perms for per-nodemap keys else permission denied
+ do_nodes $(comma_list $(all_nodes)) \
+ "keyctl show | grep lustre | cut -c1-11 |
+ sed -e 's/ //g;' |
+ xargs -IX keyctl setperm X 0x3f3f3f3f"
+
+ # re-mount client with mgssec=skn
+ save_opts=$MOUNT_OPTS
+ if [ -z "$MOUNT_OPTS" ]; then
+ MOUNT_OPTS="-o mgssec=skn"
+ else
+ MOUNT_OPTS="$MOUNT_OPTS,mgssec=skn"
+ fi
+ zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
+ error "mount ${clients_arr[0]} with mgssec=skn failed"
+ MOUNT_OPTS=$save_opts
+
+ # enforce ska flavor for cli2mdt
+ set_rule $FSNAME any cli2mdt ska
+ wait_flavor cli2mdt ska
+
+ # check error message
+ $LCTL dk | grep "faked source" &&
+ error "MGS connection srpc flags incorrect"
+
+ exit 0
+}
+run_test 33 "correct srpc flags for MGS connection"
log "cleanup: ======================================================"
}
sec_unsetup
-sec_cleanup
-
complete $SECONDS
+check_and_cleanup_lustre
exit_status