set -e
ONLY=${ONLY:-"$*"}
-# bug number for skipped test: LU-10229
-ALWAYS_EXCEPT=" 27 $SANITY_SEC_EXCEPT"
-if $SHARED_KEY; then
-# bug number for skipped test: 9145 9145 9671 9145 9145 9145 9145 9245
- ALWAYS_EXCEPT=" 17 18 19 20 21 22 23 27 $ALWAYS_EXCEPT"
-fi
+# bug number for skipped test:
+ALWAYS_EXCEPT=" $SANITY_SEC_EXCEPT"
# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
SRCDIR=$(dirname $0)
check_and_setup_lustre
-sec_cleanup() {
- if [ "$I_MOUNTED" = "yes" ]; then
- cleanupall -f || error "sec_cleanup"
- fi
-}
-
-DIR=${DIR:-$MOUNT}
-[ -z "$(echo $DIR | grep $MOUNT)" ] &&
- error "$DIR not in $MOUNT" && sec_cleanup && exit 1
-
-[ $(echo $MOUNT | wc -w) -gt 1 ] &&
- echo "NAME=$MOUNT mounted more than once" && sec_cleanup && exit 0
+assert_DIR
# for GSS_SUP
GSS_REF=$(lsmod | grep ^ptlrpc_gss | awk '{print $3}')
}
run_test 10b "delete range from the correct nodemap"
+test_10c() { #LU-8912
+ [ $(lustre_version_code mgs) -lt $(version_code 2.10.57) ] &&
+ skip "Need MGS >= 2.10.57" && return
+
+ local nm="nodemap_lu8912"
+ local nid_range="10.210.[32-47].[0-255]@o2ib3"
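+	# the bracketed NID expression should be stored as one contiguous range: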
+ local start_nid="10.210.32.0@o2ib3"
+ local end_nid="10.210.47.255@o2ib3"
+ local start_nid_found
+ local end_nid_found
+
+ do_facet mgs $LCTL nodemap_del $nm 2>/dev/null
+ do_facet mgs $LCTL nodemap_add $nm || error "Add $nm failed"
+ do_facet mgs $LCTL nodemap_add_range --name $nm --range $nid_range ||
+ error "Add range $nid_range to $nm failed"
+
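+	# range record is split on ',', ':' and ' '; fields 9 and 13 hold the NIDs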
+ start_nid_found=$(do_facet mgs $LCTL get_param nodemap.$nm.* |
+ awk -F '[,: ]' /start_nid/'{ print $9 }')
+ [ "$start_nid" == "$start_nid_found" ] ||
+ error "start_nid: $start_nid_found != $start_nid"
+ end_nid_found=$(do_facet mgs $LCTL get_param nodemap.$nm.* |
+ awk -F '[,: ]' /end_nid/'{ print $13 }')
+ [ "$end_nid" == "$end_nid_found" ] ||
+ error "end_nid: $end_nid_found != $end_nid"
+
+ do_facet mgs $LCTL nodemap_del $nm || error "Delete $nm failed"
+ return 0
+}
+run_test 10c "verify contiguous range support"
+
+test_10d() { #LU-8913
+ [ $(lustre_version_code mgs) -lt $(version_code 2.10.59) ] &&
+ skip "Need MGS >= 2.10.59" && return
+
+ local nm="nodemap_lu8913"
+ local nid_range="*@o2ib3"
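+	# '*@<net>' should expand to the full IPv4 address range on that network: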
+ local start_nid="0.0.0.0@o2ib3"
+ local end_nid="255.255.255.255@o2ib3"
+ local start_nid_found
+ local end_nid_found
+
+ do_facet mgs $LCTL nodemap_del $nm 2>/dev/null
+ do_facet mgs $LCTL nodemap_add $nm || error "Add $nm failed"
+ do_facet mgs $LCTL nodemap_add_range --name $nm --range $nid_range ||
+ error "Add range $nid_range to $nm failed"
+
+ start_nid_found=$(do_facet mgs $LCTL get_param nodemap.$nm.* |
+ awk -F '[,: ]' /start_nid/'{ print $9 }')
+ [ "$start_nid" == "$start_nid_found" ] ||
+ error "start_nid: $start_nid_found != $start_nid"
+ end_nid_found=$(do_facet mgs $LCTL get_param nodemap.$nm.* |
+ awk -F '[,: ]' /end_nid/'{ print $13 }')
+ [ "$end_nid" == "$end_nid_found" ] ||
+ error "end_nid: $end_nid_found != $end_nid"
+
+ do_facet mgs $LCTL nodemap_del $nm || error "Delete $nm failed"
+ return 0
+}
+run_test 10d "verify nodemap range format '*@<net>' support"
+
test_11() {
local rc
local nodemap_name=$1
local key=$2
local value=$3
- local proc_param="${nodemap_name}.${key}"
- [ "$nodemap_name" == "active" ] && proc_param="active"
-
+ local opt=$4
+ local proc_param
local is_active=$(do_facet mgs $LCTL get_param -n nodemap.active)
- (( is_active == 0 )) && [ "$proc_param" != "active" ] && return
-
local max_retries=20
local is_sync
local out1=""
local mgs_ip=$(host_nids_address $mgs_HOST $NETTYPE | cut -d' ' -f1)
local i
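+	# parameter path: global "active", whole nodemap if no key, else nodemap.key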
+ if [ "$nodemap_name" == "active" ]; then
+ proc_param="active"
+ elif [ -z "$key" ]; then
+ proc_param=${nodemap_name}
+ else
+ proc_param="${nodemap_name}.${key}"
+ fi
+ (( is_active == 0 )) && [ "$proc_param" != "active" ] && return
+
if [ -z "$value" ]; then
- out1=$(do_facet mgs $LCTL get_param nodemap.${proc_param})
+ out1=$(do_facet mgs $LCTL get_param $opt nodemap.${proc_param})
echo "On MGS ${mgs_ip}, ${proc_param} = $out1"
else
out1=$value;
[ $node_ip == $mgs_ip ] && continue
fi
- out2=$(do_node $node_ip $LCTL get_param \
+ out2=$(do_node $node_ip $LCTL get_param $opt \
nodemap.$proc_param 2>/dev/null)
echo "On $node ${node_ip}, ${proc_param} = $out2"
[ "$out1" != "$out2" ] && is_sync=false && break
# fileset test directory needs to be initialized on a privileged client
fileset_test_setup() {
local nm=$1
+
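+	# remount without the fileset so this privileged client sees the fs root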
+ if [ -n "$FILESET" -a -z "$SKIP_FILESET" ]; then
+ cleanup_mount $MOUNT
+ FILESET="" zconf_mount_clients $CLIENTS $MOUNT
+ fi
+
local admin=$(do_facet mgs $LCTL get_param -n \
nodemap.${nm}.admin_nodemap)
local trust=$(do_facet mgs $LCTL get_param -n \
wait_nm_sync $nm admin_nodemap
wait_nm_sync $nm trusted_nodemap
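+	# restore the fileset mount on the clients now that setup is done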
+ if [ -n "$FILESET" -a -z "$SKIP_FILESET" ]; then
+ cleanup_mount $MOUNT
+ zconf_mount_clients $CLIENTS $MOUNT
+ fi
}
do_create_delete() {
do_facet mgs $LCTL nodemap_modify --name default \
--property admin --value 1
+ wait_nm_sync default admin_nodemap
do_facet mgs $LCTL nodemap_modify --name default \
--property trusted --value 1
wait_nm_sync default trusted_nodemap
do_facet mgs $LCTL nodemap_modify --name default \
--property admin --value 0
+ wait_nm_sync default admin_nodemap
do_facet mgs $LCTL nodemap_modify --name default \
--property trusted --value 0
wait_nm_sync default trusted_nodemap
run_test 16 "test nodemap all_off fileops"
test_17() {
+ if $SHARED_KEY &&
+ [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.55) ]; then
+ skip "Need MDS >= 2.11.55"
+ fi
+
nodemap_version_check || return 0
nodemap_test_setup
run_test 17 "test nodemap trusted_noadmin fileops"
test_18() {
+ if $SHARED_KEY &&
+ [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.55) ]; then
+ skip "Need MDS >= 2.11.55"
+ fi
+
nodemap_version_check || return 0
nodemap_test_setup
run_test 18 "test nodemap mapped_noadmin fileops"
test_19() {
+ if $SHARED_KEY &&
+ [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.55) ]; then
+ skip "Need MDS >= 2.11.55"
+ fi
+
nodemap_version_check || return 0
nodemap_test_setup
run_test 19 "test nodemap trusted_admin fileops"
test_20() {
+ if $SHARED_KEY &&
+ [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.55) ]; then
+ skip "Need MDS >= 2.11.55"
+ fi
+
nodemap_version_check || return 0
nodemap_test_setup
run_test 20 "test nodemap mapped_admin fileops"
test_21() {
+ if $SHARED_KEY &&
+ [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.55) ]; then
+ skip "Need MDS >= 2.11.55"
+ fi
+
nodemap_version_check || return 0
nodemap_test_setup
run_test 21 "test nodemap mapped_trusted_noadmin fileops"
test_22() {
+ if $SHARED_KEY &&
+ [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.55) ]; then
+ skip "Need MDS >= 2.11.55"
+ fi
+
nodemap_version_check || return 0
nodemap_test_setup
}
test_23a() {
+	[ $num_clients -lt 2 ] && skip "needs at least 2 clients" && return
nodemap_version_check || return 0
nodemap_test_setup
run_test 23a "test mapped regular ACLs"
test_23b() { #LU-9929
- remote_mgs_nodsh && skip "remote MGS with nodsh" && return
+	[ $num_clients -lt 2 ] && skip "needs at least 2 clients" && return
[ $(lustre_version_code mgs) -lt $(version_code 2.10.53) ] &&
skip "Need MGS >= 2.10.53" && return
+ export SK_UNIQUE_NM=true
nodemap_test_setup
trap nodemap_test_cleanup EXIT
do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
wait_nm_sync c0 admin_nodemap
+ do_facet mgs $LCTL nodemap_modify --name c1 --property admin --value 1
+ wait_nm_sync c1 admin_nodemap
+ do_facet mgs $LCTL nodemap_modify --name c1 --property trusted --value 1
+ wait_nm_sync c1 trusted_nodemap
# Add idmap $ID0:$fs_id (500:60010)
do_facet mgs $LCTL nodemap_add_idmap --name c0 --idtype gid \
--idmap $ID0:$fs_id ||
error "add idmap $ID0:$fs_id to nodemap c0 failed"
+ wait_nm_sync c0 idmap
# set/getfacl default acl on client0 (unmapped gid=500)
rm -rf $testdir
[ "$unmapped_id" = "$USER0" ] ||
error "gid=$ID0 was not unmapped correctly on ${clients_arr[0]}"
- # getfacl default acl on MGS (mapped gid=60010)
- zconf_mount $mgs_HOST $MOUNT
- do_rpc_nodes $mgs_HOST is_mounted $MOUNT ||
- error "mount lustre on MGS failed"
- mapped_id=$(do_node $mgs_HOST getfacl $testdir |
+ # getfacl default acl on client2 (mapped gid=60010)
+ mapped_id=$(do_node ${clients_arr[1]} getfacl $testdir |
grep -E "default:group:.*:rwx" | awk -F: '{print $3}')
- fs_user=$(do_facet mgs getent passwd |
+ fs_user=$(do_node ${clients_arr[1]} getent passwd |
grep :$fs_id:$fs_id: | cut -d: -f1)
+ [ -z "$fs_user" ] && fs_user=$fs_id
[ $mapped_id -eq $fs_id -o "$mapped_id" = "$fs_user" ] ||
- error "Should return gid=$fs_id or $fs_user on MGS"
+ error "Should return gid=$fs_id or $fs_user on client2"
rm -rf $testdir
- do_facet mgs umount $MOUNT
nodemap_test_cleanup
+ export SK_UNIQUE_NM=false
}
run_test 23b "test mapped default ACLs"
nodemap_test_setup
trap nodemap_test_cleanup EXIT
- do_nodes $(comma_list $(all_server_nodes)) $LCTL get_param -R nodemap ||
- error "proc readable file read failed"
+ do_nodes $(comma_list $(all_server_nodes)) $LCTL get_param -R nodemap
nodemap_test_cleanup
}
nodemap_exercise_fileset() {
local nm="$1"
- local fileset_on_mgs=""
local loop=0
# setup
fileset_test_setup "$nm"
# add fileset info to $nm nodemap
-	do_facet mgs $LCTL set_param nodemap.${nm}.fileset=/$subdir ||
-		error "unable to add fileset info to $nm nodemap on MGS"
+	if ! combined_mgs_mds; then
+		do_facet mgs $LCTL set_param nodemap.${nm}.fileset=/$subdir ||
+			error "unable to add fileset info to $nm nodemap on MGS"
+	fi
do_facet mgs $LCTL set_param -P nodemap.${nm}.fileset=/$subdir ||
error "unable to add fileset info to $nm nodemap for servers"
wait_nm_sync $nm fileset "nodemap.${nm}.fileset=/$subdir"
error "subdir of fileset not taken into account"
# remove fileset info from nodemap
- do_facet mgs $LCTL nodemap_set_fileset --name $nm --fileset \'\' ||
+ do_facet mgs $LCTL nodemap_set_fileset --name $nm --fileset clear ||
error "unable to delete fileset info on $nm nodemap"
wait_update_facet mgs "$LCTL get_param nodemap.${nm}.fileset" \
"nodemap.${nm}.fileset=" ||
error "fileset info still not cleared on $nm nodemap"
- do_facet mgs $LCTL set_param -P nodemap.${nm}.fileset=\'\' ||
+ do_facet mgs $LCTL set_param -P nodemap.${nm}.fileset=clear ||
error "unable to reset fileset info on $nm nodemap"
wait_nm_sync $nm fileset "nodemap.${nm}.fileset="
fi
}
-test_27() {
+test_27a() {
+ [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.50) ] &&
+ skip "Need MDS >= 2.11.50" && return
+
for nm in "default" "c0"; do
local subdir="subdir_${nm}"
local subsubdir="subsubdir_${nm}"
+ if [ "$nm" == "default" ] && [ "$SHARED_KEY" == "true" ]; then
+			echo "Skipping nodemap $nm with SHARED_KEY"
+			continue
+ fi
+
echo "Exercising fileset for nodemap $nm"
nodemap_exercise_fileset "$nm"
done
}
-run_test 27 "test fileset in various nodemaps"
+run_test 27a "test fileset in various nodemaps"
+
+test_27b() { #LU-10703
+ [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.50) ] &&
+ skip "Need MDS >= 2.11.50" && return
+ [[ $MDSCOUNT -lt 2 ]] && skip "needs >= 2 MDTs" && return
+
+ nodemap_test_setup
+ trap nodemap_test_cleanup EXIT
+
+ # Add the nodemaps and set their filesets
+ for i in $(seq 1 $MDSCOUNT); do
+ do_facet mgs $LCTL nodemap_del nm$i 2>/dev/null
+ do_facet mgs $LCTL nodemap_add nm$i ||
+ error "add nodemap nm$i failed"
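+		# sync via "get_param -N": just check the new nodemap exists everywhere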
+ wait_nm_sync nm$i "" "" "-N"
+
+ if ! combined_mgs_mds; then
+ do_facet mgs \
+ $LCTL set_param nodemap.nm$i.fileset=/dir$i ||
+ error "set nm$i.fileset=/dir$i failed on MGS"
+ fi
+ do_facet mgs $LCTL set_param -P nodemap.nm$i.fileset=/dir$i ||
+ error "set nm$i.fileset=/dir$i failed on servers"
+ wait_nm_sync nm$i fileset "nodemap.nm$i.fileset=/dir$i"
+ done
+
+ # Check if all the filesets are correct
+ for i in $(seq 1 $MDSCOUNT); do
+ fileset=$(do_facet mds$i \
+ $LCTL get_param -n nodemap.nm$i.fileset)
+ [ "$fileset" = "/dir$i" ] ||
+ error "nm$i.fileset $fileset != /dir$i on mds$i"
+ do_facet mgs $LCTL nodemap_del nm$i ||
+ error "delete nodemap nm$i failed"
+ done
+
+ nodemap_test_cleanup
+}
+run_test 27b "The new nodemap won't clear the old nodemap's fileset"
test_28() {
if ! $SHARED_KEY; then
}
run_test 30 "check for invalid shared key"
+cleanup_31() {
+ # unmount client
+ zconf_umount $HOSTNAME $MOUNT || error "unable to umount client"
+
+ # remove ${NETTYPE}999 network on all nodes
+ do_nodes $(comma_list $(all_nodes)) \
+ "$LNETCTL net del --net ${NETTYPE}999 && \
+ $LNETCTL lnet unconfigure 2>/dev/null || true"
+
+ # necessary to do writeconf in order to de-register
+ # @${NETTYPE}999 nid for targets
+ KZPOOL=$KEEP_ZPOOL
+ export KEEP_ZPOOL="true"
+ stopall
+ export SK_MOUNTED=false
+ writeconf_all
+ setupall || echo 1
+ export KEEP_ZPOOL="$KZPOOL"
+}
+
+test_31() {
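+	# grab the client's first NID on $NETTYPE and split it into addr@net parts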
+ local nid=$(lctl list_nids | grep ${NETTYPE} | head -n1)
+ local addr=${nid%@*}
+ local net=${nid#*@}
+
+ export LNETCTL=$(which lnetctl 2> /dev/null)
+
+	[ -z "$LNETCTL" ] && skip "needs lnetctl support" && return
+	local_mode && skip "not supported in local mode" && return
+
+ stack_trap cleanup_31 EXIT
+
+ # umount client
+	if [ "$MOUNT_2" ] && grep -q $MOUNT2' ' /proc/mounts; then
+ umount_client $MOUNT2 || error "umount $MOUNT2 failed"
+ fi
+	if grep -q $MOUNT' ' /proc/mounts; then
+ umount_client $MOUNT || error "umount $MOUNT failed"
+ fi
+
+ # check exports on servers are empty for client
+ do_facet mgs "lctl get_param -n *.MGS*.exports.'$nid'.uuid 2>/dev/null |
+ grep -q -" && error "export on MGS should be empty"
+ do_nodes $(comma_list $(mdts_nodes) $(osts_nodes)) \
+ "lctl get_param -n *.${FSNAME}*.exports.'$nid'.uuid \
+ 2>/dev/null | grep -q -" &&
+ error "export on servers should be empty"
+
+ # add network ${NETTYPE}999 on all nodes
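+	# reuse the interface backing $net, as reported by "lnetctl net show"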
+ do_nodes $(comma_list $(all_nodes)) \
+ "$LNETCTL lnet configure && $LNETCTL net add --if \
+ $($LNETCTL net show --net $net | awk 'BEGIN{inf=0} \
+	  {if (inf==1) print $2; inf=0} /interfaces/{inf=1}') \
+ --net ${NETTYPE}999" ||
+ error "unable to configure NID ${NETTYPE}999"
+
+ # necessary to do writeconf in order to register
+ # new @${NETTYPE}999 nid for targets
+ KZPOOL=$KEEP_ZPOOL
+ export KEEP_ZPOOL="true"
+ stopall
+ export SK_MOUNTED=false
+ writeconf_all
+ setupall server_only || echo 1
+ export KEEP_ZPOOL="$KZPOOL"
+
+ # backup MGSNID
+ local mgsnid_orig=$MGSNID
+ # compute new MGSNID
+ MGSNID=$(do_facet mgs "$LCTL list_nids | grep ${NETTYPE}999")
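+	# mount_client builds the mount device from $MGSNID, so the client
+	# will now reach the MGS via ${NETTYPE}999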
+
+ # on client, turn LNet Dynamic Discovery on
+	$LNETCTL set discovery 1
+
+ # mount client with -o network=${NETTYPE}999 option:
+ # should fail because of LNet Dynamic Discovery
+ mount_client $MOUNT ${MOUNT_OPTS},network=${NETTYPE}999 &&
+ error "client mount with '-o network' option should be refused"
+
+ # on client, reconfigure LNet and turn LNet Dynamic Discovery off
+	$LNETCTL net del --net ${NETTYPE}999 && $LNETCTL lnet unconfigure
+ lustre_rmmod
+ modprobe lnet
+	$LNETCTL set discovery 0
+ modprobe ptlrpc
+ $LNETCTL lnet configure && $LNETCTL net add --if \
+ $($LNETCTL net show --net $net | awk 'BEGIN{inf=0} \
+		{if (inf==1) print $2; inf=0} /interfaces/{inf=1}') \
+ --net ${NETTYPE}999 ||
+ error "unable to configure NID ${NETTYPE}999 on client"
+
+ # mount client with -o network=${NETTYPE}999 option
+ mount_client $MOUNT ${MOUNT_OPTS},network=${NETTYPE}999 ||
+ error "unable to remount client"
+
+ # restore MGSNID
+ MGSNID=$mgsnid_orig
+
+ # check export on MGS
+ do_facet mgs "lctl get_param -n *.MGS*.exports.'$nid'.uuid 2>/dev/null |
+ grep -q -"
+ [ $? -ne 0 ] || error "export for $nid on MGS should not exist"
+
+ do_facet mgs \
+ "lctl get_param -n *.MGS*.exports.'${addr}@${NETTYPE}999'.uuid \
+ 2>/dev/null | grep -q -"
+ [ $? -eq 0 ] ||
+ error "export for ${addr}@${NETTYPE}999 on MGS should exist"
+
+ # check {mdc,osc} imports
+ lctl get_param mdc.${FSNAME}-*.import | grep current_connection |
+ grep -q ${NETTYPE}999
+ [ $? -eq 0 ] ||
+ error "import for mdc should use ${addr}@${NETTYPE}999"
+ lctl get_param osc.${FSNAME}-*.import | grep current_connection |
+ grep -q ${NETTYPE}999
+ [ $? -eq 0 ] ||
+ error "import for osc should use ${addr}@${NETTYPE}999"
+}
+run_test 31 "client mount option '-o network'"
+
log "cleanup: ======================================================"
sec_unsetup() {
}
sec_unsetup
-sec_cleanup
-
complete $SECONDS
+check_and_cleanup_lustre
exit_status