ALWAYS_EXCEPT=" 2 5 6 $SANITY_SEC_EXCEPT"
# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
-[ "$ALWAYS_EXCEPT$EXCEPT" ] && echo "Skipping tests: $ALWAYS_EXCEPT $EXCEPT"
-
SRCDIR=$(dirname $0)
export PATH=$PWD/$SRCDIR:$SRCDIR:$PWD/$SRCDIR/../utils:$PATH:/sbin
export NAME=${NAME:-local}
. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
init_logging
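+
+# nodemap tests require client and servers at matching versions; under
+# interoperation (mixed-version) mode they are skipped via check_versions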
+NODEMAP_TESTS=$(seq 7 27)
+
+if ! check_versions; then
+ echo "It is NOT necessary to test nodemap under interoperation mode"
+ EXCEPT="$EXCEPT $NODEMAP_TESTS"
+fi
+
+[ "$SLOW" = "no" ] && EXCEPT_SLOW="26"
+
+[ "$ALWAYS_EXCEPT$EXCEPT$EXCEPT_SLOW" ] &&
+ echo "Skipping tests: $ALWAYS_EXCEPT $EXCEPT $EXCEPT_SLOW"
+
RUNAS_CMD=${RUNAS_CMD:-runas}
WTL=${WTL:-"$LUSTRE/tests/write_time_limit"}
ID0=${ID0:-500}
ID1=${ID1:-501}
-USER0=$(grep :$ID0:$ID0: /etc/passwd | cut -d: -f1)
-USER1=$(grep :$ID1:$ID1: /etc/passwd | cut -d: -f1)
+USER0=$(getent passwd | grep :$ID0:$ID0: | cut -d: -f1)
+USER1=$(getent passwd | grep :$ID1:$ID1: | cut -d: -f1)
[ -z "$USER0" ] &&
- skip "need to add user0 ($ID0:$ID0) to /etc/passwd" && exit 0
+ skip "need to add user0 ($ID0:$ID0)" && exit 0
[ -z "$USER1" ] &&
- skip "need to add user1 ($ID1:$ID1) to /etc/passwd" && exit 0
+ skip "need to add user1 ($ID1:$ID1)" && exit 0
IDBASE=${IDBASE:-60000}
}
squash_id() {
+ [ $(lustre_version_code mgs) -lt $(version_code 2.5.53) ] &&
+ skip "No nodemap on $(lustre_build_version mgs) MGS < 2.5.53" &&
+ return
local cmd
cmd[0]="$LCTL nodemap_modify --property squash_uid"
local rc=0
## nodemap deactivated
- if ! do_facet mgs lctl nodemap_activate 0; then
+ if ! do_facet mgs $LCTL nodemap_activate 0; then
return 1
fi
for ((id = 500; id < NODEMAP_MAX_ID; id++)); do
done
## nodemap activated
- if ! do_facet mgs lctl nodemap_activate 1; then
+ if ! do_facet mgs $LCTL nodemap_activate 1; then
return 2
fi
}
run_test 15 "test id mapping"
-# Until nodemaps are distributed by MGS, they need to be distributed manually
-# This function and all calls to it should be removed once the MGS distributes
-# nodemaps to the MDS and OSS nodes directly.
-do_servers_not_mgs() {
- local mgs_ip=$(host_nids_address $mgs_HOST $NETTYPE)
- for node in $(all_server_nodes); do
- local node_ip=$(host_nids_address $node $NETTYPE)
- [ $node_ip == $mgs_ip ] && continue
- do_node $node_ip $*
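+# wait_nm_sync <nodemap name|"active"> <key>: poll for up to 10 seconds
+# until every non-MGS server reports the same nodemap.<name>.<key> value
+# as the MGS, erroring out if the servers never converge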
+wait_nm_sync() {
+ local nodemap_name=$1
+ local key=$2
+ local proc_param="${nodemap_name}.${key}"
+ [ "$nodemap_name" == "active" ] && proc_param="active"
+
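+ # when nodemaps are inactive, only the "active" flag itself is synced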
+ local is_active=$(do_facet mgs $LCTL get_param -n nodemap.active)
+ (( is_active == 0 )) && [ "$proc_param" != "active" ] && return
+
+ local is_sync
+ local out1=$(do_facet mgs $LCTL get_param nodemap.${proc_param})
+ local out2
+ local mgs_ip=$(host_nids_address $mgs_HOST $NETTYPE | cut -d' ' -f1)
+ local i
+
+ # wait up to 10 seconds for other servers to sync with mgs
+ for i in $(seq 1 10); do
+ is_sync=true
+ for node in $(all_server_nodes); do
+ local node_ip=$(host_nids_address $node $NETTYPE |
+ cut -d' ' -f1)
+
+ [ "$node_ip" == "$mgs_ip" ] && continue
+
+ out2=$(do_node $node_ip $LCTL get_param \
+ nodemap.$proc_param 2>/dev/null)
+ [ "$out1" != "$out2" ] && is_sync=false && break
+ done
+ $is_sync && break
+ sleep 1
done
+ if ! $is_sync; then
+ echo "MGS"
+ echo "$out1"
+ echo "OTHER - IP: $node_ip"
+ echo "$out2"
+ error "mgs and $nodemap_name ${key} mismatch, $i attempts"
+ fi
+ echo "waited $((i - 1)) seconds for sync"
}
create_fops_nodemaps() {
do_facet mgs $LCTL nodemap_add c${i} || return 1
do_facet mgs $LCTL nodemap_add_range \
--name c${i} --range $client_nid || return 1
- do_servers_not_mgs $LCTL set_param nodemap.add_nodemap=c${i} ||
- return 1
- do_servers_not_mgs "$LCTL set_param " \
- "nodemap.add_nodemap_range='c${i} $client_nid'" ||
- return 1
for map in ${FOPS_IDMAPS[i]}; do
do_facet mgs $LCTL nodemap_add_idmap --name c${i} \
--idtype uid --idmap ${map} || return 1
- do_servers_not_mgs "$LCTL set_param " \
- "nodemap.add_nodemap_idmap='c$i uid ${map}'" ||
- return 1
do_facet mgs $LCTL nodemap_add_idmap --name c${i} \
--idtype gid --idmap ${map} || return 1
- do_servers_not_mgs "$LCTL set_param " \
- " nodemap.add_nodemap_idmap='c$i gid ${map}'" ||
- return 1
done
- out1=$(do_facet mgs $LCTL get_param nodemap.c${i}.idmap)
- out2=$(do_facet ost0 $LCTL get_param nodemap.c${i}.idmap)
- [ "$out1" != "$out2" ] && error "mgs and oss maps mismatch"
+
+ wait_nm_sync c$i idmap
+
i=$((i + 1))
done
return 0
local client
for client in $clients; do
do_facet mgs $LCTL nodemap_del c${i} || return 1
- do_servers_not_mgs $LCTL set_param nodemap.remove_nodemap=c$i ||
- return 1
i=$((i + 1))
done
return 0
do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
- do_servers_not_mgs $LCTL set_param nodemap.c0.admin_nodemap=1
- do_servers_not_mgs $LCTL set_param nodemap.c0.trusted_nodemap=1
+
+ wait_nm_sync c0 admin_nodemap
+ wait_nm_sync c0 trusted_nodemap
do_node ${clients_arr[0]} rm -rf $DIR/$tdir
nm_test_mkdir
--property admin --value $admin
do_facet mgs $LCTL nodemap_modify --name c0 \
--property trusted --value $trust
- do_servers_not_mgs $LCTL set_param nodemap.c0.admin_nodemap=$admin
- do_servers_not_mgs $LCTL set_param nodemap.c0.trusted_nodemap=$trust
# flush MDT locks to make sure they are reacquired before test
- do_node ${clients_arr[0]} lctl set_param \
+ do_node ${clients_arr[0]} $LCTL set_param \
+ ldlm.namespaces.$FSNAME-MDT*.lru_size=clear
+
+ wait_nm_sync c0 admin_nodemap
+ wait_nm_sync c0 trusted_nodemap
+}
+
+# fileset test directory needs to be initialized on a privileged client
+fileset_test_setup() {
+ local admin=$(do_facet mgs $LCTL get_param -n nodemap.c0.admin_nodemap)
+ local trust=$(do_facet mgs $LCTL get_param -n \
+ nodemap.c0.trusted_nodemap)
+
+ do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
+ do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
+
+ wait_nm_sync c0 admin_nodemap
+ wait_nm_sync c0 trusted_nodemap
+
+ # create directory and populate it for subdir mount
+ do_node ${clients_arr[0]} mkdir $MOUNT/$subdir ||
+ error "unable to create dir $MOUNT/$subdir"
+ do_node ${clients_arr[0]} touch $MOUNT/$subdir/this_is_$subdir ||
+ error "unable to create file $MOUNT/$subdir/this_is_$subdir"
+ do_node ${clients_arr[0]} mkdir $MOUNT/$subdir/$subsubdir ||
+ error "unable to create dir $MOUNT/$subdir/$subsubdir"
+ do_node ${clients_arr[0]} touch \
+ $MOUNT/$subdir/$subsubdir/this_is_$subsubdir ||
+ error "unable to create file \
+ $MOUNT/$subdir/$subsubdir/this_is_$subsubdir"
+
+ do_facet mgs $LCTL nodemap_modify --name c0 \
+ --property admin --value $admin
+ do_facet mgs $LCTL nodemap_modify --name c0 \
+ --property trusted --value $trust
+
+ # flush MDT locks to make sure they are reacquired before test
+ do_node ${clients_arr[0]} $LCTL set_param \
+ ldlm.namespaces.$FSNAME-MDT*.lru_size=clear
+
+ wait_nm_sync c0 admin_nodemap
+ wait_nm_sync c0 trusted_nodemap
+}
+
+# fileset test directory needs to be removed on a privileged client
+fileset_test_cleanup() {
+ local admin=$(do_facet mgs $LCTL get_param -n nodemap.c0.admin_nodemap)
+ local trust=$(do_facet mgs $LCTL get_param -n \
+ nodemap.c0.trusted_nodemap)
+
+ do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
+ do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
+
+ wait_nm_sync c0 admin_nodemap
+ wait_nm_sync c0 trusted_nodemap
+
+ # cleanup directory created for subdir mount
+ do_node ${clients_arr[0]} rm -rf $MOUNT/$subdir ||
+ error "unable to remove dir $MOUNT/$subdir"
+
+ do_facet mgs $LCTL nodemap_modify --name c0 \
+ --property admin --value $admin
+ do_facet mgs $LCTL nodemap_modify --name c0 \
+ --property trusted --value $trust
+
+ # flush MDT locks to make sure they are reacquired before test
+ do_node ${clients_arr[0]} $LCTL set_param \
ldlm.namespaces.$FSNAME-MDT*.lru_size=clear
+
+ wait_nm_sync c0 admin_nodemap
+ wait_nm_sync c0 trusted_nodemap
}
do_create_delete() {
local qused_high=$((qused_orig + quota_fuzz))
local qused_low=$((qused_orig - quota_fuzz))
local testfile=$DIR/$tdir/$tfile
- chmod 777 $DIR/$tdir
- $run_u dd if=/dev/zero of=$testfile bs=1M count=1 >& /dev/null
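+ # write 1MiB as the mapped user; quota usage should rise by ~1MiB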
+ $run_u dd if=/dev/zero of=$testfile bs=1M count=1 >& /dev/null ||
+ error "unable to write quota test file"
sync; sync_all_data || true
local qused_new=$(nodemap_check_quota "$run_u")
[ $((qused_new)) -lt $((qused_low + 1024)) -o \
$((qused_new)) -gt $((qused_high + 1024)) ] &&
error "$qused_new != $qused_orig + 1M after write, " \
"fuzz is $quota_fuzz"
- $run_u rm $testfile && d=1
- $NODEMAP_TEST_QUOTA && wait_delete_completed_mds
+ $run_u rm $testfile || error "unable to remove quota test file"
+ wait_delete_completed_mds
qused_new=$(nodemap_check_quota "$run_u")
[ $((qused_new)) -lt $((qused_low)) \
echo $FAILURE
}
+test_fops_admin_cli_i=""
+test_fops_chmod_dir() {
+ local current_cli_i=$1
+ local perm_bits=$2
+ local dir_to_chmod=$3
+ local new_admin_cli_i=""
+
+ # do we need to set up a new admin client?
+ [ "$current_cli_i" == "0" ] && [ "$test_fops_admin_cli_i" != "1" ] &&
+ new_admin_cli_i=1
+ [ "$current_cli_i" != "0" ] && [ "$test_fops_admin_cli_i" != "0" ] &&
+ new_admin_cli_i=0
+
+ # if there is only one client and it is non-admin, flip admin every time
+ if [ "$num_clients" == "1" ]; then
+ test_fops_admin_client=$clients
+ test_fops_admin_val=$(do_facet mgs $LCTL get_param -n \
+ nodemap.c0.admin_nodemap)
+ if [ "$test_fops_admin_val" != "1" ]; then
+ do_facet mgs $LCTL nodemap_modify \
+ --name c0 \
+ --property admin \
+ --value 1
+ wait_nm_sync c0 admin_nodemap
+ fi
+ elif [ "$new_admin_cli_i" != "" ]; then
+ # restore admin val to old admin client
+ if [ "$test_fops_admin_cli_i" != "" ] &&
+ [ "$test_fops_admin_val" != "1" ]; then
+ do_facet mgs $LCTL nodemap_modify \
+ --name c${test_fops_admin_cli_i} \
+ --property admin \
+ --value $test_fops_admin_val
+ wait_nm_sync c${test_fops_admin_cli_i} admin_nodemap
+ fi
+
+ test_fops_admin_cli_i=$new_admin_cli_i
+ test_fops_admin_client=${clients_arr[$new_admin_cli_i]}
+ test_fops_admin_val=$(do_facet mgs $LCTL get_param -n \
+ nodemap.c${new_admin_cli_i}.admin_nodemap)
+
+ if [ "$test_fops_admin_val" != "1" ]; then
+ do_facet mgs $LCTL nodemap_modify \
+ --name c${new_admin_cli_i} \
+ --property admin \
+ --value 1
+ wait_nm_sync c${new_admin_cli_i} admin_nodemap
+ fi
+ fi
+
+ do_node $test_fops_admin_client chmod $perm_bits $dir_to_chmod || return 1
+
+ # remove admin for single client if originally non-admin
+ if [ "$num_clients" == "1" ] && [ "$test_fops_admin_val" != "1" ]; then
+ do_facet mgs $LCTL nodemap_modify --name c0 --property admin \
+ --value 0
+ wait_nm_sync c0 admin_nodemap
+ fi
+
+ return 0
+}
+
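+# run create/delete and quota checks for each user on each client under
+# the given mapping mode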
test_fops() {
local mapmode="$1"
local single_client="$2"
local cli_i=0
for client in $clients; do
local u
- local admin=$(do_facet mgs $LCTL get_param -n \
- nodemap.c$cli_i.admin_nodemap)
for u in ${client_user_list[$cli_i]}; do
local run_u="do_node $client \
$RUNAS_CMD -u$u -g$u -G$u"
local mode=$(printf %03o $perm_bits)
local key
key="$mapmode:$user:c$cli_i:$u:$mode"
- do_facet mgs $LCTL nodemap_modify \
- --name c$cli_i \
- --property admin \
- --value 1
- do_servers_not_mgs $LCTL set_param \
- nodemap.c$cli_i.admin_nodemap=1
- do_node $client chmod $mode $DIR/$tdir \
- || error unable to chmod $key
- do_facet mgs $LCTL nodemap_modify \
- --name c$cli_i \
- --property admin \
- --value $admin
- do_servers_not_mgs $LCTL set_param \
- nodemap.c$cli_i.admin_nodemap=$admin
-
+ test_fops_chmod_dir $cli_i $mode \
+ $DIR/$tdir ||
+ error "cannot chmod $key"
do_create_delete "$run_u" "$key"
done
# check quota
+ test_fops_chmod_dir $cli_i 777 $DIR/$tdir ||
+ error "cannot chmod $key"
do_fops_quota_test "$run_u"
done
nodemap_test_setup() {
local rc
- local active_nodemap=$1
+ local active_nodemap=1
+
+ [ "$1" == "0" ] && active_nodemap=0
do_nodes $(comma_list $(all_mdts_nodes)) \
$LCTL set_param mdt.*.identity_upcall=NONE
rc=$?
[[ $rc != 0 ]] && error "adding fops nodemaps failed $rc"
- if [ "$active_nodemap" == "0" ]; then
- do_facet mgs $LCTL set_param nodemap.active=0
- do_servers_not_mgs $LCTL set_param nodemap.active=0
- return
- fi
- do_facet mgs $LCTL nodemap_activate 1
- do_servers_not_mgs $LCTL set_param nodemap.active=1
+ do_facet mgs $LCTL nodemap_activate $active_nodemap
+ wait_nm_sync active
do_facet mgs $LCTL nodemap_modify --name default \
--property admin --value 1
do_facet mgs $LCTL nodemap_modify --name default \
--property trusted --value 1
- do_servers_not_mgs $LCTL set_param nodemap.default.admin_nodemap=1
- do_servers_not_mgs $LCTL set_param nodemap.default.trusted_nodemap=1
+ wait_nm_sync default trusted_nodemap
}
nodemap_test_cleanup() {
rc=$?
[[ $rc != 0 ]] && error "removing fops nodemaps failed $rc"
+ do_facet mgs $LCTL nodemap_modify --name default \
+ --property admin --value 0
+ do_facet mgs $LCTL nodemap_modify --name default \
+ --property trusted --value 0
+ wait_nm_sync default trusted_nodemap
+
+ do_facet mgs $LCTL nodemap_activate 0
+ wait_nm_sync active
+
return 0
}
for client in $clients; do
do_facet mgs $LCTL nodemap_modify --name c${i} \
--property admin --value $admin
- do_servers_not_mgs $LCTL set_param \
- nodemap.c${i}.admin_nodemap=$admin
do_facet mgs $LCTL nodemap_modify --name c${i} \
--property trusted --value $tr
- do_servers_not_mgs $LCTL set_param \
- nodemap.c${i}.trusted_nodemap=$tr
i=$((i + 1))
done
+ wait_nm_sync c$((i - 1)) admin_nodemap
+ wait_nm_sync c$((i - 1)) trusted_nodemap
}
test_16() {
--property admin --value 0
do_facet mgs $LCTL nodemap_modify --name c${i} \
--property trusted --value $x
- do_servers_not_mgs $LCTL set_param \
- nodemap.c${i}.admin_nodemap=0
- do_servers_not_mgs $LCTL set_param \
- nodemap.c${i}.trusted_nodemap=$x
x=0
i=$((i + 1))
done
+ wait_nm_sync c$((i - 1)) trusted_nodemap
+
test_fops mapped_trusted_noadmin
nodemap_test_cleanup
}
--property admin --value 1
do_facet mgs $LCTL nodemap_modify --name c${i} \
--property trusted --value $x
- do_servers_not_mgs $LCTL set_param \
- nodemap.c${i}.admin_nodemap=1
- do_servers_not_mgs $LCTL set_param \
- nodemap.c${i}.trusted_nodemap=$x
x=0
i=$((i + 1))
done
+ wait_nm_sync c$((i - 1)) trusted_nodemap
+
test_fops mapped_trusted_admin
nodemap_test_cleanup
}
do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
- do_servers_not_mgs $LCTL set_param nodemap.c0.admin_nodemap=1
- do_servers_not_mgs $LCTL set_param nodemap.c0.trusted_nodemap=1
+
+ wait_nm_sync c0 admin_nodemap
+ wait_nm_sync c0 trusted_nodemap
do_node ${clients_arr[0]} rm -rf $DIR/$tdir
nm_test_mkdir
--property admin --value $admin
do_facet mgs $LCTL nodemap_modify --name c0 \
--property trusted --value $trust
- do_servers_not_mgs $LCTL set_param nodemap.c0.admin_nodemap=$admin
- do_servers_not_mgs $LCTL set_param nodemap.c0.trusted_nodemap=$trust
+ wait_nm_sync c0 trusted_nodemap
}
# returns 0 if the number of ACLs does not change on the second (mapped) client
do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 1
do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 1
- do_servers_not_mgs $LCTL set_param nodemap.c0.admin_nodemap=1
- do_servers_not_mgs $LCTL set_param nodemap.c0.trusted_nodemap=1
do_facet mgs $LCTL nodemap_modify --name c1 --property admin --value 0
do_facet mgs $LCTL nodemap_modify --name c1 --property trusted --value 0
- do_servers_not_mgs $LCTL set_param nodemap.c1.admin_nodemap=0
- do_servers_not_mgs $LCTL set_param nodemap.c1.trusted_nodemap=0
+
+ wait_nm_sync c1 trusted_nodemap
# setfacl on trusted cluster to unmapped user, verify it's not seen
nodemap_acl_test $unmapped_fs ${clients_arr[0]} ${clients_arr[1]} ||
# 2 mapped clusters
do_facet mgs $LCTL nodemap_modify --name c0 --property admin --value 0
do_facet mgs $LCTL nodemap_modify --name c0 --property trusted --value 0
- do_servers_not_mgs $LCTL set_param nodemap.c0.admin_nodemap=0
- do_servers_not_mgs $LCTL set_param nodemap.c0.trusted_nodemap=0
+
+ wait_nm_sync c0 trusted_nodemap
# setfacl to mapped user on c1, also mapped to c0, verify it's seen
nodemap_acl_test $mapped_c1 ${clients_arr[1]} ${clients_arr[0]} &&
nodemap_test_setup
trap nodemap_test_cleanup EXIT
- for node in $(all_server_nodes); do
- local node_ip=$(host_nids_address $node $NETTYPE)
- do_node $node_ip 'find /proc/fs/lustre/nodemap -exec \
- cat {} \;' &> /dev/null
- do_node $node_ip 'find /proc/fs/lustre/nodemap \
- -type f -perm /444 | xargs cat' &> /dev/null ||
- error "proc readable file read failed"
- done
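+ # get_param -R recursively reads every readable nodemap proc entry,
+ # exercising all the readers at once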
+ do_nodes $(comma_list $(all_server_nodes)) $LCTL get_param -R nodemap ||
+ error "unable to read nodemap proc files"
nodemap_test_cleanup
}
run_test 24 "check nodemap proc files for LBUGs and Oopses"
+test_25() {
+ local tmpfile=$(mktemp)
+ local tmpfile2=$(mktemp)
+ local subdir=c0dir
+
+ nodemap_version_check || return 0
+
+ # stop clients for this test
+ zconf_umount_clients $CLIENTS $MOUNT ||
+ error "unable to umount clients $CLIENTS"
+
+ nodemap_test_setup
+
+ trap nodemap_test_cleanup EXIT
+
+ # create a new, empty nodemap, and add fileset info to it
+ do_facet mgs $LCTL nodemap_add test25 ||
+ error "unable to create nodemap test25"
+ do_facet mgs $LCTL set_param -P nodemap.test25.fileset=/$subdir ||
+ error "unable to add fileset info to nodemap test25"
+
+ wait_nm_sync test25 id
+
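+ # snapshot nodemap_info on the MGS and MDS; the saved config must
+ # survive a full remount (cleanup_and_setup_lustre)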
+ do_facet mgs $LCTL nodemap_info > $tmpfile
+ do_facet mds $LCTL nodemap_info > $tmpfile2
+
+ cleanup_and_setup_lustre
+ # stop clients for this test
+ zconf_umount_clients $CLIENTS $MOUNT ||
+ error "unable to umount clients $CLIENTS"
+
+ diff -q <(do_facet mgs $LCTL nodemap_info) $tmpfile >& /dev/null ||
+ error "nodemap_info diff on MGS after remount"
+
+ diff -q <(do_facet mds $LCTL nodemap_info) $tmpfile2 >& /dev/null ||
+ error "nodemap_info diff on MDS after remount"
+
+ # cleanup nodemap
+ do_facet mgs $LCTL nodemap_del test25 ||
+ error "cannot delete nodemap test25 from config"
+ nodemap_test_cleanup
+ # restart clients previously stopped
+ zconf_mount_clients $CLIENTS $MOUNT ||
+ error "unable to mount clients $CLIENTS"
+
+ rm -f $tmpfile $tmpfile2
+}
+run_test 25 "test save and reload nodemap config"
+
+test_26() {
+ nodemap_version_check || return 0
+
+ local large_i=32000
+
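+ # create and then delete 32000 nodemaps; the resulting very large config
+ # must still be transferred intact from the MGS to the other servers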
+ do_facet mgs "seq -f 'c%g' $large_i | xargs -n1 $LCTL nodemap_add"
+ wait_nm_sync c$large_i admin_nodemap
+
+ do_facet mgs "seq -f 'c%g' $large_i | xargs -n1 $LCTL nodemap_del"
+ wait_nm_sync c$large_i admin_nodemap
+}
+run_test 26 "test transferring very large nodemap"
+
+test_27() {
+ local subdir=c0dir
+ local subsubdir=c0subdir
+
+ nodemap_test_setup
+ trap nodemap_test_cleanup EXIT
+
+ fileset_test_setup
+
+ # add fileset info to nodemap
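+ # (set_param applies to the running config, set_param -P persists it)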
+ do_facet mgs $LCTL set_param nodemap.c0.fileset=/$subdir ||
+ error "unable to set fileset info on nodemap c0"
+ do_facet mgs $LCTL set_param -P nodemap.c0.fileset=/$subdir ||
+ error "unable to add fileset info to nodemap c0"
+ wait_nm_sync c0 fileset
+
+ # re-mount client
+ zconf_umount_clients ${clients_arr[0]} $MOUNT ||
+ error "unable to umount client ${clients_arr[0]}"
+ zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
+ error "unable to remount client ${clients_arr[0]}"
+
+ # test mount point content
+ do_node ${clients_arr[0]} test -f $MOUNT/this_is_$subdir ||
+ error "fileset not taken into account"
+
+ # re-mount client with sub-subdir
+ zconf_umount_clients ${clients_arr[0]} $MOUNT ||
+ error "unable to umount client ${clients_arr[0]}"
+ export FILESET=/$subsubdir
+ zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
+ error "unable to remount client ${clients_arr[0]}"
+ unset FILESET
+
+ # test mount point content
+ do_node ${clients_arr[0]} test -f $MOUNT/this_is_$subsubdir ||
+ error "subdir of fileset not taken into account"
+
+ # remove fileset info from nodemap
+ do_facet mgs $LCTL nodemap_set_fileset --name c0 --fileset \'\' ||
+ error "unable to delete fileset info on nodemap c0"
+ do_facet mgs $LCTL set_param -P nodemap.c0.fileset=\'\' ||
+ error "unable to reset fileset info on nodemap c0"
+ wait_nm_sync c0 fileset
+
+ # re-mount client
+ zconf_umount_clients ${clients_arr[0]} $MOUNT ||
+ error "unable to umount client ${clients_arr[0]}"
+ zconf_mount_clients ${clients_arr[0]} $MOUNT $MOUNT_OPTS ||
+ error "unable to remount client ${clients_arr[0]}"
+
+ # test mount point content
+ do_node ${clients_arr[0]} test -d $MOUNT/$subdir ||
+ error "fileset not cleared on nodemap c0"
+
+ fileset_test_cleanup
+ nodemap_test_cleanup
+}
+run_test 27 "test fileset in nodemap"
+
log "cleanup: ======================================================"
sec_unsetup() {
## nodemap deactivated
- do_facet mgs lctl nodemap_activate 0
+ do_facet mgs $LCTL nodemap_activate 0
for num in $(seq $MDSCOUNT); do
if [ "${identity_old[$num]}" = 1 ]; then