export REFORMAT=${REFORMAT:-""}
export WRITECONF=${WRITECONF:-""}
export VERBOSE=${VERBOSE:-false}
-export CATASTROPHE=${CATASTROPHE:-/proc/sys/lnet/catastrophe}
export GSS=false
export GSS_KRB5=false
export GSS_PIPEFS=false
[ ! -f "$LST" ] && export LST=$(which lst)
# NOTE(review): removed a stray ')' that sat between the closing quote and
# '}' in the default expansion below — it was a shell syntax error.
export SGPDDSURVEY=${SGPDDSURVEY:-"$LUSTRE/../lustre-iokit/sgpdd-survey/sgpdd-survey"}
[ ! -f "$SGPDDSURVEY" ] && export SGPDDSURVEY=$(which sgpdd-survey)
+ export MCREATE=${MCREATE:-mcreate}
# Ubuntu, at least, has a truncate command in /usr/bin
# so fully path our truncate command.
export TRUNCATE=${TRUNCATE:-$LUSTRE/tests/truncate}
cmp $mount_lustre $sbin_mount || umount $sbin_mount
fi
if ! grep -qe "$sbin_mount " /proc/mounts; then
- if [ ! -s "$sbin_mount" ]; then
+ [ ! -f "$sbin_mount" ] && touch "$sbin_mount"
+ if [ ! -s "$sbin_mount" -a -w "$sbin_mount" ]; then
cat <<- EOF > "$sbin_mount"
#!/bin/sh
#STUB MARK
EOF
chmod a+x $sbin_mount
fi
- mount --bind $mount_lustre $sbin_mount
+ mount --bind $mount_lustre $sbin_mount ||
+ error "can't bind $mount_lustre to $sbin_mount"
fi
fi
}
fi
if [ -n "$LGSS_KEYRING_DEBUG" ]; then
- echo $LGSS_KEYRING_DEBUG > /proc/fs/lustre/sptlrpc/gss/lgss_keyring/debug_level
+ lctl set_param -n \
+ sptlrpc.gss.lgss_keyring.debug_level=$LGSS_KEYRING_DEBUG
fi
fi
}
zconf_mount() {
local client=$1
local mnt=$2
- local OPTIONS=${3:-$MOUNTOPT}
+ local opts=${3:-$MOUNT_OPTS}
+ opts=${opts:+-o $opts}
+ local flags=${4:-$MOUNT_FLAGS}
local device=$MGSNID:/$FSNAME
if [ -z "$mnt" -o -z "$FSNAME" ]; then
- echo Bad zconf mount command: opt=$OPTIONS dev=$device mnt=$mnt
+ echo Bad zconf mount command: opt=$flags $opts dev=$device mnt=$mnt
exit 1
fi
- echo "Starting client: $client: $OPTIONS $device $mnt"
+ echo "Starting client: $client: $flags $opts $device $mnt"
do_node $client mkdir -p $mnt
- do_node $client $MOUNT_CMD $OPTIONS $device $mnt || return 1
+ do_node $client $MOUNT_CMD $flags $opts $device $mnt || return 1
set_default_debug_nodes $client
zconf_mount_clients() {
local clients=$1
local mnt=$2
- local OPTIONS=${3:-$MOUNTOPT}
+ local opts=${3:-$MOUNT_OPTS}
+ opts=${opts:+-o $opts}
+ local flags=${4:-$MOUNT_FLAGS}
local device=$MGSNID:/$FSNAME
if [ -z "$mnt" -o -z "$FSNAME" ]; then
- echo Bad zconf mount command: opt=$OPTIONS dev=$device mnt=$mnt
+ echo Bad zconf mount command: opt=$flags $opts dev=$device mnt=$mnt
exit 1
fi
- echo "Starting client $clients: $OPTIONS $device $mnt"
+ echo "Starting client $clients: $flags $opts $device $mnt"
do_nodes $clients "
running=\\\$(mount | grep -c $mnt' ');
rc=0;
if [ \\\$running -eq 0 ] ; then
mkdir -p $mnt;
- $MOUNT_CMD $OPTIONS $device $mnt;
+ $MOUNT_CMD $flags $opts $device $mnt;
rc=\\\$?;
fi;
exit \\\$rc" || return ${PIPESTATUS[0]}
# verify that lustre actually cleaned up properly
cleanup_check() {
- [ -f "$CATASTROPHE" ] && [[ $(< $CATASTROPHE) -ne 0 ]] &&
- error "LBUG/LASSERT detected"
+ VAR=$(lctl get_param -n catastrophe 2>&1)
+ if [ $? = 0 ] ; then
+ if [ $VAR != 0 ]; then
+ error "LBUG/LASSERT detected"
+ fi
+ fi
BUSY=$(dmesg | grep -i destruct || true)
if [ -n "$BUSY" ]; then
echo "$BUSY" 1>&2
return $RC
}
+# drop_bl_callback_once: run "$@" on the client facet while the next LDLM
+# blocking callback is dropped exactly once (0x80000000 flag makes fail_loc
+# 0x305 one-shot).  Early lock cancel is disabled first so the blocking
+# callback path is actually exercised, and re-enabled afterwards.
+# Returns the wrapped command's exit status.
-drop_bl_callback() {
+drop_bl_callback_once() {
+ local rc=0
+ do_facet client lctl set_param ldlm.namespaces.*.early_lock_cancel=0
#define OBD_FAIL_LDLM_BL_CALLBACK_NET 0x305
- RC=0
do_facet client lctl set_param fail_loc=0x80000305
- do_facet client "$@" || RC=$?
+ do_facet client "$@" || rc=$?
do_facet client lctl set_param fail_loc=0
- return $RC
+ do_facet client lctl set_param ldlm.namespaces.*.early_lock_cancel=1
+ return $rc
+}
+
+# drop_bl_callback: run "$@" on the client facet while ALL LDLM blocking
+# callbacks are dropped (fail_loc 0x305 without the one-shot flag, unlike
+# drop_bl_callback_once).  Early lock cancel is disabled for the duration
+# and restored afterwards.  Returns the wrapped command's exit status.
+drop_bl_callback() {
+ local rc=0
+ do_facet client lctl set_param ldlm.namespaces.*.early_lock_cancel=0
+#define OBD_FAIL_LDLM_BL_CALLBACK_NET 0x305
+ do_facet client lctl set_param fail_loc=0x305
+ do_facet client "$@" || rc=$?
+ do_facet client lctl set_param fail_loc=0
+ do_facet client lctl set_param ldlm.namespaces.*.early_lock_cancel=1
+ return $rc
+}
drop_ldlm_reply() {
+# cancel_lru_locks <pattern>: clear the unused (LRU) DLM locks in every lock
+# namespace matching <pattern> (e.g. "osc", "mdc", "MGC"), then print any
+# namespaces that still report unused locks.  Start/stop markers are logged
+# via "lctl mark" so the operation is visible in the debug log.
cancel_lru_locks() {
$LCTL mark "cancel_lru_locks $1 start"
+
+ # MGC namespaces are handled via direct /proc//sys file writes below —
+ # presumably because lctl get_param -N does not enumerate them; confirm.
+ if [ $1 != "MGC" ]; then
+ for d in $(lctl get_param -N ldlm.namespaces.*.lru_size |
+ egrep -i $1); do
+ $LCTL set_param -n $d=clear
+ done
+ # Report namespaces whose unused-lock count is still non-zero.
+ $LCTL get_param ldlm.namespaces.*.lock_unused_count | egrep -i $1 |
+ grep -v '=0'
+ else
+ for d in $(find \
+ /{proc,sys}/fs/lustre/ldlm/namespaces/*$1*/lru_size \
+ 2> /dev/null); do
+ echo "clear" > $d
+ done
+
+ # Emulate the lctl-style "name=value" report for leftover locks.
+ for d in $(find \
+ /{proc,sys}/fs/lustre/ldlm/namespaces/*$1*/lock_unused_count \
+ 2> /dev/null); do
+ if [ $(cat $d) != 0 ]; then
+ echo "ldlm.namespaces.$(echo "$d" |
+ cut -f 7 -d'/').lock_unused_count=$(cat $d)"
+ fi
+ done
+ fi
+
$LCTL mark "cancel_lru_locks $1 stop"
}
+# reset_fail_loc: clear fault-injection state (fail_loc and fail_val) on
+# every node in the test cluster; errors are ignored so absent modules on a
+# node do not abort the reset.
reset_fail_loc () {
echo -n "Resetting fail_loc on all nodes..."
- do_nodes $(comma_list $(nodes_list)) "lctl set_param -n fail_loc=0 2>/dev/null || true"
+ do_nodes $(comma_list $(nodes_list)) "lctl set_param -n fail_loc=0 \
+ fail_val=0 2>/dev/null || true"
echo done.
}
check_catastrophe() {
local rnodes=${1:-$(comma_list $(remote_nodes_list))}
- local C=$CATASTROPHE
- [ -f $C ] && [ $(cat $C) -ne 0 ] && return 1
+ VAR=$(lctl get_param -n catastrophe 2>&1)
+ if [ $? = 0 ] ; then
+ if [ $VAR != 0 ]; then
+ return 1
+ fi
+ fi
[ -z "$rnodes" ] && return 0
local data
- data=$(do_nodes "$rnodes" "rc=\\\$([ -f $C ] &&
- echo \\\$(< $C) || echo 0);
+ data=$(do_nodes "$rnodes" "rc=\\\$(lctl get_param -n catastrophe);
if [ \\\$rc -ne 0 ]; then echo \\\$(hostname): \\\$rc; fi
exit \\\$rc")
local rc=$?
echo $((last_id - next_id + 1))
}
+
+# check_file_in_pool <file> <pool> <tlist>: verify that every OST index the
+# file is striped over (parsed from $GETSTRIPE output) appears in the
+# whitespace-separated target list <tlist>.  Returns 0 if all stripes are on
+# listed targets, 1 (after error_noexit) otherwise.  <pool> is only used in
+# the error message.
+check_file_in_pool()
+{
+ local file=$1
+ local pool=$2
+ local tlist="$3"
+ local res=$($GETSTRIPE $file | grep 0x | cut -f2)
+ for i in $res
+ do
+ for t in $tlist ; do
+ # stripe target found in list — check the next stripe
+ [ "$i" -eq "$t" ] && continue 2
+ done
+
+ echo "pool list: $tlist"
+ echo "striping: $res"
+ error_noexit "$file not allocated in $pool"
+ return 1
+ done
+ return 0
+}
+
+# pool_add <pool>: create OST pool $FSNAME.<pool> and verify it shows up in
+# "lfs pool_list".  Returns 1 if creation failed, 2 if the pool is missing
+# from the listing.
+pool_add() {
+ echo "Creating new pool"
+ local pool=$1
+
+ create_pool $FSNAME.$pool ||
+ { error_noexit "No pool created, result code $?"; return 1; }
+ [ $($LFS pool_list $FSNAME | grep -c $pool) -eq 1 ] ||
+ { error_noexit "$pool not in lfs pool_list"; return 2; }
+}
+
+# pool_add_targets <pool> <first> <last> [step]: add OSTs first..last (by
+# index, default step 1) to $FSNAME.<pool> using the OST[range/step]
+# notation, wait until the lov pool param reflects the expected UUID list,
+# then cross-check the count via "lfs pool_list".  Returns 1 if the pool
+# never reaches the expected contents, 2 on a count mismatch.
+pool_add_targets() {
+ echo "Adding targets to pool"
+ local pool=$1
+ local first=$2
+ local last=$3
+ local step=${4:-1}
+
+ local list=$(seq $first $step $last)
+
+ # Expected pool contents: one "FSNAME-OSTxxxx_UUID " entry per index.
+ local t=$(for i in $list; do printf "$FSNAME-OST%04x_UUID " $i; done)
+ do_facet mgs $LCTL pool_add \
+ $FSNAME.$pool $FSNAME-OST[$first-$last/$step]
+ wait_update $HOSTNAME "lctl get_param -n lov.$FSNAME-*.pools.$pool \
+ | sort -u | tr '\n' ' ' " "$t" || {
+ error_noexit "Add to pool failed"
+ return 1
+ }
+ local lfscount=$($LFS pool_list $FSNAME.$pool | grep -c "\-OST")
+ local addcount=$(((last - first) / step + 1))
+ [ $lfscount -eq $addcount ] || {
+ error_noexit "lfs pool_list bad ost count" \
+ "$lfscount != $addcount"
+ return 2
+ }
+}
+
+# pool_set_dir <pool> <tdir>: set a 2-stripe layout from <pool> on directory
+# <tdir>.  Returns 0 on success, 1 (after error_noexit) on failure.
+pool_set_dir() {
+ local pool=$1
+ local tdir=$2
+ echo "Setting pool on directory $tdir"
+
+ $SETSTRIPE -c 2 -p $pool $tdir && return 0
+
+ error_noexit "Cannot set pool $pool to $tdir"
+ return 1
+}
+
+# pool_check_dir <pool> <tdir>: verify the pool reported by "$GETSTRIPE
+# --pool" on <tdir> (trailing whitespace stripped) equals <pool>.
+# Returns 0 on match, 1 otherwise.
+pool_check_dir() {
+ local pool=$1
+ local tdir=$2
+ echo "Checking pool on directory $tdir"
+
+ local res=$($GETSTRIPE --pool $tdir | sed "s/\s*$//")
+ [ "$res" = "$pool" ] && return 0
+
+ error_noexit "Pool on '$tdir' is '$res', not '$pool'"
+ return 1
+}
+
+# pool_dir_rel_path <pool> <tdir> <root>: exercise pool_set_dir with
+# relative pathnames (plain, ./, ../, ../nested) from inside $root/$tdir.
+# Returns 1-4 identifying which variant failed; cleans up and returns to
+# the previous directory on success.
+pool_dir_rel_path() {
+ echo "Testing relative path works well"
+ local pool=$1
+ local tdir=$2
+ local root=$3
+
+ mkdir -p $root/$tdir/$tdir
+ cd $root/$tdir
+ pool_set_dir $pool $tdir || return 1
+ pool_set_dir $pool ./$tdir || return 2
+ pool_set_dir $pool ../$tdir || return 3
+ pool_set_dir $pool ../$tdir/$tdir || return 4
+ rm -rf $tdir; cd - > /dev/null
+}
+
+# pool_alloc_files <pool> <tdir> <count> <tlist>: touch <count> files in
+# <tdir> (which is expected to carry the pool layout) and verify each file's
+# stripes land on targets in <tlist> via check_file_in_pool.  Returns 0 if
+# all files pass, 1 otherwise.
+pool_alloc_files() {
+ echo "Checking files allocation from directory pool"
+ local pool=$1
+ local tdir=$2
+ local count=$3
+ local tlist="$4"
+
+ local failed=0
+ for i in $(seq -w 1 $count)
+ do
+ local file=$tdir/file-$i
+ touch $file
+ check_file_in_pool $file $pool "$tlist" || \
+ failed=$((failed + 1))
+ done
+ [ "$failed" = 0 ] && return 0
+
+ error_noexit "$failed files not allocated in $pool"
+ return 1
+}
+
+# pool_create_files <pool> <tdir> <count> <tlist>: create <count> files in
+# <tdir> explicitly striped on <pool> via $SETSTRIPE -p, and verify each via
+# check_file_in_pool against <tlist>.  Returns 0 if all files pass, 1
+# otherwise.
+pool_create_files() {
+ echo "Creating files in pool"
+ local pool=$1
+ local tdir=$2
+ local count=$3
+ local tlist="$4"
+
+ mkdir -p $tdir
+ local failed=0
+ for i in $(seq -w 1 $count)
+ do
+ local file=$tdir/spoo-$i
+ $SETSTRIPE -p $pool $file
+ check_file_in_pool $file $pool "$tlist" || \
+ failed=$((failed + 1))
+ done
+ [ "$failed" = 0 ] && return 0
+
+ error_noexit "$failed files not allocated in $pool"
+ return 1
+}
+
+# pool_lfs_df <pool>: compare the OST list that "lfs df --pool" reports for
+# $FSNAME.<pool> against the clilov pool parameter.  Returns 0 on match,
+# 1 otherwise.
+pool_lfs_df() {
+ echo "Checking 'lfs df' output"
+ local pool=$1
+
+ local t=$($LCTL get_param -n lov.$FSNAME-clilov-*.pools.$pool |
+ tr '\n' ' ')
+ local res=$($LFS df --pool $FSNAME.$pool |
+ awk '{print $1}' |
+ grep "$FSNAME-OST" |
+ tr '\n' ' ')
+ [ "$res" = "$t" ] && return 0
+
+ error_noexit "Pools OSTs '$t' is not '$res' that lfs df reports"
+ return 1
+}
+
+# pool_file_rel_path <pool> <tdir>: create pool-striped files using
+# non-canonical pathnames — one via a "/..$tdir" prefix and one relative to
+# the cwd after cd'ing into <tdir>.  Returns 1 if mkdir fails, 2/3 if either
+# setstripe fails.
+pool_file_rel_path() {
+ echo "Creating files in a pool with relative pathname"
+ local pool=$1
+ local tdir=$2
+
+ mkdir -p $tdir ||
+ { error_noexit "unable to create $tdir"; return 1 ; }
+ # NOTE(review): "/..$tdir" builds a path through the root's parent —
+ # presumably deliberate path-normalization abuse; TODO confirm intent.
+ local file="/..$tdir/$tfile-1"
+ $SETSTRIPE -p $pool $file ||
+ { error_noexit "unable to create $file" ; return 2 ; }
+
+ cd $tdir
+ $SETSTRIPE -p $pool $tfile-2 || {
+ error_noexit "unable to create $tfile-2 in $tdir"
+ return 3
+ }
+}
+
+# pool_remove_first_target <pool>: remove the first target listed in the
+# pool parameter from $FSNAME.<pool>, and wait until it disappears from the
+# parameter output.  Returns 1 if the target never goes away.
+pool_remove_first_target() {
+ echo "Removing first target from a pool"
+ local pool=$1
+
+ local pname="lov.$FSNAME-*.pools.$pool"
+ local t=$($LCTL get_param -n $pname | head -1)
+ do_facet mgs $LCTL pool_remove $FSNAME.$pool $t
+ wait_update $HOSTNAME "lctl get_param -n $pname | grep $t" "" || {
+ error_noexit "$t not removed from $FSNAME.$pool"
+ return 1
+ }
+}
+
+# pool_remove_all_targets <pool> <file>: remove every target from
+# $FSNAME.<pool>, wait for the pool param to drain, then verify that plain
+# file creation still works (fallback striping) while setstripe on the
+# now-empty pool fails.  Returns 1 if the pool cannot be drained, 2 if
+# fallback striping fails, 3 if setstripe unexpectedly succeeds.
+pool_remove_all_targets() {
+ echo "Removing all targets from pool"
+ local pool=$1
+ local file=$2
+ local pname="lov.$FSNAME-*.pools.$pool"
+ for t in $($LCTL get_param -n $pname | sort -u)
+ do
+ do_facet mgs $LCTL pool_remove $FSNAME.$pool $t
+ done
+ wait_update $HOSTNAME "lctl get_param -n $pname" "" || {
+ error_noexit "Pool $FSNAME.$pool cannot be drained"
+ return 1
+ }
+ # striping on an empty/nonexistent pool should fall back
+ # to "pool of everything"
+ touch $file || {
+ error_noexit "failed to use fallback striping for empty pool"
+ return 2
+ }
+ # setstripe on an empty pool should fail
+ $SETSTRIPE -p $pool $file 2>/dev/null && {
+ error_noexit "expected failure when creating file" \
+ "with empty pool"
+ return 3
+ }
+ return 0
+}
+
+# pool_remove <pool> <file>: destroy $FSNAME.<pool>, then verify fallback
+# striping works, setstripe on the missing pool fails, and the pool param is
+# gone (get_param errors).  On success the pool is dropped from the cleanup
+# list.  Returns 1/2/3 for the respective failures.
+pool_remove() {
+ echo "Destroying pool"
+ local pool=$1
+ local file=$2
+
+ do_facet mgs $LCTL pool_destroy $FSNAME.$pool
+
+ # allow the destroy to propagate before probing — TODO confirm 2s is enough
+ sleep 2
+ # striping on an empty/nonexistent pool should fall back
+ # to "pool of everything"
+ touch $file || {
+ error_noexit "failed to use fallback striping for missing pool"
+ return 1
+ }
+ # setstripe on an empty pool should fail
+ $SETSTRIPE -p $pool $file 2>/dev/null && {
+ error_noexit "expected failure when creating file" \
+ "with missing pool"
+ return 2
+ }
+
+ # get param should return err once pool is gone
+ if wait_update $HOSTNAME "lctl get_param -n \
+ lov.$FSNAME-*.pools.$pool 2>/dev/null || echo foo" "foo"
+ then
+ remove_pool_from_list $FSNAME.$pool
+ return 0
+ fi
+ error_noexit "Pool $FSNAME.$pool is not destroyed"
+ return 3
+}