#!/bin/bash
+# -*- mode: Bash; tab-width: 4; indent-tabs-mode: t; -*-
+# vim:autoindent:shiftwidth=4:tabstop=4:
#
# Run select tests by setting ONLY, or as arguments to the script.
# Skip specific tests by setting EXCEPT.
. $LUSTRE/tests/test-framework.sh
init_test_env $@
. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
+init_logging
check_and_setup_lustre
local ost_count=$($GETSTRIPE $file | grep 0x | wc -l)
[[ -n "$count" ]] && [[ $ost_count -ne $count ]] && \
- error "Stripe count $count expected; got $ost_count"
+ { error "Stripe count $count expected; got $ost_count" && return 1; }
return 0
}
fi
}
-create_pool() {
- do_facet $SINGLEMDS lctl pool_new $FSNAME.$1
- local RC=$?
- # get param should return err until pool is created
- [[ $RC -ne 0 ]] && return $RC
-
- wait_update $HOSTNAME "lctl get_param -n lov.$FSNAME-*.pools.$1 \
- 2>/dev/null || echo foo" "" || RC=1
- [[ $RC -ne 0 ]] && error "pool_new failed"
- return $RC
-}
-
# Wait until the given pool is empty on the client side, i.e. until the
# MDS-side target removals have propagated to the client lov pool lists.
# Arguments: $1 - pool name (without the "$FSNAME." prefix)
# Globals:   FSNAME, HOSTNAME (read); assigns the non-local variable 'pool'
# Fails the test via error() if the pool never drains within wait_update's
# timeout.
drain_pool() {
pool=$1
wait_update $HOSTNAME "lctl get_param -n lov.$FSNAME-*.pools.$pool" ""\
||error "Failed to remove targets from pool: $pool"
}
-destroy_pool_int() {
- OSTS=$(do_facet $SINGLEMDS lctl pool_list $1 | \
- awk '$1 !~ /^Pool:/ {print $1}')
- for ost in $OSTS
- do
- do_facet $SINGLEMDS lctl pool_remove $1 $ost
- done
- do_facet $SINGLEMDS lctl pool_destroy $1
-}
-
-destroy_pool() {
- local RC
-
- do_facet $SINGLEMDS lctl pool_list $FSNAME.$1
- RC=$?
- [[ $RC -ne 0 ]] && return $RC
-
- destroy_pool_int $FSNAME.$1
- RC=$?
- [[ $RC -ne 0 ]] && return $RC
-
- wait_update $HOSTNAME "lctl get_param -n lov.$FSNAME-*.pools.$1 \
- 2>/dev/null || echo foo" "foo" && return 0
-}
-
add_pool() {
local pool=$1
local osts=$2
}
create_pool_nofail() {
- create_pool $1
+ create_pool $FSNAME.$1
if [[ $? != 0 ]]
then
error "Pool creation of $1 failed"
}
create_pool_fail() {
- create_pool $1
+ create_pool $FSNAME.$1
if [[ $? == 0 ]]
then
error "Pool creation of $1 succeeded; should have failed"
cleanup_tests
}
+# Install an EXIT trap that destroys any pools left over by the current
+# test, so a failing or interrupted test does not leak pools into the
+# next one. Called at the top of every test_N function.
+set_cleanup_trap() {
+trap "cleanup_pools $FSNAME" EXIT
+}
# Initialization
remote_mds_nodsh && skip "remote MDS with nodsh" && exit 0
remote_ost_nodsh && skip "remote OST with nodsh" && exit 0
ost_pools_init
-# Tests for new commands added
+# Tests for new commands added
test_1() {
+ set_cleanup_trap
echo "Creating a pool with a 1 character pool name"
create_pool_nofail p
create_pool_fail $NAME
echo "pool_new should fail if fs-name or poolname are missing."
- do_facet $SINGLEMDS lctl pool_new .pool1
+ do_facet $SINGLEMDS lctl pool_new .pool1 2>/dev/null
[[ $? -ne 0 ]] || \
error "pool_new did not fail even though fs-name was missing."
- do_facet $SINGLEMDS lctl pool_new pool1
+ do_facet $SINGLEMDS lctl pool_new pool1 2>/dev/null
[[ $? -ne 0 ]] || \
error "pool_new did not fail even though fs-name was missing."
- do_facet $SINGLEMDS lctl pool_new ${FSNAME}.
+ do_facet $SINGLEMDS lctl pool_new ${FSNAME}. 2>/dev/null
[[ $? -ne 0 ]] || \
error "pool_new did not fail even though pool name was missing."
- do_facet $SINGLEMDS lctl pool_new .
+ do_facet $SINGLEMDS lctl pool_new . 2>/dev/null
[[ $? -ne 0 ]] || \
error "pool_new did not fail even though pool name and fs-name " \
"were missing."
- do_facet $SINGLEMDS lctl pool_new ${FSNAME},pool1
+ do_facet $SINGLEMDS lctl pool_new ${FSNAME},pool1 2>/dev/null
[[ $? -ne 0 ]] || \
error "pool_new did not fail even though pool name format was wrong"
- do_facet $SINGLEMDS lctl pool_new ${FSNAME}/pool1
+ do_facet $SINGLEMDS lctl pool_new ${FSNAME}/pool1 2>/dev/null
[[ $? -ne 0 ]] || \
error "pool_new did not fail even though pool name format was wrong"
- do_facet $SINGLEMDS lctl pool_new ${FSNAME}.p
+ do_facet $SINGLEMDS lctl pool_new ${FSNAME}.p 2>/dev/null
[[ $? -ne 0 ]] || \
error "pool_new did not fail even though pool1 existed"
destroy_pool p
run_test 1 "Test lctl pool_new ========================================="
test_2a() {
+ set_cleanup_trap
destroy_pool $POOL
- do_facet $SINGLEMDS lctl pool_add $FSNAME.$POOL $FSNAME-OST0000
+ do_facet $SINGLEMDS lctl pool_add $FSNAME.$POOL $FSNAME-OST0000 2>/dev/null
[[ $? -ne 0 ]] || \
error " pool_add did not fail even though pool did " \
" not exist."
run_test 2a "pool_add: non-existant pool"
test_2b() {
+ set_cleanup_trap
do_facet $SINGLEMDS lctl pool_add $FSNAME.p1234567891234567890 \
- $FSNAME-OST0000
+ $FSNAME-OST0000 2>/dev/null
[[ $? -ne 0 ]] || \
error "pool_add did not fail even though pool name was invalid."
}
# Testing various combinations of OST name list
test_2c() {
+ set_cleanup_trap
local TGT
local RC
run_test 2c "pool_add: OST index combinations ==========================="
test_2d() {
+ set_cleanup_trap
local TGT
local RC
run_test 2d "pool_add: OSTs that don't exist should be rejected ========"
test_2e() {
+ set_cleanup_trap
local TGT
local RC
local RESULT
run_test 2e "pool_add: OST already in a pool should be rejected ========"
test_3a() {
+ set_cleanup_trap
lctl get_param -n lov.$FSNAME-*.pools.$POOL 2>/dev/null
[[ $? -ne 0 ]] || \
destroy_pool $POOL
- do_facet $SINGLEMDS lctl pool_remove $FSNAME.$POOL $FSNAME-OST0000
+ do_facet $SINGLEMDS lctl pool_remove $FSNAME.$POOL $FSNAME-OST0000 2>/dev/null
[[ $? -ne 0 ]] || \
- error "pool_remove did not fail even though" \
- "pool did not exist."
+ error "pool_remove did not fail even though pool did not exist."
}
run_test 3a "pool_remove: non-existant pool"
test_3b() {
- do_facet $SINGLEMDS lctl pool_remove ${NON_EXISTANT_FS}.$POOL OST0000
+ set_cleanup_trap
+ do_facet $SINGLEMDS lctl pool_remove ${NON_EXISTANT_FS}.$POOL OST0000 2>/dev/null
[[ $? -ne 0 ]] || \
error "pool_remove did not fail even though fsname did not exist."
}
run_test 3b "pool_remove: non-existant fsname"
test_3c() {
+ set_cleanup_trap
do_facet $SINGLEMDS lctl pool_remove $FSNAME.p1234567891234567890 \
- $FSNAME-OST0000
+ $FSNAME-OST0000 2>/dev/null
[[ $? -ne 0 ]] || \
error "pool_remove did not fail even though pool name was invalid."
}
run_test 3c "pool_remove: Invalid pool name"
-
# Testing various combinations of OST name list
test_3d() {
+ set_cleanup_trap
lctl get_param -n lov.$FSNAME-*.pools.$POOL 2>/dev/null
[[ $? -ne 0 ]] || \
destroy_pool $POOL
run_test 3d "pool_remove: OST index combinations ==========================="
test_4a() {
+ set_cleanup_trap
lctl get_param -n lov.$FSNAME-*.pools.$POOL 2>/dev/null
[[ $? -ne 0 ]] || \
destroy_pool $POOL
- do_facet $SINGLEMDS lctl pool_destroy $FSNAME.$POOL
+ do_facet $SINGLEMDS lctl pool_destroy $FSNAME.$POOL 2>/dev/null
[[ $? -ne 0 ]] || \
error "pool_destroy did not fail even though pool did not exist."
}
run_test 4a "pool_destroy: non-existant pool"
test_4b() {
- do_facet $SINGLEMDS lctl pool_destroy ${NON_EXISTANT_FS}.$POOL
+ set_cleanup_trap
+ do_facet $SINGLEMDS lctl pool_destroy ${NON_EXISTANT_FS}.$POOL 2>/dev/null
[[ $? -ne 0 ]] || \
error "pool_destroy did not fail even though the filesystem did not exist."
}
run_test 4b "pool_destroy: non-existant fs-name"
test_4c() {
+ set_cleanup_trap
create_pool_nofail $POOL
add_pool $POOL "OST0000" "$FSNAME-OST0000_UUID "
do_facet $SINGLEMDS lctl pool_destroy ${FSNAME}.$POOL
[[ $? -ne 0 ]] || \
- error "pool_destroy succeeded with a non-empty pool name."
+ error "pool_destroy succeeded with a non-empty pool."
destroy_pool $POOL
}
run_test 4c "pool_destroy: non-empty pool ==============================="
sub_test_5() {
local LCMD=$1
- $LCMD pool_list
+ $LCMD pool_list 2>/dev/null
[[ $? -ne 0 ]] || \
error "pool_list did not fail even though fsname was not mentioned."
- destroy_pool $POOL
- destroy_pool $POOL2
+ destroy_pool $POOL 2>/dev/null
+ destroy_pool $POOL2 2>/dev/null
create_pool_nofail $POOL
create_pool_nofail $POOL2
[[ $? -eq 0 ]] || \
error "pool_list $FSNAME.$POOL failed."
- $LCMD pool_list ${NON_EXISTANT_FS}
+ $LCMD pool_list ${NON_EXISTANT_FS} 2>/dev/null
[[ $? -ne 0 ]] || \
error "pool_list did not fail for a non-existant fsname $NON_EXISTANT_FS"
- $LCMD pool_list ${FSNAME}.$NON_EXISTANT_POOL
+ $LCMD pool_list ${FSNAME}.$NON_EXISTANT_POOL 2>/dev/null
[[ $? -ne 0 ]] || \
error "pool_list did not fail for a non-existant pool $NON_EXISTANT_POOL"
fi
rm -rf ${DIR}nonexistant
- $LCMD pool_list ${DIR}nonexistant
+ $LCMD pool_list ${DIR}nonexistant 2>/dev/null
[[ $? -ne 0 ]] || \
error "pool_list did not fail for invalid mountpoint ${DIR}nonexistant"
}
test_5() {
+ set_cleanup_trap
# Issue commands from client
sub_test_5 $LCTL
sub_test_5 $LFS
run_test 5 "lfs/lctl pool_list"
test_6() {
+ set_cleanup_trap
local POOL_ROOT=${POOL_ROOT:-$DIR/$tdir}
local POOL_DIR=$POOL_ROOT/dir_tst
local POOL_FILE=$POOL_ROOT/file_tst
[[ $? -eq 0 ]] || \
error "pool_list $FSNAME failed."
- do_facet $SINGLEMDS lctl pool_add $FSNAME.$POOL $TGT_ALL
+ add_pool $POOL $TGT_ALL "$TGT_UUID"
mkdir -p $POOL_DIR
$SETSTRIPE -c -1 -p $POOL $POOL_DIR
check_dir_in_pool $POOL_DIR $POOL
# If an invalid pool name is specified, the command should fail
- $SETSTRIPE -c 2 -p $INVALID_POOL $POOL_DIR
+ $SETSTRIPE -c 2 -p $INVALID_POOL $POOL_DIR 2>/dev/null
[[ $? -ne 0 ]] || \
- error_ignore 19919 "setstripe to invalid pool did not fail."
+ error "setstripe to invalid pool did not fail."
# If the pool name does not exist, the command should fail
- $SETSTRIPE -c 2 -p $NON_EXISTANT_POOL $POOL_DIR
+ $SETSTRIPE -c 2 -p $NON_EXISTANT_POOL $POOL_DIR 2>/dev/null
[[ $? -ne 0 ]] || \
- error_ignore 19919 "setstripe to non-existant pool did not fail."
+ error "setstripe to non-existant pool did not fail."
# lfs setstripe should work as before if a pool name is not specified.
$SETSTRIPE -c -1 $POOL_DIR
# pool is specified.
create_pool_nofail $POOL2
add_pool $POOL2 "OST0000" "$FSNAME-OST0000_UUID "
- $SETSTRIPE -o 1 -p $POOL2 $ROOT_POOL/$tfile
+ $SETSTRIPE -o 1 -p $POOL2 $ROOT_POOL/$tfile 2>/dev/null
[[ $? -ne 0 ]] || \
error "$SETSTRIPE with start index outside the pool did not fail."
- destroy_pool $POOL
- destroy_pool $POOL2
}
run_test 6 "getstripe/setstripe"
test_11() {
+ set_cleanup_trap
local POOL_ROOT=${POOL_ROOT:-$DIR/$tdir}
- [[ $OSTCOUNT -le 1 ]] && skip "Need atleast 2 OSTs" && return
+ [[ $OSTCOUNT -le 1 ]] && skip_env "Need atleast 2 OSTs" && return
create_pool_nofail $POOL
create_pool_nofail $POOL2
- do_facet $SINGLEMDS lctl pool_add $FSNAME.$POOL \
- $FSNAME-OST[$TGT_FIRST-$TGT_MAX/2]
local start=$((TGT_FIRST+1))
do_facet $SINGLEMDS lctl pool_add $FSNAME.$POOL2 \
$FSNAME-OST[$start-$TGT_MAX/2]
+ add_pool $POOL $TGT_HALF "$TGT_UUID2"
+
create_dir $POOL_ROOT/dir1 $POOL
create_dir $POOL_ROOT/dir2 $POOL2
check_dir_in_pool $POOL_ROOT/dir1 $POOL
rm -rf $POOL_ROOT/dir?
- destroy_pool $POOL
- destroy_pool $POOL2
-
return 0
}
run_test 11 "OSTs in overlapping/multiple pools"
test_12() {
+ set_cleanup_trap
local POOL_ROOT=${POOL_ROOT:-$DIR/$tdir}
- [[ $OSTCOUNT -le 2 ]] && skip "Need atleast 3 OSTs" && return
+ [[ $OSTCOUNT -le 2 ]] && skip_env "Need atleast 3 OSTs" && return
create_pool_nofail $POOL
create_pool_nofail $POOL2
- do_facet $SINGLEMDS lctl pool_add $FSNAME.$POOL \
- $FSNAME-OST[$TGT_FIRST-$TGT_MAX/2]
local start=$((TGT_FIRST+1))
do_facet $SINGLEMDS lctl pool_add $FSNAME.$POOL2 \
$FSNAME-OST[$start-$TGT_MAX/2]
+ add_pool $POOL $TGT_HALF "$TGT_UUID2"
+
echo creating some files in $POOL and $POOL2
create_dir $POOL_ROOT/dir1 $POOL
echo Changing the pool membership
do_facet $SINGLEMDS lctl pool_remove $FSNAME.$POOL $FSNAME-OST[$TGT_FIRST]
do_facet $SINGLEMDS lctl pool_list $FSNAME.$POOL
- do_facet $SINGLEMDS lctl pool_add $FSNAME.$POOL2 $FSNAME-OST[$TGT_FIRST]
+ FIRST_UUID=$(echo $TGT_UUID | awk '{print $1}')
+ add_pool $POOL2 $FSNAME-OST[$TGT_FIRST] "$FIRST_UUID "
do_facet $SINGLEMDS lctl pool_list $FSNAME.$POOL2
echo Checking the files again
echo Creating some more files
create_dir $POOL_ROOT/dir3 $POOL
- create_dir $POOL_ROOT/dir4 POOL2
+ create_dir $POOL_ROOT/dir4 $POOL2
create_file $POOL_ROOT/file3 $POOL
create_file $POOL_ROOT/file4 $POOL2
check_file_in_pool $POOL_ROOT/file3 $POOL
check_file_in_pool $POOL_ROOT/file4 $POOL2
- destroy_pool $POOL
- destroy_pool $POOL2
-
return 0
}
run_test 12 "OST Pool Membership"
test_13() {
- [[ $OSTCOUNT -le 2 ]] && skip "Need atleast 3 OSTs" && return
+ set_cleanup_trap
+ [[ $OSTCOUNT -le 2 ]] && skip_env "Need atleast 3 OSTs" && return
local POOL_ROOT=${POOL_ROOT:-$DIR/$tdir}
local numfiles=10
local count=3
create_pool_nofail $POOL
- do_facet $SINGLEMDS lctl pool_add $FSNAME.$POOL $TGT_ALL
+ add_pool $POOL $TGT_ALL "$TGT_UUID"
create_dir $POOL_ROOT/dir1 $POOL -1
createmany -o $POOL_ROOT/dir1/$tfile $numfiles || \
done
rm -rf create_dir $POOL_ROOT/dir?
- destroy_pool $POOL
return 0
}
run_test 13 "Striping characteristics in a pool"
test_14() {
- [[ $OSTCOUNT -le 2 ]] && skip "Need atleast 3 OSTs" && return
+ set_cleanup_trap
+ [[ $OSTCOUNT -le 2 ]] && skip_env "Need atleast 3 OSTs" && return
local POOL_ROOT=${POOL_ROOT:-$DIR/$tdir}
local numfiles=100
# echo "Iteration: $i OST: $OST"
create_file $POOL_ROOT/dir1/file${i} $POOL 1
- check_file_in_osts $POOL_ROOT/dir1/file${i} $OST
+ check_file_in_pool $POOL_ROOT/dir1/file${i} $POOL
i=$((i+1))
done
- # Fill OST $TGT_FIRST with 10M files
+ # Fill up OST0 until it is nearly full.
+ # Create 9 files of size OST0_SIZE/10 each.
create_dir $POOL_ROOT/dir2 $POOL2 1
- RC=0
- i=0
- while [[ $RC -eq 0 ]];
+ $LFS df $POOL_ROOT/dir2
+ echo "Filling up OST0"
+ OST0_SIZE=`$LFS df $POOL_ROOT/dir2 | awk '/\[OST:0\]/ {print $4}'`
+ FILE_SIZE=$((OST0_SIZE/1024/10))
+ i=1
+ while [[ $i -lt 10 ]];
do
- dd if=/dev/zero of=$POOL_ROOT/dir2/f${i} bs=1k count=$((1024*10))
- RC=$?
+ dd if=/dev/zero of=$POOL_ROOT/dir2/f${i} bs=1M count=$FILE_SIZE
i=$((i+1))
done
-
- # Leave some space on the OST
- rm -f $POOL_ROOT/dir2/f0
- df -h /mnt/ost?
+ $LFS df $POOL_ROOT/dir2
# OST $TGT_FIRST is no longer favored; but it may still be used.
create_dir $POOL_ROOT/dir3 $POOL 1
done
rm -rf $POOL_ROOT
- destroy_pool $POOL
- destroy_pool $POOL2
return 0
}
run_test 14 "Round robin and QOS striping within a pool"
test_15() {
+ set_cleanup_trap
local POOL_ROOT=${POOL_ROOT:-$DIR/$tdir}
local numfiles=100
local i=0
i=$((i+1))
done
- i=0
- while [[ $i -lt $OSTCOUNT ]]
- do
- destroy_pool pool${i}
- i=$((i+1))
- done
-
return 0
}
run_test 15 "One directory per OST/pool"
test_16() {
+ set_cleanup_trap
local POOL_ROOT=${POOL_ROOT:-$DIR/$tdir}
local numfiles=10
local i=0
rm -rf $POOL_ROOT/$tdir
- destroy_pool $POOL
-
return 0
}
run_test 16 "Inheritance of pool properties"
test_17() {
+ set_cleanup_trap
local POOL_ROOT=${POOL_ROOT:-$DIR/$tdir}
local numfiles=10
local i=0
}
test_18() {
+ set_cleanup_trap
local POOL_ROOT=${POOL_ROOT:-$DIR/$tdir}
local numfiles=10000
local i=0
test_19() {
+ set_cleanup_trap
local POOL_ROOT=${POOL_ROOT:-$DIR/$tdir}
local numfiles=12
local dir1=$POOL_ROOT/dir1
done
rm -rf $dir1 $dir2
- destroy_pool $POOL
return 0
}
run_test 19 "Pools should not come into play when not specified"
test_20() {
+ set_cleanup_trap
local POOL_ROOT=${POOL_ROOT:-$DIR/$tdir}
local numfiles=12
local dir1=$POOL_ROOT/dir1
check_file_not_in_pool $dir2/file4 $POOL2
rm -rf $dir1
- destroy_pool $POOL
- destroy_pool $POOL2
return 0
}
run_test 20 "Different pools in a directory hierarchy."
test_21() {
+ set_cleanup_trap
local POOL_ROOT=${POOL_ROOT:-$DIR/$tdir}
- [[ $OSTCOUNT -le 1 ]] && skip "Need atleast 2 OSTs" && return
+ [[ $OSTCOUNT -le 1 ]] && skip_env "Need atleast 2 OSTs" && return
local numfiles=12
local i=0
check_file_in_pool $dir/file1 $POOL
rm -rf $dir
- destroy_pool $POOL
return 0
}
for c in $(seq 1 10);
do
echo "Pool $pool, iteration $c"
- create_pool_nofail $pool
- local TGT=$(for i in `seq $TGT_FIRST $step $TGT_MAX`; \
- do printf "$FSNAME-OST%04x_UUID " $i; done)
- add_pool $pool "$FSNAME-OST[$TGT_FIRST-$TGT_MAX/$step]" "$TGT"
- destroy_pool $pool
- do_facet $SINGLEMDS lctl pool_list $FSNAME
+ do_facet $SINGLEMDS lctl pool_add $FSNAME.$pool OST[$TGT_FIRST-$TGT_MAX/$step] 2>/dev/null
+ local TGT_SECOND=$(($TGT_FIRST+$step))
+ if [ "$TGT_SECOND" -le "$TGT_MAX" ]; then
+ do_facet $SINGLEMDS lctl pool_remove $FSNAME.$pool OST[$TGT_SECOND-$TGT_MAX/$step]
+ fi
done
echo loop for $pool complete
}
test_22() {
+ set_cleanup_trap
local POOL_ROOT=${POOL_ROOT:-$DIR/$tdir}
- [[ $OSTCOUNT -le 1 ]] && skip "Need atleast 2 OSTs" && return
+ [[ $OSTCOUNT -le 1 ]] && skip_env "Need at least 2 OSTs" && return
local numfiles=100
+ create_pool_nofail $POOL
+ add_pool $POOL "OST0000" "$FSNAME-OST0000_UUID "
+ create_pool_nofail $POOL2
+ add_pool $POOL2 "OST0000" "$FSNAME-OST0000_UUID "
+
add_loop $POOL 1 &
add_loop $POOL2 2 &
sleep 5
run_test 22 "Simultaneous manipulation of a pool"
test_23() {
+ set_cleanup_trap
local POOL_ROOT=${POOL_ROOT:-$DIR/$tdir}
- [[ $OSTCOUNT -le 1 ]] && skip "Need atleast 2 OSTs" && return
+ [[ $OSTCOUNT -le 1 ]] && skip_env "Need atleast 2 OSTs" && return
mkdir -p $POOL_ROOT
check_runas_id $TSTID $TSTID $RUNAS || {
- skip "User $RUNAS_ID does not exist - skipping"
+ skip_env "User $RUNAS_ID does not exist - skipping"
return 0
}
- local numfiles=12
local i=0
local TGT
- local LIMIT=1024
+ local BLK_SZ=1024
+ local BUNIT_SZ=1024 # min block quota unit(kB)
+ local LOVNAME=`lctl get_param -n llite.*.lov.common_name | tail -n 1`
+ local OSTCOUNT=`lctl get_param -n lov.$LOVNAME.numobd`
+ local LIMIT
local dir=$POOL_ROOT/dir
- local file1="$dir/$tfile-quota1"
- local file2="$dir/$tfile-quota2"
+ local file="$dir/$tfile-quota"
create_pool_nofail $POOL
$LFS quotaoff -ug $MOUNT
$LFS quotacheck -ug $MOUNT
- $LFS setquota -u $TSTUSR -b $LIMIT -B $LIMIT $dir #-i 5 -I 5 $dir
-
- $LFS setstripe $file1 -c 1 -p $POOL
- chown $TSTUSR.$TSTUSR $file1
- ls -l $file1
+ LIMIT=$((BUNIT_SZ * (OSTCOUNT + 1)))
+ $LFS setquota -u $TSTUSR -b $LIMIT -B $LIMIT $dir
+ sleep 3
+ $LFS quota -v -u $TSTUSR $dir
+
+ $LFS setstripe $file -c 1 -p $POOL
+ chown $TSTUSR.$TSTUSR $file
+ ls -l $file
type runas
- stat=$(LC_ALL=C $RUNAS dd if=/dev/zero of=$file1 bs=1024 count=$((LIMIT*2)) 2>&1)
+ LOCALE=C $RUNAS dd if=/dev/zero of=$file bs=$BLK_SZ count=$((BUNIT_SZ*2)) || true
+ $LFS quota -v -u $TSTUSR $dir
+ cancel_lru_locks osc
+ stat=$(LOCALE=C $RUNAS dd if=/dev/zero of=$file bs=$BLK_SZ count=$BUNIT_SZ seek=$((BUNIT_SZ*2)) 2>&1)
RC=$?
echo $stat
[[ $RC -eq 0 ]] && error "dd did not fail with EDQUOT."
- echo $stat | grep "Disk quota exceeded"
+ echo $stat | grep "Disk quota exceeded" > /dev/null
[[ $? -eq 1 ]] && error "dd did not fail with EDQUOT."
+ $LFS quota -v -u $TSTUSR $dir
echo "second run"
$LFS quotaoff -ug $MOUNT
while [ $RC -eq 0 ];
do
i=$((i+1))
- stat=$(LOCALE=C $RUNAS2 dd if=/dev/zero of=${file2}$i bs=1024 \
+ stat=$(LOCALE=C $RUNAS2 dd if=/dev/zero of=${file}$i bs=1M \
count=$((LIMIT*LIMIT)) 2>&1)
RC=$?
if [ $RC -eq 1 ]; then
df -h
rm -rf $POOL_ROOT
- destroy_pool $POOL
return 0
}
run_test 23 "OST pools and quota"
test_24() {
+ set_cleanup_trap
local POOL_ROOT=${POOL_ROOT:-$DIR/$tdir}
- [[ $OSTCOUNT -le 1 ]] && skip "Need atleast 2 OSTs" && return
+ [[ $OSTCOUNT -le 1 ]] && skip_env "Need atleast 2 OSTs" && return
local numfiles=10
local i=0
done
rm -rf $POOL_ROOT
- destroy_pool $POOL
return 0
}
run_test 24 "Independence of pool from other setstripe parameters"
+# test_25: verify that OST pools survive an MDS restart.
+# For each of 10 iterations: create a pool, add OST0000 to it, restart the
+# MDS, then confirm the pool is still present and usable by striping a file
+# into it (skipped for the iteration if OST0 is not yet available again).
+test_25() {
+set_cleanup_trap
+local dev=$(mdsdevname ${SINGLEMDS//mds/})
+local POOL_ROOT=${POOL_ROOT:-$DIR/$tdir}
+
+mkdir -p $POOL_ROOT
+
+for i in $(seq 10); do
+create_pool_nofail pool$i
+do_facet $SINGLEMDS "lctl pool_add $FSNAME.pool$i OST0000; sync"
+# Wait for the pool membership to show up on the client before restarting.
+# NOTE(review): $1/$2 below are test_25's own (empty) positional args, so
+# this error message will print blanks — TODO fix the message.
+wait_update $HOSTNAME "lctl get_param -n lov.$FSNAME-*.pools.pool$i | \
+sort -u | tr '\n' ' ' " "$FSNAME-OST0000_UUID " || \
+error "pool_add failed: $1; $2"
+
+# Restart the MDS so the pool must be re-read from the on-disk config log.
+stop $SINGLEMDS || return 1
+start $SINGLEMDS ${dev} $MDS_MOUNT_OPTS || \
+{ error "Failed to start $SINGLEMDS after stopping" && break; }
+wait_osc_import_state mds ost FULL
+clients_up
+
+# Verify that the pool got created and is usable
+df $POOL_ROOT > /dev/null
+sleep 5
+# Make sure OST0 can be striped on
+$SETSTRIPE -o 0 -c 1 $POOL_ROOT/$tfile
+STR=$($GETSTRIPE $POOL_ROOT/$tfile | grep 0x | cut -f2 | tr -d " ")
+rm $POOL_ROOT/$tfile
+if [[ "$STR" == "0" ]]; then
+echo "Creating a file in pool$i"
+create_file $POOL_ROOT/file$i pool$i || break
+check_file_in_pool $POOL_ROOT/file$i pool$i || break
+else
+echo "OST 0 seems to be unavailable. Try later."
+fi
+done
+
+rm -rf $POOL_ROOT
+}
+run_test 25 "Create new pool and restart MDS ======================="
+
log "cleanup: ======================================================"
cd $ORIG_PWD
-cleanup_tests
+cleanup_pools $FSNAME
check_and_cleanup_lustre
echo '=========================== finished ==============================='
[ -f "$POOLSLOG" ] && cat $POOLSLOG && grep -q FAIL $POOLSLOG && exit 1 || true