LU-6910 osp: add procfs values for OST reserved size
[fs/lustre-release.git] / lustre / tests / sanity.sh
index 7a25e4b..1bb2891 100755
@@ -13507,6 +13507,120 @@ test_252() {
 }
 run_test 252 "check lr_reader tool"
 
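+# test_253_fill_ost: append to $DIR/$tdir/1 until the OST named by $1 has
+# roughly $3 MB (the high watermark) of free space left.  $2 is the MDT's
+# OSP proc path for this OST (passed along, but not used below).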
+test_253_fill_ost() {
+       local size_1
+       local hwm=$3
+       local free_10
+
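+       # $LFS df reports 1K blocks, so blocks/1024 is the OST's free MB.
+       # Write enough to push free space below hwm: (free - hwm) MB plus
+       # 10% slack, or 10% of the free space if that is larger.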
+       local blocks=$($LFS df $MOUNT | grep $1 | awk '{ print $4 }')
+       size_1=$((blocks/1024-hwm))
+       free_10=$((blocks/10240))
+       if (( free_10 > size_1 )); then
+               size_1=$free_10
+       else
+               size_1=$((size_1+size_1/10))
+       fi
+       if (( hwm < blocks/1024 )); then
+               dd if=/dev/zero of=$DIR/$tdir/1 bs=1M count=$size_1 \
+                        oflag=append conv=notrunc
+
+               sleep_maxage
+
+               blocks=$($LFS df $MOUNT | grep $1 | awk '{ print $4 }')
+               echo "OST still has $((blocks/1024)) mbytes free"
+       fi
+}
+
+test_253() {
+       local ostidx=0
+       local rc=0
+
+       [ $PARALLEL == "yes" ] && skip "skip parallel run" && return
+       remote_mds_nodsh && skip "remote MDS with nodsh" && return
+       remote_mgs_nodsh && skip "remote MGS with nodsh" && return
+
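+       # start from a clean directory with pending deletions flushed so
+       # the free-space numbers below are stable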
+       rm -rf $DIR/$tdir
+       wait_mds_ost_sync
+       wait_delete_completed
+       mkdir $DIR/$tdir
+       local ost_name=$($LFS osts | grep ${ostidx}": " | \
+               awk '{print $2}' | sed -e 's/_UUID$//')
+
+       # save the current watermarks on the MDT's OSP device for this OST
+       local mdtosc_proc1=$(get_mdtosc_proc_path $SINGLEMDS $ost_name)
+       local last_wm_h=$(do_facet $SINGLEMDS lctl get_param -n \
+                       osp.$mdtosc_proc1.reserved_mb_high)
+       local last_wm_l=$(do_facet $SINGLEMDS lctl get_param -n \
+                       osp.$mdtosc_proc1.reserved_mb_low)
+       echo "prev high watermark $last_wm_h, prev low watermark $last_wm_l"
+
+       do_facet mgs $LCTL pool_new $FSNAME.$TESTNAME ||
+               error "Pool creation failed"
+       do_facet mgs $LCTL pool_add $FSNAME.$TESTNAME $ost_name ||
+               error "Adding $ost_name to pool failed"
+
+       # Wait for the client to see the OST in the pool
+       wait_update $HOSTNAME "lctl get_param -n
+                       lov.$FSNAME-*.pools.$TESTNAME | sort -u |
+                       grep $ost_name" "$ost_name""_UUID" $((TIMEOUT/2)) ||
+                       return 2
+       $SETSTRIPE $DIR/$tdir -i $ostidx -c 1 -p $FSNAME.$TESTNAME ||
+               error "Setstripe failed"
+
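+       # create one file on the OST and record how much space is left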
+       dd if=/dev/zero of=$DIR/$tdir/0 bs=1M count=10
+       local blocks=$($LFS df $MOUNT | grep $ost_name | awk '{ print $4 }')
+       echo "OST still has $((blocks/1024)) mbytes free"
+
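+       # leave only ~10MB of headroom above the watermarks, so a small
+       # write is enough to trip them and stop object allocation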
+       local new_hwm=$((blocks/1024-10))
+       do_facet $SINGLEMDS lctl set_param \
+                       osp.$mdtosc_proc1.reserved_mb_high=$((new_hwm+5))
+       do_facet $SINGLEMDS lctl set_param \
+                       osp.$mdtosc_proc1.reserved_mb_low=$new_hwm
+
+       test_253_fill_ost $ost_name $mdtosc_proc1 $new_hwm
+
+       # The first ENOSPC may trigger orphan deletion, so fill again.
+       test_253_fill_ost $ost_name $mdtosc_proc1 $new_hwm
+
+       local oa_status=$(do_facet $SINGLEMDS lctl get_param -n \
+                       osp.$mdtosc_proc1.prealloc_status)
+       echo "prealloc_status $oa_status"
+
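+       # with free space below the watermark, creating a new file (which
+       # needs a new OST object) must fail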
+       dd if=/dev/zero of=$DIR/$tdir/2 bs=1M count=1 &&
+               error "File creation should fail"
+       # object allocation was stopped, but we can still append to files
+       dd if=/dev/zero of=$DIR/$tdir/1 bs=1M seek=6 count=5 oflag=append ||
+               error "Append failed"
+       rm -f $DIR/$tdir/1 $DIR/$tdir/0 $DIR/$tdir/r*
+
+       wait_delete_completed
+
+       sleep_maxage
+
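+       # the freed space should lift the OST back above the watermark,
+       # so new file creation works again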
+       for i in $(seq 10 12); do
+               dd if=/dev/zero of=$DIR/$tdir/$i bs=1M count=1 2>/dev/null ||
+                       error "File creation failed after rm"
+       done
+
+       oa_status=$(do_facet $SINGLEMDS lctl get_param -n \
+                       osp.$mdtosc_proc1.prealloc_status)
+       echo "prealloc_status $oa_status"
+
+       if (( oa_status != 0 )); then
+               error "Object allocation still disable after rm"
+       fi
+       do_facet $SINGLEMDS lctl set_param \
+                       osp.$mdtosc_proc1.reserved_mb_high=$last_wm_h
+       do_facet $SINGLEMDS lctl set_param \
+                       osp.$mdtosc_proc1.reserved_mb_low=$last_wm_l
+
+       do_facet mgs $LCTL pool_remove $FSNAME.$TESTNAME $ost_name ||
+               error "Remove $ost_name from pool failed"
+       do_facet mgs $LCTL pool_destroy $FSNAME.$TESTNAME ||
+               error "Pool destroy fialed"
+}
+run_test 253 "Check object allocation limit"
 
 cleanup_test_300() {
        trap 0