3 # This script is used to test large size LUN support in Lustre.
5 ################################################################################
# NOTE(review): this chunk is line-sampled — the embedded original line numbers
# are non-contiguous, so statements between them are elided from this view.
8 LUSTRE=${LUSTRE:-$(cd $(dirname $0)/..; echo $PWD)}
9 . $LUSTRE/tests/test-framework.sh
11 . ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
# Refuse to run unless the user explicitly opted in: this suite reformats
# every configured device.
14 if [ "$REFORMAT" != "yes" ]; then
15 skip_env "$0 reformats all devices,\
16 please set REFORMAT to run this test"
# Knobs for the mdsrate workload (used by test_3).
19 # Variable to run mdsrate
20 THREADS_PER_CLIENT=${THREADS_PER_CLIENT:-5} # thread(s) per client node
21 MACHINEFILE=${MACHINEFILE:-$TMP/$TESTSUITE.machines}
22 NODES_TO_USE=${NODES_TO_USE:-$CLIENTS}
23 NUM_CLIENTS=$(get_node_count ${NODES_TO_USE//,/ })
26 ALWAYS_EXCEPT="$LARGE_LUN_EXCEPT"
# Remember whether Lustre was mounted when we started, so teardown at the
# bottom of the file can restore the mount.
29 LARGE_LUN_RESTORE_MOUNT=false
30 if is_mounted $MOUNT || is_mounted $MOUNT2; then
31 LARGE_LUN_RESTORE_MOUNT=true
33 # Unmount and cleanup the Lustre filesystem
37 FULL_MODE=${FULL_MODE:-false}
38 RUN_FSCK=${RUN_FSCK:-true}
39 # if SLOW=yes, enable the FULL_MODE
40 [[ $SLOW = yes ]] && FULL_MODE=true
41 #########################################################################
42 # Dump the super block information for the filesystem present on device.
# NOTE(review): the function header and the 'case $fstype' wrapper are elided
# from this view; only body fragments are visible below.
48 log "dump the super block information on $facet device $dev"
49 local fstype=$(facet_fstype $facet)
# ldiskfs backend: dumpe2fs header; zfs backend: zdb label dump of the pool.
53 cmd="$DUMPE2FS -h $dev" ;;
55 cmd="$ZDB -l $(zpool_name $facet)" ;;
57 error "unknown fstype!" ;;
60 do_facet $facet "$cmd"
63 # Report Lustre filesystem disk space usage and inodes usage of each MDT/OST.
# Fragment of the reporting helper: runs 'lfs df' in byte (-h) and inode (-i)
# modes on a mountpoint; the surrounding definition lines are elided.
72 cmd="lfs df -h $mnt_pnt"
76 cmd="lfs df -i $mnt_pnt"
81 # Cleanup the directories and files created by llverfs utility.
# NOTE(review): function header elided. $2 is the mountpoint (defaults to
# $MOUNT); $target is presumably $1 (the facet to run on) — confirm against
# the full source.
84 local mnt=${2:-$MOUNT}
85 local cmd="rm -rf $mnt/{llverfs,dir}*"
86 do_facet $target "$cmd"
# Fragment of the mdsrate driver used by test_3 (function header elided):
# builds an MPI machinefile, then creates files until OST inodes run out.
91 generate_machine_file $NODES_TO_USE $MACHINEFILE ||
92 error "can not generate machinefile"
94 # set the default stripe count for files in this test to one
95 local testdir=$MOUNT/mdsrate
98 $LFS setstripe $testdir -i 0 -c 1
101 local num_dirs=$THREADS_PER_CLIENT
# Guard the division below against THREADS_PER_CLIENT=0.
102 [[ $num_dirs -eq 0 ]] && num_dirs=1
103 local free_inodes=$(lfs df -i $MOUNT | grep "OST:0" | awk '{print $4}')
105 num_files=$((free_inodes / num_dirs))
107 local command="$MDSRATE $MDSRATE_DEBUG --create --verbose \
108 --ndirs $num_dirs --dirfmt '$testdir/dir%d' \
109 --nfiles $num_files --filefmt 'file%%d'"
112 mpi_run -machinefile $MACHINEFILE \
113 -np $((NUM_CLIENTS * THREADS_PER_CLIENT)) $command
# Check mpi_run's own exit status, not that of any later pipeline stage.
115 if [ ${PIPESTATUS[0]} != 0 ]; then
116 error "mdsrate create failed"
# Fragment of check_fsfacet: run the fsck appropriate to the facet's backend
# (definition header and case wrapper elided from this view).
122 local fstype=$(facet_fstype $facet)
# ldiskfs: full e2fsck with -y so all repairs are applied non-interactively.
126 run_e2fsck $(facet_active_host $facet) $(facet_device $facet) \
127 "-y" || error "run e2fsck error"
130 # Could call fsck.zfs, but currently it does nothing,
131 # Could also call zpool scrub, but that could take a LONG time
132 # do_facet $facet "fsck.zfs $(facet_device $facet)"
137 # Run e2fsck on MDS and OST
# Fragment: fsck the MDS facet, then every OST facet in turn.
141 check_fsfacet $SINGLEMDS
143 for num in $(seq $OSTCOUNT); do
144 check_fsfacet ost${num}
147 ################################## Main Flow ###################################
# test_1: verify raw-device I/O with llverdev on every OST LUN.
151 [ "$mds1_FSTYPE" != ldiskfs ] && skip_env "ldiskfs only test"
155 for num in $(seq $OSTCOUNT); do
156 dev=$(ostdevname $num)
157 log "run llverdev on the OST $dev"
# -vpf: presumably verbose/partial/force-overwrite — confirm against the
# llverdev usage text.
158 do_rpc_nodes $(facet_host ost${num}) run_llverdev $dev -vpf ||
159 error "llverdev on $dev failed!"
161 # restore format overwritten by llverdev
164 run_test 1 "run llverdev on raw LUN"
# test_2 fragment: mount each OST backend directly (ldiskfs or zfs) and run
# llverfs on it, in partial mode and — when FULL_MODE — full mode, with an
# fsck after each pass. Interior lines are elided from this view.
170 local zostsize_restore=${OSTSIZE}
171 local zmin=$((30 << 30)) # 30GiB in bytes
# Ensure OSTSIZE is restored even if the test errors out mid-loop.
174 stack_trap "export OSTSIZE=$zostsize_restore" EXIT
176 for num in $(seq $OSTCOUNT); do
177 dev=$(ostdevname $num)
178 ostmnt=$(facet_mntpt ost${num})
179 fstype=$(facet_fstype ost${num})
# zfs needs at least 30GiB; grow OSTSIZE temporarily if the backing
# device is large enough, otherwise skip this OST.
181 if [[ $fstype == "zfs" ]] && [[ ${OSTSIZE} -lt ${zmin} ]]; then
182 local real_dev=$(ostvdevname $num)
# NOTE(review): '$facet' here looks stale — presumably ost${num} was
# intended; confirm against the full source.
183 local num_sectors=$(get_num_sectors $facet $real_dev)
184 local phy_bytes=$((num_sectors * 512))
186 if [ ${phy_bytes} -lt ${zmin} ] ; then
187 log "ost${num}: OSTSIZE ${OSTSIZE} less than 30GiB"
188 log "ost${num}: Block device ${phy_bytes} too small"
189 log " .. skipping this ost"
192 # Backing block device is big enough
194 log "ost${num}: OSTSIZE ${OSTSIZE} too small, increasing to 30GiB [temporarily]"
196 # NOTE: OSTSIZE is in KB
197 export OSTSIZE=$((zmin >> 10))
200 # Mount the OST as an ldiskfs or zfs filesystem.
201 log "mount the OST $dev as a $fstype filesystem"
202 add ost${num} $(mkfs_opts ost${num} $dev) $FSTYPE_OPT \
203 --reformat $(ostdevname $num) \
204 $(ostvdevname $num) > /dev/null ||
205 error "format ost${num} error"
# zfs datasets must be made mountable via legacy mount before 'mount -t zfs'.
206 if [ $fstype == zfs ]; then
207 import_zpool ost${num}
209 "$ZFS set canmount=on $dev; " \
210 "$ZFS set mountpoint=legacy $dev; " \
213 run_dumpfs ost${num} $dev
214 do_facet ost${num} mount -t $fstype $dev \
215 $ostmnt "$OST_MOUNT_OPTS"
217 # Run llverfs on the mounted filesystem in partial mode
218 # to ensure that the kernel can perform filesystem operations
219 # on the complete device without any errors.
220 log "run llverfs in partial mode on the OST $fstype $ostmnt"
221 do_rpc_nodes $(facet_host ost${num}) run_llverfs $ostmnt -vpl \
222 "no" || error "run_llverfs error on $fstype"
225 log "unmount the OST $dev"
228 # After llverfs is run on the filesystem in partial
229 # mode, a full e2fsck should be run to catch any errors early.
230 $RUN_FSCK && check_fsfacet ost${num}
# FULL_MODE pass (elided conditional): remount and verify the whole device.
233 log "full mode, mount the OST $dev as a $fstype again"
234 if [ $fstype == zfs ]; then
235 import_zpool ost${num}
237 do_facet ost${num} mount -t $(facet_fstype ost${num}) \
238 $dev $ostmnt "$OST_MOUNT_OPTS"
239 cleanup_dirs ost${num} $ostmnt
240 do_facet ost${num} "sync"
242 run_dumpfs ost${num} $dev
244 # Run llverfs on the mounted ldiskfs filesystem in full
245 # mode to ensure that the kernel can perform filesystem
246 # operations on the complete device without any errors.
247 log "run llverfs in full mode on OST $fstype $ostmnt"
248 do_rpc_nodes $(facet_host ost${num}) run_llverfs \
250 error "run_llverfs error on $fstype"
253 log "unmount the OST $dev"
256 # After llverfs is run on the ldiskfs filesystem in
257 # full mode, a full e2fsck should be run to catch any
259 $RUN_FSCK && check_fsfacet ost${num}
261 export OSTSIZE=${zostsize_restore}
263 [[ $skipped -ne 0 ]] && skip_env "No OST with enough space is available."
264 # there is no reason to continue using ost devices
265 # filled by llverfs as ldiskfs
268 run_test 2 "run llverfs on OST ldiskfs/zfs filesystem"
# test_3: consume all free OST inodes with the mdsrate MPI workload.
271 [ -z "$CLIENTS" ] && skip_env "CLIENTS not defined, skipping"
# BUGFIX: the skip message previously said "MIPRUN" although the variable
# being tested is $MPIRUN.
272 [ -z "$MPIRUN" ] && skip_env "MPIRUN not defined, skipping"
273 [ -z "$MDSRATE" ] && skip_env "MDSRATE not defined, skipping"
274 [ ! -x $MDSRATE ] && skip_env "$MDSRATE not built, skipping"
275 # Setup the Lustre filesystem.
276 log "setup the lustre filesystem"
277 REFORMAT="yes" check_and_setup_lustre
279 log "run mdsrate to use up the free inodes."
280 # Run the mdsrate test suite.
288 run_test 3 "use up free inodes on the OST with mdsrate"
# test_4 fragment: run llverfs through the mounted Lustre client, partial
# mode first, then (after a fresh setup) full mode. Interior lines elided.
291 # Setup the Lustre filesystem.
292 log "setup the lustre filesystem"
293 REFORMAT="yes" check_and_setup_lustre
296 for num in $(seq $OSTCOUNT); do
297 dev=$(ostdevname $num)
298 run_dumpfs ost${num} $dev
301 # Run llverfs on the mounted Lustre filesystem both in partial and
302 # full mode to fill the filesystem and verify the file contents.
303 log "run llverfs in partial mode on the Lustre filesystem $MOUNT"
304 run_llverfs $MOUNT -vp "no" || error "run_llverfs error on lustre"
312 # Setup the Lustre filesystem again.
313 log "setup the lustre filesystem again"
316 cleanup_dirs client $MOUNT
320 for num in $(seq $OSTCOUNT); do
321 dev=$(ostdevname $num)
322 run_dumpfs ost${num} $dev
325 log "run llverfs in full mode on the Lustre filesystem $MOUNT"
326 run_llverfs $MOUNT -vl "no" ||
327 error "run_llverfs error on lustre"
335 run_test 4 "run llverfs on lustre filesystem"
# Restore the client mount if Lustre was mounted when the suite started
# (see LARGE_LUN_RESTORE_MOUNT above), then do standard suite cleanup.
338 $LARGE_LUN_RESTORE_MOUNT && setupall
339 check_and_cleanup_lustre