3 # This script is used to test large size LUN support in Lustre.
8 LUSTRE=${LUSTRE:-$(dirname $0)/..}
9 . $LUSTRE/tests/test-framework.sh
13 ALWAYS_EXCEPT="$LARGE_LUN_EXCEPT"
16 if [ "$REFORMAT" != "yes" ]; then
17 skip_env "$0 reformats all devices,\
18 please set REFORMAT to run this test"
21 # Variables to run mdsrate
22 THREADS_PER_CLIENT=${THREADS_PER_CLIENT:-5} # thread(s) per client node
23 NODES_TO_USE=${NODES_TO_USE:-$CLIENTS}
24 NUM_CLIENTS=$(get_node_count ${NODES_TO_USE//,/ })
26 LARGE_LUN_RESTORE_MOUNT=false
27 if is_mounted $MOUNT || is_mounted $MOUNT2; then
28 LARGE_LUN_RESTORE_MOUNT=true
30 # Unmount and cleanup the Lustre filesystem
34 FULL_MODE=${FULL_MODE:-false}
35 RUN_FSCK=${RUN_FSCK:-true}
36 # if SLOW=yes, enable the FULL_MODE
37 [[ $SLOW = yes ]] && FULL_MODE=true
38 #########################################################################
39 # Dump the super block information for the filesystem present on device.
45 log "dump the super block information on $facet device $dev"
46 local fstype=$(facet_fstype $facet)
50 cmd="$DUMPE2FS -h $dev" ;;
52 cmd="$ZDB -l $(zpool_name $facet)" ;;
54 error "unknown fstype!" ;;
57 do_facet $facet "$cmd"
60 # Report Lustre filesystem disk space usage and inodes usage of each MDT/OST.
69 cmd="lfs df -h $mnt_pnt"
73 cmd="lfs df -i $mnt_pnt"
78 # Cleanup the directories and files created by llverfs utility.
81 local mnt=${2:-$MOUNT}
82 local cmd="rm -rf $mnt/{llverfs,dir}*"
83 do_facet $target "$cmd"
88 generate_machine_file $NODES_TO_USE $MACHINEFILE ||
89 error "can not generate machinefile"
91 # set the default stripe count for files in this test to one
92 local testdir=$MOUNT/mdsrate
95 $LFS setstripe $testdir -i 0 -c 1
98 local num_dirs=$THREADS_PER_CLIENT
99 [[ $num_dirs -eq 0 ]] && num_dirs=1
100 local free_inodes=$(lfs df -i $MOUNT | grep "OST:0" | awk '{print $4}')
102 num_files=$((free_inodes / num_dirs))
104 local command="$MDSRATE $MDSRATE_DEBUG --create --verbose \
105 --ndirs $num_dirs --dirfmt '$testdir/dir%d' \
106 --nfiles $num_files --filefmt 'file%%d'"
109 mpi_run -machinefile $MACHINEFILE \
110 -np $((NUM_CLIENTS * THREADS_PER_CLIENT)) $command
112 if [ ${PIPESTATUS[0]} != 0 ]; then
113 error "mdsrate create failed"
119 local fstype=$(facet_fstype $facet)
123 run_e2fsck $(facet_active_host $facet) $(facet_device $facet) \
124 "-y" || error "run e2fsck error"
127 # Could call fsck.zfs, but currently it does nothing.
128 # Could also call zpool scrub, but that could take a LONG time
129 # do_facet $facet "fsck.zfs $(facet_device $facet)"
134 # Run e2fsck on MDS and OST
138 check_fsfacet $SINGLEMDS
140 for num in $(seq $OSTCOUNT); do
141 check_fsfacet ost${num}
144 ################################## Main Flow ###################################
148 [ "$mds1_FSTYPE" != ldiskfs ] && skip_env "ldiskfs only test"
152 for num in $(seq $OSTCOUNT); do
153 dev=$(ostdevname $num)
154 log "run llverdev on the OST $dev"
155 do_rpc_nodes $(facet_host ost${num}) run_llverdev $dev -vpf ||
156 error "llverdev on $dev failed!"
158 # restore format overwritten by llverdev
161 run_test 1 "run llverdev on raw LUN"
167 local zostsize_restore=${OSTSIZE}
168 local zmin=$((30 << 30)) # 30GiB in bytes
171 stack_trap "export OSTSIZE=$zostsize_restore" EXIT
173 for num in $(seq $OSTCOUNT); do
174 dev=$(ostdevname $num)
175 ostmnt=$(facet_mntpt ost${num})
176 fstype=$(facet_fstype ost${num})
178 if [[ $fstype == "zfs" ]] && [[ ${OSTSIZE} -lt ${zmin} ]]; then
179 local real_dev=$(ostvdevname $num)
180 local num_sectors=$(get_num_sectors $facet $real_dev)
181 local phy_bytes=$((num_sectors * 512))
183 if [ ${phy_bytes} -lt ${zmin} ] ; then
184 log "ost${num}: OSTSIZE ${OSTSIZE} less than 30GiB"
185 log "ost${num}: Block device ${phy_bytes} too small"
186 log " .. skipping this ost"
189 # Backing block device is big enough
191 log "ost${num}: OSTSIZE ${OSTSIZE} too small, increasing to 30GiB [temporarily]"
193 # NOTE: OSTSIZE is in KB
194 export OSTSIZE=$((zmin >> 10))
197 # Mount the OST as an ldiskfs or zfs filesystem.
198 log "mount the OST $dev as a $fstype filesystem"
199 add ost${num} $(mkfs_opts ost${num} $dev) $FSTYPE_OPT \
200 --reformat $(ostdevname $num) \
201 $(ostvdevname $num) > /dev/null ||
202 error "format ost${num} error"
203 if [ $fstype == zfs ]; then
204 import_zpool ost${num}
206 "$ZFS set canmount=on $dev; " \
207 "$ZFS set mountpoint=legacy $dev; " \
210 run_dumpfs ost${num} $dev
211 do_facet ost${num} mount -t $fstype $dev \
212 $ostmnt "$OST_MOUNT_OPTS"
214 # Run llverfs on the mounted filesystem in partial mode
215 # to ensure that the kernel can perform filesystem operations
216 # on the complete device without any errors.
217 log "run llverfs in partial mode on the OST $fstype $ostmnt"
218 do_rpc_nodes $(facet_host ost${num}) run_llverfs $ostmnt -vpl \
219 "no" || error "run_llverfs error on $fstype"
222 log "unmount the OST $dev"
225 # After llverfs is run on the filesystem in partial
226 # mode, a full e2fsck should be run to catch any errors early.
227 $RUN_FSCK && check_fsfacet ost${num}
230 log "full mode, mount the OST $dev as a $fstype again"
231 if [ $fstype == zfs ]; then
232 import_zpool ost${num}
234 do_facet ost${num} mount -t $(facet_fstype ost${num}) \
235 $dev $ostmnt "$OST_MOUNT_OPTS"
236 cleanup_dirs ost${num} $ostmnt
237 do_facet ost${num} "sync"
239 run_dumpfs ost${num} $dev
241 # Run llverfs on the mounted ldiskfs filesystem in full
242 # mode to ensure that the kernel can perform filesystem
243 # operations on the complete device without any errors.
244 log "run llverfs in full mode on OST $fstype $ostmnt"
245 do_rpc_nodes $(facet_host ost${num}) run_llverfs \
247 error "run_llverfs error on $fstype"
250 log "unmount the OST $dev"
253 # After llverfs is run on the ldiskfs filesystem in
254 # full mode, a full e2fsck should be run to catch any
256 $RUN_FSCK && check_fsfacet ost${num}
258 export OSTSIZE=${zostsize_restore}
260 [[ $skipped -ne 0 ]] && skip_env "No OST with enough space is available."
261 # there is no reason to continue using ost devices
262 # filled by llverfs as ldiskfs
265 run_test 2 "run llverfs on OST ldiskfs/zfs filesystem"
268 [ -z "$MPIRUN" ] && skip_env "MIPRUN not defined, skipping"
269 [ -z "$MDSRATE" ] && skip_env "MDSRATE not defined, skipping"
270 [ ! -x $MDSRATE ] && skip_env "$MDSRATE not built, skipping"
271 # Setup the Lustre filesystem.
272 log "setup the lustre filesystem"
273 REFORMAT="yes" check_and_setup_lustre
275 log "run mdsrate to use up the free inodes."
276 # Run the mdsrate test suite.
284 run_test 3 "use up free inodes on the OST with mdsrate"
287 # Setup the Lustre filesystem.
288 log "setup the lustre filesystem"
289 REFORMAT="yes" check_and_setup_lustre
292 for num in $(seq $OSTCOUNT); do
293 dev=$(ostdevname $num)
294 run_dumpfs ost${num} $dev
297 # Run llverfs on the mounted Lustre filesystem both in partial and
298 # full mode to fill the filesystem and verify the file contents.
299 log "run llverfs in partial mode on the Lustre filesystem $MOUNT"
300 run_llverfs $MOUNT -vp "no" || error "run_llverfs error on lustre"
308 # Setup the Lustre filesystem again.
309 log "setup the lustre filesystem again"
312 cleanup_dirs client $MOUNT
316 for num in $(seq $OSTCOUNT); do
317 dev=$(ostdevname $num)
318 run_dumpfs ost${num} $dev
321 log "run llverfs in full mode on the Lustre filesystem $MOUNT"
322 run_llverfs $MOUNT -vl "no" ||
323 error "run_llverfs error on lustre"
331 run_test 4 "run llverfs on lustre filesystem"
334 $LARGE_LUN_RESTORE_MOUNT && setupall
335 check_and_cleanup_lustre