+
+ if ! md_array_is_active $dev ; then
+ mdadm $args $dev
+ result=$?
+ fi
+
+ udev_trigger
+ return $result
+}
+
+ # Usage: stop_md_device <device> [<raidtab>]
+ # Stop the md device backing device.  If <raidtab> is given it is
+ # passed to mdadm as an alternate config file via -c.
+ # Return 0 if the array is stopped successfully or was not active,
+ # otherwise return error code from mdadm.
+ stop_md_device ()
+ {
+ local dev=$1
+ local raidtab=$2
+ local args="-Sq"
+ local result=0
+
+ # Optional second argument selects an alternate mdadm config file.
+ if [ -n "$raidtab" ] ; then
+ args="$args -c $raidtab"
+ fi
+
+ # Only invoke "mdadm -S" when the device node exists and the array
+ # is actually active; a missing or already-stopped array is success.
+ if [ -e $dev ] && md_array_is_active $dev ; then
+ mdadm $args $dev
+ result=$?
+ fi
+
+ return $result
+ }
+
+ # Usage: md_array_is_active <device>
+ # return 0 if device is an active md RAID array, or 1 otherwise
+ md_array_is_active ()
+ {
+ local device=$1
+
+ # A nonexistent device node cannot be an active array.
+ [ -e "$device" ] || return 1
+
+ # "mdadm --detail -t" encodes the array state in its exit status.
+ # Status 4 indicates the device is missing or not an md array (see
+ # mdadm(8)); any other status still refers to an existing md array
+ # (possibly degraded), which we deliberately treat as "active".
+ mdadm --detail -t $device > /dev/null 2>&1
+ if [ $? -eq 4 ] ; then
+ return 1
+ fi
+ return 0
+ }
+
+ # Usage: start_services <label> [ <label> ... ]
+ # fsck and mount any devices listed as arguments (in parallel).
+ # Attempt to assemble software raid arrays or zfs pools backing
+ # Lustre devices.
+ # Returns 0 on success, 2 if any label fails setup, fsck, or mount.
+ start_services ()
+ {
+ local result=0
+ local devices=""
+ local dir dev label
+ local successflag
+ local labels
+
+ # NOTE(review): devtype, journal and raidtab assigned below are not
+ # declared local and so leak into the caller's scope — confirm
+ # whether that is intentional.
+ start_zfs_services
+ # Phase 1: per-label validation and backing-store assembly.
+ for label in $*; do
+ dir=`label_to_mountpt $label`
+ devtype=`$LDEV -t $label`
+ dev=`label_to_device $label`
+ journal=`$LDEV -j $label`
+ raidtab=`$LDEV -r $label`
+
+ # A label with no mountpoint or device in ldev.conf is invalid here.
+ if [ -z "$dir" ] || [ -z "$dev" ]; then
+ echo "$label is not a valid lustre label on this node"
+ result=2
+ continue
+ fi
+
+ # Bring up the backing store (md array or zpool) before any
+ # device checks or mounting.
+ if [ "$devtype" = "md" ] ; then
+ if ! assemble_md_device $dev $raidtab ; then
+ echo "failed to assemble array $dev backing $label"
+ result=2
+ continue
+ fi
+ elif [ "$devtype" = "zfs" ] ; then
+ if ! import_zpool $label ; then
+ result=2
+ fi
+ fi
+
+ # Journal device field in ldev.conf may be "-" or empty,
+ # so only attempt to assemble if its an absolute path.
+ # Ignore errors since the journal device may not be an
+ # md device.
+ if echo $journal | grep -q ^/ ; then
+ assemble_md_device $journal $raidtab 2>/dev/null
+ fi
+
+ # ldiskfs-specific checks; zfs datasets have no device node to
+ # probe for MMP or scsi timeouts.
+ if [ "x$devtype" != "xzfs" ] ; then
+ if mountpt_is_active $label || \
+ device_is_active $label; then
+ echo "$label is already mounted"
+ # no error
+ continue
+ fi
+ if ! mmp_test $dev; then
+ result=2
+ continue
+ fi
+ if ! adjust_scsi_timeout $dev; then
+ result=2
+ continue
+ fi
+ fi
+ devices="$devices $dev"
+ labels="$labels $label"
+ done
+ # Phase 2: only fsck and mount when every label passed phase 1.
+ if [ $result == 0 ]; then
+ fsck_test $devices || return 2
+
+ # Fork to handle multiple mount_one_device()'s in parallel.
+ # Errors occurred if $successflag comes up missing afterwards.
+ successflag=`mktemp`
+ [ -e $successflag ] || return 2
+ for label in $labels; do
+ mount_one_device $label $successflag `$LDEV -t $label` &
+ # stagger to avoid module loading races
+ if [[ -n $MOUNT_DELAY && $MOUNT_DELAY -gt 0 ]] ; then
+ sleep $MOUNT_DELAY
+ fi
+ done
+ # Bare "wait" reaps all children on the first iteration; the
+ # remaining iterations are no-ops but harmless.
+ for label in $labels; do
+ wait
+ done
+ # Any child that failed removed the flag file (see comment above).
+ [ -e $successflag ] || return 2
+ rm -f $successflag
+ fi
+
+ return $result
+ }
+
+ # Usage: stop_services <label> [ <label> ... ]
+ # Unmount any devices listed as arguments (serially).
+ # Any devices which are not mounted or don't exist are skipped with no error.
+ # Returns 0 on success, 2 if any label is invalid, any umount fails,
+ # or any mountpoint/device is still active afterwards.
+ stop_services ()
+ {
+ local labels=$*
+ local result=0
+ local pids=""
+ local dir dev label
+
+ # Launch one background umount per mounted label, collecting PIDs
+ # so each exit status can be checked individually below.
+ for label in $labels; do
+ dir=`label_to_mountpt $label`
+ if [ -z "$dir" ]; then
+ echo "$label is not a valid lustre label on this node"
+ result=2
+ continue
+ fi
+ if ! mountpt_is_active $label; then
+ #echo "$label is not mounted"
+ # no error
+ continue
+ fi
+
+ echo "Unmounting $dir"
+ umount $dir &
+
+ if [ -z "$pids" ]; then
+ pids="$!"
+ else
+ pids="$pids $!"
+ fi
+ done
+
+ # wait for all umount processes to complete, report any errors
+ for pid in $pids; do
+ wait $pid || result=2
+ done
+
+ # double check!
+ # Verify nothing is still mounted or active even if umount itself
+ # reported success.
+ for label in $labels; do
+ if mountpt_is_active $label; then
+ dir=`label_to_mountpt $label`
+ echo "Mount point $dir is still active"
+ result=2
+ fi
+ if device_is_active $label; then
+ dev=`label_to_device $label`
+ echo "Device $dev is still active"
+ result=2
+ fi
+ done
+ # Tear down backing md arrays / zpools after the filesystems are
+ # unmounted.
+ stop_devices $labels
+
+ return $result