+ return 0
+}
+
# Usage: start_services <label> [ <label> ... ]
# fsck and mount any devices listed as arguments (in parallel).
# Attempt to assemble software raid arrays or zfs pools backing
# Lustre devices.
# Returns 0 on success, 2 if any label failed to validate, assemble,
# fsck, or mount.
start_services ()
{
	local result=0
	local devices=""
	local dir dev label
	local devtype journal raidtab
	local successflag
	local labels

	start_zfs_services
	for label in "$@"; do
		dir=$(label_to_mountpt "$label")
		devtype=$($LDEV -t "$label")
		dev=$(label_to_device "$label")
		journal=$($LDEV -j "$label")
		raidtab=$($LDEV -r "$label")

		if [ -z "$dir" ] || [ -z "$dev" ]; then
			echo "$label is not a valid lustre label on this node"
			result=2
			continue
		fi

		if [ "$devtype" = "md" ] ; then
			if ! assemble_md_device "$dev" "$raidtab" ; then
				echo "failed to assemble array $dev backing $label"
				result=2
				continue
			fi
		elif [ "$devtype" = "zfs" ] ; then
			if ! import_zpool "$label" ; then
				result=2
			fi
		fi

		# Journal device field in ldev.conf may be "-" or empty,
		# so only attempt to assemble if it's an absolute path.
		# Ignore errors since the journal device may not be an
		# md device.
		if echo "$journal" | grep -q ^/ ; then
			assemble_md_device "$journal" "$raidtab" 2>/dev/null
		fi

		if mountpt_is_active "$label" || \
		   device_is_active "$label"; then
			echo "$label is already mounted"
			# no error
			continue
		fi

		# MMP and scsi timeout adjustments only apply to
		# non-zfs (ldiskfs-backed) targets.
		if [ "x$devtype" != "xzfs" ] ; then
			if ! mmp_test "$dev"; then
				result=2
				continue
			fi
			if ! adjust_scsi_timeout "$dev"; then
				result=2
				continue
			fi
		fi
		devices="$devices $dev"
		labels="$labels $label"
	done
	if [ "$result" -eq 0 ]; then
		# $devices is intentionally unquoted: it is a
		# space-separated list of device paths.
		fsck_test $devices || return 2

		# Fork to handle multiple mount_one_device()'s in parallel.
		# Errors occurred if $successflag comes up missing afterwards.
		successflag=$(mktemp)
		[ -e "$successflag" ] || return 2
		for label in $labels; do
			mount_one_device "$label" "$successflag" \
				$($LDEV -t "$label") &
			# stagger to avoid module loading races
			if [[ -n $MOUNT_DELAY && $MOUNT_DELAY -gt 0 ]] ; then
				sleep "$MOUNT_DELAY"
			fi
		done
		# A bare "wait" blocks until every backgrounded
		# mount_one_device has exited.
		wait
		[ -e "$successflag" ] || return 2
		rm -f "$successflag"
	fi

	return $result
}
+
# Usage: stop_services <label> [ <label> ... ]
# Unmount any devices listed as arguments (in parallel).
# Any devices which are not mounted or don't exist are skipped with no error.
# Returns 0 on success, 2 if any label is invalid or a mount point /
# backing device is still active after the unmounts complete.
stop_services ()
{
	local labels=$*
	local result=0
	local pids=""
	local dir dev label pid

	for label in $labels; do
		dir=$(label_to_mountpt "$label")
		if [ -z "$dir" ]; then
			echo "$label is not a valid lustre label on this node"
			result=2
			continue
		fi
		if ! mountpt_is_active "$label"; then
			# not mounted - no error
			continue
		fi

		echo "Unmounting $dir"
		umount "$dir" &

		# collect the background umount's pid; a leading space
		# in $pids is harmless when word-split below
		pids="$pids $!"
	done

	# wait for all umount processes to complete, report any errors
	for pid in $pids; do
		wait "$pid" || result=2
	done

	# double check that every mount point and backing device is gone
	for label in $labels; do
		if mountpt_is_active "$label"; then
			dir=$(label_to_mountpt "$label")
			echo "Mount point $dir is still active"
			result=2
		fi
		if device_is_active "$label"; then
			dev=$(label_to_device "$label")
			echo "Device $dev is still active"
			result=2
		fi
	done
	stop_devices $labels

	return $result
}
+
+# Usage: start_lustre_services [local|foreign|all|<label>]
+# If no parameter is specified, local devices will be started.
+start_lustre_services ()
+{
+ local labels=""
+
+ case "$1" in
+ ""|local)
+ labels=$LOCAL_SRV
+ ;;
+ foreign)
+ labels=$FOREIGN_SRV
+ ;;
+ all) labels="$LOCAL_SRV $FOREIGN_SRV"
+ ;;
+ *) labels="$1"
+ ;;
+ esac
+ # for use by heartbeat V1 resource agent:
+ # starting an already-started service must not be an error
+ start_services $labels || exit 2