+# Get the device of a facet.
+#
+facet_device() {
+ local facet=$1
+ local device
+
+ case $facet in
+ mgs) device=$(mgsdevname) ;;
+ mds*) device=$(mdsdevname $(facet_number $facet)) ;;
+ ost*) device=$(ostdevname $(facet_number $facet)) ;;
+ fs2mds) device=$(mdsdevname 1_2) ;;
+ fs2ost) device=$(ostdevname 1_2) ;;
+ fs3ost) device=$(ostdevname 2_2) ;;
+ *) ;;
+ esac
+
+ echo -n $device
+}
+
+#
+# Get the virtual device of a facet.
+#
+facet_vdevice() {
+ local facet=$1
+ local device
+
+ case $facet in
+ mgs) device=$(mgsvdevname) ;;
+ mds*) device=$(mdsvdevname $(facet_number $facet)) ;;
+ ost*) device=$(ostvdevname $(facet_number $facet)) ;;
+ fs2mds) device=$(mdsvdevname 1_2) ;;
+ fs2ost) device=$(ostvdevname 1_2) ;;
+ fs3ost) device=$(ostvdevname 2_2) ;;
+ *) ;;
+ esac
+
+ echo -n $device
+}
+
+#
+# Re-read the partition table on failover partner host.
+# After a ZFS storage pool is created on a shared device, the partition table
+# on the device may change. However, the operating system on the failover
+# host may not notice the change automatically. Without the up-to-date partition
+# block devices, 'zpool import ..' cannot find the labels, whose positions are
+# relative to partition rather than disk beginnings.
+#
+# This function performs partprobe on the failover host to make it re-read the
+# partition table.
+#
+refresh_partition_table() {
+ local facet=$1
+ local device=$2
+ local host
+
+ host=$(facet_passive_host $facet)
+ if [[ -n "$host" ]]; then
+ do_node $host "$PARTPROBE $device"
+ fi
+}
+
+#
+# Get ZFS storage pool name.
+#
+zpool_name() {
+ local facet=$1
+ local device
+ local poolname
+
+ device=$(facet_device $facet)
+ # poolname is string before "/"
+ poolname="${device%%/*}"
+
+ echo -n $poolname
+}
+
+#
+# Export ZFS storage pool.
+# Before exporting the pool, all datasets within the pool should be unmounted.
+#
+# Usage: export_zpool <facet> [zpool-export options...]
+#
+# The remote command is a short-circuit chain, evaluated on the facet's node:
+#   1. if the pool is not currently imported ('zpool list' fails), do nothing;
+#   2. if any dataset of the pool is still mounted (a "<pool>/..." entry in
+#      /proc/mounts), do nothing rather than force a failing/unsafe export;
+#   3. otherwise export the pool with the caller-supplied options.
+#
+export_zpool() {
+ local facet=$1
+ shift
+ # Remaining arguments are passed through verbatim to 'zpool export'.
+ local opts="$@"
+ local poolname
+
+ poolname=$(zpool_name $facet)
+
+ # A facet with no pool (e.g. non-ZFS backend) is silently a no-op.
+ if [[ -n "$poolname" ]]; then
+ do_facet $facet "! $ZPOOL list -H $poolname >/dev/null 2>&1 ||
+ grep -q ^$poolname/ /proc/mounts ||
+ $ZPOOL export $opts $poolname"
+ fi
+}
+
+#
+# Import ZFS storage pool.
+# Force importing, even if the pool appears to be potentially active.
+#
+# Usage: import_zpool <facet> [zpool-import options...]
+#
+# Extra arguments replace the default import options; with none given,
+# "-o cachefile=none" is used so the imported pool is not recorded in the
+# cache file (see disable_zpool_cache for why).
+#
+import_zpool() {
+ local facet=$1
+ shift
+ local opts=${@:-"-o cachefile=none"}
+ local poolname
+
+ poolname=$(zpool_name $facet)
+
+ # A facet with no pool (e.g. non-ZFS backend) is silently a no-op.
+ if [[ -n "$poolname" ]]; then
+ # Search for devices in the directory holding the facet's vdev,
+ # since the pool may not be visible in the default search paths.
+ opts+=" -d $(dirname $(facet_vdevice $facet))"
+ # Skip the import if the pool is already imported on this node;
+ # otherwise force-import (-f) to override a stale active claim.
+ do_facet $facet "$ZPOOL list -H $poolname >/dev/null 2>&1 ||
+ $ZPOOL import -f $opts $poolname"
+ fi
+}
+
+#
+# Set the "cachefile=none" property on ZFS storage pool so that the pool
+# is not automatically imported on system startup.
+#
+# In a failover environment, this will provide resource level fencing which
+# will ensure that the same ZFS storage pool will not be imported concurrently
+# on different nodes.
+#
+disable_zpool_cache() {
+ local facet=$1
+ local poolname
+
+ poolname=$(zpool_name $facet)
+
+ if [[ -n "$poolname" ]]; then
+ do_facet $facet "$ZPOOL set cachefile=none $poolname"
+ fi
+}
+
+#
+# This and set_osd_param() shall be used to access OSD parameters