echo -n $size
}
+fs_inode_ksize() {
+ local facet=${1:-$SINGLEMDS}
+ local fstype=$(facet_fstype $facet)
+ local size=0
+ case $fstype in
+ ldiskfs) size=4;; # ~4KB per inode
+ zfs) size=11;; # 10 to 11KB per inode
+ esac
+
+ echo -n $size
+}
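+
+# Usage sketch (hypothetical "nfiles" variable): rough KB of MDS space
+# consumed by creating nfiles new inodes:
+#   mds_kb=$((nfiles * $(fs_inode_ksize $SINGLEMDS)))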
+
check_gss_daemon_nodes() {
local list=$1
dname=$2
echo -n $device
}
+running_in_vm() {
+ local virt
+
+ # assign separately so $? reflects the virt-what exit status, not "local"
+ virt=$(virt-what 2> /dev/null)
+ [ $? -eq 0 ] && [ -n "$virt" ] && { echo $virt; return; }
+
+ virt=$(dmidecode -s system-product-name | awk '{print $1}')
+
+ case $virt in
+ VMware|KVM|VirtualBox|Parallels) echo ${virt,,} ;;
+ *) ;;
+ esac
+}
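+
+# Usage sketch (hypothetical skip message): skip timing-sensitive checks
+# when the test node is virtualized:
+#   [ -n "$(running_in_vm)" ] && skip_env "timing is unreliable in a VM"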
+
#
# Re-read the partition table on failover partner host.
# After a ZFS storage pool is created on a shared device, the partition table
}
cleanup_echo_devs () {
- local devs=$($LCTL dl | grep echo | awk '{print $4}')
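+ # clear any previously installed EXIT trap ("trap 0" resets the handler
+ # for signal 0, i.e. EXIT, to its default)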
+ trap 0
+ local dev
+ local devs=$($LCTL dl | grep echo | awk '{print $4}')
- for dev in $devs; do
- $LCTL --device $dev cleanup
- $LCTL --device $dev detach
- done
+ for dev in $devs; do
+ $LCTL --device $dev cleanup
+ $LCTL --device $dev detach
+ done
}
cleanupall() {
exit 1
}
+# Throw an error if the test is not running in a VM - usually used for
+# performance verification, since results from a VM are not reliable
+error_not_in_vm() {
+ local virt=$(running_in_vm)
+ if [[ -n "$virt" ]]; then
+ echo "running in VM '$virt', ignore error"
+ error_ignore env=$virt "$@"
+ else
+ error "$@"
+ fi
+}
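+
+# Usage sketch (hypothetical "elapsed"/"limit" variables): fail a performance
+# check only when running on real hardware:
+#   (( elapsed <= limit )) || error_not_in_vm "took ${elapsed}s > ${limit}s"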
+
skip_env () {
$FAIL_ON_SKIP_ENV && error false $@ || skip $@
}