-dnl #
-dnl # Supported configure options. When no options are specified support
-dnl # for ZFS OSDs will be autodetected assuming server support is enabled.
-dnl # If the ZFS OSD cannot be built support for it is disabled and a
-dnl # warning is issued but the configure process is allowed to continue.
-dnl #
-dnl # --without-zfs - Disable zfs support.
-dnl # --with-zfs=no
-dnl #
-dnl # --with-zfs - Enable zfs support and attempt to autodetect the zfs
-dnl # --with-zfs=yes headers in one of the following places. Because zfs
-dnl # support was explicitly required if the headers cannot
-dnl # be located it is treated as a fatal error.
-dnl #
-dnl # * /var/lib/dkms/zfs/${VERSION}/source
-dnl # * /usr/src/zfs-${VERSION}/${LINUXRELEASE}
-dnl # * /usr/src/zfs-${VERSION}
-dnl # * ../zfs/
-dnl # * $LINUX/zfs
-dnl #
-dnl # --with-zfs-devel=path
-dnl # - User provided directory where zfs development headers
-dnl # are located. This option is typically used when user
-dnl # uses rpm2cpio to unpack src rpm.
-dnl # Assumes layout of:
-dnl # ${zfs-devel-path}/usr/include/libzfs
-dnl # ${zfs-devel-path}/usr/include/libspl
-dnl # ${zfs-devel-path}/lib64/libzfs.so.* or
-dnl # ${zfs-devel-path}/lib/libzfs.so.*
-dnl #
-dnl # --with-zfs=path - Enable zfs support and use the zfs headers in the
-dnl # provided path. No autodetection is performed and
-dnl # if no headers are found this is a fatal error.
-dnl #
-dnl # --with-zfs-obj - When zfs support is enabled the object directory
-dnl # will be based on the --with-zfs directory. If this
-dnl # is detected incorrectly it can be explicitly
-dnl # specified using this option.
-dnl #
-dnl # --without-spl - Disable spl support.
-dnl # --with-spl=no
-dnl #
-dnl # --with-spl - Enable spl support and attempt to autodetect the spl
-dnl # --with-spl=yes headers in one of the following places in this order:
-dnl # * /var/lib/dkms/spl/${VERSION}/source
-dnl # * /usr/src/spl-${VERSION}/${LINUXRELEASE}
-dnl # * /usr/src/spl-${VERSION}
-dnl # * ../spl/
-dnl # * $LINUX/spl
-dnl #
-dnl # --with-spl=path - Enable spl support and use the spl headers in the
-dnl # provided path. No autodetection is performed.
-dnl #
-dnl # --with-spl-obj - When spl support is enabled the object directory
-dnl # will be based on the --with-spl directory. If this
-dnl # is detected incorrectly it can be explicitly
-dnl # specified using this option.
-dnl #
+# SPDX-License-Identifier: NOASSERTION
+
+#
+# This file is part of Lustre, http://www.lustre.org/
+#
+# config/lustre-build-zfs.m4
+#
+# OpenZFS OSD related configuration
+#
+
+#
+# Supported configure options. When no options are specified support
+# for ZFS OSDs will be autodetected assuming server support is enabled.
+# If the ZFS OSD cannot be built support for it is disabled and a
+# warning is issued but the configure process is allowed to continue.
+#
+# --without-zfs - Disable zfs support.
+# --with-zfs=no
+#
+# --with-zfs - Enable zfs support and attempt to autodetect the zfs
+# --with-zfs=yes headers in one of the following places. Because zfs
+# support was explicitly required if the headers cannot
+# be located it is treated as a fatal error.
+#
+# * /var/lib/dkms/zfs/${VERSION}/source
+# * /usr/src/zfs-${VERSION}/${LINUXRELEASE}
+# * /usr/src/zfs-${VERSION}
+# * ../zfs/
+# * $LINUX/zfs
+#
+# --with-zfs-devel=path
+# - User provided directory where zfs development headers
+# are located. This option is typically used when user
+# uses rpm2cpio to unpack src rpm.
+# Assumes layout of:
+# ${zfs-devel-path}/usr/include/libzfs
+# ${zfs-devel-path}/usr/include/libspl
+# ${zfs-devel-path}/lib64/libzfs.so.* or
+# ${zfs-devel-path}/lib/libzfs.so.*
+#
+# --with-zfs=path - Enable zfs support and use the zfs headers in the
+# provided path. No autodetection is performed and
+# if no headers are found this is a fatal error.
+#
+# --with-zfs-obj - When zfs support is enabled the object directory
+# will be based on the --with-zfs directory. If this
+# is detected incorrectly it can be explicitly
+# specified using this option.
+#
+# --without-spl - Disable spl support.
+# --with-spl=no
+#
+# --with-spl - Enable spl support and attempt to autodetect the spl
+# --with-spl=yes headers in one of the following places in this order:
+# * /var/lib/dkms/spl/${VERSION}/source
+# * /usr/src/spl-${VERSION}/${LINUXRELEASE}
+# * /usr/src/spl-${VERSION}
+# * ../spl/
+# * $LINUX/spl
+#
+# --with-spl=path - Enable spl support and use the spl headers in the
+# provided path. No autodetection is performed.
+#
+# --with-spl-obj - When spl support is enabled the object directory
+# will be based on the --with-spl directory. If this
+# is detected incorrectly it can be explicitly
+# specified using this option.
+#
+
+#
+# LB_SPL
+#
AC_DEFUN([LB_SPL], [
AC_ARG_WITH([spl],
AS_HELP_STRING([--with-spl=PATH],
[Path to spl build objects]),
[splobj="$withval"])
- dnl #
- dnl # The existence of spl.release[.in] is used to identify a valid
- dnl # source directory. In order of preference:
- dnl #
+ #
+ # The existence of spl.release[.in] is used to identify a valid
+ # source directory. In order of preference:
+ #
splver=$(ls -1 /usr/src/ | grep ^spl- | cut -f2 -d'-' |
sort -V | head -n1)
spldkms="/var/lib/dkms/spl/${splver}"
enable_zfs=no
])
- dnl #
- dnl # The existence of the spl_config.h is used to identify a valid
- dnl # spl object directory. In many cases the object and source
- dnl # directory are the same, however the objects may also reside
- dnl # is a subdirectory named after the kernel version. When
- dnl # weak modules are used, the kernel version may not be the
- dnl # same as the LINUXRELEASE against which we are building lustre.
- dnl #
+ #
+ # The existence of the spl_config.h is used to identify a valid
+ # spl object directory. In many cases the object and source
+ # directory are the same, however the objects may also reside
+	# in a subdirectory named after the kernel version. When
+ # weak modules are used, the kernel version may not be the
+ # same as the LINUXRELEASE against which we are building lustre.
+ #
AC_MSG_CHECKING([spl build directory])
AS_IF([test -z "$splobj"], [
last_spl_obj_dir=$(ls -d ${splsrc}/[[0-9]]*/ 2> /dev/null | tail -n 1 | sed 's|/$||')
enable_zfs=no
])
- dnl #
- dnl # Verify the source version using SPL_META_VERSION in spl_config.h
- dnl #
+ #
+ # Verify the source version using SPL_META_VERSION in spl_config.h
+ #
AS_IF([test x$enable_zfs = xyes], [
AC_MSG_CHECKING([spl source version])
AS_IF([fgrep -q SPL_META_VERSION $splobj/spl_config.h], [
AC_MSG_RESULT([$splver])
])
- dnl #
- dnl # Verify the modules systems exist by the expect name.
- dnl #
+ #
+	# Verify the module symbols exist by the expected name.
+ #
AS_IF([test x$enable_zfs = xyes], [
AC_MSG_CHECKING([spl file name for module symbols])
AS_IF([test -r $splobj/$SYMVERFILE], [
AC_SUBST(EXTRA_SYMBOLS)
])
-])
+]) # LB_SPL
+#
+# LB_ZFS
+#
AC_DEFUN([LB_ZFS], [
AC_ARG_WITH([zfs-obj],
AS_HELP_STRING([--with-zfs-obj=PATH],
[Path to zfs build objects]),
[zfsobj="$withval"])
- dnl #
- dnl # The existence of zfs.release[.in] is used to identify a valid
- dnl # source directory. In order of preference:
- dnl #
+ #
+ # The existence of zfs.release[.in] is used to identify a valid
+ # source directory. In order of preference:
+ #
zfsver=$(ls -1 /usr/src/ | grep ^zfs- | cut -f2 -d'-' |
sort -V | head -n1)
zfsdkms="/var/lib/dkms/zfs/${zfsver}"
enable_zfs=no
])
- dnl #
- dnl # The existence of the zfs_config.h is used to identify a valid
- dnl # zfs object directory. In many cases the object and source
- dnl # directory are the same, however the objects may also reside
- dnl # is a subdirectory named after the kernel version. When
- dnl # weak modules are used, the kernel version may not be the
- dnl # same as the LINUXRELEASE against which we are building lustre.
- dnl #
+ #
+ # The existence of the zfs_config.h is used to identify a valid
+ # zfs object directory. In many cases the object and source
+ # directory are the same, however the objects may also reside
+	# in a subdirectory named after the kernel version. When
+ # weak modules are used, the kernel version may not be the
+ # same as the LINUXRELEASE against which we are building lustre.
+ #
AC_MSG_CHECKING([zfs build directory])
AS_IF([test -z "$zfsobj"], [
last_zfs_obj_dir=$(ls -d ${zfssrc}/[[0-9]]*/ 2> /dev/null | tail -n 1 | sed 's|/$||')
enable_zfs=no
])
- dnl #
- dnl # Verify the source version using SPL_META_VERSION in spl_config.h
- dnl #
+ #
+ # Verify the source version using SPL_META_VERSION in spl_config.h
+ #
AS_IF([test x$enable_zfs = xyes], [
AC_MSG_CHECKING([zfs source version])
AS_IF([fgrep -q ZFS_META_VERSION $zfsobj/zfs_config.h], [
AC_MSG_RESULT([$zfsver])
])
- dnl #
- dnl # Verify the modules systems exist by the expect name.
- dnl #
+ #
+	# Verify the module symbols exist by the expected name.
+ #
AS_IF([test x$enable_zfs = xyes], [
AC_MSG_CHECKING([zfs file name for module symbols])
AS_IF([test -r $zfsobj/$SYMVERFILE], [
AC_SUBST(EXTRA_SYMBOLS)
])
-])
+]) # LB_ZFS
+#
+# LB_ZFS_DEVEL
+#
AC_DEFUN([LB_ZFS_DEVEL], [
AC_ARG_WITH([zfs-devel],
[AS_HELP_STRING([--with-zfs-devel=PATH],
])
])
AC_MSG_RESULT([$zfsinc])
-])
+]) # LB_ZFS_DEVEL
+#
+# LB_ZFS_USER
+#
AC_DEFUN([LB_ZFS_USER], [
- dnl #
- dnl # Detect user space zfs development headers.
- dnl #
+ #
+ # Detect user space zfs development headers.
+ #
AC_MSG_CHECKING([zfs devel headers])
AS_IF([test -z "${zfsinc}"], [
AS_IF([test -e "${zfssrc}/include/libzfs.h" && test -e "${zfssrc}/lib/libspl/include"], [
AC_SUBST(ZFS_LIBZFS_INCLUDE)
AC_SUBST(ZFS_LIBZFS_LDFLAGS)
AC_SUBST(ZFS_LIBZFS_LIBS)
-])
+]) # LB_ZFS_USER
+#
+# LB_CONFIG_ZFS
+#
AC_DEFUN([LB_CONFIG_ZFS], [
AC_ARG_WITH([zfs],
[AS_HELP_STRING([--with-zfs=PATH], [Path to zfs source])],
LB_ZFS_DEVEL
LB_ZFS_USER
- dnl #
- dnl # Define zfs source code version
- dnl #
+ #
+ # Define zfs source code version
+ #
ZFS_MAJOR=$(echo $zfsver | sed -re ['s/([0-9]+)\.([0-9]+)\.([0-9]+)(\.([0-9]+))?.*/\1/'])
ZFS_MINOR=$(echo $zfsver | sed -re ['s/([0-9]+)\.([0-9]+)\.([0-9]+)(\.([0-9]+))?.*/\2/'])
ZFS_PATCH=$(echo $zfsver | sed -re ['s/([0-9]+)\.([0-9]+)\.([0-9]+)(\.([0-9]+))?.*/\3/'])
AC_DEFINE_UNQUOTED([ZFS_PATCH], [$ZFS_PATCH], [zfs patch version])
AC_DEFINE_UNQUOTED([ZFS_FIX], [$ZFS_FIX], [zfs fix version])
- dnl #
- dnl # SPL is only needed if ZFS is prior to 0.8.0
- dnl #
+ #
+ # SPL is only needed if ZFS is prior to 0.8.0
+ #
AS_IF([test x$enable_modules = xyes && test -n "$ZFS_MAJOR" &&
test $ZFS_MAJOR -eq 0 && test $ZFS_MINOR -lt 8], [
LB_SPL
enable_spl=no
])
- dnl #
- dnl # enable_zfs will be set to no in LB_SPL or LB_ZFS if
- dnl # one of more of the build requirements is not met.
- dnl #
+ #
+ # enable_zfs will be set to no in LB_SPL or LB_ZFS if
+	# one or more of the build requirements is not met.
+ #
AS_IF([test x$enable_zfs = xyes], [
AC_DEFINE(HAVE_ZFS_OSD, 1, Enable zfs osd)
],[
AC_MSG_ERROR([spa_maxblocksize does not exist])
])
- dnl #
- dnl # ZFS 0.7.x adds support for large dnodes. This
- dnl # allows Lustre to optionally specify the size of a
- dnl # dnode which ZFS will then use to store metadata such
- dnl # as xattrs. The default dnode size specified by the
- dnl # 'dnodesize' dataset property will be used unless a
- dnl # specific value is provided.
- dnl #
+ #
+ # ZFS 0.7.x adds support for large dnodes. This
+ # allows Lustre to optionally specify the size of a
+ # dnode which ZFS will then use to store metadata such
+ # as xattrs. The default dnode size specified by the
+ # 'dnodesize' dataset property will be used unless a
+ # specific value is provided.
+ #
LB_CHECK_COMPILE([if zfs defines dmu_object_alloc_dnsize],
dmu_object_alloc_dnsize, [
#include <sys/dmu.h>
AC_MSG_ERROR([dmu_object_alloc_dnsize does not exist])
])
- dnl #
- dnl # ZFS 0.7.x extended dmu_prefetch() to take an additional
- dnl # 'level' and 'priority' argument. Use a level of 0 and a
- dnl # priority of ZIO_PRIORITY_SYNC_READ to replicate the
- dnl # behavior of the four argument version.
- dnl #
+ #
+ # ZFS 0.7.x extended dmu_prefetch() to take an additional
+ # 'level' and 'priority' argument. Use a level of 0 and a
+ # priority of ZIO_PRIORITY_SYNC_READ to replicate the
+ # behavior of the four argument version.
+ #
LB_CHECK_COMPILE([if ZFS has 'dmu_prefetch' with 6 args],
dmu_prefetch, [
#include <sys/dmu.h>
],[
AC_MSG_ERROR([6 argument dmu_pretch does not exist])
])
- dnl #
- dnl # ZFS 0.7.0 feature: SPA_FEATURE_USEROBJ_ACCOUNTING
- dnl #
+ #
+ # ZFS 0.7.0 feature: SPA_FEATURE_USEROBJ_ACCOUNTING
+ #
LB_CHECK_COMPILE([if ZFS has native dnode accounting supported],
dmu_objset_userobjused_enabled, [
#include <sys/dmu_objset.h>
],[
AC_MSG_ERROR([native dnode accounting does not exist])
])
- dnl #
- dnl # ZFS 0.7.0 feature: MULTIHOST
- dnl #
+ #
+ # ZFS 0.7.0 feature: MULTIHOST
+ #
LB_CHECK_COMPILE([if ZFS has multihost protection],
spa_multihost, [
#include <sys/fs/zfs.h>
],[
AC_MSG_ERROR([multihost protection does not exist])
])
- dnl #
- dnl # ZFS 0.7.x adds new method zap_lookup_by_dnode
- dnl #
+ #
+ # ZFS 0.7.x adds new method zap_lookup_by_dnode
+ #
LB_CHECK_COMPILE([if ZFS has 'zap_lookup_by_dnode'],
zap_lookup_by_dnode, [
#include <sys/zap.h>
],[
AC_MSG_ERROR([zap_lookup_by_dnode does not exist])
])
- dnl #
- dnl # ZFS 0.7.x adds new method zap_add_by_dnode
- dnl #
+ #
+ # ZFS 0.7.x adds new method zap_add_by_dnode
+ #
LB_CHECK_COMPILE([if ZFS has 'zap_add_by_dnode'],
zap_add_by_dnode, [
#include <sys/zap.h>
],[
AC_MSG_ERROR([zap_add_by_dnode does not exist])
])
- dnl #
- dnl # ZFS 0.7.x adds new method zap_remove_by_dnode
- dnl #
+ #
+ # ZFS 0.7.x adds new method zap_remove_by_dnode
+ #
LB_CHECK_COMPILE([if ZFS has 'zap_remove_by_dnode'],
zap_remove_by_dnode, [
#include <sys/zap.h>
],[
AC_MSG_ERROR([zap_remove_by_dnode does not exist])
])
- dnl #
- dnl # ZFS 0.7.x adds new method dmu_tx_hold_zap_by_dnode
- dnl #
+ #
+ # ZFS 0.7.x adds new method dmu_tx_hold_zap_by_dnode
+ #
LB_CHECK_COMPILE([if ZFS has 'dmu_tx_hold_zap_by_dnode'],
dmu_tx_hold_zap_by_dnode, [
#include <sys/zap.h>
],[
AC_MSG_ERROR([dmu_tx_hold_zap_by_dnode does not exist])
])
- dnl #
- dnl # ZFS 0.7.x adds new method dmu_tx_hold_write_by_dnode
- dnl #
+ #
+ # ZFS 0.7.x adds new method dmu_tx_hold_write_by_dnode
+ #
LB_CHECK_COMPILE([if ZFS has 'dmu_tx_hold_write_by_dnode'],
dmu_tx_hold_write_by_dnode, [
#include <sys/zap.h>
],[
AC_MSG_ERROR([dmu_tx_hold_write_by_dnode does not exist])
])
- dnl #
- dnl # ZFS 0.7.x adds new method dmu_write_by_dnode
- dnl #
+ #
+ # ZFS 0.7.x adds new method dmu_write_by_dnode
+ #
LB_CHECK_COMPILE([if ZFS has 'dmu_write_by_dnode'],
dmu_write_by_dnode, [
#include <sys/zap.h>
],[
AC_MSG_ERROR([dmu_write_by_dnode does not exist])
])
- dnl #
- dnl # ZFS 0.7.x adds new method dmu_read_by_dnode
- dnl #
+ #
+ # ZFS 0.7.x adds new method dmu_read_by_dnode
+ #
LB_CHECK_COMPILE([if ZFS has 'dmu_read_by_dnode'],
dmu_read_by_dnode, [
#include <sys/zap.h>
],[
AC_MSG_ERROR([dmu_read_by_dnode does not exist])
])
- dnl #
- dnl # ZFS 0.7.2 adds new method dmu_tx_mark_netfree
- dnl #
+ #
+ # ZFS 0.7.2 adds new method dmu_tx_mark_netfree
+ #
LB_CHECK_COMPILE([if ZFS has 'dmu_tx_mark_netfree'],
dmu_tx_mark_netfree, [
#include <sys/dmu.h>
AC_DEFINE(HAVE_DMU_TX_MARK_NETFREE, 1,
[Have dmu_tx_mark_netfree])
])
- dnl #
- dnl # ZFS 0.7.10 changes timestruc_t to inode_timespec_t
- dnl #
+ #
+ # ZFS 0.7.10 changes timestruc_t to inode_timespec_t
+ #
LB_CHECK_COMPILE([if SPL has 'inode_timespec_t'],
zfs_have_inode_timespec, [
#include <sys/fs/zfs.h>
AC_DEFINE(HAVE_ZFS_INODE_TIMESPEC, 1,
[Have inode_timespec_t])
])
- dnl # ZFS 0.7.12/0.8.x uses zfs_refcount_add() instead of
- dnl # refcount_add(). ZFS 2.0 renamed sys/refcount.h to
- dnl # sys/zfs_refcount.h, rather the add another check to
- dnl # determine the correct header name include it
- dnl # indirectly through sys/dnode.h.
- dnl #
+ # ZFS 0.7.12/0.8.x uses zfs_refcount_add() instead of
+ # refcount_add(). ZFS 2.0 renamed sys/refcount.h to
+	# sys/zfs_refcount.h; rather than add another check to
+ # determine the correct header name include it
+ # indirectly through sys/dnode.h.
+ #
LB_CHECK_COMPILE([if ZFS has 'zfs_refcount_add'],
zfs_refcount_add, [
#include <sys/dnode.h>
AC_DEFINE(HAVE_ZFS_REFCOUNT_ADD, 1,
[Have zfs_refcount_add])
])
- dnl #
- dnl # ZFS 0.8.x changes dmu_objset_own for encryption
- dnl #
+ #
+ # ZFS 0.8.x changes dmu_objset_own for encryption
+ #
LB_CHECK_COMPILE([if ZFS has 'dmu_objset_own' with 6 args],
dmu_objset_own, [
#include <sys/dmu_objset.h>
AC_DEFINE(HAVE_DMU_OBJSET_OWN_6ARG, 1,
[Have dmu_objset_own() with 6 args])
])
- dnl #
- dnl # ZFS 0.8.x changes dmu_objset_disown for encryption
- dnl #
+ #
+ # ZFS 0.8.x changes dmu_objset_disown for encryption
+ #
LB_CHECK_COMPILE([if ZFS has 'dmu_objset_disown' with 3 args],
dmu_objset_disown, [
#include <sys/dmu_objset.h>
AC_DEFINE(HAVE_DMU_OBJSET_DISOWN_3ARG, 1,
[Have dmu_objset_disown() with 3 args])
])
- dnl #
- dnl # ZFS exports dmu_offet_next
- dnl #
+ #
+	# ZFS exports dmu_offset_next
+ #
AC_CACHE_CHECK([if ZFS exports 'dmu_offset_next'],
[lb_cv_dmu_offset_next], [
lb_cv_dmu_offset_next="no"
AC_DEFINE(HAVE_DMU_OFFSET_NEXT, 1,
[Have dmu_offset_next() exported])
])
- dnl #
- dnl # ZFS 2.0 replaced .db_last_dirty / .dr_next with a list_t
- dnl # and list_node_t named .db_dirty_records / .dr_dbuf_node.
- dnl #
+ #
+ # ZFS 2.0 replaced .db_last_dirty / .dr_next with a list_t
+ # and list_node_t named .db_dirty_records / .dr_dbuf_node.
+ #
LB_CHECK_COMPILE([if ZFS has 'db_dirty_records' list_t],
db_dirty_records, [
#include <sys/dbuf.h>
AC_DEFINE(HAVE_DB_DIRTY_RECORDS_LIST, 1,
[Have db_dirty_records list_t])
])
- dnl #
- dnl # ZFS 2.0 renamed sys/refcount.h to zfs_refcount.h
- dnl # This build issue shows up with ZFS 2.0.7 and Lustre 2.12 LTS
- dnl #
+ #
+ # ZFS 2.0 renamed sys/refcount.h to zfs_refcount.h
+ # This build issue shows up with ZFS 2.0.7 and Lustre 2.12 LTS
+ #
LB_CHECK_COMPILE([if ZFS renamed sys/refcount to zfs_refcount.h],
zfs_zfs_refcount, [
#include <sys/zfs_refcount.h>
])
AM_CONDITIONAL(ZFS_ENABLED, [test "x$enable_zfs" = xyes])
AM_CONDITIONAL(SPL_ENABLED, [test "x$enable_spl" = xyes])
-])
+]) # LB_CONFIG_ZFS