-dnl #
-dnl # Supported configure options. When no options are specified support
-dnl # for ZFS OSDs will be autodetected assuming server support is enabled.
-dnl # If the ZFS OSD cannot be built support for it is disabled and a
-dnl # warning is issued but the configure process is allowed to continue.
-dnl #
-dnl # --without-zfs - Disable zfs support.
-dnl # --with-zfs=no
-dnl #
-dnl # --with-zfs - Enable zfs support and attempt to autodetect the zfs
-dnl # --with-zfs=yes headers in one of the following places. Because zfs
-dnl # support was explicitly required if the headers cannot
-dnl # be located it is treated as a fatal error.
-dnl #
-dnl # * /var/lib/dkms/zfs/${VERSION}/source
-dnl # * /usr/src/zfs-${VERSION}/${LINUXRELEASE}
-dnl # * /usr/src/zfs-${VERSION}
-dnl # * ../spl/
-dnl # * $LINUX
-dnl #
-dnl # --with-zfs-devel=path
-dnl # - User provided directory where zfs development headers
-dnl # are located. This option is typically used when user
-dnl # uses rpm2cpio to unpack src rpm.
-dnl # Assumes layout of:
-dnl # ${zfs-devel-path}/usr/include/libzfs
-dnl # ${zfs-devel-path}/usr/include/libspl
-dnl # ${zfs-devel-path}/lib64/libzfs.so.* or
-dnl # ${zfs-devel-path}/lib/libzfs.so.*
-dnl #
-dnl # --with-zfs=path - Enable zfs support and use the zfs headers in the
-dnl # provided path. No autodetection is performed and
-dnl # if no headers are found this is a fatal error.
-dnl #
-dnl # --with-zfs-obj - When zfs support is enabled the object directory
-dnl # will be based on the --with-zfs directory. If this
-dnl # is detected incorrectly it can be explicitly
-dnl # specified using this option.
-dnl #
-dnl # --without-spl - Disable spl support.
-dnl # --with-spl=no
-dnl #
-dnl # --with-spl - Enable spl support and attempt to autodetect the spl
-dnl # --with-spl=yes headers in one of the following places in this order:
-dnl # * /var/lib/dkms/spl/${VERSION}/source
-dnl # * /usr/src/spl-${VERSION}/${LINUXRELEASE}
-dnl # * /usr/src/spl-${VERSION}
-dnl # * ../spl/
-dnl # * $LINUX
-dnl #
-dnl # --with-spl=path - Enable spl support and use the spl headers in the
-dnl # provided path. No autodetection is performed.
-dnl #
-dnl # --with-spl-obj - When spl support is enabled the object directory
-dnl # will be based on the --with-spl directory. If this
-dnl # is detected incorrectly it can be explicitly
-dnl # specified using this option.
-dnl #
+# SPDX-License-Identifier: NOASSERTION
+
+#
+# This file is part of Lustre, http://www.lustre.org/
+#
+# config/lustre-build-zfs.m4
+#
+# openZFS OSD related configuration
+#
+
+#
+# Supported configure options. When no options are specified support
+# for ZFS OSDs will be autodetected assuming server support is enabled.
+# If the ZFS OSD cannot be built support for it is disabled and a
+# warning is issued but the configure process is allowed to continue.
+#
+# --without-zfs - Disable zfs support.
+# --with-zfs=no
+#
+# --with-zfs - Enable zfs support and attempt to autodetect the zfs
+# --with-zfs=yes headers in one of the following places. Because zfs
+# support was explicitly required if the headers cannot
+# be located it is treated as a fatal error.
+#
+# * /var/lib/dkms/zfs/${VERSION}/source
+# * /usr/src/zfs-${VERSION}/${LINUXRELEASE}
+# * /usr/src/zfs-${VERSION}
+# * ../zfs/
+# * $LINUX/zfs
+#
+# --with-zfs-devel=path
+# - User provided directory where zfs development headers
+# are located. This option is typically used when user
+# uses rpm2cpio to unpack src rpm.
+# Assumes layout of:
+# ${zfs-devel-path}/usr/include/libzfs
+# ${zfs-devel-path}/usr/include/libspl
+# ${zfs-devel-path}/lib64/libzfs.so.* or
+# ${zfs-devel-path}/lib/libzfs.so.*
+#
+# --with-zfs=path - Enable zfs support and use the zfs headers in the
+# provided path. No autodetection is performed and
+# if no headers are found this is a fatal error.
+#
+# --with-zfs-obj - When zfs support is enabled the object directory
+# will be based on the --with-zfs directory. If this
+# is detected incorrectly it can be explicitly
+# specified using this option.
+#
+# --without-spl - Disable spl support.
+# --with-spl=no
+#
+# --with-spl - Enable spl support and attempt to autodetect the spl
+# --with-spl=yes headers in one of the following places in this order:
+# * /var/lib/dkms/spl/${VERSION}/source
+# * /usr/src/spl-${VERSION}/${LINUXRELEASE}
+# * /usr/src/spl-${VERSION}
+# * ../spl/
+# * $LINUX/spl
+#
+# --with-spl=path - Enable spl support and use the spl headers in the
+# provided path. No autodetection is performed.
+#
+# --with-spl-obj - When spl support is enabled the object directory
+# will be based on the --with-spl directory. If this
+# is detected incorrectly it can be explicitly
+# specified using this option.
+#
+
+#
+# LB_SPL
+#
AC_DEFUN([LB_SPL], [
AC_ARG_WITH([spl],
AS_HELP_STRING([--with-spl=PATH],
[Path to spl build objects]),
[splobj="$withval"])
- dnl #
- dnl # The existence of spl.release[.in] is used to identify a valid
- dnl # source directory. In order of preference:
- dnl #
- splver=$(ls -1 /usr/src/ | grep -m1 spl | cut -f2 -d'-')
+ #
+ # The existence of spl.release[.in] is used to identify a valid
+ # source directory. In order of preference:
+ #
+ splver=$(ls -1 /usr/src/ | grep ^spl- | cut -f2 -d'-' |
+ sort -V | head -n1)
spldkms="/var/lib/dkms/spl/${splver}"
splsrc1="/usr/src/spl-${splver}/${LINUXRELEASE}"
splsrc2="/usr/src/spl-${splver}"
splsrc3="../spl/"
- splsrc4="$LINUX"
+ splsrc4="$LINUX/spl"
AC_MSG_CHECKING([spl source directory])
AS_IF([test -z "${splsrc}"], [
enable_zfs=no
])
- dnl #
- dnl # The existence of the spl_config.h is used to identify a valid
- dnl # spl object directory. In many cases the object and source
- dnl # directory are the same, however the objects may also reside
- dnl # is a subdirectory named after the kernel version. When
- dnl # weak modules are used, the kernel version may not be the
- dnl # same as the LINUXRELEASE against which we are building lustre.
- dnl #
+ #
+ # The existence of the spl_config.h is used to identify a valid
+ # spl object directory. In many cases the object and source
+ # directory are the same, however the objects may also reside
+	# in a subdirectory named after the kernel version. When
+ # weak modules are used, the kernel version may not be the
+ # same as the LINUXRELEASE against which we are building lustre.
+ #
AC_MSG_CHECKING([spl build directory])
AS_IF([test -z "$splobj"], [
- last_spl_obj_dir=$(ls -d ${splsrc}/[[0-9]]*/ | tail -n 1 | sed 's|/$||')
+ last_spl_obj_dir=$(ls -d ${splsrc}/[[0-9]]*/ 2> /dev/null | tail -n 1 | sed 's|/$||')
AS_IF([test "${splsrc}" = "${spldkms}/source"], [
AS_IF([test -e "${spldkms}/${LINUXRELEASE}/${target_cpu}/spl_config.h"], [
splobj=${spldkms}/${LINUXRELEASE}/${target_cpu}
enable_zfs=no
])
- dnl #
- dnl # Verify the source version using SPL_META_VERSION in spl_config.h
- dnl #
+ #
+ # Verify the source version using SPL_META_VERSION in spl_config.h
+ #
AS_IF([test x$enable_zfs = xyes], [
AC_MSG_CHECKING([spl source version])
AS_IF([fgrep -q SPL_META_VERSION $splobj/spl_config.h], [
AC_MSG_RESULT([$splver])
])
- dnl #
- dnl # Verify the modules systems exist by the expect name.
- dnl #
+ #
+	# Verify the module symbols exist by the expected name.
+ #
AS_IF([test x$enable_zfs = xyes], [
AC_MSG_CHECKING([spl file name for module symbols])
AS_IF([test -r $splobj/$SYMVERFILE], [
AC_MSG_RESULT([$splsym])
])
- SPL=${splsrc}
- SPL_OBJ=${splobj}
- SPL_VERSION=${splver}
+ AS_IF([test x$enable_zfs = xyes], [
+ SPL=${splsrc}
+ SPL_OBJ=${splobj}
+ SPL_VERSION=${splver}
+
+ AC_SUBST(SPL)
+ AC_SUBST(SPL_OBJ)
+ AC_SUBST(SPL_VERSION)
+ AC_SUBST(EXTRA_SYMBOLS)
+ ])
- AC_SUBST(SPL)
- AC_SUBST(SPL_OBJ)
- AC_SUBST(SPL_VERSION)
- AC_SUBST(EXTRA_SYMBOLS)
-])
+]) # LB_SPL
+#
+# LB_ZFS
+#
AC_DEFUN([LB_ZFS], [
AC_ARG_WITH([zfs-obj],
AS_HELP_STRING([--with-zfs-obj=PATH],
[Path to zfs build objects]),
[zfsobj="$withval"])
- dnl #
- dnl # The existence of zfs.release[.in] is used to identify a valid
- dnl # source directory. In order of preference:
- dnl #
- zfsver=$(ls -1 /usr/src/ | grep -m1 zfs | cut -f2 -d'-')
+ #
+ # The existence of zfs.release[.in] is used to identify a valid
+ # source directory. In order of preference:
+ #
+ zfsver=$(ls -1 /usr/src/ | grep ^zfs- | cut -f2 -d'-' |
+ sort -V | head -n1)
zfsdkms="/var/lib/dkms/zfs/${zfsver}"
zfssrc1="/usr/src/zfs-${zfsver}/${LINUXRELEASE}"
zfssrc2="/usr/src/zfs-${zfsver}"
zfssrc3="../zfs/"
- zfssrc4="$LINUX"
+ zfssrc4="$LINUX/zfs"
AC_MSG_CHECKING([zfs source directory])
AS_IF([test -z "${zfssrc}"], [
enable_zfs=no
])
- dnl #
- dnl # The existence of the zfs_config.h is used to identify a valid
- dnl # zfs object directory. In many cases the object and source
- dnl # directory are the same, however the objects may also reside
- dnl # is a subdirectory named after the kernel version. When
- dnl # weak modules are used, the kernel version may not be the
- dnl # same as the LINUXRELEASE against which we are building lustre.
- dnl #
+ #
+ # The existence of the zfs_config.h is used to identify a valid
+ # zfs object directory. In many cases the object and source
+ # directory are the same, however the objects may also reside
+	# in a subdirectory named after the kernel version. When
+ # weak modules are used, the kernel version may not be the
+ # same as the LINUXRELEASE against which we are building lustre.
+ #
AC_MSG_CHECKING([zfs build directory])
AS_IF([test -z "$zfsobj"], [
- last_zfs_obj_dir=$(ls -d ${zfssrc}/[[0-9]]*/ | tail -n 1 | sed 's|/$||')
+ last_zfs_obj_dir=$(ls -d ${zfssrc}/[[0-9]]*/ 2> /dev/null | tail -n 1 | sed 's|/$||')
AS_IF([test "${zfssrc}" = "${zfsdkms}/source"], [
AS_IF([test -e "${zfsdkms}/${LINUXRELEASE}/${target_cpu}/zfs_config.h"], [
zfsobj=${zfsdkms}/${LINUXRELEASE}/${target_cpu}
enable_zfs=no
])
- dnl #
- dnl # Verify the source version using SPL_META_VERSION in spl_config.h
- dnl #
+ #
+	# Verify the source version using ZFS_META_VERSION in zfs_config.h
+ #
AS_IF([test x$enable_zfs = xyes], [
AC_MSG_CHECKING([zfs source version])
AS_IF([fgrep -q ZFS_META_VERSION $zfsobj/zfs_config.h], [
AC_MSG_RESULT([$zfsver])
])
- dnl #
- dnl # Verify the modules systems exist by the expect name.
- dnl #
+ #
+	# Verify the module symbols exist by the expected name.
+ #
AS_IF([test x$enable_zfs = xyes], [
AC_MSG_CHECKING([zfs file name for module symbols])
AS_IF([test -r $zfsobj/$SYMVERFILE], [
AC_MSG_RESULT([$zfssym])
])
- ZFS=${zfssrc}
- ZFS_OBJ=${zfsobj}
- ZFS_VERSION=${zfsver}
+ AS_IF([test x$enable_zfs = xyes], [
+ ZFS=${zfssrc}
+ ZFS_OBJ=${zfsobj}
+ ZFS_VERSION=${zfsver}
+
+ AC_SUBST(ZFS)
+ AC_SUBST(ZFS_OBJ)
+ AC_SUBST(ZFS_VERSION)
+ AC_SUBST(EXTRA_SYMBOLS)
+ ])
- AC_SUBST(ZFS)
- AC_SUBST(ZFS_OBJ)
- AC_SUBST(ZFS_VERSION)
- AC_SUBST(EXTRA_SYMBOLS)
-])
+]) # LB_ZFS
+#
+# LB_ZFS_DEVEL
+#
AC_DEFUN([LB_ZFS_DEVEL], [
AC_ARG_WITH([zfs-devel],
[AS_HELP_STRING([--with-zfs-devel=PATH],
])
])
AC_MSG_RESULT([$zfsinc])
-])
+]) # LB_ZFS_DEVEL
+#
+# LB_ZFS_USER
+#
AC_DEFUN([LB_ZFS_USER], [
- dnl #
- dnl # Detect user space zfs development headers.
- dnl #
+ #
+ # Detect user space zfs development headers.
+ #
AC_MSG_CHECKING([zfs devel headers])
AS_IF([test -z "${zfsinc}"], [
AS_IF([test -e "${zfssrc}/include/libzfs.h" && test -e "${zfssrc}/lib/libspl/include"], [
- zfsinc="-I $zfssrc/lib/libspl/include -I $zfssrc/include"
- zfslib="-L$zfssrc/lib/libzfs/.libs/"
+ zfsinc="-I $zfssrc/lib/libspl/include -I $zfssrc/lib/libspl/include/os/linux -I $zfssrc/include"
+ zfslib="-L$zfssrc/.libs/ -L$zfssrc/lib/libzfs/.libs/ -L$zfssrc/lib/libnvpair/.libs/ -L$zfssrc/lib/libzpool/.libs/"
], [test -d /usr/include/libzfs && test -d /usr/include/libspl], [
zfsinc="-I/usr/include/libspl -I /usr/include/libzfs"
zfslib=""
AC_MSG_RESULT([$zfsinc])
ZFS_LIBZFS_INCLUDE=${zfsinc}
- ZFS_LIBZFS_LDFLAGS="-lzfs -lnvpair ${zfslib}"
+ ZFS_LIBZFS_LDFLAGS=${zfslib}
+ ZFS_LIBZFS_LIBS="-lzfs -lnvpair -lzpool"
AC_SUBST(ZFS_LIBZFS_INCLUDE)
AC_SUBST(ZFS_LIBZFS_LDFLAGS)
-])
+ AC_SUBST(ZFS_LIBZFS_LIBS)
+]) # LB_ZFS_USER
+#
+# LB_CONFIG_ZFS
+#
AC_DEFUN([LB_CONFIG_ZFS], [
AC_ARG_WITH([zfs],
[AS_HELP_STRING([--with-zfs=PATH], [Path to zfs source])],
[
AS_IF([test x$withval = xno], [
+ enable_spl=no
enable_zfs=no
require_zfs=no
], [test x$withval = xyes], [
+ enable_spl=yes
enable_zfs=yes
require_zfs=yes
], [
+ enable_spl=yes
enable_zfs=yes
require_zfs=yes
zfssrc="$withval"
])
], [
AS_IF([test x$enable_server != xno], [
+ enable_spl=yes
require_zfs=no
enable_zfs=yes
], [
+ enable_spl=no
require_zfs=no
enable_zfs=no
])
AS_IF([test x$enable_zfs = xyes], [
AS_IF([test x$enable_modules = xyes], [
- LB_SPL
LB_ZFS
])
LB_ZFS_DEVEL
LB_ZFS_USER
- dnl #
- dnl # enable_zfs will be set to no in LB_SPL or LB_ZFS if
- dnl # one of more of the build requirements is not met.
- dnl #
+ #
+ # Define zfs source code version
+ #
+ ZFS_MAJOR=$(echo $zfsver | sed -re ['s/([0-9]+)\.([0-9]+)\.([0-9]+)(\.([0-9]+))?.*/\1/'])
+ ZFS_MINOR=$(echo $zfsver | sed -re ['s/([0-9]+)\.([0-9]+)\.([0-9]+)(\.([0-9]+))?.*/\2/'])
+ ZFS_PATCH=$(echo $zfsver | sed -re ['s/([0-9]+)\.([0-9]+)\.([0-9]+)(\.([0-9]+))?.*/\3/'])
+ ZFS_FIX=$(echo $zfsver | sed -re ['s/([0-9]+)\.([0-9]+)\.([0-9]+)(\.([0-9]+))?.*/\5/'])
+ AS_IF([test -z "$ZFS_FIX"], [ZFS_FIX="0"])
+
+ AC_DEFINE_UNQUOTED([ZFS_MAJOR], [$ZFS_MAJOR], [zfs major version])
+ AC_DEFINE_UNQUOTED([ZFS_MINOR], [$ZFS_MINOR], [zfs minor version])
+ AC_DEFINE_UNQUOTED([ZFS_PATCH], [$ZFS_PATCH], [zfs patch version])
+ AC_DEFINE_UNQUOTED([ZFS_FIX], [$ZFS_FIX], [zfs fix version])
+
+ #
+ # SPL is only needed if ZFS is prior to 0.8.0
+ #
+ AS_IF([test x$enable_modules = xyes && test -n "$ZFS_MAJOR" &&
+ test $ZFS_MAJOR -eq 0 && test $ZFS_MINOR -lt 8], [
+ LB_SPL
+ ],[
+ enable_spl=no
+ ])
+
+ #
+ # enable_zfs will be set to no in LB_SPL or LB_ZFS if
+	# one or more of the build requirements is not met.
+ #
AS_IF([test x$enable_zfs = xyes], [
AC_DEFINE(HAVE_ZFS_OSD, 1, Enable zfs osd)
],[
])
])
- dnl #
- dnl # Define zfs source code version
- dnl #
- AS_IF([test x$enable_zfs = xyes], [
- ZFS_MAJOR=$(echo $zfsver | sed -re ['s/([0-9]+)\.([0-9]+)\.([0-9]+)(\.([0-9]+))?.*/\1/'])
- ZFS_MINOR=$(echo $zfsver | sed -re ['s/([0-9]+)\.([0-9]+)\.([0-9]+)(\.([0-9]+))?.*/\2/'])
- ZFS_PATCH=$(echo $zfsver | sed -re ['s/([0-9]+)\.([0-9]+)\.([0-9]+)(\.([0-9]+))?.*/\3/'])
- ZFS_FIX=$(echo $zfsver | sed -re ['s/([0-9]+)\.([0-9]+)\.([0-9]+)(\.([0-9]+))?.*/\5/'])
- AS_IF([test -z "$ZFS_FIX"], [ZFS_FIX="0"])
-
- AC_DEFINE_UNQUOTED([ZFS_MAJOR], [$ZFS_MAJOR], [zfs major version])
- AC_DEFINE_UNQUOTED([ZFS_MINOR], [$ZFS_MINOR], [zfs minor version])
- AC_DEFINE_UNQUOTED([ZFS_PATCH], [$ZFS_PATCH], [zfs patch version])
- AC_DEFINE_UNQUOTED([ZFS_FIX], [$ZFS_FIX], [zfs fix version])
- ])
AS_IF([test "x$enable_zfs" = xyes], [
LB_CHECK_COMPILE([if zfs defines dsl_pool_config_enter/exit],
],[
AC_DEFINE(HAVE_DSL_POOL_CONFIG, 1,
[Have dsl_pool_config_enter/exit in ZFS])
- ])
- LB_CHECK_COMPILE([if zfs defines dsl_sync_task_do_nowait],
- dsl_sync_task_do_nowait, [
- #include <sys/dsl_synctask.h>
],[
- dsl_sync_task_do_nowait(NULL, NULL, NULL, NULL, NULL, 0, NULL);
- ],[
- AC_DEFINE(HAVE_DSL_SYNC_TASK_DO_NOWAIT, 1,
- [Have dsl_sync_task_do_nowait in ZFS])
+ AC_MSG_ERROR([dsl_pool_config_enter/exit do not exist])
])
- LB_CHECK_COMPILE([if zfs defines sa_spill_alloc],
- sa_spill_alloc, [
- #include <sys/kmem.h>
- #include <sys/sa.h>
+ LB_CHECK_COMPILE([if zfs defines zio_buf_alloc/free],
+ zio_buf_alloc, [
+ #include <sys/zio.h>
],[
- void *ptr;
+ void *ptr = zio_buf_alloc(1024);
- ptr = sa_spill_alloc(KM_SLEEP);
- sa_spill_free(ptr);
+ (void)ptr;
+ ],[
+ AC_DEFINE(HAVE_ZIO_BUF_ALLOC, 1,
+ [Have zio_buf_alloc/free in ZFS])
],[
- AC_DEFINE(HAVE_SA_SPILL_ALLOC, 1,
- [Have sa_spill_alloc in ZFS])
+ AC_MSG_ERROR([zio_buf_alloc/free do not exist])
])
LB_CHECK_COMPILE([if zfs defines spa_maxblocksize],
spa_maxblocksize, [
#include <sys/spa.h>
],[
spa_t *spa = NULL;
- int size;
+ int size = spa_maxblocksize(spa);
- size = spa_maxblocksize(spa);
+ (void)size;
],[
AC_DEFINE(HAVE_SPA_MAXBLOCKSIZE, 1,
[Have spa_maxblocksize in ZFS])
+ ],[
+ AC_MSG_ERROR([spa_maxblocksize does not exist])
])
- dnl #
- dnl # ZFS 0.7.x adds support for large dnodes. This
- dnl # allows Lustre to optionally specify the size of a
- dnl # dnode which ZFS will then use to store metadata such
- dnl # as xattrs. The default dnode size specified by the
- dnl # 'dnodesize' dataset property will be used unless a
- dnl # specific value is provided.
- dnl #
+ #
+ # ZFS 0.7.x adds support for large dnodes. This
+ # allows Lustre to optionally specify the size of a
+ # dnode which ZFS will then use to store metadata such
+ # as xattrs. The default dnode size specified by the
+ # 'dnodesize' dataset property will be used unless a
+ # specific value is provided.
+ #
LB_CHECK_COMPILE([if zfs defines dmu_object_alloc_dnsize],
dmu_object_alloc_dnsize, [
#include <sys/dmu.h>
],[
AC_DEFINE(HAVE_DMU_OBJECT_ALLOC_DNSIZE, 1,
[Have dmu_object_alloc_dnsize in ZFS])
+ ],[
+ AC_MSG_ERROR([dmu_object_alloc_dnsize does not exist])
])
- dnl #
- dnl # ZFS 0.7.x extended dmu_prefetch() to take an additional
- dnl # 'level' and 'priority' argument. Use a level of 0 and a
- dnl # priority of ZIO_PRIORITY_SYNC_READ to replicate the
- dnl # behavior of the four argument version.
- dnl #
+ #
+ # ZFS 0.7.x extended dmu_prefetch() to take an additional
+ # 'level' and 'priority' argument. Use a level of 0 and a
+ # priority of ZIO_PRIORITY_SYNC_READ to replicate the
+ # behavior of the four argument version.
+ #
LB_CHECK_COMPILE([if ZFS has 'dmu_prefetch' with 6 args],
dmu_prefetch, [
#include <sys/dmu.h>
],[
AC_DEFINE(HAVE_DMU_PREFETCH_6ARG, 1,
[Have 6 argument dmu_pretch in ZFS])
+ ],[
+ AC_MSG_ERROR([6 argument dmu_pretch does not exist])
])
- dnl #
- dnl # ZFS 0.7.0 feature: SPA_FEATURE_USEROBJ_ACCOUNTING
- dnl #
- LB_CHECK_COMPILE([if zfs has native dnode accounting supported],
- dmu_objset_id_quota_upgrade, [
+ #
+ # ZFS 0.7.0 feature: SPA_FEATURE_USEROBJ_ACCOUNTING
+ #
+ LB_CHECK_COMPILE([if ZFS has native dnode accounting supported],
+ dmu_objset_userobjused_enabled, [
#include <sys/dmu_objset.h>
],[
- dmu_objset_id_quota_upgrade(NULL);
+ dmu_objset_userobjused_enabled(NULL);
],[
AC_DEFINE(HAVE_DMU_USEROBJ_ACCOUNTING, 1,
[Have native dnode accounting in ZFS])
+ ],[
+ AC_MSG_ERROR([native dnode accounting does not exist])
])
- dnl # ZFS 0.7.x adds new method zap_lookup_by_dnode
- dnl #
+ #
+ # ZFS 0.7.0 feature: MULTIHOST
+ #
+ LB_CHECK_COMPILE([if ZFS has multihost protection],
+ spa_multihost, [
+ #include <sys/fs/zfs.h>
+ ],[
+ zpool_prop_t prop = ZPOOL_PROP_MULTIHOST;
+
+ (void)prop;
+ ],[
+ AC_DEFINE(HAVE_ZFS_MULTIHOST, 1,
+ [Have multihost protection in ZFS])
+ ],[
+ AC_MSG_ERROR([multihost protection does not exist])
+ ])
+ #
+ # ZFS 0.7.x adds new method zap_lookup_by_dnode
+ #
LB_CHECK_COMPILE([if ZFS has 'zap_lookup_by_dnode'],
zap_lookup_by_dnode, [
#include <sys/zap.h>
],[
AC_DEFINE(HAVE_ZAP_LOOKUP_BY_DNODE, 1,
[Have zap_lookup_by_dnode() in ZFS])
+ ],[
+ AC_MSG_ERROR([zap_lookup_by_dnode does not exist])
])
- dnl #
- dnl # ZFS 0.7.x adds new method zap_add_by_dnode
- dnl #
+ #
+ # ZFS 0.7.x adds new method zap_add_by_dnode
+ #
LB_CHECK_COMPILE([if ZFS has 'zap_add_by_dnode'],
zap_add_by_dnode, [
#include <sys/zap.h>
],[
AC_DEFINE(HAVE_ZAP_ADD_BY_DNODE, 1,
[Have zap_add_by_dnode() in ZFS])
+ ],[
+ AC_MSG_ERROR([zap_add_by_dnode does not exist])
])
- dnl #
- dnl # ZFS 0.7.x adds new method zap_remove_by_dnode
- dnl #
+ #
+ # ZFS 0.7.x adds new method zap_remove_by_dnode
+ #
LB_CHECK_COMPILE([if ZFS has 'zap_remove_by_dnode'],
zap_remove_by_dnode, [
#include <sys/zap.h>
],[
AC_DEFINE(HAVE_ZAP_REMOVE_ADD_BY_DNODE, 1,
[Have zap_remove_by_dnode() in ZFS])
+ ],[
+ AC_MSG_ERROR([zap_remove_by_dnode does not exist])
])
- dnl #
- dnl # ZFS 0.7.x adds new method dmu_tx_hold_zap_by_dnode
- dnl #
+ #
+ # ZFS 0.7.x adds new method dmu_tx_hold_zap_by_dnode
+ #
LB_CHECK_COMPILE([if ZFS has 'dmu_tx_hold_zap_by_dnode'],
dmu_tx_hold_zap_by_dnode, [
#include <sys/zap.h>
],[
AC_DEFINE(HAVE_DMU_TX_HOLD_ZAP_BY_DNODE, 1,
[Have dmu_tx_hold_zap_by_dnode() in ZFS])
+ ],[
+ AC_MSG_ERROR([dmu_tx_hold_zap_by_dnode does not exist])
])
- dnl #
- dnl # ZFS 0.7.x adds new method dmu_tx_hold_write_by_dnode
- dnl #
+ #
+ # ZFS 0.7.x adds new method dmu_tx_hold_write_by_dnode
+ #
LB_CHECK_COMPILE([if ZFS has 'dmu_tx_hold_write_by_dnode'],
dmu_tx_hold_write_by_dnode, [
#include <sys/zap.h>
],[
AC_DEFINE(HAVE_DMU_TX_HOLD_WRITE_BY_DNODE, 1,
[Have dmu_tx_hold_write_by_dnode() in ZFS])
+ ],[
+ AC_MSG_ERROR([dmu_tx_hold_write_by_dnode does not exist])
])
- dnl #
- dnl # ZFS 0.7.x adds new method dmu_write_by_dnode
- dnl #
+ #
+ # ZFS 0.7.x adds new method dmu_write_by_dnode
+ #
LB_CHECK_COMPILE([if ZFS has 'dmu_write_by_dnode'],
dmu_write_by_dnode, [
#include <sys/zap.h>
],[
AC_DEFINE(HAVE_DMU_WRITE_BY_DNODE, 1,
[Have dmu_write_by_dnode() in ZFS])
+ ],[
+ AC_MSG_ERROR([dmu_write_by_dnode does not exist])
])
- dnl #
- dnl # ZFS 0.7.x adds new method dmu_read_by_dnode
- dnl #
+ #
+ # ZFS 0.7.x adds new method dmu_read_by_dnode
+ #
LB_CHECK_COMPILE([if ZFS has 'dmu_read_by_dnode'],
dmu_read_by_dnode, [
#include <sys/zap.h>
],[
AC_DEFINE(HAVE_DMU_READ_BY_DNODE, 1,
[Have dmu_read_by_dnode() in ZFS])
+ ],[
+ AC_MSG_ERROR([dmu_read_by_dnode does not exist])
])
- dnl #
- dnl # ZFS 0.8.x changes dmu_objset_own for encryption
- dnl #
+ #
+ # ZFS 0.7.2 adds new method dmu_tx_mark_netfree
+ #
+ LB_CHECK_COMPILE([if ZFS has 'dmu_tx_mark_netfree'],
+ dmu_tx_mark_netfree, [
+ #include <sys/dmu.h>
+ ],[
+ dmu_tx_t *tx = NULL;
+ dmu_tx_mark_netfree(tx);
+ ],[
+ AC_DEFINE(HAVE_DMU_TX_MARK_NETFREE, 1,
+ [Have dmu_tx_mark_netfree])
+ ])
+ #
+ # ZFS 0.7.10 changes timestruc_t to inode_timespec_t
+ #
+ LB_CHECK_COMPILE([if SPL has 'inode_timespec_t'],
+ zfs_have_inode_timespec, [
+ #include <sys/fs/zfs.h>
+ ],[
+ inode_timespec_t now;
+ gethrestime(&now);
+ ],[
+ AC_DEFINE(HAVE_ZFS_INODE_TIMESPEC, 1,
+ [Have inode_timespec_t])
+ ])
+ # ZFS 0.7.12/0.8.x uses zfs_refcount_add() instead of
+ # refcount_add(). ZFS 2.0 renamed sys/refcount.h to
+	# sys/zfs_refcount.h, rather than add another check to
+ # determine the correct header name include it
+ # indirectly through sys/dnode.h.
+ #
+ LB_CHECK_COMPILE([if ZFS has 'zfs_refcount_add'],
+ zfs_refcount_add, [
+ #include <sys/dnode.h>
+ ],[
+ zfs_refcount_add((zfs_refcount_t *) NULL, NULL);
+ ],[
+ AC_DEFINE(HAVE_ZFS_REFCOUNT_ADD, 1,
+ [Have zfs_refcount_add])
+ ])
+ #
+ # ZFS 0.8.x changes dmu_objset_own for encryption
+ #
LB_CHECK_COMPILE([if ZFS has 'dmu_objset_own' with 6 args],
dmu_objset_own, [
#include <sys/dmu_objset.h>
],[
objset_t *os = NULL;
dmu_objset_type_t type = DMU_OST_ANY;
- dmu_objset_own(NULL, type, B_FALSE, B_FALSE, FTAG, &os);
+ dmu_objset_own(NULL, type, B_FALSE, B_TRUE, FTAG, &os);
],[
AC_DEFINE(HAVE_DMU_OBJSET_OWN_6ARG, 1,
[Have dmu_objset_own() with 6 args])
])
- dnl #
- dnl # ZFS 0.8.x changes dmu_objset_disown for encryption
- dnl #
+ #
+ # ZFS 0.8.x changes dmu_objset_disown for encryption
+ #
LB_CHECK_COMPILE([if ZFS has 'dmu_objset_disown' with 3 args],
dmu_objset_disown, [
#include <sys/dmu_objset.h>
],[
objset_t *os = NULL;
- dmu_objset_disown(os, B_FALSE, FTAG);
+ dmu_objset_disown(os, B_TRUE, FTAG);
],[
AC_DEFINE(HAVE_DMU_OBJSET_DISOWN_3ARG, 1,
[Have dmu_objset_disown() with 3 args])
])
+ #
+	# ZFS exports dmu_offset_next
+ #
+ AC_CACHE_CHECK([if ZFS exports 'dmu_offset_next'],
+ [lb_cv_dmu_offset_next], [
+ lb_cv_dmu_offset_next="no"
+ AS_IF([grep -q -E "EXPORT_SYMBOL.*\(dmu_offset_next\)" "$zfssrc/module/zfs/dmu.c" 2>/dev/null],
+ [lb_cv_dmu_offset_next="yes"])
+ ])
+ AS_IF([test "x$lb_cv_dmu_offset_next" = "xyes"], [
+ AC_DEFINE(HAVE_DMU_OFFSET_NEXT, 1,
+ [Have dmu_offset_next() exported])
+ ])
+ #
+ # ZFS 2.0 replaced .db_last_dirty / .dr_next with a list_t
+ # and list_node_t named .db_dirty_records / .dr_dbuf_node.
+ #
+ LB_CHECK_COMPILE([if ZFS has 'db_dirty_records' list_t],
+ db_dirty_records, [
+ #include <sys/dbuf.h>
+ ],[
+ dmu_buf_impl_t db;
+ dbuf_dirty_record_t *dr;
+ dr = list_head(&db.db_dirty_records);
+ ],[
+ AC_DEFINE(HAVE_DB_DIRTY_RECORDS_LIST, 1,
+ [Have db_dirty_records list_t])
+ ])
+ #
+ # ZFS 2.0 renamed sys/refcount.h to zfs_refcount.h
+ # This build issue shows up with ZFS 2.0.7 and Lustre 2.12 LTS
+ #
+ LB_CHECK_COMPILE([if ZFS renamed sys/refcount to zfs_refcount.h],
+ zfs_zfs_refcount, [
+ #include <sys/zfs_refcount.h>
+ ],[
+ zfs_refcount_add((zfs_refcount_t *) NULL, NULL);
+ ],[
+ AC_DEFINE(HAVE_ZFS_REFCOUNT_HEADER, 1,
+ [Have zfs_refcount.h])
+ ])
+ old_EXTRA_KCFLAGS=$EXTRA_KCFLAGS
+ EXTRA_KCFLAGS+=" -Werror"
+ dnl #
+ dnl # ZFS 2.2.0 nvpair now returns and expects constant args
+ dnl #
+ LB_CHECK_COMPILE([if ZFS nvlist interfaces require const],
+ zfs_nvpair_const, [
+ #include <sys/nvpair.h>
+ ], [
+ nvpair_t *nvp = NULL;
+ nvlist_t *nvl = NULL;
+ const char *name = nvpair_name(nvp);
+ nvlist_lookup_string(nvl, name, &name);
+ nvlist_lookup_nvlist(nvl, name, &nvl);
+ ], [
+ AC_DEFINE(HAVE_ZFS_NVLIST_CONST_INTERFACES, 1,
+ [ZFS nvlist interfaces require const])
+ ])
+ dnl #
+ dnl # ZFS 2.2.1 arc_prune_func_t now uses uint64_t for the
+ dnl # first parameter
+ dnl #
+ LB_CHECK_COMPILE([if ZFS arc_prune_func_t uses uint64_t],
+ zfs_arc_prune_func_uint64, [
+ #include <sys/arc.h>
+ ], [
+ void arc_prune_func(uint64_t bytes, void *priv) {}
+ arc_prune_t *arc_p __attribute__ ((unused)) =
+ arc_add_prune_callback(arc_prune_func, NULL);
+ ], [
+ AC_DEFINE(HAVE_ZFS_ARC_PRUNE_FUNC_UINT64, 1,
+ [ZFS arc_prune_func_t uses uint64_t])
+ ])
+ EXTRA_KCFLAGS=$old_EXTRA_KCFLAGS
])
AS_IF([test "x$enable_zfs" = xyes], [
AC_SUBST(ENABLE_ZFS, no)
])
AM_CONDITIONAL(ZFS_ENABLED, [test "x$enable_zfs" = xyes])
-])
+ AM_CONDITIONAL(SPL_ENABLED, [test "x$enable_spl" = xyes])
+]) # LB_CONFIG_ZFS