AC_MSG_NOTICE([LNet core checks
==============================================================================])
+AC_ARG_WITH([cuda],
+ AS_HELP_STRING([--with-cuda=path],
+ [Path to the CUDA sources.]),
+ [LB_ARG_CANON_PATH([cuda], [CUDA_PATH])],
+ [CUDA_PATH=`ls -d1 /usr/src/nvidia-*/nvidia/ 2>/dev/null | tail -1`]
+)
+
+AC_ARG_WITH([gds],
+ AS_HELP_STRING([--with-gds=path],
+ [Path to the GDS (nvidia-fs) sources.]),
+ [LB_ARG_CANON_PATH([gds], [GDS_PATH])],
+ [GDS_PATH=`ls -d1 /usr/src/nvidia-fs* 2>/dev/null | tail -1`]
+)
+
+AS_IF([test -n "${CUDA_PATH}" && test -n "${GDS_PATH}"],[
+LB_CHECK_FILE([$CUDA_PATH/nv-p2p.h],
+ [
+ AC_MSG_RESULT([CUDA path is $CUDA_PATH])
+ AC_SUBST(CUDA_PATH)
+ ],
+ [AC_MSG_ERROR([CUDA sources not found: nv-p2p.h does not exist])]
+)
+
+LB_CHECK_FILE([$GDS_PATH/nvfs-dma.h],
+ [
+ LB_CHECK_FILE([$GDS_PATH/config-host.h], [
+ AC_MSG_RESULT([GDS path is $GDS_PATH])
+ AC_SUBST(GDS_PATH)
+ AC_DEFINE(WITH_GDS, 1, [GDS build enabled])
+ ], [])
+ ],
+ [])
+],[
+ AC_MSG_WARN([CUDA or GDS sources not found. GDS support disabled])
+]
+)
+
# lnet/utils/lnetconfig/liblnetconfig_netlink.c
AS_IF([test "x$enable_dist" = xno], [
PKG_CHECK_MODULES(LIBNL3, [libnl-genl-3.0 >= 3.1])
lib-types.h \
udsp.h \
lnet_rdma.h \
+ lnet_gds.h \
socklnd.h
--- /dev/null
+/*
+ * Minimal local copy of the nvidia-fs (GPUDirect Storage) DMA callback
+ * interface, used when building without the real nvfs-dma.h from the GDS
+ * sources.  NOTE(review): this must stay layout-compatible with the
+ * struct registered by the nvidia-fs driver — confirm against the
+ * matching nvidia-fs release.
+ */
+#ifndef LUSTRE_NVFS_H
+#define LUSTRE_NVFS_H
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/blkdev.h>
+#include <linux/cpumask.h>
+#include <linux/scatterlist.h>
+#include <linux/percpu-defs.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+
+
+/* Callback table provided by the nvidia-fs driver at registration time. */
+struct nvfs_dma_rw_ops {
+ unsigned long long ft_bmap; /* feature bitmap */
+
+ /* map a block request into @sglist */
+ int (*nvfs_blk_rq_map_sg) (struct request_queue *q,
+ struct request *req,
+ struct scatterlist *sglist);
+
+ /* DMA-map @sglist for @device; returns mapped nents */
+ int (*nvfs_dma_map_sg_attrs) (struct device *device,
+ struct scatterlist *sglist,
+ int nents,
+ enum dma_data_direction dma_dir,
+ unsigned long attrs);
+
+ /* undo nvfs_dma_map_sg_attrs */
+ int (*nvfs_dma_unmap_sg) (struct device *device,
+ struct scatterlist *sglist,
+ int nents,
+ enum dma_data_direction dma_dir);
+ /* true if @page is GPU (device) memory */
+ bool (*nvfs_is_gpu_page) (struct page *);
+ /* index of the GPU backing @page */
+ unsigned int (*nvfs_gpu_index) (struct page *page);
+ /* priority of @dev with respect to GPU @dev_index */
+ unsigned int (*nvfs_device_priority) (struct device *dev, unsigned int dev_index);
+};
+
+/* feature list for dma_ops, values indicate bit pos */
+enum ft_bits {
+ nvfs_ft_prep_sglist = 1ULL << 0,
+ nvfs_ft_map_sglist = 1ULL << 1,
+ nvfs_ft_is_gpu_page = 1ULL << 2,
+ nvfs_ft_device_priority = 1ULL << 3,
+};
+
+/* check features for use in registration with vendor drivers */
+#define NVIDIA_FS_CHECK_FT_SGLIST_PREP(ops) \
+ ((ops)->ft_bmap & nvfs_ft_prep_sglist)
+#define NVIDIA_FS_CHECK_FT_SGLIST_DMA(ops) \
+ ((ops)->ft_bmap & nvfs_ft_map_sglist)
+#define NVIDIA_FS_CHECK_FT_GPU_PAGE(ops) \
+ ((ops)->ft_bmap & nvfs_ft_is_gpu_page)
+#define NVIDIA_FS_CHECK_FT_DEVICE_PRIORITY(ops) \
+ ((ops)->ft_bmap & nvfs_ft_device_priority)
+
+#endif /* LUSTRE_NVFS_H */
+
+
-#ifndef LUSTRE_NVFS_H
-#define LUSTRE_NVFS_H
+#ifndef LNET_RDMA_H
+#define LNET_RDMA_H
-#include <linux/types.h>
-#include <linux/delay.h>
-#include <linux/blkdev.h>
-#include <linux/cpumask.h>
-#include <linux/scatterlist.h>
-#include <linux/percpu-defs.h>
-#include <linux/dma-direction.h>
-#include <linux/dma-mapping.h>
+/* forward declarations only — this header needs nothing but pointers */
+struct device;
+struct page;
+enum dma_data_direction;
+struct scatterlist;
-#define REGSTR2(x) x##_register_nvfs_dma_ops
-#define REGSTR(x) REGSTR2(x)
-
-#define UNREGSTR2(x) x##_unregister_nvfs_dma_ops
-#define UNREGSTR(x) UNREGSTR2(x)
-
-#define MODULE_PREFIX lustre_v1
-
-#define REGISTER_FUNC REGSTR(MODULE_PREFIX)
-#define UNREGISTER_FUNC UNREGSTR(MODULE_PREFIX)
-
-#define NVFS_IO_ERR -1
-#define NVFS_CPU_REQ -2
-
-#define NVFS_HOLD_TIME_MS 1000
-
-struct nvfs_dma_rw_ops {
- unsigned long long ft_bmap; /* feature bitmap */
-
- int (*nvfs_blk_rq_map_sg) (struct request_queue *q,
- struct request *req,
- struct scatterlist *sglist);
-
- int (*nvfs_dma_map_sg_attrs) (struct device *device,
- struct scatterlist *sglist,
- int nents,
- enum dma_data_direction dma_dir,
- unsigned long attrs);
-
- int (*nvfs_dma_unmap_sg) (struct device *device,
- struct scatterlist *sglist,
- int nents,
- enum dma_data_direction dma_dir);
- bool (*nvfs_is_gpu_page) (struct page *);
- unsigned int (*nvfs_gpu_index) (struct page *page);
- unsigned int (*nvfs_device_priority) (struct device *dev, unsigned int dev_index);
-};
-
-/* feature list for dma_ops, values indicate bit pos */
-enum ft_bits {
- nvfs_ft_prep_sglist = 1ULL << 0,
- nvfs_ft_map_sglist = 1ULL << 1,
- nvfs_ft_is_gpu_page = 1ULL << 2,
- nvfs_ft_device_priority = 1ULL << 3,
-};
-
-/* check features for use in registration with vendor drivers */
-#define NVIDIA_FS_CHECK_FT_SGLIST_PREP(ops) \
- ((ops)->ft_bmap & nvfs_ft_prep_sglist)
-#define NVIDIA_FS_CHECK_FT_SGLIST_DMA(ops) \
- ((ops)->ft_bmap & nvfs_ft_map_sglist)
-#define NVIDIA_FS_CHECK_FT_GPU_PAGE(ops) \
- ((ops)->ft_bmap & nvfs_ft_is_gpu_page)
-#define NVIDIA_FS_CHECK_FT_DEVICE_PRIORITY(ops) \
- ((ops)->ft_bmap & nvfs_ft_device_priority)
+struct nvfs_dma_rw_ops;
+/*
+ * NOTE(review): REGISTER_FUNC is still used in the declaration below, but
+ * its #define was moved out of this header into lnet_rdma.c (where it is
+ * defined only after this header is included).  Confirm this header still
+ * compiles for all includers.
+ */
 int REGISTER_FUNC (struct nvfs_dma_rw_ops *ops);
lnet-objs += lib-socket.o lib-move.o module.o lo.o
lnet-objs += router.o router_proc.o acceptor.o peer.o net_fault.o udsp.o
+CFLAGS_lnet_rdma.o += -I @GDS_PATH@ -I@CUDA_PATH@
+
default: all
@INCLUDE_RULES@
+#ifdef WITH_GDS
+#include "nvfs-dma.h"
+#else
+#include <lnet/lnet_gds.h>
+#endif
+
#include <lnet/lnet_rdma.h>
#include <libcfs/libcfs.h>
+
+/* include lib-lnet.h last: its MAX/MIN macros conflict with the headers above */
#include <lnet/lib-lnet.h>
+/*
+ * Name-mangling helpers: expand to the exported registration entry points
+ * lustre_v1_register_nvfs_dma_ops / lustre_v1_unregister_nvfs_dma_ops
+ * (moved here from lnet_rdma.h).
+ */
+#define REGSTR2(x) x##_register_nvfs_dma_ops
+#define REGSTR(x) REGSTR2(x)
+
+#define UNREGSTR2(x) x##_unregister_nvfs_dma_ops
+#define UNREGSTR(x) UNREGSTR2(x)
+
+#define MODULE_PREFIX lustre_v1
+
+#define REGISTER_FUNC REGSTR(MODULE_PREFIX)
+#define UNREGISTER_FUNC UNREGSTR(MODULE_PREFIX)
+
+/* sentinel return codes for nvfs I/O paths */
+#define NVFS_IO_ERR -1
+#define NVFS_CPU_REQ -2
+
+/* hold interval in ms — presumably a wait on in-flight nvfs users; confirm at use site */
+#define NVFS_HOLD_TIME_MS 1000
+
#define ERROR_PRINT_DEADLINE 3600
atomic_t nvfs_shutdown = ATOMIC_INIT(1);
}
EXPORT_SYMBOL(lnet_get_dev_prio);
+/**
+ * lnet_get_dev_idx() - return the index of the GPU backing @page.
+ *
+ * Returns UINT_MAX when the nvidia-fs callbacks are not registered.
+ */
+unsigned int
+lnet_get_dev_idx(struct page *page)
+{
+ unsigned int dev_idx = UINT_MAX;
+ struct nvfs_dma_rw_ops *nvfs_ops;
+
+ nvfs_ops = nvfs_get_ops();
+ if (!nvfs_ops)
+ return dev_idx;
+
+ dev_idx = nvfs_ops->nvfs_gpu_index(page);
+
+ nvfs_put_ops();
+ return dev_idx;
+}
+/*
+ * Keep the symbol exported: the old copy removed below carried
+ * EXPORT_SYMBOL(lnet_get_dev_idx) but the moved copy dropped it,
+ * which would silently un-export it from the module.
+ */
+EXPORT_SYMBOL(lnet_get_dev_idx);
+
int lnet_rdma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction direction)
{
return found;
}
EXPORT_SYMBOL(lnet_is_rdma_only_page);
-
-unsigned int
-lnet_get_dev_idx(struct page *page)
-{
- unsigned int dev_idx = UINT_MAX;
- struct nvfs_dma_rw_ops *nvfs_ops;
-
- nvfs_ops = nvfs_get_ops();
- if (!nvfs_ops)
- return dev_idx;
-
- dev_idx = nvfs_ops->nvfs_gpu_index(page);
-
- nvfs_put_ops();
- return dev_idx;
-}
-EXPORT_SYMBOL(lnet_get_dev_idx);
-