// SPDX-License-Identifier: GPL-2.0

/* This file is part of Lustre, http://www.lustre.org/ */

#include <lnet/lnet_gds.h>
#include <lnet/lnet_rdma.h>
#include <libcfs/libcfs.h>

/* MAX / MIN conflict */
#include <lnet/lib-lnet.h>
#define REGSTR2(x) x##_register_nvfs_dma_ops
#define REGSTR(x) REGSTR2(x)

#define UNREGSTR2(x) x##_unregister_nvfs_dma_ops
#define UNREGSTR(x) UNREGSTR2(x)

#define MODULE_PREFIX lustre_v1

#define REGISTER_FUNC REGSTR(MODULE_PREFIX)
#define UNREGISTER_FUNC UNREGSTR(MODULE_PREFIX)
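/*
 * With MODULE_PREFIX set to lustre_v1, REGISTER_FUNC and UNREGISTER_FUNC
 * expand to lustre_v1_register_nvfs_dma_ops() and
 * lustre_v1_unregister_nvfs_dma_ops().  The naming follows the nvidia-fs
 * (GPUDirect Storage) registration convention; the nvfs driver presumably
 * resolves these exported symbols by name when it attaches to Lustre.
 */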
#define NVFS_IO_ERR -1
#define NVFS_CPU_REQ -2

#define NVFS_HOLD_TIME_MS 1000

#define ERROR_PRINT_DEADLINE 3600
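/*
 * Provider state shared by the hooks below: nvfs_shutdown starts at 1 so no
 * callbacks are handed out before a provider registers, nvfs_ops is the
 * callback table supplied at registration time, and nvfs_n_ops counts
 * in-flight uses of that table so unregistration can wait for them to drain.
 */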
atomic_t nvfs_shutdown = ATOMIC_INIT(1);
struct nvfs_dma_rw_ops *nvfs_ops = NULL;
struct percpu_counter nvfs_n_ops;
static inline long nvfs_count_ops(void)
{
	return percpu_counter_sum(&nvfs_n_ops);
}
static struct nvfs_dma_rw_ops *nvfs_get_ops(void)
{
	if (!nvfs_ops || atomic_read(&nvfs_shutdown))
		return NULL;

	percpu_counter_inc(&nvfs_n_ops);

	return nvfs_ops;
}
static inline void nvfs_put_ops(void)
{
	percpu_counter_dec(&nvfs_n_ops);
}
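/*
 * nvfs_get_ops()/nvfs_put_ops() bracket every use of the provider
 * callbacks: the get returns NULL once shutdown is flagged (or before
 * anything is registered), otherwise it takes a reference that the caller
 * must drop with nvfs_put_ops().  Note that the DMA mapping path below
 * intentionally holds its reference until the matching unmap.
 */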
static inline bool nvfs_check_feature_set(struct nvfs_dma_rw_ops *ops)
{
	bool supported = true;
	static time64_t last_printed;

	if (unlikely(!NVIDIA_FS_CHECK_FT_SGLIST_PREP(ops))) {
		if ((ktime_get_seconds() - last_printed) > ERROR_PRINT_DEADLINE)
			CDEBUG(D_CONSOLE,
			       "NVFS sg list preparation callback missing\n");
		supported = false;
	}
	if (unlikely(!NVIDIA_FS_CHECK_FT_SGLIST_DMA(ops))) {
		if ((ktime_get_seconds() - last_printed) > ERROR_PRINT_DEADLINE)
			CDEBUG(D_CONSOLE,
			       "NVFS DMA mapping callbacks missing\n");
		supported = false;
	}
	if (unlikely(!NVIDIA_FS_CHECK_FT_GPU_PAGE(ops))) {
		if ((ktime_get_seconds() - last_printed) > ERROR_PRINT_DEADLINE)
			CDEBUG(D_CONSOLE,
			       "NVFS page identification callback missing\n");
		supported = false;
	}
	if (unlikely(!NVIDIA_FS_CHECK_FT_DEVICE_PRIORITY(ops))) {
		if ((ktime_get_seconds() - last_printed) > ERROR_PRINT_DEADLINE)
			CDEBUG(D_CONSOLE,
			       "NVFS device priority callback missing\n");
		supported = false;
	}

	if (unlikely(!supported &&
		     ((ktime_get_seconds() - last_printed) > ERROR_PRINT_DEADLINE)))
		last_printed = ktime_get_seconds();

	return supported;
}
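/*
 * Provider registration.  The caller hands in a struct nvfs_dma_rw_ops
 * whose callbacks must cover the feature set checked above, otherwise
 * registration is refused.  A hypothetical provider-side sketch follows;
 * the my_* names are illustrative only, and the feature-check macros may
 * require additional members beyond the ones used in this file:
 *
 *	static struct nvfs_dma_rw_ops my_ops = {
 *		.nvfs_dma_map_sg_attrs	= my_map_sg_attrs,
 *		.nvfs_dma_unmap_sg	= my_unmap_sg,
 *		.nvfs_is_gpu_page	= my_is_gpu_page,
 *		.nvfs_gpu_index		= my_gpu_index,
 *		.nvfs_device_priority	= my_device_priority,
 *	};
 *
 *	if (lustre_v1_register_nvfs_dma_ops(&my_ops) < 0)
 *		...;
 */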
int REGISTER_FUNC(struct nvfs_dma_rw_ops *ops)
{
	if (!ops || !nvfs_check_feature_set(ops))
		return -EINVAL;

	nvfs_ops = ops;
	(void)percpu_counter_init(&nvfs_n_ops, 0, GFP_KERNEL);
	atomic_set(&nvfs_shutdown, 0);
	CDEBUG(D_NET, "registering nvfs %p\n", ops);
	return 0;
}
EXPORT_SYMBOL(REGISTER_FUNC);
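/*
 * Unregistration flips nvfs_shutdown first so nvfs_get_ops() stops handing
 * out the callback table, then polls the in-flight counter every
 * NVFS_HOLD_TIME_MS until all outstanding users have dropped their
 * references.
 */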
void UNREGISTER_FUNC(void)
{
	(void)atomic_cmpxchg(&nvfs_shutdown, 0, 1);

	do {
		CDEBUG(D_NET, "Attempting to de-register nvfs: %ld\n",
		       nvfs_count_ops());
		msleep(NVFS_HOLD_TIME_MS);
	} while (nvfs_count_ops());

	nvfs_ops = NULL;

	percpu_counter_destroy(&nvfs_n_ops);
}
EXPORT_SYMBOL(UNREGISTER_FUNC);
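/*
 * lnet_get_dev_prio() asks the provider for the priority of @dev relative
 * to GPU @dev_idx; LNet presumably uses this to prefer the interface
 * closest to the GPU.  UINT_MAX is returned when no provider is available.
 */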
unsigned int
lnet_get_dev_prio(struct device *dev, unsigned int dev_idx)
{
	unsigned int dev_prio = UINT_MAX;
	struct nvfs_dma_rw_ops *nvfs_ops;

	nvfs_ops = nvfs_get_ops();
	if (!nvfs_ops)
		return dev_prio;

	dev_prio = nvfs_ops->nvfs_device_priority(dev, dev_idx);
	nvfs_put_ops();
	return dev_prio;
}
EXPORT_SYMBOL(lnet_get_dev_prio);
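/*
 * lnet_get_dev_idx() returns the index of the GPU backing @page as reported
 * by the provider, or UINT_MAX when no provider is registered.
 */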
unsigned int
lnet_get_dev_idx(struct page *page)
{
	unsigned int dev_idx = UINT_MAX;
	struct nvfs_dma_rw_ops *nvfs_ops;

	nvfs_ops = nvfs_get_ops();
	if (!nvfs_ops)
		return dev_idx;

	dev_idx = nvfs_ops->nvfs_gpu_index(page);
	nvfs_put_ops();
	return dev_idx;
}
EXPORT_SYMBOL(lnet_get_dev_idx);
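/*
 * DMA mapping hook.  The scatterlist is handed to the provider; a return of
 * 0 means no provider is registered, presumably leaving the caller to map
 * the list itself.  NVFS_IO_ERR is translated to -EIO, and NVFS_CPU_REQ
 * appears to signal that the provider wants the request serviced through
 * the normal CPU path, so the reference taken here is dropped.  On a
 * successful mapping the reference is kept and released by
 * lnet_rdma_unmap_sg().
 */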
int lnet_rdma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
			   int nents, enum dma_data_direction direction)
{
	struct nvfs_dma_rw_ops *nvfs_ops = nvfs_get_ops();
	int count;

	if (!nvfs_ops)
		return 0;

	count = nvfs_ops->nvfs_dma_map_sg_attrs(dev,
			sg, nents, direction,
			DMA_ATTR_NO_WARN);

	if (unlikely((count == NVFS_IO_ERR))) {
		nvfs_put_ops();
		return -EIO;
	}

	if (unlikely(count == NVFS_CPU_REQ))
		nvfs_put_ops();

	return count;
}
EXPORT_SYMBOL(lnet_rdma_map_sg_attrs);
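/*
 * DMA unmapping hook.  Besides undoing the mapping, this drops two
 * references: the one taken by the nvfs_get_ops() call in this function and
 * the one still held from the corresponding lnet_rdma_map_sg_attrs().
 */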
int lnet_rdma_unmap_sg(struct device *dev,
		       struct scatterlist *sg, int nents,
		       enum dma_data_direction direction)
{
	struct nvfs_dma_rw_ops *nvfs_ops = nvfs_get_ops();
	int count;

	if (!nvfs_ops)
		return 0;

	count = nvfs_ops->nvfs_dma_unmap_sg(dev, sg,
					    nents, direction);

	/* drop the count we got by calling nvfs_get_ops() */
	if (count)
		nvfs_put_ops();

	/* drop the count we got while mapping */
	if (count)
		nvfs_put_ops();

	return count;
}
EXPORT_SYMBOL(lnet_rdma_unmap_sg);
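/*
 * lnet_is_rdma_only_page() reports whether @page is a GPU page as seen by
 * the provider; such pages can only be transferred over RDMA (hence the
 * name).  Without a registered provider this always returns false.
 */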
bool
lnet_is_rdma_only_page(struct page *page)
{
	bool is_gpu_page = false;
	struct nvfs_dma_rw_ops *nvfs_ops;

	LASSERT(page != NULL);

	nvfs_ops = nvfs_get_ops();
	if (nvfs_ops != NULL) {
		is_gpu_page = nvfs_ops->nvfs_is_gpu_page(page);
		nvfs_put_ops();
	}
	return is_gpu_page;
}
EXPORT_SYMBOL(lnet_is_rdma_only_page);