// SPDX-License-Identifier: GPL-2.0

/* This file is part of Lustre, http://www.lustre.org/ */

#include <lnet/lnet_gds.h>
#include <lnet/lnet_rdma.h>
#include <libcfs/libcfs.h>

/* MAX / MIN conflict */
#include <lnet/lib-lnet.h>

#define NVFS_IO_ERR		-1
#define NVFS_CPU_REQ		-2

#define NVFS_HOLD_TIME_MS	1000

#define ERROR_PRINT_DEADLINE	3600
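
/*
 * Registration state for the nvidia-fs (GPUDirect Storage) callbacks.
 * nvfs_ops is the callback table handed to us by the nvidia-fs module,
 * nvfs_shutdown gates its use (it starts at 1, i.e. "not registered"),
 * and nvfs_n_ops counts in-flight users so that unregistration can wait
 * for them to drain.
 */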
atomic_t nvfs_shutdown = ATOMIC_INIT(1);
struct nvfs_dma_rw_ops *nvfs_ops = NULL;
struct percpu_counter nvfs_n_ops;

static inline long nvfs_count_ops(void)
{
	return percpu_counter_sum(&nvfs_n_ops);
}
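
/*
 * nvfs_get_ops()/nvfs_put_ops() bracket every use of the callback table:
 * a successful nvfs_get_ops() bumps nvfs_n_ops and returns the table, or
 * returns NULL if nothing is registered or a shutdown is in progress.
 * Every successful get must be balanced by a put.
 */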
static struct nvfs_dma_rw_ops *nvfs_get_ops(void)
{
	if (!nvfs_ops || atomic_read(&nvfs_shutdown))
		return NULL;

	percpu_counter_inc(&nvfs_n_ops);

	return nvfs_ops;
}

static inline void nvfs_put_ops(void)
{
	percpu_counter_dec(&nvfs_n_ops);
}
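
/*
 * Check that the registered callback table provides every operation this
 * file relies on.  Complaints about missing callbacks are rate-limited to
 * one batch per ERROR_PRINT_DEADLINE seconds.
 */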
static inline bool nvfs_check_feature_set(struct nvfs_dma_rw_ops *ops)
{
	bool supported = true;
	static time64_t last_printed;

	if (unlikely(!NVIDIA_FS_CHECK_FT_SGLIST_PREP(ops))) {
		if ((ktime_get_seconds() - last_printed) > ERROR_PRINT_DEADLINE)
			CDEBUG(D_CONSOLE,
			       "NVFS sg list preparation callback missing\n");
		supported = false;
	}
	if (unlikely(!NVIDIA_FS_CHECK_FT_SGLIST_DMA(ops))) {
		if ((ktime_get_seconds() - last_printed) > ERROR_PRINT_DEADLINE)
			CDEBUG(D_CONSOLE,
			       "NVFS DMA mapping callbacks missing\n");
		supported = false;
	}
	if (unlikely(!NVIDIA_FS_CHECK_FT_GPU_PAGE(ops))) {
		if ((ktime_get_seconds() - last_printed) > ERROR_PRINT_DEADLINE)
			CDEBUG(D_CONSOLE,
			       "NVFS page identification callback missing\n");
		supported = false;
	}
	if (unlikely(!NVIDIA_FS_CHECK_FT_DEVICE_PRIORITY(ops))) {
		if ((ktime_get_seconds() - last_printed) > ERROR_PRINT_DEADLINE)
			CDEBUG(D_CONSOLE,
			       "NVFS device priority callback missing\n");
		supported = false;
	}

	if (unlikely(!supported &&
		     ((ktime_get_seconds() - last_printed) > ERROR_PRINT_DEADLINE)))
		last_printed = ktime_get_seconds();

	return supported;
}
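
/*
 * REGISTER_FUNC()/UNREGISTER_FUNC() are the entry points the nvidia-fs
 * module calls to install and remove its DMA callback table.  A
 * hypothetical caller-side sketch (the table name is illustrative only):
 *
 *	static struct nvfs_dma_rw_ops my_gds_ops = { ... };
 *
 *	if (REGISTER_FUNC(&my_gds_ops))
 *		pr_err("lnet rejected the GDS callback table\n");
 *	...
 *	UNREGISTER_FUNC();
 */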
int REGISTER_FUNC(struct nvfs_dma_rw_ops *ops)
{
	if (!ops || !nvfs_check_feature_set(ops))
		return -EINVAL;

	nvfs_ops = ops;
	(void)percpu_counter_init(&nvfs_n_ops, 0, GFP_KERNEL);
	atomic_set(&nvfs_shutdown, 0);
	CDEBUG(D_NET, "registering nvfs %p\n", ops);
	return 0;
}
EXPORT_SYMBOL(REGISTER_FUNC);
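
/*
 * Tear down the registration: flag shutdown so nvfs_get_ops() starts
 * failing, then poll until all in-flight users have dropped their
 * references before destroying the counter.
 */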
void UNREGISTER_FUNC(void)
{
	(void)atomic_cmpxchg(&nvfs_shutdown, 0, 1);
	do {
		CDEBUG(D_NET, "Attempting to de-register nvfs: %ld\n",
		       nvfs_count_ops());
		msleep(NVFS_HOLD_TIME_MS);
	} while (nvfs_count_ops());
	nvfs_ops = NULL;
	percpu_counter_destroy(&nvfs_n_ops);
}
EXPORT_SYMBOL(UNREGISTER_FUNC);
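
/*
 * Report the priority of @dev with respect to GPU index @dev_idx, as seen
 * by the registered callbacks.  UINT_MAX means no information is available
 * (no device, or no callbacks registered); lower values let the selection
 * code prefer the interface closest to the GPU that owns the buffer.
 */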
unsigned int
lnet_get_dev_prio(struct device *dev, unsigned int dev_idx)
{
	unsigned int dev_prio = UINT_MAX;
	struct nvfs_dma_rw_ops *nvfs_ops;

	if (!dev)
		return dev_prio;

	nvfs_ops = nvfs_get_ops();
	if (!nvfs_ops)
		return dev_prio;

	dev_prio = nvfs_ops->nvfs_device_priority(dev, dev_idx);

	nvfs_put_ops();
	return dev_prio;
}
EXPORT_SYMBOL(lnet_get_dev_prio);
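
/*
 * Return the index of the GPU backing @page, or UINT_MAX if no callbacks
 * are registered.
 */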
unsigned int
lnet_get_dev_idx(struct page *page)
{
	unsigned int dev_idx = UINT_MAX;
	struct nvfs_dma_rw_ops *nvfs_ops;

	nvfs_ops = nvfs_get_ops();
	if (!nvfs_ops)
		return dev_idx;

	dev_idx = nvfs_ops->nvfs_gpu_index(page);

	nvfs_put_ops();
	return dev_idx;
}
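
/*
 * DMA-map a scatterlist through the GDS callbacks.  Returns the number of
 * mapped entries, -EIO on a mapping failure, or 0 when no callbacks are
 * registered or the buffer is host memory (NVFS_CPU_REQ).  A caller that
 * wants to fall back to the regular mapping path on a zero or negative
 * return can, for example:
 *
 *	count = lnet_rdma_map_sg_attrs(dev, sg, nents, dir);
 *	if (count <= 0)
 *		count = dma_map_sg(dev, sg, nents, dir);
 */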
int lnet_rdma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
			   int nents, enum dma_data_direction direction)
{
	struct nvfs_dma_rw_ops *nvfs_ops = nvfs_get_ops();
	int count;

	if (!nvfs_ops)
		return 0;

	count = nvfs_ops->nvfs_dma_map_sg_attrs(dev,
			sg, nents, direction,
			DMA_ATTR_NO_WARN);

	if (unlikely((count == NVFS_IO_ERR))) {
		nvfs_put_ops();
		return -EIO;
	}

	if (unlikely(count == NVFS_CPU_REQ))
		nvfs_put_ops();
	else
		/* on success, keep the reference until lnet_rdma_unmap_sg() */
		return count;

	return 0;
}
EXPORT_SYMBOL(lnet_rdma_map_sg_attrs);
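
/*
 * Undo lnet_rdma_map_sg_attrs().  A non-zero return means GPU pages were
 * unmapped; in that case the reference kept by the map call is dropped
 * here as well, which is what allows UNREGISTER_FUNC()'s drain loop to
 * finish.
 */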
int lnet_rdma_unmap_sg(struct device *dev,
		       struct scatterlist *sg, int nents,
		       enum dma_data_direction direction)
{
	struct nvfs_dma_rw_ops *nvfs_ops = nvfs_get_ops();
	int count;

	if (!nvfs_ops)
		return 0;

	count = nvfs_ops->nvfs_dma_unmap_sg(dev, sg,
					    nents, direction);

	/* drop the count we got by calling nvfs_get_ops() */
	nvfs_put_ops();

	if (count)
		/* drop the reference kept by lnet_rdma_map_sg_attrs() */
		nvfs_put_ops();

	return count;
}
EXPORT_SYMBOL(lnet_rdma_unmap_sg);
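
/*
 * True if @page is GPU memory as identified by the registered callbacks,
 * i.e. a page that can only be moved by RDMA rather than copied by the
 * CPU.
 */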
bool
lnet_is_rdma_only_page(struct page *page)
{
	bool is_gpu_page = false;
	struct nvfs_dma_rw_ops *nvfs_ops;

	LASSERT(page != NULL);

	nvfs_ops = nvfs_get_ops();
	if (nvfs_ops != NULL) {
		is_gpu_page = nvfs_ops->nvfs_is_gpu_page(page);
		nvfs_put_ops();
	}
	return is_gpu_page;
}
EXPORT_SYMBOL(lnet_is_rdma_only_page);