4 #include <lnet/lnet_gds.h>
7 #include <lnet/lnet_rdma.h>
8 #include <libcfs/libcfs.h>
10 /* MAX / MIN conflict */
11 #include <lnet/lib-lnet.h>
/*
 * Build the exported NVFS registration entry-point names from MODULE_PREFIX,
 * e.g. lustre_v1_register_nvfs_dma_ops / lustre_v1_unregister_nvfs_dma_ops.
 * The two-level expansion (REGSTR -> REGSTR2) is required so MODULE_PREFIX
 * is macro-expanded *before* the ## token paste.
 */
#define REGSTR2(x) x##_register_nvfs_dma_ops
#define REGSTR(x) REGSTR2(x)

#define UNREGSTR2(x) x##_unregister_nvfs_dma_ops
#define UNREGSTR(x) UNREGSTR2(x)

#define MODULE_PREFIX lustre_v1

#define REGISTER_FUNC REGSTR(MODULE_PREFIX)
#define UNREGISTER_FUNC UNREGSTR(MODULE_PREFIX)

/* Sentinel return values from the NVFS DMA mapping callback */
#define NVFS_IO_ERR -1		/* mapping failed */
#define NVFS_CPU_REQ -2		/* NVFS requests the regular CPU DMA path */

/* Delay (ms) between unregister retries while operations are in flight */
#define NVFS_HOLD_TIME_MS 1000

/* Rate-limit interval (seconds) for missing-feature error messages */
#define ERROR_PRINT_DEADLINE 3600
31 atomic_t nvfs_shutdown = ATOMIC_INIT(1);
32 struct nvfs_dma_rw_ops *nvfs_ops = NULL;
33 struct percpu_counter nvfs_n_ops;
35 static inline long nvfs_count_ops(void)
37 return percpu_counter_sum(&nvfs_n_ops);
40 static struct nvfs_dma_rw_ops *nvfs_get_ops(void)
42 if (!nvfs_ops || atomic_read(&nvfs_shutdown))
45 percpu_counter_inc(&nvfs_n_ops);
50 static inline void nvfs_put_ops(void)
52 percpu_counter_dec(&nvfs_n_ops);
55 static inline bool nvfs_check_feature_set(struct nvfs_dma_rw_ops *ops)
57 bool supported = true;
58 static time64_t last_printed;
60 if (unlikely(!NVIDIA_FS_CHECK_FT_SGLIST_PREP(ops))) {
61 if ((ktime_get_seconds() - last_printed) > ERROR_PRINT_DEADLINE)
63 "NVFS sg list preparation callback missing\n");
66 if (unlikely(!NVIDIA_FS_CHECK_FT_SGLIST_DMA(ops))) {
67 if ((ktime_get_seconds() - last_printed) > ERROR_PRINT_DEADLINE)
69 "NVFS DMA mapping callbacks missing\n");
72 if (unlikely(!NVIDIA_FS_CHECK_FT_GPU_PAGE(ops))) {
73 if ((ktime_get_seconds() - last_printed) > ERROR_PRINT_DEADLINE)
75 "NVFS page identification callback missing\n");
78 if (unlikely(!NVIDIA_FS_CHECK_FT_DEVICE_PRIORITY(ops))) {
79 if ((ktime_get_seconds() - last_printed) > ERROR_PRINT_DEADLINE)
81 "NVFS device priority callback not missing\n");
85 if (unlikely(!supported &&
86 ((ktime_get_seconds() - last_printed) > ERROR_PRINT_DEADLINE)))
87 last_printed = ktime_get_seconds();
94 int REGISTER_FUNC(struct nvfs_dma_rw_ops *ops)
96 if (!ops || !nvfs_check_feature_set(ops))
100 (void)percpu_counter_init(&nvfs_n_ops, 0, GFP_KERNEL);
101 atomic_set(&nvfs_shutdown, 0);
102 CDEBUG(D_NET, "registering nvfs %p\n", ops);
105 EXPORT_SYMBOL(REGISTER_FUNC);
107 void UNREGISTER_FUNC(void)
109 (void)atomic_cmpxchg(&nvfs_shutdown, 0, 1);
111 CDEBUG(D_NET, "Attempting to de-register nvfs: %ld\n",
113 msleep(NVFS_HOLD_TIME_MS);
114 } while (nvfs_count_ops());
116 percpu_counter_destroy(&nvfs_n_ops);
118 EXPORT_SYMBOL(UNREGISTER_FUNC);
121 lnet_get_dev_prio(struct device *dev, unsigned int dev_idx)
123 unsigned int dev_prio = UINT_MAX;
124 struct nvfs_dma_rw_ops *nvfs_ops;
129 nvfs_ops = nvfs_get_ops();
133 dev_prio = nvfs_ops->nvfs_device_priority (dev, dev_idx);
138 EXPORT_SYMBOL(lnet_get_dev_prio);
141 lnet_get_dev_idx(struct page *page)
143 unsigned int dev_idx = UINT_MAX;
144 struct nvfs_dma_rw_ops *nvfs_ops;
146 nvfs_ops = nvfs_get_ops();
150 dev_idx = nvfs_ops->nvfs_gpu_index(page);
156 int lnet_rdma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
157 int nents, enum dma_data_direction direction)
159 struct nvfs_dma_rw_ops *nvfs_ops = nvfs_get_ops();
164 count = nvfs_ops->nvfs_dma_map_sg_attrs(dev,
165 sg, nents, direction,
168 if (unlikely((count == NVFS_IO_ERR))) {
173 if (unlikely(count == NVFS_CPU_REQ))
181 EXPORT_SYMBOL(lnet_rdma_map_sg_attrs);
183 int lnet_rdma_unmap_sg(struct device *dev,
184 struct scatterlist *sg, int nents,
185 enum dma_data_direction direction)
187 struct nvfs_dma_rw_ops *nvfs_ops = nvfs_get_ops();
192 count = nvfs_ops->nvfs_dma_unmap_sg(dev, sg,
195 /* drop the count we got by calling nvfs_get_ops() */
206 EXPORT_SYMBOL(lnet_rdma_unmap_sg);
209 lnet_is_rdma_only_page(struct page *page)
212 struct nvfs_dma_rw_ops *nvfs_ops;
217 nvfs_ops = nvfs_get_ops();
221 if (!nvfs_ops->nvfs_is_gpu_page(page))
230 EXPORT_SYMBOL(lnet_is_rdma_only_page);