// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */

/* This file is part of Lustre, http://www.lustre.org/ */
#define DEBUG_SUBSYSTEM S_LNET

#include <linux/miscdevice.h>
#include <lnet/lib-lnet.h>
#include <uapi/linux/lnet/lnet-dlc.h>
#include <uapi/linux/lustre/lustre_ver.h>
18 static int config_on_load = 0;
19 module_param(config_on_load, int, 0444);
20 MODULE_PARM_DESC(config_on_load, "configure network at module load");
22 static DEFINE_MUTEX(lnet_config_mutex);
24 int lnet_configure(void *arg)
26 /* 'arg' only there so I can be passed to cfs_create_thread() */
29 mutex_lock(&lnet_config_mutex);
31 if (!the_lnet.ln_niinit_self) {
32 rc = try_module_get(THIS_MODULE);
37 rc = LNetNIInit(LNET_PID_LUSTRE);
39 the_lnet.ln_niinit_self = 1;
42 module_put(THIS_MODULE);
47 mutex_unlock(&lnet_config_mutex);
51 int lnet_unconfigure(void)
55 mutex_lock(&lnet_config_mutex);
57 if (the_lnet.ln_niinit_self) {
58 the_lnet.ln_niinit_self = 0;
60 module_put(THIS_MODULE);
63 mutex_lock(&the_lnet.ln_api_mutex);
64 refcount = the_lnet.ln_refcount;
65 mutex_unlock(&the_lnet.ln_api_mutex);
67 mutex_unlock(&lnet_config_mutex);
69 return (refcount == 0) ? 0 : -EBUSY;
73 lnet_dyn_configure_net(struct libcfs_ioctl_hdr *hdr)
75 struct lnet_ioctl_config_data *conf =
76 (struct lnet_ioctl_config_data *)hdr;
79 if (conf->cfg_hdr.ioc_len < sizeof(*conf))
82 mutex_lock(&lnet_config_mutex);
83 if (the_lnet.ln_niinit_self)
84 rc = lnet_dyn_add_net(conf);
87 mutex_unlock(&lnet_config_mutex);
93 lnet_dyn_unconfigure_net(struct libcfs_ioctl_hdr *hdr)
95 struct lnet_ioctl_config_data *conf =
96 (struct lnet_ioctl_config_data *) hdr;
99 if (conf->cfg_hdr.ioc_len < sizeof(*conf))
102 mutex_lock(&lnet_config_mutex);
103 if (the_lnet.ln_niinit_self)
104 rc = lnet_dyn_del_net(conf->cfg_net);
107 mutex_unlock(&lnet_config_mutex);
113 lnet_dyn_configure_ni(struct libcfs_ioctl_hdr *hdr)
115 struct lnet_ioctl_config_ni *conf =
116 (struct lnet_ioctl_config_ni *)hdr;
119 if (conf->lic_cfg_hdr.ioc_len < sizeof(*conf))
122 mutex_lock(&lnet_config_mutex);
123 if (the_lnet.ln_niinit_self) {
124 struct lnet_ioctl_config_lnd_tunables *tun = NULL;
128 /* get the tunables if they are available */
129 if (conf->lic_cfg_hdr.ioc_len >=
130 sizeof(*conf) + sizeof(*tun))
131 tun = (struct lnet_ioctl_config_lnd_tunables *) conf->lic_bulk;
133 lnet_nid4_to_nid(conf->lic_nid, &nid);
134 net_id = LNET_NID_NET(&nid);
135 rc = lnet_dyn_add_ni(conf, net_id, &LNET_ANY_NID, tun);
137 mutex_unlock(&lnet_config_mutex);
143 lnet_dyn_unconfigure_ni(struct libcfs_ioctl_hdr *hdr)
145 struct lnet_ioctl_config_ni *conf =
146 (struct lnet_ioctl_config_ni *) hdr;
150 if (conf->lic_cfg_hdr.ioc_len < sizeof(*conf) ||
151 !the_lnet.ln_niinit_self)
154 lnet_nid4_to_nid(conf->lic_nid, &nid);
155 mutex_lock(&lnet_config_mutex);
156 if (the_lnet.ln_niinit_self)
157 rc = lnet_dyn_del_ni(&nid);
160 mutex_unlock(&lnet_config_mutex);
166 lnet_ioctl(unsigned int cmd, struct libcfs_ioctl_hdr *hdr)
171 case IOC_LIBCFS_CONFIGURE: {
172 struct libcfs_ioctl_data *data =
173 (struct libcfs_ioctl_data *)hdr;
175 if (data->ioc_hdr.ioc_len < sizeof(*data)) {
178 the_lnet.ln_nis_from_mod_params = data->ioc_flags;
179 rc = lnet_configure(NULL);
184 case IOC_LIBCFS_UNCONFIGURE:
185 rc = lnet_unconfigure();
188 case IOC_LIBCFS_ADD_NET:
189 rc = lnet_dyn_configure_net(hdr);
192 case IOC_LIBCFS_DEL_NET:
193 rc = lnet_dyn_unconfigure_net(hdr);
196 case IOC_LIBCFS_ADD_LOCAL_NI:
197 rc = lnet_dyn_configure_ni(hdr);
200 case IOC_LIBCFS_DEL_LOCAL_NI:
201 rc = lnet_dyn_unconfigure_ni(hdr);
205 /* Passing LNET_PID_ANY only gives me a ref if the net is up
206 * already; I'll need it to ensure the net can't go down while
207 * I'm called into it */
208 rc = LNetNIInit(LNET_PID_ANY);
210 rc = LNetCtl(cmd, hdr);
/* Notifier chain offered any ioctl the LNet core rejects with -EINVAL,
 * letting other modules register additional ioctl handlers. */
BLOCKING_NOTIFIER_HEAD(lnet_ioctl_list);
EXPORT_SYMBOL(lnet_ioctl_list);
220 static inline size_t lnet_ioctl_packlen(struct libcfs_ioctl_data *data)
222 size_t len = sizeof(*data);
224 len += (data->ioc_inllen1 + 7) & ~7;
225 len += (data->ioc_inllen2 + 7) & ~7;
229 static bool lnet_ioctl_is_invalid(struct libcfs_ioctl_data *data)
231 const int maxlen = 1 << 30;
233 if (data->ioc_hdr.ioc_len > maxlen)
236 if (data->ioc_inllen1 > maxlen)
239 if (data->ioc_inllen2 > maxlen)
242 if (data->ioc_inlbuf1 && !data->ioc_inllen1)
245 if (data->ioc_inlbuf2 && !data->ioc_inllen2)
248 if (data->ioc_pbuf1 && !data->ioc_plen1)
251 if (data->ioc_pbuf2 && !data->ioc_plen2)
254 if (data->ioc_plen1 && !data->ioc_pbuf1)
257 if (data->ioc_plen2 && !data->ioc_pbuf2)
260 if (lnet_ioctl_packlen(data) != data->ioc_hdr.ioc_len)
263 if (data->ioc_inllen1 &&
264 data->ioc_bulk[((data->ioc_inllen1 + 7) & ~7) +
265 data->ioc_inllen2 - 1] != '\0')
271 static int lnet_ioctl_data_adjust(struct libcfs_ioctl_data *data)
275 if (lnet_ioctl_is_invalid(data)) {
276 CERROR("lnet ioctl: parameter not correctly formatted\n");
280 if (data->ioc_inllen1 != 0)
281 data->ioc_inlbuf1 = &data->ioc_bulk[0];
283 if (data->ioc_inllen2 != 0)
284 data->ioc_inlbuf2 = (&data->ioc_bulk[0] +
285 round_up(data->ioc_inllen1, 8));
290 static int lnet_ioctl_getdata(struct libcfs_ioctl_hdr **hdr_pp,
291 struct libcfs_ioctl_hdr __user *uhdr)
293 struct libcfs_ioctl_hdr hdr;
297 if (copy_from_user(&hdr, uhdr, sizeof(hdr)))
300 if (hdr.ioc_version != LNET_IOCTL_VERSION &&
301 hdr.ioc_version != LNET_IOCTL_VERSION2) {
302 CERROR("lnet ioctl: version mismatch expected %#x, got %#x\n",
303 LNET_IOCTL_VERSION, hdr.ioc_version);
307 if (hdr.ioc_len < sizeof(struct libcfs_ioctl_hdr)) {
308 CERROR("lnet ioctl: user buffer too small for ioctl\n");
312 if (hdr.ioc_len > LIBCFS_IOC_DATA_MAX) {
313 CERROR("lnet ioctl: user buffer is too large %d/%d\n",
314 hdr.ioc_len, LIBCFS_IOC_DATA_MAX);
318 LIBCFS_ALLOC(*hdr_pp, hdr.ioc_len);
322 if (copy_from_user(*hdr_pp, uhdr, hdr.ioc_len))
323 GOTO(free, err = -EFAULT);
325 if ((*hdr_pp)->ioc_version != hdr.ioc_version ||
326 (*hdr_pp)->ioc_len != hdr.ioc_len) {
327 GOTO(free, err = -EINVAL);
333 LIBCFS_FREE(*hdr_pp, hdr.ioc_len);
338 lnet_psdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
340 struct libcfs_ioctl_data *data = NULL;
341 struct libcfs_ioctl_hdr *hdr;
343 void __user *uparam = (void __user *)arg;
346 if (!capable(CAP_SYS_ADMIN))
349 if (_IOC_TYPE(cmd) != IOC_LIBCFS_TYPE ||
350 _IOC_NR(cmd) < IOC_LIBCFS_MIN_NR ||
351 _IOC_NR(cmd) > IOC_LIBCFS_MAX_NR) {
352 CDEBUG(D_IOCTL, "invalid ioctl ( type %d, nr %d, size %d )\n",
353 _IOC_TYPE(cmd), _IOC_NR(cmd), _IOC_SIZE(cmd));
357 /* 'cmd' and permissions get checked in our arch-specific caller */
358 err = lnet_ioctl_getdata(&hdr, uparam);
360 CDEBUG_LIMIT(D_ERROR,
361 "lnet ioctl: data header error %d\n", err);
365 if (hdr->ioc_version == LNET_IOCTL_VERSION) {
366 /* The lnet_ioctl_data_adjust() function performs adjustment
367 * operations on the libcfs_ioctl_data structure to make
368 * it usable by the code. This doesn't need to be called
369 * for new data structures added.
371 data = container_of(hdr, struct libcfs_ioctl_data, ioc_hdr);
372 err = lnet_ioctl_data_adjust(data);
377 CDEBUG(D_IOCTL, "lnet ioctl cmd %u\n", cmd);
379 #if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 18, 53, 0)
380 err = libcfs_ioctl(cmd, data);
383 err = lnet_ioctl(cmd, hdr);
384 if (err == -EINVAL) {
385 err = blocking_notifier_call_chain(&lnet_ioctl_list,
387 if (!(err & NOTIFY_STOP_MASK))
388 /* No-one claimed the ioctl */
391 err = notifier_to_errno(err);
393 if (copy_to_user(uparam, hdr, hdr->ioc_len) && !err)
396 LIBCFS_FREE(hdr, hdr->ioc_len);
400 static const struct file_operations lnet_fops = {
401 .owner = THIS_MODULE,
402 .unlocked_ioctl = lnet_psdev_ioctl,
405 static struct miscdevice lnet_dev = {
406 .minor = MISC_DYNAMIC_MINOR,
411 static int __init lnet_init(void)
422 CERROR("cfs_cpu_init: rc = %d\n", rc);
426 rc = lnet_lib_init();
428 CERROR("lnet_lib_init: rc = %d\n", rc);
433 rc = misc_register(&lnet_dev);
435 CERROR("misc_register: rc = %d\n", rc);
440 if (live_router_check_interval != INT_MIN ||
441 dead_router_check_interval != INT_MIN)
442 LCONSOLE_WARN("live_router_check_interval and dead_router_check_interval have been deprecated. Use alive_router_check_interval instead. Ignoring these deprecated parameters.\n");
444 if (config_on_load) {
445 /* Have to schedule a separate thread to avoid deadlocking
447 (void)kthread_run(lnet_configure, NULL, "lnet_initd");
453 static void __exit lnet_exit(void)
455 misc_deregister(&lnet_dev);
462 MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
463 MODULE_DESCRIPTION("Lustre Networking layer");
464 MODULE_VERSION(LNET_VERSION);
465 MODULE_LICENSE("GPL");
467 module_init(lnet_init);
468 module_exit(lnet_exit);