1 // SPDX-License-Identifier: GPL-2.0
3 /* Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
4  * Use is subject to license terms.
6  * Copyright (c) 2012, 2017, Intel Corporation.
 */
9 /* This file is part of Lustre, http://www.lustre.org/ */
11 #define DEBUG_SUBSYSTEM S_LNET
13 #include <linux/miscdevice.h>
14 #include <lnet/lib-lnet.h>
15 #include <uapi/linux/lnet/lnet-dlc.h>
/* When non-zero, bring the LNet network up from a kthread spawned at
 * module load (see lnet_init()); read-only once loaded (perm 0444). */
17 static int config_on_load = 0;
18 module_param(config_on_load, int, 0444);
19 MODULE_PARM_DESC(config_on_load, "configure network at module load");
/* Serializes lnet_configure()/lnet_unconfigure() and the dynamic
 * net/NI add/del ioctl handlers below. */
21 static DEFINE_MUTEX(lnet_config_mutex);
/* Bring the LNet stack up via LNetNIInit(), at most once, under
 * lnet_config_mutex.  On success the module pins itself with
 * try_module_get() and records the fact in the_lnet.ln_niinit_self so
 * lnet_unconfigure() can undo it later.
 * NOTE(review): lines are elided in this extract; presumably the
 * module_put() below runs only on the LNetNIInit() failure path and the
 * function returns rc -- confirm against the full source. */
23 int lnet_configure(void *arg)
25 /* 'arg' only there so I can be passed to cfs_create_thread() */
28 mutex_lock(&lnet_config_mutex);
30 if (!the_lnet.ln_niinit_self) {
/* Pin the module so it cannot be unloaded while the net is up. */
31 rc = try_module_get(THIS_MODULE);
36 rc = LNetNIInit(LNET_PID_LUSTRE);
/* Remember that *we* initialized the NIs (vs. some other LNetNIInit user). */
38 the_lnet.ln_niinit_self = 1;
/* presumably the failure path: drop the module ref taken above */
41 module_put(THIS_MODULE);
46 mutex_unlock(&lnet_config_mutex);
/* Undo lnet_configure(): clear ln_niinit_self and release the module
 * reference, then report whether the net is actually down.
 * Returns 0 when no users remain, -EBUSY when ln_refcount is still
 * non-zero (somebody else keeps the net up).
 * NOTE(review): an LNetNIFini() call is presumably elided between the
 * flag clear and module_put() -- confirm against the full source. */
50 int lnet_unconfigure(void)
54 mutex_lock(&lnet_config_mutex);
56 if (the_lnet.ln_niinit_self) {
57 the_lnet.ln_niinit_self = 0;
59 module_put(THIS_MODULE);
/* Sample ln_refcount under the API mutex for a consistent read. */
62 mutex_lock(&the_lnet.ln_api_mutex);
63 refcount = the_lnet.ln_refcount;
64 mutex_unlock(&the_lnet.ln_api_mutex);
66 mutex_unlock(&lnet_config_mutex);
/* 0 == fully unconfigured; -EBUSY == other users still hold the net. */
68 return (refcount == 0) ? 0 : -EBUSY;
/* IOC_LIBCFS_ADD_NET handler: add a network at runtime.
 * Rejects short user buffers and only proceeds when the stack was
 * brought up through lnet_configure() (ln_niinit_self set). */
72 lnet_dyn_configure_net(struct libcfs_ioctl_hdr *hdr)
74 struct lnet_ioctl_config_data *conf =
75 (struct lnet_ioctl_config_data *)hdr;
/* Userspace must have supplied at least a full config_data struct. */
78 if (conf->cfg_hdr.ioc_len < sizeof(*conf))
81 mutex_lock(&lnet_config_mutex);
82 if (the_lnet.ln_niinit_self)
83 rc = lnet_dyn_add_net(conf);
86 mutex_unlock(&lnet_config_mutex);
/* IOC_LIBCFS_DEL_NET handler: remove a network at runtime.
 * Mirror of lnet_dyn_configure_net(): same length check, same
 * ln_niinit_self gating under lnet_config_mutex. */
92 lnet_dyn_unconfigure_net(struct libcfs_ioctl_hdr *hdr)
94 struct lnet_ioctl_config_data *conf =
95 (struct lnet_ioctl_config_data *) hdr;
98 if (conf->cfg_hdr.ioc_len < sizeof(*conf))
101 mutex_lock(&lnet_config_mutex);
102 if (the_lnet.ln_niinit_self)
/* Only the net number is needed to identify what to tear down. */
103 rc = lnet_dyn_del_net(conf->cfg_net);
106 mutex_unlock(&lnet_config_mutex);
/* IOC_LIBCFS_ADD_LOCAL_NI handler: add a local network interface.
 * Optional LND tunables may follow the fixed struct inside lic_bulk;
 * they are passed through to lnet_dyn_add_ni() when present. */
112 lnet_dyn_configure_ni(struct libcfs_ioctl_hdr *hdr)
114 struct lnet_ioctl_config_ni *conf =
115 (struct lnet_ioctl_config_ni *)hdr;
118 if (conf->lic_cfg_hdr.ioc_len < sizeof(*conf))
121 mutex_lock(&lnet_config_mutex);
122 if (the_lnet.ln_niinit_self) {
123 struct lnet_ioctl_config_lnd_tunables *tun = NULL;
127 /* get the tunables if they are available */
128 if (conf->lic_cfg_hdr.ioc_len >=
129 sizeof(*conf) + sizeof(*tun))
130 tun = (struct lnet_ioctl_config_lnd_tunables *) conf->lic_bulk;
/* Derive the target network from the legacy 4-byte NID userspace sent. */
132 lnet_nid4_to_nid(conf->lic_nid, &nid);
133 net_id = LNET_NID_NET(&nid);
134 rc = lnet_dyn_add_ni(conf, net_id, &LNET_ANY_NID, tun);
136 mutex_unlock(&lnet_config_mutex);
/* IOC_LIBCFS_DEL_LOCAL_NI handler: remove a local network interface,
 * identified by the (legacy 4-byte) NID supplied by userspace. */
142 lnet_dyn_unconfigure_ni(struct libcfs_ioctl_hdr *hdr)
144 struct lnet_ioctl_config_ni *conf =
145 (struct lnet_ioctl_config_ni *) hdr;
/* NOTE(review): ln_niinit_self is read here without lnet_config_mutex
 * (cheap early-out) and re-checked under the lock below -- the locked
 * check is the authoritative one. */
149 if (conf->lic_cfg_hdr.ioc_len < sizeof(*conf) ||
150 !the_lnet.ln_niinit_self)
153 lnet_nid4_to_nid(conf->lic_nid, &nid);
154 mutex_lock(&lnet_config_mutex);
155 if (the_lnet.ln_niinit_self)
156 rc = lnet_dyn_del_ni(&nid);
159 mutex_unlock(&lnet_config_mutex);
/* LNet's ioctl dispatcher, called from lnet_psdev_ioctl() after the
 * payload has been copied in and validated.  Configuration commands are
 * handled locally; everything else falls through to LNetCtl(). */
165 lnet_ioctl(unsigned int cmd, struct libcfs_ioctl_hdr *hdr)
170 case IOC_LIBCFS_CONFIGURE: {
171 struct libcfs_ioctl_data *data =
172 (struct libcfs_ioctl_data *)hdr;
174 if (data->ioc_hdr.ioc_len < sizeof(*data)) {
/* ioc_flags tells LNet whether its NIs come from module parameters. */
177 the_lnet.ln_nis_from_mod_params = data->ioc_flags;
178 rc = lnet_configure(NULL);
183 case IOC_LIBCFS_UNCONFIGURE:
184 rc = lnet_unconfigure();
187 case IOC_LIBCFS_ADD_NET:
188 rc = lnet_dyn_configure_net(hdr);
191 case IOC_LIBCFS_DEL_NET:
192 rc = lnet_dyn_unconfigure_net(hdr);
195 case IOC_LIBCFS_ADD_LOCAL_NI:
196 rc = lnet_dyn_configure_ni(hdr);
199 case IOC_LIBCFS_DEL_LOCAL_NI:
200 rc = lnet_dyn_unconfigure_ni(hdr);
/* default case (opening elided in this extract): take a temporary
 * reference on the net, then hand the command to LNetCtl(). */
204 /* Passing LNET_PID_ANY only gives me a ref if the net is up
205 * already; I'll need it to ensure the net can't go down while
206 * I'm called into it */
207 rc = LNetNIInit(LNET_PID_ANY);
209 rc = LNetCtl(cmd, hdr);
/* Notifier chain letting other modules register handlers for ioctls
 * that the built-in dispatch rejects with -EINVAL (see
 * lnet_psdev_ioctl() below).  Exported so they can subscribe. */
216 BLOCKING_NOTIFIER_HEAD(lnet_ioctl_list);
217 EXPORT_SYMBOL(lnet_ioctl_list);
/* Expected total length of a libcfs_ioctl_data payload: the fixed
 * struct plus both inline buffers, each rounded up to 8 bytes (they
 * are packed back to back in ioc_bulk on 8-byte boundaries). */
219 static inline size_t lnet_ioctl_packlen(struct libcfs_ioctl_data *data)
221 size_t len = sizeof(*data);
223 len += (data->ioc_inllen1 + 7) & ~7;
224 len += (data->ioc_inllen2 + 7) & ~7;
/* Sanity-check a user-supplied libcfs_ioctl_data: sane lengths,
 * consistent buffer/length pairs, total length matching the declared
 * inline buffers, and NUL-terminated inline data.
 * Returns true when the request is malformed. */
228 static bool lnet_ioctl_is_invalid(struct libcfs_ioctl_data *data)
/* Arbitrary 1 GiB cap to reject absurd requests outright. */
230 const int maxlen = 1 << 30;
232 if (data->ioc_hdr.ioc_len > maxlen)
235 if (data->ioc_inllen1 > maxlen)
238 if (data->ioc_inllen2 > maxlen)
/* A buffer pointer without a length -- or a length without a
 * pointer (below) -- is malformed. */
241 if (data->ioc_inlbuf1 && !data->ioc_inllen1)
244 if (data->ioc_inlbuf2 && !data->ioc_inllen2)
247 if (data->ioc_pbuf1 && !data->ioc_plen1)
250 if (data->ioc_pbuf2 && !data->ioc_plen2)
253 if (data->ioc_plen1 && !data->ioc_pbuf1)
256 if (data->ioc_plen2 && !data->ioc_pbuf2)
/* Declared total must equal what the inline buffer lengths imply. */
259 if (lnet_ioctl_packlen(data) != data->ioc_hdr.ioc_len)
/* NOTE(review): this indexes past buf1's 8-byte-rounded region into
 * buf2 -- presumably verifying the last inline byte is NUL; the full
 * source likely has a matching check for buf1 (elided here). */
262 if (data->ioc_inllen1 &&
263 data->ioc_bulk[((data->ioc_inllen1 + 7) & ~7) +
264 data->ioc_inllen2 - 1] != '\0'
/* Validate a copied-in libcfs_ioctl_data and rewrite its inline-buffer
 * pointers to point into the kernel copy's ioc_bulk area (the pointer
 * values that came from userspace are meaningless in the kernel). */
270 static int lnet_ioctl_data_adjust(struct libcfs_ioctl_data *data)
274 if (lnet_ioctl_is_invalid(data)) {
275 CERROR("lnet ioctl: parameter not correctly formatted\n");
279 if (data->ioc_inllen1 != 0)
280 data->ioc_inlbuf1 = &data->ioc_bulk[0];
/* buf2 starts immediately after buf1's 8-byte-aligned region. */
282 if (data->ioc_inllen2 != 0)
283 data->ioc_inlbuf2 = (&data->ioc_bulk[0] +
284 round_up(data->ioc_inllen1, 8));
/* Copy an ioctl payload in from userspace.
 * First reads just the fixed header to learn the version and total
 * length, bounds-checks both, then allocates and copies the whole
 * buffer.  On success *hdr_pp owns the allocation (caller frees with
 * LIBCFS_FREE); on failure nothing is left allocated. */
289 static int lnet_ioctl_getdata(struct libcfs_ioctl_hdr **hdr_pp,
290 struct libcfs_ioctl_hdr __user *uhdr)
292 struct libcfs_ioctl_hdr hdr;
296 if (copy_from_user(&hdr, uhdr, sizeof(hdr)))
299 if (hdr.ioc_version != LNET_IOCTL_VERSION &&
300 hdr.ioc_version != LNET_IOCTL_VERSION2) {
301 CERROR("lnet ioctl: version mismatch expected %#x, got %#x\n",
302 LNET_IOCTL_VERSION, hdr.ioc_version);
306 if (hdr.ioc_len < sizeof(struct libcfs_ioctl_hdr)) {
307 CERROR("lnet ioctl: user buffer too small for ioctl\n");
311 if (hdr.ioc_len > LIBCFS_IOC_DATA_MAX) {
312 CERROR("lnet ioctl: user buffer is too large %d/%d\n",
313 hdr.ioc_len, LIBCFS_IOC_DATA_MAX);
317 LIBCFS_ALLOC(*hdr_pp, hdr.ioc_len);
321 if (copy_from_user(*hdr_pp, uhdr, hdr.ioc_len))
322 GOTO(free, err = -EFAULT);
/* Re-check: userspace could have changed the header between the
 * two copy_from_user() calls (TOCTOU); reject any mismatch. */
324 if ((*hdr_pp)->ioc_version != hdr.ioc_version ||
325 (*hdr_pp)->ioc_len != hdr.ioc_len) {
326 GOTO(free, err = -EINVAL);
/* error path: release the buffer so the caller sees no allocation */
332 LIBCFS_FREE(*hdr_pp, hdr.ioc_len);
/* unlocked_ioctl entry point for the /dev/lnet misc device.
 * Requires CAP_SYS_ADMIN, validates the command range, copies the
 * payload in, dispatches to libcfs_ioctl()/lnet_ioctl(), offers
 * unclaimed commands to the lnet_ioctl_list notifier chain, and copies
 * the (possibly modified) payload back to userspace. */
337 lnet_psdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
339 struct libcfs_ioctl_data *data = NULL;
340 struct libcfs_ioctl_hdr *hdr;
342 void __user *uparam = (void __user *)arg;
/* All of these ioctls reconfigure networking -- root only. */
345 if (!capable(CAP_SYS_ADMIN))
348 if (_IOC_TYPE(cmd) != IOC_LIBCFS_TYPE ||
349 _IOC_NR(cmd) < IOC_LIBCFS_MIN_NR ||
350 _IOC_NR(cmd) > IOC_LIBCFS_MAX_NR) {
351 CDEBUG(D_IOCTL, "invalid ioctl ( type %d, nr %d, size %d )\n",
352 _IOC_TYPE(cmd), _IOC_NR(cmd), _IOC_SIZE(cmd));
356 /* 'cmd' and permissions get checked in our arch-specific caller */
357 err = lnet_ioctl_getdata(&hdr, uparam);
359 CDEBUG_LIMIT(D_ERROR,
360 "lnet ioctl: data header error %d\n", err);
364 if (hdr->ioc_version == LNET_IOCTL_VERSION) {
365 /* The lnet_ioctl_data_adjust() function performs adjustment
366 * operations on the libcfs_ioctl_data structure to make
367 * it usable by the code. This doesn't need to be called
368 * for new data structures added.
370 data = container_of(hdr, struct libcfs_ioctl_data, ioc_hdr);
371 err = lnet_ioctl_data_adjust(data);
376 CDEBUG(D_IOCTL, "lnet ioctl cmd %u\n", cmd);
378 err = libcfs_ioctl(cmd, data);
380 err = lnet_ioctl(cmd, hdr);
/* -EINVAL from the built-in dispatch means "not mine": give any
 * registered notifier a chance to claim and handle the command. */
381 if (err == -EINVAL) {
382 err = blocking_notifier_call_chain(&lnet_ioctl_list,
384 if (!(err & NOTIFY_STOP_MASK))
385 /* No-one claimed the ioctl */
388 err = notifier_to_errno(err);
/* Copy results back even on handler error; only report -EFAULT
 * when the handler itself succeeded. */
390 if (copy_to_user(uparam, hdr, hdr->ioc_len) && !err)
393 LIBCFS_FREE(hdr, hdr->ioc_len);
/* File operations for the LNet control device; all interaction is via
 * the unlocked_ioctl handler above. */
397 static const struct file_operations lnet_fops = {
398 .owner = THIS_MODULE,
399 .unlocked_ioctl = lnet_psdev_ioctl,
/* Misc character device; the kernel picks the minor dynamically. */
402 static struct miscdevice lnet_dev = {
403 .minor = MISC_DYNAMIC_MINOR,
/* Module init: bring up the CPU partition tables and the LNet library,
 * register the control device, warn about deprecated router-check
 * parameters, and optionally kick off network configuration.
 * NOTE(review): the error-unwind lines between these calls are elided
 * in this extract -- confirm cleanup ordering against the full source. */
408 static int __init lnet_init(void)
419 CERROR("cfs_cpu_init: rc = %d\n", rc);
423 rc = lnet_lib_init();
425 CERROR("lnet_lib_init: rc = %d\n", rc);
430 rc = misc_register(&lnet_dev);
432 CERROR("misc_register: rc = %d\n", rc);
/* These two module parameters are accepted but no longer honored. */
437 if (live_router_check_interval != INT_MIN ||
438 dead_router_check_interval != INT_MIN)
439 LCONSOLE_WARN("live_router_check_interval and dead_router_check_interval have been deprecated. Use alive_router_check_interval instead. Ignoring these deprecated parameters.\n")
441 if (config_on_load) {
442 /* Have to schedule a separate thread to avoid deadlocking
444 (void)kthread_run(lnet_configure, NULL, "lnet_initd");
/* Module exit: unregister the control device (remaining teardown,
 * e.g. lnet_lib_exit()/cfs_cpu_fini(), is elided in this extract). */
450 static void __exit lnet_exit(void)
452 misc_deregister(&lnet_dev);
/* Standard kernel module metadata and entry/exit registration. */
459 MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
460 MODULE_DESCRIPTION("Lustre Networking layer");
461 MODULE_VERSION(LNET_VERSION);
462 MODULE_LICENSE("GPL");
464 module_init(lnet_init);
465 module_exit(lnet_exit);