LU-9120 lnet: calculate the lnd timeout
[fs/lustre-release.git] / lnet/lnet/api-ni.c
index 63eeaca..aa35054 100644
@@ -23,7 +23,7 @@
  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2016, Intel Corporation.
+ * Copyright (c) 2011, 2017, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
  */
 
 #define DEBUG_SUBSYSTEM S_LNET
+
+#include <linux/ctype.h>
 #include <linux/log2.h>
 #include <linux/ktime.h>
 #include <linux/moduleparam.h>
+#include <linux/uaccess.h>
 
 #include <lnet/lib-lnet.h>
 
@@ -75,20 +78,79 @@ module_param(lnet_numa_range, uint, 0444);
 MODULE_PARM_DESC(lnet_numa_range,
                "NUMA range to consider during Multi-Rail selection");
 
+/*
+ * lnet_health_sensitivity determines by how much the health value is
+ * decremented on a send error. It defaults to 0, which means health
+ * checking is turned off.
+ */
+unsigned int lnet_health_sensitivity = 0;
+static int sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp);
+static struct kernel_param_ops param_ops_health_sensitivity = {
+       .set = sensitivity_set,
+       .get = param_get_int,
+};
+#define param_check_health_sensitivity(name, p) \
+               __param_check(name, p, int)
+#ifdef HAVE_KERNEL_PARAM_OPS
+module_param(lnet_health_sensitivity, health_sensitivity, S_IRUGO|S_IWUSR);
+#else
+module_param_call(lnet_health_sensitivity, sensitivity_set, param_get_int,
+                 &lnet_health_sensitivity, S_IRUGO|S_IWUSR);
+#endif
+MODULE_PARM_DESC(lnet_health_sensitivity,
+               "Value to decrement the health value by on error");
+
 static int lnet_interfaces_max = LNET_INTERFACES_MAX_DEFAULT;
-static int intf_max_set(const char *val, struct kernel_param *kp);
+static int intf_max_set(const char *val, cfs_kernel_param_arg_t *kp);
+
+static struct kernel_param_ops param_ops_interfaces_max = {
+       .set = intf_max_set,
+       .get = param_get_int,
+};
+
+#define param_check_interfaces_max(name, p) \
+               __param_check(name, p, int)
+
+#ifdef HAVE_KERNEL_PARAM_OPS
+module_param(lnet_interfaces_max, interfaces_max, 0644);
+#else
 module_param_call(lnet_interfaces_max, intf_max_set, param_get_int,
-                 &lnet_interfaces_max, S_IRUGO|S_IWUSR);
+                 &param_ops_interfaces_max, 0644);
+#endif
 MODULE_PARM_DESC(lnet_interfaces_max,
                "Maximum number of interfaces in a node.");
 
 unsigned lnet_peer_discovery_disabled = 0;
-static int discovery_set(const char *val, struct kernel_param *kp);
+static int discovery_set(const char *val, cfs_kernel_param_arg_t *kp);
+
+static struct kernel_param_ops param_ops_discovery_disabled = {
+       .set = discovery_set,
+       .get = param_get_int,
+};
+
+#define param_check_discovery_disabled(name, p) \
+               __param_check(name, p, int)
+#ifdef HAVE_KERNEL_PARAM_OPS
+module_param(lnet_peer_discovery_disabled, discovery_disabled, 0644);
+#else
 module_param_call(lnet_peer_discovery_disabled, discovery_set, param_get_int,
-                 &lnet_peer_discovery_disabled, S_IRUGO|S_IWUSR);
+                 &param_ops_discovery_disabled, 0644);
+#endif
 MODULE_PARM_DESC(lnet_peer_discovery_disabled,
                "Set to 1 to disable peer discovery on this node.");
 
+unsigned lnet_transaction_timeout = 5;
+module_param(lnet_transaction_timeout, uint, 0444);
+MODULE_PARM_DESC(lnet_transaction_timeout,
+               "Time in seconds to wait for a REPLY or an ACK");
+
+unsigned lnet_retry_count = 0;
+module_param(lnet_retry_count, uint, 0444);
+MODULE_PARM_DESC(lnet_retry_count,
+                "Maximum number of times to retry transmitting a message");
+
+unsigned lnet_lnd_timeout = LNET_LND_DEFAULT_TIMEOUT;
+
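
The three tunables above are what the commit subject refers to: lnet_lnd_timeout is no longer an independent knob but is calculated from the transaction timeout and the retry budget. The helper below is a hedged illustration only; the real calculation lives in code outside this hunk and its exact formula is not shown here. demo_calc_lnd_timeout is an invented name.

/* Illustrative sketch, not the LNet implementation: give every transmit
 * attempt (the first send plus lnet_retry_count retries) an equal share
 * of the overall transaction timeout. */
static unsigned int demo_calc_lnd_timeout(unsigned int transaction_timeout,
					  unsigned int retry_count)
{
	return transaction_timeout / (retry_count + 1);
}
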
 /*
  * This sequence number keeps track of how many times DLC was used to
  * update the local NIs. It is incremented when a NI is added or
@@ -101,11 +163,47 @@ static atomic_t lnet_dlc_seq_no = ATOMIC_INIT(0);
 static int lnet_ping(struct lnet_process_id id, signed long timeout,
                     struct lnet_process_id __user *ids, int n_ids);
 
-static int lnet_discover(lnet_process_id_t id, __u32 force,
-                        lnet_process_id_t __user *ids, int n_ids);
+static int lnet_discover(struct lnet_process_id id, __u32 force,
+                        struct lnet_process_id __user *ids, int n_ids);
+
+static int
+sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp)
+{
+       int rc;
+       unsigned *sensitivity = (unsigned *)kp->arg;
+       unsigned long value;
+
+       rc = kstrtoul(val, 0, &value);
+       if (rc) {
+               CERROR("Invalid module parameter value for 'lnet_health_sensitivity'\n");
+               return rc;
+       }
+
+       /*
+        * Take the api_mutex to serialize this update against LNet
+        * startup and shutdown, so the correct value ends up stored.
+        */
+       mutex_lock(&the_lnet.ln_api_mutex);
+
+       if (the_lnet.ln_state != LNET_STATE_RUNNING) {
+               mutex_unlock(&the_lnet.ln_api_mutex);
+               return 0;
+       }
+
+       if (value == *sensitivity) {
+               mutex_unlock(&the_lnet.ln_api_mutex);
+               return 0;
+       }
+
+       *sensitivity = value;
+
+       mutex_unlock(&the_lnet.ln_api_mutex);
+
+       return 0;
+}
 
 static int
-discovery_set(const char *val, struct kernel_param *kp)
+discovery_set(const char *val, cfs_kernel_param_arg_t *kp)
 {
        int rc;
        unsigned *discovery = (unsigned *)kp->arg;
@@ -155,7 +253,7 @@ discovery_set(const char *val, struct kernel_param *kp)
 }
 
 static int
-intf_max_set(const char *val, struct kernel_param *kp)
+intf_max_set(const char *val, cfs_kernel_param_arg_t *kp)
 {
        int value, rc;
 
@@ -167,8 +265,8 @@ intf_max_set(const char *val, struct kernel_param *kp)
 
        if (value < LNET_INTERFACES_MIN) {
                CWARN("max interfaces provided are too small, setting to %d\n",
-                     LNET_INTERFACES_MIN);
-               value = LNET_INTERFACES_MIN;
+                     LNET_INTERFACES_MAX_DEFAULT);
+               value = LNET_INTERFACES_MAX_DEFAULT;
        }
 
        *(int *)kp->arg = value;
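
The parameter declarations near the top of the file and the setters above all follow one pattern: pair the variable with a struct kernel_param_ops whose .set callback validates the input and serializes the store on the LNet api_mutex, using module_param() when HAVE_KERNEL_PARAM_OPS is defined and module_param_call() otherwise. Below is a minimal, standalone sketch of that idea outside LNet; it uses module_param_cb() instead of the compatibility macros, and every name in it is illustrative.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>

static unsigned int demo_sensitivity;
static DEFINE_MUTEX(demo_mutex);

/* custom setter: reject garbage, then store the value under a mutex */
static int demo_sensitivity_set(const char *val, const struct kernel_param *kp)
{
	unsigned long value;
	int rc;

	rc = kstrtoul(val, 0, &value);
	if (rc)
		return rc;

	mutex_lock(&demo_mutex);
	*(unsigned int *)kp->arg = value;
	mutex_unlock(&demo_mutex);

	return 0;
}

static const struct kernel_param_ops demo_sensitivity_ops = {
	.set = demo_sensitivity_set,
	.get = param_get_uint,
};

module_param_cb(demo_sensitivity, &demo_sensitivity_ops,
		&demo_sensitivity, 0644);
MODULE_PARM_DESC(demo_sensitivity, "Illustrative parameter with a custom setter");
MODULE_LICENSE("GPL");

At runtime such a parameter is written through /sys/module/<module>/parameters/<name>, which is what routes the string through the .set callback.
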
@@ -209,8 +307,9 @@ static void
 lnet_init_locks(void)
 {
        spin_lock_init(&the_lnet.ln_eq_wait_lock);
+       spin_lock_init(&the_lnet.ln_msg_resend_lock);
        init_waitqueue_head(&the_lnet.ln_eq_waitq);
-       init_waitqueue_head(&the_lnet.ln_rc_waitq);
+       init_waitqueue_head(&the_lnet.ln_mt_waitq);
        mutex_init(&the_lnet.ln_lnd_mutex);
 }
 
@@ -476,6 +575,13 @@ static struct lnet_lnd *lnet_find_lnd_by_type(__u32 type)
        return NULL;
 }
 
+unsigned int
+lnet_get_lnd_timeout(void)
+{
+       return lnet_lnd_timeout;
+}
+EXPORT_SYMBOL(lnet_get_lnd_timeout);
+
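
lnet_get_lnd_timeout() is exported, presumably so the individual LNDs can size their own deadlines from the value LNet computed; the callers are not in this file. A hypothetical caller might look like the sketch below. struct demo_tx and demo_tx_set_deadline() are invented for illustration and are not part of any real LND.

#include <linux/ktime.h>
#include <lnet/lib-lnet.h>	/* lnet_get_lnd_timeout() */

/* invented transmit descriptor, standing in for an LND's real tx structure */
struct demo_tx {
	time64_t tx_deadline;
};

/* stamp a transmit with a deadline derived from the LNet-computed timeout */
static void demo_tx_set_deadline(struct demo_tx *tx)
{
	tx->tx_deadline = ktime_get_seconds() + lnet_get_lnd_timeout();
}
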
 void
 lnet_register_lnd(struct lnet_lnd *lnd)
 {
@@ -745,6 +851,8 @@ lnet_prepare(lnet_pid_t requested_pid)
        INIT_LIST_HEAD(&the_lnet.ln_dc_request);
        INIT_LIST_HEAD(&the_lnet.ln_dc_working);
        INIT_LIST_HEAD(&the_lnet.ln_dc_expired);
+       INIT_LIST_HEAD(&the_lnet.ln_mt_localNIRecovq);
+       INIT_LIST_HEAD(&the_lnet.ln_mt_peerNIRecovq);
        init_waitqueue_head(&the_lnet.ln_dc_waitq);
 
        rc = lnet_descriptor_setup();
@@ -993,8 +1101,7 @@ lnet_islocalnet(__u32 net_id)
 bool
 lnet_is_ni_healthy_locked(struct lnet_ni *ni)
 {
-       if (ni->ni_state == LNET_NI_STATE_ACTIVE ||
-           ni->ni_state == LNET_NI_STATE_DEGRADED)
+       if (ni->ni_state & LNET_NI_STATE_ACTIVE)
                return true;
 
        return false;
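
This hunk, together with the LNET_NI_STATE_DELETING assertion and the startup/shutdown hunks further down, changes ni_state from a single enum value into a set of bit flags that are set and cleared individually under the NI lock. A minimal sketch of the flag style follows; the flag values and names are illustrative, the real definitions live in the LNet headers.

#include <linux/types.h>

/* illustrative flag values, not the real LNET_NI_STATE_* definitions */
#define DEMO_NI_STATE_INIT	(1 << 0)
#define DEMO_NI_STATE_ACTIVE	(1 << 1)
#define DEMO_NI_STATE_DELETING	(1 << 2)

/* with bit flags a NI can be ACTIVE and DELETING at the same time;
 * health only cares whether the ACTIVE bit is set */
static bool demo_ni_is_healthy(unsigned int state)
{
	return (state & DEMO_NI_STATE_ACTIVE) != 0;
}
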
@@ -1388,11 +1495,11 @@ lnet_ping_target_fini(void)
 /* Resize the push target. */
 int lnet_push_target_resize(void)
 {
-       lnet_process_id_t id = { LNET_NID_ANY, LNET_PID_ANY };
-       lnet_md_t md = { NULL };
-       lnet_handle_me_t meh;
-       lnet_handle_md_t mdh;
-       lnet_handle_md_t old_mdh;
+       struct lnet_process_id id = { LNET_NID_ANY, LNET_PID_ANY };
+       struct lnet_md md = { NULL };
+       struct lnet_handle_me meh;
+       struct lnet_handle_md mdh;
+       struct lnet_handle_md old_mdh;
        struct lnet_ping_buffer *pbuf;
        struct lnet_ping_buffer *old_pbuf;
        int nnis = the_lnet.ln_push_target_nnis;
@@ -1581,7 +1688,7 @@ lnet_clear_zombies_nis_locked(struct lnet_net *net)
                list_del_init(&ni->ni_netlist);
                /* the ni should be in deleting state. If it's not it's
                 * a bug */
-               LASSERT(ni->ni_state == LNET_NI_STATE_DELETING);
+               LASSERT(ni->ni_state & LNET_NI_STATE_DELETING);
                cfs_percpt_for_each(ref, j, ni->ni_refs) {
                        if (*ref == 0)
                                continue;
@@ -1629,7 +1736,10 @@ lnet_shutdown_lndni(struct lnet_ni *ni)
        struct lnet_net *net = ni->ni_net;
 
        lnet_net_lock(LNET_LOCK_EX);
-       ni->ni_state = LNET_NI_STATE_DELETING;
+       lnet_ni_lock(ni);
+       ni->ni_state |= LNET_NI_STATE_DELETING;
+       ni->ni_state &= ~LNET_NI_STATE_ACTIVE;
+       lnet_ni_unlock(ni);
        lnet_ni_unlink_locked(ni);
        lnet_incr_dlc_seq();
        lnet_net_unlock(LNET_LOCK_EX);
@@ -1683,6 +1793,10 @@ static void
 lnet_shutdown_lndnets(void)
 {
        struct lnet_net *net;
+       struct list_head resend;
+       struct lnet_msg *msg, *tmp;
+
+       INIT_LIST_HEAD(&resend);
 
        /* NB called holding the global mutex */
 
@@ -1718,6 +1832,16 @@ lnet_shutdown_lndnets(void)
                lnet_shutdown_lndnet(net);
        }
 
+       spin_lock(&the_lnet.ln_msg_resend_lock);
+       list_splice(&the_lnet.ln_msg_resend, &resend);
+       spin_unlock(&the_lnet.ln_msg_resend_lock);
+
+       list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
+               list_del_init(&msg->msg_list);
+               msg->msg_no_resend = true;
+               lnet_finalize(msg, -ECANCELED);
+       }
+
        lnet_net_lock(LNET_LOCK_EX);
        the_lnet.ln_state = LNET_STATE_SHUTDOWN;
        lnet_net_unlock(LNET_LOCK_EX);
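
The shutdown path above drains ln_msg_resend with a common two-step pattern: splice the shared list onto a private head while holding the spinlock, then walk the private copy and finalize each message with the lock dropped, so lnet_finalize() is never called under ln_msg_resend_lock. A generic sketch of the same pattern, with invented types, is:

#include <linux/list.h>
#include <linux/spinlock.h>

/* invented message type; only the list linkage matters for the pattern */
struct demo_msg {
	struct list_head msg_list;
};

static void demo_drain_resend(spinlock_t *lock, struct list_head *pending)
{
	LIST_HEAD(resend);
	struct demo_msg *msg, *tmp;

	/* detach everything that is queued while holding the lock */
	spin_lock(lock);
	list_splice_init(pending, &resend);
	spin_unlock(lock);

	/* finalize each entry outside the lock */
	list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
		list_del_init(&msg->msg_list);
		/* cancel/complete the message here, e.g. with -ECANCELED */
	}
}
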
@@ -1751,7 +1875,10 @@ lnet_startup_lndni(struct lnet_ni *ni, struct lnet_lnd_tunables *tun)
                goto failed0;
        }
 
-       ni->ni_state = LNET_NI_STATE_ACTIVE;
+       lnet_ni_lock(ni);
+       ni->ni_state |= LNET_NI_STATE_ACTIVE;
+       ni->ni_state &= ~LNET_NI_STATE_INIT;
+       lnet_ni_unlock(ni);
 
        /* We keep a reference on the loopback net through the loopback NI */
        if (net->net_lnd->lnd_type == LOLND) {
@@ -1786,6 +1913,7 @@ lnet_startup_lndni(struct lnet_ni *ni, struct lnet_lnd_tunables *tun)
 
        atomic_set(&ni->ni_tx_credits,
                   lnet_ni_tq_credits(ni) * ni->ni_ncpts);
+       atomic_set(&ni->ni_healthv, LNET_MAX_HEALTH_VALUE);
 
        CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n",
                libcfs_nid2str(ni->ni_nid),
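
Each NI now starts with its health value at LNET_MAX_HEALTH_VALUE, and lnet_health_sensitivity (introduced at the top of the file) is the amount subtracted from that value on a send error. As a hedged conceptual sketch only (the real accounting presumably lives in the message-completion path, not in this file, and the names below are invented):

#include <linux/atomic.h>

/* illustrative ceiling; the real constant is LNET_MAX_HEALTH_VALUE */
#define DEMO_MAX_HEALTH_VALUE	1000

/* start an interface at full health, as lnet_startup_lndni() does above */
static void demo_health_init(atomic_t *healthv)
{
	atomic_set(healthv, DEMO_MAX_HEALTH_VALUE);
}

/* knock the interface's health down by the configured sensitivity on a
 * send error, clamping at zero instead of going negative (a real
 * implementation would also have to close the read/modify race) */
static void demo_handle_send_error(atomic_t *healthv, unsigned int sensitivity)
{
	int h = atomic_read(healthv);

	if (h <= (int)sensitivity)
		atomic_set(healthv, 0);
	else
		atomic_sub((int)sensitivity, healthv);
}
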
@@ -1829,8 +1957,6 @@ lnet_startup_lndnet(struct lnet_net *net, struct lnet_lnd_tunables *tun)
        if (lnet_net_unique(net->net_id, &the_lnet.ln_nets, &net_l)) {
                lnd_type = LNET_NETTYP(net->net_id);
 
-               LASSERT(libcfs_isknown_lnd(lnd_type));
-
                mutex_lock(&the_lnet.ln_lnd_mutex);
                lnd = lnet_find_lnd_by_type(lnd_type);
 
@@ -2061,6 +2187,7 @@ int lnet_lib_init(void)
        INIT_LIST_HEAD(&the_lnet.ln_lnds);
        INIT_LIST_HEAD(&the_lnet.ln_net_zombie);
        INIT_LIST_HEAD(&the_lnet.ln_rcd_zombie);
+       INIT_LIST_HEAD(&the_lnet.ln_msg_resend);
        INIT_LIST_HEAD(&the_lnet.ln_rcd_deathrow);
 
        /* The hash table size is the number of bits it takes to express the set
@@ -2199,20 +2326,20 @@ LNetNIInit(lnet_pid_t requested_pid)
 
        lnet_ping_target_update(pbuf, ping_mdh);
 
-       rc = lnet_router_checker_start();
+       rc = lnet_monitor_thr_start();
        if (rc != 0)
                goto err_stop_ping;
 
        rc = lnet_push_target_init();
        if (rc != 0)
-               goto err_stop_router_checker;
+               goto err_stop_monitor_thr;
 
        rc = lnet_peer_discovery_start();
        if (rc != 0)
                goto err_destroy_push_target;
 
        lnet_fault_init();
-       lnet_proc_init();
+       lnet_router_debugfs_init();
 
        mutex_unlock(&the_lnet.ln_api_mutex);
 
@@ -2220,8 +2347,8 @@ LNetNIInit(lnet_pid_t requested_pid)
 
 err_destroy_push_target:
        lnet_push_target_fini();
-err_stop_router_checker:
-       lnet_router_checker_stop();
+err_stop_monitor_thr:
+       lnet_monitor_thr_stop();
 err_stop_ping:
        lnet_ping_target_fini();
 err_acceptor_stop:
@@ -2270,10 +2397,10 @@ LNetNIFini()
 
                lnet_fault_fini();
 
-               lnet_proc_fini();
+               lnet_router_debugfs_fini();
                lnet_peer_discovery_stop();
                lnet_push_target_fini();
-               lnet_router_checker_stop();
+               lnet_monitor_thr_stop();
                lnet_ping_target_fini();
 
                /* Teardown fns that use my own API functions BEFORE here */
@@ -2474,10 +2601,17 @@ lnet_get_next_ni_locked(struct lnet_net *mynet, struct lnet_ni *prev)
        struct lnet_ni          *ni;
        struct lnet_net         *net = mynet;
 
+       /*
+        * It is possible that the net has been cleaned out while a message
+        * is still being sent, so verify that the NI list is not empty
+        * before dereferencing it.
+        */
        if (prev == NULL) {
                if (net == NULL)
                        net = list_entry(the_lnet.ln_nets.next, struct lnet_net,
                                        net_list);
+               if (list_empty(&net->net_ni_list))
+                       return NULL;
                ni = list_entry(net->net_ni_list.next, struct lnet_ni,
                                ni_netlist);
 
@@ -2499,6 +2633,8 @@ lnet_get_next_ni_locked(struct lnet_net *mynet, struct lnet_ni *prev)
                /* get the next net */
                net = list_entry(prev->ni_net->net_list.next, struct lnet_net,
                                 net_list);
+               if (list_empty(&net->net_ni_list))
+                       return NULL;
                /* get the ni on it */
                ni = list_entry(net->net_ni_list.next, struct lnet_ni,
                                ni_netlist);
@@ -2506,6 +2642,9 @@ lnet_get_next_ni_locked(struct lnet_net *mynet, struct lnet_ni *prev)
                return ni;
        }
 
+       if (list_empty(&prev->ni_netlist))
+               return NULL;
+
        /* there are more nis left */
        ni = list_entry(prev->ni_netlist.next, struct lnet_ni, ni_netlist);
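
All three hunks above harden lnet_get_next_ni_locked() for the case where a net has been emptied while a message is still in flight: list_entry() on an empty list head yields a pointer into the head itself rather than a valid lnet_ni, so each hop now checks list_empty() first and returns NULL. A generic sketch of that guard, with invented types, is:

#include <linux/list.h>

/* invented stand-ins for struct lnet_net / struct lnet_ni */
struct demo_net {
	struct list_head net_ni_list;
};

struct demo_ni {
	struct list_head ni_netlist;
};

/* return the first NI on a net, or NULL if the net was emptied underneath us */
static struct demo_ni *demo_first_ni(struct demo_net *net)
{
	if (list_empty(&net->net_ni_list))
		return NULL;

	return list_first_entry(&net->net_ni_list, struct demo_ni, ni_netlist);
}
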
 
@@ -2729,7 +2868,7 @@ int lnet_dyn_add_ni(struct lnet_ioctl_config_ni *conf)
        struct lnet_ni *ni;
        struct lnet_ioctl_config_lnd_tunables *tun = NULL;
        int rc, i;
-       __u32 net_id;
+       __u32 net_id, lnd_type;
 
        /* get the tunables if they are available */
        if (conf->lic_cfg_hdr.ioc_len >=
@@ -2743,6 +2882,12 @@ int lnet_dyn_add_ni(struct lnet_ioctl_config_ni *conf)
                                                  tun);
 
        net_id = LNET_NIDNET(conf->lic_nid);
+       lnd_type = LNET_NETTYP(net_id);
+
+       if (!libcfs_isknown_lnd(lnd_type)) {
+               CERROR("No valid net and lnd information provided\n");
+               return -EINVAL;
+       }
 
        net = lnet_net_alloc(net_id, NULL);
        if (!net)
@@ -3238,11 +3383,7 @@ LNetCtl(unsigned int cmd, void *arg)
                        return -EINVAL;
 
                mutex_lock(&the_lnet.ln_api_mutex);
-               rc = lnet_get_peer_info(&cfg->prcfg_prim_nid,
-                                       &cfg->prcfg_cfg_nid,
-                                       &cfg->prcfg_count,
-                                       &cfg->prcfg_mr,
-                                       &cfg->prcfg_size,
+               rc = lnet_get_peer_info(cfg,
                                        (void __user *)cfg->prcfg_bulk);
                mutex_unlock(&the_lnet.ln_api_mutex);
                return rc;
@@ -3256,19 +3397,21 @@ LNetCtl(unsigned int cmd, void *arg)
 
                mutex_lock(&the_lnet.ln_api_mutex);
                rc = lnet_get_peer_list(&cfg->prcfg_count, &cfg->prcfg_size,
-                               (lnet_process_id_t __user *)cfg->prcfg_bulk);
+                               (struct lnet_process_id __user *)cfg->prcfg_bulk);
                mutex_unlock(&the_lnet.ln_api_mutex);
                return rc;
        }
 
        case IOC_LIBCFS_NOTIFY_ROUTER: {
-               unsigned long jiffies_passed;
-
-               jiffies_passed = ktime_get_real_seconds() - data->ioc_u64[0];
-               jiffies_passed = cfs_time_seconds(jiffies_passed);
+               time64_t deadline = ktime_get_real_seconds() - data->ioc_u64[0];
 
+               /* The deadline passed in by the user should be some time in
+                * seconds in the future since the UNIX epoch. We have to map
+                * that deadline to the wall clock.
+                */
+               deadline += ktime_get_seconds();
                return lnet_notify(NULL, data->ioc_nid, data->ioc_flags,
-                                  jiffies - jiffies_passed);
+                                  deadline);
        }
 
        case IOC_LIBCFS_LNET_DIST:
@@ -3329,11 +3472,15 @@ LNetCtl(unsigned int cmd, void *arg)
                if (rc < 0)
                        return rc;
 
+               mutex_lock(&the_lnet.ln_api_mutex);
                lp = lnet_find_peer(ping->ping_id.nid);
                if (lp) {
                        ping->ping_id.nid = lp->lp_primary_nid;
                        ping->mr_info = lnet_peer_is_multi_rail(lp);
+                       lnet_peer_decref_locked(lp);
                }
+               mutex_unlock(&the_lnet.ln_api_mutex);
+
                ping->ping_count = rc;
                return 0;
        }
@@ -3347,11 +3494,15 @@ LNetCtl(unsigned int cmd, void *arg)
                                   discover->ping_count);
                if (rc < 0)
                        return rc;
+
+               mutex_lock(&the_lnet.ln_api_mutex);
                lp = lnet_find_peer(discover->ping_id.nid);
                if (lp) {
                        discover->ping_id.nid = lp->lp_primary_nid;
                        discover->mr_info = lnet_peer_is_multi_rail(lp);
+                       lnet_peer_decref_locked(lp);
                }
+               mutex_unlock(&the_lnet.ln_api_mutex);
 
                discover->ping_count = rc;
                return 0;
@@ -3469,9 +3620,16 @@ static int lnet_ping(struct lnet_process_id id, signed long timeout,
        sigset_t blocked;
 
        /* n_ids limit is arbitrary */
-       if (n_ids <= 0 || n_ids > lnet_interfaces_max || id.nid == LNET_NID_ANY)
+       if (n_ids <= 0 || id.nid == LNET_NID_ANY)
                return -EINVAL;
 
+       /*
+        * If the user buffer has more space than lnet_interfaces_max,
+        * then only fill it up to lnet_interfaces_max.
+        */
+       if (n_ids > lnet_interfaces_max)
+               n_ids = lnet_interfaces_max;
+
        if (id.pid == LNET_PID_ANY)
                id.pid = LNET_PID_LUSTRE;
 
@@ -3503,7 +3661,7 @@ static int lnet_ping(struct lnet_process_id id, signed long timeout,
 
        rc = LNetGet(LNET_NID_ANY, mdh, id,
                     LNET_RESERVED_PORTAL,
-                    LNET_PROTO_PING_MATCHBITS, 0);
+                    LNET_PROTO_PING_MATCHBITS, 0, false);
 
        if (rc != 0) {
                /* Don't CERROR; this could be deliberate! */
@@ -3629,27 +3787,36 @@ static int lnet_ping(struct lnet_process_id id, signed long timeout,
 }
 
 static int
-lnet_discover(lnet_process_id_t id, __u32 force, lnet_process_id_t __user *ids,
-             int n_ids)
+lnet_discover(struct lnet_process_id id, __u32 force,
+             struct lnet_process_id __user *ids, int n_ids)
 {
        struct lnet_peer_ni *lpni;
        struct lnet_peer_ni *p;
        struct lnet_peer *lp;
-       lnet_process_id_t *buf;
+       struct lnet_process_id *buf;
        int cpt;
        int i;
        int rc;
        int max_intf = lnet_interfaces_max;
+       size_t buf_size;
 
        if (n_ids <= 0 ||
-           id.nid == LNET_NID_ANY ||
-           n_ids > max_intf)
+           id.nid == LNET_NID_ANY)
                return -EINVAL;
 
        if (id.pid == LNET_PID_ANY)
                id.pid = LNET_PID_LUSTRE;
 
-       LIBCFS_ALLOC(buf, n_ids * sizeof(*buf));
+       /*
+        * If the user buffer has more space than max_intf,
+        * then only fill it up to max_intf.
+        */
+       if (n_ids > max_intf)
+               n_ids = max_intf;
+
+       buf_size = n_ids * sizeof(*buf);
+
+       LIBCFS_ALLOC(buf, buf_size);
        if (!buf)
                return -ENOMEM;
 
@@ -3702,5 +3869,7 @@ out_decref:
 out:
        lnet_net_unlock(cpt);
 
+       LIBCFS_FREE(buf, buf_size);
+
        return rc;
 }