LU-9480 lnet: add enhanced statistics
[fs/lustre-release.git] lnet/lnet/api-ni.c
index 91e8621..63eeaca 100644
 
 #define D_LNI D_CONSOLE
 
-struct lnet the_lnet;          /* THE state of the network */
+/*
+ * Initialize ln_api_mutex statically, since it is used in the discovery_set()
+ * module parameter callback, which can be invoked before module init
+ * completes. The mutex must therefore be ready for use at that point.
+ */
+struct lnet the_lnet = {
+       .ln_api_mutex = __MUTEX_INITIALIZER(the_lnet.ln_api_mutex),
+};             /* THE state of the network */
 EXPORT_SYMBOL(the_lnet);
 
 static char *ip2nets = "";
@@ -94,11 +101,16 @@ static atomic_t lnet_dlc_seq_no = ATOMIC_INIT(0);
 static int lnet_ping(struct lnet_process_id id, signed long timeout,
                     struct lnet_process_id __user *ids, int n_ids);
 
+static int lnet_discover(struct lnet_process_id id, __u32 force,
+                        struct lnet_process_id __user *ids, int n_ids);
+
 static int
 discovery_set(const char *val, struct kernel_param *kp)
 {
        int rc;
+       unsigned *discovery = (unsigned *)kp->arg;
        unsigned long value;
+       struct lnet_ping_buffer *pbuf;
 
        rc = kstrtoul(val, 0, &value);
        if (rc) {
@@ -106,7 +118,38 @@ discovery_set(const char *val, struct kernel_param *kp)
                return rc;
        }
 
-       *(unsigned *)kp->arg = (value) ? 1 : 0;
+       value = (value) ? 1 : 0;
+
+       /*
+        * Hold the api_mutex so this update cannot race with LNet
+        * startup/shutdown and the ln_state check below stays valid.
+        */
+       mutex_lock(&the_lnet.ln_api_mutex);
+
+       if (value == *discovery) {
+               mutex_unlock(&the_lnet.ln_api_mutex);
+               return 0;
+       }
+
+       *discovery = value;
+
+       if (the_lnet.ln_state != LNET_STATE_RUNNING) {
+               mutex_unlock(&the_lnet.ln_api_mutex);
+               return 0;
+       }
+
+       /* tell peers that the discovery setting has changed */
+       lnet_net_lock(LNET_LOCK_EX);
+       pbuf = the_lnet.ln_ping_target;
+       if (value)
+               pbuf->pb_info.pi_features &= ~LNET_PING_FEAT_DISCOVERY;
+       else
+               pbuf->pb_info.pi_features |= LNET_PING_FEAT_DISCOVERY;
+       lnet_net_unlock(LNET_LOCK_EX);
+
+       lnet_push_update_to_peers(1);
+
+       mutex_unlock(&the_lnet.ln_api_mutex);
 
        return 0;
 }
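
The setter above is presumably registered as the module parameter behind lnet_peer_discovery_disabled (referenced in lnet_ping_target_update() below). The registration is not part of this hunk; a minimal sketch of what that wiring would roughly look like follows, with the getter choice, permissions, and the kernel_param constness compat handling all assumed rather than taken from the patch:

/* Sketch only: module parameter wiring inferred from discovery_set() above;
 * compat handling for const struct kernel_param is omitted. */
static unsigned int lnet_peer_discovery_disabled;

module_param_call(lnet_peer_discovery_disabled, discovery_set, param_get_uint,
		  &lnet_peer_discovery_disabled, 0644);
MODULE_PARM_DESC(lnet_peer_discovery_disabled,
		 "Set to 1 to disable peer discovery on this node");

With wiring of this kind, writing to /sys/module/lnet/parameters/lnet_peer_discovery_disabled at runtime goes through discovery_set(), and while LNet is running it flips the LNET_PING_FEAT_DISCOVERY bit in the ping target and pushes the change to peers.
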
@@ -169,7 +212,6 @@ lnet_init_locks(void)
        init_waitqueue_head(&the_lnet.ln_eq_waitq);
        init_waitqueue_head(&the_lnet.ln_rc_waitq);
        mutex_init(&the_lnet.ln_lnd_mutex);
-       mutex_init(&the_lnet.ln_api_mutex);
 }
 
 static void
@@ -700,6 +742,10 @@ lnet_prepare(lnet_pid_t requested_pid)
        INIT_LIST_HEAD(&the_lnet.ln_routers);
        INIT_LIST_HEAD(&the_lnet.ln_drop_rules);
        INIT_LIST_HEAD(&the_lnet.ln_delay_rules);
+       INIT_LIST_HEAD(&the_lnet.ln_dc_request);
+       INIT_LIST_HEAD(&the_lnet.ln_dc_working);
+       INIT_LIST_HEAD(&the_lnet.ln_dc_expired);
+       init_waitqueue_head(&the_lnet.ln_dc_waitq);
 
        rc = lnet_descriptor_setup();
        if (rc != 0)
@@ -1056,7 +1102,8 @@ lnet_ping_target_create(int nnis)
        pbuf->pb_info.pi_nnis = nnis;
        pbuf->pb_info.pi_pid = the_lnet.ln_pid;
        pbuf->pb_info.pi_magic = LNET_PROTO_PING_MAGIC;
-       pbuf->pb_info.pi_features = LNET_PING_FEAT_NI_STATUS;
+       pbuf->pb_info.pi_features =
+               LNET_PING_FEAT_NI_STATUS | LNET_PING_FEAT_MULTI_RAIL;
 
        return pbuf;
 }
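
As a small illustration of how the advertised bits are meant to be consumed, a hypothetical helper (not part of this patch) that a reader of struct lnet_ping_info might use:

/* Hypothetical helper, illustration only: a peer that returns this ping info
 * is treated as Multi-Rail capable and willing to be discovered only if it
 * advertises both feature bits set up in lnet_ping_target_create() above. */
static bool
ping_info_is_discoverable(struct lnet_ping_info *pi)
{
	return (pi->pi_features & LNET_PING_FEAT_MULTI_RAIL) != 0 &&
	       (pi->pi_features & LNET_PING_FEAT_DISCOVERY) != 0;
}
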
@@ -1297,6 +1344,8 @@ lnet_ping_target_update(struct lnet_ping_buffer *pbuf,
 
        if (!the_lnet.ln_routing)
                pbuf->pb_info.pi_features |= LNET_PING_FEAT_RTE_DISABLED;
+       if (!lnet_peer_discovery_disabled)
+               pbuf->pb_info.pi_features |= LNET_PING_FEAT_DISCOVERY;
 
        /* Ensure only known feature bits have been set. */
        LASSERT(pbuf->pb_info.pi_features & LNET_PING_FEAT_BITS);
@@ -1318,6 +1367,8 @@ lnet_ping_target_update(struct lnet_ping_buffer *pbuf,
                lnet_ping_md_unlink(old_pbuf, &old_ping_md);
                lnet_ping_buffer_decref(old_pbuf);
        }
+
+       lnet_push_update_to_peers(0);
 }
 
 static void
@@ -1419,6 +1470,7 @@ static void lnet_push_target_event_handler(struct lnet_event *ev)
        if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
                lnet_swap_pinginfo(pbuf);
 
+       lnet_peer_push_event(ev);
        if (ev->unlinked)
                lnet_ping_buffer_decref(pbuf);
 }
@@ -1982,8 +2034,6 @@ int lnet_lib_init(void)
 
        lnet_assert_wire_constants();
 
-       memset(&the_lnet, 0, sizeof(the_lnet));
-
        /* refer to global cfs_cpt_table for now */
        the_lnet.ln_cpt_table   = cfs_cpt_table;
        the_lnet.ln_cpt_number  = cfs_cpt_number(cfs_cpt_table);
@@ -2281,8 +2331,12 @@ lnet_fill_ni_info(struct lnet_ni *ni, struct lnet_ioctl_config_ni *cfg_ni,
        memcpy(&tun->lt_cmn, &ni->ni_net->net_tunables, sizeof(tun->lt_cmn));
 
        if (stats) {
-               stats->iel_send_count = atomic_read(&ni->ni_stats.send_count);
-               stats->iel_recv_count = atomic_read(&ni->ni_stats.recv_count);
+               stats->iel_send_count = lnet_sum_stats(&ni->ni_stats,
+                                                      LNET_STATS_TYPE_SEND);
+               stats->iel_recv_count = lnet_sum_stats(&ni->ni_stats,
+                                                      LNET_STATS_TYPE_RECV);
+               stats->iel_drop_count = lnet_sum_stats(&ni->ni_stats,
+                                                      LNET_STATS_TYPE_DROP);
        }
 
        /*
@@ -2509,6 +2563,29 @@ lnet_get_ni_config(struct lnet_ioctl_config_ni *cfg_ni,
        return rc;
 }
 
+int lnet_get_ni_stats(struct lnet_ioctl_element_msg_stats *msg_stats)
+{
+       struct lnet_ni *ni;
+       int cpt;
+       int rc = -ENOENT;
+
+       if (!msg_stats)
+               return -EINVAL;
+
+       cpt = lnet_net_lock_current();
+
+       ni = lnet_get_ni_idx_locked(msg_stats->im_idx);
+
+       if (ni) {
+               lnet_usr_translate_stats(msg_stats, &ni->ni_stats);
+               rc = 0;
+       }
+
+       lnet_net_unlock(cpt);
+
+       return rc;
+}
+
 static int lnet_add_net_common(struct lnet_net *net,
                               struct lnet_ioctl_config_lnd_tunables *tun)
 {
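
lnet_sum_stats() and lnet_usr_translate_stats(), used in lnet_fill_ni_info() and lnet_get_ni_stats() above, are introduced elsewhere in this series. Conceptually the former folds the per-message-type counters of one direction into a single total. A rough sketch of that idea follows; the structure layout and field names below are assumptions for illustration, not the actual definitions:

/* Illustrative sketch only; the real helpers live elsewhere in this series
 * and the lnet_element_stats/lnet_comm_count layout below is assumed. */
static __u32
sum_stats_sketch(struct lnet_element_stats *stats, enum lnet_stats_type type)
{
	struct lnet_comm_count *c;

	switch (type) {
	case LNET_STATS_TYPE_SEND:
		c = &stats->el_send_stats;
		break;
	case LNET_STATS_TYPE_RECV:
		c = &stats->el_recv_stats;
		break;
	case LNET_STATS_TYPE_DROP:
		c = &stats->el_drop_stats;
		break;
	default:
		return 0;
	}

	/* one counter per LNet message type, summed per direction */
	return atomic_read(&c->co_get_count) +
	       atomic_read(&c->co_put_count) +
	       atomic_read(&c->co_reply_count) +
	       atomic_read(&c->co_ack_count) +
	       atomic_read(&c->co_hello_count);
}
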
@@ -2976,9 +3053,10 @@ LNetCtl(unsigned int cmd, void *arg)
                __u32 tun_size;
 
                cfg_ni = arg;
+
                /* get the tunables if they are available */
                if (cfg_ni->lic_cfg_hdr.ioc_len <
-                   sizeof(*cfg_ni) + sizeof(*stats)+ sizeof(*tun))
+                   sizeof(*cfg_ni) + sizeof(*stats) + sizeof(*tun))
                        return -EINVAL;
 
                stats = (struct lnet_ioctl_element_stats *)
@@ -2995,6 +3073,19 @@ LNetCtl(unsigned int cmd, void *arg)
                return rc;
        }
 
+       case IOC_LIBCFS_GET_LOCAL_NI_MSG_STATS: {
+               struct lnet_ioctl_element_msg_stats *msg_stats = arg;
+
+               if (msg_stats->im_hdr.ioc_len != sizeof(*msg_stats))
+                       return -EINVAL;
+
+               mutex_lock(&the_lnet.ln_api_mutex);
+               rc = lnet_get_ni_stats(msg_stats);
+               mutex_unlock(&the_lnet.ln_api_mutex);
+
+               return rc;
+       }
+
        case IOC_LIBCFS_GET_NET: {
                size_t total = sizeof(*config) +
                               sizeof(struct lnet_ioctl_net_config);
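
A minimal userspace sketch of driving the new IOC_LIBCFS_GET_LOCAL_NI_MSG_STATS handler above, in the style of the existing lnetctl ioctl calls. The /dev/lnet node and the LIBCFS_IOC_INIT_V2() initializer follow the usual libcfs conventions; the include paths are assumptions, error handling is simplified, and only fields visible in this patch (im_hdr, im_idx) are touched:

/* Userspace sketch, not part of this patch; include paths are assumed. */
#include <errno.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <libcfs/util/ioctl.h>		/* LIBCFS_IOC_INIT_V2() */
#include <lnet/lnet-dlc.h>		/* lnet_ioctl_element_msg_stats */

/* Fetch the message statistics of the local NI at index 'idx'. */
static int get_local_ni_msg_stats(int idx,
				  struct lnet_ioctl_element_msg_stats *stats)
{
	int fd, rc;

	LIBCFS_IOC_INIT_V2(*stats, im_hdr);
	stats->im_idx = idx;

	fd = open("/dev/lnet", O_RDWR);
	if (fd < 0)
		return -errno;

	/* On success the kernel fills in the per-type send/recv/drop
	 * counters via lnet_usr_translate_stats() above. */
	rc = ioctl(fd, IOC_LIBCFS_GET_LOCAL_NI_MSG_STATS, stats);
	close(fd);

	return rc < 0 ? -errno : 0;
}
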
@@ -3142,21 +3233,30 @@ LNetCtl(unsigned int cmd, void *arg)
 
        case IOC_LIBCFS_GET_PEER_NI: {
                struct lnet_ioctl_peer_cfg *cfg = arg;
-               struct lnet_peer_ni_credit_info __user *lpni_cri;
-               struct lnet_ioctl_element_stats __user *lpni_stats;
-               size_t usr_size = sizeof(*lpni_cri) + sizeof(*lpni_stats);
 
-               if ((cfg->prcfg_hdr.ioc_len != sizeof(*cfg)) ||
-                   (cfg->prcfg_size != usr_size))
+               if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
                        return -EINVAL;
 
-               lpni_cri = cfg->prcfg_bulk;
-               lpni_stats = cfg->prcfg_bulk + sizeof(*lpni_cri);
+               mutex_lock(&the_lnet.ln_api_mutex);
+               rc = lnet_get_peer_info(&cfg->prcfg_prim_nid,
+                                       &cfg->prcfg_cfg_nid,
+                                       &cfg->prcfg_count,
+                                       &cfg->prcfg_mr,
+                                       &cfg->prcfg_size,
+                                       (void __user *)cfg->prcfg_bulk);
+               mutex_unlock(&the_lnet.ln_api_mutex);
+               return rc;
+       }
+
+       case IOC_LIBCFS_GET_PEER_LIST: {
+               struct lnet_ioctl_peer_cfg *cfg = arg;
+
+               if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
+                       return -EINVAL;
 
                mutex_lock(&the_lnet.ln_api_mutex);
-               rc = lnet_get_peer_info(cfg->prcfg_count, &cfg->prcfg_prim_nid,
-                                       &cfg->prcfg_cfg_nid, &cfg->prcfg_mr,
-                                       lpni_cri, lpni_stats);
+               rc = lnet_get_peer_list(&cfg->prcfg_count, &cfg->prcfg_size,
+                               (struct lnet_process_id __user *)cfg->prcfg_bulk);
                mutex_unlock(&the_lnet.ln_api_mutex);
                return rc;
        }
@@ -3194,24 +3294,69 @@ LNetCtl(unsigned int cmd, void *arg)
                id.nid = data->ioc_nid;
                id.pid = data->ioc_u32[0];
 
-               /* Don't block longer than 2 minutes */
-               if (data->ioc_u32[1] > 120 * MSEC_PER_SEC)
-                       return -EINVAL;
-
-               /* If timestamp is negative then disable timeout */
-               if ((s32)data->ioc_u32[1] < 0)
-                       timeout = MAX_SCHEDULE_TIMEOUT;
+               /* If the timeout is zero, negative or too large, fall back
+                * to the default of DEFAULT_PEER_TIMEOUT (3 minutes). */
+               if (((s32)data->ioc_u32[1] <= 0) ||
+                   data->ioc_u32[1] > (DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC))
+                       timeout = msecs_to_jiffies(DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC);
                else
                        timeout = msecs_to_jiffies(data->ioc_u32[1]);
 
                rc = lnet_ping(id, timeout, data->ioc_pbuf1,
                               data->ioc_plen1 / sizeof(struct lnet_process_id));
+
                if (rc < 0)
                        return rc;
+
                data->ioc_count = rc;
                return 0;
        }
 
+       case IOC_LIBCFS_PING_PEER: {
+               struct lnet_ioctl_ping_data *ping = arg;
+               struct lnet_peer *lp;
+               signed long timeout;
+
+               /* If the timeout is zero, negative or too large, fall back
+                * to the default of DEFAULT_PEER_TIMEOUT (3 minutes). */
+               if (((s32)ping->op_param) <= 0 ||
+                   ping->op_param > (DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC))
+                       timeout = msecs_to_jiffies(DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC);
+               else
+                       timeout = msecs_to_jiffies(ping->op_param);
+
+               rc = lnet_ping(ping->ping_id, timeout,
+                              ping->ping_buf,
+                              ping->ping_count);
+               if (rc < 0)
+                       return rc;
+
+               lp = lnet_find_peer(ping->ping_id.nid);
+               if (lp) {
+                       ping->ping_id.nid = lp->lp_primary_nid;
+                       ping->mr_info = lnet_peer_is_multi_rail(lp);
+               }
+               ping->ping_count = rc;
+               return 0;
+       }
+
+       case IOC_LIBCFS_DISCOVER: {
+               struct lnet_ioctl_ping_data *discover = arg;
+               struct lnet_peer *lp;
+
+               rc = lnet_discover(discover->ping_id, discover->op_param,
+                                  discover->ping_buf,
+                                  discover->ping_count);
+               if (rc < 0)
+                       return rc;
+               lp = lnet_find_peer(discover->ping_id.nid);
+               if (lp) {
+                       discover->ping_id.nid = lp->lp_primary_nid;
+                       discover->mr_info = lnet_peer_is_multi_rail(lp);
+               }
+
+               discover->ping_count = rc;
+               return 0;
+       }
+
        default:
                ni = lnet_net2ni_addref(data->ioc_net);
                if (ni == NULL)
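
The timeout clamping above is now duplicated between the IOC_LIBCFS_PING and IOC_LIBCFS_PING_PEER cases; a small helper along these lines (hypothetical, not in the patch) would keep the two in sync:

/* Hypothetical helper: clamp a user-supplied timeout in milliseconds to
 * (0, DEFAULT_PEER_TIMEOUT] and convert it to jiffies, falling back to the
 * default of DEFAULT_PEER_TIMEOUT (3 minutes) otherwise. */
static signed long
ping_timeout_jiffies(s32 timeout_ms)
{
	if (timeout_ms <= 0 ||
	    timeout_ms > DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC)
		timeout_ms = DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC;

	return msecs_to_jiffies(timeout_ms);
}

Both callers would then reduce to timeout = ping_timeout_jiffies(data->ioc_u32[1]) and timeout = ping_timeout_jiffies(ping->op_param) respectively.
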
@@ -3344,7 +3489,7 @@ static int lnet_ping(struct lnet_process_id id, signed long timeout,
        /* initialize md content */
        md.start     = &pbuf->pb_info;
        md.length    = LNET_PING_INFO_SIZE(n_ids);
-       md.threshold = 2; /*GET/REPLY*/
+       md.threshold = 2; /* GET/REPLY */
        md.max_size  = 0;
        md.options   = LNET_MD_TRUNCATE;
        md.user_ptr  = NULL;
@@ -3362,7 +3507,6 @@ static int lnet_ping(struct lnet_process_id id, signed long timeout,
 
        if (rc != 0) {
                /* Don't CERROR; this could be deliberate! */
-
                rc2 = LNetMDUnlink(mdh);
                LASSERT(rc2 == 0);
 
@@ -3410,7 +3554,6 @@ static int lnet_ping(struct lnet_process_id id, signed long timeout,
                        replied = 1;
                        rc = event.mlength;
                }
-
        } while (rc2 <= 0 || !event.unlinked);
 
        if (!replied) {
@@ -3424,10 +3567,9 @@ static int lnet_ping(struct lnet_process_id id, signed long timeout,
        nob = rc;
        LASSERT(nob >= 0 && nob <= LNET_PING_INFO_SIZE(n_ids));
 
-       rc = -EPROTO;                           /* if I can't parse... */
+       rc = -EPROTO;           /* if I can't parse... */
 
        if (nob < 8) {
-               /* can't check magic/version */
                CERROR("%s: ping info too short %d\n",
                       libcfs_id2str(id), nob);
                goto fail_free_eq;
@@ -3448,7 +3590,8 @@ static int lnet_ping(struct lnet_process_id id, signed long timeout,
        }
 
        if (nob < LNET_PING_INFO_SIZE(0)) {
-               CERROR("%s: Short reply %d(%d min)\n", libcfs_id2str(id),
+               CERROR("%s: Short reply %d(%d min)\n",
+                      libcfs_id2str(id),
                       nob, (int)LNET_PING_INFO_SIZE(0));
                goto fail_free_eq;
        }
@@ -3457,12 +3600,13 @@ static int lnet_ping(struct lnet_process_id id, signed long timeout,
                n_ids = pbuf->pb_info.pi_nnis;
 
        if (nob < LNET_PING_INFO_SIZE(n_ids)) {
-               CERROR("%s: Short reply %d(%d expected)\n", libcfs_id2str(id),
+               CERROR("%s: Short reply %d(%d expected)\n",
+                      libcfs_id2str(id),
                       nob, (int)LNET_PING_INFO_SIZE(n_ids));
                goto fail_free_eq;
        }
 
-       rc = -EFAULT;                           /* If I SEGV... */
+       rc = -EFAULT;           /* if I segv in copy_to_user()... */
 
        memset(&tmpid, 0, sizeof(tmpid));
        for (i = 0; i < n_ids; i++) {
@@ -3483,3 +3627,80 @@ static int lnet_ping(struct lnet_process_id id, signed long timeout,
        lnet_ping_buffer_decref(pbuf);
        return rc;
 }
+
+static int
+lnet_discover(struct lnet_process_id id, __u32 force,
+             struct lnet_process_id __user *ids, int n_ids)
+{
+       struct lnet_peer_ni *lpni;
+       struct lnet_peer_ni *p;
+       struct lnet_peer *lp;
+       struct lnet_process_id *buf;
+       size_t buf_nob;
+       int cpt;
+       int i;
+       int rc;
+       int max_intf = lnet_interfaces_max;
+
+       if (n_ids <= 0 ||
+           id.nid == LNET_NID_ANY ||
+           n_ids > max_intf)
+               return -EINVAL;
+
+       if (id.pid == LNET_PID_ANY)
+               id.pid = LNET_PID_LUSTRE;
+
+       buf_nob = n_ids * sizeof(*buf);
+       LIBCFS_ALLOC(buf, buf_nob);
+       if (!buf)
+               return -ENOMEM;
+
+       cpt = lnet_net_lock_current();
+       lpni = lnet_nid2peerni_locked(id.nid, LNET_NID_ANY, cpt);
+       if (IS_ERR(lpni)) {
+               rc = PTR_ERR(lpni);
+               goto out;
+       }
+
+       /*
+        * Clearing the NIDS_UPTODATE flag ensures the peer will
+        * be discovered, provided discovery has not been disabled.
+        */
+       lp = lpni->lpni_peer_net->lpn_peer;
+       spin_lock(&lp->lp_lock);
+       lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
+       /* If the force flag is set, force a PING and PUSH as well. */
+       if (force)
+               lp->lp_state |= LNET_PEER_FORCE_PING | LNET_PEER_FORCE_PUSH;
+       spin_unlock(&lp->lp_lock);
+       rc = lnet_discover_peer_locked(lpni, cpt, true);
+       if (rc)
+               goto out_decref;
+
+       /* Peer may have changed. */
+       lp = lpni->lpni_peer_net->lpn_peer;
+       if (lp->lp_nnis < n_ids)
+               n_ids = lp->lp_nnis;
+
+       i = 0;
+       p = NULL;
+       while ((p = lnet_get_next_peer_ni_locked(lp, NULL, p)) != NULL) {
+               buf[i].pid = id.pid;
+               buf[i].nid = p->lpni_nid;
+               if (++i >= n_ids)
+                       break;
+       }
+
+       lnet_net_unlock(cpt);
+
+       rc = -EFAULT;
+       if (copy_to_user(ids, buf, n_ids * sizeof(*buf)))
+               goto out_relock;
+       rc = n_ids;
+out_relock:
+       lnet_net_lock(cpt);
+out_decref:
+       lnet_peer_ni_decref_locked(lpni);
+out:
+       lnet_net_unlock(cpt);
+
+       /* free with the size recorded at allocation; n_ids may have shrunk */
+       LIBCFS_FREE(buf, buf_nob);
+
+       return rc;
+}
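
For completeness, a userspace sketch of exercising lnet_discover() through IOC_LIBCFS_DISCOVER. Only fields that appear in this patch (ping_id, op_param, ping_buf, ping_count, mr_info) are used; the ping_hdr member name, the include paths and the /dev/lnet node are assumptions in line with the other libcfs ioctls:

/* Userspace sketch, not part of this patch; header/struct details assumed. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <libcfs/util/ioctl.h>
#include <lnet/lnet-dlc.h>

/* Discover the peer owning 'id', forcing a fresh PING/PUSH if 'force'. */
static int discover_peer(struct lnet_process_id id, __u32 force)
{
	struct lnet_process_id nids[16];	/* room for up to 16 NIDs */
	struct lnet_ioctl_ping_data data;
	unsigned int i;
	int fd, rc;

	LIBCFS_IOC_INIT_V2(data, ping_hdr);	/* header member name assumed */
	data.ping_id	= id;
	data.op_param	= force;
	data.ping_buf	= nids;
	data.ping_count	= sizeof(nids) / sizeof(nids[0]);

	fd = open("/dev/lnet", O_RDWR);
	if (fd < 0)
		return -errno;

	rc = ioctl(fd, IOC_LIBCFS_DISCOVER, &data);
	close(fd);
	if (rc < 0)
		return -errno;

	/* On return ping_id.nid holds the primary NID, mr_info the Multi-Rail
	 * capability, and ping_count the number of NIDs written to ping_buf. */
	printf("primary %#llx MR %u NIDs %u\n",
	       (unsigned long long)data.ping_id.nid, data.mr_info,
	       data.ping_count);
	for (i = 0; i < data.ping_count; i++)
		printf("  nid %#llx\n", (unsigned long long)nids[i].nid);

	return 0;
}
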