MODULE_PARM_DESC(use_tcp_bonding,
"Set to 1 to use socklnd bonding. 0 to use Multi-Rail");
+/* NUMA range considered during Multi-Rail NI selection; 0444 exposes it
+ * read-only under /sys/module, and IOC_LIBCFS_SET_NUMA_RANGE updates it
+ * under ln_api_mutex.
+ */
+static __u32 lnet_numa_range = 0;
+/* 'uint' matches the __u32 declaration above; 'int' would trip the kernel's
+ * param_check_int() pointer-type check (incompatible __u32 * vs int *).
+ */
+module_param(lnet_numa_range, uint, 0444);
+MODULE_PARM_DESC(lnet_numa_range,
+ "NUMA range to consider during Multi-Rail selection");
+
/*
* This sequence number keeps track of how many times DLC was used to
* update the configuration. It is incremented on any DLC update and
for (i = 0; i < the_lnet.ln_nportals; i++)
lnet_clear_lazy_portal(ni, i, "Shutting down NI");
- /* Do peer table cleanup for this ni */
- lnet_peer_tables_cleanup(ni);
-
lnet_net_lock(LNET_LOCK_EX);
lnet_clear_zombies_nis_locked(net);
lnet_net_unlock(LNET_LOCK_EX);
lnet_net_lock(LNET_LOCK_EX);
}
+ lnet_net_unlock(LNET_LOCK_EX);
+
+ /* Do peer table cleanup for this net */
+ lnet_peer_tables_cleanup(net);
+
+ lnet_net_lock(LNET_LOCK_EX);
/*
* decrement ref count on lnd only when the entire network goes
* away
tq->tq_credits = lnet_ni_tq_credits(ni);
}
+ atomic_set(&ni->ni_tx_credits,
+ lnet_ni_tq_credits(ni) * ni->ni_ncpts);
+
CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n",
libcfs_nid2str(ni->ni_nid),
ni->ni_net->net_tunables.lct_peer_tx_credits,
static void
lnet_fill_ni_info(struct lnet_ni *ni, struct lnet_ioctl_config_ni *cfg_ni,
struct lnet_ioctl_config_lnd_tunables *tun,
+ struct lnet_ioctl_element_stats *stats,
__u32 tun_size)
{
size_t min_size = 0;
cfg_ni->lic_nid = ni->ni_nid;
cfg_ni->lic_status = ni->ni_status->ns_status;
cfg_ni->lic_tcp_bonding = use_tcp_bonding;
+ cfg_ni->lic_dev_cpt = ni->dev_cpt;
memcpy(&tun->lt_cmn, &ni->ni_net->net_tunables, sizeof(tun->lt_cmn));
+ if (stats) {
+ stats->send_count = atomic_read(&ni->ni_stats.send_count);
+ stats->recv_count = atomic_read(&ni->ni_stats.recv_count);
+ }
+
/*
* tun->lt_tun will always be present, but in order to be
* backwards compatible, we need to deal with the cases when
int
lnet_get_ni_config(struct lnet_ioctl_config_ni *cfg_ni,
struct lnet_ioctl_config_lnd_tunables *tun,
+ struct lnet_ioctl_element_stats *stats,
__u32 tun_size)
{
struct lnet_ni *ni;
int cpt;
int rc = -ENOENT;
- if (!cfg_ni || !tun)
+ if (!cfg_ni || !tun || !stats)
return -EINVAL;
cpt = lnet_net_lock_current();
if (ni) {
rc = 0;
lnet_ni_lock(ni);
- lnet_fill_ni_info(ni, cfg_ni, tun, tun_size);
+ lnet_fill_ni_info(ni, cfg_ni, tun, stats, tun_size);
lnet_ni_unlock(ni);
}
return atomic_read(&lnet_dlc_seq_no);
}
+/**
+ * lnet_get_numa_range - accessor for the lnet_numa_range tunable.
+ *
+ * NOTE(review): dropped the bare 'inline' — a non-static inline in a .c
+ * file does not reliably provide an out-of-line definition for callers in
+ * other translation units; confirm against the prototype in the header.
+ *
+ * Return: current NUMA range used during Multi-Rail selection.
+ */
+__u32 lnet_get_numa_range(void)
+{
+ return lnet_numa_range;
+}
+
/**
* LNet ioctl handler.
*
if (config->cfg_hdr.ioc_len < sizeof(*config))
return -EINVAL;
- return lnet_get_route(config->cfg_count,
- &config->cfg_net,
- &config->cfg_config_u.cfg_route.rtr_hop,
- &config->cfg_nid,
- &config->cfg_config_u.cfg_route.rtr_flags,
- &config->cfg_config_u.cfg_route.
+ mutex_lock(&the_lnet.ln_api_mutex);
+ rc = lnet_get_route(config->cfg_count,
+ &config->cfg_net,
+ &config->cfg_config_u.cfg_route.rtr_hop,
+ &config->cfg_nid,
+ &config->cfg_config_u.cfg_route.rtr_flags,
+ &config->cfg_config_u.cfg_route.
rtr_priority);
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return rc;
case IOC_LIBCFS_GET_LOCAL_NI: {
struct lnet_ioctl_config_ni *cfg_ni;
struct lnet_ioctl_config_lnd_tunables *tun = NULL;
+ struct lnet_ioctl_element_stats *stats;
__u32 tun_size;
cfg_ni = arg;
/* get the tunables if they are available */
if (cfg_ni->lic_cfg_hdr.ioc_len <
- sizeof(*cfg_ni) + sizeof(*tun))
+ sizeof(*cfg_ni) + sizeof(*stats)+ sizeof(*tun))
return -EINVAL;
+ stats = (struct lnet_ioctl_element_stats *)
+ cfg_ni->lic_bulk;
tun = (struct lnet_ioctl_config_lnd_tunables *)
- cfg_ni->lic_bulk;
+ (cfg_ni->lic_bulk + sizeof(*stats));
- tun_size = cfg_ni->lic_cfg_hdr.ioc_len - sizeof(*cfg_ni);
+ tun_size = cfg_ni->lic_cfg_hdr.ioc_len - sizeof(*cfg_ni) -
+ sizeof(*stats);
- return lnet_get_ni_config(cfg_ni, tun, tun_size);
+ mutex_lock(&the_lnet.ln_api_mutex);
+ rc = lnet_get_ni_config(cfg_ni, tun, stats, tun_size);
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return rc;
}
case IOC_LIBCFS_GET_NET: {
if (config->cfg_hdr.ioc_len < total)
return -EINVAL;
- return lnet_get_net_config(config);
+ mutex_lock(&the_lnet.ln_api_mutex);
+ rc = lnet_get_net_config(config);
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return rc;
}
case IOC_LIBCFS_GET_LNET_STATS:
if (lnet_stats->st_hdr.ioc_len < sizeof(*lnet_stats))
return -EINVAL;
+ mutex_lock(&the_lnet.ln_api_mutex);
lnet_counters_get(&lnet_stats->st_cntrs);
+ mutex_unlock(&the_lnet.ln_api_mutex);
return 0;
}
mutex_unlock(&the_lnet.ln_api_mutex);
return rc;
+ case IOC_LIBCFS_SET_NUMA_RANGE: {
+ struct lnet_ioctl_numa_range *numa;
+ numa = arg;
+ if (numa->nr_hdr.ioc_len != sizeof(*numa))
+ return -EINVAL;
+ /* taken for consistency with the other DLC configuration updates */
+ mutex_lock(&the_lnet.ln_api_mutex);
+ lnet_numa_range = numa->nr_range;
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return 0;
+ }
+
+ case IOC_LIBCFS_GET_NUMA_RANGE: {
+ struct lnet_ioctl_numa_range *numa;
+ numa = arg;
+ if (numa->nr_hdr.ioc_len != sizeof(*numa))
+ return -EINVAL;
+ /* take ln_api_mutex to match the SET path and the other GET handlers,
+ * so a concurrent SET cannot race this read
+ */
+ mutex_lock(&the_lnet.ln_api_mutex);
+ numa->nr_range = lnet_numa_range;
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return 0;
+ }
+
case IOC_LIBCFS_GET_BUF: {
struct lnet_ioctl_pool_cfg *pool_cfg;
size_t total = sizeof(*config) + sizeof(*pool_cfg);
return -EINVAL;
pool_cfg = (struct lnet_ioctl_pool_cfg *)config->cfg_bulk;
- return lnet_get_rtr_pool_cfg(config->cfg_count, pool_cfg);
+
+ mutex_lock(&the_lnet.ln_api_mutex);
+ rc = lnet_get_rtr_pool_cfg(config->cfg_count, pool_cfg);
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return rc;
}
case IOC_LIBCFS_ADD_PEER_NI: {
if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
return -EINVAL;
- return lnet_add_peer_ni_to_peer(cfg->prcfg_key_nid,
- cfg->prcfg_cfg_nid);
+ mutex_lock(&the_lnet.ln_api_mutex);
+ lnet_incr_dlc_seq();
+ rc = lnet_add_peer_ni_to_peer(cfg->prcfg_key_nid,
+ cfg->prcfg_cfg_nid,
+ cfg->prcfg_mr);
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return rc;
}
case IOC_LIBCFS_DEL_PEER_NI: {
if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
return -EINVAL;
- return lnet_del_peer_ni_from_peer(cfg->prcfg_key_nid,
- cfg->prcfg_cfg_nid);
+ mutex_lock(&the_lnet.ln_api_mutex);
+ lnet_incr_dlc_seq();
+ rc = lnet_del_peer_ni_from_peer(cfg->prcfg_key_nid,
+ cfg->prcfg_cfg_nid);
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return rc;
}
case IOC_LIBCFS_GET_PEER_INFO: {
if (peer_info->pr_hdr.ioc_len < sizeof(*peer_info))
return -EINVAL;
- return lnet_get_peer_ni_info(
+ mutex_lock(&the_lnet.ln_api_mutex);
+ rc = lnet_get_peer_ni_info(
peer_info->pr_count,
&peer_info->pr_nid,
peer_info->pr_lnd_u.pr_peer_credits.cr_aliveness,
&peer_info->pr_lnd_u.pr_peer_credits.cr_peer_rtr_credits,
&peer_info->pr_lnd_u.pr_peer_credits.cr_peer_min_rtr_credits,
&peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_qnob);
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return rc;
}
case IOC_LIBCFS_GET_PEER_NI: {
struct lnet_ioctl_peer_cfg *cfg = arg;
struct lnet_peer_ni_credit_info *lpni_cri;
- size_t total = sizeof(*cfg) + sizeof(*lpni_cri);
+ struct lnet_ioctl_element_stats *lpni_stats;
+ size_t total = sizeof(*cfg) + sizeof(*lpni_cri) +
+ sizeof(*lpni_stats);
if (cfg->prcfg_hdr.ioc_len < total)
return -EINVAL;
lpni_cri = (struct lnet_peer_ni_credit_info*) cfg->prcfg_bulk;
+ lpni_stats = (struct lnet_ioctl_element_stats *)
+ (cfg->prcfg_bulk + sizeof(*lpni_cri));
- return lnet_get_peer_info(cfg->prcfg_idx, &cfg->prcfg_key_nid,
- &cfg->prcfg_cfg_nid, lpni_cri);
+ mutex_lock(&the_lnet.ln_api_mutex);
+ rc = lnet_get_peer_info(cfg->prcfg_idx, &cfg->prcfg_key_nid,
+ &cfg->prcfg_cfg_nid, &cfg->prcfg_mr,
+ lpni_cri, lpni_stats);
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return rc;
}
case IOC_LIBCFS_NOTIFY_ROUTER: {