MODULE_PARM_DESC(use_tcp_bonding,
"Set to 1 to use socklnd bonding. 0 to use Multi-Rail");
+/*
+ * NUMA range used by the Multi-Rail selection algorithm; read-only
+ * via sysfs (0444), settable at runtime via IOC_LIBCFS_SET_NUMA_RANGE.
+ * The variable is __u32, so the module_param() type must be "uint":
+ * "int" would fail the kernel's compile-time param_check_int() test.
+ */
+static __u32 lnet_numa_range = 0;
+module_param(lnet_numa_range, uint, 0444);
+MODULE_PARM_DESC(lnet_numa_range,
+ "NUMA range to consider during Multi-Rail selection");
+
+/*
+ * This sequence number keeps track of how many times DLC was used to
+ * update the local NIs. It is incremented when a NI is added or
+ * removed and checked when sending a message to determine if there is
+ * a need to re-run the selection algorithm. See lnet_select_pathway()
+ * for more details on its usage.
+ */
+static atomic_t lnet_dlc_seq_no = ATOMIC_INIT(0);
+
static int lnet_ping(lnet_process_id_t id, signed long timeout,
lnet_process_id_t __user *ids, int n_ids);
the_lnet.ln_pid = requested_pid;
INIT_LIST_HEAD(&the_lnet.ln_test_peers);
+ INIT_LIST_HEAD(&the_lnet.ln_peers);
+ INIT_LIST_HEAD(&the_lnet.ln_remote_peer_ni_list);
INIT_LIST_HEAD(&the_lnet.ln_nets);
INIT_LIST_HEAD(&the_lnet.ln_routers);
INIT_LIST_HEAD(&the_lnet.ln_drop_rules);
lnet_res_container_cleanup(&the_lnet.ln_eq_container);
lnet_msg_containers_destroy();
- lnet_peer_tables_destroy();
+ lnet_peer_uninit();
lnet_rtrpools_free(0);
if (the_lnet.ln_counters != NULL) {
}
lnet_ni_t *
-lnet_net2ni(__u32 net)
+lnet_net2ni_addref(__u32 net)
{
lnet_ni_t *ni;
lnet_net_lock(0);
ni = lnet_net2ni_locked(net, 0);
+ /*
+  * Renamed to _addref to make the new contract explicit: a reference
+  * is taken on the returned NI; the caller must drop it with
+  * lnet_ni_decref() when done (see the default ioctl path).
+  */
+ if (ni)
+ lnet_ni_addref_locked(ni, 0);
lnet_net_unlock(0);
return ni;
}
-EXPORT_SYMBOL(lnet_net2ni);
+EXPORT_SYMBOL(lnet_net2ni_addref);
struct lnet_net *
lnet_get_net_locked(__u32 net_id)
return NULL;
}
-static unsigned int
+unsigned int
lnet_nid_cpt_hash(lnet_nid_t nid, unsigned int number)
{
__u64 key = nid;
}
static inline int
+lnet_get_net_ni_count_pre(struct lnet_net *net)
+{
+ struct lnet_ni *ni;
+ int count = 0;
+
+ /*
+  * Count NIs still queued on net_ni_added, i.e. configured but not
+  * yet started (see the "hasn't been configured yet" note at the
+  * caller in lnet_add_net_common()).  NOTE(review): no lock is
+  * taken here -- presumably safe because the net is not yet on a
+  * public list; confirm against all callers.
+  */
+ list_for_each_entry(ni, &net->net_ni_added, ni_netlist)
+ count++;
+
+ return count;
+}
+
+static inline int
lnet_get_ni_count(void)
{
struct lnet_ni *ni;
lnet_net_lock(LNET_LOCK_EX);
ni->ni_state = LNET_NI_STATE_DELETING;
lnet_ni_unlink_locked(ni);
+ lnet_incr_dlc_seq();
lnet_net_unlock(LNET_LOCK_EX);
/* clear messages for this NI on the lazy portal */
for (i = 0; i < the_lnet.ln_nportals; i++)
lnet_clear_lazy_portal(ni, i, "Shutting down NI");
- /* Do peer table cleanup for this ni */
- lnet_peer_tables_cleanup(ni);
-
lnet_net_lock(LNET_LOCK_EX);
lnet_clear_zombies_nis_locked(net);
lnet_net_unlock(LNET_LOCK_EX);
lnet_net_lock(LNET_LOCK_EX);
}
+ lnet_net_unlock(LNET_LOCK_EX);
+
+ /* Do peer table cleanup for this net */
+ lnet_peer_tables_cleanup(net);
+
+ lnet_net_lock(LNET_LOCK_EX);
/*
* decrement ref count on lnd only when the entire network goes
* away
tq->tq_credits = lnet_ni_tq_credits(ni);
}
+ atomic_set(&ni->ni_tx_credits,
+ lnet_ni_tq_credits(ni) * ni->ni_ncpts);
+
CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n",
libcfs_nid2str(ni->ni_nid),
ni->ni_net->net_tunables.lct_peer_tx_credits,
lnet_net_lock(LNET_LOCK_EX);
list_splice_tail(&local_ni_list, &net_l->net_ni_list);
+ lnet_incr_dlc_seq();
lnet_net_unlock(LNET_LOCK_EX);
/* if the network is not unique then we don't want to keep
}
EXPORT_SYMBOL(LNetNIFini);
+
+/*
+ * Dispatch a debug task requested through IOC_LIBCFS_DBG.
+ *
+ * \param[in] dbg       selects the task via dbg->dbg_task
+ * \param[in] dbg_info  task-specific payload; currently unused but
+ *                      kept in the signature for future tasks
+ *
+ * \return 0 always; unrecognized tasks are ignored so newer
+ *         userspace tools remain compatible with older kernels.
+ */
+static int lnet_handle_dbg_task(struct lnet_ioctl_dbg *dbg,
+ struct lnet_dbg_task_info *dbg_info)
+{
+ switch (dbg->dbg_task) {
+ case LNET_DBG_INCR_DLC_SEQ:
+  lnet_incr_dlc_seq();
+  break;
+ default:
+  /* deliberately ignore unknown tasks */
+  break;
+ }
+
+ return 0;
+}
/**
* Grabs the ni data from the ni structure and fills the out
* parameters
*
* \param[in] ni network interface structure
- * \param[out] cpt_count the number of cpts the ni is on
- * \param[out] nid Network Interface ID
- * \param[out] peer_timeout NI peer timeout
- * \param[out] peer_tx_crdits NI peer transmit credits
- * \param[out] peer_rtr_credits NI peer router credits
- * \param[out] max_tx_credits NI max transmit credit
- * \param[out] net_config Network configuration
+ * \param[out] cfg_ni NI config information
+ * \param[out] tun network and LND tunables
*/
static void
-lnet_fill_ni_info(struct lnet_ni *ni, struct lnet_ioctl_config_data *config)
+lnet_fill_ni_info(struct lnet_ni *ni, struct lnet_ioctl_config_ni *cfg_ni,
+ struct lnet_ioctl_config_lnd_tunables *tun,
+ struct lnet_ioctl_element_stats *stats,
+ __u32 tun_size)
+{
+ size_t min_size = 0;
+ int i;
+
+ if (!ni || !cfg_ni || !tun)
+ return;
+
+ if (ni->ni_interfaces[0] != NULL) {
+ for (i = 0; i < ARRAY_SIZE(ni->ni_interfaces); i++) {
+ if (ni->ni_interfaces[i] != NULL) {
+ /* strncpy() does not guarantee NUL termination
+  * when the source fills the buffer; terminate
+  * explicitly so userspace never sees an
+  * unterminated interface name */
+ strncpy(cfg_ni->lic_ni_intf[i],
+ ni->ni_interfaces[i],
+ sizeof(cfg_ni->lic_ni_intf[i]) - 1);
+ cfg_ni->lic_ni_intf[i][sizeof(cfg_ni->lic_ni_intf[i]) - 1] = '\0';
+ }
+ }
+ }
+
+ cfg_ni->lic_nid = ni->ni_nid;
+ /* NOTE(review): assumes ni->ni_status is non-NULL for a started NI
+  * -- confirm no caller can reach here before status is set up */
+ cfg_ni->lic_status = ni->ni_status->ns_status;
+ cfg_ni->lic_tcp_bonding = use_tcp_bonding;
+ cfg_ni->lic_dev_cpt = ni->dev_cpt;
+
+ memcpy(&tun->lt_cmn, &ni->ni_net->net_tunables, sizeof(tun->lt_cmn));
+
+ if (stats) {
+ stats->send_count = atomic_read(&ni->ni_stats.send_count);
+ stats->recv_count = atomic_read(&ni->ni_stats.recv_count);
+ }
+
+ /*
+ * tun->lt_tun will always be present, but in order to be
+ * backwards compatible, we need to deal with the cases when
+ * tun->lt_tun is smaller than what the kernel has, because it
+ * comes from an older version of a userspace program, then we'll
+ * need to copy as much information as we have available space.
+ */
+ if (tun_size < sizeof(tun->lt_cmn))
+ return; /* malformed: no room even for the common tunables */
+ min_size = tun_size - sizeof(tun->lt_cmn);
+ /* take the actual minimum: a newer/larger userspace tun must not
+  * cause an over-read past the kernel's ni_lnd_tunables */
+ if (min_size > sizeof(tun->lt_tun))
+ min_size = sizeof(tun->lt_tun);
+ memcpy(&tun->lt_tun, &ni->ni_lnd_tunables, min_size);
+
+ /* copy over the cpts */
+ if (ni->ni_ncpts == LNET_CPT_NUMBER &&
+ ni->ni_cpts == NULL) {
+ for (i = 0; i < ni->ni_ncpts; i++)
+ cfg_ni->lic_cpts[i] = i;
+ } else {
+ for (i = 0;
+ ni->ni_cpts != NULL && i < ni->ni_ncpts &&
+ i < LNET_MAX_SHOW_NUM_CPT;
+ i++)
+ cfg_ni->lic_cpts[i] = ni->ni_cpts[i];
+ }
+ cfg_ni->lic_ncpts = ni->ni_ncpts;
+}
+
+/**
+ * NOTE: This is a legacy function left in the code to be backwards
+ * compatible with older userspace programs. It should eventually be
+ * removed.
+ *
+ * Grabs the ni data from the ni structure and fills the out
+ * parameters
+ *
+ * \param[in] ni network interface structure
+ * \param[out] config config information
+ */
+static void
+lnet_fill_ni_info_legacy(struct lnet_ni *ni,
+ struct lnet_ioctl_config_data *config)
{
struct lnet_ioctl_net_config *net_config;
struct lnet_ioctl_config_lnd_tunables *lnd_cfg = NULL;
if (ni != NULL) {
rc = 0;
lnet_ni_lock(ni);
- lnet_fill_ni_info(ni, config);
+ lnet_fill_ni_info_legacy(ni, config);
lnet_ni_unlock(ni);
}
}
int
-lnet_dyn_add_ni(lnet_pid_t requested_pid, struct lnet_ioctl_config_data *conf)
+lnet_get_ni_config(struct lnet_ioctl_config_ni *cfg_ni,
+ struct lnet_ioctl_config_lnd_tunables *tun,
+ struct lnet_ioctl_element_stats *stats,
+ __u32 tun_size)
{
- char *nets = conf->cfg_config_u.cfg_net.net_intf;
- struct lnet_ping_info *pinfo;
- lnet_handle_md_t md_handle;
- struct lnet_net *net;
- struct list_head net_head;
- int rc;
- lnet_remotenet_t *rnet;
- int net_ni_count;
- int num_acceptor_nets;
- __u32 net_type;
- struct lnet_ioctl_config_lnd_tunables *lnd_tunables = NULL;
-
- INIT_LIST_HEAD(&net_head);
+ struct lnet_ni *ni;
+ int cpt;
+ int rc = -ENOENT;
- if (conf && conf->cfg_hdr.ioc_len > sizeof(*conf))
- lnd_tunables = (struct lnet_ioctl_config_lnd_tunables *)conf->cfg_bulk;
+ if (!cfg_ni || !tun || !stats)
+ return -EINVAL;
- /* Create a net/ni structures for the network string */
- rc = lnet_parse_networks(&net_head, nets, use_tcp_bonding);
- if (rc <= 0)
- return rc == 0 ? -EINVAL : rc;
+ cpt = lnet_net_lock_current();
- mutex_lock(&the_lnet.ln_api_mutex);
+ ni = lnet_get_ni_idx_locked(cfg_ni->lic_idx);
- if (rc > 1) {
- rc = -EINVAL; /* only add one network per call */
- goto failed0;
+ if (ni) {
+ rc = 0;
+ lnet_ni_lock(ni);
+ lnet_fill_ni_info(ni, cfg_ni, tun, stats, tun_size);
+ lnet_ni_unlock(ni);
}
- net = list_entry(net_head.next, struct lnet_net, net_list);
+ lnet_net_unlock(cpt);
+ return rc;
+}
+
+static int lnet_add_net_common(struct lnet_net *net,
+ struct lnet_ioctl_config_lnd_tunables *tun)
+{
+ struct lnet_net *netl = NULL;
+ __u32 net_id;
+ lnet_ping_info_t *pinfo;
+ lnet_handle_md_t md_handle;
+ int rc;
+ lnet_remotenet_t *rnet;
+ int net_ni_count;
+ int num_acceptor_nets;
lnet_net_lock(LNET_LOCK_EX);
rnet = lnet_find_rnet_locked(net->net_id);
lnet_net_unlock(LNET_LOCK_EX);
- /* make sure that the net added doesn't invalidate the current
- * configuration LNet is keeping */
- if (rnet != NULL) {
+ /*
+ * make sure that the net added doesn't invalidate the current
+ * configuration LNet is keeping
+ */
+ if (rnet) {
CERROR("Adding net %s will invalidate routing configuration\n",
- nets);
+ libcfs_net2str(net->net_id));
rc = -EUSERS;
- goto failed0;
+ goto failed1;
}
/*
* we should allocate enough slots to accomodate the number of NIs
* which will be added.
*
- * We can use lnet_get_net_ni_count_locked() since the net is not
- * on a public list yet, so locking is not a problem
+ * since ni hasn't been configured yet, use
+ * lnet_get_net_ni_count_pre() which checks the net_ni_added list
*/
- net_ni_count = lnet_get_net_ni_count_locked(net);
+ net_ni_count = lnet_get_net_ni_count_pre(net);
rc = lnet_ping_info_setup(&pinfo, &md_handle,
net_ni_count + lnet_get_ni_count(),
false);
- if (rc != 0)
- goto failed0;
-
- list_del_init(&net->net_list);
+ if (rc < 0)
+ goto failed1;
- if (lnd_tunables)
+ if (tun)
memcpy(&net->net_tunables,
- &lnd_tunables->lt_cmn, sizeof(lnd_tunables->lt_cmn));
+ &tun->lt_cmn, sizeof(net->net_tunables));
+ else
+ memset(&net->net_tunables, -1, sizeof(net->net_tunables));
/*
* before starting this network get a count of the current TCP
*/
num_acceptor_nets = lnet_count_acceptor_nets();
- /*
- * lnd_startup_lndnet() can deallocate 'net' even if it it returns
- * success, because we endded up adding interfaces to an existing
- * network. So grab the net_type now
- */
- net_type = LNET_NETTYP(net->net_id);
+ net_id = net->net_id;
rc = lnet_startup_lndnet(net,
- (lnd_tunables) ? &lnd_tunables->lt_tun : NULL);
+ (tun) ? &tun->lt_tun : NULL);
if (rc < 0)
- goto failed1;
+ goto failed;
+
+ lnet_net_lock(LNET_LOCK_EX);
+ netl = lnet_get_net_locked(net_id);
+ lnet_net_unlock(LNET_LOCK_EX);
+
+ LASSERT(netl);
/*
* Start the acceptor thread if this is the first network
* being added that requires the thread.
*/
- if (net_type == SOCKLND && num_acceptor_nets == 0)
+ if (netl->net_lnd->lnd_accept &&
+ num_acceptor_nets == 0)
{
rc = lnet_acceptor_start();
if (rc < 0) {
/* shutdown the net that we just started */
CERROR("Failed to start up acceptor thread\n");
- /*
- * Note that if we needed to start the acceptor
- * thread, then 'net' must have been the first TCP
- * network, therefore was unique, and therefore
- * wasn't deallocated by lnet_startup_lndnet()
- */
lnet_shutdown_lndnet(net);
- goto failed1;
+ goto failed;
}
}
+ lnet_net_lock(LNET_LOCK_EX);
+ lnet_peer_net_added(netl);
+ lnet_net_unlock(LNET_LOCK_EX);
+
lnet_ping_target_update(pinfo, md_handle);
- mutex_unlock(&the_lnet.ln_api_mutex);
return 0;
-failed1:
+failed:
lnet_ping_md_unlink(pinfo, &md_handle);
lnet_ping_info_free(pinfo);
-failed0:
+failed1:
+ lnet_net_free(net);
+ return rc;
+}
+
+/*
+ * Handle an "ip2nets" expression passed in by an older DLC library:
+ * expand it into one or more networks and add each one.
+ *
+ * \param[in] ip2nets  legacy ip2nets expression from userspace
+ * \param[in] tun      optional tunables applied to every added net
+ *
+ * \return 0 on success, negative errno on failure.  Nets not yet
+ * consumed by lnet_add_net_common() when an error occurs are freed
+ * in the trailing cleanup loop.
+ */
+static int lnet_handle_legacy_ip2nets(char *ip2nets,
+ struct lnet_ioctl_config_lnd_tunables *tun)
+{
+ struct lnet_net *net;
+ char *nets;
+ int rc;
+ struct list_head net_head;
+
+ INIT_LIST_HEAD(&net_head);
+
+ /* NOTE(review): ownership of the 'nets' string returned by
+  * lnet_parse_ip2nets() is not visible here -- confirm whether it
+  * must be freed on the early-return paths below */
+ rc = lnet_parse_ip2nets(&nets, ip2nets);
+ if (rc < 0)
+  return rc;
+
+ rc = lnet_parse_networks(&net_head, nets, use_tcp_bonding);
+ if (rc < 0)
+  return rc;
+
+ mutex_lock(&the_lnet.ln_api_mutex);
+ while (!list_empty(&net_head)) {
+  net = list_entry(net_head.next, struct lnet_net, net_list);
+  /* unlink first: lnet_add_net_common() takes ownership of 'net'
+   * and frees it itself on failure */
+  list_del_init(&net->net_list);
+  rc = lnet_add_net_common(net, tun);
+  if (rc < 0)
+   goto out;
+ }
+
+out:
+ mutex_unlock(&the_lnet.ln_api_mutex);
+
+ /* free any nets we never got to hand over */
+ while (!list_empty(&net_head)) {
+  net = list_entry(net_head.next, struct lnet_net, net_list);
+  list_del_init(&net->net_list);
+  lnet_net_free(net);
+ }
+ return rc;
+}
+
+/*
+ * Add a single NI described by a Multi-Rail DLC request, creating its
+ * net if needed.  Legacy ip2nets requests are redirected to
+ * lnet_handle_legacy_ip2nets().
+ *
+ * \param[in] conf  NI configuration (nid, cpts, interface, optional
+ *                  tunables trailing in lic_bulk)
+ *
+ * \return 0 on success, negative errno on failure.
+ */
+int lnet_dyn_add_ni(struct lnet_ioctl_config_ni *conf)
+{
+ struct lnet_net *net;
+ struct lnet_ni *ni;
+ struct lnet_ioctl_config_lnd_tunables *tun = NULL;
+ int rc;
+ __u32 net_id;
+
+ /* get the tunables if they are available */
+ if (conf->lic_cfg_hdr.ioc_len >=
+     sizeof(*conf) + sizeof(*tun))
+  tun = (struct lnet_ioctl_config_lnd_tunables *)
+   conf->lic_bulk;
+
+ /* handle legacy ip2nets from DLC */
+ if (conf->lic_legacy_ip2nets[0] != '\0')
+  return lnet_handle_legacy_ip2nets(conf->lic_legacy_ip2nets,
+        tun);
+
+ net_id = LNET_NIDNET(conf->lic_nid);
+
+ net = lnet_net_alloc(net_id, NULL);
+ if (!net)
+  return -ENOMEM;
+
+ ni = lnet_ni_alloc_w_cpt_array(net, conf->lic_cpts, conf->lic_ncpts,
+           conf->lic_ni_intf[0]);
+ if (!ni) {
+  /* lnet_add_net_common() has not taken ownership of 'net'
+   * yet, so it must be freed here or it leaks */
+  lnet_net_free(net);
+  return -ENOMEM;
+ }
+
+ mutex_lock(&the_lnet.ln_api_mutex);
+
+ /* on failure lnet_add_net_common() frees 'net' itself */
+ rc = lnet_add_net_common(net, tun);
+
+ mutex_unlock(&the_lnet.ln_api_mutex);
+
+ return rc;
+}
+
+/*
+ * Delete an NI (or an entire net, when the NID has no address part)
+ * as requested by DLC.
+ *
+ * \param[in] conf  configuration identifying the NI/net via lic_nid
+ *
+ * \return 0 on success, -EINVAL for the LOLND, -ENOENT if the net or
+ *         NI is not found, or the lnet_ping_info_setup() error.
+ *
+ * Locking: takes ln_api_mutex for the whole operation; the net lock
+ * (cpt 0) is held only around the lookups and is dropped before the
+ * blocking shutdown/ping-update calls.
+ */
+int lnet_dyn_del_ni(struct lnet_ioctl_config_ni *conf)
+{
+ struct lnet_net *net;
+ struct lnet_ni *ni;
+ __u32 net_id = LNET_NIDNET(conf->lic_nid);
+ lnet_ping_info_t *pinfo;
+ lnet_handle_md_t md_handle;
+ int rc;
+ int net_count;
+ __u32 addr;
+
+ /* don't allow userspace to shutdown the LOLND */
+ if (LNET_NETTYP(net_id) == LOLND)
+  return -EINVAL;
+
+ mutex_lock(&the_lnet.ln_api_mutex);
+
+ lnet_net_lock(0);
+
+ net = lnet_get_net_locked(net_id);
+ if (!net) {
+  CERROR("net %s not found\n",
+         libcfs_net2str(net_id));
+  rc = -ENOENT;
+  goto net_unlock;
+ }
+
+ addr = LNET_NIDADDR(conf->lic_nid);
+ if (addr == 0) {
+  /* remove the entire net */
+  net_count = lnet_get_net_ni_count_locked(net);
+
+  /* NOTE(review): 'net' is used after dropping the net lock;
+   * presumably safe because ln_api_mutex serializes config
+   * changes -- confirm */
+  lnet_net_unlock(0);
+
+  /* create and link a new ping info, before removing the old one */
+  rc = lnet_ping_info_setup(&pinfo, &md_handle,
+     lnet_get_ni_count() - net_count,
+     false);
+  if (rc != 0)
+   goto out;
+
+  lnet_shutdown_lndnet(net);
+
+  if (lnet_count_acceptor_nets() == 0)
+   lnet_acceptor_stop();
+
+  lnet_ping_target_update(pinfo, md_handle);
+
+  /* rc is 0 here from lnet_ping_info_setup() */
+  goto out;
+ }
+
+ ni = lnet_nid2ni_locked(conf->lic_nid, 0);
+ if (!ni) {
+  /* NOTE(review): stray space before \n in this message */
+  CERROR("nid %s not found \n",
+         libcfs_nid2str(conf->lic_nid));
+  rc = -ENOENT;
+  goto net_unlock;
+ }
+
+ net_count = lnet_get_net_ni_count_locked(net);
+
+ lnet_net_unlock(0);
+
+ /* create and link a new ping info, before removing the old one */
+ rc = lnet_ping_info_setup(&pinfo, &md_handle,
+    lnet_get_ni_count() - 1, false);
+ if (rc != 0)
+  goto out;
+
+ lnet_shutdown_lndni(ni);
+
+ if (lnet_count_acceptor_nets() == 0)
+  lnet_acceptor_stop();
+
+ lnet_ping_target_update(pinfo, md_handle);
+
+ /* check if the net is empty and remove it if it is */
+ if (net_count == 1)
+  lnet_shutdown_lndnet(net);
+
+ goto out;
+
+net_unlock:
+ lnet_net_unlock(0);
+out:
+ mutex_unlock(&the_lnet.ln_api_mutex);
+
+ return rc;
+}
+
+/*
+ * lnet_dyn_add_net and lnet_dyn_del_net are now deprecated.
+ * They are only expected to be called for unique networks.
+ * That can be as a result of older DLC library
+ * calls. Multi-Rail DLC and beyond no longer uses these APIs.
+ */
+int
+lnet_dyn_add_net(struct lnet_ioctl_config_data *conf)
+{
+ struct lnet_net *net;
+ struct list_head net_head;
+ int rc;
+ struct lnet_ioctl_config_lnd_tunables tun;
+ char *nets = conf->cfg_config_u.cfg_net.net_intf;
+
+ INIT_LIST_HEAD(&net_head);
+
+ /* Create a net/ni structures for the network string */
+ rc = lnet_parse_networks(&net_head, nets, use_tcp_bonding);
+ if (rc <= 0)
+  return rc == 0 ? -EINVAL : rc;
+
+ mutex_lock(&the_lnet.ln_api_mutex);
+
+ if (rc > 1) {
+  rc = -EINVAL; /* only add one network per call */
+  goto failed;
+ }
+
+ net = list_entry(net_head.next, struct lnet_net, net_list);
+ list_del_init(&net->net_list);
+
+ LASSERT(lnet_net_unique(net->net_id, &the_lnet.ln_nets, NULL));
+
+ /* BUGFIX: arguments were swapped -- memset(&tun, sizeof(tun), 0)
+  * zeroes nothing, leaving the tunables uninitialized stack data */
+ memset(&tun, 0, sizeof(tun));
+
+ tun.lt_cmn.lct_peer_timeout =
+   conf->cfg_config_u.cfg_net.net_peer_timeout;
+ tun.lt_cmn.lct_peer_tx_credits =
+   conf->cfg_config_u.cfg_net.net_peer_tx_credits;
+ tun.lt_cmn.lct_peer_rtr_credits =
+   conf->cfg_config_u.cfg_net.net_peer_rtr_credits;
+ tun.lt_cmn.lct_max_tx_credits =
+   conf->cfg_config_u.cfg_net.net_max_tx_credits;
+
+ rc = lnet_add_net_common(net, &tun);
+ if (rc != 0)
+  goto failed;
+
+ /* BUGFIX: release the API mutex on the success path as well;
+  * previously it was only dropped under the 'failed' label, so a
+  * successful add returned with ln_api_mutex held */
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return 0;
+
+failed:
mutex_unlock(&the_lnet.ln_api_mutex);
while (!list_empty(&net_head)) {
net = list_entry(net_head.next, struct lnet_net, net_list);
}
int
-lnet_dyn_del_ni(__u32 net_id)
+lnet_dyn_del_net(__u32 net_id)
{
struct lnet_net *net;
struct lnet_ping_info *pinfo;
return rc;
}
+/* Bump the DLC sequence number; called whenever an NI is added or
+ * removed so senders can detect stale NI selections (see the comment
+ * at lnet_dlc_seq_no). */
+void lnet_incr_dlc_seq(void)
+{
+ atomic_inc(&lnet_dlc_seq_no);
+}
+
+/* Read the current DLC sequence number.  NOTE(review): atomic_read()
+ * needs no lock; the "_locked" suffix presumably documents the
+ * caller's context rather than a requirement here -- confirm. */
+__u32 lnet_get_dlc_seq_locked(void)
+{
+ return atomic_read(&lnet_dlc_seq_no);
+}
+
+/* Accessor for the lnet_numa_range module parameter.
+ * NOTE(review): plain 'inline' on an externally-visible definition in
+ * a .c file relies on GNU89 inline semantics; confirm no extern
+ * declaration elsewhere expects an out-of-line symbol. */
+inline __u32 lnet_get_numa_range(void)
+{
+ return lnet_numa_range;
+}
+
/**
* LNet ioctl handler.
*
if (config->cfg_hdr.ioc_len < sizeof(*config))
return -EINVAL;
- return lnet_get_route(config->cfg_count,
- &config->cfg_net,
- &config->cfg_config_u.cfg_route.rtr_hop,
- &config->cfg_nid,
- &config->cfg_config_u.cfg_route.rtr_flags,
- &config->cfg_config_u.cfg_route.
+ mutex_lock(&the_lnet.ln_api_mutex);
+ rc = lnet_get_route(config->cfg_count,
+ &config->cfg_net,
+ &config->cfg_config_u.cfg_route.rtr_hop,
+ &config->cfg_nid,
+ &config->cfg_config_u.cfg_route.rtr_flags,
+ &config->cfg_config_u.cfg_route.
rtr_priority);
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return rc;
+
+ case IOC_LIBCFS_GET_LOCAL_NI: {
+ struct lnet_ioctl_config_ni *cfg_ni;
+ struct lnet_ioctl_config_lnd_tunables *tun = NULL;
+ struct lnet_ioctl_element_stats *stats;
+ __u32 tun_size;
+
+ cfg_ni = arg;
+ /* get the tunables if they are available */
+ if (cfg_ni->lic_cfg_hdr.ioc_len <
+ sizeof(*cfg_ni) + sizeof(*stats)+ sizeof(*tun))
+ return -EINVAL;
+
+ stats = (struct lnet_ioctl_element_stats *)
+ cfg_ni->lic_bulk;
+ tun = (struct lnet_ioctl_config_lnd_tunables *)
+ (cfg_ni->lic_bulk + sizeof(*stats));
+
+ tun_size = cfg_ni->lic_cfg_hdr.ioc_len - sizeof(*cfg_ni) -
+ sizeof(*stats);
+
+ mutex_lock(&the_lnet.ln_api_mutex);
+ rc = lnet_get_ni_config(cfg_ni, tun, stats, tun_size);
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return rc;
+ }
case IOC_LIBCFS_GET_NET: {
size_t total = sizeof(*config) +
if (config->cfg_hdr.ioc_len < total)
return -EINVAL;
- return lnet_get_net_config(config);
+ mutex_lock(&the_lnet.ln_api_mutex);
+ rc = lnet_get_net_config(config);
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return rc;
}
case IOC_LIBCFS_GET_LNET_STATS:
if (lnet_stats->st_hdr.ioc_len < sizeof(*lnet_stats))
return -EINVAL;
+ mutex_lock(&the_lnet.ln_api_mutex);
lnet_counters_get(&lnet_stats->st_cntrs);
+ mutex_unlock(&the_lnet.ln_api_mutex);
return 0;
}
mutex_unlock(&the_lnet.ln_api_mutex);
return rc;
+ case IOC_LIBCFS_SET_NUMA_RANGE: {
+ struct lnet_ioctl_numa_range *numa;
+ numa = arg;
+ if (numa->nr_hdr.ioc_len != sizeof(*numa))
+ return -EINVAL;
+ mutex_lock(&the_lnet.ln_api_mutex);
+ lnet_numa_range = numa->nr_range;
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return 0;
+ }
+
+ case IOC_LIBCFS_GET_NUMA_RANGE: {
+ struct lnet_ioctl_numa_range *numa;
+ numa = arg;
+ if (numa->nr_hdr.ioc_len != sizeof(*numa))
+ return -EINVAL;
+ numa->nr_range = lnet_numa_range;
+ return 0;
+ }
+
case IOC_LIBCFS_GET_BUF: {
struct lnet_ioctl_pool_cfg *pool_cfg;
size_t total = sizeof(*config) + sizeof(*pool_cfg);
return -EINVAL;
pool_cfg = (struct lnet_ioctl_pool_cfg *)config->cfg_bulk;
- return lnet_get_rtr_pool_cfg(config->cfg_count, pool_cfg);
+
+ mutex_lock(&the_lnet.ln_api_mutex);
+ rc = lnet_get_rtr_pool_cfg(config->cfg_count, pool_cfg);
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return rc;
+ }
+
+ case IOC_LIBCFS_ADD_PEER_NI: {
+ struct lnet_ioctl_peer_cfg *cfg = arg;
+
+ if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
+ return -EINVAL;
+
+ mutex_lock(&the_lnet.ln_api_mutex);
+ rc = lnet_add_peer_ni_to_peer(cfg->prcfg_key_nid,
+ cfg->prcfg_cfg_nid,
+ cfg->prcfg_mr);
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return rc;
+ }
+
+ case IOC_LIBCFS_DEL_PEER_NI: {
+ struct lnet_ioctl_peer_cfg *cfg = arg;
+
+ if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
+ return -EINVAL;
+
+ mutex_lock(&the_lnet.ln_api_mutex);
+ rc = lnet_del_peer_ni_from_peer(cfg->prcfg_key_nid,
+ cfg->prcfg_cfg_nid);
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return rc;
}
case IOC_LIBCFS_GET_PEER_INFO: {
if (peer_info->pr_hdr.ioc_len < sizeof(*peer_info))
return -EINVAL;
- return lnet_get_peer_info(
+ mutex_lock(&the_lnet.ln_api_mutex);
+ rc = lnet_get_peer_ni_info(
peer_info->pr_count,
&peer_info->pr_nid,
peer_info->pr_lnd_u.pr_peer_credits.cr_aliveness,
&peer_info->pr_lnd_u.pr_peer_credits.cr_peer_rtr_credits,
&peer_info->pr_lnd_u.pr_peer_credits.cr_peer_min_rtr_credits,
&peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_qnob);
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return rc;
+ }
+
+ case IOC_LIBCFS_GET_PEER_NI: {
+ struct lnet_ioctl_peer_cfg *cfg = arg;
+ struct lnet_peer_ni_credit_info *lpni_cri;
+ struct lnet_ioctl_element_stats *lpni_stats;
+ size_t total = sizeof(*cfg) + sizeof(*lpni_cri) +
+ sizeof(*lpni_stats);
+
+ if (cfg->prcfg_hdr.ioc_len < total)
+ return -EINVAL;
+
+ lpni_cri = (struct lnet_peer_ni_credit_info*) cfg->prcfg_bulk;
+ lpni_stats = (struct lnet_ioctl_element_stats *)
+ (cfg->prcfg_bulk + sizeof(*lpni_cri));
+
+ mutex_lock(&the_lnet.ln_api_mutex);
+ rc = lnet_get_peer_info(cfg->prcfg_idx, &cfg->prcfg_key_nid,
+ &cfg->prcfg_cfg_nid, &cfg->prcfg_mr,
+ lpni_cri, lpni_stats);
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return rc;
}
case IOC_LIBCFS_NOTIFY_ROUTER: {
data->ioc_count = rc;
return 0;
}
+
+ case IOC_LIBCFS_DBG: {
+ struct lnet_ioctl_dbg *dbg = arg;
+ struct lnet_dbg_task_info *dbg_info;
+ size_t total = sizeof(*dbg) + sizeof(*dbg_info);
+
+ if (dbg->dbg_hdr.ioc_len < total)
+ return -EINVAL;
+
+ dbg_info = (struct lnet_dbg_task_info*) dbg->dbg_bulk;
+
+ return lnet_handle_dbg_task(dbg, dbg_info);
+ }
+
default:
- ni = lnet_net2ni(data->ioc_net);
+ ni = lnet_net2ni_addref(data->ioc_net);
if (ni == NULL)
return -EINVAL;
else
rc = ni->ni_net->net_lnd->lnd_ctl(ni, cmd, arg);
+ lnet_ni_decref(ni);
return rc;
}
/* not reached */