return 0;
}
- *discovery = value;
-
+ /*
+ * We still want to set the discovery value even when LNet is not
+ * running. This is the case when LNet is being loaded and we want
+ * the module parameters to take effect. Otherwise if we're
+ * changing the value dynamically, we want to set it after
+ * updating the peers
+ */
if (the_lnet.ln_state != LNET_STATE_RUNNING) {
+ *discovery = value;
mutex_unlock(&the_lnet.ln_api_mutex);
return 0;
}
pbuf->pb_info.pi_features |= LNET_PING_FEAT_DISCOVERY;
lnet_net_unlock(LNET_LOCK_EX);
+ /*
+ * Always update the peers. This will result in a push to the
+ * peers with the updated capabilities feature mask. The peer can
+ * then take appropriate action to update its representation of
+ * the node.
+ *
+ * If discovery is already off, turn it on first before pushing
+ * the update. The discovery flag must be on before pushing;
+ * otherwise, if the flag is on and we're turning it off, then push
+ * first before turning the flag off. In the former case the flag
+ * is being set twice, but I find it's better to do that rather
+ * than have duplicate code in an if/else statement.
+ */
+ if (*discovery > 0 && value == 0)
+ *discovery = value;
lnet_push_update_to_peers(1);
+ *discovery = value;
mutex_unlock(&the_lnet.ln_api_mutex);
return 0;
}
-static char *
+static const char *
lnet_get_routes(void)
{
return routes;
}
-static char *
+static const char *
lnet_get_networks(void)
{
- char *nets;
- int rc;
+ const char *nets;
+ int rc;
if (*networks != 0 && *ip2nets != 0) {
LCONSOLE_ERROR_MSG(0x101, "Please specify EITHER 'networks' or "
INIT_LIST_HEAD(&the_lnet.ln_mt_localNIRecovq);
INIT_LIST_HEAD(&the_lnet.ln_mt_peerNIRecovq);
init_waitqueue_head(&the_lnet.ln_dc_waitq);
- the_lnet.ln_mt_eq = NULL;
+ the_lnet.ln_mt_handler = NULL;
init_completion(&the_lnet.ln_started);
rc = lnet_slab_setup();
static int
lnet_unprepare (void)
{
- int rc;
-
/* NB no LNET_LOCK since this is the last reference. All LND instances
* have shut down already, so it is safe to unlink and free all
* descriptors, even those that appear committed to a network op (eg MD
the_lnet.ln_mt_zombie_rstqs = NULL;
}
- if (the_lnet.ln_mt_eq) {
- rc = LNetEQFree(the_lnet.ln_mt_eq);
- the_lnet.ln_mt_eq = NULL;
- LASSERT(rc == 0);
- }
+ lnet_assert_handler_unused(the_lnet.ln_mt_handler);
+ the_lnet.ln_mt_handler = NULL;
lnet_portals_destroy();
/* Loopback is guaranteed to be present */
if (pinfo->pi_nnis < 1 || pinfo->pi_nnis > lnet_interfaces_max)
return -ERANGE;
- if (LNET_NETTYP(LNET_NIDNET(LNET_PING_INFO_LONI(pinfo))) != LOLND)
+ if (LNET_PING_INFO_LONI(pinfo) != LNET_NID_LO_0)
return -EPROTO;
return 0;
}
static void
lnet_ping_target_event_handler(struct lnet_event *event)
{
- struct lnet_ping_buffer *pbuf = event->md.user_ptr;
+ struct lnet_ping_buffer *pbuf = event->md_user_ptr;
if (event->unlinked)
lnet_ping_buffer_decref(pbuf);
};
struct lnet_me *me;
struct lnet_md md = { NULL };
- int rc, rc2;
-
- if (set_eq) {
- the_lnet.ln_ping_target_eq =
- LNetEQAlloc(lnet_ping_target_event_handler);
- if (IS_ERR(the_lnet.ln_ping_target_eq)) {
- rc = PTR_ERR(the_lnet.ln_ping_target_eq);
- CERROR("Can't allocate ping buffer EQ: %d\n", rc);
- return rc;
- }
- }
+ int rc;
+
+ if (set_eq)
+ the_lnet.ln_ping_target_handler =
+ lnet_ping_target_event_handler;
*ppbuf = lnet_ping_target_create(ni_count);
if (*ppbuf == NULL) {
md.max_size = 0;
md.options = LNET_MD_OP_GET | LNET_MD_TRUNCATE |
LNET_MD_MANAGE_REMOTE;
- md.eq_handle = the_lnet.ln_ping_target_eq;
+ md.handler = the_lnet.ln_ping_target_handler;
md.user_ptr = *ppbuf;
rc = LNetMDAttach(me, md, LNET_RETAIN, ping_mdh);
lnet_ping_buffer_decref(*ppbuf);
*ppbuf = NULL;
fail_free_eq:
- if (set_eq) {
- rc2 = LNetEQFree(the_lnet.ln_ping_target_eq);
- LASSERT(rc2 == 0);
- }
return rc;
}
LNetInvalidateMDHandle(ping_mdh);
/* NB the MD could be busy; this just starts the unlink */
- while (atomic_read(&pbuf->pb_refcnt) > 1) {
- CDEBUG(D_NET, "Still waiting for ping data MD to unlink\n");
- schedule_timeout_uninterruptible(cfs_time_seconds(1));
- }
+ wait_var_event_warning(&pbuf->pb_refcnt,
+ atomic_read(&pbuf->pb_refcnt) <= 1,
+ "Still waiting for ping data MD to unlink\n");
}
static void
static void
lnet_ping_target_fini(void)
{
- int rc;
-
lnet_ping_md_unlink(the_lnet.ln_ping_target,
&the_lnet.ln_ping_target_md);
- rc = LNetEQFree(the_lnet.ln_ping_target_eq);
- LASSERT(rc == 0);
-
+ lnet_assert_handler_unused(the_lnet.ln_ping_target_handler);
lnet_ping_target_destroy();
}
md.max_size = 0;
md.options = LNET_MD_OP_PUT | LNET_MD_TRUNCATE;
md.user_ptr = pbuf;
- md.eq_handle = the_lnet.ln_push_target_eq;
+ md.handler = the_lnet.ln_push_target_handler;
rc = LNetMDAttach(me, md, LNET_UNLINK, mdhp);
if (rc) {
static void lnet_push_target_event_handler(struct lnet_event *ev)
{
- struct lnet_ping_buffer *pbuf = ev->md.user_ptr;
+ struct lnet_ping_buffer *pbuf = ev->md_user_ptr;
CDEBUG(D_NET, "type %d status %d unlinked %d\n", ev->type, ev->status,
ev->unlinked);
if (the_lnet.ln_push_target)
return -EALREADY;
- the_lnet.ln_push_target_eq =
- LNetEQAlloc(lnet_push_target_event_handler);
- if (IS_ERR(the_lnet.ln_push_target_eq)) {
- rc = PTR_ERR(the_lnet.ln_push_target_eq);
- CERROR("Can't allocated push target EQ: %d\n", rc);
- return rc;
- }
+ the_lnet.ln_push_target_handler =
+ lnet_push_target_event_handler;
rc = LNetSetLazyPortal(LNET_RESERVED_PORTAL);
LASSERT(rc == 0);
if (rc) {
LNetClearLazyPortal(LNET_RESERVED_PORTAL);
- LNetEQFree(the_lnet.ln_push_target_eq);
- the_lnet.ln_push_target_eq = NULL;
+ the_lnet.ln_push_target_handler = NULL;
}
return rc;
LNetInvalidateMDHandle(&the_lnet.ln_push_target_md);
/* Wait for the unlink to complete. */
- while (atomic_read(&the_lnet.ln_push_target->pb_refcnt) > 1) {
- CDEBUG(D_NET, "Still waiting for ping data MD to unlink\n");
- schedule_timeout_uninterruptible(cfs_time_seconds(1));
- }
+ wait_var_event_warning(&the_lnet.ln_push_target->pb_refcnt,
+ atomic_read(&the_lnet.ln_push_target->pb_refcnt) <= 1,
+ "Still waiting for ping data MD to unlink\n");
/* Drop ref set by lnet_ping_buffer_alloc() */
lnet_ping_buffer_decref(the_lnet.ln_push_target);
the_lnet.ln_push_target_nnis = 0;
LNetClearLazyPortal(LNET_RESERVED_PORTAL);
- LNetEQFree(the_lnet.ln_push_target_eq);
- the_lnet.ln_push_target_eq = NULL;
+ lnet_assert_handler_unused(the_lnet.ln_push_target_handler);
+ the_lnet.ln_push_target_handler = NULL;
}
static int
lnet_ping_target_update(pbuf, ping_mdh);
- the_lnet.ln_mt_eq = LNetEQAlloc(lnet_mt_event_handler);
- if (IS_ERR(the_lnet.ln_mt_eq)) {
- rc = PTR_ERR(the_lnet.ln_mt_eq);
- CERROR("Can't allocate monitor thread EQ: %d\n", rc);
- goto err_stop_ping;
- }
+ the_lnet.ln_mt_handler = lnet_mt_event_handler;
rc = lnet_push_target_init();
if (rc != 0)
}
cfg_ni->lic_nid = ni->ni_nid;
- if (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND)
+ if (ni->ni_nid == LNET_NID_LO_0)
cfg_ni->lic_status = LNET_NI_STATUS_UP;
else
cfg_ni->lic_status = ni->ni_status->ns_status;
config->cfg_config_u.cfg_net.net_peer_rtr_credits =
ni->ni_net->net_tunables.lct_peer_rtr_credits;
- if (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND)
+ if (ni->ni_nid == LNET_NID_LO_0)
net_config->ni_status = LNET_NI_STATUS_UP;
else
net_config->ni_status = ni->ni_status->ns_status;
int rc;
struct lnet_remotenet *rnet;
int net_ni_count;
- int num_acceptor_nets;
lnet_net_lock(LNET_LOCK_EX);
rnet = lnet_find_rnet_locked(net->net_id);
else
memset(&net->net_tunables, -1, sizeof(net->net_tunables));
- /*
- * before starting this network get a count of the current TCP
- * networks which require the acceptor thread running. If that
- * count is == 0 before we start up this network, then we'd want to
- * start up the acceptor thread after starting up this network
- */
- num_acceptor_nets = lnet_count_acceptor_nets();
-
net_id = net->net_id;
rc = lnet_startup_lndnet(net,
* Start the acceptor thread if this is the first network
* being added that requires the thread.
*/
- if (net->net_lnd->lnd_accept && num_acceptor_nets == 0) {
+ if (net->net_lnd->lnd_accept) {
rc = lnet_acceptor_start();
if (rc < 0) {
/* shutdown the net that we just started */
struct lnet_ioctl_config_lnd_tunables *tun)
{
struct lnet_net *net;
- char *nets;
+ const char *nets;
int rc;
LIST_HEAD(net_head);
lnet_shutdown_lndnet(net);
- if (lnet_count_acceptor_nets() == 0)
- lnet_acceptor_stop();
+ lnet_acceptor_stop();
lnet_ping_target_update(pbuf, ping_mdh);
lnet_shutdown_lndni(ni);
- if (lnet_count_acceptor_nets() == 0)
- lnet_acceptor_stop();
+ lnet_acceptor_stop();
lnet_ping_target_update(pbuf, ping_mdh);
LIST_HEAD(net_head);
int rc;
struct lnet_ioctl_config_lnd_tunables tun;
- char *nets = conf->cfg_config_u.cfg_net.net_intf;
+ const char *nets = conf->cfg_config_u.cfg_net.net_intf;
/* Create a net/ni structures for the network string */
rc = lnet_parse_networks(&net_head, nets, use_tcp_bonding);
lnet_shutdown_lndnet(net);
- if (lnet_count_acceptor_nets() == 0)
- lnet_acceptor_stop();
+ lnet_acceptor_stop();
lnet_ping_target_update(pbuf, ping_mdh);
static void
lnet_ping_event_handler(struct lnet_event *event)
{
- struct ping_data *pd = event->md.user_ptr;
+ struct ping_data *pd = event->md_user_ptr;
CDEBUG(D_NET, "ping event (%d %d)%s\n",
event->type, event->status,
static int lnet_ping(struct lnet_process_id id, signed long timeout,
struct lnet_process_id __user *ids, int n_ids)
{
- struct lnet_eq *eq;
struct lnet_md md = { NULL };
struct ping_data pd = { 0 };
struct lnet_ping_buffer *pbuf;
if (!pbuf)
return -ENOMEM;
- eq = LNetEQAlloc(lnet_ping_event_handler);
- if (IS_ERR(eq)) {
- rc = PTR_ERR(eq);
- CERROR("Can't allocate EQ: %d\n", rc);
- goto fail_ping_buffer_decref;
- }
-
/* initialize md content */
md.start = &pbuf->pb_info;
md.length = LNET_PING_INFO_SIZE(n_ids);
md.max_size = 0;
md.options = LNET_MD_TRUNCATE;
md.user_ptr = &pd;
- md.eq_handle = eq;
+ md.handler = lnet_ping_event_handler;
init_completion(&pd.completion);
rc = LNetMDBind(md, LNET_UNLINK, &pd.mdh);
if (rc != 0) {
CERROR("Can't bind MD: %d\n", rc);
- goto fail_free_eq;
+ goto fail_ping_buffer_decref;
}
rc = LNetGet(LNET_NID_ANY, pd.mdh, id,
}
if (!pd.replied) {
rc = -EIO;
- goto fail_free_eq;
+ goto fail_ping_buffer_decref;
}
nob = pd.rc;
if (nob < 8) {
CERROR("%s: ping info too short %d\n",
libcfs_id2str(id), nob);
- goto fail_free_eq;
+ goto fail_ping_buffer_decref;
}
if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC)) {
} else if (pbuf->pb_info.pi_magic != LNET_PROTO_PING_MAGIC) {
CERROR("%s: Unexpected magic %08x\n",
libcfs_id2str(id), pbuf->pb_info.pi_magic);
- goto fail_free_eq;
+ goto fail_ping_buffer_decref;
}
if ((pbuf->pb_info.pi_features & LNET_PING_FEAT_NI_STATUS) == 0) {
CERROR("%s: ping w/o NI status: 0x%x\n",
libcfs_id2str(id), pbuf->pb_info.pi_features);
- goto fail_free_eq;
+ goto fail_ping_buffer_decref;
}
if (nob < LNET_PING_INFO_SIZE(0)) {
CERROR("%s: Short reply %d(%d min)\n",
libcfs_id2str(id),
nob, (int)LNET_PING_INFO_SIZE(0));
- goto fail_free_eq;
+ goto fail_ping_buffer_decref;
}
if (pbuf->pb_info.pi_nnis < n_ids)
CERROR("%s: Short reply %d(%d expected)\n",
libcfs_id2str(id),
nob, (int)LNET_PING_INFO_SIZE(n_ids));
- goto fail_free_eq;
+ goto fail_ping_buffer_decref;
}
rc = -EFAULT; /* if I segv in copy_to_user()... */
tmpid.pid = pbuf->pb_info.pi_pid;
tmpid.nid = pbuf->pb_info.pi_ni[i].ns_nid;
if (copy_to_user(&ids[i], &tmpid, sizeof(tmpid)))
- goto fail_free_eq;
+ goto fail_ping_buffer_decref;
}
rc = pbuf->pb_info.pi_nnis;
- fail_free_eq:
- rc2 = LNetEQFree(eq);
- if (rc2 != 0)
- CERROR("rc2 %d\n", rc2);
- LASSERT(rc2 == 0);
-
fail_ping_buffer_decref:
lnet_ping_buffer_decref(pbuf);
return rc;
int cpt;
int i;
int rc;
- int max_intf = lnet_interfaces_max;
if (n_ids <= 0 ||
id.nid == LNET_NID_ANY)
id.pid = LNET_PID_LUSTRE;
/*
- * if the user buffer has more space than the max_intf
- * then only fill it up to max_intf
+ * If the user buffer has more space than the lnet_interfaces_max,
+ * then only fill it up to lnet_interfaces_max.
*/
- if (n_ids > max_intf)
- n_ids = max_intf;
+ if (n_ids > lnet_interfaces_max)
+ n_ids = lnet_interfaces_max;
CFS_ALLOC_PTR_ARRAY(buf, n_ids);
if (!buf)
if (rc)
goto out_decref;
- /* Peer may have changed. */
- lp = lpni->lpni_peer_net->lpn_peer;
- if (lp->lp_nnis < n_ids)
- n_ids = lp->lp_nnis;
-
i = 0;
p = NULL;
while ((p = lnet_get_next_peer_ni_locked(lp, NULL, p)) != NULL) {
if (++i >= n_ids)
break;
}
+ rc = i;
- lnet_net_unlock(cpt);
-
- rc = -EFAULT;
- if (copy_to_user(ids, buf, n_ids * sizeof(*buf)))
- goto out_relock;
- rc = n_ids;
-out_relock:
- lnet_net_lock(cpt);
out_decref:
lnet_peer_ni_decref_locked(lpni);
out:
lnet_net_unlock(cpt);
+ if (rc >= 0)
+ if (copy_to_user(ids, buf, rc * sizeof(*buf)))
+ rc = -EFAULT;
CFS_FREE_PTR_ARRAY(buf, n_ids);
return rc;