for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
LASSERT(list_empty(&hash[j]));
- LIBCFS_FREE(hash, LNET_PEER_HASH_SIZE * sizeof(*hash));
+ CFS_FREE_PTR_ARRAY(hash, LNET_PEER_HASH_SIZE);
}
cfs_percpt_free(the_lnet.ln_peer_tables);
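
Note on the helpers used throughout this patch: CFS_ALLOC_PTR_ARRAY/CFS_FREE_PTR_ARRAY take the pointer plus an element count, so call sites stop repeating the sizeof arithmetic. A minimal sketch of the assumed libcfs definitions (illustrative; the real ones are expected in libcfs_private.h):

    /* Sketch: the count-times-sizeof arithmetic is done once, here. */
    #define CFS_ALLOC_PTR_ARRAY(ptr, count) \
            LIBCFS_ALLOC(ptr, (count) * sizeof(*(ptr)))

    #define CFS_FREE_PTR_ARRAY(ptr, count) \
            LIBCFS_FREE(ptr, (count) * sizeof(*(ptr)))

Note that the second argument is an element count, not a byte size.
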
init_waitqueue_head(&lp->lp_dc_waitq);
spin_lock_init(&lp->lp_lock);
lp->lp_primary_nid = nid;
+ lp->lp_disc_src_nid = LNET_NID_ANY;
if (lnet_peers_start_down())
lp->lp_alive = false;
else
* to ever use a different interface when sending messages to
* myself.
*/
- if (LNET_NETTYP(LNET_NIDNET(nid)) == LOLND)
+ if (nid == LNET_NID_LO_0)
lp->lp_state = LNET_PEER_NO_DISCOVERY;
lp->lp_cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
/* assign the next peer_ni to be the primary */
lpni2 = lnet_get_next_peer_ni_locked(lp, NULL, lpni);
LASSERT(lpni2);
- lp->lp_primary_nid = lpni->lpni_nid;
+ lp->lp_primary_nid = lpni2->lpni_nid;
}
rc = lnet_peer_ni_del_locked(lpni, force);
static void
lnet_peer_ni_finalize_wait(struct lnet_peer_table *ptable)
{
- int i = 3;
-
- spin_lock(&ptable->pt_zombie_lock);
- while (ptable->pt_zombies) {
- spin_unlock(&ptable->pt_zombie_lock);
-
- if (is_power_of_2(i)) {
- CDEBUG(D_WARNING,
+ wait_var_event_warning(&ptable->pt_zombies,
+ ptable->pt_zombies == 0,
"Waiting for %d zombies on peer table\n",
ptable->pt_zombies);
- }
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1) >> 1);
- spin_lock(&ptable->pt_zombie_lock);
- }
- spin_unlock(&ptable->pt_zombie_lock);
}
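
The open-coded zombie wait above collapses into wait_var_event_warning(), keeping the periodic warning. A minimal sketch of such a macro, assuming it layers on the kernel's wait_var_event_timeout() (illustrative; the real definition is expected in the libcfs compat headers):

    /* Illustrative: wait for @condition on @var, warning at 2^n intervals. */
    #define wait_var_event_warning(var, condition, fmt, ...)            \
    do {                                                                \
            int _i = 0;                                                 \
            while (!(condition)) {                                      \
                    if (is_power_of_2(++_i))                            \
                            CDEBUG(D_WARNING, fmt, ## __VA_ARGS__);     \
                    wait_var_event_timeout(var, condition,              \
                                           cfs_time_seconds(1) / 2);    \
            }                                                           \
    } while (0)

Whatever its exact form, the path that decrements pt_zombies is expected to call wake_up_var(&ptable->pt_zombies) so waiters are woken promptly.
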
static void
if (oldnids) {
- size = sizeof(*nids) * (lpni->lpni_pref_nnids - 1);
- LIBCFS_FREE(oldnids, sizeof(*oldnids) * size);
+ CFS_FREE_PTR_ARRAY(oldnids, lpni->lpni_pref_nnids - 1);
}
out:
if (rc == -EEXIST && (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF)) {
if (oldnids) {
- size = sizeof(*nids) * (lpni->lpni_pref_nnids + 1);
- LIBCFS_FREE(oldnids, sizeof(*oldnids) * size);
+ CFS_FREE_PTR_ARRAY(oldnids, lpni->lpni_pref_nnids + 1);
}
out:
CDEBUG(D_NET, "peer %s nid %s: %d\n",
int rc = 0;
int cpt;
+ if (nid == LNET_NID_LO_0)
+ return LNET_NID_LO_0;
+
cpt = lnet_net_lock_current();
lpni = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
if (IS_ERR(lpni)) {
lp = lpni->lpni_peer_net->lpn_peer;
while (!lnet_peer_is_uptodate(lp)) {
+ spin_lock(&lp->lp_lock);
+ /* force a full discovery cycle */
+ lp->lp_state |= LNET_PEER_FORCE_PING | LNET_PEER_FORCE_PUSH;
+ spin_unlock(&lp->lp_lock);
+
rc = lnet_discover_peer_locked(lpni, cpt, true);
if (rc)
goto out_decref;
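
LNET_NID_LO_0 replaces the LNET_NETTYP(LNET_NIDNET(nid)) == LOLND tests with an exact match on the loopback NID. The assumed definition (shown for context only):

    /* Assumed: the single NID on the loopback network, i.e. 0@lo. */
    #define LNET_NID_LO_0 LNET_MKNID(LNET_MKNET(LOLND, 0), 0)

This is nominally stricter than the old network-type test, but a LOLND net only ever carries that one NID.
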
/* Add peer_ni to peer_net */
lpni->lpni_peer_net = lpn;
list_add_tail(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
+ lnet_update_peer_net_healthv(lpni);
lnet_peer_net_addref_locked(lpn);
/* Add peer_net to peer */
ptable->pt_zombies--;
spin_unlock(&ptable->pt_zombie_lock);
- if (lpni->lpni_pref_nnids > 1) {
- LIBCFS_FREE(lpni->lpni_pref.nids,
- sizeof(*lpni->lpni_pref.nids) * lpni->lpni_pref_nnids);
- }
+ if (lpni->lpni_pref_nnids > 1)
+ CFS_FREE_PTR_ARRAY(lpni->lpni_pref.nids, lpni->lpni_pref_nnids);
+
LIBCFS_FREE(lpni, sizeof(*lpni));
lnet_peer_net_decref_locked(lpn);
*/
void lnet_peer_push_event(struct lnet_event *ev)
{
- struct lnet_ping_buffer *pbuf = ev->md.user_ptr;
+ struct lnet_ping_buffer *pbuf;
struct lnet_peer *lp;
+ pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start + ev->offset);
+
/* lnet_find_peer() adds a refcount */
lp = lnet_find_peer(ev->source.nid);
if (!lp) {
CDEBUG(D_NET, "Push Put from unknown %s (source %s). Ignoring...\n",
libcfs_nid2str(ev->initiator.nid),
libcfs_nid2str(ev->source.nid));
+ pbuf->pb_needs_post = true;
return;
}
}
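
The pb_needs_post flag decouples consuming a push buffer from reposting it: the event handler only marks the buffer, and the discovery thread performs the repost. Condensed from this patch:

    /* event handler: the buffer has been consumed (or ignored) */
    pbuf->pb_needs_post = true;

    /* discovery thread main loop: resize takes priority, else repost */
    if (lnet_push_target_resize_needed())
            lnet_push_target_resize();
    else if (the_lnet.ln_push_target->pb_needs_post)
            lnet_push_target_post(the_lnet.ln_push_target,
                                  &the_lnet.ln_push_target_md);
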
/*
- * Check the MULTIRAIL flag. Complain if the peer was DLC
- * configured without it.
- */
- if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
- if (lp->lp_state & LNET_PEER_CONFIGURED) {
- CERROR("Push says %s is Multi-Rail, DLC says not\n",
- libcfs_nid2str(lp->lp_primary_nid));
- } else {
- lp->lp_state |= LNET_PEER_MULTI_RAIL;
- lnet_peer_clr_non_mr_pref_nids(lp);
- }
- }
-
- /*
* The peer may have discovery disabled at its end. Set
* NO_DISCOVERY as appropriate.
*/
if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY)) {
CDEBUG(D_NET, "Peer %s has discovery disabled\n",
libcfs_nid2str(lp->lp_primary_nid));
+ /*
+ * Mark the peer for deletion if we already know about it
+ * and it is transitioning from having discovery enabled
+ * to having discovery disabled
+ */
+ if (!(lp->lp_state & (LNET_PEER_NO_DISCOVERY |
+ LNET_PEER_DISCOVERING)) &&
+ lp->lp_state & LNET_PEER_DISCOVERED) {
+ CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
+ libcfs_nid2str(lp->lp_primary_nid),
+ lp->lp_state);
+ lp->lp_state |= LNET_PEER_MARK_DELETION;
+ }
lp->lp_state |= LNET_PEER_NO_DISCOVERY;
} else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
CDEBUG(D_NET, "Peer %s has discovery enabled\n",
}
/*
+ * Update the MULTI_RAIL flag based on the push. If the peer
+ * was configured with DLC then the setting should match what
+ * DLC has configured.
+ * NB: We verified above that the MR feature bit is set in pi_features.
+ */
+ if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
+ CDEBUG(D_NET, "peer %s(%p) is MR\n",
+ libcfs_nid2str(lp->lp_primary_nid), lp);
+ } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
+ CWARN("Push says %s is Multi-Rail, DLC says not\n",
+ libcfs_nid2str(lp->lp_primary_nid));
+ } else if (lnet_peer_discovery_disabled) {
+ CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled locally\n",
+ libcfs_nid2str(lp->lp_primary_nid), lp);
+ } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
+ CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled remotely\n",
+ libcfs_nid2str(lp->lp_primary_nid), lp);
+ } else {
+ CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
+ libcfs_nid2str(lp->lp_primary_nid), lp);
+ lp->lp_state |= LNET_PEER_MULTI_RAIL;
+ lnet_peer_clr_non_mr_pref_nids(lp);
+ }
+
+ /*
* Check for truncation of the Put message. Clear the
* NIDS_UPTODATE flag and set FORCE_PING to trigger a ping,
* and tell discovery to allocate a bigger buffer.
*/
- if (pbuf->pb_nnis < pbuf->pb_info.pi_nnis) {
+ if (ev->mlength < ev->rlength) {
if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
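
ev->mlength is the number of bytes actually delivered into the MD, while ev->rlength is the full length of the incoming request, so mlength < rlength detects truncation directly rather than inferring it from pb_nnis. A sketch of the check as a standalone helper (push_truncated is hypothetical, shown only to restate the logic):

    /* Sketch: was this Put truncated, and how many NIDs are needed? */
    static bool push_truncated(const struct lnet_event *ev, int *nnis)
    {
            const struct lnet_ping_info *pi = ev->md_start + ev->offset;

            /* mlength: bytes that fit in the MD; rlength: bytes sent */
            if (ev->mlength >= ev->rlength)
                    return false;

            *nnis = pi->pi_nnis;    /* grow ln_push_target_nnis to this */
            return true;
    }
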
LNET_PING_BUFFER_SEQNO(pbuf));
out:
+ /* We've processed this buffer. It can be reposted */
+ pbuf->pb_needs_post = true;
+
/*
* Queue the peer for discovery if not done, force it on the request
* queue and wake the discovery thread if the peer was already queued,
{
struct lnet_ping_buffer *pbuf;
- pbuf = LNET_PING_INFO_TO_BUFFER(ev->md.start);
+ pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
spin_lock(&lp->lp_lock);
lp->lp_state &= ~LNET_PEER_PUSH_SENT;
lp->lp_push_error = ev->status;
spin_lock(&lp->lp_lock);
+ lp->lp_disc_src_nid = ev->target.nid;
+
/*
* If some kind of error happened the contents of message
* cannot be used. Set PING_FAILED to trigger a retry.
goto out;
}
- pbuf = LNET_PING_INFO_TO_BUFFER(ev->md.start);
+ pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
lnet_swap_pinginfo(pbuf);
*/
static void lnet_discovery_event_handler(struct lnet_event *event)
{
- struct lnet_peer *lp = event->md.user_ptr;
+ struct lnet_peer *lp = event->md_user_ptr;
struct lnet_ping_buffer *pbuf;
int rc;
}
lnet_net_lock(LNET_LOCK_EX);
if (event->unlinked) {
- pbuf = LNET_PING_INFO_TO_BUFFER(event->md.start);
+ pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
lnet_ping_buffer_decref(pbuf);
lnet_peer_decref_locked(lp);
}
lp->lp_state &= ~LNET_PEER_ROUTER_ENABLED;
spin_unlock(&lp->lp_lock);
- nnis = MAX(lp->lp_nnis, pbuf->pb_info.pi_nnis);
- LIBCFS_ALLOC(curnis, nnis * sizeof(*curnis));
- LIBCFS_ALLOC(addnis, nnis * sizeof(*addnis));
- LIBCFS_ALLOC(delnis, nnis * sizeof(*delnis));
+ nnis = max_t(int, lp->lp_nnis, pbuf->pb_info.pi_nnis);
+ CFS_ALLOC_PTR_ARRAY(curnis, nnis);
+ CFS_ALLOC_PTR_ARRAY(addnis, nnis);
+ CFS_ALLOC_PTR_ARRAY(delnis, nnis);
if (!curnis || !addnis || !delnis) {
rc = -ENOMEM;
goto out;
* present in curnis[] then this peer is for this node.
*/
for (i = 0; i < ncurnis; i++) {
- if (LNET_NETTYP(LNET_NIDNET(curnis[i])) == LOLND)
+ if (curnis[i] == LNET_NID_LO_0)
continue;
for (j = 1; j < pbuf->pb_info.pi_nnis; j++) {
if (curnis[i] == pbuf->pb_info.pi_ni[j].ns_nid) {
*/
rc = 0;
out:
- LIBCFS_FREE(curnis, nnis * sizeof(*curnis));
- LIBCFS_FREE(addnis, nnis * sizeof(*addnis));
- LIBCFS_FREE(delnis, nnis * sizeof(*delnis));
+ CFS_FREE_PTR_ARRAY(curnis, nnis);
+ CFS_FREE_PTR_ARRAY(addnis, nnis);
+ CFS_FREE_PTR_ARRAY(delnis, nnis);
lnet_ping_buffer_decref(pbuf);
CDEBUG(D_NET, "peer %s (%p): %d\n", libcfs_nid2str(lp->lp_primary_nid), lp, rc);
if (pbuf->pb_info.pi_nnis <= 1)
goto out;
nid = pbuf->pb_info.pi_ni[1].ns_nid;
- if (LNET_NETTYP(LNET_NIDNET(lp->lp_primary_nid)) == LOLND) {
+ if (lp->lp_primary_nid == LNET_NID_LO_0) {
rc = lnet_peer_set_primary_nid(lp, nid, flags);
if (!rc)
rc = lnet_peer_merge_data(lp, pbuf);
pnid = lnet_peer_select_nid(lp);
lnet_net_unlock(cpt);
- nnis = MAX(lp->lp_data_nnis, LNET_INTERFACES_MIN);
+ nnis = max(lp->lp_data_nnis, LNET_INTERFACES_MIN);
rc = lnet_send_ping(pnid, &lp->lp_ping_mdh, nnis, lp,
- the_lnet.ln_dc_eqh, false);
+ the_lnet.ln_dc_handler, false);
/*
* if LNetMDBind in lnet_send_ping fails we need to decrement the
return rc ? rc : LNET_REDISCOVER_PEER;
}
+/*
+ * Mark the peer as discovered.
+ */
+static int lnet_peer_discovered(struct lnet_peer *lp)
+__must_hold(&lp->lp_lock)
+{
+ lp->lp_state |= LNET_PEER_DISCOVERED;
+ lp->lp_state &= ~(LNET_PEER_DISCOVERING |
+ LNET_PEER_REDISCOVER);
+
+ CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
+
+ return 0;
+}
+
/* Active side of push. */
static int lnet_peer_send_push(struct lnet_peer *lp)
__must_hold(&lp->lp_lock)
/* Don't push to a non-multi-rail peer. */
if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
+ /* if the peer's NIDs are up to date then the peer is discovered */
+ if (lp->lp_state & LNET_PEER_NIDS_UPTODATE) {
+ rc = lnet_peer_discovered(lp);
+ return rc;
+ }
+
return 0;
}
md.threshold = 2; /* Put/Ack */
md.max_size = 0;
md.options = 0;
- md.eq_handle = the_lnet.ln_dc_eqh;
+ md.handler = the_lnet.ln_dc_handler;
md.user_ptr = lp;
- rc = LNetMDBind(md, LNET_UNLINK, &lp->lp_push_mdh);
+ rc = LNetMDBind(&md, LNET_UNLINK, &lp->lp_push_mdh);
if (rc) {
lnet_ping_buffer_decref(pbuf);
CERROR("Can't bind push source MD: %d\n", rc);
goto fail_unlink;
}
- rc = LNetPut(LNET_NID_ANY, lp->lp_push_mdh,
+ rc = LNetPut(lp->lp_disc_src_nid, lp->lp_push_mdh,
LNET_ACK_REQ, id, LNET_RESERVED_PORTAL,
LNET_PROTO_PING_MATCHBITS, 0, 0);
+ /*
+ * reset the discovery source NID. There is no need to restrict
+ * the source when the push is triggered via
+ * lnet_push_update_to_peers(); it is set to a specific NID only
+ * when we initiate discovery from scratch.
+ */
+ lp->lp_disc_src_nid = LNET_NID_ANY;
+
if (rc)
goto fail_unlink;
}
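
With event queues gone, the completion callback is attached to the MD itself and LNetMDBind() takes the descriptor by pointer. A condensed sketch of the push send path under the new API (error handling and unlink elided; names as used in this patch):

    struct lnet_process_id id = {
            .nid = lnet_peer_select_nid(lp),
            .pid = LNET_PID_LUSTRE,
    };
    struct lnet_md md = {
            .start     = &pbuf->pb_info,
            .length    = LNET_PING_INFO_SIZE(pbuf->pb_nnis),
            .threshold = 2,                         /* Put + Ack */
            .handler   = the_lnet.ln_dc_handler,    /* was md.eq_handle */
            .user_ptr  = lp,
    };
    int rc;

    rc = LNetMDBind(&md, LNET_UNLINK, &lp->lp_push_mdh);
    if (!rc)
            rc = LNetPut(lp->lp_disc_src_nid, lp->lp_push_mdh,
                         LNET_ACK_REQ, id, LNET_RESERVED_PORTAL,
                         LNET_PROTO_PING_MATCHBITS, 0, 0);

Sending from lp_disc_src_nid rather than LNET_NID_ANY is intended to keep discovery traffic on the interface the peer used to reach us.
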
/*
- * Mark the peer as discovered.
- */
-static int lnet_peer_discovered(struct lnet_peer *lp)
-__must_hold(&lp->lp_lock)
-{
- lp->lp_state |= LNET_PEER_DISCOVERED;
- lp->lp_state &= ~(LNET_PEER_DISCOVERING |
- LNET_PEER_REDISCOVER);
-
- CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
-
- return 0;
-}
-
-
-/*
* Discovering this peer is taking too long. Cancel any Ping or Push
* that discovery is waiting on by unlinking the relevant MDs. The
* lnet_discovery_event_handler() will proceed from here and complete
TASK_INTERRUPTIBLE);
if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
break;
- if (lnet_push_target_resize_needed())
+ if (lnet_push_target_resize_needed() ||
+ the_lnet.ln_push_target->pb_needs_post)
break;
if (!list_empty(&the_lnet.ln_dc_request))
break;
wait_for_completion(&the_lnet.ln_started);
CDEBUG(D_NET, "started\n");
- cfs_block_allsigs();
for (;;) {
if (lnet_peer_discovery_wait_for_work())
break;
- lnet_resend_msgs();
-
if (lnet_push_target_resize_needed())
lnet_push_target_resize();
+ else if (the_lnet.ln_push_target->pb_needs_post)
+ lnet_push_target_post(the_lnet.ln_push_target,
+ &the_lnet.ln_push_target_md);
+
+ lnet_resend_msgs();
lnet_net_lock(LNET_LOCK_EX);
- if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
+ if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING) {
+ lnet_net_unlock(LNET_LOCK_EX);
break;
+ }
/*
* Process all incoming discovery work requests. When
lp->lp_last_queued = ktime_get_real_seconds();
lnet_net_unlock(LNET_LOCK_EX);
+ if (lnet_push_target_resize_needed())
+ lnet_push_target_resize();
+ else if (the_lnet.ln_push_target->pb_needs_post)
+ lnet_push_target_post(the_lnet.ln_push_target,
+ &the_lnet.ln_push_target_md);
+
/*
* Select an action depending on the state of
* the peer and whether discovery is disabled.
lnet_peer_discovery_complete(lp);
if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
break;
+
+ if (lp->lp_state & LNET_PEER_MARK_DELETION) {
+ struct list_head rlist;
+ struct lnet_route *route, *tmp;
+ int sensitivity = lp->lp_health_sensitivity;
+
+ INIT_LIST_HEAD(&rlist);
+
+ /*
+ * remove the peer from the discovery work queue,
+ * if it is on there, in preparation for deleting it.
+ */
+ if (!list_empty(&lp->lp_dc_list))
+ list_del(&lp->lp_dc_list);
+
+ lnet_net_unlock(LNET_LOCK_EX);
+
+ mutex_lock(&the_lnet.ln_api_mutex);
+
+ lnet_net_lock(LNET_LOCK_EX);
+ list_for_each_entry_safe(route, tmp,
+ &lp->lp_routes,
+ lr_gwlist)
+ lnet_move_route(route, NULL, &rlist);
+ lnet_net_unlock(LNET_LOCK_EX);
+
+ /* delete the peer */
+ lnet_peer_del(lp);
+
+ list_for_each_entry_safe(route, tmp,
+ &rlist, lr_list) {
+ /* re-add these routes */
+ lnet_add_route(route->lr_net,
+ route->lr_hops,
+ route->lr_nid,
+ route->lr_priority,
+ sensitivity);
+ LIBCFS_FREE(route, sizeof(*route));
+ }
+ mutex_unlock(&the_lnet.ln_api_mutex);
+
+ lnet_net_lock(LNET_LOCK_EX);
+ }
}
lnet_net_unlock(LNET_LOCK_EX);
/* Queue cleanup 2: wait for the expired queue to clear. */
while (!list_empty(&the_lnet.ln_dc_expired))
- schedule_timeout(cfs_time_seconds(1));
+ schedule_timeout_uninterruptible(cfs_time_seconds(1));
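
A bare schedule_timeout() does not sleep unless the task state was set beforehand, so the old form could spin through this cleanup wait; schedule_timeout_uninterruptible() folds the state change in:

    /* schedule_timeout_uninterruptible(t) is equivalent to: */
    set_current_state(TASK_UNINTERRUPTIBLE);
    schedule_timeout(t);
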
/* Queue cleanup 3: clear the request queue. */
lnet_net_lock(LNET_LOCK_EX);
}
lnet_net_unlock(LNET_LOCK_EX);
- LNetEQFree(the_lnet.ln_dc_eqh);
- LNetInvalidateEQHandle(&the_lnet.ln_dc_eqh);
+ lnet_assert_handler_unused(the_lnet.ln_dc_handler);
+ the_lnet.ln_dc_handler = NULL;
the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
wake_up(&the_lnet.ln_dc_waitq);
int lnet_peer_discovery_start(void)
{
struct task_struct *task;
- int rc;
+ int rc = 0;
if (the_lnet.ln_dc_state != LNET_DC_STATE_SHUTDOWN)
return -EALREADY;
- rc = LNetEQAlloc(0, lnet_discovery_event_handler, &the_lnet.ln_dc_eqh);
- if (rc != 0) {
- CERROR("Can't allocate discovery EQ: %d\n", rc);
- return rc;
- }
-
+ the_lnet.ln_dc_handler = lnet_discovery_event_handler;
the_lnet.ln_dc_state = LNET_DC_STATE_RUNNING;
task = kthread_run(lnet_peer_discovery, NULL, "lnet_discovery");
if (IS_ERR(task)) {
rc = PTR_ERR(task);
CERROR("Can't start peer discovery thread: %d\n", rc);
- LNetEQFree(the_lnet.ln_dc_eqh);
- LNetInvalidateEQHandle(&the_lnet.ln_dc_eqh);
+ the_lnet.ln_dc_handler = NULL;
the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
}
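
Start and stop of the discovery thread no longer allocate and free an event queue: registration is a plain pointer assignment, and teardown asserts that the handler has drained before clearing it. The pairing, condensed from this patch:

    /* startup */
    the_lnet.ln_dc_handler = lnet_discovery_event_handler;

    /* shutdown, after the thread has exited */
    lnet_assert_handler_unused(the_lnet.ln_dc_handler);
    the_lnet.ln_dc_handler = NULL;
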
if (list_empty(&lpni->lpni_recovery) &&
atomic_read(&lpni->lpni_healthv) < LNET_MAX_HEALTH_VALUE) {
- CERROR("lpni %s added to recovery queue. Health = %d\n",
+ CDEBUG(D_NET, "lpni %s added to recovery queue. Health = %d\n",
libcfs_nid2str(lpni->lpni_nid),
atomic_read(&lpni->lpni_healthv));
list_add_tail(&lpni->lpni_recovery, &the_lnet.ln_mt_peerNIRecovq);