* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2012, 2013, Intel Corporation.
+ * Copyright (c) 2012, 2014, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
"Reserved");
int
-lnet_fail_nid (lnet_nid_t nid, unsigned int threshold)
+lnet_fail_nid(lnet_nid_t nid, unsigned int threshold)
{
- lnet_test_peer_t *tp;
- cfs_list_t *el;
- cfs_list_t *next;
- cfs_list_t cull;
+ lnet_test_peer_t *tp;
+ struct list_head *el;
+ struct list_head *next;
+ struct list_head cull;
- LASSERT (the_lnet.ln_init);
+ LASSERT(the_lnet.ln_init);
/* NB: use lnet_net_lock(0) to serialize operations on test peers */
if (threshold != 0) {
tp->tp_threshold = threshold;
lnet_net_lock(0);
- cfs_list_add_tail(&tp->tp_list, &the_lnet.ln_test_peers);
+ list_add_tail(&tp->tp_list, &the_lnet.ln_test_peers);
lnet_net_unlock(0);
return 0;
}
/* removing entries */
- CFS_INIT_LIST_HEAD(&cull);
+ INIT_LIST_HEAD(&cull);
lnet_net_lock(0);
- cfs_list_for_each_safe (el, next, &the_lnet.ln_test_peers) {
- tp = cfs_list_entry (el, lnet_test_peer_t, tp_list);
+ list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
+ tp = list_entry(el, lnet_test_peer_t, tp_list);
- if (tp->tp_threshold == 0 || /* needs culling anyway */
- nid == LNET_NID_ANY || /* removing all entries */
- tp->tp_nid == nid) /* matched this one */
- {
- cfs_list_del (&tp->tp_list);
- cfs_list_add (&tp->tp_list, &cull);
- }
- }
+ if (tp->tp_threshold == 0 || /* needs culling anyway */
+ nid == LNET_NID_ANY || /* removing all entries */
+ tp->tp_nid == nid) { /* matched this one */
+ list_del(&tp->tp_list);
+ list_add(&tp->tp_list, &cull);
+ }
+ }
lnet_net_unlock(0);
- while (!cfs_list_empty (&cull)) {
- tp = cfs_list_entry (cull.next, lnet_test_peer_t, tp_list);
+ while (!list_empty(&cull)) {
+ tp = list_entry(cull.next, lnet_test_peer_t, tp_list);
- cfs_list_del (&tp->tp_list);
- LIBCFS_FREE(tp, sizeof (*tp));
- }
- return 0;
+ list_del(&tp->tp_list);
+ LIBCFS_FREE(tp, sizeof(*tp));
+ }
+ return 0;
}
static int
fail_peer (lnet_nid_t nid, int outgoing)
{
- lnet_test_peer_t *tp;
- cfs_list_t *el;
- cfs_list_t *next;
- cfs_list_t cull;
- int fail = 0;
+ lnet_test_peer_t *tp;
+ struct list_head *el;
+ struct list_head *next;
+ struct list_head cull;
+ int fail = 0;
- CFS_INIT_LIST_HEAD (&cull);
+ INIT_LIST_HEAD(&cull);
/* NB: use lnet_net_lock(0) to serialize operations on test peers */
lnet_net_lock(0);
- cfs_list_for_each_safe (el, next, &the_lnet.ln_test_peers) {
- tp = cfs_list_entry (el, lnet_test_peer_t, tp_list);
-
- if (tp->tp_threshold == 0) {
- /* zombie entry */
- if (outgoing) {
- /* only cull zombies on outgoing tests,
- * since we may be at interrupt priority on
- * incoming messages. */
- cfs_list_del (&tp->tp_list);
- cfs_list_add (&tp->tp_list, &cull);
- }
- continue;
- }
+ list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
+ tp = list_entry(el, lnet_test_peer_t, tp_list);
+
+ if (tp->tp_threshold == 0) {
+ /* zombie entry */
+ if (outgoing) {
+ /* only cull zombies on outgoing tests,
+ * since we may be at interrupt priority on
+ * incoming messages. */
+ list_del(&tp->tp_list);
+ list_add(&tp->tp_list, &cull);
+ }
+ continue;
+ }
- if (tp->tp_nid == LNET_NID_ANY || /* fail every peer */
- nid == tp->tp_nid) { /* fail this peer */
- fail = 1;
-
- if (tp->tp_threshold != LNET_MD_THRESH_INF) {
- tp->tp_threshold--;
- if (outgoing &&
- tp->tp_threshold == 0) {
- /* see above */
- cfs_list_del (&tp->tp_list);
- cfs_list_add (&tp->tp_list, &cull);
- }
- }
- break;
- }
- }
+ if (tp->tp_nid == LNET_NID_ANY || /* fail every peer */
+ nid == tp->tp_nid) { /* fail this peer */
+ fail = 1;
+
+ if (tp->tp_threshold != LNET_MD_THRESH_INF) {
+ tp->tp_threshold--;
+ if (outgoing &&
+ tp->tp_threshold == 0) {
+ /* see above */
+ list_del(&tp->tp_list);
+ list_add(&tp->tp_list, &cull);
+ }
+ }
+ break;
+ }
+ }
lnet_net_unlock(0);
- while (!cfs_list_empty (&cull)) {
- tp = cfs_list_entry (cull.next, lnet_test_peer_t, tp_list);
- cfs_list_del (&tp->tp_list);
+ while (!list_empty(&cull)) {
+ tp = list_entry(cull.next, lnet_test_peer_t, tp_list);
+ list_del(&tp->tp_list);
- LIBCFS_FREE(tp, sizeof (*tp));
- }
+ LIBCFS_FREE(tp, sizeof(*tp));
+ }
- return (fail);
+ return fail;
}
unsigned int
int alive;
cfs_time_t deadline;
- LASSERT (lnet_peer_aliveness_enabled(lp));
-
/* Trust lnet_notify() if it has more recent aliveness news, but
* ignore the initial assumed death (see lnet_peers_start_down()).
*/
int
lnet_peer_alive_locked (lnet_peer_t *lp)
{
- cfs_time_t now = cfs_time_current();
+ cfs_time_t now = cfs_time_current();
+ bool query;
- if (!lnet_peer_aliveness_enabled(lp))
- return -ENODEV;
+ if (!lnet_peer_aliveness_enabled(lp))
+ return -ENODEV;
- if (lnet_peer_is_alive(lp, now))
- return 1;
-
- /* Peer appears dead, but we should avoid frequent NI queries (at
- * most once per lnet_queryinterval seconds). */
- if (lp->lp_last_query != 0) {
- static const int lnet_queryinterval = 1;
-
- cfs_time_t next_query =
- cfs_time_add(lp->lp_last_query,
- cfs_time_seconds(lnet_queryinterval));
-
- if (cfs_time_before(now, next_query)) {
- if (lp->lp_alive)
- CWARN("Unexpected aliveness of peer %s: "
- "%d < %d (%d/%d)\n",
- libcfs_nid2str(lp->lp_nid),
- (int)now, (int)next_query,
- lnet_queryinterval,
- lp->lp_ni->ni_peertimeout);
- return 0;
- }
+ if (lp->lp_last_query == 0) {
+ query = true;
+ } else {
+ /* Peer appears dead, but we should avoid frequent NI queries
+ * (at most once per ni_query_interval seconds). */
+ static const int ni_query_interval = 1;
+ cfs_time_t next_query;
+
+ next_query = cfs_time_add(lp->lp_last_query,
+ cfs_time_seconds(ni_query_interval));
+ query = cfs_time_aftereq(now, next_query);
}
/* query NI for latest aliveness news */
- lnet_ni_query_locked(lp->lp_ni, lp);
+ if (query)
+ lnet_ni_query_locked(lp->lp_ni, lp);
if (lnet_peer_is_alive(lp, now))
return 1;
* lnet_send() is going to lnet_net_unlock immediately after this, so
* it sets do_send FALSE and I don't do the unlock/send/lock bit.
*
- * \retval 0 If \a msg sent or OK to send.
- * \retval EAGAIN If \a msg blocked for credit.
- * \retval EHOSTUNREACH If the next hop of the message appears dead.
- * \retval ECANCELED If the MD of the message has been unlinked.
+ * \retval LNET_CREDIT_OK If \a msg sent or OK to send.
+ * \retval LNET_CREDIT_WAIT If \a msg blocked for credit.
+ * \retval -EHOSTUNREACH If the next hop of the message appears dead.
+ * \retval -ECANCELED If the MD of the message has been unlinked.
*/
static int
lnet_post_send_locked(lnet_msg_t *msg, int do_send)
/* NB 'lp' is always the next hop */
if ((msg->msg_target.pid & LNET_PID_USERFLAG) == 0 &&
- lnet_peer_alive_locked(lp) == 0) {
+ lnet_peer_alive_locked(lp) == 0 &&
+ !lnet_msg_is_rc_ping(msg)) { /* send RC ping even for dead router */
the_lnet.ln_counters[cpt]->drop_count++;
the_lnet.ln_counters[cpt]->drop_length += msg->msg_len;
lnet_net_unlock(cpt);
lnet_finalize(ni, msg, -EHOSTUNREACH);
lnet_net_lock(cpt);
- return EHOSTUNREACH;
+ return -EHOSTUNREACH;
}
if (msg->msg_md != NULL &&
lnet_finalize(ni, msg, -ECANCELED);
lnet_net_lock(cpt);
- return ECANCELED;
+ return -ECANCELED;
}
- if (!msg->msg_peertxcredit) {
- LASSERT ((lp->lp_txcredits < 0) ==
- !cfs_list_empty(&lp->lp_txq));
+ if (!msg->msg_peertxcredit) {
+ LASSERT((lp->lp_txcredits < 0) ==
+ !list_empty(&lp->lp_txq));
- msg->msg_peertxcredit = 1;
- lp->lp_txqnob += msg->msg_len + sizeof(lnet_hdr_t);
- lp->lp_txcredits--;
+ msg->msg_peertxcredit = 1;
+ lp->lp_txqnob += msg->msg_len + sizeof(lnet_hdr_t);
+ lp->lp_txcredits--;
- if (lp->lp_txcredits < lp->lp_mintxcredits)
- lp->lp_mintxcredits = lp->lp_txcredits;
+ if (lp->lp_txcredits < lp->lp_mintxcredits)
+ lp->lp_mintxcredits = lp->lp_txcredits;
- if (lp->lp_txcredits < 0) {
+ if (lp->lp_txcredits < 0) {
msg->msg_tx_delayed = 1;
- cfs_list_add_tail(&msg->msg_list, &lp->lp_txq);
- return EAGAIN;
- }
- }
+ list_add_tail(&msg->msg_list, &lp->lp_txq);
+ return LNET_CREDIT_WAIT;
+ }
+ }
- if (!msg->msg_txcredit) {
+ if (!msg->msg_txcredit) {
LASSERT((tq->tq_credits < 0) ==
- !cfs_list_empty(&tq->tq_delayed));
+ !list_empty(&tq->tq_delayed));
msg->msg_txcredit = 1;
tq->tq_credits--;
if (tq->tq_credits < 0) {
msg->msg_tx_delayed = 1;
- cfs_list_add_tail(&msg->msg_list, &tq->tq_delayed);
- return EAGAIN;
+ list_add_tail(&msg->msg_list, &tq->tq_delayed);
+ return LNET_CREDIT_WAIT;
}
}
lnet_ni_send(ni, msg);
lnet_net_lock(cpt);
}
- return 0;
+ return LNET_CREDIT_OK;
}
#ifdef __KERNEL__
lnet_post_routed_recv_locked (lnet_msg_t *msg, int do_recv)
{
/* lnet_parse is going to lnet_net_unlock immediately after this, so it
- * sets do_recv FALSE and I don't do the unlock/send/lock bit. I
- * return EAGAIN if msg blocked and 0 if received or OK to receive */
+ * sets do_recv FALSE and I don't do the unlock/send/lock bit.
+ * I return LNET_CREDIT_WAIT if msg blocked and LNET_CREDIT_OK if
+ * received or OK to receive */
lnet_peer_t *lp = msg->msg_rxpeer;
lnet_rtrbufpool_t *rbp;
lnet_rtrbuf_t *rb;
/* non-lnet_parse callers only receive delayed messages */
LASSERT(!do_recv || msg->msg_rx_delayed);
- if (!msg->msg_peerrtrcredit) {
- LASSERT ((lp->lp_rtrcredits < 0) ==
- !cfs_list_empty(&lp->lp_rtrq));
+ if (!msg->msg_peerrtrcredit) {
+ LASSERT((lp->lp_rtrcredits < 0) ==
+ !list_empty(&lp->lp_rtrq));
msg->msg_peerrtrcredit = 1;
lp->lp_rtrcredits--;
if (lp->lp_rtrcredits < lp->lp_minrtrcredits)
lp->lp_minrtrcredits = lp->lp_rtrcredits;
- if (lp->lp_rtrcredits < 0) {
- /* must have checked eager_recv before here */
+ if (lp->lp_rtrcredits < 0) {
+ /* must have checked eager_recv before here */
LASSERT(msg->msg_rx_ready_delay);
msg->msg_rx_delayed = 1;
- cfs_list_add_tail(&msg->msg_list, &lp->lp_rtrq);
- return EAGAIN;
- }
- }
-
- rbp = lnet_msg2bufpool(msg);
+ list_add_tail(&msg->msg_list, &lp->lp_rtrq);
+ return LNET_CREDIT_WAIT;
+ }
+ }
- if (!msg->msg_rtrcredit) {
- LASSERT ((rbp->rbp_credits < 0) ==
- !cfs_list_empty(&rbp->rbp_msgs));
+ rbp = lnet_msg2bufpool(msg);
- msg->msg_rtrcredit = 1;
- rbp->rbp_credits--;
- if (rbp->rbp_credits < rbp->rbp_mincredits)
- rbp->rbp_mincredits = rbp->rbp_credits;
+ if (!msg->msg_rtrcredit) {
+ msg->msg_rtrcredit = 1;
+ rbp->rbp_credits--;
+ if (rbp->rbp_credits < rbp->rbp_mincredits)
+ rbp->rbp_mincredits = rbp->rbp_credits;
- if (rbp->rbp_credits < 0) {
- /* must have checked eager_recv before here */
+ if (rbp->rbp_credits < 0) {
+ /* must have checked eager_recv before here */
LASSERT(msg->msg_rx_ready_delay);
msg->msg_rx_delayed = 1;
- cfs_list_add_tail(&msg->msg_list, &rbp->rbp_msgs);
- return EAGAIN;
- }
- }
+ list_add_tail(&msg->msg_list, &rbp->rbp_msgs);
+ return LNET_CREDIT_WAIT;
+ }
+ }
- LASSERT (!cfs_list_empty(&rbp->rbp_bufs));
- rb = cfs_list_entry(rbp->rbp_bufs.next, lnet_rtrbuf_t, rb_list);
- cfs_list_del(&rb->rb_list);
+ LASSERT(!list_empty(&rbp->rbp_bufs));
+ rb = list_entry(rbp->rbp_bufs.next, lnet_rtrbuf_t, rb_list);
+ list_del(&rb->rb_list);
msg->msg_niov = rbp->rbp_npages;
msg->msg_kiov = &rb->rb_kiov[0];
0, msg->msg_len, msg->msg_len);
lnet_net_lock(cpt);
}
- return 0;
+ return LNET_CREDIT_OK;
}
#endif
msg->msg_txcredit = 0;
LASSERT((tq->tq_credits < 0) ==
- !cfs_list_empty(&tq->tq_delayed));
+ !list_empty(&tq->tq_delayed));
tq->tq_credits++;
if (tq->tq_credits <= 0) {
- msg2 = cfs_list_entry(tq->tq_delayed.next,
- lnet_msg_t, msg_list);
- cfs_list_del(&msg2->msg_list);
+ msg2 = list_entry(tq->tq_delayed.next,
+ lnet_msg_t, msg_list);
+ list_del(&msg2->msg_list);
LASSERT(msg2->msg_txpeer->lp_ni == ni);
LASSERT(msg2->msg_tx_delayed);
msg->msg_peertxcredit = 0;
LASSERT((txpeer->lp_txcredits < 0) ==
- !cfs_list_empty(&txpeer->lp_txq));
+ !list_empty(&txpeer->lp_txq));
txpeer->lp_txqnob -= msg->msg_len + sizeof(lnet_hdr_t);
LASSERT (txpeer->lp_txqnob >= 0);
txpeer->lp_txcredits++;
if (txpeer->lp_txcredits <= 0) {
- msg2 = cfs_list_entry(txpeer->lp_txq.next,
+ msg2 = list_entry(txpeer->lp_txq.next,
lnet_msg_t, msg_list);
- cfs_list_del(&msg2->msg_list);
+ list_del(&msg2->msg_list);
LASSERT(msg2->msg_txpeer == txpeer);
LASSERT(msg2->msg_tx_delayed);
}
}
+#ifdef __KERNEL__
+void
+lnet_schedule_blocked_locked(lnet_rtrbufpool_t *rbp)
+{
+ lnet_msg_t *msg;
+
+ if (list_empty(&rbp->rbp_msgs))
+ return;
+ msg = list_entry(rbp->rbp_msgs.next,
+ lnet_msg_t, msg_list);
+ list_del(&msg->msg_list);
+
+ (void)lnet_post_routed_recv_locked(msg, 1);
+}
+#endif
+
+void
+lnet_drop_routed_msgs_locked(struct list_head *list, int cpt)
+{
+ lnet_msg_t *msg;
+ lnet_msg_t *tmp;
+ struct list_head drop;
+
+ INIT_LIST_HEAD(&drop);
+
+ list_splice_init(list, &drop);
+
+ lnet_net_unlock(cpt);
+
+ list_for_each_entry_safe(msg, tmp, &drop, msg_list) {
+ lnet_ni_recv(msg->msg_rxpeer->lp_ni, msg->msg_private, NULL,
+ 0, 0, 0, msg->msg_hdr.payload_length);
+ list_del_init(&msg->msg_list);
+ lnet_finalize(NULL, msg, -ECANCELED);
+ }
+
+ lnet_net_lock(cpt);
+}
+
void
lnet_return_rx_credits_locked(lnet_msg_t *msg)
{
#ifdef __KERNEL__
lnet_msg_t *msg2;
- if (msg->msg_rtrcredit) {
- /* give back global router credits */
- lnet_rtrbuf_t *rb;
- lnet_rtrbufpool_t *rbp;
+ if (msg->msg_rtrcredit) {
+ /* give back global router credits */
+ lnet_rtrbuf_t *rb;
+ lnet_rtrbufpool_t *rbp;
- /* NB If a msg ever blocks for a buffer in rbp_msgs, it stays
- * there until it gets one allocated, or aborts the wait
- * itself */
- LASSERT (msg->msg_kiov != NULL);
+ /* NB If a msg ever blocks for a buffer in rbp_msgs, it stays
+ * there until it gets one allocated, or aborts the wait
+ * itself */
+ LASSERT(msg->msg_kiov != NULL);
- rb = cfs_list_entry(msg->msg_kiov, lnet_rtrbuf_t, rb_kiov[0]);
- rbp = rb->rb_pool;
- LASSERT (rbp == lnet_msg2bufpool(msg));
+ rb = list_entry(msg->msg_kiov, lnet_rtrbuf_t, rb_kiov[0]);
+ rbp = rb->rb_pool;
- msg->msg_kiov = NULL;
- msg->msg_rtrcredit = 0;
+ msg->msg_kiov = NULL;
+ msg->msg_rtrcredit = 0;
- LASSERT((rbp->rbp_credits < 0) ==
- !cfs_list_empty(&rbp->rbp_msgs));
- LASSERT((rbp->rbp_credits > 0) ==
- !cfs_list_empty(&rbp->rbp_bufs));
+ LASSERT(rbp == lnet_msg2bufpool(msg));
- cfs_list_add(&rb->rb_list, &rbp->rbp_bufs);
- rbp->rbp_credits++;
- if (rbp->rbp_credits <= 0) {
- msg2 = cfs_list_entry(rbp->rbp_msgs.next,
- lnet_msg_t, msg_list);
- cfs_list_del(&msg2->msg_list);
+ LASSERT((rbp->rbp_credits > 0) ==
+ !list_empty(&rbp->rbp_bufs));
- (void) lnet_post_routed_recv_locked(msg2, 1);
- }
- }
+ /* If routing is now turned off, we just drop this buffer and
+ * don't bother trying to return credits. */
+ if (!the_lnet.ln_routing) {
+ lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
+ goto routing_off;
+ }
+
+ /* It is possible that a user has lowered the desired number of
+ * buffers in this pool. Make sure we never put back
+ * more buffers than the stated number. */
+ if (rbp->rbp_credits >= rbp->rbp_nbuffers) {
+ /* Discard this buffer so we don't have too many. */
+ lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
+ } else {
+ list_add(&rb->rb_list, &rbp->rbp_bufs);
+ rbp->rbp_credits++;
+ if (rbp->rbp_credits <= 0)
+ lnet_schedule_blocked_locked(rbp);
+ }
+ }
- if (msg->msg_peerrtrcredit) {
- /* give back peer router credits */
- msg->msg_peerrtrcredit = 0;
+routing_off:
+ if (msg->msg_peerrtrcredit) {
+ /* give back peer router credits */
+ msg->msg_peerrtrcredit = 0;
- LASSERT((rxpeer->lp_rtrcredits < 0) ==
- !cfs_list_empty(&rxpeer->lp_rtrq));
+ LASSERT((rxpeer->lp_rtrcredits < 0) ==
+ !list_empty(&rxpeer->lp_rtrq));
- rxpeer->lp_rtrcredits++;
- if (rxpeer->lp_rtrcredits <= 0) {
- msg2 = cfs_list_entry(rxpeer->lp_rtrq.next,
- lnet_msg_t, msg_list);
- cfs_list_del(&msg2->msg_list);
+ rxpeer->lp_rtrcredits++;
- (void) lnet_post_routed_recv_locked(msg2, 1);
- }
- }
+ /* drop all messages which are queued to be routed on that
+ * peer. */
+ if (!the_lnet.ln_routing) {
+ lnet_drop_routed_msgs_locked(&rxpeer->lp_rtrq,
+ msg->msg_rx_cpt);
+ } else if (rxpeer->lp_rtrcredits <= 0) {
+ msg2 = list_entry(rxpeer->lp_rtrq.next,
+ lnet_msg_t, msg_list);
+ list_del(&msg2->msg_list);
+
+ (void) lnet_post_routed_recv_locked(msg2, 1);
+ }
+ }
#else
- LASSERT (!msg->msg_rtrcredit);
- LASSERT (!msg->msg_peerrtrcredit);
+ LASSERT(!msg->msg_rtrcredit);
+ LASSERT(!msg->msg_peerrtrcredit);
#endif
- if (rxpeer != NULL) {
- msg->msg_rxpeer = NULL;
- lnet_peer_decref_locked(rxpeer);
- }
+ if (rxpeer != NULL) {
+ msg->msg_rxpeer = NULL;
+ lnet_peer_decref_locked(rxpeer);
+ }
}
static int
lnet_peer_t *p1 = r1->lr_gateway;
lnet_peer_t *p2 = r2->lr_gateway;
+ if (p1->lp_ni->ni_peertimeout > 0 &&
+ p2->lp_ni->ni_peertimeout > 0) {
+ /* if a router has queued bytes but no aliveness update for
+ * the last 10 seconds, it could be potentially dead or
+ * congested, so we prefer not to choose it even its status
+ * is still alive.
+ */
+ int router_slow = cfs_time_seconds(10);
+ bool r1_slow;
+ bool r2_slow;
+ cfs_time_t now = cfs_time_current();
+
+ r1_slow = p1->lp_txqnob != 0 &&
+ cfs_time_aftereq(now, p1->lp_last_alive + router_slow);
+ r2_slow = p2->lp_txqnob != 0 &&
+ cfs_time_aftereq(now, p2->lp_last_alive + router_slow);
+
+ if (!r1_slow && r2_slow)
+ return 1;
+
+ if (r1_slow && !r2_slow)
+ return -1;
+ }
+
if (r1->lr_priority < r2->lr_priority)
return 1;
lp_best = NULL;
best_route = last_route = NULL;
- cfs_list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
+ list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
lp = route->lr_gateway;
if (!lnet_is_route_alive(route))
msg->msg_target_is_router = 1;
msg->msg_target.nid = lp->lp_nid;
- msg->msg_target.pid = LUSTRE_SRV_LNET_PID;
+ msg->msg_target.pid = LNET_PID_LUSTRE;
}
/* 'lp' is our best choice of peer */
rc = lnet_post_send_locked(msg, 0);
lnet_net_unlock(cpt);
- if (rc == EHOSTUNREACH || rc == ECANCELED)
- return -rc;
+ if (rc < 0)
+ return rc;
- if (rc == 0)
+ if (rc == LNET_CREDIT_OK)
lnet_ni_send(src_ni, msg);
- return 0; /* rc == 0 or EAGAIN */
+ return 0; /* rc == LNET_CREDIT_OK or LNET_CREDIT_WAIT */
}
-static void
+void
lnet_drop_message(lnet_ni_t *ni, int cpt, void *private, unsigned int nob)
{
lnet_net_lock(cpt);
return 0;
}
-static int
+/**
+ * \retval LNET_CREDIT_OK If \a msg is forwarded
+ * \retval LNET_CREDIT_WAIT If \a msg is blocked because w/o buffer
+ * \retval -ve error code
+ */
+int
lnet_parse_forward_locked(lnet_ni_t *ni, lnet_msg_t *msg)
{
int rc = 0;
#ifdef __KERNEL__
+ if (!the_lnet.ln_routing)
+ return -ECANCELED;
+
if (msg->msg_rxpeer->lp_rtrcredits <= 0 ||
lnet_msg2bufpool(msg)->rbp_credits <= 0) {
if (ni->ni_lnd->lnd_eager_recv == NULL) {
return rc;
}
+int
+lnet_parse_local(lnet_ni_t *ni, lnet_msg_t *msg)
+{
+ int rc;
+
+ switch (msg->msg_type) {
+ case LNET_MSG_ACK:
+ rc = lnet_parse_ack(ni, msg);
+ break;
+ case LNET_MSG_PUT:
+ rc = lnet_parse_put(ni, msg);
+ break;
+ case LNET_MSG_GET:
+ rc = lnet_parse_get(ni, msg, msg->msg_rdma_get);
+ break;
+ case LNET_MSG_REPLY:
+ rc = lnet_parse_reply(ni, msg);
+ break;
+ default: /* prevent an unused label if !kernel */
+ LASSERT(0);
+ return -EPROTO;
+ }
+
+ LASSERT(rc == 0 || rc == ENOENT);
+ return rc;
+}
+
char *
lnet_msgtyp2str (int type)
{
lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
void *private, int rdma_req)
{
- int rc = 0;
- int cpt;
- int for_me;
struct lnet_msg *msg;
- lnet_pid_t dest_pid;
- lnet_nid_t dest_nid;
- lnet_nid_t src_nid;
- __u32 payload_length;
- __u32 type;
-
- LASSERT (!in_interrupt ());
+ lnet_peer_t *rxpeer;
+ lnet_pid_t dest_pid;
+ lnet_nid_t dest_nid;
+ lnet_nid_t src_nid;
+ __u32 payload_length;
+ __u32 type;
+ int for_me;
+ int cpt;
+ int rc = 0;
+
+ LASSERT(!in_interrupt());
type = le32_to_cpu(hdr->type);
src_nid = le64_to_cpu(hdr->src_nid);
dest_nid = le64_to_cpu(hdr->dest_nid);
dest_pid = le32_to_cpu(hdr->dest_pid);
- payload_length = le32_to_cpu(hdr->payload_length);
+ payload_length = le32_to_cpu(hdr->payload_length);
- for_me = (ni->ni_nid == dest_nid);
+ for_me = (ni->ni_nid == dest_nid);
cpt = lnet_cpt_of_nid(from_nid);
- switch (type) {
- case LNET_MSG_ACK:
- case LNET_MSG_GET:
- if (payload_length > 0) {
- CERROR("%s, src %s: bad %s payload %d (0 expected)\n",
- libcfs_nid2str(from_nid),
- libcfs_nid2str(src_nid),
- lnet_msgtyp2str(type), payload_length);
- return -EPROTO;
- }
- break;
-
- case LNET_MSG_PUT:
- case LNET_MSG_REPLY:
- if (payload_length > (__u32)(for_me ? LNET_MAX_PAYLOAD : LNET_MTU)) {
- CERROR("%s, src %s: bad %s payload %d "
- "(%d max expected)\n",
- libcfs_nid2str(from_nid),
- libcfs_nid2str(src_nid),
- lnet_msgtyp2str(type),
- payload_length,
- for_me ? LNET_MAX_PAYLOAD : LNET_MTU);
- return -EPROTO;
- }
- break;
+ switch (type) {
+ case LNET_MSG_ACK:
+ case LNET_MSG_GET:
+ if (payload_length > 0) {
+ CERROR("%s, src %s: bad %s payload %d (0 expected)\n",
+ libcfs_nid2str(from_nid),
+ libcfs_nid2str(src_nid),
+ lnet_msgtyp2str(type), payload_length);
+ return -EPROTO;
+ }
+ break;
+
+ case LNET_MSG_PUT:
+ case LNET_MSG_REPLY:
+ if (payload_length >
+ (__u32)(for_me ? LNET_MAX_PAYLOAD : LNET_MTU)) {
+ CERROR("%s, src %s: bad %s payload %d "
+ "(%d max expected)\n",
+ libcfs_nid2str(from_nid),
+ libcfs_nid2str(src_nid),
+ lnet_msgtyp2str(type),
+ payload_length,
+ for_me ? LNET_MAX_PAYLOAD : LNET_MTU);
+ return -EPROTO;
+ }
+ break;
- default:
- CERROR("%s, src %s: Bad message type 0x%x\n",
- libcfs_nid2str(from_nid),
- libcfs_nid2str(src_nid), type);
- return -EPROTO;
- }
+ default:
+ CERROR("%s, src %s: Bad message type 0x%x\n",
+ libcfs_nid2str(from_nid),
+ libcfs_nid2str(src_nid), type);
+ return -EPROTO;
+ }
if (the_lnet.ln_routing &&
ni->ni_last_alive != cfs_time_current_sec()) {
- lnet_ni_lock(ni);
-
/* NB: so far here is the only place to set NI status to "up */
+ lnet_ni_lock(ni);
ni->ni_last_alive = cfs_time_current_sec();
if (ni->ni_status != NULL &&
ni->ni_status->ns_status == LNET_NI_STATUS_DOWN)
ni->ni_status->ns_status = LNET_NI_STATUS_UP;
lnet_ni_unlock(ni);
- }
+ }
- /* Regard a bad destination NID as a protocol error. Senders should
- * know what they're doing; if they don't they're misconfigured, buggy
- * or malicious so we chop them off at the knees :) */
-
- if (!for_me) {
- if (LNET_NIDNET(dest_nid) == LNET_NIDNET(ni->ni_nid)) {
- /* should have gone direct */
- CERROR ("%s, src %s: Bad dest nid %s "
- "(should have been sent direct)\n",
- libcfs_nid2str(from_nid),
- libcfs_nid2str(src_nid),
- libcfs_nid2str(dest_nid));
- return -EPROTO;
- }
+ /* Regard a bad destination NID as a protocol error. Senders should
+ * know what they're doing; if they don't they're misconfigured, buggy
+ * or malicious so we chop them off at the knees :) */
- if (lnet_islocalnid(dest_nid)) {
- /* dest is another local NI; sender should have used
- * this node's NID on its own network */
- CERROR ("%s, src %s: Bad dest nid %s "
- "(it's my nid but on a different network)\n",
- libcfs_nid2str(from_nid),
- libcfs_nid2str(src_nid),
- libcfs_nid2str(dest_nid));
- return -EPROTO;
- }
+ if (!for_me) {
+ if (LNET_NIDNET(dest_nid) == LNET_NIDNET(ni->ni_nid)) {
+ /* should have gone direct */
+ CERROR("%s, src %s: Bad dest nid %s "
+ "(should have been sent direct)\n",
+ libcfs_nid2str(from_nid),
+ libcfs_nid2str(src_nid),
+ libcfs_nid2str(dest_nid));
+ return -EPROTO;
+ }
- if (rdma_req && type == LNET_MSG_GET) {
- CERROR ("%s, src %s: Bad optimized GET for %s "
- "(final destination must be me)\n",
- libcfs_nid2str(from_nid),
- libcfs_nid2str(src_nid),
- libcfs_nid2str(dest_nid));
- return -EPROTO;
- }
+ if (lnet_islocalnid(dest_nid)) {
+ /* dest is another local NI; sender should have used
+ * this node's NID on its own network */
+ CERROR("%s, src %s: Bad dest nid %s "
+ "(it's my nid but on a different network)\n",
+ libcfs_nid2str(from_nid),
+ libcfs_nid2str(src_nid),
+ libcfs_nid2str(dest_nid));
+ return -EPROTO;
+ }
- if (!the_lnet.ln_routing) {
- CERROR ("%s, src %s: Dropping message for %s "
- "(routing not enabled)\n",
- libcfs_nid2str(from_nid),
- libcfs_nid2str(src_nid),
- libcfs_nid2str(dest_nid));
- goto drop;
- }
- }
+ if (rdma_req && type == LNET_MSG_GET) {
+ CERROR("%s, src %s: Bad optimized GET for %s "
+ "(final destination must be me)\n",
+ libcfs_nid2str(from_nid),
+ libcfs_nid2str(src_nid),
+ libcfs_nid2str(dest_nid));
+ return -EPROTO;
+ }
- /* Message looks OK; we're not going to return an error, so we MUST
- * call back lnd_recv() come what may... */
+ if (!the_lnet.ln_routing) {
+ CERROR("%s, src %s: Dropping message for %s "
+ "(routing not enabled)\n",
+ libcfs_nid2str(from_nid),
+ libcfs_nid2str(src_nid),
+ libcfs_nid2str(dest_nid));
+ goto drop;
+ }
+ }
- if (!cfs_list_empty (&the_lnet.ln_test_peers) && /* normally we don't */
- fail_peer (src_nid, 0)) /* shall we now? */
- {
- CERROR("%s, src %s: Dropping %s to simulate failure\n",
- libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
- lnet_msgtyp2str(type));
- goto drop;
- }
+ /* Message looks OK; we're not going to return an error, so we MUST
+ * call back lnd_recv() come what may... */
- msg = lnet_msg_alloc();
- if (msg == NULL) {
- CERROR("%s, src %s: Dropping %s (out of memory)\n",
- libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
- lnet_msgtyp2str(type));
- goto drop;
- }
+ if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */
+ fail_peer(src_nid, 0)) { /* shall we now? */
+ CERROR("%s, src %s: Dropping %s to simulate failure\n",
+ libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
+ lnet_msgtyp2str(type));
+ goto drop;
+ }
+
+ if (!list_empty(&the_lnet.ln_drop_rules) &&
+ lnet_drop_rule_match(hdr)) {
+ CDEBUG(D_NET, "%s, src %s, dst %s: Dropping %s to simulate "
+ "silent message loss\n",
+ libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
+ libcfs_nid2str(dest_nid), lnet_msgtyp2str(type));
+ goto drop;
+ }
- /* msg zeroed in lnet_msg_alloc; i.e. flags all clear, pointers NULL etc */
- msg->msg_type = type;
- msg->msg_private = private;
- msg->msg_receiving = 1;
- msg->msg_len = msg->msg_wanted = payload_length;
- msg->msg_offset = 0;
- msg->msg_hdr = *hdr;
+ msg = lnet_msg_alloc();
+ if (msg == NULL) {
+ CERROR("%s, src %s: Dropping %s (out of memory)\n",
+ libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
+ lnet_msgtyp2str(type));
+ goto drop;
+ }
+
+ /* msg zeroed in lnet_msg_alloc; i.e. flags all clear,
+ * pointers NULL etc */
+
+ msg->msg_type = type;
+ msg->msg_private = private;
+ msg->msg_receiving = 1;
+ msg->msg_rdma_get = rdma_req;
+ msg->msg_len = msg->msg_wanted = payload_length;
+ msg->msg_offset = 0;
+ msg->msg_hdr = *hdr;
/* for building message event */
msg->msg_from = from_nid;
if (!for_me) {
goto drop;
}
+ if (lnet_isrouter(msg->msg_rxpeer)) {
+ lnet_peer_set_alive(msg->msg_rxpeer);
+ if (avoid_asym_router_failure &&
+ LNET_NIDNET(src_nid) != LNET_NIDNET(from_nid)) {
+ /* received a remote message from router, update
+ * remote NI status on this router.
+ * NB: multi-hop routed message will be ignored.
+ */
+ lnet_router_ni_update_locked(msg->msg_rxpeer,
+ LNET_NIDNET(src_nid));
+ }
+ }
+
lnet_msg_commit(msg, cpt);
+ /* LND just notified me for incoming message from rxpeer, so assume
+ * it is alive */
+ rxpeer = msg->msg_rxpeer;
+ rxpeer->lp_last_alive = rxpeer->lp_last_query = cfs_time_current();
+ if (!rxpeer->lp_alive)
+ lnet_notify_locked(rxpeer, 0, 1, rxpeer->lp_last_alive);
+
+ if (lnet_isrouter(msg->msg_rxpeer) &&
+ LNET_NIDNET(src_nid) != LNET_NIDNET(from_nid)) {
+ lnet_router_ni_update_locked(msg->msg_rxpeer,
+ LNET_NIDNET(src_nid));
+ }
+
+ /* message delay simulation */
+ if (unlikely(!list_empty(&the_lnet.ln_delay_rules) &&
+ lnet_delay_rule_match_locked(hdr, msg))) {
+ lnet_net_unlock(cpt);
+ return 0;
+ }
if (!for_me) {
rc = lnet_parse_forward_locked(ni, msg);
if (rc < 0)
goto free_drop;
- if (rc == 0) {
+
+ if (rc == LNET_CREDIT_OK) {
lnet_ni_recv(ni, msg->msg_private, msg, 0,
0, payload_length, payload_length);
}
lnet_net_unlock(cpt);
- switch (type) {
- case LNET_MSG_ACK:
- rc = lnet_parse_ack(ni, msg);
- break;
- case LNET_MSG_PUT:
- rc = lnet_parse_put(ni, msg);
- break;
- case LNET_MSG_GET:
- rc = lnet_parse_get(ni, msg, rdma_req);
- break;
- case LNET_MSG_REPLY:
- rc = lnet_parse_reply(ni, msg);
- break;
- default:
- LASSERT(0);
- rc = -EPROTO;
- goto free_drop; /* prevent an unused label if !kernel */
- }
-
- if (rc == 0)
- return 0;
-
- LASSERT (rc == ENOENT);
+ rc = lnet_parse_local(ni, msg);
+ if (rc != 0)
+ goto free_drop;
+ return 0;
free_drop:
LASSERT(msg->msg_md == NULL);
EXPORT_SYMBOL(lnet_parse);
void
-lnet_drop_delayed_msg_list(cfs_list_t *head, char *reason)
+lnet_drop_delayed_msg_list(struct list_head *head, char *reason)
{
- while (!cfs_list_empty(head)) {
+ while (!list_empty(head)) {
lnet_process_id_t id = {0};
lnet_msg_t *msg;
- msg = cfs_list_entry(head->next, lnet_msg_t, msg_list);
- cfs_list_del(&msg->msg_list);
+ msg = list_entry(head->next, lnet_msg_t, msg_list);
+ list_del(&msg->msg_list);
id.nid = msg->msg_hdr.src_nid;
id.pid = msg->msg_hdr.src_pid;
}
void
-lnet_recv_delayed_msg_list(cfs_list_t *head)
+lnet_recv_delayed_msg_list(struct list_head *head)
{
- while (!cfs_list_empty(head)) {
+ while (!list_empty(head)) {
lnet_msg_t *msg;
lnet_process_id_t id;
- msg = cfs_list_entry(head->next, lnet_msg_t, msg_list);
- cfs_list_del(&msg->msg_list);
+ msg = list_entry(head->next, lnet_msg_t, msg_list);
+ list_del(&msg->msg_list);
/* md won't disappear under me, since each msg
* holds a ref on it */
int cpt;
int rc;
- LASSERT (the_lnet.ln_init);
- LASSERT (the_lnet.ln_refcount > 0);
+ LASSERT(the_lnet.ln_init);
+ LASSERT(the_lnet.ln_refcount > 0);
- if (!cfs_list_empty (&the_lnet.ln_test_peers) && /* normally we don't */
- fail_peer (target.nid, 1)) /* shall we now? */
- {
+ if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */
+ fail_peer(target.nid, 1)) { /* shall we now? */
CERROR("Dropping PUT to %s: simulated failure\n",
libcfs_id2str(target));
return -EIO;
LASSERT (the_lnet.ln_init);
LASSERT (the_lnet.ln_refcount > 0);
- if (!cfs_list_empty (&the_lnet.ln_test_peers) && /* normally we don't */
- fail_peer (target.nid, 1)) /* shall we now? */
+ if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */
+ fail_peer(target.nid, 1)) /* shall we now? */
{
CERROR("Dropping GET to %s: simulated failure\n",
libcfs_id2str(target));
int
LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
{
- cfs_list_t *e;
+ struct list_head *e;
struct lnet_ni *ni;
lnet_remotenet_t *rnet;
__u32 dstnet = LNET_NIDNET(dstnid);
int hops;
int cpt;
__u32 order = 2;
- cfs_list_t *rn_list;
+ struct list_head *rn_list;
/* if !local_nid_dist_zero, I don't return a distance of 0 ever
* (when lustre sees a distance of 0, it substitutes 0@lo), so I
cpt = lnet_net_lock_current();
- cfs_list_for_each (e, &the_lnet.ln_nis) {
- ni = cfs_list_entry(e, lnet_ni_t, ni_list);
+ list_for_each(e, &the_lnet.ln_nis) {
+ ni = list_entry(e, lnet_ni_t, ni_list);
if (ni->ni_nid == dstnid) {
if (srcnidp != NULL)
}
rn_list = lnet_net2rnethash(dstnet);
- cfs_list_for_each(e, rn_list) {
- rnet = cfs_list_entry(e, lnet_remotenet_t, lrn_list);
+ list_for_each(e, rn_list) {
+ rnet = list_entry(e, lnet_remotenet_t, lrn_list);
- if (rnet->lrn_net == dstnet) {
- lnet_route_t *route;
- lnet_route_t *shortest = NULL;
+ if (rnet->lrn_net == dstnet) {
+ lnet_route_t *route;
+ lnet_route_t *shortest = NULL;
- LASSERT (!cfs_list_empty(&rnet->lrn_routes));
+ LASSERT(!list_empty(&rnet->lrn_routes));
- cfs_list_for_each_entry(route, &rnet->lrn_routes,
- lr_list) {
- if (shortest == NULL ||
- route->lr_hops < shortest->lr_hops)
- shortest = route;
- }
+ list_for_each_entry(route, &rnet->lrn_routes,
+ lr_list) {
+ if (shortest == NULL ||
+ route->lr_hops < shortest->lr_hops)
+ shortest = route;
+ }
LASSERT (shortest != NULL);
hops = shortest->lr_hops;
#else
lnet_ni_t *ni;
lnet_remotenet_t *rnet;
- cfs_list_t *tmp;
+ struct list_head *tmp;
lnet_route_t *route;
lnet_nid_t *nids;
int nnids;
cpt = lnet_net_lock_current();
rnet = lnet_find_net_locked(LNET_NIDNET(id.nid));
if (rnet != NULL) {
- cfs_list_for_each(tmp, &rnet->lrn_routes) {
+ list_for_each(tmp, &rnet->lrn_routes) {
if (nnids == maxnids) {
lnet_net_unlock(cpt);
LIBCFS_FREE(nids, maxnids * sizeof(*nids));
goto again;
}
- route = cfs_list_entry(tmp, lnet_route_t, lr_list);
- nids[nnids++] = route->lr_gateway->lp_nid;
- }
- }
+ route = list_entry(tmp, lnet_route_t, lr_list);
+ nids[nnids++] = route->lr_gateway->lp_nid;
+ }
+ }
lnet_net_unlock(cpt);
/* set async on all the routers */
while (nnids-- > 0) {
- id.pid = LUSTRE_SRV_LNET_PID;
+ id.pid = LNET_PID_LUSTRE;
id.nid = nids[nnids];
ni = lnet_net2ni(LNET_NIDNET(id.nid));