X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lnet%2Flnet%2Flib-move.c;h=5987b1b730545bb888eaf2d38e83708bc9509941;hp=0ef94aa337a59ef831c5f8ff418c3ec5c2dd3a4e;hb=cc27201a76574b51dc3ffb37f039b3364cab386d;hpb=8e498d3f23ea9bcbef524153c6613f93a6229431 diff --git a/lnet/lnet/lib-move.c b/lnet/lnet/lib-move.c index 0ef94aa..5987b1b 100644 --- a/lnet/lnet/lib-move.c +++ b/lnet/lnet/lib-move.c @@ -63,6 +63,36 @@ struct lnet_send_data { __u32 sd_send_case; }; +static inline bool +lnet_msg_is_response(struct lnet_msg *msg) +{ + return msg->msg_type == LNET_MSG_ACK || msg->msg_type == LNET_MSG_REPLY; +} + +static inline bool +lnet_response_tracking_enabled(__u32 msg_type, unsigned int md_options) +{ + if (md_options & LNET_MD_NO_TRACK_RESPONSE) + /* Explicitly disabled in MD options */ + return false; + + if (md_options & LNET_MD_TRACK_RESPONSE) + /* Explicity enabled in MD options */ + return true; + + if (lnet_response_tracking == 3) + /* Enabled for all message types */ + return true; + + if (msg_type == LNET_MSG_PUT) + return lnet_response_tracking == 2; + + if (msg_type == LNET_MSG_GET) + return lnet_response_tracking == 1; + + return false; +} + static inline struct lnet_comm_count * get_stats_counts(struct lnet_element_stats *stats, enum lnet_stats_type stats_type) @@ -165,7 +195,7 @@ lnet_fail_nid(lnet_nid_t nid, unsigned int threshold) struct lnet_test_peer *tp; struct list_head *el; struct list_head *next; - struct list_head cull; + LIST_HEAD(cull); /* NB: use lnet_net_lock(0) to serialize operations on test peers */ if (threshold != 0) { @@ -183,9 +213,6 @@ lnet_fail_nid(lnet_nid_t nid, unsigned int threshold) return 0; } - /* removing entries */ - INIT_LIST_HEAD(&cull); - lnet_net_lock(0); list_for_each_safe(el, next, &the_lnet.ln_test_peers) { @@ -194,8 +221,7 @@ lnet_fail_nid(lnet_nid_t nid, unsigned int threshold) if (tp->tp_threshold == 0 || /* needs culling anyway */ nid == LNET_NID_ANY || /* removing all entries */ tp->tp_nid == nid) { /* matched this one */ - list_del(&tp->tp_list); - list_add(&tp->tp_list, &cull); + list_move(&tp->tp_list, &cull); } } @@ -216,10 +242,8 @@ fail_peer (lnet_nid_t nid, int outgoing) struct lnet_test_peer *tp; struct list_head *el; struct list_head *next; - struct list_head cull; - int fail = 0; - - INIT_LIST_HEAD(&cull); + LIST_HEAD(cull); + int fail = 0; /* NB: use lnet_net_lock(0) to serialize operations on test peers */ lnet_net_lock(0); @@ -233,8 +257,7 @@ fail_peer (lnet_nid_t nid, int outgoing) /* only cull zombies on outgoing tests, * since we may be at interrupt priority on * incoming messages. 
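+ * (A "zombie" is a test peer entry whose tp_threshold has already
+ * dropped to zero and only needs to be unlinked and freed.)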
*/ - list_del(&tp->tp_list); - list_add(&tp->tp_list, &cull); + list_move(&tp->tp_list, &cull); } continue; } @@ -248,8 +271,7 @@ fail_peer (lnet_nid_t nid, int outgoing) if (outgoing && tp->tp_threshold == 0) { /* see above */ - list_del(&tp->tp_list); - list_add(&tp->tp_list, &cull); + list_move(&tp->tp_list, &cull); } } break; @@ -287,7 +309,7 @@ lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset, unsigned int nob) { /* NB diov, siov are READ-ONLY */ - unsigned int this_nob; + unsigned int this_nob; if (nob == 0) return; @@ -313,9 +335,9 @@ lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset, do { LASSERT(ndiov > 0); LASSERT(nsiov > 0); - this_nob = MIN(diov->iov_len - doffset, - siov->iov_len - soffset); - this_nob = MIN(this_nob, nob); + this_nob = min3((unsigned int)diov->iov_len - doffset, + (unsigned int)siov->iov_len - soffset, + nob); memcpy((char *)diov->iov_base + doffset, (char *)siov->iov_base + soffset, this_nob); @@ -340,70 +362,24 @@ lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset, } EXPORT_SYMBOL(lnet_copy_iov2iov); -int -lnet_extract_iov(int dst_niov, struct kvec *dst, - int src_niov, struct kvec *src, - unsigned int offset, unsigned int len) -{ - /* Initialise 'dst' to the subset of 'src' starting at 'offset', - * for exactly 'len' bytes, and return the number of entries. - * NB not destructive to 'src' */ - unsigned int frag_len; - unsigned int niov; - - if (len == 0) /* no data => */ - return (0); /* no frags */ - - LASSERT(src_niov > 0); - while (offset >= src->iov_len) { /* skip initial frags */ - offset -= src->iov_len; - src_niov--; - src++; - LASSERT(src_niov > 0); - } - - niov = 1; - for (;;) { - LASSERT(src_niov > 0); - LASSERT((int)niov <= dst_niov); - - frag_len = src->iov_len - offset; - dst->iov_base = ((char *)src->iov_base) + offset; - - if (len <= frag_len) { - dst->iov_len = len; - return (niov); - } - - dst->iov_len = frag_len; - - len -= frag_len; - dst++; - src++; - niov++; - src_niov--; - offset = 0; - } -} -EXPORT_SYMBOL(lnet_extract_iov); - - unsigned int -lnet_kiov_nob(unsigned int niov, lnet_kiov_t *kiov) +lnet_kiov_nob(unsigned int niov, struct bio_vec *kiov) { unsigned int nob = 0; LASSERT(niov == 0 || kiov != NULL); while (niov-- > 0) - nob += (kiov++)->kiov_len; + nob += (kiov++)->bv_len; return (nob); } EXPORT_SYMBOL(lnet_kiov_nob); void -lnet_copy_kiov2kiov(unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset, - unsigned int nsiov, lnet_kiov_t *siov, unsigned int soffset, +lnet_copy_kiov2kiov(unsigned int ndiov, struct bio_vec *diov, + unsigned int doffset, + unsigned int nsiov, struct bio_vec *siov, + unsigned int soffset, unsigned int nob) { /* NB diov, siov are READ-ONLY */ @@ -417,16 +393,16 @@ lnet_copy_kiov2kiov(unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset, LASSERT (!in_interrupt ()); LASSERT (ndiov > 0); - while (doffset >= diov->kiov_len) { - doffset -= diov->kiov_len; + while (doffset >= diov->bv_len) { + doffset -= diov->bv_len; diov++; ndiov--; LASSERT(ndiov > 0); } LASSERT(nsiov > 0); - while (soffset >= siov->kiov_len) { - soffset -= siov->kiov_len; + while (soffset >= siov->bv_len) { + soffset -= siov->bv_len; siov++; nsiov--; LASSERT(nsiov > 0); @@ -435,16 +411,16 @@ lnet_copy_kiov2kiov(unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset, do { LASSERT(ndiov > 0); LASSERT(nsiov > 0); - this_nob = MIN(diov->kiov_len - doffset, - siov->kiov_len - soffset); - this_nob = MIN(this_nob, nob); + this_nob = 
min3(diov->bv_len - doffset, + siov->bv_len - soffset, + nob); if (daddr == NULL) - daddr = ((char *)kmap(diov->kiov_page)) + - diov->kiov_offset + doffset; + daddr = ((char *)kmap(diov->bv_page)) + + diov->bv_offset + doffset; if (saddr == NULL) - saddr = ((char *)kmap(siov->kiov_page)) + - siov->kiov_offset + soffset; + saddr = ((char *)kmap(siov->bv_page)) + + siov->bv_offset + soffset; /* Vanishing risk of kmap deadlock when mapping 2 pages. * However in practice at least one of the kiovs will be mapped @@ -453,22 +429,22 @@ lnet_copy_kiov2kiov(unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset, memcpy (daddr, saddr, this_nob); nob -= this_nob; - if (diov->kiov_len > doffset + this_nob) { + if (diov->bv_len > doffset + this_nob) { daddr += this_nob; doffset += this_nob; } else { - kunmap(diov->kiov_page); + kunmap(diov->bv_page); daddr = NULL; diov++; ndiov--; doffset = 0; } - if (siov->kiov_len > soffset + this_nob) { + if (siov->bv_len > soffset + this_nob) { saddr += this_nob; soffset += this_nob; } else { - kunmap(siov->kiov_page); + kunmap(siov->bv_page); saddr = NULL; siov++; nsiov--; @@ -477,15 +453,16 @@ lnet_copy_kiov2kiov(unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset, } while (nob > 0); if (daddr != NULL) - kunmap(diov->kiov_page); + kunmap(diov->bv_page); if (saddr != NULL) - kunmap(siov->kiov_page); + kunmap(siov->bv_page); } EXPORT_SYMBOL(lnet_copy_kiov2kiov); void lnet_copy_kiov2iov (unsigned int niov, struct kvec *iov, unsigned int iovoffset, - unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset, + unsigned int nkiov, struct bio_vec *kiov, + unsigned int kiovoffset, unsigned int nob) { /* NB iov, kiov are READ-ONLY */ @@ -506,8 +483,8 @@ lnet_copy_kiov2iov (unsigned int niov, struct kvec *iov, unsigned int iovoffset, } LASSERT(nkiov > 0); - while (kiovoffset >= kiov->kiov_len) { - kiovoffset -= kiov->kiov_len; + while (kiovoffset >= kiov->bv_len) { + kiovoffset -= kiov->bv_len; kiov++; nkiov--; LASSERT(nkiov > 0); @@ -516,13 +493,13 @@ lnet_copy_kiov2iov (unsigned int niov, struct kvec *iov, unsigned int iovoffset, do { LASSERT(niov > 0); LASSERT(nkiov > 0); - this_nob = MIN(iov->iov_len - iovoffset, - kiov->kiov_len - kiovoffset); - this_nob = MIN(this_nob, nob); + this_nob = min3((unsigned int)iov->iov_len - iovoffset, + (unsigned int)kiov->bv_len - kiovoffset, + nob); if (addr == NULL) - addr = ((char *)kmap(kiov->kiov_page)) + - kiov->kiov_offset + kiovoffset; + addr = ((char *)kmap(kiov->bv_page)) + + kiov->bv_offset + kiovoffset; memcpy((char *)iov->iov_base + iovoffset, addr, this_nob); nob -= this_nob; @@ -535,11 +512,11 @@ lnet_copy_kiov2iov (unsigned int niov, struct kvec *iov, unsigned int iovoffset, iovoffset = 0; } - if (kiov->kiov_len > kiovoffset + this_nob) { + if (kiov->bv_len > kiovoffset + this_nob) { addr += this_nob; kiovoffset += this_nob; } else { - kunmap(kiov->kiov_page); + kunmap(kiov->bv_page); addr = NULL; kiov++; nkiov--; @@ -549,12 +526,13 @@ lnet_copy_kiov2iov (unsigned int niov, struct kvec *iov, unsigned int iovoffset, } while (nob > 0); if (addr != NULL) - kunmap(kiov->kiov_page); + kunmap(kiov->bv_page); } EXPORT_SYMBOL(lnet_copy_kiov2iov); void -lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset, +lnet_copy_iov2kiov(unsigned int nkiov, struct bio_vec *kiov, + unsigned int kiovoffset, unsigned int niov, struct kvec *iov, unsigned int iovoffset, unsigned int nob) { @@ -568,8 +546,8 @@ lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffse LASSERT 
(!in_interrupt ()); LASSERT (nkiov > 0); - while (kiovoffset >= kiov->kiov_len) { - kiovoffset -= kiov->kiov_len; + while (kiovoffset >= kiov->bv_len) { + kiovoffset -= kiov->bv_len; kiov++; nkiov--; LASSERT(nkiov > 0); @@ -586,22 +564,22 @@ lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffse do { LASSERT(nkiov > 0); LASSERT(niov > 0); - this_nob = MIN(kiov->kiov_len - kiovoffset, - iov->iov_len - iovoffset); - this_nob = MIN(this_nob, nob); + this_nob = min3((unsigned int)kiov->bv_len - kiovoffset, + (unsigned int)iov->iov_len - iovoffset, + nob); if (addr == NULL) - addr = ((char *)kmap(kiov->kiov_page)) + - kiov->kiov_offset + kiovoffset; + addr = ((char *)kmap(kiov->bv_page)) + + kiov->bv_offset + kiovoffset; memcpy (addr, (char *)iov->iov_base + iovoffset, this_nob); nob -= this_nob; - if (kiov->kiov_len > kiovoffset + this_nob) { + if (kiov->bv_len > kiovoffset + this_nob) { addr += this_nob; kiovoffset += this_nob; } else { - kunmap(kiov->kiov_page); + kunmap(kiov->bv_page); addr = NULL; kiov++; nkiov--; @@ -618,13 +596,13 @@ lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffse } while (nob > 0); if (addr != NULL) - kunmap(kiov->kiov_page); + kunmap(kiov->bv_page); } EXPORT_SYMBOL(lnet_copy_iov2kiov); int -lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst, - int src_niov, lnet_kiov_t *src, +lnet_extract_kiov(int dst_niov, struct bio_vec *dst, + int src_niov, struct bio_vec *src, unsigned int offset, unsigned int len) { /* Initialise 'dst' to the subset of 'src' starting at 'offset', @@ -637,8 +615,8 @@ lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst, return (0); /* no frags */ LASSERT(src_niov > 0); - while (offset >= src->kiov_len) { /* skip initial frags */ - offset -= src->kiov_len; + while (offset >= src->bv_len) { /* skip initial frags */ + offset -= src->bv_len; src_niov--; src++; LASSERT(src_niov > 0); @@ -649,18 +627,18 @@ lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst, LASSERT(src_niov > 0); LASSERT((int)niov <= dst_niov); - frag_len = src->kiov_len - offset; - dst->kiov_page = src->kiov_page; - dst->kiov_offset = src->kiov_offset + offset; + frag_len = src->bv_len - offset; + dst->bv_page = src->bv_page; + dst->bv_offset = src->bv_offset + offset; if (len <= frag_len) { - dst->kiov_len = len; - LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_SIZE); + dst->bv_len = len; + LASSERT(dst->bv_offset + dst->bv_len <= PAGE_SIZE); return niov; } - dst->kiov_len = frag_len; - LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_SIZE); + dst->bv_len = frag_len; + LASSERT(dst->bv_offset + dst->bv_len <= PAGE_SIZE); len -= frag_len; dst++; @@ -677,10 +655,10 @@ lnet_ni_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg, int delayed, unsigned int offset, unsigned int mlen, unsigned int rlen) { - unsigned int niov = 0; + unsigned int niov = 0; struct kvec *iov = NULL; - lnet_kiov_t *kiov = NULL; - int rc; + struct bio_vec *kiov = NULL; + int rc; LASSERT (!in_interrupt ()); LASSERT (mlen == 0 || msg != NULL); @@ -697,7 +675,6 @@ lnet_ni_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg, if (mlen != 0) { niov = msg->msg_niov; - iov = msg->msg_iov; kiov = msg->msg_kiov; LASSERT (niov > 0); @@ -706,7 +683,7 @@ lnet_ni_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg, } rc = (ni->ni_net->net_lnd->lnd_recv)(ni, private, msg, delayed, - niov, iov, kiov, offset, mlen, + niov, kiov, offset, mlen, rlen); if (rc < 0) lnet_finalize(msg, rc); @@ -721,14 +698,10 @@ lnet_setpayloadbuffer(struct lnet_msg *msg) 
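+	/* Message payloads are always carried in a bio_vec array (md_kiov). */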
LASSERT(!msg->msg_routing); LASSERT(md != NULL); LASSERT(msg->msg_niov == 0); - LASSERT(msg->msg_iov == NULL); LASSERT(msg->msg_kiov == NULL); msg->msg_niov = md->md_niov; - if ((md->md_options & LNET_MD_KIOV) != 0) - msg->msg_kiov = md->md_iov.kiov; - else - msg->msg_iov = md->md_iov.iov; + msg->msg_kiov = md->md_kiov; } void @@ -756,12 +729,12 @@ lnet_prep_send(struct lnet_msg *msg, int type, struct lnet_process_id target, static void lnet_ni_send(struct lnet_ni *ni, struct lnet_msg *msg) { - void *priv = msg->msg_private; + void *priv = msg->msg_private; int rc; - LASSERT (!in_interrupt ()); - LASSERT (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND || - (msg->msg_txcredit && msg->msg_peertxcredit)); + LASSERT(!in_interrupt()); + LASSERT(ni->ni_nid == LNET_NID_LO_0 || + (msg->msg_txcredit && msg->msg_peertxcredit)); rc = (ni->ni_net->net_lnd->lnd_send)(ni, priv, msg); if (rc < 0) { @@ -794,12 +767,32 @@ lnet_ni_eager_recv(struct lnet_ni *ni, struct lnet_msg *msg) return rc; } +static bool +lnet_is_peer_deadline_passed(struct lnet_peer_ni *lpni, time64_t now) +{ + time64_t deadline; + + deadline = lpni->lpni_last_alive + + lpni->lpni_net->net_tunables.lct_peer_timeout; + + /* + * assume peer_ni is alive as long as we're within the configured + * peer timeout + */ + if (deadline > now) + return false; + + return true; +} + /* NB: returns 1 when alive, 0 when dead, negative when error; * may drop the lnet_net_lock */ static int lnet_peer_alive_locked(struct lnet_ni *ni, struct lnet_peer_ni *lpni, struct lnet_msg *msg) { + time64_t now = ktime_get_seconds(); + if (!lnet_peer_aliveness_enabled(lpni)) return -ENODEV; @@ -815,10 +808,12 @@ lnet_peer_alive_locked(struct lnet_ni *ni, struct lnet_peer_ni *lpni, return 1; /* always send any responses */ - if (msg->msg_type == LNET_MSG_ACK || - msg->msg_type == LNET_MSG_REPLY) + if (lnet_msg_is_response(msg)) return 1; + if (!lnet_is_peer_deadline_passed(lpni, now)) + return true; + return lnet_is_peer_ni_alive(lpni); } @@ -845,8 +840,10 @@ lnet_post_send_locked(struct lnet_msg *msg, int do_send) LASSERT(!do_send || msg->msg_tx_delayed); LASSERT(!msg->msg_receiving); LASSERT(msg->msg_tx_committed); + /* can't get here if we're sending to the loopback interface */ - LASSERT(lp->lpni_nid != the_lnet.ln_loni->ni_nid); + if (the_lnet.ln_loni) + LASSERT(lp->lpni_nid != the_lnet.ln_loni->ni_nid); /* NB 'lp' is always the next hop */ if ((msg->msg_target.pid & LNET_PID_USERFLAG) == 0 && @@ -975,7 +972,6 @@ lnet_post_routed_recv_locked(struct lnet_msg *msg, int do_recv) struct lnet_rtrbufpool *rbp; struct lnet_rtrbuf *rb; - LASSERT(msg->msg_iov == NULL); LASSERT(msg->msg_kiov == NULL); LASSERT(msg->msg_niov == 0); LASSERT(msg->msg_routing); @@ -992,8 +988,6 @@ lnet_post_routed_recv_locked(struct lnet_msg *msg, int do_recv) if (!msg->msg_peerrtrcredit) { /* lpni_lock protects the credit manipulation */ spin_lock(&lpni->lpni_lock); - /* lp_lock protects the lp_rtrq */ - spin_lock(&lp->lp_lock); msg->msg_peerrtrcredit = 1; lpni->lpni_rtrcredits--; @@ -1001,15 +995,16 @@ lnet_post_routed_recv_locked(struct lnet_msg *msg, int do_recv) lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits; if (lpni->lpni_rtrcredits < 0) { + spin_unlock(&lpni->lpni_lock); /* must have checked eager_recv before here */ LASSERT(msg->msg_rx_ready_delay); msg->msg_rx_delayed = 1; + /* lp_lock protects the lp_rtrq */ + spin_lock(&lp->lp_lock); list_add_tail(&msg->msg_list, &lp->lp_rtrq); spin_unlock(&lp->lp_lock); - spin_unlock(&lpni->lpni_lock); return LNET_CREDIT_WAIT; } - 
spin_unlock(&lp->lp_lock); spin_unlock(&lpni->lpni_lock); } @@ -1236,24 +1231,22 @@ routing_off: LASSERT(rxpeerni->lpni_peer_net); LASSERT(rxpeerni->lpni_peer_net->lpn_peer); - lp = rxpeerni->lpni_peer_net->lpn_peer; - /* give back peer router credits */ msg->msg_peerrtrcredit = 0; spin_lock(&rxpeerni->lpni_lock); - spin_lock(&lp->lp_lock); - rxpeerni->lpni_rtrcredits++; + spin_unlock(&rxpeerni->lpni_lock); + + lp = rxpeerni->lpni_peer_net->lpn_peer; + spin_lock(&lp->lp_lock); /* drop all messages which are queued to be routed on that * peer. */ if (!the_lnet.ln_routing) { - struct list_head drop; - INIT_LIST_HEAD(&drop); + LIST_HEAD(drop); list_splice_init(&lp->lp_rtrq, &drop); spin_unlock(&lp->lp_lock); - spin_unlock(&rxpeerni->lpni_lock); lnet_drop_routed_msgs_locked(&drop, msg->msg_rx_cpt); } else if (!list_empty(&lp->lp_rtrq)) { int msg2_cpt; @@ -1263,7 +1256,6 @@ routing_off: list_del(&msg2->msg_list); msg2_cpt = msg2->msg_rx_cpt; spin_unlock(&lp->lp_lock); - spin_unlock(&rxpeerni->lpni_lock); /* * messages on the lp_rtrq can be from any NID in * the peer, which means they might have different @@ -1281,7 +1273,6 @@ routing_off: } } else { spin_unlock(&lp->lp_lock); - spin_unlock(&rxpeerni->lpni_lock); } } if (rxni != NULL) { @@ -1294,36 +1285,188 @@ routing_off: } } -#if 0 +static struct lnet_peer_ni * +lnet_select_peer_ni(struct lnet_ni *best_ni, lnet_nid_t dst_nid, + struct lnet_peer *peer, + struct lnet_peer_ni *best_lpni, + struct lnet_peer_net *peer_net) +{ + /* + * Look at the peer NIs for the destination peer that connect + * to the chosen net. If a peer_ni is preferred when using the + * best_ni to communicate, we use that one. If there is no + * preferred peer_ni, or there are multiple preferred peer_ni, + * the available transmit credits are used. If the transmit + * credits are equal, we round-robin over the peer_ni. + */ + struct lnet_peer_ni *lpni = NULL; + int best_lpni_credits = (best_lpni) ? best_lpni->lpni_txcredits : + INT_MIN; + int best_lpni_healthv = (best_lpni) ? 
+ atomic_read(&best_lpni->lpni_healthv) : 0; + bool best_lpni_is_preferred = false; + bool lpni_is_preferred; + int lpni_healthv; + __u32 lpni_sel_prio; + __u32 best_sel_prio = LNET_MAX_SELECTION_PRIORITY; + + while ((lpni = lnet_get_next_peer_ni_locked(peer, peer_net, lpni))) { + /* + * if the best_ni we've chosen aleady has this lpni + * preferred, then let's use it + */ + if (best_ni) { + lpni_is_preferred = lnet_peer_is_pref_nid_locked(lpni, + best_ni->ni_nid); + CDEBUG(D_NET, "%s lpni_is_preferred = %d\n", + libcfs_nid2str(best_ni->ni_nid), + lpni_is_preferred); + } else { + lpni_is_preferred = false; + } + + lpni_healthv = atomic_read(&lpni->lpni_healthv); + lpni_sel_prio = lpni->lpni_sel_priority; + + if (best_lpni) + CDEBUG(D_NET, "n:[%s, %s] h:[%d, %d] p:[%d, %d] c:[%d, %d] s:[%d, %d]\n", + libcfs_nid2str(lpni->lpni_nid), + libcfs_nid2str(best_lpni->lpni_nid), + lpni_healthv, best_lpni_healthv, + lpni_sel_prio, best_sel_prio, + lpni->lpni_txcredits, best_lpni_credits, + lpni->lpni_seq, best_lpni->lpni_seq); + else + goto select_lpni; + + /* pick the healthiest peer ni */ + if (lpni_healthv < best_lpni_healthv) + continue; + else if (lpni_healthv > best_lpni_healthv) { + if (best_lpni_is_preferred) + best_lpni_is_preferred = false; + goto select_lpni; + } + + if (lpni_sel_prio > best_sel_prio) + continue; + else if (lpni_sel_prio < best_sel_prio) { + if (best_lpni_is_preferred) + best_lpni_is_preferred = false; + goto select_lpni; + } + + /* if this is a preferred peer use it */ + if (!best_lpni_is_preferred && lpni_is_preferred) { + best_lpni_is_preferred = true; + goto select_lpni; + } else if (best_lpni_is_preferred && !lpni_is_preferred) { + /* this is not the preferred peer so let's ignore + * it. + */ + continue; + } + + if (lpni->lpni_txcredits < best_lpni_credits) + /* We already have a peer that has more credits + * available than this one. No need to consider + * this peer further. + */ + continue; + else if (lpni->lpni_txcredits > best_lpni_credits) + goto select_lpni; + + /* The best peer found so far and the current peer + * have the same number of available credits let's + * make sure to select between them using Round Robin + */ + if (best_lpni && (best_lpni->lpni_seq <= lpni->lpni_seq)) + continue; +select_lpni: + best_lpni_is_preferred = lpni_is_preferred; + best_lpni_healthv = lpni_healthv; + best_sel_prio = lpni_sel_prio; + best_lpni = lpni; + best_lpni_credits = lpni->lpni_txcredits; + } + + /* if we still can't find a peer ni then we can't reach it */ + if (!best_lpni) { + __u32 net_id = (peer_net) ? peer_net->lpn_net_id : + LNET_NIDNET(dst_nid); + CDEBUG(D_NET, "no peer_ni found on peer net %s\n", + libcfs_net2str(net_id)); + return NULL; + } + + CDEBUG(D_NET, "sd_best_lpni = %s\n", + libcfs_nid2str(best_lpni->lpni_nid)); + + return best_lpni; +} + +/* + * Prerequisite: the best_ni should already be set in the sd + * Find the best lpni. + * If the net id is provided then restrict lpni selection on + * that particular net. + * Otherwise find any reachable lpni. When dealing with an MR + * gateway and it has multiple lpnis which we can use + * we want to select the best one from the list of reachable + * ones. 
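+ * The per-NI comparison is done in lnet_select_peer_ni(): health
+ * first, then selection priority, then whether the lpni is
+ * preferred for the chosen local NI, then available tx credits,
+ * and finally round-robin on lpni_seq.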
+ */ +static inline struct lnet_peer_ni * +lnet_find_best_lpni(struct lnet_ni *lni, lnet_nid_t dst_nid, + struct lnet_peer *peer, __u32 net_id) +{ + struct lnet_peer_net *peer_net; + + /* find the best_lpni on any local network */ + if (net_id == LNET_NET_ANY) { + struct lnet_peer_ni *best_lpni = NULL; + struct lnet_peer_net *lpn; + list_for_each_entry(lpn, &peer->lp_peer_nets, lpn_peer_nets) { + /* no net specified find any reachable peer ni */ + if (!lnet_islocalnet_locked(lpn->lpn_net_id)) + continue; + best_lpni = lnet_select_peer_ni(lni, dst_nid, peer, + best_lpni, lpn); + } + + return best_lpni; + } + /* restrict on the specified net */ + peer_net = lnet_peer_get_net_locked(peer, net_id); + if (peer_net) + return lnet_select_peer_ni(lni, dst_nid, peer, NULL, peer_net); + + return NULL; +} + static int -lnet_compare_peers(struct lnet_peer_ni *p1, struct lnet_peer_ni *p2) +lnet_compare_gw_lpnis(struct lnet_peer_ni *lpni1, struct lnet_peer_ni *lpni2) { - if (p1->lpni_txqnob < p2->lpni_txqnob) + if (lpni1->lpni_txqnob < lpni2->lpni_txqnob) return 1; - if (p1->lpni_txqnob > p2->lpni_txqnob) + if (lpni1->lpni_txqnob > lpni2->lpni_txqnob) return -1; - if (p1->lpni_txcredits > p2->lpni_txcredits) + if (lpni1->lpni_txcredits > lpni2->lpni_txcredits) return 1; - if (p1->lpni_txcredits < p2->lpni_txcredits) + if (lpni1->lpni_txcredits < lpni2->lpni_txcredits) return -1; return 0; } -#endif +/* Compare route priorities and hop counts */ static int lnet_compare_routes(struct lnet_route *r1, struct lnet_route *r2) { - /* TODO re-implement gateway comparison - struct lnet_peer_ni *p1 = r1->lr_gateway; - struct lnet_peer_ni *p2 = r2->lr_gateway; - */ int r1_hops = (r1->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r1->lr_hops; int r2_hops = (r2->lr_hops == LNET_UNDEFINED_HOPS) ? 
1 : r2->lr_hops; - /*int rc;*/ if (r1->lr_priority < r2->lr_priority) return 1; @@ -1337,65 +1480,122 @@ lnet_compare_routes(struct lnet_route *r1, struct lnet_route *r2) if (r1_hops > r2_hops) return -1; - /* - rc = lnet_compare_peers(p1, p2); - if (rc) - return rc; - */ - - if (r1->lr_seq - r2->lr_seq <= 0) - return 1; - - return -1; + return 0; } -/* TODO: lnet_find_route_locked() needs to be reimplemented */ static struct lnet_route * -lnet_find_route_locked(struct lnet_net *net, __u32 remote_net, - lnet_nid_t rtr_nid, struct lnet_route **prev_route) +lnet_find_route_locked(struct lnet_remotenet *rnet, __u32 src_net, + struct lnet_peer_ni *remote_lpni, + struct lnet_route **prev_route, + struct lnet_peer_ni **gwni) { - struct lnet_remotenet *rnet; - struct lnet_route *route; + struct lnet_peer_ni *lpni, *best_gw_ni = NULL; struct lnet_route *best_route; struct lnet_route *last_route; - struct lnet_peer *lp_best; - struct lnet_peer *lp; + struct lnet_route *route; int rc; + bool best_rte_is_preferred = false; + lnet_nid_t gw_pnid; - /* If @rtr_nid is not LNET_NID_ANY, return the gateway with - * rtr_nid nid, otherwise find the best gateway I can use */ - - rnet = lnet_find_rnet_locked(remote_net); - if (rnet == NULL) - return NULL; + CDEBUG(D_NET, "Looking up a route to %s, from %s\n", + libcfs_net2str(rnet->lrn_net), libcfs_net2str(src_net)); - lp_best = NULL; best_route = last_route = NULL; list_for_each_entry(route, &rnet->lrn_routes, lr_list) { - lp = route->lr_gateway; - if (!lnet_is_route_alive(route)) continue; + gw_pnid = route->lr_gateway->lp_primary_nid; + + /* no protection on below fields, but it's harmless */ + if (last_route && (last_route->lr_seq - route->lr_seq < 0)) + last_route = route; + + /* if the best route found is in the preferred list then + * tag it as preferred and use it later on. But if we + * didn't find any routes which are on the preferred list + * then just use the best route possible. + */ + rc = lnet_peer_is_pref_rtr_locked(remote_lpni, gw_pnid); + + if (!best_route || (rc && !best_rte_is_preferred)) { + /* Restrict the selection of the router NI on the + * src_net provided. If the src_net is LNET_NID_ANY, + * then select the best interface available. + */ + lpni = lnet_find_best_lpni(NULL, LNET_NID_ANY, + route->lr_gateway, + src_net); + if (!lpni) { + CDEBUG(D_NET, + "Gateway %s does not have a peer NI on net %s\n", + libcfs_nid2str(gw_pnid), + libcfs_net2str(src_net)); + continue; + } + } + + if (rc && !best_rte_is_preferred) { + /* This is the first preferred route we found, + * so it beats any route found previously + */ + best_route = route; + if (!last_route) + last_route = route; + best_gw_ni = lpni; + best_rte_is_preferred = true; + CDEBUG(D_NET, "preferred gw = %s\n", + libcfs_nid2str(gw_pnid)); + continue; + } else if ((!rc) && best_rte_is_preferred) + /* The best route we found so far is in the preferred + * list, so it beats any non-preferred route + */ + continue; - if (lp_best == NULL) { + if (!best_route) { best_route = last_route = route; - lp_best = lp; + best_gw_ni = lpni; continue; } - /* no protection on below fields, but it's harmless */ - if (last_route->lr_seq - route->lr_seq < 0) - last_route = route; - rc = lnet_compare_routes(route, best_route); - if (rc < 0) + if (rc == -1) + continue; + + /* Restrict the selection of the router NI on the + * src_net provided. If the src_net is LNET_NID_ANY, + * then select the best interface available. 
+ */ + lpni = lnet_find_best_lpni(NULL, LNET_NID_ANY, + route->lr_gateway, + src_net); + if (!lpni) { + CDEBUG(D_NET, + "Gateway %s does not have a peer NI on net %s\n", + libcfs_nid2str(gw_pnid), + libcfs_net2str(src_net)); + continue; + } + + if (rc == 1) { + best_route = route; + best_gw_ni = lpni; + continue; + } + + rc = lnet_compare_gw_lpnis(lpni, best_gw_ni); + if (rc == -1) continue; - best_route = route; - lp_best = lp; + if (rc == 1 || route->lr_seq <= best_route->lr_seq) { + best_route = route; + best_gw_ni = lpni; + continue; + } } *prev_route = last_route; + *gwni = best_gw_ni; return best_route; } @@ -1409,6 +1609,7 @@ lnet_get_best_ni(struct lnet_net *local_net, struct lnet_ni *best_ni, unsigned int shortest_distance; int best_credits; int best_healthv; + __u32 best_sel_prio; /* * If there is no peer_ni that we can send to on this network, @@ -1418,6 +1619,7 @@ lnet_get_best_ni(struct lnet_net *local_net, struct lnet_ni *best_ni, return best_ni; if (best_ni == NULL) { + best_sel_prio = LNET_MAX_SELECTION_PRIORITY; shortest_distance = UINT_MAX; best_credits = INT_MIN; best_healthv = 0; @@ -1426,6 +1628,7 @@ lnet_get_best_ni(struct lnet_net *local_net, struct lnet_ni *best_ni, best_ni->ni_dev_cpt); best_credits = atomic_read(&best_ni->ni_tx_credits); best_healthv = atomic_read(&best_ni->ni_healthv); + best_sel_prio = best_ni->ni_sel_priority; } while ((ni = lnet_get_next_ni_locked(local_net, ni))) { @@ -1433,10 +1636,12 @@ lnet_get_best_ni(struct lnet_net *local_net, struct lnet_ni *best_ni, int ni_credits; int ni_healthv; int ni_fatal; + __u32 ni_sel_prio; ni_credits = atomic_read(&ni->ni_tx_credits); ni_healthv = atomic_read(&ni->ni_healthv); ni_fatal = atomic_read(&ni->ni_fatal_error_on); + ni_sel_prio = ni->ni_sel_priority; /* * calculate the distance from the CPT on which @@ -1447,12 +1652,6 @@ lnet_get_best_ni(struct lnet_net *local_net, struct lnet_ni *best_ni, md_cpt, ni->ni_dev_cpt); - CDEBUG(D_NET, "compare ni %s [c:%d, d:%d, s:%d] with best_ni %s [c:%d, d:%d, s:%d]\n", - libcfs_nid2str(ni->ni_nid), ni_credits, distance, - ni->ni_seq, (best_ni) ? libcfs_nid2str(best_ni->ni_nid) - : "not seleced", best_credits, shortest_distance, - (best_ni) ? best_ni->ni_seq : 0); - /* * All distances smaller than the NUMA range * are treated equally. @@ -1464,31 +1663,47 @@ lnet_get_best_ni(struct lnet_net *local_net, struct lnet_ni *best_ni, * Select on health, shorter distance, available * credits, then round-robin. */ - if (ni_fatal) { + if (ni_fatal) continue; - } else if (ni_healthv < best_healthv) { + + if (best_ni) + CDEBUG(D_NET, "compare ni %s [c:%d, d:%d, s:%d, p:%u] with best_ni %s [c:%d, d:%d, s:%d, p:%u]\n", + libcfs_nid2str(ni->ni_nid), ni_credits, distance, + ni->ni_seq, ni_sel_prio, + (best_ni) ? libcfs_nid2str(best_ni->ni_nid) + : "not selected", best_credits, shortest_distance, + (best_ni) ? best_ni->ni_seq : 0, + best_sel_prio); + else + goto select_ni; + + if (ni_healthv < best_healthv) continue; - } else if (ni_healthv > best_healthv) { - best_healthv = ni_healthv; - /* - * If we're going to prefer this ni because it's - * the healthiest, then we should set the - * shortest_distance in the algorithm in case - * there are multiple NIs with the same health but - * different distances. 
- */ - if (distance < shortest_distance) - shortest_distance = distance; - } else if (distance > shortest_distance) { + else if (ni_healthv > best_healthv) + goto select_ni; + + if (ni_sel_prio > best_sel_prio) continue; - } else if (distance < shortest_distance) { - shortest_distance = distance; - } else if (ni_credits < best_credits) { + else if (ni_sel_prio < best_sel_prio) + goto select_ni; + + if (distance > shortest_distance) continue; - } else if (ni_credits == best_credits) { - if (best_ni && best_ni->ni_seq <= ni->ni_seq) - continue; - } + else if (distance < shortest_distance) + goto select_ni; + + if (ni_credits < best_credits) + continue; + else if (ni_credits > best_credits) + goto select_ni; + + if (best_ni && best_ni->ni_seq <= ni->ni_seq) + continue; + +select_ni: + best_sel_prio = ni_sel_prio; + shortest_distance = distance; + best_healthv = ni_healthv; best_ni = ni; best_credits = ni_credits; } @@ -1548,6 +1763,9 @@ lnet_handle_lo_send(struct lnet_send_data *sd) struct lnet_msg *msg = sd->sd_msg; int cpt = sd->sd_cpt; + if (the_lnet.ln_state != LNET_STATE_RUNNING) + return -ESHUTDOWN; + /* No send credit hassles with LOLND */ lnet_ni_addref_locked(the_lnet.ln_loni, cpt); msg->msg_hdr.dest_nid = cpu_to_le64(the_lnet.ln_loni->ni_nid); @@ -1574,11 +1792,24 @@ lnet_handle_send(struct lnet_send_data *sd) __u32 routing = send_case & REMOTE_DST; struct lnet_rsp_tracker *rspt; - /* - * Increment sequence number of the selected peer so that we - * pick the next one in Round Robin. + /* Increment sequence number of the selected peer, peer net, + * local ni and local net so that we pick the next ones + * in Round Robin. */ best_lpni->lpni_seq++; + best_lpni->lpni_peer_net->lpn_seq++; + best_ni->ni_seq++; + best_ni->ni_net->net_seq++; + + CDEBUG(D_NET, "%s NI seq info: [%d:%d:%d:%u] %s LPNI seq info [%d:%d:%d:%u]\n", + libcfs_nid2str(best_ni->ni_nid), + best_ni->ni_seq, best_ni->ni_net->net_seq, + atomic_read(&best_ni->ni_tx_credits), + best_ni->ni_sel_priority, + libcfs_nid2str(best_lpni->lpni_nid), + best_lpni->lpni_seq, best_lpni->lpni_peer_net->lpn_seq, + best_lpni->lpni_txcredits, + best_lpni->lpni_sel_priority); /* * grab a reference on the peer_ni so it sticks around even if @@ -1681,149 +1912,29 @@ lnet_handle_send(struct lnet_send_data *sd) rc = lnet_post_send_locked(msg, 0); if (!rc) - CDEBUG(D_NET, "TRACE: %s(%s:%s) -> %s(%s:%s) : %s try# %d\n", + CDEBUG(D_NET, "TRACE: %s(%s:%s) -> %s(%s:%s) %s : %s try# %d\n", libcfs_nid2str(msg->msg_hdr.src_nid), libcfs_nid2str(msg->msg_txni->ni_nid), libcfs_nid2str(sd->sd_src_nid), libcfs_nid2str(msg->msg_hdr.dest_nid), libcfs_nid2str(sd->sd_dst_nid), libcfs_nid2str(msg->msg_txpeer->lpni_nid), + libcfs_nid2str(sd->sd_rtr_nid), lnet_msgtyp2str(msg->msg_type), msg->msg_retry_count); return rc; } -static struct lnet_peer_ni * -lnet_select_peer_ni(struct lnet_send_data *sd, struct lnet_peer *peer, - struct lnet_peer_net *peer_net) -{ - /* - * Look at the peer NIs for the destination peer that connect - * to the chosen net. If a peer_ni is preferred when using the - * best_ni to communicate, we use that one. If there is no - * preferred peer_ni, or there are multiple preferred peer_ni, - * the available transmit credits are used. If the transmit - * credits are equal, we round-robin over the peer_ni. 
- */ - struct lnet_peer_ni *lpni = NULL; - struct lnet_peer_ni *best_lpni = NULL; - struct lnet_ni *best_ni = sd->sd_best_ni; - lnet_nid_t dst_nid = sd->sd_dst_nid; - int best_lpni_credits = INT_MIN; - bool preferred = false; - bool ni_is_pref; - int best_lpni_healthv = 0; - int lpni_healthv; - - while ((lpni = lnet_get_next_peer_ni_locked(peer, peer_net, lpni))) { - /* - * if the best_ni we've chosen aleady has this lpni - * preferred, then let's use it - */ - ni_is_pref = lnet_peer_is_pref_nid_locked(lpni, - best_ni->ni_nid); - - lpni_healthv = atomic_read(&lpni->lpni_healthv); - - CDEBUG(D_NET, "%s ni_is_pref = %d\n", - libcfs_nid2str(best_ni->ni_nid), ni_is_pref); - - if (best_lpni) - CDEBUG(D_NET, "%s c:[%d, %d], s:[%d, %d]\n", - libcfs_nid2str(lpni->lpni_nid), - lpni->lpni_txcredits, best_lpni_credits, - lpni->lpni_seq, best_lpni->lpni_seq); - - /* pick the healthiest peer ni */ - if (lpni_healthv < best_lpni_healthv) { - continue; - } else if (lpni_healthv > best_lpni_healthv) { - best_lpni_healthv = lpni_healthv; - /* if this is a preferred peer use it */ - } else if (!preferred && ni_is_pref) { - preferred = true; - } else if (preferred && !ni_is_pref) { - /* - * this is not the preferred peer so let's ignore - * it. - */ - continue; - } else if (lpni->lpni_txcredits < best_lpni_credits) { - /* - * We already have a peer that has more credits - * available than this one. No need to consider - * this peer further. - */ - continue; - } else if (lpni->lpni_txcredits == best_lpni_credits) { - /* - * The best peer found so far and the current peer - * have the same number of available credits let's - * make sure to select between them using Round - * Robin - */ - if (best_lpni) { - if (best_lpni->lpni_seq <= lpni->lpni_seq) - continue; - } - } - - best_lpni = lpni; - best_lpni_credits = lpni->lpni_txcredits; - } - - /* if we still can't find a peer ni then we can't reach it */ - if (!best_lpni) { - __u32 net_id = (peer_net) ? 
peer_net->lpn_net_id : - LNET_NIDNET(dst_nid); - CDEBUG(D_NET, "no peer_ni found on peer net %s\n", - libcfs_net2str(net_id)); - return NULL; - } - - CDEBUG(D_NET, "sd_best_lpni = %s\n", - libcfs_nid2str(best_lpni->lpni_nid)); - - return best_lpni; -} - -/* - * Prerequisite: the best_ni should already be set in the sd - */ -static inline struct lnet_peer_ni * -lnet_find_best_lpni_on_net(struct lnet_send_data *sd, struct lnet_peer *peer, - __u32 net_id) -{ - struct lnet_peer_net *peer_net; - - /* - * The gateway is Multi-Rail capable so now we must select the - * proper peer_ni - */ - peer_net = lnet_peer_get_net_locked(peer, net_id); - - if (!peer_net) { - CERROR("gateway peer %s has no NI on net %s\n", - libcfs_nid2str(peer->lp_primary_nid), - libcfs_net2str(net_id)); - return NULL; - } - - return lnet_select_peer_ni(sd, peer, peer_net); -} - static inline void -lnet_set_non_mr_pref_nid(struct lnet_send_data *sd) +lnet_set_non_mr_pref_nid(struct lnet_peer_ni *lpni, struct lnet_ni *lni, + struct lnet_msg *msg) { - if (sd->sd_send_case & NMR_DST && - sd->sd_msg->msg_type != LNET_MSG_REPLY && - sd->sd_msg->msg_type != LNET_MSG_ACK && - sd->sd_best_lpni->lpni_pref_nnids == 0) { + if (!lnet_peer_is_multi_rail(lpni->lpni_peer_net->lpn_peer) && + !lnet_msg_is_response(msg) && lpni->lpni_pref_nnids == 0) { CDEBUG(D_NET, "Setting preferred local NID %s on NMR peer %s\n", - libcfs_nid2str(sd->sd_best_ni->ni_nid), - libcfs_nid2str(sd->sd_best_lpni->lpni_nid)); - lnet_peer_ni_set_non_mr_pref_nid(sd->sd_best_lpni, - sd->sd_best_ni->ni_nid); + libcfs_nid2str(lni->ni_nid), + libcfs_nid2str(lpni->lpni_nid)); + lnet_peer_ni_set_non_mr_pref_nid(lpni, lni->ni_nid); } } @@ -1848,10 +1959,7 @@ lnet_handle_spec_local_nmr_dst(struct lnet_send_data *sd) return -EINVAL; } - /* - * the preferred NID will only be set for NMR peers - */ - lnet_set_non_mr_pref_nid(sd); + lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg); return lnet_handle_send(sd); } @@ -1861,8 +1969,11 @@ lnet_handle_spec_local_nmr_dst(struct lnet_send_data *sd) * Local Destination * MR Peer * - * Run the selection algorithm on the peer NIs unless we're sending - * a response, in this case just send to the destination + * Don't run the selection algorithm on the peer NIs. By specifying the + * local NID, we're also saying that we should always use the destination NID + * provided. This handles the case where we should be using the same + * destination NID for the all the messages which belong to the same RPC + * request. */ static int lnet_handle_spec_local_mr_dst(struct lnet_send_data *sd) @@ -1875,17 +1986,6 @@ lnet_handle_spec_local_mr_dst(struct lnet_send_data *sd) return -EINVAL; } - /* - * only run the selection algorithm to pick the peer_ni if we're - * sending a GET or a PUT. Responses are sent to the same - * destination NID provided. 
- */ - if (!(sd->sd_send_case & SND_RESP)) { - sd->sd_best_lpni = - lnet_find_best_lpni_on_net(sd, sd->sd_peer, - sd->sd_best_ni->ni_net->net_id); - } - if (sd->sd_best_lpni && sd->sd_best_lpni->lpni_nid == the_lnet.ln_loni->ni_nid) return lnet_handle_lo_send(sd); @@ -1903,8 +2003,7 @@ struct lnet_ni * lnet_find_best_ni_on_spec_net(struct lnet_ni *cur_best_ni, struct lnet_peer *peer, struct lnet_peer_net *peer_net, - int cpt, - bool incr_seq) + int cpt) { struct lnet_net *local_net; struct lnet_ni *best_ni; @@ -1924,42 +2023,71 @@ lnet_find_best_ni_on_spec_net(struct lnet_ni *cur_best_ni, best_ni = lnet_get_best_ni(local_net, cur_best_ni, peer, peer_net, cpt); - if (incr_seq && best_ni) - best_ni->ni_seq++; - return best_ni; } static int -lnet_initiate_peer_discovery(struct lnet_peer_ni *lpni, - struct lnet_msg *msg, lnet_nid_t rtr_nid, +lnet_initiate_peer_discovery(struct lnet_peer_ni *lpni, struct lnet_msg *msg, int cpt) { struct lnet_peer *peer; - lnet_nid_t primary_nid; + struct lnet_peer_ni *new_lpni; int rc; lnet_peer_ni_addref_locked(lpni); + peer = lpni->lpni_peer_net->lpn_peer; + + if (lnet_peer_gw_discovery(peer)) { + lnet_peer_ni_decref_locked(lpni); + return 0; + } + + if (!lnet_msg_discovery(msg) || lnet_peer_is_uptodate(peer)) { + lnet_peer_ni_decref_locked(lpni); + return 0; + } + rc = lnet_discover_peer_locked(lpni, cpt, false); if (rc) { lnet_peer_ni_decref_locked(lpni); return rc; } - /* The peer may have changed. */ - peer = lpni->lpni_peer_net->lpn_peer; - /* queue message and return */ - msg->msg_rtr_nid_param = rtr_nid; - msg->msg_sending = 0; - msg->msg_txpeer = NULL; + + new_lpni = lnet_find_peer_ni_locked(lpni->lpni_nid); + if (!new_lpni) { + lnet_peer_ni_decref_locked(lpni); + return -ENOENT; + } + + peer = new_lpni->lpni_peer_net->lpn_peer; spin_lock(&peer->lp_lock); - list_add_tail(&msg->msg_list, &peer->lp_dc_pendq); + if (lpni == new_lpni && lnet_peer_is_uptodate_locked(peer)) { + /* The peer NI did not change and the peer is up to date. + * Nothing more to do. + */ + spin_unlock(&peer->lp_lock); + lnet_peer_ni_decref_locked(lpni); + lnet_peer_ni_decref_locked(new_lpni); + return 0; + } spin_unlock(&peer->lp_lock); + + /* Either the peer NI changed during discovery, or the peer isn't up + * to date. In both cases we want to queue the message on the + * (possibly new) peer's pending queue and queue the peer for discovery + */ + msg->msg_sending = 0; + msg->msg_txpeer = NULL; + lnet_net_unlock(cpt); + lnet_peer_queue_message(peer, msg); + lnet_net_lock(cpt); + lnet_peer_ni_decref_locked(lpni); - primary_nid = peer->lp_primary_nid; + lnet_peer_ni_decref_locked(new_lpni); CDEBUG(D_NET, "msg %p delayed. %s pending discovery\n", - msg, libcfs_nid2str(primary_nid)); + msg, libcfs_nid2str(peer->lp_primary_nid)); return LNET_DC_WAIT; } @@ -1970,97 +2098,194 @@ lnet_handle_find_routed_path(struct lnet_send_data *sd, struct lnet_peer_ni **gw_lpni, struct lnet_peer **gw_peer) { + int rc; + __u32 local_lnet; struct lnet_peer *gw; - struct lnet_route *best_route; - struct lnet_route *last_route; + struct lnet_peer *lp; + struct lnet_peer_net *lpn; + struct lnet_peer_net *best_lpn = NULL; + struct lnet_remotenet *rnet, *best_rnet = NULL; + struct lnet_route *best_route = NULL; + struct lnet_route *last_route = NULL; struct lnet_peer_ni *lpni = NULL; - lnet_nid_t src_nid = sd->sd_src_nid; + struct lnet_peer_ni *gwni = NULL; + bool route_found = false; + lnet_nid_t src_nid = (sd->sd_src_nid != LNET_NID_ANY) ? sd->sd_src_nid : + (sd->sd_best_ni != NULL) ? 
sd->sd_best_ni->ni_nid : + LNET_NID_ANY; + int best_lpn_healthv = 0; + __u32 best_lpn_sel_prio = LNET_MAX_SELECTION_PRIORITY; + + CDEBUG(D_NET, "using src nid %s for route restriction\n", + libcfs_nid2str(src_nid)); + + /* If a router nid was specified then we are replying to a GET or + * sending an ACK. In this case we use the gateway associated with the + * specified router nid. + */ + if (sd->sd_rtr_nid != LNET_NID_ANY) { + gwni = lnet_find_peer_ni_locked(sd->sd_rtr_nid); + if (gwni) { + gw = gwni->lpni_peer_net->lpn_peer; + lnet_peer_ni_decref_locked(gwni); + if (gw->lp_rtr_refcount) { + local_lnet = LNET_NIDNET(sd->sd_rtr_nid); + route_found = true; + } + } else { + CWARN("No peer NI for gateway %s. Attempting to find an alternative route.\n", + libcfs_nid2str(sd->sd_rtr_nid)); + } + } + + if (!route_found) { + if (sd->sd_msg->msg_routing) { + /* If I'm routing this message then I need to find the + * next hop based on the destination NID + */ + best_rnet = lnet_find_rnet_locked(LNET_NIDNET(sd->sd_dst_nid)); + if (!best_rnet) { + CERROR("Unable to route message to %s - Route table may be misconfigured\n", + libcfs_nid2str(sd->sd_dst_nid)); + return -EHOSTUNREACH; + } + } else { + /* we've already looked up the initial lpni using + * dst_nid + */ + lpni = sd->sd_best_lpni; + /* the peer tree must be in existence */ + LASSERT(lpni && lpni->lpni_peer_net && + lpni->lpni_peer_net->lpn_peer); + lp = lpni->lpni_peer_net->lpn_peer; + + list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) { + /* is this remote network reachable? */ + rnet = lnet_find_rnet_locked(lpn->lpn_net_id); + if (!rnet) + continue; + + if (!best_lpn) { + best_lpn = lpn; + best_rnet = rnet; + } + + /* select the preferred peer net */ + if (best_lpn_healthv > lpn->lpn_healthv) + continue; + else if (best_lpn_healthv < lpn->lpn_healthv) + goto use_lpn; + + if (best_lpn_sel_prio < lpn->lpn_sel_priority) + continue; + else if (best_lpn_sel_prio > lpn->lpn_sel_priority) + goto use_lpn; + + if (best_lpn->lpn_seq <= lpn->lpn_seq) + continue; +use_lpn: + best_lpn_healthv = lpn->lpn_healthv; + best_lpn_sel_prio = lpn->lpn_sel_priority; + best_lpn = lpn; + best_rnet = rnet; + } + + if (!best_lpn) { + CERROR("peer %s has no available nets\n", + libcfs_nid2str(sd->sd_dst_nid)); + return -EHOSTUNREACH; + } + + sd->sd_best_lpni = lnet_find_best_lpni(sd->sd_best_ni, + sd->sd_dst_nid, + lp, + best_lpn->lpn_net_id); + if (!sd->sd_best_lpni) { + CERROR("peer %s is unreachable\n", + libcfs_nid2str(sd->sd_dst_nid)); + return -EHOSTUNREACH; + } + + /* We're attempting to round robin over the remote peer + * NI's so update the final destination we selected + */ + sd->sd_final_dst_lpni = sd->sd_best_lpni; + + /* Increment the sequence number of the remote lpni so + * we can round robin over the different interfaces of + * the remote lpni + */ + sd->sd_best_lpni->lpni_seq++; + } - best_route = lnet_find_route_locked(NULL, LNET_NIDNET(dst_nid), - sd->sd_rtr_nid, &last_route); - if (!best_route) { - CERROR("no route to %s from %s\n", - libcfs_nid2str(dst_nid), libcfs_nid2str(src_nid)); - return -EHOSTUNREACH; - } + /* + * find the best route. Restrict the selection on the net of the + * local NI if we've already picked the local NI to send from. + * Otherwise, let's pick any route we can find and then find + * a local NI we can reach the route's gateway on. Any route we + * select will be reachable by virtue of the restriction we have + * when adding a route. 
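+ * lnet_find_route_locked() prefers routes whose gateway is on the
+ * peer's preferred router list, then compares route priority and
+ * hop count, then the gateway NIs' queued bytes and tx credits,
+ * and finally round-robins on the route sequence number.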
+ */ + best_route = lnet_find_route_locked(best_rnet, + LNET_NIDNET(src_nid), + sd->sd_best_lpni, + &last_route, &gwni); + + if (!best_route) { + CERROR("no route to %s from %s\n", + libcfs_nid2str(dst_nid), + libcfs_nid2str(src_nid)); + return -EHOSTUNREACH; + } - gw = best_route->lr_gateway; - *gw_peer = gw; + if (!gwni) { + CERROR("Internal Error. Route expected to %s from %s\n", + libcfs_nid2str(dst_nid), + libcfs_nid2str(src_nid)); + return -EFAULT; + } + + gw = best_route->lr_gateway; + LASSERT(gw == gwni->lpni_peer_net->lpn_peer); + local_lnet = best_route->lr_lnet; + } /* * Discover this gateway if it hasn't already been discovered. * This means we might delay the message until discovery has * completed */ -#if 0 - /* TODO: disable discovey for now */ - if (lnet_msg_discovery(sd->sd_msg) && - !lnet_peer_is_uptodate(*gw_peer)) { - sd->sd_msg->msg_src_nid_param = sd->sd_src_nid; - return lnet_initiate_peer_discovery(gw, sd->sd_msg, - sd->sd_rtr_nid, sd->sd_cpt); - } -#endif + sd->sd_msg->msg_src_nid_param = sd->sd_src_nid; + rc = lnet_initiate_peer_discovery(gwni, sd->sd_msg, sd->sd_cpt); + if (rc) + return rc; if (!sd->sd_best_ni) sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL, gw, lnet_peer_get_net_locked(gw, - best_route->lr_lnet), - sd->sd_md_cpt, - true); + local_lnet), + sd->sd_md_cpt); if (!sd->sd_best_ni) { - CERROR("Internal Error. Expected local ni on %s " - "but non found :%s\n", - libcfs_net2str(best_route->lr_lnet), + CERROR("Internal Error. Expected local ni on %s but non found :%s\n", + libcfs_net2str(local_lnet), libcfs_nid2str(sd->sd_src_nid)); return -EFAULT; } - /* - * if gw is MR let's find its best peer_ni - */ - if (lnet_peer_is_multi_rail(gw)) { - lpni = lnet_find_best_lpni_on_net(sd, gw, - sd->sd_best_ni->ni_net->net_id); - /* - * We've already verified that the gw has an NI on that - * desired net, but we're not finding it. Something is - * wrong. - */ - if (!lpni) { - CERROR("Internal Error. Route expected to %s from %s\n", - libcfs_nid2str(dst_nid), - libcfs_nid2str(src_nid)); - return -EFAULT; - } - } else { - struct lnet_peer_net *lpn; - lpn = lnet_peer_get_net_locked(gw, best_route->lr_lnet); - if (!lpn) { - CERROR("Internal Error. Route expected to %s from %s\n", - libcfs_nid2str(dst_nid), - libcfs_nid2str(src_nid)); - return -EFAULT; - } - lpni = list_entry(lpn->lpn_peer_nis.next, struct lnet_peer_ni, - lpni_peer_nis); - if (!lpni) { - CERROR("Internal Error. 
Route expected to %s from %s\n", - libcfs_nid2str(dst_nid), - libcfs_nid2str(src_nid)); - return -EFAULT; - } - } - - *gw_lpni = lpni; + *gw_lpni = gwni; + *gw_peer = gw; /* - * increment the route sequence number since now we're sure we're - * going to use it + * increment the sequence numbers since now we're sure we're + * going to use this path */ - LASSERT(best_route && last_route); - best_route->lr_seq = last_route->lr_seq + 1; + if (sd->sd_rtr_nid == LNET_NID_ANY) { + LASSERT(best_route && last_route); + best_route->lr_seq = last_route->lr_seq + 1; + if (best_lpn) + best_lpn->lpn_seq++; + } return 0; } @@ -2104,10 +2329,11 @@ lnet_handle_spec_router_dst(struct lnet_send_data *sd) if (sd->sd_send_case & NMR_DST) /* - * since the final destination is non-MR let's set its preferred - * NID before we send - */ - lnet_set_non_mr_pref_nid(sd); + * since the final destination is non-MR let's set its preferred + * NID before we send + */ + lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, + sd->sd_msg); /* * We're going to send to the gw found so let's set its @@ -2120,10 +2346,22 @@ lnet_handle_spec_router_dst(struct lnet_send_data *sd) } struct lnet_ni * -lnet_find_best_ni_on_local_net(struct lnet_peer *peer, int md_cpt) +lnet_find_best_ni_on_local_net(struct lnet_peer *peer, int md_cpt, + bool discovery) { - struct lnet_peer_net *peer_net = NULL; + struct lnet_peer_net *lpn = NULL; + struct lnet_peer_net *best_lpn = NULL; + struct lnet_net *net = NULL; + struct lnet_net *best_net = NULL; struct lnet_ni *best_ni = NULL; + int best_lpn_healthv = 0; + int best_net_healthv = 0; + int net_healthv; + __u32 best_lpn_sel_prio = LNET_MAX_SELECTION_PRIORITY; + __u32 lpn_sel_prio; + __u32 best_net_sel_prio = LNET_MAX_SELECTION_PRIORITY; + __u32 net_sel_prio; + bool exit = false; /* * The peer can have multiple interfaces, some of them can be on @@ -2133,33 +2371,92 @@ lnet_find_best_ni_on_local_net(struct lnet_peer *peer, int md_cpt) */ /* go through all the peer nets and find the best_ni */ - list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_peer_nets) { + list_for_each_entry(lpn, &peer->lp_peer_nets, lpn_peer_nets) { /* * The peer's list of nets can contain non-local nets. We * want to only examine the local ones. */ - if (!lnet_get_net_locked(peer_net->lpn_net_id)) + net = lnet_get_net_locked(lpn->lpn_net_id); + if (!net) + continue; + + lpn_sel_prio = lpn->lpn_sel_priority; + net_healthv = lnet_get_net_healthv_locked(net); + net_sel_prio = net->net_sel_priority; + + /* + * if this is a discovery message and lp_disc_net_id is + * specified then use that net to send the discovery on. 
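+ * In that case the health/priority/round-robin comparison below is
+ * skipped and the loop exits as soon as that net is selected.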
+ */ + if (peer->lp_disc_net_id == lpn->lpn_net_id && + discovery) { + exit = true; + goto select_lpn; + } + + if (!best_lpn) + goto select_lpn; + + /* always select the lpn with the best health */ + if (best_lpn_healthv > lpn->lpn_healthv) continue; - best_ni = lnet_find_best_ni_on_spec_net(best_ni, peer, - peer_net, md_cpt, false); + else if (best_lpn_healthv < lpn->lpn_healthv) + goto select_lpn; + + /* select the preferred peer and local nets */ + if (best_lpn_sel_prio < lpn_sel_prio) + continue; + else if (best_lpn_sel_prio > lpn_sel_prio) + goto select_lpn; + + if (best_net_healthv > net_healthv) + continue; + else if (best_net_healthv < net_healthv) + goto select_lpn; + + if (best_net_sel_prio < net_sel_prio) + continue; + else if (best_net_sel_prio > net_sel_prio) + goto select_lpn; + + if (best_lpn->lpn_seq < lpn->lpn_seq) + continue; + else if (best_lpn->lpn_seq > lpn->lpn_seq) + goto select_lpn; + + /* round robin over the local networks */ + if (best_net->net_seq <= net->net_seq) + continue; + +select_lpn: + best_net_healthv = net_healthv; + best_net_sel_prio = net_sel_prio; + best_lpn_healthv = lpn->lpn_healthv; + best_lpn_sel_prio = lpn_sel_prio; + best_lpn = lpn; + best_net = net; + + if (exit) + break; } - if (best_ni) - /* increment sequence number so we can round robin */ - best_ni->ni_seq++; + if (best_lpn) { + /* Select the best NI on the same net as best_lpn chosen + * above + */ + best_ni = lnet_find_best_ni_on_spec_net(NULL, peer, + best_lpn, md_cpt); + } return best_ni; } static struct lnet_ni * -lnet_find_existing_preferred_best_ni(struct lnet_send_data *sd) +lnet_find_existing_preferred_best_ni(struct lnet_peer_ni *lpni, int cpt) { struct lnet_ni *best_ni = NULL; - struct lnet_peer_net *peer_net; - struct lnet_peer *peer = sd->sd_peer; - struct lnet_peer_ni *best_lpni = sd->sd_best_lpni; - struct lnet_peer_ni *lpni; - int cpt = sd->sd_cpt; + struct lnet_peer_net *peer_net = lpni->lpni_peer_net; + struct lnet_peer_ni *lpni_entry; /* * We must use a consistent source address when sending to a @@ -2171,18 +2468,13 @@ lnet_find_existing_preferred_best_ni(struct lnet_send_data *sd) * So we need to pick the NI the peer prefers for this * particular network. */ - - /* Get the target peer_ni */ - peer_net = lnet_peer_get_net_locked(peer, - LNET_NIDNET(best_lpni->lpni_nid)); - LASSERT(peer_net != NULL); - list_for_each_entry(lpni, &peer_net->lpn_peer_nis, - lpni_peer_nis) { - if (lpni->lpni_pref_nnids == 0) + LASSERT(peer_net); + list_for_each_entry(lpni_entry, &peer_net->lpn_peer_nis, + lpni_peer_nis) { + if (lpni_entry->lpni_pref_nnids == 0) continue; - LASSERT(lpni->lpni_pref_nnids == 1); - best_ni = lnet_nid2ni_locked( - lpni->lpni_pref.nid, cpt); + LASSERT(lpni_entry->lpni_pref_nnids == 1); + best_ni = lnet_nid2ni_locked(lpni_entry->lpni_pref.nid, cpt); break; } @@ -2207,14 +2499,15 @@ lnet_select_preferred_best_ni(struct lnet_send_data *sd) * particular network. */ - best_ni = lnet_find_existing_preferred_best_ni(sd); + best_ni = lnet_find_existing_preferred_best_ni(sd->sd_best_lpni, + sd->sd_cpt); /* if best_ni is still not set just pick one */ if (!best_ni) { best_ni = lnet_find_best_ni_on_spec_net(NULL, sd->sd_peer, sd->sd_best_lpni->lpni_peer_net, - sd->sd_md_cpt, true); + sd->sd_md_cpt); /* If there is no best_ni we don't have a route */ if (!best_ni) { CERROR("no path to %s from net %s\n", @@ -2227,7 +2520,7 @@ lnet_select_preferred_best_ni(struct lnet_send_data *sd) sd->sd_best_ni = best_ni; /* Set preferred NI if necessary. 
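+ * Necessary means: the peer is not Multi-Rail, the message is not a
+ * response, and no preferred NID has been recorded for it yet.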
*/ - lnet_set_non_mr_pref_nid(sd); + lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg); return 0; } @@ -2246,7 +2539,7 @@ lnet_select_preferred_best_ni(struct lnet_send_data *sd) static int lnet_handle_any_local_nmr_dst(struct lnet_send_data *sd) { - int rc; + int rc = 0; /* sd->sd_best_lpni is already set to the final destination */ @@ -2263,7 +2556,22 @@ lnet_handle_any_local_nmr_dst(struct lnet_send_data *sd) return -EFAULT; } - rc = lnet_select_preferred_best_ni(sd); + if (sd->sd_msg->msg_routing) { + /* If I'm forwarding this message then I can choose any NI + * on the destination peer net + */ + sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL, + sd->sd_peer, + sd->sd_best_lpni->lpni_peer_net, + sd->sd_md_cpt); + if (!sd->sd_best_ni) { + CERROR("Unable to forward message to %s. No local NI available\n", + libcfs_nid2str(sd->sd_dst_nid)); + rc = -EHOSTUNREACH; + } + } else + rc = lnet_select_preferred_best_ni(sd); + if (!rc) rc = lnet_handle_send(sd); @@ -2288,7 +2596,7 @@ lnet_handle_any_mr_dsta(struct lnet_send_data *sd) sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL, sd->sd_peer, sd->sd_best_lpni->lpni_peer_net, - sd->sd_md_cpt, true); + sd->sd_md_cpt); if (!sd->sd_best_ni) { /* @@ -2311,11 +2619,13 @@ lnet_handle_any_mr_dsta(struct lnet_send_data *sd) * networks. */ sd->sd_best_ni = lnet_find_best_ni_on_local_net(sd->sd_peer, - sd->sd_md_cpt); + sd->sd_md_cpt, + lnet_msg_discovery(sd->sd_msg)); if (sd->sd_best_ni) { sd->sd_best_lpni = - lnet_find_best_lpni_on_net(sd, sd->sd_peer, - sd->sd_best_ni->ni_net->net_id); + lnet_find_best_lpni(sd->sd_best_ni, sd->sd_dst_nid, + sd->sd_peer, + sd->sd_best_ni->ni_net->net_id); /* * if we're successful in selecting a peer_ni on the local @@ -2421,11 +2731,11 @@ lnet_handle_any_mr_dst(struct lnet_send_data *sd) return rc; /* - * TODO; One possible enhancement is to run the selection - * algorithm on the peer. However for remote peers the credits are - * not decremented, so we'll be basically going over the peer NIs - * in round robin. An MR router will run the selection algorithm - * on the next-hop interfaces. + * Now that we must route to the destination, we must consider the + * MR case, where the destination has multiple interfaces, some of + * which we can route to and others we do not. For this reason we + * need to select the destination which we can route to and if + * there are multiple, we need to round robin. 
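+ * lnet_handle_find_routed_path() below makes that selection: it picks
+ * the route and gateway NI, and for messages we originate it also
+ * records the chosen remote interface in sd_final_dst_lpni so we can
+ * round robin over the destination's interfaces.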
*/ rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw_lpni, &gw_peer); @@ -2458,9 +2768,10 @@ lnet_handle_any_router_nmr_dst(struct lnet_send_data *sd) struct lnet_peer *gw_peer = NULL; /* - * Let's set if we have a preferred NI to talk to this NMR peer + * Let's see if we have a preferred NI to talk to this NMR peer */ - sd->sd_best_ni = lnet_find_existing_preferred_best_ni(sd); + sd->sd_best_ni = lnet_find_existing_preferred_best_ni(sd->sd_best_lpni, + sd->sd_cpt); /* * find the router and that'll find the best NI if we didn't find @@ -2475,7 +2786,7 @@ lnet_handle_any_router_nmr_dst(struct lnet_send_data *sd) * set the best_ni we've chosen as the preferred one for * this peer */ - lnet_set_non_mr_pref_nid(sd); + lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg); /* we'll be sending to the gw */ sd->sd_best_lpni = gw_lpni; @@ -2531,12 +2842,14 @@ static int lnet_select_pathway(lnet_nid_t src_nid, lnet_nid_t dst_nid, struct lnet_msg *msg, lnet_nid_t rtr_nid) { - struct lnet_peer_ni *lpni; - struct lnet_peer *peer; - struct lnet_send_data send_data; - int cpt, rc; - int md_cpt; - __u32 send_case = 0; + struct lnet_peer_ni *lpni; + struct lnet_peer *peer; + struct lnet_send_data send_data; + int cpt, rc; + int md_cpt; + __u32 send_case = 0; + bool final_hop; + bool mr_forwarding_allowed; memset(&send_data, 0, sizeof(send_data)); @@ -2564,7 +2877,7 @@ again: */ send_data.sd_msg = msg; send_data.sd_cpt = cpt; - if (LNET_NETTYP(LNET_NIDNET(dst_nid)) == LOLND) { + if (dst_nid == LNET_NID_LO_0) { rc = lnet_handle_lo_send(&send_data); lnet_net_unlock(cpt); return rc; @@ -2582,28 +2895,31 @@ again: } /* - * Cache the original src_nid. If we need to resend the message - * then we'll need to know whether the src_nid was originally + * Cache the original src_nid and rtr_nid. If we need to resend the + * message then we'll need to know whether the src_nid was originally * specified for this message. If it was originally specified, * then we need to keep using the same src_nid since it's - * continuing the same sequence of messages. + * continuing the same sequence of messages. Similarly, rtr_nid will + * affect our choice of next hop. */ msg->msg_src_nid_param = src_nid; + msg->msg_rtr_nid_param = rtr_nid; /* - * Now that we have a peer_ni, check if we want to discover - * the peer. Traffic to the LNET_RESERVED_PORTAL should not - * trigger discovery. + * If necessary, perform discovery on the peer that owns this peer_ni. + * Note, this can result in the ownership of this peer_ni changing + * to another peer object. */ - peer = lpni->lpni_peer_net->lpn_peer; - if (lnet_msg_discovery(msg) && !lnet_peer_is_uptodate(peer)) { - rc = lnet_initiate_peer_discovery(lpni, msg, rtr_nid, cpt); + rc = lnet_initiate_peer_discovery(lpni, msg, cpt); + if (rc) { lnet_peer_ni_decref_locked(lpni); lnet_net_unlock(cpt); return rc; } lnet_peer_ni_decref_locked(lpni); + peer = lpni->lpni_peer_net->lpn_peer; + /* * Identify the different send cases */ @@ -2617,18 +2933,56 @@ again: else send_case |= REMOTE_DST; + final_hop = false; + if (msg->msg_routing && (send_case & LOCAL_DST)) + final_hop = true; + + /* Determine whether to allow MR forwarding for this message. + * NB: MR forwarding is allowed if the message originator and the + * destination are both MR capable, and the destination lpni that was + * originally chosen by the originator is unhealthy or down. 
+ * We check the MR capability of the destination further below + */ + mr_forwarding_allowed = false; + if (final_hop) { + struct lnet_peer *src_lp; + struct lnet_peer_ni *src_lpni; + + src_lpni = lnet_nid2peerni_locked(msg->msg_hdr.src_nid, + LNET_NID_ANY, cpt); + /* We don't fail the send if we hit any errors here. We'll just + * try to send it via non-multi-rail criteria + */ + if (!IS_ERR(src_lpni)) { + /* Drop ref taken by lnet_nid2peerni_locked() */ + lnet_peer_ni_decref_locked(src_lpni); + src_lp = lpni->lpni_peer_net->lpn_peer; + if (lnet_peer_is_multi_rail(src_lp) && + !lnet_is_peer_ni_alive(lpni)) + mr_forwarding_allowed = true; + + } + CDEBUG(D_NET, "msg %p MR forwarding %s\n", msg, + mr_forwarding_allowed ? "allowed" : "not allowed"); + } + /* - * if this is a non-MR peer or if we're recovering a peer ni then - * let's consider this an NMR case so we can hit the destination - * NID. + * Deal with the peer as NMR in the following cases: + * 1. the peer is NMR + * 2. We're trying to recover a specific peer NI + * 3. I'm a router sending to the final destination and MR forwarding is + * not allowed for this message (as determined above). + * In this case the source of the message would've + * already selected the final destination so my job + * is to honor the selection. */ - if (!lnet_peer_is_multi_rail(peer) || msg->msg_recovery) + if (!lnet_peer_is_multi_rail(peer) || msg->msg_recovery || + (final_hop && !mr_forwarding_allowed)) send_case |= NMR_DST; else send_case |= MR_DST; - if (msg->msg_type == LNET_MSG_REPLY || - msg->msg_type == LNET_MSG_ACK) + if (lnet_msg_is_response(msg)) send_case |= SND_RESP; /* assign parameters to the send_data */ @@ -2684,8 +3038,13 @@ lnet_send(lnet_nid_t src_nid, struct lnet_msg *msg, lnet_nid_t rtr_nid) LASSERT(!msg->msg_tx_committed); rc = lnet_select_pathway(src_nid, dst_nid, msg, rtr_nid); - if (rc < 0) + if (rc < 0) { + if (rc == -EHOSTUNREACH) + msg->msg_health_status = LNET_MSG_STATUS_REMOTE_ERROR; + else + msg->msg_health_status = LNET_MSG_STATUS_LOCAL_ERROR; return rc; + } if (rc == LNET_CREDIT_OK) lnet_ni_send(msg->msg_txni, msg); @@ -2719,32 +3078,63 @@ lnet_detach_rsp_tracker(struct lnet_libmd *md, int cpt) return; rspt = md->md_rspt_ptr; - md->md_rspt_ptr = NULL; /* debug code */ LASSERT(rspt->rspt_cpt == cpt); - /* - * invalidate the handle to indicate that a response has been - * received, which will then lead the monitor thread to clean up - * the rspt block. - */ - LNetInvalidateMDHandle(&rspt->rspt_mdh); + md->md_rspt_ptr = NULL; + + if (LNetMDHandleIsInvalid(rspt->rspt_mdh)) { + /* + * The monitor thread has invalidated this handle because the + * response timed out, but it failed to lookup the MD. That + * means this response tracker is on the zombie list. We can + * safely remove it under the resource lock (held by caller) and + * free the response tracker block. + */ + list_del(&rspt->rspt_on_list); + lnet_rspt_free(rspt, cpt); + } else { + /* + * invalidate the handle to indicate that a response has been + * received, which will then lead the monitor thread to clean up + * the rspt block. 
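 *
 * (Editorial note.) Either side may finish the cleanup: if the
 * response arrives first, this path only invalidates rspt_mdh and the
 * monitor thread frees the tracker on its next sweep; if the timeout
 * fired first but the MD could not be looked up, the tracker was
 * parked on the zombie list and the branch above frees it here.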
+ */ + LNetInvalidateMDHandle(&rspt->rspt_mdh); + } +} + +void +lnet_clean_zombie_rstqs(void) +{ + struct lnet_rsp_tracker *rspt, *tmp; + int i; + + cfs_cpt_for_each(i, lnet_cpt_table()) { + list_for_each_entry_safe(rspt, tmp, + the_lnet.ln_mt_zombie_rstqs[i], + rspt_on_list) { + list_del(&rspt->rspt_on_list); + lnet_rspt_free(rspt, i); + } + } + + cfs_percpt_free(the_lnet.ln_mt_zombie_rstqs); } static void -lnet_finalize_expired_responses(bool force) +lnet_finalize_expired_responses(void) { struct lnet_libmd *md; - struct list_head local_queue; struct lnet_rsp_tracker *rspt, *tmp; + ktime_t now; int i; if (the_lnet.ln_mt_rstq == NULL) return; cfs_cpt_for_each(i, lnet_cpt_table()) { - INIT_LIST_HEAD(&local_queue); + LIST_HEAD(local_queue); lnet_net_lock(i); if (!the_lnet.ln_mt_rstq[i]) { @@ -2754,6 +3144,8 @@ lnet_finalize_expired_responses(bool force) list_splice_init(the_lnet.ln_mt_rstq[i], &local_queue); lnet_net_unlock(i); + now = ktime_get(); + list_for_each_entry_safe(rspt, tmp, &local_queue, rspt_on_list) { /* * The rspt mdh will be invalidated when a response @@ -2769,41 +3161,74 @@ lnet_finalize_expired_responses(bool force) lnet_res_lock(i); if (LNetMDHandleIsInvalid(rspt->rspt_mdh)) { lnet_res_unlock(i); - list_del_init(&rspt->rspt_on_list); + list_del(&rspt->rspt_on_list); lnet_rspt_free(rspt, i); continue; } - if (ktime_compare(ktime_get(), rspt->rspt_deadline) >= 0 || - force) { + if (ktime_compare(now, rspt->rspt_deadline) >= 0 || + the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN) { struct lnet_peer_ni *lpni; lnet_nid_t nid; md = lnet_handle2md(&rspt->rspt_mdh); if (!md) { + /* MD has been queued for unlink, but + * rspt hasn't been detached (Note we've + * checked above that the rspt_mdh is + * valid). Since we cannot lookup the MD + * we're unable to detach the rspt + * ourselves. Thus, move the rspt to the + * zombie list where we'll wait for + * either: + * 1. The remaining operations on the + * MD to complete. In this case the + * final operation will result in + * lnet_msg_detach_md()-> + * lnet_detach_rsp_tracker() where + * we will clean up this response + * tracker. + * 2. LNet to shutdown. In this case + * we'll wait until after all LND Nets + * have shutdown and then we can + * safely free any remaining response + * tracker blocks on the zombie list. + * Note: We need to hold the resource + * lock when adding to the zombie list + * because we may have concurrent access + * with lnet_detach_rsp_tracker(). 
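 *
 * (Editorial note, sketch.) Case 2 above is what
 * lnet_clean_zombie_rstqs() implements: at shutdown it walks
 * ln_mt_zombie_rstqs for every CPT and frees any trackers still
 * parked there, roughly:
 *
 *	cfs_cpt_for_each(i, lnet_cpt_table())
 *		list_for_each_entry_safe(rspt, tmp,
 *					 the_lnet.ln_mt_zombie_rstqs[i],
 *					 rspt_on_list)
 *			lnet_rspt_free(rspt, i);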
+ */ LNetInvalidateMDHandle(&rspt->rspt_mdh); + list_move(&rspt->rspt_on_list, + the_lnet.ln_mt_zombie_rstqs[i]); lnet_res_unlock(i); - list_del_init(&rspt->rspt_on_list); - lnet_rspt_free(rspt, i); continue; } LASSERT(md->md_rspt_ptr == rspt); md->md_rspt_ptr = NULL; lnet_res_unlock(i); - lnet_net_lock(i); - the_lnet.ln_counters[i]->lct_health.lch_response_timeout_count++; - lnet_net_unlock(i); - - list_del_init(&rspt->rspt_on_list); + LNetMDUnlink(rspt->rspt_mdh); nid = rspt->rspt_next_hop_nid; - CNETERR("Response timed out: md = %p: nid = %s\n", - md, libcfs_nid2str(nid)); - LNetMDUnlink(rspt->rspt_mdh); + list_del(&rspt->rspt_on_list); lnet_rspt_free(rspt, i); + /* If we're shutting down we just want to clean + * up the rspt blocks + */ + if (the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN) + continue; + + lnet_net_lock(i); + the_lnet.ln_counters[i]->lct_health.lch_response_timeout_count++; + lnet_net_unlock(i); + + CDEBUG(D_NET, + "Response timeout: md = %p: nid = %s\n", + md, libcfs_nid2str(nid)); + /* * If there is a timeout on the response * from the next hop decrement its health @@ -2822,10 +3247,11 @@ lnet_finalize_expired_responses(bool force) } } - lnet_net_lock(i); - if (!list_empty(&local_queue)) + if (!list_empty(&local_queue)) { + lnet_net_lock(i); list_splice(&local_queue, the_lnet.ln_mt_rstq[i]); - lnet_net_unlock(i); + lnet_net_unlock(i); + } } } @@ -2851,40 +3277,19 @@ lnet_resend_pending_msgs_locked(struct list_head *resendq, int cpt) lnet_finalize(msg, -EFAULT); lnet_net_lock(cpt); } else { - struct lnet_peer *peer; int rc; - lnet_nid_t src_nid = LNET_NID_ANY; - - /* - * if this message is not being routed and the - * peer is non-MR then we must use the same - * src_nid that was used in the original send. - * Otherwise if we're routing the message (IE - * we're a router) then we can use any of our - * local interfaces. It doesn't matter to the - * final destination. - */ - peer = lpni->lpni_peer_net->lpn_peer; - if (!msg->msg_routing && - !lnet_peer_is_multi_rail(peer)) - src_nid = le64_to_cpu(msg->msg_hdr.src_nid); - /* - * If we originally specified a src NID, then we - * must attempt to reuse it in the resend as well. - */ - if (msg->msg_src_nid_param != LNET_NID_ANY) - src_nid = msg->msg_src_nid_param; lnet_peer_ni_decref_locked(lpni); lnet_net_unlock(cpt); CDEBUG(D_NET, "resending %s->%s: %s recovery %d try# %d\n", - libcfs_nid2str(src_nid), + libcfs_nid2str(msg->msg_src_nid_param), libcfs_id2str(msg->msg_target), lnet_msgtyp2str(msg->msg_type), msg->msg_recovery, msg->msg_retry_count); - rc = lnet_send(src_nid, msg, LNET_NID_ANY); + rc = lnet_send(msg->msg_src_nid_param, msg, + msg->msg_rtr_nid_param); if (rc) { CERROR("Error sending %s to %s: %d\n", lnet_msgtyp2str(msg->msg_type), @@ -2936,8 +3341,8 @@ static void lnet_recover_local_nis(void) { struct lnet_mt_event_info *ev_info; - struct list_head processed_list; - struct list_head local_queue; + LIST_HEAD(processed_list); + LIST_HEAD(local_queue); struct lnet_handle_md mdh; struct lnet_ni *tmp; struct lnet_ni *ni; @@ -2945,9 +3350,6 @@ lnet_recover_local_nis(void) int healthv; int rc; - INIT_LIST_HEAD(&local_queue); - INIT_LIST_HEAD(&processed_list); - /* * splice the recovery queue on a local queue. We will iterate * through the local queue and update it as needed. 
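 * (Editorial note.) The pattern here mirrors lnet_recover_peer_nis()
 * below: splice the shared recovery queue onto a stack-local list under
 * lnet_net_lock(0), send a recovery ping to each entry without holding
 * the lock, collect entries that still need recovery on processed_list,
 * and splice them back onto the queue at the end.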
Once we're @@ -3049,7 +3451,8 @@ lnet_recover_local_nis(void) ev_info->mt_type = MT_TYPE_LOCAL_NI; ev_info->mt_nid = nid; rc = lnet_send_ping(nid, &mdh, LNET_INTERFACES_MIN, - ev_info, the_lnet.ln_mt_eqh, true); + ev_info, the_lnet.ln_mt_handler, + true); /* lookup the nid again */ lnet_net_lock(0); ni = lnet_nid2ni_locked(nid, 0); @@ -3098,26 +3501,6 @@ lnet_recover_local_nis(void) lnet_net_unlock(0); } -static struct list_head ** -lnet_create_array_of_queues(void) -{ - struct list_head **qs; - struct list_head *q; - int i; - - qs = cfs_percpt_alloc(lnet_cpt_table(), - sizeof(struct list_head)); - if (!qs) { - CERROR("Failed to allocate queues\n"); - return NULL; - } - - cfs_percpt_for_each(q, i, qs) - INIT_LIST_HEAD(q); - - return qs; -} - static int lnet_resendqs_create(void) { @@ -3198,11 +3581,9 @@ static void lnet_clean_resendqs(void) { struct lnet_msg *msg, *tmp; - struct list_head msgs; + LIST_HEAD(msgs); int i; - INIT_LIST_HEAD(&msgs); - cfs_cpt_for_each(i, lnet_cpt_table()) { lnet_net_lock(i); list_splice_init(the_lnet.ln_mt_resendqs[i], &msgs); @@ -3221,17 +3602,15 @@ static void lnet_recover_peer_nis(void) { struct lnet_mt_event_info *ev_info; - struct list_head processed_list; - struct list_head local_queue; + LIST_HEAD(processed_list); + LIST_HEAD(local_queue); struct lnet_handle_md mdh; struct lnet_peer_ni *lpni; struct lnet_peer_ni *tmp; lnet_nid_t nid; int healthv; int rc; - - INIT_LIST_HEAD(&local_queue); - INIT_LIST_HEAD(&processed_list); + time64_t now; /* * Always use cpt 0 for locking across all interactions with @@ -3242,6 +3621,8 @@ lnet_recover_peer_nis(void) &local_queue); lnet_net_unlock(0); + now = ktime_get_seconds(); + list_for_each_entry_safe(lpni, tmp, &local_queue, lpni_recovery) { /* @@ -3307,7 +3688,8 @@ lnet_recover_peer_nis(void) ev_info->mt_type = MT_TYPE_PEER_NI; ev_info->mt_nid = nid; rc = lnet_send_ping(nid, &mdh, LNET_INTERFACES_MIN, - ev_info, the_lnet.ln_mt_eqh, true); + ev_info, the_lnet.ln_mt_handler, + true); lnet_net_lock(0); /* * lnet_find_peer_ni_locked() grabs a refcount for @@ -3321,29 +3703,21 @@ lnet_recover_peer_nis(void) } lpni->lpni_recovery_ping_mdh = mdh; - /* - * While we're unlocked the lpni could've been - * readded on the recovery queue. In this case we - * don't need to add it to the local queue, since - * it's already on there and the thread that added - * it would've incremented the refcount on the - * peer, which means we need to decref the refcount - * that was implicitly grabbed by find_peer_ni_locked. - * Otherwise, if the lpni is still not on - * the recovery queue, then we'll add it to the - * processed list. - */ - if (list_empty(&lpni->lpni_recovery)) - list_add_tail(&lpni->lpni_recovery, &processed_list); - else - lnet_peer_ni_decref_locked(lpni); - lnet_net_unlock(0); - spin_lock(&lpni->lpni_lock); - if (rc) + lnet_peer_ni_add_to_recoveryq_locked(lpni, + &processed_list, + now); + if (rc) { + spin_lock(&lpni->lpni_lock); lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING; - } - spin_unlock(&lpni->lpni_lock); + spin_unlock(&lpni->lpni_lock); + } + + /* Drop the ref taken by lnet_find_peer_ni_locked() */ + lnet_peer_ni_decref_locked(lpni); + lnet_net_unlock(0); + } else + spin_unlock(&lpni->lpni_lock); } list_splice_init(&processed_list, &local_queue); @@ -3360,6 +3734,7 @@ lnet_monitor_thread(void *arg) int interval; time64_t now; + wait_for_completion(&the_lnet.ln_started); /* * The monitor thread takes care of the following: * 1. 
Checks the aliveness of routers @@ -3370,8 +3745,6 @@ lnet_monitor_thread(void *arg) * 4. Checks if there are any NIs on the remote recovery queue * and pings them. */ - cfs_block_allsigs(); - while (the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING) { now = ktime_get_real_seconds(); @@ -3381,7 +3754,7 @@ lnet_monitor_thread(void *arg) lnet_resend_pending_msgs(); if (now >= rsp_timeout) { - lnet_finalize_expired_responses(false); + lnet_finalize_expired_responses(); rsp_timeout = now + (lnet_transaction_timeout / 2); } @@ -3399,17 +3772,24 @@ lnet_monitor_thread(void *arg) * if we wake up every 1 second? Although, we've seen * cases where we get a complaint that an idle thread * is waking up unnecessarily. + * + * Take into account the current net_count when you wake + * up for alive router checking, since we need to check + * possibly as many networks as we have configured. */ interval = min(lnet_recovery_interval, - lnet_transaction_timeout / 2); - wait_event_interruptible_timeout(the_lnet.ln_mt_waitq, - false, - cfs_time_seconds(interval)); + min((unsigned int) alive_router_check_interval / + lnet_current_net_count, + lnet_transaction_timeout / 2)); + wait_for_completion_interruptible_timeout( + &the_lnet.ln_mt_wait_complete, + cfs_time_seconds(interval)); + /* Must re-init the completion before testing anything, + * including ln_mt_state. + */ + reinit_completion(&the_lnet.ln_mt_wait_complete); } - /* clean up the router checker */ - lnet_prune_rc_data(1); - /* Shutting down */ lnet_net_lock(LNET_LOCK_EX); the_lnet.ln_mt_state = LNET_MT_STATE_SHUTDOWN; @@ -3431,7 +3811,7 @@ lnet_monitor_thread(void *arg) int lnet_send_ping(lnet_nid_t dest_nid, struct lnet_handle_md *mdh, int nnis, - void *user_data, struct lnet_handle_eq eqh, bool recovery) + void *user_data, lnet_handler_t handler, bool recovery) { struct lnet_md md = { NULL }; struct lnet_process_id id; @@ -3454,11 +3834,11 @@ lnet_send_ping(lnet_nid_t dest_nid, md.length = LNET_PING_INFO_SIZE(nnis); md.threshold = 2; /* GET/REPLY */ md.max_size = 0; - md.options = LNET_MD_TRUNCATE; + md.options = LNET_MD_TRUNCATE | LNET_MD_TRACK_RESPONSE; md.user_ptr = user_data; - md.eq_handle = eqh; + md.handler = handler; - rc = LNetMDBind(md, LNET_UNLINK, mdh); + rc = LNetMDBind(&md, LNET_UNLINK, mdh); if (rc) { lnet_ping_buffer_decref(pbuf); CERROR("Can't bind MD: %d\n", rc); @@ -3486,7 +3866,7 @@ fail_error: static void lnet_handle_recovery_reply(struct lnet_mt_event_info *ev_info, - int status, bool unlink_event) + int status, bool send, bool unlink_event) { lnet_nid_t nid = ev_info->mt_nid; @@ -3500,7 +3880,8 @@ lnet_handle_recovery_reply(struct lnet_mt_event_info *ev_info, return; } lnet_ni_lock(ni); - ni->ni_recovery_state &= ~LNET_NI_RECOVERY_PENDING; + if (!send || (send && status != 0)) + ni->ni_recovery_state &= ~LNET_NI_RECOVERY_PENDING; if (status) ni->ni_recovery_state |= LNET_NI_RECOVERY_FAILED; lnet_ni_unlock(ni); @@ -3519,7 +3900,8 @@ lnet_handle_recovery_reply(struct lnet_mt_event_info *ev_info, * In the peer case, it'll naturally be incremented */ if (!unlink_event) - lnet_inc_healthv(&ni->ni_healthv); + lnet_inc_healthv(&ni->ni_healthv, + lnet_health_sensitivity); } else { struct lnet_peer_ni *lpni; int cpt; @@ -3531,7 +3913,8 @@ lnet_handle_recovery_reply(struct lnet_mt_event_info *ev_info, return; } spin_lock(&lpni->lpni_lock); - lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING; + if (!send || (send && status != 0)) + lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING; if (status) lpni->lpni_state |= 
LNET_PEER_NI_RECOVERY_FAILED; spin_unlock(&lpni->lpni_lock); @@ -3547,7 +3930,7 @@ lnet_handle_recovery_reply(struct lnet_mt_event_info *ev_info, void lnet_mt_event_handler(struct lnet_event *event) { - struct lnet_mt_event_info *ev_info = event->md.user_ptr; + struct lnet_mt_event_info *ev_info = event->md_user_ptr; struct lnet_ping_buffer *pbuf; /* TODO: remove assert */ @@ -3562,8 +3945,9 @@ lnet_mt_event_handler(struct lnet_event *event) case LNET_EVENT_UNLINK: CDEBUG(D_NET, "%s recovery ping unlinked\n", libcfs_nid2str(ev_info->mt_nid)); + /* fallthrough */ case LNET_EVENT_REPLY: - lnet_handle_recovery_reply(ev_info, event->status, + lnet_handle_recovery_reply(ev_info, event->status, false, event->type == LNET_EVENT_UNLINK); break; case LNET_EVENT_SEND: @@ -3571,6 +3955,7 @@ lnet_mt_event_handler(struct lnet_event *event) libcfs_nid2str(ev_info->mt_nid), (event->status) ? "unsuccessfully" : "successfully", event->status); + lnet_handle_recovery_reply(ev_info, event->status, true, false); break; default: CERROR("Unexpected event: %d\n", event->type); @@ -3578,7 +3963,7 @@ lnet_mt_event_handler(struct lnet_event *event) } if (event->unlinked) { LIBCFS_FREE(ev_info, sizeof(*ev_info)); - pbuf = LNET_PING_INFO_TO_BUFFER(event->md.start); + pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start); lnet_ping_buffer_decref(pbuf); } } @@ -3600,7 +3985,7 @@ lnet_rsp_tracker_create(void) static void lnet_rsp_tracker_clean(void) { - lnet_finalize_expired_responses(true); + lnet_finalize_expired_responses(); cfs_percpt_free(the_lnet.ln_mt_rstq); the_lnet.ln_mt_rstq = NULL; @@ -3622,11 +4007,6 @@ int lnet_monitor_thr_start(void) if (rc) goto clean_queues; - /* Pre monitor thread start processing */ - rc = lnet_router_pre_mt_start(); - if (rc) - goto free_mem; - sema_init(&the_lnet.ln_mt_signal, 0); lnet_net_lock(LNET_LOCK_EX); @@ -3639,9 +4019,6 @@ int lnet_monitor_thr_start(void) goto clean_thread; } - /* post monitor thread start processing */ - lnet_router_post_mt_start(); - return 0; clean_thread: @@ -3651,8 +4028,6 @@ clean_thread: /* block until event callback signals exit */ down(&the_lnet.ln_mt_signal); /* clean up */ - lnet_router_cleanup(); -free_mem: lnet_net_lock(LNET_LOCK_EX); the_lnet.ln_mt_state = LNET_MT_STATE_SHUTDOWN; lnet_net_unlock(LNET_LOCK_EX); @@ -3660,7 +4035,7 @@ free_mem: lnet_clean_local_ni_recoveryq(); lnet_clean_peer_ni_recoveryq(); lnet_clean_resendqs(); - LNetInvalidateEQHandle(&the_lnet.ln_mt_eqh); + the_lnet.ln_mt_handler = NULL; return rc; clean_queues: lnet_rsp_tracker_clean(); @@ -3681,20 +4056,17 @@ void lnet_monitor_thr_stop(void) lnet_net_unlock(LNET_LOCK_EX); /* tell the monitor thread that we're shutting down */ - wake_up(&the_lnet.ln_mt_waitq); + complete(&the_lnet.ln_mt_wait_complete); /* block until monitor thread signals that it's done */ down(&the_lnet.ln_mt_signal); LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN); /* perform cleanup tasks */ - lnet_router_cleanup(); lnet_rsp_tracker_clean(); lnet_clean_local_ni_recoveryq(); lnet_clean_peer_ni_recoveryq(); lnet_clean_resendqs(); - - return; } void @@ -3865,8 +4237,8 @@ lnet_parse_reply(struct lnet_ni *ni, struct lnet_msg *msg) struct lnet_hdr *hdr = &msg->msg_hdr; struct lnet_process_id src = {0}; struct lnet_libmd *md; - int rlength; - int mlength; + unsigned int rlength; + unsigned int mlength; int cpt; cpt = lnet_cpt_of_cookie(hdr->msg.reply.dst_wmd.wh_object_cookie); @@ -3895,7 +4267,7 @@ lnet_parse_reply(struct lnet_ni *ni, struct lnet_msg *msg) LASSERT(md->md_offset == 0); rlength = 
hdr->payload_length; - mlength = MIN(rlength, (int)md->md_length); + mlength = min(rlength, md->md_length); if (mlength < rlength && (md->md_options & LNET_MD_TRUNCATE) == 0) { @@ -4050,82 +4422,21 @@ lnet_msgtyp2str (int type) } } -void -lnet_print_hdr(struct lnet_hdr *hdr) -{ - struct lnet_process_id src = { - .nid = hdr->src_nid, - .pid = hdr->src_pid, - }; - struct lnet_process_id dst = { - .nid = hdr->dest_nid, - .pid = hdr->dest_pid, - }; - char *type_str = lnet_msgtyp2str(hdr->type); - - CWARN("P3 Header at %p of type %s\n", hdr, type_str); - CWARN(" From %s\n", libcfs_id2str(src)); - CWARN(" To %s\n", libcfs_id2str(dst)); - - switch (hdr->type) { - default: - break; - - case LNET_MSG_PUT: - CWARN(" Ptl index %d, ack md %#llx.%#llx, " - "match bits %llu\n", - hdr->msg.put.ptl_index, - hdr->msg.put.ack_wmd.wh_interface_cookie, - hdr->msg.put.ack_wmd.wh_object_cookie, - hdr->msg.put.match_bits); - CWARN(" Length %d, offset %d, hdr data %#llx\n", - hdr->payload_length, hdr->msg.put.offset, - hdr->msg.put.hdr_data); - break; - - case LNET_MSG_GET: - CWARN(" Ptl index %d, return md %#llx.%#llx, " - "match bits %llu\n", hdr->msg.get.ptl_index, - hdr->msg.get.return_wmd.wh_interface_cookie, - hdr->msg.get.return_wmd.wh_object_cookie, - hdr->msg.get.match_bits); - CWARN(" Length %d, src offset %d\n", - hdr->msg.get.sink_length, - hdr->msg.get.src_offset); - break; - - case LNET_MSG_ACK: - CWARN(" dst md %#llx.%#llx, " - "manipulated length %d\n", - hdr->msg.ack.dst_wmd.wh_interface_cookie, - hdr->msg.ack.dst_wmd.wh_object_cookie, - hdr->msg.ack.mlength); - break; - - case LNET_MSG_REPLY: - CWARN(" dst md %#llx.%#llx, " - "length %d\n", - hdr->msg.reply.dst_wmd.wh_interface_cookie, - hdr->msg.reply.dst_wmd.wh_object_cookie, - hdr->payload_length); - } - -} - int lnet_parse(struct lnet_ni *ni, struct lnet_hdr *hdr, lnet_nid_t from_nid, void *private, int rdma_req) { - int rc = 0; - int cpt; - int for_me; - struct lnet_msg *msg; - lnet_pid_t dest_pid; - lnet_nid_t dest_nid; - lnet_nid_t src_nid; struct lnet_peer_ni *lpni; - __u32 payload_length; - __u32 type; + struct lnet_msg *msg; + __u32 payload_length; + lnet_pid_t dest_pid; + lnet_nid_t dest_nid; + lnet_nid_t src_nid; + bool push = false; + int for_me; + __u32 type; + int rc = 0; + int cpt; LASSERT (!in_interrupt ()); @@ -4180,16 +4491,18 @@ lnet_parse(struct lnet_ni *ni, struct lnet_hdr *hdr, lnet_nid_t from_nid, } if (the_lnet.ln_routing && - ni->ni_last_alive != ktime_get_real_seconds()) { - /* NB: so far here is the only place to set NI status to "up */ + ni->ni_net->net_last_alive != ktime_get_real_seconds()) { lnet_ni_lock(ni); - ni->ni_last_alive = ktime_get_real_seconds(); - if (ni->ni_status != NULL && - ni->ni_status->ns_status == LNET_NI_STATUS_DOWN) - ni->ni_status->ns_status = LNET_NI_STATUS_UP; + spin_lock(&ni->ni_net->net_lock); + ni->ni_net->net_last_alive = ktime_get_real_seconds(); + spin_unlock(&ni->ni_net->net_lock); + push = lnet_ni_set_status_locked(ni, LNET_NI_STATUS_UP); lnet_ni_unlock(ni); } + if (push) + lnet_push_update_to_peers(1); + /* Regard a bad destination NID as a protocol error. 
Senders should * know what they're doing; if they don't they're misconfigured, buggy * or malicious so we chop them off at the knees :) */ @@ -4247,69 +4560,14 @@ lnet_parse(struct lnet_ni *ni, struct lnet_hdr *hdr, lnet_nid_t from_nid, } if (!list_empty(&the_lnet.ln_drop_rules) && - lnet_drop_rule_match(hdr, NULL)) { - CDEBUG(D_NET, "%s, src %s, dst %s: Dropping %s to simulate" - "silent message loss\n", + lnet_drop_rule_match(hdr, ni->ni_nid, NULL)) { + CDEBUG(D_NET, + "%s, src %s, dst %s: Dropping %s to simulate silent message loss\n", libcfs_nid2str(from_nid), libcfs_nid2str(src_nid), libcfs_nid2str(dest_nid), lnet_msgtyp2str(type)); goto drop; } - if (lnet_drop_asym_route && for_me && - LNET_NIDNET(src_nid) != LNET_NIDNET(from_nid)) { - struct lnet_net *net; - struct lnet_remotenet *rnet; - bool found = true; - - /* we are dealing with a routed message, - * so see if route to reach src_nid goes through from_nid - */ - lnet_net_lock(cpt); - net = lnet_get_net_locked(LNET_NIDNET(ni->ni_nid)); - if (!net) { - lnet_net_unlock(cpt); - CERROR("net %s not found\n", - libcfs_net2str(LNET_NIDNET(ni->ni_nid))); - return -EPROTO; - } - - rnet = lnet_find_rnet_locked(LNET_NIDNET(src_nid)); - if (rnet) { - struct lnet_peer *gw = NULL; - struct lnet_peer_ni *lpni = NULL; - struct lnet_route *route; - - list_for_each_entry(route, &rnet->lrn_routes, lr_list) { - found = false; - gw = route->lr_gateway; - if (route->lr_lnet != net->net_id) - continue; - /* - * if the nid is one of the gateway's NIDs - * then this is a valid gateway - */ - while ((lpni = lnet_get_next_peer_ni_locked(gw, - NULL, lpni)) != NULL) { - if (lpni->lpni_nid == from_nid) { - found = true; - break; - } - } - } - } - lnet_net_unlock(cpt); - if (!found) { - /* we would not use from_nid to route a message to - * src_nid - * => asymmetric routing detected but forbidden - */ - CERROR("%s, src %s: Dropping asymmetrical route %s\n", - libcfs_nid2str(from_nid), - libcfs_nid2str(src_nid), lnet_msgtyp2str(type)); - goto drop; - } - } - msg = lnet_msg_alloc(); if (msg == NULL) { CERROR("%s, src %s: Dropping %s (out of memory)\n", @@ -4359,6 +4617,36 @@ lnet_parse(struct lnet_ni *ni, struct lnet_hdr *hdr, lnet_nid_t from_nid, return 0; goto drop; } + + if (lnet_drop_asym_route && for_me && + LNET_NIDNET(src_nid) != LNET_NIDNET(from_nid)) { + __u32 src_net_id = LNET_NIDNET(src_nid); + struct lnet_peer *gw = lpni->lpni_peer_net->lpn_peer; + struct lnet_route *route; + bool found = false; + + list_for_each_entry(route, &gw->lp_routes, lr_gwlist) { + if (route->lr_net == src_net_id) { + found = true; + break; + } + } + if (!found) { + lnet_net_unlock(cpt); + /* we would not use from_nid to route a message to + * src_nid + * => asymmetric routing detected but forbidden + */ + CERROR("%s, src %s: Dropping asymmetrical route %s\n", + libcfs_nid2str(from_nid), + libcfs_nid2str(src_nid), lnet_msgtyp2str(type)); + lnet_msg_free(msg); + goto drop; + } + } + + lpni->lpni_last_alive = ktime_get_seconds(); + msg->msg_rxpeer = lpni; msg->msg_rxni = ni; lnet_ni_addref_locked(ni, cpt); @@ -4494,7 +4782,6 @@ lnet_attach_rsp_tracker(struct lnet_rsp_tracker *rspt, int cpt, struct lnet_libmd *md, struct lnet_handle_md mdh) { s64 timeout_ns; - bool new_entry = true; struct lnet_rsp_tracker *local_rspt; /* @@ -4513,8 +4800,7 @@ lnet_attach_rsp_tracker(struct lnet_rsp_tracker *rspt, int cpt, * we already have an rspt attached to the md, so we'll * update the deadline on that one. 
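 *
 * (Editorial note, illustrative.) Re-arming amounts to something like
 *
 *	local_rspt->rspt_deadline = ktime_add_ns(ktime_get(), timeout_ns);
 *
 * and the list_move_tail() below keeps ln_mt_rstq[cpt] ordered so that
 * the oldest deadlines are examined first by the monitor thread's
 * expiry sweep.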
*/ - LIBCFS_FREE(rspt, sizeof(*rspt)); - new_entry = false; + lnet_rspt_free(rspt, cpt); } else { /* new md */ rspt->rspt_mdh = mdh; @@ -4530,9 +4816,7 @@ lnet_attach_rsp_tracker(struct lnet_rsp_tracker *rspt, int cpt, * list in order to expire all the older entries first. */ lnet_net_lock(cpt); - if (!new_entry && !list_empty(&local_rspt->rspt_on_list)) - list_del_init(&local_rspt->rspt_on_list); - list_add_tail(&local_rspt->rspt_on_list, the_lnet.ln_mt_rstq[cpt]); + list_move_tail(&local_rspt->rspt_on_list, the_lnet.ln_mt_rstq[cpt]); lnet_net_unlock(cpt); lnet_res_unlock(cpt); } @@ -4608,7 +4892,7 @@ LNetPut(lnet_nid_t self, struct lnet_handle_md mdh, enum lnet_ack_req ack, libcfs_id2str(target)); return -ENOMEM; } - msg->msg_vmflush = !!memory_pressure_get(); + msg->msg_vmflush = !!(current->flags & PF_MEMALLOC); cpt = lnet_cpt_of_cookie(mdh.cookie); @@ -4634,7 +4918,9 @@ LNetPut(lnet_nid_t self, struct lnet_handle_md mdh, enum lnet_ack_req ack, md->md_me->me_portal); lnet_res_unlock(cpt); - LIBCFS_FREE(rspt, sizeof(*rspt)); + if (rspt) + lnet_rspt_free(rspt, cpt); + lnet_msg_free(msg); return -ENOENT; } @@ -4667,8 +4953,11 @@ LNetPut(lnet_nid_t self, struct lnet_handle_md mdh, enum lnet_ack_req ack, lnet_build_msg_event(msg, LNET_EVENT_SEND); - if (ack == LNET_ACK_REQ) + if (rspt && lnet_response_tracking_enabled(LNET_MSG_PUT, + md->md_options)) lnet_attach_rsp_tracker(rspt, cpt, md, mdh); + else if (rspt) + lnet_rspt_free(rspt, cpt); if (CFS_FAIL_CHECK_ORSET(CFS_FAIL_PTLRPC_OST_BULK_CB2, CFS_FAIL_ONCE)) @@ -4861,7 +5150,7 @@ LNetGet(lnet_nid_t self, struct lnet_handle_md mdh, lnet_res_unlock(cpt); lnet_msg_free(msg); - LIBCFS_FREE(rspt, sizeof(*rspt)); + lnet_rspt_free(rspt, cpt); return -ENOENT; } @@ -4886,7 +5175,10 @@ LNetGet(lnet_nid_t self, struct lnet_handle_md mdh, lnet_build_msg_event(msg, LNET_EVENT_SEND); - lnet_attach_rsp_tracker(rspt, cpt, md, mdh); + if (lnet_response_tracking_enabled(LNET_MSG_GET, md->md_options)) + lnet_attach_rsp_tracker(rspt, cpt, md, mdh); + else + lnet_rspt_free(rspt, cpt); rc = lnet_send(self, msg, LNET_NID_ANY); if (rc < 0) { @@ -4918,14 +5210,14 @@ EXPORT_SYMBOL(LNetGet); int LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp) { - struct list_head *e; + struct list_head *e; struct lnet_ni *ni = NULL; struct lnet_remotenet *rnet; - __u32 dstnet = LNET_NIDNET(dstnid); - int hops; - int cpt; - __u32 order = 2; - struct list_head *rn_list; + __u32 dstnet = LNET_NIDNET(dstnid); + int hops; + int cpt; + __u32 order = 2; + struct list_head *rn_list; /* if !local_nid_dist_zero, I don't return a distance of 0 ever * (when lustre sees a distance of 0, it substitutes 0@lo), so I @@ -4941,7 +5233,7 @@ LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp) if (srcnidp != NULL) *srcnidp = dstnid; if (orderp != NULL) { - if (LNET_NETTYP(LNET_NIDNET(dstnid)) == LOLND) + if (dstnid == LNET_NID_LO_0) *orderp = 0; else *orderp = 1; @@ -4956,9 +5248,9 @@ LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp) * current net namespace. * If not, assign order above 0xffff0000, * to make this ni not a priority. */ - if (!net_eq(ni->ni_net_ns, current->nsproxy->net_ns)) - order += 0xffff0000; - + if (current->nsproxy && + !net_eq(ni->ni_net_ns, current->nsproxy->net_ns)) + order += 0xffff0000; if (srcnidp != NULL) *srcnidp = ni->ni_nid; if (orderp != NULL)