X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lnet%2Flnet%2Flib-move.c;h=81261c9189242fa687484b048953b793b69e97dc;hb=3b760208109b249fd9051d97dbc98664ca4b5769;hp=f69b21f52b8771e1ddceddd6f1e494fe1adc380b;hpb=e5a8f3fc12840aee97fca03d76b1ae9b4572acb8;p=fs%2Flustre-release.git

diff --git a/lnet/lnet/lib-move.c b/lnet/lnet/lib-move.c
index f69b21f..81261c9 100644
--- a/lnet/lnet/lib-move.c
+++ b/lnet/lnet/lib-move.c
@@ -27,7 +27,6 @@
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
  *
  * lnet/lnet/lib-move.c
  *
@@ -40,6 +39,7 @@
 #include
 #include
+#include
 #include

 static int local_nid_dist_zero = 1;
@@ -55,9 +55,9 @@ struct lnet_send_data {
 	struct lnet_peer_ni *sd_gw_lpni;
 	struct lnet_peer_net *sd_peer_net;
 	struct lnet_msg *sd_msg;
-	lnet_nid_t sd_dst_nid;
-	lnet_nid_t sd_src_nid;
-	lnet_nid_t sd_rtr_nid;
+	struct lnet_nid sd_dst_nid;
+	struct lnet_nid sd_src_nid;
+	struct lnet_nid sd_rtr_nid;
 	int sd_cpt;
 	int sd_md_cpt;
 	__u32 sd_send_case;
@@ -190,13 +190,15 @@ void lnet_usr_translate_stats(struct lnet_ioctl_element_msg_stats *msg_stats,
 }

 int
-lnet_fail_nid(lnet_nid_t nid, unsigned int threshold)
+lnet_fail_nid(lnet_nid_t nid4, unsigned int threshold)
 {
 	struct lnet_test_peer *tp;
 	struct list_head *el;
 	struct list_head *next;
+	struct lnet_nid nid;
 	LIST_HEAD(cull);

+	lnet_nid4_to_nid(nid4, &nid);
 	/* NB: use lnet_net_lock(0) to serialize operations on test peers */
 	if (threshold != 0) {
 		/* Adding a new entry */
@@ -219,17 +221,17 @@ lnet_fail_nid(lnet_nid_t nid, unsigned int threshold)
 		tp = list_entry(el, struct lnet_test_peer, tp_list);

 		if (tp->tp_threshold == 0 ||	/* needs culling anyway */
-		    nid == LNET_NID_ANY ||	/* removing all entries */
-		    tp->tp_nid == nid) {	/* matched this one */
+		    LNET_NID_IS_ANY(&nid) ||	/* removing all entries */
+		    nid_same(&tp->tp_nid, &nid)) {	/* matched this one */
 			list_move(&tp->tp_list, &cull);
 		}
 	}

 	lnet_net_unlock(0);

-	while (!list_empty(&cull)) {
-		tp = list_entry(cull.next, struct lnet_test_peer, tp_list);
-
+	while ((tp = list_first_entry_or_null(&cull,
+					      struct lnet_test_peer,
+					      tp_list)) != NULL) {
 		list_del(&tp->tp_list);
 		LIBCFS_FREE(tp, sizeof(*tp));
 	}
@@ -237,7 +239,7 @@ lnet_fail_nid(lnet_nid_t nid, unsigned int threshold)
 }

 static int
-fail_peer (lnet_nid_t nid, int outgoing)
+fail_peer(struct lnet_nid *nid, int outgoing)
 {
 	struct lnet_test_peer *tp;
 	struct list_head *el;
@@ -262,8 +264,8 @@ fail_peer (lnet_nid_t nid, int outgoing)
 			continue;
 		}

-		if (tp->tp_nid == LNET_NID_ANY ||	/* fail every peer */
-		    nid == tp->tp_nid) {		/* fail this peer */
+		if (LNET_NID_IS_ANY(&tp->tp_nid) ||	/* fail every peer */
+		    nid_same(nid, &tp->tp_nid)) {	/* fail this peer */
 			fail = 1;

 			if (tp->tp_threshold != LNET_MD_THRESH_INF) {
@@ -280,10 +282,10 @@ fail_peer (lnet_nid_t nid, int outgoing)

 	lnet_net_unlock(0);

-	while (!list_empty(&cull)) {
-		tp = list_entry(cull.next, struct lnet_test_peer, tp_list);
+	while ((tp = list_first_entry_or_null(&cull,
+					      struct lnet_test_peer,
+					      tp_list)) != NULL) {
 		list_del(&tp->tp_list);
-
 		LIBCFS_FREE(tp, sizeof(*tp));
 	}

@@ -705,11 +707,11 @@ lnet_setpayloadbuffer(struct lnet_msg *msg)
 }

 void
-lnet_prep_send(struct lnet_msg *msg, int type, struct lnet_process_id target,
+lnet_prep_send(struct lnet_msg *msg, int type, struct lnet_processid *target,
 	       unsigned int offset, unsigned int len)
 {
 	msg->msg_type = type;
-	msg->msg_target = target;
+	msg->msg_target = *target;
 	msg->msg_len = len;
 	msg->msg_offset = offset;

@@ -717,23 +719,23 @@ lnet_prep_send(struct lnet_msg *msg, int
type, struct lnet_process_id target, lnet_setpayloadbuffer(msg); memset (&msg->msg_hdr, 0, sizeof (msg->msg_hdr)); - msg->msg_hdr.type = cpu_to_le32(type); + msg->msg_hdr.type = type; /* dest_nid will be overwritten by lnet_select_pathway() */ - msg->msg_hdr.dest_nid = cpu_to_le64(target.nid); - msg->msg_hdr.dest_pid = cpu_to_le32(target.pid); + msg->msg_hdr.dest_nid = target->nid; + msg->msg_hdr.dest_pid = target->pid; /* src_nid will be set later */ - msg->msg_hdr.src_pid = cpu_to_le32(the_lnet.ln_pid); - msg->msg_hdr.payload_length = cpu_to_le32(len); + msg->msg_hdr.src_pid = the_lnet.ln_pid; + msg->msg_hdr.payload_length = len; } -static void +void lnet_ni_send(struct lnet_ni *ni, struct lnet_msg *msg) { void *priv = msg->msg_private; int rc; LASSERT(!in_interrupt()); - LASSERT(ni->ni_nid == LNET_NID_LO_0 || + LASSERT(nid_is_lo0(&ni->ni_nid) || (msg->msg_txcredit && msg->msg_peertxcredit)); rc = (ni->ni_net->net_lnd->lnd_send)(ni, priv, msg); @@ -759,8 +761,8 @@ lnet_ni_eager_recv(struct lnet_ni *ni, struct lnet_msg *msg) if (rc != 0) { CERROR("recv from %s / send to %s aborted: " "eager_recv failed %d\n", - libcfs_nid2str(msg->msg_rxpeer->lpni_nid), - libcfs_id2str(msg->msg_target), rc); + libcfs_nidstr(&msg->msg_rxpeer->lpni_nid), + libcfs_idstr(&msg->msg_target), rc); LASSERT(rc < 0); /* required by my callers */ } @@ -843,7 +845,7 @@ lnet_post_send_locked(struct lnet_msg *msg, int do_send) /* can't get here if we're sending to the loopback interface */ if (the_lnet.ln_loni) - LASSERT(lp->lpni_nid != the_lnet.ln_loni->ni_nid); + LASSERT(!nid_same(&lp->lpni_nid, &the_lnet.ln_loni->ni_nid)); /* NB 'lp' is always the next hop */ if ((msg->msg_target.pid & LNET_PID_USERFLAG) == 0 && @@ -862,7 +864,7 @@ lnet_post_send_locked(struct lnet_msg *msg, int do_send) LNET_STATS_TYPE_DROP); CNETERR("Dropping message for %s: peer not alive\n", - libcfs_id2str(msg->msg_target)); + libcfs_idstr(&msg->msg_target)); msg->msg_health_status = LNET_MSG_STATUS_REMOTE_DROPPED; if (do_send) lnet_finalize(msg, -EHOSTUNREACH); @@ -877,11 +879,11 @@ lnet_post_send_locked(struct lnet_msg *msg, int do_send) CNETERR("Aborting message for %s: LNetM[DE]Unlink() already " "called on the MD/ME.\n", - libcfs_id2str(msg->msg_target)); + libcfs_idstr(&msg->msg_target)); if (do_send) { msg->msg_no_resend = true; CDEBUG(D_NET, "msg %p to %s canceled and will not be resent\n", - msg, libcfs_id2str(msg->msg_target)); + msg, libcfs_idstr(&msg->msg_target)); lnet_finalize(msg, -ECANCELED); } @@ -895,7 +897,7 @@ lnet_post_send_locked(struct lnet_msg *msg, int do_send) !list_empty(&lp->lpni_txq)); msg->msg_peertxcredit = 1; - lp->lpni_txqnob += msg->msg_len + sizeof(struct lnet_hdr); + lp->lpni_txqnob += msg->msg_len + sizeof(struct lnet_hdr_nid4); lp->lpni_txcredits--; if (lp->lpni_txcredits < lp->lpni_mintxcredits) @@ -928,6 +930,12 @@ lnet_post_send_locked(struct lnet_msg *msg, int do_send) } } + if (unlikely(!list_empty(&the_lnet.ln_delay_rules)) && + lnet_delay_rule_match_locked(&msg->msg_hdr, msg)) { + msg->msg_tx_delayed = 1; + return LNET_CREDIT_WAIT; + } + /* unset the tx_delay flag as we're going to send it now */ msg->msg_tx_delayed = 0; @@ -1026,7 +1034,7 @@ lnet_post_routed_recv_locked(struct lnet_msg *msg, int do_recv) } LASSERT(!list_empty(&rbp->rbp_bufs)); - rb = list_entry(rbp->rbp_bufs.next, struct lnet_rtrbuf, rb_list); + rb = list_first_entry(&rbp->rbp_bufs, struct lnet_rtrbuf, rb_list); list_del(&rb->rb_list); msg->msg_niov = rbp->rbp_npages; @@ -1066,8 +1074,8 @@ lnet_return_tx_credits_locked(struct 
lnet_msg *msg) tq->tq_credits++; atomic_inc(&ni->ni_tx_credits); if (tq->tq_credits <= 0) { - msg2 = list_entry(tq->tq_delayed.next, - struct lnet_msg, msg_list); + msg2 = list_first_entry(&tq->tq_delayed, + struct lnet_msg, msg_list); list_del(&msg2->msg_list); LASSERT(msg2->msg_txni == ni); @@ -1086,15 +1094,16 @@ lnet_return_tx_credits_locked(struct lnet_msg *msg) LASSERT((txpeer->lpni_txcredits < 0) == !list_empty(&txpeer->lpni_txq)); - txpeer->lpni_txqnob -= msg->msg_len + sizeof(struct lnet_hdr); + txpeer->lpni_txqnob -= msg->msg_len + + sizeof(struct lnet_hdr_nid4); LASSERT(txpeer->lpni_txqnob >= 0); txpeer->lpni_txcredits++; if (txpeer->lpni_txcredits <= 0) { int msg2_cpt; - msg2 = list_entry(txpeer->lpni_txq.next, - struct lnet_msg, msg_list); + msg2 = list_first_entry(&txpeer->lpni_txq, + struct lnet_msg, msg_list); list_del(&msg2->msg_list); spin_unlock(&txpeer->lpni_lock); @@ -1146,8 +1155,8 @@ lnet_schedule_blocked_locked(struct lnet_rtrbufpool *rbp) if (list_empty(&rbp->rbp_msgs)) return; - msg = list_entry(rbp->rbp_msgs.next, - struct lnet_msg, msg_list); + msg = list_first_entry(&rbp->rbp_msgs, + struct lnet_msg, msg_list); list_del(&msg->msg_list); (void)lnet_post_routed_recv_locked(msg, 1); @@ -1251,8 +1260,8 @@ routing_off: } else if (!list_empty(&lp->lp_rtrq)) { int msg2_cpt; - msg2 = list_entry(lp->lp_rtrq.next, - struct lnet_msg, msg_list); + msg2 = list_first_entry(&lp->lp_rtrq, + struct lnet_msg, msg_list); list_del(&msg2->msg_list); msg2_cpt = msg2->msg_rx_cpt; spin_unlock(&lp->lp_lock); @@ -1316,10 +1325,10 @@ lnet_select_peer_ni(struct lnet_ni *best_ni, lnet_nid_t dst_nid, * preferred, then let's use it */ if (best_ni) { - lpni_is_preferred = lnet_peer_is_pref_nid_locked(lpni, - best_ni->ni_nid); + lpni_is_preferred = lnet_peer_is_pref_nid_locked( + lpni, &best_ni->ni_nid); CDEBUG(D_NET, "%s lpni_is_preferred = %d\n", - libcfs_nid2str(best_ni->ni_nid), + libcfs_nidstr(&best_ni->ni_nid), lpni_is_preferred); } else { lpni_is_preferred = false; @@ -1330,8 +1339,8 @@ lnet_select_peer_ni(struct lnet_ni *best_ni, lnet_nid_t dst_nid, if (best_lpni) CDEBUG(D_NET, "n:[%s, %s] h:[%d, %d] p:[%d, %d] c:[%d, %d] s:[%d, %d]\n", - libcfs_nid2str(lpni->lpni_nid), - libcfs_nid2str(best_lpni->lpni_nid), + libcfs_nidstr(&lpni->lpni_nid), + libcfs_nidstr(&best_lpni->lpni_nid), lpni_healthv, best_lpni_healthv, lpni_sel_prio, best_sel_prio, lpni->lpni_txcredits, best_lpni_credits, @@ -1400,7 +1409,7 @@ select_lpni: } CDEBUG(D_NET, "sd_best_lpni = %s\n", - libcfs_nid2str(best_lpni->lpni_nid)); + libcfs_nidstr(&best_lpni->lpni_nid)); return best_lpni; } @@ -1495,7 +1504,7 @@ lnet_find_route_locked(struct lnet_remotenet *rnet, __u32 src_net, struct lnet_route *route; int rc; bool best_rte_is_preferred = false; - lnet_nid_t gw_pnid; + struct lnet_nid *gw_pnid; CDEBUG(D_NET, "Looking up a route to %s, from %s\n", libcfs_net2str(rnet->lrn_net), libcfs_net2str(src_net)); @@ -1504,7 +1513,7 @@ lnet_find_route_locked(struct lnet_remotenet *rnet, __u32 src_net, list_for_each_entry(route, &rnet->lrn_routes, lr_list) { if (!lnet_is_route_alive(route)) continue; - gw_pnid = route->lr_gateway->lp_primary_nid; + gw_pnid = &route->lr_gateway->lp_primary_nid; /* no protection on below fields, but it's harmless */ if (last_route && (last_route->lr_seq - route->lr_seq < 0)) @@ -1528,7 +1537,7 @@ lnet_find_route_locked(struct lnet_remotenet *rnet, __u32 src_net, if (!lpni) { CDEBUG(D_NET, "Gateway %s does not have a peer NI on net %s\n", - libcfs_nid2str(gw_pnid), + libcfs_nidstr(gw_pnid), 
libcfs_net2str(src_net)); continue; } @@ -1544,7 +1553,7 @@ lnet_find_route_locked(struct lnet_remotenet *rnet, __u32 src_net, best_gw_ni = lpni; best_rte_is_preferred = true; CDEBUG(D_NET, "preferred gw = %s\n", - libcfs_nid2str(gw_pnid)); + libcfs_nidstr(gw_pnid)); continue; } else if ((!rc) && best_rte_is_preferred) /* The best route we found so far is in the preferred @@ -1572,7 +1581,7 @@ lnet_find_route_locked(struct lnet_remotenet *rnet, __u32 src_net, if (!lpni) { CDEBUG(D_NET, "Gateway %s does not have a peer NI on net %s\n", - libcfs_nid2str(gw_pnid), + libcfs_nidstr(gw_pnid), libcfs_net2str(src_net)); continue; } @@ -1600,16 +1609,40 @@ lnet_find_route_locked(struct lnet_remotenet *rnet, __u32 src_net, return best_route; } +static inline unsigned int +lnet_dev_prio_of_md(struct lnet_ni *ni, unsigned int dev_idx) +{ + if (dev_idx == UINT_MAX) + return UINT_MAX; + + if (!ni || !ni->ni_net || !ni->ni_net->net_lnd || + !ni->ni_net->net_lnd->lnd_get_dev_prio) + return UINT_MAX; + + return ni->ni_net->net_lnd->lnd_get_dev_prio(ni, dev_idx); +} + static struct lnet_ni * lnet_get_best_ni(struct lnet_net *local_net, struct lnet_ni *best_ni, struct lnet_peer *peer, struct lnet_peer_net *peer_net, - int md_cpt) + struct lnet_msg *msg, int md_cpt) { - struct lnet_ni *ni = NULL; + struct lnet_libmd *md = msg->msg_md; + unsigned int offset = msg->msg_offset; unsigned int shortest_distance; + struct lnet_ni *ni = NULL; int best_credits; int best_healthv; __u32 best_sel_prio; + unsigned int best_dev_prio; + unsigned int dev_idx = UINT_MAX; + bool gpu = md ? (md->md_flags & LNET_MD_FLAG_GPU) : false; + + if (gpu) { + struct page *page = lnet_get_first_page(md, offset); + + dev_idx = lnet_get_dev_idx(page); + } /* * If there is no peer_ni that we can send to on this network, @@ -1621,9 +1654,11 @@ lnet_get_best_ni(struct lnet_net *local_net, struct lnet_ni *best_ni, if (best_ni == NULL) { best_sel_prio = LNET_MAX_SELECTION_PRIORITY; shortest_distance = UINT_MAX; + best_dev_prio = UINT_MAX; best_credits = INT_MIN; best_healthv = 0; } else { + best_dev_prio = lnet_dev_prio_of_md(best_ni, dev_idx); shortest_distance = cfs_cpt_distance(lnet_cpt_table(), md_cpt, best_ni->ni_dev_cpt); best_credits = atomic_read(&best_ni->ni_tx_credits); @@ -1637,6 +1672,7 @@ lnet_get_best_ni(struct lnet_net *local_net, struct lnet_ni *best_ni, int ni_healthv; int ni_fatal; __u32 ni_sel_prio; + unsigned int ni_dev_prio; ni_credits = atomic_read(&ni->ni_tx_credits); ni_healthv = atomic_read(&ni->ni_healthv); @@ -1652,28 +1688,30 @@ lnet_get_best_ni(struct lnet_net *local_net, struct lnet_ni *best_ni, md_cpt, ni->ni_dev_cpt); + ni_dev_prio = lnet_dev_prio_of_md(ni, dev_idx); + /* * All distances smaller than the NUMA range * are treated equally. */ - if (distance < lnet_numa_range) + if (!gpu && distance < lnet_numa_range) distance = lnet_numa_range; /* - * Select on health, shorter distance, available - * credits, then round-robin. + * Select on health, selection policy, direct dma prio, + * shorter distance, available credits, then round-robin. */ if (ni_fatal) continue; if (best_ni) - CDEBUG(D_NET, "compare ni %s [c:%d, d:%d, s:%d, p:%u] with best_ni %s [c:%d, d:%d, s:%d, p:%u]\n", - libcfs_nid2str(ni->ni_nid), ni_credits, distance, - ni->ni_seq, ni_sel_prio, - (best_ni) ? 
libcfs_nid2str(best_ni->ni_nid) + CDEBUG(D_NET, "compare ni %s [c:%d, d:%d, s:%d, p:%u, g:%u, h:%d] with best_ni %s [c:%d, d:%d, s:%d, p:%u, g:%u, h:%d]\n", + libcfs_nidstr(&ni->ni_nid), ni_credits, distance, + ni->ni_seq, ni_sel_prio, ni_dev_prio, ni_healthv, + (best_ni) ? libcfs_nidstr(&best_ni->ni_nid) : "not selected", best_credits, shortest_distance, (best_ni) ? best_ni->ni_seq : 0, - best_sel_prio); + best_sel_prio, best_dev_prio, best_healthv); else goto select_ni; @@ -1687,6 +1725,11 @@ lnet_get_best_ni(struct lnet_net *local_net, struct lnet_ni *best_ni, else if (ni_sel_prio < best_sel_prio) goto select_ni; + if (ni_dev_prio > best_dev_prio) + continue; + else if (ni_dev_prio < best_dev_prio) + goto select_ni; + if (distance > shortest_distance) continue; else if (distance < shortest_distance) @@ -1702,6 +1745,7 @@ lnet_get_best_ni(struct lnet_net *local_net, struct lnet_ni *best_ni, select_ni: best_sel_prio = ni_sel_prio; + best_dev_prio = ni_dev_prio; shortest_distance = distance; best_healthv = ni_healthv; best_ni = ni; @@ -1709,11 +1753,24 @@ select_ni: } CDEBUG(D_NET, "selected best_ni %s\n", - (best_ni) ? libcfs_nid2str(best_ni->ni_nid) : "no selection"); + (best_ni) ? libcfs_nidstr(&best_ni->ni_nid) : "no selection"); return best_ni; } +static bool +lnet_reserved_msg(struct lnet_msg *msg) +{ + if (msg->msg_type == LNET_MSG_PUT) { + if (msg->msg_hdr.msg.put.ptl_index == LNET_RESERVED_PORTAL) + return true; + } else if (msg->msg_type == LNET_MSG_GET) { + if (msg->msg_hdr.msg.get.ptl_index == LNET_RESERVED_PORTAL) + return true; + } + return false; +} + /* * Traffic to the LNET_RESERVED_PORTAL may not trigger peer discovery, * because such traffic is required to perform discovery. We therefore @@ -1725,14 +1782,7 @@ select_ni: static bool lnet_msg_discovery(struct lnet_msg *msg) { - if (msg->msg_type == LNET_MSG_PUT) { - if (msg->msg_hdr.msg.put.ptl_index != LNET_RESERVED_PORTAL) - return true; - } else if (msg->msg_type == LNET_MSG_GET) { - if (msg->msg_hdr.msg.get.ptl_index != LNET_RESERVED_PORTAL) - return true; - } - return false; + return !(lnet_reserved_msg(msg) || lnet_msg_is_response(msg)); } #define SRC_SPEC 0x0001 @@ -1768,10 +1818,9 @@ lnet_handle_lo_send(struct lnet_send_data *sd) /* No send credit hassles with LOLND */ lnet_ni_addref_locked(the_lnet.ln_loni, cpt); - msg->msg_hdr.dest_nid = cpu_to_le64(the_lnet.ln_loni->ni_nid); + msg->msg_hdr.dest_nid = the_lnet.ln_loni->ni_nid; if (!msg->msg_routing) - msg->msg_hdr.src_nid = - cpu_to_le64(the_lnet.ln_loni->ni_nid); + msg->msg_hdr.src_nid = the_lnet.ln_loni->ni_nid; msg->msg_target.nid = the_lnet.ln_loni->ni_nid; lnet_msg_commit(msg, cpt); msg->msg_txni = the_lnet.ln_loni; @@ -1790,23 +1839,26 @@ lnet_handle_send(struct lnet_send_data *sd) __u32 send_case = sd->sd_send_case; int rc; __u32 routing = send_case & REMOTE_DST; - struct lnet_rsp_tracker *rspt; + struct lnet_rsp_tracker *rspt; /* Increment sequence number of the selected peer, peer net, * local ni and local net so that we pick the next ones * in Round Robin. 
*/ - best_lpni->lpni_seq++; - best_lpni->lpni_peer_net->lpn_seq++; - best_ni->ni_seq++; - best_ni->ni_net->net_seq++; + best_lpni->lpni_peer_net->lpn_peer->lp_send_seq++; + best_lpni->lpni_peer_net->lpn_seq = + best_lpni->lpni_peer_net->lpn_peer->lp_send_seq; + best_lpni->lpni_seq = best_lpni->lpni_peer_net->lpn_seq; + the_lnet.ln_net_seq++; + best_ni->ni_net->net_seq = the_lnet.ln_net_seq; + best_ni->ni_seq = best_ni->ni_net->net_seq; CDEBUG(D_NET, "%s NI seq info: [%d:%d:%d:%u] %s LPNI seq info [%d:%d:%d:%u]\n", - libcfs_nid2str(best_ni->ni_nid), + libcfs_nidstr(&best_ni->ni_nid), best_ni->ni_seq, best_ni->ni_net->net_seq, atomic_read(&best_ni->ni_tx_credits), best_ni->ni_sel_priority, - libcfs_nid2str(best_lpni->lpni_nid), + libcfs_nidstr(&best_lpni->lpni_nid), best_lpni->lpni_seq, best_lpni->lpni_peer_net->lpn_seq, best_lpni->lpni_txcredits, best_lpni->lpni_sel_priority); @@ -1826,7 +1878,7 @@ lnet_handle_send(struct lnet_send_data *sd) * the configuration has changed. We don't have a hold on the best_ni * yet, and it may have vanished. */ - cpt2 = lnet_cpt_of_nid_locked(best_lpni->lpni_nid, best_ni); + cpt2 = lnet_cpt_of_nid_locked(&best_lpni->lpni_nid, best_ni); if (sd->sd_cpt != cpt2) { __u32 seq = lnet_get_dlc_seq_locked(); lnet_net_unlock(sd->sd_cpt); @@ -1872,7 +1924,7 @@ lnet_handle_send(struct lnet_send_data *sd) * originator and set it here. */ if (!msg->msg_routing) - msg->msg_hdr.src_nid = cpu_to_le64(msg->msg_txni->ni_nid); + msg->msg_hdr.src_nid = msg->msg_txni->ni_nid; if (routing) { msg->msg_target_is_router = 1; @@ -1887,13 +1939,18 @@ lnet_handle_send(struct lnet_send_data *sd) * lnet_select_pathway() function and is never changed. * It's safe to use it here. */ - msg->msg_hdr.dest_nid = cpu_to_le64(final_dst_lpni->lpni_nid); + final_dst_lpni->lpni_peer_net->lpn_peer->lp_send_seq++; + final_dst_lpni->lpni_peer_net->lpn_seq = + final_dst_lpni->lpni_peer_net->lpn_peer->lp_send_seq; + final_dst_lpni->lpni_seq = + final_dst_lpni->lpni_peer_net->lpn_seq; + msg->msg_hdr.dest_nid = final_dst_lpni->lpni_nid; } else { /* * if we're not routing set the dest_nid to the best peer * ni NID that we picked earlier in the algorithm. 
*/ - msg->msg_hdr.dest_nid = cpu_to_le64(msg->msg_txpeer->lpni_nid); + msg->msg_hdr.dest_nid = msg->msg_txpeer->lpni_nid; } /* @@ -1903,9 +1960,10 @@ lnet_handle_send(struct lnet_send_data *sd) if (msg->msg_md) { rspt = msg->msg_md->md_rspt_ptr; if (rspt) { - rspt->rspt_next_hop_nid = msg->msg_txpeer->lpni_nid; + rspt->rspt_next_hop_nid = + msg->msg_txpeer->lpni_nid; CDEBUG(D_NET, "rspt_next_hop_nid = %s\n", - libcfs_nid2str(rspt->rspt_next_hop_nid)); + libcfs_nidstr(&rspt->rspt_next_hop_nid)); } } @@ -1913,13 +1971,13 @@ lnet_handle_send(struct lnet_send_data *sd) if (!rc) CDEBUG(D_NET, "TRACE: %s(%s:%s) -> %s(%s:%s) %s : %s try# %d\n", - libcfs_nid2str(msg->msg_hdr.src_nid), - libcfs_nid2str(msg->msg_txni->ni_nid), - libcfs_nid2str(sd->sd_src_nid), - libcfs_nid2str(msg->msg_hdr.dest_nid), - libcfs_nid2str(sd->sd_dst_nid), - libcfs_nid2str(msg->msg_txpeer->lpni_nid), - libcfs_nid2str(sd->sd_rtr_nid), + libcfs_nidstr(&msg->msg_hdr.src_nid), + libcfs_nidstr(&msg->msg_txni->ni_nid), + libcfs_nidstr(&sd->sd_src_nid), + libcfs_nidstr(&msg->msg_hdr.dest_nid), + libcfs_nidstr(&sd->sd_dst_nid), + libcfs_nidstr(&msg->msg_txpeer->lpni_nid), + libcfs_nidstr(&sd->sd_rtr_nid), lnet_msgtyp2str(msg->msg_type), msg->msg_retry_count); return rc; @@ -1932,9 +1990,9 @@ lnet_set_non_mr_pref_nid(struct lnet_peer_ni *lpni, struct lnet_ni *lni, if (!lnet_peer_is_multi_rail(lpni->lpni_peer_net->lpn_peer) && !lnet_msg_is_response(msg) && lpni->lpni_pref_nnids == 0) { CDEBUG(D_NET, "Setting preferred local NID %s on NMR peer %s\n", - libcfs_nid2str(lni->ni_nid), - libcfs_nid2str(lpni->lpni_nid)); - lnet_peer_ni_set_non_mr_pref_nid(lpni, lni->ni_nid); + libcfs_nidstr(&lni->ni_nid), + libcfs_nidstr(&lpni->lpni_nid)); + lnet_peer_ni_set_non_mr_pref_nid(lpni, &lni->ni_nid); } } @@ -1951,11 +2009,11 @@ lnet_handle_spec_local_nmr_dst(struct lnet_send_data *sd) /* the destination lpni is set before we get here. */ /* find local NI */ - sd->sd_best_ni = lnet_nid2ni_locked(sd->sd_src_nid, sd->sd_cpt); + sd->sd_best_ni = lnet_nid_to_ni_locked(&sd->sd_src_nid, sd->sd_cpt); if (!sd->sd_best_ni) { - CERROR("Can't send to %s: src %s is not a " - "local nid\n", libcfs_nid2str(sd->sd_dst_nid), - libcfs_nid2str(sd->sd_src_nid)); + CERROR("Can't send to %s: src %s is not a local nid\n", + libcfs_nidstr(&sd->sd_dst_nid), + libcfs_nidstr(&sd->sd_src_nid)); return -EINVAL; } @@ -1978,22 +2036,23 @@ lnet_handle_spec_local_nmr_dst(struct lnet_send_data *sd) static int lnet_handle_spec_local_mr_dst(struct lnet_send_data *sd) { - sd->sd_best_ni = lnet_nid2ni_locked(sd->sd_src_nid, sd->sd_cpt); + sd->sd_best_ni = lnet_nid_to_ni_locked(&sd->sd_src_nid, sd->sd_cpt); if (!sd->sd_best_ni) { - CERROR("Can't send to %s: src %s is not a " - "local nid\n", libcfs_nid2str(sd->sd_dst_nid), - libcfs_nid2str(sd->sd_src_nid)); + CERROR("Can't send to %s: src %s is not a local nid\n", + libcfs_nidstr(&sd->sd_dst_nid), + libcfs_nidstr(&sd->sd_src_nid)); return -EINVAL; } if (sd->sd_best_lpni && - sd->sd_best_lpni->lpni_nid == the_lnet.ln_loni->ni_nid) + nid_same(&sd->sd_best_lpni->lpni_nid, + &the_lnet.ln_loni->ni_nid)) return lnet_handle_lo_send(sd); else if (sd->sd_best_lpni) return lnet_handle_send(sd); CERROR("can't send to %s. 
no NI on %s\n", - libcfs_nid2str(sd->sd_dst_nid), + libcfs_nidstr(&sd->sd_dst_nid), libcfs_net2str(sd->sd_best_ni->ni_net->net_id)); return -EHOSTUNREACH; @@ -2003,6 +2062,7 @@ struct lnet_ni * lnet_find_best_ni_on_spec_net(struct lnet_ni *cur_best_ni, struct lnet_peer *peer, struct lnet_peer_net *peer_net, + struct lnet_msg *msg, int cpt) { struct lnet_net *local_net; @@ -2021,7 +2081,7 @@ lnet_find_best_ni_on_spec_net(struct lnet_ni *cur_best_ni, * 3. Round Robin */ best_ni = lnet_get_best_ni(local_net, cur_best_ni, - peer, peer_net, cpt); + peer, peer_net, msg, cpt); return best_ni; } @@ -2031,6 +2091,7 @@ lnet_initiate_peer_discovery(struct lnet_peer_ni *lpni, struct lnet_msg *msg, int cpt) { struct lnet_peer *peer; + struct lnet_peer_ni *new_lpni; int rc; lnet_peer_ni_addref_locked(lpni); @@ -2052,36 +2113,52 @@ lnet_initiate_peer_discovery(struct lnet_peer_ni *lpni, struct lnet_msg *msg, lnet_peer_ni_decref_locked(lpni); return rc; } - /* The peer may have changed. */ - peer = lpni->lpni_peer_net->lpn_peer; + + new_lpni = lnet_peer_ni_find_locked(&lpni->lpni_nid); + if (!new_lpni) { + lnet_peer_ni_decref_locked(lpni); + return -ENOENT; + } + + peer = new_lpni->lpni_peer_net->lpn_peer; spin_lock(&peer->lp_lock); - if (lnet_peer_is_uptodate_locked(peer)) { + if (lpni == new_lpni && lnet_peer_is_uptodate_locked(peer)) { + /* The peer NI did not change and the peer is up to date. + * Nothing more to do. + */ spin_unlock(&peer->lp_lock); lnet_peer_ni_decref_locked(lpni); + lnet_peer_ni_decref_locked(new_lpni); return 0; } - /* queue message and return */ + spin_unlock(&peer->lp_lock); + + /* Either the peer NI changed during discovery, or the peer isn't up + * to date. In both cases we want to queue the message on the + * (possibly new) peer's pending queue and queue the peer for discovery + */ msg->msg_sending = 0; msg->msg_txpeer = NULL; - list_add_tail(&msg->msg_list, &peer->lp_dc_pendq); - spin_unlock(&peer->lp_lock); + lnet_net_unlock(cpt); + lnet_peer_queue_message(peer, msg); + lnet_net_lock(cpt); lnet_peer_ni_decref_locked(lpni); + lnet_peer_ni_decref_locked(new_lpni); CDEBUG(D_NET, "msg %p delayed. %s pending discovery\n", - msg, libcfs_nid2str(peer->lp_primary_nid)); + msg, libcfs_nidstr(&peer->lp_primary_nid)); return LNET_DC_WAIT; } static int lnet_handle_find_routed_path(struct lnet_send_data *sd, - lnet_nid_t dst_nid, + struct lnet_nid *dst_nid, struct lnet_peer_ni **gw_lpni, struct lnet_peer **gw_peer) { int rc; - __u32 local_lnet; struct lnet_peer *gw; struct lnet_peer *lp; struct lnet_peer_net *lpn; @@ -2092,45 +2169,54 @@ lnet_handle_find_routed_path(struct lnet_send_data *sd, struct lnet_peer_ni *lpni = NULL; struct lnet_peer_ni *gwni = NULL; bool route_found = false; - lnet_nid_t src_nid = (sd->sd_src_nid != LNET_NID_ANY) ? sd->sd_src_nid : - (sd->sd_best_ni != NULL) ? sd->sd_best_ni->ni_nid : - LNET_NID_ANY; + struct lnet_nid *src_nid = + !LNET_NID_IS_ANY(&sd->sd_src_nid) || !sd->sd_best_ni + ? &sd->sd_src_nid + : &sd->sd_best_ni->ni_nid; int best_lpn_healthv = 0; __u32 best_lpn_sel_prio = LNET_MAX_SELECTION_PRIORITY; - CDEBUG(D_NET, "using src nid %s for route restriction\n", - libcfs_nid2str(src_nid)); + CDEBUG(D_NET, "%s route (%s) from local NI %s to destination %s\n", + LNET_NID_IS_ANY(&sd->sd_rtr_nid) ? "Lookup" : "Specified", + libcfs_nidstr(&sd->sd_rtr_nid), libcfs_nidstr(src_nid), + libcfs_nidstr(&sd->sd_dst_nid)); /* If a router nid was specified then we are replying to a GET or * sending an ACK. 
In this case we use the gateway associated with the * specified router nid. */ - if (sd->sd_rtr_nid != LNET_NID_ANY) { - gwni = lnet_find_peer_ni_locked(sd->sd_rtr_nid); + if (!LNET_NID_IS_ANY(&sd->sd_rtr_nid)) { + gwni = lnet_peer_ni_find_locked(&sd->sd_rtr_nid); if (gwni) { gw = gwni->lpni_peer_net->lpn_peer; lnet_peer_ni_decref_locked(gwni); - if (gw->lp_rtr_refcount) { - local_lnet = LNET_NIDNET(sd->sd_rtr_nid); + if (gw->lp_rtr_refcount) route_found = true; - } } else { CWARN("No peer NI for gateway %s. Attempting to find an alternative route.\n", - libcfs_nid2str(sd->sd_rtr_nid)); + libcfs_nidstr(&sd->sd_rtr_nid)); } } if (!route_found) { - if (sd->sd_msg->msg_routing) { + if (sd->sd_msg->msg_routing || !LNET_NID_IS_ANY(src_nid)) { /* If I'm routing this message then I need to find the * next hop based on the destination NID + * + * We also find next hop based on the destination NID + * if the source NI was specified */ - best_rnet = lnet_find_rnet_locked(LNET_NIDNET(sd->sd_dst_nid)); + best_rnet = lnet_find_rnet_locked(LNET_NID_NET(&sd->sd_dst_nid)); if (!best_rnet) { - CERROR("Unable to route message to %s - Route table may be misconfigured\n", - libcfs_nid2str(sd->sd_dst_nid)); + CERROR("Unable to send message from %s to %s - Route table may be misconfigured\n", + (src_nid && LNET_NID_IS_ANY(src_nid)) ? + "any local NI" : + libcfs_nidstr(src_nid), + libcfs_nidstr(&sd->sd_dst_nid)); return -EHOSTUNREACH; } + CDEBUG(D_NET, "best_rnet %s\n", + libcfs_net2str(best_rnet->lrn_net)); } else { /* we've already looked up the initial lpni using * dst_nid @@ -2147,10 +2233,18 @@ lnet_handle_find_routed_path(struct lnet_send_data *sd, if (!rnet) continue; - if (!best_lpn) { - best_lpn = lpn; - best_rnet = rnet; - } + if (!best_lpn) + goto use_lpn; + else + CDEBUG(D_NET, "n[%s, %s] h[%d, %d], p[%u, %u], s[%d, %d]\n", + libcfs_net2str(lpn->lpn_net_id), + libcfs_net2str(best_lpn->lpn_net_id), + lpn->lpn_healthv, + best_lpn->lpn_healthv, + lpn->lpn_sel_priority, + best_lpn->lpn_sel_priority, + lpn->lpn_seq, + best_lpn->lpn_seq); /* select the preferred peer net */ if (best_lpn_healthv > lpn->lpn_healthv) @@ -2174,17 +2268,20 @@ use_lpn: if (!best_lpn) { CERROR("peer %s has no available nets\n", - libcfs_nid2str(sd->sd_dst_nid)); + libcfs_nidstr(&sd->sd_dst_nid)); return -EHOSTUNREACH; } + CDEBUG(D_NET, "selected best_lpn %s\n", + libcfs_net2str(best_lpn->lpn_net_id)); + sd->sd_best_lpni = lnet_find_best_lpni(sd->sd_best_ni, - sd->sd_dst_nid, + lnet_nid_to_nid4(&sd->sd_dst_nid), lp, best_lpn->lpn_net_id); if (!sd->sd_best_lpni) { CERROR("peer %s is unreachable\n", - libcfs_nid2str(sd->sd_dst_nid)); + libcfs_nidstr(&sd->sd_dst_nid)); return -EHOSTUNREACH; } @@ -2192,12 +2289,6 @@ use_lpn: * NI's so update the final destination we selected */ sd->sd_final_dst_lpni = sd->sd_best_lpni; - - /* Increment the sequence number of the remote lpni so - * we can round robin over the different interfaces of - * the remote lpni - */ - sd->sd_best_lpni->lpni_seq++; } /* @@ -2209,64 +2300,70 @@ use_lpn: * when adding a route. */ best_route = lnet_find_route_locked(best_rnet, - LNET_NIDNET(src_nid), + LNET_NID_NET(src_nid), sd->sd_best_lpni, &last_route, &gwni); if (!best_route) { CERROR("no route to %s from %s\n", - libcfs_nid2str(dst_nid), - libcfs_nid2str(src_nid)); + libcfs_nidstr(dst_nid), + libcfs_nidstr(src_nid)); return -EHOSTUNREACH; } if (!gwni) { CERROR("Internal Error. 
Route expected to %s from %s\n", - libcfs_nid2str(dst_nid), - libcfs_nid2str(src_nid)); + libcfs_nidstr(dst_nid), + libcfs_nidstr(src_nid)); return -EFAULT; } gw = best_route->lr_gateway; LASSERT(gw == gwni->lpni_peer_net->lpn_peer); - local_lnet = best_route->lr_lnet; } /* - * Discover this gateway if it hasn't already been discovered. - * This means we might delay the message until discovery has - * completed + * If the router checker is not active then discover the gateway here. + * This ensures we are able to take advantage of multi-rail routing, but + * if the router checker is active then we do not unecessarily delay + * messages while the gateway is being checked by the dedicated monitor + * thread. + * + * NB: We're only checking the alive_router_check_interval here, rather + * than calling lnet_router_checker_active(), because the other + * conditions that are checked by that function are either + * irrelevant (the_lnet.ln_routing) or must be true (list of routers + * is not empty) */ - sd->sd_msg->msg_src_nid_param = sd->sd_src_nid; - rc = lnet_initiate_peer_discovery(gwni, sd->sd_msg, sd->sd_cpt); - if (rc) - return rc; - - if (!sd->sd_best_ni) - sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL, gw, - lnet_peer_get_net_locked(gw, - local_lnet), - sd->sd_md_cpt); + if (alive_router_check_interval <= 0) { + rc = lnet_initiate_peer_discovery(gwni, sd->sd_msg, sd->sd_cpt); + if (rc) + return rc; + } if (!sd->sd_best_ni) { - CERROR("Internal Error. Expected local ni on %s but non found :%s\n", - libcfs_net2str(local_lnet), - libcfs_nid2str(sd->sd_src_nid)); - return -EFAULT; + lpn = gwni->lpni_peer_net; + sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL, gw, lpn, + sd->sd_msg, + sd->sd_md_cpt); + if (!sd->sd_best_ni) { + CERROR("Internal Error. 
Expected local ni on %s but non found: %s\n", + libcfs_net2str(lpn->lpn_net_id), + libcfs_nidstr(&sd->sd_src_nid)); + return -EFAULT; + } } *gw_lpni = gwni; *gw_peer = gw; /* - * increment the sequence numbers since now we're sure we're - * going to use this path + * increment the sequence number since now we're sure we're + * going to use this route */ - if (sd->sd_rtr_nid == LNET_NID_ANY) { + if (LNET_NID_IS_ANY(&sd->sd_rtr_nid)) { LASSERT(best_route && last_route); best_route->lr_seq = last_route->lr_seq + 1; - if (best_lpn) - best_lpn->lpn_seq++; } return 0; @@ -2296,16 +2393,16 @@ lnet_handle_spec_router_dst(struct lnet_send_data *sd) struct lnet_peer *gw_peer = NULL; /* find local NI */ - sd->sd_best_ni = lnet_nid2ni_locked(sd->sd_src_nid, sd->sd_cpt); + sd->sd_best_ni = lnet_nid_to_ni_locked(&sd->sd_src_nid, sd->sd_cpt); if (!sd->sd_best_ni) { - CERROR("Can't send to %s: src %s is not a " - "local nid\n", libcfs_nid2str(sd->sd_dst_nid), - libcfs_nid2str(sd->sd_src_nid)); + CERROR("Can't send to %s: src %s is not a local nid\n", + libcfs_nidstr(&sd->sd_dst_nid), + libcfs_nidstr(&sd->sd_src_nid)); return -EINVAL; } - rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw_lpni, - &gw_peer); + rc = lnet_handle_find_routed_path(sd, &sd->sd_dst_nid, + &gw_lpni, &gw_peer); if (rc) return rc; @@ -2329,7 +2426,7 @@ lnet_handle_spec_router_dst(struct lnet_send_data *sd) struct lnet_ni * lnet_find_best_ni_on_local_net(struct lnet_peer *peer, int md_cpt, - bool discovery) + struct lnet_msg *msg, bool discovery) { struct lnet_peer_net *lpn = NULL; struct lnet_peer_net *best_lpn = NULL; @@ -2343,7 +2440,15 @@ lnet_find_best_ni_on_local_net(struct lnet_peer *peer, int md_cpt, __u32 lpn_sel_prio; __u32 best_net_sel_prio = LNET_MAX_SELECTION_PRIORITY; __u32 net_sel_prio; - bool exit = false; + + /* if this is a discovery message and lp_disc_net_id is + * specified then use that net to send the discovery on. + */ + if (discovery && peer->lp_disc_net_id) { + best_lpn = lnet_peer_get_net_locked(peer, peer->lp_disc_net_id); + if (best_lpn && lnet_get_net_locked(best_lpn->lpn_net_id)) + goto select_best_ni; + } /* * The peer can have multiple interfaces, some of them can be on @@ -2366,18 +2471,25 @@ lnet_find_best_ni_on_local_net(struct lnet_peer *peer, int md_cpt, net_healthv = lnet_get_net_healthv_locked(net); net_sel_prio = net->net_sel_priority; - /* - * if this is a discovery message and lp_disc_net_id is - * specified then use that net to send the discovery on. 
- */ - if (peer->lp_disc_net_id == lpn->lpn_net_id && - discovery) { - exit = true; - goto select_lpn; - } - if (!best_lpn) goto select_lpn; + else + CDEBUG(D_NET, + "n[%s, %s] ph[%d, %d], pp[%u, %u], nh[%d, %d], np[%u, %u], ps[%u, %u], ns[%u, %u]\n", + libcfs_net2str(lpn->lpn_net_id), + libcfs_net2str(best_lpn->lpn_net_id), + lpn->lpn_healthv, + best_lpn_healthv, + lpn_sel_prio, + best_lpn_sel_prio, + net_healthv, + best_net_healthv, + net_sel_prio, + best_net_sel_prio, + lpn->lpn_seq, + best_lpn->lpn_seq, + net->net_seq, + best_net->net_seq); /* always select the lpn with the best health */ if (best_lpn_healthv > lpn->lpn_healthv) @@ -2417,17 +2529,17 @@ select_lpn: best_lpn_sel_prio = lpn_sel_prio; best_lpn = lpn; best_net = net; - - if (exit) - break; } if (best_lpn) { /* Select the best NI on the same net as best_lpn chosen * above */ - best_ni = lnet_find_best_ni_on_spec_net(NULL, peer, - best_lpn, md_cpt); +select_best_ni: + CDEBUG(D_NET, "selected best_lpn %s\n", + libcfs_net2str(best_lpn->lpn_net_id)); + best_ni = lnet_find_best_ni_on_spec_net(NULL, peer, best_lpn, + msg, md_cpt); } return best_ni; @@ -2456,7 +2568,8 @@ lnet_find_existing_preferred_best_ni(struct lnet_peer_ni *lpni, int cpt) if (lpni_entry->lpni_pref_nnids == 0) continue; LASSERT(lpni_entry->lpni_pref_nnids == 1); - best_ni = lnet_nid2ni_locked(lpni_entry->lpni_pref.nid, cpt); + best_ni = lnet_nid_to_ni_locked(&lpni_entry->lpni_pref.nid, + cpt); break; } @@ -2468,7 +2581,6 @@ static int lnet_select_preferred_best_ni(struct lnet_send_data *sd) { struct lnet_ni *best_ni = NULL; - struct lnet_peer_ni *best_lpni = sd->sd_best_lpni; /* * We must use a consistent source address when sending to a @@ -2479,24 +2591,27 @@ lnet_select_preferred_best_ni(struct lnet_send_data *sd) * * So we need to pick the NI the peer prefers for this * particular network. + * + * An exception is traffic on LNET_RESERVED_PORTAL. Internal LNet + * traffic doesn't care which source NI is used, and we don't actually + * want to restrict local recovery pings to a single source NI. */ + if (!lnet_reserved_msg(sd->sd_msg)) + best_ni = lnet_find_existing_preferred_best_ni(sd->sd_best_lpni, + sd->sd_cpt); - best_ni = lnet_find_existing_preferred_best_ni(sd->sd_best_lpni, - sd->sd_cpt); - - /* if best_ni is still not set just pick one */ - if (!best_ni) { - best_ni = - lnet_find_best_ni_on_spec_net(NULL, sd->sd_peer, + if (!best_ni) + best_ni = lnet_find_best_ni_on_spec_net(NULL, sd->sd_peer, sd->sd_best_lpni->lpni_peer_net, + sd->sd_msg, sd->sd_md_cpt); - /* If there is no best_ni we don't have a route */ - if (!best_ni) { - CERROR("no path to %s from net %s\n", - libcfs_nid2str(best_lpni->lpni_nid), - libcfs_net2str(best_lpni->lpni_net->net_id)); - return -EHOSTUNREACH; - } + + /* If there is no best_ni we don't have a route */ + if (!best_ni) { + CERROR("no path to %s from net %s\n", + libcfs_nidstr(&sd->sd_best_lpni->lpni_nid), + libcfs_net2str(sd->sd_best_lpni->lpni_net->net_id)); + return -EHOSTUNREACH; } sd->sd_best_ni = best_ni; @@ -2531,10 +2646,9 @@ lnet_handle_any_local_nmr_dst(struct lnet_send_data *sd) * output a relevant message and fail the send */ if (!sd->sd_best_lpni) { - CERROR("Internal fault. Unable to send msg %s to %s. " - "NID not known\n", + CERROR("Internal fault. Unable to send msg %s to %s. 
NID not known\n", lnet_msgtyp2str(sd->sd_msg->msg_type), - libcfs_nid2str(sd->sd_dst_nid)); + libcfs_nidstr(&sd->sd_dst_nid)); return -EFAULT; } @@ -2545,10 +2659,11 @@ lnet_handle_any_local_nmr_dst(struct lnet_send_data *sd) sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL, sd->sd_peer, sd->sd_best_lpni->lpni_peer_net, + sd->sd_msg, sd->sd_md_cpt); if (!sd->sd_best_ni) { CERROR("Unable to forward message to %s. No local NI available\n", - libcfs_nid2str(sd->sd_dst_nid)); + libcfs_nidstr(&sd->sd_dst_nid)); rc = -EHOSTUNREACH; } } else @@ -2578,6 +2693,7 @@ lnet_handle_any_mr_dsta(struct lnet_send_data *sd) sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL, sd->sd_peer, sd->sd_best_lpni->lpni_peer_net, + sd->sd_msg, sd->sd_md_cpt); if (!sd->sd_best_ni) { @@ -2585,9 +2701,8 @@ lnet_handle_any_mr_dsta(struct lnet_send_data *sd) * We're not going to deal with not able to send * a response to the provided final destination */ - CERROR("Can't send response to %s. " - "No local NI available\n", - libcfs_nid2str(sd->sd_dst_nid)); + CERROR("Can't send response to %s. No local NI available\n", + libcfs_nidstr(&sd->sd_dst_nid)); return -EHOSTUNREACH; } @@ -2602,10 +2717,12 @@ lnet_handle_any_mr_dsta(struct lnet_send_data *sd) */ sd->sd_best_ni = lnet_find_best_ni_on_local_net(sd->sd_peer, sd->sd_md_cpt, + sd->sd_msg, lnet_msg_discovery(sd->sd_msg)); if (sd->sd_best_ni) { sd->sd_best_lpni = - lnet_find_best_lpni(sd->sd_best_ni, sd->sd_dst_nid, + lnet_find_best_lpni(sd->sd_best_ni, + lnet_nid_to_nid4(&sd->sd_dst_nid), sd->sd_peer, sd->sd_best_ni->ni_net->net_id); @@ -2616,7 +2733,8 @@ lnet_handle_any_mr_dsta(struct lnet_send_data *sd) * network */ if (sd->sd_best_lpni && - sd->sd_best_lpni->lpni_nid == the_lnet.ln_loni->ni_nid) { + nid_same(&sd->sd_best_lpni->lpni_nid, + &the_lnet.ln_loni->ni_nid)) { /* * in case we initially started with a routed * destination, let's reset to local @@ -2636,8 +2754,8 @@ lnet_handle_any_mr_dsta(struct lnet_send_data *sd) CERROR("Internal Error. Expected to have a best_lpni: " "%s -> %s\n", - libcfs_nid2str(sd->sd_src_nid), - libcfs_nid2str(sd->sd_dst_nid)); + libcfs_nidstr(&sd->sd_src_nid), + libcfs_nidstr(&sd->sd_dst_nid)); return -EFAULT; } @@ -2683,12 +2801,11 @@ lnet_handle_any_mr_dst(struct lnet_send_data *sd) struct lnet_peer_ni *gw; struct lnet_peer *gw_peer; - rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw, - &gw_peer); + rc = lnet_handle_find_routed_path( + sd, &sd->sd_dst_nid, &gw, &gw_peer); if (rc < 0) { - CERROR("Can't send response to %s. " - "No route available\n", - libcfs_nid2str(sd->sd_dst_nid)); + CERROR("Can't send response to %s. No route available\n", + libcfs_nidstr(&sd->sd_dst_nid)); return -EHOSTUNREACH; } else if (rc > 0) { return rc; @@ -2719,8 +2836,8 @@ lnet_handle_any_mr_dst(struct lnet_send_data *sd) * need to select the destination which we can route to and if * there are multiple, we need to round robin. */ - rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw_lpni, - &gw_peer); + rc = lnet_handle_find_routed_path(sd, &sd->sd_dst_nid, + &gw_lpni, &gw_peer); if (rc) return rc; @@ -2759,7 +2876,7 @@ lnet_handle_any_router_nmr_dst(struct lnet_send_data *sd) * find the router and that'll find the best NI if we didn't find * it already. 
*/ - rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw_lpni, + rc = lnet_handle_find_routed_path(sd, &sd->sd_dst_nid, &gw_lpni, &gw_peer); if (rc) return rc; @@ -2788,9 +2905,9 @@ lnet_handle_send_case_locked(struct lnet_send_data *sd) CDEBUG(D_NET, "Source %s%s to %s %s %s destination\n", (send_case & SRC_SPEC) ? "Specified: " : "ANY", - (send_case & SRC_SPEC) ? libcfs_nid2str(sd->sd_src_nid) : "", + (send_case & SRC_SPEC) ? libcfs_nidstr(&sd->sd_src_nid) : "", (send_case & MR_DST) ? "MR: " : "NMR: ", - libcfs_nid2str(sd->sd_dst_nid), + libcfs_nidstr(&sd->sd_dst_nid), (send_case & LOCAL_DST) ? "local" : "routed"); switch (send_case) { @@ -2821,8 +2938,10 @@ lnet_handle_send_case_locked(struct lnet_send_data *sd) } static int -lnet_select_pathway(lnet_nid_t src_nid, lnet_nid_t dst_nid, - struct lnet_msg *msg, lnet_nid_t rtr_nid) +lnet_select_pathway(struct lnet_nid *src_nid, + struct lnet_nid *dst_nid, + struct lnet_msg *msg, + struct lnet_nid *rtr_nid) { struct lnet_peer_ni *lpni; struct lnet_peer *peer; @@ -2859,7 +2978,7 @@ again: */ send_data.sd_msg = msg; send_data.sd_cpt = cpt; - if (dst_nid == LNET_NID_LO_0) { + if (nid_is_lo0(dst_nid)) { rc = lnet_handle_lo_send(&send_data); lnet_net_unlock(cpt); return rc; @@ -2870,7 +2989,7 @@ again: * created due to network traffic. This call will create the * peer->peer_net->peer_ni tree. */ - lpni = lnet_nid2peerni_locked(dst_nid, LNET_NID_ANY, cpt); + lpni = lnet_peerni_by_nid_locked(dst_nid, NULL, cpt); if (IS_ERR(lpni)) { lnet_net_unlock(cpt); return PTR_ERR(lpni); @@ -2884,8 +3003,14 @@ again: * continuing the same sequence of messages. Similarly, rtr_nid will * affect our choice of next hop. */ - msg->msg_src_nid_param = src_nid; - msg->msg_rtr_nid_param = rtr_nid; + if (src_nid) + msg->msg_src_nid_param = *src_nid; + else + msg->msg_src_nid_param = LNET_ANY_NID; + if (rtr_nid) + msg->msg_rtr_nid_param = *rtr_nid; + else + msg->msg_rtr_nid_param = LNET_ANY_NID; /* * If necessary, perform discovery on the peer that owns this peer_ni. @@ -2905,15 +3030,19 @@ again: /* * Identify the different send cases */ - if (src_nid == LNET_NID_ANY) + if (!src_nid || LNET_NID_IS_ANY(src_nid)) { send_case |= SRC_ANY; - else + if (lnet_get_net_locked(LNET_NID_NET(dst_nid))) + send_case |= LOCAL_DST; + else + send_case |= REMOTE_DST; + } else { send_case |= SRC_SPEC; - - if (lnet_get_net_locked(LNET_NIDNET(dst_nid))) - send_case |= LOCAL_DST; - else - send_case |= REMOTE_DST; + if (LNET_NID_NET(src_nid) == LNET_NID_NET(dst_nid)) + send_case |= LOCAL_DST; + else + send_case |= REMOTE_DST; + } final_hop = false; if (msg->msg_routing && (send_case & LOCAL_DST)) @@ -2930,13 +3059,13 @@ again: struct lnet_peer *src_lp; struct lnet_peer_ni *src_lpni; - src_lpni = lnet_nid2peerni_locked(msg->msg_hdr.src_nid, - LNET_NID_ANY, cpt); + src_lpni = lnet_peerni_by_nid_locked(&msg->msg_hdr.src_nid, + NULL, cpt); /* We don't fail the send if we hit any errors here. 
We'll just * try to send it via non-multi-rail criteria */ if (!IS_ERR(src_lpni)) { - /* Drop ref taken by lnet_nid2peerni_locked() */ + /* Drop ref taken by lnet_peerni_by_nid_locked() */ lnet_peer_ni_decref_locked(src_lpni); src_lp = lpni->lpni_peer_net->lpn_peer; if (lnet_peer_is_multi_rail(src_lp) && @@ -2968,9 +3097,15 @@ again: send_case |= SND_RESP; /* assign parameters to the send_data */ - send_data.sd_rtr_nid = rtr_nid; - send_data.sd_src_nid = src_nid; - send_data.sd_dst_nid = dst_nid; + if (rtr_nid) + send_data.sd_rtr_nid = *rtr_nid; + else + send_data.sd_rtr_nid = LNET_ANY_NID; + if (src_nid) + send_data.sd_src_nid = *src_nid; + else + send_data.sd_src_nid = LNET_ANY_NID; + send_data.sd_dst_nid = *dst_nid; send_data.sd_best_lpni = lpni; /* * keep a pointer to the final destination in case we're going to @@ -2998,16 +3133,12 @@ again: } int -lnet_send(lnet_nid_t src_nid, struct lnet_msg *msg, lnet_nid_t rtr_nid) +lnet_send(struct lnet_nid *src_nid, struct lnet_msg *msg, + struct lnet_nid *rtr_nid) { - lnet_nid_t dst_nid = msg->msg_target.nid; - int rc; + struct lnet_nid *dst_nid = &msg->msg_target.nid; + int rc; - /* - * NB: rtr_nid is set to LNET_NID_ANY for all current use-cases, - * but we might want to use pre-determined router for ACK/REPLY - * in the future - */ /* NB: ni != NULL == interface pre-determined (ACK/REPLY) */ LASSERT(msg->msg_txpeer == NULL); LASSERT(msg->msg_txni == NULL); @@ -3042,7 +3173,7 @@ enum lnet_mt_event_type { struct lnet_mt_event_info { enum lnet_mt_event_type mt_type; - lnet_nid_t mt_nid; + struct lnet_nid mt_nid; }; /* called with res_lock held */ @@ -3151,7 +3282,7 @@ lnet_finalize_expired_responses(void) if (ktime_compare(now, rspt->rspt_deadline) >= 0 || the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN) { struct lnet_peer_ni *lpni; - lnet_nid_t nid; + struct lnet_nid nid; md = lnet_handle2md(&rspt->rspt_mdh); if (!md) { @@ -3209,7 +3340,7 @@ lnet_finalize_expired_responses(void) CDEBUG(D_NET, "Response timeout: md = %p: nid = %s\n", - md, libcfs_nid2str(nid)); + md, libcfs_nidstr(&nid)); /* * If there is a timeout on the response @@ -3217,7 +3348,7 @@ lnet_finalize_expired_responses(void) * value so that we don't use it */ lnet_net_lock(0); - lpni = lnet_find_peer_ni_locked(nid); + lpni = lnet_peer_ni_find_locked(&nid); if (lpni) { lnet_handle_remote_failure_locked(lpni); lnet_peer_ni_decref_locked(lpni); @@ -3250,11 +3381,11 @@ lnet_resend_pending_msgs_locked(struct list_head *resendq, int cpt) list_del_init(&msg->msg_list); - lpni = lnet_find_peer_ni_locked(msg->msg_hdr.dest_nid); + lpni = lnet_peer_ni_find_locked(&msg->msg_hdr.dest_nid); if (!lpni) { lnet_net_unlock(cpt); CERROR("Expected that a peer is already created for %s\n", - libcfs_nid2str(msg->msg_hdr.dest_nid)); + libcfs_nidstr(&msg->msg_hdr.dest_nid)); msg->msg_no_resend = true; lnet_finalize(msg, -EFAULT); lnet_net_lock(cpt); @@ -3265,17 +3396,17 @@ lnet_resend_pending_msgs_locked(struct list_head *resendq, int cpt) lnet_net_unlock(cpt); CDEBUG(D_NET, "resending %s->%s: %s recovery %d try# %d\n", - libcfs_nid2str(msg->msg_src_nid_param), - libcfs_id2str(msg->msg_target), + libcfs_nidstr(&msg->msg_src_nid_param), + libcfs_idstr(&msg->msg_target), lnet_msgtyp2str(msg->msg_type), msg->msg_recovery, msg->msg_retry_count); - rc = lnet_send(msg->msg_src_nid_param, msg, - msg->msg_rtr_nid_param); + rc = lnet_send(&msg->msg_src_nid_param, msg, + &msg->msg_rtr_nid_param); if (rc) { CERROR("Error sending %s to %s: %d\n", lnet_msgtyp2str(msg->msg_type), - libcfs_id2str(msg->msg_target), rc); 
+ libcfs_idstr(&msg->msg_target), rc); msg->msg_no_resend = true; lnet_finalize(msg, rc); } @@ -3328,9 +3459,10 @@ lnet_recover_local_nis(void) struct lnet_handle_md mdh; struct lnet_ni *tmp; struct lnet_ni *ni; - lnet_nid_t nid; + struct lnet_nid nid; int healthv; int rc; + time64_t now; /* * splice the recovery queue on a local queue. We will iterate @@ -3344,6 +3476,8 @@ lnet_recover_local_nis(void) &local_queue); lnet_net_unlock(0); + now = ktime_get_seconds(); + list_for_each_entry_safe(ni, tmp, &local_queue, ni_recovery) { /* * if an NI is being deleted or it is now healthy, there @@ -3377,12 +3511,18 @@ lnet_recover_local_nis(void) ni->ni_recovery_state &= ~LNET_NI_RECOVERY_FAILED; } + lnet_ni_unlock(ni); - lnet_net_unlock(0); + if (now < ni->ni_next_ping) { + lnet_net_unlock(0); + continue; + } + + lnet_net_unlock(0); CDEBUG(D_NET, "attempting to recover local ni: %s\n", - libcfs_nid2str(ni->ni_nid)); + libcfs_nidstr(&ni->ni_nid)); lnet_ni_lock(ni); if (!(ni->ni_recovery_state & LNET_NI_RECOVERY_PENDING)) { @@ -3392,7 +3532,7 @@ lnet_recover_local_nis(void) LIBCFS_ALLOC(ev_info, sizeof(*ev_info)); if (!ev_info) { CERROR("out of memory. Can't recover %s\n", - libcfs_nid2str(ni->ni_nid)); + libcfs_nidstr(&ni->ni_nid)); lnet_ni_lock(ni); ni->ni_recovery_state &= ~LNET_NI_RECOVERY_PENDING; @@ -3432,12 +3572,12 @@ lnet_recover_local_nis(void) ev_info->mt_type = MT_TYPE_LOCAL_NI; ev_info->mt_nid = nid; - rc = lnet_send_ping(nid, &mdh, LNET_INTERFACES_MIN, + rc = lnet_send_ping(&nid, &mdh, LNET_INTERFACES_MIN, ev_info, the_lnet.ln_mt_handler, true); /* lookup the nid again */ lnet_net_lock(0); - ni = lnet_nid2ni_locked(nid, 0); + ni = lnet_nid_to_ni_locked(&nid, 0); if (!ni) { /* * the NI has been deleted when we dropped @@ -3447,30 +3587,20 @@ lnet_recover_local_nis(void) LNetMDUnlink(mdh); continue; } - /* - * Same note as in lnet_recover_peer_nis(). When - * we're sending the ping, the NI is free to be - * deleted or manipulated. By this point it - * could've been added back on the recovery queue, - * and a refcount taken on it. - * So we can't just add it blindly again or we'll - * corrupt the queue. We must check under lock if - * it's not on any list and if not then add it - * to the processed list, which will eventually be - * spliced back on to the recovery queue. 
- */ + ni->ni_ping_count++; + ni->ni_ping_mdh = mdh; - if (list_empty(&ni->ni_recovery)) { - list_add_tail(&ni->ni_recovery, &processed_list); - lnet_ni_addref_locked(ni, 0); - } - lnet_net_unlock(0); + lnet_ni_add_to_recoveryq_locked(ni, &processed_list, + now); - lnet_ni_lock(ni); - if (rc) + if (rc) { + lnet_ni_lock(ni); ni->ni_recovery_state &= ~LNET_NI_RECOVERY_PENDING; - } - lnet_ni_unlock(ni); + lnet_ni_unlock(ni); + } + lnet_net_unlock(0); + } else + lnet_ni_unlock(ni); } /* @@ -3507,9 +3637,9 @@ lnet_clean_local_ni_recoveryq(void) /* This is only called when the monitor thread has stopped */ lnet_net_lock(0); - while (!list_empty(&the_lnet.ln_mt_localNIRecovq)) { - ni = list_entry(the_lnet.ln_mt_localNIRecovq.next, - struct lnet_ni, ni_recovery); + while ((ni = list_first_entry_or_null(&the_lnet.ln_mt_localNIRecovq, + struct lnet_ni, + ni_recovery)) != NULL) { list_del_init(&ni->ni_recovery); lnet_ni_lock(ni); lnet_unlink_ni_recovery_mdh_locked(ni, 0, true); @@ -3589,9 +3719,10 @@ lnet_recover_peer_nis(void) struct lnet_handle_md mdh; struct lnet_peer_ni *lpni; struct lnet_peer_ni *tmp; - lnet_nid_t nid; + struct lnet_nid nid; int healthv; int rc; + time64_t now; /* * Always use cpt 0 for locking across all interactions with @@ -3602,6 +3733,8 @@ lnet_recover_peer_nis(void) &local_queue); lnet_net_unlock(0); + now = ktime_get_seconds(); + list_for_each_entry_safe(lpni, tmp, &local_queue, lpni_recovery) { /* @@ -3632,6 +3765,12 @@ lnet_recover_peer_nis(void) } spin_unlock(&lpni->lpni_lock); + + if (now < lpni->lpni_next_ping) { + lnet_net_unlock(0); + continue; + } + lnet_net_unlock(0); /* @@ -3648,7 +3787,7 @@ lnet_recover_peer_nis(void) LIBCFS_ALLOC(ev_info, sizeof(*ev_info)); if (!ev_info) { CERROR("out of memory. Can't recover %s\n", - libcfs_nid2str(lpni->lpni_nid)); + libcfs_nidstr(&lpni->lpni_nid)); spin_lock(&lpni->lpni_lock); lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING; spin_unlock(&lpni->lpni_lock); @@ -3657,8 +3796,8 @@ lnet_recover_peer_nis(void) /* look at the comments in lnet_recover_local_nis() */ mdh = lpni->lpni_recovery_ping_mdh; - LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh); nid = lpni->lpni_nid; + LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh); lnet_net_lock(0); list_del_init(&lpni->lpni_recovery); lnet_peer_ni_decref_locked(lpni); @@ -3666,45 +3805,39 @@ lnet_recover_peer_nis(void) ev_info->mt_type = MT_TYPE_PEER_NI; ev_info->mt_nid = nid; - rc = lnet_send_ping(nid, &mdh, LNET_INTERFACES_MIN, + rc = lnet_send_ping(&nid, &mdh, LNET_INTERFACES_MIN, ev_info, the_lnet.ln_mt_handler, true); lnet_net_lock(0); /* - * lnet_find_peer_ni_locked() grabs a refcount for + * lnet_peer_ni_find_locked() grabs a refcount for * us. No need to take it explicitly. */ - lpni = lnet_find_peer_ni_locked(nid); + lpni = lnet_peer_ni_find_locked(&nid); if (!lpni) { lnet_net_unlock(0); LNetMDUnlink(mdh); continue; } + lpni->lpni_ping_count++; + lpni->lpni_recovery_ping_mdh = mdh; - /* - * While we're unlocked the lpni could've been - * readded on the recovery queue. In this case we - * don't need to add it to the local queue, since - * it's already on there and the thread that added - * it would've incremented the refcount on the - * peer, which means we need to decref the refcount - * that was implicitly grabbed by find_peer_ni_locked. - * Otherwise, if the lpni is still not on - * the recovery queue, then we'll add it to the - * processed list. 
- */ - if (list_empty(&lpni->lpni_recovery)) - list_add_tail(&lpni->lpni_recovery, &processed_list); - else - lnet_peer_ni_decref_locked(lpni); - lnet_net_unlock(0); - spin_lock(&lpni->lpni_lock); - if (rc) + lnet_peer_ni_add_to_recoveryq_locked(lpni, + &processed_list, + now); + if (rc) { + spin_lock(&lpni->lpni_lock); lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING; - } - spin_unlock(&lpni->lpni_lock); + spin_unlock(&lpni->lpni_lock); + } + + /* Drop the ref taken by lnet_peer_ni_find_locked() */ + lnet_peer_ni_decref_locked(lpni); + lnet_net_unlock(0); + } else + spin_unlock(&lpni->lpni_lock); } list_splice_init(&processed_list, &local_queue); @@ -3716,9 +3849,7 @@ lnet_recover_peer_nis(void) static int lnet_monitor_thread(void *arg) { - time64_t recovery_timeout = 0; time64_t rsp_timeout = 0; - int interval; time64_t now; wait_for_completion(&the_lnet.ln_started); @@ -3745,11 +3876,8 @@ lnet_monitor_thread(void *arg) rsp_timeout = now + (lnet_transaction_timeout / 2); } - if (now >= recovery_timeout) { - lnet_recover_local_nis(); - lnet_recover_peer_nis(); - recovery_timeout = now + lnet_recovery_interval; - } + lnet_recover_local_nis(); + lnet_recover_peer_nis(); /* * TODO do we need to check if we should sleep without @@ -3759,18 +3887,10 @@ lnet_monitor_thread(void *arg) * if we wake up every 1 second? Although, we've seen * cases where we get a complaint that an idle thread * is waking up unnecessarily. - * - * Take into account the current net_count when you wake - * up for alive router checking, since we need to check - * possibly as many networks as we have configured. */ - interval = min(lnet_recovery_interval, - min((unsigned int) alive_router_check_interval / - lnet_current_net_count, - lnet_transaction_timeout / 2)); wait_for_completion_interruptible_timeout( &the_lnet.ln_mt_wait_complete, - cfs_time_seconds(interval)); + cfs_time_seconds(1)); /* Must re-init the completion before testing anything, * including ln_mt_state. 
*/ @@ -3796,16 +3916,16 @@ lnet_monitor_thread(void *arg) * Returns < 0 if LNetGet fails */ int -lnet_send_ping(lnet_nid_t dest_nid, +lnet_send_ping(struct lnet_nid *dest_nid, struct lnet_handle_md *mdh, int nnis, void *user_data, lnet_handler_t handler, bool recovery) { struct lnet_md md = { NULL }; - struct lnet_process_id id; + struct lnet_processid id; struct lnet_ping_buffer *pbuf; int rc; - if (dest_nid == LNET_NID_ANY) { + if (LNET_NID_IS_ANY(dest_nid)) { rc = -EHOSTUNREACH; goto fail_error; } @@ -3833,9 +3953,9 @@ lnet_send_ping(lnet_nid_t dest_nid, goto fail_error; } id.pid = LNET_PID_LUSTRE; - id.nid = dest_nid; + id.nid = *dest_nid; - rc = LNetGet(LNET_NID_ANY, *mdh, id, + rc = LNetGet(NULL, *mdh, &id, LNET_RESERVED_PORTAL, LNET_PROTO_PING_MATCHBITS, 0, recovery); @@ -3855,13 +3975,13 @@ static void lnet_handle_recovery_reply(struct lnet_mt_event_info *ev_info, int status, bool send, bool unlink_event) { - lnet_nid_t nid = ev_info->mt_nid; + struct lnet_nid *nid = &ev_info->mt_nid; if (ev_info->mt_type == MT_TYPE_LOCAL_NI) { struct lnet_ni *ni; lnet_net_lock(0); - ni = lnet_nid2ni_locked(nid, 0); + ni = lnet_nid_to_ni_locked(nid, 0); if (!ni) { lnet_net_unlock(0); return; @@ -3876,7 +3996,7 @@ lnet_handle_recovery_reply(struct lnet_mt_event_info *ev_info, if (status != 0) { CERROR("local NI (%s) recovery failed with %d\n", - libcfs_nid2str(nid), status); + libcfs_nidstr(nid), status); return; } /* @@ -3894,7 +4014,7 @@ lnet_handle_recovery_reply(struct lnet_mt_event_info *ev_info, int cpt; cpt = lnet_net_lock_current(); - lpni = lnet_find_peer_ni_locked(nid); + lpni = lnet_peer_ni_find_locked(nid); if (!lpni) { lnet_net_unlock(cpt); return; @@ -3910,7 +4030,7 @@ lnet_handle_recovery_reply(struct lnet_mt_event_info *ev_info, if (status != 0) CERROR("peer NI (%s) recovery failed with %d\n", - libcfs_nid2str(nid), status); + libcfs_nidstr(nid), status); } } @@ -3931,15 +4051,15 @@ lnet_mt_event_handler(struct lnet_event *event) switch (event->type) { case LNET_EVENT_UNLINK: CDEBUG(D_NET, "%s recovery ping unlinked\n", - libcfs_nid2str(ev_info->mt_nid)); - /* fallthrough */ + libcfs_nidstr(&ev_info->mt_nid)); + fallthrough; case LNET_EVENT_REPLY: lnet_handle_recovery_reply(ev_info, event->status, false, event->type == LNET_EVENT_UNLINK); break; case LNET_EVENT_SEND: CDEBUG(D_NET, "%s recovery message sent %s:%d\n", - libcfs_nid2str(ev_info->mt_nid), + libcfs_nidstr(&ev_info->mt_nid), (event->status) ? "unsuccessfully" : "successfully", event->status); lnet_handle_recovery_reply(ev_info, event->status, true, false); @@ -4046,7 +4166,9 @@ void lnet_monitor_thr_stop(void) complete(&the_lnet.ln_mt_wait_complete); /* block until monitor thread signals that it's done */ + mutex_unlock(&the_lnet.ln_api_mutex); down(&the_lnet.ln_mt_signal); + mutex_lock(&the_lnet.ln_api_mutex); LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN); /* perform cleanup tasks */ @@ -4102,14 +4224,14 @@ lnet_parse_put(struct lnet_ni *ni, struct lnet_msg *msg) hdr->msg.put.offset = le32_to_cpu(hdr->msg.put.offset); /* Primary peer NID. 
*/ - info.mi_id.nid = msg->msg_initiator; + info.mi_id.nid = msg->msg_initiator; info.mi_id.pid = hdr->src_pid; info.mi_opc = LNET_MD_OP_PUT; info.mi_portal = hdr->msg.put.ptl_index; info.mi_rlength = hdr->payload_length; info.mi_roffset = hdr->msg.put.offset; info.mi_mbits = hdr->msg.put.match_bits; - info.mi_cpt = lnet_cpt_of_nid(msg->msg_initiator, ni); + info.mi_cpt = lnet_nid2cpt(&msg->msg_initiator, ni); msg->msg_rx_ready_delay = ni->ni_net->net_lnd->lnd_eager_recv == NULL; ready_delay = msg->msg_rx_ready_delay; @@ -4135,12 +4257,12 @@ lnet_parse_put(struct lnet_ni *ni, struct lnet_msg *msg) ready_delay = true; goto again; } - /* fall through */ + fallthrough; case LNET_MATCHMD_DROP: CNETERR("Dropping PUT from %s portal %d match %llu" " offset %d length %d: %d\n", - libcfs_id2str(info.mi_id), info.mi_portal, + libcfs_idstr(&info.mi_id), info.mi_portal, info.mi_mbits, info.mi_roffset, info.mi_rlength, rc); return -ENOENT; /* -ve: OK but no match */ @@ -4152,7 +4274,7 @@ lnet_parse_get(struct lnet_ni *ni, struct lnet_msg *msg, int rdma_get) { struct lnet_match_info info; struct lnet_hdr *hdr = &msg->msg_hdr; - struct lnet_process_id source_id; + struct lnet_processid source_id; struct lnet_handle_wire reply_wmd; int rc; @@ -4165,20 +4287,20 @@ lnet_parse_get(struct lnet_ni *ni, struct lnet_msg *msg, int rdma_get) source_id.nid = hdr->src_nid; source_id.pid = hdr->src_pid; /* Primary peer NID */ - info.mi_id.nid = msg->msg_initiator; + info.mi_id.nid = msg->msg_initiator; info.mi_id.pid = hdr->src_pid; info.mi_opc = LNET_MD_OP_GET; info.mi_portal = hdr->msg.get.ptl_index; info.mi_rlength = hdr->msg.get.sink_length; info.mi_roffset = hdr->msg.get.src_offset; info.mi_mbits = hdr->msg.get.match_bits; - info.mi_cpt = lnet_cpt_of_nid(msg->msg_initiator, ni); + info.mi_cpt = lnet_nid2cpt(&msg->msg_initiator, ni); rc = lnet_ptl_match_md(&info, msg); if (rc == LNET_MATCHMD_DROP) { CNETERR("Dropping GET from %s portal %d match %llu" " offset %d length %d\n", - libcfs_id2str(info.mi_id), info.mi_portal, + libcfs_idstr(&info.mi_id), info.mi_portal, info.mi_mbits, info.mi_roffset, info.mi_rlength); return -ENOENT; /* -ve: OK but no match */ } @@ -4189,7 +4311,7 @@ lnet_parse_get(struct lnet_ni *ni, struct lnet_msg *msg, int rdma_get) reply_wmd = hdr->msg.get.return_wmd; - lnet_prep_send(msg, LNET_MSG_REPLY, source_id, + lnet_prep_send(msg, LNET_MSG_REPLY, &source_id, msg->msg_offset, msg->msg_wanted); msg->msg_hdr.msg.reply.dst_wmd = reply_wmd; @@ -4204,12 +4326,12 @@ lnet_parse_get(struct lnet_ni *ni, struct lnet_msg *msg, int rdma_get) lnet_ni_recv(ni, msg->msg_private, NULL, 0, 0, 0, 0); msg->msg_receiving = 0; - rc = lnet_send(ni->ni_nid, msg, msg->msg_from); + rc = lnet_send(&ni->ni_nid, msg, &msg->msg_from); if (rc < 0) { /* didn't get as far as lnet_ni_send() */ CERROR("%s: Unable to send REPLY for GET from %s: %d\n", - libcfs_nid2str(ni->ni_nid), - libcfs_id2str(info.mi_id), rc); + libcfs_nidstr(&ni->ni_nid), + libcfs_idstr(&info.mi_id), rc); lnet_finalize(msg, rc); } @@ -4222,7 +4344,7 @@ lnet_parse_reply(struct lnet_ni *ni, struct lnet_msg *msg) { void *private = msg->msg_private; struct lnet_hdr *hdr = &msg->msg_hdr; - struct lnet_process_id src = {0}; + struct lnet_processid src = {}; struct lnet_libmd *md; unsigned int rlength; unsigned int mlength; @@ -4239,7 +4361,7 @@ lnet_parse_reply(struct lnet_ni *ni, struct lnet_msg *msg) if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) { CNETERR("%s: Dropping REPLY from %s for %s " "MD %#llx.%#llx\n", - libcfs_nid2str(ni->ni_nid), 
libcfs_id2str(src), + libcfs_nidstr(&ni->ni_nid), libcfs_idstr(&src), (md == NULL) ? "invalid" : "inactive", hdr->msg.reply.dst_wmd.wh_interface_cookie, hdr->msg.reply.dst_wmd.wh_object_cookie); @@ -4260,7 +4382,7 @@ lnet_parse_reply(struct lnet_ni *ni, struct lnet_msg *msg) (md->md_options & LNET_MD_TRUNCATE) == 0) { CNETERR("%s: Dropping REPLY from %s length %d " "for MD %#llx would overflow (%d)\n", - libcfs_nid2str(ni->ni_nid), libcfs_id2str(src), + libcfs_nidstr(&ni->ni_nid), libcfs_idstr(&src), rlength, hdr->msg.reply.dst_wmd.wh_object_cookie, mlength); lnet_res_unlock(cpt); @@ -4268,7 +4390,7 @@ lnet_parse_reply(struct lnet_ni *ni, struct lnet_msg *msg) } CDEBUG(D_NET, "%s: Reply from %s of length %d/%d into md %#llx\n", - libcfs_nid2str(ni->ni_nid), libcfs_id2str(src), + libcfs_nidstr(&ni->ni_nid), libcfs_idstr(&src), mlength, rlength, hdr->msg.reply.dst_wmd.wh_object_cookie); lnet_msg_attach_md(msg, md, 0, mlength); @@ -4288,7 +4410,7 @@ static int lnet_parse_ack(struct lnet_ni *ni, struct lnet_msg *msg) { struct lnet_hdr *hdr = &msg->msg_hdr; - struct lnet_process_id src = {0}; + struct lnet_processid src = {}; struct lnet_libmd *md; int cpt; @@ -4308,7 +4430,7 @@ lnet_parse_ack(struct lnet_ni *ni, struct lnet_msg *msg) /* Don't moan; this is expected */ CDEBUG(D_NET, "%s: Dropping ACK from %s to %s MD %#llx.%#llx\n", - libcfs_nid2str(ni->ni_nid), libcfs_id2str(src), + libcfs_nidstr(&ni->ni_nid), libcfs_idstr(&src), (md == NULL) ? "invalid" : "inactive", hdr->msg.ack.dst_wmd.wh_interface_cookie, hdr->msg.ack.dst_wmd.wh_object_cookie); @@ -4321,7 +4443,7 @@ lnet_parse_ack(struct lnet_ni *ni, struct lnet_msg *msg) } CDEBUG(D_NET, "%s: ACK from %s into md %#llx\n", - libcfs_nid2str(ni->ni_nid), libcfs_id2str(src), + libcfs_nidstr(&ni->ni_nid), libcfs_idstr(&src), hdr->msg.ack.dst_wmd.wh_object_cookie); lnet_msg_attach_md(msg, md, 0, 0); @@ -4408,38 +4530,40 @@ lnet_msgtyp2str (int type) return (""); } } +EXPORT_SYMBOL(lnet_msgtyp2str); int -lnet_parse(struct lnet_ni *ni, struct lnet_hdr *hdr, lnet_nid_t from_nid, - void *private, int rdma_req) +lnet_parse(struct lnet_ni *ni, struct lnet_hdr *hdr, + struct lnet_nid *from_nid, void *private, int rdma_req) { struct lnet_peer_ni *lpni; struct lnet_msg *msg; __u32 payload_length; lnet_pid_t dest_pid; - lnet_nid_t dest_nid; - lnet_nid_t src_nid; + struct lnet_nid dest_nid; + struct lnet_nid src_nid; bool push = false; int for_me; __u32 type; int rc = 0; int cpt; + time64_t now = ktime_get_seconds(); LASSERT (!in_interrupt ()); - type = le32_to_cpu(hdr->type); - src_nid = le64_to_cpu(hdr->src_nid); - dest_nid = le64_to_cpu(hdr->dest_nid); - dest_pid = le32_to_cpu(hdr->dest_pid); - payload_length = le32_to_cpu(hdr->payload_length); + type = hdr->type; + src_nid = hdr->src_nid; + dest_nid = hdr->dest_nid; + dest_pid = hdr->dest_pid; + payload_length = hdr->payload_length; - for_me = (ni->ni_nid == dest_nid); - cpt = lnet_cpt_of_nid(from_nid, ni); + for_me = nid_same(&ni->ni_nid, &dest_nid); + cpt = lnet_nid2cpt(from_nid, ni); CDEBUG(D_NET, "TRACE: %s(%s) <- %s : %s - %s\n", - libcfs_nid2str(dest_nid), - libcfs_nid2str(ni->ni_nid), - libcfs_nid2str(src_nid), + libcfs_nidstr(&dest_nid), + libcfs_nidstr(&ni->ni_nid), + libcfs_nidstr(&src_nid), lnet_msgtyp2str(type), (for_me) ? 
"for me" : "routed"); @@ -4448,8 +4572,8 @@ lnet_parse(struct lnet_ni *ni, struct lnet_hdr *hdr, lnet_nid_t from_nid, case LNET_MSG_GET: if (payload_length > 0) { CERROR("%s, src %s: bad %s payload %d (0 expected)\n", - libcfs_nid2str(from_nid), - libcfs_nid2str(src_nid), + libcfs_nidstr(from_nid), + libcfs_nidstr(&src_nid), lnet_msgtyp2str(type), payload_length); return -EPROTO; } @@ -4461,8 +4585,8 @@ lnet_parse(struct lnet_ni *ni, struct lnet_hdr *hdr, lnet_nid_t from_nid, (__u32)(for_me ? LNET_MAX_PAYLOAD : LNET_MTU)) { CERROR("%s, src %s: bad %s payload %d " "(%d max expected)\n", - libcfs_nid2str(from_nid), - libcfs_nid2str(src_nid), + libcfs_nidstr(from_nid), + libcfs_nidstr(&src_nid), lnet_msgtyp2str(type), payload_length, for_me ? LNET_MAX_PAYLOAD : LNET_MTU); @@ -4472,16 +4596,23 @@ lnet_parse(struct lnet_ni *ni, struct lnet_hdr *hdr, lnet_nid_t from_nid, default: CERROR("%s, src %s: Bad message type 0x%x\n", - libcfs_nid2str(from_nid), - libcfs_nid2str(src_nid), type); + libcfs_nidstr(from_nid), + libcfs_nidstr(&src_nid), type); return -EPROTO; } - if (the_lnet.ln_routing && - ni->ni_net->net_last_alive != ktime_get_real_seconds()) { + /* Only update net_last_alive for incoming GETs on the reserved portal + * (i.e. incoming lnet/discovery pings). + * This avoids situations where the router's own traffic results in NI + * status changes + */ + if (the_lnet.ln_routing && type == LNET_MSG_GET && + hdr->msg.get.ptl_index == LNET_RESERVED_PORTAL && + !lnet_islocalnid(&src_nid) && + ni->ni_net->net_last_alive != now) { lnet_ni_lock(ni); spin_lock(&ni->ni_net->net_lock); - ni->ni_net->net_last_alive = ktime_get_real_seconds(); + ni->ni_net->net_last_alive = now; spin_unlock(&ni->ni_net->net_lock); push = lnet_ni_set_status_locked(ni, LNET_NI_STATUS_UP); lnet_ni_unlock(ni); @@ -4495,42 +4626,42 @@ lnet_parse(struct lnet_ni *ni, struct lnet_hdr *hdr, lnet_nid_t from_nid, * or malicious so we chop them off at the knees :) */ if (!for_me) { - if (LNET_NIDNET(dest_nid) == LNET_NIDNET(ni->ni_nid)) { + if (LNET_NID_NET(&dest_nid) == LNET_NID_NET(&ni->ni_nid)) { /* should have gone direct */ CERROR("%s, src %s: Bad dest nid %s " "(should have been sent direct)\n", - libcfs_nid2str(from_nid), - libcfs_nid2str(src_nid), - libcfs_nid2str(dest_nid)); + libcfs_nidstr(from_nid), + libcfs_nidstr(&src_nid), + libcfs_nidstr(&dest_nid)); return -EPROTO; } - if (lnet_islocalnid(dest_nid)) { + if (lnet_islocalnid(&dest_nid)) { /* dest is another local NI; sender should have used * this node's NID on its own network */ CERROR("%s, src %s: Bad dest nid %s " "(it's my nid but on a different network)\n", - libcfs_nid2str(from_nid), - libcfs_nid2str(src_nid), - libcfs_nid2str(dest_nid)); + libcfs_nidstr(from_nid), + libcfs_nidstr(&src_nid), + libcfs_nidstr(&dest_nid)); return -EPROTO; } if (rdma_req && type == LNET_MSG_GET) { CERROR("%s, src %s: Bad optimized GET for %s " "(final destination must be me)\n", - libcfs_nid2str(from_nid), - libcfs_nid2str(src_nid), - libcfs_nid2str(dest_nid)); + libcfs_nidstr(from_nid), + libcfs_nidstr(&src_nid), + libcfs_nidstr(&dest_nid)); return -EPROTO; } if (!the_lnet.ln_routing) { CERROR("%s, src %s: Dropping message for %s " "(routing not enabled)\n", - libcfs_nid2str(from_nid), - libcfs_nid2str(src_nid), - libcfs_nid2str(dest_nid)); + libcfs_nidstr(from_nid), + libcfs_nidstr(&src_nid), + libcfs_nidstr(&dest_nid)); goto drop; } } @@ -4539,81 +4670,26 @@ lnet_parse(struct lnet_ni *ni, struct lnet_hdr *hdr, lnet_nid_t from_nid, * call back lnd_recv() come what may... 
*/ if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */ - fail_peer(src_nid, 0)) { /* shall we now? */ + fail_peer(&src_nid, 0)) { /* shall we now? */ CERROR("%s, src %s: Dropping %s to simulate failure\n", - libcfs_nid2str(from_nid), libcfs_nid2str(src_nid), + libcfs_nidstr(from_nid), libcfs_nidstr(&src_nid), lnet_msgtyp2str(type)); goto drop; } if (!list_empty(&the_lnet.ln_drop_rules) && - lnet_drop_rule_match(hdr, ni->ni_nid, NULL)) { + lnet_drop_rule_match(hdr, &ni->ni_nid, NULL)) { CDEBUG(D_NET, "%s, src %s, dst %s: Dropping %s to simulate silent message loss\n", - libcfs_nid2str(from_nid), libcfs_nid2str(src_nid), - libcfs_nid2str(dest_nid), lnet_msgtyp2str(type)); + libcfs_nidstr(from_nid), libcfs_nidstr(&src_nid), + libcfs_nidstr(&dest_nid), lnet_msgtyp2str(type)); goto drop; } - if (lnet_drop_asym_route && for_me && - LNET_NIDNET(src_nid) != LNET_NIDNET(from_nid)) { - struct lnet_net *net; - struct lnet_remotenet *rnet; - bool found = true; - - /* we are dealing with a routed message, - * so see if route to reach src_nid goes through from_nid - */ - lnet_net_lock(cpt); - net = lnet_get_net_locked(LNET_NIDNET(ni->ni_nid)); - if (!net) { - lnet_net_unlock(cpt); - CERROR("net %s not found\n", - libcfs_net2str(LNET_NIDNET(ni->ni_nid))); - return -EPROTO; - } - - rnet = lnet_find_rnet_locked(LNET_NIDNET(src_nid)); - if (rnet) { - struct lnet_peer *gw = NULL; - struct lnet_peer_ni *lpni = NULL; - struct lnet_route *route; - - list_for_each_entry(route, &rnet->lrn_routes, lr_list) { - found = false; - gw = route->lr_gateway; - if (route->lr_lnet != net->net_id) - continue; - /* - * if the nid is one of the gateway's NIDs - * then this is a valid gateway - */ - while ((lpni = lnet_get_next_peer_ni_locked(gw, - NULL, lpni)) != NULL) { - if (lpni->lpni_nid == from_nid) { - found = true; - break; - } - } - } - } - lnet_net_unlock(cpt); - if (!found) { - /* we would not use from_nid to route a message to - * src_nid - * => asymmetric routing detected but forbidden - */ - CERROR("%s, src %s: Dropping asymmetrical route %s\n", - libcfs_nid2str(from_nid), - libcfs_nid2str(src_nid), lnet_msgtyp2str(type)); - goto drop; - } - } - msg = lnet_msg_alloc(); if (msg == NULL) { CERROR("%s, src %s: Dropping %s (out of memory)\n", - libcfs_nid2str(from_nid), libcfs_nid2str(src_nid), + libcfs_nidstr(from_nid), libcfs_nidstr(&src_nid), lnet_msgtyp2str(type)); goto drop; } @@ -4629,45 +4705,95 @@ lnet_parse(struct lnet_ni *ni, struct lnet_hdr *hdr, lnet_nid_t from_nid, msg->msg_offset = 0; msg->msg_hdr = *hdr; /* for building message event */ - msg->msg_from = from_nid; + msg->msg_from = *from_nid; if (!for_me) { - msg->msg_target.pid = dest_pid; - msg->msg_target.nid = dest_nid; - msg->msg_routing = 1; - - } else { - /* convert common msg->hdr fields to host byteorder */ - msg->msg_hdr.type = type; - msg->msg_hdr.src_nid = src_nid; - msg->msg_hdr.src_pid = le32_to_cpu(msg->msg_hdr.src_pid); - msg->msg_hdr.dest_nid = dest_nid; - msg->msg_hdr.dest_pid = dest_pid; - msg->msg_hdr.payload_length = payload_length; + msg->msg_target.pid = dest_pid; + msg->msg_target.nid = dest_nid; + msg->msg_routing = 1; } lnet_net_lock(cpt); - lpni = lnet_nid2peerni_locked(from_nid, ni->ni_nid, cpt); + lpni = lnet_peerni_by_nid_locked(from_nid, &ni->ni_nid, cpt); if (IS_ERR(lpni)) { lnet_net_unlock(cpt); - CERROR("%s, src %s: Dropping %s " - "(error %ld looking up sender)\n", - libcfs_nid2str(from_nid), libcfs_nid2str(src_nid), - lnet_msgtyp2str(type), PTR_ERR(lpni)); + rc = PTR_ERR(lpni); + CERROR("%s, src %s: 
Dropping %s (error %d looking up sender)\n", + libcfs_nidstr(from_nid), libcfs_nidstr(&src_nid), + lnet_msgtyp2str(type), rc); lnet_msg_free(msg); if (rc == -ESHUTDOWN) /* We are shutting down. Don't do anything more */ - return 0; + return rc; goto drop; } - if (the_lnet.ln_routing) - lpni->lpni_last_alive = ktime_get_seconds(); + /* If this message was forwarded to us from a router then we may need + * to update router aliveness or check for an asymmetrical route + * (or both) + */ + if (((lnet_drop_asym_route && for_me) || + !lpni->lpni_peer_net->lpn_peer->lp_alive) && + LNET_NID_NET(&src_nid) != LNET_NID_NET(from_nid)) { + __u32 src_net_id = LNET_NID_NET(&src_nid); + struct lnet_peer *gw = lpni->lpni_peer_net->lpn_peer; + struct lnet_route *route; + bool found = false; + + list_for_each_entry(route, &gw->lp_routes, lr_gwlist) { + if (route->lr_net == src_net_id) { + found = true; + /* If we're transitioning the gateway from + * dead -> alive, and discovery is disabled + * locally or on the gateway, then we need to + * update the cached route aliveness for each + * route to the src_nid's net. + * + * Otherwise, we're only checking for + * symmetrical route, and we can break the + * loop + */ + if (!gw->lp_alive && + lnet_is_discovery_disabled(gw)) + lnet_set_route_aliveness(route, true); + else + break; + } + } + if (lnet_drop_asym_route && for_me && !found) { + /* Drop ref taken by lnet_nid2peerni_locked() */ + lnet_peer_ni_decref_locked(lpni); + lnet_net_unlock(cpt); + /* we would not use from_nid to route a message to + * src_nid + * => asymmetric routing detected but forbidden + */ + CERROR("%s, src %s: Dropping asymmetrical route %s\n", + libcfs_nidstr(from_nid), + libcfs_nidstr(&src_nid), lnet_msgtyp2str(type)); + lnet_msg_free(msg); + goto drop; + } + if (!gw->lp_alive) { + struct lnet_peer_net *lpn; + struct lnet_peer_ni *lpni2; + + gw->lp_alive = true; + /* Mark all remote NIs on src_nid's net UP */ + lpn = lnet_peer_get_net_locked(gw, src_net_id); + if (lpn) + list_for_each_entry(lpni2, &lpn->lpn_peer_nis, + lpni_peer_nis) + lpni2->lpni_ns_status = LNET_NI_STATUS_UP; + } + } + + lpni->lpni_last_alive = now; msg->msg_rxpeer = lpni; msg->msg_rxni = ni; lnet_ni_addref_locked(ni, cpt); /* Multi-Rail: Primary NID of source. 
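The large hunk above moves the asymmetric-route test onto the gateway peer itself: walk the gateway's lp_routes for an entry whose lr_net matches the source NID's network, and only if lnet_drop_asym_route is set and no such route exists is the message dropped; a gateway that was considered dead is revived on the way, with its routes and remote NIs on that net marked up. The sketch below reproduces just the accept/drop decision with invented types; the aliveness updates and discovery gating in the real code are deliberately omitted.

/* Sketch of the asymmetric-route test performed in the hunk above: is the
 * gateway that forwarded this message one we would use to reach the sender's
 * network?  struct gw_route/gateway are invented stand-ins for
 * struct lnet_route / struct lnet_peer; only the control flow is similar. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct gw_route {
        uint32_t lr_net;        /* remote net reachable through the gateway */
};

struct gateway {
        const struct gw_route *lp_routes;
        int                    lp_nroutes;
};

static bool accept_routed_msg(const struct gateway *gw, uint32_t src_net,
                              bool drop_asym_route)
{
        int i;

        for (i = 0; i < gw->lp_nroutes; i++)
                if (gw->lp_routes[i].lr_net == src_net)
                        return true;    /* symmetric: we would route back this way */

        /* No route back to src_net through this gateway.  The real code also
         * refreshes gateway/route aliveness here under extra conditions. */
        return !drop_asym_route;
}

int main(void)
{
        const struct gw_route routes[] = { { .lr_net = 7 }, { .lr_net = 9 } };
        const struct gateway gw = { .lp_routes = routes, .lp_nroutes = 2 };

        printf("src net 7: %s\n",
               accept_routed_msg(&gw, 7, true) ? "accept" : "drop");
        printf("src net 5: %s\n",
               accept_routed_msg(&gw, 5, true) ? "accept" : "drop");
        return 0;
}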
*/ - msg->msg_initiator = lnet_peer_primary_nid_locked(src_nid); + lnet_peer_primary_nid_locked(&src_nid, &msg->msg_initiator); /* * mark the status of this lpni as UP since we received a message @@ -4719,11 +4845,12 @@ EXPORT_SYMBOL(lnet_parse); void lnet_drop_delayed_msg_list(struct list_head *head, char *reason) { - while (!list_empty(head)) { - struct lnet_process_id id = {0}; - struct lnet_msg *msg; + struct lnet_msg *msg; + + while ((msg = list_first_entry_or_null(head, struct lnet_msg, + msg_list)) != NULL) { + struct lnet_processid id = {}; - msg = list_entry(head->next, struct lnet_msg, msg_list); list_del(&msg->msg_list); id.nid = msg->msg_hdr.src_nid; @@ -4736,7 +4863,7 @@ lnet_drop_delayed_msg_list(struct list_head *head, char *reason) CWARN("Dropping delayed PUT from %s portal %d match %llu" " offset %d length %d: %s\n", - libcfs_id2str(id), + libcfs_idstr(&id), msg->msg_hdr.msg.put.ptl_index, msg->msg_hdr.msg.put.match_bits, msg->msg_hdr.msg.put.offset, @@ -4763,11 +4890,12 @@ lnet_drop_delayed_msg_list(struct list_head *head, char *reason) void lnet_recv_delayed_msg_list(struct list_head *head) { - while (!list_empty(head)) { - struct lnet_msg *msg; - struct lnet_process_id id; + struct lnet_msg *msg; + + while ((msg = list_first_entry_or_null(head, struct lnet_msg, + msg_list)) != NULL) { + struct lnet_processid id; - msg = list_entry(head->next, struct lnet_msg, msg_list); list_del(&msg->msg_list); /* md won't disappear under me, since each msg @@ -4784,7 +4912,7 @@ lnet_recv_delayed_msg_list(struct list_head *head) CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d " "match %llu offset %d length %d.\n", - libcfs_id2str(id), msg->msg_hdr.msg.put.ptl_index, + libcfs_idstr(&id), msg->msg_hdr.msg.put.ptl_index, msg->msg_hdr.msg.put.match_bits, msg->msg_hdr.msg.put.offset, msg->msg_hdr.payload_length); @@ -4882,8 +5010,8 @@ lnet_attach_rsp_tracker(struct lnet_rsp_tracker *rspt, int cpt, * \see struct lnet_event::hdr_data and lnet_event_kind_t. */ int -LNetPut(lnet_nid_t self, struct lnet_handle_md mdh, enum lnet_ack_req ack, - struct lnet_process_id target, unsigned int portal, +LNetPut(struct lnet_nid *self, struct lnet_handle_md mdh, enum lnet_ack_req ack, + struct lnet_processid *target, unsigned int portal, __u64 match_bits, unsigned int offset, __u64 hdr_data) { @@ -4896,16 +5024,16 @@ LNetPut(lnet_nid_t self, struct lnet_handle_md mdh, enum lnet_ack_req ack, LASSERT(the_lnet.ln_refcount > 0); if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */ - fail_peer(target.nid, 1)) { /* shall we now? */ + fail_peer(&target->nid, 1)) { /* shall we now? 
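The two delayed-message hunks above replace the "while (!list_empty(head)) { msg = list_entry(head->next, ...); ... }" pattern with "while ((msg = list_first_entry_or_null(...)) != NULL)", which fetches the head entry exactly once and keeps the NULL test and the dequeue visibly paired. A self-contained userspace analogue of the drain idiom on a minimal list type (not the kernel list API):

/* Userspace analogue of the "drain the list one entry at a time" idiom used
 * in the hunks above; the tiny list type here is not the kernel list API. */
#include <stdio.h>
#include <stdlib.h>

struct msg {
        int         seq;
        struct msg *next;
};

/* like list_first_entry_or_null() + list_del(): detach and return the head,
 * or NULL when the list is empty */
static struct msg *pop_first(struct msg **head)
{
        struct msg *m = *head;

        if (m)
                *head = m->next;
        return m;
}

int main(void)
{
        struct msg *head = NULL;
        struct msg *m;
        int i;

        for (i = 0; i < 3; i++) {               /* build a small queue */
                m = malloc(sizeof(*m));
                m->seq = i;
                m->next = head;
                head = m;
        }

        /* the drain loop: one lookup per iteration, no separate emptiness test */
        while ((m = pop_first(&head)) != NULL) {
                printf("dropping msg %d\n", m->seq);
                free(m);
        }
        return 0;
}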
*/ CERROR("Dropping PUT to %s: simulated failure\n", - libcfs_id2str(target)); + libcfs_idstr(target)); return -EIO; } msg = lnet_msg_alloc(); if (msg == NULL) { CERROR("Dropping PUT to %s: ENOMEM on struct lnet_msg\n", - libcfs_id2str(target)); + libcfs_idstr(target)); return -ENOMEM; } msg->msg_vmflush = !!(current->flags & PF_MEMALLOC); @@ -4916,7 +5044,7 @@ LNetPut(lnet_nid_t self, struct lnet_handle_md mdh, enum lnet_ack_req ack, rspt = lnet_rspt_alloc(cpt); if (!rspt) { CERROR("Dropping PUT to %s: ENOMEM on response tracker\n", - libcfs_id2str(target)); + libcfs_idstr(target)); return -ENOMEM; } INIT_LIST_HEAD(&rspt->rspt_on_list); @@ -4927,7 +5055,7 @@ LNetPut(lnet_nid_t self, struct lnet_handle_md mdh, enum lnet_ack_req ack, md = lnet_handle2md(&mdh); if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) { CERROR("Dropping PUT (%llu:%d:%s): MD (%d) invalid\n", - match_bits, portal, libcfs_id2str(target), + match_bits, portal, libcfs_idstr(target), md == NULL ? -1 : md->md_threshold); if (md != NULL && md->md_me != NULL) CERROR("Source MD also attached to portal %d\n", @@ -4941,7 +5069,7 @@ LNetPut(lnet_nid_t self, struct lnet_handle_md mdh, enum lnet_ack_req ack, return -ENOENT; } - CDEBUG(D_NET, "LNetPut -> %s\n", libcfs_id2str(target)); + CDEBUG(D_NET, "%s -> %s\n", __func__, libcfs_idstr(target)); lnet_msg_attach_md(msg, md, 0, 0); @@ -4979,11 +5107,11 @@ LNetPut(lnet_nid_t self, struct lnet_handle_md mdh, enum lnet_ack_req ack, CFS_FAIL_ONCE)) rc = -EIO; else - rc = lnet_send(self, msg, LNET_NID_ANY); + rc = lnet_send(self, msg, NULL); if (rc != 0) { CNETERR("Error sending PUT to %s: %d\n", - libcfs_id2str(target), rc); + libcfs_idstr(target), rc); msg->msg_no_resend = true; lnet_finalize(msg, rc); } @@ -5006,7 +5134,7 @@ lnet_create_reply_msg(struct lnet_ni *ni, struct lnet_msg *getmsg) { struct lnet_msg *msg = lnet_msg_alloc(); struct lnet_libmd *getmd = getmsg->msg_md; - struct lnet_process_id peer_id = getmsg->msg_target; + struct lnet_processid *peer_id = &getmsg->msg_target; int cpt; LASSERT(!getmsg->msg_target_is_router); @@ -5014,7 +5142,7 @@ lnet_create_reply_msg(struct lnet_ni *ni, struct lnet_msg *getmsg) if (msg == NULL) { CERROR("%s: Dropping REPLY from %s: can't allocate msg\n", - libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id)); + libcfs_nidstr(&ni->ni_nid), libcfs_idstr(peer_id)); goto drop; } @@ -5025,7 +5153,7 @@ lnet_create_reply_msg(struct lnet_ni *ni, struct lnet_msg *getmsg) if (getmd->md_threshold == 0) { CERROR("%s: Dropping REPLY from %s for inactive MD %p\n", - libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id), + libcfs_nidstr(&ni->ni_nid), libcfs_idstr(peer_id), getmd); lnet_res_unlock(cpt); goto drop; @@ -5034,20 +5162,21 @@ lnet_create_reply_msg(struct lnet_ni *ni, struct lnet_msg *getmsg) LASSERT(getmd->md_offset == 0); CDEBUG(D_NET, "%s: Reply from %s md %p\n", - libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id), getmd); + libcfs_nidstr(&ni->ni_nid), libcfs_idstr(peer_id), getmd); /* setup information for lnet_build_msg_event */ - msg->msg_initiator = getmsg->msg_txpeer->lpni_peer_net->lpn_peer->lp_primary_nid; - msg->msg_from = peer_id.nid; + msg->msg_initiator = + getmsg->msg_txpeer->lpni_peer_net->lpn_peer->lp_primary_nid; + msg->msg_from = peer_id->nid; msg->msg_type = LNET_MSG_GET; /* flag this msg as an "optimized" GET */ - msg->msg_hdr.src_nid = peer_id.nid; + msg->msg_hdr.src_nid = peer_id->nid; msg->msg_hdr.payload_length = getmd->md_length; msg->msg_receiving = 1; /* required by lnet_msg_attach_md */ lnet_msg_attach_md(msg, 
getmd, getmd->md_offset, getmd->md_length); lnet_res_unlock(cpt); - cpt = lnet_cpt_of_nid(peer_id.nid, ni); + cpt = lnet_nid2cpt(&peer_id->nid, ni); lnet_net_lock(cpt); lnet_msg_commit(msg, cpt); @@ -5058,7 +5187,7 @@ lnet_create_reply_msg(struct lnet_ni *ni, struct lnet_msg *getmsg) return msg; drop: - cpt = lnet_cpt_of_nid(peer_id.nid, ni); + cpt = lnet_nid2cpt(&peer_id->nid, ni); lnet_net_lock(cpt); lnet_incr_stats(&ni->ni_stats, LNET_MSG_GET, LNET_STATS_TYPE_DROP); @@ -5113,8 +5242,8 @@ EXPORT_SYMBOL(lnet_set_reply_msg_len); * \retval -ENOENT Invalid MD object. */ int -LNetGet(lnet_nid_t self, struct lnet_handle_md mdh, - struct lnet_process_id target, unsigned int portal, +LNetGet(struct lnet_nid *self, struct lnet_handle_md mdh, + struct lnet_processid *target, unsigned int portal, __u64 match_bits, unsigned int offset, bool recovery) { struct lnet_msg *msg; @@ -5126,17 +5255,17 @@ LNetGet(lnet_nid_t self, struct lnet_handle_md mdh, LASSERT(the_lnet.ln_refcount > 0); if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */ - fail_peer(target.nid, 1)) /* shall we now? */ + fail_peer(&target->nid, 1)) /* shall we now? */ { CERROR("Dropping GET to %s: simulated failure\n", - libcfs_id2str(target)); + libcfs_idstr(target)); return -EIO; } msg = lnet_msg_alloc(); if (!msg) { CERROR("Dropping GET to %s: ENOMEM on struct lnet_msg\n", - libcfs_id2str(target)); + libcfs_idstr(target)); return -ENOMEM; } @@ -5145,7 +5274,7 @@ LNetGet(lnet_nid_t self, struct lnet_handle_md mdh, rspt = lnet_rspt_alloc(cpt); if (!rspt) { CERROR("Dropping GET to %s: ENOMEM on response tracker\n", - libcfs_id2str(target)); + libcfs_idstr(target)); return -ENOMEM; } INIT_LIST_HEAD(&rspt->rspt_on_list); @@ -5157,7 +5286,7 @@ LNetGet(lnet_nid_t self, struct lnet_handle_md mdh, md = lnet_handle2md(&mdh); if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) { CERROR("Dropping GET (%llu:%d:%s): MD (%d) invalid\n", - match_bits, portal, libcfs_id2str(target), + match_bits, portal, libcfs_idstr(target), md == NULL ? -1 : md->md_threshold); if (md != NULL && md->md_me != NULL) CERROR("REPLY MD also attached to portal %d\n", @@ -5170,7 +5299,7 @@ LNetGet(lnet_nid_t self, struct lnet_handle_md mdh, return -ENOENT; } - CDEBUG(D_NET, "LNetGet -> %s\n", libcfs_id2str(target)); + CDEBUG(D_NET, "%s -> %s\n", __func__, libcfs_idstr(target)); lnet_msg_attach_md(msg, md, 0, 0); @@ -5196,10 +5325,10 @@ LNetGet(lnet_nid_t self, struct lnet_handle_md mdh, else lnet_rspt_free(rspt, cpt); - rc = lnet_send(self, msg, LNET_NID_ANY); + rc = lnet_send(self, msg, NULL); if (rc < 0) { CNETERR("Error sending GET to %s: %d\n", - libcfs_id2str(target), rc); + libcfs_idstr(target), rc); msg->msg_no_resend = true; lnet_finalize(msg, rc); } @@ -5224,64 +5353,80 @@ EXPORT_SYMBOL(LNetGet); * \retval -EHOSTUNREACH If \a dstnid is not reachable. */ int -LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp) +LNetDist(struct lnet_nid *dstnid, struct lnet_nid *srcnid, __u32 *orderp) { - struct list_head *e; struct lnet_ni *ni = NULL; struct lnet_remotenet *rnet; - __u32 dstnet = LNET_NIDNET(dstnid); + __u32 dstnet = LNET_NID_NET(dstnid); int hops; int cpt; __u32 order = 2; struct list_head *rn_list; + struct lnet_ni *matched_dstnet = NULL; /* if !local_nid_dist_zero, I don't return a distance of 0 ever * (when lustre sees a distance of 0, it substitutes 0@lo), so I * keep order 0 free for 0@lo and order 1 free for a local NID - * match */ + * match + * WARNING: dstnid and srcnid might point to same place. 
+ * Don't set *srcnid until late. + */ LASSERT(the_lnet.ln_refcount > 0); cpt = lnet_net_lock_current(); while ((ni = lnet_get_next_ni_locked(NULL, ni))) { - if (ni->ni_nid == dstnid) { - if (srcnidp != NULL) - *srcnidp = dstnid; + if (nid_same(&ni->ni_nid, dstnid)) { if (orderp != NULL) { - if (dstnid == LNET_NID_LO_0) + if (nid_is_lo0(dstnid)) *orderp = 0; else *orderp = 1; } + if (srcnid) + *srcnid = *dstnid; lnet_net_unlock(cpt); return local_nid_dist_zero ? 0 : 1; } - if (LNET_NIDNET(ni->ni_nid) == dstnet) { - /* Check if ni was originally created in - * current net namespace. - * If not, assign order above 0xffff0000, - * to make this ni not a priority. */ - if (current->nsproxy && - !net_eq(ni->ni_net_ns, current->nsproxy->net_ns)) - order += 0xffff0000; - if (srcnidp != NULL) - *srcnidp = ni->ni_nid; - if (orderp != NULL) - *orderp = order; - lnet_net_unlock(cpt); - return 1; + if (!matched_dstnet && LNET_NID_NET(&ni->ni_nid) == dstnet) { + matched_dstnet = ni; + /* We matched the destination net, but we may have + * additional local NIs to inspect. + * + * We record the order as appropriate, but + * they may be overwritten if we match local NI above. + */ + + if (orderp) { + /* Check if ni was originally created in + * current net namespace. + * If not, assign order above 0xffff0000, + * to make this ni not a priority. + */ + if (current->nsproxy && + !net_eq(ni->ni_net_ns, + current->nsproxy->net_ns)) + *orderp = order + 0xffff0000; + else + *orderp = order; + } } order++; } - rn_list = lnet_net2rnethash(dstnet); - list_for_each(e, rn_list) { - rnet = list_entry(e, struct lnet_remotenet, lrn_list); + if (matched_dstnet) { + if (srcnid) + *srcnid = matched_dstnet->ni_nid; + lnet_net_unlock(cpt); + return 1; + } + rn_list = lnet_net2rnethash(dstnet); + list_for_each_entry(rnet, rn_list, lrn_list) { if (rnet->lrn_net == dstnet) { struct lnet_route *route; struct lnet_route *shortest = NULL; @@ -5304,12 +5449,12 @@ LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp) LASSERT(shortest != NULL); hops = shortest_hops; - if (srcnidp != NULL) { + if (srcnid) { struct lnet_net *net; net = lnet_get_net_locked(shortest->lr_lnet); LASSERT(net); ni = lnet_get_next_ni_locked(net, NULL); - *srcnidp = ni->ni_nid; + *srcnid = ni->ni_nid; } if (orderp != NULL) *orderp = order;
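The reworked LNetDist() above carries a warning that dstnid and srcnid may point at the same struct lnet_nid, which is why *srcnid is only written after the function has finished reading *dstnid, and why the local-NI loop now records matched_dstnet instead of returning on the first net-level match (a later exact-NID match must still be able to win). The short demonstration below shows the aliasing hazard and the "write the output last" fix; lookup_bad(), lookup_good() and struct nid are invented for illustration.

/* Why LNetDist() defers writing *srcnid: the caller may pass the same
 * object for input and output.  All names here are invented. */
#include <stdio.h>
#include <string.h>

struct nid { char addr[32]; };

static const struct nid table[] = { { "10.0.0.1@tcp" }, { "10.0.0.2@tcp" } };

/* BUG: writes *src before it has finished using *dst */
static int lookup_bad(const struct nid *dst, struct nid *src)
{
        size_t i;

        *src = table[0];                        /* clobbers *dst if they alias */
        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                if (strcmp(table[i].addr, dst->addr) == 0)
                        return (int)i;
        return -1;
}

/* FIX: read everything from *dst first, write *src as the last step */
static int lookup_good(const struct nid *dst, struct nid *src)
{
        size_t i;
        int found = -1;

        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                if (strcmp(table[i].addr, dst->addr) == 0)
                        found = (int)i;
        *src = table[0];
        return found;
}

int main(void)
{
        struct nid n = { "10.0.0.2@tcp" };

        printf("bad:  %d\n", lookup_bad(&n, &n));       /* 0: input was clobbered */
        n = (struct nid){ "10.0.0.2@tcp" };
        printf("good: %d\n", lookup_good(&n, &n));      /* 1: correct entry */
        return 0;
}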