LU-3679 lnet: reflect down routes in /proc/sys/lnet/routes
[fs/lustre-release.git] / lnet/lnet/lib-move.c
index a37c7bb..ce7f926 100644
@@ -26,6 +26,8 @@
 /*
  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, 2013, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -161,11 +163,13 @@ lnet_iov_nob (unsigned int niov, struct iovec *iov)
 {
         unsigned int nob = 0;
 
+       LASSERT(niov == 0 || iov != NULL);
         while (niov-- > 0)
                 nob += (iov++)->iov_len;
 
         return (nob);
 }
+EXPORT_SYMBOL(lnet_iov_nob);
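
The new LASSERT() catches a non-zero iovec count paired with a NULL array, and EXPORT_SYMBOL() makes the helper visible to LND modules. A minimal usage sketch (the buffers and lengths here are hypothetical):

        /* hypothetical caller: total payload size of a two-fragment iovec */
        struct iovec iov[2] = {
                { .iov_base = hdr_buf,  .iov_len = 64   },
                { .iov_base = data_buf, .iov_len = 4096 },
        };
        unsigned int nob = lnet_iov_nob(2, iov);        /* 4160 bytes */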
 
 void
 lnet_copy_iov2iov (unsigned int ndiov, struct iovec *diov, unsigned int doffset,
@@ -224,6 +228,7 @@ lnet_copy_iov2iov (unsigned int ndiov, struct iovec *diov, unsigned int doffset,
                 }
         } while (nob > 0);
 }
+EXPORT_SYMBOL(lnet_copy_iov2iov);
 
 int
 lnet_extract_iov (int dst_niov, struct iovec *dst,
@@ -270,6 +275,7 @@ lnet_extract_iov (int dst_niov, struct iovec *dst,
                 offset = 0;
         }
 }
+EXPORT_SYMBOL(lnet_extract_iov);
 
 #ifndef __KERNEL__
 unsigned int
@@ -318,28 +324,30 @@ lnet_kiov_nob (unsigned int niov, lnet_kiov_t *kiov)
 {
         unsigned int  nob = 0;
 
+       LASSERT(niov == 0 || kiov != NULL);
         while (niov-- > 0)
                 nob += (kiov++)->kiov_len;
 
         return (nob);
 }
+EXPORT_SYMBOL(lnet_kiov_nob);
 
 void
 lnet_copy_kiov2kiov (unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset,
                      unsigned int nsiov, lnet_kiov_t *siov, unsigned int soffset,
                      unsigned int nob)
 {
-        /* NB diov, siov are READ-ONLY */
-        unsigned int    this_nob;
-        char           *daddr = NULL;
-        char           *saddr = NULL;
+       /* NB diov, siov are READ-ONLY */
+       unsigned int    this_nob;
+       char           *daddr = NULL;
+       char           *saddr = NULL;
 
-        if (nob == 0)
-                return;
+       if (nob == 0)
+               return;
 
-        LASSERT (!cfs_in_interrupt ());
+       LASSERT (!in_interrupt ());
 
-        LASSERT (ndiov > 0);
+       LASSERT (ndiov > 0);
         while (doffset >= diov->kiov_len) {
                 doffset -= diov->kiov_len;
                 diov++;
@@ -362,64 +370,65 @@ lnet_copy_kiov2kiov (unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset
                                siov->kiov_len - soffset);
                 this_nob = MIN(this_nob, nob);
 
-                if (daddr == NULL)
-                        daddr = ((char *)cfs_kmap(diov->kiov_page)) + 
-                                diov->kiov_offset + doffset;
-                if (saddr == NULL)
-                        saddr = ((char *)cfs_kmap(siov->kiov_page)) + 
-                                siov->kiov_offset + soffset;
+               if (daddr == NULL)
+                       daddr = ((char *)kmap(diov->kiov_page)) +
+                               diov->kiov_offset + doffset;
+               if (saddr == NULL)
+                       saddr = ((char *)kmap(siov->kiov_page)) +
+                               siov->kiov_offset + soffset;
 
-                /* Vanishing risk of kmap deadlock when mapping 2 pages.
-                 * However in practice at least one of the kiovs will be mapped
-                 * kernel pages and the map/unmap will be NOOPs */
+               /* Vanishing risk of kmap deadlock when mapping 2 pages.
+                * However in practice at least one of the kiovs will be mapped
+                * kernel pages and the map/unmap will be NOOPs */
 
-                memcpy (daddr, saddr, this_nob);
-                nob -= this_nob;
+               memcpy (daddr, saddr, this_nob);
+               nob -= this_nob;
 
-                if (diov->kiov_len > doffset + this_nob) {
-                        daddr += this_nob;
-                        doffset += this_nob;
-                } else {
-                        cfs_kunmap(diov->kiov_page);
-                        daddr = NULL;
-                        diov++;
-                        ndiov--;
-                        doffset = 0;
-                }
+               if (diov->kiov_len > doffset + this_nob) {
+                       daddr += this_nob;
+                       doffset += this_nob;
+               } else {
+                       kunmap(diov->kiov_page);
+                       daddr = NULL;
+                       diov++;
+                       ndiov--;
+                       doffset = 0;
+               }
 
-                if (siov->kiov_len > soffset + this_nob) {
-                        saddr += this_nob;
-                        soffset += this_nob;
-                } else {
-                        cfs_kunmap(siov->kiov_page);
-                        saddr = NULL;
-                        siov++;
-                        nsiov--;
-                        soffset = 0;
-                }
-        } while (nob > 0);
+               if (siov->kiov_len > soffset + this_nob) {
+                       saddr += this_nob;
+                       soffset += this_nob;
+               } else {
+                       kunmap(siov->kiov_page);
+                       saddr = NULL;
+                       siov++;
+                       nsiov--;
+                       soffset = 0;
+               }
+       } while (nob > 0);
 
-        if (daddr != NULL)
-                cfs_kunmap(diov->kiov_page);
-        if (saddr != NULL)
-                cfs_kunmap(siov->kiov_page);
+       if (daddr != NULL)
+               kunmap(diov->kiov_page);
+       if (saddr != NULL)
+               kunmap(siov->kiov_page);
 }
+EXPORT_SYMBOL(lnet_copy_kiov2kiov);
 
 void
 lnet_copy_kiov2iov (unsigned int niov, struct iovec *iov, unsigned int iovoffset,
                     unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset,
                     unsigned int nob)
 {
-        /* NB iov, kiov are READ-ONLY */
-        unsigned int    this_nob;
-        char           *addr = NULL;
+       /* NB iov, kiov are READ-ONLY */
+       unsigned int    this_nob;
+       char           *addr = NULL;
 
-        if (nob == 0)
-                return;
+       if (nob == 0)
+               return;
 
-        LASSERT (!cfs_in_interrupt ());
+       LASSERT (!in_interrupt ());
 
-        LASSERT (niov > 0);
+       LASSERT (niov > 0);
         while (iovoffset >= iov->iov_len) {
                 iovoffset -= iov->iov_len;
                 iov++;
@@ -443,7 +452,7 @@ lnet_copy_kiov2iov (unsigned int niov, struct iovec *iov, unsigned int iovoffset
                 this_nob = MIN(this_nob, nob);
 
                 if (addr == NULL)
-                        addr = ((char *)cfs_kmap(kiov->kiov_page)) + 
+                       addr = ((char *)kmap(kiov->kiov_page)) +
                                 kiov->kiov_offset + kiovoffset;
 
                 memcpy ((char *)iov->iov_base + iovoffset, addr, this_nob);
@@ -461,34 +470,35 @@ lnet_copy_kiov2iov (unsigned int niov, struct iovec *iov, unsigned int iovoffset
                         addr += this_nob;
                         kiovoffset += this_nob;
                 } else {
-                        cfs_kunmap(kiov->kiov_page);
-                        addr = NULL;
-                        kiov++;
-                        nkiov--;
-                        kiovoffset = 0;
-                }
+                       kunmap(kiov->kiov_page);
+                       addr = NULL;
+                       kiov++;
+                       nkiov--;
+                       kiovoffset = 0;
+               }
 
-        } while (nob > 0);
+       } while (nob > 0);
 
-        if (addr != NULL)
-                cfs_kunmap(kiov->kiov_page);
+       if (addr != NULL)
+               kunmap(kiov->kiov_page);
 }
+EXPORT_SYMBOL(lnet_copy_kiov2iov);
 
 void
 lnet_copy_iov2kiov (unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset,
                     unsigned int niov, struct iovec *iov, unsigned int iovoffset,
                     unsigned int nob)
 {
-        /* NB kiov, iov are READ-ONLY */
-        unsigned int    this_nob;
-        char           *addr = NULL;
+       /* NB kiov, iov are READ-ONLY */
+       unsigned int    this_nob;
+       char           *addr = NULL;
 
-        if (nob == 0)
-                return;
+       if (nob == 0)
+               return;
 
-        LASSERT (!cfs_in_interrupt ());
+       LASSERT (!in_interrupt ());
 
-        LASSERT (nkiov > 0);
+       LASSERT (nkiov > 0);
         while (kiovoffset >= kiov->kiov_len) {
                 kiovoffset -= kiov->kiov_len;
                 kiov++;
@@ -512,35 +522,36 @@ lnet_copy_iov2kiov (unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffs
                 this_nob = MIN(this_nob, nob);
 
                 if (addr == NULL)
-                        addr = ((char *)cfs_kmap(kiov->kiov_page)) + 
-                                kiov->kiov_offset + kiovoffset;
+                       addr = ((char *)kmap(kiov->kiov_page)) +
+                               kiov->kiov_offset + kiovoffset;
 
-                memcpy (addr, (char *)iov->iov_base + iovoffset, this_nob);
-                nob -= this_nob;
+               memcpy (addr, (char *)iov->iov_base + iovoffset, this_nob);
+               nob -= this_nob;
 
-                if (kiov->kiov_len > kiovoffset + this_nob) {
-                        addr += this_nob;
-                        kiovoffset += this_nob;
-                } else {
-                        cfs_kunmap(kiov->kiov_page);
-                        addr = NULL;
-                        kiov++;
-                        nkiov--;
-                        kiovoffset = 0;
-                }
+               if (kiov->kiov_len > kiovoffset + this_nob) {
+                       addr += this_nob;
+                       kiovoffset += this_nob;
+               } else {
+                       kunmap(kiov->kiov_page);
+                       addr = NULL;
+                       kiov++;
+                       nkiov--;
+                       kiovoffset = 0;
+               }
 
-                if (iov->iov_len > iovoffset + this_nob) {
-                        iovoffset += this_nob;
-                } else {
-                        iov++;
-                        niov--;
-                        iovoffset = 0;
-                }
-        } while (nob > 0);
+               if (iov->iov_len > iovoffset + this_nob) {
+                       iovoffset += this_nob;
+               } else {
+                       iov++;
+                       niov--;
+                       iovoffset = 0;
+               }
+       } while (nob > 0);
 
-        if (addr != NULL)
-                cfs_kunmap(kiov->kiov_page);
+       if (addr != NULL)
+               kunmap(kiov->kiov_page);
 }
+EXPORT_SYMBOL(lnet_copy_iov2kiov);
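
All four copy helpers (iov2iov, kiov2kiov, kiov2iov and iov2kiov) now call the kernel's kmap()/kunmap() directly instead of the cfs_ wrappers, and each is exported for LND modules. A hedged sketch of how an LND receive path might use lnet_copy_kiov2iov() to flatten page-based data into a contiguous buffer (the pages, buffer and byte count are hypothetical):

        /* copy 'nob' bytes from two received pages into the flat buffer */
        struct iovec iov     = { .iov_base = flat, .iov_len = nob };
        lnet_kiov_t  kiov[2] = {
                { .kiov_page = pg0, .kiov_len = PAGE_CACHE_SIZE, .kiov_offset = 0 },
                { .kiov_page = pg1, .kiov_len = PAGE_CACHE_SIZE, .kiov_offset = 0 },
        };

        lnet_copy_kiov2iov(1, &iov, 0,  /* destination iovec, no offset */
                           2, kiov, 0,  /* source pages, no offset */
                           nob);        /* nob <= 2 * PAGE_CACHE_SIZE */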
 
 int
 lnet_extract_kiov (int dst_niov, lnet_kiov_t *dst,
@@ -573,14 +584,14 @@ lnet_extract_kiov (int dst_niov, lnet_kiov_t *dst,
                 dst->kiov_page = src->kiov_page;
                 dst->kiov_offset = src->kiov_offset + offset;
 
-                if (len <= frag_len) {
-                        dst->kiov_len = len;
-                        LASSERT (dst->kiov_offset + dst->kiov_len <= CFS_PAGE_SIZE);
-                        return (niov);
-                }
+               if (len <= frag_len) {
+                       dst->kiov_len = len;
+                       LASSERT (dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE);
+                       return niov;
+               }
 
-                dst->kiov_len = frag_len;
-                LASSERT (dst->kiov_offset + dst->kiov_len <= CFS_PAGE_SIZE);
+               dst->kiov_len = frag_len;
+               LASSERT (dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE);
 
                 len -= frag_len;
                 dst++;
@@ -590,19 +601,20 @@ lnet_extract_kiov (int dst_niov, lnet_kiov_t *dst,
                 offset = 0;
         }
 }
+EXPORT_SYMBOL(lnet_extract_kiov);
 #endif
 
 void
 lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
              unsigned int offset, unsigned int mlen, unsigned int rlen)
 {
-        unsigned int  niov = 0;
-        struct iovec *iov = NULL;
-        lnet_kiov_t  *kiov = NULL;
-        int           rc;
+       unsigned int  niov = 0;
+       struct iovec *iov = NULL;
+       lnet_kiov_t  *kiov = NULL;
+       int           rc;
 
-        LASSERT (!cfs_in_interrupt ());
-        LASSERT (mlen == 0 || msg != NULL);
+       LASSERT (!in_interrupt ());
+       LASSERT (mlen == 0 || msg != NULL);
 
         if (msg != NULL) {
                 LASSERT(msg->msg_receiving);
@@ -673,16 +685,16 @@ lnet_prep_send(lnet_msg_t *msg, int type, lnet_process_id_t target,
 void
 lnet_ni_send(lnet_ni_t *ni, lnet_msg_t *msg)
 {
-        void   *priv = msg->msg_private;
-        int     rc;
+       void   *priv = msg->msg_private;
+       int     rc;
 
-        LASSERT (!cfs_in_interrupt ());
-        LASSERT (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND ||
-                 (msg->msg_txcredit && msg->msg_peertxcredit));
+       LASSERT (!in_interrupt ());
+       LASSERT (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND ||
+                (msg->msg_txcredit && msg->msg_peertxcredit));
 
-        rc = (ni->ni_lnd->lnd_send)(ni, priv, msg);
-        if (rc < 0)
-                lnet_finalize(ni, msg, rc);
+       rc = (ni->ni_lnd->lnd_send)(ni, priv, msg);
+       if (rc < 0)
+               lnet_finalize(ni, msg, rc);
 }
 
 int
@@ -713,11 +725,10 @@ lnet_ni_eager_recv(lnet_ni_t *ni, lnet_msg_t *msg)
 void
 lnet_ni_query_locked(lnet_ni_t *ni, lnet_peer_t *lp)
 {
-       cfs_time_t      last_alive = 0;
+       cfs_time_t last_alive = 0;
 
        LASSERT(lnet_peer_aliveness_enabled(lp));
        LASSERT(ni->ni_lnd->lnd_query != NULL);
-       LASSERT(the_lnet.ln_routing == 1);
 
        lnet_net_unlock(lp->lp_cpt);
        (ni->ni_lnd->lnd_query)(ni, lp->lp_nid, &last_alive);
@@ -737,7 +748,6 @@ lnet_peer_is_alive (lnet_peer_t *lp, cfs_time_t now)
         cfs_time_t deadline;
 
         LASSERT (lnet_peer_aliveness_enabled(lp));
-        LASSERT (the_lnet.ln_routing == 1);
 
         /* Trust lnet_notify() if it has more recent aliveness news, but
          * ignore the initial assumed death (see lnet_peers_start_down()).
@@ -769,10 +779,6 @@ lnet_peer_alive_locked (lnet_peer_t *lp)
 {
         cfs_time_t now = cfs_time_current();
 
-        /* LU-630: only router checks peer health. */
-        if (the_lnet.ln_routing == 0)
-                return 1;
-
         if (!lnet_peer_aliveness_enabled(lp))
                 return -ENODEV;
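
The LASSERT(the_lnet.ln_routing == 1) checks and the LU-630 "only router checks peer health" early return are dropped from these paths; the routing-only restriction presumably moves into lnet_peer_aliveness_enabled() itself. A sketch of what that gate plausibly evaluates (an assumption, since the macro is not part of this hunk):

        /* assumed shape of the aliveness gate: only track peer health when
         * routing is enabled and the NI has a peer timeout configured */
        #define lnet_peer_aliveness_enabled(lp)         \
                (the_lnet.ln_routing == 1 &&            \
                 (lp)->lp_ni->ni_peertimeout > 0)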
 
@@ -903,7 +909,7 @@ lnet_msg2bufpool(lnet_msg_t *msg)
        rbp = &the_lnet.ln_rtrpools[cpt][0];
 
        LASSERT(msg->msg_len <= LNET_MTU);
-       while (msg->msg_len > (unsigned int)rbp->rbp_npages * CFS_PAGE_SIZE) {
+       while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_CACHE_SIZE) {
                rbp++;
                LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]);
        }
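
lnet_msg2bufpool() keeps walking the router buffer pools until one is large enough for msg_len, now measured in PAGE_CACHE_SIZE units. A worked illustration, assuming 4 KiB pages and the conventional tiny/small/large pool split (0, 1 and LNET_MTU/PAGE_CACHE_SIZE pages; that split is an assumption here):

        /* pages needed to hold the payload of a hypothetical msg_len */
        unsigned int pages = (msg_len + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE;

        /*   msg_len =    0  ->  0-page pool (no payload to buffer)
         *   msg_len = 3000  ->  1-page pool (3000 <= 1 * 4096)
         *   msg_len = 9000  ->  large pool  (needs 3 pages)        */
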
@@ -1118,6 +1124,12 @@ lnet_compare_routes(lnet_route_t *r1, lnet_route_t *r2)
        lnet_peer_t *p1 = r1->lr_gateway;
        lnet_peer_t *p2 = r2->lr_gateway;
 
+       if (r1->lr_priority < r2->lr_priority)
+               return 1;
+
+       if (r1->lr_priority > r2->lr_priority)
+               return -1;
+
        if (r1->lr_hops < r2->lr_hops)
                return 1;

@@ -1146,9 +1158,9 @@ static lnet_peer_t *
 lnet_find_route_locked(lnet_ni_t *ni, lnet_nid_t target, lnet_nid_t rtr_nid)
 {
        lnet_remotenet_t        *rnet;
-       lnet_route_t            *rtr;
-       lnet_route_t            *rtr_best;
-       lnet_route_t            *rtr_last;
+       lnet_route_t            *route;
+       lnet_route_t            *best_route;
+       lnet_route_t            *last_route;
        struct lnet_peer        *lp_best;
        struct lnet_peer        *lp;
        int                     rc;
@@ -1161,13 +1173,11 @@ lnet_find_route_locked(lnet_ni_t *ni, lnet_nid_t target, lnet_nid_t rtr_nid)
                return NULL;
 
        lp_best = NULL;
-       rtr_best = rtr_last = NULL;
-       cfs_list_for_each_entry(rtr, &rnet->lrn_routes, lr_list) {
-               lp = rtr->lr_gateway;
+       best_route = last_route = NULL;
+       cfs_list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
+               lp = route->lr_gateway;
 
-               if (!lp->lp_alive || /* gateway is down */
-                   ((lp->lp_ping_feats & LNET_PING_FEAT_NI_STATUS) != 0 &&
-                    rtr->lr_downis != 0)) /* NI to target is down */
+               if (!lnet_is_route_alive(route))
                        continue;
 
                if (ni != NULL && lp->lp_ni != ni)
@@ -1177,28 +1187,28 @@ lnet_find_route_locked(lnet_ni_t *ni, lnet_nid_t target, lnet_nid_t rtr_nid)
                        return lp;
 
                if (lp_best == NULL) {
-                       rtr_best = rtr_last = rtr;
+                       best_route = last_route = route;
                        lp_best = lp;
                        continue;
                }
 
                /* no protection on below fields, but it's harmless */
-               if (rtr_last->lr_seq - rtr->lr_seq < 0)
-                       rtr_last = rtr;
+               if (last_route->lr_seq - route->lr_seq < 0)
+                       last_route = route;
 
-               rc = lnet_compare_routes(rtr, rtr_best);
+               rc = lnet_compare_routes(route, best_route);
                if (rc < 0)
                        continue;
 
-               rtr_best = rtr;
+               best_route = route;
                lp_best = lp;
        }
 
        /* set sequence number on the best router to the latest sequence + 1
         * so we can round-robin all routers; it's racy and inaccurate, but
         * harmless and functional */
-       if (rtr_best != NULL)
-               rtr_best->lr_seq = rtr_last->lr_seq + 1;
+       if (best_route != NULL)
+               best_route->lr_seq = last_route->lr_seq + 1;
        return lp_best;
 }
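
The open-coded liveness test (gateway down, or gateway publishes per-NI status and route->lr_downis != 0) is folded into lnet_is_route_alive(), which is what lets down routes be reported consistently, e.g. in /proc/sys/lnet/routes per the commit subject. A sketch of the helper as reconstructed from the removed condition (its real definition is elsewhere in the series):

        /* assumed equivalent of the removed inline check */
        static inline int
        lnet_is_route_alive(lnet_route_t *route)
        {
                lnet_peer_t *gw = route->lr_gateway;

                if (!gw->lp_alive)              /* gateway is down */
                        return 0;
                if ((gw->lp_ping_feats & LNET_PING_FEAT_NI_STATUS) != 0 &&
                    route->lr_downis != 0)      /* NI to target is down */
                        return 0;
                return 1;
        }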
 
@@ -1270,11 +1280,11 @@ lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid)
                LASSERT(src_nid != LNET_NID_ANY);
                lnet_msg_commit(msg, cpt);
 
-                if (!msg->msg_routing)
-                        msg->msg_hdr.src_nid = cpu_to_le64(src_nid);
+               if (!msg->msg_routing)
+                       msg->msg_hdr.src_nid = cpu_to_le64(src_nid);
 
-                if (src_ni == the_lnet.ln_loni) {
-                        /* No send credit hassles with LOLND */
+               if (src_ni == the_lnet.ln_loni) {
+                       /* No send credit hassles with LOLND */
                        lnet_net_unlock(cpt);
                        lnet_ni_send(src_ni, msg);
 
@@ -1408,8 +1418,8 @@ lnet_recv_put(lnet_ni_t *ni, lnet_msg_t *msg)
 
        lnet_build_msg_event(msg, LNET_EVENT_PUT);
 
-        /* Must I ACK?  If so I'll grab the ack_wmd out of the header and put
-         * it back into the ACK during lnet_finalize() */
+       /* Must I ACK?  If so I'll grab the ack_wmd out of the header and put
+        * it back into the ACK during lnet_finalize() */
        msg->msg_ack = (!lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) &&
                        (msg->msg_md->md_options & LNET_MD_ACK_DISABLE) == 0);
 
@@ -1523,7 +1533,7 @@ lnet_parse_get(lnet_ni_t *ni, lnet_msg_t *msg, int rdma_get)
        rc = lnet_send(ni->ni_nid, msg, LNET_NID_ANY);
        if (rc < 0) {
                /* didn't get as far as lnet_ni_send() */
-                CERROR("%s: Unable to send REPLY for GET from %s: %d\n",
+               CERROR("%s: Unable to send REPLY for GET from %s: %d\n",
                       libcfs_nid2str(ni->ni_nid),
                       libcfs_id2str(info.mi_id), rc);
 
@@ -1693,6 +1703,7 @@ lnet_msgtyp2str (int type)
                 return ("<UNKNOWN>");
         }
 }
+EXPORT_SYMBOL(lnet_msgtyp2str);
 
 void
 lnet_print_hdr(lnet_hdr_t * hdr)
@@ -1766,16 +1777,16 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
        struct lnet_msg *msg;
         lnet_pid_t     dest_pid;
         lnet_nid_t     dest_nid;
-        lnet_nid_t     src_nid;
-        __u32          payload_length;
-        __u32          type;
+       lnet_nid_t     src_nid;
+       __u32          payload_length;
+       __u32          type;
 
-        LASSERT (!cfs_in_interrupt ());
+       LASSERT (!in_interrupt ());
 
-        type = le32_to_cpu(hdr->type);
-        src_nid = le64_to_cpu(hdr->src_nid);
-        dest_nid = le64_to_cpu(hdr->dest_nid);
-        dest_pid = le32_to_cpu(hdr->dest_pid);
+       type = le32_to_cpu(hdr->type);
+       src_nid = le64_to_cpu(hdr->src_nid);
+       dest_nid = le64_to_cpu(hdr->dest_nid);
+       dest_pid = le32_to_cpu(hdr->dest_pid);
         payload_length = le32_to_cpu(hdr->payload_length);
 
         for_me = (ni->ni_nid == dest_nid);
@@ -1977,6 +1988,7 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
        lnet_drop_message(ni, cpt, private, payload_length);
        return 0;
 }
+EXPORT_SYMBOL(lnet_parse);
 
 void
 lnet_drop_delayed_msg_list(cfs_list_t *head, char *reason)
@@ -2011,12 +2023,12 @@ lnet_drop_delayed_msg_list(cfs_list_t *head, char *reason)
                lnet_drop_message(msg->msg_rxpeer->lp_ni,
                                  msg->msg_rxpeer->lp_cpt,
                                  msg->msg_private, msg->msg_len);
-
-               lnet_net_lock(msg->msg_rxpeer->lp_cpt);
-               lnet_peer_decref_locked(msg->msg_rxpeer);
-               lnet_net_unlock(msg->msg_rxpeer->lp_cpt);
-
-               lnet_msg_free(msg);
+               /*
+                * NB: the message will not generate an event because it has no
+                * attached MD, but we still pass an error code so that
+                * lnet_msg_decommit() can skip counter updates and other checks.
+                */
+               lnet_finalize(msg->msg_rxpeer->lp_ni, msg, -ENOENT);
        }
 }
 
@@ -2124,7 +2136,7 @@ LNetPut(lnet_nid_t self, lnet_handle_md_t mdh, lnet_ack_req_t ack,
                        libcfs_id2str(target));
                 return -ENOMEM;
         }
-        msg->msg_vmflush = !!cfs_memory_pressure_get();
+       msg->msg_vmflush = !!memory_pressure_get();
 
        cpt = lnet_cpt_of_cookie(mdh.cookie);
        lnet_res_lock(cpt);
@@ -2181,6 +2193,7 @@ LNetPut(lnet_nid_t self, lnet_handle_md_t mdh, lnet_ack_req_t ack,
         /* completion will be signalled by an event */
         return 0;
 }
+EXPORT_SYMBOL(LNetPut);
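
LNetPut() (and, below, LNetGet(), lnet_parse(), LNetDist() and the reply-message helpers) are now exported so in-kernel users such as LNDs and lnet-selftest can call LNet directly. A minimal, hedged sketch of issuing a PUT from kernel code; the event queue, portal, match bits, peer and buffer are all hypothetical:

        lnet_process_id_t target = { .nid = peer_nid, .pid = peer_pid };
        lnet_handle_md_t  mdh;
        lnet_md_t         md = {
                .start     = buf,       /* payload to send */
                .length    = buf_len,
                .threshold = 2,         /* expect SEND + ACK events */
                .options   = 0,
                .user_ptr  = NULL,
                .eq_handle = eqh,       /* from a prior LNetEQAlloc() */
        };
        int rc;

        rc = LNetMDBind(md, LNET_UNLINK, &mdh);
        if (rc == 0)
                rc = LNetPut(LNET_NID_ANY, mdh, LNET_ACK_REQ, target,
                             portal, match_bits, 0 /* offset */, 0 /* hdr_data */);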
 
 lnet_msg_t *
 lnet_create_reply_msg (lnet_ni_t *ni, lnet_msg_t *getmsg)
@@ -2257,6 +2270,7 @@ lnet_create_reply_msg (lnet_ni_t *ni, lnet_msg_t *getmsg)
 
        return NULL;
 }
+EXPORT_SYMBOL(lnet_create_reply_msg);
 
 void
 lnet_set_reply_msg_len(lnet_ni_t *ni, lnet_msg_t *reply, unsigned int len)
@@ -2273,6 +2287,7 @@ lnet_set_reply_msg_len(lnet_ni_t *ni, lnet_msg_t *reply, unsigned int len)
 
         reply->msg_ev.mlength = len;
 }
+EXPORT_SYMBOL(lnet_set_reply_msg_len);
 
 /**
  * Initiate an asynchronous GET operation.
@@ -2295,9 +2310,9 @@ lnet_set_reply_msg_len(lnet_ni_t *ni, lnet_msg_t *reply, unsigned int len)
  * \retval -ENOENT Invalid MD object.
  */
 int
-LNetGet(lnet_nid_t self, lnet_handle_md_t mdh, 
-        lnet_process_id_t target, unsigned int portal, 
-        __u64 match_bits, unsigned int offset)
+LNetGet(lnet_nid_t self, lnet_handle_md_t mdh,
+       lnet_process_id_t target, unsigned int portal,
+       __u64 match_bits, unsigned int offset)
 {
        struct lnet_msg         *msg;
        struct lnet_libmd       *md;
@@ -2372,6 +2387,7 @@ LNetGet(lnet_nid_t self, lnet_handle_md_t mdh,
         /* completion will be signalled by an event */
         return 0;
 }
+EXPORT_SYMBOL(LNetGet);
 
 /**
  * Calculate distance to node at \a dstnid.
@@ -2397,6 +2413,7 @@ LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
        int                     hops;
        int                     cpt;
        __u32                   order = 2;
+       cfs_list_t              *rn_list;
 
         /* if !local_nid_dist_zero, I don't return a distance of 0 ever
          * (when lustre sees a distance of 0, it substitutes 0@lo), so I
@@ -2431,13 +2448,14 @@ LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
                         if (orderp != NULL)
                                 *orderp = order;
                        lnet_net_unlock(cpt);
-                        return 1;
-                }
+                       return 1;
+               }
 
-                order++;
-        }
+               order++;
+       }
 
-        cfs_list_for_each (e, &the_lnet.ln_remote_nets) {
+       rn_list = lnet_net2rnethash(dstnet);
+       cfs_list_for_each(e, rn_list) {
                 rnet = cfs_list_entry(e, lnet_remotenet_t, lrn_list);
 
                 if (rnet->lrn_net == dstnet) {
@@ -2468,6 +2486,7 @@ LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
        lnet_net_unlock(cpt);
        return -EHOSTUNREACH;
 }
+EXPORT_SYMBOL(LNetDist);
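
LNetDist() now searches only the bucket returned by lnet_net2rnethash(dstnet) instead of scanning the whole remote-net list, which presumably keeps the lookup cheap as the routing table grows; the exported interface is unchanged. A small usage sketch (dstnid is hypothetical):

        lnet_nid_t src_nid;
        __u32      order;
        int        hops = LNetDist(dstnid, &src_nid, &order);

        if (hops < 0)           /* e.g. -EHOSTUNREACH: no route to dstnid */
                CERROR("no route to %s\n", libcfs_nid2str(dstnid));
        else
                CDEBUG(D_NET, "%s is %d hop(s) away via %s\n",
                       libcfs_nid2str(dstnid), hops, libcfs_nid2str(src_nid));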
 
 /**
  * Set the number of asynchronous messages expected from a target process.
@@ -2558,3 +2577,4 @@ LNetSetAsync(lnet_process_id_t id, int nasync)
         return rc;
 #endif
 }
+EXPORT_SYMBOL(LNetSetAsync);