/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2012, 2013, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
return (nob);
}
+EXPORT_SYMBOL(lnet_iov_nob);
void
lnet_copy_iov2iov (unsigned int ndiov, struct iovec *diov, unsigned int doffset,
}
} while (nob > 0);
}
+EXPORT_SYMBOL(lnet_copy_iov2iov);
int
lnet_extract_iov (int dst_niov, struct iovec *dst,
offset = 0;
}
}
+EXPORT_SYMBOL(lnet_extract_iov);
#ifndef __KERNEL__
unsigned int
return (nob);
}
+EXPORT_SYMBOL(lnet_kiov_nob);
void
lnet_copy_kiov2kiov (unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset,
siov->kiov_len - soffset);
this_nob = MIN(this_nob, nob);
- if (daddr == NULL)
- daddr = ((char *)cfs_kmap(diov->kiov_page)) +
- diov->kiov_offset + doffset;
- if (saddr == NULL)
- saddr = ((char *)cfs_kmap(siov->kiov_page)) +
- siov->kiov_offset + soffset;
+ if (daddr == NULL)
+ daddr = ((char *)kmap(diov->kiov_page)) +
+ diov->kiov_offset + doffset;
+ if (saddr == NULL)
+ saddr = ((char *)kmap(siov->kiov_page)) +
+ siov->kiov_offset + soffset;
- /* Vanishing risk of kmap deadlock when mapping 2 pages.
- * However in practice at least one of the kiovs will be mapped
- * kernel pages and the map/unmap will be NOOPs */
+ /* Vanishing risk of kmap deadlock when mapping 2 pages.
+ * However, in practice at least one of the kiovs will consist of
+ * already-mapped kernel pages, so the map/unmap calls are NOOPs */
- memcpy (daddr, saddr, this_nob);
- nob -= this_nob;
+ memcpy (daddr, saddr, this_nob);
+ nob -= this_nob;
- if (diov->kiov_len > doffset + this_nob) {
- daddr += this_nob;
- doffset += this_nob;
- } else {
- cfs_kunmap(diov->kiov_page);
- daddr = NULL;
- diov++;
- ndiov--;
- doffset = 0;
- }
+ if (diov->kiov_len > doffset + this_nob) {
+ daddr += this_nob;
+ doffset += this_nob;
+ } else {
+ kunmap(diov->kiov_page);
+ daddr = NULL;
+ diov++;
+ ndiov--;
+ doffset = 0;
+ }
- if (siov->kiov_len > soffset + this_nob) {
- saddr += this_nob;
- soffset += this_nob;
- } else {
- cfs_kunmap(siov->kiov_page);
- saddr = NULL;
- siov++;
- nsiov--;
- soffset = 0;
- }
- } while (nob > 0);
+ if (siov->kiov_len > soffset + this_nob) {
+ saddr += this_nob;
+ soffset += this_nob;
+ } else {
+ kunmap(siov->kiov_page);
+ saddr = NULL;
+ siov++;
+ nsiov--;
+ soffset = 0;
+ }
+ } while (nob > 0);
- if (daddr != NULL)
- cfs_kunmap(diov->kiov_page);
- if (saddr != NULL)
- cfs_kunmap(siov->kiov_page);
+ if (daddr != NULL)
+ kunmap(diov->kiov_page);
+ if (saddr != NULL)
+ kunmap(siov->kiov_page);
}
+EXPORT_SYMBOL(lnet_copy_kiov2kiov);
void
lnet_copy_kiov2iov (unsigned int niov, struct iovec *iov, unsigned int iovoffset,
this_nob = MIN(this_nob, nob);
if (addr == NULL)
- addr = ((char *)cfs_kmap(kiov->kiov_page)) +
+ addr = ((char *)kmap(kiov->kiov_page)) +
kiov->kiov_offset + kiovoffset;
memcpy ((char *)iov->iov_base + iovoffset, addr, this_nob);
addr += this_nob;
kiovoffset += this_nob;
} else {
- cfs_kunmap(kiov->kiov_page);
- addr = NULL;
- kiov++;
- nkiov--;
- kiovoffset = 0;
- }
+ kunmap(kiov->kiov_page);
+ addr = NULL;
+ kiov++;
+ nkiov--;
+ kiovoffset = 0;
+ }
- } while (nob > 0);
+ } while (nob > 0);
- if (addr != NULL)
- cfs_kunmap(kiov->kiov_page);
+ if (addr != NULL)
+ kunmap(kiov->kiov_page);
}
+EXPORT_SYMBOL(lnet_copy_kiov2iov);
void
lnet_copy_iov2kiov (unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset,
this_nob = MIN(this_nob, nob);
if (addr == NULL)
- addr = ((char *)cfs_kmap(kiov->kiov_page)) +
- kiov->kiov_offset + kiovoffset;
+ addr = ((char *)kmap(kiov->kiov_page)) +
+ kiov->kiov_offset + kiovoffset;
- memcpy (addr, (char *)iov->iov_base + iovoffset, this_nob);
- nob -= this_nob;
+ memcpy (addr, (char *)iov->iov_base + iovoffset, this_nob);
+ nob -= this_nob;
- if (kiov->kiov_len > kiovoffset + this_nob) {
- addr += this_nob;
- kiovoffset += this_nob;
- } else {
- cfs_kunmap(kiov->kiov_page);
- addr = NULL;
- kiov++;
- nkiov--;
- kiovoffset = 0;
- }
+ if (kiov->kiov_len > kiovoffset + this_nob) {
+ addr += this_nob;
+ kiovoffset += this_nob;
+ } else {
+ kunmap(kiov->kiov_page);
+ addr = NULL;
+ kiov++;
+ nkiov--;
+ kiovoffset = 0;
+ }
- if (iov->iov_len > iovoffset + this_nob) {
- iovoffset += this_nob;
- } else {
- iov++;
- niov--;
- iovoffset = 0;
- }
- } while (nob > 0);
+ if (iov->iov_len > iovoffset + this_nob) {
+ iovoffset += this_nob;
+ } else {
+ iov++;
+ niov--;
+ iovoffset = 0;
+ }
+ } while (nob > 0);
- if (addr != NULL)
- cfs_kunmap(kiov->kiov_page);
+ if (addr != NULL)
+ kunmap(kiov->kiov_page);
}
+EXPORT_SYMBOL(lnet_copy_iov2kiov);
int
lnet_extract_kiov (int dst_niov, lnet_kiov_t *dst,
dst->kiov_page = src->kiov_page;
dst->kiov_offset = src->kiov_offset + offset;
- if (len <= frag_len) {
- dst->kiov_len = len;
- LASSERT (dst->kiov_offset + dst->kiov_len <= CFS_PAGE_SIZE);
- return (niov);
- }
+ if (len <= frag_len) {
+ dst->kiov_len = len;
+ LASSERT (dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE);
+ return niov;
+ }
- dst->kiov_len = frag_len;
- LASSERT (dst->kiov_offset + dst->kiov_len <= CFS_PAGE_SIZE);
+ dst->kiov_len = frag_len;
+ LASSERT (dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE);
len -= frag_len;
dst++;
offset = 0;
}
}
+EXPORT_SYMBOL(lnet_extract_kiov);
#endif
void
LASSERT(!msg->msg_sending);
LASSERT(msg->msg_receiving);
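+ /* eager receive must not already have been requested for this message */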
+ LASSERT(!msg->msg_rx_ready_delay);
LASSERT(ni->ni_lnd->lnd_eager_recv != NULL);
msg->msg_rx_ready_delay = 1;
void
lnet_ni_query_locked(lnet_ni_t *ni, lnet_peer_t *lp)
{
- cfs_time_t last_alive = 0;
+ cfs_time_t last_alive = 0;
LASSERT(lnet_peer_aliveness_enabled(lp));
LASSERT(ni->ni_lnd->lnd_query != NULL);
- LASSERT(the_lnet.ln_routing == 1);
lnet_net_unlock(lp->lp_cpt);
(ni->ni_lnd->lnd_query)(ni, lp->lp_nid, &last_alive);
cfs_time_t deadline;
LASSERT (lnet_peer_aliveness_enabled(lp));
- LASSERT (the_lnet.ln_routing == 1);
/* Trust lnet_notify() if it has more recent aliveness news, but
* ignore the initial assumed death (see lnet_peers_start_down()).
{
cfs_time_t now = cfs_time_current();
- /* LU-630: only router checks peer health. */
- if (the_lnet.ln_routing == 0)
- return 1;
-
if (!lnet_peer_aliveness_enabled(lp))
return -ENODEV;
rbp = &the_lnet.ln_rtrpools[cpt][0];
LASSERT(msg->msg_len <= LNET_MTU);
- while (msg->msg_len > (unsigned int)rbp->rbp_npages * CFS_PAGE_SIZE) {
+ while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_CACHE_SIZE) {
rbp++;
LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]);
}
lnet_peer_t *p1 = r1->lr_gateway;
lnet_peer_t *p2 = r2->lr_gateway;
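+ /* a numerically lower lr_priority is the more preferred route;
+ * priority is compared before hop count */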
+ if (r1->lr_priority < r2->lr_priority)
+ return 1;
+
+ if (r1->lr_priority > r2->lr_priority)
+ return -1;
+
if (r1->lr_hops < r2->lr_hops)
return 1;
lp = rtr->lr_gateway;
if (!lp->lp_alive || /* gateway is down */
- (lp->lp_ping_version == LNET_PROTO_PING_VERSION &&
+ ((lp->lp_ping_feats & LNET_PING_FEAT_NI_STATUS) != 0 &&
rtr->lr_downis != 0)) /* NI to target is down */
continue;
LASSERT(src_nid != LNET_NID_ANY);
lnet_msg_commit(msg, cpt);
- if (!msg->msg_routing)
- msg->msg_hdr.src_nid = cpu_to_le64(src_nid);
+ if (!msg->msg_routing)
+ msg->msg_hdr.src_nid = cpu_to_le64(src_nid);
- if (src_ni == the_lnet.ln_loni) {
- /* No send credit hassles with LOLND */
+ if (src_ni == the_lnet.ln_loni) {
+ /* No send credit hassles with LOLND */
lnet_net_unlock(cpt);
lnet_ni_send(src_ni, msg);
* pre-determined router, this can happen if router table
* was changed when we release the lock */
if (rtr_nid != lp->lp_nid) {
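+ /* the LNet net lock is already held here, so use the _locked CPT lookup */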
- cpt2 = lnet_cpt_of_nid(lp->lp_nid);
+ cpt2 = lnet_cpt_of_nid_locked(lp->lp_nid);
if (cpt2 != cpt) {
if (src_ni != NULL)
lnet_ni_decref_locked(src_ni, cpt);
lnet_build_msg_event(msg, LNET_EVENT_PUT);
- /* Must I ACK? If so I'll grab the ack_wmd out of the header and put
- * it back into the ACK during lnet_finalize() */
+ /* Must I ACK? If so I'll grab the ack_wmd out of the header and put
+ * it back into the ACK during lnet_finalize() */
msg->msg_ack = (!lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) &&
(msg->msg_md->md_options & LNET_MD_ACK_DISABLE) == 0);
rc = lnet_send(ni->ni_nid, msg, LNET_NID_ANY);
if (rc < 0) {
/* didn't get as far as lnet_ni_send() */
- CERROR("%s: Unable to send REPLY for GET from %s: %d\n",
+ CERROR("%s: Unable to send REPLY for GET from %s: %d\n",
libcfs_nid2str(ni->ni_nid),
libcfs_id2str(info.mi_id), rc);
lnet_msg_attach_md(msg, md, 0, mlength);
- if (mlength != 0)
- lnet_setpayloadbuffer(msg);
+ if (mlength != 0)
+ lnet_setpayloadbuffer(msg);
lnet_res_unlock(cpt);
lnet_build_msg_event(msg, LNET_EVENT_REPLY);
- lnet_ni_recv(ni, private, msg, 0, 0, mlength, rlength);
- return 0;
+ lnet_ni_recv(ni, private, msg, 0, 0, mlength, rlength);
+ return 0;
}
static int
return ("<UNKNOWN>");
}
}
+EXPORT_SYMBOL(lnet_msgtyp2str);
void
lnet_print_hdr(lnet_hdr_t * hdr)
lnet_drop_message(ni, cpt, private, payload_length);
return 0;
}
+EXPORT_SYMBOL(lnet_parse);
void
lnet_drop_delayed_msg_list(cfs_list_t *head, char *reason)
lnet_drop_message(msg->msg_rxpeer->lp_ni,
msg->msg_rxpeer->lp_cpt,
msg->msg_private, msg->msg_len);
-
- lnet_net_lock(msg->msg_rxpeer->lp_cpt);
- lnet_peer_decref_locked(msg->msg_rxpeer);
- lnet_net_unlock(msg->msg_rxpeer->lp_cpt);
-
- lnet_msg_free(msg);
+ /*
+ * NB: the message will not generate an event because it has no
+ * attached MD, but we should still pass an error code so that
+ * lnet_msg_decommit() can skip counter updates and other checks.
+ */
+ lnet_finalize(msg->msg_rxpeer->lp_ni, msg, -ENOENT);
}
}
libcfs_id2str(target));
return -ENOMEM;
}
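+ /* note whether this send is being issued while the caller is under
+ * memory pressure (VM flush) */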
- msg->msg_vmflush = !!cfs_memory_pressure_get();
+ msg->msg_vmflush = !!memory_pressure_get();
cpt = lnet_cpt_of_cookie(mdh.cookie);
lnet_res_lock(cpt);
lnet_res_unlock(cpt);
lnet_msg_free(msg);
- return -ENOENT;
- }
+ return -ENOENT;
+ }
- CDEBUG(D_NET, "LNetPut -> %s\n", libcfs_id2str(target));
+ CDEBUG(D_NET, "LNetPut -> %s\n", libcfs_id2str(target));
lnet_msg_attach_md(msg, md, 0, 0);
/* completion will be signalled by an event */
return 0;
}
+EXPORT_SYMBOL(LNetPut);
lnet_msg_t *
lnet_create_reply_msg (lnet_ni_t *ni, lnet_msg_t *getmsg)
getmd);
lnet_res_unlock(cpt);
goto drop;
- }
+ }
- LASSERT (getmd->md_offset == 0);
+ LASSERT(getmd->md_offset == 0);
CDEBUG(D_NET, "%s: Reply from %s md %p\n",
libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id), getmd);
/* setup information for lnet_build_msg_event */
msg->msg_from = peer_id.nid;
- msg->msg_type = LNET_MSG_GET; /* flag this msg as an "optimized" GET */
+ msg->msg_type = LNET_MSG_GET; /* flag this msg as an "optimized" GET */
msg->msg_hdr.src_nid = peer_id.nid;
msg->msg_hdr.payload_length = getmd->md_length;
msg->msg_receiving = 1; /* required by lnet_msg_attach_md */
return NULL;
}
+EXPORT_SYMBOL(lnet_create_reply_msg);
void
lnet_set_reply_msg_len(lnet_ni_t *ni, lnet_msg_t *reply, unsigned int len)
reply->msg_ev.mlength = len;
}
+EXPORT_SYMBOL(lnet_set_reply_msg_len);
/**
* Initiate an asynchronous GET operation.
* \retval -ENOENT Invalid MD object.
*/
int
-LNetGet(lnet_nid_t self, lnet_handle_md_t mdh,
- lnet_process_id_t target, unsigned int portal,
- __u64 match_bits, unsigned int offset)
+LNetGet(lnet_nid_t self, lnet_handle_md_t mdh,
+ lnet_process_id_t target, unsigned int portal,
+ __u64 match_bits, unsigned int offset)
{
struct lnet_msg *msg;
struct lnet_libmd *md;
/* completion will be signalled by an event */
return 0;
}
+EXPORT_SYMBOL(LNetGet);
/**
* Calculate distance to node at \a dstnid.
int hops;
int cpt;
__u32 order = 2;
+ cfs_list_t *rn_list;
/* if !local_nid_dist_zero, I don't return a distance of 0 ever
* (when lustre sees a distance of 0, it substitutes 0@lo), so I
if (orderp != NULL)
*orderp = order;
lnet_net_unlock(cpt);
- return 1;
- }
+ return 1;
+ }
- order++;
- }
+ order++;
+ }
- cfs_list_for_each (e, &the_lnet.ln_remote_nets) {
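+ /* remote nets are now kept in a hash table keyed by network;
+ * walk only the bucket that can contain dstnet */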
+ rn_list = lnet_net2rnethash(dstnet);
+ cfs_list_for_each(e, rn_list) {
rnet = cfs_list_entry(e, lnet_remotenet_t, lrn_list);
if (rnet->lrn_net == dstnet) {
lnet_net_unlock(cpt);
return -EHOSTUNREACH;
}
+EXPORT_SYMBOL(LNetDist);
/**
* Set the number of asynchronous messages expected from a target process.
return rc;
#endif
}
+EXPORT_SYMBOL(LNetSetAsync);