__u32 sd_send_case;
};
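+/*
+ * ACK and REPLY messages are responses to earlier traffic. Several
+ * paths below special-case them, so the check lives in one helper.
+ */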
+static inline bool
+lnet_msg_is_response(struct lnet_msg *msg)
+{
+ return msg->msg_type == LNET_MSG_ACK || msg->msg_type == LNET_MSG_REPLY;
+}
+
static inline struct lnet_comm_count *
get_stats_counts(struct lnet_element_stats *stats,
enum lnet_stats_type stats_type)
struct lnet_test_peer *tp;
struct list_head *el;
struct list_head *next;
- struct list_head cull;
+ LIST_HEAD(cull);
/* NB: use lnet_net_lock(0) to serialize operations on test peers */
if (threshold != 0) {
return 0;
}
- /* removing entries */
- INIT_LIST_HEAD(&cull);
-
lnet_net_lock(0);
list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
struct lnet_test_peer *tp;
struct list_head *el;
struct list_head *next;
- struct list_head cull;
- int fail = 0;
-
- INIT_LIST_HEAD(&cull);
+ LIST_HEAD(cull);
+ int fail = 0;
/* NB: use lnet_net_lock(0) to serialize operations on test peers */
lnet_net_lock(0);
unsigned int nob)
{
/* NB diov, siov are READ-ONLY */
- unsigned int this_nob;
+ unsigned int this_nob;
if (nob == 0)
return;
do {
LASSERT(ndiov > 0);
LASSERT(nsiov > 0);
- this_nob = MIN(diov->iov_len - doffset,
- siov->iov_len - soffset);
- this_nob = MIN(this_nob, nob);
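+ /* iov_len is size_t; cast so that all three min3()
+ * arguments share a type */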
+ this_nob = min3((unsigned int)diov->iov_len - doffset,
+ (unsigned int)siov->iov_len - soffset,
+ nob);
memcpy((char *)diov->iov_base + doffset,
(char *)siov->iov_base + soffset, this_nob);
do {
LASSERT(ndiov > 0);
LASSERT(nsiov > 0);
- this_nob = MIN(diov->kiov_len - doffset,
- siov->kiov_len - soffset);
- this_nob = MIN(this_nob, nob);
+ this_nob = min3(diov->kiov_len - doffset,
+ siov->kiov_len - soffset,
+ nob);
if (daddr == NULL)
daddr = ((char *)kmap(diov->kiov_page)) +
do {
LASSERT(niov > 0);
LASSERT(nkiov > 0);
- this_nob = MIN(iov->iov_len - iovoffset,
- kiov->kiov_len - kiovoffset);
- this_nob = MIN(this_nob, nob);
+ this_nob = min3((unsigned int)iov->iov_len - iovoffset,
+ (unsigned int)kiov->kiov_len - kiovoffset,
+ nob);
if (addr == NULL)
addr = ((char *)kmap(kiov->kiov_page)) +
do {
LASSERT(nkiov > 0);
LASSERT(niov > 0);
- this_nob = MIN(kiov->kiov_len - kiovoffset,
- iov->iov_len - iovoffset);
- this_nob = MIN(this_nob, nob);
+ this_nob = min3((unsigned int)kiov->kiov_len - kiovoffset,
+ (unsigned int)iov->iov_len - iovoffset,
+ nob);
if (addr == NULL)
addr = ((char *)kmap(kiov->kiov_page)) +
return 1;
/* always send any responses */
- if (msg->msg_type == LNET_MSG_ACK ||
- msg->msg_type == LNET_MSG_REPLY)
+ if (lnet_msg_is_response(msg))
return 1;
if (!lnet_is_peer_deadline_passed(lpni, now))
if (!msg->msg_peerrtrcredit) {
/* lpni_lock protects the credit manipulation */
spin_lock(&lpni->lpni_lock);
- /* lp_lock protects the lp_rtrq */
- spin_lock(&lp->lp_lock);
msg->msg_peerrtrcredit = 1;
lpni->lpni_rtrcredits--;
lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
if (lpni->lpni_rtrcredits < 0) {
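+ /* drop lpni_lock before taking lp_lock, so the two
+ * locks are never held at the same time */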
+ spin_unlock(&lpni->lpni_lock);
/* must have checked eager_recv before here */
LASSERT(msg->msg_rx_ready_delay);
msg->msg_rx_delayed = 1;
+ /* lp_lock protects the lp_rtrq */
+ spin_lock(&lp->lp_lock);
list_add_tail(&msg->msg_list, &lp->lp_rtrq);
spin_unlock(&lp->lp_lock);
- spin_unlock(&lpni->lpni_lock);
return LNET_CREDIT_WAIT;
}
- spin_unlock(&lp->lp_lock);
spin_unlock(&lpni->lpni_lock);
}
LASSERT(rxpeerni->lpni_peer_net);
LASSERT(rxpeerni->lpni_peer_net->lpn_peer);
- lp = rxpeerni->lpni_peer_net->lpn_peer;
-
/* give back peer router credits */
msg->msg_peerrtrcredit = 0;
spin_lock(&rxpeerni->lpni_lock);
- spin_lock(&lp->lp_lock);
-
rxpeerni->lpni_rtrcredits++;
+ spin_unlock(&rxpeerni->lpni_lock);
+
+ lp = rxpeerni->lpni_peer_net->lpn_peer;
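+ /* lpni_lock has been dropped; only lp_lock is held while
+ * the lp_rtrq is manipulated below */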
+ spin_lock(&lp->lp_lock);
/* drop all messages which are queued to be routed on that
* peer. */
if (!the_lnet.ln_routing) {
- struct list_head drop;
- INIT_LIST_HEAD(&drop);
+ LIST_HEAD(drop);
list_splice_init(&lp->lp_rtrq, &drop);
spin_unlock(&lp->lp_lock);
- spin_unlock(&rxpeerni->lpni_lock);
lnet_drop_routed_msgs_locked(&drop, msg->msg_rx_cpt);
} else if (!list_empty(&lp->lp_rtrq)) {
int msg2_cpt;
list_del(&msg2->msg_list);
msg2_cpt = msg2->msg_rx_cpt;
spin_unlock(&lp->lp_lock);
- spin_unlock(&rxpeerni->lpni_lock);
/*
* messages on the lp_rtrq can be from any NID in
* the peer, which means they might have different
}
} else {
spin_unlock(&lp->lp_lock);
- spin_unlock(&rxpeerni->lpni_lock);
}
}
if (rxni != NULL) {
*/
peer_net = lnet_peer_get_net_locked(peer, net_id);
- if (!peer_net) {
- CERROR("gateway peer %s has no NI on net %s\n",
- libcfs_nid2str(peer->lp_primary_nid),
- libcfs_net2str(net_id));
- return NULL;
- }
-
return lnet_select_peer_ni(lni, dst_nid, peer, peer_net);
}
}
static struct lnet_route *
-lnet_find_route_locked(struct lnet_remotenet *rnet,
+lnet_find_route_locked(struct lnet_remotenet *rnet, __u32 src_net,
struct lnet_route **prev_route,
struct lnet_peer_ni **gwni)
{
if (!lnet_is_route_alive(route))
continue;
+ /*
+ * Restrict the selection of the router NI to the src_net
+ * provided. If the src_net is LNET_NID_ANY, then select
+ * the best interface available.
+ */
if (!best_route) {
- best_route = last_route = route;
- best_gw_ni = lnet_find_best_lpni_on_net(NULL,
- LNET_NID_ANY,
- route->lr_gateway,
- route->lr_lnet);
- LASSERT(best_gw_ni);
+ lpni = lnet_find_best_lpni_on_net(NULL, LNET_NID_ANY,
+ route->lr_gateway,
+ src_net);
+ if (lpni) {
+ best_route = last_route = route;
+ best_gw_ni = lpni;
+ } else {
+ CERROR("Gateway %s does not have a peer NI on net %s\n",
+ libcfs_nid2str(route->lr_gateway->lp_primary_nid),
+ libcfs_net2str(src_net));
+ }
+
continue;
}
lpni = lnet_find_best_lpni_on_net(NULL, LNET_NID_ANY,
route->lr_gateway,
- route->lr_lnet);
- LASSERT(lpni);
+ src_net);
+ if (!lpni) {
+ CERROR("Gateway %s does not have a peer NI on net %s\n",
+ libcfs_nid2str(route->lr_gateway->lp_primary_nid),
+ libcfs_net2str(src_net));
+ continue;
+ }
if (rc == 1) {
best_route = route;
}
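+/*
+ * Record the chosen local NI as the peer_ni's preferred NID if it has
+ * none yet. Responses never establish a preference; checking for the
+ * NMR_DST case is left to the callers.
+ */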
static inline void
-lnet_set_non_mr_pref_nid(struct lnet_send_data *sd)
+lnet_set_non_mr_pref_nid(struct lnet_peer_ni *lpni, struct lnet_ni *lni,
+ struct lnet_msg *msg)
{
- if (sd->sd_send_case & NMR_DST &&
- sd->sd_msg->msg_type != LNET_MSG_REPLY &&
- sd->sd_msg->msg_type != LNET_MSG_ACK &&
- sd->sd_best_lpni->lpni_pref_nnids == 0) {
+ if (!lnet_msg_is_response(msg) && lpni->lpni_pref_nnids == 0) {
CDEBUG(D_NET, "Setting preferred local NID %s on NMR peer %s\n",
- libcfs_nid2str(sd->sd_best_ni->ni_nid),
- libcfs_nid2str(sd->sd_best_lpni->lpni_nid));
- lnet_peer_ni_set_non_mr_pref_nid(sd->sd_best_lpni,
- sd->sd_best_ni->ni_nid);
+ libcfs_nid2str(lni->ni_nid),
+ libcfs_nid2str(lpni->lpni_nid));
+ lnet_peer_ni_set_non_mr_pref_nid(lpni, lni->ni_nid);
}
}
return -EINVAL;
}
- /*
- * the preferred NID will only be set for NMR peers
- */
- lnet_set_non_mr_pref_nid(sd);
+ lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
return lnet_handle_send(sd);
}
}
static int
-lnet_initiate_peer_discovery(struct lnet_peer_ni *lpni,
- struct lnet_msg *msg, lnet_nid_t rtr_nid,
+lnet_initiate_peer_discovery(struct lnet_peer_ni *lpni, struct lnet_msg *msg,
int cpt)
{
struct lnet_peer *peer;
- lnet_nid_t primary_nid;
int rc;
lnet_peer_ni_addref_locked(lpni);
return 0;
}
/* queue message and return */
- msg->msg_rtr_nid_param = rtr_nid;
msg->msg_sending = 0;
msg->msg_txpeer = NULL;
list_add_tail(&msg->msg_list, &peer->lp_dc_pendq);
- primary_nid = peer->lp_primary_nid;
spin_unlock(&peer->lp_lock);
lnet_peer_ni_decref_locked(lpni);
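+ /* lp_primary_nid is read after dropping lp_lock; it is used
+ * for debug logging only */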
CDEBUG(D_NET, "msg %p delayed. %s pending discovery\n",
- msg, libcfs_nid2str(primary_nid));
+ msg, libcfs_nid2str(peer->lp_primary_nid));
return LNET_DC_WAIT;
}
return -EHOSTUNREACH;
}
- best_route = lnet_find_route_locked(best_rnet, &last_route,
- &gwni);
+ best_route = lnet_find_route_locked(best_rnet,
+ LNET_NIDNET(src_nid),
+ &last_route, &gwni);
if (!best_route) {
CERROR("no route to %s from %s\n",
libcfs_nid2str(dst_nid),
* completed
*/
sd->sd_msg->msg_src_nid_param = sd->sd_src_nid;
- rc = lnet_initiate_peer_discovery(gwni, sd->sd_msg, sd->sd_rtr_nid,
- sd->sd_cpt);
+ rc = lnet_initiate_peer_discovery(gwni, sd->sd_msg, sd->sd_cpt);
if (rc)
return rc;
if (sd->sd_send_case & NMR_DST)
/*
- * since the final destination is non-MR let's set its preferred
- * NID before we send
- */
- lnet_set_non_mr_pref_nid(sd);
+ * since the final destination is non-MR let's set its preferred
+ * NID before we send
+ */
+ lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni,
+ sd->sd_msg);
/*
* We're going to send to the gw found so let's set its
{
struct lnet_peer_net *peer_net = NULL;
struct lnet_ni *best_ni = NULL;
+ int lpn_healthv = 0;
/*
* The peer can have multiple interfaces, some of them can be on
*/
if (!lnet_get_net_locked(peer_net->lpn_net_id))
continue;
- best_ni = lnet_find_best_ni_on_spec_net(best_ni, peer,
- peer_net, md_cpt, false);
+
+ /* always select the lpn with the best health; lpns of
+ * equal health are all considered */
+ if (lpn_healthv <= peer_net->lpn_healthv)
+ lpn_healthv = peer_net->lpn_healthv;
+ else
+ continue;
+
+ best_ni = lnet_find_best_ni_on_spec_net(best_ni, peer, peer_net,
+ md_cpt, false);
/*
* if this is a discovery message and lp_disc_net_id is
}
static struct lnet_ni *
-lnet_find_existing_preferred_best_ni(struct lnet_send_data *sd)
+lnet_find_existing_preferred_best_ni(struct lnet_peer_ni *lpni, int cpt)
{
struct lnet_ni *best_ni = NULL;
- struct lnet_peer_net *peer_net;
- struct lnet_peer *peer = sd->sd_peer;
- struct lnet_peer_ni *best_lpni = sd->sd_best_lpni;
- struct lnet_peer_ni *lpni;
- int cpt = sd->sd_cpt;
+ struct lnet_peer_net *peer_net = lpni->lpni_peer_net;
+ struct lnet_peer_ni *lpni_entry;
/*
* We must use a consistent source address when sending to a
* So we need to pick the NI the peer prefers for this
* particular network.
*/
-
- /* Get the target peer_ni */
- peer_net = lnet_peer_get_net_locked(peer,
- LNET_NIDNET(best_lpni->lpni_nid));
- LASSERT(peer_net != NULL);
- list_for_each_entry(lpni, &peer_net->lpn_peer_nis,
- lpni_peer_nis) {
- if (lpni->lpni_pref_nnids == 0)
+ LASSERT(peer_net);
+ list_for_each_entry(lpni_entry, &peer_net->lpn_peer_nis,
+ lpni_peer_nis) {
+ if (lpni_entry->lpni_pref_nnids == 0)
continue;
- LASSERT(lpni->lpni_pref_nnids == 1);
- best_ni = lnet_nid2ni_locked(
- lpni->lpni_pref.nid, cpt);
+ LASSERT(lpni_entry->lpni_pref_nnids == 1);
+ best_ni = lnet_nid2ni_locked(lpni_entry->lpni_pref.nid, cpt);
break;
}
* particular network.
*/
- best_ni = lnet_find_existing_preferred_best_ni(sd);
+ best_ni = lnet_find_existing_preferred_best_ni(sd->sd_best_lpni,
+ sd->sd_cpt);
/* if best_ni is still not set just pick one */
if (!best_ni) {
sd->sd_best_ni = best_ni;
/* Set preferred NI if necessary. */
- lnet_set_non_mr_pref_nid(sd);
+ lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
return 0;
}
struct lnet_peer *gw_peer = NULL;
/*
- * Let's set if we have a preferred NI to talk to this NMR peer
+ * Let's see if we have a preferred NI to talk to this NMR peer
*/
- sd->sd_best_ni = lnet_find_existing_preferred_best_ni(sd);
+ sd->sd_best_ni = lnet_find_existing_preferred_best_ni(sd->sd_best_lpni,
+ sd->sd_cpt);
/*
* find the router and that'll find the best NI if we didn't find
* set the best_ni we've chosen as the preferred one for
* this peer
*/
- lnet_set_non_mr_pref_nid(sd);
+ lnet_set_non_mr_pref_nid(sd->sd_best_lpni, sd->sd_best_ni, sd->sd_msg);
/* we'll be sending to the gw */
sd->sd_best_lpni = gw_lpni;
again:
/*
- * If we're sending to ourselves then there is no need to go through
- * any selection. We can shortcut the entire process and send over
- * lolnd.
- *
- * However, we make two exceptions to this rule:
- * 1. If the src_nid is specified then our API defines that we must send
- * via that interface.
- * 2. Recovery messages must be sent to the lnet_ni that is being
- * recovered.
+ * If we're being asked to send to the loopback interface, there
+ * is no need to go through any selection. We can just shortcut
+ * the entire process and send over lolnd.
*/
send_data.sd_msg = msg;
send_data.sd_cpt = cpt;
- if (src_nid == LNET_NID_ANY && !msg->msg_recovery &&
- lnet_nid2ni_locked(dst_nid, cpt)) {
+ if (LNET_NETTYP(LNET_NIDNET(dst_nid)) == LOLND) {
rc = lnet_handle_lo_send(&send_data);
lnet_net_unlock(cpt);
return rc;
}
/*
- * Cache the original src_nid. If we need to resend the message
- * then we'll need to know whether the src_nid was originally
+ * Cache the original src_nid and rtr_nid. If we need to resend the
+ * message then we'll need to know whether the src_nid was originally
* specified for this message. If it was originally specified,
* then we need to keep using the same src_nid since it's
- * continuing the same sequence of messages.
+ * continuing the same sequence of messages. Similarly, rtr_nid will
+ * affect our choice of next hop.
*/
msg->msg_src_nid_param = src_nid;
+ msg->msg_rtr_nid_param = rtr_nid;
/*
* If necessary, perform discovery on the peer that owns this peer_ni.
* Note, this can result in the ownership of this peer_ni changing
* to another peer object.
*/
- rc = lnet_initiate_peer_discovery(lpni, msg, rtr_nid, cpt);
+ rc = lnet_initiate_peer_discovery(lpni, msg, cpt);
if (rc) {
lnet_peer_ni_decref_locked(lpni);
lnet_net_unlock(cpt);
send_case |= REMOTE_DST;
/*
- * if this is a non-MR peer or if we're recovering a peer ni then
- * let's consider this an NMR case so we can hit the destination
- * NID.
+ * Deal with the peer as NMR in the following cases:
+ * 1. The peer is NMR
+ * 2. We're trying to recover a specific peer NI
+ * 3. I'm a router sending to the final destination.
+ * In this case the source of the message has already
+ * selected the final destination, so my job is to
+ * honor that selection.
*/
- if (!lnet_peer_is_multi_rail(peer) || msg->msg_recovery)
+ if (!lnet_peer_is_multi_rail(peer) || msg->msg_recovery ||
+ (msg->msg_routing && (send_case & LOCAL_DST)))
send_case |= NMR_DST;
else
send_case |= MR_DST;
- if (msg->msg_type == LNET_MSG_REPLY ||
- msg->msg_type == LNET_MSG_ACK)
+ if (lnet_msg_is_response(msg))
send_case |= SND_RESP;
/* assign parameters to the send_data */
lnet_finalize_expired_responses(void)
{
struct lnet_libmd *md;
- struct list_head local_queue;
struct lnet_rsp_tracker *rspt, *tmp;
ktime_t now;
int i;
return;
cfs_cpt_for_each(i, lnet_cpt_table()) {
- INIT_LIST_HEAD(&local_queue);
+ LIST_HEAD(local_queue);
lnet_net_lock(i);
if (!the_lnet.ln_mt_rstq[i]) {
lnet_finalize(msg, -EFAULT);
lnet_net_lock(cpt);
} else {
- struct lnet_peer *peer;
int rc;
- lnet_nid_t src_nid = LNET_NID_ANY;
-
- /*
- * if this message is not being routed and the
- * peer is non-MR then we must use the same
- * src_nid that was used in the original send.
- * Otherwise if we're routing the message (IE
- * we're a router) then we can use any of our
- * local interfaces. It doesn't matter to the
- * final destination.
- */
- peer = lpni->lpni_peer_net->lpn_peer;
- if (!msg->msg_routing &&
- !lnet_peer_is_multi_rail(peer))
- src_nid = le64_to_cpu(msg->msg_hdr.src_nid);
- /*
- * If we originally specified a src NID, then we
- * must attempt to reuse it in the resend as well.
- */
- if (msg->msg_src_nid_param != LNET_NID_ANY)
- src_nid = msg->msg_src_nid_param;
lnet_peer_ni_decref_locked(lpni);
lnet_net_unlock(cpt);
CDEBUG(D_NET, "resending %s->%s: %s recovery %d try# %d\n",
- libcfs_nid2str(src_nid),
+ libcfs_nid2str(msg->msg_src_nid_param),
libcfs_id2str(msg->msg_target),
lnet_msgtyp2str(msg->msg_type),
msg->msg_recovery,
msg->msg_retry_count);
- rc = lnet_send(src_nid, msg, LNET_NID_ANY);
+ rc = lnet_send(msg->msg_src_nid_param, msg,
+ msg->msg_rtr_nid_param);
if (rc) {
CERROR("Error sending %s to %s: %d\n",
lnet_msgtyp2str(msg->msg_type),
lnet_recover_local_nis(void)
{
struct lnet_mt_event_info *ev_info;
- struct list_head processed_list;
- struct list_head local_queue;
+ LIST_HEAD(processed_list);
+ LIST_HEAD(local_queue);
struct lnet_handle_md mdh;
struct lnet_ni *tmp;
struct lnet_ni *ni;
int healthv;
int rc;
- INIT_LIST_HEAD(&local_queue);
- INIT_LIST_HEAD(&processed_list);
-
/*
* splice the recovery queue on a local queue. We will iterate
* through the local queue and update it as needed. Once we're
ev_info->mt_type = MT_TYPE_LOCAL_NI;
ev_info->mt_nid = nid;
rc = lnet_send_ping(nid, &mdh, LNET_INTERFACES_MIN,
- ev_info, the_lnet.ln_mt_eqh, true);
+ ev_info, the_lnet.ln_mt_eq, true);
/* lookup the nid again */
lnet_net_lock(0);
ni = lnet_nid2ni_locked(nid, 0);
lnet_clean_resendqs(void)
{
struct lnet_msg *msg, *tmp;
- struct list_head msgs;
+ LIST_HEAD(msgs);
int i;
- INIT_LIST_HEAD(&msgs);
-
cfs_cpt_for_each(i, lnet_cpt_table()) {
lnet_net_lock(i);
list_splice_init(the_lnet.ln_mt_resendqs[i], &msgs);
lnet_recover_peer_nis(void)
{
struct lnet_mt_event_info *ev_info;
- struct list_head processed_list;
- struct list_head local_queue;
+ LIST_HEAD(processed_list);
+ LIST_HEAD(local_queue);
struct lnet_handle_md mdh;
struct lnet_peer_ni *lpni;
struct lnet_peer_ni *tmp;
int healthv;
int rc;
- INIT_LIST_HEAD(&local_queue);
- INIT_LIST_HEAD(&processed_list);
-
/*
* Always use cpt 0 for locking across all interactions with
* ln_mt_peerNIRecovq
ev_info->mt_type = MT_TYPE_PEER_NI;
ev_info->mt_nid = nid;
rc = lnet_send_ping(nid, &mdh, LNET_INTERFACES_MIN,
- ev_info, the_lnet.ln_mt_eqh, true);
+ ev_info, the_lnet.ln_mt_eq, true);
lnet_net_lock(0);
/*
* lnet_find_peer_ni_locked() grabs a refcount for
* 4. Checks if there are any NIs on the remote recovery queue
* and pings them.
*/
- cfs_block_allsigs();
-
while (the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING) {
now = ktime_get_real_seconds();
int
lnet_send_ping(lnet_nid_t dest_nid,
struct lnet_handle_md *mdh, int nnis,
- void *user_data, struct lnet_handle_eq eqh, bool recovery)
+ void *user_data, struct lnet_eq *eq, bool recovery)
{
struct lnet_md md = { NULL };
struct lnet_process_id id;
md.max_size = 0;
md.options = LNET_MD_TRUNCATE;
md.user_ptr = user_data;
- md.eq_handle = eqh;
+ md.eq_handle = eq;
rc = LNetMDBind(md, LNET_UNLINK, mdh);
if (rc) {
lnet_clean_local_ni_recoveryq();
lnet_clean_peer_ni_recoveryq();
lnet_clean_resendqs();
- LNetInvalidateEQHandle(&the_lnet.ln_mt_eqh);
+ the_lnet.ln_mt_eq = NULL;
return rc;
clean_queues:
lnet_rsp_tracker_clean();
struct lnet_hdr *hdr = &msg->msg_hdr;
struct lnet_process_id src = {0};
struct lnet_libmd *md;
- int rlength;
- int mlength;
+ unsigned int rlength;
+ unsigned int mlength;
int cpt;
cpt = lnet_cpt_of_cookie(hdr->msg.reply.dst_wmd.wh_object_cookie);
LASSERT(md->md_offset == 0);
rlength = hdr->payload_length;
- mlength = MIN(rlength, (int)md->md_length);
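+ /* rlength and mlength are now unsigned int, matching md_length,
+ * so plain min() type-checks without a cast */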
+ mlength = min(rlength, md->md_length);
if (mlength < rlength &&
(md->md_options & LNET_MD_TRUNCATE) == 0) {
if (!list_empty(&the_lnet.ln_drop_rules) &&
lnet_drop_rule_match(hdr, ni->ni_nid, NULL)) {
- CDEBUG(D_NET, "%s, src %s, dst %s: Dropping %s to simulate"
- "silent message loss\n",
+ CDEBUG(D_NET,
+ "%s, src %s, dst %s: Dropping %s to simulate silent message loss\n",
libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
libcfs_nid2str(dest_nid), lnet_msgtyp2str(type));
goto drop;
struct lnet_libmd *md, struct lnet_handle_md mdh)
{
s64 timeout_ns;
- bool new_entry = true;
struct lnet_rsp_tracker *local_rspt;
/*
* update the deadline on that one.
*/
lnet_rspt_free(rspt, cpt);
- new_entry = false;
} else {
/* new md */
rspt->rspt_mdh = mdh;
* list in order to expire all the older entries first.
*/
lnet_net_lock(cpt);
- if (!new_entry && !list_empty(&local_rspt->rspt_on_list))
- list_del_init(&local_rspt->rspt_on_list);
- list_add_tail(&local_rspt->rspt_on_list, the_lnet.ln_mt_rstq[cpt]);
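+ /* list_move_tail() handles both a brand new rspt and an existing
+ * one being refreshed, assuming rspt_on_list was initialized when
+ * the rspt was allocated */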
+ list_move_tail(&local_rspt->rspt_on_list, the_lnet.ln_mt_rstq[cpt]);
lnet_net_unlock(cpt);
lnet_res_unlock(cpt);
}