/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Message decoding, parsing and finalizing routines
 */
#define DEBUG_SUBSYSTEM S_LNET

#include <lnet/lib-lnet.h>
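
/*
 * Generate the event that tells an MD's owner it was unlinked without
 * completing an operation: every field is zeroed except the event
 * type, the user-visible MD snapshot and the MD handle.
 */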
void
lnet_build_unlink_event(struct lnet_libmd *md, struct lnet_event *ev)
{
	memset(ev, 0, sizeof(*ev));

	ev->status = 0;
	ev->unlinked = 1;
	ev->type = LNET_EVENT_UNLINK;
	lnet_md_deconstruct(md, &ev->md);
	lnet_md2handle(&ev->md_handle, md);
}
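
/*
 * Note on byte order: the LNET_EVENT_SEND branch below reads the
 * header while it is still in little-endian wire order (hence the
 * le*_to_cpu() calls); the passive branches read it directly because
 * the receive path is expected to have already converted the header
 * to host order.
 */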
/*
 * Don't need any lock, must be called after lnet_commit_md
 */
void
lnet_build_msg_event(struct lnet_msg *msg, enum lnet_event_kind ev_type)
{
	struct lnet_hdr *hdr = &msg->msg_hdr;
	struct lnet_event *ev = &msg->msg_ev;

	LASSERT(!msg->msg_routing);

	ev->type = ev_type;
	ev->msg_type = msg->msg_type;

	if (ev_type == LNET_EVENT_SEND) {
		/* event for active message */
		ev->target.nid = le64_to_cpu(hdr->dest_nid);
		ev->target.pid = le32_to_cpu(hdr->dest_pid);
		ev->initiator.nid = LNET_NID_ANY;
		ev->initiator.pid = the_lnet.ln_pid;
		ev->source.nid = LNET_NID_ANY;
		ev->source.pid = the_lnet.ln_pid;
		ev->sender = LNET_NID_ANY;
	} else {
		/* event for passive message */
		ev->target.pid = hdr->dest_pid;
		ev->target.nid = hdr->dest_nid;
		ev->initiator.pid = hdr->src_pid;
		/* Multi-Rail: resolve src_nid to "primary" peer NID */
		ev->initiator.nid = msg->msg_initiator;
		/* Multi-Rail: track source NID. */
		ev->source.pid = hdr->src_pid;
		ev->source.nid = hdr->src_nid;
		ev->rlength = hdr->payload_length;
		ev->sender = msg->msg_from;
		ev->mlength = msg->msg_wanted;
		ev->offset = msg->msg_offset;
	}

	switch (ev_type) {
	default:
		LBUG();

	case LNET_EVENT_PUT: /* passive PUT */
		ev->pt_index = hdr->msg.put.ptl_index;
		ev->match_bits = hdr->msg.put.match_bits;
		ev->hdr_data = hdr->msg.put.hdr_data;
		return;

	case LNET_EVENT_GET: /* passive GET */
		ev->pt_index = hdr->msg.get.ptl_index;
		ev->match_bits = hdr->msg.get.match_bits;
		ev->hdr_data = 0;
		return;

	case LNET_EVENT_ACK: /* ACK */
		ev->match_bits = hdr->msg.ack.match_bits;
		ev->mlength = hdr->msg.ack.mlength;
		return;

	case LNET_EVENT_REPLY: /* REPLY */
		return;

	case LNET_EVENT_SEND: /* active message */
		if (msg->msg_type == LNET_MSG_PUT) {
			ev->pt_index = le32_to_cpu(hdr->msg.put.ptl_index);
			ev->match_bits = le64_to_cpu(hdr->msg.put.match_bits);
			ev->offset = le32_to_cpu(hdr->msg.put.offset);
			ev->mlength =
			ev->rlength = le32_to_cpu(hdr->payload_length);
			ev->hdr_data = le64_to_cpu(hdr->msg.put.hdr_data);
		} else {
			LASSERT(msg->msg_type == LNET_MSG_GET);
			ev->pt_index = le32_to_cpu(hdr->msg.get.ptl_index);
			ev->match_bits = le64_to_cpu(hdr->msg.get.match_bits);
			ev->mlength =
			ev->rlength = le32_to_cpu(hdr->msg.get.sink_length);
			ev->offset = le32_to_cpu(hdr->msg.get.src_offset);
			ev->hdr_data = 0;
		}
		return;
	}
}
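
/*
 * Commit a message to the network on CPT @cpt: stamp its transaction
 * deadline, mark it committed for sending or receiving, and add it to
 * the container's active list.  A routed message is committed twice,
 * for rx when it arrives and for tx when it is forwarded, but it goes
 * on the active list only once.
 */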
void
lnet_msg_commit(struct lnet_msg *msg, int cpt)
{
	struct lnet_msg_container *container = the_lnet.ln_msg_containers[cpt];
	struct lnet_counters *counters = the_lnet.ln_counters[cpt];
	s64 timeout_ns;

	/* set the message deadline */
	timeout_ns = lnet_transaction_timeout * NSEC_PER_SEC;
	msg->msg_deadline = ktime_add_ns(ktime_get(), timeout_ns);

	/* routed message can be committed for both receiving and sending */
	LASSERT(!msg->msg_tx_committed);

	if (msg->msg_sending) {
		LASSERT(!msg->msg_receiving);
		msg->msg_tx_cpt = cpt;
		msg->msg_tx_committed = 1;
		if (msg->msg_rx_committed) { /* routed message REPLY */
			LASSERT(msg->msg_onactivelist);
			return;
		}
	} else {
		LASSERT(!msg->msg_sending);
		msg->msg_rx_cpt = cpt;
		msg->msg_rx_committed = 1;
	}

	LASSERT(!msg->msg_onactivelist);

	msg->msg_onactivelist = 1;
	list_add_tail(&msg->msg_activelist, &container->msc_active);

	counters->msgs_alloc++;
	if (counters->msgs_alloc > counters->msgs_max)
		counters->msgs_max = counters->msgs_alloc;
}
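
/*
 * Undo the tx commit: on success, credit the send/route counters and
 * the per-peer/per-NI send stats, restoring the message type that was
 * overwritten while sending an ACK or REPLY; in all cases return the
 * tx credits and clear msg_tx_committed.
 */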
static void
lnet_msg_decommit_tx(struct lnet_msg *msg, int status)
{
	struct lnet_counters *counters;
	struct lnet_event *ev = &msg->msg_ev;

	LASSERT(msg->msg_tx_committed);
	if (status != 0)
		goto out;

	counters = the_lnet.ln_counters[msg->msg_tx_cpt];
	switch (ev->type) {
	default: /* routed message */
		LASSERT(msg->msg_routing);
		LASSERT(msg->msg_rx_committed);
		LASSERT(ev->type == 0);

		counters->route_length += msg->msg_len;
		counters->route_count++;
		goto incr_stats;

	case LNET_EVENT_PUT:
		/* should have been decommitted */
		LASSERT(!msg->msg_rx_committed);
		/* overwritten while sending ACK */
		LASSERT(msg->msg_type == LNET_MSG_ACK);
		msg->msg_type = LNET_MSG_PUT; /* fix type */
		break;

	case LNET_EVENT_SEND:
		LASSERT(!msg->msg_rx_committed);
		if (msg->msg_type == LNET_MSG_PUT)
			counters->send_length += msg->msg_len;
		break;

	case LNET_EVENT_GET:
		LASSERT(msg->msg_rx_committed);
		/* overwritten while sending reply, we should never be
		 * here for optimized GET */
		LASSERT(msg->msg_type == LNET_MSG_REPLY);
		msg->msg_type = LNET_MSG_GET; /* fix type */
		break;
	}

	counters->send_count++;

incr_stats:
	if (msg->msg_txpeer)
		lnet_incr_stats(&msg->msg_txpeer->lpni_stats,
				msg->msg_type,
				LNET_STATS_TYPE_SEND);
	if (msg->msg_txni)
		lnet_incr_stats(&msg->msg_txni->ni_stats,
				msg->msg_type,
				LNET_STATS_TYPE_SEND);
out:
	lnet_return_tx_credits_locked(msg);
	msg->msg_tx_committed = 0;
}
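
/*
 * Undo the rx commit: on success, credit the receive counters and the
 * per-peer/per-NI receive stats (a GET also accounts the reply it sent
 * as send length); in all cases return the rx credits and clear
 * msg_rx_committed.  The tx side must already be decommitted.
 */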
static void
lnet_msg_decommit_rx(struct lnet_msg *msg, int status)
{
	struct lnet_counters *counters;
	struct lnet_event *ev = &msg->msg_ev;

	LASSERT(!msg->msg_tx_committed); /* decommitted or never committed */
	LASSERT(msg->msg_rx_committed);

	if (status != 0)
		goto out;

	counters = the_lnet.ln_counters[msg->msg_rx_cpt];
	switch (ev->type) {
	default:
		LASSERT(ev->type == 0);
		LASSERT(msg->msg_routing);
		goto incr_stats;

	case LNET_EVENT_ACK:
		LASSERT(msg->msg_type == LNET_MSG_ACK);
		break;

	case LNET_EVENT_GET:
		/* type is "REPLY" if it's an optimized GET on passive side,
		 * because optimized GET will never be committed for sending,
		 * so message type wouldn't be changed back to "GET" by
		 * lnet_msg_decommit_tx(), see details in lnet_parse_get() */
		LASSERT(msg->msg_type == LNET_MSG_REPLY ||
			msg->msg_type == LNET_MSG_GET);
		counters->send_length += msg->msg_wanted;
		break;

	case LNET_EVENT_PUT:
		LASSERT(msg->msg_type == LNET_MSG_PUT);
		break;

	case LNET_EVENT_REPLY:
		/* type is "GET" if it's an optimized GET on active side,
		 * see details in lnet_create_reply_msg() */
		LASSERT(msg->msg_type == LNET_MSG_GET ||
			msg->msg_type == LNET_MSG_REPLY);
		break;
	}

	counters->recv_count++;

incr_stats:
	if (msg->msg_rxpeer)
		lnet_incr_stats(&msg->msg_rxpeer->lpni_stats,
				msg->msg_type,
				LNET_STATS_TYPE_RECV);
	if (msg->msg_rxni)
		lnet_incr_stats(&msg->msg_rxni->ni_stats,
				msg->msg_type,
				LNET_STATS_TYPE_RECV);
	if (ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_REPLY)
		counters->recv_length += msg->msg_wanted;

out:
	lnet_return_rx_credits_locked(msg);
	msg->msg_rx_committed = 0;
}
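
/*
 * Fully decommit a message, tx side first, switching the net lock to
 * the rx CPT when a forwarded message was committed on two different
 * partitions.  The caller holds the net lock on @cpt and gets it back
 * on @cpt when this returns.
 */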
void
lnet_msg_decommit(struct lnet_msg *msg, int cpt, int status)
{
	int cpt2 = cpt;

	LASSERT(msg->msg_tx_committed || msg->msg_rx_committed);
	LASSERT(msg->msg_onactivelist);

	if (msg->msg_tx_committed) { /* always decommit for sending first */
		LASSERT(cpt == msg->msg_tx_cpt);
		lnet_msg_decommit_tx(msg, status);
	}

	if (msg->msg_rx_committed) {
		/* forwarding msg committed for both receiving and sending */
		if (cpt != msg->msg_rx_cpt) {
			lnet_net_unlock(cpt);
			cpt2 = msg->msg_rx_cpt;
			lnet_net_lock(cpt2);
		}
		lnet_msg_decommit_rx(msg, status);
	}

	list_del(&msg->msg_activelist);
	msg->msg_onactivelist = 0;

	the_lnet.ln_counters[cpt2]->msgs_alloc--;

	if (cpt2 != cpt) {
		lnet_net_unlock(cpt2);
		lnet_net_lock(cpt);
	}
}
void
lnet_msg_attach_md(struct lnet_msg *msg, struct lnet_libmd *md,
		   unsigned int offset, unsigned int mlen)
{
	/* NB: @offset and @mlen are only useful for receiving */
	/* Here, we attach the MD on lnet_msg and mark it busy and
	 * decrement its threshold. Come what may, the lnet_msg "owns"
	 * the MD until a call to lnet_msg_detach_md or lnet_finalize()
	 * signals completion. */
	LASSERT(!msg->msg_routing);

	msg->msg_md = md;
	if (msg->msg_receiving) { /* committed for receiving */
		msg->msg_offset = offset;
		msg->msg_wanted = mlen;
	}

	md->md_refcount++;
	if (md->md_threshold != LNET_MD_THRESH_INF) {
		LASSERT(md->md_threshold > 0);
		md->md_threshold--;
	}

	/* build umd in event */
	lnet_md2handle(&msg->msg_ev.md_handle, md);
	lnet_md_deconstruct(md, &msg->msg_ev.md);
}
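
/*
 * Drop the message's reference on its MD, deliver the completion
 * event to the MD's event queue (if it has one), and unlink the MD
 * when it has become unlinkable.
 */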
void
lnet_msg_detach_md(struct lnet_msg *msg, int status)
{
	struct lnet_libmd *md = msg->msg_md;
	int unlink;

	/* Now it's safe to drop my caller's ref */
	md->md_refcount--;
	LASSERT(md->md_refcount >= 0);

	unlink = lnet_md_unlinkable(md);
	if (md->md_eq != NULL) {
		msg->msg_ev.status = status;
		msg->msg_ev.unlinked = unlink;
		lnet_eq_enqueue_event(md->md_eq, &msg->msg_ev);
	}

	if (unlink)
		lnet_md_unlink(md);

	msg->msg_md = NULL;
}
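
/*
 * Finish off one message under the net lock: send the ACK for a PUT
 * that completed successfully, forward a routed message that has so
 * far only been received, or decommit and free it.  Returns the
 * result of any ACK/forward send; a nonzero value tells
 * lnet_finalize() that the failed message must be completed again,
 * possibly on a different CPT.
 */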
static int
lnet_complete_msg_locked(struct lnet_msg *msg, int cpt)
{
	struct lnet_handle_wire ack_wmd;
	int rc;
	int status = msg->msg_ev.status;

	LASSERT(msg->msg_onactivelist);

	if (status == 0 && msg->msg_ack) {
		/* Only send an ACK if the PUT completed successfully */

		lnet_msg_decommit(msg, cpt, 0);

		msg->msg_ack = 0;
		lnet_net_unlock(cpt);

		LASSERT(msg->msg_ev.type == LNET_EVENT_PUT);
		LASSERT(!msg->msg_routing);

		ack_wmd = msg->msg_hdr.msg.put.ack_wmd;

		lnet_prep_send(msg, LNET_MSG_ACK, msg->msg_ev.source, 0, 0);

		msg->msg_hdr.msg.ack.dst_wmd = ack_wmd;
		msg->msg_hdr.msg.ack.match_bits = msg->msg_ev.match_bits;
		msg->msg_hdr.msg.ack.mlength = cpu_to_le32(msg->msg_ev.mlength);

		/* NB: we probably want to use NID of msg::msg_from as 3rd
		 * parameter (router NID) if it's routed message */
		rc = lnet_send(msg->msg_ev.target.nid, msg, LNET_NID_ANY);

		lnet_net_lock(cpt);
		/*
		 * NB: message is committed for sending, we should return
		 * on success because LND will finalize this message later.
		 *
		 * Also, there is a possibility that the message is committed
		 * for sending but failed before reaching the LND, e.g.
		 * ENOMEM; in that case we can't fall through either, because
		 * the CPT for sending can differ from the CPT for receiving,
		 * so we should return to lnet_finalize() to make sure we are
		 * locking the correct partition.
		 */
		return rc;

	} else if (status == 0 &&	/* OK so far */
		   (msg->msg_routing && !msg->msg_sending)) {
		/* not forwarded */
		LASSERT(!msg->msg_receiving);	/* called back recv already */
		lnet_net_unlock(cpt);

		rc = lnet_send(LNET_NID_ANY, msg, LNET_NID_ANY);

		lnet_net_lock(cpt);
		/*
		 * NB: message is committed for sending, we should return
		 * on success because LND will finalize this message later.
		 *
		 * Also, there is a possibility that the message is committed
		 * for sending but failed before reaching the LND, e.g.
		 * ENOMEM; in that case we can't fall through either:
		 * - The rule is that a message must decommit for sending
		 *   first if it's committed for both sending and receiving
		 * - The CPT for sending can differ from the CPT for
		 *   receiving, so we should return to lnet_finalize() to
		 *   make sure we are locking the correct partition.
		 */
		return rc;
	}

	lnet_msg_decommit(msg, cpt, status);
	lnet_msg_free(msg);
	return 0;
}
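
/*
 * Decrement a health value by lnet_health_sensitivity, saturating at
 * zero: e.g. with sensitivity 100, a health value of 75 drops to 0
 * and a value of 1000 drops to 900.
 */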
static void
lnet_dec_healthv_locked(atomic_t *healthv)
{
	int h = atomic_read(healthv);

	if (h < lnet_health_sensitivity) {
		atomic_set(healthv, 0);
	} else {
		h -= lnet_health_sensitivity;
		atomic_set(healthv, h);
	}
}
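
/*
 * A local interface error: penalize the health of the tx NI and queue
 * it on the monitor thread's local NI recovery queue so the monitor
 * thread can attempt to recover it.
 */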
static void
lnet_handle_local_failure(struct lnet_msg *msg)
{
	struct lnet_ni *local_ni;

	local_ni = msg->msg_txni;

	/*
	 * the lnet_net_lock(0) is used to protect the addref on the ni
	 * and the recovery queue.
	 */
	lnet_net_lock(0);
	/* the mt could've shutdown and cleaned up the queues */
	if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING) {
		lnet_net_unlock(0);
		return;
	}

	lnet_dec_healthv_locked(&local_ni->ni_healthv);
	/*
	 * add the NI to the recovery queue if it's not already there
	 * and its health value is actually below the maximum. It's
	 * possible that the sensitivity might be set to 0, in which case
	 * the health value will not be reduced and there is no reason to
	 * invoke recovery.
	 */
	if (list_empty(&local_ni->ni_recovery) &&
	    atomic_read(&local_ni->ni_healthv) < LNET_MAX_HEALTH_VALUE) {
		CERROR("ni %s added to recovery queue. Health = %d\n",
		       libcfs_nid2str(local_ni->ni_nid),
		       atomic_read(&local_ni->ni_healthv));
		list_add_tail(&local_ni->ni_recovery,
			      &the_lnet.ln_mt_localNIRecovq);
		lnet_ni_addref_locked(local_ni, 0);
	}

	lnet_net_unlock(0);
}
static void
lnet_handle_remote_failure(struct lnet_msg *msg)
{
	struct lnet_peer_ni *lpni;

	lpni = msg->msg_txpeer;

	/* lpni could be NULL if we're in the LOLND case */
	if (!lpni)
		return;

	lnet_net_lock(0);
	lnet_dec_healthv_locked(&lpni->lpni_healthv);
	/*
	 * add the peer NI to the recovery queue if it's not already there
	 * and its health value is actually below the maximum. It's
	 * possible that the sensitivity might be set to 0, in which case
	 * the health value will not be reduced and there is no reason to
	 * invoke recovery.
	 */
	lnet_peer_ni_add_to_recoveryq_locked(lpni);
	lnet_net_unlock(0);
}
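
/*
 * Bump the health-status statistics for a failed send, on the tx NI
 * for local errors and on the tx peer NI for remote errors, plus the
 * global counters; lpni may be NULL for loopback sends, so the remote
 * cases are guarded.
 */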
static void
lnet_incr_hstats(struct lnet_msg *msg, enum lnet_msg_hstatus hstatus)
{
	struct lnet_ni *ni = msg->msg_txni;
	struct lnet_peer_ni *lpni = msg->msg_txpeer;
	struct lnet_counters *counters = the_lnet.ln_counters[0];

	switch (hstatus) {
	case LNET_MSG_STATUS_LOCAL_INTERRUPT:
		atomic_inc(&ni->ni_hstats.hlt_local_interrupt);
		counters->local_interrupt_count++;
		break;
	case LNET_MSG_STATUS_LOCAL_DROPPED:
		atomic_inc(&ni->ni_hstats.hlt_local_dropped);
		counters->local_dropped_count++;
		break;
	case LNET_MSG_STATUS_LOCAL_ABORTED:
		atomic_inc(&ni->ni_hstats.hlt_local_aborted);
		counters->local_aborted_count++;
		break;
	case LNET_MSG_STATUS_LOCAL_NO_ROUTE:
		atomic_inc(&ni->ni_hstats.hlt_local_no_route);
		counters->local_no_route_count++;
		break;
	case LNET_MSG_STATUS_LOCAL_TIMEOUT:
		atomic_inc(&ni->ni_hstats.hlt_local_timeout);
		counters->local_timeout_count++;
		break;
	case LNET_MSG_STATUS_LOCAL_ERROR:
		atomic_inc(&ni->ni_hstats.hlt_local_error);
		counters->local_error_count++;
		break;
	case LNET_MSG_STATUS_REMOTE_DROPPED:
		if (lpni)
			atomic_inc(&lpni->lpni_hstats.hlt_remote_dropped);
		counters->remote_dropped_count++;
		break;
	case LNET_MSG_STATUS_REMOTE_ERROR:
		if (lpni)
			atomic_inc(&lpni->lpni_hstats.hlt_remote_error);
		counters->remote_error_count++;
		break;
	case LNET_MSG_STATUS_REMOTE_TIMEOUT:
		if (lpni)
			atomic_inc(&lpni->lpni_hstats.hlt_remote_timeout);
		counters->remote_timeout_count++;
		break;
	case LNET_MSG_STATUS_NETWORK_TIMEOUT:
		if (lpni)
			atomic_inc(&lpni->lpni_hstats.hlt_network_timeout);
		counters->network_timeout_count++;
		break;
	case LNET_MSG_STATUS_OK:
		break;
	}
}
/*
 * Do a health check on the message:
 * return -1 if we're not going to handle the error or
 * if we've reached the maximum number of retries.
 * The success case will return -1 as well.
 * return 0 if the message is requeued for send
 */
static int
lnet_health_check(struct lnet_msg *msg)
{
	enum lnet_msg_hstatus hstatus = msg->msg_health_status;
	bool lo = false;

	/* if we're shutting down no point in handling health. */
	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		return -1;

	LASSERT(msg->msg_txni);

	/*
	 * if we're sending to the LOLND then the msg_txpeer will not be
	 * set. So no need to sanity check it.
	 */
	if (LNET_NETTYP(LNET_NIDNET(msg->msg_txni->ni_nid)) != LOLND)
		LASSERT(msg->msg_txpeer);
	else
		lo = true;

	if (hstatus != LNET_MSG_STATUS_OK &&
	    ktime_compare(ktime_get(), msg->msg_deadline) >= 0)
		return -1;

	/*
	 * stats are only incremented for errors so avoid wasting time
	 * incrementing statistics if there is no error.
	 */
	if (hstatus != LNET_MSG_STATUS_OK) {
		lnet_net_lock(0);
		lnet_incr_hstats(msg, hstatus);
		lnet_net_unlock(0);
	}

	CDEBUG(D_NET, "health check: %s->%s: %s: %s\n",
	       libcfs_nid2str(msg->msg_txni->ni_nid),
	       (lo) ? "self" : libcfs_nid2str(msg->msg_txpeer->lpni_nid),
	       lnet_msgtyp2str(msg->msg_type),
	       lnet_health_error2str(hstatus));

	switch (hstatus) {
	case LNET_MSG_STATUS_OK:
		lnet_inc_healthv(&msg->msg_txni->ni_healthv);
		/*
		 * It's possible msg_txpeer is NULL in the LOLND
		 * case.
		 */
		if (msg->msg_txpeer)
			lnet_inc_healthv(&msg->msg_txpeer->lpni_healthv);

		/* we can finalize this message */
		return -1;
	case LNET_MSG_STATUS_LOCAL_INTERRUPT:
	case LNET_MSG_STATUS_LOCAL_DROPPED:
	case LNET_MSG_STATUS_LOCAL_ABORTED:
	case LNET_MSG_STATUS_LOCAL_NO_ROUTE:
	case LNET_MSG_STATUS_LOCAL_TIMEOUT:
		lnet_handle_local_failure(msg);
		/* add to the re-send queue */
		goto resend;

	/*
	 * These errors will not trigger a resend so simply
	 * finalize the message
	 */
	case LNET_MSG_STATUS_LOCAL_ERROR:
		lnet_handle_local_failure(msg);
		return -1;

	/*
	 * TODO: since the remote dropped the message we can
	 * attempt a resend safely.
	 */
	case LNET_MSG_STATUS_REMOTE_DROPPED:
		lnet_handle_remote_failure(msg);
		goto resend;

	case LNET_MSG_STATUS_REMOTE_ERROR:
	case LNET_MSG_STATUS_REMOTE_TIMEOUT:
	case LNET_MSG_STATUS_NETWORK_TIMEOUT:
		lnet_handle_remote_failure(msg);
		return -1;
	default:
		LBUG();
	}

resend:
	/* don't resend recovery messages */
	if (msg->msg_recovery) {
		CDEBUG(D_NET, "msg %s->%s is a recovery ping. retry# %d\n",
		       libcfs_nid2str(msg->msg_from),
		       libcfs_nid2str(msg->msg_target.nid),
		       msg->msg_retry_count);
		return -1;
	}

	/*
	 * if we explicitly indicated we don't want to resend then just
	 * return
	 */
	if (msg->msg_no_resend) {
		CDEBUG(D_NET, "msg %s->%s requested no resend. retry# %d\n",
		       libcfs_nid2str(msg->msg_from),
		       libcfs_nid2str(msg->msg_target.nid),
		       msg->msg_retry_count);
		return -1;
	}

	/* check if the message has exceeded the number of retries */
	if (msg->msg_retry_count >= lnet_retry_count) {
		CNETERR("msg %s->%s exceeded retry count %d\n",
			libcfs_nid2str(msg->msg_from),
			libcfs_nid2str(msg->msg_target.nid),
			msg->msg_retry_count);
		return -1;
	}
	msg->msg_retry_count++;

	lnet_net_lock(msg->msg_tx_cpt);

	/*
	 * remove message from the active list and reset it in preparation
	 * for a resend. Two exceptions to this:
	 *
	 * 1. the router case, where a message is committed for rx when it
	 * is received, then for tx when it is sent. When committed to both
	 * tx and rx we don't want to remove it from the active list.
	 *
	 * 2. The REPLY case, since it uses the same msg block for the GET
	 * that was received.
	 */
	if (!msg->msg_routing && msg->msg_type != LNET_MSG_REPLY) {
		list_del_init(&msg->msg_activelist);
		msg->msg_onactivelist = 0;
	}

	/*
	 * The msg_target.nid which was originally set
	 * when calling LNetGet() or LNetPut() might've
	 * been overwritten if we're routing this message.
	 * Call lnet_msg_decommit_tx() to return the credit
	 * this message consumed. The message will
	 * consume another credit when it gets resent.
	 */
	msg->msg_target.nid = msg->msg_hdr.dest_nid;
	lnet_msg_decommit_tx(msg, -EAGAIN);
	msg->msg_sending = 0;
	msg->msg_receiving = 0;
	msg->msg_target_is_router = 0;

	CDEBUG(D_NET, "%s->%s:%s:%s - queuing for resend\n",
	       libcfs_nid2str(msg->msg_hdr.src_nid),
	       libcfs_nid2str(msg->msg_hdr.dest_nid),
	       lnet_msgtyp2str(msg->msg_type),
	       lnet_health_error2str(hstatus));

	list_add_tail(&msg->msg_list, the_lnet.ln_mt_resendqs[msg->msg_tx_cpt]);
	lnet_net_unlock(msg->msg_tx_cpt);

	wake_up(&the_lnet.ln_mt_waitq);
	return 0;
}
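
/*
 * Helper that takes the resource lock around lnet_msg_detach_md() for
 * the CPT encoded in the MD's cookie.
 */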
static void
lnet_detach_md(struct lnet_msg *msg, int status)
{
	int cpt = lnet_cpt_of_cookie(msg->msg_md->md_lh.lh_cookie);

	lnet_res_lock(cpt);
	lnet_msg_detach_md(msg, status);
	lnet_res_unlock(cpt);
}
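
/*
 * Decide whether a message should go through the health check on
 * finalization: any message committed for transmit qualifies, unless
 * its event status and health status contradict each other.
 */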
static bool
lnet_is_health_check(struct lnet_msg *msg)
{
	bool hc;
	int status = msg->msg_ev.status;

	/*
	 * perform a health check for any message committed for transmit
	 */
	hc = msg->msg_tx_committed;

	/* Check for status inconsistencies */
	if (hc &&
	    ((!status && msg->msg_health_status != LNET_MSG_STATUS_OK) ||
	     (status && msg->msg_health_status == LNET_MSG_STATUS_OK))) {
		CERROR("Msg is in inconsistent state, don't perform health "
		       "checking (%d, %d)\n", status, msg->msg_health_status);
		hc = false;
	}

	CDEBUG(D_NET, "health check = %d, status = %d, hstatus = %d\n",
	       hc, status, msg->msg_health_status);

	return hc;
}
char *
lnet_health_error2str(enum lnet_msg_hstatus hstatus)
{
	switch (hstatus) {
	case LNET_MSG_STATUS_LOCAL_INTERRUPT:
		return "LOCAL_INTERRUPT";
	case LNET_MSG_STATUS_LOCAL_DROPPED:
		return "LOCAL_DROPPED";
	case LNET_MSG_STATUS_LOCAL_ABORTED:
		return "LOCAL_ABORTED";
	case LNET_MSG_STATUS_LOCAL_NO_ROUTE:
		return "LOCAL_NO_ROUTE";
	case LNET_MSG_STATUS_LOCAL_TIMEOUT:
		return "LOCAL_TIMEOUT";
	case LNET_MSG_STATUS_LOCAL_ERROR:
		return "LOCAL_ERROR";
	case LNET_MSG_STATUS_REMOTE_DROPPED:
		return "REMOTE_DROPPED";
	case LNET_MSG_STATUS_REMOTE_ERROR:
		return "REMOTE_ERROR";
	case LNET_MSG_STATUS_REMOTE_TIMEOUT:
		return "REMOTE_TIMEOUT";
	case LNET_MSG_STATUS_NETWORK_TIMEOUT:
		return "NETWORK_TIMEOUT";
	case LNET_MSG_STATUS_OK:
		return "OK";
	default:
		return "<UNKNOWN>";
	}
}
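
/*
 * Fault injection hook: consult the drop rules and, when one matches,
 * report the health status the send should fail with.  Returns true
 * if an error should be simulated for this message.
 */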
bool
lnet_send_error_simulation(struct lnet_msg *msg,
			   enum lnet_msg_hstatus *hstatus)
{
	if (!msg)
		return false;

	if (list_empty(&the_lnet.ln_drop_rules))
		return false;

	/* match only health rules */
	if (!lnet_drop_rule_match(&msg->msg_hdr, hstatus))
		return false;

	CDEBUG(D_NET, "src %s, dst %s: %s simulate health error: %s\n",
	       libcfs_nid2str(msg->msg_hdr.src_nid),
	       libcfs_nid2str(msg->msg_hdr.dest_nid),
	       lnet_msgtyp2str(msg->msg_type),
	       lnet_health_error2str(*hstatus));

	return true;
}
EXPORT_SYMBOL(lnet_send_error_simulation);
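
/*
 * The main entry point for message completion.  Detaches the MD
 * (delivering the completion event), runs the health check, which may
 * requeue the message for resend instead of completing it, then
 * queues the message on the per-CPT finalizing list, where a bounded
 * set of finalizer slots keeps recursive completions (e.g. an ACK
 * sent from within finalization) from nesting arbitrarily deep.
 */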
void
lnet_finalize(struct lnet_msg *msg, int status)
{
	struct lnet_msg_container *container;
	int my_slot;
	int cpt;
	int rc;
	int i;
	bool hc;

	LASSERT(!in_interrupt());

	if (msg == NULL)
		return;

	msg->msg_ev.status = status;

	/*
	 * if this is an ACK or a REPLY then make sure to remove the
	 * response tracker.
	 */
	if (msg->msg_ev.type == LNET_EVENT_REPLY ||
	    msg->msg_ev.type == LNET_EVENT_ACK) {
		cpt = lnet_cpt_of_cookie(msg->msg_md->md_lh.lh_cookie);
		lnet_detach_rsp_tracker(msg->msg_md, cpt);
	}

	/* if the message is successfully sent, no need to keep the MD around */
	if (msg->msg_md != NULL && !status)
		lnet_detach_md(msg, status);

again:
	hc = lnet_is_health_check(msg);

	/*
	 * the MD would've been detached from the message if it was
	 * successfully sent. However, if it wasn't successfully sent the
	 * MD would be around. And since we recalculate whether to
	 * health check or not, it's possible that we change our minds and
	 * we don't want to health check this message. In this case also
	 * detach the MD.
	 *
	 * If the message is successful we're going to
	 * go through the lnet_health_check() function, but that'll just
	 * increment the appropriate health value and return.
	 */
	if (msg->msg_md != NULL && !hc)
		lnet_detach_md(msg, status);

	rc = 0;
	if (!msg->msg_tx_committed && !msg->msg_rx_committed) {
		/* not committed to network yet */
		LASSERT(!msg->msg_onactivelist);
		lnet_msg_free(msg);
		return;
	}

	if (hc) {
		/*
		 * Check the health status of the message. If it has one
		 * of the errors that we're supposed to handle, and it has
		 * not timed out, then
		 *	1. Decrement the appropriate health_value
		 *	2. queue the message on the resend queue
		 *
		 * if the message send succeeded, timed out or failed in the
		 * health check for any reason then we'll just finalize the
		 * message. Otherwise just return since the message has been
		 * put on the resend queue.
		 */
		if (!lnet_health_check(msg))
			return;

		/*
		 * if we get here then we need to clean up the md because
		 * we're finalizing the message.
		 */
		if (msg->msg_md != NULL)
			lnet_detach_md(msg, status);
	}

	/*
	 * NB: routed message can be committed for both receiving and sending,
	 * we should finalize in LIFO order and keep counters correct.
	 * (finalize sending first then finalize receiving)
	 */
	cpt = msg->msg_tx_committed ? msg->msg_tx_cpt : msg->msg_rx_cpt;
	lnet_net_lock(cpt);

	container = the_lnet.ln_msg_containers[cpt];
	list_add_tail(&msg->msg_list, &container->msc_finalizing);

	/* Recursion breaker. Don't complete the message here if I am (or
	 * enough other threads are) already completing messages */

	my_slot = -1;
	for (i = 0; i < container->msc_nfinalizers; i++) {
		if (container->msc_finalizers[i] == current)
			break;

		if (my_slot < 0 && container->msc_finalizers[i] == NULL)
			my_slot = i;
	}

	if (i < container->msc_nfinalizers || my_slot < 0) {
		lnet_net_unlock(cpt);
		return;
	}

	container->msc_finalizers[my_slot] = current;

	while (!list_empty(&container->msc_finalizing)) {
		msg = list_entry(container->msc_finalizing.next,
				 struct lnet_msg, msg_list);

		list_del_init(&msg->msg_list);

		/* NB drops and regains the lnet lock if it actually does
		 * anything, so my finalizing friends can chomp along too */
		rc = lnet_complete_msg_locked(msg, cpt);
		if (rc != 0)
			break;
	}

	if (unlikely(!list_empty(&the_lnet.ln_delay_rules))) {
		lnet_net_unlock(cpt);
		lnet_delay_rule_check();
		lnet_net_lock(cpt);
	}

	container->msc_finalizers[my_slot] = NULL;
	lnet_net_unlock(cpt);

	if (rc != 0)
		goto again;
}
EXPORT_SYMBOL(lnet_finalize);
void
lnet_msg_container_cleanup(struct lnet_msg_container *container)
{
	int count = 0;

	if (container->msc_init == 0)
		return;

	while (!list_empty(&container->msc_active)) {
		struct lnet_msg *msg;

		msg = list_entry(container->msc_active.next,
				 struct lnet_msg, msg_activelist);
		LASSERT(msg->msg_onactivelist);
		msg->msg_onactivelist = 0;
		list_del_init(&msg->msg_activelist);
		lnet_msg_free(msg);
		count++;
	}

	if (count > 0)
		CERROR("%d active msg on exit\n", count);

	if (container->msc_finalizers != NULL) {
		LIBCFS_FREE(container->msc_finalizers,
			    container->msc_nfinalizers *
			    sizeof(*container->msc_finalizers));
		container->msc_finalizers = NULL;
	}
	container->msc_init = 0;
}
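
/*
 * Initialize a per-CPT message container: the active/finalizing lists
 * and one finalizer slot per CPU in the partition (minimum one),
 * which bounds how many threads can finalize messages on this CPT at
 * the same time.
 */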
int
lnet_msg_container_setup(struct lnet_msg_container *container, int cpt)
{
	int rc = 0;

	container->msc_init = 1;

	INIT_LIST_HEAD(&container->msc_active);
	INIT_LIST_HEAD(&container->msc_finalizing);

	/* number of CPUs */
	container->msc_nfinalizers = cfs_cpt_weight(lnet_cpt_table(), cpt);
	if (container->msc_nfinalizers == 0)
		container->msc_nfinalizers = 1;

	LIBCFS_CPT_ALLOC(container->msc_finalizers, lnet_cpt_table(), cpt,
			 container->msc_nfinalizers *
			 sizeof(*container->msc_finalizers));

	if (container->msc_finalizers == NULL) {
		CERROR("Failed to allocate message finalizers\n");
		lnet_msg_container_cleanup(container);
		return -ENOMEM;
	}

	return rc;
}
void
lnet_msg_containers_destroy(void)
{
	struct lnet_msg_container *container;
	int i;

	if (the_lnet.ln_msg_containers == NULL)
		return;

	cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers)
		lnet_msg_container_cleanup(container);

	cfs_percpt_free(the_lnet.ln_msg_containers);
	the_lnet.ln_msg_containers = NULL;
}
int
lnet_msg_containers_create(void)
{
	struct lnet_msg_container *container;
	int rc;
	int i;

	the_lnet.ln_msg_containers = cfs_percpt_alloc(lnet_cpt_table(),
						      sizeof(*container));

	if (the_lnet.ln_msg_containers == NULL) {
		CERROR("Failed to allocate cpu-partition data for network\n");
		return -ENOMEM;
	}

	cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers) {
		rc = lnet_msg_container_setup(container, i);
		if (rc != 0) {
			lnet_msg_containers_destroy();
			return rc;
		}
	}

	return 0;
}