 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/lnet/lib-msg.c
 *
 * Message decoding, parsing and finalizing routines
 */

#define DEBUG_SUBSYSTEM S_LNET

#include <lnet/lib-lnet.h>

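/*
 * Fill in @ev as an LNET_EVENT_UNLINK event for @md. The event is
 * zeroed first, so every other field reports a successful unlink.
 */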
void
lnet_build_unlink_event(struct lnet_libmd *md, struct lnet_event *ev)
{
	ENTRY;

	memset(ev, 0, sizeof(*ev));

	ev->status = 0;
	ev->unlinked = 1;
	ev->type = LNET_EVENT_UNLINK;
	lnet_md_deconstruct(md, &ev->md);
	lnet_md2handle(&ev->md_handle, md);
	EXIT;
}

/*
 * Don't need any lock, must be called after lnet_commit_md
 */
void
lnet_build_msg_event(struct lnet_msg *msg, enum lnet_event_kind ev_type)
{
	struct lnet_hdr *hdr = &msg->msg_hdr;
	struct lnet_event *ev = &msg->msg_ev;

	LASSERT(!msg->msg_routing);

	ev->type = ev_type;
	ev->msg_type = msg->msg_type;

	if (ev_type == LNET_EVENT_SEND) {
		/* event for active message */
		ev->target.nid    = le64_to_cpu(hdr->dest_nid);
		ev->target.pid    = le32_to_cpu(hdr->dest_pid);
		ev->initiator.nid = LNET_NID_ANY;
		ev->initiator.pid = the_lnet.ln_pid;
		ev->source.nid    = LNET_NID_ANY;
		ev->source.pid    = the_lnet.ln_pid;
		ev->sender        = LNET_NID_ANY;
	} else {
		/* event for passive message */
		ev->target.pid    = hdr->dest_pid;
		ev->target.nid    = hdr->dest_nid;
		ev->initiator.pid = hdr->src_pid;
		/* Multi-Rail: resolve src_nid to "primary" peer NID */
		ev->initiator.nid = msg->msg_initiator;
		/* Multi-Rail: track source NID. */
		ev->source.pid    = hdr->src_pid;
		ev->source.nid    = hdr->src_nid;
		ev->rlength       = hdr->payload_length;
		ev->sender        = msg->msg_from;
		ev->mlength       = msg->msg_wanted;
		ev->offset        = msg->msg_offset;
	}

	switch (ev_type) {
	default:
		LBUG();

	case LNET_EVENT_PUT: /* passive PUT */
		ev->pt_index   = hdr->msg.put.ptl_index;
		ev->match_bits = hdr->msg.put.match_bits;
		ev->hdr_data   = hdr->msg.put.hdr_data;
		return;

	case LNET_EVENT_GET: /* passive GET */
		ev->pt_index   = hdr->msg.get.ptl_index;
		ev->match_bits = hdr->msg.get.match_bits;
		ev->hdr_data   = 0;
		return;

	case LNET_EVENT_ACK: /* ACK */
		ev->match_bits = hdr->msg.ack.match_bits;
		ev->mlength    = hdr->msg.ack.mlength;
		return;

	case LNET_EVENT_REPLY: /* REPLY */
		return;

	case LNET_EVENT_SEND: /* active message */
		if (msg->msg_type == LNET_MSG_PUT) {
			ev->pt_index   = le32_to_cpu(hdr->msg.put.ptl_index);
			ev->match_bits = le64_to_cpu(hdr->msg.put.match_bits);
			ev->offset     = le32_to_cpu(hdr->msg.put.offset);
			ev->mlength    =
			ev->rlength    = le32_to_cpu(hdr->payload_length);
			ev->hdr_data   = le64_to_cpu(hdr->msg.put.hdr_data);
		} else {
			LASSERT(msg->msg_type == LNET_MSG_GET);
			ev->pt_index   = le32_to_cpu(hdr->msg.get.ptl_index);
			ev->match_bits = le64_to_cpu(hdr->msg.get.match_bits);
			ev->mlength    =
			ev->rlength    = le32_to_cpu(hdr->msg.get.sink_length);
			ev->offset     = le32_to_cpu(hdr->msg.get.src_offset);
			ev->hdr_data   = 0;
		}
		return;
	}
}

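/*
 * Commit @msg on partition @cpt: record its transaction deadline, mark
 * it committed for sending or receiving, add it to the container's
 * active list and update the message counters. A routed REPLY may
 * already be rx-committed here, in which case it stays where it is.
 */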
void
lnet_msg_commit(struct lnet_msg *msg, int cpt)
{
	struct lnet_msg_container *container = the_lnet.ln_msg_containers[cpt];
	struct lnet_counters_common *common;
	s64 timeout_ns;

	/* set the message deadline */
	timeout_ns = lnet_transaction_timeout * NSEC_PER_SEC;
	msg->msg_deadline = ktime_add_ns(ktime_get(), timeout_ns);

	/* routed message can be committed for both receiving and sending */
	LASSERT(!msg->msg_tx_committed);

	if (msg->msg_sending) {
		LASSERT(!msg->msg_receiving);
		msg->msg_tx_cpt = cpt;
		msg->msg_tx_committed = 1;
		if (msg->msg_rx_committed) { /* routed message REPLY */
			LASSERT(msg->msg_onactivelist);
			return;
		}
	} else {
		LASSERT(!msg->msg_sending);
		msg->msg_rx_cpt = cpt;
		msg->msg_rx_committed = 1;
	}

	LASSERT(!msg->msg_onactivelist);

	msg->msg_onactivelist = 1;
	list_add_tail(&msg->msg_activelist, &container->msc_active);

	common = &the_lnet.ln_counters[cpt]->lct_common;
	common->lcc_msgs_alloc++;
	if (common->lcc_msgs_alloc > common->lcc_msgs_max)
		common->lcc_msgs_max = common->lcc_msgs_alloc;
}

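/*
 * Undo the send-side commit of @msg. On success the send and route
 * counters and the per-NI/peer-NI statistics are updated; in all cases
 * the tx credits are returned and msg_tx_committed is cleared.
 */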
void
lnet_msg_decommit_tx(struct lnet_msg *msg, int status)
{
	struct lnet_counters_common *common;
	struct lnet_event *ev = &msg->msg_ev;

	LASSERT(msg->msg_tx_committed);
	if (status != 0)
		goto out;

	common = &(the_lnet.ln_counters[msg->msg_tx_cpt]->lct_common);
	switch (ev->type) {
	default: /* routed message */
		LASSERT(msg->msg_routing);
		LASSERT(msg->msg_rx_committed);
		LASSERT(ev->type == 0);

		common->lcc_route_length += msg->msg_len;
		common->lcc_route_count++;
		goto incr_stats;

	case LNET_EVENT_PUT:
		/* should have been decommitted */
		LASSERT(!msg->msg_rx_committed);
		/* overwritten while sending ACK */
		LASSERT(msg->msg_type == LNET_MSG_ACK);
		msg->msg_type = LNET_MSG_PUT; /* fix type */
		break;

	case LNET_EVENT_SEND:
		LASSERT(!msg->msg_rx_committed);
		if (msg->msg_type == LNET_MSG_PUT)
			common->lcc_send_length += msg->msg_len;
		break;

	case LNET_EVENT_GET:
		LASSERT(msg->msg_rx_committed);
		/* overwritten while sending reply, we should never be
		 * here for optimized GET */
		LASSERT(msg->msg_type == LNET_MSG_REPLY);
		msg->msg_type = LNET_MSG_GET; /* fix type */
		break;
	}

	common->lcc_send_count++;

incr_stats:
	if (msg->msg_txpeer)
		lnet_incr_stats(&msg->msg_txpeer->lpni_stats,
				msg->msg_type,
				LNET_STATS_TYPE_SEND);
	if (msg->msg_txni)
		lnet_incr_stats(&msg->msg_txni->ni_stats,
				msg->msg_type,
				LNET_STATS_TYPE_SEND);
out:
	lnet_return_tx_credits_locked(msg);
	msg->msg_tx_committed = 0;
}

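/*
 * Receive-side counterpart of lnet_msg_decommit_tx(): update the
 * receive and route counters and statistics on success, then return
 * the rx credits and clear msg_rx_committed.
 */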
void
lnet_msg_decommit_rx(struct lnet_msg *msg, int status)
{
	struct lnet_counters_common *common;
	struct lnet_event *ev = &msg->msg_ev;

	LASSERT(!msg->msg_tx_committed); /* decommitted or never committed */
	LASSERT(msg->msg_rx_committed);

	if (status != 0)
		goto out;

	common = &(the_lnet.ln_counters[msg->msg_rx_cpt]->lct_common);
	switch (ev->type) {
	default:
		LASSERT(ev->type == 0);
		LASSERT(msg->msg_routing);
		goto incr_stats;

	case LNET_EVENT_ACK:
		LASSERT(msg->msg_type == LNET_MSG_ACK);
		break;

	case LNET_EVENT_GET:
		/* type is "REPLY" if it's an optimized GET on passive side,
		 * because optimized GET will never be committed for sending,
		 * so message type wouldn't be changed back to "GET" by
		 * lnet_msg_decommit_tx(), see details in lnet_parse_get() */
		LASSERT(msg->msg_type == LNET_MSG_REPLY ||
			msg->msg_type == LNET_MSG_GET);
		common->lcc_send_length += msg->msg_wanted;
		break;

	case LNET_EVENT_PUT:
		LASSERT(msg->msg_type == LNET_MSG_PUT);
		break;

	case LNET_EVENT_REPLY:
		/* type is "GET" if it's an optimized GET on active side,
		 * see details in lnet_create_reply_msg() */
		LASSERT(msg->msg_type == LNET_MSG_GET ||
			msg->msg_type == LNET_MSG_REPLY);
		break;
	}

	common->lcc_recv_count++;

incr_stats:
	if (msg->msg_rxpeer)
		lnet_incr_stats(&msg->msg_rxpeer->lpni_stats,
				msg->msg_type,
				LNET_STATS_TYPE_RECV);
	if (msg->msg_rxni)
		lnet_incr_stats(&msg->msg_rxni->ni_stats,
				msg->msg_type,
				LNET_STATS_TYPE_RECV);
	if (ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_REPLY)
		common->lcc_recv_length += msg->msg_wanted;

out:
	lnet_return_rx_credits_locked(msg);
	msg->msg_rx_committed = 0;
}

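/*
 * Fully decommit @msg: sending side first, then receiving side,
 * temporarily switching to the rx CPT when it differs from @cpt.
 * Called and returns with lnet_net_lock(cpt) held.
 */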
void
lnet_msg_decommit(struct lnet_msg *msg, int cpt, int status)
{
	int cpt2 = cpt;

	LASSERT(msg->msg_tx_committed || msg->msg_rx_committed);
	LASSERT(msg->msg_onactivelist);

	if (msg->msg_tx_committed) { /* always decommit for sending first */
		LASSERT(cpt == msg->msg_tx_cpt);
		lnet_msg_decommit_tx(msg, status);
	}

	if (msg->msg_rx_committed) {
		/* forwarding msg committed for both receiving and sending */
		if (cpt != msg->msg_rx_cpt) {
			lnet_net_unlock(cpt);
			cpt2 = msg->msg_rx_cpt;
			lnet_net_lock(cpt2);
		}
		lnet_msg_decommit_rx(msg, status);
	}

	list_del(&msg->msg_activelist);
	msg->msg_onactivelist = 0;

	the_lnet.ln_counters[cpt2]->lct_common.lcc_msgs_alloc--;

	if (cpt2 != cpt) {
		lnet_net_unlock(cpt2);
		lnet_net_lock(cpt);
	}
}

void
lnet_msg_attach_md(struct lnet_msg *msg, struct lnet_libmd *md,
		   unsigned int offset, unsigned int mlen)
{
	/* NB: @offset and @mlen are only useful for receiving */
	/* Here, we attach the MD on lnet_msg and mark it busy and
	 * decrementing its threshold. Come what may, the lnet_msg "owns"
	 * the MD until a call to lnet_msg_detach_md or lnet_finalize()
	 * signals completion. */
	LASSERT(!msg->msg_routing);

	msg->msg_md = md;
	if (msg->msg_receiving) { /* committed for receiving */
		msg->msg_offset = offset;
		msg->msg_wanted = mlen;
	}

	md->md_refcount++;
	if (md->md_threshold != LNET_MD_THRESH_INF) {
		LASSERT(md->md_threshold > 0);
		md->md_threshold--;
	}

	/* build umd in event */
	lnet_md2handle(&msg->msg_ev.md_handle, md);
	lnet_md_deconstruct(md, &msg->msg_ev.md);
}

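/*
 * Complete one committed message: send the ACK for a PUT that finished
 * successfully, forward a routed message that hasn't been sent yet, or
 * decommit and free the message. Called and returns with
 * lnet_net_lock(cpt) held; a non-zero return tells lnet_finalize() to
 * restart so the correct partition lock is taken.
 */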
static int
lnet_complete_msg_locked(struct lnet_msg *msg, int cpt)
{
	struct lnet_handle_wire ack_wmd;
	int rc;
	int status = msg->msg_ev.status;

	LASSERT(msg->msg_onactivelist);

	if (status == 0 && msg->msg_ack) {
		/* Only send an ACK if the PUT completed successfully */

		lnet_msg_decommit(msg, cpt, 0);

		msg->msg_ack = 0;
		lnet_net_unlock(cpt);

		LASSERT(msg->msg_ev.type == LNET_EVENT_PUT);
		LASSERT(!msg->msg_routing);

		ack_wmd = msg->msg_hdr.msg.put.ack_wmd;

		lnet_prep_send(msg, LNET_MSG_ACK, msg->msg_ev.source, 0, 0);

		msg->msg_hdr.msg.ack.dst_wmd = ack_wmd;
		msg->msg_hdr.msg.ack.match_bits = msg->msg_ev.match_bits;
		msg->msg_hdr.msg.ack.mlength = cpu_to_le32(msg->msg_ev.mlength);

		/* NB: we probably want to use NID of msg::msg_from as 3rd
		 * parameter (router NID) if it's routed message */
		rc = lnet_send(msg->msg_ev.target.nid, msg, msg->msg_from);

		lnet_net_lock(cpt);
		/*
		 * NB: message is committed for sending, we should return
		 * on success because LND will finalize this message later.
		 *
		 * Also, there is possibility that message is committed for
		 * sending and also failed before delivering to LND,
		 * i.e: ENOMEM, in that case we can't fall through either
		 * because CPT for sending can be different from CPT for
		 * receiving, so we should return back to lnet_finalize()
		 * to make sure we are locking the correct partition.
		 */
		return rc;
	} else if (status == 0 &&	/* OK so far */
		   (msg->msg_routing && !msg->msg_sending)) {
		/* not forwarded */
		LASSERT(!msg->msg_receiving);	/* called back recv already */
		lnet_net_unlock(cpt);

		rc = lnet_send(LNET_NID_ANY, msg, LNET_NID_ANY);

		lnet_net_lock(cpt);
		/*
		 * NB: message is committed for sending, we should return
		 * on success because LND will finalize this message later.
		 *
		 * Also, there is possibility that message is committed for
		 * sending and also failed before delivering to LND,
		 * i.e: ENOMEM, in that case we can't fall through either:
		 * - The rule is message must decommit for sending first if
		 *   it's committed for both sending and receiving
		 * - CPT for sending can be different from CPT for receiving,
		 *   so we should return back to lnet_finalize() to make
		 *   sure we are locking the correct partition.
		 */
		return rc;
	}

	lnet_msg_decommit(msg, cpt, status);
	lnet_msg_free(msg);
	return 0;
}

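/*
 * Lower *healthv by @sensitivity, clamping at 0.
 */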
static void
lnet_dec_healthv_locked(atomic_t *healthv, int sensitivity)
{
	int h = atomic_read(healthv);

	if (h < sensitivity) {
		atomic_set(healthv, 0);
	} else {
		h -= sensitivity;
		atomic_set(healthv, h);
	}
}

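/*
 * Drop the health value of @local_ni and, if its health is now below
 * the maximum and it isn't queued yet, put it on the monitor thread's
 * local NI recovery queue.
 */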
static void
lnet_handle_local_failure(struct lnet_ni *local_ni)
{
	/*
	 * the lnet_net_lock(0) is used to protect the addref on the ni
	 * and the recovery queue.
	 */
	lnet_net_lock(0);
	/* the mt could've shutdown and cleaned up the queues */
	if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING) {
		lnet_net_unlock(0);
		return;
	}

	lnet_dec_healthv_locked(&local_ni->ni_healthv, lnet_health_sensitivity);
	/*
	 * add the NI to the recovery queue if it's not already there
	 * and its health value is actually below the maximum. It's
	 * possible that the sensitivity might be set to 0, and the health
	 * value will not be reduced. In this case, there is no reason to
	 * invoke recovery
	 */
	if (list_empty(&local_ni->ni_recovery) &&
	    atomic_read(&local_ni->ni_healthv) < LNET_MAX_HEALTH_VALUE) {
		CERROR("ni %s added to recovery queue. Health = %d\n",
		       libcfs_nid2str(local_ni->ni_nid),
		       atomic_read(&local_ni->ni_healthv));
		list_add_tail(&local_ni->ni_recovery,
			      &the_lnet.ln_mt_localNIRecovq);
		lnet_ni_addref_locked(local_ni, 0);
	}
	lnet_net_unlock(0);
}

void
lnet_handle_remote_failure_locked(struct lnet_peer_ni *lpni)
{
	__u32 sensitivity = lnet_health_sensitivity;
	__u32 lp_sensitivity;

	/* lpni could be NULL if we're in the LOLND case */
	if (!lpni)
		return;

	/*
	 * If there is a health sensitivity in the peer then use that
	 * instead of the globally set one.
	 */
	lp_sensitivity = lpni->lpni_peer_net->lpn_peer->lp_health_sensitivity;
	if (lp_sensitivity)
		sensitivity = lp_sensitivity;

	lnet_dec_healthv_locked(&lpni->lpni_healthv, sensitivity);
	/*
	 * add the peer NI to the recovery queue if it's not already there
	 * and its health value is actually below the maximum. It's
	 * possible that the sensitivity might be set to 0, and the health
	 * value will not be reduced. In this case, there is no reason to
	 * invoke recovery
	 */
	lnet_peer_ni_add_to_recoveryq_locked(lpni);
}

static void
lnet_handle_remote_failure(struct lnet_peer_ni *lpni)
{
	/* lpni could be NULL if we're in the LOLND case */
	if (!lpni)
		return;

	lnet_net_lock(0);
	/* the mt could've shutdown and cleaned up the queues */
	if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING) {
		lnet_net_unlock(0);
		return;
	}
	lnet_handle_remote_failure_locked(lpni);
	lnet_net_unlock(0);
}

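/*
 * Bump the health statistics matching @hstatus: the per-NI or
 * per-peer-NI counters and the global health counters. Runs under
 * lnet_net_lock(0), which serializes the global counters.
 */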
static void
lnet_incr_hstats(struct lnet_msg *msg, enum lnet_msg_hstatus hstatus)
{
	struct lnet_ni *ni = msg->msg_txni;
	struct lnet_peer_ni *lpni = msg->msg_txpeer;
	struct lnet_counters_health *health;

	health = &the_lnet.ln_counters[0]->lct_health;

	switch (hstatus) {
	case LNET_MSG_STATUS_LOCAL_INTERRUPT:
		atomic_inc(&ni->ni_hstats.hlt_local_interrupt);
		health->lch_local_interrupt_count++;
		break;
	case LNET_MSG_STATUS_LOCAL_DROPPED:
		atomic_inc(&ni->ni_hstats.hlt_local_dropped);
		health->lch_local_dropped_count++;
		break;
	case LNET_MSG_STATUS_LOCAL_ABORTED:
		atomic_inc(&ni->ni_hstats.hlt_local_aborted);
		health->lch_local_aborted_count++;
		break;
	case LNET_MSG_STATUS_LOCAL_NO_ROUTE:
		atomic_inc(&ni->ni_hstats.hlt_local_no_route);
		health->lch_local_no_route_count++;
		break;
	case LNET_MSG_STATUS_LOCAL_TIMEOUT:
		atomic_inc(&ni->ni_hstats.hlt_local_timeout);
		health->lch_local_timeout_count++;
		break;
	case LNET_MSG_STATUS_LOCAL_ERROR:
		atomic_inc(&ni->ni_hstats.hlt_local_error);
		health->lch_local_error_count++;
		break;
	case LNET_MSG_STATUS_REMOTE_DROPPED:
		if (lpni)
			atomic_inc(&lpni->lpni_hstats.hlt_remote_dropped);
		health->lch_remote_dropped_count++;
		break;
	case LNET_MSG_STATUS_REMOTE_ERROR:
		if (lpni)
			atomic_inc(&lpni->lpni_hstats.hlt_remote_error);
		health->lch_remote_error_count++;
		break;
	case LNET_MSG_STATUS_REMOTE_TIMEOUT:
		if (lpni)
			atomic_inc(&lpni->lpni_hstats.hlt_remote_timeout);
		health->lch_remote_timeout_count++;
		break;
	case LNET_MSG_STATUS_NETWORK_TIMEOUT:
		if (lpni)
			atomic_inc(&lpni->lpni_hstats.hlt_network_timeout);
		health->lch_network_timeout_count++;
		break;
	case LNET_MSG_STATUS_OK:
		break;
	}
}

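/*
 * Strip @msg down for a resend: take it off the active list (routed
 * and REPLY messages stay on it), decommit it for sending to return
 * its tx credit, queue it on the per-CPT resend queue and wake the
 * monitor thread.
 */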
static void
lnet_resend_msg_locked(struct lnet_msg *msg)
{
	msg->msg_retry_count++;

	/*
	 * remove message from the active list and reset it to prepare
	 * for a resend. Two exceptions to this:
	 *
	 * 1. the router case. When a message is being routed it is
	 * committed for rx when received and committed for tx when
	 * forwarded. We don't want to remove it from the active list, since
	 * code which handles receiving expects it to remain on the active
	 * list.
	 *
	 * 2. The REPLY case. Reply messages use the same message
	 * structure for the GET that was received.
	 */
	if (!msg->msg_routing && msg->msg_type != LNET_MSG_REPLY) {
		list_del_init(&msg->msg_activelist);
		msg->msg_onactivelist = 0;
	}
	/*
	 * The msg_target.nid which was originally set
	 * when calling LNetGet() or LNetPut() might've
	 * been overwritten if we're routing this message.
	 * Call lnet_msg_decommit_tx() to return the credit
	 * this message consumed. The message will
	 * consume another credit when it gets resent.
	 */
	msg->msg_target.nid = msg->msg_hdr.dest_nid;
	lnet_msg_decommit_tx(msg, -EAGAIN);
	msg->msg_sending = 0;
	msg->msg_receiving = 0;
	msg->msg_target_is_router = 0;

	CDEBUG(D_NET, "%s->%s:%s:%s - queuing msg (%p) for resend\n",
	       libcfs_nid2str(msg->msg_hdr.src_nid),
	       libcfs_nid2str(msg->msg_hdr.dest_nid),
	       lnet_msgtyp2str(msg->msg_type),
	       lnet_health_error2str(msg->msg_health_status), msg);

	list_add_tail(&msg->msg_list, the_lnet.ln_mt_resendqs[msg->msg_tx_cpt]);

	complete(&the_lnet.ln_mt_wait_complete);
}

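/*
 * Queue @msg on @containerq and try to claim a free slot in the
 * @workers array. Returns the claimed slot, or -1 if this thread (or
 * all @nworkers slots' worth of other threads) is already working the
 * queue, in which case the caller must leave the message for them.
 */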
static int
lnet_check_finalize_recursion_locked(struct lnet_msg *msg,
				     struct list_head *containerq,
				     int nworkers, void **workers)
{
	int my_slot = -1;
	int i;

	list_add_tail(&msg->msg_list, containerq);

	for (i = 0; i < nworkers; i++) {
		if (workers[i] == current)
			break;

		if (my_slot < 0 && workers[i] == NULL)
			my_slot = i;
	}

	if (i < nworkers || my_slot < 0)
		return -1;

	workers[my_slot] = current;

	return my_slot;
}

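/*
 * Try to queue @msg for a resend. Returns 0 if the message was queued
 * (possibly to be drained by the thread already holding the resend
 * slot), -ESHUTDOWN if LNet is shutting down, or -ENOTRECOVERABLE if
 * the message may not be resent: it's a recovery ping, it asked for
 * no resends, or it has exhausted its retry budget.
 */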
int
lnet_attempt_msg_resend(struct lnet_msg *msg)
{
	struct lnet_msg_container *container;
	int my_slot;
	int cpt;

	/* we can only resend tx_committed messages */
	LASSERT(msg->msg_tx_committed);

	/* don't resend recovery messages */
	if (msg->msg_recovery) {
		CDEBUG(D_NET, "msg %s->%s is a recovery ping. retry# %d\n",
		       libcfs_nid2str(msg->msg_from),
		       libcfs_nid2str(msg->msg_target.nid),
		       msg->msg_retry_count);
		return -ENOTRECOVERABLE;
	}

	/*
	 * if we explicitly indicated we don't want to resend then just
	 * return
	 */
	if (msg->msg_no_resend) {
		CDEBUG(D_NET, "msg %s->%s requested no resend. retry# %d\n",
		       libcfs_nid2str(msg->msg_from),
		       libcfs_nid2str(msg->msg_target.nid),
		       msg->msg_retry_count);
		return -ENOTRECOVERABLE;
	}

	/* check if the message has exceeded the number of retries */
	if (msg->msg_retry_count >= lnet_retry_count) {
		CNETERR("msg %s->%s exceeded retry count %d\n",
			libcfs_nid2str(msg->msg_from),
			libcfs_nid2str(msg->msg_target.nid),
			msg->msg_retry_count);
		return -ENOTRECOVERABLE;
	}

	cpt = msg->msg_tx_cpt;
	lnet_net_lock(cpt);

	/* check again under lock */
	if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING) {
		lnet_net_unlock(cpt);
		return -ESHUTDOWN;
	}

	container = the_lnet.ln_msg_containers[cpt];
	my_slot =
		lnet_check_finalize_recursion_locked(msg,
						     &container->msc_resending,
						     container->msc_nfinalizers,
						     container->msc_resenders);

	/* enough threads are resending */
	if (my_slot == -1) {
		lnet_net_unlock(cpt);
		return 0;
	}

	while (!list_empty(&container->msc_resending)) {
		msg = list_entry(container->msc_resending.next,
				 struct lnet_msg, msg_list);
		list_del(&msg->msg_list);

		/*
		 * resending the message will require us to call
		 * lnet_msg_decommit_tx() which will return the credit
		 * which this message holds. This could trigger another
		 * queued message to be sent. If that message fails and
		 * requires a resend we will recurse.
		 * But since at this point the slot is taken, the message
		 * will be queued in the container and dealt with
		 * later. This breaks the recursion.
		 */
		lnet_resend_msg_locked(msg);
	}

	/*
	 * msc_resenders is an array of process pointers. Each entry holds
	 * a pointer to the current process operating on the message. An
	 * array entry is created per CPT. If the array slot is already
	 * set, then it means that there is a thread on the CPT currently
	 * resending a message.
	 * Once the thread finishes clear the slot to enable the thread to
	 * take on more resend work.
	 */
	container->msc_resenders[my_slot] = NULL;
	lnet_net_unlock(cpt);

	return 0;
}

/*
 * Do a health check on the message:
 * return -1 if we're not going to handle the error or
 *   if we've reached the maximum number of retries.
 *   success case will return -1 as well
 * return 0 if the message is requeued for send
 */
static int
lnet_health_check(struct lnet_msg *msg)
{
	enum lnet_msg_hstatus hstatus = msg->msg_health_status;
	struct lnet_peer_ni *lpni;
	struct lnet_ni *ni;
	bool lo = false;

	/* if we're shutting down no point in handling health. */
	if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
		return -1;

	LASSERT(msg->msg_tx_committed || msg->msg_rx_committed);

	/*
	 * if we're sending to the LOLND then the msg_txpeer will not be
	 * set. So no need to sanity check it.
	 */
	if (msg->msg_tx_committed &&
	    LNET_NETTYP(LNET_NIDNET(msg->msg_txni->ni_nid)) != LOLND)
		LASSERT(msg->msg_txpeer);
	else if (msg->msg_tx_committed &&
		 LNET_NETTYP(LNET_NIDNET(msg->msg_txni->ni_nid)) == LOLND)
		lo = true;

	if (hstatus != LNET_MSG_STATUS_OK &&
	    ktime_compare(ktime_get(), msg->msg_deadline) >= 0)
		return -1;

	/*
	 * stats are only incremented for errors so avoid wasting time
	 * incrementing statistics if there is no error.
	 */
	if (hstatus != LNET_MSG_STATUS_OK) {
		lnet_net_lock(0);
		lnet_incr_hstats(msg, hstatus);
		lnet_net_unlock(0);
	}

	/*
	 * always prefer txni/txpeer if the message is committed for both
	 * directions.
	 */
	if (msg->msg_tx_committed) {
		ni = msg->msg_txni;
		lpni = msg->msg_txpeer;
	} else {
		ni = msg->msg_rxni;
		lpni = msg->msg_rxpeer;
	}

	if (!lo)
		LASSERT(ni && lpni);
	else
		LASSERT(ni);

	CDEBUG(D_NET, "health check: %s->%s: %s: %s\n",
	       libcfs_nid2str(ni->ni_nid),
	       (lo) ? "self" : libcfs_nid2str(lpni->lpni_nid),
	       lnet_msgtyp2str(msg->msg_type),
	       lnet_health_error2str(hstatus));

	switch (hstatus) {
	case LNET_MSG_STATUS_OK:
		/*
		 * increment the local ni health whether we successfully
		 * received or sent a message on it.
		 */
		lnet_inc_healthv(&ni->ni_healthv);
		/*
		 * It's possible msg_txpeer is NULL in the LOLND
		 * case. Only increment the peer's health if we're
		 * receiving a message from it. It's the only sure way to
		 * know that a remote interface is up.
		 * If this interface is part of a router, then take that
		 * as indication that the router is fully healthy.
		 */
		if (lpni && msg->msg_rx_committed) {
			/*
			 * If we're receiving a message from the router or
			 * I'm a router, then set that lpni's health to
			 * maximum so we can commence communication
			 */
			if (lnet_isrouter(lpni) || the_lnet.ln_routing)
				lnet_set_healthv(&lpni->lpni_healthv,
						 LNET_MAX_HEALTH_VALUE);
			else
				lnet_inc_healthv(&lpni->lpni_healthv);
		}

		/* we can finalize this message */
		return -1;
	case LNET_MSG_STATUS_LOCAL_INTERRUPT:
	case LNET_MSG_STATUS_LOCAL_DROPPED:
	case LNET_MSG_STATUS_LOCAL_ABORTED:
	case LNET_MSG_STATUS_LOCAL_NO_ROUTE:
	case LNET_MSG_STATUS_LOCAL_TIMEOUT:
		lnet_handle_local_failure(ni);
		if (msg->msg_tx_committed)
			/* add to the re-send queue */
			return lnet_attempt_msg_resend(msg);
		break;

	/*
	 * These errors will not trigger a resend so simply
	 * finalize the message
	 */
	case LNET_MSG_STATUS_LOCAL_ERROR:
		lnet_handle_local_failure(ni);
		return -1;

	/*
	 * TODO: since the remote dropped the message we can
	 * attempt a resend safely.
	 */
	case LNET_MSG_STATUS_REMOTE_DROPPED:
		lnet_handle_remote_failure(lpni);
		if (msg->msg_tx_committed)
			return lnet_attempt_msg_resend(msg);
		break;

	case LNET_MSG_STATUS_REMOTE_ERROR:
	case LNET_MSG_STATUS_REMOTE_TIMEOUT:
	case LNET_MSG_STATUS_NETWORK_TIMEOUT:
		lnet_handle_remote_failure(lpni);
		return -1;
	default:
		LBUG();
	}

	/* no resend is needed */
	return -1;
}

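/*
 * Drop the message's ref on its MD, deliver the completion event
 * (overriding the status with -ETIMEDOUT if the MD was aborted), and
 * detach the response tracker and unlink the MD once it's unlinkable.
 */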
static void
lnet_msg_detach_md(struct lnet_msg *msg, int cpt, int status)
{
	struct lnet_libmd *md = msg->msg_md;
	int unlink;

	/* Now it's safe to drop my caller's ref */
	md->md_refcount--;
	LASSERT(md->md_refcount >= 0);

	unlink = lnet_md_unlinkable(md);
	if (md->md_eq != NULL) {
		if ((md->md_flags & LNET_MD_FLAG_ABORTED) && !status) {
			msg->msg_ev.status = -ETIMEDOUT;
			CDEBUG(D_NET, "md 0x%p already unlinked\n", md);
		} else {
			msg->msg_ev.status = status;
		}
		msg->msg_ev.unlinked = unlink;
		lnet_eq_enqueue_event(md->md_eq, &msg->msg_ev);
	}

	if (unlink || (md->md_refcount == 0 &&
		       md->md_threshold == LNET_MD_THRESH_INF))
		lnet_detach_rsp_tracker(md, cpt);

	if (unlink)
		lnet_md_unlink(md);

	msg->msg_md = NULL;
}

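/*
 * Decide whether @msg should go through the health check path: it must
 * be committed with the matching peers set, and its event status must
 * be consistent with its health status.
 */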
static bool
lnet_is_health_check(struct lnet_msg *msg)
{
	bool hc = true;
	int status = msg->msg_ev.status;

	if ((!msg->msg_tx_committed && !msg->msg_rx_committed) ||
	    !msg->msg_onactivelist) {
		CDEBUG(D_NET, "msg %p not committed for send or receive\n",
		       msg);
		return false;
	}

	if ((msg->msg_tx_committed && !msg->msg_txpeer) ||
	    (msg->msg_rx_committed && !msg->msg_rxpeer)) {
		/* The optimized GET case does not set msg_rxpeer, but status
		 * could be zero. Only print the error message if we have a
		 * non-zero status.
		 */
		if (status)
			CDEBUG(D_NET, "msg %p status %d cannot retry\n", msg,
			       status);
		return false;
	}

	/* Check for status inconsistencies */
	if ((!status && msg->msg_health_status != LNET_MSG_STATUS_OK) ||
	    (status && msg->msg_health_status == LNET_MSG_STATUS_OK)) {
		CDEBUG(D_NET, "Msg %p is in inconsistent state, don't perform health "
		       "checking (%d, %d)\n", msg, status,
		       msg->msg_health_status);
		hc = false;
	}

	CDEBUG(D_NET, "health check = %d, status = %d, hstatus = %d\n",
	       hc, status, msg->msg_health_status);

	return hc;
}

char *
lnet_health_error2str(enum lnet_msg_hstatus hstatus)
{
	switch (hstatus) {
	case LNET_MSG_STATUS_LOCAL_INTERRUPT:
		return "LOCAL_INTERRUPT";
	case LNET_MSG_STATUS_LOCAL_DROPPED:
		return "LOCAL_DROPPED";
	case LNET_MSG_STATUS_LOCAL_ABORTED:
		return "LOCAL_ABORTED";
	case LNET_MSG_STATUS_LOCAL_NO_ROUTE:
		return "LOCAL_NO_ROUTE";
	case LNET_MSG_STATUS_LOCAL_TIMEOUT:
		return "LOCAL_TIMEOUT";
	case LNET_MSG_STATUS_LOCAL_ERROR:
		return "LOCAL_ERROR";
	case LNET_MSG_STATUS_REMOTE_DROPPED:
		return "REMOTE_DROPPED";
	case LNET_MSG_STATUS_REMOTE_ERROR:
		return "REMOTE_ERROR";
	case LNET_MSG_STATUS_REMOTE_TIMEOUT:
		return "REMOTE_TIMEOUT";
	case LNET_MSG_STATUS_NETWORK_TIMEOUT:
		return "NETWORK_TIMEOUT";
	case LNET_MSG_STATUS_OK:
		return "OK";
	default:
		return "<UNKNOWN>";
	}
}

bool
lnet_send_error_simulation(struct lnet_msg *msg,
			   enum lnet_msg_hstatus *hstatus)
{
	if (!msg)
		return false;

	if (list_empty(&the_lnet.ln_drop_rules))
		return false;

	/* match only health rules */
	if (!lnet_drop_rule_match(&msg->msg_hdr, LNET_NID_ANY,
				  hstatus))
		return false;

	CDEBUG(D_NET, "src %s(%s)->dst %s: %s simulate health error: %s\n",
	       libcfs_nid2str(msg->msg_hdr.src_nid),
	       libcfs_nid2str(msg->msg_txni->ni_nid),
	       libcfs_nid2str(msg->msg_hdr.dest_nid),
	       lnet_msgtyp2str(msg->msg_type),
	       lnet_health_error2str(*hstatus));

	return true;
}
EXPORT_SYMBOL(lnet_send_error_simulation);

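/*
 * Finalize @msg with completion status @status. The health check may
 * queue the message for a resend instead of completing it. Otherwise
 * detach the MD (delivering the event), then drain the container's
 * finalizing queue, completing messages while we hold a finalizer slot.
 */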
void
lnet_finalize(struct lnet_msg *msg, int status)
{
	struct lnet_msg_container *container;
	int my_slot;
	int cpt;
	int rc;

	LASSERT(!in_interrupt());

	if (msg == NULL)
		return;

	msg->msg_ev.status = status;

	if (lnet_is_health_check(msg)) {
		/*
		 * Check the health status of the message. If it has one
		 * of the errors that we're supposed to handle, and it has
		 * not timed out, then
		 *	1. Decrement the appropriate health_value
		 *	2. queue the message on the resend queue
		 *
		 * if the message send is success, timed out or failed in the
		 * health check for any reason then we'll just finalize the
		 * message. Otherwise just return since the message has been
		 * put on the resend queue.
		 */
		if (!lnet_health_check(msg))
			return;
	}

	/*
	 * We're not going to resend this message so detach its MD and invoke
	 * the appropriate callbacks
	 */
	if (msg->msg_md != NULL) {
		cpt = lnet_cpt_of_cookie(msg->msg_md->md_lh.lh_cookie);
		lnet_res_lock(cpt);
		lnet_msg_detach_md(msg, cpt, status);
		lnet_res_unlock(cpt);
	}

again:
	if (!msg->msg_tx_committed && !msg->msg_rx_committed) {
		/* not committed to network yet */
		LASSERT(!msg->msg_onactivelist);
		lnet_msg_free(msg);
		return;
	}

	/*
	 * NB: routed message can be committed for both receiving and sending,
	 * we should finalize in LIFO order and keep counters correct.
	 * (finalize sending first then finalize receiving)
	 */
	cpt = msg->msg_tx_committed ? msg->msg_tx_cpt : msg->msg_rx_cpt;
	lnet_net_lock(cpt);

	container = the_lnet.ln_msg_containers[cpt];

	/* Recursion breaker. Don't complete the message here if I am (or
	 * enough other threads are) already completing messages */
	my_slot = lnet_check_finalize_recursion_locked(msg,
					&container->msc_finalizing,
					container->msc_nfinalizers,
					container->msc_finalizers);

	/* enough threads are finalizing */
	if (my_slot == -1) {
		lnet_net_unlock(cpt);
		return;
	}

	rc = 0;
	while (!list_empty(&container->msc_finalizing)) {
		msg = list_entry(container->msc_finalizing.next,
				 struct lnet_msg, msg_list);

		list_del_init(&msg->msg_list);

		/* NB drops and regains the lnet lock if it actually does
		 * anything, so my finalizing friends can chomp along too */
		rc = lnet_complete_msg_locked(msg, cpt);
		if (rc != 0)
			break;
	}

	if (unlikely(!list_empty(&the_lnet.ln_delay_rules))) {
		lnet_net_unlock(cpt);
		lnet_delay_rule_check();
		lnet_net_lock(cpt);
	}

	container->msc_finalizers[my_slot] = NULL;
	lnet_net_unlock(cpt);

	if (rc != 0)
		goto again;
}
EXPORT_SYMBOL(lnet_finalize);

void
lnet_msg_container_cleanup(struct lnet_msg_container *container)
{
	int count = 0;

	if (container->msc_init == 0)
		return;

	while (!list_empty(&container->msc_active)) {
		struct lnet_msg *msg;

		msg = list_entry(container->msc_active.next,
				 struct lnet_msg, msg_activelist);
		LASSERT(msg->msg_onactivelist);
		msg->msg_onactivelist = 0;
		list_del_init(&msg->msg_activelist);
		lnet_msg_free(msg);
		count++;
	}

	if (count > 0)
		CERROR("%d active msg on exit\n", count);

	if (container->msc_finalizers != NULL) {
		LIBCFS_FREE(container->msc_finalizers,
			    container->msc_nfinalizers *
			    sizeof(*container->msc_finalizers));
		container->msc_finalizers = NULL;
	}

	if (container->msc_resenders != NULL) {
		LIBCFS_FREE(container->msc_resenders,
			    container->msc_nfinalizers *
			    sizeof(*container->msc_resenders));
		container->msc_resenders = NULL;
	}
	container->msc_init = 0;
}

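/*
 * Initialize the message container for partition @cpt: set up the
 * active, finalizing and resending lists and allocate the finalizer
 * and resender slot arrays, sized to the number of CPUs in the CPT.
 */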
int
lnet_msg_container_setup(struct lnet_msg_container *container, int cpt)
{
	int rc = 0;

	container->msc_init = 1;

	INIT_LIST_HEAD(&container->msc_active);
	INIT_LIST_HEAD(&container->msc_finalizing);
	INIT_LIST_HEAD(&container->msc_resending);

	/* number of CPUs */
	container->msc_nfinalizers = cfs_cpt_weight(lnet_cpt_table(), cpt);
	if (container->msc_nfinalizers == 0)
		container->msc_nfinalizers = 1;

	LIBCFS_CPT_ALLOC(container->msc_finalizers, lnet_cpt_table(), cpt,
			 container->msc_nfinalizers *
			 sizeof(*container->msc_finalizers));

	if (container->msc_finalizers == NULL) {
		CERROR("Failed to allocate message finalizers\n");
		lnet_msg_container_cleanup(container);
		return -ENOMEM;
	}

	LIBCFS_CPT_ALLOC(container->msc_resenders, lnet_cpt_table(), cpt,
			 container->msc_nfinalizers *
			 sizeof(*container->msc_resenders));

	if (container->msc_resenders == NULL) {
		CERROR("Failed to allocate message resenders\n");
		lnet_msg_container_cleanup(container);
		return -ENOMEM;
	}

	return rc;
}

void
lnet_msg_containers_destroy(void)
{
	struct lnet_msg_container *container;
	int i;

	if (the_lnet.ln_msg_containers == NULL)
		return;

	cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers)
		lnet_msg_container_cleanup(container);

	cfs_percpt_free(the_lnet.ln_msg_containers);
	the_lnet.ln_msg_containers = NULL;
}

int
lnet_msg_containers_create(void)
{
	struct lnet_msg_container *container;
	int rc;
	int i;

	the_lnet.ln_msg_containers = cfs_percpt_alloc(lnet_cpt_table(),
						      sizeof(*container));

	if (the_lnet.ln_msg_containers == NULL) {
		CERROR("Failed to allocate cpu-partition data for network\n");
		return -ENOMEM;
	}

	cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers) {
		rc = lnet_msg_container_setup(container, i);
		if (rc != 0) {
			lnet_msg_containers_destroy();