/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * Message decoding, parsing and finalizing routines
 */

#define DEBUG_SUBSYSTEM S_LNET

#include <lnet/lib-lnet.h>
void
lnet_build_unlink_event(struct lnet_libmd *md, struct lnet_event *ev)
{
	memset(ev, 0, sizeof(*ev));

	ev->status = 0;
	ev->unlinked = true;
	ev->type = LNET_EVENT_UNLINK;
	lnet_md_deconstruct(md, ev);
	lnet_md2handle(&ev->md_handle, md);
}
/*
 * Don't need any lock, must be called after lnet_commit_md
 */
void
lnet_build_msg_event(struct lnet_msg *msg, enum lnet_event_kind ev_type)
{
	struct lnet_hdr *hdr = &msg->msg_hdr;
	struct lnet_event *ev = &msg->msg_ev;

	LASSERT(!msg->msg_routing);

	ev->type = ev_type;
	ev->msg_type = msg->msg_type;

	if (ev_type == LNET_EVENT_SEND) {
		/* event for active message */
		ev->target.nid = le64_to_cpu(hdr->dest_nid);
		ev->target.pid = le32_to_cpu(hdr->dest_pid);
		ev->initiator.nid = LNET_NID_ANY;
		ev->initiator.pid = the_lnet.ln_pid;
		ev->source.nid = LNET_NID_ANY;
		ev->source.pid = the_lnet.ln_pid;
		ev->sender = LNET_NID_ANY;
	} else {
		/* event for passive message */
		ev->target.pid = hdr->dest_pid;
		ev->target.nid = hdr->dest_nid;
		ev->initiator.pid = hdr->src_pid;
		/* Multi-Rail: resolve src_nid to "primary" peer NID */
		ev->initiator.nid = msg->msg_initiator;
		/* Multi-Rail: track source NID */
		ev->source.pid = hdr->src_pid;
		ev->source.nid = hdr->src_nid;
		ev->rlength = hdr->payload_length;
		ev->sender = msg->msg_from;
		ev->mlength = msg->msg_wanted;
		ev->offset = msg->msg_offset;
	}
	switch (ev_type) {
	default:
		LBUG();

	case LNET_EVENT_PUT: /* passive PUT */
		ev->pt_index = hdr->msg.put.ptl_index;
		ev->match_bits = hdr->msg.put.match_bits;
		ev->hdr_data = hdr->msg.put.hdr_data;
		return;

	case LNET_EVENT_GET: /* passive GET */
		ev->pt_index = hdr->msg.get.ptl_index;
		ev->match_bits = hdr->msg.get.match_bits;
		ev->hdr_data = 0;
		return;

	case LNET_EVENT_ACK: /* ACK */
		ev->match_bits = hdr->msg.ack.match_bits;
		ev->mlength = hdr->msg.ack.mlength;
		return;

	case LNET_EVENT_REPLY: /* REPLY */
		return;

	case LNET_EVENT_SEND: /* active message */
		if (msg->msg_type == LNET_MSG_PUT) {
			ev->pt_index = le32_to_cpu(hdr->msg.put.ptl_index);
			ev->match_bits = le64_to_cpu(hdr->msg.put.match_bits);
			ev->offset = le32_to_cpu(hdr->msg.put.offset);
			ev->mlength =
			ev->rlength = le32_to_cpu(hdr->payload_length);
			ev->hdr_data = le64_to_cpu(hdr->msg.put.hdr_data);
		} else {
			LASSERT(msg->msg_type == LNET_MSG_GET);
			ev->pt_index = le32_to_cpu(hdr->msg.get.ptl_index);
			ev->match_bits = le64_to_cpu(hdr->msg.get.match_bits);
			ev->mlength =
			ev->rlength = le32_to_cpu(hdr->msg.get.sink_length);
			ev->offset = le32_to_cpu(hdr->msg.get.src_offset);
			ev->hdr_data = 0;
		}
		return;
	}
}
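/*
 * Worked example (editorial illustration, not from the original source):
 * an incoming (passive) PUT carrying 4096 bytes that is matched with
 * mlen 4096 yields an event with ev->type == LNET_EVENT_PUT,
 * ev->rlength == 4096 (what the sender sent), ev->mlength == 4096 (what
 * the match consumed), and ev->initiator resolved to the peer's primary
 * NID, per the assignments above.
 */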
void
lnet_msg_commit(struct lnet_msg *msg, int cpt)
{
	struct lnet_msg_container *container = the_lnet.ln_msg_containers[cpt];
	struct lnet_counters_common *common;
	s64 timeout_ns;

	/* set the message deadline */
	timeout_ns = lnet_transaction_timeout * NSEC_PER_SEC;
	msg->msg_deadline = ktime_add_ns(ktime_get(), timeout_ns);

	/* routed message can be committed for both receiving and sending */
	LASSERT(!msg->msg_tx_committed);

	if (msg->msg_sending) {
		LASSERT(!msg->msg_receiving);
		msg->msg_tx_cpt = cpt;
		msg->msg_tx_committed = 1;
		if (msg->msg_rx_committed) { /* routed message REPLY */
			LASSERT(msg->msg_onactivelist);
			return;
		}
	} else {
		LASSERT(!msg->msg_sending);
		msg->msg_rx_cpt = cpt;
		msg->msg_rx_committed = 1;
	}

	LASSERT(!msg->msg_onactivelist);

	msg->msg_onactivelist = 1;
	list_add_tail(&msg->msg_activelist, &container->msc_active);

	common = &the_lnet.ln_counters[cpt]->lct_common;
	common->lcc_msgs_alloc++;
	if (common->lcc_msgs_alloc > common->lcc_msgs_max)
		common->lcc_msgs_max = common->lcc_msgs_alloc;
}
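/*
 * Lifecycle note: "committing" binds a message to a CPT and places it on
 * that CPT's active list. A routed message is committed twice -- for rx
 * when it arrives and for tx when it is forwarded -- and is always
 * decommitted for sending first, as lnet_msg_decommit() below enforces.
 */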
static void
lnet_msg_decommit_tx(struct lnet_msg *msg, int status)
{
	struct lnet_counters_common *common;
	struct lnet_event *ev = &msg->msg_ev;

	LASSERT(msg->msg_tx_committed);
	if (status != 0)
		goto out;

	common = &(the_lnet.ln_counters[msg->msg_tx_cpt]->lct_common);
	switch (ev->type) {
	default: /* routed message */
		LASSERT(msg->msg_routing);
		LASSERT(msg->msg_rx_committed);
		LASSERT(ev->type == 0);

		common->lcc_route_length += msg->msg_len;
		common->lcc_route_count++;
		goto incr_stats;

	case LNET_EVENT_PUT:
		/* should have been decommitted */
		LASSERT(!msg->msg_rx_committed);
		/* overwritten while sending ACK */
		LASSERT(msg->msg_type == LNET_MSG_ACK);
		msg->msg_type = LNET_MSG_PUT; /* fix type */
		break;

	case LNET_EVENT_SEND:
		LASSERT(!msg->msg_rx_committed);
		if (msg->msg_type == LNET_MSG_PUT)
			common->lcc_send_length += msg->msg_len;
		break;

	case LNET_EVENT_GET:
		LASSERT(msg->msg_rx_committed);
		/* overwritten while sending reply, we should never be
		 * here for optimized GET */
		LASSERT(msg->msg_type == LNET_MSG_REPLY);
		msg->msg_type = LNET_MSG_GET; /* fix type */
		break;
	}

	common->lcc_send_count++;

incr_stats:
	if (msg->msg_txpeer)
		lnet_incr_stats(&msg->msg_txpeer->lpni_stats,
				msg->msg_type,
				LNET_STATS_TYPE_SEND);
	if (msg->msg_txni)
		lnet_incr_stats(&msg->msg_txni->ni_stats,
				msg->msg_type,
				LNET_STATS_TYPE_SEND);
out:
	lnet_return_tx_credits_locked(msg);
	msg->msg_tx_committed = 0;
}
static void
lnet_msg_decommit_rx(struct lnet_msg *msg, int status)
{
	struct lnet_counters_common *common;
	struct lnet_event *ev = &msg->msg_ev;

	LASSERT(!msg->msg_tx_committed); /* decommitted or never committed */
	LASSERT(msg->msg_rx_committed);

	if (status != 0)
		goto out;

	common = &(the_lnet.ln_counters[msg->msg_rx_cpt]->lct_common);
	switch (ev->type) {
	default:
		LASSERT(ev->type == 0);
		LASSERT(msg->msg_routing);
		goto incr_stats;

	case LNET_EVENT_ACK:
		LASSERT(msg->msg_type == LNET_MSG_ACK);
		break;

	case LNET_EVENT_GET:
		/* type is "REPLY" if it's an optimized GET on passive side,
		 * because optimized GET will never be committed for sending,
		 * so message type wouldn't be changed back to "GET" by
		 * lnet_msg_decommit_tx(), see details in lnet_parse_get() */
		LASSERT(msg->msg_type == LNET_MSG_REPLY ||
			msg->msg_type == LNET_MSG_GET);
		common->lcc_send_length += msg->msg_wanted;
		break;

	case LNET_EVENT_PUT:
		LASSERT(msg->msg_type == LNET_MSG_PUT);
		break;

	case LNET_EVENT_REPLY:
		/* type is "GET" if it's an optimized GET on active side,
		 * see details in lnet_create_reply_msg() */
		LASSERT(msg->msg_type == LNET_MSG_GET ||
			msg->msg_type == LNET_MSG_REPLY);
		break;
	}

	common->lcc_recv_count++;

incr_stats:
	if (msg->msg_rxpeer)
		lnet_incr_stats(&msg->msg_rxpeer->lpni_stats,
				msg->msg_type,
				LNET_STATS_TYPE_RECV);
	if (msg->msg_rxni)
		lnet_incr_stats(&msg->msg_rxni->ni_stats,
				msg->msg_type,
				LNET_STATS_TYPE_RECV);

	if (ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_REPLY)
		common->lcc_recv_length += msg->msg_wanted;

out:
	lnet_return_rx_credits_locked(msg);
	msg->msg_rx_committed = 0;
}
void
lnet_msg_decommit(struct lnet_msg *msg, int cpt, int status)
{
	int cpt2 = cpt;

	LASSERT(msg->msg_tx_committed || msg->msg_rx_committed);
	LASSERT(msg->msg_onactivelist);

	if (msg->msg_tx_committed) { /* always decommit for sending first */
		LASSERT(cpt == msg->msg_tx_cpt);
		lnet_msg_decommit_tx(msg, status);
	}

	if (msg->msg_rx_committed) {
		/* forwarding msg committed for both receiving and sending */
		if (cpt != msg->msg_rx_cpt) {
			lnet_net_unlock(cpt);
			cpt2 = msg->msg_rx_cpt;
			lnet_net_lock(cpt2);
		}
		lnet_msg_decommit_rx(msg, status);
	}

	list_del(&msg->msg_activelist);
	msg->msg_onactivelist = 0;

	the_lnet.ln_counters[cpt2]->lct_common.lcc_msgs_alloc--;

	if (cpt2 != cpt) {
		lnet_net_unlock(cpt2);
		lnet_net_lock(cpt);
	}
}
void
lnet_msg_attach_md(struct lnet_msg *msg, struct lnet_libmd *md,
		   unsigned int offset, unsigned int mlen)
{
	/* NB: @offset and @mlen are only useful for receiving */
	/* Here, we attach the MD on lnet_msg, mark it busy and
	 * decrement its threshold. Come what may, the lnet_msg "owns"
	 * the MD until a call to lnet_msg_detach_md or lnet_finalize()
	 * signals completion. */
	LASSERT(!msg->msg_routing);

	msg->msg_md = md;
	if (msg->msg_receiving) { /* committed for receiving */
		msg->msg_offset = offset;
		msg->msg_wanted = mlen;
	}

	md->md_refcount++;
	if (md->md_threshold != LNET_MD_THRESH_INF) {
		LASSERT(md->md_threshold > 0);
		md->md_threshold--;
	}

	/* build umd in event */
	lnet_md2handle(&msg->msg_ev.md_handle, md);
	lnet_md_deconstruct(md, &msg->msg_ev);
}
static int
lnet_complete_msg_locked(struct lnet_msg *msg, int cpt)
{
	struct lnet_handle_wire ack_wmd;
	int rc;
	int status = msg->msg_ev.status;

	LASSERT(msg->msg_onactivelist);

	if (status == 0 && msg->msg_ack) {
		/* Only send an ACK if the PUT completed successfully */

		lnet_msg_decommit(msg, cpt, 0);

		msg->msg_ack = 0;
		lnet_net_unlock(cpt);

		LASSERT(msg->msg_ev.type == LNET_EVENT_PUT);
		LASSERT(!msg->msg_routing);

		ack_wmd = msg->msg_hdr.msg.put.ack_wmd;

		lnet_prep_send(msg, LNET_MSG_ACK, msg->msg_ev.source, 0, 0);

		msg->msg_hdr.msg.ack.dst_wmd = ack_wmd;
		msg->msg_hdr.msg.ack.match_bits = msg->msg_ev.match_bits;
		msg->msg_hdr.msg.ack.mlength = cpu_to_le32(msg->msg_ev.mlength);

		rc = lnet_send(msg->msg_ev.target.nid, msg, msg->msg_from);

		lnet_net_lock(cpt);
		/*
		 * NB: the message is committed for sending; we should return
		 * on success because the LND will finalize this message later.
		 *
		 * Also, it is possible that the message is committed for
		 * sending but failed before being delivered to the LND
		 * (e.g. ENOMEM). In that case we can't fall through either,
		 * because the CPT for sending can differ from the CPT for
		 * receiving, so we should return to lnet_finalize() to make
		 * sure we are locking the correct partition.
		 */
		return rc;

	} else if (status == 0 &&	/* OK so far */
		   (msg->msg_routing && !msg->msg_sending)) {
		/* not forwarded */
		LASSERT(!msg->msg_receiving);	/* called back recv already */
		lnet_net_unlock(cpt);

		rc = lnet_send(LNET_NID_ANY, msg, LNET_NID_ANY);

		lnet_net_lock(cpt);
		/*
		 * NB: the message is committed for sending; we should return
		 * on success because the LND will finalize this message later.
		 *
		 * Also, it is possible that the message is committed for
		 * sending but failed before being delivered to the LND
		 * (e.g. ENOMEM). In that case we can't fall through either:
		 * - The rule is that a message must be decommitted for
		 *   sending first if it's committed for both sending and
		 *   receiving.
		 * - The CPT for sending can differ from the CPT for
		 *   receiving, so we should return to lnet_finalize() to
		 *   make sure we are locking the correct partition.
		 */
		return rc;
	}

	lnet_msg_decommit(msg, cpt, status);
	lnet_msg_free(msg);
	return 0;
}
static void
lnet_dec_healthv_locked(atomic_t *healthv, int sensitivity)
{
	int h = atomic_read(healthv);

	if (h < sensitivity) {
		atomic_set(healthv, 0);
	} else {
		h -= sensitivity;
		atomic_set(healthv, h);
	}
}
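/*
 * Worked example (editorial; assumes the stock LNET_MAX_HEALTH_VALUE of
 * 1000 and a health sensitivity of 100): a fully healthy interface drops
 * from 1000 to 900 on its first failure and bottoms out at 0 after ten
 * consecutive failures; any remainder smaller than the sensitivity is
 * clamped to 0 rather than going negative.
 */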
/* must hold net_lock/0 */
void
lnet_ni_add_to_recoveryq_locked(struct lnet_ni *ni,
				struct list_head *recovery_queue, time64_t now)
{
	if (!list_empty(&ni->ni_recovery))
		return;

	if (atomic_read(&ni->ni_healthv) == LNET_MAX_HEALTH_VALUE)
		return;

	/* This NI is going on the recovery queue, so take a ref on it */
	lnet_ni_addref_locked(ni, 0);

	lnet_ni_set_next_ping(ni, now);

	CDEBUG(D_NET, "%s added to recovery queue. ping count: %u next ping: %lld health: %d\n",
	       libcfs_nidstr(&ni->ni_nid),
	       ni->ni_ping_count,
	       ni->ni_next_ping,
	       atomic_read(&ni->ni_healthv));

	list_add_tail(&ni->ni_recovery, recovery_queue);
}
static void
lnet_handle_local_failure(struct lnet_ni *local_ni)
{
	/*
	 * the lnet_net_lock(0) is used to protect the addref on the ni
	 * and the recovery queue.
	 */
	lnet_net_lock(0);
	/* the mt could've shutdown and cleaned up the queues */
	if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING) {
		lnet_net_unlock(0);
		return;
	}

	lnet_dec_healthv_locked(&local_ni->ni_healthv, lnet_health_sensitivity);
	lnet_ni_add_to_recoveryq_locked(local_ni, &the_lnet.ln_mt_localNIRecovq,
					ktime_get_seconds());
	lnet_net_unlock(0);
}
/* must hold net_lock/0 */
void
lnet_handle_remote_failure_locked(struct lnet_peer_ni *lpni)
{
	__u32 sensitivity = lnet_health_sensitivity;
	__u32 lp_sensitivity;

	/*
	 * If there is a health sensitivity in the peer then use that
	 * instead of the globally set one.
	 */
	lp_sensitivity = lpni->lpni_peer_net->lpn_peer->lp_health_sensitivity;
	if (lp_sensitivity)
		sensitivity = lp_sensitivity;

	lnet_dec_healthv_locked(&lpni->lpni_healthv, sensitivity);

	/* update the peer_net's health value */
	lnet_update_peer_net_healthv(lpni);

	/*
	 * add the peer NI to the recovery queue if it's not already there
	 * and its health value is actually below the maximum. It's
	 * possible that the sensitivity might be set to 0, in which case
	 * the health value will not be reduced and there is no reason to
	 * queue it for recovery.
	 */
	lnet_peer_ni_add_to_recoveryq_locked(lpni,
					     &the_lnet.ln_mt_peerNIRecovq,
					     ktime_get_seconds());
}
static void
lnet_handle_remote_failure(struct lnet_peer_ni *lpni)
{
	/* lpni could be NULL if we're in the LOLND case */
	if (!lpni)
		return;

	lnet_net_lock(0);
	/* the mt could've shutdown and cleaned up the queues */
	if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING) {
		lnet_net_unlock(0);
		return;
	}
	lnet_handle_remote_failure_locked(lpni);
	lnet_net_unlock(0);
}
static void
lnet_incr_hstats(struct lnet_ni *ni, struct lnet_peer_ni *lpni,
		 enum lnet_msg_hstatus hstatus)
{
	struct lnet_counters_health *health;

	health = &the_lnet.ln_counters[0]->lct_health;

	switch (hstatus) {
	case LNET_MSG_STATUS_LOCAL_INTERRUPT:
		atomic_inc(&ni->ni_hstats.hlt_local_interrupt);
		health->lch_local_interrupt_count++;
		break;
	case LNET_MSG_STATUS_LOCAL_DROPPED:
		atomic_inc(&ni->ni_hstats.hlt_local_dropped);
		health->lch_local_dropped_count++;
		break;
	case LNET_MSG_STATUS_LOCAL_ABORTED:
		atomic_inc(&ni->ni_hstats.hlt_local_aborted);
		health->lch_local_aborted_count++;
		break;
	case LNET_MSG_STATUS_LOCAL_NO_ROUTE:
		atomic_inc(&ni->ni_hstats.hlt_local_no_route);
		health->lch_local_no_route_count++;
		break;
	case LNET_MSG_STATUS_LOCAL_TIMEOUT:
		atomic_inc(&ni->ni_hstats.hlt_local_timeout);
		health->lch_local_timeout_count++;
		break;
	case LNET_MSG_STATUS_LOCAL_ERROR:
		atomic_inc(&ni->ni_hstats.hlt_local_error);
		health->lch_local_error_count++;
		break;
	case LNET_MSG_STATUS_REMOTE_DROPPED:
		if (lpni)
			atomic_inc(&lpni->lpni_hstats.hlt_remote_dropped);
		health->lch_remote_dropped_count++;
		break;
	case LNET_MSG_STATUS_REMOTE_ERROR:
		if (lpni)
			atomic_inc(&lpni->lpni_hstats.hlt_remote_error);
		health->lch_remote_error_count++;
		break;
	case LNET_MSG_STATUS_REMOTE_TIMEOUT:
		if (lpni)
			atomic_inc(&lpni->lpni_hstats.hlt_remote_timeout);
		health->lch_remote_timeout_count++;
		break;
	case LNET_MSG_STATUS_NETWORK_TIMEOUT:
		if (lpni)
			atomic_inc(&lpni->lpni_hstats.hlt_network_timeout);
		health->lch_network_timeout_count++;
		break;
	case LNET_MSG_STATUS_OK:
		break;
	}
}
static void
lnet_resend_msg_locked(struct lnet_msg *msg)
{
	msg->msg_retry_count++;

	/*
	 * remove the message from the active list and reset it to prepare
	 * for a resend. There are two exceptions to this:
	 *
	 * 1. The router case. When a message is being routed it is
	 * committed for rx when received and committed for tx when
	 * forwarded. We don't want to remove it from the active list, since
	 * the code which handles receiving expects it to remain on the
	 * active list.
	 *
	 * 2. The REPLY case. Reply messages use the same message
	 * structure as the GET that was received.
	 */
	if (!msg->msg_routing && msg->msg_type != LNET_MSG_REPLY) {
		list_del_init(&msg->msg_activelist);
		msg->msg_onactivelist = 0;
	}

	/*
	 * The msg_target.nid which was originally set
	 * when calling LNetGet() or LNetPut() might've
	 * been overwritten if we're routing this message.
	 * Call lnet_msg_decommit_tx() to return the credit
	 * this message consumed. The message will
	 * consume another credit when it gets resent.
	 */
	msg->msg_target.nid = msg->msg_hdr.dest_nid;
	lnet_msg_decommit_tx(msg, -EAGAIN);
	msg->msg_sending = 0;
	msg->msg_receiving = 0;
	msg->msg_target_is_router = 0;

	CDEBUG(D_NET, "%s->%s:%s:%s - queuing msg (%p) for resend\n",
	       libcfs_nid2str(msg->msg_hdr.src_nid),
	       libcfs_nid2str(msg->msg_hdr.dest_nid),
	       lnet_msgtyp2str(msg->msg_type),
	       lnet_health_error2str(msg->msg_health_status), msg);

	list_add_tail(&msg->msg_list, the_lnet.ln_mt_resendqs[msg->msg_tx_cpt]);

	complete(&the_lnet.ln_mt_wait_complete);
}
static int
lnet_check_finalize_recursion_locked(struct lnet_msg *msg,
				     struct list_head *containerq,
				     int nworkers, void **workers)
{
	int my_slot = -1;
	int i;

	list_add_tail(&msg->msg_list, containerq);

	for (i = 0; i < nworkers; i++) {
		if (workers[i] == current)
			break;

		if (my_slot < 0 && workers[i] == NULL)
			my_slot = i;
	}

	if (i < nworkers || my_slot < 0)
		return -1;

	workers[my_slot] = current;

	return my_slot;
}
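/*
 * In other words: the message is always queued on @containerq first, and
 * the caller is given a worker slot only if no other thread on this CPT
 * (including itself) is already draining the queue. For example, if
 * thread A holds slot 0 and a nested failure re-enters this path from
 * A's own drain loop, the nested call returns -1 and the message is
 * simply left on the queue for A to pick up. This is how the recursion
 * described in the comments below is broken.
 */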
static int
lnet_attempt_msg_resend(struct lnet_msg *msg)
{
	struct lnet_msg_container *container;
	int my_slot;
	int cpt;

	/* we can only resend tx_committed messages */
	LASSERT(msg->msg_tx_committed);

	/* don't resend recovery messages */
	if (msg->msg_recovery) {
		CDEBUG(D_NET, "msg %s->%s is a recovery ping. retry# %d\n",
		       libcfs_nid2str(msg->msg_from),
		       libcfs_nid2str(msg->msg_target.nid),
		       msg->msg_retry_count);
		return -ENOTRECOVERABLE;
	}

	/*
	 * if we explicitly indicated we don't want to resend then just
	 * return
	 */
	if (msg->msg_no_resend) {
		CDEBUG(D_NET, "msg %s->%s requested no resend. retry# %d\n",
		       libcfs_nid2str(msg->msg_from),
		       libcfs_nid2str(msg->msg_target.nid),
		       msg->msg_retry_count);
		return -ENOTRECOVERABLE;
	}

	/* check if the message has exceeded the number of retries */
	if (msg->msg_retry_count >= lnet_retry_count) {
		CNETERR("msg %s->%s exceeded retry count %d\n",
			libcfs_nid2str(msg->msg_from),
			libcfs_nid2str(msg->msg_target.nid),
			msg->msg_retry_count);
		return -ENOTRECOVERABLE;
	}

	cpt = msg->msg_tx_cpt;
	lnet_net_lock(cpt);

	/* check again under lock */
	if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING) {
		lnet_net_unlock(cpt);
		return -ESHUTDOWN;
	}

	container = the_lnet.ln_msg_containers[cpt];
	my_slot =
		lnet_check_finalize_recursion_locked(msg,
						     &container->msc_resending,
						     container->msc_nfinalizers,
						     container->msc_resenders);

	/* enough threads are resending */
	if (my_slot == -1) {
		lnet_net_unlock(cpt);
		return 0;
	}

	while (!list_empty(&container->msc_resending)) {
		msg = list_entry(container->msc_resending.next,
				 struct lnet_msg, msg_list);
		list_del(&msg->msg_list);

		/*
		 * resending the message will require us to call
		 * lnet_msg_decommit_tx() which will return the credit
		 * which this message holds. This could trigger another
		 * queued message to be sent. If that message fails and
		 * requires a resend we will recurse.
		 * But since at this point the slot is taken, the message
		 * will be queued in the container and dealt with
		 * later. This breaks the recursion.
		 */
		lnet_resend_msg_locked(msg);
	}

	/*
	 * msc_resenders is an array of process pointers. Each entry holds
	 * a pointer to the current process operating on the message. An
	 * array entry is created per CPT. If the array slot is already
	 * set, then it means that there is a thread on the CPT currently
	 * resending a message.
	 * Once the thread finishes, clear the slot to enable it to take
	 * on more resend work.
	 */
	container->msc_resenders[my_slot] = NULL;
	lnet_net_unlock(cpt);

	return 0;
}
/*
 * Do a health check on the message:
 * return -1 if we're not going to handle the error or
 *   if we've reached the maximum number of retries.
 *   The success case will return -1 as well.
 * return 0 if the message is requeued for send
 */
static int
lnet_health_check(struct lnet_msg *msg)
{
	enum lnet_msg_hstatus hstatus = msg->msg_health_status;
	struct lnet_peer_ni *lpni;
	struct lnet_ni *ni;
	bool lo = false;
	bool attempt_local_resend;
	bool attempt_remote_resend;
	bool handle_local_health;
	bool handle_remote_health;

	/* if we're shutting down no point in handling health. */
	if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
		return -1;

	LASSERT(msg->msg_tx_committed || msg->msg_rx_committed);

	/*
	 * if we're sending to the LOLND then the msg_txpeer will not be
	 * set. So no need to sanity check it.
	 */
	if (msg->msg_tx_committed &&
	    !nid_is_lo0(&msg->msg_txni->ni_nid))
		LASSERT(msg->msg_txpeer);
	else if (msg->msg_tx_committed &&
		 nid_is_lo0(&msg->msg_txni->ni_nid))
		lo = true;

	if (hstatus != LNET_MSG_STATUS_OK &&
	    ktime_compare(ktime_get(), msg->msg_deadline) >= 0)
		return -1;

	/*
	 * always prefer txni/txpeer if the message is committed for both
	 * directions.
	 */
	if (msg->msg_tx_committed) {
		ni = msg->msg_txni;
		lpni = msg->msg_txpeer;
		attempt_local_resend = attempt_remote_resend = true;
	} else {
		ni = msg->msg_rxni;
		lpni = msg->msg_rxpeer;
		attempt_local_resend = attempt_remote_resend = false;
	}

	if (!lo)
		LASSERT(ni && lpni);
	else
		LASSERT(ni);

	CDEBUG(D_NET, "health check: %s->%s: %s: %s\n",
	       libcfs_nidstr(&ni->ni_nid),
	       (lo) ? "self" : libcfs_nidstr(&lpni->lpni_nid),
	       lnet_msgtyp2str(msg->msg_type),
	       lnet_health_error2str(hstatus));

	/*
	 * stats are only incremented for errors so avoid wasting time
	 * incrementing statistics if there is no error. Similarly, whether to
	 * update health values or perform resends is only applicable for
	 * messages with a health status != OK.
	 */
	if (hstatus != LNET_MSG_STATUS_OK) {
		/* Don't further decrement the health value if a recovery
		 * message failed.
		 */
		if (msg->msg_recovery)
			handle_local_health = handle_remote_health = false;
		else
			handle_local_health = handle_remote_health = true;

		/* For local failures, health/recovery/resends are not needed if
		 * I only have a single (non-lolnd) interface. NB: pb_nnis
		 * includes the lolnd interface, so a single-rail node would
		 * have pb_nnis == 2.
		 */
		if (the_lnet.ln_ping_target->pb_nnis <= 2) {
			handle_local_health = false;
			attempt_local_resend = false;
		}

		lnet_incr_hstats(ni, lpni, hstatus);

		/* For remote failures, health/recovery/resends are not needed
		 * if the peer only has a single interface. Special case for
		 * routers where we rely on the health feature to manage route
		 * aliveness. NB: unlike pb_nnis above, lp_nnis does _not_
		 * include the lolnd, so a single-rail node would have
		 * lp_nnis == 1.
		 */
		if (lpni && lpni->lpni_peer_net &&
		    lpni->lpni_peer_net->lpn_peer &&
		    lpni->lpni_peer_net->lpn_peer->lp_nnis <= 1) {
			attempt_remote_resend = false;
			if (!lnet_isrouter(lpni))
				handle_remote_health = false;
		}
	}

	switch (hstatus) {
	case LNET_MSG_STATUS_OK:
		/*
		 * increment the local ni health whether we successfully
		 * received or sent a message on it.
		 *
		 * Ping counts are reset to 0 as appropriate to allow for
		 * faster recovery.
		 */
		lnet_inc_healthv(&ni->ni_healthv, lnet_health_sensitivity);
		lnet_net_lock(0);
		ni->ni_ping_count = 0;
		/*
		 * It's possible msg_txpeer is NULL in the LOLND
		 * case. Only increment the peer's health if we're
		 * receiving a message from it. It's the only sure way to
		 * know that a remote interface is up.
		 * If this interface is part of a router, then take that
		 * as indication that the router is fully healthy.
		 */
		if (lpni && msg->msg_rx_committed) {
			lpni->lpni_ping_count = 0;
			/*
			 * If we're receiving a message from the router or
			 * I'm a router, then set that lpni's health to
			 * maximum so we can commence communication
			 */
			if (lnet_isrouter(lpni) || the_lnet.ln_routing) {
				lnet_set_lpni_healthv_locked(lpni,
					LNET_MAX_HEALTH_VALUE);
			} else {
				__u32 sensitivity = lpni->lpni_peer_net->
					lpn_peer->lp_health_sensitivity;

				lnet_inc_lpni_healthv_locked(lpni,
					(sensitivity) ? sensitivity :
					lnet_health_sensitivity);
				/* This peer NI may have previously aged out
				 * of recovery. Now that we've received a
				 * message from it, we can continue recovery
				 * if its health value is still below the
				 * maximum.
				 */
				lnet_peer_ni_add_to_recoveryq_locked(lpni,
						&the_lnet.ln_mt_peerNIRecovq,
						ktime_get_seconds());
			}
		}
		lnet_net_unlock(0);

		/* we can finalize this message */
		return -1;
	case LNET_MSG_STATUS_LOCAL_INTERRUPT:
	case LNET_MSG_STATUS_LOCAL_DROPPED:
	case LNET_MSG_STATUS_LOCAL_ABORTED:
	case LNET_MSG_STATUS_LOCAL_NO_ROUTE:
	case LNET_MSG_STATUS_LOCAL_TIMEOUT:
		if (handle_local_health)
			lnet_handle_local_failure(ni);
		if (attempt_local_resend)
			return lnet_attempt_msg_resend(msg);
		break;
	case LNET_MSG_STATUS_LOCAL_ERROR:
		if (handle_local_health)
			lnet_handle_local_failure(ni);
		break;
	case LNET_MSG_STATUS_REMOTE_DROPPED:
		if (handle_remote_health)
			lnet_handle_remote_failure(lpni);
		if (attempt_remote_resend)
			return lnet_attempt_msg_resend(msg);
		break;
	case LNET_MSG_STATUS_REMOTE_ERROR:
	case LNET_MSG_STATUS_REMOTE_TIMEOUT:
		if (handle_remote_health)
			lnet_handle_remote_failure(lpni);
		break;
	case LNET_MSG_STATUS_NETWORK_TIMEOUT:
		if (handle_remote_health)
			lnet_handle_remote_failure(lpni);
		if (handle_local_health)
			lnet_handle_local_failure(ni);
		break;
	default:
		LBUG();
	}

	/* no resend is needed */
	return -1;
}
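/*
 * Quick reference (editorial summary of the switch above):
 *
 *	LOCAL_INTERRUPT/DROPPED/ABORTED/NO_ROUTE/TIMEOUT
 *		-> decrement local NI health, resend if allowed
 *	LOCAL_ERROR		-> decrement local NI health, no resend
 *	REMOTE_DROPPED		-> decrement peer NI health, resend if allowed
 *	REMOTE_ERROR/TIMEOUT	-> decrement peer NI health, no resend
 *	NETWORK_TIMEOUT		-> decrement both, no resend
 *
 * "if allowed" is gated by the attempt_{local,remote}_resend flags
 * computed earlier, which require the message to be tx-committed.
 */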
static void
lnet_msg_detach_md(struct lnet_msg *msg, int status)
{
	struct lnet_libmd *md = msg->msg_md;
	lnet_handler_t handler = NULL;
	int cpt = lnet_cpt_of_cookie(md->md_lh.lh_cookie);
	int unlink;

	lnet_res_lock(cpt);
	while (md->md_flags & LNET_MD_FLAG_HANDLING)
		/* An event handler is running - wait for it to
		 * complete to avoid races.
		 */
		lnet_md_wait_handling(md, cpt);

	/* Now it's safe to drop my caller's ref */
	md->md_refcount--;
	LASSERT(md->md_refcount >= 0);

	unlink = lnet_md_unlinkable(md);
	if (md->md_handler) {
		if ((md->md_flags & LNET_MD_FLAG_ABORTED) && !status) {
			msg->msg_ev.status = -ETIMEDOUT;
			CDEBUG(D_NET, "md 0x%p already unlinked\n", md);
		} else {
			msg->msg_ev.status = status;
		}
		msg->msg_ev.unlinked = unlink;
		handler = md->md_handler;
		if (!unlink)
			md->md_flags |= LNET_MD_FLAG_HANDLING;
	}

	if (unlink || (md->md_refcount == 0 &&
		       md->md_threshold == LNET_MD_THRESH_INF))
		lnet_detach_rsp_tracker(md, cpt);

	msg->msg_md = NULL;
	if (unlink)
		lnet_md_unlink(md);

	lnet_res_unlock(cpt);

	if (handler) {
		handler(&msg->msg_ev);
		if (!unlink) {
			lnet_res_lock(cpt);
			md->md_flags &= ~LNET_MD_FLAG_HANDLING;
			wake_up_var(md);
			lnet_res_unlock(cpt);
		}
	}
}
static bool
lnet_is_health_check(struct lnet_msg *msg)
{
	bool hc = true;
	int status = msg->msg_ev.status;

	if ((!msg->msg_tx_committed && !msg->msg_rx_committed) ||
	    !msg->msg_onactivelist) {
		CDEBUG(D_NET, "msg %p not committed for send or receive\n",
		       msg);
		return false;
	}

	if ((msg->msg_tx_committed && !msg->msg_txpeer) ||
	    (msg->msg_rx_committed && !msg->msg_rxpeer)) {
		/* The optimized GET case does not set msg_rxpeer, but status
		 * could be zero. Only print the error message if we have a
		 * non-zero status.
		 */
		if (status)
			CDEBUG(D_NET, "msg %p status %d cannot retry\n", msg,
			       status);
		return false;
	}

	/* Check for status inconsistencies */
	if ((!status && msg->msg_health_status != LNET_MSG_STATUS_OK) ||
	    (status && msg->msg_health_status == LNET_MSG_STATUS_OK)) {
		CDEBUG(D_NET,
		       "Msg %p is in inconsistent state, don't perform health checking (%d, %d)\n",
		       msg, status, msg->msg_health_status);
		hc = false;
	}

	CDEBUG(D_NET, "health check = %d, status = %d, hstatus = %d\n",
	       hc, status, msg->msg_health_status);

	return hc;
}
const char *
lnet_health_error2str(enum lnet_msg_hstatus hstatus)
{
	switch (hstatus) {
	case LNET_MSG_STATUS_LOCAL_INTERRUPT:
		return "LOCAL_INTERRUPT";
	case LNET_MSG_STATUS_LOCAL_DROPPED:
		return "LOCAL_DROPPED";
	case LNET_MSG_STATUS_LOCAL_ABORTED:
		return "LOCAL_ABORTED";
	case LNET_MSG_STATUS_LOCAL_NO_ROUTE:
		return "LOCAL_NO_ROUTE";
	case LNET_MSG_STATUS_LOCAL_TIMEOUT:
		return "LOCAL_TIMEOUT";
	case LNET_MSG_STATUS_LOCAL_ERROR:
		return "LOCAL_ERROR";
	case LNET_MSG_STATUS_REMOTE_DROPPED:
		return "REMOTE_DROPPED";
	case LNET_MSG_STATUS_REMOTE_ERROR:
		return "REMOTE_ERROR";
	case LNET_MSG_STATUS_REMOTE_TIMEOUT:
		return "REMOTE_TIMEOUT";
	case LNET_MSG_STATUS_NETWORK_TIMEOUT:
		return "NETWORK_TIMEOUT";
	case LNET_MSG_STATUS_OK:
		return "OK";
	default:
		return "<UNKNOWN>";
	}
}
bool
lnet_send_error_simulation(struct lnet_msg *msg,
			   enum lnet_msg_hstatus *hstatus)
{
	if (!msg)
		return false;

	if (list_empty(&the_lnet.ln_drop_rules))
		return false;

	/* match only health rules */
	if (!lnet_drop_rule_match(&msg->msg_hdr, LNET_NID_ANY,
				  hstatus))
		return false;

	CDEBUG(D_NET, "src %s(%s)->dst %s: %s simulate health error: %s\n",
	       libcfs_nid2str(msg->msg_hdr.src_nid),
	       libcfs_nidstr(&msg->msg_txni->ni_nid),
	       libcfs_nid2str(msg->msg_hdr.dest_nid),
	       lnet_msgtyp2str(msg->msg_type),
	       lnet_health_error2str(*hstatus));

	return true;
}
EXPORT_SYMBOL(lnet_send_error_simulation);
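/*
 * Illustrative usage sketch (editorial; not part of this file): a send
 * path can consult lnet_send_error_simulation() before handing a message
 * to the LND and fail it with the simulated health status instead. One
 * possible shape:
 *
 *	enum lnet_msg_hstatus hstatus;
 *
 *	if (lnet_send_error_simulation(msg, &hstatus)) {
 *		msg->msg_health_status = hstatus;
 *		lnet_finalize(msg, -EFAULT);
 *		return 0;
 *	}
 */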
void
lnet_finalize(struct lnet_msg *msg, int status)
{
	struct lnet_msg_container *container;
	int my_slot;
	int cpt;
	int rc;

	LASSERT(!in_interrupt());

	if (msg == NULL)
		return;

	msg->msg_ev.status = status;

	if (lnet_is_health_check(msg)) {
		/*
		 * Check the health status of the message. If it has one
		 * of the errors that we're supposed to handle, and it has
		 * not timed out, then
		 *	1. Decrement the appropriate health_value
		 *	2. queue the message on the resend queue
		 *
		 * if the message send is a success, timed out or failed in
		 * the health check for any reason then we'll just finalize
		 * the message. Otherwise just return since the message has
		 * been put on the resend queue.
		 */
		if (!lnet_health_check(msg))
			return;
	}

	/*
	 * We're not going to resend this message so detach its MD and invoke
	 * the appropriate callbacks
	 */
	if (msg->msg_md != NULL)
		lnet_msg_detach_md(msg, status);

again:
	if (!msg->msg_tx_committed && !msg->msg_rx_committed) {
		/* not committed to network yet */
		LASSERT(!msg->msg_onactivelist);
		lnet_msg_free(msg);
		return;
	}

	/*
	 * NB: routed message can be committed for both receiving and sending,
	 * we should finalize in LIFO order and keep counters correct.
	 * (finalize sending first then finalize receiving)
	 */
	cpt = msg->msg_tx_committed ? msg->msg_tx_cpt : msg->msg_rx_cpt;
	lnet_net_lock(cpt);

	container = the_lnet.ln_msg_containers[cpt];

	/* Recursion breaker. Don't complete the message here if I am (or
	 * enough other threads are) already completing messages */
	my_slot = lnet_check_finalize_recursion_locked(msg,
					&container->msc_finalizing,
					container->msc_nfinalizers,
					container->msc_finalizers);

	/* enough threads are finalizing */
	if (my_slot == -1) {
		lnet_net_unlock(cpt);
		return;
	}

	rc = 0;
	while (!list_empty(&container->msc_finalizing)) {
		msg = list_entry(container->msc_finalizing.next,
				 struct lnet_msg, msg_list);

		list_del_init(&msg->msg_list);

		/* NB drops and regains the lnet lock if it actually does
		 * anything, so my finalizing friends can chomp along too */
		rc = lnet_complete_msg_locked(msg, cpt);
		if (rc != 0)
			break;
	}

	if (unlikely(!list_empty(&the_lnet.ln_delay_rules))) {
		lnet_net_unlock(cpt);
		lnet_delay_rule_check();
		lnet_net_lock(cpt);
	}

	container->msc_finalizers[my_slot] = NULL;
	lnet_net_unlock(cpt);

	if (rc != 0)
		goto again;
}
EXPORT_SYMBOL(lnet_finalize);
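/*
 * Note for LND authors: lnet_finalize() is the single completion entry
 * point. As the comments in lnet_complete_msg_locked() indicate, once a
 * message has been handed to an LND, the LND is expected to call
 * lnet_finalize(msg, status) exactly once when the tx or rx completes,
 * at which point the health check, resend and decommit logic above runs.
 */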
void
lnet_msg_container_cleanup(struct lnet_msg_container *container)
{
	int count = 0;

	if (container->msc_init == 0)
		return;

	while (!list_empty(&container->msc_active)) {
		struct lnet_msg *msg;

		msg = list_entry(container->msc_active.next,
				 struct lnet_msg, msg_activelist);
		LASSERT(msg->msg_onactivelist);
		msg->msg_onactivelist = 0;
		list_del_init(&msg->msg_activelist);
		lnet_msg_free(msg);
		count++;
	}

	if (count > 0)
		CERROR("%d active msg on exit\n", count);

	if (container->msc_finalizers != NULL) {
		CFS_FREE_PTR_ARRAY(container->msc_finalizers,
				   container->msc_nfinalizers);
		container->msc_finalizers = NULL;
	}

	if (container->msc_resenders != NULL) {
		CFS_FREE_PTR_ARRAY(container->msc_resenders,
				   container->msc_nfinalizers);
		container->msc_resenders = NULL;
	}
	container->msc_init = 0;
}
int
lnet_msg_container_setup(struct lnet_msg_container *container, int cpt)
{
	container->msc_init = 1;

	INIT_LIST_HEAD(&container->msc_active);
	INIT_LIST_HEAD(&container->msc_finalizing);
	INIT_LIST_HEAD(&container->msc_resending);

	/* number of CPUs */
	container->msc_nfinalizers = cfs_cpt_weight(lnet_cpt_table(), cpt);
	if (container->msc_nfinalizers == 0)
		container->msc_nfinalizers = 1;

	LIBCFS_CPT_ALLOC(container->msc_finalizers, lnet_cpt_table(), cpt,
			 container->msc_nfinalizers *
			 sizeof(*container->msc_finalizers));

	if (container->msc_finalizers == NULL) {
		CERROR("Failed to allocate message finalizers\n");
		lnet_msg_container_cleanup(container);
		return -ENOMEM;
	}

	LIBCFS_CPT_ALLOC(container->msc_resenders, lnet_cpt_table(), cpt,
			 container->msc_nfinalizers *
			 sizeof(*container->msc_resenders));

	if (container->msc_resenders == NULL) {
		CERROR("Failed to allocate message resenders\n");
		lnet_msg_container_cleanup(container);
		return -ENOMEM;
	}

	return 0;
}
void
lnet_msg_containers_destroy(void)
{
	struct lnet_msg_container *container;
	int i;

	if (the_lnet.ln_msg_containers == NULL)
		return;

	cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers)
		lnet_msg_container_cleanup(container);

	cfs_percpt_free(the_lnet.ln_msg_containers);
	the_lnet.ln_msg_containers = NULL;
}
int
lnet_msg_containers_create(void)
{
	struct lnet_msg_container *container;
	int rc;
	int i;

	the_lnet.ln_msg_containers = cfs_percpt_alloc(lnet_cpt_table(),
						      sizeof(*container));

	if (the_lnet.ln_msg_containers == NULL) {
		CERROR("Failed to allocate cpu-partition data for network\n");
		return -ENOMEM;
	}

	cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers) {
		rc = lnet_msg_container_setup(container, i);
		if (rc != 0) {
			lnet_msg_containers_destroy();
			return rc;
		}
	}

	return 0;
}