LU-9120 lnet: add global health statistics
fs/lustre-release.git: lnet/lnet/lib-msg.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/lnet/lib-msg.c
 *
 * Message decoding, parsing and finalizing routines
 */

#define DEBUG_SUBSYSTEM S_LNET

#include <lnet/lib-lnet.h>

void
lnet_build_unlink_event(struct lnet_libmd *md, struct lnet_event *ev)
{
        ENTRY;

        memset(ev, 0, sizeof(*ev));

        ev->status   = 0;
        ev->unlinked = 1;
        ev->type     = LNET_EVENT_UNLINK;
        lnet_md_deconstruct(md, &ev->md);
        lnet_md2handle(&ev->md_handle, md);
        EXIT;
}

/*
 * Don't need any lock; must be called after lnet_msg_commit()
 */
void
lnet_build_msg_event(struct lnet_msg *msg, enum lnet_event_kind ev_type)
{
        struct lnet_hdr *hdr = &msg->msg_hdr;
        struct lnet_event *ev = &msg->msg_ev;

        LASSERT(!msg->msg_routing);

        ev->type = ev_type;
        ev->msg_type = msg->msg_type;

        if (ev_type == LNET_EVENT_SEND) {
                /* event for active message */
                ev->target.nid    = le64_to_cpu(hdr->dest_nid);
                ev->target.pid    = le32_to_cpu(hdr->dest_pid);
                ev->initiator.nid = LNET_NID_ANY;
                ev->initiator.pid = the_lnet.ln_pid;
                ev->source.nid    = LNET_NID_ANY;
                ev->source.pid    = the_lnet.ln_pid;
                ev->sender        = LNET_NID_ANY;
        } else {
                /* event for passive message */
                ev->target.pid    = hdr->dest_pid;
                ev->target.nid    = hdr->dest_nid;
                ev->initiator.pid = hdr->src_pid;
                /* Multi-Rail: resolve src_nid to "primary" peer NID */
                ev->initiator.nid = msg->msg_initiator;
                /* Multi-Rail: track source NID. */
                ev->source.pid    = hdr->src_pid;
                ev->source.nid    = hdr->src_nid;
                ev->rlength       = hdr->payload_length;
                ev->sender        = msg->msg_from;
                ev->mlength       = msg->msg_wanted;
                ev->offset        = msg->msg_offset;
        }

        switch (ev_type) {
        default:
                LBUG();

        case LNET_EVENT_PUT: /* passive PUT */
                ev->pt_index   = hdr->msg.put.ptl_index;
                ev->match_bits = hdr->msg.put.match_bits;
                ev->hdr_data   = hdr->msg.put.hdr_data;
                return;

        case LNET_EVENT_GET: /* passive GET */
                ev->pt_index   = hdr->msg.get.ptl_index;
                ev->match_bits = hdr->msg.get.match_bits;
                ev->hdr_data   = 0;
                return;

        case LNET_EVENT_ACK: /* ACK */
                ev->match_bits = hdr->msg.ack.match_bits;
                ev->mlength    = hdr->msg.ack.mlength;
                return;

        case LNET_EVENT_REPLY: /* REPLY */
                return;

        case LNET_EVENT_SEND: /* active message */
                if (msg->msg_type == LNET_MSG_PUT) {
                        ev->pt_index   = le32_to_cpu(hdr->msg.put.ptl_index);
                        ev->match_bits = le64_to_cpu(hdr->msg.put.match_bits);
                        ev->offset     = le32_to_cpu(hdr->msg.put.offset);
                        ev->mlength    =
                        ev->rlength    = le32_to_cpu(hdr->payload_length);
                        ev->hdr_data   = le64_to_cpu(hdr->msg.put.hdr_data);

                } else {
                        LASSERT(msg->msg_type == LNET_MSG_GET);
                        ev->pt_index   = le32_to_cpu(hdr->msg.get.ptl_index);
                        ev->match_bits = le64_to_cpu(hdr->msg.get.match_bits);
                        ev->mlength    =
                        ev->rlength    = le32_to_cpu(hdr->msg.get.sink_length);
                        ev->offset     = le32_to_cpu(hdr->msg.get.src_offset);
                        ev->hdr_data   = 0;
                }
                return;
        }
}
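
/*
 * Illustrative note (assuming the usual LNet wire conventions): the
 * byte-order asymmetry above is intentional. On the active (SEND) side
 * the header is still in wire (little-endian) order, hence the
 * le64_to_cpu()/le32_to_cpu() conversions; on the passive side
 * lnet_parse() has already converted the header to host order, so the
 * fields are read directly. For example, a passive PUT carrying 4096
 * payload bytes that is fully accepted by the matched MD ends up with
 * ev->rlength == 4096 and ev->mlength == msg->msg_wanted == 4096.
 */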

void
lnet_msg_commit(struct lnet_msg *msg, int cpt)
{
        struct lnet_msg_container *container = the_lnet.ln_msg_containers[cpt];
        struct lnet_counters *counters = the_lnet.ln_counters[cpt];
        s64 timeout_ns;

        /* set the message deadline */
        timeout_ns = lnet_transaction_timeout * NSEC_PER_SEC;
        msg->msg_deadline = ktime_add_ns(ktime_get(), timeout_ns);

        /* routed message can be committed for both receiving and sending */
        LASSERT(!msg->msg_tx_committed);

        if (msg->msg_sending) {
                LASSERT(!msg->msg_receiving);
                msg->msg_tx_cpt = cpt;
                msg->msg_tx_committed = 1;
                if (msg->msg_rx_committed) { /* routed message REPLY */
                        LASSERT(msg->msg_onactivelist);
                        return;
                }
        } else {
                LASSERT(!msg->msg_sending);
                msg->msg_rx_cpt = cpt;
                msg->msg_rx_committed = 1;
        }

        LASSERT(!msg->msg_onactivelist);

        msg->msg_onactivelist = 1;
        list_add_tail(&msg->msg_activelist, &container->msc_active);

        counters->msgs_alloc++;
        if (counters->msgs_alloc > counters->msgs_max)
                counters->msgs_max = counters->msgs_alloc;
}
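
/*
 * Worked example (illustrative values, not from the original source):
 * with the module parameter lnet_transaction_timeout = 50, a message
 * committed at time T gets msg_deadline = T + 50 * NSEC_PER_SEC, i.e.
 * 50 seconds from commit. lnet_health_check() below compares
 * ktime_get() against this deadline to decide whether a failed message
 * is still eligible for a resend.
 */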

static void
lnet_msg_decommit_tx(struct lnet_msg *msg, int status)
{
        struct lnet_counters *counters;
        struct lnet_event *ev = &msg->msg_ev;

        LASSERT(msg->msg_tx_committed);
        if (status != 0)
                goto out;

        counters = the_lnet.ln_counters[msg->msg_tx_cpt];
        switch (ev->type) {
        default: /* routed message */
                LASSERT(msg->msg_routing);
                LASSERT(msg->msg_rx_committed);
                LASSERT(ev->type == 0);

                counters->route_length += msg->msg_len;
                counters->route_count++;
                goto incr_stats;

        case LNET_EVENT_PUT:
                /* should have been decommitted */
                LASSERT(!msg->msg_rx_committed);
                /* overwritten while sending ACK */
                LASSERT(msg->msg_type == LNET_MSG_ACK);
                msg->msg_type = LNET_MSG_PUT; /* fix type */
                break;

        case LNET_EVENT_SEND:
                LASSERT(!msg->msg_rx_committed);
                if (msg->msg_type == LNET_MSG_PUT)
                        counters->send_length += msg->msg_len;
                break;

        case LNET_EVENT_GET:
                LASSERT(msg->msg_rx_committed);
                /* overwritten while sending reply, we should never be
                 * here for optimized GET */
                LASSERT(msg->msg_type == LNET_MSG_REPLY);
                msg->msg_type = LNET_MSG_GET; /* fix type */
                break;
        }

        counters->send_count++;

incr_stats:
        if (msg->msg_txpeer)
                lnet_incr_stats(&msg->msg_txpeer->lpni_stats,
                                msg->msg_type,
                                LNET_STATS_TYPE_SEND);
        if (msg->msg_txni)
                lnet_incr_stats(&msg->msg_txni->ni_stats,
                                msg->msg_type,
                                LNET_STATS_TYPE_SEND);
 out:
        lnet_return_tx_credits_locked(msg);
        msg->msg_tx_committed = 0;
}

static void
lnet_msg_decommit_rx(struct lnet_msg *msg, int status)
{
        struct lnet_counters *counters;
        struct lnet_event *ev = &msg->msg_ev;

        LASSERT(!msg->msg_tx_committed); /* decommitted or never committed */
        LASSERT(msg->msg_rx_committed);

        if (status != 0)
                goto out;

        counters = the_lnet.ln_counters[msg->msg_rx_cpt];
        switch (ev->type) {
        default:
                LASSERT(ev->type == 0);
                LASSERT(msg->msg_routing);
                goto incr_stats;

        case LNET_EVENT_ACK:
                LASSERT(msg->msg_type == LNET_MSG_ACK);
                break;

        case LNET_EVENT_GET:
                /* type is "REPLY" if it's an optimized GET on the passive
                 * side, because an optimized GET is never committed for
                 * sending, so the message type wouldn't be changed back to
                 * "GET" by lnet_msg_decommit_tx(); see details in
                 * lnet_parse_get() */
                LASSERT(msg->msg_type == LNET_MSG_REPLY ||
                        msg->msg_type == LNET_MSG_GET);
                counters->send_length += msg->msg_wanted;
                break;

        case LNET_EVENT_PUT:
                LASSERT(msg->msg_type == LNET_MSG_PUT);
                break;

        case LNET_EVENT_REPLY:
                /* type is "GET" if it's an optimized GET on the active
                 * side; see details in lnet_create_reply_msg() */
                LASSERT(msg->msg_type == LNET_MSG_GET ||
                        msg->msg_type == LNET_MSG_REPLY);
                break;
        }

        counters->recv_count++;

incr_stats:
        if (msg->msg_rxpeer)
                lnet_incr_stats(&msg->msg_rxpeer->lpni_stats,
                                msg->msg_type,
                                LNET_STATS_TYPE_RECV);
        if (msg->msg_rxni)
                lnet_incr_stats(&msg->msg_rxni->ni_stats,
                                msg->msg_type,
                                LNET_STATS_TYPE_RECV);
        if (ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_REPLY)
                counters->recv_length += msg->msg_wanted;

 out:
        lnet_return_rx_credits_locked(msg);
        msg->msg_rx_committed = 0;
}

void
lnet_msg_decommit(struct lnet_msg *msg, int cpt, int status)
{
        int     cpt2 = cpt;

        LASSERT(msg->msg_tx_committed || msg->msg_rx_committed);
        LASSERT(msg->msg_onactivelist);

        if (msg->msg_tx_committed) { /* always decommit for sending first */
                LASSERT(cpt == msg->msg_tx_cpt);
                lnet_msg_decommit_tx(msg, status);
        }

        if (msg->msg_rx_committed) {
                /* forwarding msg committed for both receiving and sending */
                if (cpt != msg->msg_rx_cpt) {
                        lnet_net_unlock(cpt);
                        cpt2 = msg->msg_rx_cpt;
                        lnet_net_lock(cpt2);
                }
                lnet_msg_decommit_rx(msg, status);
        }

        list_del(&msg->msg_activelist);
        msg->msg_onactivelist = 0;

        the_lnet.ln_counters[cpt2]->msgs_alloc--;

        if (cpt2 != cpt) {
                lnet_net_unlock(cpt2);
                lnet_net_lock(cpt);
        }
}

void
lnet_msg_attach_md(struct lnet_msg *msg, struct lnet_libmd *md,
                   unsigned int offset, unsigned int mlen)
{
        /* NB: @offset and @mlen are only useful for receiving */
        /* Here we attach the MD to the lnet_msg, mark it busy and
         * decrement its threshold. Come what may, the lnet_msg "owns"
         * the MD until a call to lnet_msg_detach_md() or lnet_finalize()
         * signals completion. */
        LASSERT(!msg->msg_routing);

        msg->msg_md = md;
        if (msg->msg_receiving) { /* committed for receiving */
                msg->msg_offset = offset;
                msg->msg_wanted = mlen;
        }

        md->md_refcount++;
        if (md->md_threshold != LNET_MD_THRESH_INF) {
                LASSERT(md->md_threshold > 0);
                md->md_threshold--;
        }

        /* build umd in event */
        lnet_md2handle(&msg->msg_ev.md_handle, md);
        lnet_md_deconstruct(md, &msg->msg_ev.md);
}
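
/*
 * Threshold bookkeeping sketch (illustrative, assuming standard LNet MD
 * semantics): an MD created with md_threshold = 2 permits two
 * operations. The first lnet_msg_attach_md() leaves md_refcount == 1
 * and md_threshold == 1; a second attach leaves md_refcount == 2 and
 * md_threshold == 0. Once the threshold is exhausted and the refcount
 * drops back to zero, lnet_md_unlinkable() reports the MD as
 * unlinkable and lnet_msg_detach_md() below unlinks it. An MD created
 * with LNET_MD_THRESH_INF is never decremented here.
 */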

void
lnet_msg_detach_md(struct lnet_msg *msg, int status)
{
        struct lnet_libmd *md = msg->msg_md;
        int unlink;

        /* Now it's safe to drop my caller's ref */
        md->md_refcount--;
        LASSERT(md->md_refcount >= 0);

        unlink = lnet_md_unlinkable(md);
        if (md->md_eq != NULL) {
                msg->msg_ev.status   = status;
                msg->msg_ev.unlinked = unlink;
                lnet_eq_enqueue_event(md->md_eq, &msg->msg_ev);
        }

        if (unlink)
                lnet_md_unlink(md);

        msg->msg_md = NULL;
}

static int
lnet_complete_msg_locked(struct lnet_msg *msg, int cpt)
{
        struct lnet_handle_wire ack_wmd;
        int                rc;
        int                status = msg->msg_ev.status;

        LASSERT(msg->msg_onactivelist);

        if (status == 0 && msg->msg_ack) {
                /* Only send an ACK if the PUT completed successfully */

                lnet_msg_decommit(msg, cpt, 0);

                msg->msg_ack = 0;
                lnet_net_unlock(cpt);

                LASSERT(msg->msg_ev.type == LNET_EVENT_PUT);
                LASSERT(!msg->msg_routing);

                ack_wmd = msg->msg_hdr.msg.put.ack_wmd;

                lnet_prep_send(msg, LNET_MSG_ACK, msg->msg_ev.source, 0, 0);

                msg->msg_hdr.msg.ack.dst_wmd = ack_wmd;
                msg->msg_hdr.msg.ack.match_bits = msg->msg_ev.match_bits;
                msg->msg_hdr.msg.ack.mlength = cpu_to_le32(msg->msg_ev.mlength);

                /* NB: we probably want to use the NID of msg::msg_from as the
                 * 3rd parameter (router NID) if it's a routed message */
                rc = lnet_send(msg->msg_ev.target.nid, msg, LNET_NID_ANY);

                lnet_net_lock(cpt);
                /*
                 * NB: the message is committed for sending; on success we
                 * should return because the LND will finalize it later.
                 *
                 * It is also possible that the message was committed for
                 * sending but failed before it reached the LND, e.g. with
                 * ENOMEM. In that case we can't fall through either,
                 * because the CPT for sending can differ from the CPT for
                 * receiving, so we should return to lnet_finalize() to
                 * make sure we lock the correct partition.
                 */
                return rc;

        } else if (status == 0 &&       /* OK so far */
                   (msg->msg_routing && !msg->msg_sending)) {
                /* not forwarded */
                LASSERT(!msg->msg_receiving);   /* called back recv already */
                lnet_net_unlock(cpt);

                rc = lnet_send(LNET_NID_ANY, msg, LNET_NID_ANY);

                lnet_net_lock(cpt);
                /*
                 * NB: the message is committed for sending; on success we
                 * should return because the LND will finalize it later.
                 *
                 * It is also possible that the message was committed for
                 * sending but failed before it reached the LND, e.g. with
                 * ENOMEM. In that case we can't fall through either:
                 * - the rule is that a message must be decommitted for
                 *   sending first if it's committed for both sending and
                 *   receiving
                 * - the CPT for sending can differ from the CPT for
                 *   receiving, so we should return to lnet_finalize() to
                 *   make sure we lock the correct partition.
                 */
                return rc;
        }

        lnet_msg_decommit(msg, cpt, status);
        lnet_msg_free(msg);
        return 0;
}

static void
lnet_dec_healthv_locked(atomic_t *healthv)
{
        int h = atomic_read(healthv);

        if (h < lnet_health_sensitivity) {
                atomic_set(healthv, 0);
        } else {
                h -= lnet_health_sensitivity;
                atomic_set(healthv, h);
        }
}
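
/*
 * Worked example (illustrative values): with lnet_health_sensitivity =
 * 100 and an interface at full health (LNET_MAX_HEALTH_VALUE, nominally
 * 1000), one failure drops the health value to 900, a second to 800,
 * and so on; a value of 50 would clamp to 0 rather than go negative.
 * With lnet_health_sensitivity = 0 the value never decreases, which is
 * why the callers below skip recovery when the health value is still at
 * the maximum.
 */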

static void
lnet_handle_local_failure(struct lnet_msg *msg)
{
        struct lnet_ni *local_ni;

        local_ni = msg->msg_txni;

        /*
         * the lnet_net_lock(0) is used to protect the addref on the ni
         * and the recovery queue.
         */
        lnet_net_lock(0);
        /* the monitor thread could've shut down and cleaned up the queues */
        if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING) {
                lnet_net_unlock(0);
                return;
        }

        lnet_dec_healthv_locked(&local_ni->ni_healthv);
        /*
         * add the NI to the recovery queue if it's not already there
         * and its health value is actually below the maximum. It's
         * possible that the sensitivity is set to 0, in which case the
         * health value is not reduced and there is no reason to invoke
         * recovery.
         */
        if (list_empty(&local_ni->ni_recovery) &&
            atomic_read(&local_ni->ni_healthv) < LNET_MAX_HEALTH_VALUE) {
                CERROR("ni %s added to recovery queue. Health = %d\n",
                        libcfs_nid2str(local_ni->ni_nid),
                        atomic_read(&local_ni->ni_healthv));
                list_add_tail(&local_ni->ni_recovery,
                              &the_lnet.ln_mt_localNIRecovq);
                lnet_ni_addref_locked(local_ni, 0);
        }
        lnet_net_unlock(0);
}

static void
lnet_handle_remote_failure(struct lnet_msg *msg)
{
        struct lnet_peer_ni *lpni;

        lpni = msg->msg_txpeer;

        /* lpni could be NULL if we're in the LOLND case */
        if (!lpni)
                return;

        lnet_net_lock(0);
        lnet_dec_healthv_locked(&lpni->lpni_healthv);
        /*
         * add the peer NI to the recovery queue if it's not already
         * there and its health value is actually below the maximum.
         * It's possible that the sensitivity is set to 0, in which case
         * the health value is not reduced and there is no reason to
         * invoke recovery.
         */
        lnet_peer_ni_add_to_recoveryq_locked(lpni);
        lnet_net_unlock(0);
}

static void
lnet_incr_hstats(struct lnet_msg *msg, enum lnet_msg_hstatus hstatus)
{
        struct lnet_ni *ni = msg->msg_txni;
        struct lnet_peer_ni *lpni = msg->msg_txpeer;
        struct lnet_counters *counters = the_lnet.ln_counters[0];

        switch (hstatus) {
        case LNET_MSG_STATUS_LOCAL_INTERRUPT:
                atomic_inc(&ni->ni_hstats.hlt_local_interrupt);
                counters->local_interrupt_count++;
                break;
        case LNET_MSG_STATUS_LOCAL_DROPPED:
                atomic_inc(&ni->ni_hstats.hlt_local_dropped);
                counters->local_dropped_count++;
                break;
        case LNET_MSG_STATUS_LOCAL_ABORTED:
                atomic_inc(&ni->ni_hstats.hlt_local_aborted);
                counters->local_aborted_count++;
                break;
        case LNET_MSG_STATUS_LOCAL_NO_ROUTE:
                atomic_inc(&ni->ni_hstats.hlt_local_no_route);
                counters->local_no_route_count++;
                break;
        case LNET_MSG_STATUS_LOCAL_TIMEOUT:
                atomic_inc(&ni->ni_hstats.hlt_local_timeout);
                counters->local_timeout_count++;
                break;
        case LNET_MSG_STATUS_LOCAL_ERROR:
                atomic_inc(&ni->ni_hstats.hlt_local_error);
                counters->local_error_count++;
                break;
        case LNET_MSG_STATUS_REMOTE_DROPPED:
                if (lpni)
                        atomic_inc(&lpni->lpni_hstats.hlt_remote_dropped);
                counters->remote_dropped_count++;
                break;
        case LNET_MSG_STATUS_REMOTE_ERROR:
                if (lpni)
                        atomic_inc(&lpni->lpni_hstats.hlt_remote_error);
                counters->remote_error_count++;
                break;
        case LNET_MSG_STATUS_REMOTE_TIMEOUT:
                if (lpni)
                        atomic_inc(&lpni->lpni_hstats.hlt_remote_timeout);
                counters->remote_timeout_count++;
                break;
        case LNET_MSG_STATUS_NETWORK_TIMEOUT:
                if (lpni)
                        atomic_inc(&lpni->lpni_hstats.hlt_network_timeout);
                counters->network_timeout_count++;
                break;
        case LNET_MSG_STATUS_OK:
                break;
        default:
                LBUG();
        }
}

/*
 * Do a health check on the message:
 * return -1 if we're not going to handle the error, if we've reached
 *   the maximum number of retries, or on success (the message can be
 *   finalized);
 * return 0 if the message has been queued for resend.
 */
static int
lnet_health_check(struct lnet_msg *msg)
{
        enum lnet_msg_hstatus hstatus = msg->msg_health_status;
        bool lo = false;

        /* if we're shutting down no point in handling health. */
        if (the_lnet.ln_state != LNET_STATE_RUNNING)
                return -1;

        LASSERT(msg->msg_txni);

        /*
         * if we're sending to the LOLND then the msg_txpeer will not be
         * set. So no need to sanity check it.
         */
        if (LNET_NETTYP(LNET_NIDNET(msg->msg_txni->ni_nid)) != LOLND)
                LASSERT(msg->msg_txpeer);
        else
                lo = true;

        if (hstatus != LNET_MSG_STATUS_OK &&
            ktime_compare(ktime_get(), msg->msg_deadline) >= 0)
                return -1;

        /*
         * stats are only incremented for errors so avoid wasting time
         * incrementing statistics if there is no error.
         */
        if (hstatus != LNET_MSG_STATUS_OK) {
                lnet_net_lock(0);
                lnet_incr_hstats(msg, hstatus);
                lnet_net_unlock(0);
        }

        CDEBUG(D_NET, "health check: %s->%s: %s: %s\n",
               libcfs_nid2str(msg->msg_txni->ni_nid),
               (lo) ? "self" : libcfs_nid2str(msg->msg_txpeer->lpni_nid),
               lnet_msgtyp2str(msg->msg_type),
               lnet_health_error2str(hstatus));

        switch (hstatus) {
        case LNET_MSG_STATUS_OK:
                lnet_inc_healthv(&msg->msg_txni->ni_healthv);
                /*
                 * It's possible msg_txpeer is NULL in the LOLND
                 * case.
                 */
                if (msg->msg_txpeer)
                        lnet_inc_healthv(&msg->msg_txpeer->lpni_healthv);

                /* we can finalize this message */
                return -1;
        case LNET_MSG_STATUS_LOCAL_INTERRUPT:
        case LNET_MSG_STATUS_LOCAL_DROPPED:
        case LNET_MSG_STATUS_LOCAL_ABORTED:
        case LNET_MSG_STATUS_LOCAL_NO_ROUTE:
        case LNET_MSG_STATUS_LOCAL_TIMEOUT:
                lnet_handle_local_failure(msg);
                /* add to the re-send queue */
                goto resend;

        /*
         * These errors will not trigger a resend so simply
         * finalize the message
         */
        case LNET_MSG_STATUS_LOCAL_ERROR:
                lnet_handle_local_failure(msg);
                return -1;

        /*
         * TODO: since the remote dropped the message we can
         * attempt a resend safely.
         */
        case LNET_MSG_STATUS_REMOTE_DROPPED:
                lnet_handle_remote_failure(msg);
                goto resend;

        case LNET_MSG_STATUS_REMOTE_ERROR:
        case LNET_MSG_STATUS_REMOTE_TIMEOUT:
        case LNET_MSG_STATUS_NETWORK_TIMEOUT:
                lnet_handle_remote_failure(msg);
                return -1;
        default:
                LBUG();
        }

resend:
        /* don't resend recovery messages */
        if (msg->msg_recovery)
                return -1;

        /*
         * if we explicitly indicated we don't want to resend then just
         * return
         */
        if (msg->msg_no_resend)
                return -1;

        /* check if the message has exceeded the number of retries */
        if (msg->msg_retry_count >= lnet_retry_count)
                return -1;
        msg->msg_retry_count++;

        lnet_net_lock(msg->msg_tx_cpt);

        /*
         * remove the message from the active list and reset it in
         * preparation for a resend. There are two exceptions:
         *
         * 1. the router case, where a message is committed for rx when
         * received, then for tx when it is sent. When committed to both
         * tx and rx we don't want to remove it from the active list.
         *
         * 2. the REPLY case, since it uses the same msg block as the GET
         * that was received.
         */
        if (!msg->msg_routing && msg->msg_type != LNET_MSG_REPLY) {
                list_del_init(&msg->msg_activelist);
                msg->msg_onactivelist = 0;
        }
        /*
         * The msg_target.nid which was originally set
         * when calling LNetGet() or LNetPut() might've
         * been overwritten if we're routing this message.
         * Call lnet_return_tx_credits_locked() to return
         * the credit this message consumed. The message will
         * consume another credit when it gets resent.
         */
        msg->msg_target.nid = msg->msg_hdr.dest_nid;
        lnet_msg_decommit_tx(msg, -EAGAIN);
        msg->msg_sending = 0;
        msg->msg_receiving = 0;
        msg->msg_target_is_router = 0;

        CDEBUG(D_NET, "%s->%s:%s:%s - queuing for resend\n",
               libcfs_nid2str(msg->msg_hdr.src_nid),
               libcfs_nid2str(msg->msg_hdr.dest_nid),
               lnet_msgtyp2str(msg->msg_type),
               lnet_health_error2str(hstatus));

        list_add_tail(&msg->msg_list, the_lnet.ln_mt_resendqs[msg->msg_tx_cpt]);
        lnet_net_unlock(msg->msg_tx_cpt);

        wake_up(&the_lnet.ln_mt_waitq);
        return 0;
}
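
/*
 * Resend flow, worked example (illustrative values): with
 * lnet_retry_count = 3, a PUT that fails with REMOTE_DROPPED before its
 * deadline is decommitted for tx with -EAGAIN and queued on the resend
 * queue up to three times (msg_retry_count values 0, 1 and 2 pass the
 * check above); on the fourth failure, or as soon as msg_deadline has
 * passed, lnet_health_check() returns -1 and lnet_finalize() completes
 * the message instead.
 */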

static void
lnet_detach_md(struct lnet_msg *msg, int status)
{
        int cpt = lnet_cpt_of_cookie(msg->msg_md->md_lh.lh_cookie);

        lnet_res_lock(cpt);
        lnet_msg_detach_md(msg, status);
        lnet_res_unlock(cpt);
}

static bool
lnet_is_health_check(struct lnet_msg *msg)
{
        bool hc;
        int status = msg->msg_ev.status;

        /*
         * perform a health check for any message committed for transmit
         */
        hc = msg->msg_tx_committed;

        /* Check for status inconsistencies */
        if (hc &&
            ((!status && msg->msg_health_status != LNET_MSG_STATUS_OK) ||
             (status && msg->msg_health_status == LNET_MSG_STATUS_OK))) {
                CERROR("Msg is in inconsistent state, don't perform health "
                       "checking (%d, %d)\n", status, msg->msg_health_status);
                hc = false;
        }

        CDEBUG(D_NET, "health check = %d, status = %d, hstatus = %d\n",
               hc, status, msg->msg_health_status);

        return hc;
}

char *
lnet_health_error2str(enum lnet_msg_hstatus hstatus)
{
        switch (hstatus) {
        case LNET_MSG_STATUS_LOCAL_INTERRUPT:
                return "LOCAL_INTERRUPT";
        case LNET_MSG_STATUS_LOCAL_DROPPED:
                return "LOCAL_DROPPED";
        case LNET_MSG_STATUS_LOCAL_ABORTED:
                return "LOCAL_ABORTED";
        case LNET_MSG_STATUS_LOCAL_NO_ROUTE:
                return "LOCAL_NO_ROUTE";
        case LNET_MSG_STATUS_LOCAL_TIMEOUT:
                return "LOCAL_TIMEOUT";
        case LNET_MSG_STATUS_LOCAL_ERROR:
                return "LOCAL_ERROR";
        case LNET_MSG_STATUS_REMOTE_DROPPED:
                return "REMOTE_DROPPED";
        case LNET_MSG_STATUS_REMOTE_ERROR:
                return "REMOTE_ERROR";
        case LNET_MSG_STATUS_REMOTE_TIMEOUT:
                return "REMOTE_TIMEOUT";
        case LNET_MSG_STATUS_NETWORK_TIMEOUT:
                return "NETWORK_TIMEOUT";
        case LNET_MSG_STATUS_OK:
                return "OK";
        default:
                return "<UNKNOWN>";
        }
}

void
lnet_finalize(struct lnet_msg *msg, int status)
{
        struct lnet_msg_container *container;
        int my_slot;
        int cpt;
        int rc;
        int i;
        bool hc;

        LASSERT(!in_interrupt());

        if (msg == NULL)
                return;

        msg->msg_ev.status = status;

        /*
         * if this is an ACK or a REPLY then make sure to remove the
         * response tracker.
         */
        if (msg->msg_ev.type == LNET_EVENT_REPLY ||
            msg->msg_ev.type == LNET_EVENT_ACK) {
                cpt = lnet_cpt_of_cookie(msg->msg_md->md_lh.lh_cookie);
                lnet_detach_rsp_tracker(msg->msg_md, cpt);
        }

        /* if the message is successfully sent, no need to keep the MD around */
        if (msg->msg_md != NULL && !status)
                lnet_detach_md(msg, status);

again:
        hc = lnet_is_health_check(msg);

        /*
         * the MD would've been detached from the message if it was
         * successfully sent. However, if it wasn't successfully sent the
         * MD would still be around. And since we recalculate whether to
         * health check or not, it's possible that we change our minds and
         * no longer want to health check this message. In that case also
         * detach the MD.
         *
         * If the message is successful we're going to
         * go through the lnet_health_check() function, but that'll just
         * increment the appropriate health value and return.
         */
        if (msg->msg_md != NULL && !hc)
                lnet_detach_md(msg, status);

        rc = 0;
        if (!msg->msg_tx_committed && !msg->msg_rx_committed) {
                /* not committed to network yet */
                LASSERT(!msg->msg_onactivelist);
                lnet_msg_free(msg);
                return;
        }

        if (hc) {
                /*
                 * Check the health status of the message. If it has one
                 * of the errors that we're supposed to handle, and it has
                 * not timed out, then
                 *      1. Decrement the appropriate health_value
                 *      2. queue the message on the resend queue
                 *
                 * if the message send succeeded, timed out or failed the
                 * health check for any reason then we'll just finalize the
                 * message. Otherwise just return since the message has been
                 * put on the resend queue.
                 */
                if (!lnet_health_check(msg))
                        return;

                /*
                 * if we get here then we need to clean up the md because
                 * we're finalizing the message.
                 */
                if (msg->msg_md != NULL)
                        lnet_detach_md(msg, status);
        }

        /*
         * NB: a routed message can be committed for both receiving and
         * sending; we should finalize in LIFO order and keep the counters
         * correct (finalize sending first, then finalize receiving).
         */
        cpt = msg->msg_tx_committed ? msg->msg_tx_cpt : msg->msg_rx_cpt;
        lnet_net_lock(cpt);

        container = the_lnet.ln_msg_containers[cpt];
        list_add_tail(&msg->msg_list, &container->msc_finalizing);

        /* Recursion breaker.  Don't complete the message here if I am (or
         * enough other threads are) already completing messages */

        my_slot = -1;
        for (i = 0; i < container->msc_nfinalizers; i++) {
                if (container->msc_finalizers[i] == current)
                        break;

                if (my_slot < 0 && container->msc_finalizers[i] == NULL)
                        my_slot = i;
        }

        if (i < container->msc_nfinalizers || my_slot < 0) {
                lnet_net_unlock(cpt);
                return;
        }

        container->msc_finalizers[my_slot] = current;

        while (!list_empty(&container->msc_finalizing)) {
                msg = list_entry(container->msc_finalizing.next,
                                 struct lnet_msg, msg_list);

                list_del_init(&msg->msg_list);

                /* NB drops and regains the lnet lock if it actually does
                 * anything, so my finalizing friends can chomp along too */
                rc = lnet_complete_msg_locked(msg, cpt);
                if (rc != 0)
                        break;
        }

        if (unlikely(!list_empty(&the_lnet.ln_delay_rules))) {
                lnet_net_unlock(cpt);
                lnet_delay_rule_check();
                lnet_net_lock(cpt);
        }

        container->msc_finalizers[my_slot] = NULL;
        lnet_net_unlock(cpt);

        if (rc != 0)
                goto again;
}
EXPORT_SYMBOL(lnet_finalize);
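
/*
 * Recursion-breaker sketch (illustrative): with msc_nfinalizers == 4,
 * up to four threads may drain msc_finalizing concurrently, each parked
 * in a distinct msc_finalizers[] slot. A caller that is already
 * finalizing (found by the scan for `current` above), or that finds all
 * slots taken, simply leaves its message on the msc_finalizing list and
 * returns; one of the active finalizers will complete it.
 */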

void
lnet_msg_container_cleanup(struct lnet_msg_container *container)
{
        int     count = 0;

        if (container->msc_init == 0)
                return;

        while (!list_empty(&container->msc_active)) {
                struct lnet_msg *msg;

                msg  = list_entry(container->msc_active.next,
                                  struct lnet_msg, msg_activelist);
                LASSERT(msg->msg_onactivelist);
                msg->msg_onactivelist = 0;
                list_del_init(&msg->msg_activelist);
                lnet_msg_free(msg);
                count++;
        }

        if (count > 0)
                CERROR("%d active msg on exit\n", count);

        if (container->msc_finalizers != NULL) {
                LIBCFS_FREE(container->msc_finalizers,
                            container->msc_nfinalizers *
                            sizeof(*container->msc_finalizers));
                container->msc_finalizers = NULL;
        }
        container->msc_init = 0;
}

int
lnet_msg_container_setup(struct lnet_msg_container *container, int cpt)
{
        int rc = 0;

        container->msc_init = 1;

        INIT_LIST_HEAD(&container->msc_active);
        INIT_LIST_HEAD(&container->msc_finalizing);

        /* number of CPUs in this CPT */
        container->msc_nfinalizers = cfs_cpt_weight(lnet_cpt_table(), cpt);
        if (container->msc_nfinalizers == 0)
                container->msc_nfinalizers = 1;

        LIBCFS_CPT_ALLOC(container->msc_finalizers, lnet_cpt_table(), cpt,
                         container->msc_nfinalizers *
                         sizeof(*container->msc_finalizers));

        if (container->msc_finalizers == NULL) {
                CERROR("Failed to allocate message finalizers\n");
                lnet_msg_container_cleanup(container);
                return -ENOMEM;
        }

        return rc;
}
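
/*
 * Sizing note (illustrative): msc_nfinalizers is one finalizer slot per
 * CPU in the CPT, so a partition spanning 8 cores gets 8 slots, which
 * bounds how many threads can drain that container's msc_finalizing
 * list at once in lnet_finalize().
 */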

void
lnet_msg_containers_destroy(void)
{
        struct lnet_msg_container *container;
        int     i;

        if (the_lnet.ln_msg_containers == NULL)
                return;

        cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers)
                lnet_msg_container_cleanup(container);

        cfs_percpt_free(the_lnet.ln_msg_containers);
        the_lnet.ln_msg_containers = NULL;
}

int
lnet_msg_containers_create(void)
{
        struct lnet_msg_container *container;
        int     rc;
        int     i;

        the_lnet.ln_msg_containers = cfs_percpt_alloc(lnet_cpt_table(),
                                                      sizeof(*container));

        if (the_lnet.ln_msg_containers == NULL) {
                CERROR("Failed to allocate cpu-partition data for network\n");
                return -ENOMEM;
        }

        cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers) {
                rc = lnet_msg_container_setup(container, i);
                if (rc != 0) {
                        lnet_msg_containers_destroy();
                        return rc;
                }
        }

        return 0;
}