1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lnet/lnet/lib-msg.c
33  *
34  * Message decoding, parsing and finalizing routines
35  */
36
37 #define DEBUG_SUBSYSTEM S_LNET
38
39 #include <lnet/lib-lnet.h>
40
41 void
42 lnet_build_unlink_event(struct lnet_libmd *md, struct lnet_event *ev)
43 {
44         ENTRY;
45
46         memset(ev, 0, sizeof(*ev));
47
48         ev->status   = 0;
49         ev->unlinked = 1;
50         ev->type     = LNET_EVENT_UNLINK;
51         lnet_md_deconstruct(md, ev);
52         lnet_md2handle(&ev->md_handle, md);
53         EXIT;
54 }
55
56 /*
57  * No lock needed; must be called after lnet_msg_commit()
58  */
59 void
60 lnet_build_msg_event(struct lnet_msg *msg, enum lnet_event_kind ev_type)
61 {
62         struct lnet_hdr *hdr = &msg->msg_hdr;
63         struct lnet_event *ev = &msg->msg_ev;
64
65         LASSERT(!msg->msg_routing);
66
67         ev->type = ev_type;
68         ev->msg_type = msg->msg_type;
69
70         if (ev_type == LNET_EVENT_SEND) {
71                 /* event for active message */
72                 ev->target.nid    = le64_to_cpu(hdr->dest_nid);
73                 ev->target.pid    = le32_to_cpu(hdr->dest_pid);
74                 ev->initiator.nid = LNET_NID_ANY;
75                 ev->initiator.pid = the_lnet.ln_pid;
76                 ev->source.nid    = LNET_NID_ANY;
77                 ev->source.pid    = the_lnet.ln_pid;
78                 ev->sender        = LNET_NID_ANY;
79         } else {
80                 /* event for passive message */
81                 ev->target.pid    = hdr->dest_pid;
82                 ev->target.nid    = hdr->dest_nid;
83                 ev->initiator.pid = hdr->src_pid;
84                 /* Multi-Rail: resolve src_nid to "primary" peer NID */
85                 ev->initiator.nid = msg->msg_initiator;
86                 /* Multi-Rail: track source NID. */
87                 ev->source.pid    = hdr->src_pid;
88                 ev->source.nid    = hdr->src_nid;
89                 ev->rlength       = hdr->payload_length;
90                 ev->sender        = msg->msg_from;
91                 ev->mlength       = msg->msg_wanted;
92                 ev->offset        = msg->msg_offset;
93         }
94
95         switch (ev_type) {
96         default:
97                 LBUG();
98
99         case LNET_EVENT_PUT: /* passive PUT */
100                 ev->pt_index   = hdr->msg.put.ptl_index;
101                 ev->match_bits = hdr->msg.put.match_bits;
102                 ev->hdr_data   = hdr->msg.put.hdr_data;
103                 return;
104
105         case LNET_EVENT_GET: /* passive GET */
106                 ev->pt_index   = hdr->msg.get.ptl_index;
107                 ev->match_bits = hdr->msg.get.match_bits;
108                 ev->hdr_data   = 0;
109                 return;
110
111         case LNET_EVENT_ACK: /* ACK */
112                 ev->match_bits = hdr->msg.ack.match_bits;
113                 ev->mlength    = hdr->msg.ack.mlength;
114                 return;
115
116         case LNET_EVENT_REPLY: /* REPLY */
117                 return;
118
119         case LNET_EVENT_SEND: /* active message */
120                 if (msg->msg_type == LNET_MSG_PUT) {
121                         ev->pt_index   = le32_to_cpu(hdr->msg.put.ptl_index);
122                         ev->match_bits = le64_to_cpu(hdr->msg.put.match_bits);
123                         ev->offset     = le32_to_cpu(hdr->msg.put.offset);
124                         ev->mlength    =
125                         ev->rlength    = le32_to_cpu(hdr->payload_length);
126                         ev->hdr_data   = le64_to_cpu(hdr->msg.put.hdr_data);
127
128                 } else {
129                         LASSERT(msg->msg_type == LNET_MSG_GET);
130                         ev->pt_index   = le32_to_cpu(hdr->msg.get.ptl_index);
131                         ev->match_bits = le64_to_cpu(hdr->msg.get.match_bits);
132                         ev->mlength    =
133                         ev->rlength    = le32_to_cpu(hdr->msg.get.sink_length);
134                         ev->offset     = le32_to_cpu(hdr->msg.get.src_offset);
135                         ev->hdr_data   = 0;
136                 }
137                 return;
138         }
139 }
140
141 void
142 lnet_msg_commit(struct lnet_msg *msg, int cpt)
143 {
144         struct lnet_msg_container *container = the_lnet.ln_msg_containers[cpt];
145         struct lnet_counters_common *common;
146         s64 timeout_ns;
147
148         /* set the message deadline */
149         timeout_ns = lnet_transaction_timeout * NSEC_PER_SEC;
150         msg->msg_deadline = ktime_add_ns(ktime_get(), timeout_ns);
151
152         /* routed message can be committed for both receiving and sending */
153         LASSERT(!msg->msg_tx_committed);
154
155         if (msg->msg_sending) {
156                 LASSERT(!msg->msg_receiving);
157                 msg->msg_tx_cpt = cpt;
158                 msg->msg_tx_committed = 1;
159                 if (msg->msg_rx_committed) { /* routed message REPLY */
160                         LASSERT(msg->msg_onactivelist);
161                         return;
162                 }
163         } else {
164                 LASSERT(!msg->msg_sending);
165                 msg->msg_rx_cpt = cpt;
166                 msg->msg_rx_committed = 1;
167         }
168
169         LASSERT(!msg->msg_onactivelist);
170
171         msg->msg_onactivelist = 1;
172         list_add_tail(&msg->msg_activelist, &container->msc_active);
173
174         common = &the_lnet.ln_counters[cpt]->lct_common;
175         common->lcc_msgs_alloc++;
176         if (common->lcc_msgs_alloc > common->lcc_msgs_max)
177                 common->lcc_msgs_max = common->lcc_msgs_alloc;
178 }
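/*
 * A worked sketch of the deadline arithmetic above, assuming
 * lnet_transaction_timeout is set to, say, 50 seconds: a message committed
 * at time T gets msg_deadline = T + 50s, and lnet_health_check() below will
 * not resend anything that fails after that point.
 *
 *	timeout_ns = 50 * NSEC_PER_SEC;			// 50s in ns
 *	msg->msg_deadline = ktime_add_ns(ktime_get(), timeout_ns);
 *	...
 *	if (hstatus != LNET_MSG_STATUS_OK &&
 *	    ktime_compare(ktime_get(), msg->msg_deadline) >= 0)
 *		return -1;	// too late to retry, just finalize
 */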
179
180 static void
181 lnet_msg_decommit_tx(struct lnet_msg *msg, int status)
182 {
183         struct lnet_counters_common *common;
184         struct lnet_event *ev = &msg->msg_ev;
185
186         LASSERT(msg->msg_tx_committed);
187         if (status != 0)
188                 goto out;
189
190         common = &(the_lnet.ln_counters[msg->msg_tx_cpt]->lct_common);
191         switch (ev->type) {
192         default: /* routed message */
193                 LASSERT(msg->msg_routing);
194                 LASSERT(msg->msg_rx_committed);
195                 LASSERT(ev->type == 0);
196
197                 common->lcc_route_length += msg->msg_len;
198                 common->lcc_route_count++;
199                 goto incr_stats;
200
201         case LNET_EVENT_PUT:
202                 /* should have been decommitted */
203                 LASSERT(!msg->msg_rx_committed);
204                 /* overwritten while sending ACK */
205                 LASSERT(msg->msg_type == LNET_MSG_ACK);
206                 msg->msg_type = LNET_MSG_PUT; /* fix type */
207                 break;
208
209         case LNET_EVENT_SEND:
210                 LASSERT(!msg->msg_rx_committed);
211                 if (msg->msg_type == LNET_MSG_PUT)
212                         common->lcc_send_length += msg->msg_len;
213                 break;
214
215         case LNET_EVENT_GET:
216                 LASSERT(msg->msg_rx_committed);
217                 /* overwritten while sending the reply; we should never be
218                  * here for an optimized GET */
219                 LASSERT(msg->msg_type == LNET_MSG_REPLY);
220                 msg->msg_type = LNET_MSG_GET; /* fix type */
221                 break;
222         }
223
224         common->lcc_send_count++;
225
226 incr_stats:
227         if (msg->msg_txpeer)
228                 lnet_incr_stats(&msg->msg_txpeer->lpni_stats,
229                                 msg->msg_type,
230                                 LNET_STATS_TYPE_SEND);
231         if (msg->msg_txni)
232                 lnet_incr_stats(&msg->msg_txni->ni_stats,
233                                 msg->msg_type,
234                                 LNET_STATS_TYPE_SEND);
235  out:
236         lnet_return_tx_credits_locked(msg);
237         msg->msg_tx_committed = 0;
238 }
239
240 static void
241 lnet_msg_decommit_rx(struct lnet_msg *msg, int status)
242 {
243         struct lnet_counters_common *common;
244         struct lnet_event *ev = &msg->msg_ev;
245
246         LASSERT(!msg->msg_tx_committed); /* decommitted or never committed */
247         LASSERT(msg->msg_rx_committed);
248
249         if (status != 0)
250                 goto out;
251
252         common = &(the_lnet.ln_counters[msg->msg_rx_cpt]->lct_common);
253         switch (ev->type) {
254         default:
255                 LASSERT(ev->type == 0);
256                 LASSERT(msg->msg_routing);
257                 goto incr_stats;
258
259         case LNET_EVENT_ACK:
260                 LASSERT(msg->msg_type == LNET_MSG_ACK);
261                 break;
262
263         case LNET_EVENT_GET:
264                 /* type is "REPLY" if it's an optimized GET on passive side,
265                  * because optimized GET will never be committed for sending,
266                  * so message type wouldn't be changed back to "GET" by
267                  * lnet_msg_decommit_tx(), see details in lnet_parse_get() */
268                 LASSERT(msg->msg_type == LNET_MSG_REPLY ||
269                         msg->msg_type == LNET_MSG_GET);
270                 common->lcc_send_length += msg->msg_wanted;
271                 break;
272
273         case LNET_EVENT_PUT:
274                 LASSERT(msg->msg_type == LNET_MSG_PUT);
275                 break;
276
277         case LNET_EVENT_REPLY:
278                 /* type is "GET" if it's an optimized GET on active side,
279                  * see details in lnet_create_reply_msg() */
280                 LASSERT(msg->msg_type == LNET_MSG_GET ||
281                         msg->msg_type == LNET_MSG_REPLY);
282                 break;
283         }
284
285         common->lcc_recv_count++;
286
287 incr_stats:
288         if (msg->msg_rxpeer)
289                 lnet_incr_stats(&msg->msg_rxpeer->lpni_stats,
290                                 msg->msg_type,
291                                 LNET_STATS_TYPE_RECV);
292         if (msg->msg_rxni)
293                 lnet_incr_stats(&msg->msg_rxni->ni_stats,
294                                 msg->msg_type,
295                                 LNET_STATS_TYPE_RECV);
296         if (ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_REPLY)
297                 common->lcc_recv_length += msg->msg_wanted;
298
299  out:
300         lnet_return_rx_credits_locked(msg);
301         msg->msg_rx_committed = 0;
302 }
303
304 void
305 lnet_msg_decommit(struct lnet_msg *msg, int cpt, int status)
306 {
307         int     cpt2 = cpt;
308
309         LASSERT(msg->msg_tx_committed || msg->msg_rx_committed);
310         LASSERT(msg->msg_onactivelist);
311
312         if (msg->msg_tx_committed) { /* always decommit for sending first */
313                 LASSERT(cpt == msg->msg_tx_cpt);
314                 lnet_msg_decommit_tx(msg, status);
315         }
316
317         if (msg->msg_rx_committed) {
318                 /* forwarding msg committed for both receiving and sending */
319                 if (cpt != msg->msg_rx_cpt) {
320                         lnet_net_unlock(cpt);
321                         cpt2 = msg->msg_rx_cpt;
322                         lnet_net_lock(cpt2);
323                 }
324                 lnet_msg_decommit_rx(msg, status);
325         }
326
327         list_del(&msg->msg_activelist);
328         msg->msg_onactivelist = 0;
329
330         the_lnet.ln_counters[cpt2]->lct_common.lcc_msgs_alloc--;
331
332         if (cpt2 != cpt) {
333                 lnet_net_unlock(cpt2);
334                 lnet_net_lock(cpt);
335         }
336 }
337
338 void
339 lnet_msg_attach_md(struct lnet_msg *msg, struct lnet_libmd *md,
340                    unsigned int offset, unsigned int mlen)
341 {
342         /* NB: @offset and @mlen are only used for receiving */
343         /* Here we attach the MD to the lnet_msg, mark it busy and
344          * decrement its threshold. Come what may, the lnet_msg "owns"
345          * the MD until a call to lnet_msg_detach_md() or lnet_finalize()
346          * signals completion; see the sketch after this function. */
347         LASSERT(!msg->msg_routing);
348
349         msg->msg_md = md;
350         if (msg->msg_receiving) { /* committed for receiving */
351                 msg->msg_offset = offset;
352                 msg->msg_wanted = mlen;
353         }
354
355         md->md_refcount++;
356         if (md->md_threshold != LNET_MD_THRESH_INF) {
357                 LASSERT(md->md_threshold > 0);
358                 md->md_threshold--;
359         }
360
361         /* build umd in event */
362         lnet_md2handle(&msg->msg_ev.md_handle, md);
363         lnet_md_deconstruct(md, &msg->msg_ev);
364 }
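/*
 * A rough sketch of the MD ownership contract described above, using a
 * hypothetical MD created with md_threshold == 2 (the values in the
 * comments are illustrative):
 *
 *	lnet_msg_attach_md(msg, md, offset, mlen);
 *		// md_refcount 0 -> 1, md_threshold 2 -> 1
 *	... LND moves the payload ...
 *	lnet_finalize(msg, 0);
 *		// lnet_msg_detach_md(): md_refcount 1 -> 0
 *	// after a second attach/detach cycle md_threshold reaches 0, the
 *	// MD becomes unlinkable and lnet_msg_detach_md() unlinks it via
 *	// lnet_md_unlink().
 */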
365
366 static int
367 lnet_complete_msg_locked(struct lnet_msg *msg, int cpt)
368 {
369         struct lnet_handle_wire ack_wmd;
370         int                rc;
371         int                status = msg->msg_ev.status;
372
373         LASSERT(msg->msg_onactivelist);
374
375         if (status == 0 && msg->msg_ack) {
376                 /* Only send an ACK if the PUT completed successfully */
377
378                 lnet_msg_decommit(msg, cpt, 0);
379
380                 msg->msg_ack = 0;
381                 lnet_net_unlock(cpt);
382
383                 LASSERT(msg->msg_ev.type == LNET_EVENT_PUT);
384                 LASSERT(!msg->msg_routing);
385
386                 ack_wmd = msg->msg_hdr.msg.put.ack_wmd;
387
388                 lnet_prep_send(msg, LNET_MSG_ACK, msg->msg_ev.source, 0, 0);
389
390                 msg->msg_hdr.msg.ack.dst_wmd = ack_wmd;
391                 msg->msg_hdr.msg.ack.match_bits = msg->msg_ev.match_bits;
392                 msg->msg_hdr.msg.ack.mlength = cpu_to_le32(msg->msg_ev.mlength);
393
394                 rc = lnet_send(msg->msg_ev.target.nid, msg, msg->msg_from);
395
396                 lnet_net_lock(cpt);
397                 /*
398                  * NB: the message is committed for sending; on success we
399                  * must return because the LND will finalize it later.
400                  *
401                  * It is also possible that the message was committed for
402                  * sending but failed before reaching the LND (e.g. ENOMEM).
403                  * We can't fall through in that case either, because the
404                  * CPT for sending can differ from the CPT for receiving,
405                  * so we should return to lnet_finalize() to make sure we
406                  * are locking the correct partition.
407                  */
408                 return rc;
409
410         } else if (status == 0 &&       /* OK so far */
411                    (msg->msg_routing && !msg->msg_sending)) {
412                 /* not forwarded */
413                 LASSERT(!msg->msg_receiving);   /* called back recv already */
414                 lnet_net_unlock(cpt);
415
416                 rc = lnet_send(LNET_NID_ANY, msg, LNET_NID_ANY);
417
418                 lnet_net_lock(cpt);
419                 /*
420                  * NB: the message is committed for sending; on success we
421                  * must return because the LND will finalize it later.
422                  *
423                  * It is also possible that the message was committed for
424                  * sending but failed before reaching the LND (e.g. ENOMEM).
425                  * We can't fall through in that case either:
426                  * - the rule is that a message committed for both sending
427                  *   and receiving must be decommitted for sending first
428                  * - the CPT for sending can differ from the CPT for
429                  *   receiving, so we should return to lnet_finalize() to
430                  *   make sure we are locking the correct partition.
431                  */
432                 return rc;
433         }
434
435         lnet_msg_decommit(msg, cpt, status);
436         lnet_msg_free(msg);
437         return 0;
438 }
439
440 static void
441 lnet_dec_healthv_locked(atomic_t *healthv, int sensitivity)
442 {
443         int h = atomic_read(healthv);
444
445         if (h < sensitivity) {
446                 atomic_set(healthv, 0);
447         } else {
448                 h -= sensitivity;
449                 atomic_set(healthv, h);
450         }
451 }
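/*
 * A worked example of the clamp above, assuming a sensitivity of 100 and an
 * interface that keeps failing: a health value of 250 drops to 150, then to
 * 50, then is pinned at 0 rather than going negative.
 *
 *	atomic_set(&ni->ni_healthv, 250);
 *	lnet_dec_healthv_locked(&ni->ni_healthv, 100);	// 150
 *	lnet_dec_healthv_locked(&ni->ni_healthv, 100);	//  50
 *	lnet_dec_healthv_locked(&ni->ni_healthv, 100);	//   0 (clamped)
 */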
452
453 static void
454 lnet_handle_local_failure(struct lnet_ni *local_ni)
455 {
456         /*
457          * the lnet_net_lock(0) is used to protect the addref on the ni
458          * and the recovery queue.
459          */
460         lnet_net_lock(0);
461         /* the monitor thread could've shut down and cleaned up the queues */
462         if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING) {
463                 lnet_net_unlock(0);
464                 return;
465         }
466
467         lnet_dec_healthv_locked(&local_ni->ni_healthv, lnet_health_sensitivity);
468         /*
469          * Add the NI to the recovery queue if it's not already there
470          * and its health value is actually below the maximum. It's
471          * possible that the sensitivity is set to 0, in which case the
472          * health value will not be reduced and there is no reason to
473          * invoke recovery.
474          */
475         if (list_empty(&local_ni->ni_recovery) &&
476             atomic_read(&local_ni->ni_healthv) < LNET_MAX_HEALTH_VALUE) {
477                 CDEBUG(D_NET, "ni %s added to recovery queue. Health = %d\n",
478                         libcfs_nid2str(local_ni->ni_nid),
479                         atomic_read(&local_ni->ni_healthv));
480                 list_add_tail(&local_ni->ni_recovery,
481                               &the_lnet.ln_mt_localNIRecovq);
482                 lnet_ni_addref_locked(local_ni, 0);
483         }
484         lnet_net_unlock(0);
485 }
486
487 /* must hold net_lock/0 */
488 void
489 lnet_handle_remote_failure_locked(struct lnet_peer_ni *lpni)
490 {
491         __u32 sensitivity = lnet_health_sensitivity;
492         __u32 lp_sensitivity;
493
494         /*
495          * If there is a health sensitivity in the peer then use that
496          * instead of the globally set one.
497          */
498         lp_sensitivity = lpni->lpni_peer_net->lpn_peer->lp_health_sensitivity;
499         if (lp_sensitivity)
500                 sensitivity = lp_sensitivity;
501
502         lnet_dec_healthv_locked(&lpni->lpni_healthv, sensitivity);
503
504         /* update the peer_net's health value */
505         lnet_update_peer_net_healthv(lpni);
506
507         /*
508          * Add the peer NI to the recovery queue if it's not already there
509          * and its health value is actually below the maximum. It's
510          * possible that the sensitivity is set to 0, in which case the
511          * health value will not be reduced and there is no reason to
512          * invoke recovery.
513          */
514         lnet_peer_ni_add_to_recoveryq_locked(lpni,
515                                              &the_lnet.ln_mt_peerNIRecovq,
516                                              ktime_get_seconds());
517 }
518
519 static void
520 lnet_handle_remote_failure(struct lnet_peer_ni *lpni)
521 {
522         /* lpni could be NULL if we're in the LOLND case */
523         if (!lpni)
524                 return;
525
526         lnet_net_lock(0);
527         /* the monitor thread could've shut down and cleaned up the queues */
528         if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING) {
529                 lnet_net_unlock(0);
530                 return;
531         }
532         lnet_handle_remote_failure_locked(lpni);
533         lnet_net_unlock(0);
534 }
535
536 static void
537 lnet_incr_hstats(struct lnet_ni *ni, struct lnet_peer_ni *lpni,
538                  enum lnet_msg_hstatus hstatus)
539 {
540         struct lnet_counters_health *health;
541
542         health = &the_lnet.ln_counters[0]->lct_health;
543
544         switch (hstatus) {
545         case LNET_MSG_STATUS_LOCAL_INTERRUPT:
546                 atomic_inc(&ni->ni_hstats.hlt_local_interrupt);
547                 health->lch_local_interrupt_count++;
548                 break;
549         case LNET_MSG_STATUS_LOCAL_DROPPED:
550                 atomic_inc(&ni->ni_hstats.hlt_local_dropped);
551                 health->lch_local_dropped_count++;
552                 break;
553         case LNET_MSG_STATUS_LOCAL_ABORTED:
554                 atomic_inc(&ni->ni_hstats.hlt_local_aborted);
555                 health->lch_local_aborted_count++;
556                 break;
557         case LNET_MSG_STATUS_LOCAL_NO_ROUTE:
558                 atomic_inc(&ni->ni_hstats.hlt_local_no_route);
559                 health->lch_local_no_route_count++;
560                 break;
561         case LNET_MSG_STATUS_LOCAL_TIMEOUT:
562                 atomic_inc(&ni->ni_hstats.hlt_local_timeout);
563                 health->lch_local_timeout_count++;
564                 break;
565         case LNET_MSG_STATUS_LOCAL_ERROR:
566                 atomic_inc(&ni->ni_hstats.hlt_local_error);
567                 health->lch_local_error_count++;
568                 break;
569         case LNET_MSG_STATUS_REMOTE_DROPPED:
570                 if (lpni)
571                         atomic_inc(&lpni->lpni_hstats.hlt_remote_dropped);
572                 health->lch_remote_dropped_count++;
573                 break;
574         case LNET_MSG_STATUS_REMOTE_ERROR:
575                 if (lpni)
576                         atomic_inc(&lpni->lpni_hstats.hlt_remote_error);
577                 health->lch_remote_error_count++;
578                 break;
579         case LNET_MSG_STATUS_REMOTE_TIMEOUT:
580                 if (lpni)
581                         atomic_inc(&lpni->lpni_hstats.hlt_remote_timeout);
582                 health->lch_remote_timeout_count++;
583                 break;
584         case LNET_MSG_STATUS_NETWORK_TIMEOUT:
585                 if (lpni)
586                         atomic_inc(&lpni->lpni_hstats.hlt_network_timeout);
587                 health->lch_network_timeout_count++;
588                 break;
589         case LNET_MSG_STATUS_OK:
590                 break;
591         default:
592                 LBUG();
593         }
594 }
595
596 static void
597 lnet_resend_msg_locked(struct lnet_msg *msg)
598 {
599         msg->msg_retry_count++;
600
601         /*
602          * Remove the message from the active list and reset it to prepare
603          * for a resend. There are two exceptions:
604          *
605          * 1. The router case. When a message is being routed it is
606          * committed for rx when received and committed for tx when
607          * forwarded. We don't want to remove it from the active list,
608          * since the code which handles receiving expects it to remain on
609          * the active list.
610          *
611          * 2. The REPLY case. A REPLY reuses the message structure of the
612          * GET that was received, so it is left on the active list as well.
613          */
614         if (!msg->msg_routing && msg->msg_type != LNET_MSG_REPLY) {
615                 list_del_init(&msg->msg_activelist);
616                 msg->msg_onactivelist = 0;
617         }
618         /*
619          * The msg_target.nid which was originally set
620          * when calling LNetGet() or LNetPut() might've
621          * been overwritten if we're routing this message.
622          * Call lnet_msg_decommit_tx() to return the credit
623          * this message consumed. The message will
624          * consume another credit when it gets resent.
625          */
626         msg->msg_target.nid = msg->msg_hdr.dest_nid;
627         lnet_msg_decommit_tx(msg, -EAGAIN);
628         msg->msg_sending = 0;
629         msg->msg_receiving = 0;
630         msg->msg_target_is_router = 0;
631
632         CDEBUG(D_NET, "%s->%s:%s:%s - queuing msg (%p) for resend\n",
633                libcfs_nid2str(msg->msg_hdr.src_nid),
634                libcfs_nid2str(msg->msg_hdr.dest_nid),
635                lnet_msgtyp2str(msg->msg_type),
636                lnet_health_error2str(msg->msg_health_status), msg);
637
638         list_add_tail(&msg->msg_list, the_lnet.ln_mt_resendqs[msg->msg_tx_cpt]);
639
640         complete(&the_lnet.ln_mt_wait_complete);
641 }
642
643 int
644 lnet_check_finalize_recursion_locked(struct lnet_msg *msg,
645                                      struct list_head *containerq,
646                                      int nworkers, void **workers)
647 {
648         int my_slot = -1;
649         int i;
650
651         list_add_tail(&msg->msg_list, containerq);
652
653         for (i = 0; i < nworkers; i++) {
654                 if (workers[i] == current)
655                         break;
656
657                 if (my_slot < 0 && workers[i] == NULL)
658                         my_slot = i;
659         }
660
661         if (i < nworkers || my_slot < 0)
662                 return -1;
663
664         workers[my_slot] = current;
665
666         return my_slot;
667 }
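/*
 * A condensed sketch of how the helper above is meant to be used; both
 * lnet_attempt_msg_resend() and lnet_finalize() below follow this shape:
 * queue the message, try to claim a worker slot, and only the thread that
 * got a slot drains the queue, releasing the slot afterwards.
 *
 *	my_slot = lnet_check_finalize_recursion_locked(msg, queue,
 *						       nworkers, workers);
 *	if (my_slot < 0)
 *		return;			// another thread will process it
 *	while (!list_empty(queue))
 *		... process one queued message ...
 *	workers[my_slot] = NULL;	// free the slot for future callers
 */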
668
669 int
670 lnet_attempt_msg_resend(struct lnet_msg *msg)
671 {
672         struct lnet_msg_container *container;
673         int my_slot;
674         int cpt;
675
676         /* we can only resend tx_committed messages */
677         LASSERT(msg->msg_tx_committed);
678
679         /* don't resend recovery messages */
680         if (msg->msg_recovery) {
681                 CDEBUG(D_NET, "msg %s->%s is a recovery ping. retry# %d\n",
682                         libcfs_nid2str(msg->msg_from),
683                         libcfs_nid2str(msg->msg_target.nid),
684                         msg->msg_retry_count);
685                 return -ENOTRECOVERABLE;
686         }
687
688         /*
689          * if we explicitly indicated we don't want to resend then just
690          * return
691          */
692         if (msg->msg_no_resend) {
693                 CDEBUG(D_NET, "msg %s->%s requested no resend. retry# %d\n",
694                         libcfs_nid2str(msg->msg_from),
695                         libcfs_nid2str(msg->msg_target.nid),
696                         msg->msg_retry_count);
697                 return -ENOTRECOVERABLE;
698         }
699
700         /* check if the message has exceeded the number of retries */
701         if (msg->msg_retry_count >= lnet_retry_count) {
702                 CNETERR("msg %s->%s exceeded retry count %d\n",
703                         libcfs_nid2str(msg->msg_from),
704                         libcfs_nid2str(msg->msg_target.nid),
705                         msg->msg_retry_count);
706                 return -ENOTRECOVERABLE;
707         }
708
709         cpt = msg->msg_tx_cpt;
710         lnet_net_lock(cpt);
711
712         /* check again under lock */
713         if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING) {
714                 lnet_net_unlock(cpt);
715                 return -ESHUTDOWN;
716         }
717
718         container = the_lnet.ln_msg_containers[cpt];
719         my_slot =
720                 lnet_check_finalize_recursion_locked(msg,
721                                         &container->msc_resending,
722                                         container->msc_nfinalizers,
723                                         container->msc_resenders);
724
725         /* enough threads are resending */
726         if (my_slot == -1) {
727                 lnet_net_unlock(cpt);
728                 return 0;
729         }
730
731         while (!list_empty(&container->msc_resending)) {
732                 msg = list_entry(container->msc_resending.next,
733                                         struct lnet_msg, msg_list);
734                 list_del(&msg->msg_list);
735
736                 /*
737                  * resending the message will require us to call
738                  * lnet_msg_decommit_tx() which will return the credit
739                  * which this message holds. This could trigger another
740                  * queued message to be sent. If that message fails and
741                  * requires a resend we will recurse.
742                  * But since at this point the slot is taken, the message
743                  * will be queued in the container and dealt with
744                  * later. This breaks the recursion.
745                  */
746                 lnet_resend_msg_locked(msg);
747         }
748
749         /*
750          * msc_resenders is an array of process pointers. Each entry holds
751          * a pointer to the process currently resending a message; there is
752          * one slot per finalizer on this CPT (see msc_nfinalizers). If a
753          * slot is already set, then a thread on this CPT is currently
754          * resending a message.
755          * Once the thread finishes, clear the slot so that it can take
756          * on more resend work.
757          */
758         container->msc_resenders[my_slot] = NULL;
759         lnet_net_unlock(cpt);
760
761         return 0;
762 }
763
764 /*
765  * Do a health check on the message:
766  * return -1 if we're not going to handle the error or
767  *   if we've reached the maximum number of retries;
768  *   the success case returns -1 as well.
769  * return 0 if the message has been requeued for resend.
770  */
771 static int
772 lnet_health_check(struct lnet_msg *msg)
773 {
774         enum lnet_msg_hstatus hstatus = msg->msg_health_status;
775         struct lnet_peer_ni *lpni;
776         struct lnet_ni *ni;
777         bool lo = false;
778         bool attempt_local_resend;
779         bool attempt_remote_resend;
780         bool handle_local_health;
781         bool handle_remote_health;
782
783         /* if we're shutting down, there is no point in handling health */
784         if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
785                 return -1;
786
787         LASSERT(msg->msg_tx_committed || msg->msg_rx_committed);
788
789         /*
790          * if we're sending to the LOLND then the msg_txpeer will not be
791          * set. So no need to sanity check it.
792          */
793         if (msg->msg_tx_committed && msg->msg_txni->ni_nid != LNET_NID_LO_0)
794                 LASSERT(msg->msg_txpeer);
795         else if (msg->msg_tx_committed &&
796                  msg->msg_txni->ni_nid == LNET_NID_LO_0)
797                 lo = true;
798
799         if (hstatus != LNET_MSG_STATUS_OK &&
800             ktime_compare(ktime_get(), msg->msg_deadline) >= 0)
801                 return -1;
802
803         /*
804          * always prefer txni/txpeer if the message is committed for both
805          * directions.
806          */
807         if (msg->msg_tx_committed) {
808                 ni = msg->msg_txni;
809                 lpni = msg->msg_txpeer;
810                 attempt_local_resend = attempt_remote_resend = true;
811         } else {
812                 ni = msg->msg_rxni;
813                 lpni = msg->msg_rxpeer;
814                 attempt_local_resend = attempt_remote_resend = false;
815         }
816
817         /* Don't further decrement the health value if a recovery message
818          * failed.
819          */
820         if (msg->msg_recovery)
821                 handle_local_health = handle_remote_health = false;
822         else
823                 handle_local_health = handle_remote_health = true;
824
825         /* For local failures, health/recovery/resends are not needed if I only
826          * have a single (non-lolnd) interface. NB: pb_nnis includes the lolnd
827          * interface, so a single-rail node would have pb_nnis == 2.
828          */
829         if (the_lnet.ln_ping_target->pb_nnis <= 2) {
830                 handle_local_health = false;
831                 attempt_local_resend = false;
832         }
833
834         /* For remote failures, health/recovery/resends are not needed if the
835          * peer only has a single interface. Special case for routers where we
836          * rely on health feature to manage route aliveness. NB: unlike pb_nnis
837          * above, lp_nnis does _not_ include the lolnd, so a single-rail node
838          * would have lp_nnis == 1.
839          */
840         if (lpni && lpni->lpni_peer_net->lpn_peer->lp_nnis <= 1) {
841                 attempt_remote_resend = false;
842                 if (!lnet_isrouter(lpni))
843                         handle_remote_health = false;
844         }
845
846         if (!lo)
847                 LASSERT(ni && lpni);
848         else
849                 LASSERT(ni);
850
851         CDEBUG(D_NET, "health check: %s->%s: %s: %s\n",
852                libcfs_nid2str(ni->ni_nid),
853                (lo) ? "self" : libcfs_nid2str(lpni->lpni_nid),
854                lnet_msgtyp2str(msg->msg_type),
855                lnet_health_error2str(hstatus));
856
857         /*
858          * stats are only incremented for errors so avoid wasting time
859          * incrementing statistics if there is no error.
860          */
861         if (hstatus != LNET_MSG_STATUS_OK) {
862                 lnet_net_lock(0);
863                 lnet_incr_hstats(ni, lpni, hstatus);
864                 lnet_net_unlock(0);
865         }
866
867         switch (hstatus) {
868         case LNET_MSG_STATUS_OK:
869                 /*
870                  * increment the local ni health whether we successfully
871                  * received or sent a message on it.
872                  *
873                  * Ping counts are reset to 0 as appropriate to allow for
874                  * faster recovery.
875                  */
876                 lnet_inc_healthv(&ni->ni_healthv, lnet_health_sensitivity);
877                 /*
878                  * It's possible msg_txpeer is NULL in the LOLND
879                  * case. Only increment the peer's health if we're
880                  * receiving a message from it. It's the only sure way to
881                  * know that a remote interface is up.
882                  * If this interface is part of a router, then take that
883                          * as an indication that the router is fully healthy.
884                  */
885                 if (lpni && msg->msg_rx_committed) {
886                         lpni->lpni_ping_count = 0;
887                         /*
888                          * If we're receiving a message from the router or
889                          * I'm a router, then set that lpni's health to
890                          * maximum so we can commence communication
891                          */
892                         lnet_net_lock(0);
893                         if (lnet_isrouter(lpni) || the_lnet.ln_routing) {
894                                 lnet_set_lpni_healthv_locked(lpni,
895                                         LNET_MAX_HEALTH_VALUE);
896                         } else {
897                                 __u32 sensitivity = lpni->lpni_peer_net->
898                                         lpn_peer->lp_health_sensitivity;
899
900                                 lnet_inc_lpni_healthv_locked(lpni,
901                                         (sensitivity) ? sensitivity :
902                                         lnet_health_sensitivity);
903                                 /* This peer NI may have previously aged out
904                                  * of recovery. Now that we've received a
905                                  * message from it, we can continue recovery
906                                  * if its health value is still below the
907                                  * maximum.
908                                  */
909                                 lnet_peer_ni_add_to_recoveryq_locked(lpni,
910                                                 &the_lnet.ln_mt_peerNIRecovq,
911                                                 ktime_get_seconds());
912                         }
913                         lnet_net_unlock(0);
914                 }
915
916                 /* we can finalize this message */
917                 return -1;
918         case LNET_MSG_STATUS_LOCAL_INTERRUPT:
919         case LNET_MSG_STATUS_LOCAL_DROPPED:
920         case LNET_MSG_STATUS_LOCAL_ABORTED:
921         case LNET_MSG_STATUS_LOCAL_NO_ROUTE:
922         case LNET_MSG_STATUS_LOCAL_TIMEOUT:
923                 if (handle_local_health)
924                         lnet_handle_local_failure(ni);
925                 if (attempt_local_resend)
926                         return lnet_attempt_msg_resend(msg);
927                 break;
928         case LNET_MSG_STATUS_LOCAL_ERROR:
929                 if (handle_local_health)
930                         lnet_handle_local_failure(ni);
931                 return -1;
932         case LNET_MSG_STATUS_REMOTE_DROPPED:
933                 if (handle_remote_health)
934                         lnet_handle_remote_failure(lpni);
935                 if (attempt_remote_resend)
936                         return lnet_attempt_msg_resend(msg);
937                 break;
938         case LNET_MSG_STATUS_REMOTE_ERROR:
939         case LNET_MSG_STATUS_REMOTE_TIMEOUT:
940                 if (handle_remote_health)
941                         lnet_handle_remote_failure(lpni);
942                 return -1;
943         case LNET_MSG_STATUS_NETWORK_TIMEOUT:
944                 if (handle_remote_health)
945                         lnet_handle_remote_failure(lpni);
946                 if (handle_local_health)
947                         lnet_handle_local_failure(ni);
948                 return -1;
949         default:
950                 LBUG();
951         }
952
953         /* no resend is needed */
954         return -1;
955 }
956
957 static void
958 lnet_msg_detach_md(struct lnet_msg *msg, int status)
959 {
960         struct lnet_libmd *md = msg->msg_md;
961         lnet_handler_t handler = NULL;
962         int cpt = lnet_cpt_of_cookie(md->md_lh.lh_cookie);
963         int unlink;
964
965         lnet_res_lock(cpt);
966         while (md->md_flags & LNET_MD_FLAG_HANDLING)
967                 /* An event handler is running - wait for it to
968                  * complete to avoid races.
969                  */
970                 lnet_md_wait_handling(md, cpt);
971
972         /* Now it's safe to drop my caller's ref */
973         md->md_refcount--;
974         LASSERT(md->md_refcount >= 0);
975
976         unlink = lnet_md_unlinkable(md);
977         if (md->md_handler) {
978                 if ((md->md_flags & LNET_MD_FLAG_ABORTED) && !status) {
979                         msg->msg_ev.status   = -ETIMEDOUT;
980                         CDEBUG(D_NET, "md 0x%p already unlinked\n", md);
981                 } else {
982                         msg->msg_ev.status   = status;
983                 }
984                 msg->msg_ev.unlinked = unlink;
985                 handler = md->md_handler;
986                 if (!unlink)
987                         md->md_flags |= LNET_MD_FLAG_HANDLING;
988         }
989
990         if (unlink || (md->md_refcount == 0 &&
991                        md->md_threshold == LNET_MD_THRESH_INF))
992                 lnet_detach_rsp_tracker(md, cpt);
993
994         msg->msg_md = NULL;
995         if (unlink)
996                 lnet_md_unlink(md);
997
998         lnet_res_unlock(cpt);
999
1000         if (handler) {
1001                 handler(&msg->msg_ev);
1002                 if (!unlink) {
1003                         lnet_res_lock(cpt);
1004                         md->md_flags &= ~LNET_MD_FLAG_HANDLING;
1005                         wake_up_var(md);
1006                         lnet_res_unlock(cpt);
1007                 }
1008         }
1009 }
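/*
 * The LNET_MD_FLAG_HANDLING flag above serializes event handlers on a single
 * MD: it is set before the handler runs and cleared with wake_up_var()
 * afterwards, and lnet_md_wait_handling() (defined elsewhere) presumably
 * drops the resource lock and sleeps on the matching wait primitive until
 * the flag clears. A minimal sketch of that waiting side, under that
 * assumption:
 *
 *	lnet_res_unlock(cpt);
 *	wait_var_event(md, !(md->md_flags & LNET_MD_FLAG_HANDLING));
 *	lnet_res_lock(cpt);
 */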
1010
1011 static bool
1012 lnet_is_health_check(struct lnet_msg *msg)
1013 {
1014         bool hc = true;
1015         int status = msg->msg_ev.status;
1016
1017         if ((!msg->msg_tx_committed && !msg->msg_rx_committed) ||
1018             !msg->msg_onactivelist) {
1019                 CDEBUG(D_NET, "msg %p not committed for send or receive\n",
1020                        msg);
1021                 return false;
1022         }
1023
1024         if ((msg->msg_tx_committed && !msg->msg_txpeer) ||
1025             (msg->msg_rx_committed && !msg->msg_rxpeer)) {
1026                 /* The optimized GET case does not set msg_rxpeer, but status
1027                  * could be zero. Only print the error message if we have a
1028                  * non-zero status.
1029                  */
1030                 if (status)
1031                         CDEBUG(D_NET, "msg %p status %d cannot retry\n", msg,
1032                                status);
1033                 return false;
1034         }
1035
1036         /* Check for status inconsistencies */
1037         if ((!status && msg->msg_health_status != LNET_MSG_STATUS_OK) ||
1038              (status && msg->msg_health_status == LNET_MSG_STATUS_OK)) {
1039                 CDEBUG(D_NET, "Msg %p is in inconsistent state, don't perform health "
1040                        "checking (%d, %d)\n", msg, status,
1041                        msg->msg_health_status);
1042                 hc = false;
1043         }
1044
1045         CDEBUG(D_NET, "health check = %d, status = %d, hstatus = %d\n",
1046                hc, status, msg->msg_health_status);
1047
1048         return hc;
1049 }
1050
1051 char *
1052 lnet_health_error2str(enum lnet_msg_hstatus hstatus)
1053 {
1054         switch (hstatus) {
1055         case LNET_MSG_STATUS_LOCAL_INTERRUPT:
1056                 return "LOCAL_INTERRUPT";
1057         case LNET_MSG_STATUS_LOCAL_DROPPED:
1058                 return "LOCAL_DROPPED";
1059         case LNET_MSG_STATUS_LOCAL_ABORTED:
1060                 return "LOCAL_ABORTED";
1061         case LNET_MSG_STATUS_LOCAL_NO_ROUTE:
1062                 return "LOCAL_NO_ROUTE";
1063         case LNET_MSG_STATUS_LOCAL_TIMEOUT:
1064                 return "LOCAL_TIMEOUT";
1065         case LNET_MSG_STATUS_LOCAL_ERROR:
1066                 return "LOCAL_ERROR";
1067         case LNET_MSG_STATUS_REMOTE_DROPPED:
1068                 return "REMOTE_DROPPED";
1069         case LNET_MSG_STATUS_REMOTE_ERROR:
1070                 return "REMOTE_ERROR";
1071         case LNET_MSG_STATUS_REMOTE_TIMEOUT:
1072                 return "REMOTE_TIMEOUT";
1073         case LNET_MSG_STATUS_NETWORK_TIMEOUT:
1074                 return "NETWORK_TIMEOUT";
1075         case LNET_MSG_STATUS_OK:
1076                 return "OK";
1077         default:
1078                 return "<UNKNOWN>";
1079         }
1080 }
1081
1082 bool
1083 lnet_send_error_simulation(struct lnet_msg *msg,
1084                            enum lnet_msg_hstatus *hstatus)
1085 {
1086         if (!msg)
1087                 return false;
1088
1089         if (list_empty(&the_lnet.ln_drop_rules))
1090                 return false;
1091
1092         /* match only health rules */
1093         if (!lnet_drop_rule_match(&msg->msg_hdr, LNET_NID_ANY,
1094                                   hstatus))
1095                 return false;
1096
1097         CDEBUG(D_NET, "src %s(%s)->dst %s: %s simulate health error: %s\n",
1098                 libcfs_nid2str(msg->msg_hdr.src_nid),
1099                 libcfs_nid2str(msg->msg_txni->ni_nid),
1100                 libcfs_nid2str(msg->msg_hdr.dest_nid),
1101                 lnet_msgtyp2str(msg->msg_type),
1102                 lnet_health_error2str(*hstatus));
1103
1104         return true;
1105 }
1106 EXPORT_SYMBOL(lnet_send_error_simulation);
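/*
 * A hypothetical sketch of how an LND completion path might use the exported
 * helper above for fault injection; the function name and the substituted
 * errno are illustrative, not taken from any real LND:
 *
 *	static void example_lnd_tx_done(struct lnet_msg *msg, int rc)
 *	{
 *		enum lnet_msg_hstatus hstatus;
 *
 *		if (rc == 0 && lnet_send_error_simulation(msg, &hstatus)) {
 *			msg->msg_health_status = hstatus;
 *			rc = -EIO;
 *		}
 *		lnet_finalize(msg, rc);
 *	}
 */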
1107
1108 void
1109 lnet_finalize(struct lnet_msg *msg, int status)
1110 {
1111         struct lnet_msg_container *container;
1112         int my_slot;
1113         int cpt;
1114         int rc;
1115
1116         LASSERT(!in_interrupt());
1117
1118         if (msg == NULL)
1119                 return;
1120
1121         msg->msg_ev.status = status;
1122
1123         if (lnet_is_health_check(msg)) {
1124                 /*
1125                  * Check the health status of the message. If it has one
1126                  * of the errors that we're supposed to handle, and it has
1127                  * not timed out, then:
1128                  *      1. decrement the appropriate health value
1129                  *      2. queue the message on the resend queue
1130                  *
1131                  * If the message send succeeded, timed out or failed the
1132                  * health check for any other reason, then we'll just
1133                  * finalize the message. Otherwise just return, since the
1134                  * message has been put on the resend queue.
1135                  */
1136                 if (!lnet_health_check(msg))
1137                         return;
1138         }
1139
1140         /*
1141          * We're not going to resend this message so detach its MD and invoke
1142          * the appropriate callbacks
1143          */
1144         if (msg->msg_md != NULL)
1145                 lnet_msg_detach_md(msg, status);
1146
1147 again:
1148         if (!msg->msg_tx_committed && !msg->msg_rx_committed) {
1149                 /* not committed to network yet */
1150                 LASSERT(!msg->msg_onactivelist);
1151                 lnet_msg_free(msg);
1152                 return;
1153         }
1154
1155         /*
1156          * NB: a routed message can be committed for both receiving and
1157          * sending; we should finalize in LIFO order to keep the counters
1158          * correct (finalize sending first, then finalize receiving).
1159          */
1160         cpt = msg->msg_tx_committed ? msg->msg_tx_cpt : msg->msg_rx_cpt;
1161         lnet_net_lock(cpt);
1162
1163         container = the_lnet.ln_msg_containers[cpt];
1164
1165         /* Recursion breaker.  Don't complete the message here if I am (or
1166          * enough other threads are) already completing messages */
1167         my_slot = lnet_check_finalize_recursion_locked(msg,
1168                                                 &container->msc_finalizing,
1169                                                 container->msc_nfinalizers,
1170                                                 container->msc_finalizers);
1171
1172         /* enough threads are finalizing */
1173         if (my_slot == -1) {
1174                 lnet_net_unlock(cpt);
1175                 return;
1176         }
1177
1178         rc = 0;
1179         while (!list_empty(&container->msc_finalizing)) {
1180                 msg = list_entry(container->msc_finalizing.next,
1181                                  struct lnet_msg, msg_list);
1182
1183                 list_del_init(&msg->msg_list);
1184
1185                 /* NB drops and regains the lnet lock if it actually does
1186                  * anything, so my finalizing friends can chomp along too */
1187                 rc = lnet_complete_msg_locked(msg, cpt);
1188                 if (rc != 0)
1189                         break;
1190         }
1191
1192         if (unlikely(!list_empty(&the_lnet.ln_delay_rules))) {
1193                 lnet_net_unlock(cpt);
1194                 lnet_delay_rule_check();
1195                 lnet_net_lock(cpt);
1196         }
1197
1198         container->msc_finalizers[my_slot] = NULL;
1199         lnet_net_unlock(cpt);
1200
1201         if (rc != 0)
1202                 goto again;
1203 }
1204 EXPORT_SYMBOL(lnet_finalize);
1205
1206 void
1207 lnet_msg_container_cleanup(struct lnet_msg_container *container)
1208 {
1209         int     count = 0;
1210
1211         if (container->msc_init == 0)
1212                 return;
1213
1214         while (!list_empty(&container->msc_active)) {
1215                 struct lnet_msg *msg;
1216
1217                 msg  = list_entry(container->msc_active.next,
1218                                   struct lnet_msg, msg_activelist);
1219                 LASSERT(msg->msg_onactivelist);
1220                 msg->msg_onactivelist = 0;
1221                 list_del_init(&msg->msg_activelist);
1222                 lnet_msg_free(msg);
1223                 count++;
1224         }
1225
1226         if (count > 0)
1227                 CERROR("%d active msg on exit\n", count);
1228
1229         if (container->msc_finalizers != NULL) {
1230                 CFS_FREE_PTR_ARRAY(container->msc_finalizers,
1231                                    container->msc_nfinalizers);
1232                 container->msc_finalizers = NULL;
1233         }
1234
1235         if (container->msc_resenders != NULL) {
1236                 CFS_FREE_PTR_ARRAY(container->msc_resenders,
1237                                    container->msc_nfinalizers);
1238                 container->msc_resenders = NULL;
1239         }
1240         container->msc_init = 0;
1241 }
1242
1243 int
1244 lnet_msg_container_setup(struct lnet_msg_container *container, int cpt)
1245 {
1246         int rc = 0;
1247
1248         container->msc_init = 1;
1249
1250         INIT_LIST_HEAD(&container->msc_active);
1251         INIT_LIST_HEAD(&container->msc_finalizing);
1252         INIT_LIST_HEAD(&container->msc_resending);
1253
1254         /* number of CPUs */
1255         container->msc_nfinalizers = cfs_cpt_weight(lnet_cpt_table(), cpt);
1256         if (container->msc_nfinalizers == 0)
1257                 container->msc_nfinalizers = 1;
1258
1259         LIBCFS_CPT_ALLOC(container->msc_finalizers, lnet_cpt_table(), cpt,
1260                          container->msc_nfinalizers *
1261                          sizeof(*container->msc_finalizers));
1262
1263         if (container->msc_finalizers == NULL) {
1264                 CERROR("Failed to allocate message finalizers\n");
1265                 lnet_msg_container_cleanup(container);
1266                 return -ENOMEM;
1267         }
1268
1269         LIBCFS_CPT_ALLOC(container->msc_resenders, lnet_cpt_table(), cpt,
1270                          container->msc_nfinalizers *
1271                          sizeof(*container->msc_resenders));
1272
1273         if (container->msc_resenders == NULL) {
1274                 CERROR("Failed to allocate message resenders\n");
1275                 lnet_msg_container_cleanup(container);
1276                 return -ENOMEM;
1277         }
1278
1279         return rc;
1280 }
1281
1282 void
1283 lnet_msg_containers_destroy(void)
1284 {
1285         struct lnet_msg_container *container;
1286         int     i;
1287
1288         if (the_lnet.ln_msg_containers == NULL)
1289                 return;
1290
1291         cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers)
1292                 lnet_msg_container_cleanup(container);
1293
1294         cfs_percpt_free(the_lnet.ln_msg_containers);
1295         the_lnet.ln_msg_containers = NULL;
1296 }
1297
1298 int
1299 lnet_msg_containers_create(void)
1300 {
1301         struct lnet_msg_container *container;
1302         int     rc;
1303         int     i;
1304
1305         the_lnet.ln_msg_containers = cfs_percpt_alloc(lnet_cpt_table(),
1306                                                       sizeof(*container));
1307
1308         if (the_lnet.ln_msg_containers == NULL) {
1309                 CERROR("Failed to allocate cpu-partition data for network\n");
1310                 return -ENOMEM;
1311         }
1312
1313         cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers) {
1314                 rc = lnet_msg_container_setup(container, i);
1315                 if (rc != 0) {
1316                         lnet_msg_containers_destroy();
1317                         return rc;
1318                 }
1319         }
1320
1321         return 0;
1322 }