/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * lnet/lnet/lib-msg.c
 *
 * Message decoding, parsing and finalizing routines
 */

#define DEBUG_SUBSYSTEM S_LNET

#include <lnet/lib-lnet.h>

void
lnet_build_unlink_event(struct lnet_libmd *md, struct lnet_event *ev)
{
	ENTRY;

	memset(ev, 0, sizeof(*ev));

	ev->status   = 0;
	ev->unlinked = 1;
	ev->type     = LNET_EVENT_UNLINK;
	lnet_md_deconstruct(md, ev);
	lnet_md2handle(&ev->md_handle, md);
	EXIT;
}

/*
 * Doesn't need any lock; must be called after lnet_commit_md().
 */
void
lnet_build_msg_event(struct lnet_msg *msg, enum lnet_event_kind ev_type)
{
	struct lnet_hdr *hdr = &msg->msg_hdr;
	struct lnet_event *ev = &msg->msg_ev;

	LASSERT(!msg->msg_routing);

	ev->type = ev_type;
	ev->msg_type = msg->msg_type;

	if (ev_type == LNET_EVENT_SEND) {
		/* event for active message */
		ev->target.nid    = hdr->dest_nid;
		ev->target.pid    = hdr->dest_pid;
		ev->initiator.nid = LNET_ANY_NID;
		ev->initiator.pid = the_lnet.ln_pid;
		ev->source.nid    = LNET_ANY_NID;
		ev->source.pid    = the_lnet.ln_pid;
		ev->sender        = LNET_ANY_NID;
	} else {
		/* event for passive message */
		ev->target.pid    = hdr->dest_pid;
		ev->target.nid    = hdr->dest_nid;
		ev->initiator.pid = hdr->src_pid;
		/* Multi-Rail: resolve src_nid to "primary" peer NID */
		ev->initiator.nid = msg->msg_initiator;
		/* Multi-Rail: track source NID. */
		ev->source.pid    = hdr->src_pid;
		ev->source.nid    = hdr->src_nid;
		ev->rlength       = hdr->payload_length;
		ev->sender        = msg->msg_from;
		ev->mlength       = msg->msg_wanted;
		ev->offset        = msg->msg_offset;
	}

	switch (ev_type) {
	default:
		LBUG();

	case LNET_EVENT_PUT: /* passive PUT */
		ev->pt_index   = hdr->msg.put.ptl_index;
		ev->match_bits = hdr->msg.put.match_bits;
		ev->hdr_data   = hdr->msg.put.hdr_data;
		return;

	case LNET_EVENT_GET: /* passive GET */
		ev->pt_index   = hdr->msg.get.ptl_index;
		ev->match_bits = hdr->msg.get.match_bits;
		ev->hdr_data   = 0;
		return;

	case LNET_EVENT_ACK: /* ACK */
		ev->match_bits = hdr->msg.ack.match_bits;
		ev->mlength    = hdr->msg.ack.mlength;
		return;

	case LNET_EVENT_REPLY: /* REPLY */
		return;

	case LNET_EVENT_SEND: /* active message */
		if (msg->msg_type == LNET_MSG_PUT) {
			ev->pt_index   = le32_to_cpu(hdr->msg.put.ptl_index);
			ev->match_bits = le64_to_cpu(hdr->msg.put.match_bits);
			ev->offset     = le32_to_cpu(hdr->msg.put.offset);
			ev->mlength    =
			ev->rlength    = le32_to_cpu(hdr->payload_length);
			ev->hdr_data   = le64_to_cpu(hdr->msg.put.hdr_data);

		} else {
			LASSERT(msg->msg_type == LNET_MSG_GET);
			ev->pt_index   = le32_to_cpu(hdr->msg.get.ptl_index);
			ev->match_bits = le64_to_cpu(hdr->msg.get.match_bits);
			ev->mlength    =
			ev->rlength    = le32_to_cpu(hdr->msg.get.sink_length);
			ev->offset     = le32_to_cpu(hdr->msg.get.src_offset);
			ev->hdr_data   = 0;
		}
		return;
	}
}

void
lnet_msg_commit(struct lnet_msg *msg, int cpt)
{
	struct lnet_msg_container *container = the_lnet.ln_msg_containers[cpt];
	struct lnet_counters_common *common;
	s64 timeout_ns;

	/* set the message deadline */
	timeout_ns = lnet_transaction_timeout * NSEC_PER_SEC;
	msg->msg_deadline = ktime_add_ns(ktime_get(), timeout_ns);

	/* routed message can be committed for both receiving and sending */
	LASSERT(!msg->msg_tx_committed);

	if (msg->msg_sending) {
		LASSERT(!msg->msg_receiving);
		msg->msg_tx_cpt = cpt;
		msg->msg_tx_committed = 1;
		if (msg->msg_rx_committed) { /* routed message REPLY */
			LASSERT(msg->msg_onactivelist);
			return;
		}
	} else {
		LASSERT(!msg->msg_sending);
		msg->msg_rx_cpt = cpt;
		msg->msg_rx_committed = 1;
	}

	LASSERT(!msg->msg_onactivelist);

	msg->msg_onactivelist = 1;
	list_add_tail(&msg->msg_activelist, &container->msc_active);

	common = &the_lnet.ln_counters[cpt]->lct_common;
	common->lcc_msgs_alloc++;
	if (common->lcc_msgs_alloc > common->lcc_msgs_max)
		common->lcc_msgs_max = common->lcc_msgs_alloc;
}
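
/*
 * Illustrative sketch (not part of the build): the deadline stored above
 * is an absolute ktime, so a later "has this message timed out?" test is
 * a single comparison.  The helper name below is hypothetical; the real
 * check is done inline in lnet_health_check().
 */
#if 0
static bool lnet_msg_past_deadline(struct lnet_msg *msg)
{
	return ktime_compare(ktime_get(), msg->msg_deadline) >= 0;
}
#endif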

static void
lnet_msg_decommit_tx(struct lnet_msg *msg, int status)
{
	struct lnet_counters_common *common;
	struct lnet_event *ev = &msg->msg_ev;

	LASSERT(msg->msg_tx_committed);
	if (status != 0)
		goto out;

	common = &(the_lnet.ln_counters[msg->msg_tx_cpt]->lct_common);
	switch (ev->type) {
	default: /* routed message */
		LASSERT(msg->msg_routing);
		LASSERT(msg->msg_rx_committed);
		LASSERT(ev->type == 0);

		common->lcc_route_length += msg->msg_len;
		common->lcc_route_count++;
		goto incr_stats;

	case LNET_EVENT_PUT:
		/* should have been decommitted */
		LASSERT(!msg->msg_rx_committed);
		/* overwritten while sending ACK */
		LASSERT(msg->msg_type == LNET_MSG_ACK);
		msg->msg_type = LNET_MSG_PUT; /* fix type */
		break;

	case LNET_EVENT_SEND:
		LASSERT(!msg->msg_rx_committed);
		if (msg->msg_type == LNET_MSG_PUT)
			common->lcc_send_length += msg->msg_len;
		break;

	case LNET_EVENT_GET:
		LASSERT(msg->msg_rx_committed);
		/* overwritten while sending reply, we should never be
		 * here for optimized GET */
		LASSERT(msg->msg_type == LNET_MSG_REPLY);
		msg->msg_type = LNET_MSG_GET; /* fix type */
		break;
	}

	common->lcc_send_count++;

incr_stats:
	if (msg->msg_txpeer)
		lnet_incr_stats(&msg->msg_txpeer->lpni_stats,
				msg->msg_type,
				LNET_STATS_TYPE_SEND);
	if (msg->msg_txni)
		lnet_incr_stats(&msg->msg_txni->ni_stats,
				msg->msg_type,
				LNET_STATS_TYPE_SEND);
 out:
	lnet_return_tx_credits_locked(msg);
	msg->msg_tx_committed = 0;
}

static void
lnet_msg_decommit_rx(struct lnet_msg *msg, int status)
{
	struct lnet_counters_common *common;
	struct lnet_event *ev = &msg->msg_ev;

	LASSERT(!msg->msg_tx_committed); /* decommitted or never committed */
	LASSERT(msg->msg_rx_committed);

	if (status != 0)
		goto out;

	common = &(the_lnet.ln_counters[msg->msg_rx_cpt]->lct_common);
	switch (ev->type) {
	default:
		LASSERT(ev->type == 0);
		LASSERT(msg->msg_routing);
		goto incr_stats;

	case LNET_EVENT_ACK:
		LASSERT(msg->msg_type == LNET_MSG_ACK);
		break;

	case LNET_EVENT_GET:
		/* type is "REPLY" if it's an optimized GET on the passive
		 * side, because an optimized GET is never committed for
		 * sending, so the message type wouldn't be changed back to
		 * "GET" by lnet_msg_decommit_tx(); see details in
		 * lnet_parse_get() */
		LASSERT(msg->msg_type == LNET_MSG_REPLY ||
			msg->msg_type == LNET_MSG_GET);
		common->lcc_send_length += msg->msg_wanted;
		break;

	case LNET_EVENT_PUT:
		LASSERT(msg->msg_type == LNET_MSG_PUT);
		break;

	case LNET_EVENT_REPLY:
		/* type is "GET" if it's an optimized GET on the active side;
		 * see details in lnet_create_reply_msg() */
		LASSERT(msg->msg_type == LNET_MSG_GET ||
			msg->msg_type == LNET_MSG_REPLY);
		break;
	}

	common->lcc_recv_count++;

incr_stats:
	if (msg->msg_rxpeer)
		lnet_incr_stats(&msg->msg_rxpeer->lpni_stats,
				msg->msg_type,
				LNET_STATS_TYPE_RECV);
	if (msg->msg_rxni)
		lnet_incr_stats(&msg->msg_rxni->ni_stats,
				msg->msg_type,
				LNET_STATS_TYPE_RECV);
	if (ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_REPLY)
		common->lcc_recv_length += msg->msg_wanted;

 out:
	lnet_return_rx_credits_locked(msg);
	msg->msg_rx_committed = 0;
}

void
lnet_msg_decommit(struct lnet_msg *msg, int cpt, int status)
{
	int	cpt2 = cpt;

	LASSERT(msg->msg_tx_committed || msg->msg_rx_committed);
	LASSERT(msg->msg_onactivelist);

	if (msg->msg_tx_committed) { /* always decommit for sending first */
		LASSERT(cpt == msg->msg_tx_cpt);
		lnet_msg_decommit_tx(msg, status);
	}

	if (msg->msg_rx_committed) {
		/* forwarding msg committed for both receiving and sending */
		if (cpt != msg->msg_rx_cpt) {
			lnet_net_unlock(cpt);
			cpt2 = msg->msg_rx_cpt;
			lnet_net_lock(cpt2);
		}
		lnet_msg_decommit_rx(msg, status);
	}

	list_del(&msg->msg_activelist);
	msg->msg_onactivelist = 0;

	the_lnet.ln_counters[cpt2]->lct_common.lcc_msgs_alloc--;

	if (cpt2 != cpt) {
		lnet_net_unlock(cpt2);
		lnet_net_lock(cpt);
	}
}

void
lnet_msg_attach_md(struct lnet_msg *msg, struct lnet_libmd *md,
		   unsigned int offset, unsigned int mlen)
{
	/* NB: @offset and @mlen are only useful for receiving */
	/* Here we attach the MD to the lnet_msg, marking it busy and
	 * decrementing its threshold. Come what may, the lnet_msg "owns"
	 * the MD until a call to lnet_msg_detach_md() or lnet_finalize()
	 * signals completion. */
	LASSERT(!msg->msg_routing);

	msg->msg_md = md;
	if (msg->msg_receiving) { /* committed for receiving */
		msg->msg_offset = offset;
		msg->msg_wanted = mlen;
	}

	md->md_refcount++;
	if (md->md_threshold != LNET_MD_THRESH_INF) {
		LASSERT(md->md_threshold > 0);
		md->md_threshold--;
	}

	/* build umd in event */
	lnet_md2handle(&msg->msg_ev.md_handle, md);
	lnet_md_deconstruct(md, &msg->msg_ev);
}
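
/*
 * Illustrative sketch (not part of the build): md_threshold counts how
 * many more operations the MD will accept (LNET_MD_THRESH_INF disables
 * the countdown), while md_refcount tracks messages currently holding
 * the MD.  A simplified approximation of the auto-unlink rule follows;
 * the authoritative test is lnet_md_unlinkable().
 */
#if 0
/* an exhausted MD is unlinked once its last owning message completes */
bool unlinkable = (md->md_refcount == 0 && md->md_threshold == 0);
#endif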

static int
lnet_complete_msg_locked(struct lnet_msg *msg, int cpt)
{
	struct lnet_handle_wire ack_wmd;
	int rc;
	int status = msg->msg_ev.status;

	LASSERT(msg->msg_onactivelist);

	if (status == 0 && msg->msg_ack) {
		/* Only send an ACK if the PUT completed successfully */

		lnet_msg_decommit(msg, cpt, 0);

		msg->msg_ack = 0;
		lnet_net_unlock(cpt);

		LASSERT(msg->msg_ev.type == LNET_EVENT_PUT);
		LASSERT(!msg->msg_routing);

		ack_wmd = msg->msg_hdr.msg.put.ack_wmd;

		lnet_prep_send(msg, LNET_MSG_ACK, &msg->msg_ev.source, 0, 0);

		msg->msg_hdr.msg.ack.dst_wmd = ack_wmd;
		msg->msg_hdr.msg.ack.match_bits = msg->msg_ev.match_bits;
		msg->msg_hdr.msg.ack.mlength = cpu_to_le32(msg->msg_ev.mlength);

		rc = lnet_send(&msg->msg_ev.target.nid, msg,
			       &msg->msg_from);

		lnet_net_lock(cpt);
		/*
		 * NB: the message is committed for sending; we should return
		 * on success because the LND will finalize this message
		 * later.
		 *
		 * Also, there is a possibility that the message was committed
		 * for sending but failed before delivery to the LND,
		 * e.g. -ENOMEM. In that case we can't fall through either,
		 * because the CPT for sending can differ from the CPT for
		 * receiving, so we should return to lnet_finalize() to make
		 * sure we are locking the correct partition.
		 */
		return rc;

	} else if (status == 0 &&	/* OK so far */
		   (msg->msg_routing && !msg->msg_sending)) {
		/* not forwarded */
		LASSERT(!msg->msg_receiving);	/* called back recv already */
		lnet_net_unlock(cpt);

		rc = lnet_send(NULL, msg, NULL);

		lnet_net_lock(cpt);
		/*
		 * NB: the message is committed for sending; we should return
		 * on success because the LND will finalize this message
		 * later.
		 *
		 * Also, there is a possibility that the message was committed
		 * for sending but failed before delivery to the LND,
		 * e.g. -ENOMEM. In that case we can't fall through either:
		 * - The rule is that a message must decommit for sending
		 *   first if it's committed for both sending and receiving.
		 * - The CPT for sending can differ from the CPT for
		 *   receiving, so we should return to lnet_finalize() to
		 *   make sure we are locking the correct partition.
		 */
		return rc;
	}

	lnet_msg_decommit(msg, cpt, status);
	lnet_msg_free(msg);
	return 0;
}

static void
lnet_dec_healthv_locked(atomic_t *healthv, int sensitivity)
{
	int h = atomic_read(healthv);

	if (h < sensitivity) {
		atomic_set(healthv, 0);
	} else {
		h -= sensitivity;
		atomic_set(healthv, h);
	}
}
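
/*
 * Illustrative sketch (not part of the build): the decrement above
 * saturates at zero rather than going negative.  A minimal userspace
 * analogue, with hypothetical names, behaves like:
 */
#if 0
static int dec_healthv(int healthv, int sensitivity)
{
	return healthv < sensitivity ? 0 : healthv - sensitivity;
}

/* dec_healthv(1000, 100) == 900; dec_healthv(50, 100) == 0 (clamped) */
#endif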

/* must hold net_lock/0 */
void
lnet_ni_add_to_recoveryq_locked(struct lnet_ni *ni,
				struct list_head *recovery_queue, time64_t now)
{
	if (!list_empty(&ni->ni_recovery))
		return;

	if (atomic_read(&ni->ni_healthv) == LNET_MAX_HEALTH_VALUE)
		return;

	/* This NI is going on the recovery queue, so take a ref on it */
	lnet_ni_addref_locked(ni, 0);

	lnet_ni_set_next_ping(ni, now);

	CDEBUG(D_NET, "%s added to recovery queue. ping count: %u next ping: %lld health: %d\n",
	       libcfs_nidstr(&ni->ni_nid),
	       ni->ni_ping_count,
	       ni->ni_next_ping,
	       atomic_read(&ni->ni_healthv));

	list_add_tail(&ni->ni_recovery, recovery_queue);
}

static void
lnet_handle_local_failure(struct lnet_ni *local_ni)
{
	/*
	 * lnet_net_lock(0) is used to protect the addref on the ni
	 * and the recovery queue.
	 */
	lnet_net_lock(0);
	/* the mt could've shut down and cleaned up the queues */
	if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING) {
		lnet_net_unlock(0);
		return;
	}

	lnet_dec_healthv_locked(&local_ni->ni_healthv, lnet_health_sensitivity);
	lnet_ni_add_to_recoveryq_locked(local_ni, &the_lnet.ln_mt_localNIRecovq,
					ktime_get_seconds());
	lnet_net_unlock(0);
}

/* must hold net_lock/0 */
void
lnet_handle_remote_failure_locked(struct lnet_peer_ni *lpni)
{
	__u32 sensitivity = lnet_health_sensitivity;
	__u32 lp_sensitivity;

	/*
	 * If the peer has a health sensitivity set, then use that
	 * instead of the globally configured one.
	 */
	lp_sensitivity = lpni->lpni_peer_net->lpn_peer->lp_health_sensitivity;
	if (lp_sensitivity)
		sensitivity = lp_sensitivity;

	lnet_dec_healthv_locked(&lpni->lpni_healthv, sensitivity);

	/* update the peer_net's health value */
	lnet_update_peer_net_healthv(lpni);

	/*
	 * Add the peer NI to the recovery queue if it's not already there
	 * and its health value is actually below the maximum. It's
	 * possible that the sensitivity is set to 0, in which case the
	 * health value will not be reduced and there is no reason to
	 * invoke recovery.
	 */
	lnet_peer_ni_add_to_recoveryq_locked(lpni,
					     &the_lnet.ln_mt_peerNIRecovq,
					     ktime_get_seconds());
}
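
/*
 * Illustrative sketch (not part of the build): the sensitivity selection
 * above amounts to "the peer setting wins when nonzero".  With a
 * hypothetical peer whose lp_health_sensitivity is 50 and a global
 * lnet_health_sensitivity of 100, a failure costs that peer NI 50 health
 * points rather than 100:
 */
#if 0
__u32 lp = lpni->lpni_peer_net->lpn_peer->lp_health_sensitivity; /* 50 */
__u32 effective = lp ? lp : lnet_health_sensitivity;		 /* 50 */
lnet_dec_healthv_locked(&lpni->lpni_healthv, effective);
#endif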

static void
lnet_handle_remote_failure(struct lnet_peer_ni *lpni)
{
	/* lpni could be NULL if we're in the LOLND case */
	if (!lpni)
		return;

	lnet_net_lock(0);
	/* the mt could've shut down and cleaned up the queues */
	if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING) {
		lnet_net_unlock(0);
		return;
	}
	lnet_handle_remote_failure_locked(lpni);
	lnet_net_unlock(0);
}

static void
lnet_incr_hstats(struct lnet_ni *ni, struct lnet_peer_ni *lpni,
		 enum lnet_msg_hstatus hstatus)
{
	struct lnet_counters_health *health;

	health = &the_lnet.ln_counters[0]->lct_health;

	switch (hstatus) {
	case LNET_MSG_STATUS_LOCAL_INTERRUPT:
		atomic_inc(&ni->ni_hstats.hlt_local_interrupt);
		health->lch_local_interrupt_count++;
		break;
	case LNET_MSG_STATUS_LOCAL_DROPPED:
		atomic_inc(&ni->ni_hstats.hlt_local_dropped);
		health->lch_local_dropped_count++;
		break;
	case LNET_MSG_STATUS_LOCAL_ABORTED:
		atomic_inc(&ni->ni_hstats.hlt_local_aborted);
		health->lch_local_aborted_count++;
		break;
	case LNET_MSG_STATUS_LOCAL_NO_ROUTE:
		atomic_inc(&ni->ni_hstats.hlt_local_no_route);
		health->lch_local_no_route_count++;
		break;
	case LNET_MSG_STATUS_LOCAL_TIMEOUT:
		atomic_inc(&ni->ni_hstats.hlt_local_timeout);
		health->lch_local_timeout_count++;
		break;
	case LNET_MSG_STATUS_LOCAL_ERROR:
		atomic_inc(&ni->ni_hstats.hlt_local_error);
		health->lch_local_error_count++;
		break;
	case LNET_MSG_STATUS_REMOTE_DROPPED:
		if (lpni)
			atomic_inc(&lpni->lpni_hstats.hlt_remote_dropped);
		health->lch_remote_dropped_count++;
		break;
	case LNET_MSG_STATUS_REMOTE_ERROR:
		if (lpni)
			atomic_inc(&lpni->lpni_hstats.hlt_remote_error);
		health->lch_remote_error_count++;
		break;
	case LNET_MSG_STATUS_REMOTE_TIMEOUT:
		if (lpni)
			atomic_inc(&lpni->lpni_hstats.hlt_remote_timeout);
		health->lch_remote_timeout_count++;
		break;
	case LNET_MSG_STATUS_NETWORK_TIMEOUT:
		if (lpni)
			atomic_inc(&lpni->lpni_hstats.hlt_network_timeout);
		health->lch_network_timeout_count++;
		break;
	case LNET_MSG_STATUS_OK:
		break;
	default:
		LBUG();
	}
}

static void
lnet_resend_msg_locked(struct lnet_msg *msg)
{
	msg->msg_retry_count++;

	/*
	 * Remove the message from the active list and reset it to prepare
	 * for a resend. There are two exceptions to this:
	 *
	 * 1. The router case. When a message is being routed it is
	 * committed for rx when received and committed for tx when
	 * forwarded. We don't want to remove it from the active list, since
	 * the code which handles receiving expects it to remain on the
	 * active list.
	 *
	 * 2. The REPLY case. Reply messages use the same message
	 * structure for the GET that was received.
	 */
	if (!msg->msg_routing && msg->msg_type != LNET_MSG_REPLY) {
		list_del_init(&msg->msg_activelist);
		msg->msg_onactivelist = 0;
	}
	/*
	 * The msg_target.nid which was originally set
	 * when calling LNetGet() or LNetPut() might've
	 * been overwritten if we're routing this message.
	 * Call lnet_msg_decommit_tx() to return the credit
	 * this message consumed. The message will
	 * consume another credit when it gets resent.
	 */
	msg->msg_target.nid = msg->msg_hdr.dest_nid;
	lnet_msg_decommit_tx(msg, -EAGAIN);
	msg->msg_sending = 0;
	msg->msg_receiving = 0;
	msg->msg_target_is_router = 0;

	CDEBUG(D_NET, "%s->%s:%s:%s - queuing msg (%p) for resend\n",
	       libcfs_nidstr(&msg->msg_hdr.src_nid),
	       libcfs_nidstr(&msg->msg_hdr.dest_nid),
	       lnet_msgtyp2str(msg->msg_type),
	       lnet_health_error2str(msg->msg_health_status), msg);

	list_add_tail(&msg->msg_list, the_lnet.ln_mt_resendqs[msg->msg_tx_cpt]);

	complete(&the_lnet.ln_mt_wait_complete);
}

int
lnet_check_finalize_recursion_locked(struct lnet_msg *msg,
				     struct list_head *containerq,
				     int nworkers, void **workers)
{
	int my_slot = -1;
	int i;

	list_add_tail(&msg->msg_list, containerq);

	for (i = 0; i < nworkers; i++) {
		if (workers[i] == current)
			break;

		if (my_slot < 0 && workers[i] == NULL)
			my_slot = i;
	}

	if (i < nworkers || my_slot < 0)
		return -1;

	workers[my_slot] = current;

	return my_slot;
}
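
/*
 * Illustrative sketch (not part of the build): callers queue the message
 * first, then drain the whole queue only if they won a free worker slot;
 * a thread that is already on the worker list (or finds no free slot)
 * returns immediately, which is what breaks the recursion.  The names
 * "queue", "workers", "nworkers" and process_one() below are
 * hypothetical; see lnet_attempt_msg_resend() and lnet_finalize() for
 * the two real call sites.
 */
#if 0
my_slot = lnet_check_finalize_recursion_locked(msg, &queue,
					       nworkers, workers);
if (my_slot < 0)
	return;		/* another thread will drain the queue */

while ((msg = list_first_entry_or_null(&queue, struct lnet_msg,
				       msg_list)) != NULL) {
	list_del(&msg->msg_list);
	process_one(msg);	/* hypothetical per-message handler */
}
workers[my_slot] = NULL;	/* release the slot */
#endif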

int
lnet_attempt_msg_resend(struct lnet_msg *msg)
{
	struct lnet_msg_container *container;
	int my_slot;
	int cpt;

	/* we can only resend tx_committed messages */
	LASSERT(msg->msg_tx_committed);

	/* don't resend recovery messages */
	if (msg->msg_recovery) {
		CDEBUG(D_NET, "msg %s->%s is a recovery ping. retry# %d\n",
			libcfs_nidstr(&msg->msg_from),
			libcfs_nidstr(&msg->msg_target.nid),
			msg->msg_retry_count);
		return -ENOTRECOVERABLE;
	}

	/*
	 * if we explicitly indicated we don't want to resend then just
	 * return
	 */
	if (msg->msg_no_resend) {
		CDEBUG(D_NET, "msg %s->%s requested no resend. retry# %d\n",
			libcfs_nidstr(&msg->msg_from),
			libcfs_nidstr(&msg->msg_target.nid),
			msg->msg_retry_count);
		return -ENOTRECOVERABLE;
	}

	/* check if the message has exceeded the number of retries */
	if (msg->msg_retry_count >= lnet_retry_count) {
		CNETERR("msg %s->%s exceeded retry count %d\n",
			libcfs_nidstr(&msg->msg_from),
			libcfs_nidstr(&msg->msg_target.nid),
			msg->msg_retry_count);
		return -ENOTRECOVERABLE;
	}

	cpt = msg->msg_tx_cpt;
	lnet_net_lock(cpt);

	/* check again under lock */
	if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING) {
		lnet_net_unlock(cpt);
		return -ESHUTDOWN;
	}

	container = the_lnet.ln_msg_containers[cpt];
	my_slot =
		lnet_check_finalize_recursion_locked(msg,
					&container->msc_resending,
					container->msc_nfinalizers,
					container->msc_resenders);

	/* enough threads are resending */
	if (my_slot == -1) {
		lnet_net_unlock(cpt);
		return 0;
	}

	while ((msg = list_first_entry_or_null(&container->msc_resending,
					       struct lnet_msg,
					       msg_list)) != NULL) {
		list_del(&msg->msg_list);

		/*
		 * Resending the message will require us to call
		 * lnet_msg_decommit_tx(), which will return the credit
		 * this message holds. That could trigger another
		 * queued message to be sent. If that message fails and
		 * requires a resend we will recurse.
		 * But since at this point the slot is taken, the message
		 * will be queued in the container and dealt with
		 * later. This breaks the recursion.
		 */
		lnet_resend_msg_locked(msg);
	}

	/*
	 * msc_resenders is an array of process pointers; each entry holds
	 * a pointer to a thread currently resending messages, and there is
	 * one such array per CPT. If an array slot is already set, it
	 * means a thread on this CPT is currently resending.
	 * Once this thread finishes, clear the slot so other threads can
	 * take on resend work.
	 */
	container->msc_resenders[my_slot] = NULL;
	lnet_net_unlock(cpt);

	return 0;
}

/*
 * Do a health check on the message:
 * Return -1 if we're not going to handle the error, or if we've reached
 * the maximum number of retries; the success case also returns -1.
 * Return 0 if the message has been queued for resending.
 */
static int
lnet_health_check(struct lnet_msg *msg)
{
	enum lnet_msg_hstatus hstatus = msg->msg_health_status;
	struct lnet_peer_ni *lpni;
	struct lnet_ni *ni;
	bool lo = false;
	bool attempt_local_resend;
	bool attempt_remote_resend;
	bool handle_local_health;
	bool handle_remote_health;

	/* if we're shutting down there's no point in handling health */
	if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
		return -1;

	LASSERT(msg->msg_tx_committed || msg->msg_rx_committed);

	/*
	 * if we're sending to the LOLND then msg_txpeer will not be set,
	 * so there is no need to sanity check it.
	 */
	if (msg->msg_tx_committed &&
	    !nid_is_lo0(&msg->msg_txni->ni_nid))
		LASSERT(msg->msg_txpeer);
	else if (msg->msg_tx_committed &&
		 nid_is_lo0(&msg->msg_txni->ni_nid))
		lo = true;

	if (hstatus != LNET_MSG_STATUS_OK &&
	    ktime_compare(ktime_get(), msg->msg_deadline) >= 0)
		return -1;

	/*
	 * always prefer txni/txpeer if the message is committed for both
	 * directions.
	 */
	if (msg->msg_tx_committed) {
		ni = msg->msg_txni;
		lpni = msg->msg_txpeer;
		attempt_local_resend = attempt_remote_resend = true;
	} else {
		ni = msg->msg_rxni;
		lpni = msg->msg_rxpeer;
		attempt_local_resend = attempt_remote_resend = false;
	}

	if (!lo)
		LASSERT(ni && lpni);
	else
		LASSERT(ni);

	CDEBUG(D_NET, "health check: %s->%s: %s: %s\n",
	       libcfs_nidstr(&ni->ni_nid),
	       (lo) ? "self" : libcfs_nidstr(&lpni->lpni_nid),
	       lnet_msgtyp2str(msg->msg_type),
	       lnet_health_error2str(hstatus));

	/*
	 * Stats are only incremented for errors, so avoid wasting time
	 * incrementing statistics if there is no error. Similarly, whether
	 * to update health values or perform resends is only applicable
	 * for messages with a health status != OK.
	 */
	if (hstatus != LNET_MSG_STATUS_OK) {
		/* Don't further decrement the health value if a recovery
		 * message failed.
		 */
		if (msg->msg_recovery)
			handle_local_health = handle_remote_health = false;
		else
			handle_local_health = handle_remote_health = true;

		/* For local failures, health/recovery/resends are not needed
		 * if I only have a single (non-lolnd) interface. NB: pb_nnis
		 * includes the lolnd interface, so a single-rail node would
		 * have pb_nnis == 2.
		 */
		if (the_lnet.ln_ping_target->pb_nnis <= 2) {
			handle_local_health = false;
			attempt_local_resend = false;
		}

		lnet_net_lock(0);
		lnet_incr_hstats(ni, lpni, hstatus);
		/* For remote failures, health/recovery/resends are not needed
		 * if the peer only has a single interface. Special case for
		 * routers, where we rely on the health feature to manage
		 * route aliveness. NB: unlike pb_nnis above, lp_nnis does
		 * _not_ include the lolnd, so a single-rail node would have
		 * lp_nnis == 1.
		 */
		if (lpni && lpni->lpni_peer_net &&
		    lpni->lpni_peer_net->lpn_peer &&
		    lpni->lpni_peer_net->lpn_peer->lp_nnis <= 1) {
			attempt_remote_resend = false;
			if (!lnet_isrouter(lpni))
				handle_remote_health = false;
		}
		/* Do not put my interfaces into peer NI recovery. They should
		 * be handled with local NI recovery.
		 */
		if (handle_remote_health && lpni &&
		    lnet_nid_to_ni_locked(&lpni->lpni_nid, 0))
			handle_remote_health = false;
		lnet_net_unlock(0);
	}

	switch (hstatus) {
	case LNET_MSG_STATUS_OK:
		/*
		 * Increment the local ni health whether we successfully
		 * received or sent a message on it.
		 *
		 * Ping counts are reset to 0 as appropriate to allow for
		 * faster recovery.
		 */
		lnet_inc_healthv(&ni->ni_healthv, lnet_health_sensitivity);
		/*
		 * It's possible msg_txpeer is NULL in the LOLND
		 * case. Only increment the peer's health if we're
		 * receiving a message from it. It's the only sure way to
		 * know that a remote interface is up.
		 * If this interface is part of a router, then take that
		 * as an indication that the router is fully healthy.
		 */
		if (lpni && msg->msg_rx_committed) {
			lnet_net_lock(0);
			lpni->lpni_ping_count = 0;
			ni->ni_ping_count = 0;
			/*
			 * If we're receiving a message from the router or
			 * I'm a router, then set that lpni's health to
			 * maximum so we can commence communication
			 */
			if (lnet_isrouter(lpni) || the_lnet.ln_routing) {
				lnet_set_lpni_healthv_locked(lpni,
					LNET_MAX_HEALTH_VALUE);
			} else {
				__u32 sensitivity = lpni->lpni_peer_net->
					lpn_peer->lp_health_sensitivity;

				lnet_inc_lpni_healthv_locked(lpni,
					(sensitivity) ? sensitivity :
					lnet_health_sensitivity);
				/* This peer NI may have previously aged out
				 * of recovery. Now that we've received a
				 * message from it, we can continue recovery
				 * if its health value is still below the
				 * maximum.
				 */
				lnet_peer_ni_add_to_recoveryq_locked(lpni,
						&the_lnet.ln_mt_peerNIRecovq,
						ktime_get_seconds());
			}
			lnet_net_unlock(0);
		}

		/* we can finalize this message */
		return -1;
	case LNET_MSG_STATUS_LOCAL_INTERRUPT:
	case LNET_MSG_STATUS_LOCAL_DROPPED:
	case LNET_MSG_STATUS_LOCAL_ABORTED:
	case LNET_MSG_STATUS_LOCAL_NO_ROUTE:
	case LNET_MSG_STATUS_LOCAL_TIMEOUT:
		if (handle_local_health)
			lnet_handle_local_failure(ni);
		if (attempt_local_resend)
			return lnet_attempt_msg_resend(msg);
		break;
	case LNET_MSG_STATUS_LOCAL_ERROR:
		if (handle_local_health)
			lnet_handle_local_failure(ni);
		return -1;
	case LNET_MSG_STATUS_REMOTE_DROPPED:
		if (handle_remote_health)
			lnet_handle_remote_failure(lpni);
		if (attempt_remote_resend)
			return lnet_attempt_msg_resend(msg);
		break;
	case LNET_MSG_STATUS_REMOTE_ERROR:
	case LNET_MSG_STATUS_REMOTE_TIMEOUT:
		if (handle_remote_health)
			lnet_handle_remote_failure(lpni);
		return -1;
	case LNET_MSG_STATUS_NETWORK_TIMEOUT:
		if (handle_remote_health)
			lnet_handle_remote_failure(lpni);
		if (handle_local_health)
			lnet_handle_local_failure(ni);
		return -1;
	default:
		LBUG();
	}

	/* no resend is needed */
	return -1;
}
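
/*
 * Illustrative sketch (not part of the build): how a caller interprets
 * the return value above.  This mirrors the check in lnet_finalize():
 * 0 means "the message is now on a resend queue, stop here"; any
 * negative value means "finalize the message now".
 */
#if 0
if (lnet_health_check(msg) == 0)
	return;		/* requeued for resend; nothing more to do */
/* otherwise fall through: detach the MD and complete the message */
#endif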

static void
lnet_msg_detach_md(struct lnet_msg *msg, int status)
{
	struct lnet_libmd *md = msg->msg_md;
	lnet_handler_t handler = NULL;
	int cpt = lnet_cpt_of_cookie(md->md_lh.lh_cookie);
	int unlink;

	lnet_res_lock(cpt);
	while (md->md_flags & LNET_MD_FLAG_HANDLING)
		/* An event handler is running - wait for it to
		 * complete to avoid races.
		 */
		lnet_md_wait_handling(md, cpt);

	/* Now it's safe to drop my caller's ref */
	md->md_refcount--;
	LASSERT(md->md_refcount >= 0);

	unlink = lnet_md_unlinkable(md);
	if (md->md_handler) {
		if ((md->md_flags & LNET_MD_FLAG_ABORTED) && !status) {
			msg->msg_ev.status   = -ETIMEDOUT;
			CDEBUG(D_NET, "md 0x%p already unlinked\n", md);
		} else {
			msg->msg_ev.status   = status;
		}
		msg->msg_ev.unlinked = unlink;
		handler = md->md_handler;
		if (!unlink)
			md->md_flags |= LNET_MD_FLAG_HANDLING;
	}

	if (unlink || (md->md_refcount == 0 &&
		       md->md_threshold == LNET_MD_THRESH_INF))
		lnet_detach_rsp_tracker(md, cpt);

	msg->msg_md = NULL;
	if (unlink)
		lnet_md_unlink(md);

	lnet_res_unlock(cpt);

	if (handler) {
		handler(&msg->msg_ev);
		if (!unlink) {
			lnet_res_lock(cpt);
			md->md_flags &= ~LNET_MD_FLAG_HANDLING;
			wake_up_var(md);
			lnet_res_unlock(cpt);
		}
	}
}

static bool
lnet_is_health_check(struct lnet_msg *msg)
{
	bool hc = true;
	int status = msg->msg_ev.status;

	if ((!msg->msg_tx_committed && !msg->msg_rx_committed) ||
	    !msg->msg_onactivelist) {
		CDEBUG(D_NET, "msg %p not committed for send or receive\n",
		       msg);
		return false;
	}

	if ((msg->msg_tx_committed && !msg->msg_txpeer) ||
	    (msg->msg_rx_committed && !msg->msg_rxpeer)) {
		/* The optimized GET case does not set msg_rxpeer, but status
		 * could be zero. Only print the error message if we have a
		 * non-zero status.
		 */
		if (status)
			CDEBUG(D_NET, "msg %p status %d cannot retry\n", msg,
			       status);
		return false;
	}

	/* Check for status inconsistencies */
	if ((!status && msg->msg_health_status != LNET_MSG_STATUS_OK) ||
	     (status && msg->msg_health_status == LNET_MSG_STATUS_OK)) {
		CDEBUG(D_NET, "msg %p is in an inconsistent state, don't perform health checking (%d, %d)\n",
		       msg, status, msg->msg_health_status);
		hc = false;
	}

	CDEBUG(D_NET, "health check = %d, status = %d, hstatus = %d\n",
	       hc, status, msg->msg_health_status);

	return hc;
}

char *
lnet_health_error2str(enum lnet_msg_hstatus hstatus)
{
	switch (hstatus) {
	case LNET_MSG_STATUS_LOCAL_INTERRUPT:
		return "LOCAL_INTERRUPT";
	case LNET_MSG_STATUS_LOCAL_DROPPED:
		return "LOCAL_DROPPED";
	case LNET_MSG_STATUS_LOCAL_ABORTED:
		return "LOCAL_ABORTED";
	case LNET_MSG_STATUS_LOCAL_NO_ROUTE:
		return "LOCAL_NO_ROUTE";
	case LNET_MSG_STATUS_LOCAL_TIMEOUT:
		return "LOCAL_TIMEOUT";
	case LNET_MSG_STATUS_LOCAL_ERROR:
		return "LOCAL_ERROR";
	case LNET_MSG_STATUS_REMOTE_DROPPED:
		return "REMOTE_DROPPED";
	case LNET_MSG_STATUS_REMOTE_ERROR:
		return "REMOTE_ERROR";
	case LNET_MSG_STATUS_REMOTE_TIMEOUT:
		return "REMOTE_TIMEOUT";
	case LNET_MSG_STATUS_NETWORK_TIMEOUT:
		return "NETWORK_TIMEOUT";
	case LNET_MSG_STATUS_OK:
		return "OK";
	default:
		return "<UNKNOWN>";
	}
}

bool
lnet_send_error_simulation(struct lnet_msg *msg,
			   enum lnet_msg_hstatus *hstatus)
{
	if (!msg)
		return false;

	if (list_empty(&the_lnet.ln_drop_rules))
		return false;

	/* match only health rules */
	if (!lnet_drop_rule_match(&msg->msg_hdr, NULL, hstatus))
		return false;

	CDEBUG(D_NET, "src %s(%s)->dst %s: %s simulate health error: %s\n",
	       libcfs_nidstr(&msg->msg_hdr.src_nid),
	       libcfs_nidstr(&msg->msg_txni->ni_nid),
	       libcfs_nidstr(&msg->msg_hdr.dest_nid),
	       lnet_msgtyp2str(msg->msg_type),
	       lnet_health_error2str(*hstatus));

	return true;
}
EXPORT_SYMBOL(lnet_send_error_simulation);

void
lnet_finalize(struct lnet_msg *msg, int status)
{
	struct lnet_msg_container *container;
	int my_slot;
	int cpt;
	int rc;

	LASSERT(!in_interrupt());

	if (msg == NULL)
		return;

	msg->msg_ev.status = status;

	if (lnet_is_health_check(msg)) {
		/*
		 * Check the health status of the message. If it has one
		 * of the errors that we're supposed to handle, and it has
		 * not timed out, then
		 *	1. Decrement the appropriate health value
		 *	2. Queue the message on the resend queue
		 *
		 * If the message send succeeded, timed out or failed the
		 * health check for any other reason then we'll just
		 * finalize the message. Otherwise just return, since the
		 * message has been put on the resend queue.
		 */
		if (!lnet_health_check(msg))
			return;
	}

	/*
	 * We're not going to resend this message, so detach its MD and
	 * invoke the appropriate callbacks.
	 */
	if (msg->msg_md != NULL)
		lnet_msg_detach_md(msg, status);

again:
	if (!msg->msg_tx_committed && !msg->msg_rx_committed) {
		/* not committed to network yet */
		LASSERT(!msg->msg_onactivelist);
		lnet_msg_free(msg);
		return;
	}

	/*
	 * NB: a routed message can be committed for both receiving and
	 * sending; we should finalize in LIFO order and keep the counters
	 * correct (finalize sending first, then finalize receiving).
	 */
	cpt = msg->msg_tx_committed ? msg->msg_tx_cpt : msg->msg_rx_cpt;
	lnet_net_lock(cpt);

	container = the_lnet.ln_msg_containers[cpt];

	/* Recursion breaker.  Don't complete the message here if I am (or
	 * enough other threads are) already completing messages */
	my_slot = lnet_check_finalize_recursion_locked(msg,
						&container->msc_finalizing,
						container->msc_nfinalizers,
						container->msc_finalizers);

	/* enough threads are finalizing */
	if (my_slot == -1) {
		lnet_net_unlock(cpt);
		return;
	}

	rc = 0;
	while ((msg = list_first_entry_or_null(&container->msc_finalizing,
					       struct lnet_msg,
					       msg_list)) != NULL) {
		list_del_init(&msg->msg_list);

		/* NB drops and regains the lnet lock if it actually does
		 * anything, so my finalizing friends can chomp along too */
		rc = lnet_complete_msg_locked(msg, cpt);
		if (rc != 0)
			break;
	}

	if (unlikely(!list_empty(&the_lnet.ln_delay_rules))) {
		lnet_net_unlock(cpt);
		lnet_delay_rule_check();
		lnet_net_lock(cpt);
	}

	container->msc_finalizers[my_slot] = NULL;
	lnet_net_unlock(cpt);

	if (rc != 0)
		goto again;
}
EXPORT_SYMBOL(lnet_finalize);

void
lnet_msg_container_cleanup(struct lnet_msg_container *container)
{
	struct lnet_msg *msg;
	int count = 0;

	if (container->msc_init == 0)
		return;

	while ((msg = list_first_entry_or_null(&container->msc_active,
					       struct lnet_msg,
					       msg_activelist)) != NULL) {
		LASSERT(msg->msg_onactivelist);
		msg->msg_onactivelist = 0;
		list_del_init(&msg->msg_activelist);
		lnet_msg_free(msg);
		count++;
	}

	if (count > 0)
		CERROR("%d active msgs on exit\n", count);

	if (container->msc_finalizers != NULL) {
		CFS_FREE_PTR_ARRAY(container->msc_finalizers,
				   container->msc_nfinalizers);
		container->msc_finalizers = NULL;
	}

	if (container->msc_resenders != NULL) {
		CFS_FREE_PTR_ARRAY(container->msc_resenders,
				   container->msc_nfinalizers);
		container->msc_resenders = NULL;
	}
	container->msc_init = 0;
}

int
lnet_msg_container_setup(struct lnet_msg_container *container, int cpt)
{
	int rc = 0;

	container->msc_init = 1;

	INIT_LIST_HEAD(&container->msc_active);
	INIT_LIST_HEAD(&container->msc_finalizing);
	INIT_LIST_HEAD(&container->msc_resending);

	/* number of CPUs in this partition */
	container->msc_nfinalizers = cfs_cpt_weight(lnet_cpt_table(), cpt);
	if (container->msc_nfinalizers == 0)
		container->msc_nfinalizers = 1;

	LIBCFS_CPT_ALLOC(container->msc_finalizers, lnet_cpt_table(), cpt,
			 container->msc_nfinalizers *
			 sizeof(*container->msc_finalizers));

	if (container->msc_finalizers == NULL) {
		CERROR("Failed to allocate message finalizers\n");
		lnet_msg_container_cleanup(container);
		return -ENOMEM;
	}

	LIBCFS_CPT_ALLOC(container->msc_resenders, lnet_cpt_table(), cpt,
			 container->msc_nfinalizers *
			 sizeof(*container->msc_resenders));

	if (container->msc_resenders == NULL) {
		CERROR("Failed to allocate message resenders\n");
		lnet_msg_container_cleanup(container);
		return -ENOMEM;
	}

	return rc;
}

void
lnet_msg_containers_destroy(void)
{
	struct lnet_msg_container *container;
	int i;

	if (the_lnet.ln_msg_containers == NULL)
		return;

	cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers)
		lnet_msg_container_cleanup(container);

	cfs_percpt_free(the_lnet.ln_msg_containers);
	the_lnet.ln_msg_containers = NULL;
}

int
lnet_msg_containers_create(void)
{
	struct lnet_msg_container *container;
	int rc;
	int i;

	the_lnet.ln_msg_containers = cfs_percpt_alloc(lnet_cpt_table(),
						      sizeof(*container));

	if (the_lnet.ln_msg_containers == NULL) {
		CERROR("Failed to allocate cpu-partition data for network\n");
		return -ENOMEM;
	}

	cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers) {
		rc = lnet_msg_container_setup(container, i);
		if (rc != 0) {
			lnet_msg_containers_destroy();
			return rc;
		}
	}

	return 0;
}