/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/lnet/lib-msg.c
 *
 * Message decoding, parsing and finalizing routines
 */

#define DEBUG_SUBSYSTEM S_LNET

#include <lnet/lib-lnet.h>

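/*
 * Build the event describing an auto-unlinked MD.  The event carries no
 * message state: it only records that @md has gone away, so every field
 * other than the MD description is zeroed.
 */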
void
lnet_build_unlink_event(struct lnet_libmd *md, struct lnet_event *ev)
{
        ENTRY;

        memset(ev, 0, sizeof(*ev));

        ev->status   = 0;
        ev->unlinked = 1;
        ev->type     = LNET_EVENT_UNLINK;
        lnet_md_deconstruct(md, &ev->md);
        lnet_md2handle(&ev->md_handle, md);
        EXIT;
}

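/*
 * Fill in msg::msg_ev from the message header and type.  For an active
 * (locally initiated) SEND the peer-side fields are not yet known and are
 * set to LNET_NID_ANY; for a passive message they are decoded from the
 * wire header.
 */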
/*
 * No lock needed; must be called after lnet_commit_md
 */
void
lnet_build_msg_event(struct lnet_msg *msg, enum lnet_event_kind ev_type)
{
        struct lnet_hdr *hdr = &msg->msg_hdr;
        struct lnet_event *ev = &msg->msg_ev;

        LASSERT(!msg->msg_routing);

        ev->type = ev_type;
        ev->msg_type = msg->msg_type;

        if (ev_type == LNET_EVENT_SEND) {
                /* event for active message */
                ev->target.nid    = le64_to_cpu(hdr->dest_nid);
                ev->target.pid    = le32_to_cpu(hdr->dest_pid);
                ev->initiator.nid = LNET_NID_ANY;
                ev->initiator.pid = the_lnet.ln_pid;
                ev->source.nid    = LNET_NID_ANY;
                ev->source.pid    = the_lnet.ln_pid;
                ev->sender        = LNET_NID_ANY;
        } else {
                /* event for passive message */
                ev->target.pid    = hdr->dest_pid;
                ev->target.nid    = hdr->dest_nid;
                ev->initiator.pid = hdr->src_pid;
                /* Multi-Rail: resolve src_nid to "primary" peer NID */
                ev->initiator.nid = msg->msg_initiator;
                /* Multi-Rail: track source NID. */
                ev->source.pid    = hdr->src_pid;
                ev->source.nid    = hdr->src_nid;
                ev->rlength       = hdr->payload_length;
                ev->sender        = msg->msg_from;
                ev->mlength       = msg->msg_wanted;
                ev->offset        = msg->msg_offset;
        }

        switch (ev_type) {
        default:
                LBUG();

        case LNET_EVENT_PUT: /* passive PUT */
                ev->pt_index   = hdr->msg.put.ptl_index;
                ev->match_bits = hdr->msg.put.match_bits;
                ev->hdr_data   = hdr->msg.put.hdr_data;
                return;

        case LNET_EVENT_GET: /* passive GET */
                ev->pt_index   = hdr->msg.get.ptl_index;
                ev->match_bits = hdr->msg.get.match_bits;
                ev->hdr_data   = 0;
                return;

        case LNET_EVENT_ACK: /* ACK */
                ev->match_bits = hdr->msg.ack.match_bits;
                ev->mlength    = hdr->msg.ack.mlength;
                return;

        case LNET_EVENT_REPLY: /* REPLY */
                return;

        case LNET_EVENT_SEND: /* active message */
                if (msg->msg_type == LNET_MSG_PUT) {
                        ev->pt_index   = le32_to_cpu(hdr->msg.put.ptl_index);
                        ev->match_bits = le64_to_cpu(hdr->msg.put.match_bits);
                        ev->offset     = le32_to_cpu(hdr->msg.put.offset);
                        ev->mlength    =
                        ev->rlength    = le32_to_cpu(hdr->payload_length);
                        ev->hdr_data   = le64_to_cpu(hdr->msg.put.hdr_data);

                } else {
                        LASSERT(msg->msg_type == LNET_MSG_GET);
                        ev->pt_index   = le32_to_cpu(hdr->msg.get.ptl_index);
                        ev->match_bits = le64_to_cpu(hdr->msg.get.match_bits);
                        ev->mlength    =
                        ev->rlength    = le32_to_cpu(hdr->msg.get.sink_length);
                        ev->offset     = le32_to_cpu(hdr->msg.get.src_offset);
                        ev->hdr_data   = 0;
                }
                return;
        }
}

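/*
 * Commit @msg on CPT @cpt for sending or receiving: record the CPT,
 * put the message on the partition's active list and bump the per-CPT
 * message counters.  A routed message is committed twice (first for
 * receiving, then for sending) but only sits on the active list once.
 */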
void
lnet_msg_commit(struct lnet_msg *msg, int cpt)
{
        struct lnet_msg_container *container = the_lnet.ln_msg_containers[cpt];
        struct lnet_counters *counters = the_lnet.ln_counters[cpt];

        /* routed message can be committed for both receiving and sending */
        LASSERT(!msg->msg_tx_committed);

        if (msg->msg_sending) {
                LASSERT(!msg->msg_receiving);

                msg->msg_tx_cpt = cpt;
                msg->msg_tx_committed = 1;
                if (msg->msg_rx_committed) { /* routed message REPLY */
                        LASSERT(msg->msg_onactivelist);
                        return;
                }
        } else {
                LASSERT(!msg->msg_sending);
                msg->msg_rx_cpt = cpt;
                msg->msg_rx_committed = 1;
        }

        LASSERT(!msg->msg_onactivelist);
        msg->msg_onactivelist = 1;
        list_add(&msg->msg_activelist, &container->msc_active);

        counters->msgs_alloc++;
        if (counters->msgs_alloc > counters->msgs_max)
                counters->msgs_max = counters->msgs_alloc;
}

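/*
 * Undo the send-side commit: on success update the send/route counters
 * and stats, then return the TX credits and clear msg_tx_committed.
 * Note that the PUT and GET message types may have been overwritten
 * while the ACK or REPLY was sent, and are restored here before the
 * stats are counted.
 */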
static void
lnet_msg_decommit_tx(struct lnet_msg *msg, int status)
{
        struct lnet_counters *counters;
        struct lnet_event *ev = &msg->msg_ev;

        LASSERT(msg->msg_tx_committed);
        if (status != 0)
                goto out;

        counters = the_lnet.ln_counters[msg->msg_tx_cpt];
        switch (ev->type) {
        default: /* routed message */
                LASSERT(msg->msg_routing);
                LASSERT(msg->msg_rx_committed);
                LASSERT(ev->type == 0);

                counters->route_length += msg->msg_len;
                counters->route_count++;
                goto incr_stats;

        case LNET_EVENT_PUT:
                /* should have been decommitted */
                LASSERT(!msg->msg_rx_committed);
                /* overwritten while sending ACK */
                LASSERT(msg->msg_type == LNET_MSG_ACK);
                msg->msg_type = LNET_MSG_PUT; /* fix type */
                break;

        case LNET_EVENT_SEND:
                LASSERT(!msg->msg_rx_committed);
                if (msg->msg_type == LNET_MSG_PUT)
                        counters->send_length += msg->msg_len;
                break;

        case LNET_EVENT_GET:
                LASSERT(msg->msg_rx_committed);
                /* overwritten while sending reply, we should never be
                 * here for optimized GET */
                LASSERT(msg->msg_type == LNET_MSG_REPLY);
                msg->msg_type = LNET_MSG_GET; /* fix type */
                break;
        }

        counters->send_count++;

incr_stats:
        if (msg->msg_txpeer)
                lnet_incr_stats(&msg->msg_txpeer->lpni_stats,
                                msg->msg_type,
                                LNET_STATS_TYPE_SEND);
        if (msg->msg_txni)
                lnet_incr_stats(&msg->msg_txni->ni_stats,
                                msg->msg_type,
                                LNET_STATS_TYPE_SEND);
 out:
        lnet_return_tx_credits_locked(msg);
        msg->msg_tx_committed = 0;
}

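/*
 * Undo the receive-side commit: on success update the receive counters
 * and stats, then return the RX credits and clear msg_rx_committed.
 * For a message committed both ways this must run after
 * lnet_msg_decommit_tx().
 */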
static void
lnet_msg_decommit_rx(struct lnet_msg *msg, int status)
{
        struct lnet_counters *counters;
        struct lnet_event *ev = &msg->msg_ev;

        LASSERT(!msg->msg_tx_committed); /* decommitted or never committed */
        LASSERT(msg->msg_rx_committed);

        if (status != 0)
                goto out;

        counters = the_lnet.ln_counters[msg->msg_rx_cpt];
        switch (ev->type) {
        default:
                LASSERT(ev->type == 0);
                LASSERT(msg->msg_routing);
                goto incr_stats;

        case LNET_EVENT_ACK:
                LASSERT(msg->msg_type == LNET_MSG_ACK);
                break;

        case LNET_EVENT_GET:
                /* type is "REPLY" if it's an optimized GET on the passive
                 * side, because an optimized GET is never committed for
                 * sending, so the message type wouldn't be changed back to
                 * "GET" by lnet_msg_decommit_tx(); see details in
                 * lnet_parse_get() */
                LASSERT(msg->msg_type == LNET_MSG_REPLY ||
                        msg->msg_type == LNET_MSG_GET);
                counters->send_length += msg->msg_wanted;
                break;

        case LNET_EVENT_PUT:
                LASSERT(msg->msg_type == LNET_MSG_PUT);
                break;

        case LNET_EVENT_REPLY:
                /* type is "GET" if it's an optimized GET on the active side;
                 * see details in lnet_create_reply_msg() */
                LASSERT(msg->msg_type == LNET_MSG_GET ||
                        msg->msg_type == LNET_MSG_REPLY);
                break;
        }

        counters->recv_count++;

incr_stats:
        if (msg->msg_rxpeer)
                lnet_incr_stats(&msg->msg_rxpeer->lpni_stats,
                                msg->msg_type,
                                LNET_STATS_TYPE_RECV);
        if (msg->msg_rxni)
                lnet_incr_stats(&msg->msg_rxni->ni_stats,
                                msg->msg_type,
                                LNET_STATS_TYPE_RECV);
        if (ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_REPLY)
                counters->recv_length += msg->msg_wanted;

 out:
        lnet_return_rx_credits_locked(msg);
        msg->msg_rx_committed = 0;
}

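/*
 * Fully decommit @msg and remove it from the active list.  The caller
 * holds lnet_net_lock(cpt); if the receive CPT differs (a forwarded
 * message), the lock is dropped and re-taken around the RX decommit,
 * and restored to @cpt before returning.
 */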
void
lnet_msg_decommit(struct lnet_msg *msg, int cpt, int status)
{
        int     cpt2 = cpt;

        LASSERT(msg->msg_tx_committed || msg->msg_rx_committed);
        LASSERT(msg->msg_onactivelist);

        if (msg->msg_tx_committed) { /* always decommit for sending first */
                LASSERT(cpt == msg->msg_tx_cpt);
                lnet_msg_decommit_tx(msg, status);
        }

        if (msg->msg_rx_committed) {
                /* forwarding msg committed for both receiving and sending */
                if (cpt != msg->msg_rx_cpt) {
                        lnet_net_unlock(cpt);
                        cpt2 = msg->msg_rx_cpt;
                        lnet_net_lock(cpt2);
                }
                lnet_msg_decommit_rx(msg, status);
        }

        list_del(&msg->msg_activelist);
        msg->msg_onactivelist = 0;

        the_lnet.ln_counters[cpt2]->msgs_alloc--;

        if (cpt2 != cpt) {
                lnet_net_unlock(cpt2);
                lnet_net_lock(cpt);
        }
}

void
lnet_msg_attach_md(struct lnet_msg *msg, struct lnet_libmd *md,
                   unsigned int offset, unsigned int mlen)
{
        /* NB: @offset and @mlen are only useful for receiving */
        /* Here, we attach the MD on lnet_msg, mark it busy and
         * decrement its threshold. Come what may, the lnet_msg "owns"
         * the MD until a call to lnet_msg_detach_md or lnet_finalize()
         * signals completion. */
        LASSERT(!msg->msg_routing);

        msg->msg_md = md;
        if (msg->msg_receiving) { /* committed for receiving */
                msg->msg_offset = offset;
                msg->msg_wanted = mlen;
        }

        md->md_refcount++;
        if (md->md_threshold != LNET_MD_THRESH_INF) {
                LASSERT(md->md_threshold > 0);
                md->md_threshold--;
        }

        /* build umd in event */
        lnet_md2handle(&msg->msg_ev.md_handle, md);
        lnet_md_deconstruct(md, &msg->msg_ev.md);
}

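/*
 * Drop the message's reference on its MD, queue the completion event on
 * the MD's EQ (if any) and unlink the MD once it has become unlinkable.
 */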
void
lnet_msg_detach_md(struct lnet_msg *msg, int status)
{
        struct lnet_libmd *md = msg->msg_md;
        int unlink;

        /* Now it's safe to drop my caller's ref */
        md->md_refcount--;
        LASSERT(md->md_refcount >= 0);

        unlink = lnet_md_unlinkable(md);
        if (md->md_eq != NULL) {
                msg->msg_ev.status   = status;
                msg->msg_ev.unlinked = unlink;
                lnet_eq_enqueue_event(md->md_eq, &msg->msg_ev);
        }

        if (unlink)
                lnet_md_unlink(md);

        msg->msg_md = NULL;
}

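/*
 * Complete @msg under lnet_net_lock(cpt).  A successful PUT that wants
 * an ACK, or a routed message not yet forwarded, is re-sent from here;
 * in those cases the return code is handed back to lnet_finalize() so
 * it can re-lock the right partition.  Otherwise the message is
 * decommitted and freed, and 0 is returned.
 */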
static int
lnet_complete_msg_locked(struct lnet_msg *msg, int cpt)
{
        struct lnet_handle_wire ack_wmd;
        int                rc;
        int                status = msg->msg_ev.status;

        LASSERT(msg->msg_onactivelist);

        if (status == 0 && msg->msg_ack) {
                /* Only send an ACK if the PUT completed successfully */

                lnet_msg_decommit(msg, cpt, 0);

                msg->msg_ack = 0;
                lnet_net_unlock(cpt);

                LASSERT(msg->msg_ev.type == LNET_EVENT_PUT);
                LASSERT(!msg->msg_routing);

                ack_wmd = msg->msg_hdr.msg.put.ack_wmd;

                lnet_prep_send(msg, LNET_MSG_ACK, msg->msg_ev.source, 0, 0);

                msg->msg_hdr.msg.ack.dst_wmd = ack_wmd;
                msg->msg_hdr.msg.ack.match_bits = msg->msg_ev.match_bits;
                msg->msg_hdr.msg.ack.mlength = cpu_to_le32(msg->msg_ev.mlength);

                /* NB: we probably want to use the NID of msg::msg_from as the
                 * 3rd parameter (router NID) if it's a routed message */
                rc = lnet_send(msg->msg_ev.target.nid, msg, LNET_NID_ANY);

                lnet_net_lock(cpt);
                /*
                 * NB: the message is committed for sending, so on success we
                 * must return because the LND will finalize it later.
                 *
                 * There is also a possibility that the message was committed
                 * for sending but failed before reaching the LND (e.g.
                 * ENOMEM).  We can't fall through in that case either,
                 * because the CPT for sending can differ from the CPT for
                 * receiving, so we must return to lnet_finalize() to make
                 * sure we are locking the correct partition.
                 */
                return rc;

        } else if (status == 0 &&       /* OK so far */
                   (msg->msg_routing && !msg->msg_sending)) {
                /* not forwarded */
                LASSERT(!msg->msg_receiving);   /* called back recv already */
                lnet_net_unlock(cpt);

                rc = lnet_send(LNET_NID_ANY, msg, LNET_NID_ANY);

                lnet_net_lock(cpt);
                /*
                 * NB: the message is committed for sending, so on success we
                 * must return because the LND will finalize it later.
                 *
                 * There is also a possibility that the message was committed
                 * for sending but failed before reaching the LND (e.g.
                 * ENOMEM).  We can't fall through in that case either:
                 * - The rule is that a message committed for both sending and
                 *   receiving must be decommitted for sending first.
                 * - The CPT for sending can differ from the CPT for
                 *   receiving, so we must return to lnet_finalize() to make
                 *   sure we are locking the correct partition.
                 */
                return rc;
        }

        lnet_msg_decommit(msg, cpt, status);
        lnet_msg_free(msg);
        return 0;
}

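/*
 * Finalize @msg with completion status @status: detach its MD (posting
 * the completion event), then drain the per-CPT finalizing queue.  The
 * finalizer-slot bookkeeping below bounds the recursion that results
 * from completions triggering further sends.
 *
 * This is the entry point LNDs use to hand a message back to LNet once
 * they are done with it.  An illustrative sketch, not code from this
 * file (my_lnd_tx_done and struct my_tx are hypothetical names): a
 * typical LND completion path, running in thread context, would call
 * lnet_finalize() exactly once per lnet_msg:
 *
 *      static void my_lnd_tx_done(struct my_tx *tx, int rc)
 *      {
 *              lnet_finalize(tx->tx_lntmsg, rc);
 *      }
 */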
void
lnet_finalize(struct lnet_msg *msg, int status)
{
        struct lnet_msg_container       *container;
        int                             my_slot;
        int                             cpt;
        int                             rc;
        int                             i;

        LASSERT(!in_interrupt());

        if (msg == NULL)
                return;

        msg->msg_ev.status = status;

        if (msg->msg_md != NULL) {
                cpt = lnet_cpt_of_cookie(msg->msg_md->md_lh.lh_cookie);

                lnet_res_lock(cpt);
                lnet_msg_detach_md(msg, status);
                lnet_res_unlock(cpt);
        }

 again:
        rc = 0;
        if (!msg->msg_tx_committed && !msg->msg_rx_committed) {
                /* not committed to network yet */
                LASSERT(!msg->msg_onactivelist);
                lnet_msg_free(msg);
                return;
        }

        /*
         * NB: a routed message can be committed for both receiving and
         * sending; we must finalize in LIFO order to keep the counters
         * correct (finalize sending first, then finalize receiving).
         */
        cpt = msg->msg_tx_committed ? msg->msg_tx_cpt : msg->msg_rx_cpt;
        lnet_net_lock(cpt);

        container = the_lnet.ln_msg_containers[cpt];
        list_add_tail(&msg->msg_list, &container->msc_finalizing);

        /* Recursion breaker.  Don't complete the message here if I am (or
         * enough other threads are) already completing messages */

        my_slot = -1;
        for (i = 0; i < container->msc_nfinalizers; i++) {
                if (container->msc_finalizers[i] == current)
                        break;

                if (my_slot < 0 && container->msc_finalizers[i] == NULL)
                        my_slot = i;
        }

        if (i < container->msc_nfinalizers || my_slot < 0) {
                lnet_net_unlock(cpt);
                return;
        }

        container->msc_finalizers[my_slot] = current;

        while (!list_empty(&container->msc_finalizing)) {
                msg = list_entry(container->msc_finalizing.next,
                                 struct lnet_msg, msg_list);

                list_del(&msg->msg_list);

                /* NB drops and regains the lnet lock if it actually does
                 * anything, so my finalizing friends can chomp along too */
                rc = lnet_complete_msg_locked(msg, cpt);
                if (rc != 0)
                        break;
        }

        if (unlikely(!list_empty(&the_lnet.ln_delay_rules))) {
                lnet_net_unlock(cpt);
                lnet_delay_rule_check();
                lnet_net_lock(cpt);
        }

        container->msc_finalizers[my_slot] = NULL;
        lnet_net_unlock(cpt);

        if (rc != 0)
                goto again;
}
EXPORT_SYMBOL(lnet_finalize);

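/*
 * Tear down a message container: free any messages still on the active
 * list (complaining if there are any) and release the finalizer array.
 */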
void
lnet_msg_container_cleanup(struct lnet_msg_container *container)
{
        int     count = 0;

        if (container->msc_init == 0)
                return;

        while (!list_empty(&container->msc_active)) {
                struct lnet_msg *msg;

                msg  = list_entry(container->msc_active.next,
                                  struct lnet_msg, msg_activelist);
                LASSERT(msg->msg_onactivelist);
                msg->msg_onactivelist = 0;
                list_del(&msg->msg_activelist);
                lnet_msg_free(msg);
                count++;
        }

        if (count > 0)
                CERROR("%d active msgs on exit\n", count);

        if (container->msc_finalizers != NULL) {
                LIBCFS_FREE(container->msc_finalizers,
                            container->msc_nfinalizers *
                            sizeof(*container->msc_finalizers));
                container->msc_finalizers = NULL;
        }
        container->msc_init = 0;
}

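/*
 * Initialize a message container for CPT @cpt: set up the active and
 * finalizing lists and allocate one finalizer slot per CPU in the
 * partition (at least one), which bounds how many threads can finalize
 * messages concurrently on this CPT.
 */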
int
lnet_msg_container_setup(struct lnet_msg_container *container, int cpt)
{
        int rc = 0;

        container->msc_init = 1;

        INIT_LIST_HEAD(&container->msc_active);
        INIT_LIST_HEAD(&container->msc_finalizing);

        /* number of CPUs in this partition */
        container->msc_nfinalizers = cfs_cpt_weight(lnet_cpt_table(), cpt);
        if (container->msc_nfinalizers == 0)
                container->msc_nfinalizers = 1;

        LIBCFS_CPT_ALLOC(container->msc_finalizers, lnet_cpt_table(), cpt,
                         container->msc_nfinalizers *
                         sizeof(*container->msc_finalizers));

        if (container->msc_finalizers == NULL) {
                CERROR("Failed to allocate message finalizers\n");
                lnet_msg_container_cleanup(container);
                return -ENOMEM;
        }

        return rc;
}

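/*
 * Clean up and free the per-CPT message containers.
 */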
void
lnet_msg_containers_destroy(void)
{
        struct lnet_msg_container *container;
        int     i;

        if (the_lnet.ln_msg_containers == NULL)
                return;

        cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers)
                lnet_msg_container_cleanup(container);

        cfs_percpt_free(the_lnet.ln_msg_containers);
        the_lnet.ln_msg_containers = NULL;
}

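/*
 * Allocate and initialize one message container per CPT, undoing all of
 * the allocations if any single container fails to set up.
 */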
int
lnet_msg_containers_create(void)
{
        struct lnet_msg_container *container;
        int     rc;
        int     i;

        the_lnet.ln_msg_containers = cfs_percpt_alloc(lnet_cpt_table(),
                                                      sizeof(*container));

        if (the_lnet.ln_msg_containers == NULL) {
                CERROR("Failed to allocate cpu-partition data for network\n");
                return -ENOMEM;
        }

        cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers) {
                rc = lnet_msg_container_setup(container, i);
                if (rc != 0) {
                        lnet_msg_containers_destroy();
                        return rc;
                }
        }

        return 0;
}