/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2014, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
/*
 * Message decoding, parsing and finalizing routines
 */
#define DEBUG_SUBSYSTEM S_LNET

#include <lnet/lib-lnet.h>
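/*
 * Build an LNET_EVENT_UNLINK event for @md.  The event is zeroed first,
 * so every field not explicitly set below is reported as 0.
 */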
void
lnet_build_unlink_event(struct lnet_libmd *md, struct lnet_event *ev)
{
	memset(ev, 0, sizeof(*ev));

	ev->unlinked = 1;
	ev->type = LNET_EVENT_UNLINK;
	lnet_md_deconstruct(md, &ev->md);
	lnet_md2handle(&ev->md_handle, md);
}
/*
 * Don't need any lock, must be called after lnet_commit_md
 * because otherwise event can be freed by lnet_me_unlink()
 */
void
lnet_build_msg_event(struct lnet_msg *msg, enum lnet_event_kind ev_type)
{
	struct lnet_hdr *hdr = &msg->msg_hdr;
	struct lnet_event *ev = &msg->msg_ev;

	LASSERT(!msg->msg_routing);

	ev->type = ev_type;

	if (ev_type == LNET_EVENT_SEND) {
		/* event for active message */
		ev->target.nid = le64_to_cpu(hdr->dest_nid);
		ev->target.pid = le32_to_cpu(hdr->dest_pid);
		ev->initiator.nid = LNET_NID_ANY;
		ev->initiator.pid = the_lnet.ln_pid;
		ev->source.nid = LNET_NID_ANY;
		ev->source.pid = the_lnet.ln_pid;
		ev->sender = LNET_NID_ANY;
	} else {
		/* event for passive message */
		ev->target.pid = hdr->dest_pid;
		ev->target.nid = hdr->dest_nid;
		ev->initiator.pid = hdr->src_pid;
		/* Multi-Rail: resolve src_nid to "primary" peer NID */
		ev->initiator.nid = msg->msg_initiator;
		/* Multi-Rail: track source NID. */
		ev->source.pid = hdr->src_pid;
		ev->source.nid = hdr->src_nid;
		ev->rlength = hdr->payload_length;
		ev->sender = msg->msg_from;
		ev->mlength = msg->msg_wanted;
		ev->offset = msg->msg_offset;
	}

	switch (ev_type) {
	default:
		LBUG();

	case LNET_EVENT_PUT: /* passive PUT */
		ev->pt_index = hdr->msg.put.ptl_index;
		ev->match_bits = hdr->msg.put.match_bits;
		ev->hdr_data = hdr->msg.put.hdr_data;
		return;

	case LNET_EVENT_GET: /* passive GET */
		ev->pt_index = hdr->msg.get.ptl_index;
		ev->match_bits = hdr->msg.get.match_bits;
		ev->hdr_data = 0;
		return;

	case LNET_EVENT_ACK: /* ACK */
		ev->match_bits = hdr->msg.ack.match_bits;
		ev->mlength = hdr->msg.ack.mlength;
		return;

	case LNET_EVENT_REPLY: /* REPLY */
		return;

	case LNET_EVENT_SEND: /* active message */
		if (msg->msg_type == LNET_MSG_PUT) {
			ev->pt_index = le32_to_cpu(hdr->msg.put.ptl_index);
			ev->match_bits = le64_to_cpu(hdr->msg.put.match_bits);
			ev->offset = le32_to_cpu(hdr->msg.put.offset);
			ev->mlength =
			ev->rlength = le32_to_cpu(hdr->payload_length);
			ev->hdr_data = le64_to_cpu(hdr->msg.put.hdr_data);
		} else {
			LASSERT(msg->msg_type == LNET_MSG_GET);
			ev->pt_index = le32_to_cpu(hdr->msg.get.ptl_index);
			ev->match_bits = le64_to_cpu(hdr->msg.get.match_bits);
			ev->mlength =
			ev->rlength = le32_to_cpu(hdr->msg.get.sink_length);
			ev->offset = le32_to_cpu(hdr->msg.get.src_offset);
			ev->hdr_data = 0;
		}
		return;
	}
}
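/*
 * Commit @msg on CPT @cpt for sending or receiving: record the CPT,
 * put the message on the per-CPT active list and update the message
 * allocation counters.  Caller must hold lnet_net_lock(cpt).
 */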
void
lnet_msg_commit(struct lnet_msg *msg, int cpt)
{
	struct lnet_msg_container *container = the_lnet.ln_msg_containers[cpt];
	struct lnet_counters *counters = the_lnet.ln_counters[cpt];

	/* routed message can be committed for both receiving and sending */
	LASSERT(!msg->msg_tx_committed);

	if (msg->msg_sending) {
		LASSERT(!msg->msg_receiving);

		msg->msg_tx_cpt = cpt;
		msg->msg_tx_committed = 1;
		if (msg->msg_rx_committed) { /* routed message REPLY */
			LASSERT(msg->msg_onactivelist);
			return;
		}
	} else {
		LASSERT(!msg->msg_sending);
		msg->msg_rx_cpt = cpt;
		msg->msg_rx_committed = 1;
	}

	LASSERT(!msg->msg_onactivelist);
	msg->msg_onactivelist = 1;
	list_add(&msg->msg_activelist, &container->msc_active);

	counters->msgs_alloc++;
	if (counters->msgs_alloc > counters->msgs_max)
		counters->msgs_max = counters->msgs_alloc;
}
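/*
 * Undo the sending side of lnet_msg_commit(): on success update the
 * send/route counters and per-NI/per-peer statistics, then return TX
 * credits and clear msg_tx_committed.  Caller must hold
 * lnet_net_lock(msg->msg_tx_cpt).
 */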
static void
lnet_msg_decommit_tx(struct lnet_msg *msg, int status)
{
	struct lnet_counters *counters;
	struct lnet_event *ev = &msg->msg_ev;

	LASSERT(msg->msg_tx_committed);
	if (status != 0)
		goto out;

	counters = the_lnet.ln_counters[msg->msg_tx_cpt];
	switch (ev->type) {
	default: /* routed message */
		LASSERT(msg->msg_routing);
		LASSERT(msg->msg_rx_committed);
		LASSERT(ev->type == 0);

		counters->route_length += msg->msg_len;
		counters->route_count++;
		goto out;

	case LNET_EVENT_PUT:
		/* should have been decommitted */
		LASSERT(!msg->msg_rx_committed);
		/* overwritten while sending ACK */
		LASSERT(msg->msg_type == LNET_MSG_ACK);
		msg->msg_type = LNET_MSG_PUT; /* fix type */
		break;

	case LNET_EVENT_SEND:
		LASSERT(!msg->msg_rx_committed);
		if (msg->msg_type == LNET_MSG_PUT)
			counters->send_length += msg->msg_len;
		break;

	case LNET_EVENT_GET:
		LASSERT(msg->msg_rx_committed);
		/* overwritten while sending reply, we should never be
		 * here for optimized GET */
		LASSERT(msg->msg_type == LNET_MSG_REPLY);
		msg->msg_type = LNET_MSG_GET; /* fix type */
		break;
	}

	counters->send_count++;
	if (msg->msg_txpeer != NULL)
		atomic_inc(&msg->msg_txpeer->lpni_stats.send_count);
	if (msg->msg_txni != NULL)
		atomic_inc(&msg->msg_txni->ni_stats.send_count);
out:
	lnet_return_tx_credits_locked(msg);
	msg->msg_tx_committed = 0;
}
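/*
 * Receiving-side counterpart of lnet_msg_decommit_tx(): on success
 * update the receive counters and per-NI/per-peer statistics, then
 * return RX credits and clear msg_rx_committed.  Caller must hold
 * lnet_net_lock(msg->msg_rx_cpt).
 */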
static void
lnet_msg_decommit_rx(struct lnet_msg *msg, int status)
{
	struct lnet_counters *counters;
	struct lnet_event *ev = &msg->msg_ev;

	LASSERT(!msg->msg_tx_committed); /* decommitted or never committed */
	LASSERT(msg->msg_rx_committed);

	if (status != 0)
		goto out;

	counters = the_lnet.ln_counters[msg->msg_rx_cpt];
	switch (ev->type) {
	default:
		LASSERT(ev->type == 0);
		LASSERT(msg->msg_routing);
		goto out;

	case LNET_EVENT_ACK:
		LASSERT(msg->msg_type == LNET_MSG_ACK);
		break;

	case LNET_EVENT_GET:
		/* type is "REPLY" if it's an optimized GET on passive side,
		 * because optimized GET will never be committed for sending,
		 * so message type wouldn't be changed back to "GET" by
		 * lnet_msg_decommit_tx(), see details in lnet_parse_get() */
		LASSERT(msg->msg_type == LNET_MSG_REPLY ||
			msg->msg_type == LNET_MSG_GET);
		counters->send_length += msg->msg_wanted;
		break;

	case LNET_EVENT_PUT:
		LASSERT(msg->msg_type == LNET_MSG_PUT);
		break;

	case LNET_EVENT_REPLY:
		/* type is "GET" if it's an optimized GET on active side,
		 * see details in lnet_create_reply_msg() */
		LASSERT(msg->msg_type == LNET_MSG_GET ||
			msg->msg_type == LNET_MSG_REPLY);
		break;
	}

	counters->recv_count++;
	if (msg->msg_rxpeer != NULL)
		atomic_inc(&msg->msg_rxpeer->lpni_stats.recv_count);
	if (msg->msg_rxni != NULL)
		atomic_inc(&msg->msg_rxni->ni_stats.recv_count);
	if (ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_REPLY)
		counters->recv_length += msg->msg_wanted;
out:
	lnet_return_rx_credits_locked(msg);
	msg->msg_rx_committed = 0;
}
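/*
 * Fully decommit @msg: decommit for sending first, then for receiving
 * (taking the RX CPT lock if it differs from @cpt), and finally remove
 * the message from the active list.  Returns with lnet_net_lock(cpt)
 * held, just as on entry.
 */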
void
lnet_msg_decommit(struct lnet_msg *msg, int cpt, int status)
{
	int cpt2 = cpt;

	LASSERT(msg->msg_tx_committed || msg->msg_rx_committed);
	LASSERT(msg->msg_onactivelist);

	if (msg->msg_tx_committed) { /* always decommit for sending first */
		LASSERT(cpt == msg->msg_tx_cpt);
		lnet_msg_decommit_tx(msg, status);
	}

	if (msg->msg_rx_committed) {
		/* forwarding msg committed for both receiving and sending */
		if (cpt != msg->msg_rx_cpt) {
			lnet_net_unlock(cpt);
			cpt2 = msg->msg_rx_cpt;
			lnet_net_lock(cpt2);
		}
		lnet_msg_decommit_rx(msg, status);
	}

	list_del(&msg->msg_activelist);
	msg->msg_onactivelist = 0;
	the_lnet.ln_counters[cpt2]->msgs_alloc--;

	if (cpt2 != cpt) {
		lnet_net_unlock(cpt2);
		lnet_net_lock(cpt);
	}
}
void
lnet_msg_attach_md(struct lnet_msg *msg, struct lnet_libmd *md,
		   unsigned int offset, unsigned int mlen)
{
	/* NB: @offset and @mlen are only useful for receiving */
	/* Here, we attach the MD to the lnet_msg, mark it busy and
	 * decrement its threshold.  Come what may, the lnet_msg "owns"
	 * the MD until a call to lnet_msg_detach_md or lnet_finalize()
	 * signals completion. */
	LASSERT(!msg->msg_routing);

	msg->msg_md = md;
	if (msg->msg_receiving) { /* committed for receiving */
		msg->msg_offset = offset;
		msg->msg_wanted = mlen;
	}

	md->md_refcount++;
	if (md->md_threshold != LNET_MD_THRESH_INF) {
		LASSERT(md->md_threshold > 0);
		md->md_threshold--;
	}

	/* build umd in event */
	lnet_md2handle(&msg->msg_ev.md_handle, md);
	lnet_md_deconstruct(md, &msg->msg_ev.md);
}
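/*
 * Drop the message's reference on its MD, deliver the completion event
 * to the MD's EQ if it has one, and unlink the MD if that made it
 * unlinkable.
 */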
void
lnet_msg_detach_md(struct lnet_msg *msg, int status)
{
	struct lnet_libmd *md = msg->msg_md;
	int unlink;

	/* Now it's safe to drop my caller's ref */
	md->md_refcount--;
	LASSERT(md->md_refcount >= 0);

	unlink = lnet_md_unlinkable(md);
	if (md->md_eq != NULL) {
		msg->msg_ev.status = status;
		msg->msg_ev.unlinked = unlink;
		lnet_eq_enqueue_event(md->md_eq, &msg->msg_ev);
	}
	if (unlink)
		lnet_md_unlink(md);
	msg->msg_md = NULL;
}
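/*
 * Complete one message: send an ACK for a successfully received PUT
 * that asked for one, send on a routed message that hasn't gone out
 * yet, or simply decommit and free the message.  A non-zero return
 * means the message was committed for sending but lnet_send() failed;
 * lnet_finalize() must then restart on the correct CPT.
 */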
static int
lnet_complete_msg_locked(struct lnet_msg *msg, int cpt)
{
	struct lnet_handle_wire ack_wmd;
	int rc;
	int status = msg->msg_ev.status;

	LASSERT(msg->msg_onactivelist);

	if (status == 0 && msg->msg_ack) {
		/* Only send an ACK if the PUT completed successfully */
		lnet_msg_decommit(msg, cpt, 0);

		msg->msg_ack = 0;
		lnet_net_unlock(cpt);

		LASSERT(msg->msg_ev.type == LNET_EVENT_PUT);
		LASSERT(!msg->msg_routing);

		ack_wmd = msg->msg_hdr.msg.put.ack_wmd;

		lnet_prep_send(msg, LNET_MSG_ACK, msg->msg_ev.source, 0, 0);

		msg->msg_hdr.msg.ack.dst_wmd = ack_wmd;
		msg->msg_hdr.msg.ack.match_bits = msg->msg_ev.match_bits;
		msg->msg_hdr.msg.ack.mlength = cpu_to_le32(msg->msg_ev.mlength);

		/* NB: we probably want to use the NID of msg::msg_from as the
		 * 3rd parameter (router NID) if it's a routed message */
		rc = lnet_send(msg->msg_ev.target.nid, msg, LNET_NID_ANY);

		lnet_net_lock(cpt);
		/*
		 * NB: message is committed for sending, we should return
		 * on success because the LND will finalize this message later.
		 *
		 * Also, the message may be committed for sending and then
		 * fail before delivery to the LND, e.g. with ENOMEM.  In
		 * that case we can't fall through either, because the CPT
		 * for sending can differ from the CPT for receiving, so we
		 * should return to lnet_finalize() to make sure we are
		 * locking the correct partition.
		 */
		return rc;

	} else if (status == 0 &&	/* OK so far */
		   (msg->msg_routing && !msg->msg_sending)) {
		/* not forwarded */
		LASSERT(!msg->msg_receiving);	/* called back recv already */
		lnet_net_unlock(cpt);

		rc = lnet_send(LNET_NID_ANY, msg, LNET_NID_ANY);

		lnet_net_lock(cpt);
		/*
		 * NB: message is committed for sending, we should return
		 * on success because the LND will finalize this message later.
		 *
		 * Also, the message may be committed for sending and then
		 * fail before delivery to the LND, e.g. with ENOMEM.  In
		 * that case we can't fall through either:
		 * - The rule is that a message must decommit for sending
		 *   first if it's committed for both sending and receiving.
		 * - The CPT for sending can differ from the CPT for
		 *   receiving, so we should return to lnet_finalize() to
		 *   make sure we are locking the correct partition.
		 */
		return rc;
	}

	lnet_msg_decommit(msg, cpt, status);
	lnet_msg_free(msg);
	return 0;
}
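/*
 * lnet_finalize() is how the LNDs and LNet itself hand back a message
 * once they are done with it: it delivers the completion event,
 * detaches the MD, and decommits and frees the message.  It is safe to
 * call with a NULL @msg and must not be called from interrupt context.
 *
 * A typical LND completion path (a sketch only; my_tx, tx_lntmsg and
 * my_lnd_tx_done are hypothetical LND-private names):
 *
 *	static void my_lnd_tx_done(struct my_tx *tx, int rc)
 *	{
 *		lnet_finalize(tx->tx_lntmsg, rc);
 *	}
 */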
void
lnet_finalize(struct lnet_msg *msg, int status)
{
	struct lnet_msg_container *container;
	int my_slot, cpt, rc, i;

	LASSERT(!in_interrupt());

	if (msg == NULL)
		return;

	msg->msg_ev.status = status;

	if (msg->msg_md != NULL) {
		cpt = lnet_cpt_of_cookie(msg->msg_md->md_lh.lh_cookie);
		lnet_res_lock(cpt);
		lnet_msg_detach_md(msg, status);
		lnet_res_unlock(cpt);
	}

again:
	rc = 0;
	if (!msg->msg_tx_committed && !msg->msg_rx_committed) {
		/* not committed to network yet */
		LASSERT(!msg->msg_onactivelist);
		lnet_msg_free(msg);
		return;
	}

	/*
	 * NB: a routed message can be committed for both receiving and
	 * sending; we should finalize in LIFO order and keep the counters
	 * correct (finalize sending first, then finalize receiving).
	 */
	cpt = msg->msg_tx_committed ? msg->msg_tx_cpt : msg->msg_rx_cpt;
	lnet_net_lock(cpt);

	container = the_lnet.ln_msg_containers[cpt];
	list_add_tail(&msg->msg_list, &container->msc_finalizing);

	/* Recursion breaker.  Don't complete the message here if I am (or
	 * enough other threads are) already completing messages */
	my_slot = -1;
	for (i = 0; i < container->msc_nfinalizers; i++) {
		if (container->msc_finalizers[i] == current)
			break;
		if (my_slot < 0 && container->msc_finalizers[i] == NULL)
			my_slot = i;
	}

	if (i < container->msc_nfinalizers || my_slot < 0) {
		lnet_net_unlock(cpt);
		return;
	}

	container->msc_finalizers[my_slot] = current;

	while (!list_empty(&container->msc_finalizing)) {
		msg = list_entry(container->msc_finalizing.next,
				 struct lnet_msg, msg_list);
		list_del(&msg->msg_list);

		/* NB drops and regains the lnet lock if it actually does
		 * anything, so my finalizing friends can chomp along too */
		rc = lnet_complete_msg_locked(msg, cpt);
		if (rc != 0)
			break;
	}

	if (unlikely(!list_empty(&the_lnet.ln_delay_rules))) {
		lnet_net_unlock(cpt);
		lnet_delay_rule_check();
		lnet_net_lock(cpt);
	}

	container->msc_finalizers[my_slot] = NULL;
	lnet_net_unlock(cpt);

	if (rc != 0)
		goto again;
}
EXPORT_SYMBOL(lnet_finalize);
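/*
 * Free everything attached to a message container: any messages still
 * on the active list (complaining if there are any) and the finalizer
 * slot array.
 */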
void
lnet_msg_container_cleanup(struct lnet_msg_container *container)
{
	int count = 0;

	if (container->msc_init == 0)
		return;

	while (!list_empty(&container->msc_active)) {
		struct lnet_msg *msg;

		msg = list_entry(container->msc_active.next,
				 struct lnet_msg, msg_activelist);
		LASSERT(msg->msg_onactivelist);
		msg->msg_onactivelist = 0;
		list_del(&msg->msg_activelist);
		lnet_msg_free(msg);
		count++;
	}

	if (count > 0)
		CERROR("%d active msg on exit\n", count);

	if (container->msc_finalizers != NULL) {
		LIBCFS_FREE(container->msc_finalizers,
			    container->msc_nfinalizers *
			    sizeof(*container->msc_finalizers));
		container->msc_finalizers = NULL;
	}
	container->msc_init = 0;
}
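/*
 * Initialize the message container for CPT @cpt: the active and
 * finalizing list heads, plus one finalizer slot per CPU in the
 * partition for lnet_finalize()'s recursion breaker.
 */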
int
lnet_msg_container_setup(struct lnet_msg_container *container, int cpt)
{
	container->msc_init = 1;

	INIT_LIST_HEAD(&container->msc_active);
	INIT_LIST_HEAD(&container->msc_finalizing);

	/* number of CPUs in this partition */
	container->msc_nfinalizers = cfs_cpt_weight(lnet_cpt_table(), cpt);

	LIBCFS_CPT_ALLOC(container->msc_finalizers, lnet_cpt_table(), cpt,
			 container->msc_nfinalizers *
			 sizeof(*container->msc_finalizers));
	if (container->msc_finalizers == NULL) {
		CERROR("Failed to allocate message finalizers\n");
		lnet_msg_container_cleanup(container);
		return -ENOMEM;
	}

	return 0;
}
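/* Tear down and free the per-CPT message containers. */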
void
lnet_msg_containers_destroy(void)
{
	struct lnet_msg_container *container;
	int i;

	if (the_lnet.ln_msg_containers == NULL)
		return;

	cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers)
		lnet_msg_container_cleanup(container);

	cfs_percpt_free(the_lnet.ln_msg_containers);
	the_lnet.ln_msg_containers = NULL;
}
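/*
 * Allocate and set up one message container per CPT; on any setup
 * failure everything is cleaned up and an error returned.
 */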
int
lnet_msg_containers_create(void)
{
	struct lnet_msg_container *container;
	int rc, i;

	the_lnet.ln_msg_containers = cfs_percpt_alloc(lnet_cpt_table(),
						      sizeof(*container));
	if (the_lnet.ln_msg_containers == NULL) {
		CERROR("Failed to allocate cpu-partition data for network\n");
		return -ENOMEM;
	}

	cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers) {
		rc = lnet_msg_container_setup(container, i);
		if (rc != 0) {
			lnet_msg_containers_destroy();
			return rc;
		}
	}

	return 0;
}