/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2014, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/lnet/lib-msg.c
 *
 * Message decoding, parsing and finalizing routines
 */

#define DEBUG_SUBSYSTEM S_LNET

#include <lnet/lib-lnet.h>

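/*
 * Build a standalone LNET_EVENT_UNLINK event for @md. The event is
 * zeroed first so no stale request fields leak through, then tagged
 * with the MD's user-visible descriptor and handle.
 */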
void
lnet_build_unlink_event(struct lnet_libmd *md, struct lnet_event *ev)
{
	memset(ev, 0, sizeof(*ev));

	ev->unlinked = 1;
	ev->type = LNET_EVENT_UNLINK;
	lnet_md_deconstruct(md, &ev->md);
	lnet_md2handle(&ev->md_handle, md);
}

/*
 * Don't need any lock; must be called after lnet_commit_md.
 */
void
lnet_build_msg_event(struct lnet_msg *msg, enum lnet_event_kind ev_type)
{
	struct lnet_hdr *hdr = &msg->msg_hdr;
	struct lnet_event *ev = &msg->msg_ev;

	LASSERT(!msg->msg_routing);

	ev->type = ev_type;
	ev->msg_type = msg->msg_type;

	if (ev_type == LNET_EVENT_SEND) {
		/* event for active message */
		ev->target.nid    = le64_to_cpu(hdr->dest_nid);
		ev->target.pid    = le32_to_cpu(hdr->dest_pid);
		ev->initiator.nid = LNET_NID_ANY;
		ev->initiator.pid = the_lnet.ln_pid;
		ev->source.nid    = LNET_NID_ANY;
		ev->source.pid    = the_lnet.ln_pid;
		ev->sender        = LNET_NID_ANY;
	} else {
		/* event for passive message */
		ev->target.pid    = hdr->dest_pid;
		ev->target.nid    = hdr->dest_nid;
		ev->initiator.pid = hdr->src_pid;
		/* Multi-Rail: resolve src_nid to "primary" peer NID */
		ev->initiator.nid = msg->msg_initiator;
		/* Multi-Rail: track source NID. */
		ev->source.pid    = hdr->src_pid;
		ev->source.nid    = hdr->src_nid;
		ev->rlength       = hdr->payload_length;
		ev->sender        = msg->msg_from;
		ev->mlength       = msg->msg_wanted;
		ev->offset        = msg->msg_offset;
	}

	switch (ev_type) {
	default:
		LBUG();

	case LNET_EVENT_PUT: /* passive PUT */
		ev->pt_index   = hdr->msg.put.ptl_index;
		ev->match_bits = hdr->msg.put.match_bits;
		ev->hdr_data   = hdr->msg.put.hdr_data;
		return;

	case LNET_EVENT_GET: /* passive GET */
		ev->pt_index   = hdr->msg.get.ptl_index;
		ev->match_bits = hdr->msg.get.match_bits;
		ev->hdr_data   = 0;
		return;

	case LNET_EVENT_ACK: /* ACK */
		ev->match_bits = hdr->msg.ack.match_bits;
		ev->mlength    = hdr->msg.ack.mlength;
		return;

	case LNET_EVENT_REPLY: /* REPLY */
		return;

	case LNET_EVENT_SEND: /* active message */
		if (msg->msg_type == LNET_MSG_PUT) {
			ev->pt_index   = le32_to_cpu(hdr->msg.put.ptl_index);
			ev->match_bits = le64_to_cpu(hdr->msg.put.match_bits);
			ev->offset     = le32_to_cpu(hdr->msg.put.offset);
			ev->mlength    =
			ev->rlength    = le32_to_cpu(hdr->payload_length);
			ev->hdr_data   = le64_to_cpu(hdr->msg.put.hdr_data);

		} else {
			LASSERT(msg->msg_type == LNET_MSG_GET);
			ev->pt_index   = le32_to_cpu(hdr->msg.get.ptl_index);
			ev->match_bits = le64_to_cpu(hdr->msg.get.match_bits);
			ev->mlength    =
			ev->rlength    = le32_to_cpu(hdr->msg.get.sink_length);
			ev->offset     = le32_to_cpu(hdr->msg.get.src_offset);
			ev->hdr_data   = 0;
		}
		return;
	}
}

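/*
 * Commit @msg to partition @cpt: record the CPT on the tx or rx side,
 * put the message on this partition's active list and bump the per-CPT
 * message counters. A routed message may be committed twice, once for
 * receiving and once for sending.
 */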
void
lnet_msg_commit(struct lnet_msg *msg, int cpt)
{
	struct lnet_msg_container *container = the_lnet.ln_msg_containers[cpt];
	struct lnet_counters *counters = the_lnet.ln_counters[cpt];

	/* routed message can be committed for both receiving and sending */
	LASSERT(!msg->msg_tx_committed);

	if (msg->msg_sending) {
		LASSERT(!msg->msg_receiving);

		msg->msg_tx_cpt = cpt;
		msg->msg_tx_committed = 1;
		if (msg->msg_rx_committed) { /* routed message REPLY */
			LASSERT(msg->msg_onactivelist);
			return;
		}
	} else {
		LASSERT(!msg->msg_sending);
		msg->msg_rx_cpt = cpt;
		msg->msg_rx_committed = 1;
	}

	LASSERT(!msg->msg_onactivelist);
	msg->msg_onactivelist = 1;
	list_add(&msg->msg_activelist, &container->msc_active);

	counters->msgs_alloc++;
	if (counters->msgs_alloc > counters->msgs_max)
		counters->msgs_max = counters->msgs_alloc;
}

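/*
 * Undo the send-side commit: on success, update send/route counters and
 * per-peer/NI send stats, then release tx credits. A message type that
 * was overwritten in place to send a response (PUT overwritten by ACK,
 * GET overwritten by REPLY) is restored here so statistics are charged
 * to the original type.
 */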
static void
lnet_msg_decommit_tx(struct lnet_msg *msg, int status)
{
	struct lnet_counters *counters;
	struct lnet_event *ev = &msg->msg_ev;

	LASSERT(msg->msg_tx_committed);
	if (status != 0)
		goto out;

	counters = the_lnet.ln_counters[msg->msg_tx_cpt];
	switch (ev->type) {
	default: /* routed message */
		LASSERT(msg->msg_routing);
		LASSERT(msg->msg_rx_committed);
		LASSERT(ev->type == 0);

		counters->route_length += msg->msg_len;
		counters->route_count++;
		goto incr_stats;

	case LNET_EVENT_PUT:
		/* should have been decommitted */
		LASSERT(!msg->msg_rx_committed);
		/* overwritten while sending ACK */
		LASSERT(msg->msg_type == LNET_MSG_ACK);
		msg->msg_type = LNET_MSG_PUT; /* fix type */
		break;

	case LNET_EVENT_SEND:
		LASSERT(!msg->msg_rx_committed);
		if (msg->msg_type == LNET_MSG_PUT)
			counters->send_length += msg->msg_len;
		break;

	case LNET_EVENT_GET:
		LASSERT(msg->msg_rx_committed);
		/* overwritten while sending reply; we should never be
		 * here for an optimized GET */
		LASSERT(msg->msg_type == LNET_MSG_REPLY);
		msg->msg_type = LNET_MSG_GET; /* fix type */
		break;
	}

	counters->send_count++;

incr_stats:
	if (msg->msg_txpeer)
		lnet_incr_stats(&msg->msg_txpeer->lpni_stats,
				msg->msg_type,
				LNET_STATS_TYPE_SEND);
	if (msg->msg_txni)
		lnet_incr_stats(&msg->msg_txni->ni_stats,
				msg->msg_type,
				LNET_STATS_TYPE_SEND);
out:
	lnet_return_tx_credits_locked(msg);
	msg->msg_tx_committed = 0;
}

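/*
 * Undo the receive-side commit: on success, update recv/route counters
 * and per-peer/NI receive stats, then release rx credits. The optimized
 * GET cases are asymmetric with lnet_msg_decommit_tx(); the comments on
 * the GET and REPLY cases below give the details.
 */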
static void
lnet_msg_decommit_rx(struct lnet_msg *msg, int status)
{
	struct lnet_counters *counters;
	struct lnet_event *ev = &msg->msg_ev;

	LASSERT(!msg->msg_tx_committed); /* decommitted or never committed */
	LASSERT(msg->msg_rx_committed);

	if (status != 0)
		goto out;

	counters = the_lnet.ln_counters[msg->msg_rx_cpt];
	switch (ev->type) {
	default:
		LASSERT(ev->type == 0);
		LASSERT(msg->msg_routing);
		goto incr_stats;

	case LNET_EVENT_ACK:
		LASSERT(msg->msg_type == LNET_MSG_ACK);
		break;

	case LNET_EVENT_GET:
		/* type is "REPLY" if it's an optimized GET on the passive
		 * side, because an optimized GET will never be committed
		 * for sending, so the message type wouldn't be changed
		 * back to "GET" by lnet_msg_decommit_tx(); see details in
		 * lnet_parse_get() */
		LASSERT(msg->msg_type == LNET_MSG_REPLY ||
			msg->msg_type == LNET_MSG_GET);
		counters->send_length += msg->msg_wanted;
		break;

	case LNET_EVENT_PUT:
		LASSERT(msg->msg_type == LNET_MSG_PUT);
		break;

	case LNET_EVENT_REPLY:
		/* type is "GET" if it's an optimized GET on the active
		 * side; see details in lnet_create_reply_msg() */
		LASSERT(msg->msg_type == LNET_MSG_GET ||
			msg->msg_type == LNET_MSG_REPLY);
		break;
	}

	counters->recv_count++;

incr_stats:
	if (msg->msg_rxpeer)
		lnet_incr_stats(&msg->msg_rxpeer->lpni_stats,
				msg->msg_type,
				LNET_STATS_TYPE_RECV);
	if (msg->msg_rxni)
		lnet_incr_stats(&msg->msg_rxni->ni_stats,
				msg->msg_type,
				LNET_STATS_TYPE_RECV);
	if (ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_REPLY)
		counters->recv_length += msg->msg_wanted;
out:
	lnet_return_rx_credits_locked(msg);
	msg->msg_rx_committed = 0;
}

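/*
 * Fully decommit @msg: always the send side first, then the receive
 * side, temporarily switching the net lock to the rx CPT when it
 * differs from @cpt. The caller's lock on @cpt is held again on return.
 */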
void
lnet_msg_decommit(struct lnet_msg *msg, int cpt, int status)
{
	int cpt2 = cpt;

	LASSERT(msg->msg_tx_committed || msg->msg_rx_committed);
	LASSERT(msg->msg_onactivelist);

	if (msg->msg_tx_committed) { /* always decommit for sending first */
		LASSERT(cpt == msg->msg_tx_cpt);
		lnet_msg_decommit_tx(msg, status);
	}

	if (msg->msg_rx_committed) {
		/* forwarding msg committed for both receiving and sending */
		if (cpt != msg->msg_rx_cpt) {
			lnet_net_unlock(cpt);
			cpt2 = msg->msg_rx_cpt;
			lnet_net_lock(cpt2);
		}
		lnet_msg_decommit_rx(msg, status);
	}

	list_del(&msg->msg_activelist);
	msg->msg_onactivelist = 0;

	the_lnet.ln_counters[cpt2]->msgs_alloc--;

	if (cpt2 != cpt) {
		lnet_net_unlock(cpt2);
		lnet_net_lock(cpt);
	}
}

void
lnet_msg_attach_md(struct lnet_msg *msg, struct lnet_libmd *md,
		   unsigned int offset, unsigned int mlen)
{
	/* NB: @offset and @mlen are only useful for receiving */
	/* Here, we attach the MD to the lnet_msg and mark it busy,
	 * decrementing its threshold. Come what may, the lnet_msg "owns"
	 * the MD until a call to lnet_msg_detach_md or lnet_finalize()
	 * signals completion. */
	LASSERT(!msg->msg_routing);

	msg->msg_md = md;
	if (msg->msg_receiving) { /* committed for receiving */
		msg->msg_offset = offset;
		msg->msg_wanted = mlen;
	}

	md->md_refcount++;
	if (md->md_threshold != LNET_MD_THRESH_INF) {
		LASSERT(md->md_threshold > 0);
		md->md_threshold--;
	}

	/* build umd in event */
	lnet_md2handle(&msg->msg_ev.md_handle, md);
	lnet_md_deconstruct(md, &msg->msg_ev.md);
}

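/*
 * Drop the message's reference on its MD, deliver the completion event
 * if the MD has an event queue, and unlink the MD if nothing else is
 * keeping it alive.
 */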
void
lnet_msg_detach_md(struct lnet_msg *msg, int status)
{
	struct lnet_libmd *md = msg->msg_md;
	int unlink;

	/* Now it's safe to drop my caller's ref */
	md->md_refcount--;
	LASSERT(md->md_refcount >= 0);

	unlink = lnet_md_unlinkable(md);
	if (md->md_eq != NULL) {
		msg->msg_ev.status   = status;
		msg->msg_ev.unlinked = unlink;
		lnet_eq_enqueue_event(md->md_eq, &msg->msg_ev);
	}

	if (unlink)
		lnet_md_unlink(md);

	msg->msg_md = NULL;
}

static int
lnet_complete_msg_locked(struct lnet_msg *msg, int cpt)
{
	struct lnet_handle_wire ack_wmd;
	int rc;
	int status = msg->msg_ev.status;

	LASSERT(msg->msg_onactivelist);

	if (status == 0 && msg->msg_ack) {
		/* Only send an ACK if the PUT completed successfully */

		lnet_msg_decommit(msg, cpt, 0);

		msg->msg_ack = 0;
		lnet_net_unlock(cpt);

		LASSERT(msg->msg_ev.type == LNET_EVENT_PUT);
		LASSERT(!msg->msg_routing);

		ack_wmd = msg->msg_hdr.msg.put.ack_wmd;

		lnet_prep_send(msg, LNET_MSG_ACK, msg->msg_ev.source, 0, 0);

		msg->msg_hdr.msg.ack.dst_wmd = ack_wmd;
		msg->msg_hdr.msg.ack.match_bits = msg->msg_ev.match_bits;
		msg->msg_hdr.msg.ack.mlength = cpu_to_le32(msg->msg_ev.mlength);

		/* NB: we probably want to use NID of msg::msg_from as 3rd
		 * parameter (router NID) if it's a routed message */
		rc = lnet_send(msg->msg_ev.target.nid, msg, LNET_NID_ANY);

		lnet_net_lock(cpt);
		/*
		 * NB: message is committed for sending; we should return
		 * on success because the LND will finalize this message
		 * later.
		 *
		 * Also, there is a possibility that the message is committed
		 * for sending but failed before delivery to the LND,
		 * i.e. ENOMEM; in that case we can't fall through either,
		 * because the CPT for sending can differ from the CPT for
		 * receiving, so we should return to lnet_finalize() to make
		 * sure we are locking the correct partition.
		 */
		return rc;

	} else if (status == 0 &&	/* OK so far */
		   (msg->msg_routing && !msg->msg_sending)) {
		/* not forwarded */
		LASSERT(!msg->msg_receiving);	/* called back recv already */
		lnet_net_unlock(cpt);

		rc = lnet_send(LNET_NID_ANY, msg, LNET_NID_ANY);

		lnet_net_lock(cpt);
		/*
		 * NB: message is committed for sending; we should return
		 * on success because the LND will finalize this message
		 * later.
		 *
		 * Also, there is a possibility that the message is committed
		 * for sending but failed before delivery to the LND,
		 * i.e. ENOMEM; in that case we can't fall through either:
		 * - The rule is that a message must decommit for sending
		 *   first if it's committed for both sending and receiving
		 * - The CPT for sending can differ from the CPT for
		 *   receiving, so we should return to lnet_finalize() to
		 *   make sure we are locking the correct partition.
		 */
		return rc;
	}

	lnet_msg_decommit(msg, cpt, status);
	lnet_msg_free(msg);
	return 0;
}

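/*
 * lnet_finalize() is the common completion entry point: both LNet
 * itself and the LNDs call it when done with a message. As a minimal
 * sketch (names are hypothetical, not a real LND), a typical caller is
 * an LND transmit-completion handler:
 *
 *	static void
 *	kxyz_tx_done(struct kxyz_tx *tx, int status)
 *	{
 *		lnet_finalize(tx->tx_lntmsg, status);
 *	}
 *
 * It must never be called from interrupt context; see the LASSERT below.
 */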
void
lnet_finalize(struct lnet_msg *msg, int status)
{
	struct lnet_msg_container *container;
	int my_slot;
	int cpt;
	int rc;
	int i;

	LASSERT(!in_interrupt());

	if (msg == NULL)
		return;

	msg->msg_ev.status = status;

	if (msg->msg_md != NULL) {
		cpt = lnet_cpt_of_cookie(msg->msg_md->md_lh.lh_cookie);

		lnet_res_lock(cpt);
		lnet_msg_detach_md(msg, status);
		lnet_res_unlock(cpt);
	}

again:
	rc = 0;
	if (!msg->msg_tx_committed && !msg->msg_rx_committed) {
		/* not committed to network yet */
		LASSERT(!msg->msg_onactivelist);
		lnet_msg_free(msg);
		return;
	}

	/*
	 * NB: a routed message can be committed for both receiving and
	 * sending, and we should finalize in LIFO order and keep the
	 * counters correct (finalize sending first, then receiving).
	 */
	cpt = msg->msg_tx_committed ? msg->msg_tx_cpt : msg->msg_rx_cpt;
	lnet_net_lock(cpt);

	container = the_lnet.ln_msg_containers[cpt];
	list_add_tail(&msg->msg_list, &container->msc_finalizing);

	/* Recursion breaker. Don't complete the message here if I am (or
	 * enough other threads are) already completing messages */
	my_slot = -1;
	for (i = 0; i < container->msc_nfinalizers; i++) {
		if (container->msc_finalizers[i] == current)
			break;

		if (my_slot < 0 && container->msc_finalizers[i] == NULL)
			my_slot = i;
	}

	if (i < container->msc_nfinalizers || my_slot < 0) {
		lnet_net_unlock(cpt);
		return;
	}

	container->msc_finalizers[my_slot] = current;

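	/* I own a finalizer slot; drain the whole queue. Threads arriving
	 * meanwhile either claim a free slot and help, or, if all slots
	 * are busy, just queue their message and return. */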
	while (!list_empty(&container->msc_finalizing)) {
		msg = list_entry(container->msc_finalizing.next,
				 struct lnet_msg, msg_list);

		list_del(&msg->msg_list);

		/* NB drops and regains the lnet lock if it actually does
		 * anything, so my finalizing friends can chomp along too */
		rc = lnet_complete_msg_locked(msg, cpt);
		if (rc != 0)
			break;
	}

	if (unlikely(!list_empty(&the_lnet.ln_delay_rules))) {
		lnet_net_unlock(cpt);
		lnet_delay_rule_check();
		lnet_net_lock(cpt);
	}

	container->msc_finalizers[my_slot] = NULL;
	lnet_net_unlock(cpt);

	if (rc != 0)
		goto again;
}
EXPORT_SYMBOL(lnet_finalize);

void
lnet_msg_container_cleanup(struct lnet_msg_container *container)
{
	int count = 0;

	if (container->msc_init == 0)
		return;

	while (!list_empty(&container->msc_active)) {
		struct lnet_msg *msg;

		msg = list_entry(container->msc_active.next,
				 struct lnet_msg, msg_activelist);
		LASSERT(msg->msg_onactivelist);
		msg->msg_onactivelist = 0;
		list_del(&msg->msg_activelist);
		lnet_msg_free(msg);
		count++;
	}

	if (count > 0)
		CERROR("%d active msg on exit\n", count);

	if (container->msc_finalizers != NULL) {
		LIBCFS_FREE(container->msc_finalizers,
			    container->msc_nfinalizers *
			    sizeof(*container->msc_finalizers));
		container->msc_finalizers = NULL;
	}
	container->msc_init = 0;
}

int
lnet_msg_container_setup(struct lnet_msg_container *container, int cpt)
{
	int rc = 0;

	container->msc_init = 1;

	INIT_LIST_HEAD(&container->msc_active);
	INIT_LIST_HEAD(&container->msc_finalizing);

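	/* one finalizer slot per CPU in this partition: this bounds the
	 * number of threads concurrently finalizing messages on this CPT
	 * by its CPU count (with a minimum of one slot) */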
	container->msc_nfinalizers = cfs_cpt_weight(lnet_cpt_table(), cpt);
	if (container->msc_nfinalizers == 0)
		container->msc_nfinalizers = 1;

	LIBCFS_CPT_ALLOC(container->msc_finalizers, lnet_cpt_table(), cpt,
			 container->msc_nfinalizers *
			 sizeof(*container->msc_finalizers));
	if (container->msc_finalizers == NULL) {
		CERROR("Failed to allocate message finalizers\n");
		lnet_msg_container_cleanup(container);
		return -ENOMEM;
	}

	return rc;
}

void
lnet_msg_containers_destroy(void)
{
	struct lnet_msg_container *container;
	int i;

	if (the_lnet.ln_msg_containers == NULL)
		return;

	cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers)
		lnet_msg_container_cleanup(container);

	cfs_percpt_free(the_lnet.ln_msg_containers);
	the_lnet.ln_msg_containers = NULL;
}

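/*
 * Allocate and set up one message container per CPT; any setup failure
 * tears down whatever was already created.
 */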
int
lnet_msg_containers_create(void)
{
	struct lnet_msg_container *container;
	int rc;
	int i;

	the_lnet.ln_msg_containers = cfs_percpt_alloc(lnet_cpt_table(),
						      sizeof(*container));

	if (the_lnet.ln_msg_containers == NULL) {
		CERROR("Failed to allocate cpu-partition data for network\n");
		return -ENOMEM;
	}

	cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers) {
		rc = lnet_msg_container_setup(container, i);
		if (rc != 0) {
			lnet_msg_containers_destroy();
			return rc;
		}
	}

	return 0;
}