 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2014, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Message decoding, parsing and finalizing routines
 */

#define DEBUG_SUBSYSTEM S_LNET

#include <lnet/lib-lnet.h>
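
/*
 * A message is committed to a per-CPT container by lnet_msg_commit()
 * and taken off it again by lnet_msg_decommit(); lnet_finalize() is
 * the completion entry point, which may in turn send an ACK or forward
 * a routed message via lnet_complete_msg_locked().
 */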

void
lnet_build_unlink_event(struct lnet_libmd *md, struct lnet_event *ev)
{
	memset(ev, 0, sizeof(*ev));

	ev->status = 0;
	ev->unlinked = 1;
	ev->type = LNET_EVENT_UNLINK;
	lnet_md_deconstruct(md, &ev->md);
	lnet_md2handle(&ev->md_handle, md);
}

/*
 * Doesn't need any lock; must be called after lnet_commit_md().
 */
void
lnet_build_msg_event(struct lnet_msg *msg, enum lnet_event_kind ev_type)
{
	struct lnet_hdr *hdr = &msg->msg_hdr;
	struct lnet_event *ev = &msg->msg_ev;

	LASSERT(!msg->msg_routing);

	ev->type = ev_type;

	if (ev_type == LNET_EVENT_SEND) {
		/* event for active message */
		ev->target.nid = le64_to_cpu(hdr->dest_nid);
		ev->target.pid = le32_to_cpu(hdr->dest_pid);
		ev->initiator.nid = LNET_NID_ANY;
		ev->initiator.pid = the_lnet.ln_pid;
		ev->source.nid = LNET_NID_ANY;
		ev->source.pid = the_lnet.ln_pid;
		ev->sender = LNET_NID_ANY;
	} else {
		/* event for passive message */
		ev->target.pid = hdr->dest_pid;
		ev->target.nid = hdr->dest_nid;
		ev->initiator.pid = hdr->src_pid;
		/* Multi-Rail: resolve src_nid to "primary" peer NID */
		ev->initiator.nid = msg->msg_initiator;
		/* Multi-Rail: track source NID. */
		ev->source.pid = hdr->src_pid;
		ev->source.nid = hdr->src_nid;
		ev->rlength = hdr->payload_length;
		ev->sender = msg->msg_from;
		ev->mlength = msg->msg_wanted;
		ev->offset = msg->msg_offset;
	}

	switch (ev_type) {
	default:
		LBUG();

	case LNET_EVENT_PUT: /* passive PUT */
		ev->pt_index = hdr->msg.put.ptl_index;
		ev->match_bits = hdr->msg.put.match_bits;
		ev->hdr_data = hdr->msg.put.hdr_data;
		return;

	case LNET_EVENT_GET: /* passive GET */
		ev->pt_index = hdr->msg.get.ptl_index;
		ev->match_bits = hdr->msg.get.match_bits;
		ev->hdr_data = 0;
		return;

	case LNET_EVENT_ACK: /* ACK */
		ev->match_bits = hdr->msg.ack.match_bits;
		ev->mlength = hdr->msg.ack.mlength;
		return;

	case LNET_EVENT_REPLY: /* REPLY */
		return;

	case LNET_EVENT_SEND: /* active message */
		if (msg->msg_type == LNET_MSG_PUT) {
			ev->pt_index = le32_to_cpu(hdr->msg.put.ptl_index);
			ev->match_bits = le64_to_cpu(hdr->msg.put.match_bits);
			ev->offset = le32_to_cpu(hdr->msg.put.offset);
			ev->mlength =
			ev->rlength = le32_to_cpu(hdr->payload_length);
			ev->hdr_data = le64_to_cpu(hdr->msg.put.hdr_data);
		} else {
			LASSERT(msg->msg_type == LNET_MSG_GET);
			ev->pt_index = le32_to_cpu(hdr->msg.get.ptl_index);
			ev->match_bits = le64_to_cpu(hdr->msg.get.match_bits);
			ev->mlength =
			ev->rlength = le32_to_cpu(hdr->msg.get.sink_length);
			ev->offset = le32_to_cpu(hdr->msg.get.src_offset);
			ev->hdr_data = 0;
		}
		return;
	}
}
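
/*
 * For context, a sketch of how these events surface to a user: an LNet
 * client allocates an event queue, and the events built above are
 * delivered to it as messages complete. The snippet below is purely
 * illustrative; the handler name is made up and the LNetMDAttach()/
 * LNetPut() argument lists are abbreviated.
 *
 *	static void my_eq_callback(struct lnet_event *ev)
 *	{
 *		if (ev->type == LNET_EVENT_SEND && ev->status != 0)
 *			CERROR("send failed: %d\n", ev->status);
 *	}
 *	...
 *	LNetEQAlloc(0, my_eq_callback, &eqh);
 *	LNetMDAttach(...);	// MD's event queue handle = eqh
 *	LNetPut(...);		// completion generates LNET_EVENT_SEND/ACK
 */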

void
lnet_msg_commit(struct lnet_msg *msg, int cpt)
{
	struct lnet_msg_container *container = the_lnet.ln_msg_containers[cpt];
	struct lnet_counters *counters = the_lnet.ln_counters[cpt];

	/* a routed message can be committed for both receiving and sending */
	LASSERT(!msg->msg_tx_committed);

	if (msg->msg_sending) {
		LASSERT(!msg->msg_receiving);

		msg->msg_tx_cpt = cpt;
		msg->msg_tx_committed = 1;
		if (msg->msg_rx_committed) { /* routed message REPLY */
			LASSERT(msg->msg_onactivelist);
			return;
		}
	} else {
		LASSERT(!msg->msg_sending);
		msg->msg_rx_cpt = cpt;
		msg->msg_rx_committed = 1;
	}

	LASSERT(!msg->msg_onactivelist);
	msg->msg_onactivelist = 1;
	list_add(&msg->msg_activelist, &container->msc_active);

	counters->msgs_alloc++;
	if (counters->msgs_alloc > counters->msgs_max)
		counters->msgs_max = counters->msgs_alloc;
}
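
/*
 * Decommit the send side of a message: classify it by event type to
 * update the routing/send counters, bump the per-peer and per-NI send
 * stats, then release the tx credits it was holding.
 */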
static void
lnet_msg_decommit_tx(struct lnet_msg *msg, int status)
{
	struct lnet_counters *counters;
	struct lnet_event *ev = &msg->msg_ev;

	LASSERT(msg->msg_tx_committed);
	if (status != 0)
		goto out;

	counters = the_lnet.ln_counters[msg->msg_tx_cpt];
	switch (ev->type) {
	default: /* routed message */
		LASSERT(msg->msg_routing);
		LASSERT(msg->msg_rx_committed);
		LASSERT(ev->type == 0);

		counters->route_length += msg->msg_len;
		counters->route_count++;
		goto incr_stats;

	case LNET_EVENT_PUT:
		/* should have been decommitted */
		LASSERT(!msg->msg_rx_committed);
		/* overwritten while sending ACK */
		LASSERT(msg->msg_type == LNET_MSG_ACK);
		msg->msg_type = LNET_MSG_PUT; /* fix type */
		break;

	case LNET_EVENT_SEND:
		LASSERT(!msg->msg_rx_committed);
		if (msg->msg_type == LNET_MSG_PUT)
			counters->send_length += msg->msg_len;
		break;

	case LNET_EVENT_GET:
		LASSERT(msg->msg_rx_committed);
		/* overwritten while sending the reply; we should never get
		 * here for an optimized GET */
		LASSERT(msg->msg_type == LNET_MSG_REPLY);
		msg->msg_type = LNET_MSG_GET; /* fix type */
		break;
	}

	counters->send_count++;

incr_stats:
	if (msg->msg_txpeer)
		atomic_inc(&msg->msg_txpeer->lpni_stats.send_count);
	if (msg->msg_txni)
		atomic_inc(&msg->msg_txni->ni_stats.send_count);
out:
	lnet_return_tx_credits_locked(msg);
	msg->msg_tx_committed = 0;
}

static void
lnet_msg_decommit_rx(struct lnet_msg *msg, int status)
{
	struct lnet_counters *counters;
	struct lnet_event *ev = &msg->msg_ev;

	LASSERT(!msg->msg_tx_committed); /* decommitted or never committed */
	LASSERT(msg->msg_rx_committed);

	if (status != 0)
		goto out;

	counters = the_lnet.ln_counters[msg->msg_rx_cpt];
	switch (ev->type) {
	default:
		LASSERT(ev->type == 0);
		LASSERT(msg->msg_routing);
		goto incr_stats;

	case LNET_EVENT_ACK:
		LASSERT(msg->msg_type == LNET_MSG_ACK);
		break;

	case LNET_EVENT_GET:
		/* type is "REPLY" if it's an optimized GET on the passive
		 * side, because an optimized GET is never committed for
		 * sending, so the message type wouldn't be changed back to
		 * "GET" by lnet_msg_decommit_tx(); see lnet_parse_get() for
		 * details */
		LASSERT(msg->msg_type == LNET_MSG_REPLY ||
			msg->msg_type == LNET_MSG_GET);
		counters->send_length += msg->msg_wanted;
		break;

	case LNET_EVENT_PUT:
		LASSERT(msg->msg_type == LNET_MSG_PUT);
		break;

	case LNET_EVENT_REPLY:
		/* type is "GET" if it's an optimized GET on the active side;
		 * see lnet_create_reply_msg() for details */
		LASSERT(msg->msg_type == LNET_MSG_GET ||
			msg->msg_type == LNET_MSG_REPLY);
		break;
	}

	counters->recv_count++;

incr_stats:
	if (msg->msg_rxpeer)
		atomic_inc(&msg->msg_rxpeer->lpni_stats.recv_count);
	if (msg->msg_rxni)
		atomic_inc(&msg->msg_rxni->ni_stats.recv_count);
	if (ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_REPLY)
		counters->recv_length += msg->msg_wanted;
out:
	lnet_return_rx_credits_locked(msg);
	msg->msg_rx_committed = 0;
}
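
/*
 * Fully decommit a message: the send side first (see the LIFO rule in
 * lnet_finalize()), then the receive side, taking the receive CPT's
 * lock if it differs, and finally drop the message from the active
 * list. Returns holding the lock on the @cpt it was called with.
 */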
void
lnet_msg_decommit(struct lnet_msg *msg, int cpt, int status)
{
	int cpt2 = cpt;

	LASSERT(msg->msg_tx_committed || msg->msg_rx_committed);
	LASSERT(msg->msg_onactivelist);

	if (msg->msg_tx_committed) { /* always decommit for sending first */
		LASSERT(cpt == msg->msg_tx_cpt);
		lnet_msg_decommit_tx(msg, status);
	}

	if (msg->msg_rx_committed) {
		/* a forwarded message is committed for both receiving and
		 * sending */
		if (cpt != msg->msg_rx_cpt) {
			lnet_net_unlock(cpt);
			cpt2 = msg->msg_rx_cpt;
			lnet_net_lock(cpt2);
		}
		lnet_msg_decommit_rx(msg, status);
	}

	list_del(&msg->msg_activelist);
	msg->msg_onactivelist = 0;

	the_lnet.ln_counters[cpt2]->msgs_alloc--;

	if (cpt2 != cpt) {
		lnet_net_unlock(cpt2);
		lnet_net_lock(cpt);
	}
}
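
/*
 * Bind an MD to a message for the duration of the operation; both the
 * send paths (LNetPut/LNetGet) and the receive-side match code attach
 * MDs this way.
 */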
void
lnet_msg_attach_md(struct lnet_msg *msg, struct lnet_libmd *md,
		   unsigned int offset, unsigned int mlen)
{
	/* NB: @offset and @mlen are only useful for receiving */
	/* Here we attach the MD to the lnet_msg, mark it busy and
	 * decrement its threshold. Come what may, the lnet_msg "owns"
	 * the MD until a call to lnet_msg_detach_md or lnet_finalize()
	 * signals completion. */
	LASSERT(!msg->msg_routing);

	msg->msg_md = md;
	if (msg->msg_receiving) { /* committed for receiving */
		msg->msg_offset = offset;
		msg->msg_wanted = mlen;
	}

	md->md_refcount++;
	if (md->md_threshold != LNET_MD_THRESH_INF) {
		LASSERT(md->md_threshold > 0);
		md->md_threshold--;
	}

	/* build umd in event */
	lnet_md2handle(&msg->msg_ev.md_handle, md);
	lnet_md_deconstruct(md, &msg->msg_ev.md);
}

void
lnet_msg_detach_md(struct lnet_msg *msg, int status)
{
	struct lnet_libmd *md = msg->msg_md;
	int unlink;

	/* Now it's safe to drop my caller's ref */
	md->md_refcount--;
	LASSERT(md->md_refcount >= 0);

	unlink = lnet_md_unlinkable(md);
	if (md->md_eq != NULL) {
		msg->msg_ev.status = status;
		msg->msg_ev.unlinked = unlink;
		lnet_eq_enqueue_event(md->md_eq, &msg->msg_ev);
	}

	if (unlink)
		lnet_md_unlink(md);

	msg->msg_md = NULL;
}

static int
lnet_complete_msg_locked(struct lnet_msg *msg, int cpt)
{
	struct lnet_handle_wire ack_wmd;
	int rc;
	int status = msg->msg_ev.status;

	LASSERT(msg->msg_onactivelist);

	if (status == 0 && msg->msg_ack) {
		/* Only send an ACK if the PUT completed successfully */

		lnet_msg_decommit(msg, cpt, 0);

		msg->msg_ack = 0;
		lnet_net_unlock(cpt);

		LASSERT(msg->msg_ev.type == LNET_EVENT_PUT);
		LASSERT(!msg->msg_routing);

		ack_wmd = msg->msg_hdr.msg.put.ack_wmd;

		lnet_prep_send(msg, LNET_MSG_ACK, msg->msg_ev.source, 0, 0);

		msg->msg_hdr.msg.ack.dst_wmd = ack_wmd;
		msg->msg_hdr.msg.ack.match_bits = msg->msg_ev.match_bits;
		msg->msg_hdr.msg.ack.mlength = cpu_to_le32(msg->msg_ev.mlength);

		/* NB: we probably want to use the NID of msg::msg_from as
		 * the 3rd parameter (router NID) if it's a routed message */
		rc = lnet_send(msg->msg_ev.target.nid, msg, LNET_NID_ANY);

		lnet_net_lock(cpt);
		/*
		 * NB: the message is committed for sending; on success we
		 * must return because the LND will finalize it later.
		 *
		 * There is also a possibility that the message was committed
		 * for sending but failed before it was handed to the LND
		 * (e.g. ENOMEM). In that case we can't fall through either,
		 * because the CPT for sending can differ from the CPT for
		 * receiving, so we must return to lnet_finalize() to make
		 * sure we lock the correct partition.
		 */
		return rc;

	} else if (status == 0 &&	/* OK so far */
		   (msg->msg_routing && !msg->msg_sending)) {
		/* not forwarded yet */
		LASSERT(!msg->msg_receiving);	/* called back recv already */
		lnet_net_unlock(cpt);

		rc = lnet_send(LNET_NID_ANY, msg, LNET_NID_ANY);

		lnet_net_lock(cpt);
		/*
		 * NB: the message is committed for sending; on success we
		 * must return because the LND will finalize it later.
		 *
		 * There is also a possibility that the message was committed
		 * for sending but failed before it was handed to the LND
		 * (e.g. ENOMEM). In that case we can't fall through either:
		 * - the rule is that a message must be decommitted for
		 *   sending first if it's committed for both sending and
		 *   receiving;
		 * - the CPT for sending can differ from the CPT for
		 *   receiving, so we must return to lnet_finalize() to make
		 *   sure we lock the correct partition.
		 */
		return rc;
	}

	lnet_msg_decommit(msg, cpt, status);
	lnet_msg_free(msg);
	return 0;
}
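
/*
 * lnet_finalize() is the completion entry point for LNDs and for LNet
 * itself. A typical LND calls it from its transmit/receive completion
 * handlers; the sketch below is illustrative only (the LND type and
 * field names are made up):
 *
 *	static void my_lnd_tx_done(struct my_lnd_tx *tx, int rc)
 *	{
 *		// finalizing may send an ACK or forward a routed message
 *		lnet_finalize(tx->tx_lntmsg, rc);
 *	}
 */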
void
lnet_finalize(struct lnet_msg *msg, int status)
{
	struct lnet_msg_container *container;
	int my_slot;
	int cpt;
	int rc;
	int i;

	LASSERT(!in_interrupt());

	if (msg == NULL)
		return;

	msg->msg_ev.status = status;

	if (msg->msg_md != NULL) {
		cpt = lnet_cpt_of_cookie(msg->msg_md->md_lh.lh_cookie);

		lnet_res_lock(cpt);
		lnet_msg_detach_md(msg, status);
		lnet_res_unlock(cpt);
	}

again:
	rc = 0;
	if (!msg->msg_tx_committed && !msg->msg_rx_committed) {
		/* not committed to the network yet */
		LASSERT(!msg->msg_onactivelist);
		lnet_msg_free(msg);
		return;
	}

	/*
	 * NB: a routed message can be committed for both receiving and
	 * sending; finalize in LIFO order to keep the counters correct
	 * (finalize sending first, then finalize receiving).
	 */
	cpt = msg->msg_tx_committed ? msg->msg_tx_cpt : msg->msg_rx_cpt;
	lnet_net_lock(cpt);

	container = the_lnet.ln_msg_containers[cpt];
	list_add_tail(&msg->msg_list, &container->msc_finalizing);

	/* Recursion breaker. Don't complete the message here if I am (or
	 * enough other threads are) already completing messages */

	my_slot = -1;
	for (i = 0; i < container->msc_nfinalizers; i++) {
		if (container->msc_finalizers[i] == current)
			break;

		if (my_slot < 0 && container->msc_finalizers[i] == NULL)
			my_slot = i;
	}

	if (i < container->msc_nfinalizers || my_slot < 0) {
		lnet_net_unlock(cpt);
		return;
	}

	container->msc_finalizers[my_slot] = current;
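
	/*
	 * This thread owns a finalizer slot: drain the finalizing queue
	 * for this CPT, completing messages queued by any thread, until
	 * the queue is empty or a completion asks for a restart (rc != 0).
	 */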
	while (!list_empty(&container->msc_finalizing)) {
		msg = list_entry(container->msc_finalizing.next,
				 struct lnet_msg, msg_list);

		list_del(&msg->msg_list);

		/* NB: this drops and regains the LNet lock if it actually
		 * does anything, so my finalizing friends can chomp along
		 * too */
		rc = lnet_complete_msg_locked(msg, cpt);
		if (rc != 0)
			break;
	}

	if (unlikely(!list_empty(&the_lnet.ln_delay_rules))) {
		lnet_net_unlock(cpt);
		lnet_delay_rule_check();
		lnet_net_lock(cpt);
	}

	container->msc_finalizers[my_slot] = NULL;
	lnet_net_unlock(cpt);

	if (rc != 0)
		goto again;
}
EXPORT_SYMBOL(lnet_finalize);
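
/*
 * Message containers are created per CPT at startup and torn down at
 * shutdown: lnet_msg_containers_create() runs lnet_msg_container_setup()
 * on each partition; lnet_msg_containers_destroy() undoes it with
 * lnet_msg_container_cleanup().
 */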
void
lnet_msg_container_cleanup(struct lnet_msg_container *container)
{
	int count = 0;

	if (container->msc_init == 0)
		return;

	while (!list_empty(&container->msc_active)) {
		struct lnet_msg *msg;

		msg = list_entry(container->msc_active.next,
				 struct lnet_msg, msg_activelist);
		LASSERT(msg->msg_onactivelist);
		msg->msg_onactivelist = 0;
		list_del(&msg->msg_activelist);
		lnet_msg_free(msg);
		count++;
	}

	if (count > 0)
		CERROR("%d active msg on exit\n", count);

	if (container->msc_finalizers != NULL) {
		LIBCFS_FREE(container->msc_finalizers,
			    container->msc_nfinalizers *
			    sizeof(*container->msc_finalizers));
		container->msc_finalizers = NULL;
	}
	container->msc_init = 0;
}

int
lnet_msg_container_setup(struct lnet_msg_container *container, int cpt)
{
	int rc = 0;

	container->msc_init = 1;

	INIT_LIST_HEAD(&container->msc_active);
	INIT_LIST_HEAD(&container->msc_finalizing);

	/* number of CPUs in this partition */
	container->msc_nfinalizers = cfs_cpt_weight(lnet_cpt_table(), cpt);
	if (container->msc_nfinalizers == 0)
		container->msc_nfinalizers = 1;

	LIBCFS_CPT_ALLOC(container->msc_finalizers, lnet_cpt_table(), cpt,
			 container->msc_nfinalizers *
			 sizeof(*container->msc_finalizers));

	if (container->msc_finalizers == NULL) {
		CERROR("Failed to allocate message finalizers\n");
		lnet_msg_container_cleanup(container);
		return -ENOMEM;
	}

	return rc;
}

void
lnet_msg_containers_destroy(void)
{
	struct lnet_msg_container *container;
	int i;

	if (the_lnet.ln_msg_containers == NULL)
		return;

	cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers)
		lnet_msg_container_cleanup(container);

	cfs_percpt_free(the_lnet.ln_msg_containers);
	the_lnet.ln_msg_containers = NULL;
}

int
lnet_msg_containers_create(void)
{
	struct lnet_msg_container *container;
	int rc;
	int i;

	the_lnet.ln_msg_containers = cfs_percpt_alloc(lnet_cpt_table(),
						      sizeof(*container));
	if (the_lnet.ln_msg_containers == NULL) {
		CERROR("Failed to allocate cpu-partition data for network\n");
		return -ENOMEM;
	}

	cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers) {
		rc = lnet_msg_container_setup(container, i);
		if (rc != 0) {
			lnet_msg_containers_destroy();
			return rc;
		}
	}

	return 0;
}