/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Message decoding, parsing and finalizing routines
 */
#define DEBUG_SUBSYSTEM S_LNET

#include <lnet/lib-lnet.h>
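
/* Build an UNLINK event describing @md.  The caller is responsible for
 * actually enqueueing it; all we do here is fill in *ev from the MD. */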
void
lnet_build_unlink_event (lnet_libmd_t *md, lnet_event_t *ev)
{
        memset(ev, 0, sizeof(*ev));

        ev->status   = 0;
        ev->unlinked = 1;
        ev->type     = LNET_EVENT_UNLINK;
        lnet_md_deconstruct(md, &ev->md);
        lnet_md2handle(&ev->md_handle, md);
}
/*
 * Doesn't need any lock; must be called after lnet_commit_md.
 */
void
lnet_build_msg_event(lnet_msg_t *msg, lnet_event_kind_t ev_type)
{
        lnet_hdr_t   *hdr = &msg->msg_hdr;
        lnet_event_t *ev  = &msg->msg_ev;

        LASSERT(!msg->msg_routing);

        ev->type = ev_type;
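
        /* NB: for an active message the header was packed in wire
         * (little-endian) byte order by lnet_prep_send(), hence the
         * le*_to_cpu() conversions below; for a passive message
         * lnet_parse() has already converted the header to host order. */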
        if (ev_type == LNET_EVENT_SEND) {
                /* event for active message */
                ev->target.nid    = le64_to_cpu(hdr->dest_nid);
                ev->target.pid    = le32_to_cpu(hdr->dest_pid);
                ev->initiator.nid = LNET_NID_ANY;
                ev->initiator.pid = the_lnet.ln_pid;
                ev->sender        = LNET_NID_ANY;
        } else {
                /* event for passive message */
                ev->target.pid    = hdr->dest_pid;
                ev->target.nid    = hdr->dest_nid;
                ev->initiator.pid = hdr->src_pid;
                ev->initiator.nid = hdr->src_nid;
                ev->rlength       = hdr->payload_length;
                ev->sender        = msg->msg_from;
                ev->mlength       = msg->msg_wanted;
                ev->offset        = msg->msg_offset;
        }
        switch (ev_type) {
        default:
                LBUG();

        case LNET_EVENT_PUT: /* passive PUT */
                ev->pt_index   = hdr->msg.put.ptl_index;
                ev->match_bits = hdr->msg.put.match_bits;
                ev->hdr_data   = hdr->msg.put.hdr_data;
                return;
        case LNET_EVENT_GET: /* passive GET */
                ev->pt_index   = hdr->msg.get.ptl_index;
                ev->match_bits = hdr->msg.get.match_bits;
                ev->hdr_data   = 0;
                return;
        case LNET_EVENT_ACK: /* ACK */
                ev->match_bits = hdr->msg.ack.match_bits;
                ev->mlength    = hdr->msg.ack.mlength;
                return;

        case LNET_EVENT_REPLY: /* REPLY */
                return;
        case LNET_EVENT_SEND: /* active message */
                if (msg->msg_type == LNET_MSG_PUT) {
                        ev->pt_index   = le32_to_cpu(hdr->msg.put.ptl_index);
                        ev->match_bits = le64_to_cpu(hdr->msg.put.match_bits);
                        ev->offset     = le32_to_cpu(hdr->msg.put.offset);
                        ev->mlength    =
                        ev->rlength    = le32_to_cpu(hdr->payload_length);
                        ev->hdr_data   = le64_to_cpu(hdr->msg.put.hdr_data);
                } else {
                        LASSERT(msg->msg_type == LNET_MSG_GET);
                        ev->pt_index   = le32_to_cpu(hdr->msg.get.ptl_index);
                        ev->match_bits = le64_to_cpu(hdr->msg.get.match_bits);
                        ev->mlength    =
                        ev->rlength    = le32_to_cpu(hdr->msg.get.sink_length);
                        ev->offset     = le32_to_cpu(hdr->msg.get.src_offset);
                        ev->hdr_data   = 0;
                }
                return;
        }
}
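
/* Commit @msg to the network on partition @cpt: link it onto the
 * per-CPT active list and update the message counters.  NB a routed
 * message may be committed twice, first for receiving and later for
 * sending. */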
void
lnet_msg_commit(lnet_msg_t *msg, int cpt)
{
        struct lnet_msg_container *container = the_lnet.ln_msg_containers[cpt];
        lnet_counters_t           *counters  = the_lnet.ln_counters[cpt];
        /* a routed message can be committed for both receiving and sending */
        LASSERT(!msg->msg_tx_committed);

        if (msg->msg_sending) {
                LASSERT(!msg->msg_receiving);

                msg->msg_tx_cpt = cpt;
                msg->msg_tx_committed = 1;

                if (msg->msg_rx_committed) { /* routed message REPLY */
                        LASSERT(msg->msg_onactivelist);
                        return;
                }
        } else {
                LASSERT(!msg->msg_sending);
                msg->msg_rx_cpt = cpt;
                msg->msg_rx_committed = 1;
        }

        LASSERT(!msg->msg_onactivelist);
        msg->msg_onactivelist = 1;
        cfs_list_add(&msg->msg_activelist, &container->msc_active);

        counters->msgs_alloc++;
        if (counters->msgs_alloc > counters->msgs_max)
                counters->msgs_max = counters->msgs_alloc;
}
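
/* Undo the send-side commit of @msg: update the send/route counters on
 * success (nothing is counted for a failed message) and release any TX
 * credits the message is holding. */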
static void
lnet_msg_decommit_tx(lnet_msg_t *msg, int status)
{
        lnet_counters_t *counters;
        lnet_event_t    *ev = &msg->msg_ev;

        LASSERT(msg->msg_tx_committed);
        if (status != 0)
                goto out;

        counters = the_lnet.ln_counters[msg->msg_tx_cpt];
        switch (ev->type) {
        default: /* routed message */
                LASSERT(msg->msg_routing);
                LASSERT(msg->msg_rx_committed);
                LASSERT(ev->type == 0);

                counters->route_length += msg->msg_len;
                counters->route_count++;
                goto out;
        case LNET_EVENT_PUT:
                /* should have been decommitted */
                LASSERT(!msg->msg_rx_committed);
                /* overwritten while sending ACK */
                LASSERT(msg->msg_type == LNET_MSG_ACK);
                msg->msg_type = LNET_MSG_PUT; /* fix type */
                break;
        case LNET_EVENT_SEND:
                LASSERT(!msg->msg_rx_committed);
                if (msg->msg_type == LNET_MSG_PUT)
                        counters->send_length += msg->msg_len;
                break;
        case LNET_EVENT_GET:
                LASSERT(msg->msg_rx_committed);
                /* overwritten while sending reply; we should never be
                 * here for an optimized GET */
                LASSERT(msg->msg_type == LNET_MSG_REPLY);
                msg->msg_type = LNET_MSG_GET; /* fix type */
                break;
        }
        counters->send_count++;
 out:
        lnet_return_tx_credits_locked(msg);
        msg->msg_tx_committed = 0;
}
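
/* Undo the receive-side commit of @msg: update the receive counters on
 * success and release any RX (router buffer) credits it is holding. */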
static void
lnet_msg_decommit_rx(lnet_msg_t *msg, int status)
{
        lnet_counters_t *counters;
        lnet_event_t    *ev = &msg->msg_ev;

        LASSERT(!msg->msg_tx_committed); /* decommitted or never committed */
        LASSERT(msg->msg_rx_committed);

        if (status != 0)
                goto out;

        counters = the_lnet.ln_counters[msg->msg_rx_cpt];
        switch (ev->type) {
        default:
                LASSERT(ev->type == 0);
                LASSERT(msg->msg_routing);
                goto out;

        case LNET_EVENT_ACK:
                LASSERT(msg->msg_type == LNET_MSG_ACK);
                break;
        case LNET_EVENT_GET:
                /* type is "REPLY" if it's an optimized GET on the passive
                 * side, because an optimized GET is never committed for
                 * sending, so the message type wouldn't be changed back to
                 * "GET" by lnet_msg_decommit_tx(); see details in
                 * lnet_parse_get() */
                LASSERT(msg->msg_type == LNET_MSG_REPLY ||
                        msg->msg_type == LNET_MSG_GET);
                counters->send_length += msg->msg_wanted;
                break;
        case LNET_EVENT_PUT:
                LASSERT(msg->msg_type == LNET_MSG_PUT);
                break;
        case LNET_EVENT_REPLY:
                /* type is "GET" if it's an optimized GET on the active
                 * side; see details in lnet_create_reply_msg() */
                LASSERT(msg->msg_type == LNET_MSG_GET ||
                        msg->msg_type == LNET_MSG_REPLY);
                break;
        }
        counters->recv_count++;
        if (ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_REPLY)
                counters->recv_length += msg->msg_wanted;

 out:
        lnet_return_rx_credits_locked(msg);
        msg->msg_rx_committed = 0;
}
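
/* Fully decommit @msg.  Called with the lnet_net_lock for @cpt held;
 * sending is always decommitted before receiving so the counters stay
 * consistent, and the lock is switched to the RX partition (and back)
 * if the two CPTs differ. */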
void
lnet_msg_decommit(lnet_msg_t *msg, int cpt, int status)
{
        int     cpt2 = cpt;

        LASSERT(msg->msg_tx_committed || msg->msg_rx_committed);
        LASSERT(msg->msg_onactivelist);
        if (msg->msg_tx_committed) { /* always decommit for sending first */
                LASSERT(cpt == msg->msg_tx_cpt);
                lnet_msg_decommit_tx(msg, status);
        }

        if (msg->msg_rx_committed) {
                /* forwarding msg committed for both receiving and sending */
                if (cpt != msg->msg_rx_cpt) {
                        lnet_net_unlock(cpt);
                        cpt2 = msg->msg_rx_cpt;
                        lnet_net_lock(cpt2);
                }
                lnet_msg_decommit_rx(msg, status);
        }
        cfs_list_del(&msg->msg_activelist);
        msg->msg_onactivelist = 0;

        the_lnet.ln_counters[cpt2]->msgs_alloc--;

        if (cpt2 != cpt) {
                lnet_net_unlock(cpt2);
                lnet_net_lock(cpt);
        }
}
void
lnet_msg_attach_md(lnet_msg_t *msg, lnet_libmd_t *md,
                   unsigned int offset, unsigned int mlen)
{
        /* NB: @offset and @mlen are only useful for receiving */
        /* Here, we attach the MD to the lnet_msg, mark it busy and
         * decrement its threshold.  Come what may, the lnet_msg "owns"
         * the MD until a call to lnet_msg_detach_md() or lnet_finalize()
         * signals completion. */
        LASSERT(!msg->msg_routing);
        msg->msg_md = md;
        if (msg->msg_receiving) { /* committed for receiving */
                msg->msg_offset = offset;
                msg->msg_wanted = mlen;
        }

        md->md_refcount++;
        if (md->md_threshold != LNET_MD_THRESH_INF) {
                LASSERT(md->md_threshold > 0);
                md->md_threshold--;
        }

        /* build umd in event */
        lnet_md2handle(&msg->msg_ev.md_handle, md);
        lnet_md_deconstruct(md, &msg->msg_ev.md);
}
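
/* Detach the MD from @msg: drop the busy reference taken by
 * lnet_msg_attach_md(), deliver the completion event if the MD has an
 * EQ, and unlink the MD if it has become unlinkable. */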
void
lnet_msg_detach_md(lnet_msg_t *msg, int status)
{
        lnet_libmd_t *md = msg->msg_md;
        int           unlink;

        /* Now it's safe to drop my caller's ref */
        md->md_refcount--;
        LASSERT(md->md_refcount >= 0);

        unlink = lnet_md_unlinkable(md);
        if (md->md_eq != NULL) {
                msg->msg_ev.status   = status;
                msg->msg_ev.unlinked = unlink;
                lnet_eq_enqueue_event(md->md_eq, &msg->msg_ev);
        }

        if (unlink)
                lnet_md_unlink(md);

        msg->msg_md = NULL;
}
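
/* Complete one message under the lnet_net_lock for @cpt.  Returns
 * non-zero if the message was committed for sending but failed before
 * reaching the LND, in which case the caller must restart finalization
 * in lnet_finalize(), possibly on a different partition. */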
static int
lnet_complete_msg_locked(lnet_msg_t *msg, int cpt)
{
        lnet_handle_wire_t ack_wmd;
        int                rc;
        int                status = msg->msg_ev.status;
        LASSERT (msg->msg_onactivelist);

        if (status == 0 && msg->msg_ack) {
                /* Only send an ACK if the PUT completed successfully */
                lnet_msg_decommit(msg, cpt, 0);

                msg->msg_ack = 0;
                lnet_net_unlock(cpt);

                LASSERT(msg->msg_ev.type == LNET_EVENT_PUT);
                LASSERT(!msg->msg_routing);
                ack_wmd = msg->msg_hdr.msg.put.ack_wmd;

                lnet_prep_send(msg, LNET_MSG_ACK, msg->msg_ev.initiator, 0, 0);

                msg->msg_hdr.msg.ack.dst_wmd    = ack_wmd;
                msg->msg_hdr.msg.ack.match_bits = msg->msg_ev.match_bits;
                msg->msg_hdr.msg.ack.mlength    = cpu_to_le32(msg->msg_ev.mlength);

                /* NB: we probably want to use the NID of msg::msg_from as
                 * the 3rd parameter (router NID) if it's a routed message */
                rc = lnet_send(msg->msg_ev.target.nid, msg, LNET_NID_ANY);

                lnet_net_lock(cpt);
                /*
                 * NB: the message is committed for sending; on success we
                 * must return because the LND will finalize it later.
                 *
                 * It is also possible that the message was committed for
                 * sending and then failed before reaching the LND, e.g. on
                 * ENOMEM.  We can't fall through in that case either,
                 * because the CPT for sending can differ from the CPT for
                 * receiving, so we must return to lnet_finalize() to make
                 * sure we are locking the correct partition.
                 */
                return rc;
        } else if (status == 0 &&               /* OK so far */
                   (msg->msg_routing && !msg->msg_sending)) {
                /* not forwarded yet */
                LASSERT(!msg->msg_receiving);   /* called back recv already */
                lnet_net_unlock(cpt);

                rc = lnet_send(LNET_NID_ANY, msg, LNET_NID_ANY);

                lnet_net_lock(cpt);
                /*
                 * NB: the message is committed for sending; on success we
                 * must return because the LND will finalize it later.
                 *
                 * It is also possible that the message was committed for
                 * sending and then failed before reaching the LND, e.g. on
                 * ENOMEM.  We can't fall through in that case either:
                 * - the rule is that a message must be decommitted for
                 *   sending first if it's committed for both sending and
                 *   receiving
                 * - the CPT for sending can differ from the CPT for
                 *   receiving, so we must return to lnet_finalize() to
                 *   make sure we are locking the correct partition.
                 */
                return rc;
        }
        lnet_msg_decommit(msg, cpt, status);
        lnet_msg_free_locked(msg);
        return 0;
}
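
/* Finalize @msg with completion status @status: detach (and possibly
 * unlink) its MD, then decommit it and release its credits.  NB sending
 * an ACK or forwarding a routed message from within
 * lnet_complete_msg_locked() can re-enter lnet_finalize(), hence the
 * finalizer slots used below as a recursion breaker. */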
void
lnet_finalize (lnet_ni_t *ni, lnet_msg_t *msg, int status)
{
        struct lnet_msg_container *container;
        int                        my_slot;
        int                        cpt;
        int                        rc;
        int                        i;

        LASSERT (!cfs_in_interrupt ());

        if (msg == NULL)
                return;
#if 0
        CDEBUG(D_WARNING, "%s msg->%s Flags:%s%s%s%s%s%s%s%s%s%s%s txp %s rxp %s\n",
               lnet_msgtyp2str(msg->msg_type), libcfs_id2str(msg->msg_target),
               msg->msg_target_is_router ? "t" : "",
               msg->msg_routing ? "X" : "",
               msg->msg_ack ? "A" : "",
               msg->msg_sending ? "S" : "",
               msg->msg_receiving ? "R" : "",
               msg->msg_delayed ? "d" : "",
               msg->msg_txcredit ? "C" : "",
               msg->msg_peertxcredit ? "c" : "",
               msg->msg_rtrcredit ? "F" : "",
               msg->msg_peerrtrcredit ? "f" : "",
               msg->msg_onactivelist ? "!" : "",
               msg->msg_txpeer == NULL ? "<none>" : libcfs_nid2str(msg->msg_txpeer->lp_nid),
               msg->msg_rxpeer == NULL ? "<none>" : libcfs_nid2str(msg->msg_rxpeer->lp_nid));
#endif
        msg->msg_ev.status = status;
        if (msg->msg_md != NULL) {
                cpt = lnet_cpt_of_cookie(msg->msg_md->md_lh.lh_cookie);

                lnet_res_lock(cpt);
                lnet_msg_detach_md(msg, status);
                lnet_res_unlock(cpt);
        }
 again:
        rc = 0;
        if (!msg->msg_tx_committed && !msg->msg_rx_committed) {
                /* not committed to network yet */
                LASSERT(!msg->msg_onactivelist);
                lnet_msg_free(msg);
                return;
        }
        /*
         * NB: a routed message can be committed for both receiving and
         * sending; we should finalize in LIFO order and keep the counters
         * correct (finalize sending first, then finalize receiving).
         */
        cpt = msg->msg_tx_committed ? msg->msg_tx_cpt : msg->msg_rx_cpt;
        lnet_net_lock(cpt);
        container = the_lnet.ln_msg_containers[cpt];
        cfs_list_add_tail(&msg->msg_list, &container->msc_finalizing);
        /* Recursion breaker.  Don't complete the message here if I am (or
         * enough other threads are) already completing messages */

#ifdef __KERNEL__
        my_slot = -1;
        for (i = 0; i < container->msc_nfinalizers; i++) {
                if (container->msc_finalizers[i] == cfs_current())
                        break;

                if (my_slot < 0 && container->msc_finalizers[i] == NULL)
                        my_slot = i;
        }

        if (i < container->msc_nfinalizers || my_slot < 0) {
                lnet_net_unlock(cpt);
                return;
        }

        container->msc_finalizers[my_slot] = cfs_current();
#else
        LASSERT(container->msc_nfinalizers == 1);
        if (container->msc_finalizers[0] != NULL) {
                lnet_net_unlock(cpt);
                return;
        }

        my_slot = i = 0;
        container->msc_finalizers[0] = (struct lnet_msg_container *)1;
#endif
        while (!cfs_list_empty(&container->msc_finalizing)) {
                msg = cfs_list_entry(container->msc_finalizing.next,
                                     lnet_msg_t, msg_list);

                cfs_list_del(&msg->msg_list);

                /* NB: drops and regains the lnet lock if it actually does
                 * anything, so my finalizing friends can chomp along too */
                rc = lnet_complete_msg_locked(msg, cpt);
                if (rc != 0)
                        break;
        }
        container->msc_finalizers[my_slot] = NULL;
        lnet_net_unlock(cpt);

        if (rc != 0)
                goto again;
}
EXPORT_SYMBOL(lnet_finalize);
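
/* Tear down a message container: free any messages still on its active
 * list (complaining if there are any, since they should all have been
 * finalized by now), then release the finalizer array and freelist. */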
void
lnet_msg_container_cleanup(struct lnet_msg_container *container)
{
        int     count = 0;

        if (container->msc_init == 0)
                return;
        while (!cfs_list_empty(&container->msc_active)) {
                lnet_msg_t *msg = cfs_list_entry(container->msc_active.next,
                                                 lnet_msg_t, msg_activelist);

                LASSERT(msg->msg_onactivelist);
                msg->msg_onactivelist = 0;
                cfs_list_del(&msg->msg_activelist);
                lnet_msg_free(msg);
                count++;
        }

        if (count > 0)
                CERROR("%d active msg on exit\n", count);
        if (container->msc_finalizers != NULL) {
                LIBCFS_FREE(container->msc_finalizers,
                            container->msc_nfinalizers *
                            sizeof(*container->msc_finalizers));
                container->msc_finalizers = NULL;
        }
#ifdef LNET_USE_LIB_FREELIST
        lnet_freelist_fini(&container->msc_freelist);
#endif
        container->msc_init = 0;
}
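
/* Initialize the message container for partition @cpt: the active and
 * finalizing lists, the optional message freelist, and the finalizer
 * slot array.  Returns 0 on success or a negative errno. */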
int
lnet_msg_container_setup(struct lnet_msg_container *container, int cpt)
{
        int     rc;

        container->msc_init = 1;

        CFS_INIT_LIST_HEAD(&container->msc_active);
        CFS_INIT_LIST_HEAD(&container->msc_finalizing);
#ifdef LNET_USE_LIB_FREELIST
        memset(&container->msc_freelist, 0, sizeof(lnet_freelist_t));

        rc = lnet_freelist_init(&container->msc_freelist,
                                LNET_FL_MAX_MSGS, sizeof(lnet_msg_t));
        if (rc != 0) {
                CERROR("Failed to init freelist for message container\n");
                lnet_msg_container_cleanup(container);
                return rc;
        }
#else
        rc = 0;
#endif
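
        /* one finalizer slot per CPU in this partition, so that many
         * threads can finalize concurrently before the recursion breaker
         * in lnet_finalize() makes latecomers queue and return */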
        container->msc_nfinalizers = cfs_cpt_weight(lnet_cpt_table(), cpt);

        LIBCFS_CPT_ALLOC(container->msc_finalizers, lnet_cpt_table(), cpt,
                         container->msc_nfinalizers *
                         sizeof(*container->msc_finalizers));

        if (container->msc_finalizers == NULL) {
                CERROR("Failed to allocate message finalizers\n");
                lnet_msg_container_cleanup(container);
                return -ENOMEM;
        }

        return rc;
}
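
/* Clean up and free the per-CPT array of message containers. */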
void
lnet_msg_containers_destroy(void)
{
        struct lnet_msg_container *container;
        int     i;

        if (the_lnet.ln_msg_containers == NULL)
                return;

        cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers)
                lnet_msg_container_cleanup(container);

        cfs_percpt_free(the_lnet.ln_msg_containers);
        the_lnet.ln_msg_containers = NULL;
}
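
/* Allocate and set up one message container per CPT.  On any failure,
 * everything allocated so far is torn down again. */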
int
lnet_msg_containers_create(void)
{
        struct lnet_msg_container *container;
        int     rc;
        int     i;

        the_lnet.ln_msg_containers = cfs_percpt_alloc(lnet_cpt_table(),
                                                      sizeof(*container));

        if (the_lnet.ln_msg_containers == NULL) {
                CERROR("Failed to allocate cpu-partition data for network\n");
                return -ENOMEM;
        }

        cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers) {
                rc = lnet_msg_container_setup(container, i);
                if (rc != 0) {
                        lnet_msg_containers_destroy();
                        return rc;
                }
        }

        return 0;
}