/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see [sun.com URL].
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
 * Use is subject to license terms.
 *
 * Copyright (c) 2003 Los Alamos National Laboratory (LANL)
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
/*
 *      This file contains all gmnal send and receive functions
 */

#include "gmlnd.h"
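/*
 * Send path overview: gmnal_check_txqueues_locked() below moves a tx from
 * gmni_buf_txq (waiting for a large tx buffer to marshall its payload into)
 * to gmni_cred_txq (waiting for a GM send credit), and finally launches it
 * with gm_send_to_peer_with_callback().  Receive path: rx threads block in
 * gm_blocking_receive_no_spin() and hand unpacked messages to lnet_parse().
 */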
void
gmnal_notify_peer_down(gmnal_tx_t *tx)
{
        time_t     then;

        /* estimate when we last heard from this peer from the time the
         * failed tx was launched */
        then = cfs_time_current_sec() -
               cfs_duration_sec(cfs_time_current() -
                                tx->tx_launchtime);

        lnet_notify(tx->tx_gmni->gmni_ni, tx->tx_nid, 0, then);
}
void
gmnal_pack_msg(gmnal_ni_t *gmni, gmnal_msg_t *msg,
               lnet_nid_t dstnid, int type)
{
        /* CAVEAT EMPTOR! this only sets the common message fields. */
        msg->gmm_magic    = GMNAL_MSG_MAGIC;
        msg->gmm_version  = GMNAL_MSG_VERSION;
        msg->gmm_type     = type;
        msg->gmm_srcnid   = lnet_ptlcompat_srcnid(gmni->gmni_ni->ni_nid,
                                                  dstnid);
        msg->gmm_dstnid   = dstnid;
}
int
gmnal_unpack_msg(gmnal_ni_t *gmni, gmnal_rx_t *rx)
{
        gmnal_msg_t *msg = GMNAL_NETBUF_MSG(&rx->rx_buf);
        const int    hdr_size = offsetof(gmnal_msg_t, gmm_u);
        int          buffnob = rx->rx_islarge ? gmni->gmni_large_msgsize :
                                                gmni->gmni_small_msgsize;
        int          flip;

        /* rc = 0:SUCCESS -ve:failure +ve:version mismatch */
        /* GM may not overflow our buffer */
        LASSERT (rx->rx_recv_nob <= buffnob);

        /* 6 bytes are enough to have received magic + version */
        if (rx->rx_recv_nob < 6) {
                CERROR("Short message from gmid %u: %d\n",
                       rx->rx_recv_gmid, rx->rx_recv_nob);
                return -EPROTO;
        }

        if (msg->gmm_magic == GMNAL_MSG_MAGIC) {
                flip = 0;
        } else if (msg->gmm_magic == __swab32(GMNAL_MSG_MAGIC)) {
                flip = 1;
        } else if (msg->gmm_magic == LNET_PROTO_MAGIC ||
                   msg->gmm_magic == __swab32(LNET_PROTO_MAGIC)) {
                /* future protocol speaker: +ve rc == version mismatch */
                return EPROTO;
        } else {
                CERROR("Bad magic from gmid %u: %08x\n",
                       rx->rx_recv_gmid, msg->gmm_magic);
                return -EPROTO;
        }

        if (msg->gmm_version !=
            (flip ? __swab16(GMNAL_MSG_VERSION) : GMNAL_MSG_VERSION))
                return EPROTO;

        if (rx->rx_recv_nob < hdr_size) {
                CERROR("Short message from %u: %d\n",
                       rx->rx_recv_gmid, rx->rx_recv_nob);
                return -EPROTO;
        }

        if (flip) {
                /* leave magic unflipped as a clue to peer endianness */
                __swab16s(&msg->gmm_version);
                __swab16s(&msg->gmm_type);
                __swab64s(&msg->gmm_srcnid);
                __swab64s(&msg->gmm_dstnid);
        }

        if (msg->gmm_srcnid == LNET_NID_ANY) {
                CERROR("Bad src nid from %u: %s\n",
                       rx->rx_recv_gmid, libcfs_nid2str(msg->gmm_srcnid));
                return -EPROTO;
        }

        if (!lnet_ptlcompat_matchnid(gmni->gmni_ni->ni_nid,
                                     msg->gmm_dstnid)) {
                CERROR("Bad dst nid from %u: %s\n",
                       rx->rx_recv_gmid, libcfs_nid2str(msg->gmm_dstnid));
                return -EPROTO;
        }

        switch (msg->gmm_type) {
        default:
                CERROR("Unknown message type from %u: %x\n",
                       rx->rx_recv_gmid, msg->gmm_type);
                return -EPROTO;

        case GMNAL_MSG_IMMEDIATE:
                if (rx->rx_recv_nob <
                    offsetof(gmnal_msg_t, gmm_u.immediate.gmim_payload[0])) {
                        CERROR("Short IMMEDIATE from %u: %d("LPSZ")\n",
                               rx->rx_recv_gmid, rx->rx_recv_nob,
                               offsetof(gmnal_msg_t,
                                        gmm_u.immediate.gmim_payload[0]));
                        return -EPROTO;
                }
                break;
        }
        return 0;
}
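/* Tx descriptors are kept on gmni_idle_txs: gmnal_get_tx() claims one and
 * gmnal_tx_done() returns it, along with any large buffer and send credit
 * it was holding. */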
gmnal_tx_t *
gmnal_get_tx(gmnal_ni_t *gmni)
{
        gmnal_tx_t *tx = NULL;

        spin_lock(&gmni->gmni_tx_lock);

        if (gmni->gmni_shutdown ||
            list_empty(&gmni->gmni_idle_txs)) {
                spin_unlock(&gmni->gmni_tx_lock);
                return NULL;
        }

        tx = list_entry(gmni->gmni_idle_txs.next, gmnal_tx_t, tx_list);
        list_del(&tx->tx_list);

        spin_unlock(&gmni->gmni_tx_lock);

        LASSERT (tx->tx_lntmsg == NULL);
        LASSERT (tx->tx_ltxb == NULL);
        LASSERT (!tx->tx_credit);

        return tx;
}
void
gmnal_tx_done(gmnal_tx_t *tx, int rc)
{
        gmnal_ni_t *gmni = tx->tx_gmni;
        lnet_msg_t *lnetmsg = tx->tx_lntmsg;
        int         wake_sched = 0;

        tx->tx_lntmsg = NULL;

        spin_lock(&gmni->gmni_tx_lock);
        if (tx->tx_ltxb != NULL) {              /* return the large buffer */
                wake_sched = 1;
                list_add_tail(&tx->tx_ltxb->txb_list, &gmni->gmni_idle_ltxbs);
                tx->tx_ltxb = NULL;
        }
        if (tx->tx_credit) {                    /* return the send credit */
                wake_sched = 1;
                gmni->gmni_tx_credits++;
                tx->tx_credit = 0;
        }
        list_add_tail(&tx->tx_list, &gmni->gmni_idle_txs);
        if (wake_sched)
                gmnal_check_txqueues_locked(gmni);
        spin_unlock(&gmni->gmni_tx_lock);

        /* Delay finalize until tx is free */
        if (lnetmsg != NULL)
                lnet_finalize(gmni->gmni_ni, lnetmsg, rc);
}
void
gmnal_drop_sends_callback(struct gm_port *gm_port, void *context,
                          gm_status_t status)
{
        gmnal_tx_t *tx = (gmnal_tx_t*)context;

        LASSERT(!in_interrupt());

        CDEBUG(D_NET, "status for tx [%p] is [%d][%s], nid %s\n",
               tx, status, gmnal_gmstatus2str(status),
               libcfs_nid2str(tx->tx_nid));

        gmnal_tx_done(tx, -EIO);
}
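/* GM send completion.  GM_SUCCESS and GM_SEND_DROPPED complete the tx here;
 * any other error marks the peer down and calls gm_drop_sends(), which
 * flushes everything still queued to that node (those sends then complete
 * with GM_SEND_DROPPED) and completes this tx via
 * gmnal_drop_sends_callback(). */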
void
gmnal_tx_callback(gm_port_t *gm_port, void *context, gm_status_t status)
{
        gmnal_tx_t *tx = (gmnal_tx_t*)context;
        gmnal_ni_t *gmni = tx->tx_gmni;

        LASSERT(!in_interrupt());

        switch(status) {
        case GM_SUCCESS:
                gmnal_tx_done(tx, 0);
                return;

        case GM_SEND_DROPPED:
                CDEBUG(D_NETERROR, "Dropped tx %p to %s\n",
                       tx, libcfs_nid2str(tx->tx_nid));
                /* Another tx failed and called gm_drop_sends() which made this
                 * one complete immediately */
                gmnal_tx_done(tx, -EIO);
                return;

        default:
                /* Some error; NB don't complete tx yet; we need its credit
                 * for gm_drop_sends() */
                CDEBUG(D_NETERROR, "tx %p error %d(%s), nid %s\n",
                       tx, status, gmnal_gmstatus2str(status),
                       libcfs_nid2str(tx->tx_nid));

                gmnal_notify_peer_down(tx);

                spin_lock(&gmni->gmni_gm_lock);
                gm_drop_sends(gmni->gmni_port,
                              tx->tx_ltxb != NULL ?
                              GMNAL_LARGE_PRIORITY : GMNAL_SMALL_PRIORITY,
                              tx->tx_gmlid, *gmnal_tunables.gm_port,
                              gmnal_drop_sends_callback, tx);
                spin_unlock(&gmni->gmni_gm_lock);
                return;
        }
}
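/* Advance the send queues; called with gmni_tx_lock held.  Stage 1 moves
 * the head of gmni_buf_txq to gmni_cred_txq once any large payload has been
 * copied into an idle large tx buffer; stage 2 launches the head of
 * gmni_cred_txq when a GM send credit is available.  The lock is dropped
 * around the payload copy and the actual GM send. */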
void
gmnal_check_txqueues_locked (gmnal_ni_t *gmni)
{
        gmnal_tx_t    *tx;
        gmnal_txbuf_t *ltxb;
        int            gmsize;
        int            pri;
        void          *netaddr;

        tx = list_empty(&gmni->gmni_buf_txq) ? NULL :
             list_entry(gmni->gmni_buf_txq.next, gmnal_tx_t, tx_list);

        if (tx != NULL &&
            (tx->tx_large_nob == 0 ||
             !list_empty(&gmni->gmni_idle_ltxbs))) {

                /* consume tx */
                list_del(&tx->tx_list);

                LASSERT (tx->tx_ltxb == NULL);

                if (tx->tx_large_nob != 0) {
                        ltxb = list_entry(gmni->gmni_idle_ltxbs.next,
                                          gmnal_txbuf_t, txb_list);

                        /* consume large buffer */
                        list_del(&ltxb->txb_list);

                        spin_unlock(&gmni->gmni_tx_lock);

                        /* Unlocking here allows sends to get re-ordered,
                         * but we want to allow other CPUs to progress... */

                        tx->tx_ltxb = ltxb;

                        /* marshall message in tx_ltxb...
                         * 1. Copy what was marshalled so far (in tx_buf) */
                        memcpy(GMNAL_NETBUF_MSG(&ltxb->txb_buf),
                               GMNAL_NETBUF_MSG(&tx->tx_buf), tx->tx_msgnob);

                        /* 2. Copy the payload */
                        if (tx->tx_large_iskiov)
                                lnet_copy_kiov2kiov(
                                        gmni->gmni_large_pages,
                                        ltxb->txb_buf.nb_kiov,
                                        tx->tx_msgnob,
                                        tx->tx_large_niov,
                                        tx->tx_large_frags.kiov,
                                        tx->tx_large_offset,
                                        tx->tx_large_nob);
                        else
                                lnet_copy_iov2kiov(
                                        gmni->gmni_large_pages,
                                        ltxb->txb_buf.nb_kiov,
                                        tx->tx_msgnob,
                                        tx->tx_large_niov,
                                        tx->tx_large_frags.iov,
                                        tx->tx_large_offset,
                                        tx->tx_large_nob);

                        tx->tx_msgnob += tx->tx_large_nob;

                        spin_lock(&gmni->gmni_tx_lock);
                }

                list_add_tail(&tx->tx_list, &gmni->gmni_cred_txq);
        }

        if (!list_empty(&gmni->gmni_cred_txq) &&
            gmni->gmni_tx_credits != 0) {

                tx = list_entry(gmni->gmni_cred_txq.next, gmnal_tx_t, tx_list);

                /* consume tx and 1 credit */
                list_del(&tx->tx_list);
                gmni->gmni_tx_credits--;

                spin_unlock(&gmni->gmni_tx_lock);

                /* Unlocking here allows sends to get re-ordered, but we want
                 * to allow other CPUs to progress... */

                LASSERT(!tx->tx_credit);
                tx->tx_credit = 1;

                tx->tx_launchtime = cfs_time_current();

                if (tx->tx_msgnob <= gmni->gmni_small_msgsize) {
                        LASSERT (tx->tx_ltxb == NULL);
                        netaddr = GMNAL_NETBUF_LOCAL_NETADDR(&tx->tx_buf);
                        gmsize = gmni->gmni_small_gmsize;
                        pri = GMNAL_SMALL_PRIORITY;
                } else {
                        LASSERT (tx->tx_ltxb != NULL);
                        netaddr = GMNAL_NETBUF_LOCAL_NETADDR(&tx->tx_ltxb->txb_buf);
                        gmsize = gmni->gmni_large_gmsize;
                        pri = GMNAL_LARGE_PRIORITY;
                }

                spin_lock(&gmni->gmni_gm_lock);
                gm_send_to_peer_with_callback(gmni->gmni_port, netaddr,
                                              gmsize, tx->tx_msgnob, pri,
                                              tx->tx_gmlid,
                                              gmnal_tx_callback, (void*)tx);
                spin_unlock(&gmni->gmni_gm_lock);
                spin_lock(&gmni->gmni_tx_lock);
        }
}
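/* Return a receive buffer to GM so it can be filled by a future incoming
 * message of the matching size/priority. */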
void
gmnal_post_rx(gmnal_ni_t *gmni, gmnal_rx_t *rx)
{
        int   gmsize = rx->rx_islarge ? gmni->gmni_large_gmsize :
                                        gmni->gmni_small_gmsize;
        int   pri    = rx->rx_islarge ? GMNAL_LARGE_PRIORITY :
                                        GMNAL_SMALL_PRIORITY;
        void *buffer = GMNAL_NETBUF_LOCAL_NETADDR(&rx->rx_buf);

        CDEBUG(D_NET, "posting rx %p buf %p\n", rx, buffer);

        spin_lock(&gmni->gmni_gm_lock);
        gm_provide_receive_buffer_with_tag(gmni->gmni_port,
                                           buffer, gmsize, pri, 0);
        spin_unlock(&gmni->gmni_gm_lock);
}
void
gmnal_version_reply (gmnal_ni_t *gmni, gmnal_rx_t *rx)
{
        /* Future protocol version compatibility support!
         * The next gmlnd-specific protocol rev will first send a message to
         * check version; I reply with a stub message containing my current
         * magic+version... */
        gmnal_msg_t *msg;
        gmnal_tx_t  *tx = gmnal_get_tx(gmni);

        if (tx == NULL) {
                CERROR("Can't allocate tx to send version info to %u\n",
                       rx->rx_recv_gmid);
                return;
        }

        LASSERT (tx->tx_lntmsg == NULL);        /* no finalize */

        tx->tx_nid = LNET_NID_ANY;
        tx->tx_gmlid = rx->rx_recv_gmid;

        msg = GMNAL_NETBUF_MSG(&tx->tx_buf);
        msg->gmm_magic   = GMNAL_MSG_MAGIC;
        msg->gmm_version = GMNAL_MSG_VERSION;

        /* just send magic + version */
        tx->tx_msgnob = offsetof(gmnal_msg_t, gmm_type);
        tx->tx_large_nob = 0;

        spin_lock(&gmni->gmni_tx_lock);
        list_add_tail(&tx->tx_list, &gmni->gmni_buf_txq);
        gmnal_check_txqueues_locked(gmni);
        spin_unlock(&gmni->gmni_tx_lock);
}
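/* Receiver thread: gmnal_start_threads() starts one per online CPU.  The
 * threads serialise on gmni_rx_mutex so that only one of them at a time
 * blocks inside gm_blocking_receive_no_spin(). */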
int
gmnal_rx_thread(void *arg)
{
        gmnal_ni_t      *gmni = arg;
        gm_recv_event_t *rxevent = NULL;
        gm_recv_t       *recv = NULL;
        gmnal_rx_t      *rx;
        int              rc;

        cfs_daemonize("gmnal_rxd");

        while (!gmni->gmni_shutdown) {
                rc = down_interruptible(&gmni->gmni_rx_mutex);
                LASSERT (rc == 0 || rc == -EINTR);
                if (rc != 0)                    /* interrupted: try again */
                        continue;

                spin_lock(&gmni->gmni_gm_lock);
                rxevent = gm_blocking_receive_no_spin(gmni->gmni_port);
                spin_unlock(&gmni->gmni_gm_lock);

                switch (GM_RECV_EVENT_TYPE(rxevent)) {
                default:
                        gm_unknown(gmni->gmni_port, rxevent);
                        up(&gmni->gmni_rx_mutex);
                        continue;

                case GM_FAST_RECV_EVENT:
                case GM_FAST_PEER_RECV_EVENT:
                case GM_PEER_RECV_EVENT:
                case GM_FAST_HIGH_RECV_EVENT:
                case GM_FAST_HIGH_PEER_RECV_EVENT:
                case GM_HIGH_PEER_RECV_EVENT:
                case GM_RECV_EVENT:
                case GM_HIGH_RECV_EVENT:
                        break;
                }

                recv = &rxevent->recv;
                rx = gm_hash_find(gmni->gmni_rx_hash,
                                  gm_ntohp(recv->buffer));
                LASSERT (rx != NULL);

                rx->rx_recv_nob  = gm_ntoh_u32(recv->length);
                rx->rx_recv_gmid = gm_ntoh_u16(recv->sender_node_id);
                rx->rx_recv_port = gm_ntoh_u8(recv->sender_port_id);
                rx->rx_recv_type = gm_ntoh_u8(recv->type);

                switch (GM_RECV_EVENT_TYPE(rxevent)) {
                case GM_FAST_RECV_EVENT:
                case GM_FAST_PEER_RECV_EVENT:
                case GM_FAST_HIGH_RECV_EVENT:
                case GM_FAST_HIGH_PEER_RECV_EVENT:
                        /* fast receives deliver the payload in the event's
                         * own buffer: copy it into our posted rx buffer */
                        LASSERT (rx->rx_recv_nob <= PAGE_SIZE);
                        memcpy(GMNAL_NETBUF_MSG(&rx->rx_buf),
                               gm_ntohp(recv->message), rx->rx_recv_nob);
                        break;
                }

                up(&gmni->gmni_rx_mutex);

                CDEBUG (D_NET, "rx %p: buf %p(%p) nob %d\n", rx,
                        GMNAL_NETBUF_LOCAL_NETADDR(&rx->rx_buf),
                        gm_ntohp(recv->buffer), rx->rx_recv_nob);

                /* We're connectionless: simply drop packets with errors */
                rc = gmnal_unpack_msg(gmni, rx);

                if (rc == 0) {
                        gmnal_msg_t *msg = GMNAL_NETBUF_MSG(&rx->rx_buf);

                        LASSERT (msg->gmm_type == GMNAL_MSG_IMMEDIATE);
                        rc = lnet_parse(gmni->gmni_ni,
                                        &msg->gmm_u.immediate.gmim_hdr,
                                        msg->gmm_srcnid, rx, 0);
                } else if (rc > 0) {
                        /* peer speaks a different protocol version */
                        gmnal_version_reply(gmni, rx);
                        rc = -EPROTO;   /* repost rx */
                }

                if (rc < 0)                     /* parse failure */
                        gmnal_post_rx(gmni, rx);
        }

        CDEBUG(D_NET, "exiting\n");
        atomic_dec(&gmni->gmni_nthreads);
        return 0;
}
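/* Shutdown: set gmni_shutdown, then fire a GM alarm to wake whichever rx
 * thread is currently blocked in gm_blocking_receive_no_spin(), and wait
 * for all rx threads to exit. */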
void
gmnal_stop_threads(gmnal_ni_t *gmni)
{
        int count = 2;

        gmni->gmni_shutdown = 1;

        /* wake rxthread owning gmni_rx_mutex with an alarm. */
        spin_lock(&gmni->gmni_gm_lock);
        gm_set_alarm(gmni->gmni_port, &gmni->gmni_alarm, 0, NULL, NULL);
        spin_unlock(&gmni->gmni_gm_lock);

        while (atomic_read(&gmni->gmni_nthreads) != 0) {
                count++;
                if ((count & (count - 1)) == 0)
                        CWARN("Waiting for %d threads to stop\n",
                              atomic_read(&gmni->gmni_nthreads));
                gmnal_yield(1);
        }
}
int
gmnal_start_threads(gmnal_ni_t *gmni)
{
        int i;
        int pid;

        LASSERT (!gmni->gmni_shutdown);
        LASSERT (atomic_read(&gmni->gmni_nthreads) == 0);

        gm_initialize_alarm(&gmni->gmni_alarm);

        for (i = 0; i < num_online_cpus(); i++) {
                pid = kernel_thread(gmnal_rx_thread, (void*)gmni, 0);
                if (pid < 0) {
                        CERROR("rx thread failed to start: %d\n", pid);
                        gmnal_stop_threads(gmni);
                        return pid;
                }
                atomic_inc(&gmni->gmni_nthreads);
        }
        return 0;
}