/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
 * Use is subject to license terms.
 *
 * Copyright (c) 2003 Los Alamos National Laboratory (LANL)
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
/*
 *        This file contains all gmnal send and receive functions
 */

#include "gmlnd.h"
void
gmnal_notify_peer_down(gmnal_tx_t *tx)
{
        time_t     then;

        then = cfs_time_current_sec() -
                cfs_duration_sec(cfs_time_current() -
                                 tx->tx_launchtime);

        lnet_notify(tx->tx_gmni->gmni_ni, tx->tx_nid, 0, then);
}
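
/* Set up the header fields common to every gmnal message.  CAVEAT EMPTOR:
 * the type-specific part of the union (gmm_u) is left for the caller to
 * fill in, along with the final message size. */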
void
gmnal_pack_msg(gmnal_ni_t *gmni, gmnal_msg_t *msg,
               lnet_nid_t dstnid, int type)
{
        /* CAVEAT EMPTOR! this only sets the common message fields. */
        msg->gmm_magic    = GMNAL_MSG_MAGIC;
        msg->gmm_version  = GMNAL_MSG_VERSION;
        msg->gmm_type     = type;
        msg->gmm_srcnid   = gmni->gmni_ni->ni_nid;
        msg->gmm_dstnid   = dstnid;
}
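
/* Validate an incoming message in place.  Returns 0 on success, -ve errno
 * if the message is hopeless (drop it), or +ve if the peer speaks a
 * different protocol version, in which case the caller sends back a stub
 * carrying my magic+version (see gmnal_rx_thread()).  A byte-swapped magic
 * means the peer is opposite-endian, so the header fields get swabbed. */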
int
gmnal_unpack_msg(gmnal_ni_t *gmni, gmnal_rx_t *rx)
{
        gmnal_msg_t *msg = GMNAL_NETBUF_MSG(&rx->rx_buf);
        const int    hdr_size = offsetof(gmnal_msg_t, gmm_u);
        int          buffnob = rx->rx_islarge ? gmni->gmni_large_msgsize :
                                                gmni->gmni_small_msgsize;
        int          flip;

        /* rc = 0:SUCCESS -ve:failure +ve:version mismatch */

        /* GM may not overflow our buffer */
        LASSERT (rx->rx_recv_nob <= buffnob);

        /* 6 bytes are enough to have received magic + version */
        if (rx->rx_recv_nob < 6) {
                CERROR("Short message from gmid %u: %d\n",
                       rx->rx_recv_gmid, rx->rx_recv_nob);
                return -EPROTO;
        }

        if (msg->gmm_magic == GMNAL_MSG_MAGIC) {
                flip = 0;
        } else if (msg->gmm_magic == __swab32(GMNAL_MSG_MAGIC)) {
                flip = 1;
        } else if (msg->gmm_magic == LNET_PROTO_MAGIC ||
                   msg->gmm_magic == __swab32(LNET_PROTO_MAGIC)) {
                /* future protocol compatibility: reply with my
                 * current magic+version */
                return EPROTO;
        } else {
                CERROR("Bad magic from gmid %u: %08x\n",
                       rx->rx_recv_gmid, msg->gmm_magic);
                return -EPROTO;
        }

        if (msg->gmm_version !=
            (flip ? __swab16(GMNAL_MSG_VERSION) : GMNAL_MSG_VERSION)) {
                /* version mismatch: reply with my magic+version */
                return EPROTO;
        }

        if (rx->rx_recv_nob < hdr_size) {
                CERROR("Short message from %u: %d\n",
                       rx->rx_recv_gmid, rx->rx_recv_nob);
                return -EPROTO;
        }

        if (flip) {
                /* leave magic unflipped as a clue to peer endianness */
                __swab16s(&msg->gmm_version);
                __swab16s(&msg->gmm_type);
                __swab64s(&msg->gmm_srcnid);
                __swab64s(&msg->gmm_dstnid);
        }

        if (msg->gmm_srcnid == LNET_NID_ANY) {
                CERROR("Bad src nid from %u: %s\n",
                       rx->rx_recv_gmid, libcfs_nid2str(msg->gmm_srcnid));
                return -EPROTO;
        }

        if (gmni->gmni_ni->ni_nid != msg->gmm_dstnid) {
                CERROR("Bad dst nid from %u: %s\n",
                       rx->rx_recv_gmid, libcfs_nid2str(msg->gmm_dstnid));
                return -EPROTO;
        }

        switch (msg->gmm_type) {
        default:
                CERROR("Unknown message type from %u: %x\n",
                       rx->rx_recv_gmid, msg->gmm_type);
                return -EPROTO;

        case GMNAL_MSG_IMMEDIATE:
                if (rx->rx_recv_nob <
                    offsetof(gmnal_msg_t, gmm_u.immediate.gmim_payload[0])) {
                        CERROR("Short IMMEDIATE from %u: %d("LPSZ")\n",
                               rx->rx_recv_gmid, rx->rx_recv_nob,
                               offsetof(gmnal_msg_t,
                                        gmm_u.immediate.gmim_payload[0]));
                        return -EPROTO;
                }
                break;
        }
        return 0;
}
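
/* Reserve an idle tx descriptor.  Returns NULL if the NI is shutting down
 * or the idle pool is empty; callers must cope with NULL (see
 * gmnal_version_reply()). */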
gmnal_tx_t *
gmnal_get_tx(gmnal_ni_t *gmni)
{
        gmnal_tx_t *tx = NULL;

        spin_lock(&gmni->gmni_tx_lock);

        if (gmni->gmni_shutdown ||
            list_empty(&gmni->gmni_idle_txs)) {
                spin_unlock(&gmni->gmni_tx_lock);
                return NULL;
        }

        tx = list_entry(gmni->gmni_idle_txs.next, gmnal_tx_t, tx_list);
        list_del(&tx->tx_list);

        spin_unlock(&gmni->gmni_tx_lock);

        LASSERT (tx->tx_lntmsg == NULL);
        LASSERT (tx->tx_ltxb == NULL);
        LASSERT (!tx->tx_credit);

        return tx;
}
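
/* Complete a tx: return its large buffer and send credit (if any) to the
 * idle pools and kick the scheduler, since the freed resources may let a
 * queued tx proceed.  NB lnet_finalize() runs last, once the tx is back on
 * the idle list, because finalizing can re-enter the LND to launch new
 * sends. */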
void
gmnal_tx_done(gmnal_tx_t *tx, int rc)
{
        gmnal_ni_t *gmni = tx->tx_gmni;
        int         wake_sched = 0;
        lnet_msg_t *lnetmsg = tx->tx_lntmsg;

        tx->tx_lntmsg = NULL;

        spin_lock(&gmni->gmni_tx_lock);
        if (tx->tx_ltxb != NULL) {
                wake_sched = 1;
                list_add_tail(&tx->tx_ltxb->txb_list, &gmni->gmni_idle_ltxbs);
                tx->tx_ltxb = NULL;
        }
        if (tx->tx_credit) {
                wake_sched = 1;
                gmni->gmni_tx_credits++;
                tx->tx_credit = 0;
        }
        list_add_tail(&tx->tx_list, &gmni->gmni_idle_txs);

        if (wake_sched)
                gmnal_check_txqueues_locked(gmni);
        spin_unlock(&gmni->gmni_tx_lock);

        /* Delay finalize until tx is free */
        if (lnetmsg != NULL)
                lnet_finalize(gmni->gmni_ni, lnetmsg, rc);
}
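
/* Completion callback for sends flushed by gm_drop_sends() (see
 * gmnal_tx_callback()): each flushed tx completes here with -EIO,
 * returning its credit via gmnal_tx_done(). */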
void
gmnal_drop_sends_callback(struct gm_port *gm_port, void *context,
                          gm_status_t status)
{
        gmnal_tx_t *tx = (gmnal_tx_t*)context;

        LASSERT(!in_interrupt());

        CDEBUG(D_NET, "status for tx [%p] is [%d][%s], nid %s\n",
               tx, status, gmnal_gmstatus2str(status),
               libcfs_nid2str(tx->tx_nid));

        gmnal_tx_done(tx, -EIO);
}
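
/* GM send completion.  Success and GM_SEND_DROPPED complete the tx
 * immediately; any other status marks the peer down and flushes all
 * queued sends to it with gm_drop_sends(), keeping this tx's credit
 * until the flush itself completes. */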
void
gmnal_tx_callback(gm_port_t *gm_port, void *context, gm_status_t status)
{
        gmnal_tx_t *tx = (gmnal_tx_t*)context;
        gmnal_ni_t *gmni = tx->tx_gmni;

        LASSERT(!in_interrupt());

        switch(status) {
        case GM_SUCCESS:
                gmnal_tx_done(tx, 0);
                return;

        case GM_SEND_DROPPED:
                CDEBUG(D_NETERROR, "Dropped tx %p to %s\n",
                       tx, libcfs_nid2str(tx->tx_nid));
                /* Another tx failed and called gm_drop_sends() which made this
                 * one complete immediately */
                gmnal_tx_done(tx, -EIO);
                return;

        default:
                /* Some error; NB don't complete tx yet; we need its credit for
                 * gm_drop_sends() */
                CDEBUG(D_NETERROR, "tx %p error %d(%s), nid %s\n",
                       tx, status, gmnal_gmstatus2str(status),
                       libcfs_nid2str(tx->tx_nid));

                gmnal_notify_peer_down(tx);

                spin_lock(&gmni->gmni_gm_lock);
                gm_drop_sends(gmni->gmni_port,
                              tx->tx_ltxb != NULL ?
                              GMNAL_LARGE_PRIORITY : GMNAL_SMALL_PRIORITY,
                              tx->tx_gmlid, *gmnal_tunables.gm_port,
                              gmnal_drop_sends_callback, tx);
                spin_unlock(&gmni->gmni_gm_lock);
                return;
        }
}
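
/* Scheduler core, called with gmni_tx_lock held.  Sends traverse two
 * queues: gmni_buf_txq holds txs still waiting for a large buffer to
 * marshall a big payload into, and gmni_cred_txq holds fully marshalled
 * txs waiting for a GM send credit.  The lock is dropped around the bulk
 * copy and around gm_send_to_peer_with_callback() so other CPUs can make
 * progress, at the cost of possible send re-ordering. */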
void
gmnal_check_txqueues_locked (gmnal_ni_t *gmni)
{
        gmnal_tx_t    *tx;
        gmnal_txbuf_t *ltxb;
        int            gmsize;
        int            pri;
        void          *netaddr;

        tx = list_empty(&gmni->gmni_buf_txq) ? NULL :
             list_entry(gmni->gmni_buf_txq.next, gmnal_tx_t, tx_list);

        if (tx != NULL &&
            (tx->tx_large_nob == 0 ||
             !list_empty(&gmni->gmni_idle_ltxbs))) {

                /* consume tx */
                list_del(&tx->tx_list);

                LASSERT (tx->tx_ltxb == NULL);

                if (tx->tx_large_nob != 0) {
                        ltxb = list_entry(gmni->gmni_idle_ltxbs.next,
                                          gmnal_txbuf_t, txb_list);

                        /* consume large buffer */
                        list_del(&ltxb->txb_list);

                        spin_unlock(&gmni->gmni_tx_lock);

                        /* Unlocking here allows sends to get re-ordered,
                         * but we want to allow other CPUs to progress... */

                        tx->tx_ltxb = ltxb;

                        /* marshall message in tx_ltxb...
                         * 1. Copy what was marshalled so far (in tx_buf) */
                        memcpy(GMNAL_NETBUF_MSG(&ltxb->txb_buf),
                               GMNAL_NETBUF_MSG(&tx->tx_buf), tx->tx_msgnob);

                        /* 2. Copy the payload */
                        if (tx->tx_large_iskiov)
                                lnet_copy_kiov2kiov(
                                        gmni->gmni_large_pages,
                                        ltxb->txb_buf.nb_kiov,
                                        tx->tx_msgnob,
                                        tx->tx_large_niov,
                                        tx->tx_large_frags.kiov,
                                        tx->tx_large_offset,
                                        tx->tx_large_nob);
                        else
                                lnet_copy_iov2kiov(
                                        gmni->gmni_large_pages,
                                        ltxb->txb_buf.nb_kiov,
                                        tx->tx_msgnob,
                                        tx->tx_large_niov,
                                        tx->tx_large_frags.iov,
                                        tx->tx_large_offset,
                                        tx->tx_large_nob);

                        tx->tx_msgnob += tx->tx_large_nob;

                        spin_lock(&gmni->gmni_tx_lock);
                }

                list_add_tail(&tx->tx_list, &gmni->gmni_cred_txq);
        }

        if (!list_empty(&gmni->gmni_cred_txq) &&
            gmni->gmni_tx_credits != 0) {

                tx = list_entry(gmni->gmni_cred_txq.next, gmnal_tx_t, tx_list);

                /* consume tx and 1 credit */
                list_del(&tx->tx_list);
                gmni->gmni_tx_credits--;

                spin_unlock(&gmni->gmni_tx_lock);

                /* Unlocking here allows sends to get re-ordered, but we want
                 * to allow other CPUs to progress... */

                LASSERT(!tx->tx_credit);
                tx->tx_credit = 1;

                tx->tx_launchtime = cfs_time_current();

                if (tx->tx_msgnob <= gmni->gmni_small_msgsize) {
                        LASSERT (tx->tx_ltxb == NULL);
                        netaddr = GMNAL_NETBUF_LOCAL_NETADDR(&tx->tx_buf);
                        gmsize = gmni->gmni_small_gmsize;
                        pri = GMNAL_SMALL_PRIORITY;
                } else {
                        LASSERT (tx->tx_ltxb != NULL);
                        netaddr = GMNAL_NETBUF_LOCAL_NETADDR(&tx->tx_ltxb->txb_buf);
                        gmsize = gmni->gmni_large_gmsize;
                        pri = GMNAL_LARGE_PRIORITY;
                }

                spin_lock(&gmni->gmni_gm_lock);

                gm_send_to_peer_with_callback(gmni->gmni_port,
                                              netaddr, gmsize,
                                              tx->tx_msgnob, pri,
                                              tx->tx_gmlid,
                                              gmnal_tx_callback,
                                              (void*)tx);

                spin_unlock(&gmni->gmni_gm_lock);
                spin_lock(&gmni->gmni_tx_lock);
        }
}
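
/* Hand a receive buffer (back) to GM with the size and priority tag
 * matching how it was allocated; small and large buffers live in
 * separate GM receive pools. */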
void
gmnal_post_rx(gmnal_ni_t *gmni, gmnal_rx_t *rx)
{
        int   gmsize = rx->rx_islarge ? gmni->gmni_large_gmsize :
                                        gmni->gmni_small_gmsize;
        int   pri    = rx->rx_islarge ? GMNAL_LARGE_PRIORITY :
                                        GMNAL_SMALL_PRIORITY;
        void *buffer = GMNAL_NETBUF_LOCAL_NETADDR(&rx->rx_buf);

        CDEBUG(D_NET, "posting rx %p buf %p\n", rx, buffer);

        spin_lock(&gmni->gmni_gm_lock);
        gm_provide_receive_buffer_with_tag(gmni->gmni_port,
                                           buffer, gmsize, pri, 0);
        spin_unlock(&gmni->gmni_gm_lock);
}
void
gmnal_version_reply (gmnal_ni_t *gmni, gmnal_rx_t *rx)
{
        /* Future protocol version compatibility support!
         * The next gmlnd-specific protocol rev will first send a message to
         * check version; I reply with a stub message containing my current
         * magic+version... */
        gmnal_msg_t *msg;
        gmnal_tx_t  *tx = gmnal_get_tx(gmni);

        if (tx == NULL) {
                CERROR("Can't allocate tx to send version info to %u\n",
                       rx->rx_recv_gmid);
                return;
        }

        LASSERT (tx->tx_lntmsg == NULL); /* no finalize */

        tx->tx_nid = LNET_NID_ANY;
        tx->tx_gmlid = rx->rx_recv_gmid;

        msg = GMNAL_NETBUF_MSG(&tx->tx_buf);
        msg->gmm_magic   = GMNAL_MSG_MAGIC;
        msg->gmm_version = GMNAL_MSG_VERSION;

        /* just send magic + version */
        tx->tx_msgnob = offsetof(gmnal_msg_t, gmm_type);
        tx->tx_large_nob = 0;

        spin_lock(&gmni->gmni_tx_lock);

        list_add_tail(&tx->tx_list, &gmni->gmni_buf_txq);
        gmnal_check_txqueues_locked(gmni);

        spin_unlock(&gmni->gmni_tx_lock);
}
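
/* Receiver thread body; one instance runs per online CPU (see
 * gmnal_start_threads()).  gmni_rx_mutex serialises access to
 * gm_blocking_receive_no_spin() so only one thread at a time blocks in
 * GM; it is released once the event has been claimed and copied, letting
 * another thread wait for the next event while this one parses. */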
int
gmnal_rx_thread(void *arg)
{
        gmnal_ni_t      *gmni = arg;
        gm_recv_event_t *rxevent = NULL;
        gm_recv_t       *recv = NULL;
        int              rc;
        gmnal_rx_t      *rx;

        cfs_daemonize("gmnal_rxd");

        while (!gmni->gmni_shutdown) {
                rc = down_interruptible(&gmni->gmni_rx_mutex);
                LASSERT (rc == 0 || rc == -EINTR);
                if (rc != 0)
                        continue;

                spin_lock(&gmni->gmni_gm_lock);
                rxevent = gm_blocking_receive_no_spin(gmni->gmni_port);
                spin_unlock(&gmni->gmni_gm_lock);

                switch (GM_RECV_EVENT_TYPE(rxevent)) {
                default:
                        gm_unknown(gmni->gmni_port, rxevent);
                        up(&gmni->gmni_rx_mutex);
                        continue;

                case GM_FAST_RECV_EVENT:
                case GM_FAST_PEER_RECV_EVENT:
                case GM_PEER_RECV_EVENT:
                case GM_FAST_HIGH_RECV_EVENT:
                case GM_FAST_HIGH_PEER_RECV_EVENT:
                case GM_HIGH_PEER_RECV_EVENT:
                case GM_RECV_EVENT:
                case GM_HIGH_RECV_EVENT:
                        break;
                }

                recv = &rxevent->recv;
                rx = gm_hash_find(gmni->gmni_rx_hash,
                                  gm_ntohp(recv->buffer));
                LASSERT (rx != NULL);

                rx->rx_recv_nob  = gm_ntoh_u32(recv->length);
                rx->rx_recv_gmid = gm_ntoh_u16(recv->sender_node_id);
                rx->rx_recv_port = gm_ntoh_u8(recv->sender_port_id);
                rx->rx_recv_type = gm_ntoh_u8(recv->type);

                switch (GM_RECV_EVENT_TYPE(rxevent)) {
                case GM_FAST_RECV_EVENT:
                case GM_FAST_PEER_RECV_EVENT:
                case GM_FAST_HIGH_RECV_EVENT:
                case GM_FAST_HIGH_PEER_RECV_EVENT:
                        /* fast events deliver the payload in a GM-internal
                         * buffer; copy it into my posted buffer */
                        LASSERT (rx->rx_recv_nob <= PAGE_SIZE);

                        memcpy(GMNAL_NETBUF_MSG(&rx->rx_buf),
                               gm_ntohp(recv->message), rx->rx_recv_nob);
                        break;
                }

                up(&gmni->gmni_rx_mutex);

                CDEBUG (D_NET, "rx %p: buf %p(%p) nob %d\n", rx,
                        GMNAL_NETBUF_LOCAL_NETADDR(&rx->rx_buf),
                        gm_ntohp(recv->buffer), rx->rx_recv_nob);

                /* We're connectionless: simply drop packets with
                 * errors */
                rc = gmnal_unpack_msg(gmni, rx);

                if (rc == 0) {
                        gmnal_msg_t *msg = GMNAL_NETBUF_MSG(&rx->rx_buf);

                        LASSERT (msg->gmm_type == GMNAL_MSG_IMMEDIATE);
                        rc = lnet_parse(gmni->gmni_ni,
                                        &msg->gmm_u.immediate.gmim_hdr,
                                        msg->gmm_srcnid, rx, 0);
                } else if (rc > 0) {
                        gmnal_version_reply(gmni, rx);
                        rc = -EPROTO;   /* repost rx */
                }

                if (rc < 0)     /* parse failure */
                        gmnal_post_rx(gmni, rx);
        }

        CDEBUG(D_NET, "exiting\n");
        atomic_dec(&gmni->gmni_nthreads);
        return 0;
}
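
/* Shut the receiver threads down.  A thread blocked in
 * gm_blocking_receive_no_spin() won't see gmni_shutdown until an event
 * arrives, so post a GM alarm to generate one; the remaining threads are
 * parked on gmni_rx_mutex and exit as they wake. */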
void
gmnal_stop_threads(gmnal_ni_t *gmni)
{
        int count = 2;

        gmni->gmni_shutdown = 1;
        mb();           /* order the flag with the threads re-checking it */

        /* wake rxthread owning gmni_rx_mutex with an alarm. */
        spin_lock(&gmni->gmni_gm_lock);
        gm_set_alarm(gmni->gmni_port, &gmni->gmni_alarm, 0, NULL, NULL);
        spin_unlock(&gmni->gmni_gm_lock);

        while (atomic_read(&gmni->gmni_nthreads) != 0) {
                count++;
                if ((count & (count - 1)) == 0)
                        CWARN("Waiting for %d threads to stop\n",
                              atomic_read(&gmni->gmni_nthreads));
                cfs_pause(cfs_time_seconds(1));
        }
}
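
/* Spawn one receiver thread per online CPU.  On any failure the threads
 * already started are torn down again via gmnal_stop_threads(). */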
int
gmnal_start_threads(gmnal_ni_t *gmni)
{
        int i;
        int pid;

        LASSERT (!gmni->gmni_shutdown);
        LASSERT (atomic_read(&gmni->gmni_nthreads) == 0);

        gm_initialize_alarm(&gmni->gmni_alarm);

        for (i = 0; i < num_online_cpus(); i++) {
                pid = kernel_thread(gmnal_rx_thread, (void*)gmni, 0);
                if (pid < 0) {
                        CERROR("rx thread failed to start: %d\n", pid);
                        gmnal_stop_threads(gmni);
                        return pid;
                }
                atomic_inc(&gmni->gmni_nthreads);
        }
        return 0;
}