/*
 * -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2003 Los Alamos National Laboratory (LANL)
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * This file contains all gmnal send and receive functions
 */

#include "gmlnd.h"
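/* Tell LNET that the peer this tx was addressed to appears to be down.
 * The timestamp handed to lnet_notify() is (approximately) the time the
 * tx was launched, not the time the failure was noticed. */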
void
gmnal_notify_peer_down(gmnal_tx_t *tx)
{
        time_t     then;

        then = cfs_time_current_sec() -
               cfs_duration_sec(cfs_time_current() -
                                tx->tx_launchtime);

        lnet_notify(tx->tx_gmni->gmni_ni, tx->tx_nid, 0, then);
}
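/* Fill in the header fields common to every gmnal message.  Type-specific
 * fields in the gmm_u union are left for the caller to set. */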
void
gmnal_pack_msg(gmnal_ni_t *gmni, gmnal_msg_t *msg,
               lnet_nid_t dstnid, int type)
{
        /* CAVEAT EMPTOR! this only sets the common message fields. */
        msg->gmm_magic    = GMNAL_MSG_MAGIC;
        msg->gmm_version  = GMNAL_MSG_VERSION;
        msg->gmm_type     = type;
        msg->gmm_srcnid   = gmni->gmni_ni->ni_nid;
        msg->gmm_dstnid   = dstnid;
}
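/*
 * Usage sketch (illustrative only, not a verbatim excerpt of the send
 * path): a sender packs the common header and then fills in the per-type
 * union before queueing the tx.  Here tx, hdr and target are assumed to
 * come from the caller's context:
 *
 *      msg = GMNAL_NETBUF_MSG(&tx->tx_buf);
 *      gmnal_pack_msg(gmni, msg, target.nid, GMNAL_MSG_IMMEDIATE);
 *      msg->gmm_u.immediate.gmim_hdr = *hdr;
 *      tx->tx_msgnob = offsetof(gmnal_msg_t, gmm_u.immediate.gmim_payload[0]);
 */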
int
gmnal_unpack_msg(gmnal_ni_t *gmni, gmnal_rx_t *rx)
{
        gmnal_msg_t *msg = GMNAL_NETBUF_MSG(&rx->rx_buf);
        const int    hdr_size = offsetof(gmnal_msg_t, gmm_u);
        int          buffnob = rx->rx_islarge ? gmni->gmni_large_msgsize :
                                                gmni->gmni_small_msgsize;
        int          flip;

        /* rc = 0:SUCCESS -ve:failure +ve:version mismatch */

        /* GM may not overflow our buffer */
        LASSERT (rx->rx_recv_nob <= buffnob);

        /* 6 bytes are enough to have received magic + version */
        if (rx->rx_recv_nob < 6) {
                CERROR("Short message from gmid %u: %d\n",
                       rx->rx_recv_gmid, rx->rx_recv_nob);
                return -EPROTO;
        }
        if (msg->gmm_magic == GMNAL_MSG_MAGIC) {
                flip = 0;
        } else if (msg->gmm_magic == __swab32(GMNAL_MSG_MAGIC)) {
                flip = 1;
        } else if (msg->gmm_magic == LNET_PROTO_MAGIC ||
                   msg->gmm_magic == __swab32(LNET_PROTO_MAGIC)) {
                return EPROTO;
        } else {
                CERROR("Bad magic from gmid %u: %08x\n",
                       rx->rx_recv_gmid, msg->gmm_magic);
                return -EPROTO;
        }

        if (msg->gmm_version !=
            (flip ? __swab16(GMNAL_MSG_VERSION) : GMNAL_MSG_VERSION)) {
                return EPROTO;
        }

        if (rx->rx_recv_nob < hdr_size) {
                CERROR("Short message from %u: %d\n",
                       rx->rx_recv_gmid, rx->rx_recv_nob);
                return -EPROTO;
        }
        if (flip) {
                /* leave magic unflipped as a clue to peer endianness */
                __swab16s(&msg->gmm_version);
                __swab16s(&msg->gmm_type);
                __swab64s(&msg->gmm_srcnid);
                __swab64s(&msg->gmm_dstnid);
        }

        if (msg->gmm_srcnid == LNET_NID_ANY) {
                CERROR("Bad src nid from %u: %s\n",
                       rx->rx_recv_gmid, libcfs_nid2str(msg->gmm_srcnid));
                return -EPROTO;
        }

        if (gmni->gmni_ni->ni_nid != msg->gmm_dstnid) {
                CERROR("Bad dst nid from %u: %s\n",
                       rx->rx_recv_gmid, libcfs_nid2str(msg->gmm_dstnid));
                return -EPROTO;
        }
        switch (msg->gmm_type) {
        default:
                CERROR("Unknown message type from %u: %x\n",
                       rx->rx_recv_gmid, msg->gmm_type);
                return -EPROTO;

        case GMNAL_MSG_IMMEDIATE:
                if (rx->rx_recv_nob < offsetof(gmnal_msg_t,
                                               gmm_u.immediate.gmim_payload[0])) {
                        CERROR("Short IMMEDIATE from %u: %d("LPSZ")\n",
                               rx->rx_recv_gmid, rx->rx_recv_nob,
                               offsetof(gmnal_msg_t,
                                        gmm_u.immediate.gmim_payload[0]));
                        return -EPROTO;
                }
                break;
        }

        return 0;
}
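/* Grab an idle tx descriptor.  Returns NULL if the NI is shutting down or
 * every descriptor is currently in use. */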
gmnal_tx_t *
gmnal_get_tx(gmnal_ni_t *gmni)
{
        gmnal_tx_t *tx = NULL;

        spin_lock(&gmni->gmni_tx_lock);

        if (gmni->gmni_shutdown ||
            list_empty(&gmni->gmni_idle_txs)) {
                spin_unlock(&gmni->gmni_tx_lock);
                return NULL;
        }

        tx = list_entry(gmni->gmni_idle_txs.next, gmnal_tx_t, tx_list);
        list_del(&tx->tx_list);

        spin_unlock(&gmni->gmni_tx_lock);

        LASSERT (tx->tx_lntmsg == NULL);
        LASSERT (tx->tx_ltxb == NULL);
        LASSERT (!tx->tx_credit);

        return tx;
}
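/* Return a tx (plus any large buffer or send credit it holds) to the idle
 * pools, kick the send queues, and finalize the LNET message, if any, only
 * after the tx has been freed and the lock dropped. */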
void
gmnal_tx_done(gmnal_tx_t *tx, int rc)
{
        gmnal_ni_t *gmni = tx->tx_gmni;
        lnet_msg_t *lnetmsg = tx->tx_lntmsg;

        tx->tx_lntmsg = NULL;

        spin_lock(&gmni->gmni_tx_lock);

        if (tx->tx_ltxb != NULL) {
                list_add_tail(&tx->tx_ltxb->txb_list, &gmni->gmni_idle_ltxbs);
                tx->tx_ltxb = NULL;
        }

        if (tx->tx_credit) {
                gmni->gmni_tx_credits++;
                tx->tx_credit = 0;
        }

        list_add_tail(&tx->tx_list, &gmni->gmni_idle_txs);

        gmnal_check_txqueues_locked(gmni);

        spin_unlock(&gmni->gmni_tx_lock);

        /* Delay finalize until tx is free */
        if (lnetmsg != NULL)
                lnet_finalize(gmni->gmni_ni, lnetmsg, rc);
}
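/* GM invokes this for each send flushed by gm_drop_sends(); every dropped
 * tx simply completes with -EIO, returning its credit and buffers. */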
void
gmnal_drop_sends_callback(struct gm_port *gm_port, void *context,
                          gm_status_t status)
{
        gmnal_tx_t *tx = (gmnal_tx_t*)context;

        LASSERT(!in_interrupt());

        CDEBUG(D_NET, "status for tx [%p] is [%d][%s], nid %s\n",
               tx, status, gmnal_gmstatus2str(status),
               libcfs_nid2str(tx->tx_nid));

        gmnal_tx_done(tx, -EIO);
}
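/* Send completion callback.  Success just recycles the tx; a real send
 * error notifies LNET that the peer is down and flushes any other sends
 * still queued to it. */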
void
gmnal_tx_callback(gm_port_t *gm_port, void *context, gm_status_t status)
{
        gmnal_tx_t *tx = (gmnal_tx_t*)context;
        gmnal_ni_t *gmni = tx->tx_gmni;

        LASSERT(!in_interrupt());

        switch (status) {
        case GM_SUCCESS:
                gmnal_tx_done(tx, 0);
                return;

        case GM_SEND_DROPPED:
                CDEBUG(D_NETERROR, "Dropped tx %p to %s\n",
                       tx, libcfs_nid2str(tx->tx_nid));
                /* Another tx failed and called gm_drop_sends() which made this
                 * one complete immediately */
                gmnal_tx_done(tx, -EIO);
                return;

        default:
                /* Some error; NB don't complete tx yet; we need its credit for
                 * gm_drop_sends() */
                CDEBUG(D_NETERROR, "tx %p error %d(%s), nid %s\n",
                       tx, status, gmnal_gmstatus2str(status),
                       libcfs_nid2str(tx->tx_nid));

                gmnal_notify_peer_down(tx);

                spin_lock(&gmni->gmni_gm_lock);
                gm_drop_sends(gmni->gmni_port,
                              tx->tx_ltxb != NULL ?
                              GMNAL_LARGE_PRIORITY : GMNAL_SMALL_PRIORITY,
                              tx->tx_gmlid, *gmnal_tunables.gm_port,
                              gmnal_drop_sends_callback, tx);
                spin_unlock(&gmni->gmni_gm_lock);
                return;
        }
}
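/* The heart of the send path, always entered with gmni_tx_lock held.
 * Stage 1: pair the first queued large send with an idle large tx buffer
 * and marshal its payload into it.  Stage 2: if a send credit is available,
 * take the first credit-queued tx and hand it to GM.  The lock is dropped
 * around the payload copy and the GM call so other CPUs can progress. */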
void
gmnal_check_txqueues_locked (gmnal_ni_t *gmni)
{
        gmnal_tx_t    *tx;
        gmnal_txbuf_t *ltxb;
        int            gmsize;
        int            pri;
        void          *netaddr;

        tx = list_empty(&gmni->gmni_buf_txq) ? NULL :
             list_entry(gmni->gmni_buf_txq.next, gmnal_tx_t, tx_list);

        if (tx != NULL &&
            (tx->tx_large_nob == 0 ||
             !list_empty(&gmni->gmni_idle_ltxbs))) {

                list_del(&tx->tx_list);

                LASSERT (tx->tx_ltxb == NULL);

                if (tx->tx_large_nob != 0) {
                        ltxb = list_entry(gmni->gmni_idle_ltxbs.next,
                                          gmnal_txbuf_t, txb_list);

                        /* consume large buffer */
                        list_del(&ltxb->txb_list);

                        spin_unlock(&gmni->gmni_tx_lock);

                        /* Unlocking here allows sends to get re-ordered,
                         * but we want to allow other CPUs to progress... */
                        /* marshall message in tx_ltxb...
                         * 1. Copy what was marshalled so far (in tx_buf) */
                        memcpy(GMNAL_NETBUF_MSG(&ltxb->txb_buf),
                               GMNAL_NETBUF_MSG(&tx->tx_buf), tx->tx_msgnob);

                        /* 2. Copy the payload */
                        if (tx->tx_large_iskiov)
                                lnet_copy_kiov2kiov(
                                        gmni->gmni_large_pages,
                                        ltxb->txb_buf.nb_kiov,
                                        tx->tx_msgnob,
                                        tx->tx_large_niov,
                                        tx->tx_large_frags.kiov,
                                        tx->tx_large_offset,
                                        tx->tx_large_nob);
                        else
                                lnet_copy_iov2kiov(
                                        gmni->gmni_large_pages,
                                        ltxb->txb_buf.nb_kiov,
                                        tx->tx_msgnob,
                                        tx->tx_large_niov,
                                        tx->tx_large_frags.iov,
                                        tx->tx_large_offset,
                                        tx->tx_large_nob);

                        tx->tx_msgnob += tx->tx_large_nob;

                        spin_lock(&gmni->gmni_tx_lock);

                        tx->tx_ltxb = ltxb;
                }

                list_add_tail(&tx->tx_list, &gmni->gmni_cred_txq);
        }
        if (!list_empty(&gmni->gmni_cred_txq) &&
            gmni->gmni_tx_credits != 0) {

                tx = list_entry(gmni->gmni_cred_txq.next, gmnal_tx_t, tx_list);

                /* consume tx and 1 credit */
                list_del(&tx->tx_list);
                gmni->gmni_tx_credits--;

                spin_unlock(&gmni->gmni_tx_lock);

                /* Unlocking here allows sends to get re-ordered, but we want
                 * to allow other CPUs to progress... */

                LASSERT(!tx->tx_credit);
                tx->tx_credit = 1;

                tx->tx_launchtime = cfs_time_current();

                if (tx->tx_msgnob <= gmni->gmni_small_msgsize) {
                        LASSERT (tx->tx_ltxb == NULL);
                        netaddr = GMNAL_NETBUF_LOCAL_NETADDR(&tx->tx_buf);
                        gmsize = gmni->gmni_small_gmsize;
                        pri = GMNAL_SMALL_PRIORITY;
                } else {
                        LASSERT (tx->tx_ltxb != NULL);
                        netaddr = GMNAL_NETBUF_LOCAL_NETADDR(&tx->tx_ltxb->txb_buf);
                        gmsize = gmni->gmni_large_gmsize;
                        pri = GMNAL_LARGE_PRIORITY;
                }

                spin_lock(&gmni->gmni_gm_lock);

                gm_send_to_peer_with_callback(gmni->gmni_port,
                                              netaddr, gmsize,
                                              tx->tx_msgnob,
                                              pri,
                                              tx->tx_gmlid,
                                              gmnal_tx_callback,
                                              (void*)tx);

                spin_unlock(&gmni->gmni_gm_lock);
                spin_lock(&gmni->gmni_tx_lock);
        }
}
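/* Hand a receive buffer back to GM so it can be matched against the next
 * incoming message of the same size class and priority. */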
void
gmnal_post_rx(gmnal_ni_t *gmni, gmnal_rx_t *rx)
{
        int   gmsize = rx->rx_islarge ? gmni->gmni_large_gmsize :
                                        gmni->gmni_small_gmsize;
        int   pri    = rx->rx_islarge ? GMNAL_LARGE_PRIORITY :
                                        GMNAL_SMALL_PRIORITY;
        void *buffer = GMNAL_NETBUF_LOCAL_NETADDR(&rx->rx_buf);

        CDEBUG(D_NET, "posting rx %p buf %p\n", rx, buffer);

        spin_lock(&gmni->gmni_gm_lock);
        gm_provide_receive_buffer_with_tag(gmni->gmni_port,
                                           buffer, gmsize, pri, 0);
        spin_unlock(&gmni->gmni_gm_lock);
}
void
gmnal_version_reply (gmnal_ni_t *gmni, gmnal_rx_t *rx)
{
        /* Future protocol version compatibility support!
         * The next gmlnd-specific protocol rev will first send a message to
         * check version; I reply with a stub message containing my current
         * magic+version... */
        gmnal_msg_t *msg;
        gmnal_tx_t  *tx = gmnal_get_tx(gmni);

        if (tx == NULL) {
                CERROR("Can't allocate tx to send version info to %u\n",
                       rx->rx_recv_gmid);
                return;
        }

        LASSERT (tx->tx_lntmsg == NULL);        /* no finalize */

        tx->tx_nid = LNET_NID_ANY;
        tx->tx_gmlid = rx->rx_recv_gmid;

        msg = GMNAL_NETBUF_MSG(&tx->tx_buf);
        msg->gmm_magic   = GMNAL_MSG_MAGIC;
        msg->gmm_version = GMNAL_MSG_VERSION;

        /* just send magic + version */
        tx->tx_msgnob = offsetof(gmnal_msg_t, gmm_type);
        tx->tx_large_nob = 0;

        spin_lock(&gmni->gmni_tx_lock);

        list_add_tail(&tx->tx_list, &gmni->gmni_buf_txq);
        gmnal_check_txqueues_locked(gmni);

        spin_unlock(&gmni->gmni_tx_lock);
}
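/* Receiver thread body.  gmni_rx_mutex serializes the threads so that only
 * one at a time blocks inside GM waiting for an event; "fast" events are
 * copied out of GM's internal buffer, everything else is looked up in
 * gmni_rx_hash, unpacked, and handed to LNET via lnet_parse(). */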
int
gmnal_rx_thread(void *arg)
{
        gmnal_ni_t      *gmni = arg;
        gm_recv_event_t *rxevent = NULL;
        gm_recv_t       *recv = NULL;
        gmnal_rx_t      *rx;
        int              rc;

        cfs_daemonize("gmnal_rxd");

        while (!gmni->gmni_shutdown) {
                rc = down_interruptible(&gmni->gmni_rx_mutex);
                LASSERT (rc == 0 || rc == -EINTR);
                if (rc != 0)
                        continue;

                spin_lock(&gmni->gmni_gm_lock);
                rxevent = gm_blocking_receive_no_spin(gmni->gmni_port);
                spin_unlock(&gmni->gmni_gm_lock);

                switch (GM_RECV_EVENT_TYPE(rxevent)) {
                default:
                        gm_unknown(gmni->gmni_port, rxevent);
                        up(&gmni->gmni_rx_mutex);
                        continue;
                case GM_FAST_RECV_EVENT:
                case GM_FAST_PEER_RECV_EVENT:
                case GM_PEER_RECV_EVENT:
                case GM_FAST_HIGH_RECV_EVENT:
                case GM_FAST_HIGH_PEER_RECV_EVENT:
                case GM_HIGH_PEER_RECV_EVENT:
                case GM_HIGH_RECV_EVENT:
                        recv = &rxevent->recv;
                        rx = gm_hash_find(gmni->gmni_rx_hash,
                                          gm_ntohp(recv->buffer));
                        LASSERT (rx != NULL);

                        rx->rx_recv_nob  = gm_ntoh_u32(recv->length);
                        rx->rx_recv_gmid = gm_ntoh_u16(recv->sender_node_id);
                        rx->rx_recv_port = gm_ntoh_u8(recv->sender_port_id);
                        rx->rx_recv_type = gm_ntoh_u8(recv->type);
                        switch (GM_RECV_EVENT_TYPE(rxevent)) {
                        case GM_FAST_RECV_EVENT:
                        case GM_FAST_PEER_RECV_EVENT:
                        case GM_FAST_HIGH_RECV_EVENT:
                        case GM_FAST_HIGH_PEER_RECV_EVENT:
                                LASSERT (rx->rx_recv_nob <= PAGE_SIZE);

                                memcpy(GMNAL_NETBUF_MSG(&rx->rx_buf),
                                       gm_ntohp(recv->message), rx->rx_recv_nob);
                                break;
                        }

                        up(&gmni->gmni_rx_mutex);

                        CDEBUG (D_NET, "rx %p: buf %p(%p) nob %d\n", rx,
                                GMNAL_NETBUF_LOCAL_NETADDR(&rx->rx_buf),
                                gm_ntohp(recv->buffer), rx->rx_recv_nob);
                        /* We're connectionless: simply drop packets with
                         * bogus headers */
                        rc = gmnal_unpack_msg(gmni, rx);

                        if (rc == 0) {
                                gmnal_msg_t *msg = GMNAL_NETBUF_MSG(&rx->rx_buf);

                                LASSERT (msg->gmm_type == GMNAL_MSG_IMMEDIATE);
                                rc = lnet_parse(gmni->gmni_ni,
                                                &msg->gmm_u.immediate.gmim_hdr,
                                                msg->gmm_srcnid, rx, 0);
                        } else if (rc > 0) {
                                gmnal_version_reply(gmni, rx);
                                rc = -EPROTO;   /* repost rx */
                        }

                        if (rc < 0) /* parse failure */
                                gmnal_post_rx(gmni, rx);
                }
        }

        CDEBUG(D_NET, "exiting\n");
        atomic_dec(&gmni->gmni_nthreads);
        return 0;
}
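/* Ask the rx threads to exit: set the shutdown flag, use a GM alarm to wake
 * whichever thread is blocked inside gm_blocking_receive_no_spin(), then
 * poll gmni_nthreads until they have all gone. */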
void
gmnal_stop_threads(gmnal_ni_t *gmni)
{
        int count = 2;

        gmni->gmni_shutdown = 1;

        /* wake rxthread owning gmni_rx_mutex with an alarm. */
        spin_lock(&gmni->gmni_gm_lock);
        gm_set_alarm(gmni->gmni_port, &gmni->gmni_alarm, 0, NULL, NULL);
        spin_unlock(&gmni->gmni_gm_lock);

        while (atomic_read(&gmni->gmni_nthreads) != 0) {
                count++;
                if ((count & (count - 1)) == 0)
                        CWARN("Waiting for %d threads to stop\n",
                              atomic_read(&gmni->gmni_nthreads));
                cfs_pause(cfs_time_seconds(1));
        }
}
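/* Spawn one receiver thread per online CPU; if any thread fails to start,
 * stop the ones already running and fail the startup. */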
int
gmnal_start_threads(gmnal_ni_t *gmni)
{
        int     i;
        int     pid;

        LASSERT (!gmni->gmni_shutdown);
        LASSERT (atomic_read(&gmni->gmni_nthreads) == 0);

        gm_initialize_alarm(&gmni->gmni_alarm);

        for (i = 0; i < num_online_cpus(); i++) {

                pid = kernel_thread(gmnal_rx_thread, (void*)gmni, 0);
                if (pid < 0) {
                        CERROR("rx thread failed to start: %d\n", pid);
                        gmnal_stop_threads(gmni);
                        return pid;
                }

                atomic_inc(&gmni->gmni_nthreads);
        }

        return 0;
}