2 * Copyright (C) 2004 Cluster File Systems, Inc.
4 * Copyright (C) 2009-2012 Cray, Inc.
6 * Derived from work by Eric Barton <eric@bartonsoftware.com>
7 * Author: James Shimek <jshimek@cray.com>
8 * Author: Nic Henke <nic@cray.com>
10 * This file is part of Lustre, http://www.lustre.org.
12 * Lustre is free software; you can redistribute it and/or
13 * modify it under the terms of version 2 of the GNU General Public
14 * License as published by the Free Software Foundation.
16 * Lustre is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with Lustre; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
28 #include <linux/nmi.h>
31 /* this is useful when debugging wire corruption. */
33 kgnilnd_dump_blob(int level, char *prefix, void *buf, int len) {
41 "%s 0x%p: 0x%16.16llx 0x%16.16llx 0x%16.16llx 0x%16.16llx\n",
42 prefix, ptr, *(ptr), *(ptr + 1), *(ptr + 2), *(ptr + 3));
45 } else if (len >= 16) {
47 "%s 0x%p: 0x%16.16llx 0x%16.16llx\n",
48 prefix, ptr, *(ptr), *(ptr + 1));
52 CDEBUG(level, "%s 0x%p: 0x%16.16llx\n",
61 kgnilnd_dump_msg(int mask, kgn_msg_t *msg)
63 CDEBUG(mask, "0x%8.8x 0x%4.4x 0x%4.4x 0x%16.16llx"
64 " 0x%16.16llx 0x%8.8x 0x%4.4x 0x%4.4x 0x%8.8x\n",
65 msg->gnm_magic, msg->gnm_version,
66 msg->gnm_type, msg->gnm_srcnid,
67 msg->gnm_connstamp, msg->gnm_seq,
68 msg->gnm_cksum, msg->gnm_payload_cksum,
69 msg->gnm_payload_len);
73 kgnilnd_schedule_device(kgn_device_t *dev)
75 short already_live = 0;
77 /* we'll only want to wake if the scheduler thread
78 * has come around and set ready to zero */
79 already_live = cmpxchg(&dev->gnd_ready, GNILND_DEV_IDLE, GNILND_DEV_IRQ);
82 wake_up_all(&dev->gnd_waitq);
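/*
 * The wake above is gated on cmpxchg() so that only the caller who actually
 * transitions the device out of IDLE wakes the scheduler thread; everyone
 * else sees a non-IDLE value and does nothing.  A minimal sketch of the same
 * pattern follows - the example_* names are illustrative only, not part of
 * this driver.
 */
#if 0	/* illustrative sketch, not compiled */
static void
example_schedule(int *ready, wait_queue_head_t *waitq)
{
	/* cmpxchg returns the value *ready held before the exchange */
	if (cmpxchg(ready, 0 /* idle */, 1 /* scheduled */) == 0) {
		/* we flipped it from idle - wake the scheduler thread */
		wake_up_all(waitq);
	}
	/* otherwise somebody already scheduled the device; nothing to do */
}
#endif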
87 void kgnilnd_schedule_device_timer(unsigned long arg)
89 kgn_device_t *dev = (kgn_device_t *) arg;
91 kgnilnd_schedule_device(dev);
95 kgnilnd_device_callback(__u32 devid, __u64 arg)
98 int index = (int) arg;
100 if (index >= kgnilnd_data.kgn_ndevs) {
101 /* use _EMERG instead of an LBUG to prevent LBUG'ing in
102 * interrupt context. */
103 LCONSOLE_EMERG("callback for unknown device %d->%d\n",
108 dev = &kgnilnd_data.kgn_devices[index];
109 /* just basic sanity */
110 if (dev->gnd_id == devid) {
111 kgnilnd_schedule_device(dev);
113 LCONSOLE_EMERG("callback for bad device %d devid %d\n",
118 /* sched_intent values:
119 * < 0 : do not reschedule under any circumstances
120 * == 0: reschedule if someone marked it WANTS_SCHED
121 * > 0 : force a reschedule */
122 /* Return code 0 means it did not schedule the conn, 1
123 * means it successfully scheduled the conn.
127 kgnilnd_schedule_process_conn(kgn_conn_t *conn, int sched_intent)
131 /* move back to IDLE but save previous state.
132 * if we see WANTS_SCHED, we'll call kgnilnd_schedule_conn and
133 * let the xchg there handle any racing callers to get it
134 * onto gnd_ready_conns */
136 conn_sched = xchg(&conn->gnc_scheduled, GNILND_CONN_IDLE);
137 LASSERTF(conn_sched == GNILND_CONN_WANTS_SCHED ||
138 conn_sched == GNILND_CONN_PROCESS,
139 "conn %p after process in bad state: %d\n",
142 if (sched_intent >= 0) {
143 if ((sched_intent > 0 || (conn_sched == GNILND_CONN_WANTS_SCHED))) {
144 return kgnilnd_schedule_conn_refheld(conn, 1);
150 /* Returns 0 if the conn was not scheduled, 1 if the conn was scheduled or marked
154 _kgnilnd_schedule_conn(kgn_conn_t *conn, const char *caller, int line, int refheld)
156 kgn_device_t *dev = conn->gnc_device;
160 sched = xchg(&conn->gnc_scheduled, GNILND_CONN_WANTS_SCHED);
161 /* we only care about the last person who marked want_sched since they
162 * are most likely the culprit
164 memcpy(conn->gnc_sched_caller, caller, sizeof(conn->gnc_sched_caller));
165 conn->gnc_sched_line = line;
166 /* if we are IDLE, add to list - only one guy sees IDLE and "wins"
167 * the chance to put it onto gnd_ready_conns.
168 * otherwise, leave marked as WANTS_SCHED and the thread that "owns"
169 * the conn in process_conns will take care of moving it back to
170 * SCHED when it is done processing */
172 if (sched == GNILND_CONN_IDLE) {
173 /* if the conn is already scheduled, we've already requested
174 * the scheduler thread wakeup */
176 /* Add a reference to the conn if we are not holding a reference
177 * already from the existing scheduler. We now use the same
178 * reference if we need to reschedule a conn while in a scheduler
181 kgnilnd_conn_addref(conn);
183 LASSERTF(list_empty(&conn->gnc_schedlist), "conn %p already sched state %d\n",
186 CDEBUG(D_INFO, "scheduling conn 0x%p caller %s:%d\n", conn, caller, line);
188 spin_lock(&dev->gnd_lock);
189 list_add_tail(&conn->gnc_schedlist, &dev->gnd_ready_conns);
190 spin_unlock(&dev->gnd_lock);
191 set_mb(conn->gnc_last_sched_ask, jiffies);
194 CDEBUG(D_INFO, "not scheduling conn 0x%p: %d caller %s:%d\n", conn, sched, caller, line);
198 /* make sure thread(s) going to process conns - but let it make
199 * separate decision from conn schedule */
200 kgnilnd_schedule_device(dev);
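/*
 * The conn scheduling above hinges on xchg(): every caller marks the conn
 * WANTS_SCHED, but only the caller that saw IDLE takes a ref and enqueues it,
 * so a conn sits on gnd_ready_conns at most once.  Rough sketch of that idea,
 * using a hypothetical example_* name and eliding the refheld/caller
 * book-keeping:
 */
#if 0	/* illustrative sketch, not compiled */
static void
example_mark_conn_ready(kgn_conn_t *conn, kgn_device_t *dev)
{
	int old = xchg(&conn->gnc_scheduled, GNILND_CONN_WANTS_SCHED);

	if (old == GNILND_CONN_IDLE) {
		/* we won the race - take the ref and queue exactly once */
		kgnilnd_conn_addref(conn);
		spin_lock(&dev->gnd_lock);
		list_add_tail(&conn->gnc_schedlist, &dev->gnd_ready_conns);
		spin_unlock(&dev->gnd_lock);
	}
	/* WANTS_SCHED/PROCESS: the current owner will requeue when done */

	/* waking the device is a separate decision from queuing the conn */
	kgnilnd_schedule_device(dev);
}
#endif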
205 kgnilnd_schedule_dgram(kgn_device_t *dev)
209 wake = xchg(&dev->gnd_dgram_ready, GNILND_DGRAM_SCHED);
210 if (wake != GNILND_DGRAM_SCHED) {
211 wake_up(&dev->gnd_dgram_waitq);
213 CDEBUG(D_NETTRACE, "not waking: %d\n", wake);
218 kgnilnd_free_tx(kgn_tx_t *tx)
220 /* taken from kgnilnd_tx_add_state_locked */
222 LASSERTF((tx->tx_list_p == NULL &&
223 tx->tx_list_state == GNILND_TX_ALLOCD) &&
224 list_empty(&tx->tx_list),
225 "tx %p with bad state %s (list_p %p) tx_list %s\n",
226 tx, kgnilnd_tx_state2str(tx->tx_list_state), tx->tx_list_p,
227 list_empty(&tx->tx_list) ? "empty" : "not empty");
229 atomic_dec(&kgnilnd_data.kgn_ntx);
231 /* we only allocate this if we need to */
232 if (tx->tx_phys != NULL) {
233 kmem_cache_free(kgnilnd_data.kgn_tx_phys_cache, tx->tx_phys);
234 CDEBUG(D_MALLOC, "slab-freed 'tx_phys': %lu at %p.\n",
235 LNET_MAX_IOV * sizeof(gni_mem_segment_t), tx->tx_phys);
238 /* Only free the buffer if we used it */
239 if (tx->tx_buffer_copy != NULL) {
240 vfree(tx->tx_buffer_copy);
241 tx->tx_buffer_copy = NULL;
242 CDEBUG(D_MALLOC, "vfreed buffer2\n");
245 KGNILND_POISON(tx, 0x5a, sizeof(kgn_tx_t));
247 CDEBUG(D_MALLOC, "slab-freed 'tx': %lu at %p.\n", sizeof(*tx), tx);
248 kmem_cache_free(kgnilnd_data.kgn_tx_cache, tx);
252 kgnilnd_alloc_tx (void)
256 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_ALLOC_TX))
259 tx = kmem_cache_alloc(kgnilnd_data.kgn_tx_cache, GFP_ATOMIC);
261 CERROR("failed to allocate tx\n");
264 CDEBUG(D_MALLOC, "slab-alloced 'tx': %lu at %p.\n",
267 /* need this memset, cache alloc'd memory is not cleared */
268 memset(tx, 0, sizeof(*tx));
270 /* setup everything here to minimize time under the lock */
271 tx->tx_buftype = GNILND_BUF_NONE;
272 tx->tx_msg.gnm_type = GNILND_MSG_NONE;
273 INIT_LIST_HEAD(&tx->tx_list);
274 INIT_LIST_HEAD(&tx->tx_map_list);
275 tx->tx_list_state = GNILND_TX_ALLOCD;
277 atomic_inc(&kgnilnd_data.kgn_ntx);
282 /* csum_fold needs to be run on the return value before shipping over the wire */
283 #define _kgnilnd_cksum(seed, ptr, nob) csum_partial(ptr, nob, seed)
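/*
 * _kgnilnd_cksum() only accumulates a 32-bit partial sum (__wsum); the 16-bit
 * value that actually goes on the wire comes from csum_fold(), and 0 is
 * reserved as the 'no checksum' magic.  Sketch of that two-step usage with a
 * hypothetical example_* name:
 */
#if 0	/* illustrative sketch, not compiled */
static __u16
example_wire_cksum(void *buf, size_t nob)
{
	__wsum partial = _kgnilnd_cksum(0, buf, nob);	/* 32-bit running sum */
	__u16  folded = csum_fold(partial);		/* 16-bit wire value */

	/* 0 means 'no checksum', so substitute any non-zero value */
	return folded ? folded : 1;
}
#endif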
285 /* we don't use offset as every caller passes a buffer reference that already
286 * includes the offset into the base address -
287 * see kgnilnd_setup_virt_buffer and kgnilnd_setup_immediate_buffer */
289 kgnilnd_cksum(void *ptr, size_t nob)
293 sum = csum_fold(_kgnilnd_cksum(0, ptr, nob));
295 /* don't use magic 'no checksum' value */
299 CDEBUG(D_INFO, "cksum 0x%x for ptr 0x%p sz %zu\n",
306 kgnilnd_cksum_kiov(unsigned int nkiov, lnet_kiov_t *kiov,
307 unsigned int offset, unsigned int nob, int dump_blob)
313 unsigned int fraglen;
319 CDEBUG(D_BUFFS, "calc cksum for kiov 0x%p nkiov %u offset %u nob %u, dump %d\n",
320 kiov, nkiov, offset, nob, dump_blob);
322 /* if this loop changes, please change kgnilnd_setup_phys_buffer */
324 while (offset >= kiov->kiov_len) {
325 offset -= kiov->kiov_len;
331 /* ignore nob here, if nob < (kiov_len - offset), kiov == 1 */
332 odd = (unsigned long) (kiov[0].kiov_len - offset) & 1;
334 if ((odd || *kgnilnd_tunables.kgn_vmap_cksum) && nkiov > 1) {
335 struct page **pages = kgnilnd_data.kgn_cksum_map_pages[get_cpu()];
337 LASSERTF(pages != NULL, "NULL pages for cpu %d map_pages 0x%p\n",
338 get_cpu(), kgnilnd_data.kgn_cksum_map_pages);
340 CDEBUG(D_BUFFS, "odd %d len %u offset %u nob %u\n",
341 odd, kiov[0].kiov_len, offset, nob);
343 for (i = 0; i < nkiov; i++) {
344 pages[i] = kiov[i].kiov_page;
347 addr = vmap(pages, nkiov, VM_MAP, PAGE_KERNEL);
349 CNETERR("Couldn't vmap %d frags on %d bytes to avoid odd length fragment in cksum\n",
351 /* return zero to avoid killing tx - we'll just get warning on console
352 * when remote end sees zero checksum */
355 atomic_inc(&kgnilnd_data.kgn_nvmap_cksum);
357 tmpck = _kgnilnd_cksum(0, (void *) addr + kiov[0].kiov_offset + offset, nob);
361 kgnilnd_dump_blob(D_BUFFS, "flat kiov RDMA payload",
362 (void *)addr + kiov[0].kiov_offset + offset, nob);
364 CDEBUG(D_BUFFS, "cksum 0x%x (+0x%x) for addr 0x%p+%u len %u offset %u\n",
365 cksum, tmpck, addr, kiov[0].kiov_offset, nob, offset);
369 fraglen = min(kiov->kiov_len - offset, nob);
371 /* make dang sure we don't send a bogus checksum if somehow we get
372 * an odd length fragment on anything but the last entry in a kiov -
373 * we know from kgnilnd_setup_rdma_buffer that we can't have non
374 * PAGE_SIZE pages in the middle, so if nob < PAGE_SIZE, it is the last one */
375 LASSERTF(!(fraglen&1) || (nob < PAGE_SIZE),
376 "odd fraglen %u on nkiov %d, nob %u kiov_len %u offset %u kiov 0x%p\n",
377 fraglen, nkiov, nob, kiov->kiov_len, offset, kiov);
379 addr = (void *)kmap(kiov->kiov_page) + kiov->kiov_offset + offset;
380 tmpck = _kgnilnd_cksum(cksum, addr, fraglen);
383 "cksum 0x%x (+0x%x) for page 0x%p+%u (0x%p) len %u offset %u\n",
384 cksum, tmpck, kiov->kiov_page, kiov->kiov_offset, addr,
390 kgnilnd_dump_blob(D_BUFFS, "kiov cksum", addr, fraglen);
392 kunmap(kiov->kiov_page);
399 /* iov must not run out before end of data */
400 LASSERTF(nob == 0 || nkiov > 0, "nob %u nkiov %u\n", nob, nkiov);
405 retsum = csum_fold(cksum);
407 /* don't use magic 'no checksum' value */
411 CDEBUG(D_BUFFS, "retsum 0x%x from cksum 0x%x\n", retsum, cksum);
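/*
 * The kiov walk above keeps a single running 32-bit sum - each fragment's
 * _kgnilnd_cksum() call is seeded with the previous result - and folds to
 * 16 bits exactly once at the end.  Condensed sketch of that shape over a
 * hypothetical flat fragment array:
 */
#if 0	/* illustrative sketch, not compiled */
struct example_frag {
	void		*ef_addr;
	unsigned int	 ef_len;
};

static __u16
example_cksum_frags(struct example_frag *frags, int nfrags)
{
	__wsum	sum = 0;
	int	i;

	for (i = 0; i < nfrags; i++)
		sum = csum_partial(frags[i].ef_addr, frags[i].ef_len, sum);

	return csum_fold(sum);		/* fold once, at the very end */
}
#endif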
417 kgnilnd_init_msg(kgn_msg_t *msg, int type, lnet_nid_t source)
419 msg->gnm_magic = GNILND_MSG_MAGIC;
420 msg->gnm_version = GNILND_MSG_VERSION;
421 msg->gnm_type = type;
422 msg->gnm_payload_len = 0;
423 msg->gnm_srcnid = source;
424 /* gnm_connstamp gets set when FMA is sent */
425 /* gnm_srcnid is set on creation via function argument
426 * The right interface/net and nid is passed in when the message
432 kgnilnd_new_tx_msg(int type, lnet_nid_t source)
434 kgn_tx_t *tx = kgnilnd_alloc_tx();
437 kgnilnd_init_msg(&tx->tx_msg, type, source);
439 CERROR("couldn't allocate new tx type %s!\n",
440 kgnilnd_msgtype2str(type));
447 kgnilnd_nak_rdma(kgn_conn_t *conn, int rx_type, int error, __u64 cookie, lnet_nid_t source) {
453 case GNILND_MSG_GET_REQ:
454 case GNILND_MSG_GET_DONE:
455 nak_type = GNILND_MSG_GET_NAK;
457 case GNILND_MSG_PUT_REQ:
458 case GNILND_MSG_PUT_ACK:
459 case GNILND_MSG_PUT_DONE:
460 nak_type = GNILND_MSG_PUT_NAK;
462 case GNILND_MSG_PUT_REQ_REV:
463 case GNILND_MSG_PUT_DONE_REV:
464 nak_type = GNILND_MSG_PUT_NAK_REV;
466 case GNILND_MSG_GET_REQ_REV:
467 case GNILND_MSG_GET_ACK_REV:
468 case GNILND_MSG_GET_DONE_REV:
469 nak_type = GNILND_MSG_GET_NAK_REV;
472 CERROR("invalid msg type %s (%d)\n",
473 kgnilnd_msgtype2str(rx_type), rx_type);
476 /* only allow NAK on error and truncate to zero */
477 LASSERTF(error <= 0, "error %d conn 0x%p, cookie "LPU64"\n",
478 error, conn, cookie);
480 tx = kgnilnd_new_tx_msg(nak_type, source);
482 CNETERR("can't get TX to NAK RDMA to %s\n",
483 libcfs_nid2str(conn->gnc_peer->gnp_nid));
487 tx->tx_msg.gnm_u.completion.gncm_retval = error;
488 tx->tx_msg.gnm_u.completion.gncm_cookie = cookie;
489 kgnilnd_queue_tx(conn, tx);
493 kgnilnd_setup_immediate_buffer(kgn_tx_t *tx, unsigned int niov, struct iovec *iov,
494 lnet_kiov_t *kiov, unsigned int offset, unsigned int nob)
497 kgn_msg_t *msg = &tx->tx_msg;
500 /* To help save on MDDs for short messages, we'll vmap a kiov to allow
501 * gni_smsg_send to send that as the payload */
503 LASSERT(tx->tx_buftype == GNILND_BUF_NONE);
506 tx->tx_buffer = NULL;
507 } else if (kiov != NULL) {
508 LASSERTF(niov > 0 && niov < GNILND_MAX_IMMEDIATE/PAGE_SIZE,
509 "bad niov %d\n", niov);
511 while (offset >= kiov->kiov_len) {
512 offset -= kiov->kiov_len;
517 for (i = 0; i < niov; i++) {
518 /* We can't have a kiov_offset on anything but the first entry,
519 * otherwise we'll have a hole at the end of the mapping as we only map
521 * Also, if we have a kiov_len < PAGE_SIZE but we need to map more
522 * than kiov_len, we will also have a hole at the end of that page
523 * which isn't allowed */
524 if ((kiov[i].kiov_offset != 0 && i > 0) ||
525 (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_SIZE && i < niov - 1)) {
526 CNETERR("Can't make payload contiguous in I/O VM: "
527 "page %d, offset %u, nob %u, kiov_offset %u kiov_len %u\n",
528 i, offset, nob, kiov->kiov_offset, kiov->kiov_len);
531 tx->tx_imm_pages[i] = kiov[i].kiov_page;
534 /* hijack tx_phys for the later unmap */
536 /* tx->tx_phys being equal to NULL is the signal for unmap to discern between kmap and vmap */
538 tx->tx_buffer = (void *)kmap(tx->tx_imm_pages[0]) + kiov[0].kiov_offset + offset;
539 atomic_inc(&kgnilnd_data.kgn_nkmap_short);
540 GNIDBG_TX(D_NET, tx, "kmapped page for %d bytes for kiov 0x%p, buffer 0x%p",
541 nob, kiov, tx->tx_buffer);
543 tx->tx_phys = vmap(tx->tx_imm_pages, niov, VM_MAP, PAGE_KERNEL);
544 if (tx->tx_phys == NULL) {
545 CNETERR("Couldn't vmap %d frags on %d bytes\n", niov, nob);
549 atomic_inc(&kgnilnd_data.kgn_nvmap_short);
550 /* make sure we take into account the kiov offset as the start of the buffer */
551 tx->tx_buffer = (void *)tx->tx_phys + kiov[0].kiov_offset + offset;
552 GNIDBG_TX(D_NET, tx, "mapped %d pages for %d bytes from kiov 0x%p to 0x%p, buffer 0x%p",
553 niov, nob, kiov, tx->tx_phys, tx->tx_buffer);
555 tx->tx_buftype = GNILND_BUF_IMMEDIATE_KIOV;
559 /* For now this is almost identical to kgnilnd_setup_virt_buffer, but we
560 * could "flatten" the payload into a single contiguous buffer ready
561 * for sending direct over an FMA if we ever needed to. */
565 while (offset >= iov->iov_len) {
566 offset -= iov->iov_len;
572 if (nob > iov->iov_len - offset) {
573 CERROR("Can't handle multiple vaddr fragments\n");
577 tx->tx_buffer = (void *)(((unsigned long)iov->iov_base) + offset);
579 tx->tx_buftype = GNILND_BUF_IMMEDIATE;
583 /* checksum payload early - it shouldn't be changing after lnd_send */
584 if (*kgnilnd_tunables.kgn_checksum >= 2) {
585 msg->gnm_payload_cksum = kgnilnd_cksum(tx->tx_buffer, nob);
586 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_SMSG_CKSUM2)) {
587 msg->gnm_payload_cksum += 0xe00e;
589 if (*kgnilnd_tunables.kgn_checksum_dump > 1) {
590 kgnilnd_dump_blob(D_BUFFS, "payload checksum",
594 msg->gnm_payload_cksum = 0;
601 kgnilnd_setup_virt_buffer(kgn_tx_t *tx,
602 unsigned int niov, struct iovec *iov,
603 unsigned int offset, unsigned int nob)
608 LASSERT(tx->tx_buftype == GNILND_BUF_NONE);
610 while (offset >= iov->iov_len) {
611 offset -= iov->iov_len;
617 if (nob > iov->iov_len - offset) {
618 CERROR("Can't handle multiple vaddr fragments\n");
622 tx->tx_buftype = GNILND_BUF_VIRT_UNMAPPED;
624 tx->tx_buffer = (void *)(((unsigned long)iov->iov_base) + offset);
629 kgnilnd_setup_phys_buffer(kgn_tx_t *tx, int nkiov, lnet_kiov_t *kiov,
630 unsigned int offset, unsigned int nob)
632 gni_mem_segment_t *phys;
634 unsigned int fraglen;
636 GNIDBG_TX(D_NET, tx, "niov %d kiov 0x%p offset %u nob %u", nkiov, kiov, offset, nob);
640 LASSERT(tx->tx_buftype == GNILND_BUF_NONE);
642 /* only allocate this if we are going to use it */
643 tx->tx_phys = kmem_cache_alloc(kgnilnd_data.kgn_tx_phys_cache,
645 if (tx->tx_phys == NULL) {
646 CERROR("failed to allocate tx_phys\n");
651 CDEBUG(D_MALLOC, "slab-alloced 'tx->tx_phys': %lu at %p.\n",
652 LNET_MAX_IOV * sizeof(gni_mem_segment_t), tx->tx_phys);
654 /* if this loop changes, please change kgnilnd_cksum_kiov
655 * and kgnilnd_setup_immediate_buffer */
657 while (offset >= kiov->kiov_len) {
658 offset -= kiov->kiov_len;
664 /* at this point, kiov points to the first page that we'll actually map
665 * now that we've advanced into the kiov for offset and dropped any
666 * leading pages that fall entirely within the offset */
667 tx->tx_buftype = GNILND_BUF_PHYS_UNMAPPED;
670 /* kiov_offset is start of 'valid' buffer, so index offset past that */
671 tx->tx_buffer = (void *)((unsigned long)(kiov->kiov_offset + offset));
674 CDEBUG(D_NET, "tx 0x%p buffer 0x%p map start kiov 0x%p+%u niov %d offset %u\n",
675 tx, tx->tx_buffer, kiov, kiov->kiov_offset, nkiov, offset);
678 fraglen = min(kiov->kiov_len - offset, nob);
680 /* We can't have a kiov_offset on anything but the first entry,
681 * otherwise we'll have a hole at the end of the mapping as we only map
682 * whole pages. Only the first page is allowed to have an offset -
683 * we'll add that into tx->tx_buffer and that will get used when we
684 * map in the segments (see kgnilnd_map_buffer).
685 * Also, if we have a kiov_len < PAGE_SIZE but we need to map more
686 * than kiov_len, we will also have a hole at the end of that page
687 * which isn't allowed */
688 if ((phys != tx->tx_phys) &&
689 ((kiov->kiov_offset != 0) ||
690 ((kiov->kiov_len < PAGE_SIZE) && (nob > kiov->kiov_len)))) {
691 CERROR("Can't make payload contiguous in I/O VM: "
692 "page %d, offset %u, nob %u, kiov_offset %u kiov_len %u\n"
693 (int)(phys - tx->tx_phys),
694 offset, nob, kiov->kiov_offset, kiov->kiov_len);
699 if ((phys - tx->tx_phys) == LNET_MAX_IOV) {
700 CERROR ("payload too big (%d)\n", (int)(phys - tx->tx_phys));
705 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_PHYS_SETUP)) {
710 CDEBUG(D_BUFFS, "page 0x%p kiov_offset %u kiov_len %u nob %u "
711 "nkiov %u offset %u\n",
712 kiov->kiov_page, kiov->kiov_offset, kiov->kiov_len, nob, nkiov, offset);
714 phys->address = page_to_phys(kiov->kiov_page);
721 /* iov must not run out before end of data */
722 LASSERTF(nob == 0 || nkiov > 0, "nob %u nkiov %u\n", nob, nkiov);
726 tx->tx_phys_npages = phys - tx->tx_phys;
731 if (tx->tx_phys != NULL) {
732 kmem_cache_free(kgnilnd_data.kgn_tx_phys_cache, tx->tx_phys);
733 CDEBUG(D_MALLOC, "slab-freed 'tx_phys': %lu at %p.\n",
734 sizeof(*tx->tx_phys), tx->tx_phys);
741 kgnilnd_setup_rdma_buffer(kgn_tx_t *tx, unsigned int niov,
742 struct iovec *iov, lnet_kiov_t *kiov,
743 unsigned int offset, unsigned int nob)
747 LASSERTF((iov == NULL) != (kiov == NULL), "iov 0x%p, kiov 0x%p, tx 0x%p,"
748 " offset %d, nob %d, niov %d\n"
749 , iov, kiov, tx, offset, nob, niov);
752 rc = kgnilnd_setup_phys_buffer(tx, niov, kiov, offset, nob);
754 rc = kgnilnd_setup_virt_buffer(tx, niov, iov, offset, nob);
759 /* kgnilnd_parse_lnet_rdma()
760 * lntmsg - message passed in from lnet.
761 * niov, kiov, offset - see lnd_t in lib-types.h for descriptions.
762 * nob - actual number of bytes in this message.
763 * put_len - It is possible for PUTs to have a different length than the
764 * length stored in lntmsg->msg_len since LNET can adjust this
765 * length based on its buffer size and offset.
766 * lnet_try_match_md() sets the mlength that we use to do the RDMA
770 kgnilnd_parse_lnet_rdma(lnet_msg_t *lntmsg, unsigned int *niov,
771 unsigned int *offset, unsigned int *nob,
772 lnet_kiov_t **kiov, int put_len)
774 /* GETs are weird, see kgnilnd_send */
775 if (lntmsg->msg_type == LNET_MSG_GET) {
776 if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0) {
779 *kiov = lntmsg->msg_md->md_iov.kiov;
781 *niov = lntmsg->msg_md->md_niov;
782 *nob = lntmsg->msg_md->md_length;
785 *kiov = lntmsg->msg_kiov;
786 *niov = lntmsg->msg_niov;
788 *offset = lntmsg->msg_offset;
793 kgnilnd_compute_rdma_cksum(kgn_tx_t *tx, int put_len)
795 unsigned int niov, offset, nob;
797 lnet_msg_t *lntmsg = tx->tx_lntmsg[0];
798 int dump_cksum = (*kgnilnd_tunables.kgn_checksum_dump > 1);
800 GNITX_ASSERTF(tx, ((tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE) ||
801 (tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE) ||
802 (tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE_REV) ||
803 (tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE_REV) ||
804 (tx->tx_msg.gnm_type == GNILND_MSG_GET_ACK_REV) ||
805 (tx->tx_msg.gnm_type == GNILND_MSG_PUT_REQ_REV)),
806 "bad type %s", kgnilnd_msgtype2str(tx->tx_msg.gnm_type));
808 if ((tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE_REV) ||
809 (tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE_REV)) {
810 tx->tx_msg.gnm_payload_cksum = 0;
813 if (*kgnilnd_tunables.kgn_checksum < 3) {
814 tx->tx_msg.gnm_payload_cksum = 0;
818 GNITX_ASSERTF(tx, lntmsg, "no LNet message!", NULL);
820 kgnilnd_parse_lnet_rdma(lntmsg, &niov, &offset, &nob, &kiov,
824 tx->tx_msg.gnm_payload_cksum = kgnilnd_cksum_kiov(niov, kiov, offset, nob, dump_cksum);
826 tx->tx_msg.gnm_payload_cksum = kgnilnd_cksum(tx->tx_buffer, nob);
828 kgnilnd_dump_blob(D_BUFFS, "peer RDMA payload", tx->tx_buffer, nob);
832 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_SMSG_CKSUM3)) {
833 tx->tx_msg.gnm_payload_cksum += 0xd00d;
837 /* kgnilnd_verify_rdma_cksum()
838 * tx - PUT_DONE/GET_DONE matched tx.
839 * rx_cksum - received checksum to compare against.
840 * put_len - see kgnilnd_parse_lnet_rdma comments.
843 kgnilnd_verify_rdma_cksum(kgn_tx_t *tx, __u16 rx_cksum, int put_len)
847 unsigned int niov, offset, nob;
849 lnet_msg_t *lntmsg = tx->tx_lntmsg[0];
850 int dump_on_err = *kgnilnd_tunables.kgn_checksum_dump;
852 /* we can only match certain requests */
853 GNITX_ASSERTF(tx, ((tx->tx_msg.gnm_type == GNILND_MSG_GET_REQ) ||
854 (tx->tx_msg.gnm_type == GNILND_MSG_PUT_ACK) ||
855 (tx->tx_msg.gnm_type == GNILND_MSG_PUT_REQ_REV) ||
856 (tx->tx_msg.gnm_type == GNILND_MSG_GET_ACK_REV) ||
857 (tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE_REV) ||
858 (tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE_REV)),
859 "bad type %s", kgnilnd_msgtype2str(tx->tx_msg.gnm_type));
861 if ((tx->tx_msg.gnm_type == GNILND_MSG_PUT_REQ_REV) ||
862 (tx->tx_msg.gnm_type == GNILND_MSG_GET_ACK_REV)) {
867 if (*kgnilnd_tunables.kgn_checksum >= 3) {
868 GNIDBG_MSG(D_WARNING, &tx->tx_msg,
869 "no RDMA payload checksum when enabled");
874 GNITX_ASSERTF(tx, lntmsg, "no LNet message!", NULL);
876 kgnilnd_parse_lnet_rdma(lntmsg, &niov, &offset, &nob, &kiov, put_len);
879 cksum = kgnilnd_cksum_kiov(niov, kiov, offset, nob, 0);
881 cksum = kgnilnd_cksum(tx->tx_buffer, nob);
884 if (cksum != rx_cksum) {
885 GNIDBG_MSG(D_NETERROR, &tx->tx_msg,
886 "Bad RDMA payload checksum (%x expected %x); "
887 "kiov 0x%p niov %d nob %u offset %u",
888 cksum, rx_cksum, kiov, niov, nob, offset);
889 switch (dump_on_err) {
892 kgnilnd_cksum_kiov(niov, kiov, offset, nob, 1);
894 kgnilnd_dump_blob(D_BUFFS, "RDMA payload",
897 /* fall through to dump log */
899 libcfs_debug_dumplog();
905 /* kgnilnd_check_fma_rx will close conn, kill tx with error */
911 kgnilnd_mem_add_map_list(kgn_device_t *dev, kgn_tx_t *tx)
915 GNITX_ASSERTF(tx, list_empty(&tx->tx_map_list),
916 "already mapped!", NULL);
918 spin_lock(&dev->gnd_map_lock);
919 switch (tx->tx_buftype) {
921 GNIDBG_TX(D_EMERG, tx,
922 "SOFTWARE BUG: invalid mapping %d", tx->tx_buftype);
923 spin_unlock(&dev->gnd_map_lock);
927 case GNILND_BUF_PHYS_MAPPED:
928 bytes = tx->tx_phys_npages * PAGE_SIZE;
929 dev->gnd_map_nphys++;
930 dev->gnd_map_physnop += tx->tx_phys_npages;
933 case GNILND_BUF_VIRT_MAPPED:
935 dev->gnd_map_nvirt++;
936 dev->gnd_map_virtnob += tx->tx_nob;
940 if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_ACK ||
941 tx->tx_msg.gnm_type == GNILND_MSG_GET_REQ) {
942 atomic64_add(bytes, &dev->gnd_rdmaq_bytes_out);
943 GNIDBG_TX(D_NETTRACE, tx, "rdma ++ %d to "LPD64"",
944 bytes, atomic64_read(&dev->gnd_rdmaq_bytes_out));
947 atomic_inc(&dev->gnd_n_mdd);
948 atomic64_add(bytes, &dev->gnd_nbytes_map);
950 /* clear retrans to prevent any SMSG goofiness as that code uses the same counter */
953 /* we only get here in the valid cases */
954 list_add_tail(&tx->tx_map_list, &dev->gnd_map_list);
955 dev->gnd_map_version++;
956 spin_unlock(&dev->gnd_map_lock);
960 kgnilnd_mem_del_map_list(kgn_device_t *dev, kgn_tx_t *tx)
964 GNITX_ASSERTF(tx, !list_empty(&tx->tx_map_list),
965 "not mapped!", NULL);
966 spin_lock(&dev->gnd_map_lock);
968 switch (tx->tx_buftype) {
970 GNIDBG_TX(D_EMERG, tx,
971 "SOFTWARE BUG: invalid mapping %d", tx->tx_buftype);
972 spin_unlock(&dev->gnd_map_lock);
976 case GNILND_BUF_PHYS_UNMAPPED:
977 bytes = tx->tx_phys_npages * PAGE_SIZE;
978 dev->gnd_map_nphys--;
979 dev->gnd_map_physnop -= tx->tx_phys_npages;
982 case GNILND_BUF_VIRT_UNMAPPED:
984 dev->gnd_map_nvirt--;
985 dev->gnd_map_virtnob -= tx->tx_nob;
989 if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_ACK ||
990 tx->tx_msg.gnm_type == GNILND_MSG_GET_REQ) {
991 atomic64_sub(bytes, &dev->gnd_rdmaq_bytes_out);
992 LASSERTF(atomic64_read(&dev->gnd_rdmaq_bytes_out) >= 0,
993 "bytes_out negative! %ld\n", atomic64_read(&dev->gnd_rdmaq_bytes_out));
994 GNIDBG_TX(D_NETTRACE, tx, "rdma -- %d to "LPD64"",
995 bytes, atomic64_read(&dev->gnd_rdmaq_bytes_out));
998 atomic_dec(&dev->gnd_n_mdd);
999 atomic64_sub(bytes, &dev->gnd_nbytes_map);
1001 /* we only get here in the valid cases */
1002 list_del_init(&tx->tx_map_list);
1003 dev->gnd_map_version++;
1004 spin_unlock(&dev->gnd_map_lock);
1008 kgnilnd_map_buffer(kgn_tx_t *tx)
1010 kgn_conn_t *conn = tx->tx_conn;
1011 kgn_device_t *dev = conn->gnc_device;
1012 __u32 flags = GNI_MEM_READWRITE;
1015 /* The kgnilnd_mem_register(_segments) Gemini Driver functions can
1016 * be called concurrently as there are internal locks that protect
1017 * any data structures or HW resources. We just need to ensure
1018 * that our concurrency doesn't result in the kgn_device_t
1019 * getting nuked while we are in here */
1021 LASSERTF(conn != NULL, "tx %p with NULL conn, someone forgot"
1022 " to set tx_conn before calling %s\n", tx, __FUNCTION__);
1024 if (unlikely(CFS_FAIL_CHECK(CFS_FAIL_GNI_MAP_TX)))
1027 if (*kgnilnd_tunables.kgn_bte_relaxed_ordering) {
1028 flags |= GNI_MEM_RELAXED_PI_ORDERING;
1031 switch (tx->tx_buftype) {
1035 case GNILND_BUF_NONE:
1036 case GNILND_BUF_IMMEDIATE:
1037 case GNILND_BUF_IMMEDIATE_KIOV:
1038 case GNILND_BUF_PHYS_MAPPED:
1039 case GNILND_BUF_VIRT_MAPPED:
1042 case GNILND_BUF_PHYS_UNMAPPED:
1043 GNITX_ASSERTF(tx, tx->tx_phys != NULL, "physical buffer not there!", NULL);
1044 rrc = kgnilnd_mem_register_segments(dev->gnd_handle,
1045 tx->tx_phys, tx->tx_phys_npages, NULL,
1046 GNI_MEM_PHYS_SEGMENTS | flags,
1048 /* could race with other uses of the map counts, but this is ok
1049 * - this needs to turn into a non-fatal error soon to allow
1050 * GART resource, etc starvation handling */
1051 if (rrc != GNI_RC_SUCCESS) {
1052 GNIDBG_TX(D_NET, tx, "Can't map %d pages: dev %d "
1053 "phys %u pp %u, virt %u nob "LPU64"",
1054 tx->tx_phys_npages, dev->gnd_id,
1055 dev->gnd_map_nphys, dev->gnd_map_physnop,
1056 dev->gnd_map_nvirt, dev->gnd_map_virtnob);
1057 RETURN(rrc == GNI_RC_ERROR_RESOURCE ? -ENOMEM : -EINVAL);
1060 tx->tx_buftype = GNILND_BUF_PHYS_MAPPED;
1061 kgnilnd_mem_add_map_list(dev, tx);
1064 case GNILND_BUF_VIRT_UNMAPPED:
1065 rrc = kgnilnd_mem_register(dev->gnd_handle,
1066 (__u64)tx->tx_buffer, tx->tx_nob,
1067 NULL, flags, &tx->tx_map_key);
1068 if (rrc != GNI_RC_SUCCESS) {
1069 GNIDBG_TX(D_NET, tx, "Can't map %u bytes: dev %d "
1070 "phys %u pp %u, virt %u nob "LPU64"",
1071 tx->tx_nob, dev->gnd_id,
1072 dev->gnd_map_nphys, dev->gnd_map_physnop,
1073 dev->gnd_map_nvirt, dev->gnd_map_virtnob);
1074 RETURN(rrc == GNI_RC_ERROR_RESOURCE ? -ENOMEM : -EINVAL);
1077 tx->tx_buftype = GNILND_BUF_VIRT_MAPPED;
1078 kgnilnd_mem_add_map_list(dev, tx);
1079 if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_ACK ||
1080 tx->tx_msg.gnm_type == GNILND_MSG_GET_REQ) {
1081 atomic64_add(tx->tx_nob, &dev->gnd_rdmaq_bytes_out);
1082 GNIDBG_TX(D_NETTRACE, tx, "rdma ++ %d to %ld\n",
1083 tx->tx_nob, atomic64_read(&dev->gnd_rdmaq_bytes_out));
1091 kgnilnd_add_purgatory_tx(kgn_tx_t *tx)
1093 kgn_conn_t *conn = tx->tx_conn;
1094 kgn_mdd_purgatory_t *gmp;
1096 LIBCFS_ALLOC(gmp, sizeof(*gmp));
1097 LASSERTF(gmp != NULL, "couldn't allocate MDD purgatory member;"
1098 " asserting to avoid data corruption\n");
1099 if (tx->tx_buffer_copy)
1100 gmp->gmp_map_key = tx->tx_buffer_copy_map_key;
1102 gmp->gmp_map_key = tx->tx_map_key;
1104 atomic_inc(&conn->gnc_device->gnd_n_mdd_held);
1106 /* ensure that we don't have a blank purgatory - indicating the
1107 * conn is not already on purgatory lists - we'd never recover these
1108 * MDD if that were the case */
1109 GNITX_ASSERTF(tx, conn->gnc_in_purgatory,
1110 "conn 0x%p->%s with NULL purgatory",
1111 conn, libcfs_nid2str(conn->gnc_peer->gnp_nid));
1113 /* link 'er up! - only place we really need to lock for
1114 * concurrent access */
1115 spin_lock(&conn->gnc_list_lock);
1116 list_add_tail(&gmp->gmp_list, &conn->gnc_mdd_list);
1117 spin_unlock(&conn->gnc_list_lock);
1121 kgnilnd_unmap_buffer(kgn_tx_t *tx, int error)
1125 int hold_timeout = 0;
1127 /* code below relies on +1 relationship ... */
1128 CLASSERT(GNILND_BUF_PHYS_MAPPED == (GNILND_BUF_PHYS_UNMAPPED + 1));
1129 CLASSERT(GNILND_BUF_VIRT_MAPPED == (GNILND_BUF_VIRT_UNMAPPED + 1));
1131 switch (tx->tx_buftype) {
1135 case GNILND_BUF_NONE:
1136 case GNILND_BUF_IMMEDIATE:
1137 case GNILND_BUF_PHYS_UNMAPPED:
1138 case GNILND_BUF_VIRT_UNMAPPED:
1140 case GNILND_BUF_IMMEDIATE_KIOV:
1141 if (tx->tx_phys != NULL) {
1142 vunmap(tx->tx_phys);
1143 } else if (tx->tx_phys == NULL && tx->tx_buffer != NULL) {
1144 kunmap(tx->tx_imm_pages[0]);
1146 /* clear to prevent kgnilnd_free_tx from thinking
1147 * this is an RDMA descriptor */
1151 case GNILND_BUF_PHYS_MAPPED:
1152 case GNILND_BUF_VIRT_MAPPED:
1153 LASSERT(tx->tx_conn != NULL);
1155 dev = tx->tx_conn->gnc_device;
1157 /* only want to hold if we are closing conn without
1158 * verified peer notification - the theory is that
1159 * a TX error can be communicated in all other cases */
1160 if (tx->tx_conn->gnc_state != GNILND_CONN_ESTABLISHED &&
1161 kgnilnd_check_purgatory_conn(tx->tx_conn)) {
1162 kgnilnd_add_purgatory_tx(tx);
1164 /* The timeout we give to kgni is a deadman stop only.
1165 * we set it high to ensure we don't have the kgni timer
1166 * fire before ours fires _and_ is handled */
1167 hold_timeout = GNILND_TIMEOUT2DEADMAN;
1169 GNIDBG_TX(D_NET, tx,
1170 "dev %p delaying MDD release for %dms key "LPX64"."LPX64"",
1171 tx->tx_conn->gnc_device, hold_timeout,
1172 tx->tx_map_key.qword1, tx->tx_map_key.qword2);
1174 if (tx->tx_buffer_copy != NULL) {
1175 rrc = kgnilnd_mem_deregister(dev->gnd_handle, &tx->tx_buffer_copy_map_key, hold_timeout);
1176 LASSERTF(rrc == GNI_RC_SUCCESS, "rrc %d\n", rrc);
1177 rrc = kgnilnd_mem_deregister(dev->gnd_handle, &tx->tx_map_key, 0);
1178 LASSERTF(rrc == GNI_RC_SUCCESS, "rrc %d\n", rrc);
1180 rrc = kgnilnd_mem_deregister(dev->gnd_handle, &tx->tx_map_key, hold_timeout);
1181 LASSERTF(rrc == GNI_RC_SUCCESS, "rrc %d\n", rrc);
1185 kgnilnd_mem_del_map_list(dev, tx);
1191 kgnilnd_tx_done(kgn_tx_t *tx, int completion)
1193 lnet_msg_t *lntmsg0, *lntmsg1;
1194 int status0, status1;
1195 lnet_ni_t *ni = NULL;
1196 kgn_conn_t *conn = tx->tx_conn;
1198 LASSERT(!in_interrupt());
1200 lntmsg0 = tx->tx_lntmsg[0]; tx->tx_lntmsg[0] = NULL;
1201 lntmsg1 = tx->tx_lntmsg[1]; tx->tx_lntmsg[1] = NULL;
1204 !(tx->tx_state & GNILND_TX_QUIET_ERROR) &&
1205 !kgnilnd_conn_clean_errno(completion)) {
1206 GNIDBG_TOMSG(D_NETERROR, &tx->tx_msg,
1207 "error %d on tx 0x%p->%s id %u/%d state %s age %ds",
1208 completion, tx, conn ?
1209 libcfs_nid2str(conn->gnc_peer->gnp_nid) : "<?>",
1210 tx->tx_id.txe_smsg_id, tx->tx_id.txe_idx,
1211 kgnilnd_tx_state2str(tx->tx_list_state),
1212 cfs_duration_sec((unsigned long)jiffies - tx->tx_qtime));
1215 /* The error codes determine if we hold onto the MDD */
1216 kgnilnd_unmap_buffer(tx, completion);
1218 /* we have to deliver a reply on lntmsg[1] for the GET, so make sure
1219 * we play nice with the error codes to avoid delivering a failed
1220 * REQUEST and then a REPLY event as well */
1222 /* return -EIO to lnet - it is the magic value for failed sends */
1223 if (tx->tx_msg.gnm_type == GNILND_MSG_GET_REQ) {
1225 status1 = completion;
1227 status0 = status1 = completion;
1230 tx->tx_buftype = GNILND_BUF_NONE;
1231 tx->tx_msg.gnm_type = GNILND_MSG_NONE;
1233 /* lnet_finalize doesn't do anything with the *ni, so ok for us to
1234 * set NULL when we are a tx without a conn */
1236 ni = conn->gnc_peer->gnp_net->gnn_ni;
1238 spin_lock(&conn->gnc_tx_lock);
1240 LASSERTF(test_and_clear_bit(tx->tx_id.txe_idx,
1241 (volatile unsigned long *)&conn->gnc_tx_bits),
1242 "conn %p tx %p bit %d already cleared\n",
1243 conn, tx, tx->tx_id.txe_idx);
1245 LASSERTF(conn->gnc_tx_ref_table[tx->tx_id.txe_idx] != NULL,
1246 "msg_id %d already NULL\n", tx->tx_id.txe_idx);
1248 conn->gnc_tx_ref_table[tx->tx_id.txe_idx] = NULL;
1249 spin_unlock(&conn->gnc_tx_lock);
1252 kgnilnd_free_tx(tx);
1254 /* finalize AFTER freeing lnet msgs */
1256 /* warning - we should hold no locks here - calling lnet_finalize
1257 * could free up lnet credits, resulting in a call chain back into
1258 * the LND via kgnilnd_send and friends */
1260 lnet_finalize(ni, lntmsg0, status0);
1262 if (lntmsg1 != NULL) {
1263 lnet_finalize(ni, lntmsg1, status1);
1268 kgnilnd_txlist_done(struct list_head *txlist, int error)
1271 int err_printed = 0;
1273 if (list_empty(txlist))
1276 list_for_each_entry_safe(tx, txn, txlist, tx_list) {
1277 /* only print the first error */
1279 tx->tx_state |= GNILND_TX_QUIET_ERROR;
1280 list_del_init(&tx->tx_list);
1281 kgnilnd_tx_done(tx, error);
1286 kgnilnd_set_tx_id(kgn_tx_t *tx, kgn_conn_t *conn)
1290 spin_lock(&conn->gnc_tx_lock);
1292 /* ID zero is NOT ALLOWED!!! */
1295 id = find_next_zero_bit((unsigned long *)&conn->gnc_tx_bits,
1296 GNILND_MAX_MSG_ID, conn->gnc_next_tx);
1297 if (id == GNILND_MAX_MSG_ID) {
1298 if (conn->gnc_next_tx != 1) {
1299 /* we only searched from next_tx to end and didn't find
1300 * one, so search again from start */
1301 conn->gnc_next_tx = 1;
1304 /* couldn't find one! */
1305 spin_unlock(&conn->gnc_tx_lock);
1309 /* bump next_tx to prevent immediate reuse */
1310 conn->gnc_next_tx = id + 1;
1312 set_bit(id, (volatile unsigned long *)&conn->gnc_tx_bits);
1313 LASSERTF(conn->gnc_tx_ref_table[id] == NULL,
1314 "tx 0x%p already at id %d\n",
1315 conn->gnc_tx_ref_table[id], id);
1317 /* delay these until we have a valid ID - prevents bad clear of the bit
1318 * in kgnilnd_tx_done */
1320 tx->tx_id.txe_cqid = conn->gnc_cqid;
1322 tx->tx_id.txe_idx = id;
1323 conn->gnc_tx_ref_table[id] = tx;
1325 /* Using jiffies to help differentiate against TX reuse - with
1326 * the usual minimum of a 250HZ clock, we wrap jiffies on the same TX
1327 * if we are sending to the same node faster than 256000/sec.
1328 * To help guard against this, we OR in the tx_seq - that is 32 bits */
1330 tx->tx_id.txe_chips = (__u32)(jiffies | atomic_read(&conn->gnc_tx_seq));
1332 GNIDBG_TX(D_NET, tx, "set cookie/id/bits", NULL);
1334 spin_unlock(&conn->gnc_tx_lock);
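/*
 * TX ids come out of a per-conn bitmap: search from gnc_next_tx to the end,
 * wrap back to 1 (id 0 is reserved) for a second pass, and only fail if both
 * passes find nothing.  Generic sketch of that allocation with hypothetical
 * example_* names:
 */
#if 0	/* illustrative sketch, not compiled */
static int
example_alloc_id(unsigned long *bitmap, int max_ids, int *next_hint)
{
	int id = find_next_zero_bit(bitmap, max_ids, *next_hint);

	if (id == max_ids) {
		if (*next_hint == 1)
			return -E2BIG;		/* table truly full */
		*next_hint = 1;			/* wrap, skipping reserved id 0 */
		id = find_next_zero_bit(bitmap, max_ids, *next_hint);
		if (id == max_ids)
			return -E2BIG;
	}

	set_bit(id, bitmap);
	*next_hint = id + 1;			/* discourage immediate reuse */
	return id;
}
#endif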
1339 kgnilnd_tx_should_retry(kgn_conn_t *conn, kgn_tx_t *tx)
1341 int max_retrans = *kgnilnd_tunables.kgn_max_retransmits;
1343 int log_retrans_level;
1345 /* I need kgni credits to send this. Replace tx at the head of the
1346 * fmaq and I'll get rescheduled when credits appear */
1349 conn->gnc_tx_retrans++;
1350 log_retrans = ((tx->tx_retrans < 25) || ((tx->tx_retrans % 25) == 0) ||
1351 (tx->tx_retrans > (max_retrans / 2)));
1352 log_retrans_level = tx->tx_retrans < (max_retrans / 2) ? D_NET : D_NETERROR;
1354 /* Decision time - either error, warn or just retransmit */
1356 /* we don't care about TX timeout - it could be that the network is slower
1357 * or throttled. We'll keep retransmitting - so if the network is so slow
1358 * that we fill up our mailbox, we'll keep trying to resend that msg
1359 * until we exceed the max_retrans _or_ gnc_last_rx expires, indicating
1360 * that the peer hasn't sent us any traffic in return */
1362 if (tx->tx_retrans > max_retrans) {
1363 /* this means we are not backing off the retransmits
1364 * in a healthy manner and are likely chewing up the
1365 * CPU cycles quite badly */
1366 GNIDBG_TOMSG(D_ERROR, &tx->tx_msg,
1367 "SOFTWARE BUG: too many retransmits (%d) for tx id %x "
1369 tx->tx_retrans, tx->tx_id, conn,
1370 libcfs_nid2str(conn->gnc_peer->gnp_nid));
1372 /* yes - double errors to help debug this condition */
1373 GNIDBG_TOMSG(D_NETERROR, &tx->tx_msg, "connection dead. "
1374 "unable to send to %s for %lu secs (%d tries)",
1375 libcfs_nid2str(tx->tx_conn->gnc_peer->gnp_nid),
1376 cfs_duration_sec(jiffies - tx->tx_cred_wait),
1379 kgnilnd_close_conn(conn, -ETIMEDOUT);
1381 /* caller should terminate */
1384 /* some reasonable throttling of the debug message */
1386 unsigned long now = jiffies;
1387 /* XXX Nic: Mystical TX debug here... */
1388 GNIDBG_SMSG_CREDS(log_retrans_level, conn);
1389 GNIDBG_TOMSG(log_retrans_level, &tx->tx_msg,
1390 "NOT_DONE on conn 0x%p->%s id %x retrans %d wait %dus"
1391 " last_msg %uus/%uus last_cq %uus/%uus",
1392 conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
1393 tx->tx_id, tx->tx_retrans,
1394 jiffies_to_usecs(now - tx->tx_cred_wait),
1395 jiffies_to_usecs(now - conn->gnc_last_tx),
1396 jiffies_to_usecs(now - conn->gnc_last_rx),
1397 jiffies_to_usecs(now - conn->gnc_last_tx_cq),
1398 jiffies_to_usecs(now - conn->gnc_last_rx_cq));
1400 /* caller should retry */
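/*
 * The retransmit policy above reduces to: give up (and close the conn) once
 * tx_retrans passes kgn_max_retransmits, otherwise log occasionally and ask
 * the caller to retry.  Condensed sketch of that decision, hypothetical
 * example_* name:
 */
#if 0	/* illustrative sketch, not compiled */
static int
example_should_retry(int retrans, int max_retrans)
{
	if (retrans > max_retrans)
		return 0;	/* caller should terminate the send */

	/* throttle the noise: early tries, every 25th, and past the halfway mark */
	if (retrans < 25 || (retrans % 25) == 0 || retrans > max_retrans / 2)
		pr_debug("retransmit %d of %d\n", retrans, max_retrans);

	return 1;		/* caller should retry */
}
#endif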
1405 /* caller must be holding gnd_cq_mutex and must not unlock it afterwards, as we need to drop it
1406 * to avoid bad ordering with state_lock */
1409 kgnilnd_sendmsg_nolock(kgn_tx_t *tx, void *immediate, unsigned int immediatenob,
1410 spinlock_t *state_lock, kgn_tx_list_state_t state)
1412 kgn_conn_t *conn = tx->tx_conn;
1413 kgn_msg_t *msg = &tx->tx_msg;
1416 unsigned long newest_last_rx, timeout;
1419 LASSERTF((msg->gnm_type == GNILND_MSG_IMMEDIATE) ?
1420 immediatenob <= *kgnilnd_tunables.kgn_max_immediate :
1422 "msg 0x%p type %d wrong payload size %d\n",
1423 msg, msg->gnm_type, immediatenob);
1425 /* make sure we catch all the cases where we'd send on a dirty old mbox
1426 * but allow case for sending CLOSE. Since this check is within the CQ
1427 * mutex barrier and the close message is only sent through
1428 * kgnilnd_send_conn_close the last message out the door will be the
1431 if (atomic_read(&conn->gnc_peer->gnp_dirty_eps) != 0 && msg->gnm_type != GNILND_MSG_CLOSE) {
1432 kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
1433 kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
1434 /* Return -ETIME, we are closing the connection already so we don't want to
1435 * have this tx hit the wire. The tx will be killed by the calling function.
1436 * Once the EP is marked dirty the close message will be the last
1437 * thing to hit the wire */
1442 timeout = cfs_time_seconds(conn->gnc_timeout);
1444 newest_last_rx = GNILND_LASTRX(conn);
1446 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_SEND_TIMEOUT)) {
1447 now = now + (GNILND_TIMEOUTRX(timeout) * 2);
1450 if (time_after_eq(now, newest_last_rx + GNILND_TIMEOUTRX(timeout))) {
1451 GNIDBG_CONN(D_NETERROR|D_CONSOLE, conn,
1452 "Cant send to %s after timeout lapse of %lu; TO %lu\n",
1453 libcfs_nid2str(conn->gnc_peer->gnp_nid),
1454 cfs_duration_sec(now - newest_last_rx),
1455 cfs_duration_sec(GNILND_TIMEOUTRX(timeout)));
1456 kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
1457 kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
1461 GNITX_ASSERTF(tx, (conn != NULL) && (tx->tx_id.txe_idx != 0), "tx id unset!", NULL);
1462 /* msg->gnm_srcnid is set when the message is initialized by whatever function is
1463 * creating the message; this allows the message to contain the correct LNET NID/NET needed
1464 * instead of the one that the peer/conn uses for sending the data.
1466 msg->gnm_connstamp = conn->gnc_my_connstamp;
1467 msg->gnm_payload_len = immediatenob;
1468 kgnilnd_conn_mutex_lock(&conn->gnc_smsg_mutex);
1469 msg->gnm_seq = atomic_read(&conn->gnc_tx_seq);
1471 /* always init here - kgn_checksum is a /sys module tunable
1472 * and can be flipped at any point, even between msg init and sending */
1474 if (*kgnilnd_tunables.kgn_checksum) {
1475 /* We must set this here and not in kgnilnd_init_msg, since
1476 * we could resend this msg many times
1477 * (NOT_DONE from gni_smsg_send below) and wouldn't pass
1478 * through init_msg again */
1479 msg->gnm_cksum = kgnilnd_cksum(msg, sizeof(kgn_msg_t));
1480 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_SMSG_CKSUM1)) {
1481 msg->gnm_cksum += 0xf00f;
1485 GNIDBG_TOMSG(D_NET, msg, "tx 0x%p conn 0x%p->%s sending SMSG sz %u id %x/%d [%p for %u]",
1486 tx, conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
1487 sizeof(kgn_msg_t), tx->tx_id.txe_smsg_id,
1488 tx->tx_id.txe_idx, immediate, immediatenob);
1490 if (unlikely(tx->tx_state & GNILND_TX_FAIL_SMSG)) {
1491 rrc = cfs_fail_val ? cfs_fail_val : GNI_RC_NOT_DONE;
1493 rrc = kgnilnd_smsg_send(conn->gnc_ephandle,
1494 msg, sizeof(*msg), immediate,
1496 tx->tx_id.txe_smsg_id);
1500 case GNI_RC_SUCCESS:
1501 atomic_inc(&conn->gnc_tx_seq);
1502 conn->gnc_last_tx = jiffies;
1503 /* no locking here as LIVE isn't a list */
1504 kgnilnd_tx_add_state_locked(tx, NULL, conn, GNILND_TX_LIVE_FMAQ, 1);
1506 /* this needs to be checked under lock as it might be freed from a completion
1509 if (msg->gnm_type == GNILND_MSG_NOOP) {
1510 set_mb(conn->gnc_last_noop_sent, jiffies);
1513 /* serialize with seeing CQ events for completion on this, as well as
1515 kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
1516 kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
1518 atomic_inc(&conn->gnc_device->gnd_short_ntx);
1519 atomic64_add(immediatenob, &conn->gnc_device->gnd_short_txbytes);
1520 kgnilnd_peer_alive(conn->gnc_peer);
1521 GNIDBG_SMSG_CREDS(D_NET, conn);
1524 case GNI_RC_NOT_DONE:
1525 /* XXX Nic: We need to figure out how to track this
1526 * - there are bound to be good reasons for it,
1527 * but we want to know when it happens */
1528 kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
1529 kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
1530 /* We'll handle this error inline - makes the calling logic much more
1533 /* If no lock, caller doesn't want us to retry */
1534 if (state_lock == NULL) {
1538 retry_send = kgnilnd_tx_should_retry(conn, tx);
1540 /* add to head of list for the state and retries */
1541 spin_lock(state_lock);
1542 kgnilnd_tx_add_state_locked(tx, conn->gnc_peer, conn, state, 0);
1543 spin_unlock(state_lock);
1545 /* We only reschedule for a certain number of retries, then
1546 * we will wait for the CQ events indicating a release of SMSG
1548 if (tx->tx_retrans < (*kgnilnd_tunables.kgn_max_retransmits/4)) {
1549 kgnilnd_schedule_conn(conn);
1552 /* a CQ event coming in signifies either a TX completion or
1553 * an RX arrival. Either of these *could* free up credits
1554 * in the SMSG mbox and we should try sending again */
1555 GNIDBG_TX(D_NET, tx, "waiting for CQID %u event to resend",
1556 tx->tx_conn->gnc_cqid);
1557 /* use +ve return code to let upper layers know they
1558 * should stop looping on sends */
1565 /* handle bad retcode gracefully */
1566 kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
1567 kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
1572 /* kgnilnd_sendmsg has hard wait on gnd_cq_mutex */
1574 kgnilnd_sendmsg(kgn_tx_t *tx, void *immediate, unsigned int immediatenob,
1575 spinlock_t *state_lock, kgn_tx_list_state_t state)
1577 kgn_device_t *dev = tx->tx_conn->gnc_device;
1578 unsigned long timestamp;
1581 timestamp = jiffies;
1582 kgnilnd_gl_mutex_lock(&dev->gnd_cq_mutex);
1583 /* delay in jiffies - we are really concerned only with things that
1584 * result in a schedule() or really holding this off for long times.
1585 * NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
1586 dev->gnd_mutex_delay += (long) jiffies - timestamp;
1588 rc = kgnilnd_sendmsg_nolock(tx, immediate, immediatenob, state_lock, state);
1594 /* returns -EAGAIN for lock miss, anything else < 0 is hard error, >=0 for success */
1596 kgnilnd_sendmsg_trylock(kgn_tx_t *tx, void *immediate, unsigned int immediatenob,
1597 spinlock_t *state_lock, kgn_tx_list_state_t state)
1599 kgn_conn_t *conn = tx->tx_conn;
1600 kgn_device_t *dev = conn->gnc_device;
1601 unsigned long timestamp;
1604 timestamp = jiffies;
1606 /* technically we are doing bad things with the read_lock on the peer_conn
1607 * table, but we shouldn't be sleeping inside here - and we don't sleep/block
1608 * for the mutex. I bet lockdep is gonna flag this one though... */
1610 /* there are a few cases where we don't want the immediate send - like
1611 * when we are in the scheduler thread and it'd harm the latency of
1612 * getting messages up to LNet */
1614 /* rmb for gnd_ready */
1616 if (conn->gnc_device->gnd_ready == GNILND_DEV_LOOP) {
1618 atomic_inc(&conn->gnc_device->gnd_fast_block);
1619 } else if (unlikely(kgnilnd_data.kgn_quiesce_trigger)) {
1620 /* don't hit HW during quiesce */
1622 } else if (unlikely(atomic_read(&conn->gnc_peer->gnp_dirty_eps))) {
1623 /* don't hit HW if stale EPs and conns left to close */
1626 atomic_inc(&conn->gnc_device->gnd_fast_try);
1627 rc = kgnilnd_gl_mutex_trylock(&conn->gnc_device->gnd_cq_mutex);
1632 /* we got the mutex and weren't blocked */
1634 /* delay in jiffies - we are really concerned only with things that
1635 * result in a schedule() or really holding this off for long times.
1636 * NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
1637 dev->gnd_mutex_delay += (long) jiffies - timestamp;
1639 atomic_inc(&conn->gnc_device->gnd_fast_ok);
1640 tx->tx_qtime = jiffies;
1641 tx->tx_state = GNILND_TX_WAITING_COMPLETION;
1642 rc = kgnilnd_sendmsg_nolock(tx, tx->tx_buffer, tx->tx_nob, &conn->gnc_list_lock, GNILND_TX_FMAQ);
1643 /* _nolock unlocks the mutex for us */
1649 /* lets us know if we can push this RDMA through now */
1651 kgnilnd_auth_rdma_bytes(kgn_device_t *dev, kgn_tx_t *tx)
1655 bytes_left = atomic64_sub_return(tx->tx_nob, &dev->gnd_rdmaq_bytes_ok);
1657 if (bytes_left < 0) {
1658 atomic64_add(tx->tx_nob, &dev->gnd_rdmaq_bytes_ok);
1659 atomic_inc(&dev->gnd_rdmaq_nstalls);
1662 CDEBUG(D_NET, "no bytes to send, turning on timer for %lu\n",
1663 dev->gnd_rdmaq_deadline);
1664 mod_timer(&dev->gnd_rdmaq_timer, dev->gnd_rdmaq_deadline);
1665 /* we never del this timer - at worst it schedules us.. */
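/*
 * RDMA authorization above is a plain byte budget: atomically subtract the
 * request and, if the budget went negative, give the bytes back and defer
 * until the timer/scheduler replenishes it.  Sketch of that pattern with a
 * hypothetical example_* name:
 */
#if 0	/* illustrative sketch, not compiled */
static int
example_try_charge(atomic64_t *budget, long nob)
{
	if (atomic64_sub_return(nob, budget) < 0) {
		atomic64_add(nob, budget);	/* undo - caller retries later */
		return 0;
	}
	return 1;				/* charged - caller may proceed */
}
#endif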
1672 /* this adds a TX to the queue pending throttling authorization before
1673 * we allow our remote peer to launch a PUT at us */
1675 kgnilnd_queue_rdma(kgn_conn_t *conn, kgn_tx_t *tx)
1679 /* we cannot go into send_mapped_tx from here as we are holding locks
1680 * and mem registration might end up allocating memory in kgni.
1681 * That said, we'll push this as far as we can into the queue process */
1682 rc = kgnilnd_auth_rdma_bytes(conn->gnc_device, tx);
1685 spin_lock(&conn->gnc_device->gnd_rdmaq_lock);
1686 kgnilnd_tx_add_state_locked(tx, NULL, conn, GNILND_TX_RDMAQ, 0);
1687 /* lets us know how delayed RDMA is */
1688 tx->tx_qtime = jiffies;
1689 spin_unlock(&conn->gnc_device->gnd_rdmaq_lock);
1691 /* we have RDMA authorized, now it just needs a MDD and to hit the wire */
1692 spin_lock(&tx->tx_conn->gnc_device->gnd_lock);
1693 kgnilnd_tx_add_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_MAPQ, 0);
1694 /* lets us know how delayed mapping is */
1695 tx->tx_qtime = jiffies;
1696 spin_unlock(&tx->tx_conn->gnc_device->gnd_lock);
1699 /* make sure we wake up sched to run this */
1700 kgnilnd_schedule_device(tx->tx_conn->gnc_device);
1703 /* push TX through state machine */
1705 kgnilnd_queue_tx(kgn_conn_t *conn, kgn_tx_t *tx)
1710 /* set the tx_id here, we delay it until we have an actual conn
1712 * in some cases, the tx_id is already set to provide for things
1713 * like RDMA completion cookies, etc */
1714 if (tx->tx_id.txe_idx == 0) {
1715 rc = kgnilnd_set_tx_id(tx, conn);
1717 kgnilnd_tx_done(tx, rc);
1722 CDEBUG(D_NET, "%s to conn %p for %s\n", kgnilnd_msgtype2str(tx->tx_msg.gnm_type),
1723 conn, libcfs_nid2str(conn->gnc_peer->gnp_nid));
1725 /* Only let NOOPs be sent while the fail loc is set, otherwise kill the tx.
1727 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_ONLY_NOOP) && (tx->tx_msg.gnm_type != GNILND_MSG_NOOP)) {
1728 kgnilnd_tx_done(tx, rc);
1732 switch (tx->tx_msg.gnm_type) {
1733 case GNILND_MSG_PUT_ACK:
1734 case GNILND_MSG_GET_REQ:
1735 case GNILND_MSG_PUT_REQ_REV:
1736 case GNILND_MSG_GET_ACK_REV:
1737 /* hijacking time! If this message will authorize our peer to
1738 * send his dirty little bytes in an RDMA, we need to get permission */
1739 kgnilnd_queue_rdma(conn, tx);
1741 case GNILND_MSG_IMMEDIATE:
1742 /* try to send right now, can help reduce latency */
1743 rc = kgnilnd_sendmsg_trylock(tx, tx->tx_buffer, tx->tx_nob, &conn->gnc_list_lock, GNILND_TX_FMAQ);
1746 /* it was sent, break out of switch to avoid default case of queueing */
1749 /* needs to queue to try again, so fall through to default case */
1750 case GNILND_MSG_NOOP:
1751 /* Just make sure this goes out first for this conn */
1753 /* fall through... */
1755 spin_lock(&conn->gnc_list_lock);
1756 kgnilnd_tx_add_state_locked(tx, conn->gnc_peer, conn, GNILND_TX_FMAQ, add_tail);
1757 tx->tx_qtime = jiffies;
1758 spin_unlock(&conn->gnc_list_lock);
1759 kgnilnd_schedule_conn(conn);
1764 kgnilnd_launch_tx(kgn_tx_t *tx, kgn_net_t *net, lnet_process_id_t *target)
1767 kgn_peer_t *new_peer = NULL;
1768 kgn_conn_t *conn = NULL;
1774 /* If I get here, I've committed to send, so I complete the tx with
1775 * failure on any problems */
1777 GNITX_ASSERTF(tx, tx->tx_conn == NULL,
1778 "tx already has connection %p", tx->tx_conn);
1780 /* do all of the peer & conn searching in one swoop - this avoids
1781 * nastiness when dropping locks and needing to maintain a sane state
1782 * in the face of stack reset or something else nuking peers & conns */
1784 /* I expect to find him, so only take a read lock */
1785 read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1787 peer = kgnilnd_find_peer_locked(target->nid);
1789 conn = kgnilnd_find_conn_locked(peer);
1790 /* this could be NULL during quiesce */
1792 /* Connection exists; queue message on it */
1793 kgnilnd_queue_tx(conn, tx);
1794 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1798 /* don't create a connection if the peer is marked down */
1799 if (peer->gnp_down == GNILND_RCA_NODE_DOWN) {
1800 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1806 /* creating peer or conn; I'll need a write lock... */
1807 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1809 CFS_RACE(CFS_FAIL_GNI_FIND_TARGET);
1811 node_state = kgnilnd_get_node_state(LNET_NIDADDR(target->nid));
1813 /* NB - this will not block during normal operations -
1814 * the only writer of this is in the startup/shutdown path. */
1815 rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);
1821 /* ignore previous peer entirely - we cycled the lock, so we
1822 * will create new peer and at worst drop it if peer is still
1824 rc = kgnilnd_create_peer_safe(&new_peer, target->nid, net, node_state);
1826 up_read(&kgnilnd_data.kgn_net_rw_sem);
1830 write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1831 up_read(&kgnilnd_data.kgn_net_rw_sem);
1833 /* search for peer again now that we have the lock
1834 * if we don't find it, add our new one to the list */
1835 kgnilnd_add_peer_locked(target->nid, new_peer, &peer);
1837 /* don't create a connection if the peer is not up */
1838 if (peer->gnp_down != GNILND_RCA_NODE_UP) {
1839 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1844 conn = kgnilnd_find_or_create_conn_locked(peer);
1846 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_DGRAM_DROP_TX)) {
1847 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1852 /* oh hey, found a conn now... magical */
1853 kgnilnd_queue_tx(conn, tx);
1855 /* no conn, must be trying to connect - so we queue for now */
1856 tx->tx_qtime = jiffies;
1857 kgnilnd_tx_add_state_locked(tx, peer, NULL, GNILND_TX_PEERQ, 1);
1859 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1862 kgnilnd_tx_done(tx, rc);
1867 kgnilnd_rdma(kgn_tx_t *tx, int type,
1868 kgn_rdma_desc_t *sink, unsigned int nob, __u64 cookie)
1870 kgn_conn_t *conn = tx->tx_conn;
1871 unsigned long timestamp;
1872 gni_post_type_t post_type;
1875 unsigned int desc_nob = nob;
1876 void *desc_buffer = tx->tx_buffer;
1877 gni_mem_handle_t desc_map_key = tx->tx_map_key;
1878 LASSERTF(kgnilnd_tx_mapped(tx),
1879 "unmapped tx %p\n", tx);
1880 LASSERTF(conn != NULL,
1881 "NULL conn on tx %p, naughty, naughty\n", tx);
1882 LASSERTF(nob <= sink->gnrd_nob,
1883 "nob %u > sink->gnrd_nob %d (%p)\n",
1884 nob, sink->gnrd_nob, sink);
1885 LASSERTF(nob <= tx->tx_nob,
1886 "nob %d > tx(%p)->tx_nob %d\n",
1887 nob, tx, tx->tx_nob);
1890 case GNILND_MSG_GET_DONE:
1891 case GNILND_MSG_PUT_DONE:
1892 post_type = GNI_POST_RDMA_PUT;
1894 case GNILND_MSG_GET_DONE_REV:
1895 case GNILND_MSG_PUT_DONE_REV:
1896 post_type = GNI_POST_RDMA_GET;
1899 CERROR("invalid msg type %s (%d)\n",
1900 kgnilnd_msgtype2str(type), type);
1903 if (post_type == GNI_POST_RDMA_GET) {
1904 /* Check remote buffer / local buffer / length alignment. All must be 4-byte
1905 * aligned. If the local buffer is misaligned, using the copy buffer fixes
1906 * that. If the length is misaligned, the copy buffer also fixes it: we end up
1907 * transferring extra bytes into the buffer but copy only the correct nob back
1908 * into the original buffer. Remote offset correction is done by adjusting the
1909 * offset, making sure the length and addr are aligned, and copying the data
1910 * into the correct location once the transfer has completed.
1912 if ((((__u64)((unsigned long)tx->tx_buffer)) & 3) ||
1913 (sink->gnrd_addr & 3) ||
1916 tx->tx_offset = ((__u64)((unsigned long)sink->gnrd_addr)) & 3;
1918 kgnilnd_admin_addref(kgnilnd_data.kgn_rev_offset);
1920 if ((nob + tx->tx_offset) & 3) {
1921 desc_nob = ((nob + tx->tx_offset) + (4 - ((nob + tx->tx_offset) & 3)));
1922 kgnilnd_admin_addref(kgnilnd_data.kgn_rev_length);
1924 desc_nob = (nob + tx->tx_offset);
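/* Worked example (illustrative numbers): if sink->gnrd_addr ends in 0x6 and
 * nob = 13, then tx_offset = 0x6 & 3 = 2, nob + tx_offset = 15 is still
 * misaligned, and desc_nob rounds up to 16; the extra bytes get transferred
 * but only nob bytes are copied back into the original buffer. */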
1927 if (tx->tx_buffer_copy == NULL) {
1928 /* Allocate the largest copy buffer we will need; this prevents us from
1929 * overwriting data and costs at most a few extra bytes. */
1930 tx->tx_buffer_copy = vmalloc(desc_nob);
1932 if (!tx->tx_buffer_copy) {
1933 /* allocation of buffer failed, NAK the RDMA */
1934 kgnilnd_nak_rdma(tx->tx_conn, tx->tx_msg.gnm_type, -EFAULT, cookie, tx->tx_msg.gnm_srcnid);
1935 kgnilnd_tx_done(tx, -EFAULT);
1938 kgnilnd_admin_addref(kgnilnd_data.kgn_rev_copy_buff);
1939 rc = kgnilnd_mem_register(conn->gnc_device->gnd_handle, (__u64)tx->tx_buffer_copy, desc_nob, NULL, GNI_MEM_READWRITE, &tx->tx_buffer_copy_map_key);
1940 if (rc != GNI_RC_SUCCESS) {
1941 /* registration failed, NAK the RDMA and kill the tx */
1942 vfree(tx->tx_buffer_copy);
1943 tx->tx_buffer_copy = NULL;
1944 kgnilnd_nak_rdma(tx->tx_conn, tx->tx_msg.gnm_type, -EFAULT, cookie, tx->tx_msg.gnm_srcnid);
1945 kgnilnd_tx_done(tx, -EFAULT);
1949 desc_map_key = tx->tx_buffer_copy_map_key;
1950 desc_buffer = tx->tx_buffer_copy;
1954 memset(&tx->tx_rdma_desc, 0, sizeof(tx->tx_rdma_desc));
1955 tx->tx_rdma_desc.post_id = tx->tx_id.txe_cookie;
1956 tx->tx_rdma_desc.type = post_type;
1957 tx->tx_rdma_desc.cq_mode = GNI_CQMODE_GLOBAL_EVENT;
1958 tx->tx_rdma_desc.local_addr = (__u64)((unsigned long)desc_buffer);
1959 tx->tx_rdma_desc.local_mem_hndl = desc_map_key;
1960 tx->tx_rdma_desc.remote_addr = sink->gnrd_addr - tx->tx_offset;
1961 tx->tx_rdma_desc.remote_mem_hndl = sink->gnrd_key;
1962 tx->tx_rdma_desc.length = desc_nob;
1963 tx->tx_nob_rdma = nob;
1964 if (*kgnilnd_tunables.kgn_bte_dlvr_mode)
1965 tx->tx_rdma_desc.dlvr_mode = *kgnilnd_tunables.kgn_bte_dlvr_mode;
1966 /* prep final completion message */
1967 kgnilnd_init_msg(&tx->tx_msg, type, tx->tx_msg.gnm_srcnid);
1968 tx->tx_msg.gnm_u.completion.gncm_cookie = cookie;
1969 /* send actual size RDMA'd in retval */
1970 tx->tx_msg.gnm_u.completion.gncm_retval = nob;
1972 kgnilnd_compute_rdma_cksum(tx, nob);
1975 kgnilnd_queue_tx(conn, tx);
1979 /* Don't lie (CLOSE == RDMA idle) */
1980 LASSERTF(!conn->gnc_close_sent, "tx %p on conn %p after close sent %d\n",
1981 tx, conn, conn->gnc_close_sent);
1983 GNIDBG_TX(D_NET, tx, "Post RDMA type 0x%02x conn %p dlvr_mode "
1984 "0x%x cookie:"LPX64,
1985 type, conn, tx->tx_rdma_desc.dlvr_mode, cookie);
1987 /* set CQ dedicated for RDMA */
1988 tx->tx_rdma_desc.src_cq_hndl = conn->gnc_device->gnd_snd_rdma_cqh;
1990 timestamp = jiffies;
1991 kgnilnd_conn_mutex_lock(&conn->gnc_rdma_mutex);
1992 kgnilnd_gl_mutex_lock(&conn->gnc_device->gnd_cq_mutex);
1993 /* delay in jiffies - we are really only concerned with things that
1994 * result in a schedule() or that hold this off for a long time.
1995 * NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
1996 conn->gnc_device->gnd_mutex_delay += (long) jiffies - timestamp;
1998 rrc = kgnilnd_post_rdma(conn->gnc_ephandle, &tx->tx_rdma_desc);
2000 if (rrc == GNI_RC_ERROR_RESOURCE) {
2001 kgnilnd_conn_mutex_unlock(&conn->gnc_rdma_mutex);
2002 kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
2003 kgnilnd_unmap_buffer(tx, 0);
2005 if (tx->tx_buffer_copy != NULL) {
2006 vfree(tx->tx_buffer_copy);
2007 tx->tx_buffer_copy = NULL;
2010 spin_lock(&tx->tx_conn->gnc_device->gnd_lock);
2011 kgnilnd_tx_add_state_locked(tx, NULL, tx->tx_conn,
2013 spin_unlock(&tx->tx_conn->gnc_device->gnd_lock);
2014 kgnilnd_schedule_device(tx->tx_conn->gnc_device);
2018 spin_lock(&conn->gnc_list_lock);
2019 kgnilnd_tx_add_state_locked(tx, conn->gnc_peer, conn, GNILND_TX_LIVE_RDMAQ, 1);
2020 tx->tx_qtime = jiffies;
2021 spin_unlock(&conn->gnc_list_lock);
2022 kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
2023 kgnilnd_conn_mutex_unlock(&conn->gnc_rdma_mutex);
2025 /* XXX Nic: is this a place where we should handle more errors for
2026 * robustness' sake? */
2027 LASSERT(rrc == GNI_RC_SUCCESS);
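/* Illustrative sketch only - a hypothetical helper, not part of the driver:
 * the offset/length bookkeeping used above for unaligned RDMA, collected in
 * one place. Given the remote address and nob, it returns the rounded-up
 * BTE length and writes back the byte offset at which the payload will sit
 * inside the copy buffer. Plain C arithmetic, no GNI calls. */
static inline unsigned long
kgnilnd_example_bte_length(__u64 remote_addr, unsigned long nob,
			   unsigned long *offset)
{
	unsigned long len;

	/* payload starts this many bytes past the 4-byte aligned address */
	*offset = (unsigned long)(remote_addr & 3);

	/* round the total up to the next 4-byte boundary if needed */
	len = nob + *offset;
	if (len & 3)
		len += 4 - (len & 3);

	return len;
}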
2032 kgnilnd_alloc_rx(void)
2036 rx = kmem_cache_alloc(kgnilnd_data.kgn_rx_cache, GFP_ATOMIC);
2038 CERROR("failed to allocate rx\n");
2041 CDEBUG(D_MALLOC, "slab-alloced 'rx': %lu at %p.\n",
2044 /* no memset to zero, we'll always fill all members */
2048 /* release just frees the connection resources;
2049 * we use this for the eager path after copying */
2051 kgnilnd_release_msg(kgn_conn_t *conn)
2054 unsigned long timestamp;
2056 CDEBUG(D_NET, "consuming %p\n", conn);
2058 timestamp = jiffies;
2059 kgnilnd_gl_mutex_lock(&conn->gnc_device->gnd_cq_mutex);
2060 /* delay in jiffies - we are really only concerned with things that
2061 * result in a schedule() or that hold this off for a long time.
2062 * NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
2063 conn->gnc_device->gnd_mutex_delay += (long) jiffies - timestamp;
2065 rrc = kgnilnd_smsg_release(conn->gnc_ephandle);
2066 kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
2068 LASSERTF(rrc == GNI_RC_SUCCESS, "bad rrc %d\n", rrc);
2069 GNIDBG_SMSG_CREDS(D_NET, conn);
2075 kgnilnd_consume_rx(kgn_rx_t *rx)
2077 kgn_conn_t *conn = rx->grx_conn;
2078 kgn_msg_t *rxmsg = rx->grx_msg;
2080 /* if we are eager, free the cache alloc'd msg */
2081 if (unlikely(rx->grx_eager)) {
2082 LIBCFS_FREE(rxmsg, sizeof(*rxmsg) + *kgnilnd_tunables.kgn_max_immediate);
2083 atomic_dec(&kgnilnd_data.kgn_neager_allocs);
2085 /* release ref from eager_recv */
2086 kgnilnd_conn_decref(conn);
2088 GNIDBG_MSG(D_NET, rxmsg, "rx %p processed", rx);
2089 kgnilnd_release_msg(conn);
2092 kmem_cache_free(kgnilnd_data.kgn_rx_cache, rx);
2093 CDEBUG(D_MALLOC, "slab-freed 'rx': %lu at %p.\n",
2100 kgnilnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
2102 lnet_hdr_t *hdr = &lntmsg->msg_hdr;
2103 int type = lntmsg->msg_type;
2104 lnet_process_id_t target = lntmsg->msg_target;
2105 int target_is_router = lntmsg->msg_target_is_router;
2106 int routing = lntmsg->msg_routing;
2107 unsigned int niov = lntmsg->msg_niov;
2108 struct iovec *iov = lntmsg->msg_iov;
2109 lnet_kiov_t *kiov = lntmsg->msg_kiov;
2110 unsigned int offset = lntmsg->msg_offset;
2111 unsigned int nob = lntmsg->msg_len;
2112 unsigned int msg_vmflush = lntmsg->msg_vmflush;
2113 kgn_net_t *net = ni->ni_data;
2117 int reverse_rdma_flag = *kgnilnd_tunables.kgn_reverse_rdma;
2119 /* NB 'private' is different depending on what we're sending.... */
2120 LASSERT(!in_interrupt());
2122 CDEBUG(D_NET, "sending msg type %d with %d bytes in %d frags to %s\n",
2123 type, nob, niov, libcfs_id2str(target));
2125 LASSERTF(nob == 0 || niov > 0,
2126 "lntmsg %p nob %d niov %d\n", lntmsg, nob, niov);
2127 LASSERTF(niov <= LNET_MAX_IOV,
2128 "lntmsg %p niov %d\n", lntmsg, niov);
2130 /* payload is either all vaddrs or all pages */
2131 LASSERTF(!(kiov != NULL && iov != NULL),
2132 "lntmsg %p kiov %p iov %p\n", lntmsg, kiov, iov);
2135 mpflag = cfs_memory_pressure_get_and_set();
2139 CERROR("lntmsg %p with unexpected type %d\n",
2144 LASSERTF(nob == 0, "lntmsg %p nob %d\n",
2152 if (routing || target_is_router)
2153 break; /* send IMMEDIATE */
2155 /* it is safe to do a direct GET without mapping the buffer for RDMA, as we
2156 * check the eventual sink buffer here - if it is small enough, the remote
2157 * end is perfectly capable of returning the data in a short message -
2158 * the magic is that we call lnet_parse in kgnilnd_recv with rdma_req=0
2159 * for IMMEDIATE messages, which has it send a real reply instead
2160 * of doing kgnilnd_recv to have the RDMA continued */
2161 if (lntmsg->msg_md->md_length <= *kgnilnd_tunables.kgn_max_immediate)
2164 if ((reverse_rdma_flag & GNILND_REVERSE_GET) == 0)
2165 tx = kgnilnd_new_tx_msg(GNILND_MSG_GET_REQ, ni->ni_nid);
2167 tx = kgnilnd_new_tx_msg(GNILND_MSG_GET_REQ_REV, ni->ni_nid);
2173 /* slightly different options as we might actually have a GET with a
2174 * MD_KIOV set but a non-NULL md_iov.iov */
2175 if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0)
2176 rc = kgnilnd_setup_rdma_buffer(tx, lntmsg->msg_md->md_niov,
2177 lntmsg->msg_md->md_iov.iov, NULL,
2178 0, lntmsg->msg_md->md_length);
2180 rc = kgnilnd_setup_rdma_buffer(tx, lntmsg->msg_md->md_niov,
2181 NULL, lntmsg->msg_md->md_iov.kiov,
2182 0, lntmsg->msg_md->md_length);
2184 CERROR("unable to setup buffer: %d\n", rc);
2185 kgnilnd_tx_done(tx, rc);
2190 tx->tx_lntmsg[1] = lnet_create_reply_msg(ni, lntmsg);
2191 if (tx->tx_lntmsg[1] == NULL) {
2192 CERROR("Can't create reply for GET to %s\n",
2193 libcfs_nid2str(target.nid));
2194 kgnilnd_tx_done(tx, rc);
2199 tx->tx_lntmsg[0] = lntmsg;
2200 if ((reverse_rdma_flag & GNILND_REVERSE_GET) == 0)
2201 tx->tx_msg.gnm_u.get.gngm_hdr = *hdr;
2203 tx->tx_msg.gnm_u.putreq.gnprm_hdr = *hdr;
2205 /* rest of tx_msg is set up just before it is sent */
2206 kgnilnd_launch_tx(tx, net, &target);
2208 case LNET_MSG_REPLY:
2210 /* to save on MDDs, we'll handle short kiov by vmap'ing
2211 * and sending via SMSG */
2212 if (nob <= *kgnilnd_tunables.kgn_max_immediate)
2215 if ((reverse_rdma_flag & GNILND_REVERSE_PUT) == 0)
2216 tx = kgnilnd_new_tx_msg(GNILND_MSG_PUT_REQ, ni->ni_nid);
2218 tx = kgnilnd_new_tx_msg(GNILND_MSG_PUT_REQ_REV, ni->ni_nid);
2225 rc = kgnilnd_setup_rdma_buffer(tx, niov, iov, kiov, offset, nob);
2227 kgnilnd_tx_done(tx, rc);
2232 tx->tx_lntmsg[0] = lntmsg;
2233 if ((reverse_rdma_flag & GNILND_REVERSE_PUT) == 0)
2234 tx->tx_msg.gnm_u.putreq.gnprm_hdr = *hdr;
2236 tx->tx_msg.gnm_u.get.gngm_hdr = *hdr;
2238 /* rest of tx_msg is set up just before it is sent */
2239 kgnilnd_launch_tx(tx, net, &target);
2243 /* send IMMEDIATE */
2245 LASSERTF(nob <= *kgnilnd_tunables.kgn_max_immediate,
2246 "lntmsg 0x%p too large %d\n", lntmsg, nob);
2248 tx = kgnilnd_new_tx_msg(GNILND_MSG_IMMEDIATE, ni->ni_nid);
2254 rc = kgnilnd_setup_immediate_buffer(tx, niov, iov, kiov, offset, nob);
2256 kgnilnd_tx_done(tx, rc);
2260 tx->tx_msg.gnm_u.immediate.gnim_hdr = *hdr;
2261 tx->tx_lntmsg[0] = lntmsg;
2262 kgnilnd_launch_tx(tx, net, &target);
2265 /* use stored value as we could have already finalized lntmsg here from a failed launch */
2267 cfs_memory_pressure_restore(mpflag);
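/* Dispatch summary (as coded above): GETs that are routed, or whose sink
 * fits within kgn_max_immediate, go out as IMMEDIATE; larger GETs become
 * GET_REQ (GET_REQ_REV when reverse-RDMA GETs are enabled); PUTs and
 * REPLYs larger than kgn_max_immediate become PUT_REQ (PUT_REQ_REV
 * respectively); everything else falls through to an IMMEDIATE send. */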
2272 kgnilnd_setup_rdma(lnet_ni_t *ni, kgn_rx_t *rx, lnet_msg_t *lntmsg, int mlen)
2274 kgn_conn_t *conn = rx->grx_conn;
2275 kgn_msg_t *rxmsg = rx->grx_msg;
2276 unsigned int niov = lntmsg->msg_niov;
2277 struct iovec *iov = lntmsg->msg_iov;
2278 lnet_kiov_t *kiov = lntmsg->msg_kiov;
2279 unsigned int offset = lntmsg->msg_offset;
2280 unsigned int nob = lntmsg->msg_len;
2285 switch (rxmsg->gnm_type) {
2286 case GNILND_MSG_PUT_REQ_REV:
2287 done_type = GNILND_MSG_PUT_DONE_REV;
2290 case GNILND_MSG_GET_REQ:
2291 done_type = GNILND_MSG_GET_DONE;
2294 CERROR("invalid msg type %s (%d)\n",
2295 kgnilnd_msgtype2str(rxmsg->gnm_type),
2300 tx = kgnilnd_new_tx_msg(done_type, ni->ni_nid);
2304 rc = kgnilnd_set_tx_id(tx, conn);
2308 rc = kgnilnd_setup_rdma_buffer(tx, niov, iov, kiov, offset, nob);
2312 tx->tx_lntmsg[0] = lntmsg;
2313 tx->tx_getinfo = rxmsg->gnm_u.get;
2315 /* we only queue from kgnilnd_recv - we might get called from other contexts
2316 * and we don't want to block on the mutex in those cases */
2318 spin_lock(&tx->tx_conn->gnc_device->gnd_lock);
2319 kgnilnd_tx_add_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_MAPQ, 1);
2320 spin_unlock(&tx->tx_conn->gnc_device->gnd_lock);
2321 kgnilnd_schedule_device(tx->tx_conn->gnc_device);
2326 kgnilnd_tx_done(tx, rc);
2327 kgnilnd_nak_rdma(conn, done_type, rc, rxmsg->gnm_u.get.gngm_cookie, ni->ni_nid);
2329 lnet_finalize(ni, lntmsg, rc);
2333 kgnilnd_eager_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
2336 kgn_rx_t *rx = private;
2337 kgn_conn_t *conn = rx->grx_conn;
2338 kgn_msg_t *rxmsg = rx->grx_msg;
2339 kgn_msg_t *eagermsg = NULL;
2340 kgn_peer_t *peer = NULL;
2341 kgn_conn_t *found_conn = NULL;
2343 GNIDBG_MSG(D_NET, rxmsg, "eager recv for conn %p, rxmsg %p, lntmsg %p",
2344 conn, rxmsg, lntmsg);
2346 if (rxmsg->gnm_payload_len > *kgnilnd_tunables.kgn_max_immediate) {
2347 GNIDBG_MSG(D_ERROR, rxmsg, "payload too large %d",
2348 rxmsg->gnm_payload_len);
2351 /* Grab a read lock so the connection doesn't disappear on us
2352 * while we look it up
2354 read_lock(&kgnilnd_data.kgn_peer_conn_lock);
2356 peer = kgnilnd_find_peer_locked(rxmsg->gnm_srcnid);
2358 found_conn = kgnilnd_find_conn_locked(peer);
2361 /* Verify that the connection found is the same one that the message
2362 * is supposed to be using; if it is not, output an error message
2365 if (!peer || !found_conn
2366 || found_conn->gnc_peer_connstamp != rxmsg->gnm_connstamp) {
2367 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
2368 CERROR("Couldn't find matching peer %p or conn %p / %p\n",
2369 peer, conn, found_conn);
2371 CERROR("Unexpected connstamp "LPX64"("LPX64" expected)"
2372 " from %s", rxmsg->gnm_connstamp,
2373 found_conn->gnc_peer_connstamp,
2374 libcfs_nid2str(peer->gnp_nid));
2379 /* add a conn ref to ensure it doesn't go away until all eager
2380 * messages are processed */
2381 kgnilnd_conn_addref(conn);
2383 /* Now that we have verified the connection is valid and added a
2384 * reference we can remove the read_lock on the peer_conn_lock */
2385 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
2387 /* we have no credits or buffers for this message, so copy it
2388 * somewhere for a later kgnilnd_recv */
2389 if (atomic_read(&kgnilnd_data.kgn_neager_allocs) >=
2390 *kgnilnd_tunables.kgn_eager_credits) {
2391 CERROR("Out of eager credits to %s\n",
2392 libcfs_nid2str(conn->gnc_peer->gnp_nid));
2396 atomic_inc(&kgnilnd_data.kgn_neager_allocs);
2398 LIBCFS_ALLOC(eagermsg, sizeof(*eagermsg) + *kgnilnd_tunables.kgn_max_immediate);
2399 if (eagermsg == NULL) {
2400 kgnilnd_conn_decref(conn);
2401 CERROR("couldn't allocate eager rx message for conn %p to %s\n",
2402 conn, libcfs_nid2str(conn->gnc_peer->gnp_nid));
2406 /* copy msg and payload */
2407 memcpy(eagermsg, rxmsg, sizeof(*rxmsg) + rxmsg->gnm_payload_len);
2408 rx->grx_msg = eagermsg;
2411 /* stash this for lnet_finalize on cancel-on-conn-close */
2412 rx->grx_lntmsg = lntmsg;
2414 /* keep the same rx_t, it just has a new grx_msg now */
2415 *new_private = private;
2417 /* release SMSG buffer */
2418 kgnilnd_release_msg(conn);
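/* Net effect of the eager path: the SMSG payload is copied into a
 * LIBCFS_ALLOC'd buffer big enough for the header plus kgn_max_immediate,
 * rx->grx_msg is repointed at that copy, and the mailbox credit is given
 * back immediately via kgnilnd_release_msg; the copy and the conn ref
 * taken above are both dropped later in kgnilnd_consume_rx. */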
2424 kgnilnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
2425 int delayed, unsigned int niov,
2426 struct iovec *iov, lnet_kiov_t *kiov,
2427 unsigned int offset, unsigned int mlen, unsigned int rlen)
2429 kgn_rx_t *rx = private;
2430 kgn_conn_t *conn = rx->grx_conn;
2431 kgn_msg_t *rxmsg = rx->grx_msg;
2437 LASSERT(!in_interrupt());
2438 LASSERTF(mlen <= rlen, "%d <= %d\n", mlen, rlen);
2439 /* Either all pages or all vaddrs */
2440 LASSERTF(!(kiov != NULL && iov != NULL), "kiov %p iov %p\n",
2443 GNIDBG_MSG(D_NET, rxmsg, "conn %p, rxmsg %p, lntmsg %p"
2444 " niov=%d kiov=%p iov=%p offset=%d mlen=%d rlen=%d",
2445 conn, rxmsg, lntmsg,
2446 niov, kiov, iov, offset, mlen, rlen);
2448 /* we need to lock here as recv can be called from any context */
2449 read_lock(&kgnilnd_data.kgn_peer_conn_lock);
2450 if (rx->grx_eager && conn->gnc_state != GNILND_CONN_ESTABLISHED) {
2451 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
2453 /* someone closed the conn after we copied this out, nuke it */
2454 kgnilnd_consume_rx(rx);
2455 lnet_finalize(ni, lntmsg, conn->gnc_error);
2458 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
2460 switch (rxmsg->gnm_type) {
2462 GNIDBG_MSG(D_NETERROR, rxmsg, "conn %p, rx %p, rxmsg %p, lntmsg %p"
2463 " niov=%d kiov=%p iov=%p offset=%d mlen=%d rlen=%d",
2464 conn, rx, rxmsg, lntmsg, niov, kiov, iov, offset, mlen, rlen);
2467 case GNILND_MSG_IMMEDIATE:
2468 if (mlen > rxmsg->gnm_payload_len) {
2469 GNIDBG_MSG(D_ERROR, rxmsg,
2470 "Immediate message from %s too big: %d > %d",
2471 libcfs_nid2str(conn->gnc_peer->gnp_nid), mlen,
2472 rxmsg->gnm_payload_len);
2474 kgnilnd_consume_rx(rx);
2478 /* rxmsg[1] is a pointer to the payload, sitting in the buffer
2479 * right after the kgn_msg_t header - so it is just a 'cute' way of saying
2480 * rxmsg + sizeof(kgn_msg_t) */
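/* i.e. &rxmsg[1] == (kgn_msg_t *)rxmsg + 1, the same address as
 * (char *)rxmsg + sizeof(kgn_msg_t), so the payload starts at the first
 * byte past the header. */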
2482 /* check payload checksum if sent */
2484 if (*kgnilnd_tunables.kgn_checksum >= 2 &&
2485 !rxmsg->gnm_payload_cksum &&
2486 rxmsg->gnm_payload_len != 0)
2487 GNIDBG_MSG(D_WARNING, rxmsg, "no msg payload checksum when enabled");
2489 if (rxmsg->gnm_payload_cksum != 0) {
2490 /* gnm_payload_len set in kgnilnd_sendmsg from tx->tx_nob,
2491 * which is what is used to calculate the cksum on the TX side */
2492 pload_cksum = kgnilnd_cksum(&rxmsg[1], rxmsg->gnm_payload_len);
2494 if (rxmsg->gnm_payload_cksum != pload_cksum) {
2495 GNIDBG_MSG(D_NETERROR, rxmsg,
2496 "Bad payload checksum (%x expected %x)",
2497 pload_cksum, rxmsg->gnm_payload_cksum);
2498 switch (*kgnilnd_tunables.kgn_checksum_dump) {
2500 kgnilnd_dump_blob(D_BUFFS, "bad payload checksum",
2501 &rxmsg[1], rxmsg->gnm_payload_len);
2502 /* fall through to dump */
2504 libcfs_debug_dumplog();
2510 /* checksum problems are fatal, kill the conn */
2511 kgnilnd_consume_rx(rx);
2512 kgnilnd_close_conn(conn, rc);
2518 lnet_copy_flat2kiov(
2520 *kgnilnd_tunables.kgn_max_immediate,
2521 &rxmsg[1], 0, mlen);
2525 *kgnilnd_tunables.kgn_max_immediate,
2526 &rxmsg[1], 0, mlen);
2528 kgnilnd_consume_rx(rx);
2529 lnet_finalize(ni, lntmsg, 0);
2532 case GNILND_MSG_PUT_REQ:
2533 /* LNET wants to truncate or drop transaction, sending NAK */
2535 kgnilnd_consume_rx(rx);
2536 lnet_finalize(ni, lntmsg, 0);
2538 /* only error if lntmsg == NULL, otherwise we are just
2539 * short-circuiting an RDMA of 0 bytes */
2540 kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
2541 lntmsg == NULL ? -ENOENT : 0,
2542 rxmsg->gnm_u.get.gngm_cookie,
2546 /* sending ACK with sink buff. info */
2547 tx = kgnilnd_new_tx_msg(GNILND_MSG_PUT_ACK, ni->ni_nid);
2549 kgnilnd_consume_rx(rx);
2553 rc = kgnilnd_set_tx_id(tx, conn);
2555 GOTO(nak_put_req, rc);
2558 rc = kgnilnd_setup_rdma_buffer(tx, niov, iov, kiov, offset, mlen);
2560 GOTO(nak_put_req, rc);
2563 tx->tx_msg.gnm_u.putack.gnpam_src_cookie =
2564 rxmsg->gnm_u.putreq.gnprm_cookie;
2565 tx->tx_msg.gnm_u.putack.gnpam_dst_cookie = tx->tx_id.txe_cookie;
2566 tx->tx_msg.gnm_u.putack.gnpam_desc.gnrd_addr =
2567 (__u64)((unsigned long)tx->tx_buffer);
2568 tx->tx_msg.gnm_u.putack.gnpam_desc.gnrd_nob = mlen;
2570 tx->tx_lntmsg[0] = lntmsg; /* finalize this on RDMA_DONE */
2571 tx->tx_qtime = jiffies;
2572 /* we only queue from kgnilnd_recv - we might get called from other contexts
2573 * and we don't want to block on the mutex in those cases */
2575 spin_lock(&tx->tx_conn->gnc_device->gnd_lock);
2576 kgnilnd_tx_add_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_MAPQ, 1);
2577 spin_unlock(&tx->tx_conn->gnc_device->gnd_lock);
2578 kgnilnd_schedule_device(tx->tx_conn->gnc_device);
2580 kgnilnd_consume_rx(rx);
2584 /* make sure we send an error back when the PUT fails */
2585 kgnilnd_nak_rdma(conn, rxmsg->gnm_type, rc, rxmsg->gnm_u.get.gngm_cookie, ni->ni_nid);
2586 kgnilnd_tx_done(tx, rc);
2587 kgnilnd_consume_rx(rx);
2589 /* return magic LNet network error */
2591 case GNILND_MSG_GET_REQ_REV:
2592 /* LNET wants to truncate or drop transaction, sending NAK */
2594 kgnilnd_consume_rx(rx);
2595 lnet_finalize(ni, lntmsg, 0);
2597 /* only error if lntmsg == NULL, otherwise we are just
2598 * short-circuiting an RDMA of 0 bytes */
2599 kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
2600 lntmsg == NULL ? -ENOENT : 0,
2601 rxmsg->gnm_u.get.gngm_cookie,
2605 /* lntmsg can be null when parsing a LNET_GET */
2606 if (lntmsg != NULL) {
2607 /* sending ACK with sink buff. info */
2608 tx = kgnilnd_new_tx_msg(GNILND_MSG_GET_ACK_REV, ni->ni_nid);
2610 kgnilnd_consume_rx(rx);
2614 rc = kgnilnd_set_tx_id(tx, conn);
2616 GOTO(nak_get_req_rev, rc);
2619 rc = kgnilnd_setup_rdma_buffer(tx, niov, iov, kiov, offset, mlen);
2621 GOTO(nak_get_req_rev, rc);
2624 tx->tx_msg.gnm_u.putack.gnpam_src_cookie =
2625 rxmsg->gnm_u.putreq.gnprm_cookie;
2626 tx->tx_msg.gnm_u.putack.gnpam_dst_cookie = tx->tx_id.txe_cookie;
2627 tx->tx_msg.gnm_u.putack.gnpam_desc.gnrd_addr =
2628 (__u64)((unsigned long)tx->tx_buffer);
2629 tx->tx_msg.gnm_u.putack.gnpam_desc.gnrd_nob = mlen;
2631 tx->tx_lntmsg[0] = lntmsg; /* finalize this on RDMA_DONE */
2633 /* we only queue from kgnilnd_recv - we might get called from other contexts
2634 * and we don't want to block on the mutex in those cases */
2636 spin_lock(&tx->tx_conn->gnc_device->gnd_lock);
2637 kgnilnd_tx_add_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_MAPQ, 1);
2638 spin_unlock(&tx->tx_conn->gnc_device->gnd_lock);
2639 kgnilnd_schedule_device(tx->tx_conn->gnc_device);
2642 kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
2644 rxmsg->gnm_u.get.gngm_cookie,
2648 kgnilnd_consume_rx(rx);
2652 /* make sure we send an error back when the GET fails */
2653 kgnilnd_nak_rdma(conn, rxmsg->gnm_type, rc, rxmsg->gnm_u.get.gngm_cookie, ni->ni_nid);
2654 kgnilnd_tx_done(tx, rc);
2655 kgnilnd_consume_rx(rx);
2657 /* return magic LNet network error */
2661 case GNILND_MSG_PUT_REQ_REV:
2662 /* LNET wants to truncate or drop transaction, sending NAK */
2664 kgnilnd_consume_rx(rx);
2665 lnet_finalize(ni, lntmsg, 0);
2667 /* only error if lntmsg == NULL, otherwise we are just
2668 * short-circuiting an RDMA of 0 bytes */
2669 kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
2670 lntmsg == NULL ? -ENOENT : 0,
2671 rxmsg->gnm_u.get.gngm_cookie,
2676 if (lntmsg != NULL) {
2678 kgnilnd_setup_rdma(ni, rx, lntmsg, mlen);
2681 kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
2683 rxmsg->gnm_u.get.gngm_cookie,
2686 kgnilnd_consume_rx(rx);
2688 case GNILND_MSG_GET_REQ:
2689 if (lntmsg != NULL) {
2691 kgnilnd_setup_rdma(ni, rx, lntmsg, mlen);
2694 kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
2696 rxmsg->gnm_u.get.gngm_cookie,
2699 kgnilnd_consume_rx(rx);
2705 /* needs write_lock on kgn_peer_conn_lock held */
2707 kgnilnd_check_conn_timeouts_locked(kgn_conn_t *conn)
2709 unsigned long timeout, keepalive;
2710 unsigned long now = jiffies;
2711 unsigned long newest_last_rx;
2714 /* given that we found this conn hanging off a peer, it better damned
2715 * well be connected */
2716 LASSERTF(conn->gnc_state == GNILND_CONN_ESTABLISHED,
2717 "conn 0x%p->%s with bad state%s\n", conn,
2718 conn->gnc_peer ? libcfs_nid2str(conn->gnc_peer->gnp_nid)
2720 kgnilnd_conn_state2str(conn));
2722 CDEBUG(D_NET, "checking conn %p->%s timeout %d keepalive %d "
2723 "rx_diff %lu tx_diff %lu\n",
2724 conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
2725 conn->gnc_timeout, GNILND_TO2KA(conn->gnc_timeout),
2726 cfs_duration_sec(now - conn->gnc_last_rx_cq),
2727 cfs_duration_sec(now - conn->gnc_last_tx));
2729 timeout = cfs_time_seconds(conn->gnc_timeout);
2730 keepalive = cfs_time_seconds(GNILND_TO2KA(conn->gnc_timeout));
2732 /* just in case our lack of RX msg processing is gumming up the works - give the
2733 * remote an extra chance */
2735 newest_last_rx = GNILND_LASTRX(conn);
2737 if (time_after_eq(now, newest_last_rx + timeout)) {
2738 uint32_t level = D_CONSOLE|D_NETERROR;
2740 if (conn->gnc_peer->gnp_down == GNILND_RCA_NODE_DOWN) {
2743 GNIDBG_CONN(level, conn,
2744 "No gnilnd traffic received from %s for %lu "
2745 "seconds, terminating connection. Is node down? ",
2746 libcfs_nid2str(conn->gnc_peer->gnp_nid),
2747 cfs_duration_sec(now - newest_last_rx));
2751 /* we don't timeout on last_tx stalls - we are going to trust the
2752 * underlying network to let us know when sends are failing.
2753 * At worst, the peer will timeout our RX stamp and drop the connection
2754 * at that point. We'll then see his CLOSE or at worst his RX
2755 * stamp stop and drop the connection on our end */
2757 if (time_after_eq(now, conn->gnc_last_tx + keepalive)) {
2758 CDEBUG(D_NET, "sending NOOP -> %s (%p idle %lu(%lu)) "
2759 "last %lu/%lu/%lu %lus/%lus/%lus\n",
2760 libcfs_nid2str(conn->gnc_peer->gnp_nid), conn,
2761 cfs_duration_sec(jiffies - conn->gnc_last_tx),
2763 conn->gnc_last_noop_want, conn->gnc_last_noop_sent,
2764 conn->gnc_last_noop_cq,
2765 cfs_duration_sec(jiffies - conn->gnc_last_noop_want),
2766 cfs_duration_sec(jiffies - conn->gnc_last_noop_sent),
2767 cfs_duration_sec(jiffies - conn->gnc_last_noop_cq));
2768 set_mb(conn->gnc_last_noop_want, jiffies);
2769 atomic_inc(&conn->gnc_reaper_noop);
2770 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_NOOP_SEND))
2773 tx = kgnilnd_new_tx_msg(GNILND_MSG_NOOP, conn->gnc_peer->gnp_net->gnn_ni->ni_nid);
2776 kgnilnd_queue_tx(conn, tx);
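/* Net effect: an RX gap longer than 'timeout' gets the conn terminated,
 * while a TX gap longer than 'keepalive' only queues a NOOP so the peer's
 * RX stamp stays fresh; per the comment above, we never tear down a conn
 * based on our own TX stalls. */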
2782 /* needs write_lock on kgn_peer_conn_lock held */
2784 kgnilnd_check_peer_timeouts_locked(kgn_peer_t *peer, struct list_head *todie,
2785 struct list_head *souls)
2787 unsigned long timeout;
2788 kgn_conn_t *conn, *connN = NULL;
2794 short releaseconn = 0;
2795 unsigned long first_rx = 0;
2796 int purgatory_conn_cnt = 0;
2798 CDEBUG(D_NET, "checking peer 0x%p->%s for timeouts; interval %lus\n",
2799 peer, libcfs_nid2str(peer->gnp_nid),
2800 peer->gnp_reconnect_interval);
2802 timeout = cfs_time_seconds(MAX(*kgnilnd_tunables.kgn_timeout,
2803 GNILND_MIN_TIMEOUT));
2805 conn = kgnilnd_find_conn_locked(peer);
2807 /* if there is a valid conn, check the queues for timeouts */
2808 rc = kgnilnd_check_conn_timeouts_locked(conn);
2810 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_RX_CLOSE_CLOSING)) {
2811 /* simulate an RX CLOSE after the timeout but before
2812 * the scheduler thread gets it */
2813 conn->gnc_close_recvd = GNILND_CLOSE_INJECT1;
2814 conn->gnc_peer_error = -ETIMEDOUT;
2816 /* Once we mark closed, any of the scheduler threads could
2817 * get it and move through before we hit the fail loc code */
2818 kgnilnd_close_conn_locked(conn, rc);
2820 /* first_rx is used to decide when to release a conn from purgatory.
2822 first_rx = conn->gnc_first_rx;
2826 /* now, regardless of starting a new conn, find txs on the peer queue that
2827 * are old and smell bad - do this first so we don't trigger
2828 * a reconnect on an empty queue if we time them all out */
2829 list_for_each_entry_safe(tx, txN, &peer->gnp_tx_queue, tx_list) {
2830 if (time_after_eq(jiffies, tx->tx_qtime + timeout)) {
2832 LCONSOLE_INFO("could not send to %s due to connection"
2833 " setup failure after %lu seconds\n",
2834 libcfs_nid2str(peer->gnp_nid),
2835 cfs_duration_sec(jiffies - tx->tx_qtime));
2837 kgnilnd_tx_del_state_locked(tx, peer, NULL,
2839 list_add_tail(&tx->tx_list, todie);
2844 if (count || peer->gnp_connecting == GNILND_PEER_KILL) {
2845 CDEBUG(D_NET, "canceling %d tx for peer 0x%p->%s\n",
2846 count, peer, libcfs_nid2str(peer->gnp_nid));
2847 /* if we nuked all the TX, stop peer connection attempt (if there is one..) */
2848 if (list_empty(&peer->gnp_tx_queue) ||
2849 peer->gnp_connecting == GNILND_PEER_KILL) {
2850 /* we pass down todie to use a common function - but we know there are
2852 kgnilnd_cancel_peer_connect_locked(peer, todie);
2856 /* Don't reconnect if we are still trying to clear out old conns.
2857 * This prevents us from sending traffic on the new mbox before we are done
2858 * with the old one */
2859 reconnect = (peer->gnp_down == GNILND_RCA_NODE_UP) &&
2860 (atomic_read(&peer->gnp_dirty_eps) == 0);
2862 /* fast reconnect after a timeout */
2863 to_reconn = !conn &&
2864 (peer->gnp_last_errno == -ETIMEDOUT) &&
2865 *kgnilnd_tunables.kgn_fast_reconn;
2867 /* if we are not connected and there are tx on the gnp_tx_queue waiting
2868 * to be sent, we'll check the reconnect interval and fire up a new
2869 * connection request */
2872 (peer->gnp_connecting == GNILND_PEER_IDLE) &&
2873 (time_after_eq(jiffies, peer->gnp_reconnect_time)) &&
2874 (!list_empty(&peer->gnp_tx_queue) || to_reconn)) {
2876 CDEBUG(D_NET, "starting connect to %s\n",
2877 libcfs_nid2str(peer->gnp_nid));
2878 LASSERTF(peer->gnp_connecting == GNILND_PEER_IDLE, "Peer was idle and we "
2879 "have a write_lock, state issue %d\n", peer->gnp_connecting);
2881 peer->gnp_connecting = GNILND_PEER_CONNECT;
2882 kgnilnd_peer_addref(peer); /* extra ref for connd */
2884 spin_lock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
2885 list_add_tail(&peer->gnp_connd_list,
2886 &peer->gnp_net->gnn_dev->gnd_connd_peers);
2887 spin_unlock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
2889 kgnilnd_schedule_dgram(peer->gnp_net->gnn_dev);
2892 /* fail_loc to allow us to delay release of purgatory */
2893 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_PURG_REL_DELAY))
2896 /* This check verifies that the new conn is actually being used, which lets us
2897 * pull the old conns out of purgatory once they have actually seen traffic.
2898 * We only release a conn from purgatory during stack reset, admin command, or when a peer reconnects
2901 time_after(jiffies, first_rx + cfs_time_seconds(*kgnilnd_tunables.kgn_hardware_timeout))) {
2902 CDEBUG(D_INFO, "We can release peer %s conn's from purgatory %lu\n",
2903 libcfs_nid2str(peer->gnp_nid), first_rx + cfs_time_seconds(*kgnilnd_tunables.kgn_hardware_timeout));
2907 list_for_each_entry_safe (conn, connN, &peer->gnp_conns, gnc_list) {
2908 /* check for purgatory timeouts */
2909 if (conn->gnc_in_purgatory) {
2910 /* We cannot detach this conn from purgatory if it has not been closed, so we reschedule it;
2911 * that way, the next time we check it we can detach it from purgatory
2914 if (conn->gnc_state != GNILND_CONN_DONE) {
2915 /* Skip over conns that are currently not DONE. If they aren't already scheduled
2916 * for completion, something in the state machine is broken.
2921 /* We only detach a conn that is in purgatory if we have received a close message,
2922 * we have a new valid connection that has successfully received data, or an admin
2923 * command tells us we need to detach.
2926 if (conn->gnc_close_recvd || releaseconn || conn->gnc_needs_detach) {
2927 unsigned long waiting;
2929 waiting = (long) jiffies - conn->gnc_last_rx_cq;
2931 /* C.E: The remote peer is expected to close the
2932 * connection (see kgnilnd_check_conn_timeouts)
2933 * via the reaper thread and nuke out the MDD and
2934 * FMA resources after conn->gnc_timeout has expired
2935 * without an FMA RX */
2936 CDEBUG(D_NET, "Reconnected to %s in %lds or admin forced detach, dropping"
2937 " held resources\n",
2938 libcfs_nid2str(conn->gnc_peer->gnp_nid),
2939 cfs_duration_sec(waiting));
2941 kgnilnd_detach_purgatory_locked(conn, souls);
2943 purgatory_conn_cnt++;
2948 /* If we have too many connections in purgatory we could run out of
2949 * resources. Limit the number of connections to a tunable number and
2950 * clean up down to the minimum all in one fell swoop... there are
2951 * situations where DVS will retry txs and we can eat up several
2952 * hundred connection requests at once.
2954 if (purgatory_conn_cnt > *kgnilnd_tunables.kgn_max_purgatory) {
2955 list_for_each_entry_safe(conn, connN, &peer->gnp_conns,
2957 if (conn->gnc_in_purgatory &&
2958 conn->gnc_state == GNILND_CONN_DONE) {
2959 CDEBUG(D_NET, "Dropping Held resource due to"
2960 " resource limits being hit\n");
2961 kgnilnd_detach_purgatory_locked(conn, souls);
2963 if (purgatory_conn_cnt-- <
2964 *kgnilnd_tunables.kgn_max_purgatory)
2974 kgnilnd_reaper_check(int idx)
2976 struct list_head *peers = &kgnilnd_data.kgn_peers[idx];
2977 struct list_head *ctmp, *ctmpN;
2978 struct list_head geriatrics;
2979 struct list_head souls;
2981 INIT_LIST_HEAD(&geriatrics);
2982 INIT_LIST_HEAD(&souls);
2984 write_lock(&kgnilnd_data.kgn_peer_conn_lock);
2986 list_for_each_safe(ctmp, ctmpN, peers) {
2987 kgn_peer_t *peer = NULL;
2989 /* don't time out stuff if the network is mucked or shutting down */
2990 if (kgnilnd_check_hw_quiesce()) {
2993 peer = list_entry(ctmp, kgn_peer_t, gnp_list);
2995 kgnilnd_check_peer_timeouts_locked(peer, &geriatrics, &souls);
2998 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
3000 kgnilnd_txlist_done(&geriatrics, -EHOSTUNREACH);
3001 kgnilnd_release_purgatory_list(&souls);
3005 kgnilnd_update_reaper_timeout(long timeout)
3007 LASSERT(timeout > 0);
3009 spin_lock(&kgnilnd_data.kgn_reaper_lock);
3011 if (timeout < kgnilnd_data.kgn_new_min_timeout)
3012 kgnilnd_data.kgn_new_min_timeout = timeout;
3014 spin_unlock(&kgnilnd_data.kgn_reaper_lock);
3018 kgnilnd_reaper_poke_with_stick(unsigned long arg)
3020 wake_up(&kgnilnd_data.kgn_reaper_waitq);
3024 kgnilnd_reaper(void *arg)
3029 unsigned long next_check_time = jiffies;
3030 long current_min_timeout = MAX_SCHEDULE_TIMEOUT;
3031 struct timer_list timer;
3034 cfs_block_allsigs();
3036 /* all gnilnd threads need to run fairly urgently */
3037 set_user_nice(current, *kgnilnd_tunables.kgn_nice);
3038 spin_lock(&kgnilnd_data.kgn_reaper_lock);
3040 while (!kgnilnd_data.kgn_shutdown) {
3041 /* I wake up every 'p' seconds to check for timeouts on some
3042 * more peers. I try to check every connection 'n' times
3043 * within the global minimum of all keepalive and timeout
3044 * intervals, to ensure I attend to every connection within
3045 * (n+1)/n times its timeout intervals. */
3046 const int p = GNILND_REAPER_THREAD_WAKE;
3047 const int n = GNILND_REAPER_NCHECKS;
3049 /* to quiesce or to not quiesce, that is the question */
3050 if (unlikely(kgnilnd_data.kgn_quiesce_trigger)) {
3051 spin_unlock(&kgnilnd_data.kgn_reaper_lock);
3052 KGNILND_SPIN_QUIESCE;
3053 spin_lock(&kgnilnd_data.kgn_reaper_lock);
3056 /* careful with the jiffy wrap... */
3057 timeout = (long)(next_check_time - jiffies);
3060 prepare_to_wait(&kgnilnd_data.kgn_reaper_waitq, &wait,
3061 TASK_INTERRUPTIBLE);
3062 spin_unlock(&kgnilnd_data.kgn_reaper_lock);
3063 setup_timer(&timer, kgnilnd_reaper_poke_with_stick,
3065 mod_timer(&timer, (long) jiffies + timeout);
3067 /* check flag variables before committing */
3068 if (!kgnilnd_data.kgn_shutdown &&
3069 !kgnilnd_data.kgn_quiesce_trigger) {
3070 CDEBUG(D_INFO, "schedule timeout %ld (%lu sec)\n",
3071 timeout, cfs_duration_sec(timeout));
3073 CDEBUG(D_INFO, "awake after schedule\n");
3076 del_singleshot_timer_sync(&timer);
3077 spin_lock(&kgnilnd_data.kgn_reaper_lock);
3078 finish_wait(&kgnilnd_data.kgn_reaper_waitq, &wait);
3082 /* new_min_timeout is set from the conn timeouts and keepalive
3083 * this should end up with a min timeout of
3084 * GNILND_TIMEOUT2KEEPALIVE(t) or roughly LND_TIMEOUT/2 */
3085 if (kgnilnd_data.kgn_new_min_timeout < current_min_timeout) {
3086 current_min_timeout = kgnilnd_data.kgn_new_min_timeout;
3087 CDEBUG(D_NET, "Set new min timeout %ld\n",
3088 current_min_timeout);
3091 spin_unlock(&kgnilnd_data.kgn_reaper_lock);
3093 /* Compute how many table entries to check now so I get round
3094 * the whole table fast enough, given that I do this at fixed
3095 * intervals of 'p' seconds */
3096 chunk = *kgnilnd_tunables.kgn_peer_hash_size;
3097 if (kgnilnd_data.kgn_new_min_timeout > n * p)
3098 chunk = (chunk * n * p) /
3099 kgnilnd_data.kgn_new_min_timeout;
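/* e.g. (illustrative numbers) with 1000 hash entries, n * p == 4 and a
 * minimum timeout of 40 seconds, chunk works out to 1000 * 4 / 40 == 100
 * entries per wake-up, which still walks the whole table roughly 'n'
 * times per timeout interval as described above. */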
3102 for (i = 0; i < chunk; i++) {
3103 kgnilnd_reaper_check(hash_index);
3104 hash_index = (hash_index + 1) %
3105 *kgnilnd_tunables.kgn_peer_hash_size;
3107 next_check_time = (long) jiffies + cfs_time_seconds(p);
3108 CDEBUG(D_INFO, "next check at %lu or in %d sec\n", next_check_time, p);
3110 spin_lock(&kgnilnd_data.kgn_reaper_lock);
3113 spin_unlock(&kgnilnd_data.kgn_reaper_lock);
3115 kgnilnd_thread_fini();
3120 kgnilnd_recv_bte_get(kgn_tx_t *tx) {
3121 unsigned niov, offset, nob;
3123 lnet_msg_t *lntmsg = tx->tx_lntmsg[0];
3124 kgnilnd_parse_lnet_rdma(lntmsg, &niov, &offset, &nob, &kiov, tx->tx_nob_rdma);
3127 lnet_copy_flat2kiov(
3130 tx->tx_buffer_copy + tx->tx_offset, 0, nob);
3132 memcpy(tx->tx_buffer, tx->tx_buffer_copy + tx->tx_offset, nob);
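/* The payload therefore starts tx_offset bytes into the copy buffer (the
 * unaligned-RDMA bookkeeping set up when the transfer was posted), and
 * only the nob bytes the caller asked for are copied back out. */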
3139 kgnilnd_check_rdma_cq(kgn_device_t *dev)
3142 gni_post_descriptor_t *desc;
3144 kgn_tx_ev_id_t ev_id;
3146 int should_retry, rc;
3147 long num_processed = 0;
3148 kgn_conn_t *conn = NULL;
3149 kgn_tx_t *tx = NULL;
3150 kgn_rdma_desc_t *rdesc;
3155 /* make sure we don't keep looping if we need to reset */
3156 if (unlikely(kgnilnd_data.kgn_quiesce_trigger)) {
3157 return num_processed;
3159 rc = kgnilnd_mutex_trylock(&dev->gnd_cq_mutex);
3161 /* we didn't get the mutex, so return that there is still work
3165 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_DELAY_RDMA)) {
3166 /* a bit gross - but we need a good way to test for
3167 * delayed RDMA completions and the easiest way to do
3168 * that is to delay the RDMA CQ events */
3169 rrc = GNI_RC_NOT_DONE;
3171 rrc = kgnilnd_cq_get_event(dev->gnd_snd_rdma_cqh, &event_data);
3174 if (rrc == GNI_RC_NOT_DONE) {
3175 kgnilnd_gl_mutex_unlock(&dev->gnd_cq_mutex);
3176 CDEBUG(D_INFO, "SEND RDMA CQ %d empty processed %ld\n",
3177 dev->gnd_id, num_processed);
3178 return num_processed;
3180 dev->gnd_sched_alive = jiffies;
3183 LASSERTF(!GNI_CQ_OVERRUN(event_data),
3184 "this is bad, somehow our credits didn't protect us"
3185 " from CQ overrun\n");
3186 LASSERTF(GNI_CQ_GET_TYPE(event_data) == GNI_CQ_EVENT_TYPE_POST,
3187 "rrc %d, GNI_CQ_GET_TYPE("LPX64") = "LPX64"\n", rrc,
3188 event_data, GNI_CQ_GET_TYPE(event_data));
3190 rrc = kgnilnd_get_completed(dev->gnd_snd_rdma_cqh, event_data,
3192 kgnilnd_gl_mutex_unlock(&dev->gnd_cq_mutex);
3194 /* XXX Nic: Need better error handling here... */
3195 LASSERTF((rrc == GNI_RC_SUCCESS) ||
3196 (rrc == GNI_RC_TRANSACTION_ERROR),
3199 ev_id.txe_cookie = desc->post_id;
3201 kgnilnd_validate_tx_ev_id(&ev_id, &tx, &conn);
3203 if (conn == NULL || tx == NULL) {
3204 /* either conn or tx was already nuked and this is a "late"
3205 * completion, so drop it */
3209 GNITX_ASSERTF(tx, tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE ||
3210 tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE ||
3211 tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE_REV ||
3212 tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE_REV,
3213 "tx %p with type %d\n", tx, tx->tx_msg.gnm_type);
3215 GNIDBG_TX(D_NET, tx, "RDMA completion for %d bytes", tx->tx_nob);
3217 if (tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE_REV) {
3218 lnet_set_reply_msg_len(NULL, tx->tx_lntmsg[1],
3219 tx->tx_msg.gnm_u.completion.gncm_retval);
3223 if (tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE_REV && desc->status == GNI_RC_SUCCESS) {
3224 if (tx->tx_buffer_copy != NULL)
3225 kgnilnd_recv_bte_get(tx);
3226 rc = kgnilnd_verify_rdma_cksum(tx, tx->tx_putinfo.gnpam_payload_cksum, tx->tx_nob_rdma);
3229 if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE_REV && desc->status == GNI_RC_SUCCESS) {
3230 if (tx->tx_buffer_copy != NULL)
3231 kgnilnd_recv_bte_get(tx);
3232 rc = kgnilnd_verify_rdma_cksum(tx, tx->tx_getinfo.gngm_payload_cksum, tx->tx_nob_rdma);
3235 /* remove from rdmaq */
3236 kgnilnd_conn_mutex_lock(&conn->gnc_rdma_mutex);
3237 spin_lock(&conn->gnc_list_lock);
3238 kgnilnd_tx_del_state_locked(tx, NULL, conn, GNILND_TX_ALLOCD);
3239 spin_unlock(&conn->gnc_list_lock);
3240 kgnilnd_conn_mutex_unlock(&conn->gnc_rdma_mutex);
3242 if (likely(desc->status == GNI_RC_SUCCESS) && rc == 0) {
3243 atomic_inc(&dev->gnd_rdma_ntx);
3244 atomic64_add(tx->tx_nob, &dev->gnd_rdma_txbytes);
3245 /* transaction succeeded, add into fmaq */
3246 kgnilnd_queue_tx(conn, tx);
3247 kgnilnd_peer_alive(conn->gnc_peer);
3249 /* drop ref from kgnilnd_validate_tx_ev_id */
3250 kgnilnd_admin_decref(conn->gnc_tx_in_use);
3251 kgnilnd_conn_decref(conn);
3256 /* fall through to the TRANSACTION_ERROR case */
3259 /* get stringified version for log messages */
3260 kgnilnd_cq_error_str(event_data, &err_str, 256);
3261 kgnilnd_cq_error_recoverable(event_data, &should_retry);
3263 /* make sure we are not off in the weeds with this tx */
3264 if (tx->tx_retrans >
3265 *kgnilnd_tunables.kgn_max_retransmits) {
3266 GNIDBG_TX(D_NETERROR, tx,
3267 "giving up on TX, too many retries", NULL);
3271 GNIDBG_TX(D_NETERROR, tx, "RDMA %s error (%s)",
3272 should_retry ? "transient" : "unrecoverable", err_str);
3274 if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE ||
3275 tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE_REV) {
3276 rdesc = &tx->tx_putinfo.gnpam_desc;
3277 rnob = tx->tx_putinfo.gnpam_desc.gnrd_nob;
3278 rcookie = tx->tx_putinfo.gnpam_dst_cookie;
3280 rdesc = &tx->tx_getinfo.gngm_desc;
3281 rnob = tx->tx_lntmsg[0]->msg_len;
3282 rcookie = tx->tx_getinfo.gngm_cookie;
3287 tx->tx_msg.gnm_type,
3291 kgnilnd_nak_rdma(conn,
3292 tx->tx_msg.gnm_type,
3295 tx->tx_msg.gnm_srcnid);
3296 kgnilnd_tx_done(tx, -EFAULT);
3297 kgnilnd_close_conn(conn, -ECOMM);
3300 /* drop ref from kgnilnd_validate_tx_ev_id */
3301 kgnilnd_admin_decref(conn->gnc_tx_in_use);
3302 kgnilnd_conn_decref(conn);
3307 kgnilnd_check_fma_send_cq(kgn_device_t *dev)
3311 kgn_tx_ev_id_t ev_id;
3312 kgn_tx_t *tx = NULL;
3313 kgn_conn_t *conn = NULL;
3314 int queued_fma, saw_reply, rc;
3315 long num_processed = 0;
3318 /* make sure we don't keep looping if we need to reset */
3319 if (unlikely(kgnilnd_data.kgn_quiesce_trigger)) {
3320 return num_processed;
3323 rc = kgnilnd_mutex_trylock(&dev->gnd_cq_mutex);
3325 /* we didn't get the mutex, so return that there is still work
3330 rrc = kgnilnd_cq_get_event(dev->gnd_snd_fma_cqh, &event_data);
3331 kgnilnd_gl_mutex_unlock(&dev->gnd_cq_mutex);
3333 if (rrc == GNI_RC_NOT_DONE) {
3335 "SMSG send CQ %d not ready (data "LPX64") "
3336 "processed %ld\n", dev->gnd_id, event_data,
3338 return num_processed;
3341 dev->gnd_sched_alive = jiffies;
3344 LASSERTF(!GNI_CQ_OVERRUN(event_data),
3345 "this is bad, somehow our credits didn't "
3346 "protect us from CQ overrun\n");
3347 LASSERTF(GNI_CQ_GET_TYPE(event_data) == GNI_CQ_EVENT_TYPE_SMSG,
3348 "rrc %d, GNI_CQ_GET_TYPE("LPX64") = "LPX64"\n", rrc,
3349 event_data, GNI_CQ_GET_TYPE(event_data));
3351 /* if SMSG couldn't handle an error, time for conn to die */
3352 if (unlikely(rrc == GNI_RC_TRANSACTION_ERROR)) {
3355 /* need to take the write_lock to ensure atomicity
3356 * on the conn state if we need to close it */
3357 write_lock(&kgnilnd_data.kgn_peer_conn_lock);
3358 conn = kgnilnd_cqid2conn_locked(GNI_CQ_GET_INST_ID(event_data));
3360 /* Conn was destroyed? */
3362 "SMSG CQID lookup "LPX64" failed\n",
3363 GNI_CQ_GET_INST_ID(event_data));
3364 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
3368 kgnilnd_cq_error_str(event_data, &err_str, 256);
3369 CNETERR("SMSG send error to %s: rc %d (%s)\n",
3370 libcfs_nid2str(conn->gnc_peer->gnp_nid),
3372 kgnilnd_close_conn_locked(conn, -ECOMM);
3374 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
3376 /* no need to process rest of this tx -
3377 * it is getting canceled */
3381 /* fall through to GNI_RC_SUCCESS case */
3382 ev_id.txe_smsg_id = GNI_CQ_GET_MSG_ID(event_data);
3384 kgnilnd_validate_tx_ev_id(&ev_id, &tx, &conn);
3385 if (conn == NULL || tx == NULL) {
3386 /* either conn or tx was already nuked and this is a "late"
3387 * completion, so drop it */
3391 tx->tx_conn->gnc_last_tx_cq = jiffies;
3392 if (tx->tx_msg.gnm_type == GNILND_MSG_NOOP) {
3393 set_mb(conn->gnc_last_noop_cq, jiffies);
3396 /* lock tx_list_state and tx_state */
3397 kgnilnd_conn_mutex_lock(&conn->gnc_smsg_mutex);
3398 spin_lock(&tx->tx_conn->gnc_list_lock);
3400 GNITX_ASSERTF(tx, tx->tx_list_state == GNILND_TX_LIVE_FMAQ,
3401 "state not GNILND_TX_LIVE_FMAQ", NULL);
3402 GNITX_ASSERTF(tx, tx->tx_state & GNILND_TX_WAITING_COMPLETION,
3403 "not waiting for completion", NULL);
3405 GNIDBG_TX(D_NET, tx, "SMSG complete tx_state %x rc %d",
3408 tx->tx_state &= ~GNILND_TX_WAITING_COMPLETION;
3410 /* This will trigger other FMA sends that were
3411 * pending this completion */
3412 queued_fma = !list_empty(&tx->tx_conn->gnc_fmaq);
3414 /* we either did not expect reply or we already got it */
3415 saw_reply = !(tx->tx_state & GNILND_TX_WAITING_REPLY);
3417 spin_unlock(&tx->tx_conn->gnc_list_lock);
3418 kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
3421 CDEBUG(D_NET, "scheduling conn 0x%p->%s for fmaq\n",
3423 libcfs_nid2str(conn->gnc_peer->gnp_nid));
3424 kgnilnd_schedule_conn(conn);
3427 /* If saw_reply is false, then as soon as gnc_list_lock is dropped the tx could be nuked.
3428 * If saw_reply is true, we know the tx is safe to use, as the other thread
3429 * is already finished with it.
3433 /* no longer need to track on the live_fmaq */
3434 kgnilnd_tx_del_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_ALLOCD);
3436 if (tx->tx_state & GNILND_TX_PENDING_RDMA) {
3437 /* we already got reply &