2 * Copyright (C) 2004 Cluster File Systems, Inc.
4 * Copyright (C) 2009-2012 Cray, Inc.
6 * Derived from work by Eric Barton <eric@bartonsoftware.com>
7 * Author: Nic Henke <nic@cray.com>
9 * This file is part of Lustre, http://www.lustre.org.
11 * Lustre is free software; you can redistribute it and/or
12 * modify it under the terms of version 2 of the GNU General Public
13 * License as published by the Free Software Foundation.
15 * Lustre is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with Lustre; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 #include <linux/nmi.h>
30 /* this is useful when debugging wire corruption. */
32 kgnilnd_dump_blob(int level, char *prefix, void *buf, int len) {
40 "%s 0x%p: 0x%16.16llx 0x%16.16llx 0x%16.16llx 0x%16.16llx\n",
41 prefix, ptr, *(ptr), *(ptr + 1), *(ptr + 2), *(ptr + 3));
44 } else if (len >= 16) {
46 "%s 0x%p: 0x%16.16llx 0x%16.16llx\n",
47 prefix, ptr, *(ptr), *(ptr + 1));
51 CDEBUG(level, "%s 0x%p: 0x%16.16llx\n",
60 kgnilnd_dump_msg(int mask, kgn_msg_t *msg)
62 CDEBUG(mask, "0x%8.8x 0x%4.4x 0x%4.4x 0x%16.16llx"
63 " 0x%16.16llx 0x%8.8x 0x%4.4x 0x%4.4x 0x%8.8x\n",
64 msg->gnm_magic, msg->gnm_version,
65 msg->gnm_type, msg->gnm_srcnid,
66 msg->gnm_connstamp, msg->gnm_seq,
67 msg->gnm_cksum, msg->gnm_payload_cksum,
68 msg->gnm_payload_len);
72 kgnilnd_schedule_device(kgn_device_t *dev)
74 short already_live = 0;
76 /* we'll only want to wake if the scheduler thread
77 * has come around and set ready to zero */
78 already_live = cmpxchg(&dev->gnd_ready, GNILND_DEV_IDLE, GNILND_DEV_IRQ);
81 wake_up_all(&dev->gnd_waitq);
86 void kgnilnd_schedule_device_timer(unsigned long arg)
88 kgn_device_t *dev = (kgn_device_t *) arg;
90 kgnilnd_schedule_device(dev);
94 kgnilnd_device_callback(__u32 devid, __u64 arg)
97 int index = (int) arg;
99 if (index >= kgnilnd_data.kgn_ndevs) {
100 /* use _EMERG instead of an LBUG to prevent LBUG'ing in
101 * interrupt context. */
102 LCONSOLE_EMERG("callback for unknown device %d->%d\n",
107 dev = &kgnilnd_data.kgn_devices[index];
108 /* just basic sanity */
109 if (dev->gnd_id == devid) {
110 kgnilnd_schedule_device(dev);
112 LCONSOLE_EMERG("callback for bad device %d devid %d\n",
117 /* sched_intent values:
118 * < 0 : do not reschedule under any circumstances
119 * == 0: reschedule if someone marked him WANTS_SCHED
120 * > 0 : force a reschedule */
121 /* Return code 0 means it did not schedule the conn, 1
122 * means it successfully scheduled the conn.
126 kgnilnd_schedule_process_conn(kgn_conn_t *conn, int sched_intent)
130 /* move back to IDLE but save previous state.
131 * if we see WANTS_SCHED, we'll call kgnilnd_schedule_conn and
132 * let the xchg there handle any racing callers to get it
133 * onto gnd_ready_conns */
135 conn_sched = xchg(&conn->gnc_scheduled, GNILND_CONN_IDLE);
136 LASSERTF(conn_sched == GNILND_CONN_WANTS_SCHED ||
137 conn_sched == GNILND_CONN_PROCESS,
138 "conn %p after process in bad state: %d\n",
141 if (sched_intent >= 0) {
142 if (sched_intent > 0 || (conn_sched == GNILND_CONN_WANTS_SCHED)) {
143 return kgnilnd_schedule_conn_refheld(conn, 1);
149 /* Return of 0 for conn not scheduled, 1 returned if conn was scheduled or marked
153 _kgnilnd_schedule_conn(kgn_conn_t *conn, const char *caller, int line, int refheld)
155 kgn_device_t *dev = conn->gnc_device;
159 sched = xchg(&conn->gnc_scheduled, GNILND_CONN_WANTS_SCHED);
160 /* we only care about the last person who marked want_sched since they
161 * are most likely the culprit
163 memcpy(conn->gnc_sched_caller, caller, sizeof(conn->gnc_sched_caller));
164 conn->gnc_sched_line = line;
165 /* if we are IDLE, add to list - only one guy sees IDLE and "wins"
166 * the chance to put it onto gnd_ready_conns.
167 * otherwise, leave marked as WANTS_SCHED and the thread that "owns"
168 * the conn in process_conns will take care of moving it back to
169 * SCHED when it is done processing */
171 if (sched == GNILND_CONN_IDLE) {
172 /* if the conn is already scheduled, we've already requested
173 * the scheduler thread wakeup */
175 /* Add a reference to the conn if we are not holding a reference
176 * already from the existing scheduler. We now use the same
177 * reference if we need to reschedule a conn while in a scheduler
180 kgnilnd_conn_addref(conn);
182 LASSERTF(list_empty(&conn->gnc_schedlist), "conn %p already sched state %d\n",
185 CDEBUG(D_INFO, "scheduling conn 0x%p caller %s:%d\n", conn, caller, line);
187 spin_lock(&dev->gnd_lock);
188 list_add_tail(&conn->gnc_schedlist, &dev->gnd_ready_conns);
189 spin_unlock(&dev->gnd_lock);
190 set_mb(conn->gnc_last_sched_ask, jiffies);
193 CDEBUG(D_INFO, "not scheduling conn 0x%p: %d caller %s:%d\n", conn, sched, caller, line);
197 /* make sure thread(s) are going to process conns - but let it make a
198 * separate decision from the conn schedule */
199 kgnilnd_schedule_device(dev);
204 kgnilnd_schedule_dgram(kgn_device_t *dev)
208 wake = xchg(&dev->gnd_dgram_ready, GNILND_DGRAM_SCHED);
209 if (wake != GNILND_DGRAM_SCHED) {
210 wake_up(&dev->gnd_dgram_waitq);
212 CDEBUG(D_NETTRACE, "not waking: %d\n", wake);
217 kgnilnd_free_tx(kgn_tx_t *tx)
219 /* taken from kgnilnd_tx_add_state_locked */
221 LASSERTF((tx->tx_list_p == NULL &&
222 tx->tx_list_state == GNILND_TX_ALLOCD) &&
223 list_empty(&tx->tx_list),
224 "tx %p with bad state %s (list_p %p) tx_list %s\n",
225 tx, kgnilnd_tx_state2str(tx->tx_list_state), tx->tx_list_p,
226 list_empty(&tx->tx_list) ? "empty" : "not empty");
228 atomic_dec(&kgnilnd_data.kgn_ntx);
230 /* we only allocate this if we need to */
231 if (tx->tx_phys != NULL) {
232 kmem_cache_free(kgnilnd_data.kgn_tx_phys_cache, tx->tx_phys);
233 CDEBUG(D_MALLOC, "slab-freed 'tx_phys': %lu at %p.\n",
234 LNET_MAX_IOV * sizeof(gni_mem_segment_t), tx->tx_phys);
237 /* Only free the buffer if we used it */
238 if (tx->tx_buffer_copy != NULL) {
239 vfree(tx->tx_buffer_copy);
240 tx->tx_buffer_copy = NULL;
241 CDEBUG(D_MALLOC, "vfreed buffer2\n");
244 KGNILND_POISON(tx, 0x5a, sizeof(kgn_tx_t));
246 CDEBUG(D_MALLOC, "slab-freed 'tx': %lu at %p.\n", sizeof(*tx), tx);
247 kmem_cache_free(kgnilnd_data.kgn_tx_cache, tx);
251 kgnilnd_alloc_tx (void)
255 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_ALLOC_TX))
258 tx = kmem_cache_alloc(kgnilnd_data.kgn_tx_cache, GFP_ATOMIC);
260 CERROR("failed to allocate tx\n");
263 CDEBUG(D_MALLOC, "slab-alloced 'tx': %lu at %p.\n",
266 /* need this memset, cache alloc'd memory is not cleared */
267 memset(tx, 0, sizeof(*tx));
269 /* setup everything here to minimize time under the lock */
270 tx->tx_buftype = GNILND_BUF_NONE;
271 tx->tx_msg.gnm_type = GNILND_MSG_NONE;
272 INIT_LIST_HEAD(&tx->tx_list);
273 INIT_LIST_HEAD(&tx->tx_map_list);
274 tx->tx_list_state = GNILND_TX_ALLOCD;
276 atomic_inc(&kgnilnd_data.kgn_ntx);
281 /* csum_fold needs to be run on the return value before shipping over the wire */
282 #define _kgnilnd_cksum(seed, ptr, nob) csum_partial(ptr, nob, seed)
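/* e.g. a wire-ready checksum is csum_fold(_kgnilnd_cksum(0, ptr, nob)) -
 * which is what kgnilnd_cksum() below does */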
284 /* we don't use offset as everyone is passing a buffer reference that already
285 * includes the offset into the base address -
286 * see kgnilnd_setup_virt_buffer and kgnilnd_setup_immediate_buffer */
288 kgnilnd_cksum(void *ptr, size_t nob)
292 sum = csum_fold(_kgnilnd_cksum(0, ptr, nob));
294 /* don't use magic 'no checksum' value */
298 CDEBUG(D_INFO, "cksum 0x%x for ptr 0x%p sz %zu\n",
305 kgnilnd_cksum_kiov(unsigned int nkiov, lnet_kiov_t *kiov,
306 unsigned int offset, unsigned int nob, int dump_blob)
312 unsigned int fraglen;
318 CDEBUG(D_BUFFS, "calc cksum for kiov 0x%p nkiov %u offset %u nob %u, dump %d\n",
319 kiov, nkiov, offset, nob, dump_blob);
321 /* if this loop changes, please change kgnilnd_setup_phys_buffer */
323 while (offset >= kiov->kiov_len) {
324 offset -= kiov->kiov_len;
330 /* ignore nob here; if nob < (kiov_len - offset), nkiov == 1 */
331 odd = (unsigned long) (kiov[0].kiov_len - offset) & 1;
333 if ((odd || *kgnilnd_tunables.kgn_vmap_cksum) && nkiov > 1) {
334 struct page **pages = kgnilnd_data.kgn_cksum_map_pages[get_cpu()];
336 LASSERTF(pages != NULL, "NULL pages for cpu %d map_pages 0x%p\n",
337 get_cpu(), kgnilnd_data.kgn_cksum_map_pages);
339 CDEBUG(D_BUFFS, "odd %d len %u offset %u nob %u\n",
340 odd, kiov[0].kiov_len, offset, nob);
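/* gather all the kiov pages so the whole payload can be vmapped into one
 * contiguous virtual range and checksummed in a single pass */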
342 for (i = 0; i < nkiov; i++) {
343 pages[i] = kiov[i].kiov_page;
346 addr = vmap(pages, nkiov, VM_MAP, PAGE_KERNEL);
348 CNETERR("Couldn't vmap %d frags on %d bytes to avoid odd length fragment in cksum\n",
350 /* return zero to avoid killing tx - we'll just get warning on console
351 * when remote end sees zero checksum */
354 atomic_inc(&kgnilnd_data.kgn_nvmap_cksum);
356 tmpck = _kgnilnd_cksum(0, (void *) addr + kiov[0].kiov_offset + offset, nob);
360 kgnilnd_dump_blob(D_BUFFS, "flat kiov RDMA payload",
361 (void *)addr + kiov[0].kiov_offset + offset, nob);
363 CDEBUG(D_BUFFS, "cksum 0x%x (+0x%x) for addr 0x%p+%u len %u offset %u\n",
364 cksum, tmpck, addr, kiov[0].kiov_offset, nob, offset);
368 fraglen = min(kiov->kiov_len - offset, nob);
370 /* make dang sure we don't send a bogus checksum if somehow we get
371 * an odd length fragment on anything but the last entry in a kiov -
372 * we know from kgnilnd_setup_rdma_buffer that we can't have non
373 * PAGE_SIZE pages in the middle, so if nob < PAGE_SIZE, it is the last one */
374 LASSERTF(!(fraglen&1) || (nob < PAGE_SIZE),
375 "odd fraglen %u on nkiov %d, nob %u kiov_len %u offset %u kiov 0x%p\n",
376 fraglen, nkiov, nob, kiov->kiov_len, offset, kiov);
378 addr = (void *)kmap(kiov->kiov_page) + kiov->kiov_offset + offset;
379 tmpck = _kgnilnd_cksum(cksum, addr, fraglen);
382 "cksum 0x%x (+0x%x) for page 0x%p+%u (0x%p) len %u offset %u\n",
383 cksum, tmpck, kiov->kiov_page, kiov->kiov_offset, addr,
389 kgnilnd_dump_blob(D_BUFFS, "kiov cksum", addr, fraglen);
391 kunmap(kiov->kiov_page);
398 /* iov must not run out before end of data */
399 LASSERTF(nob == 0 || nkiov > 0, "nob %u nkiov %u\n", nob, nkiov);
404 retsum = csum_fold(cksum);
406 /* don't use magic 'no checksum' value */
410 CDEBUG(D_BUFFS, "retsum 0x%x from cksum 0x%x\n", retsum, cksum);
416 kgnilnd_init_msg(kgn_msg_t *msg, int type, lnet_nid_t source)
418 msg->gnm_magic = GNILND_MSG_MAGIC;
419 msg->gnm_version = GNILND_MSG_VERSION;
420 msg->gnm_type = type;
421 msg->gnm_payload_len = 0;
422 msg->gnm_srcnid = source;
423 /* gnm_connstamp gets set when FMA is sent */
424 * gnm_srcnid is set on creation via function argument.
425 * The right interface/net and nid are passed in when the message
431 kgnilnd_new_tx_msg(int type, lnet_nid_t source)
433 kgn_tx_t *tx = kgnilnd_alloc_tx();
436 kgnilnd_init_msg(&tx->tx_msg, type, source);
438 CERROR("couldn't allocate new tx type %s!\n",
439 kgnilnd_msgtype2str(type));
446 kgnilnd_nak_rdma(kgn_conn_t *conn, int rx_type, int error, __u64 cookie, lnet_nid_t source) {
452 case GNILND_MSG_GET_REQ:
453 case GNILND_MSG_GET_DONE:
454 nak_type = GNILND_MSG_GET_NAK;
456 case GNILND_MSG_PUT_REQ:
457 case GNILND_MSG_PUT_ACK:
458 case GNILND_MSG_PUT_DONE:
459 nak_type = GNILND_MSG_PUT_NAK;
461 case GNILND_MSG_PUT_REQ_REV:
462 case GNILND_MSG_PUT_DONE_REV:
463 nak_type = GNILND_MSG_PUT_NAK_REV;
465 case GNILND_MSG_GET_REQ_REV:
466 case GNILND_MSG_GET_ACK_REV:
467 case GNILND_MSG_GET_DONE_REV:
468 nak_type = GNILND_MSG_GET_NAK_REV;
471 CERROR("invalid msg type %s (%d)\n",
472 kgnilnd_msgtype2str(rx_type), rx_type);
475 /* only allow NAK on error and truncate to zero */
476 LASSERTF(error <= 0, "error %d conn 0x%p, cookie "LPU64"\n",
477 error, conn, cookie);
479 tx = kgnilnd_new_tx_msg(nak_type, source);
481 CNETERR("can't get TX to NAK RDMA to %s\n",
482 libcfs_nid2str(conn->gnc_peer->gnp_nid));
486 tx->tx_msg.gnm_u.completion.gncm_retval = error;
487 tx->tx_msg.gnm_u.completion.gncm_cookie = cookie;
488 kgnilnd_queue_tx(conn, tx);
492 kgnilnd_setup_immediate_buffer(kgn_tx_t *tx, unsigned int niov, struct iovec *iov,
493 lnet_kiov_t *kiov, unsigned int offset, unsigned int nob)
496 kgn_msg_t *msg = &tx->tx_msg;
499 /* To help save on MDDs for short messages, we'll vmap a kiov to allow
500 * gni_smsg_send to send that as the payload */
502 LASSERT(tx->tx_buftype == GNILND_BUF_NONE);
505 tx->tx_buffer = NULL;
506 } else if (kiov != NULL) {
507 LASSERTF(niov > 0 && niov < GNILND_MAX_IMMEDIATE/PAGE_SIZE,
508 "bad niov %d\n", niov);
510 while (offset >= kiov->kiov_len) {
511 offset -= kiov->kiov_len;
516 for (i = 0; i < niov; i++) {
517 /* We can't have a kiov_offset on anything but the first entry,
518 * otherwise we'll have a hole at the end of the mapping as we only map
520 * Also, if we have a kiov_len < PAGE_SIZE but we need to map more
521 * than kiov_len, we will also have a hole at the end of that page
522 * which isn't allowed */
523 if ((kiov[i].kiov_offset != 0 && i > 0) ||
524 (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_SIZE && i < niov - 1)) {
525 CNETERR("Can't make payload contiguous in I/O VM:"
526 "page %d, offset %u, nob %u, kiov_offset %u kiov_len %u \n",
527 i, offset, nob, kiov->kiov_offset, kiov->kiov_len);
530 tx->tx_imm_pages[i] = kiov[i].kiov_page;
533 /* hijack tx_phys for the later unmap */
535 /* tx->phyx being equal to NULL is the signal for unmap to discern between kmap and vmap */
537 tx->tx_buffer = (void *)kmap(tx->tx_imm_pages[0]) + kiov[0].kiov_offset + offset;
538 atomic_inc(&kgnilnd_data.kgn_nkmap_short);
539 GNIDBG_TX(D_NET, tx, "kmapped page for %d bytes for kiov 0x%p, buffer 0x%p",
540 nob, kiov, tx->tx_buffer);
542 tx->tx_phys = vmap(tx->tx_imm_pages, niov, VM_MAP, PAGE_KERNEL);
543 if (tx->tx_phys == NULL) {
544 CNETERR("Couldn't vmap %d frags on %d bytes\n", niov, nob);
548 atomic_inc(&kgnilnd_data.kgn_nvmap_short);
549 /* make sure we take into account the kiov offset as the start of the buffer */
550 tx->tx_buffer = (void *)tx->tx_phys + kiov[0].kiov_offset + offset;
551 GNIDBG_TX(D_NET, tx, "mapped %d pages for %d bytes from kiov 0x%p to 0x%p, buffer 0x%p",
552 niov, nob, kiov, tx->tx_phys, tx->tx_buffer);
554 tx->tx_buftype = GNILND_BUF_IMMEDIATE_KIOV;
558 /* For now this is almost identical to kgnilnd_setup_virt_buffer, but we
559 * could "flatten" the payload into a single contiguous buffer ready
560 * for sending directly over an FMA if we ever needed to. */
564 while (offset >= iov->iov_len) {
565 offset -= iov->iov_len;
571 if (nob > iov->iov_len - offset) {
572 CERROR("Can't handle multiple vaddr fragments\n");
576 tx->tx_buffer = (void *)(((unsigned long)iov->iov_base) + offset);
578 tx->tx_buftype = GNILND_BUF_IMMEDIATE;
582 /* checksum payload early - it shouldn't be changing after lnd_send */
583 if (*kgnilnd_tunables.kgn_checksum >= 2) {
584 msg->gnm_payload_cksum = kgnilnd_cksum(tx->tx_buffer, nob);
585 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_SMSG_CKSUM2)) {
586 msg->gnm_payload_cksum += 0xe00e;
588 if (*kgnilnd_tunables.kgn_checksum_dump > 1) {
589 kgnilnd_dump_blob(D_BUFFS, "payload checksum",
593 msg->gnm_payload_cksum = 0;
600 kgnilnd_setup_virt_buffer(kgn_tx_t *tx,
601 unsigned int niov, struct iovec *iov,
602 unsigned int offset, unsigned int nob)
607 LASSERT(tx->tx_buftype == GNILND_BUF_NONE);
609 while (offset >= iov->iov_len) {
610 offset -= iov->iov_len;
616 if (nob > iov->iov_len - offset) {
617 CERROR("Can't handle multiple vaddr fragments\n");
621 tx->tx_buftype = GNILND_BUF_VIRT_UNMAPPED;
623 tx->tx_buffer = (void *)(((unsigned long)iov->iov_base) + offset);
628 kgnilnd_setup_phys_buffer(kgn_tx_t *tx, int nkiov, lnet_kiov_t *kiov,
629 unsigned int offset, unsigned int nob)
631 gni_mem_segment_t *phys;
633 unsigned int fraglen;
635 GNIDBG_TX(D_NET, tx, "niov %d kiov 0x%p offset %u nob %u", nkiov, kiov, offset, nob);
639 LASSERT(tx->tx_buftype == GNILND_BUF_NONE);
641 /* only allocate this if we are going to use it */
642 tx->tx_phys = kmem_cache_alloc(kgnilnd_data.kgn_tx_phys_cache,
644 if (tx->tx_phys == NULL) {
645 CERROR("failed to allocate tx_phys\n");
650 CDEBUG(D_MALLOC, "slab-alloced 'tx->tx_phys': %lu at %p.\n",
651 LNET_MAX_IOV * sizeof(gni_mem_segment_t), tx->tx_phys);
653 /* if this loop changes, please change kgnilnd_cksum_kiov
654 * and kgnilnd_setup_immediate_buffer */
656 while (offset >= kiov->kiov_len) {
657 offset -= kiov->kiov_len;
663 /* at this point, kiov points to the first page that we'll actually map
664 * now that we've seeked into the kiov for offset and dropped any
665 * leading pages that fall entirely within the offset */
666 tx->tx_buftype = GNILND_BUF_PHYS_UNMAPPED;
669 /* kiov_offset is start of 'valid' buffer, so index offset past that */
670 tx->tx_buffer = (void *)((unsigned long)(kiov->kiov_offset + offset));
673 CDEBUG(D_NET, "tx 0x%p buffer 0x%p map start kiov 0x%p+%u niov %d offset %u\n",
674 tx, tx->tx_buffer, kiov, kiov->kiov_offset, nkiov, offset);
677 fraglen = min(kiov->kiov_len - offset, nob);
679 /* We can't have a kiov_offset on anything but the first entry,
680 * otherwise we'll have a hole at the end of the mapping as we only map
681 * whole pages. Only the first page is allowed to have an offset -
682 * we'll add that into tx->tx_buffer and that will get used when we
683 * map in the segments (see kgnilnd_map_buffer).
684 * Also, if we have a kiov_len < PAGE_SIZE but we need to map more
685 * than kiov_len, we will also have a hole at the end of that page
686 * which isn't allowed */
687 if ((phys != tx->tx_phys) &&
688 ((kiov->kiov_offset != 0) ||
689 ((kiov->kiov_len < PAGE_SIZE) && (nob > kiov->kiov_len)))) {
690 CERROR("Can't make payload contiguous in I/O VM:"
691 "page %d, offset %u, nob %u, kiov_offset %u kiov_len %u \n",
692 (int)(phys - tx->tx_phys),
693 offset, nob, kiov->kiov_offset, kiov->kiov_len);
698 if ((phys - tx->tx_phys) == LNET_MAX_IOV) {
699 CERROR("payload too big (%d)\n", (int)(phys - tx->tx_phys));
704 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_PHYS_SETUP)) {
709 CDEBUG(D_BUFFS, "page 0x%p kiov_offset %u kiov_len %u nob %u "
710 "nkiov %u offset %u\n",
711 kiov->kiov_page, kiov->kiov_offset, kiov->kiov_len, nob, nkiov, offset);
713 phys->address = page_to_phys(kiov->kiov_page);
720 /* iov must not run out before end of data */
721 LASSERTF(nob == 0 || nkiov > 0, "nob %u nkiov %u\n", nob, nkiov);
725 tx->tx_phys_npages = phys - tx->tx_phys;
730 if (tx->tx_phys != NULL) {
731 kmem_cache_free(kgnilnd_data.kgn_tx_phys_cache, tx->tx_phys);
732 CDEBUG(D_MALLOC, "slab-freed 'tx_phys': %lu at %p.\n",
733 sizeof(*tx->tx_phys), tx->tx_phys);
740 kgnilnd_setup_rdma_buffer(kgn_tx_t *tx, unsigned int niov,
741 struct iovec *iov, lnet_kiov_t *kiov,
742 unsigned int offset, unsigned int nob)
746 LASSERTF((iov == NULL) != (kiov == NULL), "iov 0x%p, kiov 0x%p, tx 0x%p,"
747 " offset %d, nob %d, niov %d\n"
748 , iov, kiov, tx, offset, nob, niov);
751 rc = kgnilnd_setup_phys_buffer(tx, niov, kiov, offset, nob);
753 rc = kgnilnd_setup_virt_buffer(tx, niov, iov, offset, nob);
758 /* kgnilnd_parse_lnet_rdma()
759 * lntmsg - message passed in from lnet.
760 * niov, kiov, offset - see lnd_t in lib-types.h for descriptions.
761 * nob - actual number of bytes in this message.
762 * put_len - It is possible for PUTs to have a different length than the
763 * length stored in lntmsg->msg_len since LNET can adjust this
764 * length based on its buffer size and offset.
765 * lnet_try_match_md() sets the mlength that we use to do the RDMA
769 kgnilnd_parse_lnet_rdma(lnet_msg_t *lntmsg, unsigned int *niov,
770 unsigned int *offset, unsigned int *nob,
771 lnet_kiov_t **kiov, int put_len)
773 /* GETs are weird, see kgnilnd_send */
774 if (lntmsg->msg_type == LNET_MSG_GET) {
775 if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0) {
778 *kiov = lntmsg->msg_md->md_iov.kiov;
780 *niov = lntmsg->msg_md->md_niov;
781 *nob = lntmsg->msg_md->md_length;
784 *kiov = lntmsg->msg_kiov;
785 *niov = lntmsg->msg_niov;
787 *offset = lntmsg->msg_offset;
792 kgnilnd_compute_rdma_cksum(kgn_tx_t *tx, int put_len)
794 unsigned int niov, offset, nob;
796 lnet_msg_t *lntmsg = tx->tx_lntmsg[0];
797 int dump_cksum = (*kgnilnd_tunables.kgn_checksum_dump > 1);
799 GNITX_ASSERTF(tx, ((tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE) ||
800 (tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE) ||
801 (tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE_REV) ||
802 (tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE_REV) ||
803 (tx->tx_msg.gnm_type == GNILND_MSG_GET_ACK_REV) ||
804 (tx->tx_msg.gnm_type == GNILND_MSG_PUT_REQ_REV)),
805 "bad type %s", kgnilnd_msgtype2str(tx->tx_msg.gnm_type));
807 if ((tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE_REV) ||
808 (tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE_REV)) {
809 tx->tx_msg.gnm_payload_cksum = 0;
812 if (*kgnilnd_tunables.kgn_checksum < 3) {
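/* note: as used in this file the kgn_checksum levels appear cumulative -
 * >= 1 header checksum, >= 2 immediate payload, >= 3 RDMA payload -
 * so below level 3 we skip the RDMA payload checksum */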
813 tx->tx_msg.gnm_payload_cksum = 0;
817 GNITX_ASSERTF(tx, lntmsg, "no LNet message!", NULL);
819 kgnilnd_parse_lnet_rdma(lntmsg, &niov, &offset, &nob, &kiov,
823 tx->tx_msg.gnm_payload_cksum = kgnilnd_cksum_kiov(niov, kiov, offset, nob, dump_cksum);
825 tx->tx_msg.gnm_payload_cksum = kgnilnd_cksum(tx->tx_buffer, nob);
827 kgnilnd_dump_blob(D_BUFFS, "peer RDMA payload", tx->tx_buffer, nob);
831 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_SMSG_CKSUM3)) {
832 tx->tx_msg.gnm_payload_cksum += 0xd00d;
836 /* kgnilnd_verify_rdma_cksum()
837 * tx - PUT_DONE/GET_DONE matched tx.
838 * rx_cksum - received checksum to compare against.
839 * put_len - see kgnilnd_parse_lnet_rdma comments.
842 kgnilnd_verify_rdma_cksum(kgn_tx_t *tx, __u16 rx_cksum, int put_len)
846 unsigned int niov, offset, nob;
848 lnet_msg_t *lntmsg = tx->tx_lntmsg[0];
849 int dump_on_err = *kgnilnd_tunables.kgn_checksum_dump;
851 /* we can only match certain requests */
852 GNITX_ASSERTF(tx, ((tx->tx_msg.gnm_type == GNILND_MSG_GET_REQ) ||
853 (tx->tx_msg.gnm_type == GNILND_MSG_PUT_ACK) ||
854 (tx->tx_msg.gnm_type == GNILND_MSG_PUT_REQ_REV) ||
855 (tx->tx_msg.gnm_type == GNILND_MSG_GET_ACK_REV) ||
856 (tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE_REV) ||
857 (tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE_REV)),
858 "bad type %s", kgnilnd_msgtype2str(tx->tx_msg.gnm_type));
860 if ((tx->tx_msg.gnm_type == GNILND_MSG_PUT_REQ_REV) ||
861 (tx->tx_msg.gnm_type == GNILND_MSG_GET_ACK_REV)) {
866 if (*kgnilnd_tunables.kgn_checksum >= 3) {
867 GNIDBG_MSG(D_WARNING, &tx->tx_msg,
868 "no RDMA payload checksum when enabled");
873 GNITX_ASSERTF(tx, lntmsg, "no LNet message!", NULL);
875 kgnilnd_parse_lnet_rdma(lntmsg, &niov, &offset, &nob, &kiov, put_len);
878 cksum = kgnilnd_cksum_kiov(niov, kiov, offset, nob, 0);
880 cksum = kgnilnd_cksum(tx->tx_buffer, nob);
883 if (cksum != rx_cksum) {
884 GNIDBG_MSG(D_NETERROR, &tx->tx_msg,
885 "Bad RDMA payload checksum (%x expected %x); "
886 "kiov 0x%p niov %d nob %u offset %u",
887 cksum, rx_cksum, kiov, niov, nob, offset);
888 switch (dump_on_err) {
891 kgnilnd_cksum_kiov(niov, kiov, offset, nob, 1);
893 kgnilnd_dump_blob(D_BUFFS, "RDMA payload",
896 /* fall through to dump log */
898 libcfs_debug_dumplog();
904 /* kgnilnd_check_fma_rx will close conn, kill tx with error */
910 kgnilnd_mem_add_map_list(kgn_device_t *dev, kgn_tx_t *tx)
914 GNITX_ASSERTF(tx, list_empty(&tx->tx_map_list),
915 "already mapped!", NULL);
917 spin_lock(&dev->gnd_map_lock);
918 switch (tx->tx_buftype) {
920 GNIDBG_TX(D_EMERG, tx,
921 "SOFTWARE BUG: invalid mapping %d", tx->tx_buftype);
922 spin_unlock(&dev->gnd_map_lock);
926 case GNILND_BUF_PHYS_MAPPED:
927 bytes = tx->tx_phys_npages * PAGE_SIZE;
928 dev->gnd_map_nphys++;
929 dev->gnd_map_physnop += tx->tx_phys_npages;
932 case GNILND_BUF_VIRT_MAPPED:
934 dev->gnd_map_nvirt++;
935 dev->gnd_map_virtnob += tx->tx_nob;
939 if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_ACK ||
940 tx->tx_msg.gnm_type == GNILND_MSG_GET_REQ) {
941 atomic64_add(bytes, &dev->gnd_rdmaq_bytes_out);
942 GNIDBG_TX(D_NETTRACE, tx, "rdma ++ %d to "LPD64"",
943 bytes, atomic64_read(&dev->gnd_rdmaq_bytes_out));
946 atomic_inc(&dev->gnd_n_mdd);
947 atomic64_add(bytes, &dev->gnd_nbytes_map);
949 /* clear retrans to prevent any SMSG goofiness as that code uses the same counter */
952 /* we only get here in the valid cases */
953 list_add_tail(&tx->tx_map_list, &dev->gnd_map_list);
954 dev->gnd_map_version++;
955 spin_unlock(&dev->gnd_map_lock);
959 kgnilnd_mem_del_map_list(kgn_device_t *dev, kgn_tx_t *tx)
963 GNITX_ASSERTF(tx, !list_empty(&tx->tx_map_list),
964 "not mapped!", NULL);
965 spin_lock(&dev->gnd_map_lock);
967 switch (tx->tx_buftype) {
969 GNIDBG_TX(D_EMERG, tx,
970 "SOFTWARE BUG: invalid mapping %d", tx->tx_buftype);
971 spin_unlock(&dev->gnd_map_lock);
975 case GNILND_BUF_PHYS_UNMAPPED:
976 bytes = tx->tx_phys_npages * PAGE_SIZE;
977 dev->gnd_map_nphys--;
978 dev->gnd_map_physnop -= tx->tx_phys_npages;
981 case GNILND_BUF_VIRT_UNMAPPED:
983 dev->gnd_map_nvirt--;
984 dev->gnd_map_virtnob -= tx->tx_nob;
988 if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_ACK ||
989 tx->tx_msg.gnm_type == GNILND_MSG_GET_REQ) {
990 atomic64_sub(bytes, &dev->gnd_rdmaq_bytes_out);
991 LASSERTF(atomic64_read(&dev->gnd_rdmaq_bytes_out) >= 0,
992 "bytes_out negative! %ld\n", atomic64_read(&dev->gnd_rdmaq_bytes_out));
993 GNIDBG_TX(D_NETTRACE, tx, "rdma -- %d to "LPD64"",
994 bytes, atomic64_read(&dev->gnd_rdmaq_bytes_out));
997 atomic_dec(&dev->gnd_n_mdd);
998 atomic64_sub(bytes, &dev->gnd_nbytes_map);
1000 /* we only get here in the valid cases */
1001 list_del_init(&tx->tx_map_list);
1002 dev->gnd_map_version++;
1003 spin_unlock(&dev->gnd_map_lock);
1007 kgnilnd_map_buffer(kgn_tx_t *tx)
1009 kgn_conn_t *conn = tx->tx_conn;
1010 kgn_device_t *dev = conn->gnc_device;
1011 __u32 flags = GNI_MEM_READWRITE;
1014 /* The kgnilnd_mem_register(_segments) Gemini Driver functions can
1015 * be called concurrently as there are internal locks that protect
1016 * any data structures or HW resources. We just need to ensure
1017 * that our concurrency doesn't result in the kgn_device_t
1018 * getting nuked while we are in here */
1020 LASSERTF(conn != NULL, "tx %p with NULL conn, someone forgot"
1021 " to set tx_conn before calling %s\n", tx, __FUNCTION__);
1023 if (unlikely(CFS_FAIL_CHECK(CFS_FAIL_GNI_MAP_TX)))
1026 if (*kgnilnd_tunables.kgn_bte_relaxed_ordering) {
1027 flags |= GNI_MEM_RELAXED_PI_ORDERING;
1030 switch (tx->tx_buftype) {
1034 case GNILND_BUF_NONE:
1035 case GNILND_BUF_IMMEDIATE:
1036 case GNILND_BUF_IMMEDIATE_KIOV:
1037 case GNILND_BUF_PHYS_MAPPED:
1038 case GNILND_BUF_VIRT_MAPPED:
1041 case GNILND_BUF_PHYS_UNMAPPED:
1042 GNITX_ASSERTF(tx, tx->tx_phys != NULL, "physical buffer not there!", NULL);
1043 rrc = kgnilnd_mem_register_segments(dev->gnd_handle,
1044 tx->tx_phys, tx->tx_phys_npages, NULL,
1045 GNI_MEM_PHYS_SEGMENTS | flags,
1047 /* could race with other uses of the map counts, but this is ok
1048 * - this needs to turn into a non-fatal error soon to allow
1049 * handling of GART resource starvation, etc. */
1050 if (rrc != GNI_RC_SUCCESS) {
1051 GNIDBG_TX(D_NET, tx, "Can't map %d pages: dev %d "
1052 "phys %u pp %u, virt %u nob "LPU64"",
1053 tx->tx_phys_npages, dev->gnd_id,
1054 dev->gnd_map_nphys, dev->gnd_map_physnop,
1055 dev->gnd_map_nvirt, dev->gnd_map_virtnob);
1056 RETURN(rrc == GNI_RC_ERROR_RESOURCE ? -ENOMEM : -EINVAL);
1059 tx->tx_buftype = GNILND_BUF_PHYS_MAPPED;
1060 kgnilnd_mem_add_map_list(dev, tx);
1063 case GNILND_BUF_VIRT_UNMAPPED:
1064 rrc = kgnilnd_mem_register(dev->gnd_handle,
1065 (__u64)tx->tx_buffer, tx->tx_nob,
1066 NULL, flags, &tx->tx_map_key);
1067 if (rrc != GNI_RC_SUCCESS) {
1068 GNIDBG_TX(D_NET, tx, "Can't map %u bytes: dev %d "
1069 "phys %u pp %u, virt %u nob "LPU64"",
1070 tx->tx_nob, dev->gnd_id,
1071 dev->gnd_map_nphys, dev->gnd_map_physnop,
1072 dev->gnd_map_nvirt, dev->gnd_map_virtnob);
1073 RETURN(rrc == GNI_RC_ERROR_RESOURCE ? -ENOMEM : -EINVAL);
1076 tx->tx_buftype = GNILND_BUF_VIRT_MAPPED;
1077 kgnilnd_mem_add_map_list(dev, tx);
1078 if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_ACK ||
1079 tx->tx_msg.gnm_type == GNILND_MSG_GET_REQ) {
1080 atomic64_add(tx->tx_nob, &dev->gnd_rdmaq_bytes_out);
1081 GNIDBG_TX(D_NETTRACE, tx, "rdma ++ %d to %ld\n",
1082 tx->tx_nob, atomic64_read(&dev->gnd_rdmaq_bytes_out));
1090 kgnilnd_add_purgatory_tx(kgn_tx_t *tx)
1092 kgn_conn_t *conn = tx->tx_conn;
1093 kgn_mdd_purgatory_t *gmp;
1095 LIBCFS_ALLOC(gmp, sizeof(*gmp));
1096 LASSERTF(gmp != NULL, "couldn't allocate MDD purgatory member;"
1097 " asserting to avoid data corruption\n");
1098 if (tx->tx_buffer_copy)
1099 gmp->gmp_map_key = tx->tx_buffer_copy_map_key;
1101 gmp->gmp_map_key = tx->tx_map_key;
1103 atomic_inc(&conn->gnc_device->gnd_n_mdd_held);
1105 /* ensure that we don't have a blank purgatory - indicating the
1106 * conn is not already on purgatory lists - we'd never recover these
1107 * MDDs if that were the case */
1108 GNITX_ASSERTF(tx, conn->gnc_in_purgatory,
1109 "conn 0x%p->%s with NULL purgatory",
1110 conn, libcfs_nid2str(conn->gnc_peer->gnp_nid));
1112 /* link 'er up! - only place we really need to lock for
1113 * concurrent access */
1114 spin_lock(&conn->gnc_list_lock);
1115 list_add_tail(&gmp->gmp_list, &conn->gnc_mdd_list);
1116 spin_unlock(&conn->gnc_list_lock);
1120 kgnilnd_unmap_buffer(kgn_tx_t *tx, int error)
1124 int hold_timeout = 0;
1126 /* code below relies on +1 relationship ... */
1127 CLASSERT(GNILND_BUF_PHYS_MAPPED == (GNILND_BUF_PHYS_UNMAPPED + 1));
1128 CLASSERT(GNILND_BUF_VIRT_MAPPED == (GNILND_BUF_VIRT_UNMAPPED + 1));
1130 switch (tx->tx_buftype) {
1134 case GNILND_BUF_NONE:
1135 case GNILND_BUF_IMMEDIATE:
1136 case GNILND_BUF_PHYS_UNMAPPED:
1137 case GNILND_BUF_VIRT_UNMAPPED:
1139 case GNILND_BUF_IMMEDIATE_KIOV:
1140 if (tx->tx_phys != NULL) {
1141 vunmap(tx->tx_phys);
1142 } else if (tx->tx_phys == NULL && tx->tx_buffer != NULL) {
1143 kunmap(tx->tx_imm_pages[0]);
1145 /* clear to prevent kgnilnd_free_tx from thinking
1146 * this is an RDMA descriptor */
1150 case GNILND_BUF_PHYS_MAPPED:
1151 case GNILND_BUF_VIRT_MAPPED:
1152 LASSERT(tx->tx_conn != NULL);
1154 dev = tx->tx_conn->gnc_device;
1156 /* only want to hold if we are closing conn without
1157 * verified peer notification - the theory is that
1158 * a TX error can be communicated in all other cases */
1159 if (tx->tx_conn->gnc_state != GNILND_CONN_ESTABLISHED &&
1160 kgnilnd_check_purgatory_conn(tx->tx_conn)) {
1161 kgnilnd_add_purgatory_tx(tx);
1163 /* The timeout we give to kgni is a deadman stop only.
1164 * We set it high to ensure we don't have the kgni timer
1165 * fire before ours fires _and_ is handled */
1166 hold_timeout = GNILND_TIMEOUT2DEADMAN;
1168 GNIDBG_TX(D_NET, tx,
1169 "dev %p delaying MDD release for %dms key "LPX64"."LPX64"",
1170 tx->tx_conn->gnc_device, hold_timeout,
1171 tx->tx_map_key.qword1, tx->tx_map_key.qword2);
1173 if (tx->tx_buffer_copy != NULL) {
1174 rrc = kgnilnd_mem_deregister(dev->gnd_handle, &tx->tx_buffer_copy_map_key, hold_timeout);
1175 LASSERTF(rrc == GNI_RC_SUCCESS, "rrc %d\n", rrc);
1176 rrc = kgnilnd_mem_deregister(dev->gnd_handle, &tx->tx_map_key, 0);
1177 LASSERTF(rrc == GNI_RC_SUCCESS, "rrc %d\n", rrc);
1179 rrc = kgnilnd_mem_deregister(dev->gnd_handle, &tx->tx_map_key, hold_timeout);
1180 LASSERTF(rrc == GNI_RC_SUCCESS, "rrc %d\n", rrc);
1184 kgnilnd_mem_del_map_list(dev, tx);
1190 kgnilnd_tx_done(kgn_tx_t *tx, int completion)
1192 lnet_msg_t *lntmsg0, *lntmsg1;
1193 int status0, status1;
1194 lnet_ni_t *ni = NULL;
1195 kgn_conn_t *conn = tx->tx_conn;
1197 LASSERT(!in_interrupt());
1199 lntmsg0 = tx->tx_lntmsg[0]; tx->tx_lntmsg[0] = NULL;
1200 lntmsg1 = tx->tx_lntmsg[1]; tx->tx_lntmsg[1] = NULL;
1203 !(tx->tx_state & GNILND_TX_QUIET_ERROR) &&
1204 !kgnilnd_conn_clean_errno(completion)) {
1205 GNIDBG_TOMSG(D_NETERROR, &tx->tx_msg,
1206 "error %d on tx 0x%p->%s id %u/%d state %s age %ds",
1207 completion, tx, conn ?
1208 libcfs_nid2str(conn->gnc_peer->gnp_nid) : "<?>",
1209 tx->tx_id.txe_smsg_id, tx->tx_id.txe_idx,
1210 kgnilnd_tx_state2str(tx->tx_list_state),
1211 cfs_duration_sec((unsigned long)jiffies - tx->tx_qtime));
1214 /* The error codes determine if we hold onto the MDD */
1215 kgnilnd_unmap_buffer(tx, completion);
1217 /* we have to deliver a reply on lntmsg[1] for the GET, so make sure
1218 * we play nice with the error codes to avoid delivering a failed
1219 * REQUEST and then a REPLY event as well */
1221 /* return -EIO to lnet - it is the magic value for failed sends */
1222 if (tx->tx_msg.gnm_type == GNILND_MSG_GET_REQ) {
1224 status1 = completion;
1226 status0 = status1 = completion;
1229 tx->tx_buftype = GNILND_BUF_NONE;
1230 tx->tx_msg.gnm_type = GNILND_MSG_NONE;
1232 /* lnet_finalize doesn't do anything with the *ni, so it's ok for us to
1233 * leave ni NULL when we are a tx without a conn */
1235 ni = conn->gnc_peer->gnp_net->gnn_ni;
1237 spin_lock(&conn->gnc_tx_lock);
1239 LASSERTF(test_and_clear_bit(tx->tx_id.txe_idx,
1240 (volatile unsigned long *)&conn->gnc_tx_bits),
1241 "conn %p tx %p bit %d already cleared\n",
1242 conn, tx, tx->tx_id.txe_idx);
1244 LASSERTF(conn->gnc_tx_ref_table[tx->tx_id.txe_idx] != NULL,
1245 "msg_id %d already NULL\n", tx->tx_id.txe_idx);
1247 conn->gnc_tx_ref_table[tx->tx_id.txe_idx] = NULL;
1248 spin_unlock(&conn->gnc_tx_lock);
1251 kgnilnd_free_tx(tx);
1253 /* finalize AFTER freeing lnet msgs */
1255 /* warning - we should hold no locks here - calling lnet_finalize
1256 * could free up lnet credits, resulting in a call chain back into
1257 * the LND via kgnilnd_send and friends */
1259 lnet_finalize(ni, lntmsg0, status0);
1261 if (lntmsg1 != NULL) {
1262 lnet_finalize(ni, lntmsg1, status1);
1267 kgnilnd_txlist_done(struct list_head *txlist, int error)
1270 int err_printed = 0;
1272 if (list_empty(txlist))
1275 list_for_each_entry_safe(tx, txn, txlist, tx_list) {
1276 /* only print the first error */
1278 tx->tx_state |= GNILND_TX_QUIET_ERROR;
1279 list_del_init(&tx->tx_list);
1280 kgnilnd_tx_done(tx, error);
1285 kgnilnd_set_tx_id(kgn_tx_t *tx, kgn_conn_t *conn)
1289 spin_lock(&conn->gnc_tx_lock);
1291 /* ID zero is NOT ALLOWED!!! */
1294 id = find_next_zero_bit((unsigned long *)&conn->gnc_tx_bits,
1295 GNILND_MAX_MSG_ID, conn->gnc_next_tx);
1296 if (id == GNILND_MAX_MSG_ID) {
1297 if (conn->gnc_next_tx != 1) {
1298 /* we only searched from next_tx to end and didn't find
1299 * one, so search again from start */
1300 conn->gnc_next_tx = 1;
1303 /* couldn't find one! */
1304 spin_unlock(&conn->gnc_tx_lock);
1308 /* bump next_tx to prevent immediate reuse */
1309 conn->gnc_next_tx = id + 1;
1311 set_bit(id, (volatile unsigned long *)&conn->gnc_tx_bits);
1312 LASSERTF(conn->gnc_tx_ref_table[id] == NULL,
1313 "tx 0x%p already at id %d\n",
1314 conn->gnc_tx_ref_table[id], id);
1316 /* delay these until we have a valid ID - prevents bad clear of the bit
1317 * in kgnilnd_tx_done */
1319 tx->tx_id.txe_cqid = conn->gnc_cqid;
1321 tx->tx_id.txe_idx = id;
1322 conn->gnc_tx_ref_table[id] = tx;
1324 /* Using jiffies to help differentiate against TX reuse - with
1325 * the usual minimum of a 250HZ clock, we wrap jiffies on the same TX
1326 * if we are sending to the same node faster than 256000/sec.
1327 * To help guard against this, we OR in the tx_seq - that is 32 bits */
1329 tx->tx_id.txe_chips = (__u32)(jiffies | conn->gnc_tx_seq);
1331 GNIDBG_TX(D_NET, tx, "set cookie/id/bits", NULL);
1333 spin_unlock(&conn->gnc_tx_lock);
1338 kgnilnd_tx_should_retry(kgn_conn_t *conn, kgn_tx_t *tx)
1340 int max_retrans = *kgnilnd_tunables.kgn_max_retransmits;
1342 int log_retrans_level;
1344 /* I need kgni credits to send this. Replace tx at the head of the
1345 * fmaq and I'll get rescheduled when credits appear */
1348 conn->gnc_tx_retrans++;
1349 log_retrans = ((tx->tx_retrans < 25) || ((tx->tx_retrans % 25) == 0) ||
1350 (tx->tx_retrans > (max_retrans / 2)));
1351 log_retrans_level = tx->tx_retrans < (max_retrans / 2) ? D_NET : D_NETERROR;
1353 /* Decision time - either error, warn or just retransmit */
1355 /* we don't care about TX timeout - it could be that the network is slower
1356 * or throttled. We'll keep retransmitting - so if the network is so slow
1357 * that we fill up our mailbox, we'll keep trying to resend that msg
1358 * until we exceed the max_retrans _or_ gnc_last_rx expires, indicating
1359 * that the peer hasn't sent us any traffic in return */
1361 if (tx->tx_retrans > max_retrans) {
1362 /* this means we are not backing off the retransmits
1363 * in a healthy manner and are likely chewing up the
1364 * CPU cycles quite badly */
1365 GNIDBG_TOMSG(D_ERROR, &tx->tx_msg,
1366 "SOFTWARE BUG: too many retransmits (%d) for tx id %x "
1368 tx->tx_retrans, tx->tx_id, conn,
1369 libcfs_nid2str(conn->gnc_peer->gnp_nid));
1371 /* yes - double errors to help debug this condition */
1372 GNIDBG_TOMSG(D_NETERROR, &tx->tx_msg, "connection dead. "
1373 "unable to send to %s for %lu secs (%d tries)",
1374 libcfs_nid2str(tx->tx_conn->gnc_peer->gnp_nid),
1375 cfs_duration_sec(jiffies - tx->tx_cred_wait),
1378 kgnilnd_close_conn(conn, -ETIMEDOUT);
1380 /* caller should terminate */
1383 /* some reasonable throttling of the debug message */
1385 unsigned long now = jiffies;
1386 /* XXX Nic: Mystical TX debug here... */
1387 GNIDBG_SMSG_CREDS(log_retrans_level, conn);
1388 GNIDBG_TOMSG(log_retrans_level, &tx->tx_msg,
1389 "NOT_DONE on conn 0x%p->%s id %x retrans %d wait %dus"
1390 " last_msg %uus/%uus last_cq %uus/%uus",
1391 conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
1392 tx->tx_id, tx->tx_retrans,
1393 jiffies_to_usecs(now - tx->tx_cred_wait),
1394 jiffies_to_usecs(now - conn->gnc_last_tx),
1395 jiffies_to_usecs(now - conn->gnc_last_rx),
1396 jiffies_to_usecs(now - conn->gnc_last_tx_cq),
1397 jiffies_to_usecs(now - conn->gnc_last_rx_cq));
1399 /* caller should retry */
1404 /* caller must hold gnd_cq_mutex and must not unlock it afterwards, as we drop it
1405 * here to avoid bad ordering with state_lock */
1408 kgnilnd_sendmsg_nolock(kgn_tx_t *tx, void *immediate, unsigned int immediatenob,
1409 spinlock_t *state_lock, kgn_tx_list_state_t state)
1411 kgn_conn_t *conn = tx->tx_conn;
1412 kgn_msg_t *msg = &tx->tx_msg;
1415 unsigned long newest_last_rx, timeout;
1418 LASSERTF((msg->gnm_type == GNILND_MSG_IMMEDIATE) ?
1419 immediatenob <= *kgnilnd_tunables.kgn_max_immediate :
1421 "msg 0x%p type %d wrong payload size %d\n",
1422 msg, msg->gnm_type, immediatenob);
1424 /* make sure we catch all the cases where we'd send on a dirty old mbox
1425 * but allow case for sending CLOSE. Since this check is within the CQ
1426 * mutex barrier and the close message is only sent through
1427 * kgnilnd_send_conn_close the last message out the door will be the
1430 if (atomic_read(&conn->gnc_peer->gnp_dirty_eps) != 0 && msg->gnm_type != GNILND_MSG_CLOSE) {
1431 mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
1432 /* Return -ETIME; we are closing the connection already, so we don't want to
1433 * have this tx hit the wire. The tx will be killed by the calling function.
1434 * Once the EP is marked dirty the close message will be the last
1435 * thing to hit the wire */
1440 timeout = cfs_time_seconds(conn->gnc_timeout);
1442 newest_last_rx = GNILND_LASTRX(conn);
1444 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_SEND_TIMEOUT)) {
1445 now = now + (GNILND_TIMEOUTRX(timeout) * 2);
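/* fail injection: push 'now' past the rx deadline so the timeout check
 * below fires */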
1448 if (time_after_eq(now, newest_last_rx + GNILND_TIMEOUTRX(timeout))) {
1449 GNIDBG_CONN(D_NETERROR|D_CONSOLE, conn, "Can't send to %s after timeout lapse of %lu; TO %lu",
1450 libcfs_nid2str(conn->gnc_peer->gnp_nid),
1451 cfs_duration_sec(now - newest_last_rx),
1452 cfs_duration_sec(GNILND_TIMEOUTRX(timeout)));
1453 mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
1457 GNITX_ASSERTF(tx, (conn != NULL) && (tx->tx_id.txe_idx != 0), "tx id unset!", NULL);
1458 /* msg->gnm_srcnid is set when the message is initialized by whatever function is
1459 * creating the message; this allows the message to contain the correct LNET NID/NET needed
1460 * instead of the one that the peer/conn uses for sending the data.
1462 msg->gnm_connstamp = conn->gnc_my_connstamp;
1463 msg->gnm_payload_len = immediatenob;
1464 msg->gnm_seq = conn->gnc_tx_seq;
1466 /* always init here - kgn_checksum is a /sys module tunable
1467 * and can be flipped at any point, even between msg init and sending */
1469 if (*kgnilnd_tunables.kgn_checksum) {
1470 /* We must set here and not in kgnilnd_init_msg,
1471 * we could resend this msg many times
1472 * (NOT_DONE from gni_smsg_send below) and wouldn't pass
1473 * through init_msg again */
1474 msg->gnm_cksum = kgnilnd_cksum(msg, sizeof(kgn_msg_t));
1475 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_SMSG_CKSUM1)) {
1476 msg->gnm_cksum += 0xf00f;
1480 GNIDBG_TOMSG(D_NET, msg, "tx 0x%p conn 0x%p->%s sending SMSG sz %u id %x/%d [%p for %u]",
1481 tx, conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
1482 sizeof(kgn_msg_t), tx->tx_id.txe_smsg_id,
1483 tx->tx_id.txe_idx, immediate, immediatenob);
1485 if (unlikely(tx->tx_state & GNILND_TX_FAIL_SMSG)) {
1486 rrc = cfs_fail_val ? cfs_fail_val : GNI_RC_NOT_DONE;
1488 rrc = kgnilnd_smsg_send(conn->gnc_ephandle,
1489 msg, sizeof(*msg), immediate, immediatenob,
1490 tx->tx_id.txe_smsg_id);
1494 case GNI_RC_SUCCESS:
1496 conn->gnc_last_tx = jiffies;
1497 /* no locking here as LIVE isn't a list */
1498 kgnilnd_tx_add_state_locked(tx, NULL, conn, GNILND_TX_LIVE_FMAQ, 1);
1500 /* this needs to be checked under lock as it might be freed from a completion
1503 if (msg->gnm_type == GNILND_MSG_NOOP) {
1504 set_mb(conn->gnc_last_noop_sent, jiffies);
1507 /* serialize with seeing CQ events for completion on this, as well as
1509 mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
1511 atomic_inc(&conn->gnc_device->gnd_short_ntx);
1512 atomic64_add(immediatenob, &conn->gnc_device->gnd_short_txbytes);
1513 kgnilnd_peer_alive(conn->gnc_peer);
1514 GNIDBG_SMSG_CREDS(D_NET, conn);
1517 case GNI_RC_NOT_DONE:
1518 /* XXX Nic: We need to figure out how to track this
1519 * - there are bound to be good reasons for it,
1520 * but we want to know when it happens */
1522 mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
1523 /* We'll handle this error inline - makes the calling logic much more
1526 /* If no lock, caller doesn't want us to retry */
1527 if (state_lock == NULL) {
1531 retry_send = kgnilnd_tx_should_retry(conn, tx);
1533 /* add to head of list for the state and retries */
1534 spin_lock(state_lock);
1535 kgnilnd_tx_add_state_locked(tx, conn->gnc_peer, conn, state, 0);
1536 spin_unlock(state_lock);
1538 /* We only reschedule for a certain number of retries, then
1539 * we will wait for the CQ events indicating a release of SMSG
1541 if (tx->tx_retrans < (*kgnilnd_tunables.kgn_max_retransmits/4)) {
1542 kgnilnd_schedule_conn(conn);
1545 /* CQ event coming in signifies either TX completed or
1546 * RX receive. Either of these *could* free up credits
1547 * in the SMSG mbox and we should try sending again */
1548 GNIDBG_TX(D_NET, tx, "waiting for CQID %u event to resend",
1549 tx->tx_conn->gnc_cqid);
1550 /* use +ve return code to let upper layers know they
1551 * should stop looping on sends */
1558 /* handle bad retcode gracefully */
1559 mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
1564 /* kgnilnd_sendmsg has hard wait on gnd_cq_mutex */
1566 kgnilnd_sendmsg(kgn_tx_t *tx, void *immediate, unsigned int immediatenob,
1567 spinlock_t *state_lock, kgn_tx_list_state_t state)
1569 kgn_device_t *dev = tx->tx_conn->gnc_device;
1570 unsigned long timestamp;
1573 timestamp = jiffies;
1574 mutex_lock(&dev->gnd_cq_mutex);
1575 /* delay in jiffies - we are really concerned only with things that
1576 * result in a schedule() or really holding this off for a long time.
1577 * NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
1578 dev->gnd_mutex_delay += (long) jiffies - timestamp;
1580 rc = kgnilnd_sendmsg_nolock(tx, immediate, immediatenob, state_lock, state);
1586 /* returns -EAGAIN for lock miss, anything else < 0 is hard error, >=0 for success */
1588 kgnilnd_sendmsg_trylock(kgn_tx_t *tx, void *immediate, unsigned int immediatenob,
1589 spinlock_t *state_lock, kgn_tx_list_state_t state)
1591 kgn_conn_t *conn = tx->tx_conn;
1592 kgn_device_t *dev = conn->gnc_device;
1593 unsigned long timestamp;
1596 timestamp = jiffies;
1598 /* technically we are doing bad things with the read_lock on the peer_conn
1599 * table, but we shouldn't be sleeping inside here - and we don't sleep/block
1600 * for the mutex. I bet lockdep is gonna flag this one though... */
1602 /* there are a few cases where we don't want the immediate send - like
1603 * when we are in the scheduler thread and it'd harm the latency of
1604 * getting messages up to LNet */
1606 /* rmb for gnd_ready */
1608 if (conn->gnc_device->gnd_ready == GNILND_DEV_LOOP) {
1610 atomic_inc(&conn->gnc_device->gnd_fast_block);
1611 } else if (unlikely(kgnilnd_data.kgn_quiesce_trigger)) {
1612 /* don't hit HW during quiesce */
1614 } else if (unlikely(atomic_read(&conn->gnc_peer->gnp_dirty_eps))) {
1615 /* don't hit HW if stale EPs and conns are left to close */
1618 atomic_inc(&conn->gnc_device->gnd_fast_try);
1619 rc = mutex_trylock(&conn->gnc_device->gnd_cq_mutex);
1624 /* we got the mutex and weren't blocked */
1626 /* delay in jiffies - we are really concerned only with things that
1627 * result in a schedule() or really holding this off for a long time.
1628 * NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
1629 dev->gnd_mutex_delay += (long) jiffies - timestamp;
1631 atomic_inc(&conn->gnc_device->gnd_fast_ok);
1632 tx->tx_qtime = jiffies;
1633 tx->tx_state = GNILND_TX_WAITING_COMPLETION;
1634 rc = kgnilnd_sendmsg_nolock(tx, tx->tx_buffer, tx->tx_nob, &conn->gnc_list_lock, GNILND_TX_FMAQ);
1635 /* _nolock unlocks the mutex for us */
1641 /* lets us know if we can push this RDMA through now */
1643 kgnilnd_auth_rdma_bytes(kgn_device_t *dev, kgn_tx_t *tx)
1647 bytes_left = atomic64_sub_return(tx->tx_nob, &dev->gnd_rdmaq_bytes_ok);
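/* optimistically reserve the bytes; if that drives the quota negative,
 * give them back, count a stall, and arm the rdmaq timer to try again later */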
1649 if (bytes_left < 0) {
1650 atomic64_add(tx->tx_nob, &dev->gnd_rdmaq_bytes_ok);
1651 atomic_inc(&dev->gnd_rdmaq_nstalls);
1654 CDEBUG(D_NET, "no bytes to send, turning on timer for %lu\n",
1655 dev->gnd_rdmaq_deadline);
1656 mod_timer(&dev->gnd_rdmaq_timer, dev->gnd_rdmaq_deadline);
1657 /* we never del this timer - at worst it schedules us.. */
1664 /* this adds a TX to the queue pending throttling authorization before
1665 * we allow our remote peer to launch a PUT at us */
1667 kgnilnd_queue_rdma(kgn_conn_t *conn, kgn_tx_t *tx)
1671 /* we cannot go into send_mapped_tx from here as we are holding locks
1672 * and mem registration might end up allocating memory in kgni.
1673 * That said, we'll push this as far as we can into the queue process */
1674 rc = kgnilnd_auth_rdma_bytes(conn->gnc_device, tx);
1677 spin_lock(&conn->gnc_device->gnd_rdmaq_lock);
1678 kgnilnd_tx_add_state_locked(tx, NULL, conn, GNILND_TX_RDMAQ, 0);
1679 /* lets us know how delayed RDMA is */
1680 tx->tx_qtime = jiffies;
1681 spin_unlock(&conn->gnc_device->gnd_rdmaq_lock);
1683 /* we have RDMA authorized, now it just needs a MDD and to hit the wire */
1684 spin_lock(&tx->tx_conn->gnc_device->gnd_lock);
1685 kgnilnd_tx_add_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_MAPQ, 0);
1686 /* lets us know how delayed mapping is */
1687 tx->tx_qtime = jiffies;
1688 spin_unlock(&tx->tx_conn->gnc_device->gnd_lock);
1691 /* make sure we wake up sched to run this */
1692 kgnilnd_schedule_device(tx->tx_conn->gnc_device);
1695 /* push TX through state machine */
1697 kgnilnd_queue_tx(kgn_conn_t *conn, kgn_tx_t *tx)
1702 /* set the tx_id here, we delay it until we have an actual conn
1704 * in some cases, the tx_id is already set to provide for things
1705 * like RDMA completion cookies, etc */
1706 if (tx->tx_id.txe_idx == 0) {
1707 rc = kgnilnd_set_tx_id(tx, conn);
1709 kgnilnd_tx_done(tx, rc);
1714 CDEBUG(D_NET, "%s to conn %p for %s\n", kgnilnd_msgtype2str(tx->tx_msg.gnm_type),
1715 conn, libcfs_nid2str(conn->gnc_peer->gnp_nid));
1717 /* Only let NOOPs be sent while the fail loc is set; otherwise kill the tx.
1719 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_ONLY_NOOP) && (tx->tx_msg.gnm_type != GNILND_MSG_NOOP)) {
1720 kgnilnd_tx_done(tx, rc);
1724 switch (tx->tx_msg.gnm_type) {
1725 case GNILND_MSG_PUT_ACK:
1726 case GNILND_MSG_GET_REQ:
1727 case GNILND_MSG_PUT_REQ_REV:
1728 case GNILND_MSG_GET_ACK_REV:
1729 /* hijacking time! If this message will authorize our peer to
1730 * send his dirty little bytes in an RDMA, we need to get permission */
1731 kgnilnd_queue_rdma(conn, tx);
1733 case GNILND_MSG_IMMEDIATE:
1734 /* try to send right now, can help reduce latency */
1735 rc = kgnilnd_sendmsg_trylock(tx, tx->tx_buffer, tx->tx_nob, &conn->gnc_list_lock, GNILND_TX_FMAQ);
1738 /* it was sent, break out of switch to avoid default case of queueing */
1741 /* needs to queue to try again, so fall through to default case */
1742 case GNILND_MSG_NOOP:
1743 /* Just make sure this goes out first for this conn */
1745 /* fall through... */
1747 spin_lock(&conn->gnc_list_lock);
1748 kgnilnd_tx_add_state_locked(tx, conn->gnc_peer, conn, GNILND_TX_FMAQ, add_tail);
1749 tx->tx_qtime = jiffies;
1750 spin_unlock(&conn->gnc_list_lock);
1751 kgnilnd_schedule_conn(conn);
1756 kgnilnd_launch_tx(kgn_tx_t *tx, kgn_net_t *net, lnet_process_id_t *target)
1759 kgn_peer_t *new_peer = NULL;
1760 kgn_conn_t *conn = NULL;
1766 /* If I get here, I've committed to send, so I complete the tx with
1767 * failure on any problems */
1769 GNITX_ASSERTF(tx, tx->tx_conn == NULL,
1770 "tx already has connection %p", tx->tx_conn);
1772 /* do all of the peer & conn searching in one swoop - this avoids
1773 * nastiness when dropping locks and needing to maintain a sane state
1774 * in the face of stack reset or something else nuking peers & conns */
1776 /* I expect to find him, so only take a read lock */
1777 read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1779 peer = kgnilnd_find_peer_locked(target->nid);
1781 conn = kgnilnd_find_conn_locked(peer);
1782 /* this could be NULL during quiesce */
1784 /* Connection exists; queue message on it */
1785 kgnilnd_queue_tx(conn, tx);
1786 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1790 /* don't create a connection if the peer is marked down */
1791 if (peer->gnp_down == GNILND_RCA_NODE_DOWN) {
1792 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1798 /* creating peer or conn; I'll need a write lock... */
1799 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1801 CFS_RACE(CFS_FAIL_GNI_FIND_TARGET);
1803 node_state = kgnilnd_get_node_state(LNET_NIDADDR(target->nid));
1805 /* NB - this will not block during normal operations -
1806 * the only writer of this is in the startup/shutdown path. */
1807 rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);
1813 /* ignore previous peer entirely - we cycled the lock, so we
1814 * will create new peer and at worst drop it if peer is still
1816 rc = kgnilnd_create_peer_safe(&new_peer, target->nid, net, node_state);
1818 up_read(&kgnilnd_data.kgn_net_rw_sem);
1822 write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1823 up_read(&kgnilnd_data.kgn_net_rw_sem);
1825 /* search for peer again now that we have the lock
1826 * if we don't find it, add our new one to the list */
1827 kgnilnd_add_peer_locked(target->nid, new_peer, &peer);
1829 /* don't create a connection if the peer is not up */
1830 if (peer->gnp_down != GNILND_RCA_NODE_UP) {
1831 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1836 conn = kgnilnd_find_or_create_conn_locked(peer);
1838 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_DGRAM_DROP_TX)) {
1839 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1844 /* oh hey, found a conn now... magical */
1845 kgnilnd_queue_tx(conn, tx);
1847 /* no conn, must be trying to connect - so we queue for now */
1848 tx->tx_qtime = jiffies;
1849 kgnilnd_tx_add_state_locked(tx, peer, NULL, GNILND_TX_PEERQ, 1);
1851 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1854 kgnilnd_tx_done(tx, rc);
1859 kgnilnd_rdma(kgn_tx_t *tx, int type,
1860 kgn_rdma_desc_t *sink, unsigned int nob, __u64 cookie)
1862 kgn_conn_t *conn = tx->tx_conn;
1863 unsigned long timestamp;
1864 gni_post_type_t post_type;
1867 unsigned int desc_nob = nob;
1868 void *desc_buffer = tx->tx_buffer;
1869 gni_mem_handle_t desc_map_key = tx->tx_map_key;
1870 LASSERTF(kgnilnd_tx_mapped(tx),
1871 "unmapped tx %p\n", tx);
1872 LASSERTF(conn != NULL,
1873 "NULL conn on tx %p, naughty, naughty\n", tx);
1874 LASSERTF(nob <= sink->gnrd_nob,
1875 "nob %u > sink->gnrd_nob %d (%p)\n",
1876 nob, sink->gnrd_nob, sink);
1877 LASSERTF(nob <= tx->tx_nob,
1878 "nob %d > tx(%p)->tx_nob %d\n",
1879 nob, tx, tx->tx_nob);
1882 case GNILND_MSG_GET_DONE:
1883 case GNILND_MSG_PUT_DONE:
1884 post_type = GNI_POST_RDMA_PUT;
1886 case GNILND_MSG_GET_DONE_REV:
1887 case GNILND_MSG_PUT_DONE_REV:
1888 post_type = GNI_POST_RDMA_GET;
1891 CERROR("invalid msg type %s (%d)\n",
1892 kgnilnd_msgtype2str(type), type);
1895 if (post_type == GNI_POST_RDMA_GET) {
1896 /* Check for remote buffer / local buffer / length alignment. All must be 4-byte
1897 * aligned. If the local buffer is not aligned correctly, using the copy buffer
1898 * will fix that issue. If the length is misaligned, the copy buffer will also fix it; we end
1899 * up transferring extra bytes into the buffer but only copy the correct nob into the original
1900 * buffer. Remote offset correction is done through a combination of adjusting the offset,
1901 * making sure the length and addr are aligned, and copying the data into the correct location
1902 * once the transfer has completed.
1904 if ((((__u64)((unsigned long)tx->tx_buffer)) & 3) ||
1905 (sink->gnrd_addr & 3) ||
1908 tx->tx_offset = ((__u64)((unsigned long)sink->gnrd_addr)) & 3;
1910 kgnilnd_admin_addref(kgnilnd_data.kgn_rev_offset);
1912 if ((nob + tx->tx_offset) & 3) {
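/* round the transfer length up to the next 4-byte boundary */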
1913 desc_nob = ((nob + tx->tx_offset) + (4 - ((nob + tx->tx_offset) & 3)));
1914 kgnilnd_admin_addref(kgnilnd_data.kgn_rev_length);
1916 desc_nob = (nob + tx->tx_offset);
1919 if (tx->tx_buffer_copy == NULL) {
1920 /* Allocate the largest copy buffer we will need; this prevents us from overwriting data
1921 * and requires that we allocate at most a few extra bytes. */
1922 tx->tx_buffer_copy = vmalloc(desc_nob);
1924 if (!tx->tx_buffer_copy) {
1925 /* allocation of the buffer failed - NAK the RDMA */
1926 kgnilnd_nak_rdma(tx->tx_conn, tx->tx_msg.gnm_type, -EFAULT, cookie, tx->tx_msg.gnm_srcnid);
1927 kgnilnd_tx_done(tx, -EFAULT);
1930 kgnilnd_admin_addref(kgnilnd_data.kgn_rev_copy_buff);
1931 rc = kgnilnd_mem_register(conn->gnc_device->gnd_handle, (__u64)tx->tx_buffer_copy, desc_nob, NULL, GNI_MEM_READWRITE, &tx->tx_buffer_copy_map_key);
1932 if (rc != GNI_RC_SUCCESS) {
1933 /* Registration failed - NAK the RDMA and kill the tx. */
1934 vfree(tx->tx_buffer_copy);
1935 tx->tx_buffer_copy = NULL;
1936 kgnilnd_nak_rdma(tx->tx_conn, tx->tx_msg.gnm_type, -EFAULT, cookie, tx->tx_msg.gnm_srcnid);
1937 kgnilnd_tx_done(tx, -EFAULT);
1941 desc_map_key = tx->tx_buffer_copy_map_key;
1942 desc_buffer = tx->tx_buffer_copy;
1946 memset(&tx->tx_rdma_desc, 0, sizeof(tx->tx_rdma_desc));
1947 tx->tx_rdma_desc.post_id = tx->tx_id.txe_cookie;
1948 tx->tx_rdma_desc.type = post_type;
1949 tx->tx_rdma_desc.cq_mode = GNI_CQMODE_GLOBAL_EVENT;
1950 tx->tx_rdma_desc.local_addr = (__u64)((unsigned long)desc_buffer);
1951 tx->tx_rdma_desc.local_mem_hndl = desc_map_key;
1952 tx->tx_rdma_desc.remote_addr = sink->gnrd_addr - tx->tx_offset;
1953 tx->tx_rdma_desc.remote_mem_hndl = sink->gnrd_key;
1954 tx->tx_rdma_desc.length = desc_nob;
1955 tx->tx_nob_rdma = nob;
1956 if (*kgnilnd_tunables.kgn_bte_dlvr_mode)
1957 tx->tx_rdma_desc.dlvr_mode = *kgnilnd_tunables.kgn_bte_dlvr_mode;
1958 /* prep final completion message */
1959 kgnilnd_init_msg(&tx->tx_msg, type, tx->tx_msg.gnm_srcnid);
1960 tx->tx_msg.gnm_u.completion.gncm_cookie = cookie;
1961 /* send actual size RDMA'd in retval */
1962 tx->tx_msg.gnm_u.completion.gncm_retval = nob;
1964 kgnilnd_compute_rdma_cksum(tx, nob);
1967 kgnilnd_queue_tx(conn, tx);
1971 /* Don't lie (CLOSE == RDMA idle) */
1972 LASSERTF(!conn->gnc_close_sent, "tx %p on conn %p after close sent %d\n",
1973 tx, conn, conn->gnc_close_sent);
1975 GNIDBG_TX(D_NET, tx, "Post RDMA type 0x%02x dlvr_mode 0x%x cookie:"LPX64,
1976 type, tx->tx_rdma_desc.dlvr_mode, cookie);
1978 /* set CQ dedicated for RDMA */
1979 tx->tx_rdma_desc.src_cq_hndl = conn->gnc_device->gnd_snd_rdma_cqh;
1981 timestamp = jiffies;
1982 mutex_lock(&conn->gnc_device->gnd_cq_mutex);
1983 /* delay in jiffies - we are really concerned only with things that
1984 * result in a schedule() or really holding this off for a long time.
1985 * NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
1986 conn->gnc_device->gnd_mutex_delay += (long) jiffies - timestamp;
1988 rrc = kgnilnd_post_rdma(conn->gnc_ephandle, &tx->tx_rdma_desc);
1990 spin_lock(&conn->gnc_list_lock);
1991 kgnilnd_tx_add_state_locked(tx, conn->gnc_peer, conn, GNILND_TX_LIVE_RDMAQ, 1);
1992 tx->tx_qtime = jiffies;
1993 spin_unlock(&conn->gnc_list_lock);
1995 mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
1997 /* XXX Nic: is this a place we should handle more errors for
1998 * robustness sake */
1999 LASSERT(rrc == GNI_RC_SUCCESS);
2004 kgnilnd_alloc_rx(void)
2008 rx = kmem_cache_alloc(kgnilnd_data.kgn_rx_cache, GFP_ATOMIC);
2010 CERROR("failed to allocate rx\n");
2013 CDEBUG(D_MALLOC, "slab-alloced 'rx': %lu at %p.\n",
2016 /* no memset to zero, we'll always fill all members */
2020 /* release is to just free connection resources
2021 * we use this for the eager path after copying */
2023 kgnilnd_release_msg(kgn_conn_t *conn)
2026 unsigned long timestamp;
2028 CDEBUG(D_NET, "consuming %p\n", conn);
2030 timestamp = jiffies;
2031 mutex_lock(&conn->gnc_device->gnd_cq_mutex);
2032 /* delay in jiffies - we are really concerned only with things that
2033 * result in a schedule() or in holding this off for a long time.
2034 * NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
2035 conn->gnc_device->gnd_mutex_delay += (long) jiffies - timestamp;
2037 rrc = kgnilnd_smsg_release(conn->gnc_ephandle);
2038 mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
2040 LASSERTF(rrc == GNI_RC_SUCCESS, "bad rrc %d\n", rrc);
2041 GNIDBG_SMSG_CREDS(D_NET, conn);
2047 kgnilnd_consume_rx(kgn_rx_t *rx)
2049 kgn_conn_t *conn = rx->grx_conn;
2050 kgn_msg_t *rxmsg = rx->grx_msg;
2052 /* if we are eager, free the msg copy allocated in kgnilnd_eager_recv */
2053 if (unlikely(rx->grx_eager)) {
2054 LIBCFS_FREE(rxmsg, sizeof(*rxmsg) + *kgnilnd_tunables.kgn_max_immediate);
2055 atomic_dec(&kgnilnd_data.kgn_neager_allocs);
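/* added note: this balances the atomic_inc in kgnilnd_eager_recv(), so
 * kgn_neager_allocs tracks outstanding eager copies checked against
 * *kgn_eager_credits there */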
2057 /* release ref from eager_recv */
2058 kgnilnd_conn_decref(conn);
2060 GNIDBG_MSG(D_NET, rxmsg, "rx %p processed", rx);
2061 kgnilnd_release_msg(conn);
2064 kmem_cache_free(kgnilnd_data.kgn_rx_cache, rx);
2065 CDEBUG(D_MALLOC, "slab-freed 'rx': %lu at %p.\n",
2072 kgnilnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
2074 lnet_hdr_t *hdr = &lntmsg->msg_hdr;
2075 int type = lntmsg->msg_type;
2076 lnet_process_id_t target = lntmsg->msg_target;
2077 int target_is_router = lntmsg->msg_target_is_router;
2078 int routing = lntmsg->msg_routing;
2079 unsigned int niov = lntmsg->msg_niov;
2080 struct iovec *iov = lntmsg->msg_iov;
2081 lnet_kiov_t *kiov = lntmsg->msg_kiov;
2082 unsigned int offset = lntmsg->msg_offset;
2083 unsigned int nob = lntmsg->msg_len;
2084 unsigned int msg_vmflush = lntmsg->msg_vmflush;
2085 kgn_net_t *net = ni->ni_data;
2089 int reverse_rdma_flag = *kgnilnd_tunables.kgn_reverse_rdma;
2091 /* NB 'private' is different depending on what we're sending.... */
2092 LASSERT(!in_interrupt());
2094 CDEBUG(D_NET, "sending msg type %d with %d bytes in %d frags to %s\n",
2095 type, nob, niov, libcfs_id2str(target));
2097 LASSERTF(nob == 0 || niov > 0,
2098 "lntmsg %p nob %d niov %d\n", lntmsg, nob, niov);
2099 LASSERTF(niov <= LNET_MAX_IOV,
2100 "lntmsg %p niov %d\n", lntmsg, niov);
2102 /* payload is either all vaddrs or all pages */
2103 LASSERTF(!(kiov != NULL && iov != NULL),
2104 "lntmsg %p kiov %p iov %p\n", lntmsg, kiov, iov);
2107 mpflag = cfs_memory_pressure_get_and_set();
2111 CERROR("lntmsg %p with unexpected type %d\n",
2116 LASSERTF(nob == 0, "lntmsg %p nob %d\n",
2124 if (routing || target_is_router)
2125 break; /* send IMMEDIATE */
2127 /* it is safe to do a direct GET without mapping the buffer for RDMA, as we
2128 * check the eventual sink buffer here - if it is small enough, the remote
2129 * end is perfectly capable of returning the data in a short message.
2130 * The magic is that we call lnet_parse in kgnilnd_recv with rdma_req=0
2131 * for IMMEDIATE messages, which will have it send a real reply instead
2132 * of doing kgnilnd_recv to have the RDMA continued */
2133 if (lntmsg->msg_md->md_length <= *kgnilnd_tunables.kgn_max_immediate)
2136 if ((reverse_rdma_flag & GNILND_REVERSE_GET) == 0)
2137 tx = kgnilnd_new_tx_msg(GNILND_MSG_GET_REQ, ni->ni_nid);
2139 tx = kgnilnd_new_tx_msg(GNILND_MSG_GET_REQ_REV, ni->ni_nid);
2145 /* slightly different options as we might actually have a GET with a
2146 * MD_KIOV set but a non-NULL md_iov.iov */
2147 if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0)
2148 rc = kgnilnd_setup_rdma_buffer(tx, lntmsg->msg_md->md_niov,
2149 lntmsg->msg_md->md_iov.iov, NULL,
2150 0, lntmsg->msg_md->md_length);
2152 rc = kgnilnd_setup_rdma_buffer(tx, lntmsg->msg_md->md_niov,
2153 NULL, lntmsg->msg_md->md_iov.kiov,
2154 0, lntmsg->msg_md->md_length);
2156 CERROR("unable to setup buffer: %d\n", rc);
2157 kgnilnd_tx_done(tx, rc);
2162 tx->tx_lntmsg[1] = lnet_create_reply_msg(ni, lntmsg);
2163 if (tx->tx_lntmsg[1] == NULL) {
2164 CERROR("Can't create reply for GET to %s\n",
2165 libcfs_nid2str(target.nid));
2166 kgnilnd_tx_done(tx, rc);
2171 tx->tx_lntmsg[0] = lntmsg;
2172 if ((reverse_rdma_flag & GNILND_REVERSE_GET) == 0)
2173 tx->tx_msg.gnm_u.get.gngm_hdr = *hdr;
2175 tx->tx_msg.gnm_u.putreq.gnprm_hdr = *hdr;
2177 /* rest of tx_msg is setup just before it is sent */
2178 kgnilnd_launch_tx(tx, net, &target);
2180 case LNET_MSG_REPLY:
2182 /* to save on MDDs, we'll handle short kiov by vmap'ing
2183 * and sending via SMSG */
2184 if (nob <= *kgnilnd_tunables.kgn_max_immediate)
2187 if ((reverse_rdma_flag & GNILND_REVERSE_PUT) == 0)
2188 tx = kgnilnd_new_tx_msg(GNILND_MSG_PUT_REQ, ni->ni_nid);
2190 tx = kgnilnd_new_tx_msg(GNILND_MSG_PUT_REQ_REV, ni->ni_nid);
2197 rc = kgnilnd_setup_rdma_buffer(tx, niov, iov, kiov, offset, nob);
2199 kgnilnd_tx_done(tx, rc);
2204 tx->tx_lntmsg[0] = lntmsg;
2205 if ((reverse_rdma_flag & GNILND_REVERSE_PUT) == 0)
2206 tx->tx_msg.gnm_u.putreq.gnprm_hdr = *hdr;
2208 tx->tx_msg.gnm_u.get.gngm_hdr = *hdr;
2210 /* rest of tx_msg is setup just before it is sent */
2211 kgnilnd_launch_tx(tx, net, &target);
2215 /* send IMMEDIATE */
2217 LASSERTF(nob <= *kgnilnd_tunables.kgn_max_immediate,
2218 "lntmsg 0x%p too large %d\n", lntmsg, nob);
2220 tx = kgnilnd_new_tx_msg(GNILND_MSG_IMMEDIATE, ni->ni_nid);
2226 rc = kgnilnd_setup_immediate_buffer(tx, niov, iov, kiov, offset, nob);
2228 kgnilnd_tx_done(tx, rc);
2232 tx->tx_msg.gnm_u.immediate.gnim_hdr = *hdr;
2233 tx->tx_lntmsg[0] = lntmsg;
2234 kgnilnd_launch_tx(tx, net, &target);
2237 /* use stored value as we could have already finalized lntmsg here from a failed launch */
2239 cfs_memory_pressure_restore(mpflag);
2244 kgnilnd_setup_rdma(lnet_ni_t *ni, kgn_rx_t *rx, lnet_msg_t *lntmsg, int mlen)
2246 kgn_conn_t *conn = rx->grx_conn;
2247 kgn_msg_t *rxmsg = rx->grx_msg;
2248 unsigned int niov = lntmsg->msg_niov;
2249 struct iovec *iov = lntmsg->msg_iov;
2250 lnet_kiov_t *kiov = lntmsg->msg_kiov;
2251 unsigned int offset = lntmsg->msg_offset;
2252 unsigned int nob = lntmsg->msg_len;
2257 switch (rxmsg->gnm_type) {
2258 case GNILND_MSG_PUT_REQ_REV:
2259 done_type = GNILND_MSG_PUT_DONE_REV;
2262 case GNILND_MSG_GET_REQ:
2263 done_type = GNILND_MSG_GET_DONE;
2266 CERROR("invalid msg type %s (%d)\n",
2267 kgnilnd_msgtype2str(rxmsg->gnm_type),
2272 tx = kgnilnd_new_tx_msg(done_type, ni->ni_nid);
2276 rc = kgnilnd_set_tx_id(tx, conn);
2280 rc = kgnilnd_setup_rdma_buffer(tx, niov, iov, kiov, offset, nob);
2284 tx->tx_lntmsg[0] = lntmsg;
2285 tx->tx_getinfo = rxmsg->gnm_u.get;
2287 /* we only queue from kgnilnd_recv - we might get called from other contexts
2288 * and we don't want to block the mutex in those cases */
2290 spin_lock(&tx->tx_conn->gnc_device->gnd_lock);
2291 kgnilnd_tx_add_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_MAPQ, 1);
2292 spin_unlock(&tx->tx_conn->gnc_device->gnd_lock);
2293 kgnilnd_schedule_device(tx->tx_conn->gnc_device);
2298 kgnilnd_tx_done(tx, rc);
2299 kgnilnd_nak_rdma(conn, done_type, rc, rxmsg->gnm_u.get.gngm_cookie, ni->ni_nid);
2301 lnet_finalize(ni, lntmsg, rc);
2305 kgnilnd_eager_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
2308 kgn_rx_t *rx = private;
2309 kgn_conn_t *conn = rx->grx_conn;
2310 kgn_msg_t *rxmsg = rx->grx_msg;
2311 kgn_msg_t *eagermsg = NULL;
2312 kgn_peer_t *peer = NULL;
2313 kgn_conn_t *found_conn = NULL;
2315 GNIDBG_MSG(D_NET, rxmsg, "eager recv for conn %p, rxmsg %p, lntmsg %p",
2316 conn, rxmsg, lntmsg);
2318 if (rxmsg->gnm_payload_len > *kgnilnd_tunables.kgn_max_immediate) {
2319 GNIDBG_MSG(D_ERROR, rxmsg, "payload too large %d",
2320 rxmsg->gnm_payload_len);
2323 /* Grab a read lock so the connection doesn't disappear on us
2324 * while we look it up
2326 read_lock(&kgnilnd_data.kgn_peer_conn_lock);
2328 peer = kgnilnd_find_peer_locked(rxmsg->gnm_srcnid);
2330 found_conn = kgnilnd_find_conn_locked(peer);
2333 /* Verify the connection found is the same one that the message
2334 * is supposed to be using; if it is not, output an error message
2337 if (!peer || !found_conn
2338 || found_conn->gnc_peer_connstamp != rxmsg->gnm_connstamp) {
2339 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
2340 CERROR("Couldnt find matching peer %p or conn %p / %p\n",
2341 peer, conn, found_conn);
2343 CERROR("Unexpected connstamp "LPX64"("LPX64" expected)"
2344 " from %s", rxmsg->gnm_connstamp,
2345 found_conn->gnc_peer_connstamp,
2346 libcfs_nid2str(peer->gnp_nid));
2351 /* add conn ref to ensure it doesn't go away until all eager
2352 * messages processed */
2353 kgnilnd_conn_addref(conn);
2355 /* Now that we have verified the connection is valid and added a
2356 * reference we can remove the read_lock on the peer_conn_lock */
2357 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
2359 /* we have no credits or buffers for this message, so copy it
2360 * somewhere for a later kgnilnd_recv */
2361 if (atomic_read(&kgnilnd_data.kgn_neager_allocs) >=
2362 *kgnilnd_tunables.kgn_eager_credits) {
2363 CERROR("Out of eager credits to %s\n",
2364 libcfs_nid2str(conn->gnc_peer->gnp_nid));
2368 atomic_inc(&kgnilnd_data.kgn_neager_allocs);
2370 LIBCFS_ALLOC(eagermsg, sizeof(*eagermsg) + *kgnilnd_tunables.kgn_max_immediate);
2371 if (eagermsg == NULL) {
2372 kgnilnd_conn_decref(conn);
2373 CERROR("couldn't allocate eager rx message for conn %p to %s\n",
2374 conn, libcfs_nid2str(conn->gnc_peer->gnp_nid));
2378 /* copy msg and payload */
2379 memcpy(eagermsg, rxmsg, sizeof(*rxmsg) + rxmsg->gnm_payload_len);
2380 rx->grx_msg = eagermsg;
2383 /* stash this for lnet_finalize on cancel-on-conn-close */
2384 rx->grx_lntmsg = lntmsg;
2386 /* keep the same rx_t, it just has a new grx_msg now */
2387 *new_private = private;
2389 /* release SMSG buffer */
2390 kgnilnd_release_msg(conn);
2396 kgnilnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
2397 int delayed, unsigned int niov,
2398 struct iovec *iov, lnet_kiov_t *kiov,
2399 unsigned int offset, unsigned int mlen, unsigned int rlen)
2401 kgn_rx_t *rx = private;
2402 kgn_conn_t *conn = rx->grx_conn;
2403 kgn_msg_t *rxmsg = rx->grx_msg;
2409 LASSERT(!in_interrupt());
2410 LASSERTF(mlen <= rlen, "%d <= %d\n", mlen, rlen);
2411 /* Either all pages or all vaddrs */
2412 LASSERTF(!(kiov != NULL && iov != NULL), "kiov %p iov %p\n",
2415 GNIDBG_MSG(D_NET, rxmsg, "conn %p, rxmsg %p, lntmsg %p"
2416 " niov=%d kiov=%p iov=%p offset=%d mlen=%d rlen=%d",
2417 conn, rxmsg, lntmsg,
2418 niov, kiov, iov, offset, mlen, rlen);
2420 /* we need to lock here as recv can be called from any context */
2421 read_lock(&kgnilnd_data.kgn_peer_conn_lock);
2422 if (rx->grx_eager && conn->gnc_state != GNILND_CONN_ESTABLISHED) {
2423 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
2425 /* someone closed the conn after we copied this out, nuke it */
2426 kgnilnd_consume_rx(rx);
2427 lnet_finalize(ni, lntmsg, conn->gnc_error);
2430 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
2432 switch (rxmsg->gnm_type) {
2434 GNIDBG_MSG(D_NETERROR, rxmsg, "conn %p, rx %p, rxmsg %p, lntmsg %p"
2435 " niov=%d kiov=%p iov=%p offset=%d mlen=%d rlen=%d",
2436 conn, rx, rxmsg, lntmsg, niov, kiov, iov, offset, mlen, rlen);
2439 case GNILND_MSG_IMMEDIATE:
2440 if (mlen > rxmsg->gnm_payload_len) {
2441 GNIDBG_MSG(D_ERROR, rxmsg,
2442 "Immediate message from %s too big: %d > %d",
2443 libcfs_nid2str(conn->gnc_peer->gnp_nid), mlen,
2444 rxmsg->gnm_payload_len);
2446 kgnilnd_consume_rx(rx);
2450 /* rxmsg[1] is a pointer to the payload, sitting in the buffer
2451 * right after the kgn_msg_t header - so it is just a 'cute' way of saying
2452 * rxmsg + sizeof(kgn_msg_t) */
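/* i.e. (void *)&rxmsg[1] == (char *)rxmsg + sizeof(kgn_msg_t) (added note) */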
2454 /* check payload checksum if sent */
2456 if (*kgnilnd_tunables.kgn_checksum >= 2 &&
2457 !rxmsg->gnm_payload_cksum &&
2458 rxmsg->gnm_payload_len != 0)
2459 GNIDBG_MSG(D_WARNING, rxmsg, "no msg payload checksum when enabled");
2461 if (rxmsg->gnm_payload_cksum != 0) {
2462 /* gnm_payload_len set in kgnilnd_sendmsg from tx->tx_nob,
2463 * which is what is used to calculate the cksum on the TX side */
2464 pload_cksum = kgnilnd_cksum(&rxmsg[1], rxmsg->gnm_payload_len);
2466 if (rxmsg->gnm_payload_cksum != pload_cksum) {
2467 GNIDBG_MSG(D_NETERROR, rxmsg,
2468 "Bad payload checksum (%x expected %x)",
2469 pload_cksum, rxmsg->gnm_payload_cksum);
2470 switch (*kgnilnd_tunables.kgn_checksum_dump) {
2472 kgnilnd_dump_blob(D_BUFFS, "bad payload checksum",
2473 &rxmsg[1], rxmsg->gnm_payload_len);
2474 /* fall through to dump */
2476 libcfs_debug_dumplog();
2482 /* checksum problems are fatal, kill the conn */
2483 kgnilnd_consume_rx(rx);
2484 kgnilnd_close_conn(conn, rc);
2490 lnet_copy_flat2kiov(
2492 *kgnilnd_tunables.kgn_max_immediate,
2493 &rxmsg[1], 0, mlen);
2497 *kgnilnd_tunables.kgn_max_immediate,
2498 &rxmsg[1], 0, mlen);
2500 kgnilnd_consume_rx(rx);
2501 lnet_finalize(ni, lntmsg, 0);
2504 case GNILND_MSG_PUT_REQ:
2505 /* LNET wants to truncate or drop transaction, sending NAK */
2507 kgnilnd_consume_rx(rx);
2508 lnet_finalize(ni, lntmsg, 0);
2510 /* only error if lntmsg == NULL, otherwise we are just
2511 * short circuiting the rdma process of 0 bytes */
2512 kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
2513 lntmsg == NULL ? -ENOENT : 0,
2514 rxmsg->gnm_u.get.gngm_cookie,
2518 /* sending ACK with sink buff. info */
2519 tx = kgnilnd_new_tx_msg(GNILND_MSG_PUT_ACK, ni->ni_nid);
2521 kgnilnd_consume_rx(rx);
2525 rc = kgnilnd_set_tx_id(tx, conn);
2527 GOTO(nak_put_req, rc);
2530 rc = kgnilnd_setup_rdma_buffer(tx, niov, iov, kiov, offset, mlen);
2532 GOTO(nak_put_req, rc);
2535 tx->tx_msg.gnm_u.putack.gnpam_src_cookie =
2536 rxmsg->gnm_u.putreq.gnprm_cookie;
2537 tx->tx_msg.gnm_u.putack.gnpam_dst_cookie = tx->tx_id.txe_cookie;
2538 tx->tx_msg.gnm_u.putack.gnpam_desc.gnrd_addr =
2539 (__u64)((unsigned long)tx->tx_buffer);
2540 tx->tx_msg.gnm_u.putack.gnpam_desc.gnrd_nob = mlen;
2542 tx->tx_lntmsg[0] = lntmsg; /* finalize this on RDMA_DONE */
2543 tx->tx_qtime = jiffies;
2544 /* we only queue from kgnilnd_recv - we might get called from other contexts
2545 * and we don't want to block the mutex in those cases */
2547 spin_lock(&tx->tx_conn->gnc_device->gnd_lock);
2548 kgnilnd_tx_add_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_MAPQ, 1);
2549 spin_unlock(&tx->tx_conn->gnc_device->gnd_lock);
2550 kgnilnd_schedule_device(tx->tx_conn->gnc_device);
2552 kgnilnd_consume_rx(rx);
2556 /* make sure we send an error back when the PUT fails */
2557 kgnilnd_nak_rdma(conn, rxmsg->gnm_type, rc, rxmsg->gnm_u.get.gngm_cookie, ni->ni_nid);
2558 kgnilnd_tx_done(tx, rc);
2559 kgnilnd_consume_rx(rx);
2561 /* return magic LNet network error */
2563 case GNILND_MSG_GET_REQ_REV:
2564 /* LNET wants to truncate or drop transaction, sending NAK */
2566 kgnilnd_consume_rx(rx);
2567 lnet_finalize(ni, lntmsg, 0);
2569 /* only error if lntmsg == NULL, otherwise we are just
2570 * short circuiting the rdma process of 0 bytes */
2571 kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
2572 lntmsg == NULL ? -ENOENT : 0,
2573 rxmsg->gnm_u.get.gngm_cookie,
2577 /* lntmsg can be null when parsing a LNET_GET */
2578 if (lntmsg != NULL) {
2579 /* sending ACK with sink buff. info */
2580 tx = kgnilnd_new_tx_msg(GNILND_MSG_GET_ACK_REV, ni->ni_nid);
2582 kgnilnd_consume_rx(rx);
2586 rc = kgnilnd_set_tx_id(tx, conn);
2588 GOTO(nak_get_req_rev, rc);
2591 rc = kgnilnd_setup_rdma_buffer(tx, niov, iov, kiov, offset, mlen);
2593 GOTO(nak_get_req_rev, rc);
2596 tx->tx_msg.gnm_u.putack.gnpam_src_cookie =
2597 rxmsg->gnm_u.putreq.gnprm_cookie;
2598 tx->tx_msg.gnm_u.putack.gnpam_dst_cookie = tx->tx_id.txe_cookie;
2599 tx->tx_msg.gnm_u.putack.gnpam_desc.gnrd_addr =
2600 (__u64)((unsigned long)tx->tx_buffer);
2601 tx->tx_msg.gnm_u.putack.gnpam_desc.gnrd_nob = mlen;
2603 tx->tx_lntmsg[0] = lntmsg; /* finalize this on RDMA_DONE */
2605 /* we only queue from kgnilnd_recv - we might get called from other contexts
2606 * and we don't want to block the mutex in those cases */
2608 spin_lock(&tx->tx_conn->gnc_device->gnd_lock);
2609 kgnilnd_tx_add_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_MAPQ, 1);
2610 spin_unlock(&tx->tx_conn->gnc_device->gnd_lock);
2611 kgnilnd_schedule_device(tx->tx_conn->gnc_device);
2614 kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
2616 rxmsg->gnm_u.get.gngm_cookie,
2620 kgnilnd_consume_rx(rx);
2624 /* make sure we send an error back when the GET fails */
2625 kgnilnd_nak_rdma(conn, rxmsg->gnm_type, rc, rxmsg->gnm_u.get.gngm_cookie, ni->ni_nid);
2626 kgnilnd_tx_done(tx, rc);
2627 kgnilnd_consume_rx(rx);
2629 /* return magic LNet network error */
2633 case GNILND_MSG_PUT_REQ_REV:
2634 /* LNET wants to truncate or drop transaction, sending NAK */
2636 kgnilnd_consume_rx(rx);
2637 lnet_finalize(ni, lntmsg, 0);
2639 /* only error if lntmsg == NULL, otherwise we are just
2640 * short circuiting the rdma process of 0 bytes */
2641 kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
2642 lntmsg == NULL ? -ENOENT : 0,
2643 rxmsg->gnm_u.get.gngm_cookie,
2648 if (lntmsg != NULL) {
2650 kgnilnd_setup_rdma(ni, rx, lntmsg, mlen);
2653 kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
2655 rxmsg->gnm_u.get.gngm_cookie,
2658 kgnilnd_consume_rx(rx);
2660 case GNILND_MSG_GET_REQ:
2661 if (lntmsg != NULL) {
2663 kgnilnd_setup_rdma(ni, rx, lntmsg, mlen);
2666 kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
2668 rxmsg->gnm_u.get.gngm_cookie,
2671 kgnilnd_consume_rx(rx);
2677 /* needs write_lock on kgn_peer_conn_lock held */
2679 kgnilnd_check_conn_timeouts_locked(kgn_conn_t *conn)
2681 unsigned long timeout, keepalive;
2682 unsigned long now = jiffies;
2683 unsigned long newest_last_rx;
2686 /* given that we found this conn hanging off a peer, it better damned
2687 * well be connected */
2688 LASSERTF(conn->gnc_state == GNILND_CONN_ESTABLISHED,
2689 "conn 0x%p->%s with bad state%s\n", conn,
2690 conn->gnc_peer ? libcfs_nid2str(conn->gnc_peer->gnp_nid)
2692 kgnilnd_conn_state2str(conn));
2694 CDEBUG(D_NET, "checking conn %p->%s timeout %d keepalive %d "
2695 "rx_diff %lu tx_diff %lu\n",
2696 conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
2697 conn->gnc_timeout, GNILND_TO2KA(conn->gnc_timeout),
2698 cfs_duration_sec(now - conn->gnc_last_rx_cq),
2699 cfs_duration_sec(now - conn->gnc_last_tx));
2701 timeout = cfs_time_seconds(conn->gnc_timeout);
2702 keepalive = cfs_time_seconds(GNILND_TO2KA(conn->gnc_timeout));
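/* For example (illustrative only): with gnc_timeout == 60s this works out to
 * roughly a 30s keepalive, assuming GNILND_TO2KA halves the timeout as the
 * GNILND_TIMEOUT2KEEPALIVE note in kgnilnd_reaper() suggests. */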
2704 /* just in case our lack of RX msg processing is gumming up the works - give the
2705 * remote an extra chance */
2707 newest_last_rx = GNILND_LASTRX(conn);
2709 if (time_after_eq(now, newest_last_rx + timeout)) {
2710 uint32_t level = D_CONSOLE|D_NETERROR;
2712 if (conn->gnc_peer->gnp_down == GNILND_RCA_NODE_DOWN) {
2715 GNIDBG_CONN(level, conn,
2716 "No gnilnd traffic received from %s for %lu "
2717 "seconds, terminating connection. Is node down? ",
2718 libcfs_nid2str(conn->gnc_peer->gnp_nid),
2719 cfs_duration_sec(now - newest_last_rx));
2723 /* we don't timeout on last_tx stalls - we are going to trust the
2724 * underlying network to let us know when sends are failing.
2725 * At worst, the peer will timeout our RX stamp and drop the connection
2726 * at that point. We'll then see his CLOSE or at worst his RX
2727 * stamp stop and drop the connection on our end */
2729 if (time_after_eq(now, conn->gnc_last_tx + keepalive)) {
2730 CDEBUG(D_NET, "sending NOOP -> %s (%p idle %lu(%lu)) "
2731 "last %lu/%lu/%lu %lus/%lus/%lus\n",
2732 libcfs_nid2str(conn->gnc_peer->gnp_nid), conn,
2733 cfs_duration_sec(jiffies - conn->gnc_last_tx),
2735 conn->gnc_last_noop_want, conn->gnc_last_noop_sent,
2736 conn->gnc_last_noop_cq,
2737 cfs_duration_sec(jiffies - conn->gnc_last_noop_want),
2738 cfs_duration_sec(jiffies - conn->gnc_last_noop_sent),
2739 cfs_duration_sec(jiffies - conn->gnc_last_noop_cq));
2740 set_mb(conn->gnc_last_noop_want, jiffies);
2741 atomic_inc(&conn->gnc_reaper_noop);
2742 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_NOOP_SEND))
2745 tx = kgnilnd_new_tx_msg(GNILND_MSG_NOOP, conn->gnc_peer->gnp_net->gnn_ni->ni_nid);
2748 kgnilnd_queue_tx(conn, tx);
2754 /* needs write_lock on kgn_peer_conn_lock held */
2756 kgnilnd_check_peer_timeouts_locked(kgn_peer_t *peer, struct list_head *todie,
2757 struct list_head *souls)
2759 unsigned long timeout;
2760 kgn_conn_t *conn, *connN = NULL;
2765 short releaseconn = 0;
2766 unsigned long first_rx = 0;
2768 CDEBUG(D_NET, "checking peer 0x%p->%s for timeouts; interval %lus\n",
2769 peer, libcfs_nid2str(peer->gnp_nid),
2770 peer->gnp_reconnect_interval);
2772 timeout = cfs_time_seconds(MAX(*kgnilnd_tunables.kgn_timeout,
2773 GNILND_MIN_TIMEOUT));
2775 conn = kgnilnd_find_conn_locked(peer);
2777 /* if there is a valid conn, check the queues for timeouts */
2778 rc = kgnilnd_check_conn_timeouts_locked(conn);
2780 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_RX_CLOSE_CLOSING)) {
2781 /* simulate a RX CLOSE after the timeout but before
2782 * the scheduler thread gets it */
2783 conn->gnc_close_recvd = GNILND_CLOSE_INJECT1;
2784 conn->gnc_peer_error = -ETIMEDOUT;
2786 /* Once we mark closed, any of the scheduler threads could
2787 * get it and move through before we hit the fail loc code */
2788 kgnilnd_close_conn_locked(conn, rc);
2790 /* first_rx is used to decide when to release a conn from purgatory.
2792 first_rx = conn->gnc_first_rx;
2796 /* now, regardless of starting a new conn, find TXs on the peer queue that
2797 * are old and smell bad - do this first so we don't trigger
2798 * reconnect on an empty queue if we time them all out */
2799 list_for_each_entry_safe(tx, txN, &peer->gnp_tx_queue, tx_list) {
2800 if (time_after_eq(jiffies, tx->tx_qtime + timeout)) {
2802 LCONSOLE_INFO("could not send to %s due to connection"
2803 " setup failure after %lu seconds\n",
2804 libcfs_nid2str(peer->gnp_nid),
2805 cfs_duration_sec(jiffies - tx->tx_qtime));
2807 kgnilnd_tx_del_state_locked(tx, peer, NULL,
2809 list_add_tail(&tx->tx_list, todie);
2814 if (count || peer->gnp_connecting == GNILND_PEER_KILL) {
2815 CDEBUG(D_NET, "canceling %d tx for peer 0x%p->%s\n",
2816 count, peer, libcfs_nid2str(peer->gnp_nid));
2817 /* if we nuked all the TX, stop peer connection attempt (if there is one..) */
2818 if (list_empty(&peer->gnp_tx_queue) ||
2819 peer->gnp_connecting == GNILND_PEER_KILL) {
2820 /* we pass down todie to use a common function - but we know there are
2822 kgnilnd_cancel_peer_connect_locked(peer, todie);
2826 /* Don't reconnect if we are still trying to clear out old conns.
2827 * This prevents us sending traffic on the new mbox before ensuring we are done
2828 * with the old one */
2829 reconnect = (peer->gnp_down == GNILND_RCA_NODE_UP) &&
2830 (atomic_read(&peer->gnp_dirty_eps) == 0);
2832 /* if we are not connected and there are tx on the gnp_tx_queue waiting
2833 * to be sent, we'll check the reconnect interval and fire up a new
2834 * connection request */
2836 if ((peer->gnp_connecting == GNILND_PEER_IDLE) &&
2837 (time_after_eq(jiffies, peer->gnp_reconnect_time)) &&
2838 !list_empty(&peer->gnp_tx_queue) && reconnect) {
2840 CDEBUG(D_NET, "starting connect to %s\n",
2841 libcfs_nid2str(peer->gnp_nid));
2842 LASSERTF(peer->gnp_connecting == GNILND_PEER_IDLE, "Peer was idle and we "
2843 "have a write_lock, state issue %d\n", peer->gnp_connecting);
2845 peer->gnp_connecting = GNILND_PEER_CONNECT;
2846 kgnilnd_peer_addref(peer); /* extra ref for connd */
2848 spin_lock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
2849 list_add_tail(&peer->gnp_connd_list,
2850 &peer->gnp_net->gnn_dev->gnd_connd_peers);
2851 spin_unlock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
2853 kgnilnd_schedule_dgram(peer->gnp_net->gnn_dev);
2856 /* fail_loc to allow us to delay release of purgatory */
2857 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_PURG_REL_DELAY))
2860 /* This check verifies that the new conn is actually being used, letting us
2861 * pull the old conns out of purgatory once they have actually seen traffic.
2862 * We only release a conn from purgatory during stack reset, admin command, or when a peer reconnects
2865 time_after(jiffies, first_rx + cfs_time_seconds(*kgnilnd_tunables.kgn_hardware_timeout))) {
2866 CDEBUG(D_INFO, "We can release peer %s conn's from purgatory %lu\n",
2867 libcfs_nid2str(peer->gnp_nid), first_rx + cfs_time_seconds(*kgnilnd_tunables.kgn_hardware_timeout));
2871 list_for_each_entry_safe (conn, connN, &peer->gnp_conns, gnc_list) {
2872 /* check for purgatory timeouts */
2873 if (conn->gnc_in_purgatory) {
2874 /* We cannot detach this conn from purgatory if it has not been closed, so we reschedule it;
2875 * that way, the next time we check it we can detach it from purgatory
2878 if (conn->gnc_state != GNILND_CONN_DONE) {
2879 /* Skip over conns that are currently not DONE. If they aren't already scheduled
2880 * for completion, something in the state machine is broken.
2885 /* We only detach a conn that is in purgatory if we have received a close message,
2886 * we have a new valid connection that has successfully received data, or an admin
2887 * command tells us we need to detach.
2890 if (conn->gnc_close_recvd || releaseconn || conn->gnc_needs_detach) {
2891 unsigned long waiting;
2893 waiting = (long) jiffies - conn->gnc_last_rx_cq;
2895 /* C.E: The remote peer is expected to close the
2896 * connection (see kgnilnd_check_conn_timeouts)
2897 * via the reaper thread and nuke out the MDD and
2898 * FMA resources after conn->gnc_timeout has expired
2899 * without an FMA RX */
2900 CDEBUG(D_NET, "Reconnected to %s in %lds or admin forced detach, dropping "
2901 " held resources\n",
2902 libcfs_nid2str(conn->gnc_peer->gnp_nid),
2903 cfs_duration_sec(waiting));
2905 kgnilnd_detach_purgatory_locked(conn, souls);
2914 kgnilnd_reaper_check(int idx)
2916 struct list_head *peers = &kgnilnd_data.kgn_peers[idx];
2917 struct list_head *ctmp, *ctmpN;
2918 struct list_head geriatrics;
2919 struct list_head souls;
2921 INIT_LIST_HEAD(&geriatrics);
2922 INIT_LIST_HEAD(&souls);
2924 write_lock(&kgnilnd_data.kgn_peer_conn_lock);
2926 list_for_each_safe(ctmp, ctmpN, peers) {
2927 kgn_peer_t *peer = NULL;
2929 /* don't timeout stuff if the network is mucked or shutting down */
2930 if (kgnilnd_check_hw_quiesce()) {
2933 peer = list_entry(ctmp, kgn_peer_t, gnp_list);
2935 kgnilnd_check_peer_timeouts_locked(peer, &geriatrics, &souls);
2938 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
2940 kgnilnd_txlist_done(&geriatrics, -EHOSTUNREACH);
2941 kgnilnd_release_purgatory_list(&souls);
2945 kgnilnd_update_reaper_timeout(long timeout)
2947 LASSERT(timeout > 0);
2949 spin_lock(&kgnilnd_data.kgn_reaper_lock);
2951 if (timeout < kgnilnd_data.kgn_new_min_timeout)
2952 kgnilnd_data.kgn_new_min_timeout = timeout;
2954 spin_unlock(&kgnilnd_data.kgn_reaper_lock);
2958 kgnilnd_reaper_poke_with_stick(unsigned long arg)
2960 wake_up(&kgnilnd_data.kgn_reaper_waitq);
2964 kgnilnd_reaper(void *arg)
2969 unsigned long next_check_time = jiffies;
2970 long current_min_timeout = MAX_SCHEDULE_TIMEOUT;
2971 struct timer_list timer;
2974 cfs_block_allsigs();
2976 /* all gnilnd threads need to run fairly urgently */
2977 set_user_nice(current, *kgnilnd_tunables.kgn_nice);
2978 spin_lock(&kgnilnd_data.kgn_reaper_lock);
2980 while (!kgnilnd_data.kgn_shutdown) {
2981 /* I wake up every 'p' seconds to check for timeouts on some
2982 * more peers. I try to check every connection 'n' times
2983 * within the global minimum of all keepalive and timeout
2984 * intervals, to ensure I attend to every connection within
2985 * (n+1)/n times its timeout intervals. */
2986 const int p = GNILND_REAPER_THREAD_WAKE;
2987 const int n = GNILND_REAPER_NCHECKS;
2989 /* to quiesce or to not quiesce, that is the question */
2990 if (unlikely(kgnilnd_data.kgn_quiesce_trigger)) {
2991 spin_unlock(&kgnilnd_data.kgn_reaper_lock);
2992 KGNILND_SPIN_QUIESCE;
2993 spin_lock(&kgnilnd_data.kgn_reaper_lock);
2996 /* careful with the jiffy wrap... */
2997 timeout = (long)(next_check_time - jiffies);
3000 prepare_to_wait(&kgnilnd_data.kgn_reaper_waitq, &wait,
3001 TASK_INTERRUPTIBLE);
3002 spin_unlock(&kgnilnd_data.kgn_reaper_lock);
3003 setup_timer(&timer, kgnilnd_reaper_poke_with_stick,
3005 mod_timer(&timer, (long) jiffies + timeout);
3007 /* check flag variables before committing */
3008 if (!kgnilnd_data.kgn_shutdown &&
3009 !kgnilnd_data.kgn_quiesce_trigger) {
3010 CDEBUG(D_INFO, "schedule timeout %ld (%lu sec)\n",
3011 timeout, cfs_duration_sec(timeout));
3013 CDEBUG(D_INFO, "awake after schedule\n");
3016 del_singleshot_timer_sync(&timer);
3017 spin_lock(&kgnilnd_data.kgn_reaper_lock);
3018 finish_wait(&kgnilnd_data.kgn_reaper_waitq, &wait);
3022 /* new_min_timeout is set from the conn timeouts and keepalive;
3023 * this should end up with a min timeout of
3024 * GNILND_TIMEOUT2KEEPALIVE(t) or roughly LND_TIMEOUT/2 */
3025 if (kgnilnd_data.kgn_new_min_timeout < current_min_timeout) {
3026 current_min_timeout = kgnilnd_data.kgn_new_min_timeout;
3027 CDEBUG(D_NET, "Set new min timeout %ld\n",
3028 current_min_timeout);
3031 spin_unlock(&kgnilnd_data.kgn_reaper_lock);
3033 /* Compute how many table entries to check now so I get round
3034 * the whole table fast enough (given that I do this at fixed
3035 * intervals of 'p' seconds) */
3036 chunk = *kgnilnd_tunables.kgn_peer_hash_size;
3037 if (kgnilnd_data.kgn_new_min_timeout > n * p)
3038 chunk = (chunk * n * p) /
3039 kgnilnd_data.kgn_new_min_timeout;
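/* Worked example with hypothetical values (added for clarity): if the peer
 * hash has 100 entries, p == 2, n == 4 and the minimum timeout is 40s, then
 * chunk = 100 * 4 * 2 / 40 = 20 entries per wakeup, so the whole table is
 * swept every 10s and each conn is checked ~4 times per timeout interval. */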
3042 for (i = 0; i < chunk; i++) {
3043 kgnilnd_reaper_check(hash_index);
3044 hash_index = (hash_index + 1) %
3045 *kgnilnd_tunables.kgn_peer_hash_size;
3047 next_check_time = (long) jiffies + cfs_time_seconds(p);
3048 CDEBUG(D_INFO, "next check at %lu or in %d sec\n", next_check_time, p);
3050 spin_lock(&kgnilnd_data.kgn_reaper_lock);
3053 spin_unlock(&kgnilnd_data.kgn_reaper_lock);
3055 kgnilnd_thread_fini();
3060 kgnilnd_recv_bte_get(kgn_tx_t *tx) {
3061 unsigned niov, offset, nob;
3063 lnet_msg_t *lntmsg = tx->tx_lntmsg[0];
3064 kgnilnd_parse_lnet_rdma(lntmsg, &niov, &offset, &nob, &kiov, tx->tx_nob_rdma);
3067 lnet_copy_flat2kiov(
3070 tx->tx_buffer_copy + tx->tx_offset, 0, nob);
3072 memcpy(tx->tx_buffer, tx->tx_buffer_copy + tx->tx_offset, nob);
3079 kgnilnd_check_rdma_cq(kgn_device_t *dev)
3082 gni_post_descriptor_t *desc;
3084 kgn_tx_ev_id_t ev_id;
3086 int should_retry, rc;
3087 long num_processed = 0;
3088 kgn_conn_t *conn = NULL;
3089 kgn_tx_t *tx = NULL;
3090 kgn_rdma_desc_t *rdesc;
3095 /* make sure we don't keep looping if we need to reset */
3096 if (unlikely(kgnilnd_data.kgn_quiesce_trigger)) {
3097 return num_processed;
3099 rc = kgnilnd_mutex_trylock(&dev->gnd_cq_mutex);
3101 /* we didn't get the mutex, so return that there is still work
3105 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_DELAY_RDMA)) {
3106 /* a bit gross - but we need a good way to test for
3107 * delayed RDMA completions and the easiest way to do
3108 * that is to delay the RDMA CQ events */
3109 rrc = GNI_RC_NOT_DONE;
3111 rrc = kgnilnd_cq_get_event(dev->gnd_snd_rdma_cqh, &event_data);
3114 if (rrc == GNI_RC_NOT_DONE) {
3115 mutex_unlock(&dev->gnd_cq_mutex);
3116 CDEBUG(D_INFO, "SEND RDMA CQ %d empty processed %ld\n",
3117 dev->gnd_id, num_processed);
3118 return num_processed;
3120 dev->gnd_sched_alive = jiffies;
3123 LASSERTF(!GNI_CQ_OVERRUN(event_data),
3124 "this is bad, somehow our credits didn't protect us"
3125 " from CQ overrun\n");
3126 LASSERTF(GNI_CQ_GET_TYPE(event_data) == GNI_CQ_EVENT_TYPE_POST,
3127 "rrc %d, GNI_CQ_GET_TYPE("LPX64") = "LPX64"\n", rrc,
3128 event_data, GNI_CQ_GET_TYPE(event_data));
3130 rrc = kgnilnd_get_completed(dev->gnd_snd_rdma_cqh, event_data,
3132 mutex_unlock(&dev->gnd_cq_mutex);
3134 /* XXX Nic: Need better error handling here... */
3135 LASSERTF((rrc == GNI_RC_SUCCESS) ||
3136 (rrc == GNI_RC_TRANSACTION_ERROR),
3139 ev_id.txe_cookie = desc->post_id;
3141 kgnilnd_validate_tx_ev_id(&ev_id, &tx, &conn);
3143 if (conn == NULL || tx == NULL) {
3144 /* either conn or tx was already nuked and this is a "late"
3145 * completion, so drop it */
3149 GNITX_ASSERTF(tx, tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE ||
3150 tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE ||
3151 tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE_REV ||
3152 tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE_REV,
3153 "tx %p with type %d\n", tx, tx->tx_msg.gnm_type);
3155 GNIDBG_TX(D_NET, tx, "RDMA completion for %d bytes", tx->tx_nob);
3157 if (tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE_REV) {
3158 lnet_set_reply_msg_len(NULL, tx->tx_lntmsg[1],
3159 tx->tx_msg.gnm_u.completion.gncm_retval);
3163 if (tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE_REV && desc->status == GNI_RC_SUCCESS) {
3164 if (tx->tx_buffer_copy != NULL)
3165 kgnilnd_recv_bte_get(tx);
3166 rc = kgnilnd_verify_rdma_cksum(tx, tx->tx_putinfo.gnpam_payload_cksum, tx->tx_nob_rdma);
3169 if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE_REV && desc->status == GNI_RC_SUCCESS) {
3170 if (tx->tx_buffer_copy != NULL)
3171 kgnilnd_recv_bte_get(tx);
3172 rc = kgnilnd_verify_rdma_cksum(tx, tx->tx_getinfo.gngm_payload_cksum, tx->tx_nob_rdma);
3175 /* remove from rdmaq */
3176 spin_lock(&conn->gnc_list_lock);
3177 kgnilnd_tx_del_state_locked(tx, NULL, conn, GNILND_TX_ALLOCD);
3178 spin_unlock(&conn->gnc_list_lock);
3180 if (likely(desc->status == GNI_RC_SUCCESS) && rc == 0) {
3181 atomic_inc(&dev->gnd_rdma_ntx);
3182 atomic64_add(tx->tx_nob, &dev->gnd_rdma_txbytes);
3183 /* transaction succeeded, add into fmaq */
3184 kgnilnd_queue_tx(conn, tx);
3185 kgnilnd_peer_alive(conn->gnc_peer);
3187 /* drop ref from kgnilnd_validate_tx_ev_id */
3188 kgnilnd_admin_decref(conn->gnc_tx_in_use);
3189 kgnilnd_conn_decref(conn);
3193 /* fall through to the TRANSACTION_ERROR case */
3196 /* get stringified version for log messages */
3197 kgnilnd_cq_error_str(event_data, &err_str, 256);
3198 kgnilnd_cq_error_recoverable(event_data, &should_retry);
3200 /* make sure we are not off in the weeds with this tx */
3201 if (tx->tx_retrans >
3202 *kgnilnd_tunables.kgn_max_retransmits) {
3203 GNIDBG_TX(D_NETERROR, tx,
3204 "giving up on TX, too many retries", NULL);
3208 GNIDBG_TX(D_NETERROR, tx, "RDMA %s error (%s)",
3209 should_retry ? "transient" : "unrecoverable", err_str);
3211 if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE ||
3212 tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE_REV) {
3213 rdesc = &tx->tx_putinfo.gnpam_desc;
3214 rnob = tx->tx_putinfo.gnpam_desc.gnrd_nob;
3215 rcookie = tx->tx_putinfo.gnpam_dst_cookie;
3217 rdesc = &tx->tx_getinfo.gngm_desc;
3218 rnob = tx->tx_lntmsg[0]->msg_len;
3219 rcookie = tx->tx_getinfo.gngm_cookie;
3224 tx->tx_msg.gnm_type,
3228 kgnilnd_nak_rdma(conn,
3229 tx->tx_msg.gnm_type,
3232 tx->tx_msg.gnm_srcnid);
3233 kgnilnd_tx_done(tx, -EFAULT);
3234 kgnilnd_close_conn(conn, -ECOMM);
3237 /* drop ref from kgnilnd_validate_tx_ev_id */
3238 kgnilnd_admin_decref(conn->gnc_tx_in_use);
3239 kgnilnd_conn_decref(conn);
3244 kgnilnd_check_fma_send_cq(kgn_device_t *dev)
3248 kgn_tx_ev_id_t ev_id;
3249 kgn_tx_t *tx = NULL;
3250 kgn_conn_t *conn = NULL;
3251 int queued_fma, saw_reply, rc;
3252 long num_processed = 0;
3255 /* make sure we don't keep looping if we need to reset */
3256 if (unlikely(kgnilnd_data.kgn_quiesce_trigger)) {
3257 return num_processed;
3260 rc = kgnilnd_mutex_trylock(&dev->gnd_cq_mutex);
3262 /* we didn't get the mutex, so return that there is still work
3267 rrc = kgnilnd_cq_get_event(dev->gnd_snd_fma_cqh, &event_data);
3268 mutex_unlock(&dev->gnd_cq_mutex);
3270 if (rrc == GNI_RC_NOT_DONE) {
3272 "SMSG send CQ %d not ready (data "LPX64") "
3273 "processed %ld\n", dev->gnd_id, event_data,
3275 return num_processed;
3278 dev->gnd_sched_alive = jiffies;
3281 LASSERTF(!GNI_CQ_OVERRUN(event_data),
3282 "this is bad, somehow our credits didn't "
3283 "protect us from CQ overrun\n");
3284 LASSERTF(GNI_CQ_GET_TYPE(event_data) == GNI_CQ_EVENT_TYPE_SMSG,
3285 "rrc %d, GNI_CQ_GET_TYPE("LPX64") = "LPX64"\n", rrc,
3286 event_data, GNI_CQ_GET_TYPE(event_data));
3288 /* if SMSG couldn't handle an error, time for conn to die */
3289 if (unlikely(rrc == GNI_RC_TRANSACTION_ERROR)) {
3292 /* need to take the write_lock to ensure atomicity
3293 * on the conn state if we need to close it */
3294 write_lock(&kgnilnd_data.kgn_peer_conn_lock);
3295 conn = kgnilnd_cqid2conn_locked(GNI_CQ_GET_INST_ID(event_data));
3297 /* Conn was destroyed? */
3299 "SMSG CQID lookup "LPX64" failed\n",
3300 GNI_CQ_GET_INST_ID(event_data));
3301 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
3305 kgnilnd_cq_error_str(event_data, &err_str, 256);
3306 CNETERR("SMSG send error to %s: rc %d (%s)\n",
3307 libcfs_nid2str(conn->gnc_peer->gnp_nid),
3309 kgnilnd_close_conn_locked(conn, -ECOMM);
3311 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
3313 /* no need to process rest of this tx -
3314 * it is getting canceled */
3318 /* fall through to GNI_RC_SUCCESS case */
3319 ev_id.txe_smsg_id = GNI_CQ_GET_MSG_ID(event_data);
3321 kgnilnd_validate_tx_ev_id(&ev_id, &tx, &conn);
3322 if (conn == NULL || tx == NULL) {
3323 /* either conn or tx was already nuked and this is a "late"
3324 * completion, so drop it */
3328 tx->tx_conn->gnc_last_tx_cq = jiffies;
3329 if (tx->tx_msg.gnm_type == GNILND_MSG_NOOP) {
3330 set_mb(conn->gnc_last_noop_cq, jiffies);
3333 /* lock tx_list_state and tx_state */
3334 spin_lock(&tx->tx_conn->gnc_list_lock);
3336 GNITX_ASSERTF(tx, tx->tx_list_state == GNILND_TX_LIVE_FMAQ,
3337 "state not GNILND_TX_LIVE_FMAQ", NULL);
3338 GNITX_ASSERTF(tx, tx->tx_state & GNILND_TX_WAITING_COMPLETION,
3339 "not waiting for completion", NULL);
3341 GNIDBG_TX(D_NET, tx, "SMSG complete tx_state %x rc %d",
3344 tx->tx_state &= ~GNILND_TX_WAITING_COMPLETION;
3346 /* This will trigger other FMA sends that were
3347 * pending this completion */
3348 queued_fma = !list_empty(&tx->tx_conn->gnc_fmaq);
3350 /* we either did not expect reply or we already got it */
3351 saw_reply = !(tx->tx_state & GNILND_TX_WAITING_REPLY);
3353 spin_unlock(&tx->tx_conn->gnc_list_lock);
3356 CDEBUG(D_NET, "scheduling conn 0x%p->%s for fmaq\n",
3358 libcfs_nid2str(conn->gnc_peer->gnp_nid));
3359 kgnilnd_schedule_conn(conn);
3362 /* If saw_reply is false, the tx could be nuked as soon as gnc_list_lock is dropped.
3363 * If saw_reply is true, we know that the tx is safe to use as the other thread
3364 * is already finished with it.
3368 /* no longer need to track on the live_fmaq */
3369 kgnilnd_tx_del_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_ALLOCD);
3371 if (tx->tx_state & GNILND_TX_PENDING_RDMA) {
3372 /* we already got reply & were waiting for
3373 * completion of initial send */
3374 /* to initiate RDMA transaction */
3375 GNIDBG_TX(D_NET, tx,
3376 "Pending RDMA 0x%p type 0x%02x",
3377 tx->tx_msg.gnm_type);
3378 tx->tx_state &= ~GNILND_TX_PENDING_RDMA;
3379 rc = kgnilnd_send_mapped_tx(tx, 0);
3380 GNITX_ASSERTF(tx, rc == 0, "RDMA send failed: %d\n", rc);
3382 /* we are done with this tx */
3383 GNIDBG_TX(D_NET, tx,
3384 "Done with tx type 0x%02x",
3385 tx->tx_msg.gnm_type);
3386 kgnilnd_tx_done(tx, tx->tx_rc);
3390 /* drop ref from kgnilnd_validate_tx_ev_id */
3391 kgnilnd_admin_decref(conn->gnc_tx_in_use);
3392 kgnilnd_conn_decref(conn);
3394 /* if we are waiting for a REPLY, we'll handle the tx then */
3395 } /* end for loop */
3399 kgnilnd_check_fma_rcv_cq(kgn_device_t *dev)
3404 long num_processed = 0;
3405 struct list_head *conns;
3406 struct list_head *tmp;
3410 /* make sure we don't keep looping if we need to reset */
3411 if (unlikely(kgnilnd_data.kgn_quiesce_trigger)) {
3412 return num_processed;
3415 rc = kgnilnd_mutex_trylock(&dev->gnd_cq_mutex);
3417 /* we didn't get the mutex, so return that there is still work
3421 rrc = kgnilnd_cq_get_event(dev->gnd_rcv_fma_cqh, &event_data);
3422 mutex_unlock(&dev->gnd_cq_mutex);
3424 if (rrc == GNI_RC_NOT_DONE) {
3425 CDEBUG(D_INFO, "SMSG RX CQ %d empty data "LPX64" "
3427 dev->gnd_id, event_data, num_processed);
3428 return num_processed;
3430 dev->gnd_sched_alive = jiffies;
3433 /* this is the only CQ that can really handle transient
3435 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_CQ_GET_EVENT)) {
3436 rrc = cfs_fail_val ? cfs_fail_val
3437 : GNI_RC_ERROR_RESOURCE;
3438 if (rrc == GNI_RC_ERROR_RESOURCE) {
3439 /* set overrun too */
3440 event_data |= (1UL << 63);
3441 LASSERTF(GNI_CQ_OVERRUN(event_data),
3442 "(1UL << 63) is no longer the bit to"
3443 "set to indicate CQ_OVERRUN\n");
3446 /* sender should get error event too and take care
3447 of failed transaction by re-transmitting */
3448 if (rrc == GNI_RC_TRANSACTION_ERROR) {
3449 CDEBUG(D_NET, "SMSG RX CQ error "LPX64"\n", event_data);
3453 if (likely(!GNI_CQ_OVERRUN(event_data))) {
3454 read_lock(&kgnilnd_data.kgn_peer_conn_lock);
3455 conn = kgnilnd_cqid2conn_locked(
3456 GNI_CQ_GET_INST_ID(event_data));
3458 CDEBUG(D_NET, "SMSG RX CQID lookup "LPU64" "
3459 "failed, dropping event "LPX64"\n",
3460 GNI_CQ_GET_INST_ID(event_data),
3463 CDEBUG(D_NET, "SMSG RX: CQID "LPU64" "
3465 GNI_CQ_GET_INST_ID(event_data),
3466 conn, conn->gnc_peer ?
3467 libcfs_nid2str(conn->gnc_peer->gnp_nid) :
3470 conn->gnc_last_rx_cq = jiffies;
3472 /* stash first rx so we can clear out purgatory.
3474 if (conn->gnc_first_rx == 0) {
3475 conn->gnc_first_rx = jiffies;
3477 kgnilnd_peer_alive(conn->gnc_peer);
3478 kgnilnd_schedule_conn(conn);
3480 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
3484 /* FMA CQ has overflowed: check ALL conns */
3485 CNETERR("SMSG RX CQ overflow: scheduling ALL "
3486 "conns on device %d\n", dev->gnd_id);
3488 for (rc = 0; rc < *kgnilnd_tunables.kgn_peer_hash_size; rc++) {
3490 read_lock(&kgnilnd_data.kgn_peer_conn_lock);
3491 conns = &kgnilnd_data.kgn_conns[rc];
3493 list_for_each(tmp, conns) {
3494 conn = list_entry(tmp, kgn_conn_t,
3497 if (conn->gnc_device == dev) {
3498 kgnilnd_schedule_conn(conn);
3499 conn->gnc_last_rx_cq = jiffies;
3503 /* don't block write lockers for too long... */
3504 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
3509 /* try_map_if_full should only be used when processing TX from the list of
3510 * backlog TX waiting on mappings to free up
3513 * try_map_if_full = 0: 0 (sent or queued), (-|+)errno failure of kgnilnd_sendmsg
3514 * try_map_if_full = 1: 0 (sent), -ENOMEM for caller to requeue, (-|+)errno failure of kgnilnd_sendmsg */
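/* Usage sketch (illustrative, not from the original source): the RDMA queue
 * and SMSG completion paths call kgnilnd_send_mapped_tx(tx, 0) and let a
 * failed mapping be parked on the MAPQ below, while a caller that is already
 * draining that backlog of unmapped TX would pass try_map_if_full = 1 and
 * handle the -ENOMEM requeue itself, per the contract above. */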
3517 kgnilnd_send_mapped_tx(kgn_tx_t *tx, int try_map_if_full)
3519 /* slight bit of a race if multiple people are calling, but at worst we'll have
3520 * the order altered just a bit... which would not be deterministic anyway */
3521 int rc = atomic_read(&tx->tx_conn->gnc_device->gnd_nq_map);
3523 GNIDBG_TX(D_NET, tx, "try %d nq_map %d", try_map_if_full, rc);
3525 /* We know that we have a GART reservation that should guarantee forward progress.
3526 * This means we don't need to take any extraordinary efforts if we are failing
3527 * mappings here - even if we are holding a very small number of these. */
3529 if (try_map_if_full || (rc == 0)) {
3530 rc = kgnilnd_map_buffer(tx);
3533 /* rc should be 0 if we mapped successfully here; if non-zero, we are queueing */
3535 /* if try_map_if_full set, they handle requeuing */
3536 if (unlikely(try_map_if_full)) {
3539 spin_lock(&tx->tx_conn->gnc_device->gnd_lock);
3540 kgnilnd_tx_add_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_MAPQ, 1);
3541 spin_unlock(&tx->tx_conn->gnc_device->gnd_lock);
3542 /* make sure we wake up sched to run this */
3543 kgnilnd_schedule_device(tx->tx_conn->gnc_device);
3544 /* return 0 as this is now queued for later sending */
3549 switch (tx->tx_msg.gnm_type) {
3553 /* GET_REQ and PUT_ACK are outbound messages sending our mapping key to
3554 * the remote node where the RDMA will be started.
3555 * Special case -EAGAIN logic - this should just be queued as if the mapping couldn't
3556 * be satisfied. The rest of the errors are "hard" errors that require
3557 * upper layers to handle themselves */
3558 case GNILND_MSG_GET_REQ:
3559 tx->tx_msg.gnm_u.get.gngm_desc.gnrd_key = tx->tx_map_key;
3560 tx->tx_msg.gnm_u.get.gngm_cookie = tx->tx_id.txe_cookie;
3561 tx->tx_msg.gnm_u.get.gngm_desc.gnrd_addr = (__u64)((unsigned long)tx->tx_buffer);
3562 tx->tx_msg.gnm_u.get.gngm_desc.gnrd_nob = tx->tx_nob;
3563 tx->tx_state = GNILND_TX_WAITING_COMPLETION | GNILND_TX_WAITING_REPLY;
3564 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_GET_REQ_AGAIN)) {
3565 tx->tx_state |= GNILND_TX_FAIL_SMSG;
3567 /* redirect to FMAQ on failure, no need to infinite loop here in MAPQ */
3568 rc = kgnilnd_sendmsg(tx, NULL, 0, &tx->tx_conn->gnc_list_lock, GNILND_TX_FMAQ);
3570 case GNILND_MSG_PUT_ACK:
3571 tx->tx_msg.gnm_u.putack.gnpam_desc.gnrd_key = tx->tx_map_key;
3572 tx->tx_state = GNILND_TX_WAITING_COMPLETION | GNILND_TX_WAITING_REPLY;
3573 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_PUT_ACK_AGAIN)) {
3574 tx->tx_state |= GNILND_TX_FAIL_SMSG;
3576 /* redirect to FMAQ on failure, no need to infinite loop here in MAPQ */
3577 rc = kgnilnd_sendmsg(tx, NULL, 0, &tx->tx_conn->gnc_list_lock, GNILND_TX_FMAQ);
3580 /* PUT_REQ and GET_DONE are where we do the actual RDMA */
3581 case GNILND_MSG_PUT_REQ:
3582 kgnilnd_rdma(tx, GNILND_MSG_PUT_DONE,
3583 &tx->tx_putinfo.gnpam_desc,
3584 tx->tx_putinfo.gnpam_desc.gnrd_nob,
3585 tx->tx_putinfo.gnpam_dst_cookie);
3587 case GNILND_MSG_GET_DONE:
3588 kgnilnd_rdma(tx, GNILND_MSG_GET_DONE,
3589 &tx->tx_getinfo.gngm_desc,
3590 tx->tx_lntmsg[0]->msg_len,
3591 tx->tx_getinfo.gngm_cookie);
3594 case GNILND_MSG_PUT_REQ_REV:
3595 tx->tx_msg.gnm_u.get.gngm_desc.gnrd_key = tx->tx_map_key;
3596 tx->tx_msg.gnm_u.get.gngm_cookie = tx->tx_id.txe_cookie;
3597 tx->tx_msg.gnm_u.get.gngm_desc.gnrd_addr = (__u64)((unsigned long)tx->tx_buffer);
3598 tx->tx_msg.gnm_u.get.gngm_desc.gnrd_nob = tx->tx_nob;
3599 tx->tx_state = GNILND_TX_WAITING_COMPLETION | GNILND_TX_WAITING_REPLY;
3600 kgnilnd_compute_rdma_cksum(tx, tx->tx_nob);
3601 tx->tx_msg.gnm_u.get.gngm_payload_cksum = tx->tx_msg.gnm_payload_cksum;
3603 rc = kgnilnd_sendmsg(tx, NULL, 0, &tx->tx_conn->gnc_list_lock, GNILND_TX_FMAQ);
3605 case GNILND_MSG_PUT_DONE_REV:
3606 kgnilnd_rdma(tx, GNILND_MSG_PUT_DONE_REV,
3607 &tx->tx_getinfo.gngm_desc,
3609 tx->tx_getinfo.gngm_cookie);
3611 case GNILND_MSG_GET_ACK_REV:
3612 tx->tx_msg.gnm_u.putack.gnpam_desc.gnrd_key = tx->tx_map_key;
3613 tx->tx_state = GNILND_TX_WAITING_COMPLETION | GNILND_TX_WAITING_REPLY;
3614 /* LNET_GETS are a special case for parse */
3615 kgnilnd_compute_rdma_cksum(tx, tx->tx_msg.gnm_u.putack.gnpam_desc.gnrd_nob);
3616 tx->tx_msg.gnm_u.putack.gnpam_payload_cksum = tx->tx_msg.gnm_payload_cksum;
3618 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_PUT_ACK_AGAIN))
3619 tx->tx_state |= GNILND_TX_FAIL_SMSG;
3621 /* redirect to FMAQ on failure, no need to infinite loop here in MAPQ */
3622 rc = kgnilnd_sendmsg(tx, NULL, 0, &tx->tx_conn->gnc_list_lock, GNILND_TX_FMAQ);
3624 case GNILND_MSG_GET_REQ_REV:
3625 kgnilnd_rdma(tx, GNILND_MSG_GET_DONE_REV,
3626 &tx->tx_putinfo.gnpam_desc,
3627 tx->tx_putinfo.gnpam_desc.gnrd_nob,
3628 tx->tx_putinfo.gnpam_dst_cookie);
3637 kgnilnd_process_fmaq(kgn_conn_t *conn)
3640 kgn_tx_t *tx = NULL;
3641 void *buffer = NULL;
3642 unsigned int nob = 0;
3645 /* NB 1. kgnilnd_sendmsg() may fail if I'm out of credits right now.
3646 * However I will be rescheduled by an FMA completion event
3647 * when I eventually get some.
3648 * NB 2. Sampling gnc_state here races with setting it elsewhere.
3649 * But it doesn't matter if I try to send a "real" message just
3650 * as I start closing because I'll get scheduled to send the
3653 /* Short circuit if the ep_handle is null; we can't send anyway. */
3654 if (conn->gnc_ephandle == NULL)
3657 LASSERTF(!conn->gnc_close_sent, "Conn %p close was sent\n", conn);
3659 spin_lock(&conn->gnc_list_lock);
3661 if (list_empty(&conn->gnc_fmaq)) {
3662 int keepalive = GNILND_TO2KA(conn->gnc_timeout);
3664 spin_unlock(&conn->gnc_list_lock);
3666 if (time_after_eq(jiffies, conn->gnc_last_tx + cfs_time_seconds(keepalive))) {
3667 CDEBUG(D_NET, "sending NOOP -> %s (%p idle %lu(%d)) "
3668 "last %lu/%lu/%lu %lus/%lus/%lus\n",
3669 libcfs_nid2str(conn->gnc_peer->gnp_nid), conn,
3670 cfs_duration_sec(jiffies - conn->gnc_last_tx),
3672 conn->gnc_last_noop_want, conn->gnc_last_noop_sent,
3673 conn->gnc_last_noop_cq,
3674 cfs_duration_sec(jiffies - conn->gnc_last_noop_want),
3675 cfs_duration_sec(jiffies - conn->gnc_last_noop_sent),
3676 cfs_duration_sec(jiffies - conn->gnc_last_noop_cq));
3677 atomic_inc(&conn->gnc_sched_noop);
3678 set_mb(conn->gnc_last_noop_want, jiffies);
3680 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_NOOP_SEND))
3683 tx = kgnilnd_new_tx_msg(GNILND_MSG_NOOP, conn->gnc_peer->gnp_net->gnn_ni->ni_nid);
3687 rc = kgnilnd_set_tx_id(tx, conn);
3689 kgnilnd_tx_done(tx, rc);
3695 tx = list_first_entry(&conn->gnc_fmaq, kgn_tx_t, tx_list);
3696 /* move from fmaq to allocd, kgnilnd_sendmsg will move to live_fmaq */
3697 kgnilnd_tx_del_state_locked(tx, NULL, conn, GNILND_TX_ALLOCD);
3698 more_to_do = !list_empty(&conn->gnc_fmaq);
3699 spin_unlock(&conn->gnc_list_lock);
3702 /* if there is no real TX or no NOOP to send, bail */
3707 if (!tx->tx_retrans)
3708 tx->tx_cred_wait = jiffies;
3710 GNITX_ASSERTF(tx, tx->tx_id.txe_smsg_id != 0,
3711 "tx with zero id", NULL);
3713 CDEBUG(D_NET, "sending regular msg: %p, type %s(0x%02x), cookie "LPX64"\n",
3714 tx, kgnilnd_msgtype2str(tx->tx_msg.gnm_type),
3715 tx->tx_msg.gnm_type, tx->tx_id.txe_cookie);
3719 switch (tx->tx_msg.gnm_type) {
3723 case GNILND_MSG_NOOP:
3724 case GNILND_MSG_CLOSE:
3725 case GNILND_MSG_IMMEDIATE:
3726 tx->tx_state = GNILND_TX_WAITING_COMPLETION;
3727 buffer = tx->tx_buffer;
3731 case GNILND_MSG_GET_DONE:
3732 case GNILND_MSG_PUT_DONE:
3733 case GNILND_MSG_PUT_DONE_REV:
3734 case GNILND_MSG_GET_DONE_REV:
3735 case GNILND_MSG_PUT_NAK:
3736 case GNILND_MSG_GET_NAK:
3737 case GNILND_MSG_GET_NAK_REV:
3738 case GNILND_MSG_PUT_NAK_REV:
3739 tx->tx_state = GNILND_TX_WAITING_COMPLETION;
3742 case GNILND_MSG_PUT_REQ:
3743 case GNILND_MSG_GET_REQ_REV:
3744 tx->tx_msg.gnm_u.putreq.gnprm_cookie = tx->tx_id.txe_cookie;
3746 case GNILND_MSG_PUT_ACK:
3747 case GNILND_MSG_PUT_REQ_REV:
3748 case GNILND_MSG_GET_ACK_REV:
3749 case GNILND_MSG_GET_REQ:
3750 /* This is really only to handle the retransmit of SMSG once these
3751 * two messages are set up in send_mapped_tx */
3752 tx->tx_state = GNILND_TX_WAITING_COMPLETION | GNILND_TX_WAITING_REPLY;
3756 if (likely(rc == 0)) {
3757 rc = kgnilnd_sendmsg(tx, buffer, nob, &conn->gnc_list_lock, GNILND_TX_FMAQ);
3761 /* don't explicitly reschedule here - we are short credits and will rely on
3762 * kgnilnd_sendmsg to resched the conn if need be */
3764 } else if (rc < 0) {
3765 /* bail: it wasn't sent and we didn't get EAGAIN indicating we should retrans -
3766 * almost certainly a software bug, but let's play nice with the other kids */
3767 kgnilnd_tx_done(tx, rc);
3768 /* just for fun, kick the peer in the arse - resetting the conn might help to correct
3769 * this return code, which was almost certainly caused by buggy software */
3770 kgnilnd_close_conn(conn, rc);
3774 CDEBUG(D_NET, "Rescheduling %p (more to do)\n", conn);
3775 kgnilnd_schedule_conn(conn);
3780 kgnilnd_process_rdmaq(kgn_device_t *dev)
3785 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_DELAY_RDMAQ)) {
3789 if (time_after_eq(jiffies, dev->gnd_rdmaq_deadline)) {
3790 unsigned long dead_bump;
3793 /* if we think we need to adjust, take lock to serialize and recheck */
3794 spin_lock(&dev->gnd_rdmaq_lock);
3795 if (time_after_eq(jiffies, dev->gnd_rdmaq_deadline)) {
3796 del_singleshot_timer_sync(&dev->gnd_rdmaq_timer);
3798 dead_bump = cfs_time_seconds(1) / *kgnilnd_tunables.kgn_rdmaq_intervals;
3800 /* roll the bucket forward */
3801 dev->gnd_rdmaq_deadline = jiffies + dead_bump;
3803 if (kgnilnd_data.kgn_rdmaq_override &&
3804 (*kgnilnd_tunables.kgn_rdmaq_intervals != 0)) {
3805 new_ok = kgnilnd_data.kgn_rdmaq_override / *kgnilnd_tunables.kgn_rdmaq_intervals;
3810 /* roll current outstanding forward to make sure we carry the outstanding
3811 * commitment forward
3812 * new_ok starts out as the whole interval value
3813 * - first subtract bytes_out from last interval, as that would push us over
3814 * strict limits for this interval
3815 * - second, set bytes_ok to new_ok to ensure it doesn't exceed the current auth
3817 * there is a small race here if someone is actively processing mappings and
3818 * adding to rdmaq_bytes_out, but it should be small as the mappings are triggered
3819 * quite quickly after kgnilnd_auth_rdma_bytes gives us the go-ahead
3820 * - if this gives us problems in the future, we could use a read/write lock
3821 * to protect the resetting of these values */
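/* Worked example with hypothetical numbers (added for clarity): with a
 * 400MB/s override and 4 intervals per second, each ~250ms bucket starts
 * with 100MB of authorization; if 10MB issued in the previous interval is
 * still outstanding, only 90MB of new RDMA is authorized this interval. */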
3822 new_ok -= atomic64_read(&dev->gnd_rdmaq_bytes_out);
3823 atomic64_set(&dev->gnd_rdmaq_bytes_ok, new_ok);
3825 CDEBUG(D_NET, "resetting rdmaq bytes to %ld, deadline +%lu -> %lu, "
3826 "current out %ld\n",
3827 atomic64_read(&dev->gnd_rdmaq_bytes_ok), dead_bump, dev->gnd_rdmaq_deadline,
3828 atomic64_read(&dev->gnd_rdmaq_bytes_out));
3830 spin_unlock(&dev->gnd_rdmaq_lock);
3833 spin_lock(&dev->gnd_rdmaq_lock);
3834 while (!list_empty(&dev->gnd_rdmaq)) {
3837 /* make sure we break out early on quiesce */
3838 if (unlikely(kgnilnd_data.kgn_quiesce_trigger)) {
3839 /* always break with lock held - we unlock outside loop */
3843 tx = list_first_entry(&dev->gnd_rdmaq, kgn_tx_t, tx_list);
3844 kgnilnd_tx_del_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_ALLOCD);
3847 /* sample with lock held, serializing with kgnilnd_complete_closed_conn */
3848 if (tx->tx_conn->gnc_state != GNILND_CONN_ESTABLISHED) {
3849 /* if conn is dying, mark tx in tx_ref_table for
3850 * kgnilnd_complete_closed_conn to finish up */
3851 kgnilnd_tx_add_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_DYING, 1);
3853 /* tx was moved to DYING, get next */
3856 spin_unlock(&dev->gnd_rdmaq_lock);
3858 rc = kgnilnd_auth_rdma_bytes(dev, tx);
3859 spin_lock(&dev->gnd_rdmaq_lock);
3862 /* no ticket! add back to head */
3863 kgnilnd_tx_add_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_RDMAQ, 0);
3864 /* clear found_work so scheduler threads wait for timer */
3868 /* TX is GO for launch */
3869 tx->tx_qtime = jiffies;
3870 kgnilnd_send_mapped_tx(tx, 0);
3874 spin_unlock(&dev->gnd_rdmaq_lock);
3880 kgnilnd_swab_rdma_desc(kgn_rdma_desc_t *d)
3882 __swab64s(&d->gnrd_key.qword1);
3883 __swab64s(&d->gnrd_key.qword2);
3884 __swab64s(&d->gnrd_addr);
3885 __swab32s(&d->gnrd_nob);
3888 #define kgnilnd_match_reply_either(w, x, y, z) _kgnilnd_match_reply(w, x, y, z)
3889 #define kgnilnd_match_reply(x, y, z) _kgnilnd_match_reply(x, y, GNILND_MSG_NONE, z)
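/* e.g. (illustrative sketch only, not from the original source) a GET
 * completion handler could look up its originating request with:
 *	tx = kgnilnd_match_reply(conn, GNILND_MSG_GET_REQ,
 *				 msg->gnm_u.completion.gncm_cookie);
 */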
3892 _kgnilnd_match_reply(kgn_conn_t *conn, int type1, int type2, __u64 cookie)
3894 kgn_tx_ev_id_t ev_id;
3897 /* we use the cookie from the original TX, so we can find the match
3898 * by parsing that and using the txe_idx */
3899 ev_id.txe_cookie = cookie;
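/* added note: this relies on txe_cookie and txe_idx overlaying the same
 * storage in kgn_tx_ev_id_t (an assumption stated here for clarity), so
 * writing the cookie lets us read back the tx index directly below */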
3901 tx = conn->gnc_tx_ref_table[ev_id.txe_idx];
3904 /* check tx to make sure kgni didn't eat it */
3905 GNITX_ASSERTF(tx, tx->tx_msg.gnm_magic == GNILND_MSG_MAGIC,
3906 "came back from kgni with bad magic %x\n", tx->tx_msg.gnm_magic);
3908 GNITX_ASSERTF(tx, ((tx->tx_id.txe_idx == ev_id.txe_idx) &&
3909 (tx->tx_id.txe_cookie == cookie)),
3910 "conn 0x%p->%s tx_ref_table hosed: wanted "
3911 "txe_cookie "LPX64" txe_idx %d "
3912 "found tx %p cookie "LPX64" txe_idx %d\n",
3913 conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
3914 cookie, ev_id.txe_idx,
3915 tx, tx->tx_id.txe_cookie, tx->tx_id.txe_idx);
3917 LASSERTF((((tx->tx_msg.gnm_type == type1) || (tx->tx_msg.gnm_type == type2)) &&
3918 (tx->tx_state & GNILND_TX_WAITING_REPLY)),
3919 "Unexpected TX type (%x, %x or %x) "
3920 "or state (%x, expected +%x) "
3921 "matched reply from %s\n",
3922 tx->tx_msg.gnm_type, type1, type2,
3923 tx->tx_state, GNILND_TX_WAITING_REPLY,
3924 libcfs_nid2str(conn->gnc_peer->gnp_nid));
3926 CWARN("Unmatched reply %02x, or %02x/"LPX64" from %s\n",
3927 type1, type2, cookie, libcfs_nid2str(conn->gnc_peer->gnp_nid));
3933 kgnilnd_complete_tx(kgn_tx_t *tx, int rc)
3936 kgn_conn_t *conn = tx->tx_conn;
3937 __u64 nob = tx->tx_nob;
3938 __u32 physnop = tx->tx_phys_npages;
3939 int id = tx->tx_id.txe_smsg_id;
3940 int buftype = tx->tx_buftype;
3941 gni_mem_handle_t hndl;
3942 hndl.qword1 = tx->tx_map_key.qword1;
3943 hndl.qword2 = tx->tx_map_key.qword2;
3945 spin_lock(&conn->gnc_list_lock);
3947 GNITX_ASSERTF(tx, tx->tx_state & GNILND_TX_WAITING_REPLY,
3948 "not waiting for reply", NULL);
3951 tx->tx_state &= ~GNILND_TX_WAITING_REPLY;
3953 if (rc == -EFAULT) {
3954 CDEBUG(D_NETERROR, "Error %d TX data: TX %p tx_id %x nob %16"LPF64"u physnop %8d buffertype %#8x MemHandle "LPX64"."LPX64"\n",
3955 rc, tx, id, nob, physnop, buftype, hndl.qword1, hndl.qword2);
3957 if(*kgnilnd_tunables.kgn_efault_lbug) {
3958 GNIDBG_TOMSG(D_NETERROR, &tx->tx_msg,
3959 "error %d on tx 0x%p->%s id %u/%d state %s age %ds",
3961 libcfs_nid2str(conn->gnc_peer->gnp_nid) : "<?>",
3962 tx->tx_id.txe_smsg_id, tx->tx_id.txe_idx,
3963 kgnilnd_tx_state2str(tx->tx_list_state),
3964 cfs_duration_sec((unsigned long) jiffies - tx->tx_qtime));
3969 if (!(tx->tx_state & GNILND_TX_WAITING_COMPLETION)) {
3970 kgnilnd_tx_del_state_locked(tx, NULL, conn, GNILND_TX_ALLOCD);
3971 /* sample under lock as follow on steps require gnc_list_lock
3972 * - or call kgnilnd_tx_done which requires no locks held over
3973 * call to lnet_finalize */
3976 spin_unlock(&conn->gnc_list_lock);
3979 kgnilnd_tx_done(tx, tx->tx_rc);
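/* Rough shape of the reply/completion handshake above: an RDMA TX waits on
 * both GNILND_TX_WAITING_COMPLETION (local SMSG event) and
 * GNILND_TX_WAITING_REPLY (peer's DONE/NAK). Whichever path clears its flag
 * last finds the other flag already gone, pulls the TX off the conn lists and
 * calls kgnilnd_tx_done(); otherwise it only drops its own flag and leaves the
 * TX for the other path to finish.
 */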
3984 kgnilnd_finalize_rx_done(kgn_tx_t *tx, kgn_msg_t *msg)
3987 kgn_conn_t *conn = tx->tx_conn;
3989 atomic_inc(&conn->gnc_device->gnd_rdma_nrx);
3990 atomic64_add(tx->tx_nob, &conn->gnc_device->gnd_rdma_rxbytes);
3992 /* the gncm_retval is passed in for PUTs */
3993 rc = kgnilnd_verify_rdma_cksum(tx, msg->gnm_payload_cksum,
3994 msg->gnm_u.completion.gncm_retval);
3996 kgnilnd_complete_tx(tx, rc);
4000 kgnilnd_check_fma_rx(kgn_conn_t *conn)
4008 kgn_peer_t *peer = conn->gnc_peer;
4011 __u16 tmp_cksum = 0, msg_cksum = 0;
4012 int repost = 1, saw_complete;
4013 unsigned long timestamp, newest_last_rx, timeout;
4017 /* Short circuit if the ep_handle is null.
4018 * It's likely that it's about to be closed as stale.
4020 if (conn->gnc_ephandle == NULL)
4023 timestamp = jiffies;
4024 mutex_lock(&conn->gnc_device->gnd_cq_mutex);
4025 /* delay in jiffies - we are really concerned only with things that
4026 * result in a schedule() or in holding this off for a long time.
4027 * NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
4028 conn->gnc_device->gnd_mutex_delay += (long) jiffies - timestamp;
4030 /* Resample current time as we have no idea how long it took to get the mutex */
4031 timestamp = jiffies;
4033 /* Check here how long it has been since we last received an rx; do this before
4034 * we call getnext in case the thread has been blocked for a while. If we
4035 * haven't received an rx within our timeout value we close the connection,
4036 * as we should assume the other side has closed it. This will
4037 * stop us from sending replies to a mailbox that is already in purgatory.
4040 timeout = cfs_time_seconds(conn->gnc_timeout);
4041 newest_last_rx = GNILND_LASTRX(conn);
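/* The liveness test below boils down to (sketch, using the names above):
 *
 *	if (time_after_eq(timestamp,
 *			  newest_last_rx + GNILND_TIMEOUTRX(timeout)))
 *		-> peer presumed unreachable, close the conn
 *
 * where timestamp is a fresh jiffies sample and GNILND_TIMEOUTRX() derives the
 * allowed rx window from the conn timeout.
 */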
4043 /* Error injection to validate that timestamp checking works and that the conn gets closed */
4044 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_RECV_TIMEOUT)) {
4045 timestamp = timestamp + (GNILND_TIMEOUTRX(timeout) * 2);
4048 if (time_after_eq(timestamp, newest_last_rx + (GNILND_TIMEOUTRX(timeout)))) {
4049 GNIDBG_CONN(D_NETERROR|D_CONSOLE, conn, "Can't receive from %s after timeout lapse of %lu; TO %lu",
4050 libcfs_nid2str(conn->gnc_peer->gnp_nid),
4051 cfs_duration_sec(timestamp - newest_last_rx),
4052 cfs_duration_sec(GNILND_TIMEOUTRX(timeout)));
4053 mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
4055 kgnilnd_close_conn(conn, rc);
4059 rrc = kgnilnd_smsg_getnext(conn->gnc_ephandle, &prefix);
4061 if (rrc == GNI_RC_NOT_DONE) {
4062 mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
4063 CDEBUG(D_INFO, "SMSG RX empty\n");
4067 /* Instead of asserting when we get mailbox corruption, let's attempt to
4068 * close the conn and recover. We can put the conn/mailbox into
4069 * purgatory and let purgatory deal with the problem. If we see
4070 * this NETERROR reported on production systems in large amounts
4071 * we will need to revisit the state machine to see if we can tighten
4072 * it up further to improve data protection.
4075 if (rrc == GNI_RC_INVALID_STATE) {
4076 mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
4077 GNIDBG_CONN(D_NETERROR | D_CONSOLE, conn, "Mailbox corruption "
4078 "detected closing conn %p from peer %s\n", conn,
4079 libcfs_nid2str(conn->gnc_peer->gnp_nid));
4081 kgnilnd_close_conn(conn, rc);
4085 LASSERTF(rrc == GNI_RC_SUCCESS,
4086 "bad rc %d on conn %p from peer %s\n",
4087 rrc, conn, libcfs_nid2str(peer->gnp_nid));
4089 msg = (kgn_msg_t *)prefix;
4091 rx = kgnilnd_alloc_rx();
4093 mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
4094 kgnilnd_release_msg(conn);
4095 GNIDBG_MSG(D_NETERROR, msg, "Dropping SMSG RX from 0x%p->%s, no RX memory",
4096 conn, libcfs_nid2str(peer->gnp_nid));
4100 GNIDBG_MSG(D_INFO, msg, "SMSG RX on %p from %s",
4101 conn, libcfs_nid2str(peer->gnp_nid));
4103 timestamp = conn->gnc_last_rx;
4104 last_seq = conn->gnc_rx_seq;
4106 conn->gnc_last_rx = jiffies;
4107 /* stash first rx so we can clear out purgatory
4109 if (conn->gnc_first_rx == 0)
4110 conn->gnc_first_rx = jiffies;
4112 seq = conn->gnc_rx_seq++;
4114 /* needs to linger to protect gnc_rx_seq like we do with gnc_tx_seq */
4115 mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
4116 kgnilnd_peer_alive(conn->gnc_peer);
4119 rx->grx_conn = conn;
4121 rx->grx_received = current_kernel_time();
4123 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_NET_LOOKUP)) {
4126 rc = kgnilnd_find_net(msg->gnm_srcnid, &net);
4132 kgnilnd_net_decref(net);
4135 if (*kgnilnd_tunables.kgn_checksum && !msg->gnm_cksum)
4136 GNIDBG_MSG(D_WARNING, msg, "no msg header checksum when enabled");
4138 /* XXX Nic: Do we need to swab cksum */
4139 if (msg->gnm_cksum != 0) {
4140 msg_cksum = msg->gnm_cksum;
4142 tmp_cksum = kgnilnd_cksum(msg, sizeof(kgn_msg_t));
4144 if (tmp_cksum != msg_cksum) {
4145 GNIDBG_MSG(D_NETERROR, msg, "Bad hdr checksum (%x expected %x)",
4146 tmp_cksum, msg_cksum);
4147 kgnilnd_dump_msg(D_BUFFS, msg);
4152 /* restore checksum for future debug messages */
4153 msg->gnm_cksum = tmp_cksum;
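/* Header checksum flow in the block above (sketch; the zeroing of gnm_cksum
 * before recomputing is assumed):
 *
 *	msg_cksum = msg->gnm_cksum;
 *	msg->gnm_cksum = 0;					(assumed)
 *	tmp_cksum = kgnilnd_cksum(msg, sizeof(kgn_msg_t));
 *	if (tmp_cksum != msg_cksum)
 *		-> log, dump the header and bail
 *	msg->gnm_cksum = tmp_cksum;	restore for later GNIDBG_MSG dumps
 *
 * The sender is assumed to have computed gnm_cksum the same way over the
 * header before posting it.
 */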
4155 if (msg->gnm_magic != GNILND_MSG_MAGIC) {
4156 if (__swab32(msg->gnm_magic) != GNILND_MSG_MAGIC) {
4157 GNIDBG_MSG(D_NETERROR, msg, "Unexpected magic %08x from %s",
4158 msg->gnm_magic, libcfs_nid2str(peer->gnp_nid));
4163 __swab32s(&msg->gnm_magic);
4164 __swab16s(&msg->gnm_version);
4165 __swab16s(&msg->gnm_type);
4166 __swab64s(&msg->gnm_srcnid);
4167 __swab64s(&msg->gnm_connstamp);
4168 __swab32s(&msg->gnm_seq);
4170 /* NB message type checked below; NOT here... */
4171 switch (msg->gnm_type) {
4172 case GNILND_MSG_GET_ACK_REV:
4173 case GNILND_MSG_PUT_ACK:
4174 kgnilnd_swab_rdma_desc(&msg->gnm_u.putack.gnpam_desc);
4177 case GNILND_MSG_PUT_REQ_REV:
4178 case GNILND_MSG_GET_REQ:
4179 kgnilnd_swab_rdma_desc(&msg->gnm_u.get.gngm_desc);
4187 if (msg->gnm_version != GNILND_MSG_VERSION) {
4188 GNIDBG_MSG(D_NETERROR, msg, "Unexpected protocol version %d from %s",
4189 msg->gnm_version, libcfs_nid2str(peer->gnp_nid));
4194 if (LNET_NIDADDR(msg->gnm_srcnid) != LNET_NIDADDR(peer->gnp_nid)) {
4195 GNIDBG_MSG(D_NETERROR, msg, "Unexpected peer %s from %s",
4196 libcfs_nid2str(msg->gnm_srcnid),
4197 libcfs_nid2str(peer->gnp_nid));
4202 if (msg->gnm_connstamp != conn->gnc_peer_connstamp) {
4203 GNIDBG_MSG(D_NETERROR, msg, "Unexpected connstamp "LPX64"("LPX64
4204 " expected) from %s",
4205 msg->gnm_connstamp, conn->gnc_peer_connstamp,
4206 libcfs_nid2str(peer->gnp_nid));
4211 if (msg->gnm_seq != seq) {
4212 GNIDBG_MSG(D_NETERROR, msg, "Unexpected sequence number %d(%d expected) from %s",
4213 msg->gnm_seq, seq, libcfs_nid2str(peer->gnp_nid));
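/* Each check above (magic/byte order, version, source NID, connstamp, sequence
 * number) is expected to bail out on mismatch; only a header that passes them
 * all reaches the type dispatch below, so from here on msg is trusted to be a
 * well-formed message from the peer this conn was negotiated with.
 */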
4218 atomic_inc(&conn->gnc_device->gnd_short_nrx);
4220 if (msg->gnm_type == GNILND_MSG_CLOSE) {
4221 CDEBUG(D_NETTRACE, "%s sent us CLOSE msg\n",
4222 libcfs_nid2str(conn->gnc_peer->gnp_nid));
4223 write_lock(&kgnilnd_data.kgn_peer_conn_lock);
4224 conn->gnc_close_recvd = GNILND_CLOSE_RX;
4225 conn->gnc_peer_error = msg->gnm_u.completion.gncm_retval;
4226 /* double check state with lock held */
4227 if (conn->gnc_state == GNILND_CONN_ESTABLISHED) {
4228 /* only error if we are not already closing */
4229 if (conn->gnc_peer_error == -ETIMEDOUT) {
4230 unsigned long now = jiffies;
4231 CNETERR("peer 0x%p->%s closed connection 0x%p due to timeout. "
4233 "RX %d @ %lus/%lus; TX %d @ %lus/%lus; "
4234 "NOOP %lus/%lus/%lus; sched %lus/%lus/%lus ago\n",
4235 conn->gnc_peer, libcfs_nid2str(conn->gnc_peer->gnp_nid),
4237 cfs_duration_sec(now - timestamp),
4238 cfs_duration_sec(now - conn->gnc_last_rx_cq),
4240 cfs_duration_sec(now - conn->gnc_last_tx),
4241 cfs_duration_sec(now - conn->gnc_last_tx_cq),
4242 cfs_duration_sec(now - conn->gnc_last_noop_want),
4243 cfs_duration_sec(now - conn->gnc_last_noop_sent),
4244 cfs_duration_sec(now - conn->gnc_last_noop_cq),
4245 cfs_duration_sec(now - conn->gnc_last_sched_ask),
4246 cfs_duration_sec(now - conn->gnc_last_sched_do),
4247 cfs_duration_sec(now - conn->gnc_device->gnd_sched_alive));
4249 kgnilnd_close_conn_locked(conn, -ECONNRESET);
4251 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
4255 if (conn->gnc_close_recvd) {
4256 GNIDBG_MSG(D_NETERROR, msg, "Unexpected message %s(%d/%d) after CLOSE from %s",
4257 kgnilnd_msgtype2str(msg->gnm_type),
4258 msg->gnm_type, conn->gnc_close_recvd,
4259 libcfs_nid2str(conn->gnc_peer->gnp_nid));
4264 if (conn->gnc_state != GNILND_CONN_ESTABLISHED) {
4265 /* XXX Nic: log message received on bad connection state */
4269 switch (msg->gnm_type) {
4270 case GNILND_MSG_NOOP:
4271 /* Nothing to do; just a keepalive */
4274 case GNILND_MSG_IMMEDIATE:
4275 /* only get SMSG payload for IMMEDIATE */
4276 atomic64_add(msg->gnm_payload_len, &conn->gnc_device->gnd_short_rxbytes);
4277 rc = lnet_parse(net->gnn_ni, &msg->gnm_u.immediate.gnim_hdr,
4278 msg->gnm_srcnid, rx, 0);
4281 case GNILND_MSG_GET_REQ_REV:
4282 case GNILND_MSG_PUT_REQ:
4283 rc = lnet_parse(net->gnn_ni, &msg->gnm_u.putreq.gnprm_hdr,
4284 msg->gnm_srcnid, rx, 1);
4287 case GNILND_MSG_GET_NAK_REV:
4288 tx = kgnilnd_match_reply_either(conn, GNILND_MSG_GET_REQ_REV, GNILND_MSG_GET_ACK_REV,
4289 msg->gnm_u.completion.gncm_cookie);
4293 kgnilnd_complete_tx(tx, msg->gnm_u.completion.gncm_retval);
4295 case GNILND_MSG_PUT_NAK:
4296 tx = kgnilnd_match_reply_either(conn, GNILND_MSG_PUT_REQ, GNILND_MSG_PUT_ACK,
4297 msg->gnm_u.completion.gncm_cookie);
4301 kgnilnd_complete_tx(tx, msg->gnm_u.completion.gncm_retval);
4303 case GNILND_MSG_PUT_ACK:
4304 tx = kgnilnd_match_reply(conn, GNILND_MSG_PUT_REQ,
4305 msg->gnm_u.putack.gnpam_src_cookie);
4309 /* store putack data for later: deferred rdma or re-try */
4310 tx->tx_putinfo = msg->gnm_u.putack;
4313 spin_lock(&tx->tx_conn->gnc_list_lock);
4315 GNITX_ASSERTF(tx, tx->tx_state & GNILND_TX_WAITING_REPLY,
4316 "not waiting for reply", NULL);
4318 tx->tx_state &= ~GNILND_TX_WAITING_REPLY;
4320 if (likely(!(tx->tx_state & GNILND_TX_WAITING_COMPLETION))) {
4321 kgnilnd_tx_del_state_locked(tx, NULL, conn, GNILND_TX_ALLOCD);
4322 /* sample under lock as follow on steps require gnc_list_lock
4323 * - or call kgnilnd_tx_done which requires no locks held over
4324 * call to lnet_finalize */
4327 /* cannot launch rdma if still waiting for fma-msg completion */
4328 CDEBUG(D_NET, "tx 0x%p type 0x%02x will need to "
4329 "wait for SMSG completion\n", tx, tx->tx_msg.gnm_type);
4330 tx->tx_state |= GNILND_TX_PENDING_RDMA;
4332 spin_unlock(&tx->tx_conn->gnc_list_lock);
4335 rc = kgnilnd_send_mapped_tx(tx, 0);
4337 kgnilnd_tx_done(tx, rc);
4340 case GNILND_MSG_GET_ACK_REV:
4341 tx = kgnilnd_match_reply(conn, GNILND_MSG_GET_REQ_REV,
4342 msg->gnm_u.putack.gnpam_src_cookie);
4346 /* store putack data for later: deferred rdma or re-try */
4347 tx->tx_putinfo = msg->gnm_u.putack;
4349 spin_lock(&tx->tx_conn->gnc_list_lock);
4351 GNITX_ASSERTF(tx, tx->tx_state & GNILND_TX_WAITING_REPLY,
4352 "not waiting for reply", NULL);
4354 tx->tx_state &= ~GNILND_TX_WAITING_REPLY;
4356 if (likely(!(tx->tx_state & GNILND_TX_WAITING_COMPLETION))) {
4357 kgnilnd_tx_del_state_locked(tx, NULL, conn, GNILND_TX_ALLOCD);
4358 /* sample under lock as follow on steps require gnc_list_lock
4359 * - or call kgnilnd_tx_done which requires no locks held over
4360 * call to lnet_finalize */
4363 /* cannot launch rdma if still waiting for fma-msg completion */
4364 CDEBUG(D_NET, "tx 0x%p type 0x%02x will need to "
4365 "wait for SMSG completion\n", tx, tx->tx_msg.gnm_type);
4366 tx->tx_state |= GNILND_TX_PENDING_RDMA;
4368 spin_unlock(&tx->tx_conn->gnc_list_lock);
4371 rc = kgnilnd_send_mapped_tx(tx, 0);
4373 kgnilnd_tx_done(tx, rc);
4376 case GNILND_MSG_PUT_DONE:
4377 tx = kgnilnd_match_reply(conn, GNILND_MSG_PUT_ACK,
4378 msg->gnm_u.completion.gncm_cookie);
4382 GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED ||
4383 tx->tx_buftype == GNILND_BUF_VIRT_MAPPED,
4384 "bad tx buftype %d", tx->tx_buftype);
4386 kgnilnd_finalize_rx_done(tx, msg);
4388 case GNILND_MSG_PUT_REQ_REV:
4389 case GNILND_MSG_GET_REQ:
4390 rc = lnet_parse(net->gnn_ni, &msg->gnm_u.get.gngm_hdr,
4391 msg->gnm_srcnid, rx, 1);
4395 case GNILND_MSG_GET_NAK:
4396 tx = kgnilnd_match_reply(conn, GNILND_MSG_GET_REQ,
4397 msg->gnm_u.completion.gncm_cookie);
4401 GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED ||
4402 tx->tx_buftype == GNILND_BUF_VIRT_MAPPED,
4403 "bad tx buftype %d", tx->tx_buftype);
4405 kgnilnd_complete_tx(tx, msg->gnm_u.completion.gncm_retval);
4408 case GNILND_MSG_GET_DONE:
4409 tx = kgnilnd_match_reply(conn, GNILND_MSG_GET_REQ,
4410 msg->gnm_u.completion.gncm_cookie);
4414 GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED ||
4415 tx->tx_buftype == GNILND_BUF_VIRT_MAPPED,
4416 "bad tx buftype %d", tx->tx_buftype);
4418 lnet_set_reply_msg_len(net->gnn_ni, tx->tx_lntmsg[1],
4419 msg->gnm_u.completion.gncm_retval);
4421 kgnilnd_finalize_rx_done(tx, msg);
4423 case GNILND_MSG_GET_DONE_REV:
4424 tx = kgnilnd_match_reply(conn, GNILND_MSG_GET_ACK_REV,
4425 msg->gnm_u.completion.gncm_cookie);
4429 GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED ||
4430 tx->tx_buftype == GNILND_BUF_VIRT_MAPPED,
4431 "bad tx buftype %d", tx->tx_buftype);
4433 kgnilnd_finalize_rx_done(tx, msg);
4436 case GNILND_MSG_PUT_DONE_REV:
4437 tx = kgnilnd_match_reply(conn, GNILND_MSG_PUT_REQ_REV,
4438 msg->gnm_u.completion.gncm_cookie);
4443 GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED ||
4444 tx->tx_buftype == GNILND_BUF_VIRT_MAPPED,
4445 "bad tx buftype %d", tx->tx_buftype);
4447 kgnilnd_finalize_rx_done(tx, msg);
4449 case GNILND_MSG_PUT_NAK_REV:
4450 tx = kgnilnd_match_reply(conn, GNILND_MSG_PUT_REQ_REV,
4451 msg->gnm_u.completion.gncm_cookie);
4456 GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED ||
4457 tx->tx_buftype == GNILND_BUF_VIRT_MAPPED,
4458 "bad tx buftype %d", tx->tx_buftype);
4460 kgnilnd_complete_tx(tx, msg->gnm_u.completion.gncm_retval);
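/* Dispatch summary for the switch above: NOOP is a bare keepalive; IMMEDIATE
 * and the REQ variants are handed to lnet_parse() so LNet can route them; the
 * ACK/NAK/DONE variants are matched back to the originating TX through their
 * completion cookie and finished with kgnilnd_complete_tx() or
 * kgnilnd_finalize_rx_done().
 */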
4465 if (rc < 0) /* protocol/comms error */
4466 kgnilnd_close_conn(conn, rc);
4468 if (repost && rx != NULL) {
4469 kgnilnd_consume_rx(rx);
4472 /* we got an event, so assume there is more and ask for a reschedule */
4474 kgnilnd_schedule_conn(conn);
4478 /* Do the failure injections that we need to affect conn processing in the following function.
4479 * When writing tests that use this function make sure to use a fail_loc with a fail mask.
4480 * If you don't, you can cause the scheduler threads to spin on the conn without it leaving
4483 * intent is used to signal the calling function whether or not the conn needs to be rescheduled.
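/* A hedged userspace example (assuming the stock libcfs fail_loc plumbing; the
 * numeric values of the CFS_FAIL_GNI_* codes live in the headers):
 *
 *	lctl set_param fail_loc=<CFS_FAIL_GNI_DROP_CLOSING | CFS_FAIL_ONCE>
 *
 * OR-ing in a one-shot style mask keeps a scheduler thread from spinning on
 * the same conn forever, per the warning above.
 */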
4487 kgnilnd_check_conn_fail_loc(kgn_device_t *dev, kgn_conn_t *conn, int *intent)
4491 /* short circuit out when not set */
4492 if (likely(!cfs_fail_loc)) {
4496 /* failure injection to test for stack reset clean ups */
4497 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_DROP_CLOSING)) {
4498 /* we can't rely on busy loops being nice enough to get the
4499 * stack reset triggered - it'd just spin on this conn */
4500 CFS_RACE(CFS_FAIL_GNI_DROP_CLOSING);
4503 GOTO(did_fail_loc, rc);
4506 if (conn->gnc_state == GNILND_CONN_DESTROY_EP) {
4507 /* DESTROY_EP set in kgnilnd_conn_decref on gnc_refcount = 1 */
4509 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_DROP_DESTROY_EP)) {
4510 CFS_RACE(CFS_FAIL_GNI_DROP_DESTROY_EP);
4513 GOTO(did_fail_loc, rc);
4517 /* CFS_FAIL_GNI_FINISH_PURG2 is used to stop a connection from fully closing. This scheduler
4518 * will spin on the CFS_FAIL_TIMEOUT until the fail_loc is cleared, at which time the connection
4519 * will be closed by kgnilnd_complete_closed_conn.
4521 if ((conn->gnc_state == GNILND_CONN_CLOSED) && CFS_FAIL_CHECK(CFS_FAIL_GNI_FINISH_PURG2)) {
4522 while (CFS_FAIL_TIMEOUT(CFS_FAIL_GNI_FINISH_PURG2, 1)) {};
4525 GOTO(did_fail_loc, rc);
4528 /* this one is a bit gross - we can't hold the mutex from process_conns
4529 * across a CFS_RACE here - it'd block the conn threads from doing an ep_bind
4530 * and moving onto finish_connect
4531 * so, we'll just set the rc - kgnilnd_process_conns will clear
4532 * found_work on a fail_loc, getting the scheduler thread to call schedule()
4533 * and effectively getting this thread to sleep */
4534 if ((conn->gnc_state == GNILND_CONN_CLOSED) && CFS_FAIL_CHECK(CFS_FAIL_GNI_FINISH_PURG)) {
4537 GOTO(did_fail_loc, rc);
4545 kgnilnd_send_conn_close(kgn_conn_t *conn)
4549 /* we are closing the conn - we will try to send the CLOSE msg
4550 * but will not wait for anything else to flush */
4552 /* send the close if not already done so or received one */
4553 if (!conn->gnc_close_sent && !conn->gnc_close_recvd) {
4554 /* set close_sent regardless of the success of the
4555 * CLOSE message. We are going to try once and then
4556 * kick him out of the sandbox */
4557 conn->gnc_close_sent = 1;
4560 /* EP might be null already if remote side initiated a new connection.
4561 * kgnilnd_finish_connect destroys existing ep_handles before wiring up the new connection,
4562 * so this check is here to make sure we don't attempt to send with a null ep_handle.
4564 if (conn->gnc_ephandle != NULL) {
4567 tx = kgnilnd_new_tx_msg(GNILND_MSG_CLOSE, conn->gnc_peer->gnp_net->gnn_ni->ni_nid);
4569 tx->tx_msg.gnm_u.completion.gncm_retval = conn->gnc_error;
4570 tx->tx_state = GNILND_TX_WAITING_COMPLETION;
4571 tx->tx_qtime = jiffies;
4573 if (tx->tx_id.txe_idx == 0) {
4574 rc = kgnilnd_set_tx_id(tx, conn);
4576 kgnilnd_tx_done(tx, rc);
4580 CDEBUG(D_NETTRACE, "sending close with errno %d\n",
4583 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_CLOSE_SEND)) {
4584 kgnilnd_tx_done(tx, -EAGAIN);
4586 rc = kgnilnd_sendmsg(tx, NULL, 0, NULL, GNILND_TX_FMAQ);
4588 /* It wasn't sent and we don't care. */
4589 kgnilnd_tx_done(tx, rc);
4597 /* When changing gnc_state we need to take the kgn_peer_conn_lock */
4598 write_lock(&kgnilnd_data.kgn_peer_conn_lock);
4599 conn->gnc_state = GNILND_CONN_CLOSED;
4600 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
4601 /* mark this conn as CLOSED now that we processed it;
4602 * do this after the TX, so we can use CLOSING in asserts */
4606 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_RX_CLOSE_CLOSED)) {
4607 /* simulate a RX CLOSE after the timeout but before
4608 * the scheduler thread gets it */
4609 conn->gnc_close_recvd = GNILND_CLOSE_INJECT2;
4610 conn->gnc_peer_error = -ETIMEDOUT;
4612 /* schedule to allow potential CLOSE and get the complete phase run */
4613 kgnilnd_schedule_conn(conn);
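/* kgnilnd_process_mapped_tx: drain gnd_map_tx, the queue of TXs whose buffers
 * still need to be mapped/registered with the NIC. A rough sketch of the
 * policy in the body below: retry immediately for the first
 * GNILND_FAST_MAPPING_TRY attempts, then pace retries with the map timer, and
 * give up (NAK the peer and fail the TX) once GNILND_MAP_TIMEOUT has elapsed
 * since the first attempt; see the retry-pacing note inside the loop.
 */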
4617 kgnilnd_process_mapped_tx(kgn_device_t *dev)
4622 int fast_remaps = GNILND_FAST_MAPPING_TRY;
4623 int log_retrans, log_retrans_level;
4624 static int last_map_version;
4627 spin_lock(&dev->gnd_lock);
4628 if (list_empty(&dev->gnd_map_tx)) {
4629 /* if the list is empty, make sure we don't have a timer running */
4630 del_singleshot_timer_sync(&dev->gnd_map_timer);
4631 spin_unlock(&dev->gnd_lock);
4635 dev->gnd_sched_alive = jiffies;
4637 /* we'll retry as fast as possible up to 25% of the limit, then we start
4638 * backing off until our map version changes - indicating we unmapped
4640 tx = list_first_entry(&dev->gnd_map_tx, kgn_tx_t, tx_list);
4641 if (likely(dev->gnd_map_attempt == 0) ||
4642 time_after_eq(jiffies, dev->gnd_next_map) ||
4643 last_map_version != dev->gnd_map_version) {
4645 /* if this is our first attempt at mapping, set last mapped to the current
4646 * jiffies so we can time out our attempt correctly.
4648 if (dev->gnd_map_attempt == 0)
4649 dev->gnd_last_map = jiffies;
4651 GNIDBG_TX(D_NET, tx, "waiting for mapping event to retry", NULL);
4652 spin_unlock(&dev->gnd_lock);
4656 /* delete the previous timer if it exists */
4657 del_singleshot_timer_sync(&dev->gnd_map_timer);
4658 /* stash the last map version to let us know when a good one was seen */
4659 last_map_version = dev->gnd_map_version;
4661 /* we need to take the lock and continually refresh the head of the list as
4662 * kgnilnd_complete_closed_conn might be nuking stuff and we are cycling the lock
4663 * allowing them to squeeze in */
4665 while (!list_empty(&dev->gnd_map_tx)) {
4666 /* make sure we break out early on quiesce */
4667 if (unlikely(kgnilnd_data.kgn_quiesce_trigger)) {
4668 /* always break with lock held - we unlock outside loop */
4672 tx = list_first_entry(&dev->gnd_map_tx, kgn_tx_t, tx_list);
4674 kgnilnd_tx_del_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_ALLOCD);
4677 /* sample with lock held, serializing with kgnilnd_complete_closed_conn */
4678 if (tx->tx_conn->gnc_state != GNILND_CONN_ESTABLISHED) {
4679 /* if conn is dying, mark tx in tx_ref_table for
4680 * kgnilnd_complete_closed_conn to finish up */
4681 kgnilnd_tx_add_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_DYING, 1);
4684 /* tx was moved to DYING, get next */
4688 spin_unlock(&dev->gnd_lock);
4689 rc = kgnilnd_send_mapped_tx(tx, 1);
4691 /* We made it! skip error handling.. */
4693 /* OK to continue on +ve errors as it won't get seen until
4694 * this function is called again - we operate on a copy of the original
4695 * list and not the live list */
4696 spin_lock(&dev->gnd_lock);
4697 /* reset map attempts back to zero; we successfully
4698 * mapped, so we can reset our timers */
4699 dev->gnd_map_attempt = 0;
4701 } else if (rc != -ENOMEM) {
4702 /* carp, failure we can't handle */
4703 kgnilnd_tx_done(tx, rc);
4704 spin_lock(&dev->gnd_lock);
4705 /* reset map attempts back to zero; we don't know what happened, but it
4706 * wasn't a failed mapping
4708 dev->gnd_map_attempt = 0;
4712 /* time to handle the retry cases. Lock so we don't have two threads
4713 * mucking with gnd_map_attempt or gnd_next_map at the same time.
4715 spin_lock(&dev->gnd_lock);
4716 dev->gnd_map_attempt++;
4717 if (dev->gnd_map_attempt < fast_remaps) {
4718 /* do nothing; we just want it to go as fast as possible.
4719 * Just set gnd_next_map to the current jiffies so it will be processed
4720 * as fast as possible.
4722 dev->gnd_next_map = jiffies;
4724 /* Retry based on GNILND_MAP_RETRY_RATE */
4725 dev->gnd_next_map = jiffies + GNILND_MAP_RETRY_RATE;
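/* Retry pacing sketch, from the two branches above and the timeout check below:
 *
 *	attempt <  fast_remaps	-> gnd_next_map = jiffies (retry right away)
 *	attempt >= fast_remaps	-> gnd_next_map = jiffies + GNILND_MAP_RETRY_RATE
 *	past gnd_last_map + GNILND_MAP_TIMEOUT	-> NAK the peer, fail TX -ENOMEM
 */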
4728 /* only log occasionally once we've retried fast_remaps */
4729 log_retrans = (dev->gnd_map_attempt >= fast_remaps) &&
4730 ((dev->gnd_map_attempt % fast_remaps) == 0);
4731 log_retrans_level = log_retrans ? D_NETERROR : D_NET;
4733 /* make sure we are not off in the weeds with this tx */
4734 if (time_after(jiffies, dev->gnd_last_map + GNILND_MAP_TIMEOUT)) {
4735 GNIDBG_TX(D_NETERROR, tx,
4736 "giving up on TX, too many retries", NULL);
4737 spin_unlock(&dev->gnd_lock);
4738 if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_REQ ||
4739 tx->tx_msg.gnm_type == GNILND_MSG_GET_REQ_REV) {
4740 kgnilnd_nak_rdma(tx->tx_conn, tx->tx_msg.gnm_type,
4742 tx->tx_putinfo.gnpam_dst_cookie,
4743 tx->tx_msg.gnm_srcnid);
4745 kgnilnd_nak_rdma(tx->tx_conn, tx->tx_msg.gnm_type,
4747 tx->tx_getinfo.gngm_cookie,
4748 tx->tx_msg.gnm_srcnid);
4750 kgnilnd_tx_done(tx, -ENOMEM);
4751 GOTO(get_out_mapped, rc);
4753 GNIDBG_TX(log_retrans_level, tx,
4754 "transient map failure #%d %d pages/%d bytes phys %u@%u "
4756 "nq_map %d mdd# %d/%d GART %ld",
4757 dev->gnd_map_attempt, tx->tx_phys_npages, tx->tx_nob,
4758 dev->gnd_map_nphys, dev->gnd_map_physnop * PAGE_SIZE,
4759 dev->gnd_map_nvirt, dev->gnd_map_virtnob,
4760 atomic_read(&dev->gnd_nq_map),
4761 atomic_read(&dev->gnd_n_mdd), atomic_read(&dev->gnd_n_mdd_held),
4762 atomic64_read(&dev->gnd_nbytes_map));
4765 /* we need to stop processing the rest of the list, so add it back in */
4766 /* set timer to wake device when we need to schedule this tx */
4767 mod_timer(&dev->gnd_map_timer, dev->gnd_next_map);
4768 kgnilnd_tx_add_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_MAPQ, 0);
4769 spin_unlock(&dev->gnd_lock);
4770 GOTO(get_out_mapped, rc);
4772 spin_unlock(&dev->gnd_lock);
4778 kgnilnd_process_conns(kgn_device_t *dev, unsigned long deadline)
4783 int error_inject = 0;
4787 spin_lock(&dev->gnd_lock);
4788 while (!list_empty(&dev->gnd_ready_conns) && time_before(jiffies, deadline)) {
4789 dev->gnd_sched_alive = jiffies;
4793 if (unlikely(kgnilnd_data.kgn_quiesce_trigger)) {
4794 /* break with lock held */
4798 conn = list_first_entry(&dev->gnd_ready_conns, kgn_conn_t, gnc_schedlist);
4799 list_del_init(&conn->gnc_schedlist);
4800 spin_unlock(&dev->gnd_lock);
4802 conn_sched = xchg(&conn->gnc_scheduled, GNILND_CONN_PROCESS);
4804 LASSERTF(conn_sched != GNILND_CONN_IDLE &&
4805 conn_sched != GNILND_CONN_PROCESS,
4806 "conn %p on ready list but in bad state: %d\n",
4809 CDEBUG(D_INFO, "conn %p@%s for processing\n",
4810 conn, kgnilnd_conn_state2str(conn));
4813 set_mb(conn->gnc_last_sched_do, jiffies);
4815 if (kgnilnd_check_conn_fail_loc(dev, conn, &intent)) {
4817 /* based on intent see if we should run again. */
4818 rc = kgnilnd_schedule_process_conn(conn, intent);
4820 /* drop ref from gnd_ready_conns */
4821 if (atomic_read(&conn->gnc_refcount) == 1 && rc != 1) {
4822 down_write(&dev->gnd_conn_sem);
4823 kgnilnd_conn_decref(conn);
4824 up_write(&dev->gnd_conn_sem);
4825 } else if (rc != 1) {
4826 kgnilnd_conn_decref(conn);
4828 /* clear this so that scheduler thread doesn't spin */
4830 /* break with lock held... */
4831 spin_lock(&dev->gnd_lock);
4835 if (unlikely(conn->gnc_state == GNILND_CONN_CLOSED)) {
4836 down_write(&dev->gnd_conn_sem);
4838 /* CONN_CLOSED set in process_fmaq when CLOSE is sent */
4839 if (unlikely(atomic_read(&conn->gnc_tx_in_use))) {
4840 /* If there are tx's currently in use in another
4841 * thread we don't want to complete the close
4842 * yet. Cycle this conn back through
4844 kgnilnd_schedule_conn(conn);
4846 kgnilnd_complete_closed_conn(conn);
4848 up_write(&dev->gnd_conn_sem);
4849 } else if (unlikely(conn->gnc_state == GNILND_CONN_DESTROY_EP)) {
4850 /* DESTROY_EP set in kgnilnd_conn_decref on gnc_refcount = 1 */
4851 /* serialize SMSG CQs with ep_bind and smsg_release */
4852 down_write(&dev->gnd_conn_sem);
4853 kgnilnd_destroy_conn_ep(conn);
4854 up_write(&dev->gnd_conn_sem);
4855 } else if (unlikely(conn->gnc_state == GNILND_CONN_CLOSING)) {
4856 /* if we need to do some CLOSE sending, etc done here do it */
4857 down_write(&dev->gnd_conn_sem);
4858 kgnilnd_send_conn_close(conn);
4859 kgnilnd_check_fma_rx(conn);
4860 up_write(&dev->gnd_conn_sem);
4861 } else if (atomic_read(&conn->gnc_peer->gnp_dirty_eps) == 0) {
4862 /* start moving traffic if the old conns are cleared out */
4863 down_read(&dev->gnd_conn_sem);
4864 kgnilnd_check_fma_rx(conn);
4865 kgnilnd_process_fmaq(conn);
4866 up_read(&dev->gnd_conn_sem);
4869 rc = kgnilnd_schedule_process_conn(conn, 0);
4871 /* drop ref from gnd_ready_conns */
4872 if (atomic_read(&conn->gnc_refcount) == 1 && rc != 1) {
4873 down_write(&dev->gnd_conn_sem);
4874 kgnilnd_conn_decref(conn);
4875 up_write(&dev->gnd_conn_sem);
4876 } else if (rc != 1) {
4877 kgnilnd_conn_decref(conn);
4880 /* check list again with lock held */
4881 spin_lock(&dev->gnd_lock);
4884 /* If we are short-circuiting due to timing, we want to be scheduled
4885 * as soon as possible.
4887 if (!list_empty(&dev->gnd_ready_conns) && !error_inject)
4890 spin_unlock(&dev->gnd_lock);
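/* kgnilnd_scheduler: per-device worker thread. One pass of the loop below
 * roughly does: drain the FMA send/recv and RDMA CQs, push the RDMA and
 * mapping queues, then process ready conns; keep spinning while found_work is
 * nonzero, busy_loops stays under kgn_loops and the deadline has not passed,
 * otherwise drop gnd_ready back to IDLE and sleep (or yield) on gnd_waitq.
 */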
4896 kgnilnd_scheduler(void *arg)
4898 int threadno = (long)arg;
4901 unsigned long deadline = 0;
4904 dev = &kgnilnd_data.kgn_devices[(threadno + 1) % kgnilnd_data.kgn_ndevs];
4906 cfs_block_allsigs();
4908 /* all gnilnd threads need to run fairly urgently */
4909 set_user_nice(current, *kgnilnd_tunables.kgn_sched_nice);
4910 deadline = jiffies + cfs_time_seconds(*kgnilnd_tunables.kgn_sched_timeout);
4911 while (!kgnilnd_data.kgn_shutdown) {
4913 /* Safe: kgn_shutdown only set when quiescent */
4915 /* to quiesce or to not quiesce, that is the question */
4917 if (unlikely(kgnilnd_data.kgn_quiesce_trigger)) {
4918 KGNILND_SPIN_QUIESCE;
4921 /* tracking for when thread goes AWOL */
4922 dev->gnd_sched_alive = jiffies;
4924 CFS_FAIL_TIMEOUT(CFS_FAIL_GNI_SCHED_DEADLINE,
4925 (*kgnilnd_tunables.kgn_sched_timeout + 1));
4926 /* let folks know we are up and kicking
4927 * - they can use this for latency savings, etc
4928 * - only change if IRQ; if IDLE, leave alone so that
4929 * schedule_device calls can put us back to IRQ */
4930 (void)cmpxchg(&dev->gnd_ready, GNILND_DEV_IRQ, GNILND_DEV_LOOP);
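/* gnd_ready sketch, from the transitions visible in this function: IRQ means
 * the device callback asked for service, LOOP means a scheduler thread is
 * actively polling, IDLE means it is about to sleep. The cmpxchg above only
 * downgrades IRQ to LOOP; the xchg just before sleeping drops the state to
 * IDLE but returns the old value, so an IRQ that raced in during the final
 * check still shows up as found_work.
 */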
4932 down_read(&dev->gnd_conn_sem);
4933 /* always check these - they are super low cost */
4934 found_work += kgnilnd_check_fma_send_cq(dev);
4935 found_work += kgnilnd_check_fma_rcv_cq(dev);
4937 /* rdma CQ doesn't care about eps */
4938 found_work += kgnilnd_check_rdma_cq(dev);
4940 /* move some RDMA ? */
4941 found_work += kgnilnd_process_rdmaq(dev);
4943 /* map some pending RDMA requests ? */
4944 found_work += kgnilnd_process_mapped_tx(dev);
4946 /* the EP for a conn is not destroyed until all the references
4947 * to it are gone, so these checks should be safe
4948 * even if run in parallel with the CQ checking functions
4949 * _AND_ a thread that processes the CLOSED->DONE
4953 up_read(&dev->gnd_conn_sem);
4955 /* process all conns ready now */
4956 found_work += kgnilnd_process_conns(dev, deadline);
4958 /* do an eager check to avoid the IRQ disabling in
4959 * prepare_to_wait and friends */
4962 (busy_loops++ < *kgnilnd_tunables.kgn_loops) &&
4963 time_before(jiffies, deadline)) {
4965 if ((busy_loops % 10) == 0) {
4966 /* tickle heartbeat and watchdog to ensure our
4967 * piggishness doesn't turn into heartbeat failure */
4968 touch_nmi_watchdog();
4974 /* if we got here, either found_work was zero or busy_loops says we
4975 * need to take a break. We'll clear gnd_ready but we'll check
4976 * one last time if there is an IRQ that needs processing */
4978 prepare_to_wait(&dev->gnd_waitq, &wait, TASK_INTERRUPTIBLE);
4980 /* the first time this will go LOOP -> IDLE and let us do one final check
4981 * during which we might get an IRQ, then IDLE->IDLE and schedule()
4982 * - this might allow other threads to block us for a bit if they
4983 * try to get the mutex, but that is good as we'd need to wake
4984 * up soon to handle the CQ or other processing anyway */
4986 found_work += xchg(&dev->gnd_ready, GNILND_DEV_IDLE);
4988 if ((busy_loops >= *kgnilnd_tunables.kgn_loops) ||
4989 time_after_eq(jiffies, deadline)) {
4991 "yeilding: found_work %d busy_loops %d\n",
4992 found_work, busy_loops);
4994 /* use yield if we are bailing due to busy_loops
4995 * - this will ensure we wake up soonish. This closes
4996 * a race with kgnilnd_device_callback - where it'd
4997 * not call wake_up() because gnd_ready == 1, but then
4998 * we come down and schedule() because of busy_loops.
4999 * We'd not be woken up until something poked our waitq
5000 * again. yield() ensures we wake up without another
5001 * waitq poke in that case */
5002 atomic_inc(&dev->gnd_n_yield);
5003 kgnilnd_data.kgn_last_condresched = jiffies;
5005 CDEBUG(D_INFO, "awake after yeild\n");
5006 deadline = jiffies + cfs_time_seconds(*kgnilnd_tunables.kgn_sched_timeout);
5007 } else if (found_work == GNILND_DEV_IDLE) {
5008 /* busy_loops is low and there is nothing to do,
5009 * go to sleep and wait for a waitq poke */
5011 "scheduling: found_work %d busy_loops %d\n",
5012 found_work, busy_loops);
5013 atomic_inc(&dev->gnd_n_schedule);
5014 kgnilnd_data.kgn_last_scheduled = jiffies;
5016 CDEBUG(D_INFO, "awake after schedule\n");
5017 deadline = jiffies + cfs_time_seconds(*kgnilnd_tunables.kgn_sched_timeout);
5019 finish_wait(&dev->gnd_waitq, &wait);
5022 kgnilnd_thread_fini();