2 * Copyright (C) 2004 Cluster File Systems, Inc.
4 * Copyright (C) 2009-2012 Cray, Inc.
6 * Derived from work by Eric Barton <eric@bartonsoftware.com>
7 * Author: James Shimek <jshimek@cray.com>
8 * Author: Nic Henke <nic@cray.com>
10 * This file is part of Lustre, http://www.lustre.org.
12 * Lustre is free software; you can redistribute it and/or
13 * modify it under the terms of version 2 of the GNU General Public
14 * License as published by the Free Software Foundation.
16 * Lustre is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with Lustre; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
28 #include <linux/nmi.h>
29 #include <linux/pagemap.h>
31 #include <libcfs/linux/linux-mem.h>
35 /* this is useful when debugging wire corruption. */
37 kgnilnd_dump_blob(int level, char *prefix, void *buf, int len) {
45 "%s 0x%p: 0x%16.16llx 0x%16.16llx 0x%16.16llx 0x%16.16llx\n",
46 prefix, ptr, *(ptr), *(ptr + 1), *(ptr + 2), *(ptr + 3));
49 } else if (len >= 16) {
51 "%s 0x%p: 0x%16.16llx 0x%16.16llx\n",
52 prefix, ptr, *(ptr), *(ptr + 1));
56 CDEBUG(level, "%s 0x%p: 0x%16.16llx\n",
65 kgnilnd_dump_msg(int mask, kgn_msg_t *msg)
67 CDEBUG(mask, "0x%8.8x 0x%4.4x 0x%4.4x 0x%16.16llx"
68 " 0x%16.16llx 0x%8.8x 0x%4.4x 0x%4.4x 0x%8.8x\n",
69 msg->gnm_magic, msg->gnm_version,
70 msg->gnm_type, msg->gnm_srcnid,
71 msg->gnm_connstamp, msg->gnm_seq,
72 msg->gnm_cksum, msg->gnm_payload_cksum,
73 msg->gnm_payload_len);
77 kgnilnd_schedule_device(kgn_device_t *dev)
79 short already_live = 0;
81 /* we'll only want to wake if the scheduler thread
82 * has come around and set ready to zero */
83 already_live = cmpxchg(&dev->gnd_ready, GNILND_DEV_IDLE, GNILND_DEV_IRQ);
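/* cmpxchg() hands back the previous value of gnd_ready, so only the
 * caller that actually performs the IDLE -> IRQ transition sees
 * GNILND_DEV_IDLE returned and needs to wake the scheduler; any other
 * caller knows the thread is already live or a wakeup is pending */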
86 wake_up(&dev->gnd_waitq);
89 void kgnilnd_schedule_device_timer(cfs_timer_cb_arg_t data)
91 kgn_device_t *dev = cfs_from_timer(dev, data, gnd_map_timer);
93 kgnilnd_schedule_device(dev);
96 void kgnilnd_schedule_device_timer_rd(cfs_timer_cb_arg_t data)
98 kgn_device_t *dev = cfs_from_timer(dev, data, gnd_rdmaq_timer);
100 kgnilnd_schedule_device(dev);
104 kgnilnd_device_callback(__u32 devid, __u64 arg)
107 int index = (int) arg;
109 if (index >= kgnilnd_data.kgn_ndevs) {
110 /* use _EMERG instead of an LBUG to prevent LBUG'ing in
111 * interrupt context. */
112 LCONSOLE_EMERG("callback for unknown device %d->%d\n",
117 dev = &kgnilnd_data.kgn_devices[index];
118 /* just basic sanity */
119 if (dev->gnd_id == devid) {
120 kgnilnd_schedule_device(dev);
122 LCONSOLE_EMERG("callback for bad device %d devid %d\n",
127 /* sched_intent values:
128 * < 0 : do not reschedule under any circumstances
129 * == 0: reschedule if someone marked him WANTS_SCHED
130 * > 0 : force a reschedule */
131 /* Return code 0 means it did not schedule the conn, 1
132 * means it successfully scheduled the conn.
136 kgnilnd_schedule_process_conn(kgn_conn_t *conn, int sched_intent)
140 /* move back to IDLE but save previous state.
141 * if we see WANTS_SCHED, we'll call kgnilnd_schedule_conn and
142 * let the xchg there handle any racing callers to get it
143 * onto gnd_ready_conns */
145 conn_sched = xchg(&conn->gnc_scheduled, GNILND_CONN_IDLE);
146 LASSERTF(conn_sched == GNILND_CONN_WANTS_SCHED ||
147 conn_sched == GNILND_CONN_PROCESS,
148 "conn %p after process in bad state: %d\n",
151 if (sched_intent >= 0) {
152 if ((sched_intent > 0 || (conn_sched == GNILND_CONN_WANTS_SCHED))) {
153 return kgnilnd_schedule_conn_refheld(conn, 1);
159 /* Return of 0 for conn not scheduled, 1 returned if conn was scheduled or marked
163 _kgnilnd_schedule_conn(kgn_conn_t *conn, const char *caller, int line, int refheld, int lock_held)
165 kgn_device_t *dev = conn->gnc_device;
169 sched = xchg(&conn->gnc_scheduled, GNILND_CONN_WANTS_SCHED);
170 /* we only care about the last person who marked want_sched since they
171 * are most likely the culprit
173 memcpy(conn->gnc_sched_caller, caller, sizeof(conn->gnc_sched_caller));
174 conn->gnc_sched_line = line;
175 /* if we are IDLE, add to list - only one guy sees IDLE and "wins"
176 * the chance to put it onto gnd_ready_conns.
177 * otherwise, leave marked as WANTS_SCHED and the thread that "owns"
178 * the conn in process_conns will take care of moving it back to
179 * SCHED when it is done processing */
181 if (sched == GNILND_CONN_IDLE) {
182 /* if the conn is already scheduled, we've already requested
183 * the scheduler thread wakeup */
185 /* Add a reference to the conn if we are not holding a reference
186 * already from the existing scheduler. We now use the same
187 * reference if we need to reschedule a conn while in a scheduler
190 kgnilnd_conn_addref(conn);
192 LASSERTF(list_empty(&conn->gnc_schedlist), "conn %p already sched state %d\n",
195 CDEBUG(D_INFO, "scheduling conn 0x%p caller %s:%d\n", conn, caller, line);
197 spin_lock(&dev->gnd_lock);
198 list_add_tail(&conn->gnc_schedlist, &dev->gnd_ready_conns);
200 spin_unlock(&dev->gnd_lock);
201 set_mb(conn->gnc_last_sched_ask, jiffies);
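/* set_mb() stores the timestamp with a full memory barrier so other
 * CPUs observe the updated gnc_last_sched_ask promptly */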
204 CDEBUG(D_INFO, "not scheduling conn 0x%p: %d caller %s:%d\n", conn, sched, caller, line);
208 /* make sure thread(s) are going to process conns - but let them make
209 * a separate decision from the conn schedule */
211 kgnilnd_schedule_device(dev);
216 _kgnilnd_schedule_delay_conn(kgn_conn_t *conn)
218 kgn_device_t *dev = conn->gnc_device;
220 spin_lock(&dev->gnd_lock);
221 if (list_empty(&conn->gnc_delaylist)) {
222 list_add_tail(&conn->gnc_delaylist, &dev->gnd_delay_conns);
225 spin_unlock(&dev->gnd_lock);
227 kgnilnd_schedule_device(dev);
232 kgnilnd_schedule_dgram(kgn_device_t *dev)
236 wake = xchg(&dev->gnd_dgram_ready, GNILND_DGRAM_SCHED);
237 if (wake != GNILND_DGRAM_SCHED) {
238 wake_up(&dev->gnd_dgram_waitq);
240 CDEBUG(D_NETTRACE, "not waking: %d\n", wake);
245 kgnilnd_free_tx(kgn_tx_t *tx)
247 /* taken from kgnilnd_tx_add_state_locked */
249 LASSERTF((tx->tx_list_p == NULL &&
250 tx->tx_list_state == GNILND_TX_ALLOCD) &&
251 list_empty(&tx->tx_list),
252 "tx %p with bad state %s (list_p %p) tx_list %s\n",
253 tx, kgnilnd_tx_state2str(tx->tx_list_state), tx->tx_list_p,
254 list_empty(&tx->tx_list) ? "empty" : "not empty");
256 atomic_dec(&kgnilnd_data.kgn_ntx);
258 /* we only allocate this if we need to */
259 if (tx->tx_phys != NULL) {
260 kmem_cache_free(kgnilnd_data.kgn_tx_phys_cache, tx->tx_phys);
261 CDEBUG(D_MALLOC, "slab-freed 'tx_phys': %lu at %p.\n",
262 GNILND_MAX_IOV * sizeof(gni_mem_segment_t), tx->tx_phys);
265 /* Only free the buffer if we used it */
266 if (tx->tx_buffer_copy != NULL) {
267 kgnilnd_vfree(tx->tx_buffer_copy, tx->tx_rdma_desc.length);
268 tx->tx_buffer_copy = NULL;
269 CDEBUG(D_MALLOC, "vfreed buffer2\n");
272 KGNILND_POISON(tx, 0x5a, sizeof(kgn_tx_t));
274 CDEBUG(D_MALLOC, "slab-freed 'tx': %lu at %p.\n", sizeof(*tx), tx);
275 kmem_cache_free(kgnilnd_data.kgn_tx_cache, tx);
279 kgnilnd_alloc_tx (void)
283 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_ALLOC_TX))
286 tx = kmem_cache_zalloc(kgnilnd_data.kgn_tx_cache, GFP_ATOMIC);
288 CERROR("failed to allocate tx\n");
291 CDEBUG(D_MALLOC, "slab-alloced 'tx': %lu at %p.\n",
294 /* set up everything here to minimize time under the lock */
295 tx->tx_buftype = GNILND_BUF_NONE;
296 tx->tx_msg.gnm_type = GNILND_MSG_NONE;
297 INIT_LIST_HEAD(&tx->tx_list);
298 INIT_LIST_HEAD(&tx->tx_map_list);
299 tx->tx_list_state = GNILND_TX_ALLOCD;
301 atomic_inc(&kgnilnd_data.kgn_ntx);
306 /* csum_fold needs to be run on the return value before shipping over the wire */
307 #define _kgnilnd_cksum(seed, ptr, nob) csum_partial(ptr, nob, seed)
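/* A minimal sketch of the intended call pattern (the fragment names below
 * are purely illustrative): accumulate partial sums across fragments and
 * fold exactly once at the end, as kgnilnd_cksum() and kgnilnd_cksum_kiov()
 * below do:
 *
 *	__wsum sum = _kgnilnd_cksum(0, frag_a, len_a);
 *	sum = _kgnilnd_cksum(sum, frag_b, len_b);
 *	__u16 wire_cksum = csum_fold(sum);
 */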
309 /* we don't use offset as everyone is passing a buffer reference that already
310 * includes the offset into the base address.
313 kgnilnd_cksum(void *ptr, size_t nob)
317 sum = csum_fold(_kgnilnd_cksum(0, ptr, nob));
319 /* don't use magic 'no checksum' value */
323 CDEBUG(D_INFO, "cksum 0x%x for ptr 0x%p sz %zu\n",
330 kgnilnd_cksum_kiov(unsigned int nkiov, struct bio_vec *kiov,
331 unsigned int offset, unsigned int nob, int dump_blob)
337 unsigned int fraglen;
343 CDEBUG(D_BUFFS, "calc cksum for kiov 0x%p nkiov %u offset %u nob %u, dump %d\n",
344 kiov, nkiov, offset, nob, dump_blob);
346 /* if this loop changes, please change kgnilnd_setup_phys_buffer */
348 while (offset >= kiov->bv_len) {
349 offset -= kiov->bv_len;
355 /* ignore nob here, if nob < (bv_len - offset), kiov == 1 */
356 odd = (unsigned long) (kiov[0].bv_len - offset) & 1;
358 if ((odd || *kgnilnd_tunables.kgn_vmap_cksum) && nkiov > 1) {
359 struct page **pages = kgnilnd_data.kgn_cksum_map_pages[get_cpu()];
361 LASSERTF(pages != NULL, "NULL pages for cpu %d map_pages 0x%p\n",
362 get_cpu(), kgnilnd_data.kgn_cksum_map_pages);
364 CDEBUG(D_BUFFS, "odd %d len %u offset %u nob %u\n",
365 odd, kiov[0].bv_len, offset, nob);
367 for (i = 0; i < nkiov; i++) {
368 pages[i] = kiov[i].bv_page;
371 addr = vmap(pages, nkiov, VM_MAP, PAGE_KERNEL);
373 CNETERR("Couldn't vmap %d frags on %d bytes to avoid odd length fragment in cksum\n",
375 /* return zero to avoid killing tx - we'll just get warning on console
376 * when remote end sees zero checksum */
379 atomic_inc(&kgnilnd_data.kgn_nvmap_cksum);
381 tmpck = _kgnilnd_cksum(0, ((void *) addr + kiov[0].bv_offset +
386 kgnilnd_dump_blob(D_BUFFS, "flat kiov RDMA payload",
387 (void *)addr + kiov[0].bv_offset +
390 CDEBUG(D_BUFFS, "cksum 0x%x (+0x%x) for addr 0x%p+%u len %u offset %u\n",
391 cksum, tmpck, addr, kiov[0].bv_offset, nob, offset);
395 fraglen = min(kiov->bv_len - offset, nob);
397 /* make dang sure we don't send a bogus checksum if somehow we get
398 * an odd length fragment on anything but the last entry in a kiov -
399 * we know from kgnilnd_setup_rdma_buffer that we can't have non
400 * PAGE_SIZE pages in the middle, so if nob < PAGE_SIZE, it is the last one */
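/* (an odd-length fragment anywhere but the last entry would make every
 * following byte pair up differently than it does in the flat buffer, so
 * chained csum_partial() calls would no longer match a checksum computed
 * over contiguous data - hence the odd/vmap path earlier in this function) */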
401 LASSERTF(!(fraglen&1) || (nob < PAGE_SIZE),
402 "odd fraglen %u on nkiov %d, nob %u bv_len %u offset %u kiov 0x%p\n",
403 fraglen, nkiov, nob, kiov->bv_len,
406 addr = (void *)kmap(kiov->bv_page) + kiov->bv_offset +
408 tmpck = _kgnilnd_cksum(cksum, addr, fraglen);
411 "cksum 0x%x (+0x%x) for page 0x%p+%u (0x%p) len %u offset %u\n",
412 cksum, tmpck, kiov->bv_page, kiov->bv_offset,
413 addr, fraglen, offset);
418 kgnilnd_dump_blob(D_BUFFS, "kiov cksum", addr, fraglen);
420 kunmap(kiov->bv_page);
427 /* iov must not run out before end of data */
428 LASSERTF(nob == 0 || nkiov > 0, "nob %u nkiov %u\n", nob, nkiov);
433 retsum = csum_fold(cksum);
435 /* don't use magic 'no checksum' value */
439 CDEBUG(D_BUFFS, "retsum 0x%x from cksum 0x%x\n", retsum, cksum);
445 kgnilnd_init_msg(kgn_msg_t *msg, int type, lnet_nid_t source)
447 msg->gnm_magic = GNILND_MSG_MAGIC;
448 msg->gnm_version = GNILND_MSG_VERSION;
449 msg->gnm_type = type;
450 msg->gnm_payload_len = 0;
451 msg->gnm_srcnid = source;
452 /* gnm_connstamp gets set when FMA is sent */
453 /* gnm_srcnid is set on creation via function argument
454 * The right interface/net and nid is passed in when the message
460 kgnilnd_new_tx_msg(int type, lnet_nid_t source)
462 kgn_tx_t *tx = kgnilnd_alloc_tx();
465 kgnilnd_init_msg(&tx->tx_msg, type, source);
467 CERROR("couldn't allocate new tx type %s!\n",
468 kgnilnd_msgtype2str(type));
475 kgnilnd_nak_rdma(kgn_conn_t *conn, int rx_type, int error, __u64 cookie, lnet_nid_t source) {
481 case GNILND_MSG_GET_REQ:
482 case GNILND_MSG_GET_DONE:
483 nak_type = GNILND_MSG_GET_NAK;
485 case GNILND_MSG_PUT_REQ:
486 case GNILND_MSG_PUT_ACK:
487 case GNILND_MSG_PUT_DONE:
488 nak_type = GNILND_MSG_PUT_NAK;
490 case GNILND_MSG_PUT_REQ_REV:
491 case GNILND_MSG_PUT_DONE_REV:
492 nak_type = GNILND_MSG_PUT_NAK_REV;
494 case GNILND_MSG_GET_REQ_REV:
495 case GNILND_MSG_GET_ACK_REV:
496 case GNILND_MSG_GET_DONE_REV:
497 nak_type = GNILND_MSG_GET_NAK_REV;
500 CERROR("invalid msg type %s (%d)\n",
501 kgnilnd_msgtype2str(rx_type), rx_type);
504 /* only allow NAK on error and truncate to zero */
505 LASSERTF(error <= 0, "error %d conn 0x%p, cookie %llu\n",
506 error, conn, cookie);
508 tx = kgnilnd_new_tx_msg(nak_type, source);
510 CNETERR("can't get TX to NAK RDMA to %s\n",
511 libcfs_nid2str(conn->gnc_peer->gnp_nid));
515 tx->tx_msg.gnm_u.completion.gncm_retval = error;
516 tx->tx_msg.gnm_u.completion.gncm_cookie = cookie;
517 kgnilnd_queue_tx(conn, tx);
521 kgnilnd_setup_immediate_buffer(kgn_tx_t *tx, unsigned int niov,
522 struct bio_vec *kiov,
523 unsigned int offset, unsigned int nob)
525 kgn_msg_t *msg = &tx->tx_msg;
528 /* To help save on MDDs for short messages, we'll vmap a kiov to allow
529 * gni_smsg_send to send that as the payload */
531 LASSERT(tx->tx_buftype == GNILND_BUF_NONE);
534 tx->tx_buffer = NULL;
537 if (niov && niov > (nob >> PAGE_SHIFT))
538 niov = DIV_ROUND_UP(nob + offset + kiov->bv_offset,
541 LASSERTF(niov > 0 && niov < GNILND_MAX_IMMEDIATE/PAGE_SIZE,
542 "bad niov %d msg %p kiov %p offset %d nob%d\n",
543 niov, msg, kiov, offset, nob);
545 while (offset >= kiov->bv_len) {
546 offset -= kiov->bv_len;
551 for (i = 0; i < niov; i++) {
552 /* We can't have a bv_offset on anything but the first
553 * entry, otherwise we'll have a hole at the end of the
554 * mapping as we only map whole pages.
555 * Also, if we have a bv_len < PAGE_SIZE but we need to
556 * map more than bv_len, we will also have a hole at
557 * the end of that page which isn't allowed
559 if ((kiov[i].bv_offset != 0 && i > 0) ||
560 (kiov[i].bv_offset + kiov[i].bv_len != PAGE_SIZE &&
562 CNETERR("Can't make payload contiguous in I/O VM:page %d, offset %u, nob %u, bv_offset %u bv_len %u\n",
563 i, offset, nob, kiov->bv_offset,
567 tx->tx_imm_pages[i] = kiov[i].bv_page;
570 /* hijack tx_phys for the later unmap */
572 /* tx->tx_phys == NULL is the signal for unmap to discern between kmap and vmap */
574 tx->tx_buffer = (void *)kmap(tx->tx_imm_pages[0]) +
575 kiov[0].bv_offset + offset;
576 atomic_inc(&kgnilnd_data.kgn_nkmap_short);
577 GNIDBG_TX(D_NET, tx, "kmapped page for %d bytes for kiov 0x%p, buffer 0x%p",
578 nob, kiov, tx->tx_buffer);
580 tx->tx_phys = vmap(tx->tx_imm_pages, niov, VM_MAP, PAGE_KERNEL);
581 if (tx->tx_phys == NULL) {
582 CNETERR("Couldn't vmap %d frags on %d bytes\n", niov, nob);
586 atomic_inc(&kgnilnd_data.kgn_nvmap_short);
587 /* make sure we take into account the kiov offset as the
588 * start of the buffer
590 tx->tx_buffer = (void *)tx->tx_phys + kiov[0].bv_offset
593 "mapped %d pages for %d bytes from kiov 0x%p to 0x%p, buffer 0x%p",
594 niov, nob, kiov, tx->tx_phys, tx->tx_buffer);
596 tx->tx_buftype = GNILND_BUF_IMMEDIATE_KIOV;
601 /* checksum payload early - it shouldn't be changing after lnd_send */
602 if (*kgnilnd_tunables.kgn_checksum >= 2) {
603 msg->gnm_payload_cksum = kgnilnd_cksum(tx->tx_buffer, nob);
604 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_SMSG_CKSUM2)) {
605 msg->gnm_payload_cksum += 0xe00e;
607 if (*kgnilnd_tunables.kgn_checksum_dump > 1) {
608 kgnilnd_dump_blob(D_BUFFS, "payload checksum",
612 msg->gnm_payload_cksum = 0;
619 kgnilnd_setup_phys_buffer(kgn_tx_t *tx, int nkiov, struct bio_vec *kiov,
620 unsigned int offset, unsigned int nob)
622 gni_mem_segment_t *phys;
624 unsigned int fraglen;
626 GNIDBG_TX(D_NET, tx, "niov %d kiov 0x%p offset %u nob %u", nkiov, kiov, offset, nob);
630 LASSERT(tx->tx_buftype == GNILND_BUF_NONE);
632 /* only allocate this if we are going to use it */
633 tx->tx_phys = kmem_cache_alloc(kgnilnd_data.kgn_tx_phys_cache,
635 if (tx->tx_phys == NULL) {
636 CERROR("failed to allocate tx_phys\n");
641 CDEBUG(D_MALLOC, "slab-alloced 'tx->tx_phys': %lu at %p.\n",
642 GNILND_MAX_IOV * sizeof(gni_mem_segment_t), tx->tx_phys);
644 /* if this loop changes, please change kgnilnd_cksum_kiov
645 * and kgnilnd_setup_immediate_buffer */
647 while (offset >= kiov->bv_len) {
648 offset -= kiov->bv_len;
654 /* at this point, kiov points to the first page that we'll actually map
655 * now that we've advanced into the kiov by offset and dropped any
656 * leading pages that fall entirely within the offset */
657 tx->tx_buftype = GNILND_BUF_PHYS_UNMAPPED;
660 /* bv_offset is start of 'valid' buffer, so index offset past that */
661 tx->tx_buffer = (void *)((unsigned long)(kiov->bv_offset + offset));
664 CDEBUG(D_NET, "tx 0x%p buffer 0x%p map start kiov 0x%p+%u niov %d offset %u\n",
665 tx, tx->tx_buffer, kiov, kiov->bv_offset, nkiov, offset);
668 fraglen = min(kiov->bv_len - offset, nob);
670 /* We can't have a bv_offset on anything but the first entry,
671 * otherwise we'll have a hole at the end of the mapping as we
672 * only map whole pages. Only the first page is allowed to
673 * have an offset - we'll add that into tx->tx_buffer and that
674 * will get used when we map in the segments (see
675 * kgnilnd_map_buffer). Also, if we have a bv_len < PAGE_SIZE
676 * but we need to map more than bv_len, we will also have a
677 * hole at the end of that page which isn't allowed
679 if ((phys != tx->tx_phys) &&
680 ((kiov->bv_offset != 0) ||
681 ((kiov->bv_len < PAGE_SIZE) && (nob > kiov->bv_len)))) {
682 CERROR("Can't make payload contiguous in I/O VM:page %d, offset %u, nob %u, bv_offset %u bv_len %u\n",
683 (int)(phys - tx->tx_phys),
684 offset, nob, kiov->bv_offset, kiov->bv_len);
689 if ((phys - tx->tx_phys) == GNILND_MAX_IOV) {
690 CERROR ("payload too big (%d)\n", (int)(phys - tx->tx_phys));
695 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_PHYS_SETUP)) {
701 "page 0x%p bv_offset %u bv_len %u nob %u nkiov %u offset %u\n",
702 kiov->bv_page, kiov->bv_offset, kiov->bv_len, nob, nkiov,
705 phys->address = page_to_phys(kiov->bv_page);
712 /* iov must not run out before end of data */
713 LASSERTF(nob == 0 || nkiov > 0, "nob %u nkiov %u\n", nob, nkiov);
717 tx->tx_phys_npages = phys - tx->tx_phys;
722 if (tx->tx_phys != NULL) {
723 kmem_cache_free(kgnilnd_data.kgn_tx_phys_cache, tx->tx_phys);
724 CDEBUG(D_MALLOC, "slab-freed 'tx_phys': %lu at %p.\n",
725 sizeof(*tx->tx_phys), tx->tx_phys);
732 kgnilnd_setup_rdma_buffer(kgn_tx_t *tx, unsigned int niov,
733 struct bio_vec *kiov,
734 unsigned int offset, unsigned int nob)
736 return kgnilnd_setup_phys_buffer(tx, niov, kiov, offset, nob);
739 /* kgnilnd_parse_lnet_rdma()
740 * lntmsg - message passed in from lnet.
741 * niov, kiov, offset - see lnd_t in lib-types.h for descriptions.
742 * nob - actual number of bytes in this message.
743 * put_len - It is possible for PUTs to have a different length than the
744 * length stored in lntmsg->msg_len since LNET can adjust this
745 * length based on its buffer size and offset.
746 * lnet_try_match_md() sets the mlength that we use to do the RDMA
750 kgnilnd_parse_lnet_rdma(struct lnet_msg *lntmsg, unsigned int *niov,
751 unsigned int *offset, unsigned int *nob,
752 struct bio_vec **kiov, int put_len)
754 /* GETs are weird, see kgnilnd_send */
755 if (lntmsg->msg_type == LNET_MSG_GET) {
756 if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0) {
759 *kiov = lntmsg->msg_md->md_kiov;
761 *niov = lntmsg->msg_md->md_niov;
762 *nob = lntmsg->msg_md->md_length;
765 *kiov = lntmsg->msg_kiov;
766 *niov = lntmsg->msg_niov;
768 *offset = lntmsg->msg_offset;
773 kgnilnd_compute_rdma_cksum(kgn_tx_t *tx, int put_len)
775 unsigned int niov, offset, nob;
776 struct bio_vec *kiov;
777 struct lnet_msg *lntmsg = tx->tx_lntmsg[0];
778 int dump_cksum = (*kgnilnd_tunables.kgn_checksum_dump > 1);
780 GNITX_ASSERTF(tx, ((tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE) ||
781 (tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE) ||
782 (tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE_REV) ||
783 (tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE_REV) ||
784 (tx->tx_msg.gnm_type == GNILND_MSG_GET_ACK_REV) ||
785 (tx->tx_msg.gnm_type == GNILND_MSG_PUT_REQ_REV)),
786 "bad type %s", kgnilnd_msgtype2str(tx->tx_msg.gnm_type));
788 if ((tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE_REV) ||
789 (tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE_REV)) {
790 tx->tx_msg.gnm_payload_cksum = 0;
793 if (*kgnilnd_tunables.kgn_checksum < 3) {
794 tx->tx_msg.gnm_payload_cksum = 0;
798 GNITX_ASSERTF(tx, lntmsg, "no LNet message!", NULL);
800 kgnilnd_parse_lnet_rdma(lntmsg, &niov, &offset, &nob, &kiov,
804 tx->tx_msg.gnm_payload_cksum = kgnilnd_cksum_kiov(niov, kiov, offset, nob, dump_cksum);
806 tx->tx_msg.gnm_payload_cksum = kgnilnd_cksum(tx->tx_buffer, nob);
808 kgnilnd_dump_blob(D_BUFFS, "peer RDMA payload", tx->tx_buffer, nob);
812 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_SMSG_CKSUM3)) {
813 tx->tx_msg.gnm_payload_cksum += 0xd00d;
817 /* kgnilnd_verify_rdma_cksum()
818 * tx - PUT_DONE/GET_DONE matched tx.
819 * rx_cksum - received checksum to compare against.
820 * put_len - see kgnilnd_parse_lnet_rdma comments.
823 kgnilnd_verify_rdma_cksum(kgn_tx_t *tx, __u16 rx_cksum, int put_len)
827 unsigned int niov, offset, nob;
828 struct bio_vec *kiov;
829 struct lnet_msg *lntmsg = tx->tx_lntmsg[0];
830 int dump_on_err = *kgnilnd_tunables.kgn_checksum_dump;
832 /* we can only match certain requests */
833 GNITX_ASSERTF(tx, ((tx->tx_msg.gnm_type == GNILND_MSG_GET_REQ) ||
834 (tx->tx_msg.gnm_type == GNILND_MSG_PUT_ACK) ||
835 (tx->tx_msg.gnm_type == GNILND_MSG_PUT_REQ_REV) ||
836 (tx->tx_msg.gnm_type == GNILND_MSG_GET_ACK_REV) ||
837 (tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE_REV) ||
838 (tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE_REV)),
839 "bad type %s", kgnilnd_msgtype2str(tx->tx_msg.gnm_type));
841 if ((tx->tx_msg.gnm_type == GNILND_MSG_PUT_REQ_REV) ||
842 (tx->tx_msg.gnm_type == GNILND_MSG_GET_ACK_REV)) {
847 if (*kgnilnd_tunables.kgn_checksum >= 3) {
848 GNIDBG_MSG(D_WARNING, &tx->tx_msg,
849 "no RDMA payload checksum when enabled");
854 GNITX_ASSERTF(tx, lntmsg, "no LNet message!", NULL);
856 kgnilnd_parse_lnet_rdma(lntmsg, &niov, &offset, &nob, &kiov, put_len);
859 cksum = kgnilnd_cksum_kiov(niov, kiov, offset, nob, 0);
861 cksum = kgnilnd_cksum(tx->tx_buffer, nob);
864 if (cksum != rx_cksum) {
865 GNIDBG_MSG(D_NETERROR, &tx->tx_msg,
866 "Bad RDMA payload checksum (%x expected %x); "
867 "kiov 0x%p niov %d nob %u offset %u",
868 cksum, rx_cksum, kiov, niov, nob, offset);
869 switch (dump_on_err) {
872 kgnilnd_cksum_kiov(niov, kiov, offset, nob, 1);
874 kgnilnd_dump_blob(D_BUFFS, "RDMA payload",
879 libcfs_debug_dumplog();
885 /* kgnilnd_check_fma_rx will close conn, kill tx with error */
891 kgnilnd_mem_add_map_list(kgn_device_t *dev, kgn_tx_t *tx)
895 GNITX_ASSERTF(tx, list_empty(&tx->tx_map_list),
896 "already mapped!", NULL);
898 spin_lock(&dev->gnd_map_lock);
899 switch (tx->tx_buftype) {
901 GNIDBG_TX(D_EMERG, tx,
902 "SOFTWARE BUG: invalid mapping %d", tx->tx_buftype);
903 spin_unlock(&dev->gnd_map_lock);
907 case GNILND_BUF_PHYS_MAPPED:
908 bytes = tx->tx_phys_npages * PAGE_SIZE;
909 dev->gnd_map_nphys++;
910 dev->gnd_map_physnop += tx->tx_phys_npages;
914 if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_ACK ||
915 tx->tx_msg.gnm_type == GNILND_MSG_GET_REQ) {
916 atomic64_add(bytes, &dev->gnd_rdmaq_bytes_out);
917 GNIDBG_TX(D_NETTRACE, tx, "rdma ++ %d to %lld",
918 bytes, atomic64_read(&dev->gnd_rdmaq_bytes_out));
921 atomic_inc(&dev->gnd_n_mdd);
922 atomic64_add(bytes, &dev->gnd_nbytes_map);
924 /* clear retrans to prevent any SMSG goofiness as that code uses the same counter */
927 /* we only get here in the valid cases */
928 list_add_tail(&tx->tx_map_list, &dev->gnd_map_list);
929 dev->gnd_map_version++;
930 spin_unlock(&dev->gnd_map_lock);
934 kgnilnd_mem_del_map_list(kgn_device_t *dev, kgn_tx_t *tx)
938 GNITX_ASSERTF(tx, !list_empty(&tx->tx_map_list),
939 "not mapped!", NULL);
940 spin_lock(&dev->gnd_map_lock);
942 switch (tx->tx_buftype) {
944 GNIDBG_TX(D_EMERG, tx,
945 "SOFTWARE BUG: invalid mapping %d", tx->tx_buftype);
946 spin_unlock(&dev->gnd_map_lock);
950 case GNILND_BUF_PHYS_UNMAPPED:
951 bytes = tx->tx_phys_npages * PAGE_SIZE;
952 dev->gnd_map_nphys--;
953 dev->gnd_map_physnop -= tx->tx_phys_npages;
957 if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_ACK ||
958 tx->tx_msg.gnm_type == GNILND_MSG_GET_REQ) {
959 atomic64_sub(bytes, &dev->gnd_rdmaq_bytes_out);
960 LASSERTF(atomic64_read(&dev->gnd_rdmaq_bytes_out) >= 0,
961 "bytes_out negative! %lld\n",
962 (s64)atomic64_read(&dev->gnd_rdmaq_bytes_out));
963 GNIDBG_TX(D_NETTRACE, tx, "rdma -- %d to %lld",
964 bytes, (s64)atomic64_read(&dev->gnd_rdmaq_bytes_out));
967 atomic_dec(&dev->gnd_n_mdd);
968 atomic64_sub(bytes, &dev->gnd_nbytes_map);
970 /* we only get here in the valid cases */
971 list_del_init(&tx->tx_map_list);
972 dev->gnd_map_version++;
973 spin_unlock(&dev->gnd_map_lock);
977 kgnilnd_map_buffer(kgn_tx_t *tx)
979 kgn_conn_t *conn = tx->tx_conn;
980 kgn_device_t *dev = conn->gnc_device;
981 __u32 flags = GNI_MEM_READWRITE;
984 /* The kgnilnd_mem_register(_segments) Gemini Driver functions can
985 * be called concurrently as there are internal locks that protect
986 * any data structures or HW resources. We just need to ensure
987 * that our concurrency doesn't result in the kgn_device_t
988 * getting nuked while we are in here */
990 LASSERTF(conn != NULL, "tx %p with NULL conn, someone forgot"
991 " to set tx_conn before calling %s\n", tx, __FUNCTION__);
993 if (unlikely(CFS_FAIL_CHECK(CFS_FAIL_GNI_MAP_TX)))
996 if (*kgnilnd_tunables.kgn_bte_relaxed_ordering) {
997 flags |= GNI_MEM_RELAXED_PI_ORDERING;
1000 switch (tx->tx_buftype) {
1004 case GNILND_BUF_NONE:
1005 case GNILND_BUF_IMMEDIATE:
1006 case GNILND_BUF_IMMEDIATE_KIOV:
1007 case GNILND_BUF_PHYS_MAPPED:
1010 case GNILND_BUF_PHYS_UNMAPPED:
1011 GNITX_ASSERTF(tx, tx->tx_phys != NULL, "physical buffer not there!", NULL);
1012 rrc = kgnilnd_mem_register_segments(dev->gnd_handle,
1013 tx->tx_phys, tx->tx_phys_npages, NULL,
1014 GNI_MEM_PHYS_SEGMENTS | flags,
1016 /* could race with other uses of the map counts, but this is ok
1017 * - this needs to turn into a non-fatal error soon to allow
1018 * GART resource, etc starvation handling */
1019 if (rrc != GNI_RC_SUCCESS) {
1020 GNIDBG_TX(D_NET, tx,
1021 "Can't map %d pages: dev %d phys %u pp %u",
1022 tx->tx_phys_npages, dev->gnd_id,
1023 dev->gnd_map_nphys, dev->gnd_map_physnop);
1024 RETURN(rrc == GNI_RC_ERROR_RESOURCE ? -ENOMEM : -EINVAL);
1027 tx->tx_buftype = GNILND_BUF_PHYS_MAPPED;
1028 kgnilnd_mem_add_map_list(dev, tx);
1034 kgnilnd_add_purgatory_tx(kgn_tx_t *tx)
1036 kgn_conn_t *conn = tx->tx_conn;
1037 kgn_mdd_purgatory_t *gmp;
1039 LIBCFS_ALLOC(gmp, sizeof(*gmp));
1040 LASSERTF(gmp != NULL, "couldn't allocate MDD purgatory member;"
1041 " asserting to avoid data corruption\n");
1042 if (tx->tx_buffer_copy)
1043 gmp->gmp_map_key = tx->tx_buffer_copy_map_key;
1045 gmp->gmp_map_key = tx->tx_map_key;
1047 atomic_inc(&conn->gnc_device->gnd_n_mdd_held);
1049 /* ensure that we don't have a blank purgatory - indicating the
1050 * conn is not already on purgatory lists - we'd never recover these
1051 * MDD if that were the case */
1052 GNITX_ASSERTF(tx, conn->gnc_in_purgatory,
1053 "conn 0x%p->%s with NULL purgatory",
1054 conn, libcfs_nid2str(conn->gnc_peer->gnp_nid));
1056 /* link 'er up! - only place we really need to lock for
1057 * concurrent access */
1058 spin_lock(&conn->gnc_list_lock);
1059 list_add_tail(&gmp->gmp_list, &conn->gnc_mdd_list);
1060 spin_unlock(&conn->gnc_list_lock);
1064 kgnilnd_unmap_buffer(kgn_tx_t *tx, int error)
1068 int hold_timeout = 0;
1070 /* code below relies on +1 relationship ... */
1071 BUILD_BUG_ON(GNILND_BUF_PHYS_MAPPED !=
1072 (GNILND_BUF_PHYS_UNMAPPED + 1));
1074 switch (tx->tx_buftype) {
1078 case GNILND_BUF_NONE:
1079 case GNILND_BUF_IMMEDIATE:
1080 case GNILND_BUF_PHYS_UNMAPPED:
1082 case GNILND_BUF_IMMEDIATE_KIOV:
1083 if (tx->tx_phys != NULL) {
1084 vunmap(tx->tx_phys);
1085 } else if (tx->tx_phys == NULL && tx->tx_buffer != NULL) {
1086 kunmap(tx->tx_imm_pages[0]);
1088 /* clear to prevent kgnilnd_free_tx from thinking
1089 * this is an RDMA descriptor */
1093 case GNILND_BUF_PHYS_MAPPED:
1094 LASSERT(tx->tx_conn != NULL);
1096 dev = tx->tx_conn->gnc_device;
1098 /* only want to hold if we are closing conn without
1099 * verified peer notification - the theory is that
1100 * a TX error can be communicated in all other cases */
1101 if (tx->tx_conn->gnc_state != GNILND_CONN_ESTABLISHED &&
1102 error != -GNILND_NOPURG &&
1103 kgnilnd_check_purgatory_conn(tx->tx_conn)) {
1104 kgnilnd_add_purgatory_tx(tx);
1106 /* The timeout we give to kgni is a deadman stop only.
1107 * We set it high to ensure we don't have the kgni timer
1108 * fire before ours fires _and_ is handled */
1109 hold_timeout = GNILND_TIMEOUT2DEADMAN;
1111 GNIDBG_TX(D_NET, tx,
1112 "dev %p delaying MDD release for %dms key %#llx.%#llx",
1113 tx->tx_conn->gnc_device, hold_timeout,
1114 tx->tx_map_key.qword1, tx->tx_map_key.qword2);
1116 if (tx->tx_buffer_copy != NULL) {
1117 rrc = kgnilnd_mem_deregister(dev->gnd_handle, &tx->tx_buffer_copy_map_key, hold_timeout);
1118 LASSERTF(rrc == GNI_RC_SUCCESS, "rrc %d\n", rrc);
1119 rrc = kgnilnd_mem_deregister(dev->gnd_handle, &tx->tx_map_key, 0);
1120 LASSERTF(rrc == GNI_RC_SUCCESS, "rrc %d\n", rrc);
1122 rrc = kgnilnd_mem_deregister(dev->gnd_handle, &tx->tx_map_key, hold_timeout);
1123 LASSERTF(rrc == GNI_RC_SUCCESS, "rrc %d\n", rrc);
1127 kgnilnd_mem_del_map_list(dev, tx);
1133 kgnilnd_tx_done(kgn_tx_t *tx, int completion)
1135 struct lnet_msg *lntmsg0, *lntmsg1;
1136 int status0, status1;
1137 struct lnet_ni *ni = NULL;
1138 kgn_conn_t *conn = tx->tx_conn;
1140 LASSERT(!in_interrupt());
1142 lntmsg0 = tx->tx_lntmsg[0]; tx->tx_lntmsg[0] = NULL;
1143 lntmsg1 = tx->tx_lntmsg[1]; tx->tx_lntmsg[1] = NULL;
1146 !(tx->tx_state & GNILND_TX_QUIET_ERROR) &&
1147 !kgnilnd_conn_clean_errno(completion)) {
1148 GNIDBG_TOMSG(D_NETERROR, &tx->tx_msg,
1149 "error %d on tx 0x%p->%s id %u/%d state %s age %ds",
1150 completion, tx, conn ?
1151 libcfs_nid2str(conn->gnc_peer->gnp_nid) : "<?>",
1152 tx->tx_id.txe_smsg_id, tx->tx_id.txe_idx,
1153 kgnilnd_tx_state2str(tx->tx_list_state),
1154 cfs_duration_sec((unsigned long)jiffies - tx->tx_qtime));
1157 /* The error codes determine if we hold onto the MDD */
1158 kgnilnd_unmap_buffer(tx, completion);
1160 /* we have to deliver a reply on lntmsg[1] for the GET, so make sure
1161 * we play nice with the error codes to avoid delivering a failed
1162 * REQUEST and then a REPLY event as well */
1164 /* return -EIO to lnet - it is the magic value for failed sends */
1165 if (tx->tx_msg.gnm_type == GNILND_MSG_GET_REQ) {
1167 status1 = completion;
1169 status0 = status1 = completion;
1172 tx->tx_buftype = GNILND_BUF_NONE;
1173 tx->tx_msg.gnm_type = GNILND_MSG_NONE;
1175 /* lnet_finalize doesn't do anything with the *ni, so it is ok for us to
1176 * leave it NULL when we are a tx without a conn */
1178 ni = conn->gnc_peer->gnp_net->gnn_ni;
1180 spin_lock(&conn->gnc_tx_lock);
1182 LASSERTF(test_and_clear_bit(tx->tx_id.txe_idx,
1183 (volatile unsigned long *)&conn->gnc_tx_bits),
1184 "conn %p tx %p bit %d already cleared\n",
1185 conn, tx, tx->tx_id.txe_idx);
1187 LASSERTF(conn->gnc_tx_ref_table[tx->tx_id.txe_idx] != NULL,
1188 "msg_id %d already NULL\n", tx->tx_id.txe_idx);
1190 conn->gnc_tx_ref_table[tx->tx_id.txe_idx] = NULL;
1191 spin_unlock(&conn->gnc_tx_lock);
1194 kgnilnd_free_tx(tx);
1196 /* finalize AFTER freeing lnet msgs */
1198 /* warning - we should hold no locks here - calling lnet_finalize
1199 * could free up lnet credits, resulting in a call chain back into
1200 * the LND via kgnilnd_send and friends */
1202 lnet_finalize(lntmsg0, status0);
1204 if (lntmsg1 != NULL) {
1205 lnet_finalize(lntmsg1, status1);
1210 kgnilnd_txlist_done(struct list_head *txlist, int error)
1213 int err_printed = 0;
1215 if (list_empty(txlist))
1218 list_for_each_entry_safe(tx, txn, txlist, tx_list) {
1219 /* only print the first error */
1221 tx->tx_state |= GNILND_TX_QUIET_ERROR;
1222 list_del_init(&tx->tx_list);
1223 kgnilnd_tx_done(tx, error);
1228 kgnilnd_set_tx_id(kgn_tx_t *tx, kgn_conn_t *conn)
1232 spin_lock(&conn->gnc_tx_lock);
1234 /* ID zero is NOT ALLOWED!!! */
1237 id = find_next_zero_bit((unsigned long *)&conn->gnc_tx_bits,
1238 GNILND_MAX_MSG_ID, conn->gnc_next_tx);
1239 if (id == GNILND_MAX_MSG_ID) {
1240 if (conn->gnc_next_tx != 1) {
1241 /* we only searched from next_tx to end and didn't find
1242 * one, so search again from start */
1243 conn->gnc_next_tx = 1;
1246 /* couldn't find one! */
1247 spin_unlock(&conn->gnc_tx_lock);
1251 /* bump next_tx to prevent immediate reuse */
1252 conn->gnc_next_tx = id + 1;
1254 set_bit(id, (volatile unsigned long *)&conn->gnc_tx_bits);
1255 LASSERTF(conn->gnc_tx_ref_table[id] == NULL,
1256 "tx 0x%p already at id %d\n",
1257 conn->gnc_tx_ref_table[id], id);
1259 /* delay these until we have a valid ID - prevents bad clear of the bit
1260 * in kgnilnd_tx_done */
1262 tx->tx_id.txe_cqid = conn->gnc_cqid;
1264 tx->tx_id.txe_idx = id;
1265 conn->gnc_tx_ref_table[id] = tx;
1267 /* Using jiffies to help differentiate against TX reuse - with
1268 * the usual minimum of a 250HZ clock, we wrap jiffies on the same TX
1269 * if we are sending to the same node faster than 256000/sec.
1270 * To help guard against this, we OR in the tx_seq - that is 32 bits */
1272 tx->tx_id.txe_chips = (__u32)(jiffies | atomic_read(&conn->gnc_tx_seq));
1274 GNIDBG_TX(D_NET, tx, "set cookie/id/bits", NULL);
1276 spin_unlock(&conn->gnc_tx_lock);
1281 kgnilnd_tx_log_retrans(kgn_conn_t *conn, kgn_tx_t *tx)
1285 log_retrans = ((tx->tx_retrans < 25) || ((tx->tx_retrans % 25) == 0));
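/* i.e. log every retransmission for the first 25 attempts, then only
 * every 25th one after that, so a long retransmit storm stays readable
 * in the debug log */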
1287 /* we don't care about TX timeout - it could be that the network is slower
1288 * or throttled. We'll keep retransmitting - so if the network is so slow
1289 * that we fill up our mailbox, we'll keep trying to resend that msg
1290 * until we exceed the max_retrans _or_ gnc_last_rx expires, indicating
1291 * that the peer hasn't sent us any traffic in return */
1293 /* some reasonable throttling of the debug message */
1295 unsigned long now = jiffies;
1296 /* XXX Nic: Mystical TX debug here... */
1297 /* We expect retransmissions so only log when D_NET is enabled */
1298 GNIDBG_SMSG_CREDS(D_NET, conn);
1299 GNIDBG_TOMSG(D_NET, &tx->tx_msg,
1300 "NOT_DONE on conn 0x%p->%s id %x retrans %d wait %dus"
1301 " last_msg %uus/%uus last_cq %uus/%uus",
1302 conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
1303 tx->tx_id, tx->tx_retrans,
1304 jiffies_to_usecs(now - tx->tx_cred_wait),
1305 jiffies_to_usecs(now - conn->gnc_last_tx),
1306 jiffies_to_usecs(now - conn->gnc_last_rx),
1307 jiffies_to_usecs(now - conn->gnc_last_tx_cq),
1308 jiffies_to_usecs(now - conn->gnc_last_rx_cq));
1312 /* caller must be holding gnd_cq_mutex and must not unlock it afterwards; we drop it
1313 * here ourselves to avoid bad ordering with state_lock */
1316 kgnilnd_sendmsg_nolock(kgn_tx_t *tx, void *immediate, unsigned int immediatenob,
1317 spinlock_t *state_lock, kgn_tx_list_state_t state)
1319 kgn_conn_t *conn = tx->tx_conn;
1320 kgn_msg_t *msg = &tx->tx_msg;
1322 unsigned long newest_last_rx, timeout;
1325 LASSERTF((msg->gnm_type == GNILND_MSG_IMMEDIATE) ?
1326 immediatenob <= *kgnilnd_tunables.kgn_max_immediate :
1328 "msg 0x%p type %d wrong payload size %d\n",
1329 msg, msg->gnm_type, immediatenob);
1331 /* make sure we catch all the cases where we'd send on a dirty old mbox
1332 * but allow the case of sending CLOSE. Since this check is within the CQ
1333 * mutex barrier and the close message is only sent through
1334 * kgnilnd_send_conn_close the last message out the door will be the
1337 if (atomic_read(&conn->gnc_peer->gnp_dirty_eps) != 0 && msg->gnm_type != GNILND_MSG_CLOSE) {
1338 kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
1339 kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
1340 /* Return -ETIME, we are closing the connection already so we don't want to
1341 * have this tx hit the wire. The tx will be killed by the calling function.
1342 * Once the EP is marked dirty the close message will be the last
1343 * thing to hit the wire */
1348 timeout = cfs_time_seconds(conn->gnc_timeout);
1350 newest_last_rx = GNILND_LASTRX(conn);
1352 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_SEND_TIMEOUT)) {
1353 now = now + (GNILND_TIMEOUTRX(timeout) * 2);
1356 if (time_after_eq(now, newest_last_rx + GNILND_TIMEOUTRX(timeout))) {
1357 GNIDBG_CONN(D_NETERROR|D_CONSOLE, conn,
1358 "Cant send to %s after timeout lapse of %lu; TO %lu\n",
1359 libcfs_nid2str(conn->gnc_peer->gnp_nid),
1360 cfs_duration_sec(now - newest_last_rx),
1361 cfs_duration_sec(GNILND_TIMEOUTRX(timeout)));
1362 kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
1363 kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
1367 GNITX_ASSERTF(tx, (conn != NULL) && (tx->tx_id.txe_idx != 0), "tx id unset!", NULL);
1368 /* msg->gnm_srcnid is set when the message is initialized by whatever function is
1369 * creating the message. This allows the message to contain the correct LNET NID/NET needed
1370 * instead of the one that the peer/conn uses for sending the data.
1372 msg->gnm_connstamp = conn->gnc_my_connstamp;
1373 msg->gnm_payload_len = immediatenob;
1374 msg->gnm_seq = atomic_read(&conn->gnc_tx_seq);
1376 /* always init here - kgn_checksum is a /sys module tunable
1377 * and can be flipped at any point, even between msg init and sending */
1379 if (*kgnilnd_tunables.kgn_checksum) {
1380 /* We must set it here and not in kgnilnd_init_msg,
1381 * as we could resend this msg many times
1382 * (NOT_DONE from gni_smsg_send below) and wouldn't pass
1383 * through init_msg again */
1384 msg->gnm_cksum = kgnilnd_cksum(msg, sizeof(kgn_msg_t));
1385 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_SMSG_CKSUM1)) {
1386 msg->gnm_cksum += 0xf00f;
1390 GNIDBG_TOMSG(D_NET, msg, "tx 0x%p conn 0x%p->%s sending SMSG sz %u id %x/%d [%p for %u]",
1391 tx, conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
1392 sizeof(kgn_msg_t), tx->tx_id.txe_smsg_id,
1393 tx->tx_id.txe_idx, immediate, immediatenob);
1395 if (unlikely(tx->tx_state & GNILND_TX_FAIL_SMSG)) {
1396 rrc = cfs_fail_val ? cfs_fail_val : GNI_RC_NOT_DONE;
1398 rrc = kgnilnd_smsg_send(conn->gnc_ephandle,
1399 msg, sizeof(*msg), immediate,
1401 tx->tx_id.txe_smsg_id);
1405 case GNI_RC_SUCCESS:
1406 atomic_inc(&conn->gnc_tx_seq);
1407 conn->gnc_last_tx = jiffies;
1408 /* no locking here as LIVE isn't a list */
1409 kgnilnd_tx_add_state_locked(tx, NULL, conn, GNILND_TX_LIVE_FMAQ, 1);
1411 /* this needs to be checked under lock as it might be freed from a completion
1414 if (msg->gnm_type == GNILND_MSG_NOOP) {
1415 set_mb(conn->gnc_last_noop_sent, jiffies);
1418 /* serialize with seeing CQ events for completion on this, as well as
1420 kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
1421 kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
1423 atomic_inc(&conn->gnc_device->gnd_short_ntx);
1424 atomic64_add(immediatenob, &conn->gnc_device->gnd_short_txbytes);
1425 kgnilnd_peer_alive(conn->gnc_peer);
1426 GNIDBG_SMSG_CREDS(D_NET, conn);
1429 case GNI_RC_NOT_DONE:
1430 /* Jshimek: We can get GNI_RC_NOT_DONE for 3 reasons currently
1431 * 1: out of mbox credits
1432 * 2: out of mbox payload credits
1433 * 3: On Aries out of dla credits
1435 kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
1436 kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
1437 /* We'll handle this error inline - makes the calling logic much more
1440 /* If no lock, caller doesn't want us to retry */
1441 if (state_lock == NULL) {
1445 /* I need kgni credits to send this. Replace tx at the head of the
1446 * fmaq and I'll get rescheduled when credits appear. Reset the tx_state
1447 * and bump retrans counts since we are requeueing the tx.
1451 conn->gnc_tx_retrans++;
1453 kgnilnd_tx_log_retrans(conn, tx);
1454 /* add to head of list for the state and retries */
1455 spin_lock(state_lock);
1456 kgnilnd_tx_add_state_locked(tx, conn->gnc_peer, conn, state, 0);
1457 spin_unlock(state_lock);
1459 /* We only reschedule for a certain number of retries, then
1460 * we will wait for the CQ events indicating a release of SMSG
1462 if (tx->tx_retrans < *kgnilnd_tunables.kgn_max_retransmits) {
1463 kgnilnd_schedule_conn(conn);
1466 /* A CQ event coming in signifies either a TX completion or
1467 * an RX receive. Either of these *could* free up credits
1468 * in the SMSG mbox and we should try sending again */
1469 GNIDBG_TX(D_NET, tx, "waiting for CQID %u event to resend",
1470 tx->tx_conn->gnc_cqid);
1471 kgnilnd_schedule_delay_conn(conn);
1472 /* use +ve return code to let upper layers know they
1473 * should stop looping on sends */
1477 /* handle bad retcode gracefully */
1478 kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
1479 kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
1484 /* kgnilnd_sendmsg has hard wait on gnd_cq_mutex */
1486 kgnilnd_sendmsg(kgn_tx_t *tx, void *immediate, unsigned int immediatenob,
1487 spinlock_t *state_lock, kgn_tx_list_state_t state)
1489 kgn_device_t *dev = tx->tx_conn->gnc_device;
1490 unsigned long timestamp;
1493 timestamp = jiffies;
1494 kgnilnd_gl_mutex_lock(&dev->gnd_cq_mutex);
1495 kgnilnd_conn_mutex_lock(&tx->tx_conn->gnc_smsg_mutex);
1496 /* delay in jiffies - we are really concerned only with things that
1497 * result in a schedule() or really holding this off for long times.
1498 * NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
1499 dev->gnd_mutex_delay += (long) jiffies - timestamp;
1501 rc = kgnilnd_sendmsg_nolock(tx, immediate, immediatenob, state_lock, state);
1507 /* returns -EAGAIN for lock miss, anything else < 0 is hard error, >=0 for success */
1509 kgnilnd_sendmsg_trylock(kgn_tx_t *tx, void *immediate, unsigned int immediatenob,
1510 spinlock_t *state_lock, kgn_tx_list_state_t state)
1512 kgn_conn_t *conn = tx->tx_conn;
1513 kgn_device_t *dev = conn->gnc_device;
1514 unsigned long timestamp;
1517 timestamp = jiffies;
1519 /* technically we are doing bad things with the read_lock on the peer_conn
1520 * table, but we shouldn't be sleeping inside here - and we don't sleep/block
1521 * for the mutex. I bet lockdep is gonna flag this one though... */
1523 /* there are a few cases where we don't want the immediate send - like
1524 * when we are in the scheduler thread and it'd harm the latency of
1525 * getting messages up to LNet */
1527 /* rmb for gnd_ready */
1529 if (conn->gnc_device->gnd_ready == GNILND_DEV_LOOP) {
1531 atomic_inc(&conn->gnc_device->gnd_fast_block);
1532 } else if (unlikely(kgnilnd_data.kgn_quiesce_trigger)) {
1533 /* don't hit HW during quiesce */
1535 } else if (unlikely(atomic_read(&conn->gnc_peer->gnp_dirty_eps))) {
1536 /* don't hit HW if stale EPs and conns left to close */
1539 atomic_inc(&conn->gnc_device->gnd_fast_try);
1540 rc = kgnilnd_trylock(&conn->gnc_device->gnd_cq_mutex,
1541 &conn->gnc_smsg_mutex);
1546 /* we got the mutex and weren't blocked */
1548 /* delay in jiffies - we are really concerned only with things that
1549 * result in a schedule() or really holding this off for long times.
1550 * NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
1551 dev->gnd_mutex_delay += (long) jiffies - timestamp;
1553 atomic_inc(&conn->gnc_device->gnd_fast_ok);
1554 tx->tx_qtime = jiffies;
1555 tx->tx_state = GNILND_TX_WAITING_COMPLETION;
1556 rc = kgnilnd_sendmsg_nolock(tx, tx->tx_buffer, tx->tx_nob, &conn->gnc_list_lock, GNILND_TX_FMAQ);
1557 /* _nolock unlocks the mutex for us */
1563 /* lets us know if we can push this RDMA through now */
1565 kgnilnd_auth_rdma_bytes(kgn_device_t *dev, kgn_tx_t *tx)
1569 bytes_left = atomic64_sub_return(tx->tx_nob, &dev->gnd_rdmaq_bytes_ok);
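/* optimistic accounting: debit the remaining RDMA byte allowance up front
 * and credit it back below if we overdrew, so concurrent callers can never
 * double-spend the same allowance */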
1571 if (bytes_left < 0) {
1572 atomic64_add(tx->tx_nob, &dev->gnd_rdmaq_bytes_ok);
1573 atomic_inc(&dev->gnd_rdmaq_nstalls);
1576 CDEBUG(D_NET, "no bytes to send, turning on timer for %lu\n",
1577 dev->gnd_rdmaq_deadline);
1578 mod_timer(&dev->gnd_rdmaq_timer, dev->gnd_rdmaq_deadline);
1579 /* we never del this timer - at worst it schedules us.. */
1586 /* this adds a TX to the queue pending throttling authorization before
1587 * we allow our remote peer to launch a PUT at us */
1589 kgnilnd_queue_rdma(kgn_conn_t *conn, kgn_tx_t *tx)
1593 /* we cannot go into send_mapped_tx from here as we are holding locks
1594 * and mem registration might end up allocating memory in kgni.
1595 * That said, we'll push this as far as we can into the queue process */
1596 rc = kgnilnd_auth_rdma_bytes(conn->gnc_device, tx);
1599 spin_lock(&conn->gnc_device->gnd_rdmaq_lock);
1600 kgnilnd_tx_add_state_locked(tx, NULL, conn, GNILND_TX_RDMAQ, 0);
1601 /* lets us know how delayed RDMA is */
1602 tx->tx_qtime = jiffies;
1603 spin_unlock(&conn->gnc_device->gnd_rdmaq_lock);
1605 /* we have RDMA authorized, now it just needs an MDD and to hit the wire */
1606 spin_lock(&tx->tx_conn->gnc_device->gnd_lock);
1607 kgnilnd_tx_add_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_MAPQ, 0);
1608 /* lets us know how delayed mapping is */
1609 tx->tx_qtime = jiffies;
1610 spin_unlock(&tx->tx_conn->gnc_device->gnd_lock);
1613 /* make sure we wake up sched to run this */
1614 kgnilnd_schedule_device(tx->tx_conn->gnc_device);
1617 /* push TX through state machine */
1619 kgnilnd_queue_tx(kgn_conn_t *conn, kgn_tx_t *tx)
1624 /* set the tx_id here, we delay it until we have an actual conn
1626 * in some cases, the tx_id is already set to provide for things
1627 * like RDMA completion cookies, etc */
1628 if (tx->tx_id.txe_idx == 0) {
1629 rc = kgnilnd_set_tx_id(tx, conn);
1631 kgnilnd_tx_done(tx, rc);
1636 CDEBUG(D_NET, "%s to conn %p for %s\n", kgnilnd_msgtype2str(tx->tx_msg.gnm_type),
1637 conn, libcfs_nid2str(conn->gnc_peer->gnp_nid));
1639 /* Only let NOOPs be sent while fail loc is set, otherwise kill the tx.
1641 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_ONLY_NOOP) && (tx->tx_msg.gnm_type != GNILND_MSG_NOOP)) {
1642 kgnilnd_tx_done(tx, rc);
1646 switch (tx->tx_msg.gnm_type) {
1647 case GNILND_MSG_PUT_ACK:
1648 case GNILND_MSG_GET_REQ:
1649 case GNILND_MSG_PUT_REQ_REV:
1650 case GNILND_MSG_GET_ACK_REV:
1651 /* hijacking time! If this message will authorize our peer to
1652 * send its dirty little bytes in an RDMA, we need to get permission */
1653 kgnilnd_queue_rdma(conn, tx);
1655 case GNILND_MSG_IMMEDIATE:
1656 /* try to send right now, can help reduce latency */
1657 rc = kgnilnd_sendmsg_trylock(tx, tx->tx_buffer, tx->tx_nob, &conn->gnc_list_lock, GNILND_TX_FMAQ);
1660 /* it was sent, break out of switch to avoid default case of queueing */
1663 /* needs to queue to try again, so... */
1664 /* fall through... */
1665 case GNILND_MSG_NOOP:
1666 /* Just make sure this goes out first for this conn */
1668 /* fall through... */
1670 spin_lock(&conn->gnc_list_lock);
1671 kgnilnd_tx_add_state_locked(tx, conn->gnc_peer, conn, GNILND_TX_FMAQ, add_tail);
1672 tx->tx_qtime = jiffies;
1673 spin_unlock(&conn->gnc_list_lock);
1674 kgnilnd_schedule_conn(conn);
1679 kgnilnd_launch_tx(kgn_tx_t *tx, kgn_net_t *net, struct lnet_processid *target)
1682 kgn_peer_t *new_peer = NULL;
1683 kgn_conn_t *conn = NULL;
1689 /* If I get here, I've committed to send, so I complete the tx with
1690 * failure on any problems */
1692 GNITX_ASSERTF(tx, tx->tx_conn == NULL,
1693 "tx already has connection %p", tx->tx_conn);
1695 /* do all of the peer & conn searching in one swoop - this avoids
1696 * nastiness when dropping locks and needing to maintain a sane state
1697 * in the face of stack reset or something else nuking peers & conns */
1699 /* I expect to find him, so only take a read lock */
1700 read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1702 peer = kgnilnd_find_peer_locked(lnet_nid_to_nid4(&target->nid));
1704 conn = kgnilnd_find_conn_locked(peer);
1705 /* this could be NULL during quiesce */
1707 /* Connection exists; queue message on it */
1708 kgnilnd_queue_tx(conn, tx);
1709 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1713 /* don't create a connection if the peer is marked down */
1714 if (peer->gnp_state != GNILND_PEER_UP) {
1715 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1721 /* creating peer or conn; I'll need a write lock... */
1722 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1724 CFS_RACE(CFS_FAIL_GNI_FIND_TARGET);
1726 node_state = kgnilnd_get_node_state(ntohl(target->nid.nid_addr[0]));
1728 /* NB - this will not block during normal operations -
1729 * the only writer of this is in the startup/shutdown path. */
1730 rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);
1736 /* ignore previous peer entirely - we cycled the lock, so we
1737 * will create new peer and at worst drop it if peer is still
1739 rc = kgnilnd_create_peer_safe(&new_peer, lnet_nid_to_nid4(&target->nid),
1742 up_read(&kgnilnd_data.kgn_net_rw_sem);
1746 write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1747 up_read(&kgnilnd_data.kgn_net_rw_sem);
1749 /* search for peer again now that we have the lock
1750 * if we don't find it, add our new one to the list */
1751 kgnilnd_add_peer_locked(lnet_nid_to_nid4(&target->nid), new_peer,
1754 /* don't create a connection if the peer is not up */
1755 if (peer->gnp_state != GNILND_PEER_UP) {
1756 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1761 conn = kgnilnd_find_or_create_conn_locked(peer);
1763 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_DGRAM_DROP_TX)) {
1764 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1769 /* oh hey, found a conn now... magical */
1770 kgnilnd_queue_tx(conn, tx);
1772 /* no conn, must be trying to connect - so we queue for now */
1773 tx->tx_qtime = jiffies;
1774 kgnilnd_tx_add_state_locked(tx, peer, NULL, GNILND_TX_PEERQ, 1);
1776 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1779 kgnilnd_tx_done(tx, rc);
1784 kgnilnd_rdma(kgn_tx_t *tx, int type,
1785 kgn_rdma_desc_t *sink, unsigned int nob, __u64 cookie)
1787 kgn_conn_t *conn = tx->tx_conn;
1788 unsigned long timestamp;
1789 gni_post_type_t post_type;
1792 unsigned int desc_nob = nob;
1793 void *desc_buffer = tx->tx_buffer;
1794 gni_mem_handle_t desc_map_key = tx->tx_map_key;
1795 LASSERTF(kgnilnd_tx_mapped(tx),
1796 "unmapped tx %p\n", tx);
1797 LASSERTF(conn != NULL,
1798 "NULL conn on tx %p, naughty, naughty\n", tx);
1799 LASSERTF(nob <= sink->gnrd_nob,
1800 "nob %u > sink->gnrd_nob %d (%p)\n",
1801 nob, sink->gnrd_nob, sink);
1802 LASSERTF(nob <= tx->tx_nob,
1803 "nob %d > tx(%p)->tx_nob %d\n",
1804 nob, tx, tx->tx_nob);
1807 case GNILND_MSG_GET_DONE:
1808 case GNILND_MSG_PUT_DONE:
1809 post_type = GNI_POST_RDMA_PUT;
1811 case GNILND_MSG_GET_DONE_REV:
1812 case GNILND_MSG_PUT_DONE_REV:
1813 post_type = GNI_POST_RDMA_GET;
1816 CERROR("invalid msg type %s (%d)\n",
1817 kgnilnd_msgtype2str(type), type);
1820 if (post_type == GNI_POST_RDMA_GET) {
1821 /* Check for remote buffer / local buffer / length alignment. All must be 4 byte
1822 * aligned. If the local buffer is not aligned correctly, using the copy buffer
1823 * will fix that issue. If the length is misaligned, the copy buffer also fixes it; we end
1824 * up transferring extra bytes into the buffer but only copy the correct nob into the original
1825 * buffer. Remote offset correction is done through a combination of adjusting the offset,
1826 * making sure the length and addr are aligned and copying the data into the correct location
1827 * once the transfer has completed.
1829 if ((((__u64)((unsigned long)tx->tx_buffer)) & 3) ||
1830 (sink->gnrd_addr & 3) ||
1833 tx->tx_offset = ((__u64)((unsigned long)sink->gnrd_addr)) & 3;
1835 atomic_inc(&kgnilnd_data.kgn_rev_offset);
1837 if ((nob + tx->tx_offset) & 3) {
1838 desc_nob = ((nob + tx->tx_offset) + (4 - ((nob + tx->tx_offset) & 3)));
1839 atomic_inc(&kgnilnd_data.kgn_rev_length);
1841 desc_nob = (nob + tx->tx_offset);
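/* e.g. nob = 10 with tx_offset = 3: 13 is not 4-byte aligned, so the
 * branch above rounds desc_nob up to 16; the padding is transferred over
 * the wire, but only the original nob bytes are copied back out of
 * tx_buffer_copy once the transfer completes */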
1844 if (tx->tx_buffer_copy == NULL) {
1845 /* Allocate the largest copy buffer we will need; this prevents us from overwriting data
1846 * and costs at most a few extra bytes of allocation. */
1847 tx->tx_buffer_copy = kgnilnd_vzalloc(desc_nob);
1849 if (!tx->tx_buffer_copy) {
1850 /* buffer allocation failed - NAK the RDMA */
1851 kgnilnd_nak_rdma(tx->tx_conn, tx->tx_msg.gnm_type, -EFAULT, cookie, tx->tx_msg.gnm_srcnid);
1852 kgnilnd_tx_done(tx, -EFAULT);
1855 atomic_inc(&kgnilnd_data.kgn_rev_copy_buff);
1856 rc = kgnilnd_mem_register(conn->gnc_device->gnd_handle, (__u64)tx->tx_buffer_copy, desc_nob, NULL, GNI_MEM_READWRITE, &tx->tx_buffer_copy_map_key);
1857 if (rc != GNI_RC_SUCCESS) {
1858 /* Registration failed - NAK the RDMA and kill the tx. */
1859 kgnilnd_vfree(tx->tx_buffer_copy,
1861 tx->tx_buffer_copy = NULL;
1862 kgnilnd_nak_rdma(tx->tx_conn, tx->tx_msg.gnm_type, -EFAULT, cookie, tx->tx_msg.gnm_srcnid);
1863 kgnilnd_tx_done(tx, -EFAULT);
1867 desc_map_key = tx->tx_buffer_copy_map_key;
1868 desc_buffer = tx->tx_buffer_copy;
1872 memset(&tx->tx_rdma_desc, 0, sizeof(tx->tx_rdma_desc));
1873 tx->tx_rdma_desc.post_id = tx->tx_id.txe_cookie;
1874 tx->tx_rdma_desc.type = post_type;
1875 tx->tx_rdma_desc.cq_mode = GNI_CQMODE_GLOBAL_EVENT;
1876 tx->tx_rdma_desc.local_addr = (__u64)((unsigned long)desc_buffer);
1877 tx->tx_rdma_desc.local_mem_hndl = desc_map_key;
1878 tx->tx_rdma_desc.remote_addr = sink->gnrd_addr - tx->tx_offset;
1879 tx->tx_rdma_desc.remote_mem_hndl = sink->gnrd_key;
1880 tx->tx_rdma_desc.length = desc_nob;
1881 tx->tx_nob_rdma = nob;
1882 if (post_type == GNI_POST_RDMA_PUT && *kgnilnd_tunables.kgn_bte_put_dlvr_mode)
1883 tx->tx_rdma_desc.dlvr_mode = *kgnilnd_tunables.kgn_bte_put_dlvr_mode;
1884 if (post_type == GNI_POST_RDMA_GET && *kgnilnd_tunables.kgn_bte_get_dlvr_mode)
1885 tx->tx_rdma_desc.dlvr_mode = *kgnilnd_tunables.kgn_bte_get_dlvr_mode;
1886 /* prep final completion message */
1887 kgnilnd_init_msg(&tx->tx_msg, type, tx->tx_msg.gnm_srcnid);
1888 tx->tx_msg.gnm_u.completion.gncm_cookie = cookie;
1889 /* send actual size RDMA'd in retval */
1890 tx->tx_msg.gnm_u.completion.gncm_retval = nob;
1892 kgnilnd_compute_rdma_cksum(tx, nob);
1895 kgnilnd_queue_tx(conn, tx);
1899 /* Don't lie (CLOSE == RDMA idle) */
1900 LASSERTF(!conn->gnc_close_sent, "tx %p on conn %p after close sent %d\n",
1901 tx, conn, conn->gnc_close_sent);
1903 GNIDBG_TX(D_NET, tx, "Post RDMA type 0x%02x conn %p dlvr_mode "
1904 "0x%x cookie:%#llx",
1905 type, conn, tx->tx_rdma_desc.dlvr_mode, cookie);
1907 /* set CQ dedicated for RDMA */
1908 tx->tx_rdma_desc.src_cq_hndl = conn->gnc_device->gnd_snd_rdma_cqh;
1910 timestamp = jiffies;
1911 kgnilnd_conn_mutex_lock(&conn->gnc_rdma_mutex);
1912 kgnilnd_gl_mutex_lock(&conn->gnc_device->gnd_cq_mutex);
1913 /* delay in jiffies - we are really concerned only with things that
1914 * result in a schedule() or really holding this off for long times.
1915 * NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
1916 conn->gnc_device->gnd_mutex_delay += (long) jiffies - timestamp;
1918 rrc = kgnilnd_post_rdma(conn->gnc_ephandle, &tx->tx_rdma_desc);
1920 if (rrc == GNI_RC_ERROR_RESOURCE) {
1921 kgnilnd_conn_mutex_unlock(&conn->gnc_rdma_mutex);
1922 kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
1923 kgnilnd_unmap_buffer(tx, 0);
1925 if (tx->tx_buffer_copy != NULL) {
1926 kgnilnd_vfree(tx->tx_buffer_copy, desc_nob);
1927 tx->tx_buffer_copy = NULL;
1930 spin_lock(&tx->tx_conn->gnc_device->gnd_lock);
1931 kgnilnd_tx_add_state_locked(tx, NULL, tx->tx_conn,
1933 spin_unlock(&tx->tx_conn->gnc_device->gnd_lock);
1934 kgnilnd_schedule_device(tx->tx_conn->gnc_device);
1938 spin_lock(&conn->gnc_list_lock);
1939 kgnilnd_tx_add_state_locked(tx, conn->gnc_peer, conn, GNILND_TX_LIVE_RDMAQ, 1);
1940 tx->tx_qtime = jiffies;
1941 spin_unlock(&conn->gnc_list_lock);
1942 kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
1943 kgnilnd_conn_mutex_unlock(&conn->gnc_rdma_mutex);
1945 /* XXX Nic: is this a place we should handle more errors for
1946 * robustness sake */
1947 LASSERT(rrc == GNI_RC_SUCCESS);
1952 kgnilnd_alloc_rx(void)
1956 rx = kmem_cache_alloc(kgnilnd_data.kgn_rx_cache, GFP_ATOMIC);
1958 CERROR("failed to allocate rx\n");
1961 CDEBUG(D_MALLOC, "slab-alloced 'rx': %lu at %p.\n",
1964 /* no memset to zero, we'll always fill all members */
1968 /* release just frees connection resources;
1969 * we use this for the eager path after copying */
1971 kgnilnd_release_msg(kgn_conn_t *conn)
1974 unsigned long timestamp;
1976 CDEBUG(D_NET, "consuming %p\n", conn);
1978 timestamp = jiffies;
1979 kgnilnd_gl_mutex_lock(&conn->gnc_device->gnd_cq_mutex);
1980 /* delay in jiffies - we are really concerned only with things that
1981 * result in a schedule() or really holding this off for long times.
1982 * NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
1983 conn->gnc_device->gnd_mutex_delay += (long) jiffies - timestamp;
1985 rrc = kgnilnd_smsg_release(conn->gnc_ephandle);
1986 kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
1988 LASSERTF(rrc == GNI_RC_SUCCESS, "bad rrc %d\n", rrc);
1989 GNIDBG_SMSG_CREDS(D_NET, conn);
1991 kgnilnd_schedule_conn(conn);
1995 kgnilnd_consume_rx(kgn_rx_t *rx)
1997 kgn_conn_t *conn = rx->grx_conn;
1998 kgn_msg_t *rxmsg = rx->grx_msg;
2000 /* if we are eager, free the cache alloc'd msg */
2001 if (unlikely(rx->grx_eager)) {
2002 LIBCFS_FREE(rxmsg, sizeof(*rxmsg) + *kgnilnd_tunables.kgn_max_immediate);
2003 atomic_dec(&kgnilnd_data.kgn_neager_allocs);
2005 /* release ref from eager_recv */
2006 kgnilnd_conn_decref(conn);
2008 GNIDBG_MSG(D_NET, rxmsg, "rx %p processed", rx);
2009 kgnilnd_release_msg(conn);
2012 kmem_cache_free(kgnilnd_data.kgn_rx_cache, rx);
2013 CDEBUG(D_MALLOC, "slab-freed 'rx': %lu at %p.\n",
2018 kgnilnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
2020 struct lnet_hdr *hdr = &lntmsg->msg_hdr;
2021 int type = lntmsg->msg_type;
2022 struct lnet_processid *target = &lntmsg->msg_target;
2023 int target_is_router = lntmsg->msg_target_is_router;
2024 int routing = lntmsg->msg_routing;
2025 unsigned int niov = lntmsg->msg_niov;
2026 struct bio_vec *kiov = lntmsg->msg_kiov;
2027 unsigned int offset = lntmsg->msg_offset;
2028 unsigned int nob = lntmsg->msg_len;
2029 unsigned int msg_vmflush = lntmsg->msg_vmflush;
2030 kgn_net_t *net = ni->ni_data;
2033 /* '1' for consistency with code that checks !mpflag to restore */
2034 unsigned int mpflag = 1;
2035 int reverse_rdma_flag = *kgnilnd_tunables.kgn_reverse_rdma;
2037 /* NB 'private' is different depending on what we're sending.... */
2038 LASSERT(!in_interrupt());
2040 CDEBUG(D_NET, "sending msg type %d with %d bytes in %d frags to %s\n",
2041 type, nob, niov, libcfs_idstr(target));
2043 LASSERTF(nob == 0 || niov > 0,
2044 "lntmsg %p nob %d niov %d\n", lntmsg, nob, niov);
2045 LASSERTF(niov <= GNILND_MAX_IOV,
2046 "lntmsg %p niov %d\n", lntmsg, niov);
2049 mpflag = memalloc_noreclaim_save();
2053 CERROR("lntmsg %p with unexpected type %d\n",
2058 LASSERTF(nob == 0, "lntmsg %p nob %d\n",
2066 if (routing || target_is_router)
2067 break; /* send IMMEDIATE */
2069 /* it is safe to do a direct GET without mapping the buffer for RDMA as we
2070 * check the eventual sink buffer here - if it is small enough, the remote
2071 * end is perfectly capable of returning the data in a short message -
2072 * the magic is that we call lnet_parse in kgnilnd_recv with rdma_req=0
2073 * for IMMEDIATE messages, which will have it send a real reply instead
2074 * of doing kgnilnd_recv to have the RDMA continued */
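/* illustrative only (hypothetical tunable value): with kgn_max_immediate set
 * to 64KiB, a GET whose sink MD is only 4KiB is handled as an IMMEDIATE
 * send below - no RDMA mapping is ever created for it. */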
2075 if (lntmsg->msg_md->md_length <= *kgnilnd_tunables.kgn_max_immediate)
2078 if ((reverse_rdma_flag & GNILND_REVERSE_GET) == 0)
2079 tx = kgnilnd_new_tx_msg(GNILND_MSG_GET_REQ,
2080 lnet_nid_to_nid4(&ni->ni_nid));
2082 tx = kgnilnd_new_tx_msg(GNILND_MSG_GET_REQ_REV,
2083 lnet_nid_to_nid4(&ni->ni_nid));
2089 rc = kgnilnd_setup_rdma_buffer(tx, lntmsg->msg_md->md_niov,
2090 lntmsg->msg_md->md_kiov,
2091 0, lntmsg->msg_md->md_length);
2093 CERROR("unable to setup buffer: %d\n", rc);
2094 kgnilnd_tx_done(tx, rc);
2099 tx->tx_lntmsg[1] = lnet_create_reply_msg(ni, lntmsg);
2100 if (tx->tx_lntmsg[1] == NULL) {
2101 CERROR("Can't create reply for GET to %s\n",
2102 libcfs_nidstr(&target->nid));
2103 kgnilnd_tx_done(tx, rc);
2108 tx->tx_lntmsg[0] = lntmsg;
2109 if ((reverse_rdma_flag & GNILND_REVERSE_GET) == 0)
2110 lnet_hdr_to_nid4(hdr, &tx->tx_msg.gnm_u.get.gngm_hdr);
2112 lnet_hdr_to_nid4(hdr,
2113 &tx->tx_msg.gnm_u.putreq.gnprm_hdr);
2115 /* rest of tx_msg is set up just before it is sent */
2116 kgnilnd_launch_tx(tx, net, target);
2118 case LNET_MSG_REPLY:
2120 /* to save on MDDs, we'll handle short kiov by vmap'ing
2121 * and sending via SMSG */
2122 if (nob <= *kgnilnd_tunables.kgn_max_immediate)
2125 if ((reverse_rdma_flag & GNILND_REVERSE_PUT) == 0)
2126 tx = kgnilnd_new_tx_msg(GNILND_MSG_PUT_REQ,
2127 lnet_nid_to_nid4(&ni->ni_nid));
2129 tx = kgnilnd_new_tx_msg(GNILND_MSG_PUT_REQ_REV,
2130 lnet_nid_to_nid4(&ni->ni_nid));
2137 rc = kgnilnd_setup_rdma_buffer(tx, niov,
2140 kgnilnd_tx_done(tx, rc);
2145 tx->tx_lntmsg[0] = lntmsg;
2146 if ((reverse_rdma_flag & GNILND_REVERSE_PUT) == 0)
2147 lnet_hdr_to_nid4(hdr,
2148 &tx->tx_msg.gnm_u.putreq.gnprm_hdr);
2150 lnet_hdr_to_nid4(hdr, &tx->tx_msg.gnm_u.get.gngm_hdr);
2152 /* rest of tx_msg is set up just before it is sent */
2153 kgnilnd_launch_tx(tx, net, target);
2157 /* send IMMEDIATE */
2159 LASSERTF(nob <= *kgnilnd_tunables.kgn_max_immediate,
2160 "lntmsg 0x%p too large %d\n", lntmsg, nob);
2162 tx = kgnilnd_new_tx_msg(GNILND_MSG_IMMEDIATE,
2163 lnet_nid_to_nid4(&ni->ni_nid));
2169 rc = kgnilnd_setup_immediate_buffer(tx, niov, kiov, offset, nob);
2171 kgnilnd_tx_done(tx, rc);
2175 lnet_hdr_to_nid4(hdr, &tx->tx_msg.gnm_u.immediate.gnim_hdr);
2176 tx->tx_lntmsg[0] = lntmsg;
2177 kgnilnd_launch_tx(tx, net, target);
2180 /* use stored value as we could have already finalized lntmsg here from a failed launch */
2182 memalloc_noreclaim_restore(mpflag);
2187 kgnilnd_setup_rdma(struct lnet_ni *ni, kgn_rx_t *rx, struct lnet_msg *lntmsg, int mlen)
2189 kgn_conn_t *conn = rx->grx_conn;
2190 kgn_msg_t *rxmsg = rx->grx_msg;
2191 unsigned int niov = lntmsg->msg_niov;
2192 struct bio_vec *kiov = lntmsg->msg_kiov;
2193 unsigned int offset = lntmsg->msg_offset;
2194 unsigned int nob = lntmsg->msg_len;
2199 switch (rxmsg->gnm_type) {
2200 case GNILND_MSG_PUT_REQ_REV:
2201 done_type = GNILND_MSG_PUT_DONE_REV;
2204 case GNILND_MSG_GET_REQ:
2205 done_type = GNILND_MSG_GET_DONE;
2208 CERROR("invalid msg type %s (%d)\n",
2209 kgnilnd_msgtype2str(rxmsg->gnm_type),
2214 tx = kgnilnd_new_tx_msg(done_type, lnet_nid_to_nid4(&ni->ni_nid));
2218 rc = kgnilnd_set_tx_id(tx, conn);
2222 rc = kgnilnd_setup_rdma_buffer(tx, niov, kiov, offset, nob);
2226 tx->tx_lntmsg[0] = lntmsg;
2227 tx->tx_getinfo = rxmsg->gnm_u.get;
2229 /* we only queue from kgnilnd_recv - we might get called from other contexts
2230 * and we don't want to block the mutex in those cases */
2232 spin_lock(&tx->tx_conn->gnc_device->gnd_lock);
2233 kgnilnd_tx_add_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_MAPQ, 1);
2234 spin_unlock(&tx->tx_conn->gnc_device->gnd_lock);
2235 kgnilnd_schedule_device(tx->tx_conn->gnc_device);
2240 kgnilnd_tx_done(tx, rc);
2241 kgnilnd_nak_rdma(conn, done_type, rc, rxmsg->gnm_u.get.gngm_cookie,
2242 lnet_nid_to_nid4(&ni->ni_nid));
2244 lnet_finalize(lntmsg, rc);
2248 kgnilnd_eager_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
2251 kgn_rx_t *rx = private;
2252 kgn_conn_t *conn = rx->grx_conn;
2253 kgn_msg_t *rxmsg = rx->grx_msg;
2254 kgn_msg_t *eagermsg = NULL;
2255 kgn_peer_t *peer = NULL;
2256 kgn_conn_t *found_conn = NULL;
2258 GNIDBG_MSG(D_NET, rxmsg, "eager recv for conn %p, rxmsg %p, lntmsg %p",
2259 conn, rxmsg, lntmsg);
2261 if (rxmsg->gnm_payload_len > *kgnilnd_tunables.kgn_max_immediate) {
2262 GNIDBG_MSG(D_ERROR, rxmsg, "payload too large %d",
2263 rxmsg->gnm_payload_len);
2266 /* Grab a read lock so the connection doesn't disappear on us
2267 * while we look it up
2269 read_lock(&kgnilnd_data.kgn_peer_conn_lock);
2271 peer = kgnilnd_find_peer_locked(rxmsg->gnm_srcnid);
2273 found_conn = kgnilnd_find_conn_locked(peer);
2276 /* Verify the connection found is the same one that the message
2277 * is supposed to be using; if it is not, output an error message
2280 if (!peer || !found_conn
2281 || found_conn->gnc_peer_connstamp != rxmsg->gnm_connstamp) {
2282 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
2283 CERROR("Couldn't find matching peer %p or conn %p / %p\n",
2284 peer, conn, found_conn);
2286 CERROR("Unexpected connstamp %#llx(%#llx expected) from %s\n",
2287 rxmsg->gnm_connstamp,
2288 found_conn->gnc_peer_connstamp,
2289 libcfs_nid2str(peer->gnp_nid));
2294 /* add conn ref to ensure it doesn't go away until all eager
2295 * messages processed */
2296 kgnilnd_conn_addref(conn);
2298 /* Now that we have verified the connection is valid and added a
2299 * reference we can remove the read_lock on the peer_conn_lock */
2300 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
2302 /* we have no credits or buffers for this message, so copy it
2303 * somewhere for a later kgnilnd_recv */
2304 if (atomic_read(&kgnilnd_data.kgn_neager_allocs) >=
2305 *kgnilnd_tunables.kgn_eager_credits) {
2306 CERROR("Out of eager credits to %s\n",
2307 libcfs_nid2str(conn->gnc_peer->gnp_nid));
2311 atomic_inc(&kgnilnd_data.kgn_neager_allocs);
2313 LIBCFS_ALLOC(eagermsg, sizeof(*eagermsg) + *kgnilnd_tunables.kgn_max_immediate);
2314 if (eagermsg == NULL) {
2315 kgnilnd_conn_decref(conn);
2316 CERROR("couldn't allocate eager rx message for conn %p to %s\n",
2317 conn, libcfs_nid2str(conn->gnc_peer->gnp_nid));
2321 /* copy msg and payload */
2322 memcpy(eagermsg, rxmsg, sizeof(*rxmsg) + rxmsg->gnm_payload_len);
2323 rx->grx_msg = eagermsg;
2326 /* stash this for lnet_finalize on cancel-on-conn-close */
2327 rx->grx_lntmsg = lntmsg;
2329 /* keep the same rx_t, it just has a new grx_msg now */
2330 *new_private = private;
2332 /* release SMSG buffer */
2333 kgnilnd_release_msg(conn);
2339 kgnilnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
2340 int delayed, unsigned int niov,
2341 struct bio_vec *kiov,
2342 unsigned int offset, unsigned int mlen, unsigned int rlen)
2344 kgn_rx_t *rx = private;
2345 kgn_conn_t *conn = rx->grx_conn;
2346 kgn_msg_t *rxmsg = rx->grx_msg;
2352 LASSERT(!in_interrupt());
2353 LASSERTF(mlen <= rlen, "%d <= %d\n", mlen, rlen);
2355 GNIDBG_MSG(D_NET, rxmsg, "conn %p, rxmsg %p, lntmsg %p"
2356 " niov=%d kiov=%p offset=%d mlen=%d rlen=%d",
2357 conn, rxmsg, lntmsg,
2358 niov, kiov, offset, mlen, rlen);
2360 /* we need to lock here as recv can be called from any context */
2361 read_lock(&kgnilnd_data.kgn_peer_conn_lock);
2362 if (rx->grx_eager && conn->gnc_state != GNILND_CONN_ESTABLISHED) {
2363 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
2365 /* someone closed the conn after we copied this out, nuke it */
2366 kgnilnd_consume_rx(rx);
2367 lnet_finalize(lntmsg, conn->gnc_error);
2370 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
2372 switch (rxmsg->gnm_type) {
2374 GNIDBG_MSG(D_NETERROR, rxmsg, "conn %p, rx %p, rxmsg %p, lntmsg %p"
2375 " niov=%d kiov=%p offset=%d mlen=%d rlen=%d",
2376 conn, rx, rxmsg, lntmsg, niov, kiov, offset, mlen, rlen);
2379 case GNILND_MSG_IMMEDIATE:
2380 if (mlen > rxmsg->gnm_payload_len) {
2381 GNIDBG_MSG(D_ERROR, rxmsg,
2382 "Immediate message from %s too big: %d > %d",
2383 libcfs_nid2str(conn->gnc_peer->gnp_nid), mlen,
2384 rxmsg->gnm_payload_len);
2386 kgnilnd_consume_rx(rx);
2390 /* rxmsg[1] is a pointer to the payload, sitting in the buffer
2391 * right after the kgn_msg_t header - so it's just a 'cute' way of saying
2392 * rxmsg + sizeof(kgn_msg_t) */
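/* i.e. the receive buffer layout is simply:
 *   [ kgn_msg_t header ][ gnm_payload_len bytes of payload ]
 * so &rxmsg[1] is the address of the first payload byte. */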
2394 /* check payload checksum if sent */
2396 if (*kgnilnd_tunables.kgn_checksum >= 2 &&
2397 !rxmsg->gnm_payload_cksum &&
2398 rxmsg->gnm_payload_len != 0)
2399 GNIDBG_MSG(D_WARNING, rxmsg, "no msg payload checksum when enabled");
2401 if (rxmsg->gnm_payload_cksum != 0) {
2402 /* gnm_payload_len set in kgnilnd_sendmsg from tx->tx_nob,
2403 * which is what is used to calculate the cksum on the TX side */
2404 pload_cksum = kgnilnd_cksum(&rxmsg[1], rxmsg->gnm_payload_len);
2406 if (rxmsg->gnm_payload_cksum != pload_cksum) {
2407 GNIDBG_MSG(D_NETERROR, rxmsg,
2408 "Bad payload checksum (%x expected %x)",
2409 pload_cksum, rxmsg->gnm_payload_cksum);
2410 switch (*kgnilnd_tunables.kgn_checksum_dump) {
2412 kgnilnd_dump_blob(D_BUFFS, "bad payload checksum",
2413 &rxmsg[1], rxmsg->gnm_payload_len);
2416 libcfs_debug_dumplog();
2422 /* checksum problems are fatal, kill the conn */
2423 kgnilnd_consume_rx(rx);
2424 kgnilnd_close_conn(conn, rc);
2429 lnet_copy_flat2kiov(
2431 *kgnilnd_tunables.kgn_max_immediate,
2432 &rxmsg[1], 0, mlen);
2434 kgnilnd_consume_rx(rx);
2435 lnet_finalize(lntmsg, 0);
2438 case GNILND_MSG_PUT_REQ:
2439 /* LNET wants to truncate or drop transaction, sending NAK */
2441 kgnilnd_consume_rx(rx);
2442 lnet_finalize(lntmsg, 0);
2444 /* only error if lntmsg == NULL, otherwise we are just
2445 * short-circuiting the RDMA processing of 0 bytes */
2446 kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
2447 lntmsg == NULL ? -ENOENT : 0,
2448 rxmsg->gnm_u.get.gngm_cookie,
2449 lnet_nid_to_nid4(&ni->ni_nid));
2452 /* sending ACK with sink buff. info */
2453 tx = kgnilnd_new_tx_msg(GNILND_MSG_PUT_ACK,
2454 lnet_nid_to_nid4(&ni->ni_nid));
2456 kgnilnd_consume_rx(rx);
2460 rc = kgnilnd_set_tx_id(tx, conn);
2462 GOTO(nak_put_req, rc);
2465 rc = kgnilnd_setup_rdma_buffer(tx, niov,
2466 kiov, offset, mlen);
2468 GOTO(nak_put_req, rc);
2471 tx->tx_msg.gnm_u.putack.gnpam_src_cookie =
2472 rxmsg->gnm_u.putreq.gnprm_cookie;
2473 tx->tx_msg.gnm_u.putack.gnpam_dst_cookie = tx->tx_id.txe_cookie;
2474 tx->tx_msg.gnm_u.putack.gnpam_desc.gnrd_addr =
2475 (__u64)((unsigned long)tx->tx_buffer);
2476 tx->tx_msg.gnm_u.putack.gnpam_desc.gnrd_nob = mlen;
2478 tx->tx_lntmsg[0] = lntmsg; /* finalize this on RDMA_DONE */
2479 tx->tx_qtime = jiffies;
2480 /* we only queue from kgnilnd_recv - we might get called from other contexts
2481 * and we don't want to block the mutex in those cases */
2483 spin_lock(&tx->tx_conn->gnc_device->gnd_lock);
2484 kgnilnd_tx_add_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_MAPQ, 1);
2485 spin_unlock(&tx->tx_conn->gnc_device->gnd_lock);
2486 kgnilnd_schedule_device(tx->tx_conn->gnc_device);
2488 kgnilnd_consume_rx(rx);
2492 /* make sure we send an error back when the PUT fails */
2493 kgnilnd_nak_rdma(conn, rxmsg->gnm_type, rc,
2494 rxmsg->gnm_u.get.gngm_cookie,
2495 lnet_nid_to_nid4(&ni->ni_nid));
2496 kgnilnd_tx_done(tx, rc);
2497 kgnilnd_consume_rx(rx);
2499 /* return magic LNet network error */
2501 case GNILND_MSG_GET_REQ_REV:
2502 /* LNET wants to truncate or drop transaction, sending NAK */
2504 kgnilnd_consume_rx(rx);
2505 lnet_finalize(lntmsg, 0);
2507 /* only error if lntmsg == NULL, otherwise we are just
2508 * short-circuiting the RDMA processing of 0 bytes */
2509 kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
2510 lntmsg == NULL ? -ENOENT : 0,
2511 rxmsg->gnm_u.get.gngm_cookie,
2512 lnet_nid_to_nid4(&ni->ni_nid));
2515 /* lntmsg can be null when parsing a LNET_GET */
2516 if (lntmsg != NULL) {
2517 /* sending ACK with sink buff. info */
2518 tx = kgnilnd_new_tx_msg(GNILND_MSG_GET_ACK_REV,
2519 lnet_nid_to_nid4(&ni->ni_nid));
2521 kgnilnd_consume_rx(rx);
2525 rc = kgnilnd_set_tx_id(tx, conn);
2527 GOTO(nak_get_req_rev, rc);
2529 rc = kgnilnd_setup_rdma_buffer(tx, niov,
2530 kiov, offset, mlen);
2532 GOTO(nak_get_req_rev, rc);
2534 tx->tx_msg.gnm_u.putack.gnpam_src_cookie =
2535 rxmsg->gnm_u.putreq.gnprm_cookie;
2536 tx->tx_msg.gnm_u.putack.gnpam_dst_cookie = tx->tx_id.txe_cookie;
2537 tx->tx_msg.gnm_u.putack.gnpam_desc.gnrd_addr =
2538 (__u64)((unsigned long)tx->tx_buffer);
2539 tx->tx_msg.gnm_u.putack.gnpam_desc.gnrd_nob = mlen;
2541 tx->tx_lntmsg[0] = lntmsg; /* finalize this on RDMA_DONE */
2543 /* we only queue from kgnilnd_recv - we might get called from other contexts
2544 * and we don't want to block the mutex in those cases */
2546 spin_lock(&tx->tx_conn->gnc_device->gnd_lock);
2547 kgnilnd_tx_add_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_MAPQ, 1);
2548 spin_unlock(&tx->tx_conn->gnc_device->gnd_lock);
2549 kgnilnd_schedule_device(tx->tx_conn->gnc_device);
2552 kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
2554 rxmsg->gnm_u.get.gngm_cookie,
2555 lnet_nid_to_nid4(&ni->ni_nid));
2558 kgnilnd_consume_rx(rx);
2562 /* make sure we send an error back when the GET fails */
2563 kgnilnd_nak_rdma(conn, rxmsg->gnm_type, rc,
2564 rxmsg->gnm_u.get.gngm_cookie,
2565 lnet_nid_to_nid4(&ni->ni_nid));
2566 kgnilnd_tx_done(tx, rc);
2567 kgnilnd_consume_rx(rx);
2569 /* return magic LNet network error */
2573 case GNILND_MSG_PUT_REQ_REV:
2574 /* LNET wants to truncate or drop transaction, sending NAK */
2576 kgnilnd_consume_rx(rx);
2577 lnet_finalize(lntmsg, 0);
2579 /* only error if lntmsg == NULL, otherwise we are just
2580 * short-circuiting the RDMA processing of 0 bytes */
2581 kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
2582 lntmsg == NULL ? -ENOENT : 0,
2583 rxmsg->gnm_u.get.gngm_cookie,
2584 lnet_nid_to_nid4(&ni->ni_nid));
2588 if (lntmsg != NULL) {
2590 kgnilnd_setup_rdma(ni, rx, lntmsg, mlen);
2593 kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
2595 rxmsg->gnm_u.get.gngm_cookie,
2596 lnet_nid_to_nid4(&ni->ni_nid));
2598 kgnilnd_consume_rx(rx);
2600 case GNILND_MSG_GET_REQ:
2601 if (lntmsg != NULL) {
2603 kgnilnd_setup_rdma(ni, rx, lntmsg, mlen);
2606 kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
2608 rxmsg->gnm_u.get.gngm_cookie,
2609 lnet_nid_to_nid4(&ni->ni_nid));
2611 kgnilnd_consume_rx(rx);
2617 /* needs write_lock on kgn_peer_conn_lock held */
2619 kgnilnd_check_conn_timeouts_locked(kgn_conn_t *conn)
2621 unsigned long timeout, keepalive;
2622 unsigned long now = jiffies;
2623 unsigned long newest_last_rx;
2626 /* given that we found this conn hanging off a peer, it better damned
2627 * well be connected */
2628 LASSERTF(conn->gnc_state == GNILND_CONN_ESTABLISHED,
2629 "conn 0x%p->%s with bad state %s\n", conn,
2630 conn->gnc_peer ? libcfs_nid2str(conn->gnc_peer->gnp_nid)
2632 kgnilnd_conn_state2str(conn));
2634 CDEBUG(D_NET, "checking conn %p->%s timeout %d keepalive %d "
2635 "rx_diff %lu tx_diff %lu\n",
2636 conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
2637 conn->gnc_timeout, GNILND_TO2KA(conn->gnc_timeout),
2638 cfs_duration_sec(now - conn->gnc_last_rx_cq),
2639 cfs_duration_sec(now - conn->gnc_last_tx));
2641 timeout = cfs_time_seconds(conn->gnc_timeout);
2642 keepalive = cfs_time_seconds(GNILND_TO2KA(conn->gnc_timeout));
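/* GNILND_TO2KA is presumably the same timeout->keepalive mapping the reaper
 * comment below calls GNILND_TIMEOUT2KEEPALIVE(t), i.e. roughly timeout/2,
 * so an idle but healthy conn should NOOP about twice per timeout period. */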
2644 /* just in case our lack of RX msg processing is gumming up the works - give the
2645 * remote an extra chance */
2647 newest_last_rx = GNILND_LASTRX(conn);
2649 if (time_after_eq(now, newest_last_rx + timeout)) {
2650 uint32_t level = D_CONSOLE|D_NETERROR;
2652 if (conn->gnc_peer->gnp_state == GNILND_PEER_DOWN) {
2655 GNIDBG_CONN(level, conn,
2656 "No gnilnd traffic received from %s for %lu "
2657 "seconds, terminating connection. Is node down? ",
2658 libcfs_nid2str(conn->gnc_peer->gnp_nid),
2659 cfs_duration_sec(now - newest_last_rx));
2663 /* we don't timeout on last_tx stalls - we are going to trust the
2664 * underlying network to let us know when sends are failing.
2665 * At worst, the peer will timeout our RX stamp and drop the connection
2666 * at that point. We'll then see his CLOSE or at worst his RX
2667 * stamp stop and drop the connection on our end */
2669 if (time_after_eq(now, conn->gnc_last_tx + keepalive)) {
2670 CDEBUG(D_NET, "sending NOOP -> %s (%p idle %lu(%lu)) "
2671 "last %lu/%lu/%lu %lus/%lus/%lus\n",
2672 libcfs_nid2str(conn->gnc_peer->gnp_nid), conn,
2673 cfs_duration_sec(jiffies - conn->gnc_last_tx),
2675 conn->gnc_last_noop_want, conn->gnc_last_noop_sent,
2676 conn->gnc_last_noop_cq,
2677 cfs_duration_sec(jiffies - conn->gnc_last_noop_want),
2678 cfs_duration_sec(jiffies - conn->gnc_last_noop_sent),
2679 cfs_duration_sec(jiffies - conn->gnc_last_noop_cq));
2680 set_mb(conn->gnc_last_noop_want, jiffies);
2681 atomic_inc(&conn->gnc_reaper_noop);
2682 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_NOOP_SEND))
2685 tx = kgnilnd_new_tx_msg(GNILND_MSG_NOOP,
2686 lnet_nid_to_nid4(&conn->gnc_peer->gnp_net->gnn_ni->ni_nid));
2689 kgnilnd_queue_tx(conn, tx);
2695 /* needs write_lock on kgn_peer_conn_lock held */
2697 kgnilnd_check_peer_timeouts_locked(kgn_peer_t *peer, struct list_head *todie,
2698 struct list_head *souls)
2700 unsigned long timeout;
2701 kgn_conn_t *conn, *connN = NULL;
2707 short releaseconn = 0;
2708 unsigned long first_rx = 0;
2709 int purgatory_conn_cnt = 0;
2711 CDEBUG(D_NET, "checking peer 0x%p->%s for timeouts; interval %lus\n",
2712 peer, libcfs_nid2str(peer->gnp_nid),
2713 peer->gnp_reconnect_interval);
2715 timeout = cfs_time_seconds(max(*kgnilnd_tunables.kgn_timeout,
2716 GNILND_MIN_TIMEOUT));
2718 conn = kgnilnd_find_conn_locked(peer);
2720 /* if there is a valid conn, check the queues for timeouts */
2721 rc = kgnilnd_check_conn_timeouts_locked(conn);
2723 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_RX_CLOSE_CLOSING)) {
2724 /* simulate a RX CLOSE after the timeout but before
2725 * the scheduler thread gets it */
2726 conn->gnc_close_recvd = GNILND_CLOSE_INJECT1;
2727 conn->gnc_peer_error = -ETIMEDOUT;
2730 if (*kgnilnd_tunables.kgn_to_reconn_disable &&
2732 peer->gnp_state = GNILND_PEER_TIMED_OUT;
2733 CDEBUG(D_WARNING, "%s conn timed out, will "
2734 "reconnect upon request from peer\n",
2735 libcfs_nid2str(conn->gnc_peer->gnp_nid));
2737 /* Once we mark closed, any of the scheduler threads could
2738 * get it and move through before we hit the fail loc code */
2739 kgnilnd_close_conn_locked(conn, rc);
2741 /* first_rx is used to decide when to release a conn from purgatory.
2743 first_rx = conn->gnc_first_rx;
2747 /* now, regardless of starting a new conn, find TXs on the peer queue that
2748 * are old and smell bad - do this first so we don't trigger a
2749 * reconnect on an empty queue if we time them all out */
2750 list_for_each_entry_safe(tx, txN, &peer->gnp_tx_queue, tx_list) {
2751 if (time_after_eq(jiffies, tx->tx_qtime + timeout)) {
2753 LCONSOLE_INFO("could not send to %s due to connection"
2754 " setup failure after %lu seconds\n",
2755 libcfs_nid2str(peer->gnp_nid),
2756 cfs_duration_sec(jiffies - tx->tx_qtime));
2758 kgnilnd_tx_del_state_locked(tx, peer, NULL,
2760 list_add_tail(&tx->tx_list, todie);
2765 if (count || peer->gnp_connecting == GNILND_PEER_KILL) {
2766 CDEBUG(D_NET, "canceling %d tx for peer 0x%p->%s\n",
2767 count, peer, libcfs_nid2str(peer->gnp_nid));
2768 /* if we nuked all the TX, stop peer connection attempt (if there is one..) */
2769 if (list_empty(&peer->gnp_tx_queue) ||
2770 peer->gnp_connecting == GNILND_PEER_KILL) {
2771 /* we pass down todie to use a common function - but we know there are
2773 kgnilnd_cancel_peer_connect_locked(peer, todie);
2777 /* Don't reconnect if we are still trying to clear out old conns.
2778 * This prevents us sending traffic on the new mbox before ensuring we are done
2779 * with the old one */
2780 reconnect = (peer->gnp_state == GNILND_PEER_UP) &&
2781 (atomic_read(&peer->gnp_dirty_eps) == 0);
2783 /* fast reconnect after a timeout */
2784 to_reconn = !conn &&
2785 (peer->gnp_last_errno == -ETIMEDOUT) &&
2786 *kgnilnd_tunables.kgn_fast_reconn;
2788 /* if we are not connected and there are tx on the gnp_tx_queue waiting
2789 * to be sent, we'll check the reconnect interval and fire up a new
2790 * connection request */
2793 (peer->gnp_connecting == GNILND_PEER_IDLE) &&
2794 (time_after_eq(jiffies, peer->gnp_reconnect_time)) &&
2795 (!list_empty(&peer->gnp_tx_queue) || to_reconn)) {
2797 CDEBUG(D_NET, "starting connect to %s\n",
2798 libcfs_nid2str(peer->gnp_nid));
2799 LASSERTF(peer->gnp_connecting == GNILND_PEER_IDLE,
2800 "Peer was idle and we have a write_lock, state issue %d\n",
2801 peer->gnp_connecting);
2803 peer->gnp_connecting = GNILND_PEER_CONNECT;
2804 kgnilnd_peer_addref(peer); /* extra ref for connd */
2806 spin_lock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
2807 list_add_tail(&peer->gnp_connd_list,
2808 &peer->gnp_net->gnn_dev->gnd_connd_peers);
2809 spin_unlock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
2811 kgnilnd_schedule_dgram(peer->gnp_net->gnn_dev);
2814 /* fail_loc to allow us to delay release of purgatory */
2815 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_PURG_REL_DELAY))
2818 /* This check verifies that the new conn is actually being used, which allows us to
2819 * pull the old conns out of purgatory once they have actually seen traffic.
2820 * We only release a conn from purgatory during stack reset, admin command, or when a peer reconnects
2823 time_after(jiffies, first_rx + cfs_time_seconds(*kgnilnd_tunables.kgn_hardware_timeout))) {
2824 CDEBUG(D_INFO, "We can release peer %s conn's from purgatory %lu\n",
2825 libcfs_nid2str(peer->gnp_nid), first_rx + cfs_time_seconds(*kgnilnd_tunables.kgn_hardware_timeout));
2829 list_for_each_entry_safe (conn, connN, &peer->gnp_conns, gnc_list) {
2830 /* check for purgatory timeouts */
2831 if (conn->gnc_in_purgatory) {
2832 /* We cannot detach this conn from purgatory if it has not been closed, so we reschedule it;
2833 * that way, the next time we check it, we can detach it from purgatory
2836 if (conn->gnc_state != GNILND_CONN_DONE) {
2837 /* Skip over conns that are currently not DONE. If they aren't already scheduled
2838 * for completion, something in the state machine is broken.
2843 /* We only detach a conn that is in purgatory if we have received a close message,
2844 * we have a new valid connection that has successfully received data, or an admin
2845 * command tells us we need to detach.
2848 if (conn->gnc_close_recvd || releaseconn || conn->gnc_needs_detach) {
2849 unsigned long waiting;
2851 waiting = (long) jiffies - conn->gnc_last_rx_cq;
2853 /* C.E: The remote peer is expected to close the
2854 * connection (see kgnilnd_check_conn_timeouts)
2855 * via the reaper thread and nuke out the MDD and
2856 * FMA resources after conn->gnc_timeout has expired
2857 * without an FMA RX */
2858 CDEBUG(D_NET, "Reconnected to %s in %lds or admin forced detach, dropping "
2859 "held resources\n",
2860 libcfs_nid2str(conn->gnc_peer->gnp_nid),
2861 cfs_duration_sec(waiting));
2863 kgnilnd_detach_purgatory_locked(conn, souls);
2865 purgatory_conn_cnt++;
2870 /* If we have too many connections in purgatory we could run out of
2871 * resources. Limit the number of connections to a tunable number,
2872 * clean up to the minimum all in one fell swoop... there are
2873 * situations where dvs will retry tx's and we can eat up several
2874 * hundred connection requests at once.
2876 if (purgatory_conn_cnt > *kgnilnd_tunables.kgn_max_purgatory) {
2877 list_for_each_entry_safe(conn, connN, &peer->gnp_conns,
2879 if (conn->gnc_in_purgatory &&
2880 conn->gnc_state == GNILND_CONN_DONE) {
2881 CDEBUG(D_NET, "Dropping Held resource due to"
2882 " resource limits being hit\n");
2883 kgnilnd_detach_purgatory_locked(conn, souls);
2885 if (purgatory_conn_cnt-- <
2886 *kgnilnd_tunables.kgn_max_purgatory)
2894 kgnilnd_reaper_check(int idx)
2896 struct list_head *peers = &kgnilnd_data.kgn_peers[idx];
2897 struct list_head *ctmp, *ctmpN;
2898 LIST_HEAD(geriatrics);
2901 write_lock(&kgnilnd_data.kgn_peer_conn_lock);
2903 list_for_each_safe(ctmp, ctmpN, peers) {
2904 kgn_peer_t *peer = NULL;
2906 /* don't timeout stuff if the network is mucked or shutting down */
2907 if (kgnilnd_check_hw_quiesce()) {
2910 peer = list_entry(ctmp, kgn_peer_t, gnp_list);
2912 kgnilnd_check_peer_timeouts_locked(peer, &geriatrics, &souls);
2915 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
2917 kgnilnd_txlist_done(&geriatrics, -EHOSTUNREACH);
2918 kgnilnd_release_purgatory_list(&souls);
2922 kgnilnd_update_reaper_timeout(long timeout)
2924 LASSERT(timeout > 0);
2926 spin_lock(&kgnilnd_data.kgn_reaper_lock);
2928 if (timeout < kgnilnd_data.kgn_new_min_timeout)
2929 kgnilnd_data.kgn_new_min_timeout = timeout;
2931 spin_unlock(&kgnilnd_data.kgn_reaper_lock);
2935 kgnilnd_reaper_poke_with_stick(cfs_timer_cb_arg_t arg)
2937 wake_up(&kgnilnd_data.kgn_reaper_waitq);
2941 kgnilnd_reaper(void *arg)
2946 unsigned long next_check_time = jiffies;
2947 long current_min_timeout = MAX_SCHEDULE_TIMEOUT;
2948 struct timer_list timer;
2951 /* all gnilnd threads need to run fairly urgently */
2952 set_user_nice(current, *kgnilnd_tunables.kgn_nice);
2953 spin_lock(&kgnilnd_data.kgn_reaper_lock);
2955 while (!kgnilnd_data.kgn_shutdown) {
2956 /* I wake up every 'p' seconds to check for timeouts on some
2957 * more peers. I try to check every connection 'n' times
2958 * within the global minimum of all keepalive and timeout
2959 * intervals, to ensure I attend to every connection within
2960 * (n+1)/n times its timeout intervals. */
2961 const int p = GNILND_REAPER_THREAD_WAKE;
2962 const int n = GNILND_REAPER_NCHECKS;
2964 /* to quiesce or to not quiesce, that is the question */
2965 if (unlikely(kgnilnd_data.kgn_quiesce_trigger)) {
2966 spin_unlock(&kgnilnd_data.kgn_reaper_lock);
2967 KGNILND_SPIN_QUIESCE;
2968 spin_lock(&kgnilnd_data.kgn_reaper_lock);
2971 /* careful with the jiffy wrap... */
2972 timeout = (long)(next_check_time - jiffies);
2975 prepare_to_wait(&kgnilnd_data.kgn_reaper_waitq, &wait,
2976 TASK_INTERRUPTIBLE);
2977 spin_unlock(&kgnilnd_data.kgn_reaper_lock);
2978 cfs_timer_setup(&timer, kgnilnd_reaper_poke_with_stick,
2979 next_check_time, 0);
2980 mod_timer(&timer, (long) jiffies + timeout);
2982 /* check flag variables before committing */
2983 if (!kgnilnd_data.kgn_shutdown &&
2984 !kgnilnd_data.kgn_quiesce_trigger) {
2985 CDEBUG(D_INFO, "schedule timeout %ld (%lu sec)\n",
2986 timeout, cfs_duration_sec(timeout));
2988 CDEBUG(D_INFO, "awake after schedule\n");
2991 del_singleshot_timer_sync(&timer);
2992 spin_lock(&kgnilnd_data.kgn_reaper_lock);
2993 finish_wait(&kgnilnd_data.kgn_reaper_waitq, &wait);
2997 /* new_min_timeout is set from the conn timeouts and keepalive;
2998 * this should end up with a min timeout of
2999 * GNILND_TIMEOUT2KEEPALIVE(t) or roughly LND_TIMEOUT/2 */
3000 if (kgnilnd_data.kgn_new_min_timeout < current_min_timeout) {
3001 current_min_timeout = kgnilnd_data.kgn_new_min_timeout;
3002 CDEBUG(D_NET, "Set new min timeout %ld\n",
3003 current_min_timeout);
3006 spin_unlock(&kgnilnd_data.kgn_reaper_lock);
3008 /* Compute how many table entries to check now so I get round
3009 * the whole table fast enough given that I do this at fixed
3010 * intervals of 'p' seconds */
3011 chunk = *kgnilnd_tunables.kgn_peer_hash_size;
3012 if (kgnilnd_data.kgn_new_min_timeout > n * p)
3013 chunk = (chunk * n * p) /
3014 kgnilnd_data.kgn_new_min_timeout;
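/* worked example with hypothetical values: peer_hash_size = 503 buckets,
 * n = 4, p = 1s and a 60s minimum timeout gives chunk = 503 * 4 * 1 / 60
 * ~= 33 buckets per wakeup, so the whole table is swept roughly every 15s -
 * about n sweeps per timeout interval, as intended. */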
3017 for (i = 0; i < chunk; i++) {
3018 kgnilnd_reaper_check(hash_index);
3019 hash_index = (hash_index + 1) %
3020 *kgnilnd_tunables.kgn_peer_hash_size;
3022 next_check_time = (long) jiffies + cfs_time_seconds(p);
3023 CDEBUG(D_INFO, "next check at %lu or in %d sec\n", next_check_time, p);
3025 spin_lock(&kgnilnd_data.kgn_reaper_lock);
3028 spin_unlock(&kgnilnd_data.kgn_reaper_lock);
3030 kgnilnd_thread_fini();
3035 kgnilnd_recv_bte_get(kgn_tx_t *tx) {
3036 unsigned niov, offset, nob;
3037 struct bio_vec *kiov;
3038 struct lnet_msg *lntmsg = tx->tx_lntmsg[0];
3039 kgnilnd_parse_lnet_rdma(lntmsg, &niov, &offset, &nob, &kiov, tx->tx_nob_rdma);
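/* tx_buffer_copy is the bounce buffer used when the inbound transfer could
 * not land directly in the final destination (presumably the unaligned-RDMA
 * case); copy the received bytes from it back out to the real LNet buffer. */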
3042 lnet_copy_flat2kiov(
3045 tx->tx_buffer_copy + tx->tx_offset, 0, nob);
3047 memcpy(tx->tx_buffer, tx->tx_buffer_copy + tx->tx_offset, nob);
3054 kgnilnd_check_rdma_cq(kgn_device_t *dev)
3057 gni_post_descriptor_t *desc;
3059 kgn_tx_ev_id_t ev_id;
3061 int should_retry, rc;
3062 long num_processed = 0;
3063 kgn_conn_t *conn = NULL;
3064 kgn_tx_t *tx = NULL;
3065 kgn_rdma_desc_t *rdesc;
3070 /* make sure we don't keep looping if we need to reset */
3071 if (unlikely(kgnilnd_data.kgn_quiesce_trigger)) {
3072 return num_processed;
3074 rc = kgnilnd_mutex_trylock(&dev->gnd_cq_mutex);
3076 /* we didn't get the mutex, so return that there is still work
3080 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_DELAY_RDMA)) {
3081 /* a bit gross - but we need a good way to test for
3082 * delayed RDMA completions and the easiest way to do
3083 * that is to delay the RDMA CQ events */
3084 rrc = GNI_RC_NOT_DONE;
3086 rrc = kgnilnd_cq_get_event(dev->gnd_snd_rdma_cqh, &event_data);
3089 if (rrc == GNI_RC_NOT_DONE) {
3090 kgnilnd_gl_mutex_unlock(&dev->gnd_cq_mutex);
3091 CDEBUG(D_INFO, "SEND RDMA CQ %d empty processed %ld\n",
3092 dev->gnd_id, num_processed);
3093 return num_processed;
3095 dev->gnd_sched_alive = jiffies;
3098 LASSERTF(!GNI_CQ_OVERRUN(event_data),
3099 "this is bad, somehow our credits didn't protect us"
3100 " from CQ overrun\n");
3101 LASSERTF(GNI_CQ_GET_TYPE(event_data) == GNI_CQ_EVENT_TYPE_POST,
3102 "rrc %d, GNI_CQ_GET_TYPE(%#llx) = %#llx\n", rrc,
3103 event_data, GNI_CQ_GET_TYPE(event_data));
3105 rrc = kgnilnd_get_completed(dev->gnd_snd_rdma_cqh, event_data,
3107 kgnilnd_gl_mutex_unlock(&dev->gnd_cq_mutex);
3109 /* XXX Nic: Need better error handling here... */
3110 LASSERTF((rrc == GNI_RC_SUCCESS) ||
3111 (rrc == GNI_RC_TRANSACTION_ERROR),
3114 ev_id.txe_cookie = desc->post_id;
3116 kgnilnd_validate_tx_ev_id(&ev_id, &tx, &conn);
3118 if (conn == NULL || tx == NULL) {
3119 /* either conn or tx was already nuked and this is a "late"
3120 * completion, so drop it */
3124 GNITX_ASSERTF(tx, tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE ||
3125 tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE ||
3126 tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE_REV ||
3127 tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE_REV,
3128 "tx %p with type %d\n", tx, tx->tx_msg.gnm_type);
3130 GNIDBG_TX(D_NET, tx, "RDMA completion for %d bytes", tx->tx_nob);
3132 if (tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE_REV) {
3133 lnet_set_reply_msg_len(NULL, tx->tx_lntmsg[1],
3134 tx->tx_msg.gnm_u.completion.gncm_retval);
3138 if (tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE_REV && desc->status == GNI_RC_SUCCESS) {
3139 if (tx->tx_buffer_copy != NULL)
3140 kgnilnd_recv_bte_get(tx);
3141 rc = kgnilnd_verify_rdma_cksum(tx, tx->tx_putinfo.gnpam_payload_cksum, tx->tx_nob_rdma);
3144 if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE_REV && desc->status == GNI_RC_SUCCESS) {
3145 if (tx->tx_buffer_copy != NULL)
3146 kgnilnd_recv_bte_get(tx);
3147 rc = kgnilnd_verify_rdma_cksum(tx, tx->tx_getinfo.gngm_payload_cksum, tx->tx_nob_rdma);
3150 /* remove from rdmaq */
3151 kgnilnd_conn_mutex_lock(&conn->gnc_rdma_mutex);
3152 spin_lock(&conn->gnc_list_lock);
3153 kgnilnd_tx_del_state_locked(tx, NULL, conn, GNILND_TX_ALLOCD);
3154 spin_unlock(&conn->gnc_list_lock);
3155 kgnilnd_conn_mutex_unlock(&conn->gnc_rdma_mutex);
3157 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_RDMA_CQ_ERROR)) {
3158 event_data = 1LL << 48;
3162 if (likely(desc->status == GNI_RC_SUCCESS) && rc == 0) {
3163 atomic_inc(&dev->gnd_rdma_ntx);
3164 atomic64_add(tx->tx_nob, &dev->gnd_rdma_txbytes);
3165 /* transaction succeeded, add into fmaq */
3166 kgnilnd_queue_tx(conn, tx);
3167 kgnilnd_peer_alive(conn->gnc_peer);
3169 /* drop ref from kgnilnd_validate_tx_ev_id */
3170 kgnilnd_admin_decref(conn->gnc_tx_in_use);
3171 kgnilnd_conn_decref(conn);
3176 /* fall through to the TRANSACTION_ERROR case */
3179 /* get stringified version for log messages */
3180 kgnilnd_cq_error_str(event_data, &err_str, 256);
3181 kgnilnd_cq_error_recoverable(event_data, &should_retry);
3183 /* make sure we are not off in the weeds with this tx */
3184 if (tx->tx_retrans >
3185 *kgnilnd_tunables.kgn_max_retransmits) {
3186 GNIDBG_TX(D_NETERROR, tx,
3187 "giving up on TX, too many retries", NULL);
3191 GNIDBG_TX(D_NETERROR, tx, "RDMA %s error (%s)",
3192 should_retry ? "transient" : "unrecoverable", err_str);
3194 if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE ||
3195 tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE_REV) {
3196 rdesc = &tx->tx_putinfo.gnpam_desc;
3197 rnob = tx->tx_putinfo.gnpam_desc.gnrd_nob;
3198 rcookie = tx->tx_putinfo.gnpam_dst_cookie;
3200 rdesc = &tx->tx_getinfo.gngm_desc;
3201 rnob = tx->tx_lntmsg[0]->msg_len;
3202 rcookie = tx->tx_getinfo.gngm_cookie;
3207 tx->tx_msg.gnm_type,
3211 kgnilnd_nak_rdma(conn,
3212 tx->tx_msg.gnm_type,
3215 tx->tx_msg.gnm_srcnid);
3216 kgnilnd_tx_done(tx, -GNILND_NOPURG);
3217 kgnilnd_close_conn(conn, -ECOMM);
3220 /* drop ref from kgnilnd_validate_tx_ev_id */
3221 kgnilnd_admin_decref(conn->gnc_tx_in_use);
3222 kgnilnd_conn_decref(conn);
3227 kgnilnd_check_fma_send_cq(kgn_device_t *dev)
3231 kgn_tx_ev_id_t ev_id;
3232 kgn_tx_t *tx = NULL;
3233 kgn_conn_t *conn = NULL;
3234 int queued_fma, saw_reply, rc;
3235 long num_processed = 0;
3236 struct list_head *ctmp, *ctmpN;
3239 /* make sure we don't keep looping if we need to reset */
3240 if (unlikely(kgnilnd_data.kgn_quiesce_trigger)) {
3241 return num_processed;
3244 rc = kgnilnd_mutex_trylock(&dev->gnd_cq_mutex);
3246 /* we didn't get the mutex, so return that there is still work
3251 rrc = kgnilnd_cq_get_event(dev->gnd_snd_fma_cqh, &event_data);
3252 kgnilnd_gl_mutex_unlock(&dev->gnd_cq_mutex);
3254 if (rrc == GNI_RC_NOT_DONE) {
3256 "SMSG send CQ %d not ready (data %#llx) "
3257 "processed %ld\n", dev->gnd_id, event_data,
3260 if (num_processed > 0) {
3261 spin_lock(&dev->gnd_lock);
3262 if (!list_empty(&dev->gnd_delay_conns)) {
3263 list_for_each_safe(ctmp, ctmpN, &dev->gnd_delay_conns) {
3264 conn = list_entry(ctmp, kgn_conn_t, gnc_delaylist);
3265 list_del_init(&conn->gnc_delaylist);
3266 CDEBUG(D_NET, "Moving Conn %p from delay queue to ready_queue\n", conn);
3267 kgnilnd_schedule_conn_nolock(conn);
3269 spin_unlock(&dev->gnd_lock);
3270 kgnilnd_schedule_device(dev);
3272 spin_unlock(&dev->gnd_lock);
3275 return num_processed;
3278 dev->gnd_sched_alive = jiffies;
3281 LASSERTF(!GNI_CQ_OVERRUN(event_data),
3282 "this is bad, somehow our credits didn't "
3283 "protect us from CQ overrun\n");
3284 LASSERTF(GNI_CQ_GET_TYPE(event_data) == GNI_CQ_EVENT_TYPE_SMSG,
3285 "rrc %d, GNI_CQ_GET_TYPE(%#llx) = %#llx\n", rrc,
3286 event_data, GNI_CQ_GET_TYPE(event_data));
3288 /* if SMSG couldn't handle an error, time for conn to die */
3289 if (unlikely(rrc == GNI_RC_TRANSACTION_ERROR)) {
3292 /* need to take the write_lock to ensure atomicity
3293 * on the conn state if we need to close it */
3294 write_lock(&kgnilnd_data.kgn_peer_conn_lock);
3295 conn = kgnilnd_cqid2conn_locked(GNI_CQ_GET_INST_ID(event_data));
3297 /* Conn was destroyed? */
3299 "SMSG CQID lookup %#llx failed\n",
3300 GNI_CQ_GET_INST_ID(event_data));
3301 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
3305 kgnilnd_cq_error_str(event_data, &err_str, 256);
3306 CNETERR("SMSG send error to %s: rc %d (%s)\n",
3307 libcfs_nid2str(conn->gnc_peer->gnp_nid),
3309 kgnilnd_close_conn_locked(conn, -ECOMM);
3311 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
3313 /* no need to process rest of this tx -
3314 * it is getting canceled */
3318 /* fall through to GNI_RC_SUCCESS case */
3319 ev_id.txe_smsg_id = GNI_CQ_GET_MSG_ID(event_data);
3321 kgnilnd_validate_tx_ev_id(&ev_id, &tx, &conn);
3322 if (conn == NULL || tx == NULL) {
3323 /* either conn or tx was already nuked and this is a "late"
3324 * completion, so drop it */
3328 tx->tx_conn->gnc_last_tx_cq = jiffies;
3329 if (tx->tx_msg.gnm_type == GNILND_MSG_NOOP) {
3330 set_mb(conn->gnc_last_noop_cq, jiffies);
3333 /* lock tx_list_state and tx_state */
3334 kgnilnd_conn_mutex_lock(&conn->gnc_smsg_mutex);
3335 spin_lock(&tx->tx_conn->gnc_list_lock);
3337 GNITX_ASSERTF(tx, tx->tx_list_state == GNILND_TX_LIVE_FMAQ,
3338 "state not GNILND_TX_LIVE_FMAQ", NULL);
3339 GNITX_ASSERTF(tx, tx->tx_state & GNILND_TX_WAITING_COMPLETION,
3340 "not waiting for completion", NULL);
3342 GNIDBG_TX(D_NET, tx, "SMSG complete tx_state %x rc %d",
3345 tx->tx_state &= ~GNILND_TX_WAITING_COMPLETION;
3347 /* This will trigger other FMA sends that were
3348 * pending this completion */
3349 queued_fma = !list_empty(&tx->tx_conn->gnc_fmaq);
3351 /* we either did not expect reply or we already got it */
3352 saw_reply = !(tx->tx_state & GNILND_TX_WAITING_REPLY);
3354 spin_unlock(&tx->tx_conn->gnc_list_lock);
3355 kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
3358 CDEBUG(D_NET, "scheduling conn 0x%p->%s for fmaq\n",
3360 libcfs_nid2str(conn->gnc_peer->gnp_nid));
3361 kgnilnd_schedule_conn(conn);
3364 /* If saw_reply is false, then as soon as gnc_list_lock is dropped the tx could be nuked.
3365 * If saw_reply is true, we know that the tx is safe to use as the other thread
3366 * is already finished with it.
3370 /* no longer need to track on the live_fmaq */
3371 kgnilnd_tx_del_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_ALLOCD);
3373 if (tx->tx_state & GNILND_TX_PENDING_RDMA) {
3374 /* we already got reply & were waiting for
3375 * completion of initial send */
3376 /* to initiate RDMA transaction */
3377 GNIDBG_TX(D_NET, tx,
3378 "Pending RDMA 0x%p type 0x%02x",
3379 tx->tx_msg.gnm_type);
3380 tx->tx_state &= ~GNILND_TX_PENDING_RDMA;
3381 rc = kgnilnd_send_mapped_tx(tx, 0);
3382 GNITX_ASSERTF(tx, rc == 0, "RDMA send failed: %d\n", rc);
3384 /* we are done with this tx */
3385 GNIDBG_TX(D_NET, tx,
3386 "Done with tx type 0x%02x",
3387 tx->tx_msg.gnm_type);
3388 kgnilnd_tx_done(tx, tx->tx_rc);
3392 /* drop ref from kgnilnd_validate_tx_ev_id */
3393 kgnilnd_admin_decref(conn->gnc_tx_in_use);
3394 kgnilnd_conn_decref(conn);
3396 /* if we are waiting for a REPLY, we'll handle the tx then */
3397 } /* end for loop */
3401 kgnilnd_check_fma_rcv_cq(kgn_device_t *dev)
3406 long num_processed = 0;
3407 struct list_head *conns;
3408 struct list_head *tmp;
3412 /* make sure we don't keep looping if we need to reset */
3413 if (unlikely(kgnilnd_data.kgn_quiesce_trigger)) {
3414 return num_processed;
3417 rc = kgnilnd_mutex_trylock(&dev->gnd_cq_mutex);
3419 /* we didn't get the mutex, so return that there is still work
3423 rrc = kgnilnd_cq_get_event(dev->gnd_rcv_fma_cqh, &event_data);
3424 kgnilnd_gl_mutex_unlock(&dev->gnd_cq_mutex);
3426 if (rrc == GNI_RC_NOT_DONE) {
3427 CDEBUG(D_INFO, "SMSG RX CQ %d empty data %#llx "
3429 dev->gnd_id, event_data, num_processed);
3430 return num_processed;
3432 dev->gnd_sched_alive = jiffies;
3435 /* this is the only CQ that can really handle transient
3437 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_CQ_GET_EVENT)) {
3438 rrc = cfs_fail_val ? cfs_fail_val
3439 : GNI_RC_ERROR_RESOURCE;
3440 if (rrc == GNI_RC_ERROR_RESOURCE) {
3441 /* set overrun too */
3442 event_data |= (1UL << 63);
3443 LASSERTF(GNI_CQ_OVERRUN(event_data),
3444 "(1UL << 63) is no longer the bit to set to indicate CQ_OVERRUN\n");
3447 /* sender should get error event too and take care
3448 of failed transaction by re-transmitting */
3449 if (rrc == GNI_RC_TRANSACTION_ERROR) {
3450 CDEBUG(D_NET, "SMSG RX CQ error %#llx\n", event_data);
3454 if (likely(!GNI_CQ_OVERRUN(event_data))) {
3455 read_lock(&kgnilnd_data.kgn_peer_conn_lock);
3456 conn = kgnilnd_cqid2conn_locked(
3457 GNI_CQ_GET_INST_ID(event_data));
3459 CDEBUG(D_NET, "SMSG RX CQID lookup %llu "
3460 "failed, dropping event %#llx\n",
3461 GNI_CQ_GET_INST_ID(event_data),
3464 CDEBUG(D_NET, "SMSG RX: CQID %llu "
3466 GNI_CQ_GET_INST_ID(event_data),
3467 conn, conn->gnc_peer ?
3468 libcfs_nid2str(conn->gnc_peer->gnp_nid) :
3471 conn->gnc_last_rx_cq = jiffies;
3473 /* stash first rx so we can clear out purgatory.
3475 if (conn->gnc_first_rx == 0) {
3476 conn->gnc_first_rx = jiffies;
3478 kgnilnd_peer_alive(conn->gnc_peer);
3479 kgnilnd_schedule_conn(conn);
3481 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
3485 /* FMA CQ has overflowed: check ALL conns */
3486 CNETERR("SMSG RX CQ overflow: scheduling ALL "
3487 "conns on device %d\n", dev->gnd_id);
3489 for (rc = 0; rc < *kgnilnd_tunables.kgn_peer_hash_size; rc++) {
3491 read_lock(&kgnilnd_data.kgn_peer_conn_lock);
3492 conns = &kgnilnd_data.kgn_conns[rc];
3494 list_for_each(tmp, conns) {
3495 conn = list_entry(tmp, kgn_conn_t,
3498 if (conn->gnc_device == dev) {
3499 kgnilnd_schedule_conn(conn);
3500 conn->gnc_last_rx_cq = jiffies;
3504 /* don't block write lockers for too long... */
3505 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
3510 /* try_map_if_full should only be used when processing TX from list of
3511 * backlog TX waiting on mappings to free up
3514 * try_map_if_full = 0: 0 (sent or queued), (-|+)errno failure of kgnilnd_sendmsg
3515 * try_map_if_full = 1: 0 (sent), -ENOMEM for caller to requeue, (-|+)errno failure of kgnilnd_sendmsg */
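/* minimal usage sketch - the try_map_if_full = 1 caller (MAPQ backlog
 * processing) is not shown here, so treat that form as an assumption:
 *     kgnilnd_send_mapped_tx(tx, 0);   RDMAQ/recv paths: 0 means sent or queued
 *     kgnilnd_send_mapped_tx(tx, 1);   backlog path: -ENOMEM means "requeue me"
 */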
3518 kgnilnd_send_mapped_tx(kgn_tx_t *tx, int try_map_if_full)
3520 /* slight bit of a race if multiple people are calling, but at worst we'll have
3521 * the order altered just a bit... which would not be deterministic anyway */
3522 int rc = atomic_read(&tx->tx_conn->gnc_device->gnd_nq_map);
3524 GNIDBG_TX(D_NET, tx, "try %d nq_map %d", try_map_if_full, rc);
3526 /* We know that we have a GART reservation that should guarantee forward progress.
3527 * This means we don't need to take any extraordinary efforts if we are failing
3528 * mappings here - even if we are holding a very small number of these. */
3530 if (try_map_if_full || (rc == 0)) {
3531 rc = kgnilnd_map_buffer(tx);
3534 /* rc should be 0 if we mapped successfully here; if non-zero
3535 * we are queueing */
3537 /* if try_map_if_full set, they handle requeuing */
3538 if (unlikely(try_map_if_full)) {
3541 spin_lock(&tx->tx_conn->gnc_device->gnd_lock);
3542 kgnilnd_tx_add_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_MAPQ, 1);
3543 spin_unlock(&tx->tx_conn->gnc_device->gnd_lock);
3544 /* make sure we wake up sched to run this */
3545 kgnilnd_schedule_device(tx->tx_conn->gnc_device);
3546 /* return 0 as this is now queued for later sending */
3551 switch (tx->tx_msg.gnm_type) {
3555 /* GET_REQ and PUT_ACK are outbound messages sending our mapping key to
3556 * remote node where the RDMA will be started
3557 * Special case -EAGAIN logic - this should just be queued as if the mapping couldn't
3558 * be satisfied. The rest of the errors are "hard" errors that require
3559 * upper layers to handle themselves.
3560 * If kgnilnd_post_rdma returns a resource error, kgnilnd_rdma will put
3561 * the tx back on the TX_MAPQ. When this tx is pulled back off the MAPQ,
3562 * its gnm_type will now be GNILND_MSG_PUT_DONE or
3563 * GNILND_MSG_GET_DONE_REV.
3565 case GNILND_MSG_GET_REQ:
3566 tx->tx_msg.gnm_u.get.gngm_desc.gnrd_key = tx->tx_map_key;
3567 tx->tx_msg.gnm_u.get.gngm_cookie = tx->tx_id.txe_cookie;
3568 tx->tx_msg.gnm_u.get.gngm_desc.gnrd_addr = (__u64)((unsigned long)tx->tx_buffer);
3569 tx->tx_msg.gnm_u.get.gngm_desc.gnrd_nob = tx->tx_nob;
3570 tx->tx_state = GNILND_TX_WAITING_COMPLETION | GNILND_TX_WAITING_REPLY;
3571 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_GET_REQ_AGAIN)) {
3572 tx->tx_state |= GNILND_TX_FAIL_SMSG;
3574 /* redirect to FMAQ on failure, no need to infinite loop here in MAPQ */
3575 rc = kgnilnd_sendmsg(tx, NULL, 0, &tx->tx_conn->gnc_list_lock, GNILND_TX_FMAQ);
3577 case GNILND_MSG_PUT_ACK:
3578 tx->tx_msg.gnm_u.putack.gnpam_desc.gnrd_key = tx->tx_map_key;
3579 tx->tx_state = GNILND_TX_WAITING_COMPLETION | GNILND_TX_WAITING_REPLY;
3580 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_PUT_ACK_AGAIN)) {
3581 tx->tx_state |= GNILND_TX_FAIL_SMSG;
3583 /* redirect to FMAQ on failure, no need to infinite loop here in MAPQ */
3584 rc = kgnilnd_sendmsg(tx, NULL, 0, &tx->tx_conn->gnc_list_lock, GNILND_TX_FMAQ);
3587 /* PUT_REQ and GET_DONE are where we do the actual RDMA */
3588 case GNILND_MSG_PUT_DONE:
3589 case GNILND_MSG_PUT_REQ:
3590 rc = kgnilnd_rdma(tx, GNILND_MSG_PUT_DONE,
3591 &tx->tx_putinfo.gnpam_desc,
3592 tx->tx_putinfo.gnpam_desc.gnrd_nob,
3593 tx->tx_putinfo.gnpam_dst_cookie);
3594 RETURN(try_map_if_full ? rc : 0);
3596 case GNILND_MSG_GET_DONE:
3597 rc = kgnilnd_rdma(tx, GNILND_MSG_GET_DONE,
3598 &tx->tx_getinfo.gngm_desc,
3599 tx->tx_lntmsg[0]->msg_len,
3600 tx->tx_getinfo.gngm_cookie);
3601 RETURN(try_map_if_full ? rc : 0);
3603 case GNILND_MSG_PUT_REQ_REV:
3604 tx->tx_msg.gnm_u.get.gngm_desc.gnrd_key = tx->tx_map_key;
3605 tx->tx_msg.gnm_u.get.gngm_cookie = tx->tx_id.txe_cookie;
3606 tx->tx_msg.gnm_u.get.gngm_desc.gnrd_addr = (__u64)((unsigned long)tx->tx_buffer);
3607 tx->tx_msg.gnm_u.get.gngm_desc.gnrd_nob = tx->tx_nob;
3608 tx->tx_state = GNILND_TX_WAITING_COMPLETION | GNILND_TX_WAITING_REPLY;
3609 kgnilnd_compute_rdma_cksum(tx, tx->tx_nob);
3610 tx->tx_msg.gnm_u.get.gngm_payload_cksum = tx->tx_msg.gnm_payload_cksum;
3612 rc = kgnilnd_sendmsg(tx, NULL, 0, &tx->tx_conn->gnc_list_lock, GNILND_TX_FMAQ);
3614 case GNILND_MSG_PUT_DONE_REV:
3615 rc = kgnilnd_rdma(tx, GNILND_MSG_PUT_DONE_REV,
3616 &tx->tx_getinfo.gngm_desc,
3618 tx->tx_getinfo.gngm_cookie);
3619 RETURN(try_map_if_full ? rc : 0);
3621 case GNILND_MSG_GET_ACK_REV:
3622 tx->tx_msg.gnm_u.putack.gnpam_desc.gnrd_key = tx->tx_map_key;
3623 tx->tx_state = GNILND_TX_WAITING_COMPLETION | GNILND_TX_WAITING_REPLY;
3624 /* LNET_GETS are a special case for parse */
3625 kgnilnd_compute_rdma_cksum(tx, tx->tx_msg.gnm_u.putack.gnpam_desc.gnrd_nob);
3626 tx->tx_msg.gnm_u.putack.gnpam_payload_cksum = tx->tx_msg.gnm_payload_cksum;
3628 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_PUT_ACK_AGAIN))
3629 tx->tx_state |= GNILND_TX_FAIL_SMSG;
3631 /* redirect to FMAQ on failure, no need to infinite loop here in MAPQ */
3632 rc = kgnilnd_sendmsg(tx, NULL, 0, &tx->tx_conn->gnc_list_lock, GNILND_TX_FMAQ);
3634 case GNILND_MSG_GET_DONE_REV:
3635 case GNILND_MSG_GET_REQ_REV:
3636 rc = kgnilnd_rdma(tx, GNILND_MSG_GET_DONE_REV,
3637 &tx->tx_putinfo.gnpam_desc,
3638 tx->tx_putinfo.gnpam_desc.gnrd_nob,
3639 tx->tx_putinfo.gnpam_dst_cookie);
3640 RETURN(try_map_if_full ? rc : 0);
3648 kgnilnd_process_fmaq(kgn_conn_t *conn)
3651 kgn_tx_t *tx = NULL;
3652 void *buffer = NULL;
3653 unsigned int nob = 0;
3656 /* NB 1. kgnilnd_sendmsg() may fail if I'm out of credits right now.
3657 * However I will be rescheduled by an FMA completion event
3658 * when I eventually get some.
3659 * NB 2. Sampling gnc_state here races with setting it elsewhere.
3660 * But it doesn't matter if I try to send a "real" message just
3661 * as I start closing because I'll get scheduled to send the
3664 /* Short circuit if the ep_handle is null; we can't send anyway. */
3665 if (conn->gnc_ephandle == NULL)
3668 LASSERTF(!conn->gnc_close_sent, "Conn %p close was sent\n", conn);
3670 spin_lock(&conn->gnc_list_lock);
3672 if (list_empty(&conn->gnc_fmaq)) {
3673 int keepalive = GNILND_TO2KA(conn->gnc_timeout);
3675 spin_unlock(&conn->gnc_list_lock);
3677 if (time_after_eq(jiffies, conn->gnc_last_tx + cfs_time_seconds(keepalive))) {
3678 CDEBUG(D_NET, "sending NOOP -> %s (%p idle %lu(%d)) "
3679 "last %lu/%lu/%lu %lus/%lus/%lus\n",
3680 libcfs_nid2str(conn->gnc_peer->gnp_nid), conn,
3681 cfs_duration_sec(jiffies - conn->gnc_last_tx),
3683 conn->gnc_last_noop_want, conn->gnc_last_noop_sent,
3684 conn->gnc_last_noop_cq,
3685 cfs_duration_sec(jiffies - conn->gnc_last_noop_want),
3686 cfs_duration_sec(jiffies - conn->gnc_last_noop_sent),
3687 cfs_duration_sec(jiffies - conn->gnc_last_noop_cq));
3688 atomic_inc(&conn->gnc_sched_noop);
3689 set_mb(conn->gnc_last_noop_want, jiffies);
3691 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_NOOP_SEND))
3694 tx = kgnilnd_new_tx_msg(GNILND_MSG_NOOP,
3695 lnet_nid_to_nid4(&conn->gnc_peer->gnp_net->gnn_ni->ni_nid));
3699 rc = kgnilnd_set_tx_id(tx, conn);
3701 kgnilnd_tx_done(tx, rc);
3707 tx = list_first_entry(&conn->gnc_fmaq, kgn_tx_t, tx_list);
3708 /* move from fmaq to allocd, kgnilnd_sendmsg will move to live_fmaq */
3709 kgnilnd_tx_del_state_locked(tx, NULL, conn, GNILND_TX_ALLOCD);
3710 more_to_do = !list_empty(&conn->gnc_fmaq);
3711 spin_unlock(&conn->gnc_list_lock);
3714 /* if there is no real TX or no NOOP to send, bail */
3719 if (!tx->tx_retrans)
3720 tx->tx_cred_wait = jiffies;
3722 GNITX_ASSERTF(tx, tx->tx_id.txe_smsg_id != 0,
3723 "tx with zero id", NULL);
3725 CDEBUG(D_NET, "sending regular msg: %p, type %s(0x%02x), cookie %#llx\n",
3726 tx, kgnilnd_msgtype2str(tx->tx_msg.gnm_type),
3727 tx->tx_msg.gnm_type, tx->tx_id.txe_cookie);
3731 switch (tx->tx_msg.gnm_type) {
3735 case GNILND_MSG_NOOP:
3736 case GNILND_MSG_CLOSE:
3737 case GNILND_MSG_IMMEDIATE:
3738 tx->tx_state = GNILND_TX_WAITING_COMPLETION;
3739 buffer = tx->tx_buffer;
3743 case GNILND_MSG_GET_DONE:
3744 case GNILND_MSG_PUT_DONE:
3745 case GNILND_MSG_PUT_DONE_REV:
3746 case GNILND_MSG_GET_DONE_REV:
3747 case GNILND_MSG_PUT_NAK:
3748 case GNILND_MSG_GET_NAK:
3749 case GNILND_MSG_GET_NAK_REV:
3750 case GNILND_MSG_PUT_NAK_REV:
3751 tx->tx_state = GNILND_TX_WAITING_COMPLETION;
3754 case GNILND_MSG_PUT_REQ:
3755 case GNILND_MSG_GET_REQ_REV:
3756 tx->tx_msg.gnm_u.putreq.gnprm_cookie = tx->tx_id.txe_cookie;
3758 case GNILND_MSG_PUT_ACK:
3759 case GNILND_MSG_PUT_REQ_REV:
3760 case GNILND_MSG_GET_ACK_REV:
3761 case GNILND_MSG_GET_REQ:
3762 /* This is really only to handle the retransmit of SMSG once these
3763 * two messages are set up in send_mapped_tx */
3764 tx->tx_state = GNILND_TX_WAITING_COMPLETION | GNILND_TX_WAITING_REPLY;
3768 if (likely(rc == 0)) {
3769 rc = kgnilnd_sendmsg(tx, buffer, nob, &conn->gnc_list_lock, GNILND_TX_FMAQ);
3773 /* don't explicitly reschedule here - we are short credits and will rely on
3774 * kgnilnd_sendmsg to resched the conn if need be */
3776 } else if (rc < 0) {
3777 /* bail: it wasn't sent and we didn't get EAGAIN indicating we should retrans -
3778 * almost certainly a software bug, but let's play nice with the other kids */
3779 kgnilnd_tx_done(tx, rc);
3780 /* just for fun, kick the peer in the arse - resetting the conn might help to correct
3781 * this almost certainly buggy-software-caused return code */
3782 kgnilnd_close_conn(conn, rc);
3786 CDEBUG(D_NET, "Rescheduling %p (more to do)\n", conn);
3787 kgnilnd_schedule_conn(conn);
3792 kgnilnd_process_rdmaq(kgn_device_t *dev)
3797 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_DELAY_RDMAQ)) {
3801 if (time_after_eq(jiffies, dev->gnd_rdmaq_deadline)) {
3802 unsigned long dead_bump;
3805 /* if we think we need to adjust, take lock to serialize and recheck */
3806 spin_lock(&dev->gnd_rdmaq_lock);
3807 if (time_after_eq(jiffies, dev->gnd_rdmaq_deadline)) {
3808 del_singleshot_timer_sync(&dev->gnd_rdmaq_timer);
3810 dead_bump = cfs_time_seconds(1) / *kgnilnd_tunables.kgn_rdmaq_intervals;
3812 /* roll the bucket forward */
3813 dev->gnd_rdmaq_deadline = jiffies + dead_bump;
3815 if (kgnilnd_data.kgn_rdmaq_override &&
3816 (*kgnilnd_tunables.kgn_rdmaq_intervals != 0)) {
3817 new_ok = kgnilnd_data.kgn_rdmaq_override / *kgnilnd_tunables.kgn_rdmaq_intervals;
3822 /* roll current outstanding forward to make sure we carry outstanding
3823 * commitment forward
3824 * new_ok starts out as the whole interval value
3825 * - first subtract bytes_out from last interval, as that would push us over
3826 * strict limits for this interval
3827 * - second, set bytes_ok to new_ok to ensure it doesn't exceed the current auth
3829 * there is a small race here if someone is actively processing mappings and
3830 * adding to rdmaq_bytes_out, but it should be small as the mappings are triggered
3831 * quite quickly after kgnilnd_auth_rdma_bytes gives us the go-ahead
3832 * - if this gives us problems in the future, we could use a read/write lock
3833 * to protect the resetting of these values */
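/* worked example with hypothetical numbers: an override equivalent to
 * 2000MB/s with kgn_rdmaq_intervals = 4 authorizes new_ok = 500MB per
 * interval; if 100MB is still outstanding from the previous interval,
 * only 400MB of fresh RDMA is allowed before the next deadline bump. */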
3834 new_ok -= atomic64_read(&dev->gnd_rdmaq_bytes_out);
3835 atomic64_set(&dev->gnd_rdmaq_bytes_ok, new_ok);
3837 CDEBUG(D_NET, "resetting rdmaq bytes to %lld, deadline +%lu -> %lu, current out %lld\n",
3838 (s64)atomic64_read(&dev->gnd_rdmaq_bytes_ok), dead_bump, dev->gnd_rdmaq_deadline,
3839 (s64)atomic64_read(&dev->gnd_rdmaq_bytes_out));
3841 spin_unlock(&dev->gnd_rdmaq_lock);
3844 spin_lock(&dev->gnd_rdmaq_lock);
3845 while (!list_empty(&dev->gnd_rdmaq)) {
3848 /* make sure we break out early on quiesce */
3849 if (unlikely(kgnilnd_data.kgn_quiesce_trigger)) {
3850 /* always break with lock held - we unlock outside loop */
3854 tx = list_first_entry(&dev->gnd_rdmaq, kgn_tx_t, tx_list);
3855 kgnilnd_tx_del_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_ALLOCD);
3858 /* sample with lock held, serializing with kgnilnd_complete_closed_conn */
3859 if (tx->tx_conn->gnc_state != GNILND_CONN_ESTABLISHED) {
3860 /* if conn is dying, mark tx in tx_ref_table for
3861 * kgnilnd_complete_closed_conn to finish up */
3862 kgnilnd_tx_add_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_DYING, 1);
3864 /* tx was moved to DYING, get next */
3867 spin_unlock(&dev->gnd_rdmaq_lock);
3869 rc = kgnilnd_auth_rdma_bytes(dev, tx);
3870 spin_lock(&dev->gnd_rdmaq_lock);
3873 /* no ticket! add back to head */
3874 kgnilnd_tx_add_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_RDMAQ, 0);
3875 /* clear found_work so scheduler threads wait for timer */
3879 /* TX is GO for launch */
3880 tx->tx_qtime = jiffies;
3881 kgnilnd_send_mapped_tx(tx, 0);
3885 spin_unlock(&dev->gnd_rdmaq_lock);
3891 kgnilnd_swab_rdma_desc(kgn_rdma_desc_t *d)
3893 __swab64s(&d->gnrd_key.qword1);
3894 __swab64s(&d->gnrd_key.qword2);
3895 __swab64s(&d->gnrd_addr);
3896 __swab32s(&d->gnrd_nob);
3899 #define kgnilnd_match_reply_either(w, x, y, z) _kgnilnd_match_reply(w, x, y, z)
3900 #define kgnilnd_match_reply(x, y, z) _kgnilnd_match_reply(x, y, GNILND_MSG_NONE, z)
3903 _kgnilnd_match_reply(kgn_conn_t *conn, int type1, int type2, __u64 cookie)
3905 kgn_tx_ev_id_t ev_id;
3908 /* we use the cookie from the original TX, so we can find the match
3909 * by parsing that and using the txe_idx */
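/* kgn_tx_ev_id_t is presumably a union, so storing the full 64-bit wire
 * cookie here is what makes the embedded tx_ref_table index readable as
 * ev_id.txe_idx on the next line. */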
3910 ev_id.txe_cookie = cookie;
3912 tx = conn->gnc_tx_ref_table[ev_id.txe_idx];
3915 /* check tx to make sure kgni didn't eat it */
3916 GNITX_ASSERTF(tx, tx->tx_msg.gnm_magic == GNILND_MSG_MAGIC,
3917 "came back from kgni with bad magic %x\n", tx->tx_msg.gnm_magic);
3919 GNITX_ASSERTF(tx, ((tx->tx_id.txe_idx == ev_id.txe_idx) &&
3920 (tx->tx_id.txe_cookie == cookie)),
3921 "conn 0x%p->%s tx_ref_table hosed: wanted "
3922 "txe_cookie %#llx txe_idx %d "
3923 "found tx %p cookie %#llx txe_idx %d\n",
3924 conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
3925 cookie, ev_id.txe_idx,
3926 tx, tx->tx_id.txe_cookie, tx->tx_id.txe_idx);
3928 LASSERTF((((tx->tx_msg.gnm_type == type1) || (tx->tx_msg.gnm_type == type2)) &&
3929 (tx->tx_state & GNILND_TX_WAITING_REPLY)),
3930 "Unexpected TX type (%x, %x or %x) "
3931 "or state (%x, expected +%x) "
3932 "matched reply from %s\n",
3933 tx->tx_msg.gnm_type, type1, type2,
3934 tx->tx_state, GNILND_TX_WAITING_REPLY,
3935 libcfs_nid2str(conn->gnc_peer->gnp_nid));
3937 CWARN("Unmatched reply %02x, or %02x/%#llx from %s\n",
3938 type1, type2, cookie, libcfs_nid2str(conn->gnc_peer->gnp_nid));
3944 kgnilnd_complete_tx(kgn_tx_t *tx, int rc)
3947 kgn_conn_t *conn = tx->tx_conn;
3948 __u64 nob = tx->tx_nob;
3949 __u32 physnop = tx->tx_phys_npages;
3950 int id = tx->tx_id.txe_smsg_id;
3951 int buftype = tx->tx_buftype;
3952 gni_mem_handle_t hndl;
3953 hndl.qword1 = tx->tx_map_key.qword1;
3954 hndl.qword2 = tx->tx_map_key.qword2;
3956 spin_lock(&conn->gnc_list_lock);
3958 GNITX_ASSERTF(tx, tx->tx_state & GNILND_TX_WAITING_REPLY,
3959 "not waiting for reply", NULL);
3962 tx->tx_state &= ~GNILND_TX_WAITING_REPLY;
3964 if (rc == -EFAULT) {
3965 CDEBUG(D_NETERROR, "Error %d TX data: TX %p tx_id %x nob %16llu physnop %8d buffertype %#8x MemHandle %#llx.%#llx\n",
3966 rc, tx, id, nob, physnop, buftype, hndl.qword1, hndl.qword2);
3968 if (*kgnilnd_tunables.kgn_efault_lbug) {
3969 GNIDBG_TOMSG(D_NETERROR, &tx->tx_msg,
3970 "error %d on tx 0x%p->%s id %u/%d state %s age %ds",
3972 libcfs_nid2str(conn->gnc_peer->gnp_nid) : "<?>",
3973 tx->tx_id.txe_smsg_id, tx->tx_id.txe_idx,
3974 kgnilnd_tx_state2str(tx->tx_list_state),
3975 cfs_duration_sec((unsigned long) jiffies - tx->tx_qtime));
3980 if (!(tx->tx_state & GNILND_TX_WAITING_COMPLETION)) {
3981 kgnilnd_tx_del_state_locked(tx, NULL, conn, GNILND_TX_ALLOCD);
3982 /* sample under lock as follow on steps require gnc_list_lock
3983 * - or call kgnilnd_tx_done which requires no locks held over
3984 * call to lnet_finalize */
3987 spin_unlock(&conn->gnc_list_lock);
3990 kgnilnd_tx_done(tx, tx->tx_rc);
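/* If the SMSG send for this TX has not completed yet
 * (GNILND_TX_WAITING_COMPLETION still set), the TX is left where it is
 * for the SMSG completion handler to finish; otherwise it is moved back
 * to GNILND_TX_ALLOCD above and kgnilnd_tx_done() runs with no locks
 * held, since finalizing back into LNet must not happen under
 * gnc_list_lock. */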
3995 kgnilnd_finalize_rx_done(kgn_tx_t *tx, kgn_msg_t *msg)
3998 kgn_conn_t *conn = tx->tx_conn;
4000 atomic_inc(&conn->gnc_device->gnd_rdma_nrx);
4001 atomic64_add(tx->tx_nob, &conn->gnc_device->gnd_rdma_rxbytes);
4003 /* the gncm_retval is passed in for PUTs */
4004 rc = kgnilnd_verify_rdma_cksum(tx, msg->gnm_payload_cksum,
4005 msg->gnm_u.completion.gncm_retval);
4007 kgnilnd_complete_tx(tx, rc);
4011 kgnilnd_check_fma_rx(kgn_conn_t *conn)
4019 kgn_peer_t *peer = conn->gnc_peer;
4022 __u16 tmp_cksum = 0, msg_cksum = 0;
4023 int repost = 1, saw_complete;
4024 unsigned long timestamp, newest_last_rx, timeout;
4026 struct lnet_hdr hdr;
4027 struct lnet_nid srcnid;
4030 /* Short circuit if the ep_handle is null.
4031 * It's likely that it's about to be closed as stale.
4033 if (conn->gnc_ephandle == NULL)
4036 timestamp = jiffies;
4037 kgnilnd_gl_mutex_lock(&conn->gnc_device->gnd_cq_mutex);
4038 /* delay in jiffies - we are really concerned only with things that
4039 * result in a schedule() or in holding this off for a long time.
4040 * NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
4041 conn->gnc_device->gnd_mutex_delay += (long) jiffies - timestamp;
4043 /* Resample current time as we have no idea how long it took to get the mutex */
4044 timestamp = jiffies;
4046 /* Check when we last received an rx; we do this before we call getnext
4047 * in case the thread has been blocked for a while. If we haven't
4048 * received an rx within our timeout window we close the connection,
4049 * as we should assume the other side has already closed it. This stops
4050 * us from sending replies to a mailbox that is already in purgatory. */
4053 timeout = cfs_time_seconds(conn->gnc_timeout);
4054 newest_last_rx = GNILND_LASTRX(conn);
4056 /* Error injection to validate that timestamp checking works and that the conn gets closed */
4057 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_RECV_TIMEOUT)) {
4058 timestamp = timestamp + (GNILND_TIMEOUTRX(timeout) * 2);
4061 if (time_after_eq(timestamp, newest_last_rx + (GNILND_TIMEOUTRX(timeout)))) {
4062 GNIDBG_CONN(D_NETERROR|D_CONSOLE, conn, "Can't receive from %s after timeout lapse of %lu; TO %lu",
4063 libcfs_nid2str(conn->gnc_peer->gnp_nid),
4064 cfs_duration_sec(timestamp - newest_last_rx),
4065 cfs_duration_sec(GNILND_TIMEOUTRX(timeout)));
4066 kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
4068 kgnilnd_close_conn(conn, rc);
4072 rrc = kgnilnd_smsg_getnext(conn->gnc_ephandle, &prefix);
4074 if (rrc == GNI_RC_NOT_DONE) {
4075 kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
4076 CDEBUG(D_INFO, "SMSG RX empty conn 0x%p\n", conn);
4080 /* Instead of asserting when we get mailbox corruption, let's attempt to
4081 * close the conn and recover. We can put the conn/mailbox into
4082 * purgatory and let purgatory deal with the problem. If we see
4083 * this NETERROR reported on production systems in large amounts
4084 * we will need to revisit the state machine to see if we can tighten
4085 * it up further to improve data protection. */
4088 if (rrc == GNI_RC_INVALID_STATE) {
4089 kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
4090 GNIDBG_CONN(D_NETERROR | D_CONSOLE, conn, "Mailbox corruption "
4091 "detected closing conn %p from peer %s\n", conn,
4092 libcfs_nid2str(conn->gnc_peer->gnp_nid));
4094 kgnilnd_close_conn(conn, rc);
4098 LASSERTF(rrc == GNI_RC_SUCCESS,
4099 "bad rc %d on conn %p from peer %s\n",
4100 rrc, conn, libcfs_nid2str(peer->gnp_nid));
4102 msg = (kgn_msg_t *)prefix;
4104 rx = kgnilnd_alloc_rx();
4106 kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
4107 kgnilnd_release_msg(conn);
4108 GNIDBG_MSG(D_NETERROR, msg, "Dropping SMSG RX from 0x%p->%s, no RX memory",
4109 conn, libcfs_nid2str(peer->gnp_nid));
4113 GNIDBG_MSG(D_INFO, msg, "SMSG RX on %p", conn);
4115 timestamp = conn->gnc_last_rx;
4116 seq = last_seq = atomic_read(&conn->gnc_rx_seq);
4117 atomic_inc(&conn->gnc_rx_seq);
4119 conn->gnc_last_rx = jiffies;
4120 /* stash first rx so we can clear out purgatory
4122 if (conn->gnc_first_rx == 0)
4123 conn->gnc_first_rx = jiffies;
4125 /* needs to linger to protect gnc_rx_seq like we do with gnc_tx_seq */
4126 kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
4127 kgnilnd_peer_alive(conn->gnc_peer);
4130 rx->grx_conn = conn;
4132 ktime_get_ts64(&rx->grx_received);
4134 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_NET_LOOKUP)) {
4137 rc = kgnilnd_find_net(msg->gnm_srcnid, &net);
4143 kgnilnd_net_decref(net);
4146 if (*kgnilnd_tunables.kgn_checksum && !msg->gnm_cksum)
4147 GNIDBG_MSG(D_WARNING, msg, "no msg header checksum when enabled");
4149 /* XXX Nic: Do we need to swab the cksum? */
4150 if (msg->gnm_cksum != 0) {
4151 msg_cksum = msg->gnm_cksum;
4153 tmp_cksum = kgnilnd_cksum(msg, sizeof(kgn_msg_t));
4155 if (tmp_cksum != msg_cksum) {
4156 GNIDBG_MSG(D_NETERROR, msg, "Bad hdr checksum (%x expected %x)",
4157 tmp_cksum, msg_cksum);
4158 kgnilnd_dump_msg(D_BUFFS, msg);
4163 /* restore checksum for future debug messages */
4164 msg->gnm_cksum = tmp_cksum;
4166 if (msg->gnm_magic != GNILND_MSG_MAGIC) {
4167 if (__swab32(msg->gnm_magic) != GNILND_MSG_MAGIC) {
4168 GNIDBG_MSG(D_NETERROR, msg, "Unexpected magic %08x from %s",
4169 msg->gnm_magic, libcfs_nid2str(peer->gnp_nid));
4174 __swab32s(&msg->gnm_magic);
4175 __swab16s(&msg->gnm_version);
4176 __swab16s(&msg->gnm_type);
4177 __swab64s(&msg->gnm_srcnid);
4178 __swab64s(&msg->gnm_connstamp);
4179 __swab32s(&msg->gnm_seq);
4181 /* NB message type checked below; NOT here... */
4182 switch (msg->gnm_type) {
4183 case GNILND_MSG_GET_ACK_REV:
4184 case GNILND_MSG_PUT_ACK:
4185 kgnilnd_swab_rdma_desc(&msg->gnm_u.putack.gnpam_desc);
4188 case GNILND_MSG_PUT_REQ_REV:
4189 case GNILND_MSG_GET_REQ:
4190 kgnilnd_swab_rdma_desc(&msg->gnm_u.get.gngm_desc);
4198 if (msg->gnm_version != GNILND_MSG_VERSION) {
4199 GNIDBG_MSG(D_NETERROR, msg, "Unexpected protocol version %d from %s",
4200 msg->gnm_version, libcfs_nid2str(peer->gnp_nid));
4205 if (LNET_NIDADDR(msg->gnm_srcnid) != LNET_NIDADDR(peer->gnp_nid)) {
4206 GNIDBG_MSG(D_NETERROR, msg, "Unexpected peer %s from %s",
4207 libcfs_nid2str(msg->gnm_srcnid),
4208 libcfs_nid2str(peer->gnp_nid));
4213 if (msg->gnm_connstamp != conn->gnc_peer_connstamp) {
4214 GNIDBG_MSG(D_NETERROR, msg, "Unexpected connstamp %#llx(%#llx"
4215 " expected) from %s",
4216 msg->gnm_connstamp, conn->gnc_peer_connstamp,
4217 libcfs_nid2str(peer->gnp_nid));
4222 if (msg->gnm_seq != seq) {
4223 GNIDBG_MSG(D_NETERROR, msg, "Unexpected sequence number %d(%d expected) from %s",
4224 msg->gnm_seq, seq, libcfs_nid2str(peer->gnp_nid));
4229 atomic_inc(&conn->gnc_device->gnd_short_nrx);
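/* At this point the header has passed every sanity check: checksum,
 * magic (with byte-swapping when required), protocol version, source
 * NID, connection stamp and sequence number.  gnd_short_nrx counts each
 * SMSG that makes it this far on the device. */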
4231 if (msg->gnm_type == GNILND_MSG_CLOSE) {
4232 CDEBUG(D_NETTRACE, "%s sent us CLOSE msg\n",
4233 libcfs_nid2str(conn->gnc_peer->gnp_nid));
4234 write_lock(&kgnilnd_data.kgn_peer_conn_lock);
4235 conn->gnc_close_recvd = GNILND_CLOSE_RX;
4236 conn->gnc_peer_error = msg->gnm_u.completion.gncm_retval;
4237 /* double check state with lock held */
4238 if (conn->gnc_state == GNILND_CONN_ESTABLISHED) {
4239 /* only error if we are not already closing */
4240 if (conn->gnc_peer_error == -ETIMEDOUT) {
4241 unsigned long now = jiffies;
4242 CNETERR("peer 0x%p->%s closed connection 0x%p due to timeout. "
4244 "RX %d @ %lus/%lus; TX %d @ %lus/%lus; "
4245 "NOOP %lus/%lus/%lus; sched %lus/%lus/%lus ago\n",
4246 conn->gnc_peer, libcfs_nid2str(conn->gnc_peer->gnp_nid),
4248 cfs_duration_sec(now - timestamp),
4249 cfs_duration_sec(now - conn->gnc_last_rx_cq),
4250 atomic_read(&conn->gnc_tx_seq),
4251 cfs_duration_sec(now - conn->gnc_last_tx),
4252 cfs_duration_sec(now - conn->gnc_last_tx_cq),
4253 cfs_duration_sec(now - conn->gnc_last_noop_want),
4254 cfs_duration_sec(now - conn->gnc_last_noop_sent),
4255 cfs_duration_sec(now - conn->gnc_last_noop_cq),
4256 cfs_duration_sec(now - conn->gnc_last_sched_ask),
4257 cfs_duration_sec(now - conn->gnc_last_sched_do),
4258 cfs_duration_sec(now - conn->gnc_device->gnd_sched_alive));
4260 kgnilnd_close_conn_locked(conn, -ECONNRESET);
4262 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
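/* CLOSE is handled ahead of the dispatch switch below: the peer's error
 * is stashed in gnc_peer_error, gnc_close_recvd records that we saw the
 * CLOSE, and if the conn was still ESTABLISHED it is torn down locally
 * with -ECONNRESET.  The extra -ETIMEDOUT logging above exists because a
 * peer-side timeout usually means this node fell behind servicing the
 * conn, and the RX/TX/NOOP/scheduler ages help show where it stalled. */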
4266 if (conn->gnc_close_recvd) {
4267 GNIDBG_MSG(D_NETERROR, msg, "Unexpected message %s(%d/%d) after CLOSE from %s",
4268 kgnilnd_msgtype2str(msg->gnm_type),
4269 msg->gnm_type, conn->gnc_close_recvd,
4270 libcfs_nid2str(conn->gnc_peer->gnp_nid));
4275 if (conn->gnc_state != GNILND_CONN_ESTABLISHED) {
4276 /* XXX Nic: log message received on bad connection state */
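/* Dispatch on message type.  Broadly, per the cases below: NOOP is just a
 * keepalive; IMMEDIATE, PUT_REQ and GET_REQ hand the embedded LNet header
 * to lnet_parse(); the ACK variants match the originating TX by cookie
 * and either launch the RDMA or defer it until the SMSG send completes;
 * the DONE variants finalize a completed RDMA via
 * kgnilnd_finalize_rx_done(); and the NAK variants complete the matched
 * TX with the peer's error code. */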
4280 switch (msg->gnm_type) {
4281 case GNILND_MSG_NOOP:
4282 /* Nothing to do; just a keepalive */
4285 case GNILND_MSG_IMMEDIATE:
4286 /* only get SMSG payload for IMMEDIATE */
4287 atomic64_add(msg->gnm_payload_len, &conn->gnc_device->gnd_short_rxbytes);
4288 lnet_hdr_from_nid4(&hdr, &msg->gnm_u.immediate.gnim_hdr);
4289 lnet_nid4_to_nid(msg->gnm_srcnid, &srcnid);
4290 rc = lnet_parse(net->gnn_ni, &hdr, &srcnid, rx, 0);
4293 case GNILND_MSG_GET_REQ_REV:
4294 case GNILND_MSG_PUT_REQ:
4295 lnet_hdr_from_nid4(&hdr, &msg->gnm_u.putreq.gnprm_hdr);
4296 lnet_nid4_to_nid(msg->gnm_srcnid, &srcnid);
4297 rc = lnet_parse(net->gnn_ni, &hdr, &srcnid, rx, 1);
4300 case GNILND_MSG_GET_NAK_REV:
4301 tx = kgnilnd_match_reply_either(conn, GNILND_MSG_GET_REQ_REV, GNILND_MSG_GET_ACK_REV,
4302 msg->gnm_u.completion.gncm_cookie);
4306 kgnilnd_complete_tx(tx, msg->gnm_u.completion.gncm_retval);
4308 case GNILND_MSG_PUT_NAK:
4309 tx = kgnilnd_match_reply_either(conn, GNILND_MSG_PUT_REQ, GNILND_MSG_PUT_ACK,
4310 msg->gnm_u.completion.gncm_cookie);
4314 kgnilnd_complete_tx(tx, msg->gnm_u.completion.gncm_retval);
4316 case GNILND_MSG_PUT_ACK:
4317 tx = kgnilnd_match_reply(conn, GNILND_MSG_PUT_REQ,
4318 msg->gnm_u.putack.gnpam_src_cookie);
4322 /* store putack data for later: deferred rdma or re-try */
4323 tx->tx_putinfo = msg->gnm_u.putack;
4326 spin_lock(&tx->tx_conn->gnc_list_lock);
4328 GNITX_ASSERTF(tx, tx->tx_state & GNILND_TX_WAITING_REPLY,
4329 "not waiting for reply", NULL);
4331 tx->tx_state &= ~GNILND_TX_WAITING_REPLY;
4333 if (likely(!(tx->tx_state & GNILND_TX_WAITING_COMPLETION))) {
4334 kgnilnd_tx_del_state_locked(tx, NULL, conn, GNILND_TX_ALLOCD);
4335 /* sample under lock as follow on steps require gnc_list_lock
4336 * - or call kgnilnd_tx_done which requires no locks held over
4337 * call to lnet_finalize */
4340 /* cannot launch rdma if still waiting for fma-msg completion */
4341 CDEBUG(D_NET, "tx 0x%p type 0x%02x will need to "
4342 "wait for SMSG completion\n", tx, tx->tx_msg.gnm_type);
4343 tx->tx_state |= GNILND_TX_PENDING_RDMA;
4345 spin_unlock(&tx->tx_conn->gnc_list_lock);
4348 rc = kgnilnd_send_mapped_tx(tx, 0);
4350 kgnilnd_tx_done(tx, rc);
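/* For PUT_ACK (and GET_ACK_REV just below) the sink's RDMA descriptor
 * arrives in gnm_u.putack and is stashed in tx_putinfo for the RDMA or a
 * later retry.  If the SMSG carrying the original request has not
 * completed yet, GNILND_TX_PENDING_RDMA defers kgnilnd_send_mapped_tx()
 * to the SMSG completion path; otherwise the RDMA is launched here. */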
4353 case GNILND_MSG_GET_ACK_REV:
4354 tx = kgnilnd_match_reply(conn, GNILND_MSG_GET_REQ_REV,
4355 msg->gnm_u.putack.gnpam_src_cookie);
4359 /* store putack data for later: deferred rdma or re-try */
4360 tx->tx_putinfo = msg->gnm_u.putack;
4362 spin_lock(&tx->tx_conn->gnc_list_lock);
4364 GNITX_ASSERTF(tx, tx->tx_state & GNILND_TX_WAITING_REPLY,
4365 "not waiting for reply", NULL);
4367 tx->tx_state &= ~GNILND_TX_WAITING_REPLY;
4369 if (likely(!(tx->tx_state & GNILND_TX_WAITING_COMPLETION))) {
4370 kgnilnd_tx_del_state_locked(tx, NULL, conn, GNILND_TX_ALLOCD);
4371 /* sample under lock as follow on steps require gnc_list_lock
4372 * - or call kgnilnd_tx_done which requires no locks held over
4373 * call to lnet_finalize */
4376 /* cannot launch rdma if still waiting for fma-msg completion */
4377 CDEBUG(D_NET, "tx 0x%p type 0x%02x will need to "
4378 "wait for SMSG completion\n", tx, tx->tx_msg.gnm_type);
4379 tx->tx_state |= GNILND_TX_PENDING_RDMA;
4381 spin_unlock(&tx->tx_conn->gnc_list_lock);
4384 rc = kgnilnd_send_mapped_tx(tx, 0);
4386 kgnilnd_tx_done(tx, rc);
4389 case GNILND_MSG_PUT_DONE:
4390 tx = kgnilnd_match_reply(conn, GNILND_MSG_PUT_ACK,
4391 msg->gnm_u.completion.gncm_cookie);
4395 GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED,
4396 "bad tx buftype %d", tx->tx_buftype);
4398 kgnilnd_finalize_rx_done(tx, msg);
4400 case GNILND_MSG_PUT_REQ_REV:
4401 case GNILND_MSG_GET_REQ:
4402 lnet_hdr_from_nid4(&hdr, &msg->gnm_u.get.gngm_hdr);
4403 lnet_nid4_to_nid(msg->gnm_srcnid, &srcnid);
4404 rc = lnet_parse(net->gnn_ni, &hdr, &srcnid, rx, 1);
4408 case GNILND_MSG_GET_NAK:
4409 tx = kgnilnd_match_reply(conn, GNILND_MSG_GET_REQ,
4410 msg->gnm_u.completion.gncm_cookie);
4414 GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED,
4415 "bad tx buftype %d", tx->tx_buftype);
4417 kgnilnd_complete_tx(tx, msg->gnm_u.completion.gncm_retval);
4420 case GNILND_MSG_GET_DONE:
4421 tx = kgnilnd_match_reply(conn, GNILND_MSG_GET_REQ,
4422 msg->gnm_u.completion.gncm_cookie);
4426 GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED,
4427 "bad tx buftype %d", tx->tx_buftype);
4429 lnet_set_reply_msg_len(net->gnn_ni, tx->tx_lntmsg[1],
4430 msg->gnm_u.completion.gncm_retval);
4432 kgnilnd_finalize_rx_done(tx, msg);
4434 case GNILND_MSG_GET_DONE_REV:
4435 tx = kgnilnd_match_reply(conn, GNILND_MSG_GET_ACK_REV,
4436 msg->gnm_u.completion.gncm_cookie);
4440 GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED,
4441 "bad tx buftype %d", tx->tx_buftype);
4443 kgnilnd_finalize_rx_done(tx, msg);
4446 case GNILND_MSG_PUT_DONE_REV:
4447 tx = kgnilnd_match_reply(conn, GNILND_MSG_PUT_REQ_REV,
4448 msg->gnm_u.completion.gncm_cookie);
4453 GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED,
4454 "bad tx buftype %d", tx->tx_buftype);
4456 kgnilnd_finalize_rx_done(tx, msg);
4458 case GNILND_MSG_PUT_NAK_REV:
4459 tx = kgnilnd_match_reply(conn, GNILND_MSG_PUT_REQ_REV,
4460 msg->gnm_u.completion.gncm_cookie);
4465 GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED,
4466 "bad tx buftype %d", tx->tx_buftype);
4468 kgnilnd_complete_tx(tx, msg->gnm_u.completion.gncm_retval);
4473 if (rc < 0) /* protocol/comms error */
4474 kgnilnd_close_conn(conn, rc);
4476 if (repost && rx != NULL) {
4477 kgnilnd_consume_rx(rx);
4480 /* we got an event so assume more there and call for reschedule */
4482 kgnilnd_schedule_conn(conn);
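/* Exit path for kgnilnd_check_fma_rx(): a negative rc (a protocol or
 * comms error, e.g. from lnet_parse()) closes the conn, the rx is handed
 * back through kgnilnd_consume_rx() when repost is set, and the conn is
 * rescheduled on the assumption that one successful getnext means more
 * messages are probably waiting in the mailbox. */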
4486 /* Do the failure injections that we need to affect conn processing in the following function.
4487 * When writing tests that use this function make sure to use a fail_loc with a fail mask.
4488 * If you don't you can cause the scheduler threads to spin on the conn without it leaving
4491 * intent is used to signal the calling function whether or not the conn needs to be rescheduled.
4495 kgnilnd_check_conn_fail_loc(kgn_device_t *dev, kgn_conn_t *conn, int *intent)
4499 /* short circuit out when not set */
4500 if (likely(!cfs_fail_loc)) {
4504 /* failure injection to test for stack reset clean ups */
4505 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_DROP_CLOSING)) {
4506 /* we can't rely on busy loops being nice enough to get the
4507 * stack reset triggered - it'd just spin on this conn */
4508 CFS_RACE(CFS_FAIL_GNI_DROP_CLOSING);
4511 GOTO(did_fail_loc, rc);
4514 if (conn->gnc_state == GNILND_CONN_DESTROY_EP) {
4515 /* DESTROY_EP set in kgnilnd_conn_decref on gnc_refcount = 1 */
4517 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_DROP_DESTROY_EP)) {
4518 CFS_RACE(CFS_FAIL_GNI_DROP_DESTROY_EP);
4521 GOTO(did_fail_loc, rc);
4525 /* CFS_FAIL_GNI_FINISH_PURG2 is used to stop a connection from fully closing. This scheduler
4526 * will spin on the CFS_FAIL_TIMEOUT until the fail_loc is cleared at which time the connection
4527 * will be closed by kgnilnd_complete_closed_conn.
4529 if ((conn->gnc_state == GNILND_CONN_CLOSED) && CFS_FAIL_CHECK(CFS_FAIL_GNI_FINISH_PURG2)) {
4530 while (CFS_FAIL_TIMEOUT(CFS_FAIL_GNI_FINISH_PURG2, 1)) {};
4533 GOTO(did_fail_loc, rc);
4536 /* this one is a bit gross - we can't hold the mutex from process_conns
4537 * across a CFS_RACE here - it'd block the conn threads from doing an ep_bind
4538 * and moving onto finish_connect
4539 * so, we'll just set the rc - kgnilnd_process_conns will clear
4540 * found_work on a fail_loc, getting the scheduler thread to call schedule()
4541 * and effectively getting this thread to sleep */
4542 if ((conn->gnc_state == GNILND_CONN_CLOSED) && CFS_FAIL_CHECK(CFS_FAIL_GNI_FINISH_PURG)) {
4545 GOTO(did_fail_loc, rc);
4553 kgnilnd_send_conn_close(kgn_conn_t *conn)
4557 /* we are closing the conn - we will try to send the CLOSE msg
4558 * but will not wait for anything else to flush */
4560 /* send the close if not already done so or received one */
4561 if (!conn->gnc_close_sent && !conn->gnc_close_recvd) {
4562 /* set close_sent regardless of the success of the
4563 * CLOSE message. We are going to try once and then
4564 * kick him out of the sandbox */
4565 conn->gnc_close_sent = 1;
4568 /* EP might be null already if remote side initiated a new connection.
4569 * kgnilnd_finish_connect destroys existing ep_handles before wiring up the new connection,
4570 * so this check is here to make sure we don't attempt to send with a null ep_handle.
4572 if (conn->gnc_ephandle != NULL) {
4575 tx = kgnilnd_new_tx_msg(GNILND_MSG_CLOSE,
4576 lnet_nid_to_nid4(&conn->gnc_peer->gnp_net->gnn_ni->ni_nid));
4578 tx->tx_msg.gnm_u.completion.gncm_retval = conn->gnc_error;
4579 tx->tx_state = GNILND_TX_WAITING_COMPLETION;
4580 tx->tx_qtime = jiffies;
4582 if (tx->tx_id.txe_idx == 0) {
4583 rc = kgnilnd_set_tx_id(tx, conn);
4585 kgnilnd_tx_done(tx, rc);
4589 CDEBUG(D_NETTRACE, "sending close with errno %d\n",
4592 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_CLOSE_SEND)) {
4593 kgnilnd_tx_done(tx, -EAGAIN);
4595 rc = kgnilnd_sendmsg(tx, NULL, 0, NULL, GNILND_TX_FMAQ);
4597 /* It wasn't sent and we don't care. */
4598 kgnilnd_tx_done(tx, rc);
4606 /* When changing gnc_state we need to take the kgn_peer_conn_lock */
4607 write_lock(&kgnilnd_data.kgn_peer_conn_lock);
4608 conn->gnc_state = GNILND_CONN_CLOSED;
4609 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
4610 /* mark this conn as CLOSED now that we have processed it;
4611 * do this after the TX, so we can use CLOSING in asserts */
4615 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_RX_CLOSE_CLOSED)) {
4616 /* simulate a RX CLOSE after the timeout but before
4617 * the scheduler thread gets it */
4618 conn->gnc_close_recvd = GNILND_CLOSE_INJECT2;
4619 conn->gnc_peer_error = -ETIMEDOUT;
4621 /* schedule to allow potential CLOSE and get the complete phase run */
4622 kgnilnd_schedule_conn(conn);
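/* kgnilnd_send_conn_close() is strictly best effort: the CLOSE TX is
 * attempted at most once and simply dropped on error, gnc_state moves to
 * CLOSED under kgn_peer_conn_lock only after that attempt, and the conn
 * is rescheduled so a later pass of kgnilnd_process_conns() can run
 * kgnilnd_complete_closed_conn() for it. */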
4626 kgnilnd_process_mapped_tx(kgn_device_t *dev)
4631 int fast_remaps = GNILND_FAST_MAPPING_TRY;
4632 int log_retrans, log_retrans_level;
4633 static int last_map_version;
4636 spin_lock(&dev->gnd_lock);
4637 if (list_empty(&dev->gnd_map_tx)) {
4638 /* if the list is empty make sure we don't have a timer running */
4639 del_singleshot_timer_sync(&dev->gnd_map_timer);
4640 spin_unlock(&dev->gnd_lock);
4644 dev->gnd_sched_alive = jiffies;
4646 /* we'll retry as fast as possible up to 25% of the limit, then we start
4647 * backing off until our map version changes - indicating we unmapped something */
4649 tx = list_first_entry(&dev->gnd_map_tx, kgn_tx_t, tx_list);
4650 if (likely(dev->gnd_map_attempt == 0) ||
4651 time_after_eq(jiffies, dev->gnd_next_map) ||
4652 last_map_version != dev->gnd_map_version) {
4654 /* if this is our first attempt at mapping set last mapped to current
4655 * jiffies so we can timeout our attempt correctly.
4657 if (dev->gnd_map_attempt == 0)
4658 dev->gnd_last_map = jiffies;
4660 GNIDBG_TX(D_NET, tx, "waiting for mapping event to retry", NULL);
4661 spin_unlock(&dev->gnd_lock);
4665 /* delete the previous timer if it exists */
4666 del_singleshot_timer_sync(&dev->gnd_map_timer);
4667 /* stash the last map version to let us know when a good one was seen */
4668 last_map_version = dev->gnd_map_version;
4670 /* we need to take the lock and continually refresh the head of the list as
4671 * kgnilnd_complete_closed_conn might be nuking stuff and we are cycling the lock
4672 * allowing them to squeeze in */
4674 while (!list_empty(&dev->gnd_map_tx)) {
4675 /* make sure we break out early on quiesce */
4676 if (unlikely(kgnilnd_data.kgn_quiesce_trigger)) {
4677 /* always break with lock held - we unlock outside loop */
4681 tx = list_first_entry(&dev->gnd_map_tx, kgn_tx_t, tx_list);
4683 kgnilnd_tx_del_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_ALLOCD);
4686 /* sample with lock held, serializing with kgnilnd_complete_closed_conn */
4687 if (tx->tx_conn->gnc_state != GNILND_CONN_ESTABLISHED) {
4688 /* if conn is dying, mark tx in tx_ref_table for
4689 * kgnilnd_complete_closed_conn to finish up */
4690 kgnilnd_tx_add_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_DYING, 1);
4693 /* tx was moved to DYING, get next */
4697 spin_unlock(&dev->gnd_lock);
4698 rc = kgnilnd_send_mapped_tx(tx, 1);
4700 /* We made it! skip error handling.. */
4702 /* OK to continue on +ve errors as it won't get seen until
4703 * this function is called again - we operate on a copy of the original
4704 * list and not the live list */
4705 spin_lock(&dev->gnd_lock);
4706 /* reset map attempts back to zero - we successfully
4707 * mapped, so we can reset our timers */
4708 dev->gnd_map_attempt = 0;
4710 } else if (rc == -EAGAIN) {
4711 spin_lock(&dev->gnd_lock);
4712 mod_timer(&dev->gnd_map_timer, dev->gnd_next_map);
4713 spin_unlock(&dev->gnd_lock);
4714 GOTO(get_out_mapped, rc);
4715 } else if (rc != -ENOMEM) {
4716 /* carp, failure we can't handle */
4717 kgnilnd_tx_done(tx, rc);
4718 spin_lock(&dev->gnd_lock);
4719 /* reset map attempts back to zero; we don't know what happened but it
4720 * wasn't a failed mapping */
4722 dev->gnd_map_attempt = 0;
4726 /* time to handle the retry cases - lock so we don't have 2 threads
4727 * mucking with gnd_map_attempt or gnd_next_map at the same time. */
4729 spin_lock(&dev->gnd_lock);
4730 dev->gnd_map_attempt++;
4731 if (dev->gnd_map_attempt < fast_remaps) {
4732 /* no backoff yet - we want this to go as fast as possible,
4733 * so just set gnd_next_map to the current jiffies and it will be
4734 * processed again right away. */
4736 dev->gnd_next_map = jiffies;
4738 /* Retry based on GNILND_MAP_RETRY_RATE */
4739 dev->gnd_next_map = jiffies + GNILND_MAP_RETRY_RATE;
4742 /* only log occasionally once we've retried fast_remaps */
4743 log_retrans = (dev->gnd_map_attempt >= fast_remaps) &&
4744 ((dev->gnd_map_attempt % fast_remaps) == 0);
4745 log_retrans_level = log_retrans ? D_NETERROR : D_NET;
4747 /* make sure we are not off in the weeds with this tx */
4748 if (time_after(jiffies, dev->gnd_last_map + GNILND_MAP_TIMEOUT)) {
4749 GNIDBG_TX(D_NETERROR, tx,
4750 "giving up on TX, too many retries", NULL);
4751 spin_unlock(&dev->gnd_lock);
4752 if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_REQ ||
4753 tx->tx_msg.gnm_type == GNILND_MSG_GET_REQ_REV) {
4754 kgnilnd_nak_rdma(tx->tx_conn, tx->tx_msg.gnm_type,
4756 tx->tx_putinfo.gnpam_dst_cookie,
4757 tx->tx_msg.gnm_srcnid);
4759 kgnilnd_nak_rdma(tx->tx_conn, tx->tx_msg.gnm_type,
4761 tx->tx_getinfo.gngm_cookie,
4762 tx->tx_msg.gnm_srcnid);
4764 kgnilnd_tx_done(tx, -ENOMEM);
4765 GOTO(get_out_mapped, rc);
4767 GNIDBG_TX(log_retrans_level, tx,
4768 "transient map failure #%d %d pages/%d bytes phys %u@%u "
4769 "nq_map %d mdd# %d/%d GART %ld",
4770 dev->gnd_map_attempt, tx->tx_phys_npages, tx->tx_nob,
4771 dev->gnd_map_nphys, dev->gnd_map_physnop * PAGE_SIZE,
4772 atomic_read(&dev->gnd_nq_map),
4773 atomic_read(&dev->gnd_n_mdd), atomic_read(&dev->gnd_n_mdd_held),
4774 atomic64_read(&dev->gnd_nbytes_map));
4777 /* we need to stop processing the rest of the list, so add it back in */
4778 /* set timer to wake device when we need to schedule this tx */
4779 mod_timer(&dev->gnd_map_timer, dev->gnd_next_map);
4780 kgnilnd_tx_add_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_MAPQ, 0);
4781 spin_unlock(&dev->gnd_lock);
4782 GOTO(get_out_mapped, rc);
4784 spin_unlock(&dev->gnd_lock);
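/* Retry policy for the mapping loop above: the first
 * GNILND_FAST_MAPPING_TRY attempts retry immediately, after which
 * gnd_next_map is pushed out by GNILND_MAP_RETRY_RATE and the map timer
 * is re-armed to wake the device.  Once GNILND_MAP_TIMEOUT has elapsed
 * since gnd_last_map the TX is abandoned: the peer is NAKed (using the
 * cookie from tx_putinfo or tx_getinfo depending on the message type)
 * and the TX is finished with -ENOMEM.  A change in gnd_map_version
 * short-circuits the backoff, since it indicates registrations were
 * released and another mapping attempt may now succeed. */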
4790 kgnilnd_process_conns(kgn_device_t *dev, unsigned long deadline)
4795 int error_inject = 0;
4799 spin_lock(&dev->gnd_lock);
4800 while (!list_empty(&dev->gnd_ready_conns) && time_before(jiffies, deadline)) {
4801 dev->gnd_sched_alive = jiffies;
4805 if (unlikely(kgnilnd_data.kgn_quiesce_trigger)) {
4806 /* break with lock held */
4810 conn = list_first_entry(&dev->gnd_ready_conns, kgn_conn_t, gnc_schedlist);
4811 list_del_init(&conn->gnc_schedlist);
4813 * Since we are processing conn now, we don't need to be on the delaylist any longer.
4816 if (!list_empty(&conn->gnc_delaylist))
4817 list_del_init(&conn->gnc_delaylist);
4818 spin_unlock(&dev->gnd_lock);
4820 conn_sched = xchg(&conn->gnc_scheduled, GNILND_CONN_PROCESS);
4822 LASSERTF(conn_sched != GNILND_CONN_IDLE &&
4823 conn_sched != GNILND_CONN_PROCESS,
4824 "conn %p on ready list but in bad state: %d\n",
4827 CDEBUG(D_INFO, "conn %p@%s for processing\n",
4828 conn, kgnilnd_conn_state2str(conn));
4831 set_mb(conn->gnc_last_sched_do, jiffies);
4833 if (kgnilnd_check_conn_fail_loc(dev, conn, &intent)) {
4835 /* based on intent see if we should run again. */
4836 rc = kgnilnd_schedule_process_conn(conn, intent);
4838 /* drop ref from gnd_ready_conns */
4839 if (atomic_read(&conn->gnc_refcount) == 1 && rc != 1) {
4840 down_write(&dev->gnd_conn_sem);
4841 kgnilnd_conn_decref(conn);
4842 up_write(&dev->gnd_conn_sem);
4843 } else if (rc != 1) {
4844 kgnilnd_conn_decref(conn);
4846 /* clear this so that scheduler thread doesn't spin */
4848 /* break with lock held... */
4849 spin_lock(&dev->gnd_lock);
4853 if (unlikely(conn->gnc_state == GNILND_CONN_CLOSED)) {
4854 down_write(&dev->gnd_conn_sem);
4856 /* CONN_CLOSED set in process_fmaq when CLOSE is sent */
4857 if (unlikely(atomic_read(&conn->gnc_tx_in_use))) {
4858 /* If there are tx's currently in use in another
4859 * thread we don't want to complete the close
4860 * yet. Cycle this conn back through
4862 kgnilnd_schedule_conn(conn);
4864 kgnilnd_complete_closed_conn(conn);
4866 up_write(&dev->gnd_conn_sem);
4867 } else if (unlikely(conn->gnc_state == GNILND_CONN_DESTROY_EP)) {
4868 /* DESTROY_EP set in kgnilnd_conn_decref on gnc_refcount = 1 */
4869 /* serialize SMSG CQs with ep_bind and smsg_release */
4870 down_write(&dev->gnd_conn_sem);
4871 kgnilnd_destroy_conn_ep(conn);
4872 up_write(&dev->gnd_conn_sem);
4873 } else if (unlikely(conn->gnc_state == GNILND_CONN_CLOSING)) {
4874 /* if we need to do some CLOSE sending, etc., do it here */
4875 down_write(&dev->gnd_conn_sem);
4876 kgnilnd_send_conn_close(conn);
4877 kgnilnd_check_fma_rx(conn);
4878 up_write(&dev->gnd_conn_sem);
4879 } else if (atomic_read(&conn->gnc_peer->gnp_dirty_eps) == 0) {
4880 /* start moving traffic if the old conns are cleared out */
4881 down_read(&dev->gnd_conn_sem);
4882 kgnilnd_check_fma_rx(conn);
4883 kgnilnd_process_fmaq(conn);
4884 up_read(&dev->gnd_conn_sem);
4887 rc = kgnilnd_schedule_process_conn(conn, 0);
4889 /* drop ref from gnd_ready_conns */
4890 if (atomic_read(&conn->gnc_refcount) == 1 && rc != 1) {
4891 down_write(&dev->gnd_conn_sem);
4892 kgnilnd_conn_decref(conn);
4893 up_write(&dev->gnd_conn_sem);
4894 } else if (rc != 1) {
4895 kgnilnd_conn_decref(conn);
4898 /* check list again with lock held */
4899 spin_lock(&dev->gnd_lock);
4902 /* If we are short circuiting due to timing we want to be scheduled
4903 * as soon as possible.
4905 if (!list_empty(&dev->gnd_ready_conns) && !error_inject)
4908 spin_unlock(&dev->gnd_lock);
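/* Reference handling in kgnilnd_process_conns(): each conn on
 * gnd_ready_conns holds a scheduling ref.  When
 * kgnilnd_schedule_process_conn() did not requeue the conn (rc != 1) that
 * ref is dropped here, and if it appears to be the last ref the decref is
 * taken under a write hold of gnd_conn_sem so that EP teardown cannot
 * race with the CQ-checking paths that only hold the semaphore for read. */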
4914 kgnilnd_scheduler(void *arg)
4916 int threadno = (long)arg;
4919 unsigned long deadline = 0;
4922 dev = &kgnilnd_data.kgn_devices[(threadno + 1) % kgnilnd_data.kgn_ndevs];
4924 /* all gnilnd threads need to run fairly urgently */
4925 set_user_nice(current, *kgnilnd_tunables.kgn_sched_nice);
4926 deadline = jiffies + cfs_time_seconds(*kgnilnd_tunables.kgn_sched_timeout);
4927 while (!kgnilnd_data.kgn_shutdown) {
4929 /* Safe: kgn_shutdown only set when quiescent */
4931 /* to quiesce or to not quiesce, that is the question */
4933 if (unlikely(kgnilnd_data.kgn_quiesce_trigger)) {
4934 KGNILND_SPIN_QUIESCE;
4937 /* tracking for when thread goes AWOL */
4938 dev->gnd_sched_alive = jiffies;
4940 CFS_FAIL_TIMEOUT(CFS_FAIL_GNI_SCHED_DEADLINE,
4941 (*kgnilnd_tunables.kgn_sched_timeout + 1));
4942 /* let folks know we are up and kicking
4943 * - they can use this for latency savings, etc
4944 * - only change if IRQ; if IDLE leave it alone so that
4945 * schedule_device calls put us back to IRQ */
4946 (void)cmpxchg(&dev->gnd_ready, GNILND_DEV_IRQ, GNILND_DEV_LOOP);
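/* gnd_ready is a small three-state handshake: IRQ means a wakeup arrived,
 * LOOP (set here) means a scheduler thread is actively polling, and IDLE
 * means nobody is looking.  The cmpxchg only downgrades IRQ to LOOP and
 * deliberately leaves IDLE untouched, per the comment above, so that
 * kgnilnd_schedule_device() can still move us back to IRQ; the xchg() to
 * IDLE just before sleeping reports whether another IRQ slipped in during
 * the final check. */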
4948 down_read(&dev->gnd_conn_sem);
4949 /* always check these - they are super low cost */
4950 found_work += kgnilnd_check_fma_send_cq(dev);
4951 found_work += kgnilnd_check_fma_rcv_cq(dev);
4953 /* rdma CQ doesn't care about eps */
4954 found_work += kgnilnd_check_rdma_cq(dev);
4956 /* move some RDMA ? */
4957 found_work += kgnilnd_process_rdmaq(dev);
4959 /* map some pending RDMA requests ? */
4960 found_work += kgnilnd_process_mapped_tx(dev);
4962 /* the EP for a conn is not destroyed until all the references
4963 * to it are gone, so these checks should be safe
4964 * even if run in parallel with the CQ checking functions
4965 * _AND_ a thread that processes the CLOSED->DONE
4969 up_read(&dev->gnd_conn_sem);
4971 /* process all conns ready now */
4972 found_work += kgnilnd_process_conns(dev, deadline);
4974 /* do an eager check to avoid the IRQ disabling in
4975 * prepare_to_wait and friends */
4978 (busy_loops++ < *kgnilnd_tunables.kgn_loops) &&
4979 time_before(jiffies, deadline)) {
4981 if ((busy_loops % 10) == 0) {
4982 /* tickle heartbeat and watchdog to ensure our
4983 * piggishness doesn't turn into heartbeat failure */
4984 touch_nmi_watchdog();
4990 /* if we got here, either found_work was zero or busy_loops says we
4991 * need to take a break. We'll clear gnd_ready but we'll check
4992 * one last time if there is an IRQ that needs processing */
4994 prepare_to_wait(&dev->gnd_waitq, &wait, TASK_INTERRUPTIBLE);
4996 /* the first time this will go LOOP -> IDLE and let us do one final check
4997 * during which we might get an IRQ, then IDLE->IDLE and schedule()
4998 * - this might allow other threads to block us for a bit if they
4999 * try to get the mutex, but that is good as we'd need to wake
5000 * up soon to handle the CQ or other processing anyways */
5002 found_work += xchg(&dev->gnd_ready, GNILND_DEV_IDLE);
5004 if ((busy_loops >= *kgnilnd_tunables.kgn_loops) ||
5005 time_after_eq(jiffies, deadline)) {
5007 "yeilding: found_work %d busy_loops %d\n",
5008 found_work, busy_loops);
5010 /* use yield if we are bailing due to busy_loops
5011 * - this will ensure we wake up soonish. This closes
5012 * a race with kgnilnd_device_callback - where it'd
5013 * not call wake_up() because gnd_ready == 1, but then
5014 * we come down and schedule() because of busy_loops.
5015 * We'd not be woken up until something poked our waitq
5016 * again. yield() ensures we wake up without another
5017 * waitq poke in that case */
5018 atomic_inc(&dev->gnd_n_yield);
5019 kgnilnd_data.kgn_last_condresched = jiffies;
5021 CDEBUG(D_INFO, "awake after yield\n");
5022 deadline = jiffies + cfs_time_seconds(*kgnilnd_tunables.kgn_sched_timeout);
5023 } else if (found_work == GNILND_DEV_IDLE) {
5024 /* busy_loops is low and there is nothing to do,
5025 * go to sleep and wait for a waitq poke */
5027 "scheduling: found_work %d busy_loops %d\n",
5028 found_work, busy_loops);
5029 atomic_inc(&dev->gnd_n_schedule);
5030 kgnilnd_data.kgn_last_scheduled = jiffies;
5032 CDEBUG(D_INFO, "awake after schedule\n");
5033 deadline = jiffies + cfs_time_seconds(*kgnilnd_tunables.kgn_sched_timeout);
5035 finish_wait(&dev->gnd_waitq, &wait);
5038 kgnilnd_thread_fini();