2 * Copyright (C) 2004 Cluster File Systems, Inc.
4 * Copyright (C) 2009-2012 Cray, Inc.
6 * Derived from work by Eric Barton <eric@bartonsoftware.com>
7 * Author: James Shimek <jshimek@cray.com>
8 * Author: Nic Henke <nic@cray.com>
10 * This file is part of Lustre, http://www.lustre.org.
12 * Lustre is free software; you can redistribute it and/or
13 * modify it under the terms of version 2 of the GNU General Public
14 * License as published by the Free Software Foundation.
16 * Lustre is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with Lustre; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
28 #include <linux/nmi.h>
31 /* this is useful when you need to debug wire corruption. */
33 kgnilnd_dump_blob(int level, char *prefix, void *buf, int len) {
41 "%s 0x%p: 0x%16.16llx 0x%16.16llx 0x%16.16llx 0x%16.16llx\n",
42 prefix, ptr, *(ptr), *(ptr + 1), *(ptr + 2), *(ptr + 3));
45 } else if (len >= 16) {
47 "%s 0x%p: 0x%16.16llx 0x%16.16llx\n",
48 prefix, ptr, *(ptr), *(ptr + 1));
52 CDEBUG(level, "%s 0x%p: 0x%16.16llx\n",
61 kgnilnd_dump_msg(int mask, kgn_msg_t *msg)
63 CDEBUG(mask, "0x%8.8x 0x%4.4x 0x%4.4x 0x%16.16llx"
64 " 0x%16.16llx 0x%8.8x 0x%4.4x 0x%4.4x 0x%8.8x\n",
65 msg->gnm_magic, msg->gnm_version,
66 msg->gnm_type, msg->gnm_srcnid,
67 msg->gnm_connstamp, msg->gnm_seq,
68 msg->gnm_cksum, msg->gnm_payload_cksum,
69 msg->gnm_payload_len);
73 kgnilnd_schedule_device(kgn_device_t *dev)
75 short already_live = 0;
77 /* we'll only want to wake if the scheduler thread
78 * has come around and set ready to zero */
79 already_live = cmpxchg(&dev->gnd_ready, GNILND_DEV_IDLE, GNILND_DEV_IRQ);
82 wake_up_all(&dev->gnd_waitq);
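/* An illustrative sketch (not from the original source): the wake-up above is
 * gated by cmpxchg() so that only the transition from IDLE to IRQ pays for
 * wake_up_all(); callbacks that fire while the scheduler is already live see a
 * non-IDLE value and skip the wake.  example_schedule() is a hypothetical
 * stand-alone rendering of that pattern. */
static inline void
example_schedule(kgn_device_t *dev)
{
        /* cmpxchg returns the old value - only the caller that saw IDLE wakes */
        if (cmpxchg(&dev->gnd_ready, GNILND_DEV_IDLE, GNILND_DEV_IRQ) ==
            GNILND_DEV_IDLE)
                wake_up_all(&dev->gnd_waitq);
}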
87 void kgnilnd_schedule_device_timer(unsigned long arg)
89 kgn_device_t *dev = (kgn_device_t *) arg;
91 kgnilnd_schedule_device(dev);
95 kgnilnd_device_callback(__u32 devid, __u64 arg)
98 int index = (int) arg;
100 if (index >= kgnilnd_data.kgn_ndevs) {
101 /* use _EMERG instead of an LBUG to prevent LBUG'ing in
102 * interrupt context. */
103 LCONSOLE_EMERG("callback for unknown device %d->%d\n",
108 dev = &kgnilnd_data.kgn_devices[index];
109 /* just basic sanity */
110 if (dev->gnd_id == devid) {
111 kgnilnd_schedule_device(dev);
113 LCONSOLE_EMERG("callback for bad device %d devid %d\n",
118 /* sched_intent values:
119 * < 0 : do not reschedule under any circumstances
120 * == 0: reschedule if someone marked him WANTS_SCHED
121 * > 0 : force a reschedule */
122 /* Return code 0 means it did not schedule the conn, 1
123 * means it successfully scheduled the conn.
127 kgnilnd_schedule_process_conn(kgn_conn_t *conn, int sched_intent)
131 /* move back to IDLE but save previous state.
132 * if we see WANTS_SCHED, we'll call kgnilnd_schedule_conn and
133 * let the xchg there handle any racing callers to get it
134 * onto gnd_ready_conns */
136 conn_sched = xchg(&conn->gnc_scheduled, GNILND_CONN_IDLE);
137 LASSERTF(conn_sched == GNILND_CONN_WANTS_SCHED ||
138 conn_sched == GNILND_CONN_PROCESS,
139 "conn %p after process in bad state: %d\n",
142 if (sched_intent >= 0) {
143 if ((sched_intent > 0 || (conn_sched == GNILND_CONN_WANTS_SCHED))) {
144 return kgnilnd_schedule_conn_refheld(conn, 1);
150 /* Return of 0 for conn not scheduled, 1 returned if conn was scheduled or marked
154 _kgnilnd_schedule_conn(kgn_conn_t *conn, const char *caller, int line, int refheld, int lock_held)
156 kgn_device_t *dev = conn->gnc_device;
160 sched = xchg(&conn->gnc_scheduled, GNILND_CONN_WANTS_SCHED);
161 /* we only care about the last person who marked want_sched since they
162 * are most likely the culprit
164 memcpy(conn->gnc_sched_caller, caller, sizeof(conn->gnc_sched_caller));
165 conn->gnc_sched_line = line;
166 /* if we are IDLE, add to list - only one guy sees IDLE and "wins"
167 * the chance to put it onto gnd_ready_conns.
168 * otherwise, leave marked as WANTS_SCHED and the thread that "owns"
169 * the conn in process_conns will take care of moving it back to
170 * SCHED when it is done processing */
172 if (sched == GNILND_CONN_IDLE) {
173 /* if the conn is already scheduled, we've already requested
174 * the scheduler thread wakeup */
176 /* Add a reference to the conn if we are not holding a reference
177 * already from the existing scheduler. We now use the same
178 * reference if we need to reschedule a conn while in a scheduler
181 kgnilnd_conn_addref(conn);
183 LASSERTF(list_empty(&conn->gnc_schedlist), "conn %p already sched state %d\n",
186 CDEBUG(D_INFO, "scheduling conn 0x%p caller %s:%d\n", conn, caller, line);
188 spin_lock(&dev->gnd_lock);
189 list_add_tail(&conn->gnc_schedlist, &dev->gnd_ready_conns);
191 spin_unlock(&dev->gnd_lock);
192 set_mb(conn->gnc_last_sched_ask, jiffies);
195 CDEBUG(D_INFO, "not scheduling conn 0x%p: %d caller %s:%d\n", conn, sched, caller, line);
199 /* make sure thread(s) are going to process conns - but let it make
200 * a separate decision from conn schedule */
202 kgnilnd_schedule_device(dev);
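/* An illustrative sketch (not from the original source): the gnc_scheduled
 * hand-off used by the two functions above.  Producers mark the conn
 * WANTS_SCHED with xchg(); only the producer that saw IDLE may enqueue it on
 * gnd_ready_conns, while kgnilnd_schedule_process_conn() later moves it back
 * toward IDLE once processing finishes.  example_mark_wants_sched() is a
 * hypothetical helper name. */
static inline int
example_mark_wants_sched(kgn_conn_t *conn)
{
        /* non-zero means this caller won the right to enqueue the conn */
        return xchg(&conn->gnc_scheduled, GNILND_CONN_WANTS_SCHED) ==
               GNILND_CONN_IDLE;
}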
207 _kgnilnd_schedule_delay_conn(kgn_conn_t *conn)
209 kgn_device_t *dev = conn->gnc_device;
211 spin_lock(&dev->gnd_lock);
212 if (list_empty(&conn->gnc_delaylist)) {
213 list_add_tail(&conn->gnc_delaylist, &dev->gnd_delay_conns);
216 spin_unlock(&dev->gnd_lock);
218 kgnilnd_schedule_device(dev);
223 kgnilnd_schedule_dgram(kgn_device_t *dev)
227 wake = xchg(&dev->gnd_dgram_ready, GNILND_DGRAM_SCHED);
228 if (wake != GNILND_DGRAM_SCHED) {
229 wake_up(&dev->gnd_dgram_waitq);
231 CDEBUG(D_NETTRACE, "not waking: %d\n", wake);
236 kgnilnd_free_tx(kgn_tx_t *tx)
238 /* taken from kgnilnd_tx_add_state_locked */
240 LASSERTF((tx->tx_list_p == NULL &&
241 tx->tx_list_state == GNILND_TX_ALLOCD) &&
242 list_empty(&tx->tx_list),
243 "tx %p with bad state %s (list_p %p) tx_list %s\n",
244 tx, kgnilnd_tx_state2str(tx->tx_list_state), tx->tx_list_p,
245 list_empty(&tx->tx_list) ? "empty" : "not empty");
247 atomic_dec(&kgnilnd_data.kgn_ntx);
249 /* we only allocate this if we need to */
250 if (tx->tx_phys != NULL) {
251 kmem_cache_free(kgnilnd_data.kgn_tx_phys_cache, tx->tx_phys);
252 CDEBUG(D_MALLOC, "slab-freed 'tx_phys': %lu at %p.\n",
253 LNET_MAX_IOV * sizeof(gni_mem_segment_t), tx->tx_phys);
256 /* Only free the buffer if we used it */
257 if (tx->tx_buffer_copy != NULL) {
258 kgnilnd_vfree(tx->tx_buffer_copy, tx->tx_rdma_desc.length);
259 tx->tx_buffer_copy = NULL;
260 CDEBUG(D_MALLOC, "vfreed buffer2\n");
263 KGNILND_POISON(tx, 0x5a, sizeof(kgn_tx_t));
265 CDEBUG(D_MALLOC, "slab-freed 'tx': %lu at %p.\n", sizeof(*tx), tx);
266 kmem_cache_free(kgnilnd_data.kgn_tx_cache, tx);
270 kgnilnd_alloc_tx (void)
274 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_ALLOC_TX))
277 tx = kmem_cache_alloc(kgnilnd_data.kgn_tx_cache, GFP_ATOMIC);
279 CERROR("failed to allocate tx\n");
282 CDEBUG(D_MALLOC, "slab-alloced 'tx': %lu at %p.\n",
285 /* need this memset, cache alloc'd memory is not cleared */
286 memset(tx, 0, sizeof(*tx));
288 /* setup everything here to minimize time under the lock */
289 tx->tx_buftype = GNILND_BUF_NONE;
290 tx->tx_msg.gnm_type = GNILND_MSG_NONE;
291 INIT_LIST_HEAD(&tx->tx_list);
292 INIT_LIST_HEAD(&tx->tx_map_list);
293 tx->tx_list_state = GNILND_TX_ALLOCD;
295 atomic_inc(&kgnilnd_data.kgn_ntx);
300 /* csum_fold needs to be run on the return value before shipping over the wire */
301 #define _kgnilnd_cksum(seed, ptr, nob) csum_partial(ptr, nob, seed)
303 /* we don't use offset as everyone is passing a buffer reference that already
304 * includes the offset into the base address -
305 * see kgnilnd_setup_virt_buffer and kgnilnd_setup_immediate_buffer */
307 kgnilnd_cksum(void *ptr, size_t nob)
311 sum = csum_fold(_kgnilnd_cksum(0, ptr, nob));
313 /* don't use magic 'no checksum' value */
317 CDEBUG(D_INFO, "cksum 0x%x for ptr 0x%p sz %zu\n",
324 kgnilnd_cksum_kiov(unsigned int nkiov, lnet_kiov_t *kiov,
325 unsigned int offset, unsigned int nob, int dump_blob)
331 unsigned int fraglen;
337 CDEBUG(D_BUFFS, "calc cksum for kiov 0x%p nkiov %u offset %u nob %u, dump %d\n",
338 kiov, nkiov, offset, nob, dump_blob);
340 /* if this loop changes, please change kgnilnd_setup_phys_buffer */
342 while (offset >= kiov->kiov_len) {
343 offset -= kiov->kiov_len;
349 /* ignore nob here, if nob < (kiov_len - offset), kiov == 1 */
350 odd = (unsigned long) (kiov[0].kiov_len - offset) & 1;
352 if ((odd || *kgnilnd_tunables.kgn_vmap_cksum) && nkiov > 1) {
353 struct page **pages = kgnilnd_data.kgn_cksum_map_pages[get_cpu()];
355 LASSERTF(pages != NULL, "NULL pages for cpu %d map_pages 0x%p\n",
356 get_cpu(), kgnilnd_data.kgn_cksum_map_pages);
358 CDEBUG(D_BUFFS, "odd %d len %u offset %u nob %u\n",
359 odd, kiov[0].kiov_len, offset, nob);
361 for (i = 0; i < nkiov; i++) {
362 pages[i] = kiov[i].kiov_page;
365 addr = vmap(pages, nkiov, VM_MAP, PAGE_KERNEL);
367 CNETERR("Couldn't vmap %d frags on %d bytes to avoid odd length fragment in cksum\n",
369 /* return zero to avoid killing tx - we'll just get warning on console
370 * when remote end sees zero checksum */
373 atomic_inc(&kgnilnd_data.kgn_nvmap_cksum);
375 tmpck = _kgnilnd_cksum(0, (void *) addr + kiov[0].kiov_offset + offset, nob);
379 kgnilnd_dump_blob(D_BUFFS, "flat kiov RDMA payload",
380 (void *)addr + kiov[0].kiov_offset + offset, nob);
382 CDEBUG(D_BUFFS, "cksum 0x%x (+0x%x) for addr 0x%p+%u len %u offset %u\n",
383 cksum, tmpck, addr, kiov[0].kiov_offset, nob, offset);
387 fraglen = min(kiov->kiov_len - offset, nob);
389 /* make dang sure we don't send a bogus checksum if somehow we get
390 * an odd length fragment on anything but the last entry in a kiov -
391 * we know from kgnilnd_setup_rdma_buffer that we can't have non
392 * PAGE_SIZE pages in the middle, so if nob < PAGE_SIZE, it is the last one */
393 LASSERTF(!(fraglen&1) || (nob < PAGE_SIZE),
394 "odd fraglen %u on nkiov %d, nob %u kiov_len %u offset %u kiov 0x%p\n",
395 fraglen, nkiov, nob, kiov->kiov_len, offset, kiov);
397 addr = (void *)kmap(kiov->kiov_page) + kiov->kiov_offset + offset;
398 tmpck = _kgnilnd_cksum(cksum, addr, fraglen);
401 "cksum 0x%x (+0x%x) for page 0x%p+%u (0x%p) len %u offset %u\n",
402 cksum, tmpck, kiov->kiov_page, kiov->kiov_offset, addr,
408 kgnilnd_dump_blob(D_BUFFS, "kiov cksum", addr, fraglen);
410 kunmap(kiov->kiov_page);
417 /* iov must not run out before end of data */
418 LASSERTF(nob == 0 || nkiov > 0, "nob %u nkiov %u\n", nob, nkiov);
423 retsum = csum_fold(cksum);
425 /* don't use magic 'no checksum' value */
429 CDEBUG(D_BUFFS, "retsum 0x%x from cksum 0x%x\n", retsum, cksum);
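/* A hedged sketch (not from the original source) tying together the checksum
 * conventions used by kgnilnd_cksum() and kgnilnd_cksum_kiov() above: partial
 * sums from csum_partial() may be chained across fragments by feeding the
 * running sum back in as the seed - which is why only the final fragment may
 * have odd length, since an odd-length piece would shift the byte lanes of
 * everything after it - and csum_fold() collapses the result to 16 bits.  The
 * remap of 0 to 1 assumes the driver reserves 0 as its "no checksum" marker,
 * as the comments above suggest.  example_chain_cksum() is a hypothetical
 * helper and relies only on <net/checksum.h>. */
static __u16
example_chain_cksum(const void *a, int alen, const void *b, int blen)
{
        __wsum sum = csum_partial(a, alen, 0);  /* alen must be even */
        __u16 ret;

        sum = csum_partial(b, blen, sum);       /* last piece may be odd */
        ret = csum_fold(sum);
        if (ret == 0)
                ret = 1;                        /* keep 0 free for 'no checksum' */
        return ret;
}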
435 kgnilnd_init_msg(kgn_msg_t *msg, int type, lnet_nid_t source)
437 msg->gnm_magic = GNILND_MSG_MAGIC;
438 msg->gnm_version = GNILND_MSG_VERSION;
439 msg->gnm_type = type;
440 msg->gnm_payload_len = 0;
441 msg->gnm_srcnid = source;
442 /* gnm_connstamp gets set when FMA is sent */
443 /* gnm_srcnid is set on creation via function argument
444 * The right interface/net and nid is passed in when the message
450 kgnilnd_new_tx_msg(int type, lnet_nid_t source)
452 kgn_tx_t *tx = kgnilnd_alloc_tx();
455 kgnilnd_init_msg(&tx->tx_msg, type, source);
457 CERROR("couldn't allocate new tx type %s!\n",
458 kgnilnd_msgtype2str(type));
465 kgnilnd_nak_rdma(kgn_conn_t *conn, int rx_type, int error, __u64 cookie, lnet_nid_t source) {
471 case GNILND_MSG_GET_REQ:
472 case GNILND_MSG_GET_DONE:
473 nak_type = GNILND_MSG_GET_NAK;
475 case GNILND_MSG_PUT_REQ:
476 case GNILND_MSG_PUT_ACK:
477 case GNILND_MSG_PUT_DONE:
478 nak_type = GNILND_MSG_PUT_NAK;
480 case GNILND_MSG_PUT_REQ_REV:
481 case GNILND_MSG_PUT_DONE_REV:
482 nak_type = GNILND_MSG_PUT_NAK_REV;
484 case GNILND_MSG_GET_REQ_REV:
485 case GNILND_MSG_GET_ACK_REV:
486 case GNILND_MSG_GET_DONE_REV:
487 nak_type = GNILND_MSG_GET_NAK_REV;
490 CERROR("invalid msg type %s (%d)\n",
491 kgnilnd_msgtype2str(rx_type), rx_type);
494 /* only allow NAK on error and truncate to zero */
495 LASSERTF(error <= 0, "error %d conn 0x%p, cookie %llu\n",
496 error, conn, cookie);
498 tx = kgnilnd_new_tx_msg(nak_type, source);
500 CNETERR("can't get TX to NAK RDMA to %s\n",
501 libcfs_nid2str(conn->gnc_peer->gnp_nid));
505 tx->tx_msg.gnm_u.completion.gncm_retval = error;
506 tx->tx_msg.gnm_u.completion.gncm_cookie = cookie;
507 kgnilnd_queue_tx(conn, tx);
511 kgnilnd_setup_immediate_buffer(kgn_tx_t *tx, unsigned int niov,
512 struct kvec *iov, lnet_kiov_t *kiov,
513 unsigned int offset, unsigned int nob)
515 kgn_msg_t *msg = &tx->tx_msg;
518 /* To help save on MDDs for short messages, we'll vmap a kiov to allow
519 * gni_smsg_send to send that as the payload */
521 LASSERT(tx->tx_buftype == GNILND_BUF_NONE);
524 tx->tx_buffer = NULL;
525 } else if (kiov != NULL) {
527 if ((niov > 0) && unlikely(niov > (nob/PAGE_SIZE))) {
528 niov = ((nob + offset + kiov->kiov_offset + PAGE_SIZE - 1) /
532 LASSERTF(niov > 0 && niov < GNILND_MAX_IMMEDIATE/PAGE_SIZE,
533 "bad niov %d msg %p kiov %p iov %p offset %d nob%d\n",
534 niov, msg, kiov, iov, offset, nob);
536 while (offset >= kiov->kiov_len) {
537 offset -= kiov->kiov_len;
542 for (i = 0; i < niov; i++) {
543 /* We can't have a kiov_offset on anything but the first entry,
544 * otherwise we'll have a hole at the end of the mapping as we only map
546 * Also, if we have a kiov_len < PAGE_SIZE but we need to map more
547 * than kiov_len, we will also have a hole at the end of that page
548 * which isn't allowed */
549 if ((kiov[i].kiov_offset != 0 && i > 0) ||
550 (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_SIZE && i < niov - 1)) {
551 CNETERR("Can't make payload contiguous in I/O VM:"
552 "page %d, offset %u, nob %u, kiov_offset %u kiov_len %u \n",
553 i, offset, nob, kiov->kiov_offset, kiov->kiov_len);
556 tx->tx_imm_pages[i] = kiov[i].kiov_page;
559 /* hijack tx_phys for the later unmap */
561 /* tx->tx_phys being equal to NULL is the signal for unmap to discern between kmap and vmap */
563 tx->tx_buffer = (void *)kmap(tx->tx_imm_pages[0]) + kiov[0].kiov_offset + offset;
564 atomic_inc(&kgnilnd_data.kgn_nkmap_short);
565 GNIDBG_TX(D_NET, tx, "kmapped page for %d bytes for kiov 0x%p, buffer 0x%p",
566 nob, kiov, tx->tx_buffer);
568 tx->tx_phys = vmap(tx->tx_imm_pages, niov, VM_MAP, PAGE_KERNEL);
569 if (tx->tx_phys == NULL) {
570 CNETERR("Couldn't vmap %d frags on %d bytes\n", niov, nob);
574 atomic_inc(&kgnilnd_data.kgn_nvmap_short);
575 /* make sure we take into account the kiov offset as the start of the buffer */
576 tx->tx_buffer = (void *)tx->tx_phys + kiov[0].kiov_offset + offset;
577 GNIDBG_TX(D_NET, tx, "mapped %d pages for %d bytes from kiov 0x%p to 0x%p, buffer 0x%p",
578 niov, nob, kiov, tx->tx_phys, tx->tx_buffer);
580 tx->tx_buftype = GNILND_BUF_IMMEDIATE_KIOV;
584 /* For now this is almost identical to kgnilnd_setup_virt_buffer, but we
585 * could "flatten" the payload into a single contiguous buffer ready
586 * for sending direct over an FMA if we ever needed to. */
590 while (offset >= iov->iov_len) {
591 offset -= iov->iov_len;
597 if (nob > iov->iov_len - offset) {
598 CERROR("Can't handle multiple vaddr fragments\n");
602 tx->tx_buffer = (void *)(((unsigned long)iov->iov_base) + offset);
604 tx->tx_buftype = GNILND_BUF_IMMEDIATE;
608 /* checksum payload early - it shouldn't be changing after lnd_send */
609 if (*kgnilnd_tunables.kgn_checksum >= 2) {
610 msg->gnm_payload_cksum = kgnilnd_cksum(tx->tx_buffer, nob);
611 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_SMSG_CKSUM2)) {
612 msg->gnm_payload_cksum += 0xe00e;
614 if (*kgnilnd_tunables.kgn_checksum_dump > 1) {
615 kgnilnd_dump_blob(D_BUFFS, "payload checksum",
619 msg->gnm_payload_cksum = 0;
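/* An illustrative sketch (not from the original source) of the mapping choice
 * made above: a payload contained in a single page is kmap()ed directly, while
 * a payload spanning pages is vmap()ed so the SMSG send sees one contiguous
 * virtual range.  example_map_payload() is a hypothetical helper; the offset
 * arithmetic, accounting and error handling of the real function are omitted,
 * and vmap() may return NULL. */
static void *
example_map_payload(struct page **pages, int npages)
{
        if (npages == 1)
                return kmap(pages[0]);

        return vmap(pages, npages, VM_MAP, PAGE_KERNEL);
}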
626 kgnilnd_setup_virt_buffer(kgn_tx_t *tx,
627 unsigned int niov, struct kvec *iov,
628 unsigned int offset, unsigned int nob)
633 LASSERT(tx->tx_buftype == GNILND_BUF_NONE);
635 while (offset >= iov->iov_len) {
636 offset -= iov->iov_len;
642 if (nob > iov->iov_len - offset) {
643 CERROR("Can't handle multiple vaddr fragments\n");
647 tx->tx_buftype = GNILND_BUF_VIRT_UNMAPPED;
649 tx->tx_buffer = (void *)(((unsigned long)iov->iov_base) + offset);
654 kgnilnd_setup_phys_buffer(kgn_tx_t *tx, int nkiov, lnet_kiov_t *kiov,
655 unsigned int offset, unsigned int nob)
657 gni_mem_segment_t *phys;
659 unsigned int fraglen;
661 GNIDBG_TX(D_NET, tx, "niov %d kiov 0x%p offset %u nob %u", nkiov, kiov, offset, nob);
665 LASSERT(tx->tx_buftype == GNILND_BUF_NONE);
667 /* only allocate this if we are going to use it */
668 tx->tx_phys = kmem_cache_alloc(kgnilnd_data.kgn_tx_phys_cache,
670 if (tx->tx_phys == NULL) {
671 CERROR("failed to allocate tx_phys\n");
676 CDEBUG(D_MALLOC, "slab-alloced 'tx->tx_phys': %lu at %p.\n",
677 LNET_MAX_IOV * sizeof(gni_mem_segment_t), tx->tx_phys);
679 /* if this loop changes, please change kgnilnd_cksum_kiov
680 * and kgnilnd_setup_immediate_buffer */
682 while (offset >= kiov->kiov_len) {
683 offset -= kiov->kiov_len;
689 /* at this point, kiov points to the first page that we'll actually map
690 * now that we've advanced into the kiov by offset and dropped any
691 * leading pages that fall entirely within the offset */
692 tx->tx_buftype = GNILND_BUF_PHYS_UNMAPPED;
695 /* kiov_offset is start of 'valid' buffer, so index offset past that */
696 tx->tx_buffer = (void *)((unsigned long)(kiov->kiov_offset + offset));
699 CDEBUG(D_NET, "tx 0x%p buffer 0x%p map start kiov 0x%p+%u niov %d offset %u\n",
700 tx, tx->tx_buffer, kiov, kiov->kiov_offset, nkiov, offset);
703 fraglen = min(kiov->kiov_len - offset, nob);
705 /* We can't have a kiov_offset on anything but the first entry,
706 * otherwise we'll have a hole at the end of the mapping as we only map
707 * whole pages. Only the first page is allowed to have an offset -
708 * we'll add that into tx->tx_buffer and that will get used when we
709 * map in the segments (see kgnilnd_map_buffer).
710 * Also, if we have a kiov_len < PAGE_SIZE but we need to map more
711 * than kiov_len, we will also have a hole at the end of that page
712 * which isn't allowed */
713 if ((phys != tx->tx_phys) &&
714 ((kiov->kiov_offset != 0) ||
715 ((kiov->kiov_len < PAGE_SIZE) && (nob > kiov->kiov_len)))) {
716 CERROR("Can't make payload contiguous in I/O VM:"
717 "page %d, offset %u, nob %u, kiov_offset %u kiov_len %u \n",
718 (int)(phys - tx->tx_phys),
719 offset, nob, kiov->kiov_offset, kiov->kiov_len);
724 if ((phys - tx->tx_phys) == LNET_MAX_IOV) {
725 CERROR ("payload too big (%d)\n", (int)(phys - tx->tx_phys));
730 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_PHYS_SETUP)) {
735 CDEBUG(D_BUFFS, "page 0x%p kiov_offset %u kiov_len %u nob %u "
736 "nkiov %u offset %u\n",
737 kiov->kiov_page, kiov->kiov_offset, kiov->kiov_len, nob, nkiov, offset);
739 phys->address = page_to_phys(kiov->kiov_page);
746 /* iov must not run out before end of data */
747 LASSERTF(nob == 0 || nkiov > 0, "nob %u nkiov %u\n", nob, nkiov);
751 tx->tx_phys_npages = phys - tx->tx_phys;
756 if (tx->tx_phys != NULL) {
757 kmem_cache_free(kgnilnd_data.kgn_tx_phys_cache, tx->tx_phys);
758 CDEBUG(D_MALLOC, "slab-freed 'tx_phys': %lu at %p.\n",
759 sizeof(*tx->tx_phys), tx->tx_phys);
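/* An illustrative sketch (not from the original source): the inner step of the
 * loop above - every kiov entry contributes one physical segment via
 * page_to_phys(), and only the first page's kiov_offset survives (folded into
 * tx_buffer) because the middle of the mapping must be whole pages.
 * example_fill_segment() is a hypothetical name and the full-page length is a
 * simplification. */
static inline void
example_fill_segment(gni_mem_segment_t *phys, struct page *page)
{
        phys->address = page_to_phys(page);
        phys->length  = PAGE_SIZE;
}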
766 kgnilnd_setup_rdma_buffer(kgn_tx_t *tx, unsigned int niov,
767 struct kvec *iov, lnet_kiov_t *kiov,
768 unsigned int offset, unsigned int nob)
772 LASSERTF((iov == NULL) != (kiov == NULL), "iov 0x%p, kiov 0x%p, tx 0x%p,"
773 " offset %d, nob %d, niov %d\n"
774 , iov, kiov, tx, offset, nob, niov);
777 rc = kgnilnd_setup_phys_buffer(tx, niov, kiov, offset, nob);
779 rc = kgnilnd_setup_virt_buffer(tx, niov, iov, offset, nob);
784 /* kgnilnd_parse_lnet_rdma()
785 * lntmsg - message passed in from lnet.
786 * niov, kiov, offset - see lnd_t in lib-types.h for descriptions.
787 * nob - actual number of bytes in this message.
788 * put_len - It is possible for PUTs to have a different length than the
789 * length stored in lntmsg->msg_len since LNET can adjust this
790 * length based on its buffer size and offset.
791 * lnet_try_match_md() sets the mlength that we use to do the RDMA
795 kgnilnd_parse_lnet_rdma(lnet_msg_t *lntmsg, unsigned int *niov,
796 unsigned int *offset, unsigned int *nob,
797 lnet_kiov_t **kiov, int put_len)
799 /* GETs are weird, see kgnilnd_send */
800 if (lntmsg->msg_type == LNET_MSG_GET) {
801 if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0) {
804 *kiov = lntmsg->msg_md->md_iov.kiov;
806 *niov = lntmsg->msg_md->md_niov;
807 *nob = lntmsg->msg_md->md_length;
810 *kiov = lntmsg->msg_kiov;
811 *niov = lntmsg->msg_niov;
813 *offset = lntmsg->msg_offset;
818 kgnilnd_compute_rdma_cksum(kgn_tx_t *tx, int put_len)
820 unsigned int niov, offset, nob;
822 lnet_msg_t *lntmsg = tx->tx_lntmsg[0];
823 int dump_cksum = (*kgnilnd_tunables.kgn_checksum_dump > 1);
825 GNITX_ASSERTF(tx, ((tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE) ||
826 (tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE) ||
827 (tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE_REV) ||
828 (tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE_REV) ||
829 (tx->tx_msg.gnm_type == GNILND_MSG_GET_ACK_REV) ||
830 (tx->tx_msg.gnm_type == GNILND_MSG_PUT_REQ_REV)),
831 "bad type %s", kgnilnd_msgtype2str(tx->tx_msg.gnm_type));
833 if ((tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE_REV) ||
834 (tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE_REV)) {
835 tx->tx_msg.gnm_payload_cksum = 0;
838 if (*kgnilnd_tunables.kgn_checksum < 3) {
839 tx->tx_msg.gnm_payload_cksum = 0;
843 GNITX_ASSERTF(tx, lntmsg, "no LNet message!", NULL);
845 kgnilnd_parse_lnet_rdma(lntmsg, &niov, &offset, &nob, &kiov,
849 tx->tx_msg.gnm_payload_cksum = kgnilnd_cksum_kiov(niov, kiov, offset, nob, dump_cksum);
851 tx->tx_msg.gnm_payload_cksum = kgnilnd_cksum(tx->tx_buffer, nob);
853 kgnilnd_dump_blob(D_BUFFS, "peer RDMA payload", tx->tx_buffer, nob);
857 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_SMSG_CKSUM3)) {
858 tx->tx_msg.gnm_payload_cksum += 0xd00d;
862 /* kgnilnd_verify_rdma_cksum()
863 * tx - PUT_DONE/GET_DONE matched tx.
864 * rx_cksum - received checksum to compare against.
865 * put_len - see kgnilnd_parse_lnet_rdma comments.
868 kgnilnd_verify_rdma_cksum(kgn_tx_t *tx, __u16 rx_cksum, int put_len)
872 unsigned int niov, offset, nob;
874 lnet_msg_t *lntmsg = tx->tx_lntmsg[0];
875 int dump_on_err = *kgnilnd_tunables.kgn_checksum_dump;
877 /* we can only match certain requests */
878 GNITX_ASSERTF(tx, ((tx->tx_msg.gnm_type == GNILND_MSG_GET_REQ) ||
879 (tx->tx_msg.gnm_type == GNILND_MSG_PUT_ACK) ||
880 (tx->tx_msg.gnm_type == GNILND_MSG_PUT_REQ_REV) ||
881 (tx->tx_msg.gnm_type == GNILND_MSG_GET_ACK_REV) ||
882 (tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE_REV) ||
883 (tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE_REV)),
884 "bad type %s", kgnilnd_msgtype2str(tx->tx_msg.gnm_type));
886 if ((tx->tx_msg.gnm_type == GNILND_MSG_PUT_REQ_REV) ||
887 (tx->tx_msg.gnm_type == GNILND_MSG_GET_ACK_REV)) {
892 if (*kgnilnd_tunables.kgn_checksum >= 3) {
893 GNIDBG_MSG(D_WARNING, &tx->tx_msg,
894 "no RDMA payload checksum when enabled");
899 GNITX_ASSERTF(tx, lntmsg, "no LNet message!", NULL);
901 kgnilnd_parse_lnet_rdma(lntmsg, &niov, &offset, &nob, &kiov, put_len);
904 cksum = kgnilnd_cksum_kiov(niov, kiov, offset, nob, 0);
906 cksum = kgnilnd_cksum(tx->tx_buffer, nob);
909 if (cksum != rx_cksum) {
910 GNIDBG_MSG(D_NETERROR, &tx->tx_msg,
911 "Bad RDMA payload checksum (%x expected %x); "
912 "kiov 0x%p niov %d nob %u offset %u",
913 cksum, rx_cksum, kiov, niov, nob, offset);
914 switch (dump_on_err) {
917 kgnilnd_cksum_kiov(niov, kiov, offset, nob, 1);
919 kgnilnd_dump_blob(D_BUFFS, "RDMA payload",
922 /* fall through to dump log */
924 libcfs_debug_dumplog();
930 /* kgnilnd_check_fma_rx will close conn, kill tx with error */
936 kgnilnd_mem_add_map_list(kgn_device_t *dev, kgn_tx_t *tx)
940 GNITX_ASSERTF(tx, list_empty(&tx->tx_map_list),
941 "already mapped!", NULL);
943 spin_lock(&dev->gnd_map_lock);
944 switch (tx->tx_buftype) {
946 GNIDBG_TX(D_EMERG, tx,
947 "SOFTWARE BUG: invalid mapping %d", tx->tx_buftype);
948 spin_unlock(&dev->gnd_map_lock);
952 case GNILND_BUF_PHYS_MAPPED:
953 bytes = tx->tx_phys_npages * PAGE_SIZE;
954 dev->gnd_map_nphys++;
955 dev->gnd_map_physnop += tx->tx_phys_npages;
958 case GNILND_BUF_VIRT_MAPPED:
960 dev->gnd_map_nvirt++;
961 dev->gnd_map_virtnob += tx->tx_nob;
965 if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_ACK ||
966 tx->tx_msg.gnm_type == GNILND_MSG_GET_REQ) {
967 atomic64_add(bytes, &dev->gnd_rdmaq_bytes_out);
968 GNIDBG_TX(D_NETTRACE, tx, "rdma ++ %d to %lld",
969 bytes, atomic64_read(&dev->gnd_rdmaq_bytes_out));
972 atomic_inc(&dev->gnd_n_mdd);
973 atomic64_add(bytes, &dev->gnd_nbytes_map);
975 /* clear retrans to prevent any SMSG goofiness as that code uses the same counter */
978 /* we only get here in the valid cases */
979 list_add_tail(&tx->tx_map_list, &dev->gnd_map_list);
980 dev->gnd_map_version++;
981 spin_unlock(&dev->gnd_map_lock);
985 kgnilnd_mem_del_map_list(kgn_device_t *dev, kgn_tx_t *tx)
989 GNITX_ASSERTF(tx, !list_empty(&tx->tx_map_list),
990 "not mapped!", NULL);
991 spin_lock(&dev->gnd_map_lock);
993 switch (tx->tx_buftype) {
995 GNIDBG_TX(D_EMERG, tx,
996 "SOFTWARE BUG: invalid mapping %d", tx->tx_buftype);
997 spin_unlock(&dev->gnd_map_lock);
1001 case GNILND_BUF_PHYS_UNMAPPED:
1002 bytes = tx->tx_phys_npages * PAGE_SIZE;
1003 dev->gnd_map_nphys--;
1004 dev->gnd_map_physnop -= tx->tx_phys_npages;
1007 case GNILND_BUF_VIRT_UNMAPPED:
1009 dev->gnd_map_nvirt--;
1010 dev->gnd_map_virtnob -= tx->tx_nob;
1014 if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_ACK ||
1015 tx->tx_msg.gnm_type == GNILND_MSG_GET_REQ) {
1016 atomic64_sub(bytes, &dev->gnd_rdmaq_bytes_out);
1017 LASSERTF(atomic64_read(&dev->gnd_rdmaq_bytes_out) >= 0,
1018 "bytes_out negative! %ld\n", atomic64_read(&dev->gnd_rdmaq_bytes_out));
1019 GNIDBG_TX(D_NETTRACE, tx, "rdma -- %d to %lld",
1020 bytes, atomic64_read(&dev->gnd_rdmaq_bytes_out));
1023 atomic_dec(&dev->gnd_n_mdd);
1024 atomic64_sub(bytes, &dev->gnd_nbytes_map);
1026 /* we only get here in the valid cases */
1027 list_del_init(&tx->tx_map_list);
1028 dev->gnd_map_version++;
1029 spin_unlock(&dev->gnd_map_lock);
1033 kgnilnd_map_buffer(kgn_tx_t *tx)
1035 kgn_conn_t *conn = tx->tx_conn;
1036 kgn_device_t *dev = conn->gnc_device;
1037 __u32 flags = GNI_MEM_READWRITE;
1040 /* The kgnilnd_mem_register(_segments) Gemini Driver functions can
1041 * be called concurrently as there are internal locks that protect
1042 * any data structures or HW resources. We just need to ensure
1043 * that our concurrency doesn't result in the kgn_device_t
1044 * getting nuked while we are in here */
1046 LASSERTF(conn != NULL, "tx %p with NULL conn, someone forgot"
1047 " to set tx_conn before calling %s\n", tx, __FUNCTION__);
1049 if (unlikely(CFS_FAIL_CHECK(CFS_FAIL_GNI_MAP_TX)))
1052 if (*kgnilnd_tunables.kgn_bte_relaxed_ordering) {
1053 flags |= GNI_MEM_RELAXED_PI_ORDERING;
1056 switch (tx->tx_buftype) {
1060 case GNILND_BUF_NONE:
1061 case GNILND_BUF_IMMEDIATE:
1062 case GNILND_BUF_IMMEDIATE_KIOV:
1063 case GNILND_BUF_PHYS_MAPPED:
1064 case GNILND_BUF_VIRT_MAPPED:
1067 case GNILND_BUF_PHYS_UNMAPPED:
1068 GNITX_ASSERTF(tx, tx->tx_phys != NULL, "physical buffer not there!", NULL);
1069 rrc = kgnilnd_mem_register_segments(dev->gnd_handle,
1070 tx->tx_phys, tx->tx_phys_npages, NULL,
1071 GNI_MEM_PHYS_SEGMENTS | flags,
1073 /* could race with other uses of the map counts, but this is ok
1074 * - this needs to turn into a non-fatal error soon to allow
1075 * GART resource, etc starvation handling */
1076 if (rrc != GNI_RC_SUCCESS) {
1077 GNIDBG_TX(D_NET, tx, "Can't map %d pages: dev %d "
1078 "phys %u pp %u, virt %u nob %llu",
1079 tx->tx_phys_npages, dev->gnd_id,
1080 dev->gnd_map_nphys, dev->gnd_map_physnop,
1081 dev->gnd_map_nvirt, dev->gnd_map_virtnob);
1082 RETURN(rrc == GNI_RC_ERROR_RESOURCE ? -ENOMEM : -EINVAL);
1085 tx->tx_buftype = GNILND_BUF_PHYS_MAPPED;
1086 kgnilnd_mem_add_map_list(dev, tx);
1089 case GNILND_BUF_VIRT_UNMAPPED:
1090 rrc = kgnilnd_mem_register(dev->gnd_handle,
1091 (__u64)tx->tx_buffer, tx->tx_nob,
1092 NULL, flags, &tx->tx_map_key);
1093 if (rrc != GNI_RC_SUCCESS) {
1094 GNIDBG_TX(D_NET, tx, "Can't map %u bytes: dev %d "
1095 "phys %u pp %u, virt %u nob %llu",
1096 tx->tx_nob, dev->gnd_id,
1097 dev->gnd_map_nphys, dev->gnd_map_physnop,
1098 dev->gnd_map_nvirt, dev->gnd_map_virtnob);
1099 RETURN(rrc == GNI_RC_ERROR_RESOURCE ? -ENOMEM : -EINVAL);
1102 tx->tx_buftype = GNILND_BUF_VIRT_MAPPED;
1103 kgnilnd_mem_add_map_list(dev, tx);
1104 if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_ACK ||
1105 tx->tx_msg.gnm_type == GNILND_MSG_GET_REQ) {
1106 atomic64_add(tx->tx_nob, &dev->gnd_rdmaq_bytes_out);
1107 GNIDBG_TX(D_NETTRACE, tx, "rdma ++ %d to %ld\n",
1108 tx->tx_nob, atomic64_read(&dev->gnd_rdmaq_bytes_out));
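/* An illustrative sketch (not from the original source): both registration
 * paths above collapse the GNI return code the same way - resource exhaustion
 * becomes -ENOMEM (worth retrying once mappings drain), anything else becomes
 * -EINVAL.  example_map_errno() is a hypothetical name. */
static inline int
example_map_errno(gni_return_t rrc)
{
        return rrc == GNI_RC_ERROR_RESOURCE ? -ENOMEM : -EINVAL;
}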
1116 kgnilnd_add_purgatory_tx(kgn_tx_t *tx)
1118 kgn_conn_t *conn = tx->tx_conn;
1119 kgn_mdd_purgatory_t *gmp;
1121 LIBCFS_ALLOC(gmp, sizeof(*gmp));
1122 LASSERTF(gmp != NULL, "couldn't allocate MDD purgatory member;"
1123 " asserting to avoid data corruption\n");
1124 if (tx->tx_buffer_copy)
1125 gmp->gmp_map_key = tx->tx_buffer_copy_map_key;
1127 gmp->gmp_map_key = tx->tx_map_key;
1129 atomic_inc(&conn->gnc_device->gnd_n_mdd_held);
1131 /* ensure that we don't have a blank purgatory - indicating the
1132 * conn is not already on purgatory lists - we'd never recover these
1133 * MDD if that were the case */
1134 GNITX_ASSERTF(tx, conn->gnc_in_purgatory,
1135 "conn 0x%p->%s with NULL purgatory",
1136 conn, libcfs_nid2str(conn->gnc_peer->gnp_nid));
1138 /* link 'er up! - only place we really need to lock for
1139 * concurrent access */
1140 spin_lock(&conn->gnc_list_lock);
1141 list_add_tail(&gmp->gmp_list, &conn->gnc_mdd_list);
1142 spin_unlock(&conn->gnc_list_lock);
1146 kgnilnd_unmap_buffer(kgn_tx_t *tx, int error)
1150 int hold_timeout = 0;
1152 /* code below relies on +1 relationship ... */
1153 CLASSERT(GNILND_BUF_PHYS_MAPPED == (GNILND_BUF_PHYS_UNMAPPED + 1));
1154 CLASSERT(GNILND_BUF_VIRT_MAPPED == (GNILND_BUF_VIRT_UNMAPPED + 1));
1156 switch (tx->tx_buftype) {
1160 case GNILND_BUF_NONE:
1161 case GNILND_BUF_IMMEDIATE:
1162 case GNILND_BUF_PHYS_UNMAPPED:
1163 case GNILND_BUF_VIRT_UNMAPPED:
1165 case GNILND_BUF_IMMEDIATE_KIOV:
1166 if (tx->tx_phys != NULL) {
1167 vunmap(tx->tx_phys);
1168 } else if (tx->tx_phys == NULL && tx->tx_buffer != NULL) {
1169 kunmap(tx->tx_imm_pages[0]);
1171 /* clear to prevent kgnilnd_free_tx from thinking
1172 * this is an RDMA descriptor */
1176 case GNILND_BUF_PHYS_MAPPED:
1177 case GNILND_BUF_VIRT_MAPPED:
1178 LASSERT(tx->tx_conn != NULL);
1180 dev = tx->tx_conn->gnc_device;
1182 /* only want to hold if we are closing conn without
1183 * verified peer notification - the theory is that
1184 * a TX error can be communicated in all other cases */
1185 if (tx->tx_conn->gnc_state != GNILND_CONN_ESTABLISHED &&
1186 error != -GNILND_NOPURG &&
1187 kgnilnd_check_purgatory_conn(tx->tx_conn)) {
1188 kgnilnd_add_purgatory_tx(tx);
1190 /* The timeout we give to kgni is a deadman stop only.
1191 * We are setting it high to ensure we don't have the kgni timer
1192 * fire before ours fires _and_ is handled */
1193 hold_timeout = GNILND_TIMEOUT2DEADMAN;
1195 GNIDBG_TX(D_NET, tx,
1196 "dev %p delaying MDD release for %dms key %#llx.%#llx",
1197 tx->tx_conn->gnc_device, hold_timeout,
1198 tx->tx_map_key.qword1, tx->tx_map_key.qword2);
1200 if (tx->tx_buffer_copy != NULL) {
1201 rrc = kgnilnd_mem_deregister(dev->gnd_handle, &tx->tx_buffer_copy_map_key, hold_timeout);
1202 LASSERTF(rrc == GNI_RC_SUCCESS, "rrc %d\n", rrc);
1203 rrc = kgnilnd_mem_deregister(dev->gnd_handle, &tx->tx_map_key, 0);
1204 LASSERTF(rrc == GNI_RC_SUCCESS, "rrc %d\n", rrc);
1206 rrc = kgnilnd_mem_deregister(dev->gnd_handle, &tx->tx_map_key, hold_timeout);
1207 LASSERTF(rrc == GNI_RC_SUCCESS, "rrc %d\n", rrc);
1211 kgnilnd_mem_del_map_list(dev, tx);
1217 kgnilnd_tx_done(kgn_tx_t *tx, int completion)
1219 lnet_msg_t *lntmsg0, *lntmsg1;
1220 int status0, status1;
1221 lnet_ni_t *ni = NULL;
1222 kgn_conn_t *conn = tx->tx_conn;
1224 LASSERT(!in_interrupt());
1226 lntmsg0 = tx->tx_lntmsg[0]; tx->tx_lntmsg[0] = NULL;
1227 lntmsg1 = tx->tx_lntmsg[1]; tx->tx_lntmsg[1] = NULL;
1230 !(tx->tx_state & GNILND_TX_QUIET_ERROR) &&
1231 !kgnilnd_conn_clean_errno(completion)) {
1232 GNIDBG_TOMSG(D_NETERROR, &tx->tx_msg,
1233 "error %d on tx 0x%p->%s id %u/%d state %s age %ds",
1234 completion, tx, conn ?
1235 libcfs_nid2str(conn->gnc_peer->gnp_nid) : "<?>",
1236 tx->tx_id.txe_smsg_id, tx->tx_id.txe_idx,
1237 kgnilnd_tx_state2str(tx->tx_list_state),
1238 cfs_duration_sec((unsigned long)jiffies - tx->tx_qtime));
1241 /* The error codes determine if we hold onto the MDD */
1242 kgnilnd_unmap_buffer(tx, completion);
1244 /* we have to deliver a reply on lntmsg[1] for the GET, so make sure
1245 * we play nice with the error codes to avoid delivering a failed
1246 * REQUEST and then a REPLY event as well */
1248 /* return -EIO to lnet - it is the magic value for failed sends */
1249 if (tx->tx_msg.gnm_type == GNILND_MSG_GET_REQ) {
1251 status1 = completion;
1253 status0 = status1 = completion;
1256 tx->tx_buftype = GNILND_BUF_NONE;
1257 tx->tx_msg.gnm_type = GNILND_MSG_NONE;
1259 /* lnet_finalize doesn't do anything with the *ni, so ok for us to
1260 * set NULL when we are a tx without a conn */
1262 ni = conn->gnc_peer->gnp_net->gnn_ni;
1264 spin_lock(&conn->gnc_tx_lock);
1266 LASSERTF(test_and_clear_bit(tx->tx_id.txe_idx,
1267 (volatile unsigned long *)&conn->gnc_tx_bits),
1268 "conn %p tx %p bit %d already cleared\n",
1269 conn, tx, tx->tx_id.txe_idx);
1271 LASSERTF(conn->gnc_tx_ref_table[tx->tx_id.txe_idx] != NULL,
1272 "msg_id %d already NULL\n", tx->tx_id.txe_idx);
1274 conn->gnc_tx_ref_table[tx->tx_id.txe_idx] = NULL;
1275 spin_unlock(&conn->gnc_tx_lock);
1278 kgnilnd_free_tx(tx);
1280 /* finalize AFTER freeing lnet msgs */
1282 /* warning - we should hold no locks here - calling lnet_finalize
1283 * could free up lnet credits, resulting in a call chain back into
1284 * the LND via kgnilnd_send and friends */
1286 lnet_finalize(lntmsg0, status0);
1288 if (lntmsg1 != NULL) {
1289 lnet_finalize(lntmsg1, status1);
1294 kgnilnd_txlist_done(struct list_head *txlist, int error)
1297 int err_printed = 0;
1299 if (list_empty(txlist))
1302 list_for_each_entry_safe(tx, txn, txlist, tx_list) {
1303 /* only print the first error */
1305 tx->tx_state |= GNILND_TX_QUIET_ERROR;
1306 list_del_init(&tx->tx_list);
1307 kgnilnd_tx_done(tx, error);
1312 kgnilnd_set_tx_id(kgn_tx_t *tx, kgn_conn_t *conn)
1316 spin_lock(&conn->gnc_tx_lock);
1318 /* ID zero is NOT ALLOWED!!! */
1321 id = find_next_zero_bit((unsigned long *)&conn->gnc_tx_bits,
1322 GNILND_MAX_MSG_ID, conn->gnc_next_tx);
1323 if (id == GNILND_MAX_MSG_ID) {
1324 if (conn->gnc_next_tx != 1) {
1325 /* we only searched from next_tx to end and didn't find
1326 * one, so search again from start */
1327 conn->gnc_next_tx = 1;
1330 /* couldn't find one! */
1331 spin_unlock(&conn->gnc_tx_lock);
1335 /* bump next_tx to prevent immediate reuse */
1336 conn->gnc_next_tx = id + 1;
1338 set_bit(id, (volatile unsigned long *)&conn->gnc_tx_bits);
1339 LASSERTF(conn->gnc_tx_ref_table[id] == NULL,
1340 "tx 0x%p already at id %d\n",
1341 conn->gnc_tx_ref_table[id], id);
1343 /* delay these until we have a valid ID - prevents bad clear of the bit
1344 * in kgnilnd_tx_done */
1346 tx->tx_id.txe_cqid = conn->gnc_cqid;
1348 tx->tx_id.txe_idx = id;
1349 conn->gnc_tx_ref_table[id] = tx;
1351 /* Using jiffies to help differentiate against TX reuse - with
1352 * the usual minimum of a 250HZ clock, we wrap jiffies on the same TX
1353 * if we are sending to the same node faster than 256000/sec.
1354 * To help guard against this, we OR in the tx_seq - that is 32 bits */
1356 tx->tx_id.txe_chips = (__u32)(jiffies | atomic_read(&conn->gnc_tx_seq));
1358 GNIDBG_TX(D_NET, tx, "set cookie/id/bits", NULL);
1360 spin_unlock(&conn->gnc_tx_lock);
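/* A hedged sketch (not from the original source) of the wrap-around ID search
 * used by kgnilnd_set_tx_id() above: find_next_zero_bit() scans from the hint,
 * and if it runs off the end we retry once from 1 (ID zero is reserved) before
 * giving up.  The helper name, parameters and the -E2BIG error value are
 * assumptions for illustration only. */
static int
example_alloc_id(unsigned long *bits, int max_ids, int *next_hint)
{
        int id = find_next_zero_bit(bits, max_ids, *next_hint);

        if (id == max_ids) {
                if (*next_hint == 1)
                        return -E2BIG;          /* bitmap is genuinely full */
                id = find_next_zero_bit(bits, max_ids, 1);
                if (id == max_ids)
                        return -E2BIG;
        }
        set_bit(id, bits);
        *next_hint = id + 1;                    /* avoid immediate reuse */
        return id;
}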
1365 kgnilnd_tx_log_retrans(kgn_conn_t *conn, kgn_tx_t *tx)
1369 log_retrans = ((tx->tx_retrans < 25) || ((tx->tx_retrans % 25) == 0));
1371 /* we don't care about TX timeout - it could be that the network is slower
1372 * or throttled. We'll keep retransmitting - so if the network is so slow
1373 * that we fill up our mailbox, we'll keep trying to resend that msg
1374 * until we exceed the max_retrans _or_ gnc_last_rx expires, indicating
1375 * that he hasn't sent us any traffic in return */
1377 /* some reasonable throttling of the debug message */
1379 unsigned long now = jiffies;
1380 /* XXX Nic: Mystical TX debug here... */
1381 /* We expect retransmissions so only log when D_NET is enabled */
1382 GNIDBG_SMSG_CREDS(D_NET, conn);
1383 GNIDBG_TOMSG(D_NET, &tx->tx_msg,
1384 "NOT_DONE on conn 0x%p->%s id %x retrans %d wait %dus"
1385 " last_msg %uus/%uus last_cq %uus/%uus",
1386 conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
1387 tx->tx_id, tx->tx_retrans,
1388 jiffies_to_usecs(now - tx->tx_cred_wait),
1389 jiffies_to_usecs(now - conn->gnc_last_tx),
1390 jiffies_to_usecs(now - conn->gnc_last_rx),
1391 jiffies_to_usecs(now - conn->gnc_last_tx_cq),
1392 jiffies_to_usecs(now - conn->gnc_last_rx_cq));
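/* An illustrative sketch (not from the original source): the throttling
 * predicate computed at the top of kgnilnd_tx_log_retrans() - log each of the
 * first 25 retransmits, then only every 25th, so a stuck mailbox cannot flood
 * the console.  example_should_log_retrans() is a hypothetical name. */
static inline int
example_should_log_retrans(int retrans)
{
        return retrans < 25 || (retrans % 25) == 0;
}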
1396 /* caller must be holding gnd_cq_mutex and not unlock it afterwards, as we need to drop it
1397 * to avoid bad ordering with state_lock */
1400 kgnilnd_sendmsg_nolock(kgn_tx_t *tx, void *immediate, unsigned int immediatenob,
1401 spinlock_t *state_lock, kgn_tx_list_state_t state)
1403 kgn_conn_t *conn = tx->tx_conn;
1404 kgn_msg_t *msg = &tx->tx_msg;
1406 unsigned long newest_last_rx, timeout;
1409 LASSERTF((msg->gnm_type == GNILND_MSG_IMMEDIATE) ?
1410 immediatenob <= *kgnilnd_tunables.kgn_max_immediate :
1412 "msg 0x%p type %d wrong payload size %d\n",
1413 msg, msg->gnm_type, immediatenob);
1415 /* make sure we catch all the cases where we'd send on a dirty old mbox
1416 * but allow case for sending CLOSE. Since this check is within the CQ
1417 * mutex barrier and the close message is only sent through
1418 * kgnilnd_send_conn_close the last message out the door will be the
1421 if (atomic_read(&conn->gnc_peer->gnp_dirty_eps) != 0 && msg->gnm_type != GNILND_MSG_CLOSE) {
1422 kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
1423 kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
1424 /* Return -ETIME, we are closing the connection already so we don't want to
1425 * have this tx hit the wire. The tx will be killed by the calling function.
1426 * Once the EP is marked dirty the close message will be the last
1427 * thing to hit the wire */
1432 timeout = cfs_time_seconds(conn->gnc_timeout);
1434 newest_last_rx = GNILND_LASTRX(conn);
1436 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_SEND_TIMEOUT)) {
1437 now = now + (GNILND_TIMEOUTRX(timeout) * 2);
1440 if (time_after_eq(now, newest_last_rx + GNILND_TIMEOUTRX(timeout))) {
1441 GNIDBG_CONN(D_NETERROR|D_CONSOLE, conn,
1442 "Cant send to %s after timeout lapse of %lu; TO %lu\n",
1443 libcfs_nid2str(conn->gnc_peer->gnp_nid),
1444 cfs_duration_sec(now - newest_last_rx),
1445 cfs_duration_sec(GNILND_TIMEOUTRX(timeout)));
1446 kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
1447 kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
1451 GNITX_ASSERTF(tx, (conn != NULL) && (tx->tx_id.txe_idx != 0), "tx id unset!", NULL);
1452 /* msg->gnm_srcnid is set when the message is initialized by whatever function is
1453 * creating the message. This allows the message to contain the correct LNET NID/NET needed
1454 * instead of the one that the peer/conn uses for sending the data.
1456 msg->gnm_connstamp = conn->gnc_my_connstamp;
1457 msg->gnm_payload_len = immediatenob;
1458 msg->gnm_seq = atomic_read(&conn->gnc_tx_seq);
1460 /* always init here - kgn_checksum is a /sys module tunable
1461 * and can be flipped at any point, even between msg init and sending */
1463 if (*kgnilnd_tunables.kgn_checksum) {
1464 /* We must set it here and not in kgnilnd_init_msg, because
1465 * we could resend this msg many times
1466 * (NOT_DONE from gni_smsg_send below) and wouldn't pass
1467 * through init_msg again */
1468 msg->gnm_cksum = kgnilnd_cksum(msg, sizeof(kgn_msg_t));
1469 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_SMSG_CKSUM1)) {
1470 msg->gnm_cksum += 0xf00f;
1474 GNIDBG_TOMSG(D_NET, msg, "tx 0x%p conn 0x%p->%s sending SMSG sz %u id %x/%d [%p for %u]",
1475 tx, conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
1476 sizeof(kgn_msg_t), tx->tx_id.txe_smsg_id,
1477 tx->tx_id.txe_idx, immediate, immediatenob);
1479 if (unlikely(tx->tx_state & GNILND_TX_FAIL_SMSG)) {
1480 rrc = cfs_fail_val ? cfs_fail_val : GNI_RC_NOT_DONE;
1482 rrc = kgnilnd_smsg_send(conn->gnc_ephandle,
1483 msg, sizeof(*msg), immediate,
1485 tx->tx_id.txe_smsg_id);
1489 case GNI_RC_SUCCESS:
1490 atomic_inc(&conn->gnc_tx_seq);
1491 conn->gnc_last_tx = jiffies;
1492 /* no locking here as LIVE isn't a list */
1493 kgnilnd_tx_add_state_locked(tx, NULL, conn, GNILND_TX_LIVE_FMAQ, 1);
1495 /* this needs to be checked under lock as it might be freed from a completion
1498 if (msg->gnm_type == GNILND_MSG_NOOP) {
1499 set_mb(conn->gnc_last_noop_sent, jiffies);
1502 /* serialize with seeing CQ events for completion on this, as well as
1504 kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
1505 kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
1507 atomic_inc(&conn->gnc_device->gnd_short_ntx);
1508 atomic64_add(immediatenob, &conn->gnc_device->gnd_short_txbytes);
1509 kgnilnd_peer_alive(conn->gnc_peer);
1510 GNIDBG_SMSG_CREDS(D_NET, conn);
1513 case GNI_RC_NOT_DONE:
1514 /* Jshimek: We can get GNI_RC_NOT_DONE for 3 reasons currently
1515 * 1: out of mbox credits
1516 * 2: out of mbox payload credits
1517 * 3: On Aries out of dla credits
1519 kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
1520 kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
1521 /* We'll handle this error inline - makes the calling logic much more
1524 /* If no lock, caller doesn't want us to retry */
1525 if (state_lock == NULL) {
1529 /* I need kgni credits to send this. Replace tx at the head of the
1530 * fmaq and I'll get rescheduled when credits appear. Reset the tx_state
1531 * and bump retrans counts since we are requeueing the tx.
1535 conn->gnc_tx_retrans++;
1537 kgnilnd_tx_log_retrans(conn, tx);
1538 /* add to head of list for the state and retries */
1539 spin_lock(state_lock);
1540 kgnilnd_tx_add_state_locked(tx, conn->gnc_peer, conn, state, 0);
1541 spin_unlock(state_lock);
1543 /* We only reschedule for a certain number of retries, then
1544 * we will wait for the CQ events indicating a release of SMSG
1546 if (tx->tx_retrans < *kgnilnd_tunables.kgn_max_retransmits) {
1547 kgnilnd_schedule_conn(conn);
1550 /* A CQ event coming in signifies either a TX completion or
1551 * an RX receive. Either of these *could* free up credits
1552 * in the SMSG mbox and we should try sending again */
1553 GNIDBG_TX(D_NET, tx, "waiting for CQID %u event to resend",
1554 tx->tx_conn->gnc_cqid);
1555 kgnilnd_schedule_delay_conn(conn);
1556 /* use +ve return code to let upper layers know they
1557 * should stop looping on sends */
1561 /* handle bad retcode gracefully */
1562 kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
1563 kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
1568 /* kgnilnd_sendmsg has hard wait on gnd_cq_mutex */
1570 kgnilnd_sendmsg(kgn_tx_t *tx, void *immediate, unsigned int immediatenob,
1571 spinlock_t *state_lock, kgn_tx_list_state_t state)
1573 kgn_device_t *dev = tx->tx_conn->gnc_device;
1574 unsigned long timestamp;
1577 timestamp = jiffies;
1578 kgnilnd_gl_mutex_lock(&dev->gnd_cq_mutex);
1579 kgnilnd_conn_mutex_lock(&tx->tx_conn->gnc_smsg_mutex);
1580 /* delay in jiffies - we are really concerned only with things that
1581 * result in a schedule() or really holding this off for long times.
1582 * NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
1583 dev->gnd_mutex_delay += (long) jiffies - timestamp;
1585 rc = kgnilnd_sendmsg_nolock(tx, immediate, immediatenob, state_lock, state);
1591 /* returns -EAGAIN for lock miss, anything else < 0 is hard error, >=0 for success */
1593 kgnilnd_sendmsg_trylock(kgn_tx_t *tx, void *immediate, unsigned int immediatenob,
1594 spinlock_t *state_lock, kgn_tx_list_state_t state)
1596 kgn_conn_t *conn = tx->tx_conn;
1597 kgn_device_t *dev = conn->gnc_device;
1598 unsigned long timestamp;
1601 timestamp = jiffies;
1603 /* technically we are doing bad things with the read_lock on the peer_conn
1604 * table, but we shouldn't be sleeping inside here - and we don't sleep/block
1605 * for the mutex. I bet lockdep is gonna flag this one though... */
1607 /* there are a few cases where we don't want the immediate send - like
1608 * when we are in the scheduler thread and it'd harm the latency of
1609 * getting messages up to LNet */
1611 /* rmb for gnd_ready */
1613 if (conn->gnc_device->gnd_ready == GNILND_DEV_LOOP) {
1615 atomic_inc(&conn->gnc_device->gnd_fast_block);
1616 } else if (unlikely(kgnilnd_data.kgn_quiesce_trigger)) {
1617 /* don't hit HW during quiesce */
1619 } else if (unlikely(atomic_read(&conn->gnc_peer->gnp_dirty_eps))) {
1620 /* don't hit HW if stale EPs and conns left to close */
1623 atomic_inc(&conn->gnc_device->gnd_fast_try);
1624 rc = kgnilnd_trylock(&conn->gnc_device->gnd_cq_mutex,
1625 &conn->gnc_smsg_mutex);
1630 /* we got the mutex and weren't blocked */
1632 /* delay in jiffies - we are really concerned only with things that
1633 * result in a schedule() or really holding this off for long times.
1634 * NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
1635 dev->gnd_mutex_delay += (long) jiffies - timestamp;
1637 atomic_inc(&conn->gnc_device->gnd_fast_ok);
1638 tx->tx_qtime = jiffies;
1639 tx->tx_state = GNILND_TX_WAITING_COMPLETION;
1640 rc = kgnilnd_sendmsg_nolock(tx, tx->tx_buffer, tx->tx_nob, &conn->gnc_list_lock, GNILND_TX_FMAQ);
1641 /* _nolock unlocks the mutex for us */
1647 /* lets us know if we can push this RDMA through now */
1649 kgnilnd_auth_rdma_bytes(kgn_device_t *dev, kgn_tx_t *tx)
1653 bytes_left = atomic64_sub_return(tx->tx_nob, &dev->gnd_rdmaq_bytes_ok);
1655 if (bytes_left < 0) {
1656 atomic64_add(tx->tx_nob, &dev->gnd_rdmaq_bytes_ok);
1657 atomic_inc(&dev->gnd_rdmaq_nstalls);
1660 CDEBUG(D_NET, "no bytes to send, turning on timer for %lu\n",
1661 dev->gnd_rdmaq_deadline);
1662 mod_timer(&dev->gnd_rdmaq_timer, dev->gnd_rdmaq_deadline);
1663 /* we never del this timer - at worst it schedules us.. */
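/* An illustrative sketch (not from the original source): the byte-budget
 * throttle above in miniature.  atomic64_sub_return() charges the budget and
 * reports the remainder; a negative result means the caller overdrew, so the
 * charge is handed back and the caller must wait for the timer to refill the
 * pool.  example_charge_budget() is a hypothetical name. */
static inline int
example_charge_budget(atomic64_t *budget, long nob)
{
        if (atomic64_sub_return(nob, budget) < 0) {
                atomic64_add(nob, budget);      /* undo the overdraft */
                return 0;                       /* caller should stall */
        }
        return 1;
}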
1670 /* this adds a TX to the queue pending throttling authorization before
1671 * we allow our remote peer to launch a PUT at us */
1673 kgnilnd_queue_rdma(kgn_conn_t *conn, kgn_tx_t *tx)
1677 /* we cannot go into send_mapped_tx from here as we are holding locks
1678 * and mem registration might end up allocating memory in kgni.
1679 * That said, we'll push this as far as we can into the queue process */
1680 rc = kgnilnd_auth_rdma_bytes(conn->gnc_device, tx);
1683 spin_lock(&conn->gnc_device->gnd_rdmaq_lock);
1684 kgnilnd_tx_add_state_locked(tx, NULL, conn, GNILND_TX_RDMAQ, 0);
1685 /* lets us know how delayed RDMA is */
1686 tx->tx_qtime = jiffies;
1687 spin_unlock(&conn->gnc_device->gnd_rdmaq_lock);
1689 /* we have RDMA authorized, now it just needs a MDD and to hit the wire */
1690 spin_lock(&tx->tx_conn->gnc_device->gnd_lock);
1691 kgnilnd_tx_add_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_MAPQ, 0);
1692 /* lets us know how delayed mapping is */
1693 tx->tx_qtime = jiffies;
1694 spin_unlock(&tx->tx_conn->gnc_device->gnd_lock);
1697 /* make sure we wake up sched to run this */
1698 kgnilnd_schedule_device(tx->tx_conn->gnc_device);
1701 /* push TX through state machine */
1703 kgnilnd_queue_tx(kgn_conn_t *conn, kgn_tx_t *tx)
1708 /* set the tx_id here, we delay it until we have an actual conn
1710 * in some cases, the tx_id is already set to provide for things
1711 * like RDMA completion cookies, etc */
1712 if (tx->tx_id.txe_idx == 0) {
1713 rc = kgnilnd_set_tx_id(tx, conn);
1715 kgnilnd_tx_done(tx, rc);
1720 CDEBUG(D_NET, "%s to conn %p for %s\n", kgnilnd_msgtype2str(tx->tx_msg.gnm_type),
1721 conn, libcfs_nid2str(conn->gnc_peer->gnp_nid));
1723 /* Only let NOOPs be sent while fail loc is set, otherwise kill the tx.
1725 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_ONLY_NOOP) && (tx->tx_msg.gnm_type != GNILND_MSG_NOOP)) {
1726 kgnilnd_tx_done(tx, rc);
1730 switch (tx->tx_msg.gnm_type) {
1731 case GNILND_MSG_PUT_ACK:
1732 case GNILND_MSG_GET_REQ:
1733 case GNILND_MSG_PUT_REQ_REV:
1734 case GNILND_MSG_GET_ACK_REV:
1735 /* hijacking time! If this message will authorize our peer to
1736 * send his dirty little bytes in an RDMA, we need to get permission */
1737 kgnilnd_queue_rdma(conn, tx);
1739 case GNILND_MSG_IMMEDIATE:
1740 /* try to send right now, can help reduce latency */
1741 rc = kgnilnd_sendmsg_trylock(tx, tx->tx_buffer, tx->tx_nob, &conn->gnc_list_lock, GNILND_TX_FMAQ);
1744 /* it was sent, break out of switch to avoid default case of queueing */
1747 /* needs to queue to try again, so fall through to default case */
1748 case GNILND_MSG_NOOP:
1749 /* Just make sure this goes out first for this conn */
1751 /* fall through... */
1753 spin_lock(&conn->gnc_list_lock);
1754 kgnilnd_tx_add_state_locked(tx, conn->gnc_peer, conn, GNILND_TX_FMAQ, add_tail);
1755 tx->tx_qtime = jiffies;
1756 spin_unlock(&conn->gnc_list_lock);
1757 kgnilnd_schedule_conn(conn);
1762 kgnilnd_launch_tx(kgn_tx_t *tx, kgn_net_t *net, lnet_process_id_t *target)
1765 kgn_peer_t *new_peer = NULL;
1766 kgn_conn_t *conn = NULL;
1772 /* If I get here, I've committed to send, so I complete the tx with
1773 * failure on any problems */
1775 GNITX_ASSERTF(tx, tx->tx_conn == NULL,
1776 "tx already has connection %p", tx->tx_conn);
1778 /* do all of the peer & conn searching in one swoop - this avoids
1779 * nastiness when dropping locks and needing to maintain a sane state
1780 * in the face of stack reset or something else nuking peers & conns */
1782 /* I expect to find him, so only take a read lock */
1783 read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1785 peer = kgnilnd_find_peer_locked(target->nid);
1787 conn = kgnilnd_find_conn_locked(peer);
1788 /* this could be NULL during quiesce */
1790 /* Connection exists; queue message on it */
1791 kgnilnd_queue_tx(conn, tx);
1792 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1796 /* don't create a connection if the peer is marked down */
1797 if (peer->gnp_state != GNILND_PEER_UP) {
1798 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1804 /* creating peer or conn; I'll need a write lock... */
1805 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1807 CFS_RACE(CFS_FAIL_GNI_FIND_TARGET);
1809 node_state = kgnilnd_get_node_state(LNET_NIDADDR(target->nid));
1811 /* NB - this will not block during normal operations -
1812 * the only writer of this is in the startup/shutdown path. */
1813 rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);
1819 /* ignore previous peer entirely - we cycled the lock, so we
1820 * will create new peer and at worst drop it if peer is still
1822 rc = kgnilnd_create_peer_safe(&new_peer, target->nid, net, node_state);
1824 up_read(&kgnilnd_data.kgn_net_rw_sem);
1828 write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1829 up_read(&kgnilnd_data.kgn_net_rw_sem);
1831 /* search for peer again now that we have the lock
1832 * if we don't find it, add our new one to the list */
1833 kgnilnd_add_peer_locked(target->nid, new_peer, &peer);
1835 /* don't create a connection if the peer is not up */
1836 if (peer->gnp_state != GNILND_PEER_UP) {
1837 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1842 conn = kgnilnd_find_or_create_conn_locked(peer);
1844 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_DGRAM_DROP_TX)) {
1845 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1850 /* oh hey, found a conn now... magical */
1851 kgnilnd_queue_tx(conn, tx);
1853 /* no conn, must be trying to connect - so we queue for now */
1854 tx->tx_qtime = jiffies;
1855 kgnilnd_tx_add_state_locked(tx, peer, NULL, GNILND_TX_PEERQ, 1);
1857 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1860 kgnilnd_tx_done(tx, rc);
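/* A hedged sketch (not from the original source) of the locking shape of
 * kgnilnd_launch_tx() above: the common case stays on the read side of
 * kgn_peer_conn_lock, while peer/conn creation drops it and redoes the lookup
 * under the write lock, because another thread may have raced in while no lock
 * was held.  The helper name is hypothetical and all error paths are omitted. */
static void
example_lookup_or_create(lnet_nid_t nid)
{
        read_lock(&kgnilnd_data.kgn_peer_conn_lock);
        if (kgnilnd_find_peer_locked(nid) != NULL) {
                /* fast path: peer exists, use it under the read lock */
                read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
                return;
        }
        read_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        /* slow path: re-check under the write lock before creating anything */
        write_lock(&kgnilnd_data.kgn_peer_conn_lock);
        /* ... kgnilnd_add_peer_locked() / conn creation would go here ... */
        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
}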
1865 kgnilnd_rdma(kgn_tx_t *tx, int type,
1866 kgn_rdma_desc_t *sink, unsigned int nob, __u64 cookie)
1868 kgn_conn_t *conn = tx->tx_conn;
1869 unsigned long timestamp;
1870 gni_post_type_t post_type;
1873 unsigned int desc_nob = nob;
1874 void *desc_buffer = tx->tx_buffer;
1875 gni_mem_handle_t desc_map_key = tx->tx_map_key;
1876 LASSERTF(kgnilnd_tx_mapped(tx),
1877 "unmapped tx %p\n", tx);
1878 LASSERTF(conn != NULL,
1879 "NULL conn on tx %p, naughty, naughty\n", tx);
1880 LASSERTF(nob <= sink->gnrd_nob,
1881 "nob %u > sink->gnrd_nob %d (%p)\n",
1882 nob, sink->gnrd_nob, sink);
1883 LASSERTF(nob <= tx->tx_nob,
1884 "nob %d > tx(%p)->tx_nob %d\n",
1885 nob, tx, tx->tx_nob);
1888 case GNILND_MSG_GET_DONE:
1889 case GNILND_MSG_PUT_DONE:
1890 post_type = GNI_POST_RDMA_PUT;
1892 case GNILND_MSG_GET_DONE_REV:
1893 case GNILND_MSG_PUT_DONE_REV:
1894 post_type = GNI_POST_RDMA_GET;
1897 CERROR("invalid msg type %s (%d)\n",
1898 kgnilnd_msgtype2str(type), type);
1901 if (post_type == GNI_POST_RDMA_GET) {
1902 /* Check for remote buffer / local buffer / length alignment. All must be 4 byte
1903 * aligned. If the local buffer is not aligned correctly, using the copy buffer
1904 * will fix that issue. If the length is misaligned, the copy buffer will also fix the issue; we end
1905 * up transferring extra bytes into the buffer but only copy the correct nob into the original
1906 * buffer. Remote offset correction is done through a combination of adjusting the offset,
1907 * making sure the length and addr are aligned and copying the data into the correct location
1908 * once the transfer has completed.
1910 if ((((__u64)((unsigned long)tx->tx_buffer)) & 3) ||
1911 (sink->gnrd_addr & 3) ||
1914 tx->tx_offset = ((__u64)((unsigned long)sink->gnrd_addr)) & 3;
1916 kgnilnd_admin_addref(kgnilnd_data.kgn_rev_offset);
1918 if ((nob + tx->tx_offset) & 3) {
1919 desc_nob = ((nob + tx->tx_offset) + (4 - ((nob + tx->tx_offset) & 3)));
1920 kgnilnd_admin_addref(kgnilnd_data.kgn_rev_length);
1922 desc_nob = (nob + tx->tx_offset);
1925 if (tx->tx_buffer_copy == NULL) {
1926 /* Allocate the largest copy buffer we will need; this will prevent us from overwriting data
1927 * and require us to allocate at most a few extra bytes. */
1928 tx->tx_buffer_copy = kgnilnd_vzalloc(desc_nob);
1930 if (!tx->tx_buffer_copy) {
1931 /* allocation of buffer failed; NAK the RDMA */
1932 kgnilnd_nak_rdma(tx->tx_conn, tx->tx_msg.gnm_type, -EFAULT, cookie, tx->tx_msg.gnm_srcnid);
1933 kgnilnd_tx_done(tx, -EFAULT);
1936 kgnilnd_admin_addref(kgnilnd_data.kgn_rev_copy_buff);
1937 rc = kgnilnd_mem_register(conn->gnc_device->gnd_handle, (__u64)tx->tx_buffer_copy, desc_nob, NULL, GNI_MEM_READWRITE, &tx->tx_buffer_copy_map_key);
1938 if (rc != GNI_RC_SUCCESS) {
1939 /* Registration failed; NAK the RDMA and kill the tx. */
1940 kgnilnd_vfree(tx->tx_buffer_copy,
1942 tx->tx_buffer_copy = NULL;
1943 kgnilnd_nak_rdma(tx->tx_conn, tx->tx_msg.gnm_type, -EFAULT, cookie, tx->tx_msg.gnm_srcnid);
1944 kgnilnd_tx_done(tx, -EFAULT);
1948 desc_map_key = tx->tx_buffer_copy_map_key;
1949 desc_buffer = tx->tx_buffer_copy;
1953 memset(&tx->tx_rdma_desc, 0, sizeof(tx->tx_rdma_desc));
1954 tx->tx_rdma_desc.post_id = tx->tx_id.txe_cookie;
1955 tx->tx_rdma_desc.type = post_type;
1956 tx->tx_rdma_desc.cq_mode = GNI_CQMODE_GLOBAL_EVENT;
1957 tx->tx_rdma_desc.local_addr = (__u64)((unsigned long)desc_buffer);
1958 tx->tx_rdma_desc.local_mem_hndl = desc_map_key;
1959 tx->tx_rdma_desc.remote_addr = sink->gnrd_addr - tx->tx_offset;
1960 tx->tx_rdma_desc.remote_mem_hndl = sink->gnrd_key;
1961 tx->tx_rdma_desc.length = desc_nob;
1962 tx->tx_nob_rdma = nob;
1963 if (post_type == GNI_POST_RDMA_PUT && *kgnilnd_tunables.kgn_bte_put_dlvr_mode)
1964 tx->tx_rdma_desc.dlvr_mode = *kgnilnd_tunables.kgn_bte_put_dlvr_mode;
1965 if (post_type == GNI_POST_RDMA_GET && *kgnilnd_tunables.kgn_bte_get_dlvr_mode)
1966 tx->tx_rdma_desc.dlvr_mode = *kgnilnd_tunables.kgn_bte_get_dlvr_mode;
1967 /* prep final completion message */
1968 kgnilnd_init_msg(&tx->tx_msg, type, tx->tx_msg.gnm_srcnid);
1969 tx->tx_msg.gnm_u.completion.gncm_cookie = cookie;
1970 /* send actual size RDMA'd in retval */
1971 tx->tx_msg.gnm_u.completion.gncm_retval = nob;
1973 kgnilnd_compute_rdma_cksum(tx, nob);
1976 kgnilnd_queue_tx(conn, tx);
1980 /* Don't lie (CLOSE == RDMA idle) */
1981 LASSERTF(!conn->gnc_close_sent, "tx %p on conn %p after close sent %d\n",
1982 tx, conn, conn->gnc_close_sent);
1984 GNIDBG_TX(D_NET, tx, "Post RDMA type 0x%02x conn %p dlvr_mode "
1985 "0x%x cookie:%#llx",
1986 type, conn, tx->tx_rdma_desc.dlvr_mode, cookie);
1988 /* set CQ dedicated for RDMA */
1989 tx->tx_rdma_desc.src_cq_hndl = conn->gnc_device->gnd_snd_rdma_cqh;
1991 timestamp = jiffies;
1992 kgnilnd_conn_mutex_lock(&conn->gnc_rdma_mutex);
1993 kgnilnd_gl_mutex_lock(&conn->gnc_device->gnd_cq_mutex);
1994 /* delay in jiffies - we are really concerned only with things that
1995 * result in a schedule() or really holding this off for long times.
1996 * NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
1997 conn->gnc_device->gnd_mutex_delay += (long) jiffies - timestamp;
1999 rrc = kgnilnd_post_rdma(conn->gnc_ephandle, &tx->tx_rdma_desc);
2001 if (rrc == GNI_RC_ERROR_RESOURCE) {
2002 kgnilnd_conn_mutex_unlock(&conn->gnc_rdma_mutex);
2003 kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
2004 kgnilnd_unmap_buffer(tx, 0);
2006 if (tx->tx_buffer_copy != NULL) {
2007 kgnilnd_vfree(tx->tx_buffer_copy, desc_nob);
2008 tx->tx_buffer_copy = NULL;
2011 spin_lock(&tx->tx_conn->gnc_device->gnd_lock);
2012 kgnilnd_tx_add_state_locked(tx, NULL, tx->tx_conn,
2014 spin_unlock(&tx->tx_conn->gnc_device->gnd_lock);
2015 kgnilnd_schedule_device(tx->tx_conn->gnc_device);
2019 spin_lock(&conn->gnc_list_lock);
2020 kgnilnd_tx_add_state_locked(tx, conn->gnc_peer, conn, GNILND_TX_LIVE_RDMAQ, 1);
2021 tx->tx_qtime = jiffies;
2022 spin_unlock(&conn->gnc_list_lock);
2023 kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
2024 kgnilnd_conn_mutex_unlock(&conn->gnc_rdma_mutex);
2026 /* XXX Nic: is this a place we should handle more errors for
2027 * robustness' sake? */
2028 LASSERT(rrc == GNI_RC_SUCCESS);
2033 kgnilnd_alloc_rx(void)
2037 rx = kmem_cache_alloc(kgnilnd_data.kgn_rx_cache, GFP_ATOMIC);
2039 CERROR("failed to allocate rx\n");
2042 CDEBUG(D_MALLOC, "slab-alloced 'rx': %lu at %p.\n",
2045 /* no memset to zero, we'll always fill all members */
2049 /* release just frees connection resources;
2050 * we use this for the eager path after copying */
2052 kgnilnd_release_msg(kgn_conn_t *conn)
2055 unsigned long timestamp;
2057 CDEBUG(D_NET, "consuming %p\n", conn);
2059 timestamp = jiffies;
2060 kgnilnd_gl_mutex_lock(&conn->gnc_device->gnd_cq_mutex);
2061 /* delay in jiffies - we are really only concerned with things that
2062 * result in a schedule() or hold this off for a long time.
2063 * NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
2064 conn->gnc_device->gnd_mutex_delay += (long) jiffies - timestamp;
2066 rrc = kgnilnd_smsg_release(conn->gnc_ephandle);
2067 kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
2069 LASSERTF(rrc == GNI_RC_SUCCESS, "bad rrc %d\n", rrc);
2070 GNIDBG_SMSG_CREDS(D_NET, conn);
2072 kgnilnd_schedule_conn(conn);
2078 kgnilnd_consume_rx(kgn_rx_t *rx)
2080 kgn_conn_t *conn = rx->grx_conn;
2081 kgn_msg_t *rxmsg = rx->grx_msg;
2083 /* if we are eager, free the cache alloc'd msg */
2084 if (unlikely(rx->grx_eager)) {
2085 LIBCFS_FREE(rxmsg, sizeof(*rxmsg) + *kgnilnd_tunables.kgn_max_immediate);
2086 atomic_dec(&kgnilnd_data.kgn_neager_allocs);
2088 /* release ref from eager_recv */
2089 kgnilnd_conn_decref(conn);
2091 GNIDBG_MSG(D_NET, rxmsg, "rx %p processed", rx);
2092 kgnilnd_release_msg(conn);
2095 kmem_cache_free(kgnilnd_data.kgn_rx_cache, rx);
2096 CDEBUG(D_MALLOC, "slab-freed 'rx': %lu at %p.\n",
2103 kgnilnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
2105 struct lnet_hdr *hdr = &lntmsg->msg_hdr;
2106 int type = lntmsg->msg_type;
2107 lnet_process_id_t target = lntmsg->msg_target;
2108 int target_is_router = lntmsg->msg_target_is_router;
2109 int routing = lntmsg->msg_routing;
2110 unsigned int niov = lntmsg->msg_niov;
2111 struct kvec *iov = lntmsg->msg_iov;
2112 lnet_kiov_t *kiov = lntmsg->msg_kiov;
2113 unsigned int offset = lntmsg->msg_offset;
2114 unsigned int nob = lntmsg->msg_len;
2115 unsigned int msg_vmflush = lntmsg->msg_vmflush;
2116 kgn_net_t *net = ni->ni_data;
2120 int reverse_rdma_flag = *kgnilnd_tunables.kgn_reverse_rdma;
2122 /* NB 'private' is different depending on what we're sending.... */
2123 LASSERT(!in_interrupt());
2125 CDEBUG(D_NET, "sending msg type %d with %d bytes in %d frags to %s\n",
2126 type, nob, niov, libcfs_id2str(target));
2128 LASSERTF(nob == 0 || niov > 0,
2129 "lntmsg %p nob %d niov %d\n", lntmsg, nob, niov);
2130 LASSERTF(niov <= LNET_MAX_IOV,
2131 "lntmsg %p niov %d\n", lntmsg, niov);
2133 /* payload is either all vaddrs or all pages */
2134 LASSERTF(!(kiov != NULL && iov != NULL),
2135 "lntmsg %p kiov %p iov %p\n", lntmsg, kiov, iov);
2138 mpflag = cfs_memory_pressure_get_and_set();
2142 CERROR("lntmsg %p with unexpected type %d\n",
2147 LASSERTF(nob == 0, "lntmsg %p nob %d\n",
2155 if (routing || target_is_router)
2156 break; /* send IMMEDIATE */
2158 /* it is safe to do a direct GET without mapping the buffer for RDMA as we
2159 * check the eventual sink buffer here - if it is small enough, the remote
2160 * end is perfectly capable of returning the data in a short message -
2161 * the magic is that we call lnet_parse in kgnilnd_recv with rdma_req=0
2162 * for IMMEDIATE messages, which has it send a real reply instead
2163 * of doing kgnilnd_recv to have the RDMA continued */
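/*
 * Illustrative flow (simplified sketch): when the GET sink's md_length fits
 * within *kgn_max_immediate we break out below and send the GET header as a
 * GNILND_MSG_IMMEDIATE; the peer's lnet_parse() (called with rdma_req=0 for
 * IMMEDIATE messages) then generates an ordinary reply carrying the data
 * inline, so no buffer mapping or RDMA is needed for small GETs.
 */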
2164 if (lntmsg->msg_md->md_length <= *kgnilnd_tunables.kgn_max_immediate)
2167 if ((reverse_rdma_flag & GNILND_REVERSE_GET) == 0)
2168 tx = kgnilnd_new_tx_msg(GNILND_MSG_GET_REQ, ni->ni_nid);
2170 tx = kgnilnd_new_tx_msg(GNILND_MSG_GET_REQ_REV, ni->ni_nid);
2176 /* slightly different options as we might actually have a GET with a
2177 * MD_KIOV set but a non-NULL md_iov.iov */
2178 if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0)
2179 rc = kgnilnd_setup_rdma_buffer(tx, lntmsg->msg_md->md_niov,
2180 lntmsg->msg_md->md_iov.iov, NULL,
2181 0, lntmsg->msg_md->md_length);
2183 rc = kgnilnd_setup_rdma_buffer(tx, lntmsg->msg_md->md_niov,
2184 NULL, lntmsg->msg_md->md_iov.kiov,
2185 0, lntmsg->msg_md->md_length);
2187 CERROR("unable to setup buffer: %d\n", rc);
2188 kgnilnd_tx_done(tx, rc);
2193 tx->tx_lntmsg[1] = lnet_create_reply_msg(ni, lntmsg);
2194 if (tx->tx_lntmsg[1] == NULL) {
2195 CERROR("Can't create reply for GET to %s\n",
2196 libcfs_nid2str(target.nid));
2197 kgnilnd_tx_done(tx, rc);
2202 tx->tx_lntmsg[0] = lntmsg;
2203 if ((reverse_rdma_flag & GNILND_REVERSE_GET) == 0)
2204 tx->tx_msg.gnm_u.get.gngm_hdr = *hdr;
2206 tx->tx_msg.gnm_u.putreq.gnprm_hdr = *hdr;
2208 /* rest of tx_msg is set up just before it is sent */
2209 kgnilnd_launch_tx(tx, net, &target);
2211 case LNET_MSG_REPLY:
2213 /* to save on MDDs, we'll handle short kiov by vmap'ing
2214 * and sending via SMSG */
2215 if (nob <= *kgnilnd_tunables.kgn_max_immediate)
2218 if ((reverse_rdma_flag & GNILND_REVERSE_PUT) == 0)
2219 tx = kgnilnd_new_tx_msg(GNILND_MSG_PUT_REQ, ni->ni_nid);
2221 tx = kgnilnd_new_tx_msg(GNILND_MSG_PUT_REQ_REV, ni->ni_nid);
2228 rc = kgnilnd_setup_rdma_buffer(tx, niov, iov, kiov, offset, nob);
2230 kgnilnd_tx_done(tx, rc);
2235 tx->tx_lntmsg[0] = lntmsg;
2236 if ((reverse_rdma_flag & GNILND_REVERSE_PUT) == 0)
2237 tx->tx_msg.gnm_u.putreq.gnprm_hdr = *hdr;
2239 tx->tx_msg.gnm_u.get.gngm_hdr = *hdr;
2241 /* rest of tx_msg is set up just before it is sent */
2242 kgnilnd_launch_tx(tx, net, &target);
2246 /* send IMMEDIATE */
2248 LASSERTF(nob <= *kgnilnd_tunables.kgn_max_immediate,
2249 "lntmsg 0x%p too large %d\n", lntmsg, nob);
2251 tx = kgnilnd_new_tx_msg(GNILND_MSG_IMMEDIATE, ni->ni_nid);
2257 rc = kgnilnd_setup_immediate_buffer(tx, niov, iov, kiov, offset, nob);
2259 kgnilnd_tx_done(tx, rc);
2263 tx->tx_msg.gnm_u.immediate.gnim_hdr = *hdr;
2264 tx->tx_lntmsg[0] = lntmsg;
2265 kgnilnd_launch_tx(tx, net, &target);
2268 /* use stored value as we could have already finalized lntmsg here from a failed launch */
2270 cfs_memory_pressure_restore(mpflag);
2275 kgnilnd_setup_rdma(lnet_ni_t *ni, kgn_rx_t *rx, lnet_msg_t *lntmsg, int mlen)
2277 kgn_conn_t *conn = rx->grx_conn;
2278 kgn_msg_t *rxmsg = rx->grx_msg;
2279 unsigned int niov = lntmsg->msg_niov;
2280 struct kvec *iov = lntmsg->msg_iov;
2281 lnet_kiov_t *kiov = lntmsg->msg_kiov;
2282 unsigned int offset = lntmsg->msg_offset;
2283 unsigned int nob = lntmsg->msg_len;
2288 switch (rxmsg->gnm_type) {
2289 case GNILND_MSG_PUT_REQ_REV:
2290 done_type = GNILND_MSG_PUT_DONE_REV;
2293 case GNILND_MSG_GET_REQ:
2294 done_type = GNILND_MSG_GET_DONE;
2297 CERROR("invalid msg type %s (%d)\n",
2298 kgnilnd_msgtype2str(rxmsg->gnm_type),
2303 tx = kgnilnd_new_tx_msg(done_type, ni->ni_nid);
2307 rc = kgnilnd_set_tx_id(tx, conn);
2311 rc = kgnilnd_setup_rdma_buffer(tx, niov, iov, kiov, offset, nob);
2315 tx->tx_lntmsg[0] = lntmsg;
2316 tx->tx_getinfo = rxmsg->gnm_u.get;
2318 /* we only queue from kgnilnd_recv - we might get called from other contexts
2319 * and we don't want to block on the mutex in those cases */
2321 spin_lock(&tx->tx_conn->gnc_device->gnd_lock);
2322 kgnilnd_tx_add_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_MAPQ, 1);
2323 spin_unlock(&tx->tx_conn->gnc_device->gnd_lock);
2324 kgnilnd_schedule_device(tx->tx_conn->gnc_device);
2329 kgnilnd_tx_done(tx, rc);
2330 kgnilnd_nak_rdma(conn, done_type, rc, rxmsg->gnm_u.get.gngm_cookie, ni->ni_nid);
2332 lnet_finalize(lntmsg, rc);
2336 kgnilnd_eager_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
2339 kgn_rx_t *rx = private;
2340 kgn_conn_t *conn = rx->grx_conn;
2341 kgn_msg_t *rxmsg = rx->grx_msg;
2342 kgn_msg_t *eagermsg = NULL;
2343 kgn_peer_t *peer = NULL;
2344 kgn_conn_t *found_conn = NULL;
2346 GNIDBG_MSG(D_NET, rxmsg, "eager recv for conn %p, rxmsg %p, lntmsg %p",
2347 conn, rxmsg, lntmsg);
2349 if (rxmsg->gnm_payload_len > *kgnilnd_tunables.kgn_max_immediate) {
2350 GNIDBG_MSG(D_ERROR, rxmsg, "payload too large %d",
2351 rxmsg->gnm_payload_len);
2354 /* Grab a read lock so the connection doesn't disappear on us
2355 * while we look it up
2357 read_lock(&kgnilnd_data.kgn_peer_conn_lock);
2359 peer = kgnilnd_find_peer_locked(rxmsg->gnm_srcnid);
2361 found_conn = kgnilnd_find_conn_locked(peer);
2364 /* Verify the connection found is the same one that the message
2365 * is supposed to be using; if it is not, output an error message
2368 if (!peer || !found_conn
2369 || found_conn->gnc_peer_connstamp != rxmsg->gnm_connstamp) {
2370 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
2371 CERROR("Couldnt find matching peer %p or conn %p / %p\n",
2372 peer, conn, found_conn);
2374 CERROR("Unexpected connstamp %#llx(%#llx expected)"
2375 " from %s", rxmsg->gnm_connstamp,
2376 found_conn->gnc_peer_connstamp,
2377 libcfs_nid2str(peer->gnp_nid));
2382 /* add conn ref to ensure it doesn't go away until all eager
2383 * messages processed */
2384 kgnilnd_conn_addref(conn);
2386 /* Now that we have verified the connection is valid and added a
2387 * reference we can remove the read_lock on the peer_conn_lock */
2388 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
2390 /* we have no credits or buffers for this message, so copy it
2391 * somewhere for a later kgnilnd_recv */
2392 if (atomic_read(&kgnilnd_data.kgn_neager_allocs) >=
2393 *kgnilnd_tunables.kgn_eager_credits) {
2394 CERROR("Out of eager credits to %s\n",
2395 libcfs_nid2str(conn->gnc_peer->gnp_nid));
2399 atomic_inc(&kgnilnd_data.kgn_neager_allocs);
2401 LIBCFS_ALLOC(eagermsg, sizeof(*eagermsg) + *kgnilnd_tunables.kgn_max_immediate);
2402 if (eagermsg == NULL) {
2403 kgnilnd_conn_decref(conn);
2404 CERROR("couldn't allocate eager rx message for conn %p to %s\n",
2405 conn, libcfs_nid2str(conn->gnc_peer->gnp_nid));
2409 /* copy msg and payload */
2410 memcpy(eagermsg, rxmsg, sizeof(*rxmsg) + rxmsg->gnm_payload_len);
2411 rx->grx_msg = eagermsg;
2414 /* stash this for lnet_finalize on cancel-on-conn-close */
2415 rx->grx_lntmsg = lntmsg;
2417 /* keep the same rx_t, it just has a new grx_msg now */
2418 *new_private = private;
2420 /* release SMSG buffer */
2421 kgnilnd_release_msg(conn);
2427 kgnilnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
2428 int delayed, unsigned int niov,
2429 struct kvec *iov, lnet_kiov_t *kiov,
2430 unsigned int offset, unsigned int mlen, unsigned int rlen)
2432 kgn_rx_t *rx = private;
2433 kgn_conn_t *conn = rx->grx_conn;
2434 kgn_msg_t *rxmsg = rx->grx_msg;
2440 LASSERT(!in_interrupt());
2441 LASSERTF(mlen <= rlen, "%d <= %d\n", mlen, rlen);
2442 /* Either all pages or all vaddrs */
2443 LASSERTF(!(kiov != NULL && iov != NULL), "kiov %p iov %p\n",
2446 GNIDBG_MSG(D_NET, rxmsg, "conn %p, rxmsg %p, lntmsg %p"
2447 " niov=%d kiov=%p iov=%p offset=%d mlen=%d rlen=%d",
2448 conn, rxmsg, lntmsg,
2449 niov, kiov, iov, offset, mlen, rlen);
2451 /* we need to lock here as recv can be called from any context */
2452 read_lock(&kgnilnd_data.kgn_peer_conn_lock);
2453 if (rx->grx_eager && conn->gnc_state != GNILND_CONN_ESTABLISHED) {
2454 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
2456 /* someone closed the conn after we copied this out, nuke it */
2457 kgnilnd_consume_rx(rx);
2458 lnet_finalize(lntmsg, conn->gnc_error);
2461 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
2463 switch (rxmsg->gnm_type) {
2465 GNIDBG_MSG(D_NETERROR, rxmsg, "conn %p, rx %p, rxmsg %p, lntmsg %p"
2466 " niov=%d kiov=%p iov=%p offset=%d mlen=%d rlen=%d",
2467 conn, rx, rxmsg, lntmsg, niov, kiov, iov, offset, mlen, rlen);
2470 case GNILND_MSG_IMMEDIATE:
2471 if (mlen > rxmsg->gnm_payload_len) {
2472 GNIDBG_MSG(D_ERROR, rxmsg,
2473 "Immediate message from %s too big: %d > %d",
2474 libcfs_nid2str(conn->gnc_peer->gnp_nid), mlen,
2475 rxmsg->gnm_payload_len);
2477 kgnilnd_consume_rx(rx);
2481 /* rxmsg[1] is a pointer to the payload, sitting in the buffer
2482 * right after the kgn_msg_t header - so it's just a 'cute' way of saying
2483 * rxmsg + sizeof(kgn_msg_t) */
2485 /* check payload checksum if sent */
2487 if (*kgnilnd_tunables.kgn_checksum >= 2 &&
2488 !rxmsg->gnm_payload_cksum &&
2489 rxmsg->gnm_payload_len != 0)
2490 GNIDBG_MSG(D_WARNING, rxmsg, "no msg payload checksum when enabled");
2492 if (rxmsg->gnm_payload_cksum != 0) {
2493 /* gnm_payload_len set in kgnilnd_sendmsg from tx->tx_nob,
2494 * which is what is used to calculate the cksum on the TX side */
2495 pload_cksum = kgnilnd_cksum(&rxmsg[1], rxmsg->gnm_payload_len);
2497 if (rxmsg->gnm_payload_cksum != pload_cksum) {
2498 GNIDBG_MSG(D_NETERROR, rxmsg,
2499 "Bad payload checksum (%x expected %x)",
2500 pload_cksum, rxmsg->gnm_payload_cksum);
2501 switch (*kgnilnd_tunables.kgn_checksum_dump) {
2503 kgnilnd_dump_blob(D_BUFFS, "bad payload checksum",
2504 &rxmsg[1], rxmsg->gnm_payload_len);
2505 /* fall through to dump */
2507 libcfs_debug_dumplog();
2513 /* checksum problems are fatal, kill the conn */
2514 kgnilnd_consume_rx(rx);
2515 kgnilnd_close_conn(conn, rc);
2521 lnet_copy_flat2kiov(
2523 *kgnilnd_tunables.kgn_max_immediate,
2524 &rxmsg[1], 0, mlen);
2528 *kgnilnd_tunables.kgn_max_immediate,
2529 &rxmsg[1], 0, mlen);
2531 kgnilnd_consume_rx(rx);
2532 lnet_finalize(lntmsg, 0);
2535 case GNILND_MSG_PUT_REQ:
2536 /* LNET wants to truncate or drop transaction, sending NAK */
2538 kgnilnd_consume_rx(rx);
2539 lnet_finalize(lntmsg, 0);
2541 /* only error if lntmsg == NULL, otherwise we are just
2542 * short-circuiting the RDMA processing of 0 bytes */
2543 kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
2544 lntmsg == NULL ? -ENOENT : 0,
2545 rxmsg->gnm_u.get.gngm_cookie,
2549 /* sending ACK with sink buff. info */
2550 tx = kgnilnd_new_tx_msg(GNILND_MSG_PUT_ACK, ni->ni_nid);
2552 kgnilnd_consume_rx(rx);
2556 rc = kgnilnd_set_tx_id(tx, conn);
2558 GOTO(nak_put_req, rc);
2561 rc = kgnilnd_setup_rdma_buffer(tx, niov, iov, kiov, offset, mlen);
2563 GOTO(nak_put_req, rc);
2566 tx->tx_msg.gnm_u.putack.gnpam_src_cookie =
2567 rxmsg->gnm_u.putreq.gnprm_cookie;
2568 tx->tx_msg.gnm_u.putack.gnpam_dst_cookie = tx->tx_id.txe_cookie;
2569 tx->tx_msg.gnm_u.putack.gnpam_desc.gnrd_addr =
2570 (__u64)((unsigned long)tx->tx_buffer);
2571 tx->tx_msg.gnm_u.putack.gnpam_desc.gnrd_nob = mlen;
2573 tx->tx_lntmsg[0] = lntmsg; /* finalize this on RDMA_DONE */
2574 tx->tx_qtime = jiffies;
2575 /* we only queue from kgnilnd_recv - we might get called from other contexts
2576 * and we don't want to block on the mutex in those cases */
2578 spin_lock(&tx->tx_conn->gnc_device->gnd_lock);
2579 kgnilnd_tx_add_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_MAPQ, 1);
2580 spin_unlock(&tx->tx_conn->gnc_device->gnd_lock);
2581 kgnilnd_schedule_device(tx->tx_conn->gnc_device);
2583 kgnilnd_consume_rx(rx);
2587 /* make sure we send an error back when the PUT fails */
2588 kgnilnd_nak_rdma(conn, rxmsg->gnm_type, rc, rxmsg->gnm_u.get.gngm_cookie, ni->ni_nid);
2589 kgnilnd_tx_done(tx, rc);
2590 kgnilnd_consume_rx(rx);
2592 /* return magic LNet network error */
2594 case GNILND_MSG_GET_REQ_REV:
2595 /* LNET wants to truncate or drop transaction, sending NAK */
2597 kgnilnd_consume_rx(rx);
2598 lnet_finalize(lntmsg, 0);
2600 /* only error if lntmsg == NULL, otherwise we are just
2601 * short-circuiting the RDMA processing of 0 bytes */
2602 kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
2603 lntmsg == NULL ? -ENOENT : 0,
2604 rxmsg->gnm_u.get.gngm_cookie,
2608 /* lntmsg can be null when parsing a LNET_GET */
2609 if (lntmsg != NULL) {
2610 /* sending ACK with sink buff. info */
2611 tx = kgnilnd_new_tx_msg(GNILND_MSG_GET_ACK_REV, ni->ni_nid);
2613 kgnilnd_consume_rx(rx);
2617 rc = kgnilnd_set_tx_id(tx, conn);
2619 GOTO(nak_get_req_rev, rc);
2622 rc = kgnilnd_setup_rdma_buffer(tx, niov, iov, kiov, offset, mlen);
2624 GOTO(nak_get_req_rev, rc);
2627 tx->tx_msg.gnm_u.putack.gnpam_src_cookie =
2628 rxmsg->gnm_u.putreq.gnprm_cookie;
2629 tx->tx_msg.gnm_u.putack.gnpam_dst_cookie = tx->tx_id.txe_cookie;
2630 tx->tx_msg.gnm_u.putack.gnpam_desc.gnrd_addr =
2631 (__u64)((unsigned long)tx->tx_buffer);
2632 tx->tx_msg.gnm_u.putack.gnpam_desc.gnrd_nob = mlen;
2634 tx->tx_lntmsg[0] = lntmsg; /* finalize this on RDMA_DONE */
2636 /* we only queue from kgnilnd_recv - we might get called from other contexts
2637 * and we don't want to block on the mutex in those cases */
2639 spin_lock(&tx->tx_conn->gnc_device->gnd_lock);
2640 kgnilnd_tx_add_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_MAPQ, 1);
2641 spin_unlock(&tx->tx_conn->gnc_device->gnd_lock);
2642 kgnilnd_schedule_device(tx->tx_conn->gnc_device);
2645 kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
2647 rxmsg->gnm_u.get.gngm_cookie,
2651 kgnilnd_consume_rx(rx);
2655 /* make sure we send an error back when the GET fails */
2656 kgnilnd_nak_rdma(conn, rxmsg->gnm_type, rc, rxmsg->gnm_u.get.gngm_cookie, ni->ni_nid);
2657 kgnilnd_tx_done(tx, rc);
2658 kgnilnd_consume_rx(rx);
2660 /* return magic LNet network error */
2664 case GNILND_MSG_PUT_REQ_REV:
2665 /* LNET wants to truncate or drop transaction, sending NAK */
2667 kgnilnd_consume_rx(rx);
2668 lnet_finalize(lntmsg, 0);
2670 /* only error if lntmsg == NULL, otherwise we are just
2671 * short-circuiting the RDMA processing of 0 bytes */
2672 kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
2673 lntmsg == NULL ? -ENOENT : 0,
2674 rxmsg->gnm_u.get.gngm_cookie,
2679 if (lntmsg != NULL) {
2681 kgnilnd_setup_rdma(ni, rx, lntmsg, mlen);
2684 kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
2686 rxmsg->gnm_u.get.gngm_cookie,
2689 kgnilnd_consume_rx(rx);
2691 case GNILND_MSG_GET_REQ:
2692 if (lntmsg != NULL) {
2694 kgnilnd_setup_rdma(ni, rx, lntmsg, mlen);
2697 kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
2699 rxmsg->gnm_u.get.gngm_cookie,
2702 kgnilnd_consume_rx(rx);
2708 /* needs write_lock on kgn_peer_conn_lock held */
2710 kgnilnd_check_conn_timeouts_locked(kgn_conn_t *conn)
2712 unsigned long timeout, keepalive;
2713 unsigned long now = jiffies;
2714 unsigned long newest_last_rx;
2717 /* given that we found this conn hanging off a peer, it better damned
2718 * well be connected */
2719 LASSERTF(conn->gnc_state == GNILND_CONN_ESTABLISHED,
2720 "conn 0x%p->%s with bad state%s\n", conn,
2721 conn->gnc_peer ? libcfs_nid2str(conn->gnc_peer->gnp_nid)
2723 kgnilnd_conn_state2str(conn));
2725 CDEBUG(D_NET, "checking conn %p->%s timeout %d keepalive %d "
2726 "rx_diff %lu tx_diff %lu\n",
2727 conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
2728 conn->gnc_timeout, GNILND_TO2KA(conn->gnc_timeout),
2729 cfs_duration_sec(now - conn->gnc_last_rx_cq),
2730 cfs_duration_sec(now - conn->gnc_last_tx));
2732 timeout = cfs_time_seconds(conn->gnc_timeout);
2733 keepalive = cfs_time_seconds(GNILND_TO2KA(conn->gnc_timeout));
2735 /* just in case our lack of RX msg processing is gumming up the works - give the
2736 * remote an extra chance */
2738 newest_last_rx = GNILND_LASTRX(conn);
2740 if (time_after_eq(now, newest_last_rx + timeout)) {
2741 uint32_t level = D_CONSOLE|D_NETERROR;
2743 if (conn->gnc_peer->gnp_state == GNILND_PEER_DOWN) {
2746 GNIDBG_CONN(level, conn,
2747 "No gnilnd traffic received from %s for %lu "
2748 "seconds, terminating connection. Is node down? ",
2749 libcfs_nid2str(conn->gnc_peer->gnp_nid),
2750 cfs_duration_sec(now - newest_last_rx));
2754 /* we don't timeout on last_tx stalls - we are going to trust the
2755 * underlying network to let us know when sends are failing.
2756 * At worst, the peer will timeout our RX stamp and drop the connection
2757 * at that point. We'll then see his CLOSE or at worst his RX
2758 * stamp stop and drop the connection on our end */
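/*
 * Keepalive sketch (illustrative, not new behaviour): with a connection
 * timeout of gnc_timeout seconds, the keepalive interval is
 * GNILND_TO2KA(gnc_timeout); if nothing has been transmitted for that long,
 * the block below queues a NOOP purely to refresh the peer's RX timestamp so
 * an idle but healthy connection is not torn down by the peer's timeout check.
 */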
2760 if (time_after_eq(now, conn->gnc_last_tx + keepalive)) {
2761 CDEBUG(D_NET, "sending NOOP -> %s (%p idle %lu(%lu)) "
2762 "last %lu/%lu/%lu %lus/%lus/%lus\n",
2763 libcfs_nid2str(conn->gnc_peer->gnp_nid), conn,
2764 cfs_duration_sec(jiffies - conn->gnc_last_tx),
2766 conn->gnc_last_noop_want, conn->gnc_last_noop_sent,
2767 conn->gnc_last_noop_cq,
2768 cfs_duration_sec(jiffies - conn->gnc_last_noop_want),
2769 cfs_duration_sec(jiffies - conn->gnc_last_noop_sent),
2770 cfs_duration_sec(jiffies - conn->gnc_last_noop_cq));
2771 set_mb(conn->gnc_last_noop_want, jiffies);
2772 atomic_inc(&conn->gnc_reaper_noop);
2773 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_NOOP_SEND))
2776 tx = kgnilnd_new_tx_msg(GNILND_MSG_NOOP, conn->gnc_peer->gnp_net->gnn_ni->ni_nid);
2779 kgnilnd_queue_tx(conn, tx);
2785 /* needs write_lock on kgn_peer_conn_lock held */
2787 kgnilnd_check_peer_timeouts_locked(kgn_peer_t *peer, struct list_head *todie,
2788 struct list_head *souls)
2790 unsigned long timeout;
2791 kgn_conn_t *conn, *connN = NULL;
2797 short releaseconn = 0;
2798 unsigned long first_rx = 0;
2799 int purgatory_conn_cnt = 0;
2801 CDEBUG(D_NET, "checking peer 0x%p->%s for timeouts; interval %lus\n",
2802 peer, libcfs_nid2str(peer->gnp_nid),
2803 peer->gnp_reconnect_interval);
2805 timeout = cfs_time_seconds(MAX(*kgnilnd_tunables.kgn_timeout,
2806 GNILND_MIN_TIMEOUT));
2808 conn = kgnilnd_find_conn_locked(peer);
2810 /* if there is a valid conn, check the queues for timeouts */
2811 rc = kgnilnd_check_conn_timeouts_locked(conn);
2813 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_RX_CLOSE_CLOSING)) {
2814 /* simulate a RX CLOSE after the timeout but before
2815 * the scheduler thread gets it */
2816 conn->gnc_close_recvd = GNILND_CLOSE_INJECT1;
2817 conn->gnc_peer_error = -ETIMEDOUT;
2820 if (*kgnilnd_tunables.kgn_to_reconn_disable &&
2822 peer->gnp_state = GNILND_PEER_TIMED_OUT;
2823 CDEBUG(D_WARNING, "%s conn timed out, will "
2824 "reconnect upon request from peer\n",
2825 libcfs_nid2str(conn->gnc_peer->gnp_nid));
2827 /* Once we mark closed, any of the scheduler threads could
2828 * get it and move through before we hit the fail loc code */
2829 kgnilnd_close_conn_locked(conn, rc);
2831 /* first_rx is used to decide when to release a conn from purgatory.
2833 first_rx = conn->gnc_first_rx;
2837 /* now, regardless of starting a new conn, find TXs on the peer queue that
2838 * are old and smell bad - do this first so we don't trigger
2839 * a reconnect on an empty queue if we time them all out */
2840 list_for_each_entry_safe(tx, txN, &peer->gnp_tx_queue, tx_list) {
2841 if (time_after_eq(jiffies, tx->tx_qtime + timeout)) {
2843 LCONSOLE_INFO("could not send to %s due to connection"
2844 " setup failure after %lu seconds\n",
2845 libcfs_nid2str(peer->gnp_nid),
2846 cfs_duration_sec(jiffies - tx->tx_qtime));
2848 kgnilnd_tx_del_state_locked(tx, peer, NULL,
2850 list_add_tail(&tx->tx_list, todie);
2855 if (count || peer->gnp_connecting == GNILND_PEER_KILL) {
2856 CDEBUG(D_NET, "canceling %d tx for peer 0x%p->%s\n",
2857 count, peer, libcfs_nid2str(peer->gnp_nid));
2858 /* if we nuked all the TX, stop peer connection attempt (if there is one..) */
2859 if (list_empty(&peer->gnp_tx_queue) ||
2860 peer->gnp_connecting == GNILND_PEER_KILL) {
2861 /* we pass down todie to use a common function - but we know there are
2863 kgnilnd_cancel_peer_connect_locked(peer, todie);
2867 /* Don't reconnect if we are still trying to clear out old conns.
2868 * This prevents us from sending traffic on the new mbox before ensuring we are done
2869 * with the old one */
2870 reconnect = (peer->gnp_state == GNILND_PEER_UP) &&
2871 (atomic_read(&peer->gnp_dirty_eps) == 0);
2873 /* fast reconnect after a timeout */
2874 to_reconn = !conn &&
2875 (peer->gnp_last_errno == -ETIMEDOUT) &&
2876 *kgnilnd_tunables.kgn_fast_reconn;
2878 /* if we are not connected and there are tx on the gnp_tx_queue waiting
2879 * to be sent, we'll check the reconnect interval and fire up a new
2880 * connection request */
2883 (peer->gnp_connecting == GNILND_PEER_IDLE) &&
2884 (time_after_eq(jiffies, peer->gnp_reconnect_time)) &&
2885 (!list_empty(&peer->gnp_tx_queue) || to_reconn)) {
2887 CDEBUG(D_NET, "starting connect to %s\n",
2888 libcfs_nid2str(peer->gnp_nid));
2889 LASSERTF(peer->gnp_connecting == GNILND_PEER_IDLE, "Peer was idle and we "
2890 "have a write_lock, state issue %d\n", peer->gnp_connecting);
2892 peer->gnp_connecting = GNILND_PEER_CONNECT;
2893 kgnilnd_peer_addref(peer); /* extra ref for connd */
2895 spin_lock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
2896 list_add_tail(&peer->gnp_connd_list,
2897 &peer->gnp_net->gnn_dev->gnd_connd_peers);
2898 spin_unlock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
2900 kgnilnd_schedule_dgram(peer->gnp_net->gnn_dev);
2903 /* fail_loc to allow us to delay release of purgatory */
2904 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_PURG_REL_DELAY))
2907 /* This check allows us to verify that the new conn is actually being used. This allows us to
2908 * pull the old conns out of purgatory if they have actually seen traffic.
2909 * We only release a conn from purgatory during stack reset, admin command, or when a peer reconnects
2912 time_after(jiffies, first_rx + cfs_time_seconds(*kgnilnd_tunables.kgn_hardware_timeout))) {
2913 CDEBUG(D_INFO, "We can release peer %s conn's from purgatory %lu\n",
2914 libcfs_nid2str(peer->gnp_nid), first_rx + cfs_time_seconds(*kgnilnd_tunables.kgn_hardware_timeout));
2918 list_for_each_entry_safe (conn, connN, &peer->gnp_conns, gnc_list) {
2919 /* check for purgatory timeouts */
2920 if (conn->gnc_in_purgatory) {
2921 /* We cannot detach this conn from purgatory if it has not been closed, so we reschedule it;
2922 * that way, the next time we check it we can detach it from purgatory
2925 if (conn->gnc_state != GNILND_CONN_DONE) {
2926 /* Skip over conns that are currently not DONE. If they aren't already scheduled
2927 * for completion, something in the state machine is broken.
2932 /* We only detach a conn that is in purgatory if we have received a close message,
2933 * we have a new valid connection that has successfully received data, or an admin
2934 * command tells us we need to detach.
2937 if (conn->gnc_close_recvd || releaseconn || conn->gnc_needs_detach) {
2938 unsigned long waiting;
2940 waiting = (long) jiffies - conn->gnc_last_rx_cq;
2942 /* C.E: The remote peer is expected to close the
2943 * connection (see kgnilnd_check_conn_timeouts)
2944 * via the reaper thread and nuke out the MDD and
2945 * FMA resources after conn->gnc_timeout has expired
2946 * without an FMA RX */
2947 CDEBUG(D_NET, "Reconnected to %s in %lds or admin forced detach, dropping "
2948 " held resources\n",
2949 libcfs_nid2str(conn->gnc_peer->gnp_nid),
2950 cfs_duration_sec(waiting));
2952 kgnilnd_detach_purgatory_locked(conn, souls);
2954 purgatory_conn_cnt++;
2959 /* If we have too many connections in purgatory we could run out of
2960 * resources. Limit the number of connections to a tunable number,
2961 * clean up to the minimum all in one fell swoop... there are
2962 * situations where dvs will retry tx's and we can eat up several
2963 * hundred connection requests at once.
2965 if (purgatory_conn_cnt > *kgnilnd_tunables.kgn_max_purgatory) {
2966 list_for_each_entry_safe(conn, connN, &peer->gnp_conns,
2968 if (conn->gnc_in_purgatory &&
2969 conn->gnc_state == GNILND_CONN_DONE) {
2970 CDEBUG(D_NET, "Dropping Held resource due to"
2971 " resource limits being hit\n");
2972 kgnilnd_detach_purgatory_locked(conn, souls);
2974 if (purgatory_conn_cnt-- <
2975 *kgnilnd_tunables.kgn_max_purgatory)
2985 kgnilnd_reaper_check(int idx)
2987 struct list_head *peers = &kgnilnd_data.kgn_peers[idx];
2988 struct list_head *ctmp, *ctmpN;
2989 struct list_head geriatrics;
2990 struct list_head souls;
2992 INIT_LIST_HEAD(&geriatrics);
2993 INIT_LIST_HEAD(&souls);
2995 write_lock(&kgnilnd_data.kgn_peer_conn_lock);
2997 list_for_each_safe(ctmp, ctmpN, peers) {
2998 kgn_peer_t *peer = NULL;
3000 /* don't timeout stuff if the network is mucked or shutting down */
3001 if (kgnilnd_check_hw_quiesce()) {
3004 peer = list_entry(ctmp, kgn_peer_t, gnp_list);
3006 kgnilnd_check_peer_timeouts_locked(peer, &geriatrics, &souls);
3009 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
3011 kgnilnd_txlist_done(&geriatrics, -EHOSTUNREACH);
3012 kgnilnd_release_purgatory_list(&souls);
3016 kgnilnd_update_reaper_timeout(long timeout)
3018 LASSERT(timeout > 0);
3020 spin_lock(&kgnilnd_data.kgn_reaper_lock);
3022 if (timeout < kgnilnd_data.kgn_new_min_timeout)
3023 kgnilnd_data.kgn_new_min_timeout = timeout;
3025 spin_unlock(&kgnilnd_data.kgn_reaper_lock);
3029 kgnilnd_reaper_poke_with_stick(unsigned long arg)
3031 wake_up(&kgnilnd_data.kgn_reaper_waitq);
3035 kgnilnd_reaper(void *arg)
3040 unsigned long next_check_time = jiffies;
3041 long current_min_timeout = MAX_SCHEDULE_TIMEOUT;
3042 struct timer_list timer;
3045 cfs_block_allsigs();
3047 /* all gnilnd threads need to run fairly urgently */
3048 set_user_nice(current, *kgnilnd_tunables.kgn_nice);
3049 spin_lock(&kgnilnd_data.kgn_reaper_lock);
3051 while (!kgnilnd_data.kgn_shutdown) {
3052 /* I wake up every 'p' seconds to check for timeouts on some
3053 * more peers. I try to check every connection 'n' times
3054 * within the global minimum of all keepalive and timeout
3055 * intervals, to ensure I attend to every connection within
3056 * (n+1)/n times its timeout intervals. */
3057 const int p = GNILND_REAPER_THREAD_WAKE;
3058 const int n = GNILND_REAPER_NCHECKS;
3060 /* to quiesce or to not quiesce, that is the question */
3061 if (unlikely(kgnilnd_data.kgn_quiesce_trigger)) {
3062 spin_unlock(&kgnilnd_data.kgn_reaper_lock);
3063 KGNILND_SPIN_QUIESCE;
3064 spin_lock(&kgnilnd_data.kgn_reaper_lock);
3067 /* careful with the jiffy wrap... */
3068 timeout = (long)(next_check_time - jiffies);
3071 prepare_to_wait(&kgnilnd_data.kgn_reaper_waitq, &wait,
3072 TASK_INTERRUPTIBLE);
3073 spin_unlock(&kgnilnd_data.kgn_reaper_lock);
3074 setup_timer(&timer, kgnilnd_reaper_poke_with_stick,
3076 mod_timer(&timer, (long) jiffies + timeout);
3078 /* check flag variables before committing */
3079 if (!kgnilnd_data.kgn_shutdown &&
3080 !kgnilnd_data.kgn_quiesce_trigger) {
3081 CDEBUG(D_INFO, "schedule timeout %ld (%lu sec)\n",
3082 timeout, cfs_duration_sec(timeout));
3084 CDEBUG(D_INFO, "awake after schedule\n");
3087 del_singleshot_timer_sync(&timer);
3088 spin_lock(&kgnilnd_data.kgn_reaper_lock);
3089 finish_wait(&kgnilnd_data.kgn_reaper_waitq, &wait);
3093 /* new_min_timeout is set from the conn timeouts and keepalive;
3094 * this should end up with a min timeout of
3095 * GNILND_TIMEOUT2KEEPALIVE(t) or roughly LND_TIMEOUT/2 */
3096 if (kgnilnd_data.kgn_new_min_timeout < current_min_timeout) {
3097 current_min_timeout = kgnilnd_data.kgn_new_min_timeout;
3098 CDEBUG(D_NET, "Set new min timeout %ld\n",
3099 current_min_timeout);
3102 spin_unlock(&kgnilnd_data.kgn_reaper_lock);
3104 /* Compute how many table entries to check now so I get round
3105 * the whole table fast enough given that I do this at fixed
3106 * intervals of 'p' seconds */
3107 chunk = *kgnilnd_tunables.kgn_peer_hash_size;
3108 if (kgnilnd_data.kgn_new_min_timeout > n * p)
3109 chunk = (chunk * n * p) /
3110 kgnilnd_data.kgn_new_min_timeout;
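/*
 * Illustrative arithmetic (hypothetical numbers, assuming p = 1 and n = 4 for
 * the sake of the example): with a peer hash of 1024 entries and a 60 second
 * minimum timeout, chunk = 1024 * 4 * 1 / 60 ~= 68 buckets per wakeup, which
 * walks the whole table roughly 'n' times per timeout interval as the comment
 * above intends.
 */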
3113 for (i = 0; i < chunk; i++) {
3114 kgnilnd_reaper_check(hash_index);
3115 hash_index = (hash_index + 1) %
3116 *kgnilnd_tunables.kgn_peer_hash_size;
3118 next_check_time = (long) jiffies + cfs_time_seconds(p);
3119 CDEBUG(D_INFO, "next check at %lu or in %d sec\n", next_check_time, p);
3121 spin_lock(&kgnilnd_data.kgn_reaper_lock);
3124 spin_unlock(&kgnilnd_data.kgn_reaper_lock);
3126 kgnilnd_thread_fini();
3131 kgnilnd_recv_bte_get(kgn_tx_t *tx) {
3132 unsigned niov, offset, nob;
3134 lnet_msg_t *lntmsg = tx->tx_lntmsg[0];
3135 kgnilnd_parse_lnet_rdma(lntmsg, &niov, &offset, &nob, &kiov, tx->tx_nob_rdma);
3138 lnet_copy_flat2kiov(
3141 tx->tx_buffer_copy + tx->tx_offset, 0, nob);
3143 memcpy(tx->tx_buffer, tx->tx_buffer_copy + tx->tx_offset, nob);
3150 kgnilnd_check_rdma_cq(kgn_device_t *dev)
3153 gni_post_descriptor_t *desc;
3155 kgn_tx_ev_id_t ev_id;
3157 int should_retry, rc;
3158 long num_processed = 0;
3159 kgn_conn_t *conn = NULL;
3160 kgn_tx_t *tx = NULL;
3161 kgn_rdma_desc_t *rdesc;
3166 /* make sure we don't keep looping if we need to reset */
3167 if (unlikely(kgnilnd_data.kgn_quiesce_trigger)) {
3168 return num_processed;
3170 rc = kgnilnd_mutex_trylock(&dev->gnd_cq_mutex);
3172 /* we didn't get the mutex, so return that there is still work
3176 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_DELAY_RDMA)) {
3177 /* a bit gross - but we need a good way to test for
3178 * delayed RDMA completions and the easiest way to do
3179 * that is to delay the RDMA CQ events */
3180 rrc = GNI_RC_NOT_DONE;
3182 rrc = kgnilnd_cq_get_event(dev->gnd_snd_rdma_cqh, &event_data);
3185 if (rrc == GNI_RC_NOT_DONE) {
3186 kgnilnd_gl_mutex_unlock(&dev->gnd_cq_mutex);
3187 CDEBUG(D_INFO, "SEND RDMA CQ %d empty processed %ld\n",
3188 dev->gnd_id, num_processed);
3189 return num_processed;
3191 dev->gnd_sched_alive = jiffies;
3194 LASSERTF(!GNI_CQ_OVERRUN(event_data),
3195 "this is bad, somehow our credits didn't protect us"
3196 " from CQ overrun\n");
3197 LASSERTF(GNI_CQ_GET_TYPE(event_data) == GNI_CQ_EVENT_TYPE_POST,
3198 "rrc %d, GNI_CQ_GET_TYPE(%#llx) = %#llx\n", rrc,
3199 event_data, GNI_CQ_GET_TYPE(event_data));
3201 rrc = kgnilnd_get_completed(dev->gnd_snd_rdma_cqh, event_data,
3203 kgnilnd_gl_mutex_unlock(&dev->gnd_cq_mutex);
3205 /* XXX Nic: Need better error handling here... */
3206 LASSERTF((rrc == GNI_RC_SUCCESS) ||
3207 (rrc == GNI_RC_TRANSACTION_ERROR),
3210 ev_id.txe_cookie = desc->post_id;
3212 kgnilnd_validate_tx_ev_id(&ev_id, &tx, &conn);
3214 if (conn == NULL || tx == NULL) {
3215 /* either conn or tx was already nuked and this is a "late"
3216 * completion, so drop it */
3220 GNITX_ASSERTF(tx, tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE ||
3221 tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE ||
3222 tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE_REV ||
3223 tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE_REV,
3224 "tx %p with type %d\n", tx, tx->tx_msg.gnm_type);
3226 GNIDBG_TX(D_NET, tx, "RDMA completion for %d bytes", tx->tx_nob);
3228 if (tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE_REV) {
3229 lnet_set_reply_msg_len(NULL, tx->tx_lntmsg[1],
3230 tx->tx_msg.gnm_u.completion.gncm_retval);
3234 if (tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE_REV && desc->status == GNI_RC_SUCCESS) {
3235 if (tx->tx_buffer_copy != NULL)
3236 kgnilnd_recv_bte_get(tx);
3237 rc = kgnilnd_verify_rdma_cksum(tx, tx->tx_putinfo.gnpam_payload_cksum, tx->tx_nob_rdma);
3240 if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE_REV && desc->status == GNI_RC_SUCCESS) {
3241 if (tx->tx_buffer_copy != NULL)
3242 kgnilnd_recv_bte_get(tx);
3243 rc = kgnilnd_verify_rdma_cksum(tx, tx->tx_getinfo.gngm_payload_cksum, tx->tx_nob_rdma);
3246 /* remove from rdmaq */
3247 kgnilnd_conn_mutex_lock(&conn->gnc_rdma_mutex);
3248 spin_lock(&conn->gnc_list_lock);
3249 kgnilnd_tx_del_state_locked(tx, NULL, conn, GNILND_TX_ALLOCD);
3250 spin_unlock(&conn->gnc_list_lock);
3251 kgnilnd_conn_mutex_unlock(&conn->gnc_rdma_mutex);
3253 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_RDMA_CQ_ERROR)) {
3254 event_data = 1LL << 48;
3258 if (likely(desc->status == GNI_RC_SUCCESS) && rc == 0) {
3259 atomic_inc(&dev->gnd_rdma_ntx);
3260 atomic64_add(tx->tx_nob, &dev->gnd_rdma_txbytes);
3261 /* transaction succeeded, add into fmaq */
3262 kgnilnd_queue_tx(conn, tx);
3263 kgnilnd_peer_alive(conn->gnc_peer);
3265 /* drop ref from kgnilnd_validate_tx_ev_id */
3266 kgnilnd_admin_decref(conn->gnc_tx_in_use);
3267 kgnilnd_conn_decref(conn);
3272 /* fall through to the TRANSACTION_ERROR case */
3275 /* get stringified version for log messages */
3276 kgnilnd_cq_error_str(event_data, &err_str, 256);
3277 kgnilnd_cq_error_recoverable(event_data, &should_retry);
3279 /* make sure we are not off in the weeds with this tx */
3280 if (tx->tx_retrans >
3281 *kgnilnd_tunables.kgn_max_retransmits) {
3282 GNIDBG_TX(D_NETERROR, tx,
3283 "giving up on TX, too many retries", NULL);
3287 GNIDBG_TX(D_NETERROR, tx, "RDMA %s error (%s)",
3288 should_retry ? "transient" : "unrecoverable", err_str);
3290 if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE ||
3291 tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE_REV) {
3292 rdesc = &tx->tx_putinfo.gnpam_desc;
3293 rnob = tx->tx_putinfo.gnpam_desc.gnrd_nob;
3294 rcookie = tx->tx_putinfo.gnpam_dst_cookie;
3296 rdesc = &tx->tx_getinfo.gngm_desc;
3297 rnob = tx->tx_lntmsg[0]->msg_len;
3298 rcookie = tx->tx_getinfo.gngm_cookie;
3303 tx->tx_msg.gnm_type,
3307 kgnilnd_nak_rdma(conn,
3308 tx->tx_msg.gnm_type,
3311 tx->tx_msg.gnm_srcnid);
3312 kgnilnd_tx_done(tx, -GNILND_NOPURG);
3313 kgnilnd_close_conn(conn, -ECOMM);
3316 /* drop ref from kgnilnd_validate_tx_ev_id */
3317 kgnilnd_admin_decref(conn->gnc_tx_in_use);
3318 kgnilnd_conn_decref(conn);
3323 kgnilnd_check_fma_send_cq(kgn_device_t *dev)
3327 kgn_tx_ev_id_t ev_id;
3328 kgn_tx_t *tx = NULL;
3329 kgn_conn_t *conn = NULL;
3330 int queued_fma, saw_reply, rc;
3331 long num_processed = 0;
3332 struct list_head *ctmp, *ctmpN;
3335 /* make sure we don't keep looping if we need to reset */
3336 if (unlikely(kgnilnd_data.kgn_quiesce_trigger)) {
3337 return num_processed;
3340 rc = kgnilnd_mutex_trylock(&dev->gnd_cq_mutex);
3342 /* we didn't get the mutex, so return that there is still work
3347 rrc = kgnilnd_cq_get_event(dev->gnd_snd_fma_cqh, &event_data);
3348 kgnilnd_gl_mutex_unlock(&dev->gnd_cq_mutex);
3350 if (rrc == GNI_RC_NOT_DONE) {
3352 "SMSG send CQ %d not ready (data %#llx) "
3353 "processed %ld\n", dev->gnd_id, event_data,
3356 if (num_processed > 0) {
3357 spin_lock(&dev->gnd_lock);
3358 if (!list_empty(&dev->gnd_delay_conns)) {
3359 list_for_each_safe(ctmp, ctmpN, &dev->gnd_delay_conns) {
3360 conn = list_entry(ctmp, kgn_conn_t, gnc_delaylist);
3361 list_del_init(&conn->gnc_delaylist);
3362 CDEBUG(D_NET, "Moving Conn %p from delay queue to ready_queue\n", conn);
3363 kgnilnd_schedule_conn_nolock(conn);
3365 spin_unlock(&dev->gnd_lock);
3366 kgnilnd_schedule_device(dev);
3368 spin_unlock(&dev->gnd_lock);
3371 return num_processed;
3374 dev->gnd_sched_alive = jiffies;
3377 LASSERTF(!GNI_CQ_OVERRUN(event_data),
3378 "this is bad, somehow our credits didn't "
3379 "protect us from CQ overrun\n");
3380 LASSERTF(GNI_CQ_GET_TYPE(event_data) == GNI_CQ_EVENT_TYPE_SMSG,
3381 "rrc %d, GNI_CQ_GET_TYPE(%#llx) = %#llx\n", rrc,
3382 event_data, GNI_CQ_GET_TYPE(event_data));
3384 /* if SMSG couldn't handle an error, time for conn to die */
3385 if (unlikely(rrc == GNI_RC_TRANSACTION_ERROR)) {
3388 /* need to take the write_lock to ensure atomicity
3389 * on the conn state if we need to close it */
3390 write_lock(&kgnilnd_data.kgn_peer_conn_lock);
3391 conn = kgnilnd_cqid2conn_locked(GNI_CQ_GET_INST_ID(event_data));
3393 /* Conn was destroyed? */
3395 "SMSG CQID lookup %#llx failed\n",
3396 GNI_CQ_GET_INST_ID(event_data));
3397 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
3401 kgnilnd_cq_error_str(event_data, &err_str, 256);
3402 CNETERR("SMSG send error to %s: rc %d (%s)\n",
3403 libcfs_nid2str(conn->gnc_peer->gnp_nid),
3405 kgnilnd_close_conn_locked(conn, -ECOMM);
3407 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
3409 /* no need to process rest of this tx -
3410 * it is getting canceled */
3414 /* fall through to GNI_RC_SUCCESS case */
3415 ev_id.txe_smsg_id = GNI_CQ_GET_MSG_ID(event_data);
3417 kgnilnd_validate_tx_ev_id(&ev_id, &tx, &conn);
3418 if (conn == NULL || tx == NULL) {
3419 /* either conn or tx was already nuked and this is a "late"
3420 * completion, so drop it */
3424 tx->tx_conn->gnc_last_tx_cq = jiffies;
3425 if (tx->tx_msg.gnm_type == GNILND_MSG_NOOP) {
3426 set_mb(conn->gnc_last_noop_cq, jiffies);
3429 /* lock tx_list_state and tx_state */
3430 kgnilnd_conn_mutex_lock(&conn->gnc_smsg_mutex);
3431 spin_lock(&tx->tx_conn->gnc_list_lock);
3433 GNITX_ASSERTF(tx, tx->tx_list_state == GNILND_TX_LIVE_FMAQ,
3434 "state not GNILND_TX_LIVE_FMAQ", NULL);
3435 GNITX_ASSERTF(tx, tx->tx_state & GNILND_TX_WAITING_COMPLETION,
3436 "not waiting for completion", NULL);
3438 GNIDBG_TX(D_NET, tx, "SMSG complete tx_state %x rc %d",
3441 tx->tx_state &= ~GNILND_TX_WAITING_COMPLETION;
3443 /* This will trigger other FMA sends that were
3444 * pending this completion */
3445 queued_fma = !list_empty(&tx->tx_conn->gnc_fmaq);
3447 /* we either did not expect a reply or we already got it */
3448 saw_reply = !(tx->tx_state & GNILND_TX_WAITING_REPLY);
3450 spin_unlock(&tx->tx_conn->gnc_list_lock);
3451 kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
3454 CDEBUG(D_NET, "scheduling conn 0x%p->%s for fmaq\n",
3456 libcfs_nid2str(conn->gnc_peer->gnp_nid));
3457 kgnilnd_schedule_conn(conn);
3460 /* If saw_reply is false, then as soon as gnc_list_lock is dropped the tx could be nuked.
3461 * If saw_reply is true, we know that the tx is safe to use as the other thread
3462 * is already finished with it.
3466 /* no longer need to track on the live_fmaq */
3467 kgnilnd_tx_del_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_ALLOCD);
3469 if (tx->tx_state & GNILND_TX_PENDING_RDMA) {
3470 /* we already got reply & were waiting for
3471 * completion of initial send */
3472 /* to initiate RDMA transaction */
3473 GNIDBG_TX(D_NET, tx,
3474 "Pending RDMA 0x%p type 0x%02x",
3475 tx->tx_msg.gnm_type);
3476 tx->tx_state &= ~GNILND_TX_PENDING_RDMA;
3477 rc = kgnilnd_send_mapped_tx(tx, 0);
3478 GNITX_ASSERTF(tx, rc == 0, "RDMA send failed: %d\n", rc);
3480 /* we are done with this tx */
3481 GNIDBG_TX(D_NET, tx,
3482 "Done with tx type 0x%02x",
3483 tx->tx_msg.gnm_type);
3484 kgnilnd_tx_done(tx, tx->tx_rc);
3488 /* drop ref from kgnilnd_validate_tx_ev_id */
3489 kgnilnd_admin_decref(conn->gnc_tx_in_use);
3490 kgnilnd_conn_decref(conn);
3492 /* if we are waiting for a REPLY, we'll handle the tx then */
3493 } /* end for loop */
3497 kgnilnd_check_fma_rcv_cq(kgn_device_t *dev)
3502 long num_processed = 0;
3503 struct list_head *conns;
3504 struct list_head *tmp;
3508 /* make sure we don't keep looping if we need to reset */
3509 if (unlikely(kgnilnd_data.kgn_quiesce_trigger)) {
3510 return num_processed;
3513 rc = kgnilnd_mutex_trylock(&dev->gnd_cq_mutex);
3515 /* we didn't get the mutex, so return that there is still work
3519 rrc = kgnilnd_cq_get_event(dev->gnd_rcv_fma_cqh, &event_data);
3520 kgnilnd_gl_mutex_unlock(&dev->gnd_cq_mutex);
3522 if (rrc == GNI_RC_NOT_DONE) {
3523 CDEBUG(D_INFO, "SMSG RX CQ %d empty data %#llx "
3525 dev->gnd_id, event_data, num_processed);
3526 return num_processed;
3528 dev->gnd_sched_alive = jiffies;
3531 /* this is the only CQ that can really handle transient
3533 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_CQ_GET_EVENT)) {
3534 rrc = cfs_fail_val ? cfs_fail_val
3535 : GNI_RC_ERROR_RESOURCE;
3536 if (rrc == GNI_RC_ERROR_RESOURCE) {
3537 /* set overrun too */
3538 event_data |= (1UL << 63);
3539 LASSERTF(GNI_CQ_OVERRUN(event_data),
3540 "(1UL << 63) is no longer the bit to"
3541 "set to indicate CQ_OVERRUN\n");
3544 /* sender should get error event too and take care
3545 * of failed transaction by re-transmitting */
3546 if (rrc == GNI_RC_TRANSACTION_ERROR) {
3547 CDEBUG(D_NET, "SMSG RX CQ error %#llx\n", event_data);
3551 if (likely(!GNI_CQ_OVERRUN(event_data))) {
3552 read_lock(&kgnilnd_data.kgn_peer_conn_lock);
3553 conn = kgnilnd_cqid2conn_locked(
3554 GNI_CQ_GET_INST_ID(event_data));
3556 CDEBUG(D_NET, "SMSG RX CQID lookup %llu "
3557 "failed, dropping event %#llx\n",
3558 GNI_CQ_GET_INST_ID(event_data),
3561 CDEBUG(D_NET, "SMSG RX: CQID %llu "
3563 GNI_CQ_GET_INST_ID(event_data),
3564 conn, conn->gnc_peer ?
3565 libcfs_nid2str(conn->gnc_peer->gnp_nid) :
3568 conn->gnc_last_rx_cq = jiffies;
3570 /* stash first rx so we can clear out purgatory.
3572 if (conn->gnc_first_rx == 0) {
3573 conn->gnc_first_rx = jiffies;
3575 kgnilnd_peer_alive(conn->gnc_peer);
3576 kgnilnd_schedule_conn(conn);
3578 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
3582 /* FMA CQ has overflowed: check ALL conns */
3583 CNETERR("SMSG RX CQ overflow: scheduling ALL "
3584 "conns on device %d\n", dev->gnd_id);
3586 for (rc = 0; rc < *kgnilnd_tunables.kgn_peer_hash_size; rc++) {
3588 read_lock(&kgnilnd_data.kgn_peer_conn_lock);
3589 conns = &kgnilnd_data.kgn_conns[rc];
3591 list_for_each(tmp, conns) {
3592 conn = list_entry(tmp, kgn_conn_t,
3595 if (conn->gnc_device == dev) {
3596 kgnilnd_schedule_conn(conn);
3597 conn->gnc_last_rx_cq = jiffies;
3601 /* don't block write lockers for too long... */
3602 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
3607 /* try_map_if_full should only be used when processing TX from list of
3608 * backlog TX waiting on mappings to free up
3611 * try_map_if_full = 0: 0 (sent or queued), (-|+)errno failure of kgnilnd_sendmsg
3612 * try_map_if_full = 1: 0 (sent), -ENOMEM for caller to requeue, (-|+)errno failure of kgnilnd_sendmsg */
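/*
 * Hypothetical caller sketch (illustration only, not taken from this file):
 *
 *	rc = kgnilnd_send_mapped_tx(tx, 1);
 *	if (rc == -ENOMEM)
 *		requeue tx on the backlog and retry once mappings free up;
 *	else if (rc < 0)
 *		treat it as a hard kgnilnd_sendmsg failure;
 */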
3615 kgnilnd_send_mapped_tx(kgn_tx_t *tx, int try_map_if_full)
3617 /* slight bit of a race if multiple people are calling, but at worst we'll have
3618 * the order altered just a bit... which would not be deterministic anyway */
3619 int rc = atomic_read(&tx->tx_conn->gnc_device->gnd_nq_map);
3621 GNIDBG_TX(D_NET, tx, "try %d nq_map %d", try_map_if_full, rc);
3623 /* We know that we have a GART reservation that should guarantee forward progress.
3624 * This means we don't need to make any extraordinary efforts if we are failing
3625 * mappings here - even if we are holding a very small number of these. */
3627 if (try_map_if_full || (rc == 0)) {
3628 rc = kgnilnd_map_buffer(tx);
3631 /* rc should be 0 if we mapped successfully here; if non-zero,
3632 * we are queueing */
3634 /* if try_map_if_full set, they handle requeuing */
3635 if (unlikely(try_map_if_full)) {
3638 spin_lock(&tx->tx_conn->gnc_device->gnd_lock);
3639 kgnilnd_tx_add_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_MAPQ, 1);
3640 spin_unlock(&tx->tx_conn->gnc_device->gnd_lock);
3641 /* make sure we wake up sched to run this */
3642 kgnilnd_schedule_device(tx->tx_conn->gnc_device);
3643 /* return 0 as this is now queued for later sending */
3648 switch (tx->tx_msg.gnm_type) {
3652 /* GET_REQ and PUT_ACK are outbound messages sending our mapping key to the
3653 * remote node where the RDMA will be started.
3654 * Special case -EAGAIN logic - this should just be queued as if the mapping couldn't
3655 * be satisfied. The rest of the errors are "hard" errors that require the
3656 * upper layers to handle themselves.
3657 * If kgnilnd_post_rdma returns a resource error, kgnilnd_rdma will put
3658 * the tx back on the TX_MAPQ. When this tx is pulled back off the MAPQ,
3659 * its gnm_type will now be GNILND_MSG_PUT_DONE or
3660 * GNILND_MSG_GET_DONE_REV.
3662 case GNILND_MSG_GET_REQ:
3663 tx->tx_msg.gnm_u.get.gngm_desc.gnrd_key = tx->tx_map_key;
3664 tx->tx_msg.gnm_u.get.gngm_cookie = tx->tx_id.txe_cookie;
3665 tx->tx_msg.gnm_u.get.gngm_desc.gnrd_addr = (__u64)((unsigned long)tx->tx_buffer);
3666 tx->tx_msg.gnm_u.get.gngm_desc.gnrd_nob = tx->tx_nob;
3667 tx->tx_state = GNILND_TX_WAITING_COMPLETION | GNILND_TX_WAITING_REPLY;
3668 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_GET_REQ_AGAIN)) {
3669 tx->tx_state |= GNILND_TX_FAIL_SMSG;
3671 /* redirect to FMAQ on failure, no need to infinite loop here in MAPQ */
3672 rc = kgnilnd_sendmsg(tx, NULL, 0, &tx->tx_conn->gnc_list_lock, GNILND_TX_FMAQ);
3674 case GNILND_MSG_PUT_ACK:
3675 tx->tx_msg.gnm_u.putack.gnpam_desc.gnrd_key = tx->tx_map_key;
3676 tx->tx_state = GNILND_TX_WAITING_COMPLETION | GNILND_TX_WAITING_REPLY;
3677 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_PUT_ACK_AGAIN)) {
3678 tx->tx_state |= GNILND_TX_FAIL_SMSG;
3680 /* redirect to FMAQ on failure, no need to infinite loop here in MAPQ */
3681 rc = kgnilnd_sendmsg(tx, NULL, 0, &tx->tx_conn->gnc_list_lock, GNILND_TX_FMAQ);
3684 /* PUT_REQ and GET_DONE are where we do the actual RDMA */
3685 case GNILND_MSG_PUT_DONE:
3686 case GNILND_MSG_PUT_REQ:
3687 rc = kgnilnd_rdma(tx, GNILND_MSG_PUT_DONE,
3688 &tx->tx_putinfo.gnpam_desc,
3689 tx->tx_putinfo.gnpam_desc.gnrd_nob,
3690 tx->tx_putinfo.gnpam_dst_cookie);
3691 RETURN(try_map_if_full ? rc : 0);
3693 case GNILND_MSG_GET_DONE:
3694 rc = kgnilnd_rdma(tx, GNILND_MSG_GET_DONE,
3695 &tx->tx_getinfo.gngm_desc,
3696 tx->tx_lntmsg[0]->msg_len,
3697 tx->tx_getinfo.gngm_cookie);
3698 RETURN(try_map_if_full ? rc : 0);
3700 case GNILND_MSG_PUT_REQ_REV:
3701 tx->tx_msg.gnm_u.get.gngm_desc.gnrd_key = tx->tx_map_key;
3702 tx->tx_msg.gnm_u.get.gngm_cookie = tx->tx_id.txe_cookie;
3703 tx->tx_msg.gnm_u.get.gngm_desc.gnrd_addr = (__u64)((unsigned long)tx->tx_buffer);
3704 tx->tx_msg.gnm_u.get.gngm_desc.gnrd_nob = tx->tx_nob;
3705 tx->tx_state = GNILND_TX_WAITING_COMPLETION | GNILND_TX_WAITING_REPLY;
3706 kgnilnd_compute_rdma_cksum(tx, tx->tx_nob);
3707 tx->tx_msg.gnm_u.get.gngm_payload_cksum = tx->tx_msg.gnm_payload_cksum;
3709 rc = kgnilnd_sendmsg(tx, NULL, 0, &tx->tx_conn->gnc_list_lock, GNILND_TX_FMAQ);
3711 case GNILND_MSG_PUT_DONE_REV:
3712 rc = kgnilnd_rdma(tx, GNILND_MSG_PUT_DONE_REV,
3713 &tx->tx_getinfo.gngm_desc,
3715 tx->tx_getinfo.gngm_cookie);
3716 RETURN(try_map_if_full ? rc : 0);
3718 case GNILND_MSG_GET_ACK_REV:
3719 tx->tx_msg.gnm_u.putack.gnpam_desc.gnrd_key = tx->tx_map_key;
3720 tx->tx_state = GNILND_TX_WAITING_COMPLETION | GNILND_TX_WAITING_REPLY;
3721 /* LNET_GETS are a special case for parse */
3722 kgnilnd_compute_rdma_cksum(tx, tx->tx_msg.gnm_u.putack.gnpam_desc.gnrd_nob);
3723 tx->tx_msg.gnm_u.putack.gnpam_payload_cksum = tx->tx_msg.gnm_payload_cksum;
3725 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_PUT_ACK_AGAIN))
3726 tx->tx_state |= GNILND_TX_FAIL_SMSG;
3728 /* redirect to FMAQ on failure, no need to infinite loop here in MAPQ */
3729 rc = kgnilnd_sendmsg(tx, NULL, 0, &tx->tx_conn->gnc_list_lock, GNILND_TX_FMAQ);
3731 case GNILND_MSG_GET_DONE_REV:
3732 case GNILND_MSG_GET_REQ_REV:
3733 rc = kgnilnd_rdma(tx, GNILND_MSG_GET_DONE_REV,
3734 &tx->tx_putinfo.gnpam_desc,
3735 tx->tx_putinfo.gnpam_desc.gnrd_nob,
3736 tx->tx_putinfo.gnpam_dst_cookie);
3737 RETURN(try_map_if_full ? rc : 0);
3745 kgnilnd_process_fmaq(kgn_conn_t *conn)
3748 kgn_tx_t *tx = NULL;
3749 void *buffer = NULL;
3750 unsigned int nob = 0;
3753 /* NB 1. kgnilnd_sendmsg() may fail if I'm out of credits right now.
3754 * However I will be rescheduled by an FMA completion event
3755 * when I eventually get some.
3756 * NB 2. Sampling gnc_state here races with setting it elsewhere.
3757 * But it doesn't matter if I try to send a "real" message just
3758 * as I start closing because I'll get scheduled to send the
3761 /* Short circuit if the ep_handle is NULL; we can't send anyway. */
3762 if (conn->gnc_ephandle == NULL)
3765 LASSERTF(!conn->gnc_close_sent, "Conn %p close was sent\n", conn);
3767 spin_lock(&conn->gnc_list_lock);
3769 if (list_empty(&conn->gnc_fmaq)) {
3770 int keepalive = GNILND_TO2KA(conn->gnc_timeout);
3772 spin_unlock(&conn->gnc_list_lock);
3774 if (time_after_eq(jiffies, conn->gnc_last_tx + cfs_time_seconds(keepalive))) {
3775 CDEBUG(D_NET, "sending NOOP -> %s (%p idle %lu(%d)) "
3776 "last %lu/%lu/%lu %lus/%lus/%lus\n",
3777 libcfs_nid2str(conn->gnc_peer->gnp_nid), conn,
3778 cfs_duration_sec(jiffies - conn->gnc_last_tx),
3780 conn->gnc_last_noop_want, conn->gnc_last_noop_sent,
3781 conn->gnc_last_noop_cq,
3782 cfs_duration_sec(jiffies - conn->gnc_last_noop_want),
3783 cfs_duration_sec(jiffies - conn->gnc_last_noop_sent),
3784 cfs_duration_sec(jiffies - conn->gnc_last_noop_cq));
3785 atomic_inc(&conn->gnc_sched_noop);
3786 set_mb(conn->gnc_last_noop_want, jiffies);
3788 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_NOOP_SEND))
3791 tx = kgnilnd_new_tx_msg(GNILND_MSG_NOOP, conn->gnc_peer->gnp_net->gnn_ni->ni_nid);
3795 rc = kgnilnd_set_tx_id(tx, conn);
3797 kgnilnd_tx_done(tx, rc);
3803 tx = list_first_entry(&conn->gnc_fmaq, kgn_tx_t, tx_list);
3804 /* move from fmaq to allocd, kgnilnd_sendmsg will move to live_fmaq */
3805 kgnilnd_tx_del_state_locked(tx, NULL, conn, GNILND_TX_ALLOCD);
3806 more_to_do = !list_empty(&conn->gnc_fmaq);
3807 spin_unlock(&conn->gnc_list_lock);
3810 /* if there is no real TX or no NOOP to send, bail */
3815 if (!tx->tx_retrans)
3816 tx->tx_cred_wait = jiffies;
3818 GNITX_ASSERTF(tx, tx->tx_id.txe_smsg_id != 0,
3819 "tx with zero id", NULL);
3821 CDEBUG(D_NET, "sending regular msg: %p, type %s(0x%02x), cookie %#llx\n",
3822 tx, kgnilnd_msgtype2str(tx->tx_msg.gnm_type),
3823 tx->tx_msg.gnm_type, tx->tx_id.txe_cookie);
3827 switch (tx->tx_msg.gnm_type) {
3831 case GNILND_MSG_NOOP:
3832 case GNILND_MSG_CLOSE:
3833 case GNILND_MSG_IMMEDIATE:
3834 tx->tx_state = GNILND_TX_WAITING_COMPLETION;
3835 buffer = tx->tx_buffer;
3839 case GNILND_MSG_GET_DONE:
3840 case GNILND_MSG_PUT_DONE:
3841 case GNILND_MSG_PUT_DONE_REV:
3842 case GNILND_MSG_GET_DONE_REV:
3843 case GNILND_MSG_PUT_NAK:
3844 case GNILND_MSG_GET_NAK:
3845 case GNILND_MSG_GET_NAK_REV:
3846 case GNILND_MSG_PUT_NAK_REV:
3847 tx->tx_state = GNILND_TX_WAITING_COMPLETION;
3850 case GNILND_MSG_PUT_REQ:
3851 case GNILND_MSG_GET_REQ_REV:
3852 tx->tx_msg.gnm_u.putreq.gnprm_cookie = tx->tx_id.txe_cookie;
3854 case GNILND_MSG_PUT_ACK:
3855 case GNILND_MSG_PUT_REQ_REV:
3856 case GNILND_MSG_GET_ACK_REV:
3857 case GNILND_MSG_GET_REQ:
3858 /* This is really only to handle the retransmit of SMSG once these
3859 * two messages are set up in send_mapped_tx */
3860 tx->tx_state = GNILND_TX_WAITING_COMPLETION | GNILND_TX_WAITING_REPLY;
3864 if (likely(rc == 0)) {
3865 rc = kgnilnd_sendmsg(tx, buffer, nob, &conn->gnc_list_lock, GNILND_TX_FMAQ);
3869 /* don't explicitly reschedule here - we are short on credits and will rely on
3870 * kgnilnd_sendmsg to resched the conn if need be */
3872 } else if (rc < 0) {
3873 /* bail: it wasn't sent and we didn't get EAGAIN indicating we should retrans -
3874 * almost certainly a software bug, but let's play nice with the other kids */
3875 kgnilnd_tx_done(tx, rc);
3876 /* just for fun, kick the peer in the arse - resetting the conn might help to correct
3877 * this return code, which was almost certainly caused by buggy software */
3878 kgnilnd_close_conn(conn, rc);
3882 CDEBUG(D_NET, "Rescheduling %p (more to do)\n", conn);
3883 kgnilnd_schedule_conn(conn);
3888 kgnilnd_process_rdmaq(kgn_device_t *dev)
3893 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_DELAY_RDMAQ)) {
3897 if (time_after_eq(jiffies, dev->gnd_rdmaq_deadline)) {
3898 unsigned long dead_bump;
3901 /* if we think we need to adjust, take lock to serialize and recheck */
3902 spin_lock(&dev->gnd_rdmaq_lock);
3903 if (time_after_eq(jiffies, dev->gnd_rdmaq_deadline)) {
3904 del_singleshot_timer_sync(&dev->gnd_rdmaq_timer);
3906 dead_bump = cfs_time_seconds(1) / *kgnilnd_tunables.kgn_rdmaq_intervals;
3908 /* roll the bucket forward */
3909 dev->gnd_rdmaq_deadline = jiffies + dead_bump;
3911 if (kgnilnd_data.kgn_rdmaq_override &&
3912 (*kgnilnd_tunables.kgn_rdmaq_intervals != 0)) {
3913 new_ok = kgnilnd_data.kgn_rdmaq_override / *kgnilnd_tunables.kgn_rdmaq_intervals;
3918 /* roll current outstanding forward to make sure we carry outstanding
3919 * commitment forward
3920 * new_ok starts out as the whole interval value
3921 * - first subtract bytes_out from last interval, as that would push us over
3922 * strict limits for this interval
3923 * - second, set bytes_ok to new_ok to ensure it doesn't exceed the current auth
3925 * there is a small race here if someone is actively processing mappings and
3926 * adding to rdmaq_bytes_out, but it should be small as the mappings are triggered
3927 * quite quickly after kgnilnd_auth_rdma_bytes gives us the go-ahead
3928 * - if this gives us problems in the future, we could use a read/write lock
3929 * to protect the resetting of these values */
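/*
 * Worked example (hypothetical numbers): with an override budget of 800 MB per
 * second and kgn_rdmaq_intervals = 4, new_ok starts at 200 MB for this
 * interval; if 50 MB are still outstanding from the previous interval, the
 * two lines below leave gnd_rdmaq_bytes_ok at 150 MB, so the carried-over
 * traffic still counts against this interval's budget.
 */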
3930 new_ok -= atomic64_read(&dev->gnd_rdmaq_bytes_out);
3931 atomic64_set(&dev->gnd_rdmaq_bytes_ok, new_ok);
3933 CDEBUG(D_NET, "resetting rdmaq bytes to %ld, deadline +%lu -> %lu, "
3934 "current out %ld\n",
3935 atomic64_read(&dev->gnd_rdmaq_bytes_ok), dead_bump, dev->gnd_rdmaq_deadline,
3936 atomic64_read(&dev->gnd_rdmaq_bytes_out));
3938 spin_unlock(&dev->gnd_rdmaq_lock);
3941 spin_lock(&dev->gnd_rdmaq_lock);
3942 while (!list_empty(&dev->gnd_rdmaq)) {
3945 /* make sure we break out early on quiesce */
3946 if (unlikely(kgnilnd_data.kgn_quiesce_trigger)) {
3947 /* always break with lock held - we unlock outside loop */
3951 tx = list_first_entry(&dev->gnd_rdmaq, kgn_tx_t, tx_list);
3952 kgnilnd_tx_del_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_ALLOCD);
3955 /* sample with lock held, serializing with kgnilnd_complete_closed_conn */
3956 if (tx->tx_conn->gnc_state != GNILND_CONN_ESTABLISHED) {
3957 /* if conn is dying, mark tx in tx_ref_table for
3958 * kgnilnd_complete_closed_conn to finish up */
3959 kgnilnd_tx_add_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_DYING, 1);
3961 /* tx was moved to DYING, get next */
3964 spin_unlock(&dev->gnd_rdmaq_lock);
3966 rc = kgnilnd_auth_rdma_bytes(dev, tx);
3967 spin_lock(&dev->gnd_rdmaq_lock);
3970 /* no ticket! add back to head */
3971 kgnilnd_tx_add_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_RDMAQ, 0);
3972 /* clear found_work so scheduler threads wait for timer */
3976 /* TX is GO for launch */
3977 tx->tx_qtime = jiffies;
3978 kgnilnd_send_mapped_tx(tx, 0);
3982 spin_unlock(&dev->gnd_rdmaq_lock);
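/* Worked example of the interval accounting above (illustrative numbers only -
 * the real values come from the kgn_rdmaq_override and kgn_rdmaq_intervals
 * tunables): with an override of 4 MiB/s and 4 intervals per second, the
 * bucket rolls every quarter second and each roll authorizes
 * new_ok = 4 MiB / 4 = 1 MiB.  If 256 KiB of RDMA was still outstanding in
 * gnd_rdmaq_bytes_out when the deadline passed, new_ok is reduced to 768 KiB
 * so the committed bytes are carried into the new interval rather than
 * double-counted. */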
3988 kgnilnd_swab_rdma_desc(kgn_rdma_desc_t *d)
3990 __swab64s(&d->gnrd_key.qword1);
3991 __swab64s(&d->gnrd_key.qword2);
3992 __swab64s(&d->gnrd_addr);
3993 __swab32s(&d->gnrd_nob);
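/* kgnilnd_swab_rdma_desc() is only needed when the peer's gnm_magic arrives
 * byte-swapped; kgnilnd_check_fma_rx() below calls it for the message types
 * that carry an embedded kgn_rdma_desc_t (PUT_ACK/GET_ACK_REV and
 * GET_REQ/PUT_REQ_REV). */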
3996 #define kgnilnd_match_reply_either(w, x, y, z) _kgnilnd_match_reply(w, x, y, z)
3997 #define kgnilnd_match_reply(x, y, z) _kgnilnd_match_reply(x, y, GNILND_MSG_NONE, z)
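/* The reply-matching helpers below recover the original tx straight from the
 * cookie carried in the reply: the cookie is written through kgn_tx_ev_id_t,
 * whose txe_idx field overlays part of the cookie and indexes
 * gnc_tx_ref_table (as the lookup in _kgnilnd_match_reply() implies).
 * A minimal sketch of that lookup, illustration only:
 *
 *	kgn_tx_ev_id_t ev_id;
 *
 *	ev_id.txe_cookie = cookie;
 *	tx = conn->gnc_tx_ref_table[ev_id.txe_idx];
 *
 * kgnilnd_match_reply_either() is for replies that may legitimately answer
 * one of two request types, e.g. a PUT_NAK can match either the PUT_REQ or
 * the PUT_ACK (see the dispatch in kgnilnd_check_fma_rx()). */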
4000 _kgnilnd_match_reply(kgn_conn_t *conn, int type1, int type2, __u64 cookie)
4002 kgn_tx_ev_id_t ev_id;
4005 /* we use the cookie from the original TX, so we can find the match
4006 * by parsing that and using the txe_idx */
4007 ev_id.txe_cookie = cookie;
4009 tx = conn->gnc_tx_ref_table[ev_id.txe_idx];
4012 /* check tx to make sure kgni didn't eat it */
4013 GNITX_ASSERTF(tx, tx->tx_msg.gnm_magic == GNILND_MSG_MAGIC,
4014 "came back from kgni with bad magic %x\n", tx->tx_msg.gnm_magic);
4016 GNITX_ASSERTF(tx, ((tx->tx_id.txe_idx == ev_id.txe_idx) &&
4017 (tx->tx_id.txe_cookie == cookie)),
4018 "conn 0x%p->%s tx_ref_table hosed: wanted "
4019 "txe_cookie %#llx txe_idx %d "
4020 "found tx %p cookie %#llx txe_idx %d\n",
4021 conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
4022 cookie, ev_id.txe_idx,
4023 tx, tx->tx_id.txe_cookie, tx->tx_id.txe_idx);
4025 LASSERTF((((tx->tx_msg.gnm_type == type1) || (tx->tx_msg.gnm_type == type2)) &&
4026 (tx->tx_state & GNILND_TX_WAITING_REPLY)),
4027 "Unexpected TX type (%x, %x or %x) "
4028 "or state (%x, expected +%x) "
4029 "matched reply from %s\n",
4030 tx->tx_msg.gnm_type, type1, type2,
4031 tx->tx_state, GNILND_TX_WAITING_REPLY,
4032 libcfs_nid2str(conn->gnc_peer->gnp_nid));
4034 CWARN("Unmatched reply %02x, or %02x/%#llx from %s\n",
4035 type1, type2, cookie, libcfs_nid2str(conn->gnc_peer->gnp_nid));
4041 kgnilnd_complete_tx(kgn_tx_t *tx, int rc)
4044 kgn_conn_t *conn = tx->tx_conn;
4045 __u64 nob = tx->tx_nob;
4046 __u32 physnop = tx->tx_phys_npages;
4047 int id = tx->tx_id.txe_smsg_id;
4048 int buftype = tx->tx_buftype;
4049 gni_mem_handle_t hndl;
4050 hndl.qword1 = tx->tx_map_key.qword1;
4051 hndl.qword2 = tx->tx_map_key.qword2;
4053 spin_lock(&conn->gnc_list_lock);
4055 GNITX_ASSERTF(tx, tx->tx_state & GNILND_TX_WAITING_REPLY,
4056 "not waiting for reply", NULL);
4059 tx->tx_state &= ~GNILND_TX_WAITING_REPLY;
4061 if (rc == -EFAULT) {
4062 CDEBUG(D_NETERROR, "Error %d TX data: TX %p tx_id %x nob %16llu physnop %8d buffertype %#8x MemHandle %#llx.%#llx\n",
4063 rc, tx, id, nob, physnop, buftype, hndl.qword1, hndl.qword2);
4065 if (*kgnilnd_tunables.kgn_efault_lbug) {
4066 GNIDBG_TOMSG(D_NETERROR, &tx->tx_msg,
4067 "error %d on tx 0x%p->%s id %u/%d state %s age %ds",
4069 libcfs_nid2str(conn->gnc_peer->gnp_nid) : "<?>",
4070 tx->tx_id.txe_smsg_id, tx->tx_id.txe_idx,
4071 kgnilnd_tx_state2str(tx->tx_list_state),
4072 cfs_duration_sec((unsigned long) jiffies - tx->tx_qtime));
4077 if (!(tx->tx_state & GNILND_TX_WAITING_COMPLETION)) {
4078 kgnilnd_tx_del_state_locked(tx, NULL, conn, GNILND_TX_ALLOCD);
4079 /* sample under lock as follow on steps require gnc_list_lock
4080 * - or call kgnilnd_tx_done which requires no locks held over
4081 * call to lnet_finalize */
4084 spin_unlock(&conn->gnc_list_lock);
4087 kgnilnd_tx_done(tx, tx->tx_rc);
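/* kgnilnd_complete_tx() is the common finalization path for the *_NAK
 * replies dispatched from kgnilnd_check_fma_rx() below; for the *_DONE
 * cases kgnilnd_finalize_rx_done() layers the RDMA RX statistics and
 * checksum verification on top of it before completing the tx. */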
4092 kgnilnd_finalize_rx_done(kgn_tx_t *tx, kgn_msg_t *msg)
4095 kgn_conn_t *conn = tx->tx_conn;
4097 atomic_inc(&conn->gnc_device->gnd_rdma_nrx);
4098 atomic64_add(tx->tx_nob, &conn->gnc_device->gnd_rdma_rxbytes);
4100 /* the gncm_retval is passed in for PUTs */
4101 rc = kgnilnd_verify_rdma_cksum(tx, msg->gnm_payload_cksum,
4102 msg->gnm_u.completion.gncm_retval);
4104 kgnilnd_complete_tx(tx, rc);
4108 kgnilnd_check_fma_rx(kgn_conn_t *conn)
4116 kgn_peer_t *peer = conn->gnc_peer;
4119 __u16 tmp_cksum = 0, msg_cksum = 0;
4120 int repost = 1, saw_complete;
4121 unsigned long timestamp, newest_last_rx, timeout;
4125 /* Short circuit if the ep_handle is null.
4126 * It's likely that it's about to be closed as stale.
4128 if (conn->gnc_ephandle == NULL)
4131 timestamp = jiffies;
4132 kgnilnd_gl_mutex_lock(&conn->gnc_device->gnd_cq_mutex);
4133 /* delay in jiffies - we are really concerned only with things that
4134 * result in a schedule() or in holding this off for a long time.
4135 * NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
4136 conn->gnc_device->gnd_mutex_delay += (long) jiffies - timestamp;
4138 /* Resample current time as we have no idea how long it took to get the mutex */
4139 timestamp = jiffies;
4141 /* Check when we last received an rx before we call getnext, in case this
4142 * thread has been blocked for a while. If we haven't received an rx within
4143 * our timeout value we close the connection, since we have to assume the
4144 * other side has already closed it. This stops us from sending replies to
4145 * a mailbox that is already in purgatory.
4148 timeout = cfs_time_seconds(conn->gnc_timeout);
4149 newest_last_rx = GNILND_LASTRX(conn);
4151 /* Error injection to validate that the timestamp check works and closes the conn */
4152 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_RECV_TIMEOUT)) {
4153 timestamp = timestamp + (GNILND_TIMEOUTRX(timeout) * 2);
4156 if (time_after_eq(timestamp, newest_last_rx + (GNILND_TIMEOUTRX(timeout)))) {
4157 GNIDBG_CONN(D_NETERROR|D_CONSOLE, conn, "Can't receive from %s after timeout lapse of %lu; TO %lu",
4158 libcfs_nid2str(conn->gnc_peer->gnp_nid),
4159 cfs_duration_sec(timestamp - newest_last_rx),
4160 cfs_duration_sec(GNILND_TIMEOUTRX(timeout)));
4161 kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
4163 kgnilnd_close_conn(conn, rc);
4167 rrc = kgnilnd_smsg_getnext(conn->gnc_ephandle, &prefix);
4169 if (rrc == GNI_RC_NOT_DONE) {
4170 kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
4171 CDEBUG(D_INFO, "SMSG RX empty conn 0x%p\n", conn);
4175 /* Instead of asserting when we get mailbox corruption let's attempt to
4176 * close the conn and recover. We can put the conn/mailbox into
4177 * purgatory and let purgatory deal with the problem. If we see
4178 * this NETERROR reported on production systems in large numbers
4179 * we will need to revisit the state machine to see if we can tighten
4180 * it up further to improve data protection.
4183 if (rrc == GNI_RC_INVALID_STATE) {
4184 kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
4185 GNIDBG_CONN(D_NETERROR | D_CONSOLE, conn, "Mailbox corruption "
4186 "detected closing conn %p from peer %s\n", conn,
4187 libcfs_nid2str(conn->gnc_peer->gnp_nid));
4189 kgnilnd_close_conn(conn, rc);
4193 LASSERTF(rrc == GNI_RC_SUCCESS,
4194 "bad rc %d on conn %p from peer %s\n",
4195 rrc, conn, libcfs_nid2str(peer->gnp_nid));
4197 msg = (kgn_msg_t *)prefix;
4199 rx = kgnilnd_alloc_rx();
4201 kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
4202 kgnilnd_release_msg(conn);
4203 GNIDBG_MSG(D_NETERROR, msg, "Dropping SMSG RX from 0x%p->%s, no RX memory",
4204 conn, libcfs_nid2str(peer->gnp_nid));
4208 GNIDBG_MSG(D_INFO, msg, "SMSG RX on %p", conn);
4210 timestamp = conn->gnc_last_rx;
4211 seq = last_seq = atomic_read(&conn->gnc_rx_seq);
4212 atomic_inc(&conn->gnc_rx_seq);
4214 conn->gnc_last_rx = jiffies;
4215 /* stash first rx so we can clear out purgatory
4217 if (conn->gnc_first_rx == 0)
4218 conn->gnc_first_rx = jiffies;
4220 /* needs to linger to protect gnc_rx_seq like we do with gnc_tx_seq */
4221 kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
4222 kgnilnd_peer_alive(conn->gnc_peer);
4225 rx->grx_conn = conn;
4227 rx->grx_received = current_kernel_time();
4229 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_NET_LOOKUP)) {
4232 rc = kgnilnd_find_net(msg->gnm_srcnid, &net);
4238 kgnilnd_net_decref(net);
4241 if (*kgnilnd_tunables.kgn_checksum && !msg->gnm_cksum)
4242 GNIDBG_MSG(D_WARNING, msg, "no msg header checksum when enabled");
4244 /* XXX Nic: Do we need to swab cksum */
4245 if (msg->gnm_cksum != 0) {
4246 msg_cksum = msg->gnm_cksum;
4248 tmp_cksum = kgnilnd_cksum(msg, sizeof(kgn_msg_t));
4250 if (tmp_cksum != msg_cksum) {
4251 GNIDBG_MSG(D_NETERROR, msg, "Bad hdr checksum (%x expected %x)",
4252 tmp_cksum, msg_cksum);
4253 kgnilnd_dump_msg(D_BUFFS, msg);
4258 /* restore checksum for future debug messages */
4259 msg->gnm_cksum = tmp_cksum;
4261 if (msg->gnm_magic != GNILND_MSG_MAGIC) {
4262 if (__swab32(msg->gnm_magic) != GNILND_MSG_MAGIC) {
4263 GNIDBG_MSG(D_NETERROR, msg, "Unexpected magic %08x from %s",
4264 msg->gnm_magic, libcfs_nid2str(peer->gnp_nid));
4269 __swab32s(&msg->gnm_magic);
4270 __swab16s(&msg->gnm_version);
4271 __swab16s(&msg->gnm_type);
4272 __swab64s(&msg->gnm_srcnid);
4273 __swab64s(&msg->gnm_connstamp);
4274 __swab32s(&msg->gnm_seq);
4276 /* NB message type checked below; NOT here... */
4277 switch (msg->gnm_type) {
4278 case GNILND_MSG_GET_ACK_REV:
4279 case GNILND_MSG_PUT_ACK:
4280 kgnilnd_swab_rdma_desc(&msg->gnm_u.putack.gnpam_desc);
4283 case GNILND_MSG_PUT_REQ_REV:
4284 case GNILND_MSG_GET_REQ:
4285 kgnilnd_swab_rdma_desc(&msg->gnm_u.get.gngm_desc);
4293 if (msg->gnm_version != GNILND_MSG_VERSION) {
4294 GNIDBG_MSG(D_NETERROR, msg, "Unexpected protocol version %d from %s",
4295 msg->gnm_version, libcfs_nid2str(peer->gnp_nid));
4300 if (LNET_NIDADDR(msg->gnm_srcnid) != LNET_NIDADDR(peer->gnp_nid)) {
4301 GNIDBG_MSG(D_NETERROR, msg, "Unexpected peer %s from %s",
4302 libcfs_nid2str(msg->gnm_srcnid),
4303 libcfs_nid2str(peer->gnp_nid));
4308 if (msg->gnm_connstamp != conn->gnc_peer_connstamp) {
4309 GNIDBG_MSG(D_NETERROR, msg, "Unexpected connstamp %#llx(%#llx"
4310 " expected) from %s",
4311 msg->gnm_connstamp, conn->gnc_peer_connstamp,
4312 libcfs_nid2str(peer->gnp_nid));
4317 if (msg->gnm_seq != seq) {
4318 GNIDBG_MSG(D_NETERROR, msg, "Unexpected sequence number %d(%d expected) from %s",
4319 msg->gnm_seq, seq, libcfs_nid2str(peer->gnp_nid));
4324 atomic_inc(&conn->gnc_device->gnd_short_nrx);
4326 if (msg->gnm_type == GNILND_MSG_CLOSE) {
4327 CDEBUG(D_NETTRACE, "%s sent us CLOSE msg\n",
4328 libcfs_nid2str(conn->gnc_peer->gnp_nid));
4329 write_lock(&kgnilnd_data.kgn_peer_conn_lock);
4330 conn->gnc_close_recvd = GNILND_CLOSE_RX;
4331 conn->gnc_peer_error = msg->gnm_u.completion.gncm_retval;
4332 /* double check state with lock held */
4333 if (conn->gnc_state == GNILND_CONN_ESTABLISHED) {
4334 /* only error if we are not already closing */
4335 if (conn->gnc_peer_error == -ETIMEDOUT) {
4336 unsigned long now = jiffies;
4337 CNETERR("peer 0x%p->%s closed connection 0x%p due to timeout. "
4339 "RX %d @ %lus/%lus; TX %d @ %lus/%lus; "
4340 "NOOP %lus/%lus/%lus; sched %lus/%lus/%lus ago\n",
4341 conn->gnc_peer, libcfs_nid2str(conn->gnc_peer->gnp_nid),
4343 cfs_duration_sec(now - timestamp),
4344 cfs_duration_sec(now - conn->gnc_last_rx_cq),
4345 atomic_read(&conn->gnc_tx_seq),
4346 cfs_duration_sec(now - conn->gnc_last_tx),
4347 cfs_duration_sec(now - conn->gnc_last_tx_cq),
4348 cfs_duration_sec(now - conn->gnc_last_noop_want),
4349 cfs_duration_sec(now - conn->gnc_last_noop_sent),
4350 cfs_duration_sec(now - conn->gnc_last_noop_cq),
4351 cfs_duration_sec(now - conn->gnc_last_sched_ask),
4352 cfs_duration_sec(now - conn->gnc_last_sched_do),
4353 cfs_duration_sec(now - conn->gnc_device->gnd_sched_alive));
4355 kgnilnd_close_conn_locked(conn, -ECONNRESET);
4357 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
4361 if (conn->gnc_close_recvd) {
4362 GNIDBG_MSG(D_NETERROR, msg, "Unexpected message %s(%d/%d) after CLOSE from %s",
4363 kgnilnd_msgtype2str(msg->gnm_type),
4364 msg->gnm_type, conn->gnc_close_recvd,
4365 libcfs_nid2str(conn->gnc_peer->gnp_nid));
4370 if (conn->gnc_state != GNILND_CONN_ESTABLISHED) {
4371 /* XXX Nic: log message received on bad connection state */
4375 switch (msg->gnm_type) {
4376 case GNILND_MSG_NOOP:
4377 /* Nothing to do; just a keepalive */
4380 case GNILND_MSG_IMMEDIATE:
4381 /* only get SMSG payload for IMMEDIATE */
4382 atomic64_add(msg->gnm_payload_len, &conn->gnc_device->gnd_short_rxbytes);
4383 rc = lnet_parse(net->gnn_ni, &msg->gnm_u.immediate.gnim_hdr,
4384 msg->gnm_srcnid, rx, 0);
4387 case GNILND_MSG_GET_REQ_REV:
4388 case GNILND_MSG_PUT_REQ:
4389 rc = lnet_parse(net->gnn_ni, &msg->gnm_u.putreq.gnprm_hdr,
4390 msg->gnm_srcnid, rx, 1);
4393 case GNILND_MSG_GET_NAK_REV:
4394 tx = kgnilnd_match_reply_either(conn, GNILND_MSG_GET_REQ_REV, GNILND_MSG_GET_ACK_REV,
4395 msg->gnm_u.completion.gncm_cookie);
4399 kgnilnd_complete_tx(tx, msg->gnm_u.completion.gncm_retval);
4401 case GNILND_MSG_PUT_NAK:
4402 tx = kgnilnd_match_reply_either(conn, GNILND_MSG_PUT_REQ, GNILND_MSG_PUT_ACK,
4403 msg->gnm_u.completion.gncm_cookie);
4407 kgnilnd_complete_tx(tx, msg->gnm_u.completion.gncm_retval);
4409 case GNILND_MSG_PUT_ACK:
4410 tx = kgnilnd_match_reply(conn, GNILND_MSG_PUT_REQ,
4411 msg->gnm_u.putack.gnpam_src_cookie);
4415 /* store putack data for later: deferred rdma or re-try */
4416 tx->tx_putinfo = msg->gnm_u.putack;
4419 spin_lock(&tx->tx_conn->gnc_list_lock);
4421 GNITX_ASSERTF(tx, tx->tx_state & GNILND_TX_WAITING_REPLY,
4422 "not waiting for reply", NULL);
4424 tx->tx_state &= ~GNILND_TX_WAITING_REPLY;
4426 if (likely(!(tx->tx_state & GNILND_TX_WAITING_COMPLETION))) {
4427 kgnilnd_tx_del_state_locked(tx, NULL, conn, GNILND_TX_ALLOCD);
4428 /* sample under lock as follow on steps require gnc_list_lock
4429 * - or call kgnilnd_tx_done which requires no locks held over
4430 * call to lnet_finalize */
4433 /* cannot launch rdma if still waiting for fma-msg completion */
4434 CDEBUG(D_NET, "tx 0x%p type 0x%02x will need to "
4435 "wait for SMSG completion\n", tx, tx->tx_msg.gnm_type);
4436 tx->tx_state |= GNILND_TX_PENDING_RDMA;
4438 spin_unlock(&tx->tx_conn->gnc_list_lock);
4441 rc = kgnilnd_send_mapped_tx(tx, 0);
4443 kgnilnd_tx_done(tx, rc);
4446 case GNILND_MSG_GET_ACK_REV:
4447 tx = kgnilnd_match_reply(conn, GNILND_MSG_GET_REQ_REV,
4448 msg->gnm_u.putack.gnpam_src_cookie);
4452 /* store putack data for later: deferred rdma or re-try */
4453 tx->tx_putinfo = msg->gnm_u.putack;
4455 spin_lock(&tx->tx_conn->gnc_list_lock);
4457 GNITX_ASSERTF(tx, tx->tx_state & GNILND_TX_WAITING_REPLY,
4458 "not waiting for reply", NULL);
4460 tx->tx_state &= ~GNILND_TX_WAITING_REPLY;
4462 if (likely(!(tx->tx_state & GNILND_TX_WAITING_COMPLETION))) {
4463 kgnilnd_tx_del_state_locked(tx, NULL, conn, GNILND_TX_ALLOCD);
4464 /* sample under lock as follow on steps require gnc_list_lock
4465 * - or call kgnilnd_tx_done which requires no locks held over
4466 * call to lnet_finalize */
4469 /* cannot launch rdma if still waiting for fma-msg completion */
4470 CDEBUG(D_NET, "tx 0x%p type 0x%02x will need to "
4471 "wait for SMSG completion\n", tx, tx->tx_msg.gnm_type);
4472 tx->tx_state |= GNILND_TX_PENDING_RDMA;
4474 spin_unlock(&tx->tx_conn->gnc_list_lock);
4477 rc = kgnilnd_send_mapped_tx(tx, 0);
4479 kgnilnd_tx_done(tx, rc);
4482 case GNILND_MSG_PUT_DONE:
4483 tx = kgnilnd_match_reply(conn, GNILND_MSG_PUT_ACK,
4484 msg->gnm_u.completion.gncm_cookie);
4488 GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED ||
4489 tx->tx_buftype == GNILND_BUF_VIRT_MAPPED,
4490 "bad tx buftype %d", tx->tx_buftype);
4492 kgnilnd_finalize_rx_done(tx, msg);
4494 case GNILND_MSG_PUT_REQ_REV:
4495 case GNILND_MSG_GET_REQ:
4496 rc = lnet_parse(net->gnn_ni, &msg->gnm_u.get.gngm_hdr,
4497 msg->gnm_srcnid, rx, 1);
4501 case GNILND_MSG_GET_NAK:
4502 tx = kgnilnd_match_reply(conn, GNILND_MSG_GET_REQ,
4503 msg->gnm_u.completion.gncm_cookie);
4507 GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED ||
4508 tx->tx_buftype == GNILND_BUF_VIRT_MAPPED,
4509 "bad tx buftype %d", tx->tx_buftype);
4511 kgnilnd_complete_tx(tx, msg->gnm_u.completion.gncm_retval);
4514 case GNILND_MSG_GET_DONE:
4515 tx = kgnilnd_match_reply(conn, GNILND_MSG_GET_REQ,
4516 msg->gnm_u.completion.gncm_cookie);
4520 GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED ||
4521 tx->tx_buftype == GNILND_BUF_VIRT_MAPPED,
4522 "bad tx buftype %d", tx->tx_buftype);
4524 lnet_set_reply_msg_len(net->gnn_ni, tx->tx_lntmsg[1],
4525 msg->gnm_u.completion.gncm_retval);
4527 kgnilnd_finalize_rx_done(tx, msg);
4529 case GNILND_MSG_GET_DONE_REV:
4530 tx = kgnilnd_match_reply(conn, GNILND_MSG_GET_ACK_REV,
4531 msg->gnm_u.completion.gncm_cookie);
4535 GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED ||
4536 tx->tx_buftype == GNILND_BUF_VIRT_MAPPED,
4537 "bad tx buftype %d", tx->tx_buftype);
4539 kgnilnd_finalize_rx_done(tx, msg);
4542 case GNILND_MSG_PUT_DONE_REV:
4543 tx = kgnilnd_match_reply(conn, GNILND_MSG_PUT_REQ_REV,
4544 msg->gnm_u.completion.gncm_cookie);
4549 GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED ||
4550 tx->tx_buftype == GNILND_BUF_VIRT_MAPPED,
4551 "bad tx buftype %d", tx->tx_buftype);
4553 kgnilnd_finalize_rx_done(tx, msg);
4555 case GNILND_MSG_PUT_NAK_REV:
4556 tx = kgnilnd_match_reply(conn, GNILND_MSG_PUT_REQ_REV,
4557 msg->gnm_u.completion.gncm_cookie);
4562 GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED ||
4563 tx->tx_buftype == GNILND_BUF_VIRT_MAPPED,
4564 "bad tx buftype %d", tx->tx_buftype);
4566 kgnilnd_complete_tx(tx, msg->gnm_u.completion.gncm_retval);
4571 if (rc < 0) /* protocol/comms error */
4572 kgnilnd_close_conn(conn, rc);
4574 if (repost && rx != NULL) {
4575 kgnilnd_consume_rx(rx);
4578 /* we got an event, so assume there are more and ask for a reschedule */
4580 kgnilnd_schedule_conn(conn);
4584 /* Do the failure injections needed to affect conn processing in the following function.
4585 * When writing tests that use this function make sure to use a fail_loc with a fail mask.
4586 * If you don't you can cause the scheduler threads to spin on the conn without it leaving
4589 * intent is used to signal the calling function whether or not the conn needs to be rescheduled.
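/* For example (sketch only - the numeric values for the CFS_FAIL_GNI_* sites
 * live in the gnilnd headers): arming a site together with one of the libcfs
 * mask bits, e.g. fail_loc = CFS_FAIL_GNI_DROP_CLOSING | CFS_FAIL_ONCE set
 * via "lctl set_param fail_loc=<value>", fires the injection once and then
 * clears, whereas arming the bare site value leaves it set and can leave a
 * scheduler thread spinning on the conn as described above. */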
4593 kgnilnd_check_conn_fail_loc(kgn_device_t *dev, kgn_conn_t *conn, int *intent)
4597 /* short circuit out when not set */
4598 if (likely(!cfs_fail_loc)) {
4602 /* failure injection to test for stack reset clean ups */
4603 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_DROP_CLOSING)) {
4604 /* we can't rely on busy loops being nice enough to get the
4605 * stack reset triggered - it'd just spin on this conn */
4606 CFS_RACE(CFS_FAIL_GNI_DROP_CLOSING);
4609 GOTO(did_fail_loc, rc);
4612 if (conn->gnc_state == GNILND_CONN_DESTROY_EP) {
4613 /* DESTROY_EP set in kgnilnd_conn_decref on gnc_refcount = 1 */
4615 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_DROP_DESTROY_EP)) {
4616 CFS_RACE(CFS_FAIL_GNI_DROP_DESTROY_EP);
4619 GOTO(did_fail_loc, rc);
4623 /* CFS_FAIL_GNI_FINISH_PURG2 is used to stop a connection from fully closing. This scheduler
4624 * will spin on the CFS_FAIL_TIMEOUT until the fail_loc is cleared at which time the connection
4625 * will be closed by kgnilnd_complete_closed_conn.
4627 if ((conn->gnc_state == GNILND_CONN_CLOSED) && CFS_FAIL_CHECK(CFS_FAIL_GNI_FINISH_PURG2)) {
4628 while (CFS_FAIL_TIMEOUT(CFS_FAIL_GNI_FINISH_PURG2, 1)) {};
4631 GOTO(did_fail_loc, rc);
4634 /* this one is a bit gross - we can't hold the mutex from process_conns
4635 * across a CFS_RACE here - it'd block the conn threads from doing an ep_bind
4636 * and moving onto finish_connect
4637 * so, we'll just set the rc - kgnilnd_process_conns will clear
4638 * found_work on a fail_loc, getting the scheduler thread to call schedule()
4639 * and effectively getting this thread to sleep */
4640 if ((conn->gnc_state == GNILND_CONN_CLOSED) && CFS_FAIL_CHECK(CFS_FAIL_GNI_FINISH_PURG)) {
4643 GOTO(did_fail_loc, rc);
4651 kgnilnd_send_conn_close(kgn_conn_t *conn)
4655 /* we are closing the conn - we will try to send the CLOSE msg
4656 * but will not wait for anything else to flush */
4658 /* send the close if not already done so or received one */
4659 if (!conn->gnc_close_sent && !conn->gnc_close_recvd) {
4660 /* set close_sent regardless of the success of the
4661 * CLOSE message. We are going to try once and then
4662 * kick him out of the sandbox */
4663 conn->gnc_close_sent = 1;
4666 /* EP might be null already if remote side initiated a new connection.
4667 * kgnilnd_finish_connect destroys existing ep_handles before wiring up the new connection,
4668 * so this check is here to make sure we don't attempt to send with a null ep_handle.
4670 if (conn->gnc_ephandle != NULL) {
4673 tx = kgnilnd_new_tx_msg(GNILND_MSG_CLOSE, conn->gnc_peer->gnp_net->gnn_ni->ni_nid);
4675 tx->tx_msg.gnm_u.completion.gncm_retval = conn->gnc_error;
4676 tx->tx_state = GNILND_TX_WAITING_COMPLETION;
4677 tx->tx_qtime = jiffies;
4679 if (tx->tx_id.txe_idx == 0) {
4680 rc = kgnilnd_set_tx_id(tx, conn);
4682 kgnilnd_tx_done(tx, rc);
4686 CDEBUG(D_NETTRACE, "sending close with errno %d\n",
4689 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_CLOSE_SEND)) {
4690 kgnilnd_tx_done(tx, -EAGAIN);
4692 rc = kgnilnd_sendmsg(tx, NULL, 0, NULL, GNILND_TX_FMAQ);
4694 /* It wasn't sent and we don't care. */
4695 kgnilnd_tx_done(tx, rc);
4703 /* When changing gnc_state we need to take the kgn_peer_conn_lock */
4704 write_lock(&kgnilnd_data.kgn_peer_conn_lock);
4705 conn->gnc_state = GNILND_CONN_CLOSED;
4706 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
4707 /* mark this conn as CLOSED now that we have processed it
4708 * - done after the TX so we can still use CLOSING in asserts */
4712 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_RX_CLOSE_CLOSED)) {
4713 /* simulate a RX CLOSE after the timeout but before
4714 * the scheduler thread gets it */
4715 conn->gnc_close_recvd = GNILND_CLOSE_INJECT2;
4716 conn->gnc_peer_error = -ETIMEDOUT;
4718 /* schedule to allow potential CLOSE and get the complete phase run */
4719 kgnilnd_schedule_conn(conn);
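/* close lifecycle as driven by kgnilnd_process_conns() below: a CLOSING conn
 * gets one shot at the CLOSE message here and is then marked CLOSED; on a
 * later pass kgnilnd_complete_closed_conn() finishes it once no other thread
 * still has tx's in use, and conns that drop to DESTROY_EP get
 * kgnilnd_destroy_conn_ep() instead. */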
4723 kgnilnd_process_mapped_tx(kgn_device_t *dev)
4728 int fast_remaps = GNILND_FAST_MAPPING_TRY;
4729 int log_retrans, log_retrans_level;
4730 static int last_map_version;
4733 spin_lock(&dev->gnd_lock);
4734 if (list_empty(&dev->gnd_map_tx)) {
4735 /* if the list is empty make sure we don't have a timer running */
4736 del_singleshot_timer_sync(&dev->gnd_map_timer);
4737 spin_unlock(&dev->gnd_lock);
4741 dev->gnd_sched_alive = jiffies;
4743 /* we'll retry as fast as possible up to 25% of the limit, then we start
4744 * backing off until our map version changes - indicating we unmapped
4746 tx = list_first_entry(&dev->gnd_map_tx, kgn_tx_t, tx_list);
4747 if (likely(dev->gnd_map_attempt == 0) ||
4748 time_after_eq(jiffies, dev->gnd_next_map) ||
4749 last_map_version != dev->gnd_map_version) {
4751 /* if this is our first attempt at mapping, set last mapped to the current
4752 * jiffies so we can time out our attempt correctly.
4754 if (dev->gnd_map_attempt == 0)
4755 dev->gnd_last_map = jiffies;
4757 GNIDBG_TX(D_NET, tx, "waiting for mapping event to retry", NULL);
4758 spin_unlock(&dev->gnd_lock);
4762 /* delete the previous timer if it exists */
4763 del_singleshot_timer_sync(&dev->gnd_map_timer);
4764 /* stash the last map version to let us know when a good one was seen */
4765 last_map_version = dev->gnd_map_version;
4767 /* we need to take the lock and continually refresh the head of the list as
4768 * kgnilnd_complete_closed_conn might be nuking stuff and we are cycling the lock
4769 * allowing them to squeeze in */
4771 while (!list_empty(&dev->gnd_map_tx)) {
4772 /* make sure we break out early on quiesce */
4773 if (unlikely(kgnilnd_data.kgn_quiesce_trigger)) {
4774 /* always break with lock held - we unlock outside loop */
4778 tx = list_first_entry(&dev->gnd_map_tx, kgn_tx_t, tx_list);
4780 kgnilnd_tx_del_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_ALLOCD);
4783 /* sample with lock held, serializing with kgnilnd_complete_closed_conn */
4784 if (tx->tx_conn->gnc_state != GNILND_CONN_ESTABLISHED) {
4785 /* if conn is dying, mark tx in tx_ref_table for
4786 * kgnilnd_complete_closed_conn to finish up */
4787 kgnilnd_tx_add_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_DYING, 1);
4790 /* tx was moved to DYING, get next */
4794 spin_unlock(&dev->gnd_lock);
4795 rc = kgnilnd_send_mapped_tx(tx, 1);
4797 /* We made it! skip error handling.. */
4799 /* OK to continue on +ve errors as it won't get seen until
4800 * this function is called again - we operate on a copy of the original
4801 * list and not the live list */
4802 spin_lock(&dev->gnd_lock);
4803 /* reset map attempts back to zero - we successfully
4804 * mapped, so we can reset our timers */
4805 dev->gnd_map_attempt = 0;
4807 } else if (rc == -EAGAIN) {
4808 spin_lock(&dev->gnd_lock);
4809 mod_timer(&dev->gnd_map_timer, dev->gnd_next_map);
4810 spin_unlock(&dev->gnd_lock);
4811 GOTO(get_out_mapped, rc);
4812 } else if (rc != -ENOMEM) {
4813 /* carp, failure we can't handle */
4814 kgnilnd_tx_done(tx, rc);
4815 spin_lock(&dev->gnd_lock);
4816 /* reset map attempts back to zero - we don't know what happened, but it
4817 * wasn't a failed mapping
4819 dev->gnd_map_attempt = 0;
4823 /* time to handle the retry cases... lock so we don't have 2 threads
4824 * mucking with gnd_map_attempt or gnd_next_map at the same time.
4826 spin_lock(&dev->gnd_lock);
4827 dev->gnd_map_attempt++;
4828 if (dev->gnd_map_attempt < fast_remaps) {
4829 /* we just want it to go as fast as possible, so set
4830 * gnd_next_map to the current jiffies and it will be
4831 * processed immediately.
4833 dev->gnd_next_map = jiffies;
4835 /* Retry based on GNILND_MAP_RETRY_RATE */
4836 dev->gnd_next_map = jiffies + GNILND_MAP_RETRY_RATE;
4839 /* only log occasionally once we've retried fast_remaps */
4840 log_retrans = (dev->gnd_map_attempt >= fast_remaps) &&
4841 ((dev->gnd_map_attempt % fast_remaps) == 0);
4842 log_retrans_level = log_retrans ? D_NETERROR : D_NET;
4844 /* make sure we are not off in the weeds with this tx */
4845 if (time_after(jiffies, dev->gnd_last_map + GNILND_MAP_TIMEOUT)) {
4846 GNIDBG_TX(D_NETERROR, tx,
4847 "giving up on TX, too many retries", NULL);
4848 spin_unlock(&dev->gnd_lock);
4849 if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_REQ ||
4850 tx->tx_msg.gnm_type == GNILND_MSG_GET_REQ_REV) {
4851 kgnilnd_nak_rdma(tx->tx_conn, tx->tx_msg.gnm_type,
4853 tx->tx_putinfo.gnpam_dst_cookie,
4854 tx->tx_msg.gnm_srcnid);
4856 kgnilnd_nak_rdma(tx->tx_conn, tx->tx_msg.gnm_type,
4858 tx->tx_getinfo.gngm_cookie,
4859 tx->tx_msg.gnm_srcnid);
4861 kgnilnd_tx_done(tx, -ENOMEM);
4862 GOTO(get_out_mapped, rc);
4864 GNIDBG_TX(log_retrans_level, tx,
4865 "transient map failure #%d %d pages/%d bytes phys %u@%u "
4867 "nq_map %d mdd# %d/%d GART %ld",
4868 dev->gnd_map_attempt, tx->tx_phys_npages, tx->tx_nob,
4869 dev->gnd_map_nphys, dev->gnd_map_physnop * PAGE_SIZE,
4870 dev->gnd_map_nvirt, dev->gnd_map_virtnob,
4871 atomic_read(&dev->gnd_nq_map),
4872 atomic_read(&dev->gnd_n_mdd), atomic_read(&dev->gnd_n_mdd_held),
4873 atomic64_read(&dev->gnd_nbytes_map));
4876 /* we need to stop processing the rest of the list, so add it back in */
4877 /* set timer to wake device when we need to schedule this tx */
4878 mod_timer(&dev->gnd_map_timer, dev->gnd_next_map);
4879 kgnilnd_tx_add_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_MAPQ, 0);
4880 spin_unlock(&dev->gnd_lock);
4881 GOTO(get_out_mapped, rc);
4883 spin_unlock(&dev->gnd_lock);
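/* Retry schedule sketch (the real constants are GNILND_FAST_MAPPING_TRY,
 * GNILND_MAP_RETRY_RATE and GNILND_MAP_TIMEOUT): the first fast_remaps
 * attempts set gnd_next_map to the current jiffies and retry immediately;
 * after that each failure pushes gnd_next_map out by GNILND_MAP_RETRY_RATE,
 * and once gnd_last_map is more than GNILND_MAP_TIMEOUT old the tx is NAKed
 * back to the peer and completed with -ENOMEM. */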
4889 kgnilnd_process_conns(kgn_device_t *dev, unsigned long deadline)
4894 int error_inject = 0;
4898 spin_lock(&dev->gnd_lock);
4899 while (!list_empty(&dev->gnd_ready_conns) && time_before(jiffies, deadline)) {
4900 dev->gnd_sched_alive = jiffies;
4904 if (unlikely(kgnilnd_data.kgn_quiesce_trigger)) {
4905 /* break with lock held */
4909 conn = list_first_entry(&dev->gnd_ready_conns, kgn_conn_t, gnc_schedlist);
4910 list_del_init(&conn->gnc_schedlist);
4912 * Since we are processing this conn now, we no longer need to be on the delaylist.
4915 if (!list_empty(&conn->gnc_delaylist))
4916 list_del_init(&conn->gnc_delaylist);
4917 spin_unlock(&dev->gnd_lock);
4919 conn_sched = xchg(&conn->gnc_scheduled, GNILND_CONN_PROCESS);
4921 LASSERTF(conn_sched != GNILND_CONN_IDLE &&
4922 conn_sched != GNILND_CONN_PROCESS,
4923 "conn %p on ready list but in bad state: %d\n",
4926 CDEBUG(D_INFO, "conn %p@%s for processing\n",
4927 conn, kgnilnd_conn_state2str(conn));
4930 set_mb(conn->gnc_last_sched_do, jiffies);
4932 if (kgnilnd_check_conn_fail_loc(dev, conn, &intent)) {
4934 /* based on intent see if we should run again. */
4935 rc = kgnilnd_schedule_process_conn(conn, intent);
4937 /* drop ref from gnd_ready_conns */
4938 if (atomic_read(&conn->gnc_refcount) == 1 && rc != 1) {
4939 down_write(&dev->gnd_conn_sem);
4940 kgnilnd_conn_decref(conn);
4941 up_write(&dev->gnd_conn_sem);
4942 } else if (rc != 1) {
4943 kgnilnd_conn_decref(conn);
4945 /* clear this so that scheduler thread doesn't spin */
4947 /* break with lock held... */
4948 spin_lock(&dev->gnd_lock);
4952 if (unlikely(conn->gnc_state == GNILND_CONN_CLOSED)) {
4953 down_write(&dev->gnd_conn_sem);
4955 /* CONN_CLOSED set in process_fmaq when CLOSE is sent */
4956 if (unlikely(atomic_read(&conn->gnc_tx_in_use))) {
4957 /* If there are tx's currently in use in another
4958 * thread we don't want to complete the close
4959 * yet. Cycle this conn back through
4961 kgnilnd_schedule_conn(conn);
4963 kgnilnd_complete_closed_conn(conn);
4965 up_write(&dev->gnd_conn_sem);
4966 } else if (unlikely(conn->gnc_state == GNILND_CONN_DESTROY_EP)) {
4967 /* DESTROY_EP set in kgnilnd_conn_decref on gnc_refcount = 1 */
4968 /* serialize SMSG CQs with ep_bind and smsg_release */
4969 down_write(&dev->gnd_conn_sem);
4970 kgnilnd_destroy_conn_ep(conn);
4971 up_write(&dev->gnd_conn_sem);
4972 } else if (unlikely(conn->gnc_state == GNILND_CONN_CLOSING)) {
4973 /* if we need to do some CLOSE sending, etc done here do it */
4974 down_write(&dev->gnd_conn_sem);
4975 kgnilnd_send_conn_close(conn);
4976 kgnilnd_check_fma_rx(conn);
4977 up_write(&dev->gnd_conn_sem);
4978 } else if (atomic_read(&conn->gnc_peer->gnp_dirty_eps) == 0) {
4979 /* start moving traffic if the old conns are cleared out */
4980 down_read(&dev->gnd_conn_sem);
4981 kgnilnd_check_fma_rx(conn);
4982 kgnilnd_process_fmaq(conn);
4983 up_read(&dev->gnd_conn_sem);
4986 rc = kgnilnd_schedule_process_conn(conn, 0);
4988 /* drop ref from gnd_ready_conns */
4989 if (atomic_read(&conn->gnc_refcount) == 1 && rc != 1) {
4990 down_write(&dev->gnd_conn_sem);
4991 kgnilnd_conn_decref(conn);
4992 up_write(&dev->gnd_conn_sem);
4993 } else if (rc != 1) {
4994 kgnilnd_conn_decref(conn);
4997 /* check list again with lock held */
4998 spin_lock(&dev->gnd_lock);
5001 /* If we are short circuiting due to timing we want to be scheduled
5002 * as soon as possible.
5004 if (!list_empty(&dev->gnd_ready_conns) && !error_inject)
5007 spin_unlock(&dev->gnd_lock);
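/* the deadline argument bounds how long one pass spends in here: the while
 * loop above stops pulling conns off gnd_ready_conns once it expires, and the
 * check just above then arranges (per its comment) for this device to be
 * scheduled again as soon as possible. */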
5013 kgnilnd_scheduler(void *arg)
5015 int threadno = (long)arg;
5018 unsigned long deadline = 0;
5021 dev = &kgnilnd_data.kgn_devices[(threadno + 1) % kgnilnd_data.kgn_ndevs];
5023 cfs_block_allsigs();
5025 /* all gnilnd threads need to run fairly urgently */
5026 set_user_nice(current, *kgnilnd_tunables.kgn_sched_nice);
5027 deadline = jiffies + cfs_time_seconds(*kgnilnd_tunables.kgn_sched_timeout);
5028 while (!kgnilnd_data.kgn_shutdown) {
5030 /* Safe: kgn_shutdown only set when quiescent */
5032 /* to quiesce or to not quiesce, that is the question */
5034 if (unlikely(kgnilnd_data.kgn_quiesce_trigger)) {
5035 KGNILND_SPIN_QUIESCE;
5038 /* tracking for when thread goes AWOL */
5039 dev->gnd_sched_alive = jiffies;
5041 CFS_FAIL_TIMEOUT(CFS_FAIL_GNI_SCHED_DEADLINE,
5042 (*kgnilnd_tunables.kgn_sched_timeout + 1));
5043 /* let folks know we are up and kicking
5044 * - they can use this for latency savings, etc
5045 * - only change it if IRQ; if IDLE leave it alone so that the
5046 * next schedule_device call can put us back to IRQ */
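/* gnd_ready acts as a small state machine for this loop: IRQ means a device
 * callback signalled new work, LOOP means a scheduler thread is actively
 * polling, and IDLE means we are about to sleep and need a waitq poke.  The
 * cmpxchg below only performs the IRQ -> LOOP transition; the xchg back to
 * IDLE happens after prepare_to_wait() further down, giving one last chance
 * to catch an IRQ before we schedule(). */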
5047 (void)cmpxchg(&dev->gnd_ready, GNILND_DEV_IRQ, GNILND_DEV_LOOP);
5049 down_read(&dev->gnd_conn_sem);
5050 /* always check these - they are super low cost */
5051 found_work += kgnilnd_check_fma_send_cq(dev);
5052 found_work += kgnilnd_check_fma_rcv_cq(dev);
5054 /* rdma CQ doesn't care about eps */
5055 found_work += kgnilnd_check_rdma_cq(dev);
5057 /* move some RDMA ? */
5058 found_work += kgnilnd_process_rdmaq(dev);
5060 /* map some pending RDMA requests ? */
5061 found_work += kgnilnd_process_mapped_tx(dev);
5063 /* the EP for a conn is not destroyed until all the references
5064 * to it are gone, so these checks should be safe
5065 * even if run in parallel with the CQ checking functions
5066 * _AND_ a thread that processes the CLOSED->DONE
5070 up_read(&dev->gnd_conn_sem);
5072 /* process all conns ready now */
5073 found_work += kgnilnd_process_conns(dev, deadline);
5075 /* do an eager check to avoid the IRQ disabling in
5076 * prepare_to_wait and friends */
5079 (busy_loops++ < *kgnilnd_tunables.kgn_loops) &&
5080 time_before(jiffies, deadline)) {
5082 if ((busy_loops % 10) == 0) {
5083 /* tickle heartbeat and watchdog to ensure our
5084 * piggishness doesn't turn into heartbeat failure */
5085 touch_nmi_watchdog();
5091 /* if we got here, either found_work was zero or busy_loops says we
5092 * need to take a break. We'll clear gnd_ready but we'll check
5093 * one last time if there is an IRQ that needs processing */
5095 prepare_to_wait(&dev->gnd_waitq, &wait, TASK_INTERRUPTIBLE);
5097 /* the first time this will go LOOP -> IDLE and let us do one final check
5098 * during which we might get an IRQ, then IDLE->IDLE and schedule()
5099 * - this might allow other threads to block us for a bit if they
5100 * try to get the mutex, but that is good as we'd need to wake
5101 * up soon to handle the CQ or other processing anyway */
5103 found_work += xchg(&dev->gnd_ready, GNILND_DEV_IDLE);
5105 if ((busy_loops >= *kgnilnd_tunables.kgn_loops) ||
5106 time_after_eq(jiffies, deadline)) {
5108 "yeilding: found_work %d busy_loops %d\n",
5109 found_work, busy_loops);
5111 /* use yield if we are bailing due to busy_loops
5112 * - this will ensure we wake up soonish. This closes
5113 * a race with kgnilnd_device_callback - where it'd
5114 * not call wake_up() because gnd_ready == 1, but then
5115 * we come down and schedule() because of busy_loops.
5116 * We'd not be woken up until something poked our waitq
5117 * again. yield() ensures we wake up without another
5118 * waitq poke in that case */
5119 atomic_inc(&dev->gnd_n_yield);
5120 kgnilnd_data.kgn_last_condresched = jiffies;
5122 CDEBUG(D_INFO, "awake after yield\n");
5123 deadline = jiffies + cfs_time_seconds(*kgnilnd_tunables.kgn_sched_timeout);
5124 } else if (found_work == GNILND_DEV_IDLE) {
5125 /* busy_loops is low and there is nothing to do,
5126 * go to sleep and wait for a waitq poke */
5128 "scheduling: found_work %d busy_loops %d\n",
5129 found_work, busy_loops);
5130 atomic_inc(&dev->gnd_n_schedule);
5131 kgnilnd_data.kgn_last_scheduled = jiffies;
5133 CDEBUG(D_INFO, "awake after schedule\n");
5134 deadline = jiffies + cfs_time_seconds(*kgnilnd_tunables.kgn_sched_timeout);
5136 finish_wait(&dev->gnd_waitq, &wait);
5139 kgnilnd_thread_fini();