2 * Copyright (C) 2004 Cluster File Systems, Inc.
4 * Copyright (C) 2009-2012 Cray, Inc.
6 * Derived from work by Eric Barton <eric@bartonsoftware.com>
7 * Author: Nic Henke <nic@cray.com>
9 * This file is part of Lustre, http://www.lustre.org.
11 * Lustre is free software; you can redistribute it and/or
12 * modify it under the terms of version 2 of the GNU General Public
13 * License as published by the Free Software Foundation.
15 * Lustre is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with Lustre; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 #include <linux/nmi.h>
29 /* this is useful when we need to debug wire corruption. */
31 kgnilnd_dump_blob(int level, char *prefix, void *buf, int len) {
39 "%s 0x%p: 0x%16.16llx 0x%16.16llx 0x%16.16llx 0x%16.16llx\n",
40 prefix, ptr, *(ptr), *(ptr + 1), *(ptr + 2), *(ptr + 3));
43 } else if (len >= 16) {
45 "%s 0x%p: 0x%16.16llx 0x%16.16llx\n",
46 prefix, ptr, *(ptr), *(ptr + 1));
50 CDEBUG(level, "%s 0x%p: 0x%16.16llx\n",
59 kgnilnd_dump_msg(int mask, kgn_msg_t *msg)
61 CDEBUG(mask, "0x%8.8x 0x%4.4x 0x%4.4x 0x%16.16llx"
62 " 0x%16.16llx 0x%8.8x 0x%4.4x 0x%4.4x 0x%8.8x\n",
63 msg->gnm_magic, msg->gnm_version,
64 msg->gnm_type, msg->gnm_srcnid,
65 msg->gnm_connstamp, msg->gnm_seq,
66 msg->gnm_cksum, msg->gnm_payload_cksum,
67 msg->gnm_payload_len);
71 kgnilnd_schedule_device(kgn_device_t *dev)
73 short already_live = 0;
75 /* we'll only want to wake if the scheduler thread
76 * has come around and set ready to zero */
77 already_live = cmpxchg(&dev->gnd_ready, GNILND_DEV_IDLE, GNILND_DEV_IRQ);
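/* cmpxchg returns the previous value of gnd_ready, so already_live tells us
 * whether the device was still idle when we marked it GNILND_DEV_IRQ */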
80 wake_up_all(&dev->gnd_waitq);
85 void kgnilnd_schedule_device_timer(unsigned long arg)
87 kgn_device_t *dev = (kgn_device_t *) arg;
89 kgnilnd_schedule_device(dev);
93 kgnilnd_device_callback(__u32 devid, __u64 arg)
96 int index = (int) arg;
98 if (index >= kgnilnd_data.kgn_ndevs) {
99 /* use _EMERG instead of an LBUG to prevent LBUG'ing in
100 * interrupt context. */
101 LCONSOLE_EMERG("callback for unknown device %d->%d\n",
106 dev = &kgnilnd_data.kgn_devices[index];
107 /* just basic sanity */
108 if (dev->gnd_id == devid) {
109 kgnilnd_schedule_device(dev);
111 LCONSOLE_EMERG("callback for bad device %d devid %d\n",
116 /* sched_intent values:
117 * < 0 : do not reschedule under any circumstances
118 * == 0: reschedule if someone marked him WANTS_SCHED
119 * > 0 : force a reschedule */
120 /* Return code 0 means it did not schedule the conn, 1
121 * means it successfully scheduled the conn.
125 kgnilnd_schedule_process_conn(kgn_conn_t *conn, int sched_intent)
129 /* move back to IDLE but save previous state.
130 * if we see WANTS_SCHED, we'll call kgnilnd_schedule_conn and
131 * let the xchg there handle any racing callers to get it
132 * onto gnd_ready_conns */
134 conn_sched = xchg(&conn->gnc_scheduled, GNILND_CONN_IDLE);
135 LASSERTF(conn_sched == GNILND_CONN_WANTS_SCHED ||
136 conn_sched == GNILND_CONN_PROCESS,
137 "conn %p after process in bad state: %d\n",
140 if (sched_intent >= 0) {
141 if ((sched_intent > 0 || (conn_sched == GNILND_CONN_WANTS_SCHED))) {
142 return kgnilnd_schedule_conn_refheld(conn, 1);
148 /* Return of 0 for conn not scheduled, 1 returned if conn was scheduled or marked
152 _kgnilnd_schedule_conn(kgn_conn_t *conn, const char *caller, int line, int refheld)
154 kgn_device_t *dev = conn->gnc_device;
158 sched = xchg(&conn->gnc_scheduled, GNILND_CONN_WANTS_SCHED);
159 /* we only care about the last person who marked want_sched since they
160 * are most likely the culprit
162 memcpy(conn->gnc_sched_caller, caller, sizeof(conn->gnc_sched_caller));
163 conn->gnc_sched_line = line;
164 /* if we are IDLE, add to list - only one guy sees IDLE and "wins"
165 * the chance to put it onto gnd_ready_conns.
166 * otherwise, leave marked as WANTS_SCHED and the thread that "owns"
167 * the conn in process_conns will take care of moving it back to
168 * SCHED when it is done processing */
170 if (sched == GNILND_CONN_IDLE) {
171 /* if the conn is already scheduled, we've already requested
172 * the scheduler thread wakeup */
174 /* Add a reference to the conn if we are not holding a reference
175 * already from the existing scheduler. We now use the same
176 * reference if we need to reschedule a conn while in a scheduler
179 kgnilnd_conn_addref(conn);
181 LASSERTF(list_empty(&conn->gnc_schedlist), "conn %p already sched state %d\n",
184 CDEBUG(D_INFO, "scheduling conn 0x%p caller %s:%d\n", conn, caller, line);
186 spin_lock(&dev->gnd_lock);
187 list_add_tail(&conn->gnc_schedlist, &dev->gnd_ready_conns);
188 spin_unlock(&dev->gnd_lock);
189 set_mb(conn->gnc_last_sched_ask, jiffies);
192 CDEBUG(D_INFO, "not scheduling conn 0x%p: %d caller %s:%d\n", conn, sched, caller, line);
196 /* make sure thread(s) are going to process conns - but let it make
197 * separate decision from conn schedule */
198 kgnilnd_schedule_device(dev);
203 kgnilnd_schedule_dgram(kgn_device_t *dev)
207 wake = xchg(&dev->gnd_dgram_ready, GNILND_DGRAM_SCHED);
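/* xchg returns the previous value; if it was already GNILND_DGRAM_SCHED the
 * dgram thread has already been asked to run and we skip the redundant wake_up */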
208 if (wake != GNILND_DGRAM_SCHED) {
209 wake_up(&dev->gnd_dgram_waitq);
211 CDEBUG(D_NETTRACE, "not waking: %d\n", wake);
216 kgnilnd_free_tx(kgn_tx_t *tx)
218 /* taken from kgnilnd_tx_add_state_locked */
220 LASSERTF((tx->tx_list_p == NULL &&
221 tx->tx_list_state == GNILND_TX_ALLOCD) &&
222 list_empty(&tx->tx_list),
223 "tx %p with bad state %s (list_p %p) tx_list %s\n",
224 tx, kgnilnd_tx_state2str(tx->tx_list_state), tx->tx_list_p,
225 list_empty(&tx->tx_list) ? "empty" : "not empty");
227 atomic_dec(&kgnilnd_data.kgn_ntx);
229 /* we only allocate this if we need to */
230 if (tx->tx_phys != NULL) {
231 kmem_cache_free(kgnilnd_data.kgn_tx_phys_cache, tx->tx_phys);
232 CDEBUG(D_MALLOC, "slab-freed 'tx_phys': %lu at %p.\n",
233 LNET_MAX_IOV * sizeof(gni_mem_segment_t), tx->tx_phys);
236 /* Only free the buffer if we used it */
237 if (tx->tx_buffer_copy != NULL) {
238 vfree(tx->tx_buffer_copy);
239 tx->tx_buffer_copy = NULL;
240 CDEBUG(D_MALLOC, "vfreed buffer2\n");
243 KGNILND_POISON(tx, 0x5a, sizeof(kgn_tx_t));
245 CDEBUG(D_MALLOC, "slab-freed 'tx': %lu at %p.\n", sizeof(*tx), tx);
246 kmem_cache_free(kgnilnd_data.kgn_tx_cache, tx);
250 kgnilnd_alloc_tx (void)
254 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_ALLOC_TX))
257 tx = kmem_cache_alloc(kgnilnd_data.kgn_tx_cache, GFP_ATOMIC);
259 CERROR("failed to allocate tx\n");
262 CDEBUG(D_MALLOC, "slab-alloced 'tx': %lu at %p.\n",
265 /* need this memset, cache alloc'd memory is not cleared */
266 memset(tx, 0, sizeof(*tx));
268 /* setup everything here to minimize time under the lock */
269 tx->tx_buftype = GNILND_BUF_NONE;
270 tx->tx_msg.gnm_type = GNILND_MSG_NONE;
271 INIT_LIST_HEAD(&tx->tx_list);
272 INIT_LIST_HEAD(&tx->tx_map_list);
273 tx->tx_list_state = GNILND_TX_ALLOCD;
275 atomic_inc(&kgnilnd_data.kgn_ntx);
280 /* csum_fold needs to be run on the return value before shipping over the wire */
281 #define _kgnilnd_cksum(seed, ptr, nob) csum_partial(ptr, nob, seed)
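/* csum_partial() returns a 32-bit partial sum that can be chained by passing it
 * back in as the seed; kgnilnd_cksum_kiov relies on this to accumulate multiple
 * fragments into one running checksum */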
283 /* we don't use offset as everyone is passing a buffer reference that already
284 * includes the offset into the base address -
285 * see kgnilnd_setup_virt_buffer and kgnilnd_setup_immediate_buffer */
287 kgnilnd_cksum(void *ptr, size_t nob)
291 sum = csum_fold(_kgnilnd_cksum(0, ptr, nob));
293 /* don't use magic 'no checksum' value */
297 CDEBUG(D_INFO, "cksum 0x%x for ptr 0x%p sz %zu\n",
304 kgnilnd_cksum_kiov(unsigned int nkiov, lnet_kiov_t *kiov,
305 unsigned int offset, unsigned int nob, int dump_blob)
311 unsigned int fraglen;
317 CDEBUG(D_BUFFS, "calc cksum for kiov 0x%p nkiov %u offset %u nob %u, dump %d\n",
318 kiov, nkiov, offset, nob, dump_blob);
320 /* if this loop changes, please change kgnilnd_setup_phys_buffer */
322 while (offset >= kiov->kiov_len) {
323 offset -= kiov->kiov_len;
329 /* ignore nob here; if nob < (kiov_len - offset), nkiov == 1 */
330 odd = (unsigned long) (kiov[0].kiov_len - offset) & 1;
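/* chaining csum_partial across an odd-length fragment boundary would mis-align
 * the 16-bit sum, so when the first fragment ends on an odd byte (and there is
 * more than one fragment) we take the vmap path below instead */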
332 if ((odd || *kgnilnd_tunables.kgn_vmap_cksum) && nkiov > 1) {
333 struct page **pages = kgnilnd_data.kgn_cksum_map_pages[get_cpu()];
335 LASSERTF(pages != NULL, "NULL pages for cpu %d map_pages 0x%p\n",
336 get_cpu(), kgnilnd_data.kgn_cksum_map_pages);
338 CDEBUG(D_BUFFS, "odd %d len %u offset %u nob %u\n",
339 odd, kiov[0].kiov_len, offset, nob);
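/* gather every page so vmap can give us one virtually contiguous region and the
 * whole payload can be checksummed in a single pass */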
341 for (i = 0; i < nkiov; i++) {
342 pages[i] = kiov[i].kiov_page;
345 addr = vmap(pages, nkiov, VM_MAP, PAGE_KERNEL);
347 CNETERR("Couldn't vmap %d frags on %d bytes to avoid odd length fragment in cksum\n",
349 /* return zero to avoid killing tx - we'll just get warning on console
350 * when remote end sees zero checksum */
353 atomic_inc(&kgnilnd_data.kgn_nvmap_cksum);
355 tmpck = _kgnilnd_cksum(0, (void *) addr + kiov[0].kiov_offset + offset, nob);
359 kgnilnd_dump_blob(D_BUFFS, "flat kiov RDMA payload",
360 (void *)addr + kiov[0].kiov_offset + offset, nob);
362 CDEBUG(D_BUFFS, "cksum 0x%x (+0x%x) for addr 0x%p+%u len %u offset %u\n",
363 cksum, tmpck, addr, kiov[0].kiov_offset, nob, offset);
367 fraglen = min(kiov->kiov_len - offset, nob);
369 /* make dang sure we don't send a bogus checksum if somehow we get
370 * an odd length fragment on anything but the last entry in a kiov -
371 * we know from kgnilnd_setup_rdma_buffer that we can't have non
372 * PAGE_SIZE pages in the middle, so if nob < PAGE_SIZE, it is the last one */
373 LASSERTF(!(fraglen&1) || (nob < PAGE_SIZE),
374 "odd fraglen %u on nkiov %d, nob %u kiov_len %u offset %u kiov 0x%p\n",
375 fraglen, nkiov, nob, kiov->kiov_len, offset, kiov);
377 addr = (void *)kmap(kiov->kiov_page) + kiov->kiov_offset + offset;
378 tmpck = _kgnilnd_cksum(cksum, addr, fraglen);
381 "cksum 0x%x (+0x%x) for page 0x%p+%u (0x%p) len %u offset %u\n",
382 cksum, tmpck, kiov->kiov_page, kiov->kiov_offset, addr,
388 kgnilnd_dump_blob(D_BUFFS, "kiov cksum", addr, fraglen);
390 kunmap(kiov->kiov_page);
397 /* iov must not run out before end of data */
398 LASSERTF(nob == 0 || nkiov > 0, "nob %u nkiov %u\n", nob, nkiov);
403 retsum = csum_fold(cksum);
405 /* don't use magic 'no checksum' value */
409 CDEBUG(D_BUFFS, "retsum 0x%x from cksum 0x%x\n", retsum, cksum);
415 kgnilnd_init_msg(kgn_msg_t *msg, int type, lnet_nid_t source)
417 msg->gnm_magic = GNILND_MSG_MAGIC;
418 msg->gnm_version = GNILND_MSG_VERSION;
419 msg->gnm_type = type;
420 msg->gnm_payload_len = 0;
421 msg->gnm_srcnid = source;
422 /* gnm_connstamp gets set when FMA is sent */
423 /* gnm_srcnid is set on creation via function argument
424 * The right interface/net and nid is passed in when the message
430 kgnilnd_new_tx_msg(int type, lnet_nid_t source)
432 kgn_tx_t *tx = kgnilnd_alloc_tx();
435 kgnilnd_init_msg(&tx->tx_msg, type, source);
437 CERROR("couldn't allocate new tx type %s!\n",
438 kgnilnd_msgtype2str(type));
445 kgnilnd_nak_rdma(kgn_conn_t *conn, int rx_type, int error, __u64 cookie, lnet_nid_t source) {
451 case GNILND_MSG_GET_REQ:
452 case GNILND_MSG_GET_DONE:
453 nak_type = GNILND_MSG_GET_NAK;
455 case GNILND_MSG_PUT_REQ:
456 case GNILND_MSG_PUT_ACK:
457 case GNILND_MSG_PUT_DONE:
458 nak_type = GNILND_MSG_PUT_NAK;
460 case GNILND_MSG_PUT_REQ_REV:
461 case GNILND_MSG_PUT_DONE_REV:
462 nak_type = GNILND_MSG_PUT_NAK_REV;
464 case GNILND_MSG_GET_REQ_REV:
465 case GNILND_MSG_GET_ACK_REV:
466 case GNILND_MSG_GET_DONE_REV:
467 nak_type = GNILND_MSG_GET_NAK_REV;
470 CERROR("invalid msg type %s (%d)\n",
471 kgnilnd_msgtype2str(rx_type), rx_type);
474 /* only allow NAK on error and truncate to zero */
475 LASSERTF(error <= 0, "error %d conn 0x%p, cookie "LPU64"\n",
476 error, conn, cookie);
478 tx = kgnilnd_new_tx_msg(nak_type, source);
480 CNETERR("can't get TX to NAK RDMA to %s\n",
481 libcfs_nid2str(conn->gnc_peer->gnp_nid));
485 tx->tx_msg.gnm_u.completion.gncm_retval = error;
486 tx->tx_msg.gnm_u.completion.gncm_cookie = cookie;
487 kgnilnd_queue_tx(conn, tx);
491 kgnilnd_setup_immediate_buffer(kgn_tx_t *tx, unsigned int niov, struct iovec *iov,
492 lnet_kiov_t *kiov, unsigned int offset, unsigned int nob)
495 kgn_msg_t *msg = &tx->tx_msg;
498 /* To help save on MDDs for short messages, we'll vmap a kiov to allow
499 * gni_smsg_send to send that as the payload */
501 LASSERT(tx->tx_buftype == GNILND_BUF_NONE);
504 tx->tx_buffer = NULL;
505 } else if (kiov != NULL) {
506 LASSERTF(niov > 0 && niov < GNILND_MAX_IMMEDIATE/PAGE_SIZE,
507 "bad niov %d\n", niov);
509 while (offset >= kiov->kiov_len) {
510 offset -= kiov->kiov_len;
515 for (i = 0; i < niov; i++) {
516 /* We can't have a kiov_offset on anything but the first entry,
517 * otherwise we'll have a hole at the end of the mapping as we only map
519 * Also, if we have a kiov_len < PAGE_SIZE but we need to map more
520 * than kiov_len, we will also have a hole at the end of that page
521 * which isn't allowed */
522 if ((kiov[i].kiov_offset != 0 && i > 0) ||
523 (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_SIZE && i < niov - 1)) {
524 CNETERR("Can't make payload contiguous in I/O VM:"
525 " page %d, offset %u, nob %u, kiov_offset %u kiov_len %u\n",
526 i, offset, nob, kiov->kiov_offset, kiov->kiov_len);
529 tx->tx_imm_pages[i] = kiov[i].kiov_page;
532 /* hijack tx_phys for the later unmap */
534 /* tx->tx_phys being equal to NULL is the signal for unmap to discern between kmap and vmap */
536 tx->tx_buffer = (void *)kmap(tx->tx_imm_pages[0]) + kiov[0].kiov_offset + offset;
537 atomic_inc(&kgnilnd_data.kgn_nkmap_short);
538 GNIDBG_TX(D_NET, tx, "kmapped page for %d bytes for kiov 0x%p, buffer 0x%p",
539 nob, kiov, tx->tx_buffer);
541 tx->tx_phys = vmap(tx->tx_imm_pages, niov, VM_MAP, PAGE_KERNEL);
542 if (tx->tx_phys == NULL) {
543 CNETERR("Couldn't vmap %d frags on %d bytes\n", niov, nob);
547 atomic_inc(&kgnilnd_data.kgn_nvmap_short);
548 /* make sure we take into account the kiov offset as the start of the buffer */
549 tx->tx_buffer = (void *)tx->tx_phys + kiov[0].kiov_offset + offset;
550 GNIDBG_TX(D_NET, tx, "mapped %d pages for %d bytes from kiov 0x%p to 0x%p, buffer 0x%p",
551 niov, nob, kiov, tx->tx_phys, tx->tx_buffer);
553 tx->tx_buftype = GNILND_BUF_IMMEDIATE_KIOV;
557 /* For now this is almost identical to kgnilnd_setup_virt_buffer, but we
558 * could "flatten" the payload into a single contiguous buffer ready
559 * for sending directly over an FMA if we ever needed to. */
563 while (offset >= iov->iov_len) {
564 offset -= iov->iov_len;
570 if (nob > iov->iov_len - offset) {
571 CERROR("Can't handle multiple vaddr fragments\n");
575 tx->tx_buffer = (void *)(((unsigned long)iov->iov_base) + offset);
577 tx->tx_buftype = GNILND_BUF_IMMEDIATE;
581 /* checksum payload early - it shouldn't be changing after lnd_send */
582 if (*kgnilnd_tunables.kgn_checksum >= 2) {
583 msg->gnm_payload_cksum = kgnilnd_cksum(tx->tx_buffer, nob);
584 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_SMSG_CKSUM2)) {
585 msg->gnm_payload_cksum += 0xe00e;
587 if (*kgnilnd_tunables.kgn_checksum_dump > 1) {
588 kgnilnd_dump_blob(D_BUFFS, "payload checksum",
592 msg->gnm_payload_cksum = 0;
599 kgnilnd_setup_virt_buffer(kgn_tx_t *tx,
600 unsigned int niov, struct iovec *iov,
601 unsigned int offset, unsigned int nob)
606 LASSERT(tx->tx_buftype == GNILND_BUF_NONE);
608 while (offset >= iov->iov_len) {
609 offset -= iov->iov_len;
615 if (nob > iov->iov_len - offset) {
616 CERROR("Can't handle multiple vaddr fragments\n");
620 tx->tx_buftype = GNILND_BUF_VIRT_UNMAPPED;
622 tx->tx_buffer = (void *)(((unsigned long)iov->iov_base) + offset);
627 kgnilnd_setup_phys_buffer(kgn_tx_t *tx, int nkiov, lnet_kiov_t *kiov,
628 unsigned int offset, unsigned int nob)
630 gni_mem_segment_t *phys;
632 unsigned int fraglen;
634 GNIDBG_TX(D_NET, tx, "niov %d kiov 0x%p offset %u nob %u", nkiov, kiov, offset, nob);
638 LASSERT(tx->tx_buftype == GNILND_BUF_NONE);
640 /* only allocate this if we are going to use it */
641 tx->tx_phys = kmem_cache_alloc(kgnilnd_data.kgn_tx_phys_cache,
643 if (tx->tx_phys == NULL) {
644 CERROR("failed to allocate tx_phys\n");
649 CDEBUG(D_MALLOC, "slab-alloced 'tx->tx_phys': %lu at %p.\n",
650 LNET_MAX_IOV * sizeof(gni_mem_segment_t), tx->tx_phys);
652 /* if this loop changes, please change kgnilnd_cksum_kiov
653 * and kgnilnd_setup_immediate_buffer */
655 while (offset >= kiov->kiov_len) {
656 offset -= kiov->kiov_len;
662 /* at this point, kiov points to the first page that we'll actually map
663 * now that we've seeked into the kiov for offset and dropped any
664 * leading pages that fall entirely within the offset */
665 tx->tx_buftype = GNILND_BUF_PHYS_UNMAPPED;
668 /* kiov_offset is start of 'valid' buffer, so index offset past that */
669 tx->tx_buffer = (void *)((unsigned long)(kiov->kiov_offset + offset));
672 CDEBUG(D_NET, "tx 0x%p buffer 0x%p map start kiov 0x%p+%u niov %d offset %u\n",
673 tx, tx->tx_buffer, kiov, kiov->kiov_offset, nkiov, offset);
676 fraglen = min(kiov->kiov_len - offset, nob);
678 /* We can't have a kiov_offset on anything but the first entry,
679 * otherwise we'll have a hole at the end of the mapping as we only map
680 * whole pages. Only the first page is allowed to have an offset -
681 * we'll add that into tx->tx_buffer and that will get used when we
682 * map in the segments (see kgnilnd_map_buffer).
683 * Also, if we have a kiov_len < PAGE_SIZE but we need to map more
684 * than kiov_len, we will also have a hole at the end of that page
685 * which isn't allowed */
686 if ((phys != tx->tx_phys) &&
687 ((kiov->kiov_offset != 0) ||
688 ((kiov->kiov_len < PAGE_SIZE) && (nob > kiov->kiov_len)))) {
689 CERROR("Can't make payload contiguous in I/O VM:"
690 " page %d, offset %u, nob %u, kiov_offset %u kiov_len %u\n",
691 (int)(phys - tx->tx_phys),
692 offset, nob, kiov->kiov_offset, kiov->kiov_len);
697 if ((phys - tx->tx_phys) == LNET_MAX_IOV) {
698 CERROR("payload too big (%d)\n", (int)(phys - tx->tx_phys));
703 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_PHYS_SETUP)) {
708 CDEBUG(D_BUFFS, "page 0x%p kiov_offset %u kiov_len %u nob %u "
709 "nkiov %u offset %u\n",
710 kiov->kiov_page, kiov->kiov_offset, kiov->kiov_len, nob, nkiov, offset);
712 phys->address = lnet_page2phys(kiov->kiov_page);
719 /* iov must not run out before end of data */
720 LASSERTF(nob == 0 || nkiov > 0, "nob %u nkiov %u\n", nob, nkiov);
724 tx->tx_phys_npages = phys - tx->tx_phys;
729 if (tx->tx_phys != NULL) {
730 kmem_cache_free(kgnilnd_data.kgn_tx_phys_cache, tx->tx_phys);
731 CDEBUG(D_MALLOC, "slab-freed 'tx_phys': %lu at %p.\n",
732 sizeof(*tx->tx_phys), tx->tx_phys);
739 kgnilnd_setup_rdma_buffer(kgn_tx_t *tx, unsigned int niov,
740 struct iovec *iov, lnet_kiov_t *kiov,
741 unsigned int offset, unsigned int nob)
745 LASSERTF((iov == NULL) != (kiov == NULL), "iov 0x%p, kiov 0x%p, tx 0x%p,"
746 " offset %d, nob %d, niov %d\n"
747 , iov, kiov, tx, offset, nob, niov);
750 rc = kgnilnd_setup_phys_buffer(tx, niov, kiov, offset, nob);
752 rc = kgnilnd_setup_virt_buffer(tx, niov, iov, offset, nob);
757 /* kgnilnd_parse_lnet_rdma()
758 * lntmsg - message passed in from lnet.
759 * niov, kiov, offset - see lnd_t in lib-types.h for descriptions.
760 * nob - actual number of bytes in this message.
761 * put_len - It is possible for PUTs to have a different length than the
762 * length stored in lntmsg->msg_len since LNET can adjust this
763 * length based on its buffer size and offset.
764 * lnet_try_match_md() sets the mlength that we use to do the RDMA
768 kgnilnd_parse_lnet_rdma(lnet_msg_t *lntmsg, unsigned int *niov,
769 unsigned int *offset, unsigned int *nob,
770 lnet_kiov_t **kiov, int put_len)
772 /* GETs are weird, see kgnilnd_send */
773 if (lntmsg->msg_type == LNET_MSG_GET) {
774 if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0) {
777 *kiov = lntmsg->msg_md->md_iov.kiov;
779 *niov = lntmsg->msg_md->md_niov;
780 *nob = lntmsg->msg_md->md_length;
783 *kiov = lntmsg->msg_kiov;
784 *niov = lntmsg->msg_niov;
786 *offset = lntmsg->msg_offset;
791 kgnilnd_compute_rdma_cksum(kgn_tx_t *tx, int put_len)
793 unsigned int niov, offset, nob;
795 lnet_msg_t *lntmsg = tx->tx_lntmsg[0];
796 int dump_cksum = (*kgnilnd_tunables.kgn_checksum_dump > 1);
798 GNITX_ASSERTF(tx, ((tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE) ||
799 (tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE) ||
800 (tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE_REV) ||
801 (tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE_REV) ||
802 (tx->tx_msg.gnm_type == GNILND_MSG_GET_ACK_REV) ||
803 (tx->tx_msg.gnm_type == GNILND_MSG_PUT_REQ_REV)),
804 "bad type %s", kgnilnd_msgtype2str(tx->tx_msg.gnm_type));
806 if ((tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE_REV) ||
807 (tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE_REV)) {
808 tx->tx_msg.gnm_payload_cksum = 0;
811 if (*kgnilnd_tunables.kgn_checksum < 3) {
812 tx->tx_msg.gnm_payload_cksum = 0;
816 GNITX_ASSERTF(tx, lntmsg, "no LNet message!", NULL);
818 kgnilnd_parse_lnet_rdma(lntmsg, &niov, &offset, &nob, &kiov,
822 tx->tx_msg.gnm_payload_cksum = kgnilnd_cksum_kiov(niov, kiov, offset, nob, dump_cksum);
824 tx->tx_msg.gnm_payload_cksum = kgnilnd_cksum(tx->tx_buffer, nob);
826 kgnilnd_dump_blob(D_BUFFS, "peer RDMA payload", tx->tx_buffer, nob);
830 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_SMSG_CKSUM3)) {
831 tx->tx_msg.gnm_payload_cksum += 0xd00d;
835 /* kgnilnd_verify_rdma_cksum()
836 * tx - PUT_DONE/GET_DONE matched tx.
837 * rx_cksum - received checksum to compare against.
838 * put_len - see kgnilnd_parse_lnet_rdma comments.
841 kgnilnd_verify_rdma_cksum(kgn_tx_t *tx, __u16 rx_cksum, int put_len)
845 unsigned int niov, offset, nob;
847 lnet_msg_t *lntmsg = tx->tx_lntmsg[0];
848 int dump_on_err = *kgnilnd_tunables.kgn_checksum_dump;
850 /* we can only match certain requests */
851 GNITX_ASSERTF(tx, ((tx->tx_msg.gnm_type == GNILND_MSG_GET_REQ) ||
852 (tx->tx_msg.gnm_type == GNILND_MSG_PUT_ACK) ||
853 (tx->tx_msg.gnm_type == GNILND_MSG_PUT_REQ_REV) ||
854 (tx->tx_msg.gnm_type == GNILND_MSG_GET_ACK_REV) ||
855 (tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE_REV) ||
856 (tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE_REV)),
857 "bad type %s", kgnilnd_msgtype2str(tx->tx_msg.gnm_type));
859 if ((tx->tx_msg.gnm_type == GNILND_MSG_PUT_REQ_REV) ||
860 (tx->tx_msg.gnm_type == GNILND_MSG_GET_ACK_REV)) {
865 if (*kgnilnd_tunables.kgn_checksum >= 3) {
866 GNIDBG_MSG(D_WARNING, &tx->tx_msg,
867 "no RDMA payload checksum when enabled");
872 GNITX_ASSERTF(tx, lntmsg, "no LNet message!", NULL);
874 kgnilnd_parse_lnet_rdma(lntmsg, &niov, &offset, &nob, &kiov, put_len);
877 cksum = kgnilnd_cksum_kiov(niov, kiov, offset, nob, 0);
879 cksum = kgnilnd_cksum(tx->tx_buffer, nob);
882 if (cksum != rx_cksum) {
883 GNIDBG_MSG(D_NETERROR, &tx->tx_msg,
884 "Bad RDMA payload checksum (%x expected %x); "
885 "kiov 0x%p niov %d nob %u offset %u",
886 cksum, rx_cksum, kiov, niov, nob, offset);
887 switch (dump_on_err) {
890 kgnilnd_cksum_kiov(niov, kiov, offset, nob, 1);
892 kgnilnd_dump_blob(D_BUFFS, "RDMA payload",
895 /* fall through to dump log */
897 libcfs_debug_dumplog();
903 /* kgnilnd_check_fma_rx will close conn, kill tx with error */
909 kgnilnd_mem_add_map_list(kgn_device_t *dev, kgn_tx_t *tx)
913 GNITX_ASSERTF(tx, list_empty(&tx->tx_map_list),
914 "already mapped!", NULL);
916 spin_lock(&dev->gnd_map_lock);
917 switch (tx->tx_buftype) {
919 GNIDBG_TX(D_EMERG, tx,
920 "SOFTWARE BUG: invalid mapping %d", tx->tx_buftype);
921 spin_unlock(&dev->gnd_map_lock);
925 case GNILND_BUF_PHYS_MAPPED:
926 bytes = tx->tx_phys_npages * PAGE_SIZE;
927 dev->gnd_map_nphys++;
928 dev->gnd_map_physnop += tx->tx_phys_npages;
931 case GNILND_BUF_VIRT_MAPPED:
933 dev->gnd_map_nvirt++;
934 dev->gnd_map_virtnob += tx->tx_nob;
938 if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_ACK ||
939 tx->tx_msg.gnm_type == GNILND_MSG_GET_REQ) {
940 atomic64_add(bytes, &dev->gnd_rdmaq_bytes_out);
941 GNIDBG_TX(D_NETTRACE, tx, "rdma ++ %d to "LPD64"",
942 bytes, atomic64_read(&dev->gnd_rdmaq_bytes_out));
945 atomic_inc(&dev->gnd_n_mdd);
946 atomic64_add(bytes, &dev->gnd_nbytes_map);
948 /* clear retrans to prevent any SMSG goofiness as that code uses the same counter */
951 /* we only get here in the valid cases */
952 list_add_tail(&tx->tx_map_list, &dev->gnd_map_list);
953 dev->gnd_map_version++;
954 spin_unlock(&dev->gnd_map_lock);
958 kgnilnd_mem_del_map_list(kgn_device_t *dev, kgn_tx_t *tx)
962 GNITX_ASSERTF(tx, !list_empty(&tx->tx_map_list),
963 "not mapped!", NULL);
964 spin_lock(&dev->gnd_map_lock);
966 switch (tx->tx_buftype) {
968 GNIDBG_TX(D_EMERG, tx,
969 "SOFTWARE BUG: invalid mapping %d", tx->tx_buftype);
970 spin_unlock(&dev->gnd_map_lock);
974 case GNILND_BUF_PHYS_UNMAPPED:
975 bytes = tx->tx_phys_npages * PAGE_SIZE;
976 dev->gnd_map_nphys--;
977 dev->gnd_map_physnop -= tx->tx_phys_npages;
980 case GNILND_BUF_VIRT_UNMAPPED:
982 dev->gnd_map_nvirt--;
983 dev->gnd_map_virtnob -= tx->tx_nob;
987 if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_ACK ||
988 tx->tx_msg.gnm_type == GNILND_MSG_GET_REQ) {
989 atomic64_sub(bytes, &dev->gnd_rdmaq_bytes_out);
990 LASSERTF(atomic64_read(&dev->gnd_rdmaq_bytes_out) >= 0,
991 "bytes_out negative! %ld\n", atomic64_read(&dev->gnd_rdmaq_bytes_out));
992 GNIDBG_TX(D_NETTRACE, tx, "rdma -- %d to "LPD64"",
993 bytes, atomic64_read(&dev->gnd_rdmaq_bytes_out));
996 atomic_dec(&dev->gnd_n_mdd);
997 atomic64_sub(bytes, &dev->gnd_nbytes_map);
999 /* we only get here in the valid cases */
1000 list_del_init(&tx->tx_map_list);
1001 dev->gnd_map_version++;
1002 spin_unlock(&dev->gnd_map_lock);
1006 kgnilnd_map_buffer(kgn_tx_t *tx)
1008 kgn_conn_t *conn = tx->tx_conn;
1009 kgn_device_t *dev = conn->gnc_device;
1010 __u32 flags = GNI_MEM_READWRITE;
1013 /* The kgnilnd_mem_register(_segments) Gemini Driver functions can
1014 * be called concurrently as there are internal locks that protect
1015 * any data structures or HW resources. We just need to ensure
1016 * that our concurrency doesn't result in the kgn_device_t
1017 * getting nuked while we are in here */
1019 LASSERTF(conn != NULL, "tx %p with NULL conn, someone forgot"
1020 " to set tx_conn before calling %s\n", tx, __FUNCTION__);
1022 if (unlikely(CFS_FAIL_CHECK(CFS_FAIL_GNI_MAP_TX)))
1025 if (*kgnilnd_tunables.kgn_bte_relaxed_ordering) {
1026 flags |= GNI_MEM_RELAXED_PI_ORDERING;
1029 switch (tx->tx_buftype) {
1033 case GNILND_BUF_NONE:
1034 case GNILND_BUF_IMMEDIATE:
1035 case GNILND_BUF_IMMEDIATE_KIOV:
1036 case GNILND_BUF_PHYS_MAPPED:
1037 case GNILND_BUF_VIRT_MAPPED:
1040 case GNILND_BUF_PHYS_UNMAPPED:
1041 GNITX_ASSERTF(tx, tx->tx_phys != NULL, "physical buffer not there!", NULL);
1042 rrc = kgnilnd_mem_register_segments(dev->gnd_handle,
1043 tx->tx_phys, tx->tx_phys_npages, NULL,
1044 GNI_MEM_PHYS_SEGMENTS | flags,
1046 /* could race with other uses of the map counts, but this is ok
1047 * - this needs to turn into a non-fatal error soon to allow
1048 * GART resource, etc starvation handling */
1049 if (rrc != GNI_RC_SUCCESS) {
1050 GNIDBG_TX(D_NET, tx, "Can't map %d pages: dev %d "
1051 "phys %u pp %u, virt %u nob "LPU64"",
1052 tx->tx_phys_npages, dev->gnd_id,
1053 dev->gnd_map_nphys, dev->gnd_map_physnop,
1054 dev->gnd_map_nvirt, dev->gnd_map_virtnob);
1055 RETURN(rrc == GNI_RC_ERROR_RESOURCE ? -ENOMEM : -EINVAL);
1058 tx->tx_buftype = GNILND_BUF_PHYS_MAPPED;
1059 kgnilnd_mem_add_map_list(dev, tx);
1062 case GNILND_BUF_VIRT_UNMAPPED:
1063 rrc = kgnilnd_mem_register(dev->gnd_handle,
1064 (__u64)tx->tx_buffer, tx->tx_nob,
1065 NULL, flags, &tx->tx_map_key);
1066 if (rrc != GNI_RC_SUCCESS) {
1067 GNIDBG_TX(D_NET, tx, "Can't map %u bytes: dev %d "
1068 "phys %u pp %u, virt %u nob "LPU64"",
1069 tx->tx_nob, dev->gnd_id,
1070 dev->gnd_map_nphys, dev->gnd_map_physnop,
1071 dev->gnd_map_nvirt, dev->gnd_map_virtnob);
1072 RETURN(rrc == GNI_RC_ERROR_RESOURCE ? -ENOMEM : -EINVAL);
1075 tx->tx_buftype = GNILND_BUF_VIRT_MAPPED;
1076 kgnilnd_mem_add_map_list(dev, tx);
1077 if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_ACK ||
1078 tx->tx_msg.gnm_type == GNILND_MSG_GET_REQ) {
1079 atomic64_add(tx->tx_nob, &dev->gnd_rdmaq_bytes_out);
1080 GNIDBG_TX(D_NETTRACE, tx, "rdma ++ %d to %ld\n",
1081 tx->tx_nob, atomic64_read(&dev->gnd_rdmaq_bytes_out));
1089 kgnilnd_add_purgatory_tx(kgn_tx_t *tx)
1091 kgn_conn_t *conn = tx->tx_conn;
1092 kgn_mdd_purgatory_t *gmp;
1094 LIBCFS_ALLOC(gmp, sizeof(*gmp));
1095 LASSERTF(gmp != NULL, "couldn't allocate MDD purgatory member;"
1096 " asserting to avoid data corruption\n");
1097 if (tx->tx_buffer_copy)
1098 gmp->gmp_map_key = tx->tx_buffer_copy_map_key;
1100 gmp->gmp_map_key = tx->tx_map_key;
1102 atomic_inc(&conn->gnc_device->gnd_n_mdd_held);
1104 /* ensure that we don't have a blank purgatory - indicating the
1105 * conn is not already on purgatory lists - we'd never recover these
1106 * MDD if that were the case */
1107 GNITX_ASSERTF(tx, conn->gnc_in_purgatory,
1108 "conn 0x%p->%s with NULL purgatory",
1109 conn, libcfs_nid2str(conn->gnc_peer->gnp_nid));
1111 /* link 'er up! - only place we really need to lock for
1112 * concurrent access */
1113 spin_lock(&conn->gnc_list_lock);
1114 list_add_tail(&gmp->gmp_list, &conn->gnc_mdd_list);
1115 spin_unlock(&conn->gnc_list_lock);
1119 kgnilnd_unmap_buffer(kgn_tx_t *tx, int error)
1123 int hold_timeout = 0;
1125 /* code below relies on +1 relationship ... */
1126 CLASSERT(GNILND_BUF_PHYS_MAPPED == (GNILND_BUF_PHYS_UNMAPPED + 1));
1127 CLASSERT(GNILND_BUF_VIRT_MAPPED == (GNILND_BUF_VIRT_UNMAPPED + 1));
1129 switch (tx->tx_buftype) {
1133 case GNILND_BUF_NONE:
1134 case GNILND_BUF_IMMEDIATE:
1135 case GNILND_BUF_PHYS_UNMAPPED:
1136 case GNILND_BUF_VIRT_UNMAPPED:
1138 case GNILND_BUF_IMMEDIATE_KIOV:
1139 if (tx->tx_phys != NULL) {
1140 vunmap(tx->tx_phys);
1141 } else if (tx->tx_phys == NULL && tx->tx_buffer != NULL) {
1142 kunmap(tx->tx_imm_pages[0]);
1144 /* clear to prevent kgnilnd_free_tx from thinking
1145 * this is an RDMA descriptor */
1149 case GNILND_BUF_PHYS_MAPPED:
1150 case GNILND_BUF_VIRT_MAPPED:
1151 LASSERT(tx->tx_conn != NULL);
1153 dev = tx->tx_conn->gnc_device;
1155 /* only want to hold if we are closing conn without
1156 * verified peer notification - the theory is that
1157 * a TX error can be communicated in all other cases */
1158 if (tx->tx_conn->gnc_state != GNILND_CONN_ESTABLISHED &&
1159 kgnilnd_check_purgatory_conn(tx->tx_conn)) {
1160 kgnilnd_add_purgatory_tx(tx);
1162 /* The timeout we give to kgni is a deadman stop only.
1163 * We are setting it high to ensure we don't have the kgni timer
1164 * fire before ours fires _and_ is handled */
1165 hold_timeout = GNILND_TIMEOUT2DEADMAN;
1167 GNIDBG_TX(D_NET, tx,
1168 "dev %p delaying MDD release for %dms key "LPX64"."LPX64"",
1169 tx->tx_conn->gnc_device, hold_timeout,
1170 tx->tx_map_key.qword1, tx->tx_map_key.qword2);
1172 if (tx->tx_buffer_copy != NULL) {
1173 rrc = kgnilnd_mem_deregister(dev->gnd_handle, &tx->tx_buffer_copy_map_key, hold_timeout);
1174 LASSERTF(rrc == GNI_RC_SUCCESS, "rrc %d\n", rrc);
1175 rrc = kgnilnd_mem_deregister(dev->gnd_handle, &tx->tx_map_key, 0);
1176 LASSERTF(rrc == GNI_RC_SUCCESS, "rrc %d\n", rrc);
1178 rrc = kgnilnd_mem_deregister(dev->gnd_handle, &tx->tx_map_key, hold_timeout);
1179 LASSERTF(rrc == GNI_RC_SUCCESS, "rrc %d\n", rrc);
1183 kgnilnd_mem_del_map_list(dev, tx);
1189 kgnilnd_tx_done(kgn_tx_t *tx, int completion)
1191 lnet_msg_t *lntmsg0, *lntmsg1;
1192 int status0, status1;
1193 lnet_ni_t *ni = NULL;
1194 kgn_conn_t *conn = tx->tx_conn;
1196 LASSERT(!in_interrupt());
1198 lntmsg0 = tx->tx_lntmsg[0]; tx->tx_lntmsg[0] = NULL;
1199 lntmsg1 = tx->tx_lntmsg[1]; tx->tx_lntmsg[1] = NULL;
1202 !(tx->tx_state & GNILND_TX_QUIET_ERROR) &&
1203 !kgnilnd_conn_clean_errno(completion)) {
1204 GNIDBG_TOMSG(D_NETERROR, &tx->tx_msg,
1205 "error %d on tx 0x%p->%s id %u/%d state %s age %ds",
1206 completion, tx, conn ?
1207 libcfs_nid2str(conn->gnc_peer->gnp_nid) : "<?>",
1208 tx->tx_id.txe_smsg_id, tx->tx_id.txe_idx,
1209 kgnilnd_tx_state2str(tx->tx_list_state),
1210 cfs_duration_sec((unsigned long)jiffies - tx->tx_qtime));
1213 /* The error codes determine if we hold onto the MDD */
1214 kgnilnd_unmap_buffer(tx, completion);
1216 /* we have to deliver a reply on lntmsg[1] for the GET, so make sure
1217 * we play nice with the error codes to avoid delivering a failed
1218 * REQUEST and then a REPLY event as well */
1220 /* return -EIO to lnet - it is the magic value for failed sends */
1221 if (tx->tx_msg.gnm_type == GNILND_MSG_GET_REQ) {
1223 status1 = completion;
1225 status0 = status1 = completion;
1228 tx->tx_buftype = GNILND_BUF_NONE;
1229 tx->tx_msg.gnm_type = GNILND_MSG_NONE;
1231 /* lnet_finalize doesn't do anything with the *ni, so it's ok for us to
1232 * set NULL when we are a tx without a conn */
1234 ni = conn->gnc_peer->gnp_net->gnn_ni;
1236 spin_lock(&conn->gnc_tx_lock);
1238 LASSERTF(test_and_clear_bit(tx->tx_id.txe_idx,
1239 (volatile unsigned long *)&conn->gnc_tx_bits),
1240 "conn %p tx %p bit %d already cleared\n",
1241 conn, tx, tx->tx_id.txe_idx);
1243 LASSERTF(conn->gnc_tx_ref_table[tx->tx_id.txe_idx] != NULL,
1244 "msg_id %d already NULL\n", tx->tx_id.txe_idx);
1246 conn->gnc_tx_ref_table[tx->tx_id.txe_idx] = NULL;
1247 spin_unlock(&conn->gnc_tx_lock);
1250 kgnilnd_free_tx(tx);
1252 /* finalize AFTER freeing lnet msgs */
1254 /* warning - we should hold no locks here - calling lnet_finalize
1255 * could free up lnet credits, resulting in a call chain back into
1256 * the LND via kgnilnd_send and friends */
1258 lnet_finalize(ni, lntmsg0, status0);
1260 if (lntmsg1 != NULL) {
1261 lnet_finalize(ni, lntmsg1, status1);
1266 kgnilnd_txlist_done(struct list_head *txlist, int error)
1269 int err_printed = 0;
1271 if (list_empty(txlist))
1274 list_for_each_entry_safe(tx, txn, txlist, tx_list) {
1275 /* only print the first error */
1277 tx->tx_state |= GNILND_TX_QUIET_ERROR;
1278 list_del_init(&tx->tx_list);
1279 kgnilnd_tx_done(tx, error);
1284 kgnilnd_set_tx_id(kgn_tx_t *tx, kgn_conn_t *conn)
1288 spin_lock(&conn->gnc_tx_lock);
1290 /* ID zero is NOT ALLOWED!!! */
1293 id = find_next_zero_bit((unsigned long *)&conn->gnc_tx_bits,
1294 GNILND_MAX_MSG_ID, conn->gnc_next_tx);
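/* find_next_zero_bit returns the size argument (GNILND_MAX_MSG_ID) when no free
 * bit exists between gnc_next_tx and the end of the bitmap */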
1295 if (id == GNILND_MAX_MSG_ID) {
1296 if (conn->gnc_next_tx != 1) {
1297 /* we only searched from next_tx to end and didn't find
1298 * one, so search again from start */
1299 conn->gnc_next_tx = 1;
1302 /* couldn't find one! */
1303 spin_unlock(&conn->gnc_tx_lock);
1307 /* bump next_tx to prevent immediate reuse */
1308 conn->gnc_next_tx = id + 1;
1310 set_bit(id, (volatile unsigned long *)&conn->gnc_tx_bits);
1311 LASSERTF(conn->gnc_tx_ref_table[id] == NULL,
1312 "tx 0x%p already at id %d\n",
1313 conn->gnc_tx_ref_table[id], id);
1315 /* delay these until we have a valid ID - prevents bad clear of the bit
1316 * in kgnilnd_tx_done */
1318 tx->tx_id.txe_cqid = conn->gnc_cqid;
1320 tx->tx_id.txe_idx = id;
1321 conn->gnc_tx_ref_table[id] = tx;
1323 /* Using jiffies to help differentiate against TX reuse - with
1324 * the usual minimum of a 250HZ clock, we wrap jiffies on the same TX
1325 * if we are sending to the same node faster than 256000/sec.
1326 * To help guard against this, we OR in the tx_seq - that is 32 bits */
1328 tx->tx_id.txe_chips = (__u32)(jiffies | conn->gnc_tx_seq);
1330 GNIDBG_TX(D_NET, tx, "set cookie/id/bits", NULL);
1332 spin_unlock(&conn->gnc_tx_lock);
1337 kgnilnd_tx_should_retry(kgn_conn_t *conn, kgn_tx_t *tx)
1339 int max_retrans = *kgnilnd_tunables.kgn_max_retransmits;
1341 int log_retrans_level;
1343 /* I need kgni credits to send this. Replace tx at the head of the
1344 * fmaq and I'll get rescheduled when credits appear */
1347 conn->gnc_tx_retrans++;
1348 log_retrans = ((tx->tx_retrans < 25) || ((tx->tx_retrans % 25) == 0) ||
1349 (tx->tx_retrans > (max_retrans / 2)));
1350 log_retrans_level = tx->tx_retrans < (max_retrans / 2) ? D_NET : D_NETERROR;
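/* log the first 25 retransmits, then every 25th, and every one past half of
 * max_retrans; the level escalates to D_NETERROR once we reach the halfway mark */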
1352 /* Decision time - either error, warn or just retransmit */
1354 /* we don't care about TX timeout - it could be that the network is slower
1355 * or throttled. We'll keep retransmitting - so if the network is so slow
1356 * that we fill up our mailbox, we'll keep trying to resend that msg
1357 * until we exceed the max_retrans _or_ gnc_last_rx expires, indicating
1358 * that he hasn't sent us any traffic in return */
1360 if (tx->tx_retrans > max_retrans) {
1361 /* this means we are not backing off the retransmits
1362 * in a healthy manner and are likely chewing up the
1363 * CPU cycles quite badly */
1364 GNIDBG_TOMSG(D_ERROR, &tx->tx_msg,
1365 "SOFTWARE BUG: too many retransmits (%d) for tx id %x "
1367 tx->tx_retrans, tx->tx_id, conn,
1368 libcfs_nid2str(conn->gnc_peer->gnp_nid));
1370 /* yes - double errors to help debug this condition */
1371 GNIDBG_TOMSG(D_NETERROR, &tx->tx_msg, "connection dead. "
1372 "unable to send to %s for %lu secs (%d tries)",
1373 libcfs_nid2str(tx->tx_conn->gnc_peer->gnp_nid),
1374 cfs_duration_sec(jiffies - tx->tx_cred_wait),
1377 kgnilnd_close_conn(conn, -ETIMEDOUT);
1379 /* caller should terminate */
1382 /* some reasonable throttling of the debug message */
1384 unsigned long now = jiffies;
1385 /* XXX Nic: Mystical TX debug here... */
1386 GNIDBG_SMSG_CREDS(log_retrans_level, conn);
1387 GNIDBG_TOMSG(log_retrans_level, &tx->tx_msg,
1388 "NOT_DONE on conn 0x%p->%s id %x retrans %d wait %dus"
1389 " last_msg %uus/%uus last_cq %uus/%uus",
1390 conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
1391 tx->tx_id, tx->tx_retrans,
1392 jiffies_to_usecs(now - tx->tx_cred_wait),
1393 jiffies_to_usecs(now - conn->gnc_last_tx),
1394 jiffies_to_usecs(now - conn->gnc_last_rx),
1395 jiffies_to_usecs(now - conn->gnc_last_tx_cq),
1396 jiffies_to_usecs(now - conn->gnc_last_rx_cq));
1398 /* caller should retry */
1403 /* caller must be holding gnd_cq_mutex and must not unlock it afterwards, as we drop it here
1404 * to avoid bad ordering with state_lock */
1407 kgnilnd_sendmsg_nolock(kgn_tx_t *tx, void *immediate, unsigned int immediatenob,
1408 spinlock_t *state_lock, kgn_tx_list_state_t state)
1410 kgn_conn_t *conn = tx->tx_conn;
1411 kgn_msg_t *msg = &tx->tx_msg;
1414 unsigned long newest_last_rx, timeout;
1417 LASSERTF((msg->gnm_type == GNILND_MSG_IMMEDIATE) ?
1418 immediatenob <= *kgnilnd_tunables.kgn_max_immediate :
1420 "msg 0x%p type %d wrong payload size %d\n",
1421 msg, msg->gnm_type, immediatenob);
1423 /* make sure we catch all the cases where we'd send on a dirty old mbox
1424 * but allow case for sending CLOSE. Since this check is within the CQ
1425 * mutex barrier and the close message is only sent through
1426 * kgnilnd_send_conn_close the last message out the door will be the
1429 if (atomic_read(&conn->gnc_peer->gnp_dirty_eps) != 0 && msg->gnm_type != GNILND_MSG_CLOSE) {
1430 mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
1431 /* Return -ETIME, we are closing the connection already so we don't want to
1432 * have this tx hit the wire. The tx will be killed by the calling function.
1433 * Once the EP is marked dirty the close message will be the last
1434 * thing to hit the wire */
1439 timeout = cfs_time_seconds(conn->gnc_timeout);
1441 newest_last_rx = GNILND_LASTRX(conn);
1443 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_SEND_TIMEOUT)) {
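/* push 'now' past the RX deadline so the timeout check below trips under this fail_loc */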
1444 now = now + (GNILND_TIMEOUTRX(timeout) * 2);
1447 if (time_after_eq(now, newest_last_rx + GNILND_TIMEOUTRX(timeout))) {
1448 GNIDBG_CONN(D_NETERROR|D_CONSOLE, conn, "Can't send to %s after timeout lapse of %lu; TO %lu",
1449 libcfs_nid2str(conn->gnc_peer->gnp_nid),
1450 cfs_duration_sec(now - newest_last_rx),
1451 cfs_duration_sec(GNILND_TIMEOUTRX(timeout)));
1452 mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
1456 GNITX_ASSERTF(tx, (conn != NULL) && (tx->tx_id.txe_idx != 0), "tx id unset!", NULL);
1457 /* msg->gnm_srcnid is set when the message is initialized by whatever function is
1458 * creating the message. This allows the message to contain the correct LNET NID/NET needed
1459 * instead of the one that the peer/conn uses for sending the data.
1461 msg->gnm_connstamp = conn->gnc_my_connstamp;
1462 msg->gnm_payload_len = immediatenob;
1463 msg->gnm_seq = conn->gnc_tx_seq;
1465 /* always init here - kgn_checksum is a /sys module tunable
1466 * and can be flipped at any point, even between msg init and sending */
1468 if (*kgnilnd_tunables.kgn_checksum) {
1469 /* We must set it here and not in kgnilnd_init_msg,
1470 * since we could resend this msg many times
1471 * (NOT_DONE from gni_smsg_send below) and wouldn't pass
1472 * through init_msg again */
1473 msg->gnm_cksum = kgnilnd_cksum(msg, sizeof(kgn_msg_t));
1474 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_SMSG_CKSUM1)) {
1475 msg->gnm_cksum += 0xf00f;
1479 GNIDBG_TOMSG(D_NET, msg, "tx 0x%p conn 0x%p->%s sending SMSG sz %u id %x/%d [%p for %u]",
1480 tx, conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
1481 sizeof(kgn_msg_t), tx->tx_id.txe_smsg_id,
1482 tx->tx_id.txe_idx, immediate, immediatenob);
1484 if (unlikely(tx->tx_state & GNILND_TX_FAIL_SMSG)) {
1485 rrc = cfs_fail_val ? cfs_fail_val : GNI_RC_NOT_DONE;
1487 rrc = kgnilnd_smsg_send(conn->gnc_ephandle,
1488 msg, sizeof(*msg), immediate, immediatenob,
1489 tx->tx_id.txe_smsg_id);
1493 case GNI_RC_SUCCESS:
1495 conn->gnc_last_tx = jiffies;
1496 /* no locking here as LIVE isn't a list */
1497 kgnilnd_tx_add_state_locked(tx, NULL, conn, GNILND_TX_LIVE_FMAQ, 1);
1499 /* this needs to be checked under lock as it might be freed from a completion
1502 if (msg->gnm_type == GNILND_MSG_NOOP) {
1503 set_mb(conn->gnc_last_noop_sent, jiffies);
1506 /* serialize with seeing CQ events for completion on this, as well as
1508 mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
1510 atomic_inc(&conn->gnc_device->gnd_short_ntx);
1511 atomic64_add(immediatenob, &conn->gnc_device->gnd_short_txbytes);
1512 kgnilnd_peer_alive(conn->gnc_peer);
1513 GNIDBG_SMSG_CREDS(D_NET, conn);
1516 case GNI_RC_NOT_DONE:
1517 /* XXX Nic: We need to figure out how to track this
1518 * - there are bound to be good reasons for it,
1519 * but we want to know when it happens */
1521 mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
1522 /* We'll handle this error inline - makes the calling logic much more
1525 /* If no lock, caller doesn't want us to retry */
1526 if (state_lock == NULL) {
1530 retry_send = kgnilnd_tx_should_retry(conn, tx);
1532 /* add to head of list for the state and retries */
1533 spin_lock(state_lock);
1534 kgnilnd_tx_add_state_locked(tx, conn->gnc_peer, conn, state, 0);
1535 spin_unlock(state_lock);
1537 /* We only reschedule for a certain number of retries, then
1538 * we will wait for the CQ events indicating a release of SMSG
1540 if (tx->tx_retrans < (*kgnilnd_tunables.kgn_max_retransmits/4)) {
1541 kgnilnd_schedule_conn(conn);
1544 /* CQ event coming in signifies either TX completed or
1545 * RX receive. Either of these *could* free up credits
1546 * in the SMSG mbox and we should try sending again */
1547 GNIDBG_TX(D_NET, tx, "waiting for CQID %u event to resend",
1548 tx->tx_conn->gnc_cqid);
1549 /* use +ve return code to let upper layers know they
1550 * should stop looping on sends */
1557 /* handle bad retcode gracefully */
1558 mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
1563 /* kgnilnd_sendmsg has hard wait on gnd_cq_mutex */
1565 kgnilnd_sendmsg(kgn_tx_t *tx, void *immediate, unsigned int immediatenob,
1566 spinlock_t *state_lock, kgn_tx_list_state_t state)
1568 kgn_device_t *dev = tx->tx_conn->gnc_device;
1569 unsigned long timestamp;
1572 timestamp = jiffies;
1573 mutex_lock(&dev->gnd_cq_mutex);
1574 /* delay in jiffies - we are really concerned only with things that
1575 * result in a schedule() or really holding this off for long times.
1576 * NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
1577 dev->gnd_mutex_delay += (long) jiffies - timestamp;
1579 rc = kgnilnd_sendmsg_nolock(tx, immediate, immediatenob, state_lock, state);
1585 /* returns -EAGAIN for lock miss, anything else < 0 is hard error, >=0 for success */
1587 kgnilnd_sendmsg_trylock(kgn_tx_t *tx, void *immediate, unsigned int immediatenob,
1588 spinlock_t *state_lock, kgn_tx_list_state_t state)
1590 kgn_conn_t *conn = tx->tx_conn;
1591 kgn_device_t *dev = conn->gnc_device;
1592 unsigned long timestamp;
1595 timestamp = jiffies;
1597 /* technically we are doing bad things with the read_lock on the peer_conn
1598 * table, but we shouldn't be sleeping inside here - and we don't sleep/block
1599 * for the mutex. I bet lockdep is gonna flag this one though... */
1601 /* there are a few cases where we don't want the immediate send - like
1602 * when we are in the scheduler thread and it'd harm the latency of
1603 * getting messages up to LNet */
1605 /* rmb for gnd_ready */
1607 if (conn->gnc_device->gnd_ready == GNILND_DEV_LOOP) {
1609 atomic_inc(&conn->gnc_device->gnd_fast_block);
1610 } else if (unlikely(kgnilnd_data.kgn_quiesce_trigger)) {
1611 /* don't hit HW during quiesce */
1613 } else if (unlikely(atomic_read(&conn->gnc_peer->gnp_dirty_eps))) {
1614 /* don't hit HW if stale EPs and conns are left to close */
1617 atomic_inc(&conn->gnc_device->gnd_fast_try);
1618 rc = mutex_trylock(&conn->gnc_device->gnd_cq_mutex);
1623 /* we got the mutex and weren't blocked */
1625 /* delay in jiffies - we are really concerned only with things that
1626 * result in a schedule() or really holding this off for long times.
1627 * NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
1628 dev->gnd_mutex_delay += (long) jiffies - timestamp;
1630 atomic_inc(&conn->gnc_device->gnd_fast_ok);
1631 tx->tx_qtime = jiffies;
1632 tx->tx_state = GNILND_TX_WAITING_COMPLETION;
1633 rc = kgnilnd_sendmsg_nolock(tx, tx->tx_buffer, tx->tx_nob, &conn->gnc_list_lock, GNILND_TX_FMAQ);
1634 /* _nolock unlocks the mutex for us */
1640 /* lets us know if we can push this RDMA through now */
1642 kgnilnd_auth_rdma_bytes(kgn_device_t *dev, kgn_tx_t *tx)
1646 bytes_left = atomic64_sub_return(tx->tx_nob, &dev->gnd_rdmaq_bytes_ok);
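/* speculatively charge the whole tx against the RDMA byte budget; if that drove
 * the counter negative there wasn't room, so back the bytes out below and stall */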
1648 if (bytes_left < 0) {
1649 atomic64_add(tx->tx_nob, &dev->gnd_rdmaq_bytes_ok);
1650 atomic_inc(&dev->gnd_rdmaq_nstalls);
1653 CDEBUG(D_NET, "no bytes to send, turning on timer for %lu\n",
1654 dev->gnd_rdmaq_deadline);
1655 mod_timer(&dev->gnd_rdmaq_timer, dev->gnd_rdmaq_deadline);
1656 /* we never del this timer - at worst it schedules us.. */
1663 /* this adds a TX to the queue pending throttling authorization before
1664 * we allow our remote peer to launch a PUT at us */
1666 kgnilnd_queue_rdma(kgn_conn_t *conn, kgn_tx_t *tx)
1670 /* we cannot go into send_mapped_tx from here as we are holding locks
1671 * and mem registration might end up allocating memory in kgni.
1672 * That said, we'll push this as far as we can into the queue process */
1673 rc = kgnilnd_auth_rdma_bytes(conn->gnc_device, tx);
1676 spin_lock(&conn->gnc_device->gnd_rdmaq_lock);
1677 kgnilnd_tx_add_state_locked(tx, NULL, conn, GNILND_TX_RDMAQ, 0);
1678 /* lets us know how delayed RDMA is */
1679 tx->tx_qtime = jiffies;
1680 spin_unlock(&conn->gnc_device->gnd_rdmaq_lock);
1682 /* we have RDMA authorized, now it just needs a MDD and to hit the wire */
1683 spin_lock(&tx->tx_conn->gnc_device->gnd_lock);
1684 kgnilnd_tx_add_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_MAPQ, 0);
1685 /* lets us know how delayed mapping is */
1686 tx->tx_qtime = jiffies;
1687 spin_unlock(&tx->tx_conn->gnc_device->gnd_lock);
1690 /* make sure we wake up sched to run this */
1691 kgnilnd_schedule_device(tx->tx_conn->gnc_device);
1694 /* push TX through state machine */
1696 kgnilnd_queue_tx(kgn_conn_t *conn, kgn_tx_t *tx)
1701 /* set the tx_id here, we delay it until we have an actual conn
1703 * in some cases, the tx_id is already set to provide for things
1704 * like RDMA completion cookies, etc */
1705 if (tx->tx_id.txe_idx == 0) {
1706 rc = kgnilnd_set_tx_id(tx, conn);
1708 kgnilnd_tx_done(tx, rc);
1713 CDEBUG(D_NET, "%s to conn %p for %s\n", kgnilnd_msgtype2str(tx->tx_msg.gnm_type),
1714 conn, libcfs_nid2str(conn->gnc_peer->gnp_nid));
1716 /* Only let NOOPs be sent while fail loc is set, otherwise kill the tx.
1718 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_ONLY_NOOP) && (tx->tx_msg.gnm_type != GNILND_MSG_NOOP)) {
1719 kgnilnd_tx_done(tx, rc);
1723 switch (tx->tx_msg.gnm_type) {
1724 case GNILND_MSG_PUT_ACK:
1725 case GNILND_MSG_GET_REQ:
1726 case GNILND_MSG_PUT_REQ_REV:
1727 case GNILND_MSG_GET_ACK_REV:
1728 /* hijacking time! If this message will authorize our peer to
1729 * send his dirty little bytes in an RDMA, we need to get permission */
1730 kgnilnd_queue_rdma(conn, tx);
1732 case GNILND_MSG_IMMEDIATE:
1733 /* try to send right now, can help reduce latency */
1734 rc = kgnilnd_sendmsg_trylock(tx, tx->tx_buffer, tx->tx_nob, &conn->gnc_list_lock, GNILND_TX_FMAQ);
1737 /* it was sent, break out of switch to avoid default case of queueing */
1740 /* needs to queue to try again, so fall through to default case */
1741 case GNILND_MSG_NOOP:
1742 /* Just make sure this goes out first for this conn */
1744 /* fall through... */
1746 spin_lock(&conn->gnc_list_lock);
1747 kgnilnd_tx_add_state_locked(tx, conn->gnc_peer, conn, GNILND_TX_FMAQ, add_tail);
1748 tx->tx_qtime = jiffies;
1749 spin_unlock(&conn->gnc_list_lock);
1750 kgnilnd_schedule_conn(conn);
1755 kgnilnd_launch_tx(kgn_tx_t *tx, kgn_net_t *net, lnet_process_id_t *target)
1758 kgn_peer_t *new_peer = NULL;
1759 kgn_conn_t *conn = NULL;
1765 /* If I get here, I've committed to send, so I complete the tx with
1766 * failure on any problems */
1768 GNITX_ASSERTF(tx, tx->tx_conn == NULL,
1769 "tx already has connection %p", tx->tx_conn);
1771 /* do all of the peer & conn searching in one swoop - this avoids
1772 * nastiness when dropping locks and needing to maintain a sane state
1773 * in the face of stack reset or something else nuking peers & conns */
1775 /* I expect to find him, so only take a read lock */
1776 read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1778 peer = kgnilnd_find_peer_locked(target->nid);
1780 conn = kgnilnd_find_conn_locked(peer);
1781 /* this could be NULL during quiesce */
1783 /* Connection exists; queue message on it */
1784 kgnilnd_queue_tx(conn, tx);
1785 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1789 /* don't create a connection if the peer is marked down */
1790 if (peer->gnp_down == GNILND_RCA_NODE_DOWN) {
1791 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1797 /* creating peer or conn; I'll need a write lock... */
1798 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1800 CFS_RACE(CFS_FAIL_GNI_FIND_TARGET);
1802 node_state = kgnilnd_get_node_state(LNET_NIDADDR(target->nid));
1804 /* NB - this will not block during normal operations -
1805 * the only writer of this is in the startup/shutdown path. */
1806 rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);
1812 /* ignore previous peer entirely - we cycled the lock, so we
1813 * will create new peer and at worst drop it if peer is still
1815 rc = kgnilnd_create_peer_safe(&new_peer, target->nid, net, node_state);
1817 up_read(&kgnilnd_data.kgn_net_rw_sem);
1821 write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1822 up_read(&kgnilnd_data.kgn_net_rw_sem);
1824 /* search for peer again now that we have the lock;
1825 * if we don't find it, add our new one to the list */
1826 kgnilnd_add_peer_locked(target->nid, new_peer, &peer);
1828 /* don't create a connection if the peer is not up */
1829 if (peer->gnp_down != GNILND_RCA_NODE_UP) {
1830 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1835 conn = kgnilnd_find_or_create_conn_locked(peer);
1837 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_DGRAM_DROP_TX)) {
1838 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1843 /* oh hey, found a conn now... magical */
1844 kgnilnd_queue_tx(conn, tx);
1846 /* no conn, must be trying to connect - so we queue for now */
1847 tx->tx_qtime = jiffies;
1848 kgnilnd_tx_add_state_locked(tx, peer, NULL, GNILND_TX_PEERQ, 1);
1850 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1853 kgnilnd_tx_done(tx, rc);
1858 kgnilnd_rdma(kgn_tx_t *tx, int type,
1859 kgn_rdma_desc_t *sink, unsigned int nob, __u64 cookie)
1861 kgn_conn_t *conn = tx->tx_conn;
1862 unsigned long timestamp;
1863 gni_post_type_t post_type;
1866 unsigned int desc_nob = nob;
1867 void *desc_buffer = tx->tx_buffer;
1868 gni_mem_handle_t desc_map_key = tx->tx_map_key;
1869 LASSERTF(kgnilnd_tx_mapped(tx),
1870 "unmapped tx %p\n", tx);
1871 LASSERTF(conn != NULL,
1872 "NULL conn on tx %p, naughty, naughty\n", tx);
1873 LASSERTF(nob <= sink->gnrd_nob,
1874 "nob %u > sink->gnrd_nob %d (%p)\n",
1875 nob, sink->gnrd_nob, sink);
1876 LASSERTF(nob <= tx->tx_nob,
1877 "nob %d > tx(%p)->tx_nob %d\n",
1878 nob, tx, tx->tx_nob);
1881 case GNILND_MSG_GET_DONE:
1882 case GNILND_MSG_PUT_DONE:
1883 post_type = GNI_POST_RDMA_PUT;
1885 case GNILND_MSG_GET_DONE_REV:
1886 case GNILND_MSG_PUT_DONE_REV:
1887 post_type = GNI_POST_RDMA_GET;
1890 CERROR("invalid msg type %s (%d)\n",
1891 kgnilnd_msgtype2str(type), type);
1894 if (post_type == GNI_POST_RDMA_GET) {
1895 /* Check for remote buffer / local buffer / length alignment. All must be 4-byte
1896 * aligned. If the local buffer is not aligned correctly, using the copy buffer
1897 * will fix that issue. If the length is misaligned, the copy buffer also fixes it; we end
1898 * up transferring extra bytes into the buffer but only copy the correct nob into the original
1899 * buffer. Remote offset correction is done through a combination of adjusting the offset,
1900 * making sure the length and addr are aligned and copying the data into the correct location
1901 * once the transfer has completed.
1903 if ((((__u64)((unsigned long)tx->tx_buffer)) & 3) ||
1904 (sink->gnrd_addr & 3) ||
1907 tx->tx_offset = ((__u64)((unsigned long)sink->gnrd_addr)) & 3;
1909 kgnilnd_admin_addref(kgnilnd_data.kgn_rev_offset);
1911 if ((nob + tx->tx_offset) & 3) {
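/* round the transfer length up to the next 4-byte boundary; the extra bytes only
 * land in the copy buffer and are dropped when the payload is copied back out */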
1912 desc_nob = ((nob + tx->tx_offset) + (4 - ((nob + tx->tx_offset) & 3)));
1913 kgnilnd_admin_addref(kgnilnd_data.kgn_rev_length);
1915 desc_nob = (nob + tx->tx_offset);
1918 if (tx->tx_buffer_copy == NULL) {
1919 /* Allocate the largest copy buffer we will need; this prevents us from overwriting data
1920 * and requires that we allocate at most a few extra bytes. */
1921 tx->tx_buffer_copy = vmalloc(desc_nob);
1923 if (!tx->tx_buffer_copy) {
1924 /* allocation of buffer failed, nak the rdma */
1925 kgnilnd_nak_rdma(tx->tx_conn, tx->tx_msg.gnm_type, -EFAULT, cookie, tx->tx_msg.gnm_srcnid);
1926 kgnilnd_tx_done(tx, -EFAULT);
1929 kgnilnd_admin_addref(kgnilnd_data.kgn_rev_copy_buff);
1930 rc = kgnilnd_mem_register(conn->gnc_device->gnd_handle, (__u64)tx->tx_buffer_copy, desc_nob, NULL, GNI_MEM_READWRITE, &tx->tx_buffer_copy_map_key);
1931 if (rc != GNI_RC_SUCCESS) {
1932 /* Registration failed, nak the rdma and kill the tx. */
1933 vfree(tx->tx_buffer_copy);
1934 tx->tx_buffer_copy = NULL;
1935 kgnilnd_nak_rdma(tx->tx_conn, tx->tx_msg.gnm_type, -EFAULT, cookie, tx->tx_msg.gnm_srcnid);
1936 kgnilnd_tx_done(tx, -EFAULT);
1940 desc_map_key = tx->tx_buffer_copy_map_key;
1941 desc_buffer = tx->tx_buffer_copy;
1945 memset(&tx->tx_rdma_desc, 0, sizeof(tx->tx_rdma_desc));
1946 tx->tx_rdma_desc.post_id = tx->tx_id.txe_cookie;
1947 tx->tx_rdma_desc.type = post_type;
1948 tx->tx_rdma_desc.cq_mode = GNI_CQMODE_GLOBAL_EVENT;
1949 tx->tx_rdma_desc.local_addr = (__u64)((unsigned long)desc_buffer);
1950 tx->tx_rdma_desc.local_mem_hndl = desc_map_key;
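/* remote_addr is aligned down by tx_offset (nonzero only in the misaligned GET case
 * above); the leading pad bytes are skipped when the data is copied into place after
 * the transfer completes */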
1951 tx->tx_rdma_desc.remote_addr = sink->gnrd_addr - tx->tx_offset;
1952 tx->tx_rdma_desc.remote_mem_hndl = sink->gnrd_key;
1953 tx->tx_rdma_desc.length = desc_nob;
1954 tx->tx_nob_rdma = nob;
1955 if (*kgnilnd_tunables.kgn_bte_dlvr_mode)
1956 tx->tx_rdma_desc.dlvr_mode = *kgnilnd_tunables.kgn_bte_dlvr_mode;
1957 /* prep final completion message */
1958 kgnilnd_init_msg(&tx->tx_msg, type, tx->tx_msg.gnm_srcnid);
1959 tx->tx_msg.gnm_u.completion.gncm_cookie = cookie;
1960 /* send actual size RDMA'd in retval */
1961 tx->tx_msg.gnm_u.completion.gncm_retval = nob;
1963 kgnilnd_compute_rdma_cksum(tx, nob);
1966 kgnilnd_queue_tx(conn, tx);
1970 /* Don't lie (CLOSE == RDMA idle) */
1971 LASSERTF(!conn->gnc_close_sent, "tx %p on conn %p after close sent %d\n",
1972 tx, conn, conn->gnc_close_sent);
1974 GNIDBG_TX(D_NET, tx, "Post RDMA type 0x%02x dlvr_mode 0x%x cookie:"LPX64,
1975 type, tx->tx_rdma_desc.dlvr_mode, cookie);
1977 /* set CQ dedicated for RDMA */
1978 tx->tx_rdma_desc.src_cq_hndl = conn->gnc_device->gnd_snd_rdma_cqh;
1980 timestamp = jiffies;
1981 mutex_lock(&conn->gnc_device->gnd_cq_mutex);
1982 /* delay in jiffies - we are really concerned only with things that
1983 * result in a schedule() or really holding this off for long times.
1984 * NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
1985 conn->gnc_device->gnd_mutex_delay += (long) jiffies - timestamp;
1987 rrc = kgnilnd_post_rdma(conn->gnc_ephandle, &tx->tx_rdma_desc);
1989 spin_lock(&conn->gnc_list_lock);
1990 kgnilnd_tx_add_state_locked(tx, conn->gnc_peer, conn, GNILND_TX_LIVE_RDMAQ, 1);
1991 tx->tx_qtime = jiffies;
1992 spin_unlock(&conn->gnc_list_lock);
1994 mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
1996 /* XXX Nic: is this a place we should handle more errors for
1997 * robustness' sake? */
1998 LASSERT(rrc == GNI_RC_SUCCESS);
2003 kgnilnd_alloc_rx(void)
2007 rx = kmem_cache_alloc(kgnilnd_data.kgn_rx_cache, GFP_ATOMIC);
2009 CERROR("failed to allocate rx\n");
2012 CDEBUG(D_MALLOC, "slab-alloced 'rx': %lu at %p.\n",
2015 /* no memset to zero, we'll always fill all members */
2019 /* release is just to free connection resources
2020 * we use this for the eager path after copying */
2022 kgnilnd_release_msg(kgn_conn_t *conn)
2025 unsigned long timestamp;
2027 CDEBUG(D_NET, "consuming %p\n", conn);
2029 timestamp = jiffies;
2030 mutex_lock(&conn->gnc_device->gnd_cq_mutex);
2031 /* delay in jiffies - we are really only concerned with things that
2032 * result in a schedule() or that hold this off for a long time.
2033 * NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
2034 conn->gnc_device->gnd_mutex_delay += (long) jiffies - timestamp;
2036 rrc = kgnilnd_smsg_release(conn->gnc_ephandle);
2037 mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
2039 LASSERTF(rrc == GNI_RC_SUCCESS, "bad rrc %d\n", rrc);
2040 GNIDBG_SMSG_CREDS(D_NET, conn);
2046 kgnilnd_consume_rx(kgn_rx_t *rx)
2048 kgn_conn_t *conn = rx->grx_conn;
2049 kgn_msg_t *rxmsg = rx->grx_msg;
2051 /* if we are eager, free the msg copy we LIBCFS_ALLOC'd in eager_recv */
2052 if (unlikely(rx->grx_eager)) {
2053 LIBCFS_FREE(rxmsg, sizeof(*rxmsg) + *kgnilnd_tunables.kgn_max_immediate);
2054 atomic_dec(&kgnilnd_data.kgn_neager_allocs);
2056 /* release ref from eager_recv */
2057 kgnilnd_conn_decref(conn);
2059 GNIDBG_MSG(D_NET, rxmsg, "rx %p processed", rx);
2060 kgnilnd_release_msg(conn);
2063 kmem_cache_free(kgnilnd_data.kgn_rx_cache, rx);
2064 CDEBUG(D_MALLOC, "slab-freed 'rx': %lu at %p.\n",
2071 kgnilnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
2073 lnet_hdr_t *hdr = &lntmsg->msg_hdr;
2074 int type = lntmsg->msg_type;
2075 lnet_process_id_t target = lntmsg->msg_target;
2076 int target_is_router = lntmsg->msg_target_is_router;
2077 int routing = lntmsg->msg_routing;
2078 unsigned int niov = lntmsg->msg_niov;
2079 struct iovec *iov = lntmsg->msg_iov;
2080 lnet_kiov_t *kiov = lntmsg->msg_kiov;
2081 unsigned int offset = lntmsg->msg_offset;
2082 unsigned int nob = lntmsg->msg_len;
2083 unsigned int msg_vmflush = lntmsg->msg_vmflush;
2084 kgn_net_t *net = ni->ni_data;
2088 int reverse_rdma_flag = *kgnilnd_tunables.kgn_reverse_rdma;
2090 /* NB 'private' is different depending on what we're sending.... */
2091 LASSERT(!in_interrupt());
2093 CDEBUG(D_NET, "sending msg type %d with %d bytes in %d frags to %s\n",
2094 type, nob, niov, libcfs_id2str(target));
2096 LASSERTF(nob == 0 || niov > 0,
2097 "lntmsg %p nob %d niov %d\n", lntmsg, nob, niov);
2098 LASSERTF(niov <= LNET_MAX_IOV,
2099 "lntmsg %p niov %d\n", lntmsg, niov);
2101 /* payload is either all vaddrs or all pages */
2102 LASSERTF(!(kiov != NULL && iov != NULL),
2103 "lntmsg %p kiov %p iov %p\n", lntmsg, kiov, iov);
2106 mpflag = cfs_memory_pressure_get_and_set();
2110 CERROR("lntmsg %p with unexpected type %d\n",
2115 LASSERTF(nob == 0, "lntmsg %p nob %d\n",
2123 if (routing || target_is_router)
2124 break; /* send IMMEDIATE */
2126 /* it is safe to do a direct GET without mapping the buffer for RDMA as we
2127 * check the eventual sink buffer here - if it is small enough, the remote
2128 * end is perfectly capable of returning the data in a short message -
2129 * the magic is that we call lnet_parse in kgnilnd_recv with rdma_req=0
2130 * for IMMEDIATE messages, which has it send a real reply instead
2131 * of doing kgnilnd_recv to continue the RDMA */
2132 if (lntmsg->msg_md->md_length <= *kgnilnd_tunables.kgn_max_immediate)
2135 if ((reverse_rdma_flag & GNILND_REVERSE_GET) == 0)
2136 tx = kgnilnd_new_tx_msg(GNILND_MSG_GET_REQ, ni->ni_nid);
2138 tx = kgnilnd_new_tx_msg(GNILND_MSG_GET_REQ_REV, ni->ni_nid);
2144 /* slightly different options as we might actually have a GET with a
2145 * MD_KIOV set but a non-NULL md_iov.iov */
2146 if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0)
2147 rc = kgnilnd_setup_rdma_buffer(tx, lntmsg->msg_md->md_niov,
2148 lntmsg->msg_md->md_iov.iov, NULL,
2149 0, lntmsg->msg_md->md_length);
2151 rc = kgnilnd_setup_rdma_buffer(tx, lntmsg->msg_md->md_niov,
2152 NULL, lntmsg->msg_md->md_iov.kiov,
2153 0, lntmsg->msg_md->md_length);
2155 CERROR("unable to setup buffer: %d\n", rc);
2156 kgnilnd_tx_done(tx, rc);
2161 tx->tx_lntmsg[1] = lnet_create_reply_msg(ni, lntmsg);
2162 if (tx->tx_lntmsg[1] == NULL) {
2163 CERROR("Can't create reply for GET to %s\n",
2164 libcfs_nid2str(target.nid));
2165 kgnilnd_tx_done(tx, rc);
2170 tx->tx_lntmsg[0] = lntmsg;
2171 if ((reverse_rdma_flag & GNILND_REVERSE_GET) == 0)
2172 tx->tx_msg.gnm_u.get.gngm_hdr = *hdr;
2174 tx->tx_msg.gnm_u.putreq.gnprm_hdr = *hdr;
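/* with reverse GET enabled the request goes out as GET_REQ_REV and the LNet
 * header rides in the putreq union member instead of the get one, presumably
 * because the transfer is then driven like a PUT from the peer's side (the
 * PUT case below mirrors this with PUT_REQ_REV and the get member). */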
2176 /* rest of tx_msg is setup just before it is sent */
2177 kgnilnd_launch_tx(tx, net, &target);
2179 case LNET_MSG_REPLY:
2181 /* to save on MDDs, we'll handle short kiov by vmap'ing
2182 * and sending via SMSG */
2183 if (nob <= *kgnilnd_tunables.kgn_max_immediate)
2186 if ((reverse_rdma_flag & GNILND_REVERSE_PUT) == 0)
2187 tx = kgnilnd_new_tx_msg(GNILND_MSG_PUT_REQ, ni->ni_nid);
2189 tx = kgnilnd_new_tx_msg(GNILND_MSG_PUT_REQ_REV, ni->ni_nid);
2196 rc = kgnilnd_setup_rdma_buffer(tx, niov, iov, kiov, offset, nob);
2198 kgnilnd_tx_done(tx, rc);
2203 tx->tx_lntmsg[0] = lntmsg;
2204 if ((reverse_rdma_flag & GNILND_REVERSE_PUT) == 0)
2205 tx->tx_msg.gnm_u.putreq.gnprm_hdr = *hdr;
2207 tx->tx_msg.gnm_u.get.gngm_hdr = *hdr;
2209 /* rest of tx_msg is setup just before it is sent */
2210 kgnilnd_launch_tx(tx, net, &target);
2214 /* send IMMEDIATE */
2216 LASSERTF(nob <= *kgnilnd_tunables.kgn_max_immediate,
2217 "lntmsg 0x%p too large %d\n", lntmsg, nob);
2219 tx = kgnilnd_new_tx_msg(GNILND_MSG_IMMEDIATE, ni->ni_nid);
2225 rc = kgnilnd_setup_immediate_buffer(tx, niov, iov, kiov, offset, nob);
2227 kgnilnd_tx_done(tx, rc);
2231 tx->tx_msg.gnm_u.immediate.gnim_hdr = *hdr;
2232 tx->tx_lntmsg[0] = lntmsg;
2233 kgnilnd_launch_tx(tx, net, &target);
2236 /* use stored value as we could have already finalized lntmsg here from a failed launch */
2238 cfs_memory_pressure_restore(mpflag);
2243 kgnilnd_setup_rdma(lnet_ni_t *ni, kgn_rx_t *rx, lnet_msg_t *lntmsg, int mlen)
2245 kgn_conn_t *conn = rx->grx_conn;
2246 kgn_msg_t *rxmsg = rx->grx_msg;
2247 unsigned int niov = lntmsg->msg_niov;
2248 struct iovec *iov = lntmsg->msg_iov;
2249 lnet_kiov_t *kiov = lntmsg->msg_kiov;
2250 unsigned int offset = lntmsg->msg_offset;
2251 unsigned int nob = lntmsg->msg_len;
2256 switch (rxmsg->gnm_type) {
2257 case GNILND_MSG_PUT_REQ_REV:
2258 done_type = GNILND_MSG_PUT_DONE_REV;
2261 case GNILND_MSG_GET_REQ:
2262 done_type = GNILND_MSG_GET_DONE;
2265 CERROR("invalid msg type %s (%d)\n",
2266 kgnilnd_msgtype2str(rxmsg->gnm_type),
2271 tx = kgnilnd_new_tx_msg(done_type, ni->ni_nid);
2275 rc = kgnilnd_set_tx_id(tx, conn);
2279 rc = kgnilnd_setup_rdma_buffer(tx, niov, iov, kiov, offset, nob);
2283 tx->tx_lntmsg[0] = lntmsg;
2284 tx->tx_getinfo = rxmsg->gnm_u.get;
2286 /* we only queue from kgnilnd_recv - we might get called from other contexts
2287 * and we don't want to block on the mutex in those cases */
2289 spin_lock(&tx->tx_conn->gnc_device->gnd_lock);
2290 kgnilnd_tx_add_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_MAPQ, 1);
2291 spin_unlock(&tx->tx_conn->gnc_device->gnd_lock);
2292 kgnilnd_schedule_device(tx->tx_conn->gnc_device);
2297 kgnilnd_tx_done(tx, rc);
2298 kgnilnd_nak_rdma(conn, done_type, rc, rxmsg->gnm_u.get.gngm_cookie, ni->ni_nid);
2300 lnet_finalize(ni, lntmsg, rc);
2304 kgnilnd_eager_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
2307 kgn_rx_t *rx = private;
2308 kgn_conn_t *conn = rx->grx_conn;
2309 kgn_msg_t *rxmsg = rx->grx_msg;
2310 kgn_msg_t *eagermsg = NULL;
2311 kgn_peer_t *peer = NULL;
2312 kgn_conn_t *found_conn = NULL;
2314 GNIDBG_MSG(D_NET, rxmsg, "eager recv for conn %p, rxmsg %p, lntmsg %p",
2315 conn, rxmsg, lntmsg);
2317 if (rxmsg->gnm_payload_len > *kgnilnd_tunables.kgn_max_immediate) {
2318 GNIDBG_MSG(D_ERROR, rxmsg, "payload too large %d",
2319 rxmsg->gnm_payload_len);
2322 /* Grab a read lock so the connection doesn't disappear on us
2323 * while we look it up
2325 read_lock(&kgnilnd_data.kgn_peer_conn_lock);
2327 peer = kgnilnd_find_peer_locked(rxmsg->gnm_srcnid);
2329 found_conn = kgnilnd_find_conn_locked(peer);
2332 /* Verify the connection found is the same one that the message
2333 * is supposed to be using; if it is not, output an error message
2336 if (!peer || !found_conn
2337 || found_conn->gnc_peer_connstamp != rxmsg->gnm_connstamp) {
2338 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
2339 CERROR("Couldn't find matching peer %p or conn %p / %p\n",
2340 peer, conn, found_conn);
2342 CERROR("Unexpected connstamp "LPX64"("LPX64" expected)"
2343 " from %s", rxmsg->gnm_connstamp,
2344 found_conn->gnc_peer_connstamp,
2345 libcfs_nid2str(peer->gnp_nid));
2350 /* add conn ref to ensure it doesn't go away until all eager
2351 * messages are processed */
2352 kgnilnd_conn_addref(conn);
2354 /* Now that we have verified the connection is valid and added a
2355 * reference we can remove the read_lock on the peer_conn_lock */
2356 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
2358 /* we have no credits or buffers for this message, so copy it
2359 * somewhere for a later kgnilnd_recv */
2360 if (atomic_read(&kgnilnd_data.kgn_neager_allocs) >=
2361 *kgnilnd_tunables.kgn_eager_credits) {
2362 CERROR("Out of eager credits to %s\n",
2363 libcfs_nid2str(conn->gnc_peer->gnp_nid));
2367 atomic_inc(&kgnilnd_data.kgn_neager_allocs);
2369 LIBCFS_ALLOC(eagermsg, sizeof(*eagermsg) + *kgnilnd_tunables.kgn_max_immediate);
2370 if (eagermsg == NULL) {
2371 kgnilnd_conn_decref(conn);
2372 CERROR("couldn't allocate eager rx message for conn %p to %s\n",
2373 conn, libcfs_nid2str(conn->gnc_peer->gnp_nid));
2377 /* copy msg and payload */
2378 memcpy(eagermsg, rxmsg, sizeof(*rxmsg) + rxmsg->gnm_payload_len);
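/* the copy is sizeof(*rxmsg) + gnm_payload_len while the allocation above was
 * sized for kgn_max_immediate, and gnm_payload_len was bounds-checked against
 * that tunable at the top of this function, so this cannot overrun. */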
2379 rx->grx_msg = eagermsg;
2382 /* stash this for lnet_finalize on cancel-on-conn-close */
2383 rx->grx_lntmsg = lntmsg;
2385 /* keep the same rx_t, it just has a new grx_msg now */
2386 *new_private = private;
2388 /* release SMSG buffer */
2389 kgnilnd_release_msg(conn);
2395 kgnilnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
2396 int delayed, unsigned int niov,
2397 struct iovec *iov, lnet_kiov_t *kiov,
2398 unsigned int offset, unsigned int mlen, unsigned int rlen)
2400 kgn_rx_t *rx = private;
2401 kgn_conn_t *conn = rx->grx_conn;
2402 kgn_msg_t *rxmsg = rx->grx_msg;
2408 LASSERT(!in_interrupt());
2409 LASSERTF(mlen <= rlen, "%d <= %d\n", mlen, rlen);
2410 /* Either all pages or all vaddrs */
2411 LASSERTF(!(kiov != NULL && iov != NULL), "kiov %p iov %p\n",
2414 GNIDBG_MSG(D_NET, rxmsg, "conn %p, rxmsg %p, lntmsg %p"
2415 " niov=%d kiov=%p iov=%p offset=%d mlen=%d rlen=%d",
2416 conn, rxmsg, lntmsg,
2417 niov, kiov, iov, offset, mlen, rlen);
2419 /* we need to lock here as recv can be called from any context */
2420 read_lock(&kgnilnd_data.kgn_peer_conn_lock);
2421 if (rx->grx_eager && conn->gnc_state != GNILND_CONN_ESTABLISHED) {
2422 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
2424 /* someone closed the conn after we copied this out, nuke it */
2425 kgnilnd_consume_rx(rx);
2426 lnet_finalize(ni, lntmsg, conn->gnc_error);
2429 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
2431 switch (rxmsg->gnm_type) {
2433 GNIDBG_MSG(D_NETERROR, rxmsg, "conn %p, rx %p, rxmsg %p, lntmsg %p"
2434 " niov=%d kiov=%p iov=%p offset=%d mlen=%d rlen=%d",
2435 conn, rx, rxmsg, lntmsg, niov, kiov, iov, offset, mlen, rlen);
2438 case GNILND_MSG_IMMEDIATE:
2439 if (mlen > rxmsg->gnm_payload_len) {
2440 GNIDBG_MSG(D_ERROR, rxmsg,
2441 "Immediate message from %s too big: %d > %d",
2442 libcfs_nid2str(conn->gnc_peer->gnp_nid), mlen,
2443 rxmsg->gnm_payload_len);
2445 kgnilnd_consume_rx(rx);
2449 /* &rxmsg[1] points to the payload, sitting in the buffer
2450 * right after the kgn_msg_t header - just a 'cute' way of saying
2451 * rxmsg + sizeof(kgn_msg_t) */
2453 /* check payload checksum if sent */
2455 if (*kgnilnd_tunables.kgn_checksum >= 2 &&
2456 !rxmsg->gnm_payload_cksum &&
2457 rxmsg->gnm_payload_len != 0)
2458 GNIDBG_MSG(D_WARNING, rxmsg, "no msg payload checksum when enabled");
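/* i.e. a kgn_checksum setting of 2 or higher implies the peer should have
 * included a payload checksum on any non-empty payload; if it did not, we
 * only warn here and carry on, since verification below is keyed off the
 * checksum actually present in the message. */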
2460 if (rxmsg->gnm_payload_cksum != 0) {
2461 /* gnm_payload_len set in kgnilnd_sendmsg from tx->tx_nob,
2462 * which is what is used to calculate the cksum on the TX side */
2463 pload_cksum = kgnilnd_cksum(&rxmsg[1], rxmsg->gnm_payload_len);
2465 if (rxmsg->gnm_payload_cksum != pload_cksum) {
2466 GNIDBG_MSG(D_NETERROR, rxmsg,
2467 "Bad payload checksum (%x expected %x)",
2468 pload_cksum, rxmsg->gnm_payload_cksum);
2469 switch (*kgnilnd_tunables.kgn_checksum_dump) {
2471 kgnilnd_dump_blob(D_BUFFS, "bad payload checksum",
2472 &rxmsg[1], rxmsg->gnm_payload_len);
2473 /* fall through to dump */
2475 libcfs_debug_dumplog();
2481 /* checksum problems are fatal, kill the conn */
2482 kgnilnd_consume_rx(rx);
2483 kgnilnd_close_conn(conn, rc);
2489 lnet_copy_flat2kiov(
2491 *kgnilnd_tunables.kgn_max_immediate,
2492 &rxmsg[1], 0, mlen);
2496 *kgnilnd_tunables.kgn_max_immediate,
2497 &rxmsg[1], 0, mlen);
2499 kgnilnd_consume_rx(rx);
2500 lnet_finalize(ni, lntmsg, 0);
2503 case GNILND_MSG_PUT_REQ:
2504 /* LNET wants to truncate or drop transaction, sending NAK */
2506 kgnilnd_consume_rx(rx);
2507 lnet_finalize(ni, lntmsg, 0);
2509 /* only error if lntmsg == NULL, otherwise we are just
2510 * short-circuiting an RDMA of 0 bytes */
2511 kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
2512 lntmsg == NULL ? -ENOENT : 0,
2513 rxmsg->gnm_u.get.gngm_cookie,
2517 /* sending ACK with sink buffer info */
2518 tx = kgnilnd_new_tx_msg(GNILND_MSG_PUT_ACK, ni->ni_nid);
2520 kgnilnd_consume_rx(rx);
2524 rc = kgnilnd_set_tx_id(tx, conn);
2526 GOTO(nak_put_req, rc);
2529 rc = kgnilnd_setup_rdma_buffer(tx, niov, iov, kiov, offset, mlen);
2531 GOTO(nak_put_req, rc);
2534 tx->tx_msg.gnm_u.putack.gnpam_src_cookie =
2535 rxmsg->gnm_u.putreq.gnprm_cookie;
2536 tx->tx_msg.gnm_u.putack.gnpam_dst_cookie = tx->tx_id.txe_cookie;
2537 tx->tx_msg.gnm_u.putack.gnpam_desc.gnrd_addr =
2538 (__u64)((unsigned long)tx->tx_buffer);
2539 tx->tx_msg.gnm_u.putack.gnpam_desc.gnrd_nob = mlen;
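/* the ACK hands the peer everything it needs to RDMA directly into our sink
 * buffer: both cookies plus the buffer address and the number of bytes (mlen)
 * that LNet is willing to accept. */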
2541 tx->tx_lntmsg[0] = lntmsg; /* finalize this on RDMA_DONE */
2542 tx->tx_qtime = jiffies;
2543 /* we only queue from kgnilnd_recv - we might get called from other contexts
2544 * and we don't want to block on the mutex in those cases */
2546 spin_lock(&tx->tx_conn->gnc_device->gnd_lock);
2547 kgnilnd_tx_add_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_MAPQ, 1);
2548 spin_unlock(&tx->tx_conn->gnc_device->gnd_lock);
2549 kgnilnd_schedule_device(tx->tx_conn->gnc_device);
2551 kgnilnd_consume_rx(rx);
2555 /* make sure we send an error back when the PUT fails */
2556 kgnilnd_nak_rdma(conn, rxmsg->gnm_type, rc, rxmsg->gnm_u.get.gngm_cookie, ni->ni_nid);
2557 kgnilnd_tx_done(tx, rc);
2558 kgnilnd_consume_rx(rx);
2560 /* return magic LNet network error */
2562 case GNILND_MSG_GET_REQ_REV:
2563 /* LNET wants to truncate or drop transaction, sending NAK */
2565 kgnilnd_consume_rx(rx);
2566 lnet_finalize(ni, lntmsg, 0);
2568 /* only error if lntmsg == NULL, otherwise we are just
2569 * short-circuiting an RDMA of 0 bytes */
2570 kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
2571 lntmsg == NULL ? -ENOENT : 0,
2572 rxmsg->gnm_u.get.gngm_cookie,
2576 /* lntmsg can be null when parsing a LNET_GET */
2577 if (lntmsg != NULL) {
2578 /* sending ACK with sink buffer info */
2579 tx = kgnilnd_new_tx_msg(GNILND_MSG_GET_ACK_REV, ni->ni_nid);
2581 kgnilnd_consume_rx(rx);
2585 rc = kgnilnd_set_tx_id(tx, conn);
2587 GOTO(nak_get_req_rev, rc);
2590 rc = kgnilnd_setup_rdma_buffer(tx, niov, iov, kiov, offset, mlen);
2592 GOTO(nak_get_req_rev, rc);
2595 tx->tx_msg.gnm_u.putack.gnpam_src_cookie =
2596 rxmsg->gnm_u.putreq.gnprm_cookie;
2597 tx->tx_msg.gnm_u.putack.gnpam_dst_cookie = tx->tx_id.txe_cookie;
2598 tx->tx_msg.gnm_u.putack.gnpam_desc.gnrd_addr =
2599 (__u64)((unsigned long)tx->tx_buffer);
2600 tx->tx_msg.gnm_u.putack.gnpam_desc.gnrd_nob = mlen;
2602 tx->tx_lntmsg[0] = lntmsg; /* finalize this on RDMA_DONE */
2604 /* we only queue from kgnilnd_recv - we might get called from other contexts
2605 * and we don't want to block on the mutex in those cases */
2607 spin_lock(&tx->tx_conn->gnc_device->gnd_lock);
2608 kgnilnd_tx_add_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_MAPQ, 1);
2609 spin_unlock(&tx->tx_conn->gnc_device->gnd_lock);
2610 kgnilnd_schedule_device(tx->tx_conn->gnc_device);
2613 kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
2615 rxmsg->gnm_u.get.gngm_cookie,
2619 kgnilnd_consume_rx(rx);
2623 /* make sure we send an error back when the GET fails */
2624 kgnilnd_nak_rdma(conn, rxmsg->gnm_type, rc, rxmsg->gnm_u.get.gngm_cookie, ni->ni_nid);
2625 kgnilnd_tx_done(tx, rc);
2626 kgnilnd_consume_rx(rx);
2628 /* return magic LNet network error */
2632 case GNILND_MSG_PUT_REQ_REV:
2633 /* LNET wants to truncate or drop transaction, sending NAK */
2635 kgnilnd_consume_rx(rx);
2636 lnet_finalize(ni, lntmsg, 0);
2638 /* only error if lntmsg == NULL, otherwise we are just
2639 * short-circuiting an RDMA of 0 bytes */
2640 kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
2641 lntmsg == NULL ? -ENOENT : 0,
2642 rxmsg->gnm_u.get.gngm_cookie,
2647 if (lntmsg != NULL) {
2649 kgnilnd_setup_rdma(ni, rx, lntmsg, mlen);
2652 kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
2654 rxmsg->gnm_u.get.gngm_cookie,
2657 kgnilnd_consume_rx(rx);
2659 case GNILND_MSG_GET_REQ:
2660 if (lntmsg != NULL) {
2662 kgnilnd_setup_rdma(ni, rx, lntmsg, mlen);
2665 kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
2667 rxmsg->gnm_u.get.gngm_cookie,
2670 kgnilnd_consume_rx(rx);
2676 /* needs write_lock on kgn_peer_conn_lock held */
2678 kgnilnd_check_conn_timeouts_locked(kgn_conn_t *conn)
2680 unsigned long timeout, keepalive;
2681 unsigned long now = jiffies;
2682 unsigned long newest_last_rx;
2685 /* given that we found this conn hanging off a peer, it better damned
2686 * well be connected */
2687 LASSERTF(conn->gnc_state == GNILND_CONN_ESTABLISHED,
2688 "conn 0x%p->%s with bad state%s\n", conn,
2689 conn->gnc_peer ? libcfs_nid2str(conn->gnc_peer->gnp_nid)
2691 kgnilnd_conn_state2str(conn));
2693 CDEBUG(D_NET, "checking conn %p->%s timeout %d keepalive %d "
2694 "rx_diff %lu tx_diff %lu\n",
2695 conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
2696 conn->gnc_timeout, GNILND_TO2KA(conn->gnc_timeout),
2697 cfs_duration_sec(now - conn->gnc_last_rx_cq),
2698 cfs_duration_sec(now - conn->gnc_last_tx));
2700 timeout = cfs_time_seconds(conn->gnc_timeout);
2701 keepalive = cfs_time_seconds(GNILND_TO2KA(conn->gnc_timeout));
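/* two different clocks here: 'timeout' is how long we tolerate RX silence
 * before closing the conn below, while 'keepalive' (GNILND_TO2KA(gnc_timeout),
 * roughly half the timeout per the reaper comments) is how long last_tx may
 * sit idle before we queue a NOOP. */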
2703 /* just in case our lack of RX msg processing is gumming up the works - give the
2704 * remote an extra chance */
2706 newest_last_rx = GNILND_LASTRX(conn);
2708 if (time_after_eq(now, newest_last_rx + timeout)) {
2709 uint32_t level = D_CONSOLE|D_NETERROR;
2711 if (conn->gnc_peer->gnp_down == GNILND_RCA_NODE_DOWN) {
2714 GNIDBG_CONN(level, conn,
2715 "No gnilnd traffic received from %s for %lu "
2716 "seconds, terminating connection. Is node down? ",
2717 libcfs_nid2str(conn->gnc_peer->gnp_nid),
2718 cfs_duration_sec(now - newest_last_rx));
2722 /* we don't timeout on last_tx stalls - we are going to trust the
2723 * underlying network to let us know when sends are failing.
2724 * At worst, the peer will timeout our RX stamp and drop the connection
2725 * at that point. We'll then see his CLOSE or at worst his RX
2726 * stamp stop and drop the connection on our end */
2728 if (time_after_eq(now, conn->gnc_last_tx + keepalive)) {
2729 CDEBUG(D_NET, "sending NOOP -> %s (%p idle %lu(%lu)) "
2730 "last %lu/%lu/%lu %lus/%lus/%lus\n",
2731 libcfs_nid2str(conn->gnc_peer->gnp_nid), conn,
2732 cfs_duration_sec(jiffies - conn->gnc_last_tx),
2734 conn->gnc_last_noop_want, conn->gnc_last_noop_sent,
2735 conn->gnc_last_noop_cq,
2736 cfs_duration_sec(jiffies - conn->gnc_last_noop_want),
2737 cfs_duration_sec(jiffies - conn->gnc_last_noop_sent),
2738 cfs_duration_sec(jiffies - conn->gnc_last_noop_cq));
2739 set_mb(conn->gnc_last_noop_want, jiffies);
2740 atomic_inc(&conn->gnc_reaper_noop);
2741 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_NOOP_SEND))
2744 tx = kgnilnd_new_tx_msg(GNILND_MSG_NOOP, conn->gnc_peer->gnp_net->gnn_ni->ni_nid);
2747 kgnilnd_queue_tx(conn, tx);
2753 /* needs write_lock on kgn_peer_conn_lock held */
2755 kgnilnd_check_peer_timeouts_locked(kgn_peer_t *peer, struct list_head *todie,
2756 struct list_head *souls)
2758 unsigned long timeout;
2759 kgn_conn_t *conn, *connN = NULL;
2764 short releaseconn = 0;
2765 unsigned long first_rx = 0;
2767 CDEBUG(D_NET, "checking peer 0x%p->%s for timeouts; interval %lus\n",
2768 peer, libcfs_nid2str(peer->gnp_nid),
2769 peer->gnp_reconnect_interval);
2771 timeout = cfs_time_seconds(MAX(*kgnilnd_tunables.kgn_timeout,
2772 GNILND_MIN_TIMEOUT));
2774 conn = kgnilnd_find_conn_locked(peer);
2776 /* if there is a valid conn, check the queues for timeouts */
2777 rc = kgnilnd_check_conn_timeouts_locked(conn);
2779 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_RX_CLOSE_CLOSING)) {
2780 /* simulate a RX CLOSE after the timeout but before
2781 * the scheduler thread gets it */
2782 conn->gnc_close_recvd = GNILND_CLOSE_INJECT1;
2783 conn->gnc_peer_error = -ETIMEDOUT;
2785 /* Once we mark closed, any of the scheduler threads could
2786 * get it and move through before we hit the fail loc code */
2787 kgnilnd_close_conn_locked(conn, rc);
2789 /* first_rx is used to decide when to release a conn from purgatory.
2791 first_rx = conn->gnc_first_rx;
2795 /* now regardless of starting new conn, find tx on peer queue that
2796 * are old and smell bad - do this first so we don't trigger
2797 * reconnect on empty queue if we timeout all */
2798 list_for_each_entry_safe(tx, txN, &peer->gnp_tx_queue, tx_list) {
2799 if (time_after_eq(jiffies, tx->tx_qtime + timeout)) {
2801 LCONSOLE_INFO("could not send to %s due to connection"
2802 " setup failure after %lu seconds\n",
2803 libcfs_nid2str(peer->gnp_nid),
2804 cfs_duration_sec(jiffies - tx->tx_qtime));
2806 kgnilnd_tx_del_state_locked(tx, peer, NULL,
2808 list_add_tail(&tx->tx_list, todie);
2813 if (count || peer->gnp_connecting == GNILND_PEER_KILL) {
2814 CDEBUG(D_NET, "canceling %d tx for peer 0x%p->%s\n",
2815 count, peer, libcfs_nid2str(peer->gnp_nid));
2816 /* if we nuked all the TX, stop peer connection attempt (if there is one..) */
2817 if (list_empty(&peer->gnp_tx_queue) ||
2818 peer->gnp_connecting == GNILND_PEER_KILL) {
2819 /* we pass down todie to use a common function - but we know there are
2821 kgnilnd_cancel_peer_connect_locked(peer, todie);
2825 /* Don't reconnect if we are still trying to clear out old conns.
2826 * This prevents us sending traffic on the new mbox before ensuring we are done
2827 * with the old one */
2828 reconnect = (peer->gnp_down == GNILND_RCA_NODE_UP) &&
2829 (atomic_read(&peer->gnp_dirty_eps) == 0);
2831 /* if we are not connected and there are tx on the gnp_tx_queue waiting
2832 * to be sent, we'll check the reconnect interval and fire up a new
2833 * connection request */
2835 if ((peer->gnp_connecting == GNILND_PEER_IDLE) &&
2836 (time_after_eq(jiffies, peer->gnp_reconnect_time)) &&
2837 !list_empty(&peer->gnp_tx_queue) && reconnect) {
2839 CDEBUG(D_NET, "starting connect to %s\n",
2840 libcfs_nid2str(peer->gnp_nid));
2841 LASSERTF(peer->gnp_connecting == GNILND_PEER_IDLE, "Peer was idle and we "
2842 "have a write_lock, state issue %d\n", peer->gnp_connecting);
2844 peer->gnp_connecting = GNILND_PEER_CONNECT;
2845 kgnilnd_peer_addref(peer); /* extra ref for connd */
2847 spin_lock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
2848 list_add_tail(&peer->gnp_connd_list,
2849 &peer->gnp_net->gnn_dev->gnd_connd_peers);
2850 spin_unlock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
2852 kgnilnd_schedule_dgram(peer->gnp_net->gnn_dev);
2855 /* fail_loc to allow us to delay release of purgatory */
2856 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_PURG_REL_DELAY))
2859 /* This check verifies that the new conn is actually being used, letting us
2860 * pull the old conns out of purgatory once they have actually seen traffic.
2861 * We only release a conn from purgatory during stack reset, admin command, or when a peer reconnects
2864 time_after(jiffies, first_rx + cfs_time_seconds(*kgnilnd_tunables.kgn_hardware_timeout))) {
2865 CDEBUG(D_INFO, "We can release peer %s conns from purgatory %lu\n",
2866 libcfs_nid2str(peer->gnp_nid), first_rx + cfs_time_seconds(*kgnilnd_tunables.kgn_hardware_timeout));
2870 list_for_each_entry_safe (conn, connN, &peer->gnp_conns, gnc_list) {
2871 /* check for purgatory timeouts */
2872 if (conn->gnc_in_purgatory) {
2873 /* We cannot detach this conn from purgatory if it has not been closed, so we reschedule it;
2874 * that way, the next time we check it we can detach it from purgatory
2877 if (conn->gnc_state != GNILND_CONN_DONE) {
2878 /* Skip over conns that are currently not DONE. If they aren't already scheduled
2879 * for completion, something in the state machine is broken.
2884 /* We only detach a conn that is in purgatory if we have received a close message,
2885 * we have a new valid connection that has successfully received data, or an admin
2886 * command tells us we need to detach.
2889 if (conn->gnc_close_recvd || releaseconn || conn->gnc_needs_detach) {
2890 unsigned long waiting;
2892 waiting = (long) jiffies - conn->gnc_last_rx_cq;
2894 /* C.E: The remote peer is expected to close the
2895 * connection (see kgnilnd_check_conn_timeouts)
2896 * via the reaper thread and nuke out the MDD and
2897 * FMA resources after conn->gnc_timeout has expired
2898 * without an FMA RX */
2899 CDEBUG(D_NET, "Reconnected to %s in %lds or admin forced detach, dropping "
2900 " held resources\n",
2901 libcfs_nid2str(conn->gnc_peer->gnp_nid),
2902 cfs_duration_sec(waiting));
2904 kgnilnd_detach_purgatory_locked(conn, souls);
2913 kgnilnd_reaper_check(int idx)
2915 struct list_head *peers = &kgnilnd_data.kgn_peers[idx];
2916 struct list_head *ctmp, *ctmpN;
2917 struct list_head geriatrics;
2918 struct list_head souls;
2920 INIT_LIST_HEAD(&geriatrics);
2921 INIT_LIST_HEAD(&souls);
2923 write_lock(&kgnilnd_data.kgn_peer_conn_lock);
2925 list_for_each_safe(ctmp, ctmpN, peers) {
2926 kgn_peer_t *peer = NULL;
2928 /* don't timeout stuff if the network is mucked or shutting down */
2929 if (kgnilnd_check_hw_quiesce()) {
2932 peer = list_entry(ctmp, kgn_peer_t, gnp_list);
2934 kgnilnd_check_peer_timeouts_locked(peer, &geriatrics, &souls);
2937 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
2939 kgnilnd_txlist_done(&geriatrics, -EHOSTUNREACH);
2940 kgnilnd_release_purgatory_list(&souls);
2944 kgnilnd_update_reaper_timeout(long timeout)
2946 LASSERT(timeout > 0);
2948 spin_lock(&kgnilnd_data.kgn_reaper_lock);
2950 if (timeout < kgnilnd_data.kgn_new_min_timeout)
2951 kgnilnd_data.kgn_new_min_timeout = timeout;
2953 spin_unlock(&kgnilnd_data.kgn_reaper_lock);
2957 kgnilnd_reaper_poke_with_stick(unsigned long arg)
2959 wake_up(&kgnilnd_data.kgn_reaper_waitq);
2963 kgnilnd_reaper(void *arg)
2968 unsigned long next_check_time = jiffies;
2969 long current_min_timeout = MAX_SCHEDULE_TIMEOUT;
2970 struct timer_list timer;
2973 cfs_block_allsigs();
2975 /* all gnilnd threads need to run fairly urgently */
2976 set_user_nice(current, *kgnilnd_tunables.kgn_nice);
2977 spin_lock(&kgnilnd_data.kgn_reaper_lock);
2979 while (!kgnilnd_data.kgn_shutdown) {
2980 /* I wake up every 'p' seconds to check for timeouts on some
2981 * more peers. I try to check every connection 'n' times
2982 * within the global minimum of all keepalive and timeout
2983 * intervals, to ensure I attend to every connection within
2984 * (n+1)/n times its timeout intervals. */
2985 const int p = GNILND_REAPER_THREAD_WAKE;
2986 const int n = GNILND_REAPER_NCHECKS;
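/* e.g. (illustrative values only): with p = 1s, n = 4 and a 60s minimum
 * timeout, every connection gets visited roughly every 15s, i.e. four
 * times per timeout interval as the comment above describes. */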
2988 /* to quiesce or to not quiesce, that is the question */
2989 if (unlikely(kgnilnd_data.kgn_quiesce_trigger)) {
2990 spin_unlock(&kgnilnd_data.kgn_reaper_lock);
2991 KGNILND_SPIN_QUIESCE;
2992 spin_lock(&kgnilnd_data.kgn_reaper_lock);
2995 /* careful with the jiffy wrap... */
2996 timeout = (long)(next_check_time - jiffies);
2999 prepare_to_wait(&kgnilnd_data.kgn_reaper_waitq, &wait,
3000 TASK_INTERRUPTIBLE);
3001 spin_unlock(&kgnilnd_data.kgn_reaper_lock);
3002 setup_timer(&timer, kgnilnd_reaper_poke_with_stick,
3004 mod_timer(&timer, (long) jiffies + timeout);
3006 /* check flag variables before committing */
3007 if (!kgnilnd_data.kgn_shutdown &&
3008 !kgnilnd_data.kgn_quiesce_trigger) {
3009 CDEBUG(D_INFO, "schedule timeout %ld (%lu sec)\n",
3010 timeout, cfs_duration_sec(timeout));
3012 CDEBUG(D_INFO, "awake after schedule\n");
3015 del_singleshot_timer_sync(&timer);
3016 spin_lock(&kgnilnd_data.kgn_reaper_lock);
3017 finish_wait(&kgnilnd_data.kgn_reaper_waitq, &wait);
3021 /* new_min_timeout is set from the conn timeouts and keepalive
3022 * this should end up with a min timeout of
3023 * GNILND_TIMEOUT2KEEPALIVE(t) or roughly LND_TIMEOUT/2 */
3024 if (kgnilnd_data.kgn_new_min_timeout < current_min_timeout) {
3025 current_min_timeout = kgnilnd_data.kgn_new_min_timeout;
3026 CDEBUG(D_NET, "Set new min timeout %ld\n",
3027 current_min_timeout);
3030 spin_unlock(&kgnilnd_data.kgn_reaper_lock);
3032 /* Compute how many table entries to check now so I get around
3033 * the whole table fast enough given that I do this at fixed
3034 * intervals of 'p' seconds */
3035 chunk = *kgnilnd_tunables.kgn_peer_hash_size;
3036 if (kgnilnd_data.kgn_new_min_timeout > n * p)
3037 chunk = (chunk * n * p) /
3038 kgnilnd_data.kgn_new_min_timeout;
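/* e.g. (illustrative values only): a 1000-entry peer hash with p = 1, n = 4
 * and a 100s minimum timeout gives chunk = 1000 * 4 / 100 = 40 buckets per
 * wake-up, so the whole table is swept about every 25 wake-ups - i.e. roughly
 * n sweeps per timeout interval. */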
3041 for (i = 0; i < chunk; i++) {
3042 kgnilnd_reaper_check(hash_index);
3043 hash_index = (hash_index + 1) %
3044 *kgnilnd_tunables.kgn_peer_hash_size;
3046 next_check_time = (long) jiffies + cfs_time_seconds(p);
3047 CDEBUG(D_INFO, "next check at %lu or in %d sec\n", next_check_time, p);
3049 spin_lock(&kgnilnd_data.kgn_reaper_lock);
3052 spin_unlock(&kgnilnd_data.kgn_reaper_lock);
3054 kgnilnd_thread_fini();
3059 kgnilnd_recv_bte_get(kgn_tx_t *tx) {
3060 unsigned niov, offset, nob;
3062 lnet_msg_t *lntmsg = tx->tx_lntmsg[0];
3063 kgnilnd_parse_lnet_rdma(lntmsg, &niov, &offset, &nob, &kiov, tx->tx_nob_rdma);
3066 lnet_copy_flat2kiov(
3069 tx->tx_buffer_copy + tx->tx_offset, 0, nob);
3071 memcpy(tx->tx_buffer, tx->tx_buffer_copy + tx->tx_offset, nob);
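/* the inbound data for a reverse RDMA (GET_DONE_REV / PUT_DONE_REV) landed in
 * the padded tx_buffer_copy at tx_offset; the copies above move it to the real
 * destination - lnet_copy_flat2kiov for a page (kiov) MD, or a plain memcpy
 * for a virtually contiguous buffer. */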
3078 kgnilnd_check_rdma_cq(kgn_device_t *dev)
3081 gni_post_descriptor_t *desc;
3083 kgn_tx_ev_id_t ev_id;
3085 int should_retry, rc;
3086 long num_processed = 0;
3087 kgn_conn_t *conn = NULL;
3088 kgn_tx_t *tx = NULL;
3089 kgn_rdma_desc_t *rdesc;
3094 /* make sure we don't keep looping if we need to reset */
3095 if (unlikely(kgnilnd_data.kgn_quiesce_trigger)) {
3096 return num_processed;
3098 rc = kgnilnd_mutex_trylock(&dev->gnd_cq_mutex);
3100 /* we didn't get the mutex, so return that there is still work
3104 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_DELAY_RDMA)) {
3105 /* a bit gross - but we need a good way to test for
3106 * delayed RDMA completions and the easiest way to do
3107 * that is to delay the RDMA CQ events */
3108 rrc = GNI_RC_NOT_DONE;
3110 rrc = kgnilnd_cq_get_event(dev->gnd_snd_rdma_cqh, &event_data);
3113 if (rrc == GNI_RC_NOT_DONE) {
3114 mutex_unlock(&dev->gnd_cq_mutex);
3115 CDEBUG(D_INFO, "SEND RDMA CQ %d empty processed %ld\n",
3116 dev->gnd_id, num_processed);
3117 return num_processed;
3119 dev->gnd_sched_alive = jiffies;
3122 LASSERTF(!GNI_CQ_OVERRUN(event_data),
3123 "this is bad, somehow our credits didn't protect us"
3124 " from CQ overrun\n");
3125 LASSERTF(GNI_CQ_GET_TYPE(event_data) == GNI_CQ_EVENT_TYPE_POST,
3126 "rrc %d, GNI_CQ_GET_TYPE("LPX64") = "LPX64"\n", rrc,
3127 event_data, GNI_CQ_GET_TYPE(event_data));
3129 rrc = kgnilnd_get_completed(dev->gnd_snd_rdma_cqh, event_data,
3131 mutex_unlock(&dev->gnd_cq_mutex);
3133 /* XXX Nic: Need better error handling here... */
3134 LASSERTF((rrc == GNI_RC_SUCCESS) ||
3135 (rrc == GNI_RC_TRANSACTION_ERROR),
3138 ev_id.txe_cookie = desc->post_id;
3140 kgnilnd_validate_tx_ev_id(&ev_id, &tx, &conn);
3142 if (conn == NULL || tx == NULL) {
3143 /* either conn or tx was already nuked and this is a "late"
3144 * completion, so drop it */
3148 GNITX_ASSERTF(tx, tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE ||
3149 tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE ||
3150 tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE_REV ||
3151 tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE_REV,
3152 "tx %p with type %d\n", tx, tx->tx_msg.gnm_type);
3154 GNIDBG_TX(D_NET, tx, "RDMA completion for %d bytes", tx->tx_nob);
3156 if (tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE_REV) {
3157 lnet_set_reply_msg_len(NULL, tx->tx_lntmsg[1],
3158 tx->tx_msg.gnm_u.completion.gncm_retval);
3162 if (tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE_REV && desc->status == GNI_RC_SUCCESS) {
3163 if (tx->tx_buffer_copy != NULL)
3164 kgnilnd_recv_bte_get(tx);
3165 rc = kgnilnd_verify_rdma_cksum(tx, tx->tx_putinfo.gnpam_payload_cksum, tx->tx_nob_rdma);
3168 if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE_REV && desc->status == GNI_RC_SUCCESS) {
3169 if (tx->tx_buffer_copy != NULL)
3170 kgnilnd_recv_bte_get(tx);
3171 rc = kgnilnd_verify_rdma_cksum(tx, tx->tx_getinfo.gngm_payload_cksum, tx->tx_nob_rdma);
3174 /* remove from rdmaq */
3175 spin_lock(&conn->gnc_list_lock);
3176 kgnilnd_tx_del_state_locked(tx, NULL, conn, GNILND_TX_ALLOCD);
3177 spin_unlock(&conn->gnc_list_lock);
3179 if (likely(desc->status == GNI_RC_SUCCESS) && rc == 0) {
3180 atomic_inc(&dev->gnd_rdma_ntx);
3181 atomic64_add(tx->tx_nob, &dev->gnd_rdma_txbytes);
3182 /* transaction succeeded, add into fmaq */
3183 kgnilnd_queue_tx(conn, tx);
3184 kgnilnd_peer_alive(conn->gnc_peer);
3186 /* drop ref from kgnilnd_validate_tx_ev_id */
3187 kgnilnd_admin_decref(conn->gnc_tx_in_use);
3188 kgnilnd_conn_decref(conn);
3192 /* fall through to the TRANSACTION_ERROR case */
3195 /* get stringified version for log messages */
3196 kgnilnd_cq_error_str(event_data, &err_str, 256);
3197 kgnilnd_cq_error_recoverable(event_data, &should_retry);
3199 /* make sure we are not off in the weeds with this tx */
3200 if (tx->tx_retrans >
3201 *kgnilnd_tunables.kgn_max_retransmits) {
3202 GNIDBG_TX(D_NETERROR, tx,
3203 "giving up on TX, too many retries", NULL);
3207 GNIDBG_TX(D_NETERROR, tx, "RDMA %s error (%s)",
3208 should_retry ? "transient" : "unrecoverable", err_str);
3210 if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE ||
3211 tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE_REV) {
3212 rdesc = &tx->tx_putinfo.gnpam_desc;
3213 rnob = tx->tx_putinfo.gnpam_desc.gnrd_nob;
3214 rcookie = tx->tx_putinfo.gnpam_dst_cookie;
3216 rdesc = &tx->tx_getinfo.gngm_desc;
3217 rnob = tx->tx_lntmsg[0]->msg_len;
3218 rcookie = tx->tx_getinfo.gngm_cookie;
3223 tx->tx_msg.gnm_type,
3227 kgnilnd_nak_rdma(conn,
3228 tx->tx_msg.gnm_type,
3231 tx->tx_msg.gnm_srcnid);
3232 kgnilnd_tx_done(tx, -EFAULT);
3233 kgnilnd_close_conn(conn, -ECOMM);
3236 /* drop ref from kgnilnd_validate_tx_ev_id */
3237 kgnilnd_admin_decref(conn->gnc_tx_in_use);
3238 kgnilnd_conn_decref(conn);
3243 kgnilnd_check_fma_send_cq(kgn_device_t *dev)
3247 kgn_tx_ev_id_t ev_id;
3248 kgn_tx_t *tx = NULL;
3249 kgn_conn_t *conn = NULL;
3250 int queued_fma, saw_reply, rc;
3251 long num_processed = 0;
3254 /* make sure we don't keep looping if we need to reset */
3255 if (unlikely(kgnilnd_data.kgn_quiesce_trigger)) {
3256 return num_processed;
3259 rc = kgnilnd_mutex_trylock(&dev->gnd_cq_mutex);
3261 /* we didn't get the mutex, so return that there is still work
3266 rrc = kgnilnd_cq_get_event(dev->gnd_snd_fma_cqh, &event_data);
3267 mutex_unlock(&dev->gnd_cq_mutex);
3269 if (rrc == GNI_RC_NOT_DONE) {
3271 "SMSG send CQ %d not ready (data "LPX64") "
3272 "processed %ld\n", dev->gnd_id, event_data,
3274 return num_processed;
3277 dev->gnd_sched_alive = jiffies;
3280 LASSERTF(!GNI_CQ_OVERRUN(event_data),
3281 "this is bad, somehow our credits didn't "
3282 "protect us from CQ overrun\n");
3283 LASSERTF(GNI_CQ_GET_TYPE(event_data) == GNI_CQ_EVENT_TYPE_SMSG,
3284 "rrc %d, GNI_CQ_GET_TYPE("LPX64") = "LPX64"\n", rrc,
3285 event_data, GNI_CQ_GET_TYPE(event_data));
3287 /* if SMSG couldn't handle an error, time for conn to die */
3288 if (unlikely(rrc == GNI_RC_TRANSACTION_ERROR)) {
3291 /* need to take the write_lock to ensure atomicity
3292 * on the conn state if we need to close it */
3293 write_lock(&kgnilnd_data.kgn_peer_conn_lock);
3294 conn = kgnilnd_cqid2conn_locked(GNI_CQ_GET_INST_ID(event_data));
3296 /* Conn was destroyed? */
3298 "SMSG CQID lookup "LPX64" failed\n",
3299 GNI_CQ_GET_INST_ID(event_data));
3300 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
3304 kgnilnd_cq_error_str(event_data, &err_str, 256);
3305 CNETERR("SMSG send error to %s: rc %d (%s)\n",
3306 libcfs_nid2str(conn->gnc_peer->gnp_nid),
3308 kgnilnd_close_conn_locked(conn, -ECOMM);
3310 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
3312 /* no need to process rest of this tx -
3313 * it is getting canceled */
3317 /* fall through to GNI_RC_SUCCESS case */
3318 ev_id.txe_smsg_id = GNI_CQ_GET_MSG_ID(event_data);
3320 kgnilnd_validate_tx_ev_id(&ev_id, &tx, &conn);
3321 if (conn == NULL || tx == NULL) {
3322 /* either conn or tx was already nuked and this is a "late"
3323 * completion, so drop it */
3327 tx->tx_conn->gnc_last_tx_cq = jiffies;
3328 if (tx->tx_msg.gnm_type == GNILND_MSG_NOOP) {
3329 set_mb(conn->gnc_last_noop_cq, jiffies);
3332 /* lock tx_list_state and tx_state */
3333 spin_lock(&tx->tx_conn->gnc_list_lock);
3335 GNITX_ASSERTF(tx, tx->tx_list_state == GNILND_TX_LIVE_FMAQ,
3336 "state not GNILND_TX_LIVE_FMAQ", NULL);
3337 GNITX_ASSERTF(tx, tx->tx_state & GNILND_TX_WAITING_COMPLETION,
3338 "not waiting for completion", NULL);
3340 GNIDBG_TX(D_NET, tx, "SMSG complete tx_state %x rc %d",
3343 tx->tx_state &= ~GNILND_TX_WAITING_COMPLETION;
3345 /* This will trigger other FMA sends that were
3346 * pending this completion */
3347 queued_fma = !list_empty(&tx->tx_conn->gnc_fmaq);
3349 /* we either did not expect reply or we already got it */
3350 saw_reply = !(tx->tx_state & GNILND_TX_WAITING_REPLY);
3352 spin_unlock(&tx->tx_conn->gnc_list_lock);
3355 CDEBUG(D_NET, "scheduling conn 0x%p->%s for fmaq\n",
3357 libcfs_nid2str(conn->gnc_peer->gnp_nid));
3358 kgnilnd_schedule_conn(conn);
3361 /* If saw_reply is false, the tx could be nuked as soon as gnc_list_lock is dropped.
3362 * If saw_reply is true, we know that the tx is safe to use as the other thread
3363 * is already finished with it.
3367 /* no longer need to track on the live_fmaq */
3368 kgnilnd_tx_del_state_locked(tx, NULL, tx->tx_conn, GNILND_TX_ALLOCD);
3370 if (tx->tx_state & GNILND_TX_PENDING_RDMA) {
3371 /* we already got reply & were waiting for
3372 * completion of initial send */
3373 /* to initiate RDMA transaction */
3374 GNIDBG_TX(D_NET, tx,
3375 "Pending RDMA 0x%p type 0x%02x",
3376 tx->tx_msg.gnm_type);
3377 tx->tx_state &= ~GNILND_TX_PENDING_RDMA;
3378 rc = kgnilnd_send_mapped_tx(tx, 0);
3379 GNITX_ASSERTF(tx, rc == 0, "RDMA send failed: %d\n", rc);
3381 /* we are done with this tx */
3382 GNIDBG_TX(D_NET, tx,
3383 "Done with tx type 0x%02x",
3384 tx->tx_msg.gnm_type);
3385 kgnilnd_tx_done(tx, tx->tx_rc);
3389 /* drop ref from kgnilnd_validate_tx_ev_id */
3390 kgnilnd_admin_decref(conn->gnc_tx_in_use);
3391 kgnilnd_conn_decref(conn);
3393 /* if we are waiting for a REPLY, we'll handle the tx then */
3394 } /* end for loop */
3398 kgnilnd_check_fma_rcv_cq(kgn_device_t *dev)
3403 long num_processed = 0;
3404 struct list_head *conns;
3405 struct list_head *tmp;
3409 /* make sure we don't keep looping if we need to reset */
3410 if (unlikely(kgnilnd_data.kgn_quiesce_trigger)) {
3411 return num_processed;
3414 rc = kgnilnd_mutex_trylock(&dev->gnd_cq_mutex);
3416 /* we didn't get the mutex, so return that there is still work
3420 rrc = kgnilnd_cq_get_event(dev->gnd_rcv_fma_cqh, &event_data);
3421 mutex_unlock(&dev->gnd_cq_mutex);
3423 if (rrc == GNI_RC_NOT_DONE) {
3424 CDEBUG(D_INFO, "SMSG RX CQ %d empty data "LPX64" "
3426 dev->gnd_id, event_data, num_processed);
3427 return num_processed;
3429 dev->gnd_sched_alive = jiffies;
3432 /* this is the only CQ that can really handle transient
3434 if (CFS_FAIL_CHECK(CFS_FAIL_GNI_CQ_GET_EVENT)) {
3435 rrc = cfs_fail_val ? cfs_fail_val
3436 : GNI_RC_ERROR_RESOURCE;
3437 if (rrc == GNI_RC_ERROR_RESOURCE) {