X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lnet%2Fklnds%2Fgnilnd%2Fgnilnd_cb.c;h=fbcec237cf52dc422cab6391a0ebc366a4c8a599;hp=5d8e9243b82ee39063f4713f653efa743e60f201;hb=0703fa84c0f2dc69025a0849ba13f0d0d1a97738;hpb=70bb27b746c1e80196815e335c08bdc113ef052a;ds=sidebyside diff --git a/lnet/klnds/gnilnd/gnilnd_cb.c b/lnet/klnds/gnilnd/gnilnd_cb.c index 5d8e924..fbcec23 100644 --- a/lnet/klnds/gnilnd/gnilnd_cb.c +++ b/lnet/klnds/gnilnd/gnilnd_cb.c @@ -26,6 +26,10 @@ #include #include +#include + +#include + #include "gnilnd.h" /* this is useful when needed to debug wire corruption. */ @@ -78,15 +82,20 @@ kgnilnd_schedule_device(kgn_device_t *dev) * has come around and set ready to zero */ already_live = cmpxchg(&dev->gnd_ready, GNILND_DEV_IDLE, GNILND_DEV_IRQ); - if (!already_live) { - wake_up_all(&dev->gnd_waitq); - } - return; + if (!already_live) + wake_up(&dev->gnd_waitq); +} + +void kgnilnd_schedule_device_timer(cfs_timer_cb_arg_t data) +{ + kgn_device_t *dev = cfs_from_timer(dev, data, gnd_map_timer); + + kgnilnd_schedule_device(dev); } -void kgnilnd_schedule_device_timer(unsigned long arg) +void kgnilnd_schedule_device_timer_rd(cfs_timer_cb_arg_t data) { - kgn_device_t *dev = (kgn_device_t *) arg; + kgn_device_t *dev = cfs_from_timer(dev, data, gnd_rdmaq_timer); kgnilnd_schedule_device(dev); } @@ -151,7 +160,7 @@ kgnilnd_schedule_process_conn(kgn_conn_t *conn, int sched_intent) * as scheduled */ int -_kgnilnd_schedule_conn(kgn_conn_t *conn, const char *caller, int line, int refheld) +_kgnilnd_schedule_conn(kgn_conn_t *conn, const char *caller, int line, int refheld, int lock_held) { kgn_device_t *dev = conn->gnc_device; int sched; @@ -184,10 +193,11 @@ _kgnilnd_schedule_conn(kgn_conn_t *conn, const char *caller, int line, int refhe conn, sched); CDEBUG(D_INFO, "scheduling conn 0x%p caller %s:%d\n", conn, caller, line); - - spin_lock(&dev->gnd_lock); + if (!lock_held) + spin_lock(&dev->gnd_lock); list_add_tail(&conn->gnc_schedlist, &dev->gnd_ready_conns); - spin_unlock(&dev->gnd_lock); + if (!lock_held) + spin_unlock(&dev->gnd_lock); set_mb(conn->gnc_last_sched_ask, jiffies); rc = 1; } else { @@ -197,6 +207,23 @@ _kgnilnd_schedule_conn(kgn_conn_t *conn, const char *caller, int line, int refhe /* make sure thread(s) going to process conns - but let it make * separate decision from conn schedule */ + if (!lock_held) + kgnilnd_schedule_device(dev); + return rc; +} + +int +_kgnilnd_schedule_delay_conn(kgn_conn_t *conn) +{ + kgn_device_t *dev = conn->gnc_device; + int rc = 0; + spin_lock(&dev->gnd_lock); + if (list_empty(&conn->gnc_delaylist)) { + list_add_tail(&conn->gnc_delaylist, &dev->gnd_delay_conns); + rc = 1; + } + spin_unlock(&dev->gnd_lock); + kgnilnd_schedule_device(dev); return rc; } @@ -232,12 +259,12 @@ kgnilnd_free_tx(kgn_tx_t *tx) if (tx->tx_phys != NULL) { kmem_cache_free(kgnilnd_data.kgn_tx_phys_cache, tx->tx_phys); CDEBUG(D_MALLOC, "slab-freed 'tx_phys': %lu at %p.\n", - LNET_MAX_IOV * sizeof(gni_mem_segment_t), tx->tx_phys); + GNILND_MAX_IOV * sizeof(gni_mem_segment_t), tx->tx_phys); } /* Only free the buffer if we used it */ if (tx->tx_buffer_copy != NULL) { - vfree(tx->tx_buffer_copy); + kgnilnd_vfree(tx->tx_buffer_copy, tx->tx_rdma_desc.length); tx->tx_buffer_copy = NULL; CDEBUG(D_MALLOC, "vfreed buffer2\n"); } @@ -256,7 +283,7 @@ kgnilnd_alloc_tx (void) if (CFS_FAIL_CHECK(CFS_FAIL_GNI_ALLOC_TX)) return tx; - tx = kmem_cache_alloc(kgnilnd_data.kgn_tx_cache, GFP_ATOMIC); + tx = kmem_cache_zalloc(kgnilnd_data.kgn_tx_cache, GFP_ATOMIC); 
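/*
 * Aside, not part of the patch: the switch to kmem_cache_zalloc() above is
 * what lets the explicit memset() a few lines below be deleted - zalloc is
 * just kmem_cache_alloc() with __GFP_ZERO, so the object already comes back
 * cleared.  A minimal kernel-style sketch of the equivalence; the cache and
 * struct names here are hypothetical, not from gnilnd.
 */
#include <linux/slab.h>
#include <linux/string.h>

struct demo_obj {
        int   state;
        void *buffer;
};

/* old pattern: allocate, then clear by hand because slab memory is dirty */
static struct demo_obj *demo_alloc_old(struct kmem_cache *cache)
{
        struct demo_obj *o = kmem_cache_alloc(cache, GFP_ATOMIC);

        if (o != NULL)
                memset(o, 0, sizeof(*o));
        return o;
}

/* new pattern: one call, same result */
static struct demo_obj *demo_alloc_new(struct kmem_cache *cache)
{
        return kmem_cache_zalloc(cache, GFP_ATOMIC);
}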
if (tx == NULL) { CERROR("failed to allocate tx\n"); return NULL; @@ -264,9 +291,6 @@ kgnilnd_alloc_tx (void) CDEBUG(D_MALLOC, "slab-alloced 'tx': %lu at %p.\n", sizeof(*tx), tx); - /* need this memset, cache alloc'd memory is not cleared */ - memset(tx, 0, sizeof(*tx)); - /* setup everything here to minimize time under the lock */ tx->tx_buftype = GNILND_BUF_NONE; tx->tx_msg.gnm_type = GNILND_MSG_NONE; @@ -283,8 +307,8 @@ kgnilnd_alloc_tx (void) #define _kgnilnd_cksum(seed, ptr, nob) csum_partial(ptr, nob, seed) /* we don't use offset as every one is passing a buffer reference that already - * includes the offset into the base address - - * see kgnilnd_setup_virt_buffer and kgnilnd_setup_immediate_buffer */ + * includes the offset into the base address. + */ static inline __u16 kgnilnd_cksum(void *ptr, size_t nob) { @@ -302,9 +326,9 @@ kgnilnd_cksum(void *ptr, size_t nob) return sum; } -inline __u16 -kgnilnd_cksum_kiov(unsigned int nkiov, lnet_kiov_t *kiov, - unsigned int offset, unsigned int nob, int dump_blob) +__u16 +kgnilnd_cksum_kiov(unsigned int nkiov, struct bio_vec *kiov, + unsigned int offset, unsigned int nob, int dump_blob) { __wsum cksum = 0; __wsum tmpck; @@ -321,15 +345,15 @@ kgnilnd_cksum_kiov(unsigned int nkiov, lnet_kiov_t *kiov, /* if loops changes, please change kgnilnd_setup_phys_buffer */ - while (offset >= kiov->kiov_len) { - offset -= kiov->kiov_len; + while (offset >= kiov->bv_len) { + offset -= kiov->bv_len; nkiov--; kiov++; LASSERT(nkiov > 0); } - /* ignore nob here, if nob < (kiov_len - offset), kiov == 1 */ - odd = (unsigned long) (kiov[0].kiov_len - offset) & 1; + /* ignore nob here, if nob < (bv_len - offset), kiov == 1 */ + odd = (unsigned long) (kiov[0].bv_len - offset) & 1; if ((odd || *kgnilnd_tunables.kgn_vmap_cksum) && nkiov > 1) { struct page **pages = kgnilnd_data.kgn_cksum_map_pages[get_cpu()]; @@ -338,10 +362,10 @@ kgnilnd_cksum_kiov(unsigned int nkiov, lnet_kiov_t *kiov, get_cpu(), kgnilnd_data.kgn_cksum_map_pages); CDEBUG(D_BUFFS, "odd %d len %u offset %u nob %u\n", - odd, kiov[0].kiov_len, offset, nob); + odd, kiov[0].bv_len, offset, nob); for (i = 0; i < nkiov; i++) { - pages[i] = kiov[i].kiov_page; + pages[i] = kiov[i].bv_page; } addr = vmap(pages, nkiov, VM_MAP, PAGE_KERNEL); @@ -354,42 +378,46 @@ kgnilnd_cksum_kiov(unsigned int nkiov, lnet_kiov_t *kiov, } atomic_inc(&kgnilnd_data.kgn_nvmap_cksum); - tmpck = _kgnilnd_cksum(0, (void *) addr + kiov[0].kiov_offset + offset, nob); + tmpck = _kgnilnd_cksum(0, ((void *) addr + kiov[0].bv_offset + + offset), nob); cksum = tmpck; if (dump_blob) { kgnilnd_dump_blob(D_BUFFS, "flat kiov RDMA payload", - (void *)addr + kiov[0].kiov_offset + offset, nob); + (void *)addr + kiov[0].bv_offset + + offset, nob); } CDEBUG(D_BUFFS, "cksum 0x%x (+0x%x) for addr 0x%p+%u len %u offset %u\n", - cksum, tmpck, addr, kiov[0].kiov_offset, nob, offset); + cksum, tmpck, addr, kiov[0].bv_offset, nob, offset); vunmap(addr); } else { do { - fraglen = min(kiov->kiov_len - offset, nob); + fraglen = min(kiov->bv_len - offset, nob); /* make dang sure we don't send a bogus checksum if somehow we get * an odd length fragment on anything but the last entry in a kiov - * we know from kgnilnd_setup_rdma_buffer that we can't have non * PAGE_SIZE pages in the middle, so if nob < PAGE_SIZE, it is the last one */ LASSERTF(!(fraglen&1) || (nob < PAGE_SIZE), - "odd fraglen %u on nkiov %d, nob %u kiov_len %u offset %u kiov 0x%p\n", - fraglen, nkiov, nob, kiov->kiov_len, offset, kiov); + "odd fraglen %u on nkiov %d, nob %u bv_len %u offset %u 
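/*
 * Aside, not part of the patch: a userspace analogue of the fragment walk
 * kgnilnd_cksum_kiov() performs - skip whole fragments consumed by the
 * starting offset, then fold 'nob' bytes in fragment-sized pieces.  A plain
 * byte sum stands in for csum_partial(), and a flat pointer stands in for
 * the kmap()/vmap()ed bio_vec page; struct frag and cksum_frags() are
 * illustrative names only.  The caller is assumed (as in the driver, which
 * asserts nkiov > 0) to pass an offset/nob pair that fits in the list.
 */
#include <stddef.h>
#include <stdint.h>

struct frag {
        const uint8_t *buf;     /* cf. bv_page + bv_offset, already mapped */
        size_t         len;     /* cf. bv_len */
};

static uint32_t cksum_frags(const struct frag *f, unsigned int nfrags,
                            size_t offset, size_t nob)
{
        uint32_t sum = 0;

        /* step over fragments wholly consumed by the starting offset */
        while (offset >= f->len) {
                offset -= f->len;
                f++;
                nfrags--;
        }

        while (nob > 0 && nfrags > 0) {
                size_t fraglen = f->len - offset;

                if (fraglen > nob)
                        fraglen = nob;
                for (size_t i = 0; i < fraglen; i++)
                        sum += f->buf[offset + i];
                offset = 0;     /* only the first fragment carries an offset */
                nob -= fraglen;
                f++;
                nfrags--;
        }
        return sum;
}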
kiov 0x%p\n", + fraglen, nkiov, nob, kiov->bv_len, + offset, kiov); - addr = (void *)kmap(kiov->kiov_page) + kiov->kiov_offset + offset; + addr = (void *)kmap(kiov->bv_page) + kiov->bv_offset + + offset; tmpck = _kgnilnd_cksum(cksum, addr, fraglen); CDEBUG(D_BUFFS, "cksum 0x%x (+0x%x) for page 0x%p+%u (0x%p) len %u offset %u\n", - cksum, tmpck, kiov->kiov_page, kiov->kiov_offset, addr, - fraglen, offset); + cksum, tmpck, kiov->bv_page, kiov->bv_offset, + addr, fraglen, offset); cksum = tmpck; if (dump_blob) kgnilnd_dump_blob(D_BUFFS, "kiov cksum", addr, fraglen); - kunmap(kiov->kiov_page); + kunmap(kiov->bv_page); kiov++; nkiov--; @@ -474,7 +502,7 @@ kgnilnd_nak_rdma(kgn_conn_t *conn, int rx_type, int error, __u64 cookie, lnet_ni LBUG(); } /* only allow NAK on error and truncate to zero */ - LASSERTF(error <= 0, "error %d conn 0x%p, cookie "LPU64"\n", + LASSERTF(error <= 0, "error %d conn 0x%p, cookie %llu\n", error, conn, cookie); tx = kgnilnd_new_tx_msg(nak_type, source); @@ -489,9 +517,9 @@ kgnilnd_nak_rdma(kgn_conn_t *conn, int rx_type, int error, __u64 cookie, lnet_ni kgnilnd_queue_tx(conn, tx); } -int +static int kgnilnd_setup_immediate_buffer(kgn_tx_t *tx, unsigned int niov, - struct kvec *iov, lnet_kiov_t *kiov, + struct bio_vec *kiov, unsigned int offset, unsigned int nob) { kgn_msg_t *msg = &tx->tx_msg; @@ -504,44 +532,48 @@ kgnilnd_setup_immediate_buffer(kgn_tx_t *tx, unsigned int niov, if (nob == 0) { tx->tx_buffer = NULL; - } else if (kiov != NULL) { - - if ((niov > 0) && unlikely(niov > (nob/PAGE_SIZE))) { - niov = ((nob + offset + PAGE_SIZE - 1) / PAGE_SIZE); - } + } else { - LASSERTF(niov > 0 && niov < GNILND_MAX_IMMEDIATE/PAGE_SIZE, - "bad niov %d msg %p kiov %p iov %p offset %d nob%d\n", - niov, msg, kiov, iov, offset, nob); + if (niov && niov > (nob >> PAGE_SHIFT)) + niov = DIV_ROUND_UP(nob + offset + kiov->bv_offset, + PAGE_SIZE); - while (offset >= kiov->kiov_len) { - offset -= kiov->kiov_len; + while (offset >= kiov->bv_len) { + offset -= kiov->bv_len; niov--; kiov++; LASSERT(niov > 0); } + + LASSERTF(niov > 0 && niov < GNILND_MAX_IMMEDIATE/PAGE_SIZE, + "bad niov %d msg %p kiov %p offset %d nob%d\n", + niov, msg, kiov, offset, nob); + for (i = 0; i < niov; i++) { - /* We can't have a kiov_offset on anything but the first entry, - * otherwise we'll have a hole at the end of the mapping as we only map - * whole pages. - * Also, if we have a kiov_len < PAGE_SIZE but we need to map more - * than kiov_len, we will also have a whole at the end of that page - * which isn't allowed */ - if ((kiov[i].kiov_offset != 0 && i > 0) || - (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_SIZE && i < niov - 1)) { - CNETERR("Can't make payload contiguous in I/O VM:" - "page %d, offset %u, nob %u, kiov_offset %u kiov_len %u \n", - i, offset, nob, kiov->kiov_offset, kiov->kiov_len); + /* We can't have a bv_offset on anything but the first + * entry, otherwise we'll have a hole at the end of the + * mapping as we only map whole pages. 
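/*
 * Aside, not part of the patch: the niov clamp above counts how many pages
 * the immediate payload really touches - the payload length plus whatever
 * precedes it on the first page (the LNet offset and the fragment's
 * bv_offset), rounded up to whole pages, which is what DIV_ROUND_UP() does.
 * A runnable sketch assuming a 4 KiB page size; names are illustrative.
 */
#include <stdio.h>

#define PAGE_SZ 4096UL          /* assumed page size for the example */

static unsigned long pages_needed(unsigned long nob, unsigned long offset,
                                  unsigned long first_page_offset)
{
        return (nob + offset + first_page_offset + PAGE_SZ - 1) / PAGE_SZ;
}

int main(void)
{
        /* 6000 bytes starting 100 bytes into the first page touch 2 pages;
         * the same 6000 bytes starting at byte 4000 touch 3. */
        printf("%lu %lu\n", pages_needed(6000, 0, 100),
               pages_needed(6000, 0, 4000));
        return 0;
}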
+ * Also, if we have a bv_len < PAGE_SIZE but we need to + * map more than bv_len, we will also have a whole at + * the end of that page which isn't allowed + */ + if ((kiov[i].bv_offset != 0 && i > 0) || + (kiov[i].bv_offset + kiov[i].bv_len != PAGE_SIZE && + i < niov - 1)) { + CNETERR("Can't make payload contiguous in I/O VM:page %d, offset %u, nob %u, bv_offset %u bv_len %u\n", + i, offset, nob, kiov->bv_offset, + kiov->bv_len); RETURN(-EINVAL); } - tx->tx_imm_pages[i] = kiov[i].kiov_page; + tx->tx_imm_pages[i] = kiov[i].bv_page; } /* hijack tx_phys for the later unmap */ if (niov == 1) { /* tx->phyx being equal to NULL is the signal for unmap to discern between kmap and vmap */ tx->tx_phys = NULL; - tx->tx_buffer = (void *)kmap(tx->tx_imm_pages[0]) + kiov[0].kiov_offset + offset; + tx->tx_buffer = (void *)kmap(tx->tx_imm_pages[0]) + + kiov[0].bv_offset + offset; atomic_inc(&kgnilnd_data.kgn_nkmap_short); GNIDBG_TX(D_NET, tx, "kmapped page for %d bytes for kiov 0x%p, buffer 0x%p", nob, kiov, tx->tx_buffer); @@ -553,37 +585,18 @@ kgnilnd_setup_immediate_buffer(kgn_tx_t *tx, unsigned int niov, } atomic_inc(&kgnilnd_data.kgn_nvmap_short); - /* make sure we take into account the kiov offset as the start of the buffer */ - tx->tx_buffer = (void *)tx->tx_phys + kiov[0].kiov_offset + offset; - GNIDBG_TX(D_NET, tx, "mapped %d pages for %d bytes from kiov 0x%p to 0x%p, buffer 0x%p", - niov, nob, kiov, tx->tx_phys, tx->tx_buffer); + /* make sure we take into account the kiov offset as the + * start of the buffer + */ + tx->tx_buffer = (void *)tx->tx_phys + kiov[0].bv_offset + + offset; + GNIDBG_TX(D_NET, tx, + "mapped %d pages for %d bytes from kiov 0x%p to 0x%p, buffer 0x%p", + niov, nob, kiov, tx->tx_phys, tx->tx_buffer); } tx->tx_buftype = GNILND_BUF_IMMEDIATE_KIOV; tx->tx_nob = nob; - } else { - /* For now this is almost identical to kgnilnd_setup_virt_buffer, but we - * could "flatten" the payload into a single contiguous buffer ready - * for sending direct over an FMA if we ever needed to. 
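/*
 * Aside, not part of the patch: the check above enforces the rule that a
 * multi-fragment payload can only be presented as one contiguous virtual
 * range (via vmap() of whole pages) if every fragment but the first starts
 * at page offset 0 and every fragment but the last fills its page; anything
 * else leaves a hole in the mapping.  A standalone validation sketch -
 * struct pg_frag and frags_contiguous() are illustrative names only.
 */
#include <stdbool.h>

#define PAGE_SZ 4096u           /* assumed page size for the example */

struct pg_frag {
        unsigned int off;       /* cf. bv_offset */
        unsigned int len;       /* cf. bv_len */
};

static bool frags_contiguous(const struct pg_frag *f, unsigned int n)
{
        for (unsigned int i = 0; i < n; i++) {
                if (i > 0 && f[i].off != 0)
                        return false;           /* hole before this fragment */
                if (i < n - 1 && f[i].off + f[i].len != PAGE_SZ)
                        return false;           /* hole after this fragment */
        }
        return true;
}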
*/ - - LASSERT(niov > 0); - - while (offset >= iov->iov_len) { - offset -= iov->iov_len; - niov--; - iov++; - LASSERT(niov > 0); - } - - if (nob > iov->iov_len - offset) { - CERROR("Can't handle multiple vaddr fragments\n"); - return -EMSGSIZE; - } - - tx->tx_buffer = (void *)(((unsigned long)iov->iov_base) + offset); - - tx->tx_buftype = GNILND_BUF_IMMEDIATE; - tx->tx_nob = nob; } /* checksum payload early - it shouldn't be changing after lnd_send */ @@ -604,35 +617,7 @@ kgnilnd_setup_immediate_buffer(kgn_tx_t *tx, unsigned int niov, } int -kgnilnd_setup_virt_buffer(kgn_tx_t *tx, - unsigned int niov, struct kvec *iov, - unsigned int offset, unsigned int nob) - -{ - LASSERT(nob > 0); - LASSERT(niov > 0); - LASSERT(tx->tx_buftype == GNILND_BUF_NONE); - - while (offset >= iov->iov_len) { - offset -= iov->iov_len; - niov--; - iov++; - LASSERT(niov > 0); - } - - if (nob > iov->iov_len - offset) { - CERROR("Can't handle multiple vaddr fragments\n"); - return -EMSGSIZE; - } - - tx->tx_buftype = GNILND_BUF_VIRT_UNMAPPED; - tx->tx_nob = nob; - tx->tx_buffer = (void *)(((unsigned long)iov->iov_base) + offset); - return 0; -} - -int -kgnilnd_setup_phys_buffer(kgn_tx_t *tx, int nkiov, lnet_kiov_t *kiov, +kgnilnd_setup_phys_buffer(kgn_tx_t *tx, int nkiov, struct bio_vec *kiov, unsigned int offset, unsigned int nob) { gni_mem_segment_t *phys; @@ -655,13 +640,13 @@ kgnilnd_setup_phys_buffer(kgn_tx_t *tx, int nkiov, lnet_kiov_t *kiov, } CDEBUG(D_MALLOC, "slab-alloced 'tx->tx_phys': %lu at %p.\n", - LNET_MAX_IOV * sizeof(gni_mem_segment_t), tx->tx_phys); + GNILND_MAX_IOV * sizeof(gni_mem_segment_t), tx->tx_phys); /* if loops changes, please change kgnilnd_cksum_kiov * and kgnilnd_setup_immediate_buffer */ - while (offset >= kiov->kiov_len) { - offset -= kiov->kiov_len; + while (offset >= kiov->bv_len) { + offset -= kiov->bv_len; nkiov--; kiov++; LASSERT(nkiov > 0); @@ -673,36 +658,36 @@ kgnilnd_setup_phys_buffer(kgn_tx_t *tx, int nkiov, lnet_kiov_t *kiov, tx->tx_buftype = GNILND_BUF_PHYS_UNMAPPED; tx->tx_nob = nob; - /* kiov_offset is start of 'valid' buffer, so index offset past that */ - tx->tx_buffer = (void *)((unsigned long)(kiov->kiov_offset + offset)); + /* bv_offset is start of 'valid' buffer, so index offset past that */ + tx->tx_buffer = (void *)((unsigned long)(kiov->bv_offset + offset)); phys = tx->tx_phys; CDEBUG(D_NET, "tx 0x%p buffer 0x%p map start kiov 0x%p+%u niov %d offset %u\n", - tx, tx->tx_buffer, kiov, kiov->kiov_offset, nkiov, offset); + tx, tx->tx_buffer, kiov, kiov->bv_offset, nkiov, offset); do { - fraglen = min(kiov->kiov_len - offset, nob); - - /* We can't have a kiov_offset on anything but the first entry, - * otherwise we'll have a hole at the end of the mapping as we only map - * whole pages. Only the first page is allowed to have an offset - - * we'll add that into tx->tx_buffer and that will get used when we - * map in the segments (see kgnilnd_map_buffer). - * Also, if we have a kiov_len < PAGE_SIZE but we need to map more - * than kiov_len, we will also have a whole at the end of that page - * which isn't allowed */ + fraglen = min(kiov->bv_len - offset, nob); + + /* We can't have a bv_offset on anything but the first entry, + * otherwise we'll have a hole at the end of the mapping as we + * only map whole pages. Only the first page is allowed to + * have an offset - we'll add that into tx->tx_buffer and that + * will get used when we map in the segments (see + * kgnilnd_map_buffer). 
Also, if we have a bv_len < PAGE_SIZE + * but we need to map more than bv_len, we will also have a + * whole at the end of that page which isn't allowed + */ if ((phys != tx->tx_phys) && - ((kiov->kiov_offset != 0) || - ((kiov->kiov_len < PAGE_SIZE) && (nob > kiov->kiov_len)))) { - CERROR("Can't make payload contiguous in I/O VM:" - "page %d, offset %u, nob %u, kiov_offset %u kiov_len %u \n", + ((kiov->bv_offset != 0) || + ((kiov->bv_len < PAGE_SIZE) && (nob > kiov->bv_len)))) { + CERROR("Can't make payload contiguous in I/O VM:page %d, offset %u, nob %u, bv_offset %u bv_len %u\n", (int)(phys - tx->tx_phys), - offset, nob, kiov->kiov_offset, kiov->kiov_len); + offset, nob, kiov->bv_offset, kiov->bv_len); rc = -EINVAL; GOTO(error, rc); } - if ((phys - tx->tx_phys) == LNET_MAX_IOV) { + if ((phys - tx->tx_phys) == GNILND_MAX_IOV) { CERROR ("payload too big (%d)\n", (int)(phys - tx->tx_phys)); rc = -EMSGSIZE; GOTO(error, rc); @@ -713,11 +698,12 @@ kgnilnd_setup_phys_buffer(kgn_tx_t *tx, int nkiov, lnet_kiov_t *kiov, GOTO(error, rc); } - CDEBUG(D_BUFFS, "page 0x%p kiov_offset %u kiov_len %u nob %u " - "nkiov %u offset %u\n", - kiov->kiov_page, kiov->kiov_offset, kiov->kiov_len, nob, nkiov, offset); + CDEBUG(D_BUFFS, + "page 0x%p bv_offset %u bv_len %u nob %u nkiov %u offset %u\n", + kiov->bv_page, kiov->bv_offset, kiov->bv_len, nob, nkiov, + offset); - phys->address = page_to_phys(kiov->kiov_page); + phys->address = page_to_phys(kiov->bv_page); phys++; kiov++; nkiov--; @@ -745,21 +731,10 @@ error: static inline int kgnilnd_setup_rdma_buffer(kgn_tx_t *tx, unsigned int niov, - struct kvec *iov, lnet_kiov_t *kiov, + struct bio_vec *kiov, unsigned int offset, unsigned int nob) { - int rc; - - LASSERTF((iov == NULL) != (kiov == NULL), "iov 0x%p, kiov 0x%p, tx 0x%p," - " offset %d, nob %d, niov %d\n" - , iov, kiov, tx, offset, nob, niov); - - if (kiov != NULL) { - rc = kgnilnd_setup_phys_buffer(tx, niov, kiov, offset, nob); - } else { - rc = kgnilnd_setup_virt_buffer(tx, niov, iov, offset, nob); - } - return rc; + return kgnilnd_setup_phys_buffer(tx, niov, kiov, offset, nob); } /* kgnilnd_parse_lnet_rdma() @@ -773,16 +748,16 @@ kgnilnd_setup_rdma_buffer(kgn_tx_t *tx, unsigned int niov, * transfer. 
*/ static void -kgnilnd_parse_lnet_rdma(lnet_msg_t *lntmsg, unsigned int *niov, +kgnilnd_parse_lnet_rdma(struct lnet_msg *lntmsg, unsigned int *niov, unsigned int *offset, unsigned int *nob, - lnet_kiov_t **kiov, int put_len) + struct bio_vec **kiov, int put_len) { /* GETs are weird, see kgnilnd_send */ if (lntmsg->msg_type == LNET_MSG_GET) { if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0) { *kiov = NULL; } else { - *kiov = lntmsg->msg_md->md_iov.kiov; + *kiov = lntmsg->msg_md->md_kiov; } *niov = lntmsg->msg_md->md_niov; *nob = lntmsg->msg_md->md_length; @@ -798,10 +773,10 @@ kgnilnd_parse_lnet_rdma(lnet_msg_t *lntmsg, unsigned int *niov, static inline void kgnilnd_compute_rdma_cksum(kgn_tx_t *tx, int put_len) { - unsigned int niov, offset, nob; - lnet_kiov_t *kiov; - lnet_msg_t *lntmsg = tx->tx_lntmsg[0]; - int dump_cksum = (*kgnilnd_tunables.kgn_checksum_dump > 1); + unsigned int niov, offset, nob; + struct bio_vec *kiov; + struct lnet_msg *lntmsg = tx->tx_lntmsg[0]; + int dump_cksum = (*kgnilnd_tunables.kgn_checksum_dump > 1); GNITX_ASSERTF(tx, ((tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE) || (tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE) || @@ -851,8 +826,8 @@ kgnilnd_verify_rdma_cksum(kgn_tx_t *tx, __u16 rx_cksum, int put_len) int rc = 0; __u16 cksum; unsigned int niov, offset, nob; - lnet_kiov_t *kiov; - lnet_msg_t *lntmsg = tx->tx_lntmsg[0]; + struct bio_vec *kiov; + struct lnet_msg *lntmsg = tx->tx_lntmsg[0]; int dump_on_err = *kgnilnd_tunables.kgn_checksum_dump; /* we can only match certain requests */ @@ -900,7 +875,7 @@ kgnilnd_verify_rdma_cksum(kgn_tx_t *tx, __u16 rx_cksum, int put_len) kgnilnd_dump_blob(D_BUFFS, "RDMA payload", tx->tx_buffer, nob); } - /* fall through to dump log */ + fallthrough; case 1: libcfs_debug_dumplog(); break; @@ -935,18 +910,12 @@ kgnilnd_mem_add_map_list(kgn_device_t *dev, kgn_tx_t *tx) dev->gnd_map_nphys++; dev->gnd_map_physnop += tx->tx_phys_npages; break; - - case GNILND_BUF_VIRT_MAPPED: - bytes = tx->tx_nob; - dev->gnd_map_nvirt++; - dev->gnd_map_virtnob += tx->tx_nob; - break; } if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_ACK || tx->tx_msg.gnm_type == GNILND_MSG_GET_REQ) { atomic64_add(bytes, &dev->gnd_rdmaq_bytes_out); - GNIDBG_TX(D_NETTRACE, tx, "rdma ++ %d to "LPD64"", + GNIDBG_TX(D_NETTRACE, tx, "rdma ++ %d to %lld", bytes, atomic64_read(&dev->gnd_rdmaq_bytes_out)); } @@ -984,21 +953,16 @@ kgnilnd_mem_del_map_list(kgn_device_t *dev, kgn_tx_t *tx) dev->gnd_map_nphys--; dev->gnd_map_physnop -= tx->tx_phys_npages; break; - - case GNILND_BUF_VIRT_UNMAPPED: - bytes = tx->tx_nob; - dev->gnd_map_nvirt--; - dev->gnd_map_virtnob -= tx->tx_nob; - break; } if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_ACK || tx->tx_msg.gnm_type == GNILND_MSG_GET_REQ) { atomic64_sub(bytes, &dev->gnd_rdmaq_bytes_out); LASSERTF(atomic64_read(&dev->gnd_rdmaq_bytes_out) >= 0, - "bytes_out negative! %ld\n", atomic64_read(&dev->gnd_rdmaq_bytes_out)); - GNIDBG_TX(D_NETTRACE, tx, "rdma -- %d to "LPD64"", - bytes, atomic64_read(&dev->gnd_rdmaq_bytes_out)); + "bytes_out negative! 
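/*
 * Aside, not part of the patch: the old fall-through comments in this file
 * are replaced with the kernel's fallthrough pseudo-keyword, so a compiler
 * run with -Wimplicit-fallthrough can tell deliberate fall-through from a
 * forgotten break.  Outside the kernel headers the same effect comes from
 * the statement attribute (GCC 7+ / recent Clang); dump_level_demo() below
 * is a hypothetical example mirroring the checksum-dump switch above.
 */
#ifndef fallthrough
#define fallthrough __attribute__((__fallthrough__))
#endif

static int dump_level_demo(int level)
{
        int dumped = 0;

        switch (level) {
        case 2:
                dumped |= 2;    /* dump the payload blob ... */
                fallthrough;    /* ... and then the debug log as well */
        case 1:
                dumped |= 1;
                break;
        default:
                break;
        }
        return dumped;
}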
%lld\n", + (s64)atomic64_read(&dev->gnd_rdmaq_bytes_out)); + GNIDBG_TX(D_NETTRACE, tx, "rdma -- %d to %lld", + bytes, (s64)atomic64_read(&dev->gnd_rdmaq_bytes_out)); } atomic_dec(&dev->gnd_n_mdd); @@ -1042,7 +1006,6 @@ kgnilnd_map_buffer(kgn_tx_t *tx) case GNILND_BUF_IMMEDIATE: case GNILND_BUF_IMMEDIATE_KIOV: case GNILND_BUF_PHYS_MAPPED: - case GNILND_BUF_VIRT_MAPPED: return 0; case GNILND_BUF_PHYS_UNMAPPED: @@ -1055,41 +1018,16 @@ kgnilnd_map_buffer(kgn_tx_t *tx) * - this needs to turn into a non-fatal error soon to allow * GART resource, etc starvation handling */ if (rrc != GNI_RC_SUCCESS) { - GNIDBG_TX(D_NET, tx, "Can't map %d pages: dev %d " - "phys %u pp %u, virt %u nob "LPU64"", + GNIDBG_TX(D_NET, tx, + "Can't map %d pages: dev %d phys %u pp %u", tx->tx_phys_npages, dev->gnd_id, - dev->gnd_map_nphys, dev->gnd_map_physnop, - dev->gnd_map_nvirt, dev->gnd_map_virtnob); + dev->gnd_map_nphys, dev->gnd_map_physnop); RETURN(rrc == GNI_RC_ERROR_RESOURCE ? -ENOMEM : -EINVAL); } tx->tx_buftype = GNILND_BUF_PHYS_MAPPED; kgnilnd_mem_add_map_list(dev, tx); return 0; - - case GNILND_BUF_VIRT_UNMAPPED: - rrc = kgnilnd_mem_register(dev->gnd_handle, - (__u64)tx->tx_buffer, tx->tx_nob, - NULL, flags, &tx->tx_map_key); - if (rrc != GNI_RC_SUCCESS) { - GNIDBG_TX(D_NET, tx, "Can't map %u bytes: dev %d " - "phys %u pp %u, virt %u nob "LPU64"", - tx->tx_nob, dev->gnd_id, - dev->gnd_map_nphys, dev->gnd_map_physnop, - dev->gnd_map_nvirt, dev->gnd_map_virtnob); - RETURN(rrc == GNI_RC_ERROR_RESOURCE ? -ENOMEM : -EINVAL); - } - - tx->tx_buftype = GNILND_BUF_VIRT_MAPPED; - kgnilnd_mem_add_map_list(dev, tx); - if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_ACK || - tx->tx_msg.gnm_type == GNILND_MSG_GET_REQ) { - atomic64_add(tx->tx_nob, &dev->gnd_rdmaq_bytes_out); - GNIDBG_TX(D_NETTRACE, tx, "rdma ++ %d to %ld\n", - tx->tx_nob, atomic64_read(&dev->gnd_rdmaq_bytes_out)); - } - - return 0; } } @@ -1131,8 +1069,8 @@ kgnilnd_unmap_buffer(kgn_tx_t *tx, int error) int hold_timeout = 0; /* code below relies on +1 relationship ... 
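/*
 * Aside, not part of the patch: CLASSERT() is being replaced by the
 * upstream BUILD_BUG_ON(), which breaks the build when its condition is
 * true - note the sense of the test flips, since CLASSERT asserted that the
 * condition held.  The assertion matters because the unmap code below may
 * rely on the MAPPED state being exactly UNMAPPED + 1.  A standalone C11
 * analogue with hypothetical enum values:
 */
enum buf_state {
        BUF_PHYS_UNMAPPED = 4,          /* illustrative values only */
        BUF_PHYS_MAPPED   = 5,
};

_Static_assert(BUF_PHYS_MAPPED == BUF_PHYS_UNMAPPED + 1,
               "state stepping below assumes the +1 relationship");

static enum buf_state demo_unmap_state(enum buf_state s)
{
        /* stepping MAPPED back to UNMAPPED leans on the assertion above */
        return s == BUF_PHYS_MAPPED ? (enum buf_state)(s - 1) : s;
}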
*/ - CLASSERT(GNILND_BUF_PHYS_MAPPED == (GNILND_BUF_PHYS_UNMAPPED + 1)); - CLASSERT(GNILND_BUF_VIRT_MAPPED == (GNILND_BUF_VIRT_UNMAPPED + 1)); + BUILD_BUG_ON(GNILND_BUF_PHYS_MAPPED != + (GNILND_BUF_PHYS_UNMAPPED + 1)); switch (tx->tx_buftype) { default: @@ -1141,7 +1079,6 @@ kgnilnd_unmap_buffer(kgn_tx_t *tx, int error) case GNILND_BUF_NONE: case GNILND_BUF_IMMEDIATE: case GNILND_BUF_PHYS_UNMAPPED: - case GNILND_BUF_VIRT_UNMAPPED: break; case GNILND_BUF_IMMEDIATE_KIOV: if (tx->tx_phys != NULL) { @@ -1155,7 +1092,6 @@ kgnilnd_unmap_buffer(kgn_tx_t *tx, int error) break; case GNILND_BUF_PHYS_MAPPED: - case GNILND_BUF_VIRT_MAPPED: LASSERT(tx->tx_conn != NULL); dev = tx->tx_conn->gnc_device; @@ -1174,7 +1110,7 @@ kgnilnd_unmap_buffer(kgn_tx_t *tx, int error) hold_timeout = GNILND_TIMEOUT2DEADMAN; GNIDBG_TX(D_NET, tx, - "dev %p delaying MDD release for %dms key "LPX64"."LPX64"", + "dev %p delaying MDD release for %dms key %#llx.%#llx", tx->tx_conn->gnc_device, hold_timeout, tx->tx_map_key.qword1, tx->tx_map_key.qword2); } @@ -1197,9 +1133,9 @@ kgnilnd_unmap_buffer(kgn_tx_t *tx, int error) void kgnilnd_tx_done(kgn_tx_t *tx, int completion) { - lnet_msg_t *lntmsg0, *lntmsg1; + struct lnet_msg *lntmsg0, *lntmsg1; int status0, status1; - lnet_ni_t *ni = NULL; + struct lnet_ni *ni = NULL; kgn_conn_t *conn = tx->tx_conn; LASSERT(!in_interrupt()); @@ -1264,10 +1200,10 @@ kgnilnd_tx_done(kgn_tx_t *tx, int completion) * could free up lnet credits, resulting in a call chain back into * the LND via kgnilnd_send and friends */ - lnet_finalize(ni, lntmsg0, status0); + lnet_finalize(lntmsg0, status0); if (lntmsg1 != NULL) { - lnet_finalize(ni, lntmsg1, status1); + lnet_finalize(lntmsg1, status1); } } @@ -1342,23 +1278,12 @@ search_again: return 0; } -static inline int -kgnilnd_tx_should_retry(kgn_conn_t *conn, kgn_tx_t *tx) +static inline void +kgnilnd_tx_log_retrans(kgn_conn_t *conn, kgn_tx_t *tx) { - int max_retrans = *kgnilnd_tunables.kgn_max_retransmits; int log_retrans; - int log_retrans_level; - - /* I need kgni credits to send this. Replace tx at the head of the - * fmaq and I'll get rescheduled when credits appear */ - tx->tx_state = 0; - tx->tx_retrans++; - conn->gnc_tx_retrans++; - log_retrans = ((tx->tx_retrans < 25) || ((tx->tx_retrans % 25) == 0) || - (tx->tx_retrans > (max_retrans / 2))); - log_retrans_level = tx->tx_retrans < (max_retrans / 2) ? D_NET : D_NETERROR; - /* Decision time - either error, warn or just retransmit */ + log_retrans = ((tx->tx_retrans < 25) || ((tx->tx_retrans % 25) == 0)); /* we don't care about TX timeout - it could be that the network is slower * or throttled. We'll keep retranmitting - so if the network is so slow @@ -1366,46 +1291,22 @@ kgnilnd_tx_should_retry(kgn_conn_t *conn, kgn_tx_t *tx) * until we exceed the max_retrans _or_ gnc_last_rx expires, indicating * that he hasn't send us any traffic in return */ - if (tx->tx_retrans > max_retrans) { - /* this means we are not backing off the retransmits - * in a healthy manner and are likely chewing up the - * CPU cycles quite badly */ - GNIDBG_TOMSG(D_ERROR, &tx->tx_msg, - "SOFTWARE BUG: too many retransmits (%d) for tx id %x " - "conn 0x%p->%s\n", - tx->tx_retrans, tx->tx_id, conn, - libcfs_nid2str(conn->gnc_peer->gnp_nid)); - - /* yes - double errors to help debug this condition */ - GNIDBG_TOMSG(D_NETERROR, &tx->tx_msg, "connection dead. 
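/*
 * Aside, not part of the patch: the block removed below was the hard
 * "too many retransmits, close the connection" branch; what remains in
 * kgnilnd_tx_log_retrans() is only the log throttling, which stays verbose
 * for the first 25 retransmissions of a tx and then reports every 25th so
 * a long NOT_DONE spell cannot flood the console.  A runnable analogue
 * (should_log_retrans is an illustrative name):
 */
#include <stdbool.h>

static bool should_log_retrans(int retrans)
{
        /* chatty early on, then one message per further 25 retransmits */
        return retrans < 25 || (retrans % 25) == 0;
}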
" - "unable to send to %s for %lu secs (%d tries)", - libcfs_nid2str(tx->tx_conn->gnc_peer->gnp_nid), - cfs_duration_sec(jiffies - tx->tx_cred_wait), - tx->tx_retrans); - - kgnilnd_close_conn(conn, -ETIMEDOUT); - - /* caller should terminate */ - RETURN(0); - } else { - /* some reasonable throttling of the debug message */ - if (log_retrans) { - unsigned long now = jiffies; - /* XXX Nic: Mystical TX debug here... */ - GNIDBG_SMSG_CREDS(log_retrans_level, conn); - GNIDBG_TOMSG(log_retrans_level, &tx->tx_msg, - "NOT_DONE on conn 0x%p->%s id %x retrans %d wait %dus" - " last_msg %uus/%uus last_cq %uus/%uus", - conn, libcfs_nid2str(conn->gnc_peer->gnp_nid), - tx->tx_id, tx->tx_retrans, - jiffies_to_usecs(now - tx->tx_cred_wait), - jiffies_to_usecs(now - conn->gnc_last_tx), - jiffies_to_usecs(now - conn->gnc_last_rx), - jiffies_to_usecs(now - conn->gnc_last_tx_cq), - jiffies_to_usecs(now - conn->gnc_last_rx_cq)); - } - /* caller should retry */ - RETURN(1); + /* some reasonable throttling of the debug message */ + if (log_retrans) { + unsigned long now = jiffies; + /* XXX Nic: Mystical TX debug here... */ + /* We expect retransmissions so only log when D_NET is enabled */ + GNIDBG_SMSG_CREDS(D_NET, conn); + GNIDBG_TOMSG(D_NET, &tx->tx_msg, + "NOT_DONE on conn 0x%p->%s id %x retrans %d wait %dus" + " last_msg %uus/%uus last_cq %uus/%uus", + conn, libcfs_nid2str(conn->gnc_peer->gnp_nid), + tx->tx_id, tx->tx_retrans, + jiffies_to_usecs(now - tx->tx_cred_wait), + jiffies_to_usecs(now - conn->gnc_last_tx), + jiffies_to_usecs(now - conn->gnc_last_rx), + jiffies_to_usecs(now - conn->gnc_last_tx_cq), + jiffies_to_usecs(now - conn->gnc_last_rx_cq)); } } @@ -1418,7 +1319,6 @@ kgnilnd_sendmsg_nolock(kgn_tx_t *tx, void *immediate, unsigned int immediatenob, { kgn_conn_t *conn = tx->tx_conn; kgn_msg_t *msg = &tx->tx_msg; - int retry_send; gni_return_t rrc; unsigned long newest_last_rx, timeout; unsigned long now; @@ -1528,9 +1428,11 @@ kgnilnd_sendmsg_nolock(kgn_tx_t *tx, void *immediate, unsigned int immediatenob, return 0; case GNI_RC_NOT_DONE: - /* XXX Nic: We need to figure out how to track this - * - there are bound to be good reasons for it, - * but we want to know when it happens */ + /* Jshimek: We can get GNI_RC_NOT_DONE for 3 reasons currently + * 1: out of mbox credits + * 2: out of mbox payload credits + * 3: On Aries out of dla credits + */ kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex); kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex); /* We'll handle this error inline - makes the calling logic much more @@ -1541,31 +1443,36 @@ kgnilnd_sendmsg_nolock(kgn_tx_t *tx, void *immediate, unsigned int immediatenob, return -EAGAIN; } - retry_send = kgnilnd_tx_should_retry(conn, tx); - if (retry_send) { - /* add to head of list for the state and retries */ - spin_lock(state_lock); - kgnilnd_tx_add_state_locked(tx, conn->gnc_peer, conn, state, 0); - spin_unlock(state_lock); - - /* We only reschedule for a certain number of retries, then - * we will wait for the CQ events indicating a release of SMSG - * credits */ - if (tx->tx_retrans < (*kgnilnd_tunables.kgn_max_retransmits/4)) { - kgnilnd_schedule_conn(conn); - return 0; - } else { - /* CQ event coming in signifies either TX completed or - * RX receive. 
Either of these *could* free up credits - * in the SMSG mbox and we should try sending again */ - GNIDBG_TX(D_NET, tx, "waiting for CQID %u event to resend", - tx->tx_conn->gnc_cqid); - /* use +ve return code to let upper layers know they - * should stop looping on sends */ - return EAGAIN; - } + /* I need kgni credits to send this. Replace tx at the head of the + * fmaq and I'll get rescheduled when credits appear. Reset the tx_state + * and bump retrans counts since we are requeueing the tx. + */ + tx->tx_state = 0; + tx->tx_retrans++; + conn->gnc_tx_retrans++; + + kgnilnd_tx_log_retrans(conn, tx); + /* add to head of list for the state and retries */ + spin_lock(state_lock); + kgnilnd_tx_add_state_locked(tx, conn->gnc_peer, conn, state, 0); + spin_unlock(state_lock); + + /* We only reschedule for a certain number of retries, then + * we will wait for the CQ events indicating a release of SMSG + * credits */ + if (tx->tx_retrans < *kgnilnd_tunables.kgn_max_retransmits) { + kgnilnd_schedule_conn(conn); + return 0; } else { - return -EAGAIN; + /* CQ event coming in signifies either TX completed or + * RX receive. Either of these *could* free up credits + * in the SMSG mbox and we should try sending again */ + GNIDBG_TX(D_NET, tx, "waiting for CQID %u event to resend", + tx->tx_conn->gnc_cqid); + kgnilnd_schedule_delay_conn(conn); + /* use +ve return code to let upper layers know they + * should stop looping on sends */ + return EAGAIN; } default: /* handle bad retcode gracefully */ @@ -1655,7 +1562,7 @@ kgnilnd_sendmsg_trylock(kgn_tx_t *tx, void *immediate, unsigned int immediatenob } /* lets us know if we can push this RDMA through now */ -inline int +static int kgnilnd_auth_rdma_bytes(kgn_device_t *dev, kgn_tx_t *tx) { long bytes_left; @@ -1754,11 +1661,12 @@ kgnilnd_queue_tx(kgn_conn_t *conn, kgn_tx_t *tx) /* it was sent, break out of switch to avoid default case of queueing */ break; } - /* needs to queue to try again, so fall through to default case */ + /* needs to queue to try again, so... */ + fallthrough; case GNILND_MSG_NOOP: /* Just make sure this goes out first for this conn */ add_tail = 0; - /* fall through... 
*/ + fallthrough; default: spin_lock(&conn->gnc_list_lock); kgnilnd_tx_add_state_locked(tx, conn->gnc_peer, conn, GNILND_TX_FMAQ, add_tail); @@ -1769,7 +1677,7 @@ kgnilnd_queue_tx(kgn_conn_t *conn, kgn_tx_t *tx) } void -kgnilnd_launch_tx(kgn_tx_t *tx, kgn_net_t *net, lnet_process_id_t *target) +kgnilnd_launch_tx(kgn_tx_t *tx, kgn_net_t *net, struct lnet_processid *target) { kgn_peer_t *peer; kgn_peer_t *new_peer = NULL; @@ -1792,7 +1700,7 @@ kgnilnd_launch_tx(kgn_tx_t *tx, kgn_net_t *net, lnet_process_id_t *target) /* I expect to find him, so only take a read lock */ read_lock(&kgnilnd_data.kgn_peer_conn_lock); - peer = kgnilnd_find_peer_locked(target->nid); + peer = kgnilnd_find_peer_locked(lnet_nid_to_nid4(&target->nid)); if (peer != NULL) { conn = kgnilnd_find_conn_locked(peer); /* this could be NULL during quiesce */ @@ -1804,7 +1712,7 @@ kgnilnd_launch_tx(kgn_tx_t *tx, kgn_net_t *net, lnet_process_id_t *target) } /* don't create a connection if the peer is marked down */ - if (peer->gnp_down == GNILND_RCA_NODE_DOWN) { + if (peer->gnp_state != GNILND_PEER_UP) { read_unlock(&kgnilnd_data.kgn_peer_conn_lock); rc = -ENETRESET; GOTO(no_peer, rc); @@ -1816,7 +1724,7 @@ kgnilnd_launch_tx(kgn_tx_t *tx, kgn_net_t *net, lnet_process_id_t *target) CFS_RACE(CFS_FAIL_GNI_FIND_TARGET); - node_state = kgnilnd_get_node_state(LNET_NIDADDR(target->nid)); + node_state = kgnilnd_get_node_state(ntohl(target->nid.nid_addr[0])); /* NB - this will not block during normal operations - * the only writer of this is in the startup/shutdown path. */ @@ -1829,7 +1737,8 @@ kgnilnd_launch_tx(kgn_tx_t *tx, kgn_net_t *net, lnet_process_id_t *target) /* ignore previous peer entirely - we cycled the lock, so we * will create new peer and at worst drop it if peer is still * in the tables */ - rc = kgnilnd_create_peer_safe(&new_peer, target->nid, net, node_state); + rc = kgnilnd_create_peer_safe(&new_peer, lnet_nid_to_nid4(&target->nid), + net, node_state); if (rc != 0) { up_read(&kgnilnd_data.kgn_net_rw_sem); GOTO(no_peer, rc); @@ -1840,10 +1749,11 @@ kgnilnd_launch_tx(kgn_tx_t *tx, kgn_net_t *net, lnet_process_id_t *target) /* search for peer again now that we have the lock * if we don't find it, add our new one to the list */ - kgnilnd_add_peer_locked(target->nid, new_peer, &peer); + kgnilnd_add_peer_locked(lnet_nid_to_nid4(&target->nid), new_peer, + &peer); /* don't create a connection if the peer is not up */ - if (peer->gnp_down != GNILND_RCA_NODE_UP) { + if (peer->gnp_state != GNILND_PEER_UP) { write_unlock(&kgnilnd_data.kgn_peer_conn_lock); rc = -ENETRESET; GOTO(no_peer, rc); @@ -1923,11 +1833,11 @@ kgnilnd_rdma(kgn_tx_t *tx, int type, tx->tx_offset = ((__u64)((unsigned long)sink->gnrd_addr)) & 3; if (tx->tx_offset) - kgnilnd_admin_addref(kgnilnd_data.kgn_rev_offset); + atomic_inc(&kgnilnd_data.kgn_rev_offset); if ((nob + tx->tx_offset) & 3) { desc_nob = ((nob + tx->tx_offset) + (4 - ((nob + tx->tx_offset) & 3))); - kgnilnd_admin_addref(kgnilnd_data.kgn_rev_length); + atomic_inc(&kgnilnd_data.kgn_rev_length); } else { desc_nob = (nob + tx->tx_offset); } @@ -1935,7 +1845,7 @@ kgnilnd_rdma(kgn_tx_t *tx, int type, if (tx->tx_buffer_copy == NULL) { /* Allocate the largest copy buffer we will need, this will prevent us from overwriting data * and require at most we allocate a few extra bytes. 
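/*
 * Aside, not part of the patch: the tx_offset/desc_nob computation above
 * widens an unaligned reverse-RDMA transfer - the descriptor starts back on
 * the previous 4-byte boundary and its length is rounded up to a 4-byte
 * multiple, with the spill-over absorbed by the bounce buffer allocated
 * just below.  The 4-byte alignment requirement is inferred from the masks
 * used; rdma_desc_len() is an illustrative name.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t rdma_desc_len(uint64_t sink_addr, uint64_t nob,
                              uint64_t *lead_in)
{
        uint64_t offset = sink_addr & 3;        /* bytes of lead-in padding */
        uint64_t len = nob + offset;

        if (len & 3)
                len += 4 - (len & 3);           /* round up to a multiple of 4 */
        *lead_in = offset;
        return len;
}

int main(void)
{
        uint64_t off;

        /* 10 bytes aimed at ...0x1003: 3 bytes of lead-in, 16-byte descriptor */
        printf("%llu\n", (unsigned long long)rdma_desc_len(0x1003, 10, &off));
        return 0;
}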
*/ - tx->tx_buffer_copy = vmalloc(desc_nob); + tx->tx_buffer_copy = kgnilnd_vzalloc(desc_nob); if (!tx->tx_buffer_copy) { /* allocation of buffer failed nak the rdma */ @@ -1943,11 +1853,12 @@ kgnilnd_rdma(kgn_tx_t *tx, int type, kgnilnd_tx_done(tx, -EFAULT); return 0; } - kgnilnd_admin_addref(kgnilnd_data.kgn_rev_copy_buff); + atomic_inc(&kgnilnd_data.kgn_rev_copy_buff); rc = kgnilnd_mem_register(conn->gnc_device->gnd_handle, (__u64)tx->tx_buffer_copy, desc_nob, NULL, GNI_MEM_READWRITE, &tx->tx_buffer_copy_map_key); if (rc != GNI_RC_SUCCESS) { /* Registration Failed nak rdma and kill the tx. */ - vfree(tx->tx_buffer_copy); + kgnilnd_vfree(tx->tx_buffer_copy, + desc_nob); tx->tx_buffer_copy = NULL; kgnilnd_nak_rdma(tx->tx_conn, tx->tx_msg.gnm_type, -EFAULT, cookie, tx->tx_msg.gnm_srcnid); kgnilnd_tx_done(tx, -EFAULT); @@ -1969,8 +1880,10 @@ kgnilnd_rdma(kgn_tx_t *tx, int type, tx->tx_rdma_desc.remote_mem_hndl = sink->gnrd_key; tx->tx_rdma_desc.length = desc_nob; tx->tx_nob_rdma = nob; - if (*kgnilnd_tunables.kgn_bte_dlvr_mode) - tx->tx_rdma_desc.dlvr_mode = *kgnilnd_tunables.kgn_bte_dlvr_mode; + if (post_type == GNI_POST_RDMA_PUT && *kgnilnd_tunables.kgn_bte_put_dlvr_mode) + tx->tx_rdma_desc.dlvr_mode = *kgnilnd_tunables.kgn_bte_put_dlvr_mode; + if (post_type == GNI_POST_RDMA_GET && *kgnilnd_tunables.kgn_bte_get_dlvr_mode) + tx->tx_rdma_desc.dlvr_mode = *kgnilnd_tunables.kgn_bte_get_dlvr_mode; /* prep final completion message */ kgnilnd_init_msg(&tx->tx_msg, type, tx->tx_msg.gnm_srcnid); tx->tx_msg.gnm_u.completion.gncm_cookie = cookie; @@ -1989,7 +1902,7 @@ kgnilnd_rdma(kgn_tx_t *tx, int type, tx, conn, conn->gnc_close_sent); GNIDBG_TX(D_NET, tx, "Post RDMA type 0x%02x conn %p dlvr_mode " - "0x%x cookie:"LPX64, + "0x%x cookie:%#llx", type, conn, tx->tx_rdma_desc.dlvr_mode, cookie); /* set CQ dedicated for RDMA */ @@ -2011,7 +1924,7 @@ kgnilnd_rdma(kgn_tx_t *tx, int type, kgnilnd_unmap_buffer(tx, 0); if (tx->tx_buffer_copy != NULL) { - vfree(tx->tx_buffer_copy); + kgnilnd_vfree(tx->tx_buffer_copy, desc_nob); tx->tx_buffer_copy = NULL; } @@ -2076,7 +1989,7 @@ kgnilnd_release_msg(kgn_conn_t *conn) LASSERTF(rrc == GNI_RC_SUCCESS, "bad rrc %d\n", rrc); GNIDBG_SMSG_CREDS(D_NET, conn); - return; + kgnilnd_schedule_conn(conn); } void @@ -2100,47 +2013,39 @@ kgnilnd_consume_rx(kgn_rx_t *rx) kmem_cache_free(kgnilnd_data.kgn_rx_cache, rx); CDEBUG(D_MALLOC, "slab-freed 'rx': %lu at %p.\n", sizeof(*rx), rx); - - return; } int -kgnilnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) +kgnilnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg) { - lnet_hdr_t *hdr = &lntmsg->msg_hdr; + struct lnet_hdr *hdr = &lntmsg->msg_hdr; int type = lntmsg->msg_type; - lnet_process_id_t target = lntmsg->msg_target; + struct lnet_processid *target = &lntmsg->msg_target; int target_is_router = lntmsg->msg_target_is_router; int routing = lntmsg->msg_routing; unsigned int niov = lntmsg->msg_niov; - struct kvec *iov = lntmsg->msg_iov; - lnet_kiov_t *kiov = lntmsg->msg_kiov; + struct bio_vec *kiov = lntmsg->msg_kiov; unsigned int offset = lntmsg->msg_offset; unsigned int nob = lntmsg->msg_len; unsigned int msg_vmflush = lntmsg->msg_vmflush; kgn_net_t *net = ni->ni_data; kgn_tx_t *tx; int rc = 0; - int mpflag = 0; + /* '1' for consistency with code that checks !mpflag to restore */ + unsigned int mpflag = 1; int reverse_rdma_flag = *kgnilnd_tunables.kgn_reverse_rdma; /* NB 'private' is different depending on what we're sending.... 
*/ LASSERT(!in_interrupt()); CDEBUG(D_NET, "sending msg type %d with %d bytes in %d frags to %s\n", - type, nob, niov, libcfs_id2str(target)); + type, nob, niov, libcfs_idstr(target)); LASSERTF(nob == 0 || niov > 0, "lntmsg %p nob %d niov %d\n", lntmsg, nob, niov); - LASSERTF(niov <= LNET_MAX_IOV, - "lntmsg %p niov %d\n", lntmsg, niov); - - /* payload is either all vaddrs or all pages */ - LASSERTF(!(kiov != NULL && iov != NULL), - "lntmsg %p kiov %p iov %p\n", lntmsg, kiov, iov); if (msg_vmflush) - mpflag = cfs_memory_pressure_get_and_set(); + mpflag = memalloc_noreclaim_save(); switch (type) { default: @@ -2170,24 +2075,19 @@ kgnilnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) break; if ((reverse_rdma_flag & GNILND_REVERSE_GET) == 0) - tx = kgnilnd_new_tx_msg(GNILND_MSG_GET_REQ, ni->ni_nid); + tx = kgnilnd_new_tx_msg(GNILND_MSG_GET_REQ, + lnet_nid_to_nid4(&ni->ni_nid)); else - tx = kgnilnd_new_tx_msg(GNILND_MSG_GET_REQ_REV, ni->ni_nid); + tx = kgnilnd_new_tx_msg(GNILND_MSG_GET_REQ_REV, + lnet_nid_to_nid4(&ni->ni_nid)); if (tx == NULL) { rc = -ENOMEM; goto out; } - /* slightly different options as we might actually have a GET with a - * MD_KIOV set but a non-NULL md_iov.iov */ - if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0) - rc = kgnilnd_setup_rdma_buffer(tx, lntmsg->msg_md->md_niov, - lntmsg->msg_md->md_iov.iov, NULL, - 0, lntmsg->msg_md->md_length); - else - rc = kgnilnd_setup_rdma_buffer(tx, lntmsg->msg_md->md_niov, - NULL, lntmsg->msg_md->md_iov.kiov, - 0, lntmsg->msg_md->md_length); + rc = kgnilnd_setup_rdma_buffer(tx, lntmsg->msg_md->md_niov, + lntmsg->msg_md->md_kiov, + 0, lntmsg->msg_md->md_length); if (rc != 0) { CERROR("unable to setup buffer: %d\n", rc); kgnilnd_tx_done(tx, rc); @@ -2198,7 +2098,7 @@ kgnilnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) tx->tx_lntmsg[1] = lnet_create_reply_msg(ni, lntmsg); if (tx->tx_lntmsg[1] == NULL) { CERROR("Can't create reply for GET to %s\n", - libcfs_nid2str(target.nid)); + libcfs_nidstr(&target->nid)); kgnilnd_tx_done(tx, rc); rc = -EIO; goto out; @@ -2206,12 +2106,13 @@ kgnilnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) tx->tx_lntmsg[0] = lntmsg; if ((reverse_rdma_flag & GNILND_REVERSE_GET) == 0) - tx->tx_msg.gnm_u.get.gngm_hdr = *hdr; + lnet_hdr_to_nid4(hdr, &tx->tx_msg.gnm_u.get.gngm_hdr); else - tx->tx_msg.gnm_u.putreq.gnprm_hdr = *hdr; + lnet_hdr_to_nid4(hdr, + &tx->tx_msg.gnm_u.putreq.gnprm_hdr); /* rest of tx_msg is setup just before it is sent */ - kgnilnd_launch_tx(tx, net, &target); + kgnilnd_launch_tx(tx, net, target); goto out; case LNET_MSG_REPLY: case LNET_MSG_PUT: @@ -2221,16 +2122,19 @@ kgnilnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) break; if ((reverse_rdma_flag & GNILND_REVERSE_PUT) == 0) - tx = kgnilnd_new_tx_msg(GNILND_MSG_PUT_REQ, ni->ni_nid); + tx = kgnilnd_new_tx_msg(GNILND_MSG_PUT_REQ, + lnet_nid_to_nid4(&ni->ni_nid)); else - tx = kgnilnd_new_tx_msg(GNILND_MSG_PUT_REQ_REV, ni->ni_nid); + tx = kgnilnd_new_tx_msg(GNILND_MSG_PUT_REQ_REV, + lnet_nid_to_nid4(&ni->ni_nid)); if (tx == NULL) { rc = -ENOMEM; goto out; } - rc = kgnilnd_setup_rdma_buffer(tx, niov, iov, kiov, offset, nob); + rc = kgnilnd_setup_rdma_buffer(tx, niov, + kiov, offset, nob); if (rc != 0) { kgnilnd_tx_done(tx, rc); rc = -EIO; @@ -2239,12 +2143,13 @@ kgnilnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) tx->tx_lntmsg[0] = lntmsg; if ((reverse_rdma_flag & GNILND_REVERSE_PUT) == 0) - tx->tx_msg.gnm_u.putreq.gnprm_hdr = *hdr; + lnet_hdr_to_nid4(hdr, + &tx->tx_msg.gnm_u.putreq.gnprm_hdr); 
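/*
 * Aside, not part of the patch: the msg_vmflush handling near the top of
 * kgnilnd_send() now brackets the send with the upstream
 * memalloc_noreclaim_save()/memalloc_noreclaim_restore() pair from
 * linux/sched/mm.h instead of the old libcfs memory-pressure helpers.  The
 * pair sets and clears PF_MEMALLOC on the task, so allocations made while
 * flushing dirty data may dip into reserves and will not recurse back into
 * reclaim.  Kernel-style sketch with a hypothetical callback:
 */
#include <linux/sched/mm.h>

static void send_during_vmflush(void (*do_send)(void))
{
        unsigned int mpflag = memalloc_noreclaim_save();

        do_send();              /* allocations here run with PF_MEMALLOC set */

        memalloc_noreclaim_restore(mpflag);
}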
else - tx->tx_msg.gnm_u.get.gngm_hdr = *hdr; + lnet_hdr_to_nid4(hdr, &tx->tx_msg.gnm_u.get.gngm_hdr); /* rest of tx_msg is setup just before it is sent */ - kgnilnd_launch_tx(tx, net, &target); + kgnilnd_launch_tx(tx, net, target); goto out; } @@ -2253,37 +2158,37 @@ kgnilnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) LASSERTF(nob <= *kgnilnd_tunables.kgn_max_immediate, "lntmsg 0x%p too large %d\n", lntmsg, nob); - tx = kgnilnd_new_tx_msg(GNILND_MSG_IMMEDIATE, ni->ni_nid); + tx = kgnilnd_new_tx_msg(GNILND_MSG_IMMEDIATE, + lnet_nid_to_nid4(&ni->ni_nid)); if (tx == NULL) { rc = -ENOMEM; goto out; } - rc = kgnilnd_setup_immediate_buffer(tx, niov, iov, kiov, offset, nob); + rc = kgnilnd_setup_immediate_buffer(tx, niov, kiov, offset, nob); if (rc != 0) { kgnilnd_tx_done(tx, rc); goto out; } - tx->tx_msg.gnm_u.immediate.gnim_hdr = *hdr; + lnet_hdr_to_nid4(hdr, &tx->tx_msg.gnm_u.immediate.gnim_hdr); tx->tx_lntmsg[0] = lntmsg; - kgnilnd_launch_tx(tx, net, &target); + kgnilnd_launch_tx(tx, net, target); out: /* use stored value as we could have already finalized lntmsg here from a failed launch */ if (msg_vmflush) - cfs_memory_pressure_restore(mpflag); + memalloc_noreclaim_restore(mpflag); return rc; } void -kgnilnd_setup_rdma(lnet_ni_t *ni, kgn_rx_t *rx, lnet_msg_t *lntmsg, int mlen) +kgnilnd_setup_rdma(struct lnet_ni *ni, kgn_rx_t *rx, struct lnet_msg *lntmsg, int mlen) { kgn_conn_t *conn = rx->grx_conn; kgn_msg_t *rxmsg = rx->grx_msg; unsigned int niov = lntmsg->msg_niov; - struct kvec *iov = lntmsg->msg_iov; - lnet_kiov_t *kiov = lntmsg->msg_kiov; + struct bio_vec *kiov = lntmsg->msg_kiov; unsigned int offset = lntmsg->msg_offset; unsigned int nob = lntmsg->msg_len; int done_type; @@ -2305,7 +2210,7 @@ kgnilnd_setup_rdma(lnet_ni_t *ni, kgn_rx_t *rx, lnet_msg_t *lntmsg, int mlen) LBUG(); } - tx = kgnilnd_new_tx_msg(done_type, ni->ni_nid); + tx = kgnilnd_new_tx_msg(done_type, lnet_nid_to_nid4(&ni->ni_nid)); if (tx == NULL) goto failed_0; @@ -2313,7 +2218,7 @@ kgnilnd_setup_rdma(lnet_ni_t *ni, kgn_rx_t *rx, lnet_msg_t *lntmsg, int mlen) if (rc != 0) goto failed_1; - rc = kgnilnd_setup_rdma_buffer(tx, niov, iov, kiov, offset, nob); + rc = kgnilnd_setup_rdma_buffer(tx, niov, kiov, offset, nob); if (rc != 0) goto failed_1; @@ -2332,13 +2237,14 @@ kgnilnd_setup_rdma(lnet_ni_t *ni, kgn_rx_t *rx, lnet_msg_t *lntmsg, int mlen) failed_1: kgnilnd_tx_done(tx, rc); - kgnilnd_nak_rdma(conn, done_type, rc, rxmsg->gnm_u.get.gngm_cookie, ni->ni_nid); + kgnilnd_nak_rdma(conn, done_type, rc, rxmsg->gnm_u.get.gngm_cookie, + lnet_nid_to_nid4(&ni->ni_nid)); failed_0: - lnet_finalize(ni, lntmsg, rc); + lnet_finalize(lntmsg, rc); } int -kgnilnd_eager_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, +kgnilnd_eager_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg, void **new_private) { kgn_rx_t *rx = private; @@ -2376,8 +2282,8 @@ kgnilnd_eager_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, CERROR("Couldnt find matching peer %p or conn %p / %p\n", peer, conn, found_conn); if (found_conn) { - CERROR("Unexpected connstamp "LPX64"("LPX64" expected)" - " from %s", rxmsg->gnm_connstamp, + CERROR("Unexpected connstamp %#llx(%#llx expected) from %s\n", + rxmsg->gnm_connstamp, found_conn->gnc_peer_connstamp, libcfs_nid2str(peer->gnp_nid)); } @@ -2429,9 +2335,9 @@ kgnilnd_eager_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, } int -kgnilnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, +kgnilnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg, int delayed, 
unsigned int niov, - struct kvec *iov, lnet_kiov_t *kiov, + struct bio_vec *kiov, unsigned int offset, unsigned int mlen, unsigned int rlen) { kgn_rx_t *rx = private; @@ -2444,14 +2350,11 @@ kgnilnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, LASSERT(!in_interrupt()); LASSERTF(mlen <= rlen, "%d <= %d\n", mlen, rlen); - /* Either all pages or all vaddrs */ - LASSERTF(!(kiov != NULL && iov != NULL), "kiov %p iov %p\n", - kiov, iov); GNIDBG_MSG(D_NET, rxmsg, "conn %p, rxmsg %p, lntmsg %p" - " niov=%d kiov=%p iov=%p offset=%d mlen=%d rlen=%d", + " niov=%d kiov=%p offset=%d mlen=%d rlen=%d", conn, rxmsg, lntmsg, - niov, kiov, iov, offset, mlen, rlen); + niov, kiov, offset, mlen, rlen); /* we need to lock here as recv can be called from any context */ read_lock(&kgnilnd_data.kgn_peer_conn_lock); @@ -2460,7 +2363,7 @@ kgnilnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, /* someone closed the conn after we copied this out, nuke it */ kgnilnd_consume_rx(rx); - lnet_finalize(ni, lntmsg, conn->gnc_error); + lnet_finalize(lntmsg, conn->gnc_error); RETURN(0); } read_unlock(&kgnilnd_data.kgn_peer_conn_lock); @@ -2468,8 +2371,8 @@ kgnilnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, switch (rxmsg->gnm_type) { default: GNIDBG_MSG(D_NETERROR, rxmsg, "conn %p, rx %p, rxmsg %p, lntmsg %p" - " niov=%d kiov=%p iov=%p offset=%d mlen=%d rlen=%d", - conn, rx, rxmsg, lntmsg, niov, kiov, iov, offset, mlen, rlen); + " niov=%d kiov=%p offset=%d mlen=%d rlen=%d", + conn, rx, rxmsg, lntmsg, niov, kiov, offset, mlen, rlen); LBUG(); case GNILND_MSG_IMMEDIATE: @@ -2507,7 +2410,7 @@ kgnilnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, case 2: kgnilnd_dump_blob(D_BUFFS, "bad payload checksum", &rxmsg[1], rxmsg->gnm_payload_len); - /* fall through to dump */ + fallthrough; case 1: libcfs_debug_dumplog(); break; @@ -2522,37 +2425,32 @@ kgnilnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, } } - if (kiov != NULL) - lnet_copy_flat2kiov( - niov, kiov, offset, - *kgnilnd_tunables.kgn_max_immediate, - &rxmsg[1], 0, mlen); - else - lnet_copy_flat2iov( - niov, iov, offset, - *kgnilnd_tunables.kgn_max_immediate, - &rxmsg[1], 0, mlen); + lnet_copy_flat2kiov( + niov, kiov, offset, + *kgnilnd_tunables.kgn_max_immediate, + &rxmsg[1], 0, mlen); kgnilnd_consume_rx(rx); - lnet_finalize(ni, lntmsg, 0); + lnet_finalize(lntmsg, 0); RETURN(0); case GNILND_MSG_PUT_REQ: /* LNET wants to truncate or drop transaction, sending NAK */ if (mlen == 0) { kgnilnd_consume_rx(rx); - lnet_finalize(ni, lntmsg, 0); + lnet_finalize(lntmsg, 0); /* only error if lntmsg == NULL, otherwise we are just * short circuiting the rdma process of 0 bytes */ kgnilnd_nak_rdma(conn, rxmsg->gnm_type, - lntmsg == NULL ? -ENOENT : 0, - rxmsg->gnm_u.get.gngm_cookie, - ni->ni_nid); + lntmsg == NULL ? -ENOENT : 0, + rxmsg->gnm_u.get.gngm_cookie, + lnet_nid_to_nid4(&ni->ni_nid)); RETURN(0); } /* sending ACK with sink buff. 
info */ - tx = kgnilnd_new_tx_msg(GNILND_MSG_PUT_ACK, ni->ni_nid); + tx = kgnilnd_new_tx_msg(GNILND_MSG_PUT_ACK, + lnet_nid_to_nid4(&ni->ni_nid)); if (tx == NULL) { kgnilnd_consume_rx(rx); RETURN(-ENOMEM); @@ -2563,7 +2461,8 @@ kgnilnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, GOTO(nak_put_req, rc); } - rc = kgnilnd_setup_rdma_buffer(tx, niov, iov, kiov, offset, mlen); + rc = kgnilnd_setup_rdma_buffer(tx, niov, + kiov, offset, mlen); if (rc != 0) { GOTO(nak_put_req, rc); } @@ -2590,7 +2489,9 @@ kgnilnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, nak_put_req: /* make sure we send an error back when the PUT fails */ - kgnilnd_nak_rdma(conn, rxmsg->gnm_type, rc, rxmsg->gnm_u.get.gngm_cookie, ni->ni_nid); + kgnilnd_nak_rdma(conn, rxmsg->gnm_type, rc, + rxmsg->gnm_u.get.gngm_cookie, + lnet_nid_to_nid4(&ni->ni_nid)); kgnilnd_tx_done(tx, rc); kgnilnd_consume_rx(rx); @@ -2600,20 +2501,21 @@ nak_put_req: /* LNET wants to truncate or drop transaction, sending NAK */ if (mlen == 0) { kgnilnd_consume_rx(rx); - lnet_finalize(ni, lntmsg, 0); + lnet_finalize(lntmsg, 0); /* only error if lntmsg == NULL, otherwise we are just * short circuiting the rdma process of 0 bytes */ kgnilnd_nak_rdma(conn, rxmsg->gnm_type, - lntmsg == NULL ? -ENOENT : 0, - rxmsg->gnm_u.get.gngm_cookie, - ni->ni_nid); + lntmsg == NULL ? -ENOENT : 0, + rxmsg->gnm_u.get.gngm_cookie, + lnet_nid_to_nid4(&ni->ni_nid)); RETURN(0); } /* lntmsg can be null when parsing a LNET_GET */ if (lntmsg != NULL) { /* sending ACK with sink buff. info */ - tx = kgnilnd_new_tx_msg(GNILND_MSG_GET_ACK_REV, ni->ni_nid); + tx = kgnilnd_new_tx_msg(GNILND_MSG_GET_ACK_REV, + lnet_nid_to_nid4(&ni->ni_nid)); if (tx == NULL) { kgnilnd_consume_rx(rx); RETURN(-ENOMEM); @@ -2623,12 +2525,11 @@ nak_put_req: if (rc != 0) GOTO(nak_get_req_rev, rc); - - rc = kgnilnd_setup_rdma_buffer(tx, niov, iov, kiov, offset, mlen); + rc = kgnilnd_setup_rdma_buffer(tx, niov, + kiov, offset, mlen); if (rc != 0) GOTO(nak_get_req_rev, rc); - tx->tx_msg.gnm_u.putack.gnpam_src_cookie = rxmsg->gnm_u.putreq.gnprm_cookie; tx->tx_msg.gnm_u.putack.gnpam_dst_cookie = tx->tx_id.txe_cookie; @@ -2648,9 +2549,9 @@ nak_put_req: } else { /* No match */ kgnilnd_nak_rdma(conn, rxmsg->gnm_type, - -ENOENT, - rxmsg->gnm_u.get.gngm_cookie, - ni->ni_nid); + -ENOENT, + rxmsg->gnm_u.get.gngm_cookie, + lnet_nid_to_nid4(&ni->ni_nid)); } kgnilnd_consume_rx(rx); @@ -2658,7 +2559,9 @@ nak_put_req: nak_get_req_rev: /* make sure we send an error back when the GET fails */ - kgnilnd_nak_rdma(conn, rxmsg->gnm_type, rc, rxmsg->gnm_u.get.gngm_cookie, ni->ni_nid); + kgnilnd_nak_rdma(conn, rxmsg->gnm_type, rc, + rxmsg->gnm_u.get.gngm_cookie, + lnet_nid_to_nid4(&ni->ni_nid)); kgnilnd_tx_done(tx, rc); kgnilnd_consume_rx(rx); @@ -2670,14 +2573,14 @@ nak_get_req_rev: /* LNET wants to truncate or drop transaction, sending NAK */ if (mlen == 0) { kgnilnd_consume_rx(rx); - lnet_finalize(ni, lntmsg, 0); + lnet_finalize(lntmsg, 0); /* only error if lntmsg == NULL, otherwise we are just * short circuiting the rdma process of 0 bytes */ kgnilnd_nak_rdma(conn, rxmsg->gnm_type, - lntmsg == NULL ? -ENOENT : 0, - rxmsg->gnm_u.get.gngm_cookie, - ni->ni_nid); + lntmsg == NULL ? 
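/*
 * Aside, not part of the patch: the lnet_copy_flat2kiov() call in the
 * IMMEDIATE case above scatters the payload that arrived inline in the
 * message into the receiver's fragmented buffer.  A userspace analogue of
 * that scatter copy, using plain pointers instead of bio_vec pages; the
 * struct and function names are illustrative only.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct dst_frag {
        uint8_t *buf;           /* cf. a mapped bv_page + bv_offset */
        size_t   len;           /* cf. bv_len */
};

static void copy_flat_to_frags(struct dst_frag *d, unsigned int nfrags,
                               size_t offset, const uint8_t *src, size_t nob)
{
        /* skip destination fragments wholly consumed by the offset */
        while (offset >= d->len) {
                offset -= d->len;
                d++;
                nfrags--;
        }

        while (nob > 0 && nfrags > 0) {
                size_t chunk = d->len - offset;

                if (chunk > nob)
                        chunk = nob;
                memcpy(d->buf + offset, src, chunk);
                src += chunk;
                nob -= chunk;
                offset = 0;
                d++;
                nfrags--;
        }
}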
-ENOENT : 0, + rxmsg->gnm_u.get.gngm_cookie, + lnet_nid_to_nid4(&ni->ni_nid)); RETURN(0); } @@ -2687,9 +2590,9 @@ nak_get_req_rev: } else { /* No match */ kgnilnd_nak_rdma(conn, rxmsg->gnm_type, - -ENOENT, - rxmsg->gnm_u.get.gngm_cookie, - ni->ni_nid); + -ENOENT, + rxmsg->gnm_u.get.gngm_cookie, + lnet_nid_to_nid4(&ni->ni_nid)); } kgnilnd_consume_rx(rx); RETURN(0); @@ -2700,9 +2603,9 @@ nak_get_req_rev: } else { /* No match */ kgnilnd_nak_rdma(conn, rxmsg->gnm_type, - -ENOENT, - rxmsg->gnm_u.get.gngm_cookie, - ni->ni_nid); + -ENOENT, + rxmsg->gnm_u.get.gngm_cookie, + lnet_nid_to_nid4(&ni->ni_nid)); } kgnilnd_consume_rx(rx); RETURN(0); @@ -2745,7 +2648,7 @@ kgnilnd_check_conn_timeouts_locked(kgn_conn_t *conn) if (time_after_eq(now, newest_last_rx + timeout)) { uint32_t level = D_CONSOLE|D_NETERROR; - if (conn->gnc_peer->gnp_down == GNILND_RCA_NODE_DOWN) { + if (conn->gnc_peer->gnp_state == GNILND_PEER_DOWN) { level = D_NET; } GNIDBG_CONN(level, conn, @@ -2778,7 +2681,8 @@ kgnilnd_check_conn_timeouts_locked(kgn_conn_t *conn) if (CFS_FAIL_CHECK(CFS_FAIL_GNI_NOOP_SEND)) return 0; - tx = kgnilnd_new_tx_msg(GNILND_MSG_NOOP, conn->gnc_peer->gnp_net->gnn_ni->ni_nid); + tx = kgnilnd_new_tx_msg(GNILND_MSG_NOOP, + lnet_nid_to_nid4(&conn->gnc_peer->gnp_net->gnn_ni->ni_nid)); if (tx == NULL) return 0; kgnilnd_queue_tx(conn, tx); @@ -2807,7 +2711,7 @@ kgnilnd_check_peer_timeouts_locked(kgn_peer_t *peer, struct list_head *todie, peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_reconnect_interval); - timeout = cfs_time_seconds(MAX(*kgnilnd_tunables.kgn_timeout, + timeout = cfs_time_seconds(max(*kgnilnd_tunables.kgn_timeout, GNILND_MIN_TIMEOUT)); conn = kgnilnd_find_conn_locked(peer); @@ -2821,6 +2725,14 @@ kgnilnd_check_peer_timeouts_locked(kgn_peer_t *peer, struct list_head *todie, conn->gnc_close_recvd = GNILND_CLOSE_INJECT1; conn->gnc_peer_error = -ETIMEDOUT; } + + if (*kgnilnd_tunables.kgn_to_reconn_disable && + rc == -ETIMEDOUT) { + peer->gnp_state = GNILND_PEER_TIMED_OUT; + CDEBUG(D_WARNING, "%s conn timed out, will " + "reconnect upon request from peer\n", + libcfs_nid2str(conn->gnc_peer->gnp_nid)); + } /* Once we mark closed, any of the scheduler threads could * get it and move through before we hit the fail loc code */ kgnilnd_close_conn_locked(conn, rc); @@ -2864,7 +2776,7 @@ kgnilnd_check_peer_timeouts_locked(kgn_peer_t *peer, struct list_head *todie, /* Don't reconnect if we are still trying to clear out old conns. 
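/*
 * Aside, not part of the patch: the timeout tests in
 * kgnilnd_check_conn_timeouts_locked() below compare jiffies values with
 * time_after_eq(), which stays correct across counter wrap-around because
 * it looks at the signed difference rather than at the raw values.  A
 * runnable analogue (demo_time_after_eq is an illustrative name):
 */
#include <stdbool.h>

#define demo_time_after_eq(a, b)  ((long)((a) - (b)) >= 0)

static bool conn_timed_out(unsigned long now, unsigned long last_rx,
                           unsigned long timeout)
{
        /* true once 'timeout' ticks have passed since the last receive */
        return demo_time_after_eq(now, last_rx + timeout);
}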
* This prevents us sending traffic on the new mbox before ensuring we are done * with the old one */ - reconnect = (peer->gnp_down == GNILND_RCA_NODE_UP) && + reconnect = (peer->gnp_state == GNILND_PEER_UP) && (atomic_read(&peer->gnp_dirty_eps) == 0); /* fast reconnect after a timeout */ @@ -2883,8 +2795,9 @@ kgnilnd_check_peer_timeouts_locked(kgn_peer_t *peer, struct list_head *todie, CDEBUG(D_NET, "starting connect to %s\n", libcfs_nid2str(peer->gnp_nid)); - LASSERTF(peer->gnp_connecting == GNILND_PEER_IDLE, "Peer was idle and we" - "have a write_lock, state issue %d\n", peer->gnp_connecting); + LASSERTF(peer->gnp_connecting == GNILND_PEER_IDLE, + "Peer was idle and we have a write_lock, state issue %d\n", + peer->gnp_connecting); peer->gnp_connecting = GNILND_PEER_CONNECT; kgnilnd_peer_addref(peer); /* extra ref for connd */ @@ -2974,8 +2887,6 @@ kgnilnd_check_peer_timeouts_locked(kgn_peer_t *peer, struct list_head *todie, } } } - - return; } void @@ -2983,11 +2894,8 @@ kgnilnd_reaper_check(int idx) { struct list_head *peers = &kgnilnd_data.kgn_peers[idx]; struct list_head *ctmp, *ctmpN; - struct list_head geriatrics; - struct list_head souls; - - INIT_LIST_HEAD(&geriatrics); - INIT_LIST_HEAD(&souls); + LIST_HEAD(geriatrics); + LIST_HEAD(souls); write_lock(&kgnilnd_data.kgn_peer_conn_lock); @@ -3023,7 +2931,7 @@ kgnilnd_update_reaper_timeout(long timeout) } static void -kgnilnd_reaper_poke_with_stick(unsigned long arg) +kgnilnd_reaper_poke_with_stick(cfs_timer_cb_arg_t arg) { wake_up(&kgnilnd_data.kgn_reaper_waitq); } @@ -3039,8 +2947,6 @@ kgnilnd_reaper(void *arg) struct timer_list timer; DEFINE_WAIT(wait); - cfs_block_allsigs(); - /* all gnilnd threads need to run fairly urgently */ set_user_nice(current, *kgnilnd_tunables.kgn_nice); spin_lock(&kgnilnd_data.kgn_reaper_lock); @@ -3068,8 +2974,8 @@ kgnilnd_reaper(void *arg) prepare_to_wait(&kgnilnd_data.kgn_reaper_waitq, &wait, TASK_INTERRUPTIBLE); spin_unlock(&kgnilnd_data.kgn_reaper_lock); - setup_timer(&timer, kgnilnd_reaper_poke_with_stick, - next_check_time); + cfs_timer_setup(&timer, kgnilnd_reaper_poke_with_stick, + next_check_time, 0); mod_timer(&timer, (long) jiffies + timeout); /* check flag variables before committing */ @@ -3127,8 +3033,8 @@ kgnilnd_reaper(void *arg) int kgnilnd_recv_bte_get(kgn_tx_t *tx) { unsigned niov, offset, nob; - lnet_kiov_t *kiov; - lnet_msg_t *lntmsg = tx->tx_lntmsg[0]; + struct bio_vec *kiov; + struct lnet_msg *lntmsg = tx->tx_lntmsg[0]; kgnilnd_parse_lnet_rdma(lntmsg, &niov, &offset, &nob, &kiov, tx->tx_nob_rdma); if (kiov != NULL) { @@ -3192,7 +3098,7 @@ kgnilnd_check_rdma_cq(kgn_device_t *dev) "this is bad, somehow our credits didn't protect us" " from CQ overrun\n"); LASSERTF(GNI_CQ_GET_TYPE(event_data) == GNI_CQ_EVENT_TYPE_POST, - "rrc %d, GNI_CQ_GET_TYPE("LPX64") = "LPX64"\n", rrc, + "rrc %d, GNI_CQ_GET_TYPE(%#llx) = %#llx\n", rrc, event_data, GNI_CQ_GET_TYPE(event_data)); rrc = kgnilnd_get_completed(dev->gnd_snd_rdma_cqh, event_data, @@ -3326,6 +3232,7 @@ kgnilnd_check_fma_send_cq(kgn_device_t *dev) kgn_conn_t *conn = NULL; int queued_fma, saw_reply, rc; long num_processed = 0; + struct list_head *ctmp, *ctmpN; for (;;) { /* make sure we don't keep looping if we need to reset */ @@ -3345,9 +3252,25 @@ kgnilnd_check_fma_send_cq(kgn_device_t *dev) if (rrc == GNI_RC_NOT_DONE) { CDEBUG(D_INFO, - "SMSG send CQ %d not ready (data "LPX64") " + "SMSG send CQ %d not ready (data %#llx) " "processed %ld\n", dev->gnd_id, event_data, num_processed); + + if (num_processed > 0) { + 
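/*
 * Aside, not part of the patch: the reaper's poke timer is now armed with
 * cfs_timer_setup(), the libcfs shim over the kernel's timer_setup()
 * interface in which the callback receives the struct timer_list pointer
 * and recovers its container with from_timer() (a container_of() wrapper);
 * the gnd_map_timer/gnd_rdmaq_timer callbacks at the top of the patch use
 * the same pattern via cfs_from_timer().  A minimal kernel-style sketch of
 * the upstream pattern, with hypothetical struct and function names:
 */
#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/timer.h>

struct demo_dev {
        int               id;
        struct timer_list poke_timer;
};

static void demo_timer_cb(struct timer_list *t)
{
        /* from_timer(var, t, field) == container_of(t, typeof(*var), field) */
        struct demo_dev *dev = from_timer(dev, t, poke_timer);

        pr_info("poke timer fired for dev %d\n", dev->id);
}

static void demo_arm(struct demo_dev *dev)
{
        timer_setup(&dev->poke_timer, demo_timer_cb, 0);
        mod_timer(&dev->poke_timer, jiffies + HZ);
}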
spin_lock(&dev->gnd_lock); + if (!list_empty(&dev->gnd_delay_conns)) { + list_for_each_safe(ctmp, ctmpN, &dev->gnd_delay_conns) { + conn = list_entry(ctmp, kgn_conn_t, gnc_delaylist); + list_del_init(&conn->gnc_delaylist); + CDEBUG(D_NET, "Moving Conn %p from delay queue to ready_queue\n", conn); + kgnilnd_schedule_conn_nolock(conn); + } + spin_unlock(&dev->gnd_lock); + kgnilnd_schedule_device(dev); + } else { + spin_unlock(&dev->gnd_lock); + } + } return num_processed; } @@ -3358,7 +3281,7 @@ kgnilnd_check_fma_send_cq(kgn_device_t *dev) "this is bad, somehow our credits didn't " "protect us from CQ overrun\n"); LASSERTF(GNI_CQ_GET_TYPE(event_data) == GNI_CQ_EVENT_TYPE_SMSG, - "rrc %d, GNI_CQ_GET_TYPE("LPX64") = "LPX64"\n", rrc, + "rrc %d, GNI_CQ_GET_TYPE(%#llx) = %#llx\n", rrc, event_data, GNI_CQ_GET_TYPE(event_data)); /* if SMSG couldn't handle an error, time for conn to die */ @@ -3372,7 +3295,7 @@ kgnilnd_check_fma_send_cq(kgn_device_t *dev) if (conn == NULL) { /* Conn was destroyed? */ CDEBUG(D_NET, - "SMSG CQID lookup "LPX64" failed\n", + "SMSG CQID lookup %#llx failed\n", GNI_CQ_GET_INST_ID(event_data)); write_unlock(&kgnilnd_data.kgn_peer_conn_lock); continue; @@ -3500,7 +3423,7 @@ kgnilnd_check_fma_rcv_cq(kgn_device_t *dev) kgnilnd_gl_mutex_unlock(&dev->gnd_cq_mutex); if (rrc == GNI_RC_NOT_DONE) { - CDEBUG(D_INFO, "SMSG RX CQ %d empty data "LPX64" " + CDEBUG(D_INFO, "SMSG RX CQ %d empty data %#llx " "processed %ld\n", dev->gnd_id, event_data, num_processed); return num_processed; @@ -3517,14 +3440,13 @@ kgnilnd_check_fma_rcv_cq(kgn_device_t *dev) /* set overrun too */ event_data |= (1UL << 63); LASSERTF(GNI_CQ_OVERRUN(event_data), - "(1UL << 63) is no longer the bit to" - "set to indicate CQ_OVERRUN\n"); + "(1UL << 63) is no longer the bit to set to indicate CQ_OVERRUN\n"); } } /* sender should get error event too and take care of failed transaction by re-transmitting */ if (rrc == GNI_RC_TRANSACTION_ERROR) { - CDEBUG(D_NET, "SMSG RX CQ error "LPX64"\n", event_data); + CDEBUG(D_NET, "SMSG RX CQ error %#llx\n", event_data); continue; } @@ -3533,12 +3455,12 @@ kgnilnd_check_fma_rcv_cq(kgn_device_t *dev) conn = kgnilnd_cqid2conn_locked( GNI_CQ_GET_INST_ID(event_data)); if (conn == NULL) { - CDEBUG(D_NET, "SMSG RX CQID lookup "LPU64" " - "failed, dropping event "LPX64"\n", + CDEBUG(D_NET, "SMSG RX CQID lookup %llu " + "failed, dropping event %#llx\n", GNI_CQ_GET_INST_ID(event_data), event_data); } else { - CDEBUG(D_NET, "SMSG RX: CQID "LPU64" " + CDEBUG(D_NET, "SMSG RX: CQID %llu " "conn %p->%s\n", GNI_CQ_GET_INST_ID(event_data), conn, conn->gnc_peer ? 
@@ -3768,7 +3690,8 @@ kgnilnd_process_fmaq(kgn_conn_t *conn) if (CFS_FAIL_CHECK(CFS_FAIL_GNI_NOOP_SEND)) return; - tx = kgnilnd_new_tx_msg(GNILND_MSG_NOOP, conn->gnc_peer->gnp_net->gnn_ni->ni_nid); + tx = kgnilnd_new_tx_msg(GNILND_MSG_NOOP, + lnet_nid_to_nid4(&conn->gnc_peer->gnp_net->gnn_ni->ni_nid)); if (tx != NULL) { int rc; @@ -3798,7 +3721,7 @@ kgnilnd_process_fmaq(kgn_conn_t *conn) GNITX_ASSERTF(tx, tx->tx_id.txe_smsg_id != 0, "tx with zero id", NULL); - CDEBUG(D_NET, "sending regular msg: %p, type %s(0x%02x), cookie "LPX64"\n", + CDEBUG(D_NET, "sending regular msg: %p, type %s(0x%02x), cookie %#llx\n", tx, kgnilnd_msgtype2str(tx->tx_msg.gnm_type), tx->tx_msg.gnm_type, tx->tx_id.txe_cookie); @@ -3830,7 +3753,7 @@ kgnilnd_process_fmaq(kgn_conn_t *conn) case GNILND_MSG_PUT_REQ: case GNILND_MSG_GET_REQ_REV: tx->tx_msg.gnm_u.putreq.gnprm_cookie = tx->tx_id.txe_cookie; - + fallthrough; case GNILND_MSG_PUT_ACK: case GNILND_MSG_PUT_REQ_REV: case GNILND_MSG_GET_ACK_REV: @@ -3910,10 +3833,9 @@ kgnilnd_process_rdmaq(kgn_device_t *dev) new_ok -= atomic64_read(&dev->gnd_rdmaq_bytes_out); atomic64_set(&dev->gnd_rdmaq_bytes_ok, new_ok); - CDEBUG(D_NET, "resetting rdmaq bytes to %ld, deadline +%lu -> %lu, " - "current out %ld\n", - atomic64_read(&dev->gnd_rdmaq_bytes_ok), dead_bump, dev->gnd_rdmaq_deadline, - atomic64_read(&dev->gnd_rdmaq_bytes_out)); + CDEBUG(D_NET, "resetting rdmaq bytes to %lld, deadline +%lu -> %lu, current out %lld\n", + (s64)atomic64_read(&dev->gnd_rdmaq_bytes_ok), dead_bump, dev->gnd_rdmaq_deadline, + (s64)atomic64_read(&dev->gnd_rdmaq_bytes_out)); } spin_unlock(&dev->gnd_rdmaq_lock); } @@ -3996,8 +3918,8 @@ _kgnilnd_match_reply(kgn_conn_t *conn, int type1, int type2, __u64 cookie) GNITX_ASSERTF(tx, ((tx->tx_id.txe_idx == ev_id.txe_idx) && (tx->tx_id.txe_cookie = cookie)), "conn 0x%p->%s tx_ref_table hosed: wanted " - "txe_cookie "LPX64" txe_idx %d " - "found tx %p cookie "LPX64" txe_idx %d\n", + "txe_cookie %#llx txe_idx %d " + "found tx %p cookie %#llx txe_idx %d\n", conn, libcfs_nid2str(conn->gnc_peer->gnp_nid), cookie, ev_id.txe_idx, tx, tx->tx_id.txe_cookie, tx->tx_id.txe_idx); @@ -4011,7 +3933,7 @@ _kgnilnd_match_reply(kgn_conn_t *conn, int type1, int type2, __u64 cookie) tx->tx_state, GNILND_TX_WAITING_REPLY, libcfs_nid2str(conn->gnc_peer->gnp_nid)); } else { - CWARN("Unmatched reply %02x, or %02x/"LPX64" from %s\n", + CWARN("Unmatched reply %02x, or %02x/%#llx from %s\n", type1, type2, cookie, libcfs_nid2str(conn->gnc_peer->gnp_nid)); } return tx; @@ -4039,7 +3961,7 @@ kgnilnd_complete_tx(kgn_tx_t *tx, int rc) tx->tx_state &= ~GNILND_TX_WAITING_REPLY; if (rc == -EFAULT) { - CDEBUG(D_NETERROR, "Error %d TX data: TX %p tx_id %x nob %16"LPF64"u physnop %8d buffertype %#8x MemHandle "LPX64"."LPX64"x\n", + CDEBUG(D_NETERROR, "Error %d TX data: TX %p tx_id %x nob %16llu physnop %8d buffertype %#8x MemHandle %#llx.%#llxx\n", rc, tx, id, nob, physnop, buftype, hndl.qword1, hndl.qword2); if(*kgnilnd_tunables.kgn_efault_lbug) { @@ -4100,6 +4022,8 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn) int repost = 1, saw_complete; unsigned long timestamp, newest_last_rx, timeout; int last_seq; + struct lnet_hdr hdr; + struct lnet_nid srcnid; ENTRY; /* Short circuit if the ep_handle is null. 
@@ -4204,7 +4128,7 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn) rx->grx_msg = msg; rx->grx_conn = conn; rx->grx_eager = 0; - rx->grx_received = current_kernel_time(); + ktime_get_ts64(&rx->grx_received); if (CFS_FAIL_CHECK(CFS_FAIL_GNI_NET_LOOKUP)) { rc = -ENONET; @@ -4286,7 +4210,7 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn) } if (msg->gnm_connstamp != conn->gnc_peer_connstamp) { - GNIDBG_MSG(D_NETERROR, msg, "Unexpected connstamp "LPX64"("LPX64 + GNIDBG_MSG(D_NETERROR, msg, "Unexpected connstamp %#llx(%#llx" " expected) from %s", msg->gnm_connstamp, conn->gnc_peer_connstamp, libcfs_nid2str(peer->gnp_nid)); @@ -4360,14 +4284,16 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn) case GNILND_MSG_IMMEDIATE: /* only get SMSG payload for IMMEDIATE */ atomic64_add(msg->gnm_payload_len, &conn->gnc_device->gnd_short_rxbytes); - rc = lnet_parse(net->gnn_ni, &msg->gnm_u.immediate.gnim_hdr, - msg->gnm_srcnid, rx, 0); + lnet_hdr_from_nid4(&hdr, &msg->gnm_u.immediate.gnim_hdr); + lnet_nid4_to_nid(msg->gnm_srcnid, &srcnid); + rc = lnet_parse(net->gnn_ni, &hdr, &srcnid, rx, 0); repost = rc < 0; break; case GNILND_MSG_GET_REQ_REV: case GNILND_MSG_PUT_REQ: - rc = lnet_parse(net->gnn_ni, &msg->gnm_u.putreq.gnprm_hdr, - msg->gnm_srcnid, rx, 1); + lnet_hdr_from_nid4(&hdr, &msg->gnm_u.putreq.gnprm_hdr); + lnet_nid4_to_nid(msg->gnm_srcnid, &srcnid); + rc = lnet_parse(net->gnn_ni, &hdr, &srcnid, rx, 1); repost = rc < 0; break; case GNILND_MSG_GET_NAK_REV: @@ -4465,16 +4391,16 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn) if (tx == NULL) break; - GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED || - tx->tx_buftype == GNILND_BUF_VIRT_MAPPED, + GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED, "bad tx buftype %d", tx->tx_buftype); kgnilnd_finalize_rx_done(tx, msg); break; case GNILND_MSG_PUT_REQ_REV: case GNILND_MSG_GET_REQ: - rc = lnet_parse(net->gnn_ni, &msg->gnm_u.get.gngm_hdr, - msg->gnm_srcnid, rx, 1); + lnet_hdr_from_nid4(&hdr, &msg->gnm_u.get.gngm_hdr); + lnet_nid4_to_nid(msg->gnm_srcnid, &srcnid); + rc = lnet_parse(net->gnn_ni, &hdr, &srcnid, rx, 1); repost = rc < 0; break; @@ -4484,8 +4410,7 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn) if (tx == NULL) break; - GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED || - tx->tx_buftype == GNILND_BUF_VIRT_MAPPED, + GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED, "bad tx buftype %d", tx->tx_buftype); kgnilnd_complete_tx(tx, msg->gnm_u.completion.gncm_retval); @@ -4497,8 +4422,7 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn) if (tx == NULL) break; - GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED || - tx->tx_buftype == GNILND_BUF_VIRT_MAPPED, + GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED, "bad tx buftype %d", tx->tx_buftype); lnet_set_reply_msg_len(net->gnn_ni, tx->tx_lntmsg[1], @@ -4512,8 +4436,7 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn) if (tx == NULL) break; - GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED || - tx->tx_buftype == GNILND_BUF_VIRT_MAPPED, + GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED, "bad tx buftype %d", tx->tx_buftype); kgnilnd_finalize_rx_done(tx, msg); @@ -4526,8 +4449,7 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn) if (tx == NULL) break; - GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED || - tx->tx_buftype == GNILND_BUF_VIRT_MAPPED, + GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED, "bad tx buftype %d", tx->tx_buftype); kgnilnd_finalize_rx_done(tx, msg); @@ -4539,8 +4461,7 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn) if (tx == NULL) break; - GNITX_ASSERTF(tx, 
tx->tx_buftype == GNILND_BUF_PHYS_MAPPED || - tx->tx_buftype == GNILND_BUF_VIRT_MAPPED, + GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED, "bad tx buftype %d", tx->tx_buftype); kgnilnd_complete_tx(tx, msg->gnm_u.completion.gncm_retval); @@ -4650,7 +4571,8 @@ kgnilnd_send_conn_close(kgn_conn_t *conn) if (conn->gnc_ephandle != NULL) { int rc = 0; - tx = kgnilnd_new_tx_msg(GNILND_MSG_CLOSE, conn->gnc_peer->gnp_net->gnn_ni->ni_nid); + tx = kgnilnd_new_tx_msg(GNILND_MSG_CLOSE, + lnet_nid_to_nid4(&conn->gnc_peer->gnp_net->gnn_ni->ni_nid)); if (tx != NULL) { tx->tx_msg.gnm_u.completion.gncm_retval = conn->gnc_error; tx->tx_state = GNILND_TX_WAITING_COMPLETION; @@ -4843,11 +4765,9 @@ kgnilnd_process_mapped_tx(kgn_device_t *dev) } else { GNIDBG_TX(log_retrans_level, tx, "transient map failure #%d %d pages/%d bytes phys %u@%u " - "virt %u@"LPU64" " "nq_map %d mdd# %d/%d GART %ld", dev->gnd_map_attempt, tx->tx_phys_npages, tx->tx_nob, dev->gnd_map_nphys, dev->gnd_map_physnop * PAGE_SIZE, - dev->gnd_map_nvirt, dev->gnd_map_virtnob, atomic_read(&dev->gnd_nq_map), atomic_read(&dev->gnd_n_mdd), atomic_read(&dev->gnd_n_mdd_held), atomic64_read(&dev->gnd_nbytes_map)); @@ -4888,6 +4808,12 @@ kgnilnd_process_conns(kgn_device_t *dev, unsigned long deadline) conn = list_first_entry(&dev->gnd_ready_conns, kgn_conn_t, gnc_schedlist); list_del_init(&conn->gnc_schedlist); + /* + * Since we are processing conn now, we don't need to be on the delaylist any longer. + */ + + if (!list_empty(&conn->gnc_delaylist)) + list_del_init(&conn->gnc_delaylist); spin_unlock(&dev->gnd_lock); conn_sched = xchg(&conn->gnc_scheduled, GNILND_CONN_PROCESS); @@ -4914,7 +4840,7 @@ kgnilnd_process_conns(kgn_device_t *dev, unsigned long deadline) kgnilnd_conn_decref(conn); up_write(&dev->gnd_conn_sem); } else if (rc != 1) { - kgnilnd_conn_decref(conn); + kgnilnd_conn_decref(conn); } /* clear this so that scheduler thread doesn't spin */ found_work = 0; @@ -4965,7 +4891,7 @@ kgnilnd_process_conns(kgn_device_t *dev, unsigned long deadline) kgnilnd_conn_decref(conn); up_write(&dev->gnd_conn_sem); } else if (rc != 1) { - kgnilnd_conn_decref(conn); + kgnilnd_conn_decref(conn); } /* check list again with lock held */ @@ -4994,8 +4920,6 @@ kgnilnd_scheduler(void *arg) dev = &kgnilnd_data.kgn_devices[(threadno + 1) % kgnilnd_data.kgn_ndevs]; - cfs_block_allsigs(); - /* all gnilnd threads need to run fairly urgently */ set_user_nice(current, *kgnilnd_tunables.kgn_sched_nice); deadline = jiffies + cfs_time_seconds(*kgnilnd_tunables.kgn_sched_timeout);