-/*
- * Copyright (C) 2004 Cluster File Systems, Inc.
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2004 Cluster File Systems, Inc.
*
* Copyright (C) 2009-2012 Cray, Inc.
+ */
+
+/* This file is part of Lustre, http://www.lustre.org.
*
- * Derived from work by Eric Barton <eric@bartonsoftware.com>
- * Author: James Shimek <jshimek@cray.com>
- * Author: Nic Henke <nic@cray.com>
- *
- * This file is part of Lustre, http://www.lustre.org.
- *
- * Lustre is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * Lustre is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Lustre; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
+ * Derived from work by Eric Barton <eric@bartonsoftware.com>
+ * Author: James Shimek <jshimek@cray.com>
+ * Author: Nic Henke <nic@cray.com>
*/
#include <asm/page.h>
#include <linux/nmi.h>
#include <linux/pagemap.h>
+
+#include <libcfs/linux/linux-mem.h>
+
#include "gnilnd.h"
/* this is useful when needed to debug wire corruption. */
* has come around and set ready to zero */
already_live = cmpxchg(&dev->gnd_ready, GNILND_DEV_IDLE, GNILND_DEV_IRQ);
- if (!already_live) {
- wake_up_all(&dev->gnd_waitq);
- }
+ if (!already_live)
+ wake_up(&dev->gnd_waitq);
}
-void kgnilnd_schedule_device_timer(unsigned long arg)
+void kgnilnd_schedule_device_timer(cfs_timer_cb_arg_t data)
{
- kgn_device_t *dev = (kgn_device_t *) arg;
+ kgn_device_t *dev = cfs_from_timer(dev, data, gnd_map_timer);
+
+ kgnilnd_schedule_device(dev);
+}
+
+void kgnilnd_schedule_device_timer_rd(cfs_timer_cb_arg_t data)
+{
+ kgn_device_t *dev = cfs_from_timer(dev, data, gnd_rdmaq_timer);
kgnilnd_schedule_device(dev);
}
conn_sched = xchg(&conn->gnc_scheduled, GNILND_CONN_IDLE);
LASSERTF(conn_sched == GNILND_CONN_WANTS_SCHED ||
conn_sched == GNILND_CONN_PROCESS,
- "conn %p after process in bad state: %d\n",
+ "conn %px after process in bad state: %d\n",
conn, conn_sched);
if (sched_intent >= 0) {
*/
kgnilnd_conn_addref(conn);
}
- LASSERTF(list_empty(&conn->gnc_schedlist), "conn %p already sched state %d\n",
+ LASSERTF(list_empty(&conn->gnc_schedlist),
+ "conn %px already sched state %d\n",
conn, sched);
CDEBUG(D_INFO, "scheduling conn 0x%p caller %s:%d\n", conn, caller, line);
LASSERTF((tx->tx_list_p == NULL &&
tx->tx_list_state == GNILND_TX_ALLOCD) &&
list_empty(&tx->tx_list),
- "tx %p with bad state %s (list_p %p) tx_list %s\n",
+ "tx %px with bad state %s (list_p %px) tx_list %s\n",
tx, kgnilnd_tx_state2str(tx->tx_list_state), tx->tx_list_p,
list_empty(&tx->tx_list) ? "empty" : "not empty");
/* we only allocate this if we need to */
if (tx->tx_phys != NULL) {
kmem_cache_free(kgnilnd_data.kgn_tx_phys_cache, tx->tx_phys);
- CDEBUG(D_MALLOC, "slab-freed 'tx_phys': %lu at %p.\n",
- LNET_MAX_IOV * sizeof(gni_mem_segment_t), tx->tx_phys);
+ LIBCFS_MEM_MSG(tx->tx_phys,
+ GNILND_MAX_IOV * sizeof(gni_mem_segment_t),
+ "slab-freed");
}
/* Only free the buffer if we used it */
if (tx->tx_buffer_copy != NULL) {
+ LIBCFS_MEM_MSG(tx->tx_buffer_copy, tx->tx_rdma_desc.length,
+ "vfreed");
kgnilnd_vfree(tx->tx_buffer_copy, tx->tx_rdma_desc.length);
tx->tx_buffer_copy = NULL;
- CDEBUG(D_MALLOC, "vfreed buffer2\n");
}
#if 0
KGNILND_POISON(tx, 0x5a, sizeof(kgn_tx_t));
#endif
- CDEBUG(D_MALLOC, "slab-freed 'tx': %lu at %p.\n", sizeof(*tx), tx);
+ LIBCFS_MEM_MSG(tx, sizeof(*tx), "slab-freed");
kmem_cache_free(kgnilnd_data.kgn_tx_cache, tx);
}
if (CFS_FAIL_CHECK(CFS_FAIL_GNI_ALLOC_TX))
return tx;
- tx = kmem_cache_alloc(kgnilnd_data.kgn_tx_cache, GFP_ATOMIC);
+ tx = kmem_cache_zalloc(kgnilnd_data.kgn_tx_cache, GFP_ATOMIC);
if (tx == NULL) {
CERROR("failed to allocate tx\n");
return NULL;
}
- CDEBUG(D_MALLOC, "slab-alloced 'tx': %lu at %p.\n",
- sizeof(*tx), tx);
-
- /* need this memset, cache alloc'd memory is not cleared */
- memset(tx, 0, sizeof(*tx));
+ LIBCFS_MEM_MSG(tx, sizeof(*tx), "slab-alloced");
/* setup everything here to minimize time under the lock */
tx->tx_buftype = GNILND_BUF_NONE;
#define _kgnilnd_cksum(seed, ptr, nob) csum_partial(ptr, nob, seed)
/* we don't use offset as every one is passing a buffer reference that already
- * includes the offset into the base address -
- * see kgnilnd_setup_virt_buffer and kgnilnd_setup_immediate_buffer */
+ * includes the offset into the base address.
+ */
static inline __u16
kgnilnd_cksum(void *ptr, size_t nob)
{
}
__u16
-kgnilnd_cksum_kiov(unsigned int nkiov, lnet_kiov_t *kiov,
- unsigned int offset, unsigned int nob, int dump_blob)
+kgnilnd_cksum_kiov(unsigned int nkiov, struct bio_vec *kiov,
+ unsigned int offset, unsigned int nob, int dump_blob)
{
__wsum cksum = 0;
__wsum tmpck;
/* if loops changes, please change kgnilnd_setup_phys_buffer */
- while (offset >= kiov->kiov_len) {
- offset -= kiov->kiov_len;
+ while (offset >= kiov->bv_len) {
+ offset -= kiov->bv_len;
nkiov--;
kiov++;
LASSERT(nkiov > 0);
}
- /* ignore nob here, if nob < (kiov_len - offset), kiov == 1 */
- odd = (unsigned long) (kiov[0].kiov_len - offset) & 1;
+ /* ignore nob here, if nob < (bv_len - offset), kiov == 1 */
+ odd = (unsigned long) (kiov[0].bv_len - offset) & 1;
if ((odd || *kgnilnd_tunables.kgn_vmap_cksum) && nkiov > 1) {
struct page **pages = kgnilnd_data.kgn_cksum_map_pages[get_cpu()];
get_cpu(), kgnilnd_data.kgn_cksum_map_pages);
CDEBUG(D_BUFFS, "odd %d len %u offset %u nob %u\n",
- odd, kiov[0].kiov_len, offset, nob);
+ odd, kiov[0].bv_len, offset, nob);
for (i = 0; i < nkiov; i++) {
- pages[i] = kiov[i].kiov_page;
+ pages[i] = kiov[i].bv_page;
}
addr = vmap(pages, nkiov, VM_MAP, PAGE_KERNEL);
}
atomic_inc(&kgnilnd_data.kgn_nvmap_cksum);
- tmpck = _kgnilnd_cksum(0, (void *) addr + kiov[0].kiov_offset + offset, nob);
+ tmpck = _kgnilnd_cksum(0, ((void *) addr + kiov[0].bv_offset +
+ offset), nob);
cksum = tmpck;
if (dump_blob) {
kgnilnd_dump_blob(D_BUFFS, "flat kiov RDMA payload",
- (void *)addr + kiov[0].kiov_offset + offset, nob);
+ (void *)addr + kiov[0].bv_offset +
+ offset, nob);
}
CDEBUG(D_BUFFS, "cksum 0x%x (+0x%x) for addr 0x%p+%u len %u offset %u\n",
- cksum, tmpck, addr, kiov[0].kiov_offset, nob, offset);
+ cksum, tmpck, addr, kiov[0].bv_offset, nob, offset);
vunmap(addr);
} else {
do {
- fraglen = min(kiov->kiov_len - offset, nob);
+ fraglen = min(kiov->bv_len - offset, nob);
/* make dang sure we don't send a bogus checksum if somehow we get
* an odd length fragment on anything but the last entry in a kiov -
* we know from kgnilnd_setup_rdma_buffer that we can't have non
* PAGE_SIZE pages in the middle, so if nob < PAGE_SIZE, it is the last one */
LASSERTF(!(fraglen&1) || (nob < PAGE_SIZE),
- "odd fraglen %u on nkiov %d, nob %u kiov_len %u offset %u kiov 0x%p\n",
- fraglen, nkiov, nob, kiov->kiov_len, offset, kiov);
+ "odd fraglen %u on nkiov %d, nob %u bv_len %u offset %u kiov 0x%p\n",
+ fraglen, nkiov, nob, kiov->bv_len,
+ offset, kiov);
- addr = (void *)kmap(kiov->kiov_page) + kiov->kiov_offset + offset;
+ addr = (void *)kmap(kiov->bv_page) + kiov->bv_offset +
+ offset;
tmpck = _kgnilnd_cksum(cksum, addr, fraglen);
CDEBUG(D_BUFFS,
"cksum 0x%x (+0x%x) for page 0x%p+%u (0x%p) len %u offset %u\n",
- cksum, tmpck, kiov->kiov_page, kiov->kiov_offset, addr,
- fraglen, offset);
+ cksum, tmpck, kiov->bv_page, kiov->bv_offset,
+ addr, fraglen, offset);
cksum = tmpck;
if (dump_blob)
kgnilnd_dump_blob(D_BUFFS, "kiov cksum", addr, fraglen);
- kunmap(kiov->kiov_page);
+ kunmap(kiov->bv_page);
kiov++;
nkiov--;
kgnilnd_queue_tx(conn, tx);
}
-int
+static int
kgnilnd_setup_immediate_buffer(kgn_tx_t *tx, unsigned int niov,
- struct kvec *iov, lnet_kiov_t *kiov,
+ struct bio_vec *kiov,
unsigned int offset, unsigned int nob)
{
kgn_msg_t *msg = &tx->tx_msg;
if (nob == 0) {
tx->tx_buffer = NULL;
- } else if (kiov != NULL) {
-
- if ((niov > 0) && unlikely(niov > (nob/PAGE_SIZE))) {
- niov = ((nob + offset + kiov->kiov_offset + PAGE_SIZE - 1) /
- PAGE_SIZE);
- }
+ } else {
- LASSERTF(niov > 0 && niov < GNILND_MAX_IMMEDIATE/PAGE_SIZE,
- "bad niov %d msg %p kiov %p iov %p offset %d nob%d\n",
- niov, msg, kiov, iov, offset, nob);
+ if (niov && niov > (nob >> PAGE_SHIFT))
+ niov = DIV_ROUND_UP(nob + offset + kiov->bv_offset,
+ PAGE_SIZE);
- while (offset >= kiov->kiov_len) {
- offset -= kiov->kiov_len;
+ while (offset >= kiov->bv_len) {
+ offset -= kiov->bv_len;
niov--;
kiov++;
LASSERT(niov > 0);
}
+
+ LASSERTF(niov > 0 && niov < GNILND_MAX_IMMEDIATE/PAGE_SIZE,
+ "bad niov %d msg %px kiov %px offset %d nob%d\n",
+ niov, msg, kiov, offset, nob);
+
for (i = 0; i < niov; i++) {
- /* We can't have a kiov_offset on anything but the first entry,
- * otherwise we'll have a hole at the end of the mapping as we only map
- * whole pages.
- * Also, if we have a kiov_len < PAGE_SIZE but we need to map more
- * than kiov_len, we will also have a whole at the end of that page
- * which isn't allowed */
- if ((kiov[i].kiov_offset != 0 && i > 0) ||
- (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_SIZE && i < niov - 1)) {
- CNETERR("Can't make payload contiguous in I/O VM: page %d, offset %u, nob %u, kiov_offset %u, kiov_len %u\n",
- i, offset, nob, kiov->kiov_offset, kiov->kiov_len);
+ /* We can't have a bv_offset on anything but the first
+ * entry, otherwise we'll have a hole at the end of the
+ * mapping as we only map whole pages.
+ * Also, if we have a bv_len < PAGE_SIZE but we need to
+ * map more than bv_len, we will also have a hole at
+ * the end of that page which isn't allowed
+ */
+ if ((kiov[i].bv_offset != 0 && i > 0) ||
+ (kiov[i].bv_offset + kiov[i].bv_len != PAGE_SIZE &&
+ i < niov - 1)) {
+ CNETERR("Can't make payload contiguous in I/O VM:page %d, offset %u, nob %u, bv_offset %u bv_len %u\n",
+ i, offset, nob, kiov->bv_offset,
+ kiov->bv_len);
RETURN(-EINVAL);
}
- tx->tx_imm_pages[i] = kiov[i].kiov_page;
+ tx->tx_imm_pages[i] = kiov[i].bv_page;
}
/* hijack tx_phys for the later unmap */
if (niov == 1) {
/* tx->phyx being equal to NULL is the signal for unmap to discern between kmap and vmap */
tx->tx_phys = NULL;
- tx->tx_buffer = (void *)kmap(tx->tx_imm_pages[0]) + kiov[0].kiov_offset + offset;
+ tx->tx_buffer = (void *)kmap(tx->tx_imm_pages[0]) +
+ kiov[0].bv_offset + offset;
atomic_inc(&kgnilnd_data.kgn_nkmap_short);
GNIDBG_TX(D_NET, tx, "kmapped page for %d bytes for kiov 0x%p, buffer 0x%p",
nob, kiov, tx->tx_buffer);
}
atomic_inc(&kgnilnd_data.kgn_nvmap_short);
- /* make sure we take into account the kiov offset as the start of the buffer */
- tx->tx_buffer = (void *)tx->tx_phys + kiov[0].kiov_offset + offset;
- GNIDBG_TX(D_NET, tx, "mapped %d pages for %d bytes from kiov 0x%p to 0x%p, buffer 0x%p",
- niov, nob, kiov, tx->tx_phys, tx->tx_buffer);
+ /* make sure we take into account the kiov offset as the
+ * start of the buffer
+ */
+ tx->tx_buffer = (void *)tx->tx_phys + kiov[0].bv_offset
+ + offset;
+ GNIDBG_TX(D_NET, tx,
+ "mapped %d pages for %d bytes from kiov 0x%p to 0x%p, buffer 0x%p",
+ niov, nob, kiov, tx->tx_phys, tx->tx_buffer);
}
tx->tx_buftype = GNILND_BUF_IMMEDIATE_KIOV;
tx->tx_nob = nob;
- } else {
- /* For now this is almost identical to kgnilnd_setup_virt_buffer, but we
- * could "flatten" the payload into a single contiguous buffer ready
- * for sending direct over an FMA if we ever needed to. */
-
- LASSERT(niov > 0);
-
- while (offset >= iov->iov_len) {
- offset -= iov->iov_len;
- niov--;
- iov++;
- LASSERT(niov > 0);
- }
-
- if (nob > iov->iov_len - offset) {
- CERROR("Can't handle multiple vaddr fragments\n");
- return -EMSGSIZE;
- }
-
- tx->tx_buffer = (void *)(((unsigned long)iov->iov_base) + offset);
-
- tx->tx_buftype = GNILND_BUF_IMMEDIATE;
- tx->tx_nob = nob;
}
/* checksum payload early - it shouldn't be changing after lnd_send */
}
int
-kgnilnd_setup_virt_buffer(kgn_tx_t *tx,
- unsigned int niov, struct kvec *iov,
- unsigned int offset, unsigned int nob)
-
-{
- LASSERT(nob > 0);
- LASSERT(niov > 0);
- LASSERT(tx->tx_buftype == GNILND_BUF_NONE);
-
- while (offset >= iov->iov_len) {
- offset -= iov->iov_len;
- niov--;
- iov++;
- LASSERT(niov > 0);
- }
-
- if (nob > iov->iov_len - offset) {
- CERROR("Can't handle multiple vaddr fragments\n");
- return -EMSGSIZE;
- }
-
- tx->tx_buftype = GNILND_BUF_VIRT_UNMAPPED;
- tx->tx_nob = nob;
- tx->tx_buffer = (void *)(((unsigned long)iov->iov_base) + offset);
- return 0;
-}
-
-int
-kgnilnd_setup_phys_buffer(kgn_tx_t *tx, int nkiov, lnet_kiov_t *kiov,
+kgnilnd_setup_phys_buffer(kgn_tx_t *tx, int nkiov, struct bio_vec *kiov,
unsigned int offset, unsigned int nob)
{
gni_mem_segment_t *phys;
GOTO(error, rc);
}
- CDEBUG(D_MALLOC, "slab-alloced 'tx->tx_phys': %lu at %p.\n",
- LNET_MAX_IOV * sizeof(gni_mem_segment_t), tx->tx_phys);
+ LIBCFS_MEM_MSG(tx->tx_phys,
+ GNILND_MAX_IOV * sizeof(gni_mem_segment_t),
+ "slab-alloced");
/* if loops changes, please change kgnilnd_cksum_kiov
* and kgnilnd_setup_immediate_buffer */
- while (offset >= kiov->kiov_len) {
- offset -= kiov->kiov_len;
+ while (offset >= kiov->bv_len) {
+ offset -= kiov->bv_len;
nkiov--;
kiov++;
LASSERT(nkiov > 0);
tx->tx_buftype = GNILND_BUF_PHYS_UNMAPPED;
tx->tx_nob = nob;
- /* kiov_offset is start of 'valid' buffer, so index offset past that */
- tx->tx_buffer = (void *)((unsigned long)(kiov->kiov_offset + offset));
+ /* bv_offset is start of 'valid' buffer, so index offset past that */
+ tx->tx_buffer = (void *)((unsigned long)(kiov->bv_offset + offset));
phys = tx->tx_phys;
CDEBUG(D_NET, "tx 0x%p buffer 0x%p map start kiov 0x%p+%u niov %d offset %u\n",
- tx, tx->tx_buffer, kiov, kiov->kiov_offset, nkiov, offset);
+ tx, tx->tx_buffer, kiov, kiov->bv_offset, nkiov, offset);
do {
- fraglen = min(kiov->kiov_len - offset, nob);
-
- /* We can't have a kiov_offset on anything but the first entry,
- * otherwise we'll have a hole at the end of the mapping as we only map
- * whole pages. Only the first page is allowed to have an offset -
- * we'll add that into tx->tx_buffer and that will get used when we
- * map in the segments (see kgnilnd_map_buffer).
- * Also, if we have a kiov_len < PAGE_SIZE but we need to map more
- * than kiov_len, we will also have a whole at the end of that page
- * which isn't allowed */
+ fraglen = min(kiov->bv_len - offset, nob);
+
+ /* We can't have a bv_offset on anything but the first entry,
+ * otherwise we'll have a hole at the end of the mapping as we
+ * only map whole pages. Only the first page is allowed to
+ * have an offset - we'll add that into tx->tx_buffer and that
+ * will get used when we map in the segments (see
+ * kgnilnd_map_buffer). Also, if we have a bv_len < PAGE_SIZE
+ * but we need to map more than bv_len, we will also have a
+ * hole at the end of that page which isn't allowed
+ */
if ((phys != tx->tx_phys) &&
- ((kiov->kiov_offset != 0) ||
- ((kiov->kiov_len < PAGE_SIZE) && (nob > kiov->kiov_len)))) {
- CERROR("Can't make payload contiguous in I/O VM: page %d, offset %u, nob %u, kiov_offset %u, kiov_len %u\n",
+ ((kiov->bv_offset != 0) ||
+ ((kiov->bv_len < PAGE_SIZE) && (nob > kiov->bv_len)))) {
+ CERROR("Can't make payload contiguous in I/O VM:page %d, offset %u, nob %u, bv_offset %u bv_len %u\n",
(int)(phys - tx->tx_phys),
- offset, nob, kiov->kiov_offset, kiov->kiov_len);
+ offset, nob, kiov->bv_offset, kiov->bv_len);
rc = -EINVAL;
GOTO(error, rc);
}
- if ((phys - tx->tx_phys) == LNET_MAX_IOV) {
+ if ((phys - tx->tx_phys) == GNILND_MAX_IOV) {
CERROR ("payload too big (%d)\n", (int)(phys - tx->tx_phys));
rc = -EMSGSIZE;
GOTO(error, rc);
GOTO(error, rc);
}
- CDEBUG(D_BUFFS, "page 0x%p kiov_offset %u kiov_len %u nob %u "
- "nkiov %u offset %u\n",
- kiov->kiov_page, kiov->kiov_offset, kiov->kiov_len, nob, nkiov, offset);
+ CDEBUG(D_BUFFS,
+ "page 0x%p bv_offset %u bv_len %u nob %u nkiov %u offset %u\n",
+ kiov->bv_page, kiov->bv_offset, kiov->bv_len, nob, nkiov,
+ offset);
- phys->address = page_to_phys(kiov->kiov_page);
+ phys->address = page_to_phys(kiov->bv_page);
phys++;
kiov++;
nkiov--;
error:
if (tx->tx_phys != NULL) {
kmem_cache_free(kgnilnd_data.kgn_tx_phys_cache, tx->tx_phys);
- CDEBUG(D_MALLOC, "slab-freed 'tx_phys': %lu at %p.\n",
- sizeof(*tx->tx_phys), tx->tx_phys);
+ LIBCFS_MEM_MSG(tx->tx_phys, sizeof(*tx->tx_phys), "slab-freed");
tx->tx_phys = NULL;
}
return rc;
static inline int
kgnilnd_setup_rdma_buffer(kgn_tx_t *tx, unsigned int niov,
- struct kvec *iov, lnet_kiov_t *kiov,
+ struct bio_vec *kiov,
unsigned int offset, unsigned int nob)
{
- int rc;
-
- LASSERTF((iov == NULL) != (kiov == NULL), "iov 0x%p, kiov 0x%p, tx 0x%p,"
- " offset %d, nob %d, niov %d\n"
- , iov, kiov, tx, offset, nob, niov);
-
- if (kiov != NULL) {
- rc = kgnilnd_setup_phys_buffer(tx, niov, kiov, offset, nob);
- } else {
- rc = kgnilnd_setup_virt_buffer(tx, niov, iov, offset, nob);
- }
- return rc;
+ return kgnilnd_setup_phys_buffer(tx, niov, kiov, offset, nob);
}
/* kgnilnd_parse_lnet_rdma()
static void
kgnilnd_parse_lnet_rdma(struct lnet_msg *lntmsg, unsigned int *niov,
unsigned int *offset, unsigned int *nob,
- lnet_kiov_t **kiov, int put_len)
+ struct bio_vec **kiov, int put_len)
{
/* GETs are weird, see kgnilnd_send */
if (lntmsg->msg_type == LNET_MSG_GET) {
if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0) {
*kiov = NULL;
} else {
- *kiov = lntmsg->msg_md->md_iov.kiov;
+ *kiov = lntmsg->msg_md->md_kiov;
}
*niov = lntmsg->msg_md->md_niov;
*nob = lntmsg->msg_md->md_length;
static inline void
kgnilnd_compute_rdma_cksum(kgn_tx_t *tx, int put_len)
{
- unsigned int niov, offset, nob;
- lnet_kiov_t *kiov;
- struct lnet_msg *lntmsg = tx->tx_lntmsg[0];
- int dump_cksum = (*kgnilnd_tunables.kgn_checksum_dump > 1);
+ unsigned int niov, offset, nob;
+ struct bio_vec *kiov;
+ struct lnet_msg *lntmsg = tx->tx_lntmsg[0];
+ int dump_cksum = (*kgnilnd_tunables.kgn_checksum_dump > 1);
GNITX_ASSERTF(tx, ((tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE) ||
(tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE) ||
int rc = 0;
__u16 cksum;
unsigned int niov, offset, nob;
- lnet_kiov_t *kiov;
- struct lnet_msg *lntmsg = tx->tx_lntmsg[0];
+ struct bio_vec *kiov;
+ struct lnet_msg *lntmsg = tx->tx_lntmsg[0];
int dump_on_err = *kgnilnd_tunables.kgn_checksum_dump;
/* we can only match certain requests */
kgnilnd_dump_blob(D_BUFFS, "RDMA payload",
tx->tx_buffer, nob);
}
- /* fall through to dump log */
+ fallthrough;
case 1:
libcfs_debug_dumplog();
break;
dev->gnd_map_nphys++;
dev->gnd_map_physnop += tx->tx_phys_npages;
break;
-
- case GNILND_BUF_VIRT_MAPPED:
- bytes = tx->tx_nob;
- dev->gnd_map_nvirt++;
- dev->gnd_map_virtnob += tx->tx_nob;
- break;
}
if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_ACK ||
dev->gnd_map_nphys--;
dev->gnd_map_physnop -= tx->tx_phys_npages;
break;
-
- case GNILND_BUF_VIRT_UNMAPPED:
- bytes = tx->tx_nob;
- dev->gnd_map_nvirt--;
- dev->gnd_map_virtnob -= tx->tx_nob;
- break;
}
if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_ACK ||
tx->tx_msg.gnm_type == GNILND_MSG_GET_REQ) {
atomic64_sub(bytes, &dev->gnd_rdmaq_bytes_out);
LASSERTF(atomic64_read(&dev->gnd_rdmaq_bytes_out) >= 0,
- "bytes_out negative! %ld\n", atomic64_read(&dev->gnd_rdmaq_bytes_out));
+ "bytes_out negative! %lld\n",
+ (s64)atomic64_read(&dev->gnd_rdmaq_bytes_out));
GNIDBG_TX(D_NETTRACE, tx, "rdma -- %d to %lld",
- bytes, atomic64_read(&dev->gnd_rdmaq_bytes_out));
+ bytes, (s64)atomic64_read(&dev->gnd_rdmaq_bytes_out));
}
atomic_dec(&dev->gnd_n_mdd);
* that our concurrency doesn't result in the kgn_device_t
* getting nuked while we are in here */
- LASSERTF(conn != NULL, "tx %p with NULL conn, someone forgot"
+ LASSERTF(conn != NULL, "tx %px with NULL conn, someone forgot"
" to set tx_conn before calling %s\n", tx, __FUNCTION__);
if (unlikely(CFS_FAIL_CHECK(CFS_FAIL_GNI_MAP_TX)))
case GNILND_BUF_IMMEDIATE:
case GNILND_BUF_IMMEDIATE_KIOV:
case GNILND_BUF_PHYS_MAPPED:
- case GNILND_BUF_VIRT_MAPPED:
return 0;
case GNILND_BUF_PHYS_UNMAPPED:
* - this needs to turn into a non-fatal error soon to allow
* GART resource, etc starvation handling */
if (rrc != GNI_RC_SUCCESS) {
- GNIDBG_TX(D_NET, tx, "Can't map %d pages: dev %d "
- "phys %u pp %u, virt %u nob %llu",
+ GNIDBG_TX(D_NET, tx,
+ "Can't map %d pages: dev %d phys %u pp %u",
tx->tx_phys_npages, dev->gnd_id,
- dev->gnd_map_nphys, dev->gnd_map_physnop,
- dev->gnd_map_nvirt, dev->gnd_map_virtnob);
+ dev->gnd_map_nphys, dev->gnd_map_physnop);
RETURN(rrc == GNI_RC_ERROR_RESOURCE ? -ENOMEM : -EINVAL);
}
tx->tx_buftype = GNILND_BUF_PHYS_MAPPED;
kgnilnd_mem_add_map_list(dev, tx);
return 0;
-
- case GNILND_BUF_VIRT_UNMAPPED:
- rrc = kgnilnd_mem_register(dev->gnd_handle,
- (__u64)tx->tx_buffer, tx->tx_nob,
- NULL, flags, &tx->tx_map_key);
- if (rrc != GNI_RC_SUCCESS) {
- GNIDBG_TX(D_NET, tx, "Can't map %u bytes: dev %d "
- "phys %u pp %u, virt %u nob %llu",
- tx->tx_nob, dev->gnd_id,
- dev->gnd_map_nphys, dev->gnd_map_physnop,
- dev->gnd_map_nvirt, dev->gnd_map_virtnob);
- RETURN(rrc == GNI_RC_ERROR_RESOURCE ? -ENOMEM : -EINVAL);
- }
-
- tx->tx_buftype = GNILND_BUF_VIRT_MAPPED;
- kgnilnd_mem_add_map_list(dev, tx);
- if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_ACK ||
- tx->tx_msg.gnm_type == GNILND_MSG_GET_REQ) {
- atomic64_add(tx->tx_nob, &dev->gnd_rdmaq_bytes_out);
- GNIDBG_TX(D_NETTRACE, tx, "rdma ++ %d to %ld\n",
- tx->tx_nob, atomic64_read(&dev->gnd_rdmaq_bytes_out));
- }
-
- return 0;
}
}
/* code below relies on +1 relationship ... */
BUILD_BUG_ON(GNILND_BUF_PHYS_MAPPED !=
(GNILND_BUF_PHYS_UNMAPPED + 1));
- BUILD_BUG_ON(GNILND_BUF_VIRT_MAPPED !=
- (GNILND_BUF_VIRT_UNMAPPED + 1));
switch (tx->tx_buftype) {
default:
case GNILND_BUF_NONE:
case GNILND_BUF_IMMEDIATE:
case GNILND_BUF_PHYS_UNMAPPED:
- case GNILND_BUF_VIRT_UNMAPPED:
break;
case GNILND_BUF_IMMEDIATE_KIOV:
if (tx->tx_phys != NULL) {
break;
case GNILND_BUF_PHYS_MAPPED:
- case GNILND_BUF_VIRT_MAPPED:
LASSERT(tx->tx_conn != NULL);
dev = tx->tx_conn->gnc_device;
LASSERTF(test_and_clear_bit(tx->tx_id.txe_idx,
(volatile unsigned long *)&conn->gnc_tx_bits),
- "conn %p tx %p bit %d already cleared\n",
+ "conn %px tx %px bit %d already cleared\n",
conn, tx, tx->tx_id.txe_idx);
LASSERTF(conn->gnc_tx_ref_table[tx->tx_id.txe_idx] != NULL,
* that we fill up our mailbox, we'll keep trying to resend that msg
* until we exceed the max_retrans _or_ gnc_last_rx expires, indicating
* that he hasn't send us any traffic in return */
-
+
/* some reasonable throttling of the debug message */
if (log_retrans) {
unsigned long now = jiffies;
/* it was sent, break out of switch to avoid default case of queueing */
break;
}
- /* needs to queue to try again, so fall through to default case */
+ /* needs to queue to try again, so... */
+ fallthrough;
case GNILND_MSG_NOOP:
/* Just make sure this goes out first for this conn */
add_tail = 0;
- /* fall through... */
+ fallthrough;
default:
spin_lock(&conn->gnc_list_lock);
kgnilnd_tx_add_state_locked(tx, conn->gnc_peer, conn, GNILND_TX_FMAQ, add_tail);
}
void
-kgnilnd_launch_tx(kgn_tx_t *tx, kgn_net_t *net, struct lnet_process_id *target)
+kgnilnd_launch_tx(kgn_tx_t *tx, kgn_net_t *net, struct lnet_processid *target)
{
kgn_peer_t *peer;
kgn_peer_t *new_peer = NULL;
* failure on any problems */
GNITX_ASSERTF(tx, tx->tx_conn == NULL,
- "tx already has connection %p", tx->tx_conn);
+ "tx already has connection %px", tx->tx_conn);
/* do all of the peer & conn searching in one swoop - this avoids
* nastiness when dropping locks and needing to maintain a sane state
/* I expect to find him, so only take a read lock */
read_lock(&kgnilnd_data.kgn_peer_conn_lock);
- peer = kgnilnd_find_peer_locked(target->nid);
+ peer = kgnilnd_find_peer_locked(lnet_nid_to_nid4(&target->nid));
if (peer != NULL) {
conn = kgnilnd_find_conn_locked(peer);
/* this could be NULL during quiesce */
CFS_RACE(CFS_FAIL_GNI_FIND_TARGET);
- node_state = kgnilnd_get_node_state(LNET_NIDADDR(target->nid));
+ node_state = kgnilnd_get_node_state(ntohl(target->nid.nid_addr[0]));
/* NB - this will not block during normal operations -
* the only writer of this is in the startup/shutdown path. */
/* ignore previous peer entirely - we cycled the lock, so we
* will create new peer and at worst drop it if peer is still
* in the tables */
- rc = kgnilnd_create_peer_safe(&new_peer, target->nid, net, node_state);
+ rc = kgnilnd_create_peer_safe(&new_peer, lnet_nid_to_nid4(&target->nid),
+ net, node_state);
if (rc != 0) {
up_read(&kgnilnd_data.kgn_net_rw_sem);
GOTO(no_peer, rc);
/* search for peer again now that we have the lock
* if we don't find it, add our new one to the list */
- kgnilnd_add_peer_locked(target->nid, new_peer, &peer);
+ kgnilnd_add_peer_locked(lnet_nid_to_nid4(&target->nid), new_peer,
+ &peer);
/* don't create a connection if the peer is not up */
if (peer->gnp_state != GNILND_PEER_UP) {
void *desc_buffer = tx->tx_buffer;
gni_mem_handle_t desc_map_key = tx->tx_map_key;
LASSERTF(kgnilnd_tx_mapped(tx),
- "unmapped tx %p\n", tx);
+ "unmapped tx %px\n", tx);
LASSERTF(conn != NULL,
- "NULL conn on tx %p, naughty, naughty\n", tx);
+ "NULL conn on tx %px, naughty, naughty\n", tx);
LASSERTF(nob <= sink->gnrd_nob,
- "nob %u > sink->gnrd_nob %d (%p)\n",
+ "nob %u > sink->gnrd_nob %d (%px)\n",
nob, sink->gnrd_nob, sink);
LASSERTF(nob <= tx->tx_nob,
- "nob %d > tx(%p)->tx_nob %d\n",
+ "nob %d > tx(%px)->tx_nob %d\n",
nob, tx, tx->tx_nob);
switch (type) {
}
/* Don't lie (CLOSE == RDMA idle) */
- LASSERTF(!conn->gnc_close_sent, "tx %p on conn %p after close sent %d\n",
+ LASSERTF(!conn->gnc_close_sent,
+ "tx %px on conn %px after close sent %d\n",
tx, conn, conn->gnc_close_sent);
GNIDBG_TX(D_NET, tx, "Post RDMA type 0x%02x conn %p dlvr_mode "
CERROR("failed to allocate rx\n");
return NULL;
}
- CDEBUG(D_MALLOC, "slab-alloced 'rx': %lu at %p.\n",
- sizeof(*rx), rx);
+ LIBCFS_MEM_MSG(rx, sizeof(*rx), "slab-alloced");
/* no memset to zero, we'll always fill all members */
return rx;
kgnilnd_release_msg(conn);
}
+ LIBCFS_MEM_MSG(rx, sizeof(*rx), "slab-freed");
kmem_cache_free(kgnilnd_data.kgn_rx_cache, rx);
- CDEBUG(D_MALLOC, "slab-freed 'rx': %lu at %p.\n",
- sizeof(*rx), rx);
}
int
{
struct lnet_hdr *hdr = &lntmsg->msg_hdr;
int type = lntmsg->msg_type;
- struct lnet_process_id target = lntmsg->msg_target;
+ struct lnet_processid *target = &lntmsg->msg_target;
int target_is_router = lntmsg->msg_target_is_router;
int routing = lntmsg->msg_routing;
unsigned int niov = lntmsg->msg_niov;
- struct kvec *iov = lntmsg->msg_iov;
- lnet_kiov_t *kiov = lntmsg->msg_kiov;
+ struct bio_vec *kiov = lntmsg->msg_kiov;
unsigned int offset = lntmsg->msg_offset;
unsigned int nob = lntmsg->msg_len;
unsigned int msg_vmflush = lntmsg->msg_vmflush;
kgn_net_t *net = ni->ni_data;
kgn_tx_t *tx;
int rc = 0;
- int mpflag = 0;
+ /* '1' for consistency with code that checks !mpflag to restore */
+ unsigned int mpflag = 1;
int reverse_rdma_flag = *kgnilnd_tunables.kgn_reverse_rdma;
/* NB 'private' is different depending on what we're sending.... */
LASSERT(!in_interrupt());
CDEBUG(D_NET, "sending msg type %d with %d bytes in %d frags to %s\n",
- type, nob, niov, libcfs_id2str(target));
+ type, nob, niov, libcfs_idstr(target));
LASSERTF(nob == 0 || niov > 0,
- "lntmsg %p nob %d niov %d\n", lntmsg, nob, niov);
- LASSERTF(niov <= LNET_MAX_IOV,
- "lntmsg %p niov %d\n", lntmsg, niov);
-
- /* payload is either all vaddrs or all pages */
- LASSERTF(!(kiov != NULL && iov != NULL),
- "lntmsg %p kiov %p iov %p\n", lntmsg, kiov, iov);
+ "lntmsg %px nob %d niov %d\n", lntmsg, nob, niov);
if (msg_vmflush)
- mpflag = cfs_memory_pressure_get_and_set();
+ mpflag = memalloc_noreclaim_save();
switch (type) {
default:
LBUG();
case LNET_MSG_ACK:
- LASSERTF(nob == 0, "lntmsg %p nob %d\n",
+ LASSERTF(nob == 0, "lntmsg %px nob %d\n",
lntmsg, nob);
break;
break;
if ((reverse_rdma_flag & GNILND_REVERSE_GET) == 0)
- tx = kgnilnd_new_tx_msg(GNILND_MSG_GET_REQ, ni->ni_nid);
+ tx = kgnilnd_new_tx_msg(GNILND_MSG_GET_REQ,
+ lnet_nid_to_nid4(&ni->ni_nid));
else
- tx = kgnilnd_new_tx_msg(GNILND_MSG_GET_REQ_REV, ni->ni_nid);
+ tx = kgnilnd_new_tx_msg(GNILND_MSG_GET_REQ_REV,
+ lnet_nid_to_nid4(&ni->ni_nid));
if (tx == NULL) {
rc = -ENOMEM;
goto out;
}
- /* slightly different options as we might actually have a GET with a
- * MD_KIOV set but a non-NULL md_iov.iov */
- if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0)
- rc = kgnilnd_setup_rdma_buffer(tx, lntmsg->msg_md->md_niov,
- lntmsg->msg_md->md_iov.iov, NULL,
- 0, lntmsg->msg_md->md_length);
- else
- rc = kgnilnd_setup_rdma_buffer(tx, lntmsg->msg_md->md_niov,
- NULL, lntmsg->msg_md->md_iov.kiov,
- 0, lntmsg->msg_md->md_length);
+ rc = kgnilnd_setup_rdma_buffer(tx, lntmsg->msg_md->md_niov,
+ lntmsg->msg_md->md_kiov,
+ 0, lntmsg->msg_md->md_length);
if (rc != 0) {
CERROR("unable to setup buffer: %d\n", rc);
kgnilnd_tx_done(tx, rc);
tx->tx_lntmsg[1] = lnet_create_reply_msg(ni, lntmsg);
if (tx->tx_lntmsg[1] == NULL) {
CERROR("Can't create reply for GET to %s\n",
- libcfs_nid2str(target.nid));
+ libcfs_nidstr(&target->nid));
kgnilnd_tx_done(tx, rc);
rc = -EIO;
goto out;
tx->tx_lntmsg[0] = lntmsg;
if ((reverse_rdma_flag & GNILND_REVERSE_GET) == 0)
- tx->tx_msg.gnm_u.get.gngm_hdr = *hdr;
+ lnet_hdr_to_nid4(hdr, &tx->tx_msg.gnm_u.get.gngm_hdr);
else
- tx->tx_msg.gnm_u.putreq.gnprm_hdr = *hdr;
+ lnet_hdr_to_nid4(hdr,
+ &tx->tx_msg.gnm_u.putreq.gnprm_hdr);
/* rest of tx_msg is setup just before it is sent */
- kgnilnd_launch_tx(tx, net, &target);
+ kgnilnd_launch_tx(tx, net, target);
goto out;
case LNET_MSG_REPLY:
case LNET_MSG_PUT:
break;
if ((reverse_rdma_flag & GNILND_REVERSE_PUT) == 0)
- tx = kgnilnd_new_tx_msg(GNILND_MSG_PUT_REQ, ni->ni_nid);
+ tx = kgnilnd_new_tx_msg(GNILND_MSG_PUT_REQ,
+ lnet_nid_to_nid4(&ni->ni_nid));
else
- tx = kgnilnd_new_tx_msg(GNILND_MSG_PUT_REQ_REV, ni->ni_nid);
+ tx = kgnilnd_new_tx_msg(GNILND_MSG_PUT_REQ_REV,
+ lnet_nid_to_nid4(&ni->ni_nid));
if (tx == NULL) {
rc = -ENOMEM;
goto out;
}
- rc = kgnilnd_setup_rdma_buffer(tx, niov, iov, kiov, offset, nob);
+ rc = kgnilnd_setup_rdma_buffer(tx, niov,
+ kiov, offset, nob);
if (rc != 0) {
kgnilnd_tx_done(tx, rc);
rc = -EIO;
tx->tx_lntmsg[0] = lntmsg;
if ((reverse_rdma_flag & GNILND_REVERSE_PUT) == 0)
- tx->tx_msg.gnm_u.putreq.gnprm_hdr = *hdr;
+ lnet_hdr_to_nid4(hdr,
+ &tx->tx_msg.gnm_u.putreq.gnprm_hdr);
else
- tx->tx_msg.gnm_u.get.gngm_hdr = *hdr;
+ lnet_hdr_to_nid4(hdr, &tx->tx_msg.gnm_u.get.gngm_hdr);
/* rest of tx_msg is setup just before it is sent */
- kgnilnd_launch_tx(tx, net, &target);
+ kgnilnd_launch_tx(tx, net, target);
goto out;
}
LASSERTF(nob <= *kgnilnd_tunables.kgn_max_immediate,
"lntmsg 0x%p too large %d\n", lntmsg, nob);
- tx = kgnilnd_new_tx_msg(GNILND_MSG_IMMEDIATE, ni->ni_nid);
+ tx = kgnilnd_new_tx_msg(GNILND_MSG_IMMEDIATE,
+ lnet_nid_to_nid4(&ni->ni_nid));
if (tx == NULL) {
rc = -ENOMEM;
goto out;
}
- rc = kgnilnd_setup_immediate_buffer(tx, niov, iov, kiov, offset, nob);
+ rc = kgnilnd_setup_immediate_buffer(tx, niov, kiov, offset, nob);
if (rc != 0) {
kgnilnd_tx_done(tx, rc);
goto out;
}
- tx->tx_msg.gnm_u.immediate.gnim_hdr = *hdr;
+ lnet_hdr_to_nid4(hdr, &tx->tx_msg.gnm_u.immediate.gnim_hdr);
tx->tx_lntmsg[0] = lntmsg;
- kgnilnd_launch_tx(tx, net, &target);
+ kgnilnd_launch_tx(tx, net, target);
out:
/* use stored value as we could have already finalized lntmsg here from a failed launch */
if (msg_vmflush)
- cfs_memory_pressure_restore(mpflag);
+ memalloc_noreclaim_restore(mpflag);
return rc;
}
kgn_conn_t *conn = rx->grx_conn;
kgn_msg_t *rxmsg = rx->grx_msg;
unsigned int niov = lntmsg->msg_niov;
- struct kvec *iov = lntmsg->msg_iov;
- lnet_kiov_t *kiov = lntmsg->msg_kiov;
+ struct bio_vec *kiov = lntmsg->msg_kiov;
unsigned int offset = lntmsg->msg_offset;
unsigned int nob = lntmsg->msg_len;
int done_type;
LBUG();
}
- tx = kgnilnd_new_tx_msg(done_type, ni->ni_nid);
+ tx = kgnilnd_new_tx_msg(done_type, lnet_nid_to_nid4(&ni->ni_nid));
if (tx == NULL)
goto failed_0;
if (rc != 0)
goto failed_1;
- rc = kgnilnd_setup_rdma_buffer(tx, niov, iov, kiov, offset, nob);
+ rc = kgnilnd_setup_rdma_buffer(tx, niov, kiov, offset, nob);
if (rc != 0)
goto failed_1;
failed_1:
kgnilnd_tx_done(tx, rc);
- kgnilnd_nak_rdma(conn, done_type, rc, rxmsg->gnm_u.get.gngm_cookie, ni->ni_nid);
+ kgnilnd_nak_rdma(conn, done_type, rc, rxmsg->gnm_u.get.gngm_cookie,
+ lnet_nid_to_nid4(&ni->ni_nid));
failed_0:
lnet_finalize(lntmsg, rc);
}
CERROR("Couldnt find matching peer %p or conn %p / %p\n",
peer, conn, found_conn);
if (found_conn) {
- CERROR("Unexpected connstamp %#llx(%#llx expected)"
- " from %s", rxmsg->gnm_connstamp,
+ CERROR("Unexpected connstamp %#llx(%#llx expected) from %s\n",
+ rxmsg->gnm_connstamp,
found_conn->gnc_peer_connstamp,
libcfs_nid2str(peer->gnp_nid));
}
int
kgnilnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
int delayed, unsigned int niov,
- struct kvec *iov, lnet_kiov_t *kiov,
+ struct bio_vec *kiov,
unsigned int offset, unsigned int mlen, unsigned int rlen)
{
kgn_rx_t *rx = private;
LASSERT(!in_interrupt());
LASSERTF(mlen <= rlen, "%d <= %d\n", mlen, rlen);
- /* Either all pages or all vaddrs */
- LASSERTF(!(kiov != NULL && iov != NULL), "kiov %p iov %p\n",
- kiov, iov);
GNIDBG_MSG(D_NET, rxmsg, "conn %p, rxmsg %p, lntmsg %p"
- " niov=%d kiov=%p iov=%p offset=%d mlen=%d rlen=%d",
+ " niov=%d kiov=%p offset=%d mlen=%d rlen=%d",
conn, rxmsg, lntmsg,
- niov, kiov, iov, offset, mlen, rlen);
+ niov, kiov, offset, mlen, rlen);
/* we need to lock here as recv can be called from any context */
read_lock(&kgnilnd_data.kgn_peer_conn_lock);
switch (rxmsg->gnm_type) {
default:
GNIDBG_MSG(D_NETERROR, rxmsg, "conn %p, rx %p, rxmsg %p, lntmsg %p"
- " niov=%d kiov=%p iov=%p offset=%d mlen=%d rlen=%d",
- conn, rx, rxmsg, lntmsg, niov, kiov, iov, offset, mlen, rlen);
+ " niov=%d kiov=%p offset=%d mlen=%d rlen=%d",
+ conn, rx, rxmsg, lntmsg, niov, kiov, offset, mlen, rlen);
LBUG();
case GNILND_MSG_IMMEDIATE:
case 2:
kgnilnd_dump_blob(D_BUFFS, "bad payload checksum",
&rxmsg[1], rxmsg->gnm_payload_len);
- /* fall through to dump */
+ fallthrough;
case 1:
libcfs_debug_dumplog();
break;
}
}
- if (kiov != NULL)
- lnet_copy_flat2kiov(
- niov, kiov, offset,
- *kgnilnd_tunables.kgn_max_immediate,
- &rxmsg[1], 0, mlen);
- else
- lnet_copy_flat2iov(
- niov, iov, offset,
- *kgnilnd_tunables.kgn_max_immediate,
- &rxmsg[1], 0, mlen);
+ lnet_copy_flat2kiov(
+ niov, kiov, offset,
+ *kgnilnd_tunables.kgn_max_immediate,
+ &rxmsg[1], 0, mlen);
kgnilnd_consume_rx(rx);
lnet_finalize(lntmsg, 0);
/* only error if lntmsg == NULL, otherwise we are just
* short circuiting the rdma process of 0 bytes */
kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
- lntmsg == NULL ? -ENOENT : 0,
- rxmsg->gnm_u.get.gngm_cookie,
- ni->ni_nid);
+ lntmsg == NULL ? -ENOENT : 0,
+ rxmsg->gnm_u.get.gngm_cookie,
+ lnet_nid_to_nid4(&ni->ni_nid));
RETURN(0);
}
/* sending ACK with sink buff. info */
- tx = kgnilnd_new_tx_msg(GNILND_MSG_PUT_ACK, ni->ni_nid);
+ tx = kgnilnd_new_tx_msg(GNILND_MSG_PUT_ACK,
+ lnet_nid_to_nid4(&ni->ni_nid));
if (tx == NULL) {
kgnilnd_consume_rx(rx);
RETURN(-ENOMEM);
GOTO(nak_put_req, rc);
}
- rc = kgnilnd_setup_rdma_buffer(tx, niov, iov, kiov, offset, mlen);
+ rc = kgnilnd_setup_rdma_buffer(tx, niov,
+ kiov, offset, mlen);
if (rc != 0) {
GOTO(nak_put_req, rc);
}
nak_put_req:
/* make sure we send an error back when the PUT fails */
- kgnilnd_nak_rdma(conn, rxmsg->gnm_type, rc, rxmsg->gnm_u.get.gngm_cookie, ni->ni_nid);
+ kgnilnd_nak_rdma(conn, rxmsg->gnm_type, rc,
+ rxmsg->gnm_u.get.gngm_cookie,
+ lnet_nid_to_nid4(&ni->ni_nid));
kgnilnd_tx_done(tx, rc);
kgnilnd_consume_rx(rx);
/* only error if lntmsg == NULL, otherwise we are just
* short circuiting the rdma process of 0 bytes */
kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
- lntmsg == NULL ? -ENOENT : 0,
- rxmsg->gnm_u.get.gngm_cookie,
- ni->ni_nid);
+ lntmsg == NULL ? -ENOENT : 0,
+ rxmsg->gnm_u.get.gngm_cookie,
+ lnet_nid_to_nid4(&ni->ni_nid));
RETURN(0);
}
/* lntmsg can be null when parsing a LNET_GET */
if (lntmsg != NULL) {
/* sending ACK with sink buff. info */
- tx = kgnilnd_new_tx_msg(GNILND_MSG_GET_ACK_REV, ni->ni_nid);
+ tx = kgnilnd_new_tx_msg(GNILND_MSG_GET_ACK_REV,
+ lnet_nid_to_nid4(&ni->ni_nid));
if (tx == NULL) {
kgnilnd_consume_rx(rx);
RETURN(-ENOMEM);
if (rc != 0)
GOTO(nak_get_req_rev, rc);
-
- rc = kgnilnd_setup_rdma_buffer(tx, niov, iov, kiov, offset, mlen);
+ rc = kgnilnd_setup_rdma_buffer(tx, niov,
+ kiov, offset, mlen);
if (rc != 0)
GOTO(nak_get_req_rev, rc);
-
tx->tx_msg.gnm_u.putack.gnpam_src_cookie =
rxmsg->gnm_u.putreq.gnprm_cookie;
tx->tx_msg.gnm_u.putack.gnpam_dst_cookie = tx->tx_id.txe_cookie;
} else {
/* No match */
kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
- -ENOENT,
- rxmsg->gnm_u.get.gngm_cookie,
- ni->ni_nid);
+ -ENOENT,
+ rxmsg->gnm_u.get.gngm_cookie,
+ lnet_nid_to_nid4(&ni->ni_nid));
}
kgnilnd_consume_rx(rx);
nak_get_req_rev:
/* make sure we send an error back when the GET fails */
- kgnilnd_nak_rdma(conn, rxmsg->gnm_type, rc, rxmsg->gnm_u.get.gngm_cookie, ni->ni_nid);
+ kgnilnd_nak_rdma(conn, rxmsg->gnm_type, rc,
+ rxmsg->gnm_u.get.gngm_cookie,
+ lnet_nid_to_nid4(&ni->ni_nid));
kgnilnd_tx_done(tx, rc);
kgnilnd_consume_rx(rx);
/* only error if lntmsg == NULL, otherwise we are just
* short circuiting the rdma process of 0 bytes */
kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
- lntmsg == NULL ? -ENOENT : 0,
- rxmsg->gnm_u.get.gngm_cookie,
- ni->ni_nid);
+ lntmsg == NULL ? -ENOENT : 0,
+ rxmsg->gnm_u.get.gngm_cookie,
+ lnet_nid_to_nid4(&ni->ni_nid));
RETURN(0);
}
} else {
/* No match */
kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
- -ENOENT,
- rxmsg->gnm_u.get.gngm_cookie,
- ni->ni_nid);
+ -ENOENT,
+ rxmsg->gnm_u.get.gngm_cookie,
+ lnet_nid_to_nid4(&ni->ni_nid));
}
kgnilnd_consume_rx(rx);
RETURN(0);
} else {
/* No match */
kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
- -ENOENT,
- rxmsg->gnm_u.get.gngm_cookie,
- ni->ni_nid);
+ -ENOENT,
+ rxmsg->gnm_u.get.gngm_cookie,
+ lnet_nid_to_nid4(&ni->ni_nid));
}
kgnilnd_consume_rx(rx);
RETURN(0);
if (CFS_FAIL_CHECK(CFS_FAIL_GNI_NOOP_SEND))
return 0;
- tx = kgnilnd_new_tx_msg(GNILND_MSG_NOOP, conn->gnc_peer->gnp_net->gnn_ni->ni_nid);
+ tx = kgnilnd_new_tx_msg(GNILND_MSG_NOOP,
+ lnet_nid_to_nid4(&conn->gnc_peer->gnp_net->gnn_ni->ni_nid));
if (tx == NULL)
return 0;
kgnilnd_queue_tx(conn, tx);
}
static void
-kgnilnd_reaper_poke_with_stick(unsigned long arg)
+kgnilnd_reaper_poke_with_stick(cfs_timer_cb_arg_t arg)
{
wake_up(&kgnilnd_data.kgn_reaper_waitq);
}
prepare_to_wait(&kgnilnd_data.kgn_reaper_waitq, &wait,
TASK_INTERRUPTIBLE);
spin_unlock(&kgnilnd_data.kgn_reaper_lock);
- setup_timer(&timer, kgnilnd_reaper_poke_with_stick,
- next_check_time);
+ cfs_timer_setup(&timer, kgnilnd_reaper_poke_with_stick,
+ next_check_time, 0);
mod_timer(&timer, (long) jiffies + timeout);
/* check flag variables before committing */
CDEBUG(D_INFO, "awake after schedule\n");
}
- del_singleshot_timer_sync(&timer);
+ timer_delete_sync(&timer);
spin_lock(&kgnilnd_data.kgn_reaper_lock);
finish_wait(&kgnilnd_data.kgn_reaper_waitq, &wait);
continue;
int
kgnilnd_recv_bte_get(kgn_tx_t *tx) {
unsigned niov, offset, nob;
- lnet_kiov_t *kiov;
+ struct bio_vec *kiov;
struct lnet_msg *lntmsg = tx->tx_lntmsg[0];
kgnilnd_parse_lnet_rdma(lntmsg, &niov, &offset, &nob, &kiov, tx->tx_nob_rdma);
tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE ||
tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE_REV ||
tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE_REV,
- "tx %p with type %d\n", tx, tx->tx_msg.gnm_type);
+ "tx %px with type %d\n", tx, tx->tx_msg.gnm_type);
GNIDBG_TX(D_NET, tx, "RDMA completion for %d bytes", tx->tx_nob);
if (conn->gnc_ephandle == NULL)
return;
- LASSERTF(!conn->gnc_close_sent, "Conn %p close was sent\n", conn);
+ LASSERTF(!conn->gnc_close_sent, "Conn %px close was sent\n", conn);
spin_lock(&conn->gnc_list_lock);
if (CFS_FAIL_CHECK(CFS_FAIL_GNI_NOOP_SEND))
return;
- tx = kgnilnd_new_tx_msg(GNILND_MSG_NOOP, conn->gnc_peer->gnp_net->gnn_ni->ni_nid);
+ tx = kgnilnd_new_tx_msg(GNILND_MSG_NOOP,
+ lnet_nid_to_nid4(&conn->gnc_peer->gnp_net->gnn_ni->ni_nid));
if (tx != NULL) {
int rc;
case GNILND_MSG_PUT_REQ:
case GNILND_MSG_GET_REQ_REV:
tx->tx_msg.gnm_u.putreq.gnprm_cookie = tx->tx_id.txe_cookie;
-
+ fallthrough;
case GNILND_MSG_PUT_ACK:
case GNILND_MSG_PUT_REQ_REV:
case GNILND_MSG_GET_ACK_REV:
/* if we think we need to adjust, take lock to serialize and recheck */
spin_lock(&dev->gnd_rdmaq_lock);
if (time_after_eq(jiffies, dev->gnd_rdmaq_deadline)) {
- del_singleshot_timer_sync(&dev->gnd_rdmaq_timer);
+ timer_delete_sync(&dev->gnd_rdmaq_timer);
dead_bump = cfs_time_seconds(1) / *kgnilnd_tunables.kgn_rdmaq_intervals;
new_ok -= atomic64_read(&dev->gnd_rdmaq_bytes_out);
atomic64_set(&dev->gnd_rdmaq_bytes_ok, new_ok);
- CDEBUG(D_NET, "resetting rdmaq bytes to %ld, deadline +%lu -> %lu, "
- "current out %ld\n",
- atomic64_read(&dev->gnd_rdmaq_bytes_ok), dead_bump, dev->gnd_rdmaq_deadline,
- atomic64_read(&dev->gnd_rdmaq_bytes_out));
+ CDEBUG(D_NET, "resetting rdmaq bytes to %lld, deadline +%lu -> %lu, current out %lld\n",
+ (s64)atomic64_read(&dev->gnd_rdmaq_bytes_ok), dead_bump, dev->gnd_rdmaq_deadline,
+ (s64)atomic64_read(&dev->gnd_rdmaq_bytes_out));
}
spin_unlock(&dev->gnd_rdmaq_lock);
}
GNITX_ASSERTF(tx, ((tx->tx_id.txe_idx == ev_id.txe_idx) &&
(tx->tx_id.txe_cookie = cookie)),
- "conn 0x%p->%s tx_ref_table hosed: wanted "
- "txe_cookie %#llx txe_idx %d "
- "found tx %p cookie %#llx txe_idx %d\n",
+ "conn 0x%p->%s tx_ref_table hosed: wanted txe_cookie %#llx txe_idx %d found tx %px cookie %#llx txe_idx %d\n",
conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
cookie, ev_id.txe_idx,
tx, tx->tx_id.txe_cookie, tx->tx_id.txe_idx);
int repost = 1, saw_complete;
unsigned long timestamp, newest_last_rx, timeout;
int last_seq;
+ struct lnet_hdr hdr;
+ struct lnet_nid srcnid;
ENTRY;
/* Short circuit if the ep_handle is null.
}
LASSERTF(rrc == GNI_RC_SUCCESS,
- "bad rc %d on conn %p from peer %s\n",
+ "bad rc %d on conn %px from peer %s\n",
rrc, conn, libcfs_nid2str(peer->gnp_nid));
msg = (kgn_msg_t *)prefix;
rx->grx_msg = msg;
rx->grx_conn = conn;
rx->grx_eager = 0;
- rx->grx_received = current_kernel_time();
+ ktime_get_ts64(&rx->grx_received);
if (CFS_FAIL_CHECK(CFS_FAIL_GNI_NET_LOOKUP)) {
rc = -ENONET;
case GNILND_MSG_IMMEDIATE:
/* only get SMSG payload for IMMEDIATE */
atomic64_add(msg->gnm_payload_len, &conn->gnc_device->gnd_short_rxbytes);
- rc = lnet_parse(net->gnn_ni, &msg->gnm_u.immediate.gnim_hdr,
- msg->gnm_srcnid, rx, 0);
+ lnet_hdr_from_nid4(&hdr, &msg->gnm_u.immediate.gnim_hdr);
+ lnet_nid4_to_nid(msg->gnm_srcnid, &srcnid);
+ rc = lnet_parse(net->gnn_ni, &hdr, &srcnid, rx, 0);
repost = rc < 0;
break;
case GNILND_MSG_GET_REQ_REV:
case GNILND_MSG_PUT_REQ:
- rc = lnet_parse(net->gnn_ni, &msg->gnm_u.putreq.gnprm_hdr,
- msg->gnm_srcnid, rx, 1);
+ lnet_hdr_from_nid4(&hdr, &msg->gnm_u.putreq.gnprm_hdr);
+ lnet_nid4_to_nid(msg->gnm_srcnid, &srcnid);
+ rc = lnet_parse(net->gnn_ni, &hdr, &srcnid, rx, 1);
repost = rc < 0;
break;
case GNILND_MSG_GET_NAK_REV:
if (tx == NULL)
break;
- GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED ||
- tx->tx_buftype == GNILND_BUF_VIRT_MAPPED,
+ GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED,
"bad tx buftype %d", tx->tx_buftype);
kgnilnd_finalize_rx_done(tx, msg);
break;
case GNILND_MSG_PUT_REQ_REV:
case GNILND_MSG_GET_REQ:
- rc = lnet_parse(net->gnn_ni, &msg->gnm_u.get.gngm_hdr,
- msg->gnm_srcnid, rx, 1);
+ lnet_hdr_from_nid4(&hdr, &msg->gnm_u.get.gngm_hdr);
+ lnet_nid4_to_nid(msg->gnm_srcnid, &srcnid);
+ rc = lnet_parse(net->gnn_ni, &hdr, &srcnid, rx, 1);
repost = rc < 0;
break;
if (tx == NULL)
break;
- GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED ||
- tx->tx_buftype == GNILND_BUF_VIRT_MAPPED,
+ GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED,
"bad tx buftype %d", tx->tx_buftype);
kgnilnd_complete_tx(tx, msg->gnm_u.completion.gncm_retval);
if (tx == NULL)
break;
- GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED ||
- tx->tx_buftype == GNILND_BUF_VIRT_MAPPED,
+ GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED,
"bad tx buftype %d", tx->tx_buftype);
lnet_set_reply_msg_len(net->gnn_ni, tx->tx_lntmsg[1],
if (tx == NULL)
break;
- GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED ||
- tx->tx_buftype == GNILND_BUF_VIRT_MAPPED,
+ GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED,
"bad tx buftype %d", tx->tx_buftype);
kgnilnd_finalize_rx_done(tx, msg);
if (tx == NULL)
break;
- GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED ||
- tx->tx_buftype == GNILND_BUF_VIRT_MAPPED,
+ GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED,
"bad tx buftype %d", tx->tx_buftype);
kgnilnd_finalize_rx_done(tx, msg);
if (tx == NULL)
break;
- GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED ||
- tx->tx_buftype == GNILND_BUF_VIRT_MAPPED,
+ GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED,
"bad tx buftype %d", tx->tx_buftype);
kgnilnd_complete_tx(tx, msg->gnm_u.completion.gncm_retval);
if (conn->gnc_ephandle != NULL) {
int rc = 0;
- tx = kgnilnd_new_tx_msg(GNILND_MSG_CLOSE, conn->gnc_peer->gnp_net->gnn_ni->ni_nid);
+ tx = kgnilnd_new_tx_msg(GNILND_MSG_CLOSE,
+ lnet_nid_to_nid4(&conn->gnc_peer->gnp_net->gnn_ni->ni_nid));
if (tx != NULL) {
tx->tx_msg.gnm_u.completion.gncm_retval = conn->gnc_error;
tx->tx_state = GNILND_TX_WAITING_COMPLETION;
spin_lock(&dev->gnd_lock);
if (list_empty(&dev->gnd_map_tx)) {
/* if the list is empty make sure we dont have a timer running */
- del_singleshot_timer_sync(&dev->gnd_map_timer);
+ timer_delete_sync(&dev->gnd_map_timer);
spin_unlock(&dev->gnd_lock);
RETURN(0);
}
}
/* delete the previous timer if it exists */
- del_singleshot_timer_sync(&dev->gnd_map_timer);
+ timer_delete_sync(&dev->gnd_map_timer);
/* stash the last map version to let us know when a good one was seen */
last_map_version = dev->gnd_map_version;
} else {
GNIDBG_TX(log_retrans_level, tx,
"transient map failure #%d %d pages/%d bytes phys %u@%u "
- "virt %u@%llu "
"nq_map %d mdd# %d/%d GART %ld",
dev->gnd_map_attempt, tx->tx_phys_npages, tx->tx_nob,
dev->gnd_map_nphys, dev->gnd_map_physnop * PAGE_SIZE,
- dev->gnd_map_nvirt, dev->gnd_map_virtnob,
atomic_read(&dev->gnd_nq_map),
atomic_read(&dev->gnd_n_mdd), atomic_read(&dev->gnd_n_mdd_held),
atomic64_read(&dev->gnd_nbytes_map));
conn = list_first_entry(&dev->gnd_ready_conns, kgn_conn_t, gnc_schedlist);
list_del_init(&conn->gnc_schedlist);
- /*
+ /*
* Since we are processing conn now, we don't need to be on the delaylist any longer.
*/
LASSERTF(conn_sched != GNILND_CONN_IDLE &&
conn_sched != GNILND_CONN_PROCESS,
- "conn %p on ready list but in bad state: %d\n",
+ "conn %px on ready list but in bad state: %d\n",
conn, conn_sched);
CDEBUG(D_INFO, "conn %p@%s for processing\n",